Coverage Report

Created: 2025-08-29 06:29

/src/WasmEdge/lib/llvm/compiler.cpp
  Line| Count|Source
     1|      |// SPDX-License-Identifier: Apache-2.0
     2|      |// SPDX-FileCopyrightText: 2019-2024 Second State INC
     3|      |
     4|      |#include "llvm/compiler.h"
     5|      |
     6|      |#include "aot/version.h"
     7|      |#include "common/defines.h"
     8|      |#include "common/filesystem.h"
     9|      |#include "common/spdlog.h"
    10|      |#include "data.h"
    11|      |#include "llvm.h"
    12|      |#include "system/allocator.h"
    13|      |
    14|      |#include <algorithm>
    15|      |#include <array>
    16|      |#include <cinttypes>
    17|      |#include <cstdint>
    18|      |#include <cstdlib>
    19|      |#include <limits>
    20|      |#include <memory>
    21|      |#include <numeric>
    22|      |#include <string>
    23|      |#include <string_view>
    24|      |#include <system_error>
    25|      |
    26|      |namespace LLVM = WasmEdge::LLVM;
    27|      |using namespace std::literals;
    28|      |
    29|      |namespace {
    30|      |
    31|      |static bool
    32|      |isVoidReturn(WasmEdge::Span<const WasmEdge::ValType> ValTypes) noexcept;
    33|      |static LLVM::Type toLLVMType(LLVM::Context LLContext,
    34|      |                             const WasmEdge::ValType &ValType) noexcept;
    35|      |static std::vector<LLVM::Type>
    36|      |toLLVMArgsType(LLVM::Context LLContext, LLVM::Type ExecCtxPtrTy,
    37|      |               WasmEdge::Span<const WasmEdge::ValType> ValTypes) noexcept;
    38|      |static LLVM::Type
    39|      |toLLVMRetsType(LLVM::Context LLContext,
    40|      |               WasmEdge::Span<const WasmEdge::ValType> ValTypes) noexcept;
    41|      |static LLVM::Type
    42|      |toLLVMType(LLVM::Context LLContext, LLVM::Type ExecCtxPtrTy,
    43|      |           const WasmEdge::AST::FunctionType &FuncType) noexcept;
    44|      |static LLVM::Value
    45|      |toLLVMConstantZero(LLVM::Context LLContext,
    46|      |                   const WasmEdge::ValType &ValType) noexcept;
    47|      |static std::vector<LLVM::Value> unpackStruct(LLVM::Builder &Builder,
    48|      |                                             LLVM::Value Struct) noexcept;
    49|      |class FunctionCompiler;
    50|      |
    51|      |// XXX: Misalignment handler not implemented yet, forcing unalignment
    52|      |// force unalignment load/store
    53|      |static inline constexpr const bool kForceUnalignment = true;
    54|      |
    55|      |// force checking div/rem on zero
    56|      |static inline constexpr const bool kForceDivCheck = true;
    57|      |
    58|      |// Size of a ValVariant
    59|      |static inline constexpr const uint32_t kValSize = sizeof(WasmEdge::ValVariant);
    60|      |
    61|      |// Translate Compiler::OptimizationLevel to llvm::PassBuilder version
    62|      |#if LLVM_VERSION_MAJOR >= 13
    63|      |static inline const char *
    64|      |toLLVMLevel(WasmEdge::CompilerConfigure::OptimizationLevel Level) noexcept {
    65|      |  using OL = WasmEdge::CompilerConfigure::OptimizationLevel;
    66|      |  switch (Level) {
    67|      |  case OL::O0:
    68|      |    return "default<O0>,function(tailcallelim)";
    69|      |  case OL::O1:
    70|      |    return "default<O1>,function(tailcallelim)";
    71|      |  case OL::O2:
    72|      |    return "default<O2>";
    73|      |  case OL::O3:
    74|      |    return "default<O3>";
    75|      |  case OL::Os:
    76|      |    return "default<Os>";
    77|      |  case OL::Oz:
    78|      |    return "default<Oz>";
    79|      |  default:
    80|      |    assumingUnreachable();
    81|      |  }
    82|      |}
    83|      |#else
    84|      |static inline std::pair<unsigned int, unsigned int>
    85| 1.96k|toLLVMLevel(WasmEdge::CompilerConfigure::OptimizationLevel Level) noexcept {
    86| 1.96k|  using OL = WasmEdge::CompilerConfigure::OptimizationLevel;
    87| 1.96k|  switch (Level) {
    88|     0|  case OL::O0:
    89|     0|    return {0, 0};
    90|     0|  case OL::O1:
    91|     0|    return {1, 0};
    92|     0|  case OL::O2:
    93|     0|    return {2, 0};
    94| 1.96k|  case OL::O3:
    95| 1.96k|    return {3, 0};
    96|     0|  case OL::Os:
    97|     0|    return {2, 1};
    98|     0|  case OL::Oz:
    99|     0|    return {2, 2};
   100|     0|  default:
   101|     0|    assumingUnreachable();
   102| 1.96k|  }
   103| 1.96k|}
   104|      |#endif
   105|      |
   106|      |static inline LLVMCodeGenOptLevel toLLVMCodeGenLevel(
   107| 1.96k|    WasmEdge::CompilerConfigure::OptimizationLevel Level) noexcept {
   108| 1.96k|  using OL = WasmEdge::CompilerConfigure::OptimizationLevel;
   109| 1.96k|  switch (Level) {
   110|     0|  case OL::O0:
   111|     0|    return LLVMCodeGenLevelNone;
   112|     0|  case OL::O1:
   113|     0|    return LLVMCodeGenLevelLess;
   114|     0|  case OL::O2:
   115|     0|    return LLVMCodeGenLevelDefault;
   116| 1.96k|  case OL::O3:
   117| 1.96k|    return LLVMCodeGenLevelAggressive;
   118|     0|  case OL::Os:
   119|     0|    return LLVMCodeGenLevelDefault;
   120|     0|  case OL::Oz:
   121|     0|    return LLVMCodeGenLevelDefault;
   122|     0|  default:
   123|     0|    assumingUnreachable();
   124| 1.96k|  }
   125| 1.96k|}
   126|      |} // namespace
   127|      |
   128|      |struct LLVM::Compiler::CompileContext {
   129|      |  LLVM::Context LLContext;
   130|      |  LLVM::Module &LLModule;
   131|      |  LLVM::Attribute Cold;
   132|      |  LLVM::Attribute NoAlias;
   133|      |  LLVM::Attribute NoInline;
   134|      |  LLVM::Attribute NoReturn;
   135|      |  LLVM::Attribute ReadOnly;
   136|      |  LLVM::Attribute StrictFP;
   137|      |  LLVM::Attribute UWTable;
   138|      |  LLVM::Attribute NoStackArgProbe;
   139|      |  LLVM::Type VoidTy;
   140|      |  LLVM::Type Int8Ty;
   141|      |  LLVM::Type Int16Ty;
   142|      |  LLVM::Type Int32Ty;
   143|      |  LLVM::Type Int64Ty;
   144|      |  LLVM::Type Int128Ty;
   145|      |  LLVM::Type FloatTy;
   146|      |  LLVM::Type DoubleTy;
   147|      |  LLVM::Type Int8x16Ty;
   148|      |  LLVM::Type Int16x8Ty;
   149|      |  LLVM::Type Int32x4Ty;
   150|      |  LLVM::Type Floatx4Ty;
   151|      |  LLVM::Type Int64x2Ty;
   152|      |  LLVM::Type Doublex2Ty;
   153|      |  LLVM::Type Int128x1Ty;
   154|      |  LLVM::Type Int8PtrTy;
   155|      |  LLVM::Type Int32PtrTy;
   156|      |  LLVM::Type Int64PtrTy;
   157|      |  LLVM::Type Int128PtrTy;
   158|      |  LLVM::Type Int8PtrPtrTy;
   159|      |  LLVM::Type ExecCtxTy;
   160|      |  LLVM::Type ExecCtxPtrTy;
   161|      |  LLVM::Type IntrinsicsTableTy;
   162|      |  LLVM::Type IntrinsicsTablePtrTy;
   163|      |  LLVM::Message SubtargetFeatures;
   164|      |
   165|      |#if defined(__x86_64__)
   166|      |#if defined(__XOP__)
   167|      |  bool SupportXOP = true;
   168|      |#else
   169|      |  bool SupportXOP = false;
   170|      |#endif
   171|      |
   172|      |#if defined(__SSE4_1__)
   173|      |  bool SupportSSE4_1 = true;
   174|      |#else
   175|      |  bool SupportSSE4_1 = false;
   176|      |#endif
   177|      |
   178|      |#if defined(__SSSE3__)
   179|      |  bool SupportSSSE3 = true;
   180|      |#else
   181|      |  bool SupportSSSE3 = false;
   182|      |#endif
   183|      |
   184|      |#if defined(__SSE2__)
   185|      |  bool SupportSSE2 = true;
   186|      |#else
   187|      |  bool SupportSSE2 = false;
   188|      |#endif
   189|      |#endif
   190|      |
   191|      |#if defined(__aarch64__)
   192|      |#if defined(__ARM_NEON__) || defined(__ARM_NEON) || defined(__ARM_NEON_FP)
   193|      |  bool SupportNEON = true;
   194|      |#else
   195|      |  bool SupportNEON = false;
   196|      |#endif
   197|      |#endif
   198|      |
   199|      |  std::vector<const AST::CompositeType *> CompositeTypes;
   200|      |  std::vector<LLVM::Value> FunctionWrappers;
   201|      |  std::vector<std::tuple<uint32_t, LLVM::FunctionCallee,
   202|      |                         const WasmEdge::AST::CodeSegment *>>
   203|      |      Functions;
   204|      |  std::vector<LLVM::Type> Globals;
   205|      |  LLVM::Value IntrinsicsTable;
   206|      |  LLVM::FunctionCallee Trap;
   207|      |  CompileContext(LLVM::Context C, LLVM::Module &M,
   208|      |                 bool IsGenericBinary) noexcept
   209| 1.96k|      : LLContext(C), LLModule(M),
   210| 1.96k|        Cold(LLVM::Attribute::createEnum(C, LLVM::Core::Cold, 0)),
   211| 1.96k|        NoAlias(LLVM::Attribute::createEnum(C, LLVM::Core::NoAlias, 0)),
   212| 1.96k|        NoInline(LLVM::Attribute::createEnum(C, LLVM::Core::NoInline, 0)),
   213| 1.96k|        NoReturn(LLVM::Attribute::createEnum(C, LLVM::Core::NoReturn, 0)),
   214| 1.96k|        ReadOnly(LLVM::Attribute::createEnum(C, LLVM::Core::ReadOnly, 0)),
   215| 1.96k|        StrictFP(LLVM::Attribute::createEnum(C, LLVM::Core::StrictFP, 0)),
   216| 1.96k|        UWTable(LLVM::Attribute::createEnum(C, LLVM::Core::UWTable,
   217| 1.96k|                                            LLVM::Core::UWTableDefault)),
   218|      |        NoStackArgProbe(
   219| 1.96k|            LLVM::Attribute::createString(C, "no-stack-arg-probe"sv, {})),
   220| 1.96k|        VoidTy(LLContext.getVoidTy()), Int8Ty(LLContext.getInt8Ty()),
   221| 1.96k|        Int16Ty(LLContext.getInt16Ty()), Int32Ty(LLContext.getInt32Ty()),
   222| 1.96k|        Int64Ty(LLContext.getInt64Ty()), Int128Ty(LLContext.getInt128Ty()),
   223| 1.96k|        FloatTy(LLContext.getFloatTy()), DoubleTy(LLContext.getDoubleTy()),
   224| 1.96k|        Int8x16Ty(LLVM::Type::getVectorType(Int8Ty, 16)),
   225| 1.96k|        Int16x8Ty(LLVM::Type::getVectorType(Int16Ty, 8)),
   226| 1.96k|        Int32x4Ty(LLVM::Type::getVectorType(Int32Ty, 4)),
   227| 1.96k|        Floatx4Ty(LLVM::Type::getVectorType(FloatTy, 4)),
   228| 1.96k|        Int64x2Ty(LLVM::Type::getVectorType(Int64Ty, 2)),
   229| 1.96k|        Doublex2Ty(LLVM::Type::getVectorType(DoubleTy, 2)),
   230| 1.96k|        Int128x1Ty(LLVM::Type::getVectorType(Int128Ty, 1)),
   231| 1.96k|        Int8PtrTy(Int8Ty.getPointerTo()), Int32PtrTy(Int32Ty.getPointerTo()),
   232| 1.96k|        Int64PtrTy(Int64Ty.getPointerTo()),
   233| 1.96k|        Int128PtrTy(Int128Ty.getPointerTo()),
   234| 1.96k|        Int8PtrPtrTy(Int8PtrTy.getPointerTo()),
   235| 1.96k|        ExecCtxTy(LLVM::Type::getStructType(
   236| 1.96k|            "ExecCtx",
   237| 1.96k|            std::initializer_list<LLVM::Type>{
   238|      |                // Memory
   239| 1.96k|                Int8PtrTy.getPointerTo(),
   240|      |                // Globals
   241| 1.96k|                Int128PtrTy.getPointerTo(),
   242|      |                // InstrCount
   243| 1.96k|                Int64PtrTy,
   244|      |                // CostTable
   245| 1.96k|                LLVM::Type::getArrayType(Int64Ty, UINT16_MAX + 1)
   246| 1.96k|                    .getPointerTo(),
   247|      |                // Gas
   248| 1.96k|                Int64PtrTy,
   249|      |                // GasLimit
   250| 1.96k|                Int64Ty,
   251|      |                // StopToken
   252| 1.96k|                Int32PtrTy,
   253| 1.96k|            })),
   254| 1.96k|        ExecCtxPtrTy(ExecCtxTy.getPointerTo()),
   255| 1.96k|        IntrinsicsTableTy(LLVM::Type::getArrayType(
   256| 1.96k|            Int8PtrTy,
   257| 1.96k|            static_cast<uint32_t>(Executable::Intrinsics::kIntrinsicMax))),
   258| 1.96k|        IntrinsicsTablePtrTy(IntrinsicsTableTy.getPointerTo()),
   259| 1.96k|        IntrinsicsTable(LLModule.addGlobal(IntrinsicsTablePtrTy, true,
   260| 1.96k|                                           LLVMExternalLinkage, LLVM::Value(),
   261| 1.96k|                                           "intrinsics")) {
   262| 1.96k|    Trap.Ty = LLVM::Type::getFunctionType(VoidTy, {Int32Ty});
   263| 1.96k|    Trap.Fn = LLModule.addFunction(Trap.Ty, LLVMPrivateLinkage, "trap");
   264| 1.96k|    Trap.Fn.setDSOLocal(true);
   265| 1.96k|    Trap.Fn.addFnAttr(NoStackArgProbe);
   266| 1.96k|    Trap.Fn.addFnAttr(StrictFP);
   267| 1.96k|    Trap.Fn.addFnAttr(UWTable);
   268| 1.96k|    Trap.Fn.addFnAttr(NoReturn);
   269| 1.96k|    Trap.Fn.addFnAttr(Cold);
   270| 1.96k|    Trap.Fn.addFnAttr(NoInline);
   271|      |
   272| 1.96k|    LLModule.addGlobal(Int32Ty, true, LLVMExternalLinkage,
   273| 1.96k|                       LLVM::Value::getConstInt(Int32Ty, AOT::kBinaryVersion),
   274| 1.96k|                       "version");
   275|      |
   276| 1.96k|    if (!IsGenericBinary) {
   277| 1.96k|      SubtargetFeatures = LLVM::getHostCPUFeatures();
   278| 1.96k|      auto Features = SubtargetFeatures.string_view();
   279|  170k|      while (!Features.empty()) {
   280|  168k|        std::string_view Feature;
   281|  168k|        if (auto Pos = Features.find(','); Pos != std::string_view::npos) {
   282|  166k|          Feature = Features.substr(0, Pos);
   283|  166k|          Features = Features.substr(Pos + 1);
   284|  166k|        } else {
   285| 1.96k|          Feature = std::exchange(Features, std::string_view());
   286| 1.96k|        }
   287|  168k|        if (Feature[0] != '+') {
   288| 94.1k|          continue;
   289| 94.1k|        }
   290| 74.5k|        Feature = Feature.substr(1);
   291|      |
   292| 74.5k|#if defined(__x86_64__)
   293| 74.5k|        if (!SupportXOP && Feature == "xop"sv) {
   294|     0|          SupportXOP = true;
   295|     0|        }
   296| 74.5k|        if (!SupportSSE4_1 && Feature == "sse4.1"sv) {
   297| 1.96k|          SupportSSE4_1 = true;
   298| 1.96k|        }
   299| 74.5k|        if (!SupportSSSE3 && Feature == "ssse3"sv) {
   300| 1.96k|          SupportSSSE3 = true;
   301| 1.96k|        }
   302| 74.5k|        if (!SupportSSE2 && Feature == "sse2"sv) {
   303|     0|          SupportSSE2 = true;
   304|     0|        }
   305|      |#elif defined(__aarch64__)
   306|      |        if (!SupportNEON && Feature == "neon"sv) {
   307|      |          SupportNEON = true;
   308|      |        }
   309|      |#endif
   310| 74.5k|      }
   311| 1.96k|    }
   312|      |
   313| 1.96k|    {
   314|      |      // create trap
   315| 1.96k|      LLVM::Builder Builder(LLContext);
   316| 1.96k|      Builder.positionAtEnd(
   317| 1.96k|          LLVM::BasicBlock::create(LLContext, Trap.Fn, "entry"));
   318| 1.96k|      auto FnTy = LLVM::Type::getFunctionType(VoidTy, {Int32Ty});
   319| 1.96k|      auto CallTrap = Builder.createCall(
   320| 1.96k|          getIntrinsic(Builder, Executable::Intrinsics::kTrap, FnTy),
   321| 1.96k|          {Trap.Fn.getFirstParam()});
   322| 1.96k|      CallTrap.addCallSiteAttribute(NoReturn);
   323| 1.96k|      Builder.createUnreachable();
   324| 1.96k|    }
   325| 1.96k|  }
   326|      |  LLVM::Value getMemory(LLVM::Builder &Builder, LLVM::Value ExecCtx,
   327| 19.7k|                        uint32_t Index) noexcept {
   328| 19.7k|    auto Array = Builder.createExtractValue(ExecCtx, 0);
   329|      |#if WASMEDGE_ALLOCATOR_IS_STABLE
   330|      |    auto VPtr = Builder.createLoad(
   331|      |        Int8PtrTy, Builder.createInBoundsGEP1(Int8PtrTy, Array,
   332|      |                                              LLContext.getInt64(Index)));
   333|      |    VPtr.setMetadata(LLContext, LLVM::Core::InvariantGroup,
   334|      |                     LLVM::Metadata(LLContext, {}));
   335|      |#else
   336| 19.7k|    auto VPtrPtr = Builder.createLoad(
   337| 19.7k|        Int8PtrPtrTy, Builder.createInBoundsGEP1(Int8PtrPtrTy, Array,
   338| 19.7k|                                                 LLContext.getInt64(Index)));
   339| 19.7k|    VPtrPtr.setMetadata(LLContext, LLVM::Core::InvariantGroup,
   340| 19.7k|                        LLVM::Metadata(LLContext, {}));
   341| 19.7k|    auto VPtr = Builder.createLoad(
   342| 19.7k|        Int8PtrTy,
   343| 19.7k|        Builder.createInBoundsGEP1(Int8PtrTy, VPtrPtr, LLContext.getInt64(0)));
   344| 19.7k|#endif
   345| 19.7k|    return Builder.createBitCast(VPtr, Int8PtrTy);
   346| 19.7k|  }
   347|      |  std::pair<LLVM::Type, LLVM::Value> getGlobal(LLVM::Builder &Builder,
   348|      |                                               LLVM::Value ExecCtx,
   349|   354|                                               uint32_t Index) noexcept {
   350|   354|    auto Ty = Globals[Index];
   351|   354|    auto Array = Builder.createExtractValue(ExecCtx, 1);
   352|   354|    auto VPtr = Builder.createLoad(
   353|   354|        Int128PtrTy, Builder.createInBoundsGEP1(Int8PtrTy, Array,
   354|   354|                                                LLContext.getInt64(Index)));
   355|   354|    VPtr.setMetadata(LLContext, LLVM::Core::InvariantGroup,
   356|   354|                     LLVM::Metadata(LLContext, {}));
   357|   354|    auto Ptr = Builder.createBitCast(VPtr, Ty.getPointerTo());
   358|   354|    return {Ty, Ptr};
   359|   354|  }
   360|      |  LLVM::Value getInstrCount(LLVM::Builder &Builder,
   361|     0|                            LLVM::Value ExecCtx) noexcept {
   362|     0|    return Builder.createExtractValue(ExecCtx, 2);
   363|     0|  }
   364|      |  LLVM::Value getCostTable(LLVM::Builder &Builder,
   365|     0|                           LLVM::Value ExecCtx) noexcept {
   366|     0|    return Builder.createExtractValue(ExecCtx, 3);
   367|     0|  }
   368|     0|  LLVM::Value getGas(LLVM::Builder &Builder, LLVM::Value ExecCtx) noexcept {
   369|     0|    return Builder.createExtractValue(ExecCtx, 4);
   370|     0|  }
   371|      |  LLVM::Value getGasLimit(LLVM::Builder &Builder,
   372|     0|                          LLVM::Value ExecCtx) noexcept {
   373|     0|    return Builder.createExtractValue(ExecCtx, 5);
   374|     0|  }
   375|      |  LLVM::Value getStopToken(LLVM::Builder &Builder,
   376|     0|                           LLVM::Value ExecCtx) noexcept {
   377|     0|    return Builder.createExtractValue(ExecCtx, 6);
   378|     0|  }
   379|      |  LLVM::FunctionCallee getIntrinsic(LLVM::Builder &Builder,
   380|      |                                    Executable::Intrinsics Index,
   381| 5.93k|                                    LLVM::Type Ty) noexcept {
   382| 5.93k|    const auto Value = static_cast<uint32_t>(Index);
   383| 5.93k|    auto PtrTy = Ty.getPointerTo();
   384| 5.93k|    auto PtrPtrTy = PtrTy.getPointerTo();
   385| 5.93k|    auto IT = Builder.createLoad(IntrinsicsTablePtrTy, IntrinsicsTable);
   386| 5.93k|    IT.setMetadata(LLContext, LLVM::Core::InvariantGroup,
   387| 5.93k|                   LLVM::Metadata(LLContext, {}));
   388| 5.93k|    auto VPtr =
   389| 5.93k|        Builder.createInBoundsGEP2(IntrinsicsTableTy, IT, LLContext.getInt64(0),
   390| 5.93k|                                   LLContext.getInt64(Value));
   391| 5.93k|    auto Ptr = Builder.createBitCast(VPtr, PtrPtrTy);
   392| 5.93k|    return {Ty, Builder.createLoad(PtrTy, Ptr)};
   393| 5.93k|  }
   394|      |  std::pair<std::vector<ValType>, std::vector<ValType>>
   395| 16.2k|  resolveBlockType(const BlockType &BType) const noexcept {
   396| 16.2k|    using VecT = std::vector<ValType>;
   397| 16.2k|    using RetT = std::pair<VecT, VecT>;
   398| 16.2k|    if (BType.isEmpty()) {
   399| 1.91k|      return RetT{};
   400| 1.91k|    }
   401| 14.3k|    if (BType.isValType()) {
   402| 2.38k|      return RetT{{}, {BType.getValType()}};
   403| 11.9k|    } else {
   404|      |      // Type index case. t2* = type[index].returns
   405| 11.9k|      const uint32_t TypeIdx = BType.getTypeIndex();
   406| 11.9k|      const auto &FType = CompositeTypes[TypeIdx]->getFuncType();
   407| 11.9k|      return RetT{
   408| 11.9k|          VecT(FType.getParamTypes().begin(), FType.getParamTypes().end()),
   409| 11.9k|          VecT(FType.getReturnTypes().begin(), FType.getReturnTypes().end())};
   410| 11.9k|    }
   411| 14.3k|  }
   412|      |};
   413|      |
   414|      |namespace {
   415|      |
   416|      |using namespace WasmEdge;
   417|      |
   418| 30.1k|static bool isVoidReturn(Span<const ValType> ValTypes) noexcept {
   419| 30.1k|  return ValTypes.empty();
   420| 30.1k|}
   421|      |
   422|      |static LLVM::Type toLLVMType(LLVM::Context LLContext,
   423| 2.68M|                             const ValType &ValType) noexcept {
   424| 2.68M|  switch (ValType.getCode()) {
   425|  491k|  case TypeCode::I32:
   426|  491k|    return LLContext.getInt32Ty();
   427|  206k|  case TypeCode::I64:
   428|  206k|    return LLContext.getInt64Ty();
   429|     0|  case TypeCode::Ref:
   430| 22.4k|  case TypeCode::RefNull:
   431| 1.91M|  case TypeCode::V128:
   432| 1.91M|    return LLVM::Type::getVectorType(LLContext.getInt64Ty(), 2);
   433| 54.5k|  case TypeCode::F32:
   434| 54.5k|    return LLContext.getFloatTy();
   435| 20.2k|  case TypeCode::F64:
   436| 20.2k|    return LLContext.getDoubleTy();
   437|     0|  default:
   438|     0|    assumingUnreachable();
   439| 2.68M|  }
   440| 2.68M|}
   441|      |
   442|      |static std::vector<LLVM::Type>
   443|      |toLLVMTypeVector(LLVM::Context LLContext,
   444| 17.3k|                 Span<const ValType> ValTypes) noexcept {
   445| 17.3k|  std::vector<LLVM::Type> Result;
   446| 17.3k|  Result.reserve(ValTypes.size());
   447| 17.3k|  for (const auto &Type : ValTypes) {
   448| 17.3k|    Result.push_back(toLLVMType(LLContext, Type));
   449| 17.3k|  }
   450| 17.3k|  return Result;
   451| 17.3k|}
   452|      |
   453|      |static std::vector<LLVM::Type>
   454|      |toLLVMArgsType(LLVM::Context LLContext, LLVM::Type ExecCtxPtrTy,
   455| 13.9k|               Span<const ValType> ValTypes) noexcept {
   456| 13.9k|  auto Result = toLLVMTypeVector(LLContext, ValTypes);
   457| 13.9k|  Result.insert(Result.begin(), ExecCtxPtrTy);
   458| 13.9k|  return Result;
   459| 13.9k|}
   460|      |
   461|      |static LLVM::Type toLLVMRetsType(LLVM::Context LLContext,
   462| 13.9k|                                 Span<const ValType> ValTypes) noexcept {
   463| 13.9k|  if (isVoidReturn(ValTypes)) {
   464| 3.35k|    return LLContext.getVoidTy();
   465| 3.35k|  }
   466| 10.5k|  if (ValTypes.size() == 1) {
   467| 9.99k|    return toLLVMType(LLContext, ValTypes.front());
   468| 9.99k|  }
   469|   596|  std::vector<LLVM::Type> Result;
   470|   596|  Result.reserve(ValTypes.size());
   471| 1.61k|  for (const auto &Type : ValTypes) {
   472| 1.61k|    Result.push_back(toLLVMType(LLContext, Type));
   473| 1.61k|  }
   474|   596|  return LLVM::Type::getStructType(Result);
   475| 10.5k|}
   476|      |
   477|      |static LLVM::Type toLLVMType(LLVM::Context LLContext, LLVM::Type ExecCtxPtrTy,
   478| 13.9k|                             const AST::FunctionType &FuncType) noexcept {
   479| 13.9k|  auto ArgsTy =
   480| 13.9k|      toLLVMArgsType(LLContext, ExecCtxPtrTy, FuncType.getParamTypes());
   481| 13.9k|  auto RetTy = toLLVMRetsType(LLContext, FuncType.getReturnTypes());
   482| 13.9k|  return LLVM::Type::getFunctionType(RetTy, ArgsTy);
   483| 13.9k|}
   484|      |
   485|      |static LLVM::Value toLLVMConstantZero(LLVM::Context LLContext,
   486| 2.65M|                                      const ValType &ValType) noexcept {
   487| 2.65M|  switch (ValType.getCode()) {
   488|  475k|  case TypeCode::I32:
   489|  475k|    return LLVM::Value::getConstNull(LLContext.getInt32Ty());
   490|  203k|  case TypeCode::I64:
   491|  203k|    return LLVM::Value::getConstNull(LLContext.getInt64Ty());
   492|     0|  case TypeCode::Ref:
   493| 21.9k|  case TypeCode::RefNull: {
   494| 21.9k|    std::array<uint8_t, 16> Data{};
   495| 21.9k|    const auto Raw = ValType.getRawData();
   496| 21.9k|    std::copy(Raw.begin(), Raw.end(), Data.begin());
   497| 21.9k|    return LLVM::Value::getConstVector8(LLContext, Data);
   498|     0|  }
   499| 1.88M|  case TypeCode::V128:
   500| 1.88M|    return LLVM::Value::getConstNull(
   501| 1.88M|        LLVM::Type::getVectorType(LLContext.getInt64Ty(), 2));
   502| 52.4k|  case TypeCode::F32:
   503| 52.4k|    return LLVM::Value::getConstNull(LLContext.getFloatTy());
   504| 17.7k|  case TypeCode::F64:
   505| 17.7k|    return LLVM::Value::getConstNull(LLContext.getDoubleTy());
   506|     0|  default:
   507|     0|    assumingUnreachable();
   508| 2.65M|  }
   509| 2.65M|}
   510|      |
   511|      |class FunctionCompiler {
   512|      |  struct Control;
   513|      |
   514|      |public:
   515|      |  FunctionCompiler(LLVM::Compiler::CompileContext &Context,
   516|      |                   LLVM::FunctionCallee F, Span<const ValType> Locals,
   517|      |                   bool Interruptible, bool InstructionCounting,
   518|      |                   bool GasMeasuring) noexcept
   519| 9.34k|      : Context(Context), LLContext(Context.LLContext),
   520| 9.34k|        Interruptible(Interruptible), F(F), Builder(LLContext) {
   521| 9.34k|    if (F.Fn) {
   522| 9.34k|      Builder.positionAtEnd(LLVM::BasicBlock::create(LLContext, F.Fn, "entry"));
   523| 9.34k|      ExecCtx = Builder.createLoad(Context.ExecCtxTy, F.Fn.getFirstParam());
   524|      |
   525| 9.34k|      if (InstructionCounting) {
   526|     0|        LocalInstrCount = Builder.createAlloca(Context.Int64Ty);
   527|     0|        Builder.createStore(LLContext.getInt64(0), LocalInstrCount);
   528|     0|      }
   529|      |
   530| 9.34k|      if (GasMeasuring) {
   531|     0|        LocalGas = Builder.createAlloca(Context.Int64Ty);
   532|     0|        Builder.createStore(LLContext.getInt64(0), LocalGas);
   533|     0|      }
   534|      |
   535| 18.1k|      for (LLVM::Value Arg = F.Fn.getFirstParam().getNextParam(); Arg;
   536| 9.34k|           Arg = Arg.getNextParam()) {
   537| 8.77k|        LLVM::Type Ty = Arg.getType();
   538| 8.77k|        LLVM::Value ArgPtr = Builder.createAlloca(Ty);
   539| 8.77k|        Builder.createStore(Arg, ArgPtr);
   540| 8.77k|        Local.emplace_back(Ty, ArgPtr);
   541| 8.77k|      }
   542|      |
   543| 2.65M|      for (const auto &Type : Locals) {
   544| 2.65M|        LLVM::Type Ty = toLLVMType(LLContext, Type);
   545| 2.65M|        LLVM::Value ArgPtr = Builder.createAlloca(Ty);
   546| 2.65M|        Builder.createStore(toLLVMConstantZero(LLContext, Type), ArgPtr);
   547| 2.65M|        Local.emplace_back(Ty, ArgPtr);
   548| 2.65M|      }
   549| 9.34k|    }
   550| 9.34k|  }
   551|      |
   552| 30.3k|  LLVM::BasicBlock getTrapBB(ErrCode::Value Error) noexcept {
   553| 30.3k|    if (auto Iter = TrapBB.find(Error); Iter != TrapBB.end()) {
   554| 27.5k|      return Iter->second;
   555| 27.5k|    }
   556| 2.77k|    auto BB = LLVM::BasicBlock::create(LLContext, F.Fn, "trap");
   557| 2.77k|    TrapBB.emplace(Error, BB);
   558| 2.77k|    return BB;
   559| 30.3k|  }
   560|      |
   561|      |  void
   562|      |  compile(const AST::CodeSegment &Code,
   563| 9.34k|          std::pair<std::vector<ValType>, std::vector<ValType>> Type) noexcept {
   564| 9.34k|    auto RetBB = LLVM::BasicBlock::create(LLContext, F.Fn, "ret");
   565| 9.34k|    Type.first.clear();
   566| 9.34k|    enterBlock(RetBB, {}, {}, {}, std::move(Type));
   567| 9.34k|    compile(Code.getExpr().getInstrs());
   568| 9.34k|    assuming(ControlStack.empty());
   569| 9.34k|    compileReturn();
   570|      |
   571| 9.34k|    for (auto &[Error, BB] : TrapBB) {
   572| 2.77k|      Builder.positionAtEnd(BB);
   573| 2.77k|      updateInstrCount();
   574| 2.77k|      updateGasAtTrap();
   575| 2.77k|      auto CallTrap = Builder.createCall(
   576| 2.77k|          Context.Trap, {LLContext.getInt32(static_cast<uint32_t>(Error))});
   577| 2.77k|      CallTrap.addCallSiteAttribute(Context.NoReturn);
   578| 2.77k|      Builder.createUnreachable();
   579| 2.77k|    }
   580| 9.34k|  }
   581|      |
   582| 9.34k|  void compile(AST::InstrView Instrs) noexcept {
   583| 1.40M|    auto Dispatch = [this](const AST::Instruction &Instr) -> void {
   584| 1.40M|      switch (Instr.getOpCode()) {
   585|      |      // Control instructions (for blocks)
   586| 3.18k|      case OpCode::Block: {
   587| 3.18k|        auto Block = LLVM::BasicBlock::create(LLContext, F.Fn, "block");
   588| 3.18k|        auto EndBlock = LLVM::BasicBlock::create(LLContext, F.Fn, "block.end");
   589| 3.18k|        Builder.createBr(Block);
   590|      |
   591| 3.18k|        Builder.positionAtEnd(Block);
   592| 3.18k|        auto Type = Context.resolveBlockType(Instr.getBlockType());
   593| 3.18k|        const auto Arity = Type.first.size();
   594| 3.18k|        std::vector<LLVM::Value> Args(Arity);
   595| 3.18k|        if (isUnreachable()) {
   596|   798|          for (size_t I = 0; I < Arity; ++I) {
   597|   289|            auto Ty = toLLVMType(LLContext, Type.first[I]);
   598|   289|            Args[I] = LLVM::Value::getUndef(Ty);
   599|   289|          }
   600| 2.67k|        } else {
   601| 3.14k|          for (size_t I = 0; I < Arity; ++I) {
   602|   472|            const size_t J = Arity - 1 - I;
   603|   472|            Args[J] = stackPop();
   604|   472|          }
   605| 2.67k|        }
   606| 3.18k|        enterBlock(EndBlock, {}, {}, std::move(Args), std::move(Type));
   607| 3.18k|        checkStop();
   608| 3.18k|        updateGas();
   609| 3.18k|        return;
   610|     0|      }
   611| 1.45k|      case OpCode::Loop: {
   612| 1.45k|        auto Curr = Builder.getInsertBlock();
   613| 1.45k|        auto Loop = LLVM::BasicBlock::create(LLContext, F.Fn, "loop");
   614| 1.45k|        auto EndLoop = LLVM::BasicBlock::create(LLContext, F.Fn, "loop.end");
   615| 1.45k|        Builder.createBr(Loop);
   616|      |
   617| 1.45k|        Builder.positionAtEnd(Loop);
   618| 1.45k|        auto Type = Context.resolveBlockType(Instr.getBlockType());
   619| 1.45k|        const auto Arity = Type.first.size();
   620| 1.45k|        std::vector<LLVM::Value> Args(Arity);
   621| 1.45k|        if (isUnreachable()) {
   622|   690|          for (size_t I = 0; I < Arity; ++I) {
   623|   284|            auto Ty = toLLVMType(LLContext, Type.first[I]);
   624|   284|            auto Value = LLVM::Value::getUndef(Ty);
   625|   284|            auto PHINode = Builder.createPHI(Ty);
   626|   284|            PHINode.addIncoming(Value, Curr);
   627|   284|            Args[I] = PHINode;
   628|   284|          }
   629| 1.04k|        } else {
   630| 1.52k|          for (size_t I = 0; I < Arity; ++I) {
   631|   477|            const size_t J = Arity - 1 - I;
   632|   477|            auto Value = stackPop();
   633|   477|            auto PHINode = Builder.createPHI(Value.getType());
   634|   477|            PHINode.addIncoming(Value, Curr);
   635|   477|            Args[J] = PHINode;
   636|   477|          }
   637| 1.04k|        }
   638| 1.45k|        enterBlock(Loop, EndLoop, {}, std::move(Args), std::move(Type));
   639| 1.45k|        checkStop();
   640| 1.45k|        updateGas();
   641| 1.45k|        return;
   642|     0|      }
   643| 2.25k|      case OpCode::If: {
   644| 2.25k|        auto Then = LLVM::BasicBlock::create(LLContext, F.Fn, "then");
   645| 2.25k|        auto Else = LLVM::BasicBlock::create(LLContext, F.Fn, "else");
   646| 2.25k|        auto EndIf = LLVM::BasicBlock::create(LLContext, F.Fn, "if.end");
   647| 2.25k|        LLVM::Value Cond;
   648| 2.25k|        if (isUnreachable()) {
   649|   444|          Cond = LLVM::Value::getUndef(LLContext.getInt1Ty());
   650| 1.80k|        } else {
   651| 1.80k|          Cond = Builder.createICmpNE(stackPop(), LLContext.getInt32(0));
   652| 1.80k|        }
   653| 2.25k|        Builder.createCondBr(Cond, Then, Else);
   654|      |
   655| 2.25k|        Builder.positionAtEnd(Then);
   656| 2.25k|        auto Type = Context.resolveBlockType(Instr.getBlockType());
   657| 2.25k|        const auto Arity = Type.first.size();
   658| 2.25k|        std::vector<LLVM::Value> Args(Arity);
   659| 2.25k|        if (isUnreachable()) {
   660|   904|          for (size_t I = 0; I < Arity; ++I) {
   661|   460|            auto Ty = toLLVMType(LLContext, Type.first[I]);
   662|   460|            Args[I] = LLVM::Value::getUndef(Ty);
   663|   460|          }
   664| 1.80k|        } else {
   665| 2.57k|          for (size_t I = 0; I < Arity; ++I) {
   666|   764|            const size_t J = Arity - 1 - I;
   667|   764|            Args[J] = stackPop();
   668|   764|          }
   669| 1.80k|        }
   670| 2.25k|        enterBlock(EndIf, {}, Else, std::move(Args), std::move(Type));
   671| 2.25k|        return;
   672|     0|      }
   673| 16.2k|      case OpCode::End: {
   674| 16.2k|        auto Entry = leaveBlock();
   675| 16.2k|        if (Entry.ElseBlock) {
   676|   883|          auto Block = Builder.getInsertBlock();
   677|   883|          Builder.positionAtEnd(Entry.ElseBlock);
   678|   883|          enterBlock(Block, {}, {}, std::move(Entry.Args),
   679|   883|                     std::move(Entry.Type), std::move(Entry.ReturnPHI));
   680|   883|          Entry = leaveBlock();
   681|   883|        }
   682| 16.2k|        buildPHI(Entry.Type.second, Entry.ReturnPHI);
   683| 16.2k|        return;
   684|     0|      }
   685| 1.36k|      case OpCode::Else: {
   686| 1.36k|        auto Entry = leaveBlock();
   687| 1.36k|        Builder.positionAtEnd(Entry.ElseBlock);
   688| 1.36k|        enterBlock(Entry.JumpBlock, {}, {}, std::move(Entry.Args),
   689| 1.36k|                   std::move(Entry.Type), std::move(Entry.ReturnPHI));
   690| 1.36k|        return;
   691|     0|      }
   692| 1.37M|      default:
   693| 1.37M|        break;
   694| 1.40M|      }
   695|      |
   696| 1.37M|      if (isUnreachable()) {
   697|  430k|        return;
   698|  430k|      }
   699|      |
   700|  945k|      switch (Instr.getOpCode()) {
   701|      |      // Control instructions
   702| 2.72k|      case OpCode::Unreachable:
   703| 2.72k|        Builder.createBr(getTrapBB(ErrCode::Value::Unreachable));
   704| 2.72k|        setUnreachable();
   705| 2.72k|        Builder.positionAtEnd(
   706| 2.72k|            LLVM::BasicBlock::create(LLContext, F.Fn, "unreachable.end"));
   707| 2.72k|        break;
   708| 36.0k|      case OpCode::Nop:
   709| 36.0k|        break;
   710|      |      // LEGACY-EH: remove the `Try` cases after deprecating legacy EH.
   711|      |      // case OpCode::Try:
   712|      |      // case OpCode::Throw:
   713|      |      // case OpCode::Throw_ref:
   714|   711|      case OpCode::Br: {
   715|   711|        const auto Label = Instr.getJump().TargetIndex;
   716|   711|        setLableJumpPHI(Label);
   717|   711|        Builder.createBr(getLabel(Label));
   718|   711|        setUnreachable();
   719|   711|        Builder.positionAtEnd(
   720|   711|            LLVM::BasicBlock::create(LLContext, F.Fn, "br.end"));
   721|   711|        break;
   722|     0|      }
   723|   346|      case OpCode::Br_if: {
   724|   346|        const auto Label = Instr.getJump().TargetIndex;
   725|   346|        auto Cond = Builder.createICmpNE(stackPop(), LLContext.getInt32(0));
   726|   346|        setLableJumpPHI(Label);
   727|   346|        auto Next = LLVM::BasicBlock::create(LLContext, F.Fn, "br_if.end");
   728|   346|        Builder.createCondBr(Cond, getLabel(Label), Next);
   729|   346|        Builder.positionAtEnd(Next);
   730|   346|        break;
   731|     0|      }
   732|   922|      case OpCode::Br_table: {
   733|   922|        auto LabelTable = Instr.getLabelList();
   734|   922|        assuming(LabelTable.size() <= std::numeric_limits<uint32_t>::max());
   735|   922|        const auto LabelTableSize =
   736|   922|            static_cast<uint32_t>(LabelTable.size() - 1);
   737|   922|        auto Value = stackPop();
   738|   922|        setLableJumpPHI(LabelTable[LabelTableSize].TargetIndex);
   739|   922|        auto Switch = Builder.createSwitch(
   740|   922|            Value, getLabel(LabelTable[LabelTableSize].TargetIndex),
   741|   922|            LabelTableSize);
   742| 36.4k|        for (uint32_t I = 0; I < LabelTableSize; ++I) {
   743| 35.4k|          setLableJumpPHI(LabelTable[I].TargetIndex);
   744| 35.4k|          Switch.addCase(LLContext.getInt32(I),
   745| 35.4k|                         getLabel(LabelTable[I].TargetIndex));
   746| 35.4k|        }
   747|   922|        setUnreachable();
   748|   922|        Builder.positionAtEnd(
   749|   922|            LLVM::BasicBlock::create(LLContext, F.Fn, "br_table.end"));
   750|   922|        break;
   751|   922|      }
   752|     0|      case OpCode::Br_on_null: {
   753|     0|        const auto Label = Instr.getJump().TargetIndex;
   754|     0|        auto Value = Builder.createBitCast(stackPop(), Context.Int64x2Ty);
   755|     0|        auto Cond = Builder.createICmpEQ(
   756|     0|            Builder.createExtractElement(Value, LLContext.getInt64(1)),
   757|     0|            LLContext.getInt64(0));
   758|     0|        setLableJumpPHI(Label);
   759|     0|        auto Next = LLVM::BasicBlock::create(LLContext, F.Fn, "br_on_null.end");
   760|     0|        Builder.createCondBr(Cond, getLabel(Label), Next);
   761|     0|        Builder.positionAtEnd(Next);
   762|     0|        stackPush(Value);
   763|     0|        break;
   764|   922|      }
   765|     0|      case OpCode::Br_on_non_null: {
   766|     0|        const auto Label = Instr.getJump().TargetIndex;
   767|     0|        auto Cond = Builder.createICmpNE(
   768|     0|            Builder.createExtractElement(
   769|     0|                Builder.createBitCast(Stack.back(), Context.Int64x2Ty),
   770|     0|                LLContext.getInt64(1)),
   771|     0|            LLContext.getInt64(0));
   772|     0|        setLableJumpPHI(Label);
   773|     0|        auto Next =
   774|     0|            LLVM::BasicBlock::create(LLContext, F.Fn, "br_on_non_null.end");
   775|     0|        Builder.createCondBr(Cond, getLabel(Label), Next);
   776|     0|        Builder.positionAtEnd(Next);
   777|     0|        stackPop();
   778|     0|        break;
   779|   922|      }
   780|     0|      case OpCode::Br_on_cast:
   781|     0|      case OpCode::Br_on_cast_fail: {
   782|     0|        auto Ref = Builder.createBitCast(Stack.back(), Context.Int64x2Ty);
   783|     0|        const auto Label = Instr.getBrCast().Jump.TargetIndex;
   784|     0|        std::array<uint8_t, 16> Buf = {0};
   785|     0|        std::copy_n(Instr.getBrCast().RType2.getRawData().cbegin(), 8,
   786|     0|                    Buf.begin());
   787|     0|        auto VType = Builder.createExtractElement(
   788|     0|            Builder.createBitCast(LLVM::Value::getConstVector8(LLContext, Buf),
   789|     0|                                  Context.Int64x2Ty),
   790|     0|            LLContext.getInt64(0));
   791|     0|        auto IsRefTest = Builder.createCall(
   792|     0|            Context.getIntrinsic(Builder, Executable::Intrinsics::kRefTest,
   793|     0|                                 LLVM::Type::getFunctionType(
   794|     0|                                     Context.Int32Ty,
   795|     0|                                     {Context.Int64x2Ty, Context.Int64Ty},
   796|     0|                                     false)),
   797|     0|            {Ref, VType});
   798|     0|        auto Cond =
   799|     0|            (Instr.getOpCode() == OpCode::Br_on_cast)
   800|     0|                ? Builder.createICmpNE(IsRefTest, LLContext.getInt32(0))
   801|     0|                : Builder.createICmpEQ(IsRefTest, LLContext.getInt32(0));
   802|     0|        setLableJumpPHI(Label);
   803|     0|        auto Next = LLVM::BasicBlock::create(LLContext, F.Fn, "br_on_cast.end");
   804|     0|        Builder.createCondBr(Cond, getLabel(Label), Next);
   805|     0|        Builder.positionAtEnd(Next);
   806|     0|        break;
   807|     0|      }
   808|   679|      case OpCode::Return:
   809|   679|        compileReturn();
   810|   679|        setUnreachable();
   811|   679|        Builder.positionAtEnd(
   812|   679|            LLVM::BasicBlock::create(LLContext, F.Fn, "ret.end"));
   813|   679|        break;
   814| 3.29k|      case OpCode::Call:
   815| 3.29k|        updateInstrCount();
   816| 3.29k|        updateGas();
   817| 3.29k|        compileCallOp(Instr.getTargetIndex());
   818| 3.29k|        break;
   819|   610|      case OpCode::Call_indirect:
   820|   610|        updateInstrCount();
   821|   610|        updateGas();
   822|   610|        compileIndirectCallOp(Instr.getSourceIndex(), Instr.getTargetIndex());
   823|   610|        break;
   824|     0|      case OpCode::Return_call:
   825|     0|        updateInstrCount();
   826|     0|        updateGas();
   827|     0|        compileReturnCallOp(Instr.getTargetIndex());
   828|     0|        setUnreachable();
   829|     0|        Builder.positionAtEnd(
   830|     0|            LLVM::BasicBlock::create(LLContext, F.Fn, "ret_call.end"));
   831|     0|        break;
   832|     0|      case OpCode::Return_call_indirect:
   833|     0|        updateInstrCount();
   834|     0|        updateGas();
   835|     0|        compileReturnIndirectCallOp(Instr.getSourceIndex(),
   836|     0|                                    Instr.getTargetIndex());
   837|     0|        setUnreachable();
   838|     0|        Builder.positionAtEnd(
   839|     0|            LLVM::BasicBlock::create(LLContext, F.Fn, "ret_call_indir.end"));
   840|     0|        break;
   841|     0|      case OpCode::Call_ref:
   842|     0|        updateInstrCount();
   843|     0|        updateGas();
   844|     0|        compileCallRefOp(Instr.getTargetIndex());
   845|     0|        break;
   846|     0|      case OpCode::Return_call_ref:
   847|     0|        updateInstrCount();
   848|     0|        updateGas();
   849|     0|        compileReturnCallRefOp(Instr.getTargetIndex());
   850|     0|        setUnreachable();
   851|     0|        Builder.positionAtEnd(
   852|     0|            LLVM::BasicBlock::create(LLContext, F.Fn, "ret_call_ref.end"));
   853|     0|        break;
   854|      |        // LEGACY-EH: remove the `Catch` cases after deprecating legacy EH.
   855|      |        // case OpCode::Catch:
   856|      |        // case OpCode::Catch_all:
   857|      |        // case OpCode::Try_table:
   858|      |
   859|      |      // Reference Instructions
   860| 1.08k|      case OpCode::Ref__null: {
   861| 1.08k|        std::array<uint8_t, 16> Buf = {0};
   862|      |        // For null references, the dynamic type down scaling is needed.
   863| 1.08k|        ValType VType;
   864| 1.08k|        if (Instr.getValType().isAbsHeapType()) {
   865| 1.08k|          switch (Instr.getValType().getHeapTypeCode()) {
   866|     0|          case TypeCode::NullFuncRef:
   867|   482|          case TypeCode::FuncRef:
   868|   482|            VType = TypeCode::NullFuncRef;
   869|   482|            break;
   870|     0|          case TypeCode::NullExternRef:
   871|   601|          case TypeCode::ExternRef:
   872|   601|            VType = TypeCode::NullExternRef;
   873|   601|            break;
   874|     0|          case TypeCode::NullRef:
   875|     0|          case TypeCode::AnyRef:
   876|     0|          case TypeCode::EqRef:
   877|     0|          case TypeCode::I31Ref:
   878|     0|          case TypeCode::StructRef:
   879|     0|          case TypeCode::ArrayRef:
   880|     0|            VType = TypeCode::NullRef;
   881|     0|            break;
   882|     0|          default:
   883|     0|            assumingUnreachable();
   884| 1.08k|          }
   885| 1.08k|        } else {
   886|     0|          assuming(Instr.getValType().getTypeIndex() <
   887|     0|                   Context.CompositeTypes.size());
   888|     0|          const auto *CompType =
   889|     0|              Context.CompositeTypes[Instr.getValType().getTypeIndex()];
   890|     0|          assuming(CompType != nullptr);
   891|     0|          if (CompType->isFunc()) {
   892|     0|            VType = TypeCode::NullFuncRef;
   893|     0|          } else {
   894|     0|            VType = TypeCode::NullRef;
   895|     0|          }
   896|     0|        }
   897| 1.08k|        std::copy_n(VType.getRawData().cbegin(), 8, Buf.begin());
   898| 1.08k|        stackPush(Builder.createBitCast(
   899| 1.08k|            LLVM::Value::getConstVector8(LLContext, Buf), Context.Int64x2Ty));
   900| 1.08k|        break;
   901| 1.08k|      }
   902|   561|      case OpCode::Ref__is_null:
   903|   561|        stackPush(Builder.createZExt(
   904|   561|            Builder.createICmpEQ(
   905|   561|                Builder.createExtractElement(
   906|   561|                    Builder.createBitCast(stackPop(), Context.Int64x2Ty),
   907|   561|                    LLContext.getInt64(1)),
   908|   561|                LLContext.getInt64(0)),
   909|   561|            Context.Int32Ty));
   910|   561|        break;
   911|    27|      case OpCode::Ref__func:
   912|    27|        stackPush(Builder.createCall(
   913|    27|            Context.getIntrinsic(Builder, Executable::Intrinsics::kRefFunc,
   914|    27|                                 LLVM::Type::getFunctionType(Context.Int64x2Ty,
   915|    27|                                                             {Context.Int32Ty},
   916|    27|                                                             false)),
   917|    27|            {LLContext.getInt32(Instr.getTargetIndex())}));
   918|    27|        break;
   919|     0|      case OpCode::Ref__eq: {
   920|     0|        LLVM::Value RHS = stackPop();
   921|     0|        LLVM::Value LHS = stackPop();
   922|     0|        stackPush(Builder.createZExt(
   923|     0|            Builder.createICmpEQ(
   924|     0|                Builder.createExtractElement(LHS, LLContext.getInt64(1)),
   925|     0|                Builder.createExtractElement(RHS, LLContext.getInt64(1))),
   926|     0|            Context.Int32Ty));
   927|     0|        break;
   928| 1.08k|      }
   929|     0|      case OpCode::Ref__as_non_null: {
   930|     0|        auto Next =
   931|     0|            LLVM::BasicBlock::create(LLContext, F.Fn, "ref_as_non_null.ok");
   932|     0|        Stack.back() = Builder.createBitCast(Stack.back(), Context.Int64x2Ty);
   933|     0|        auto IsNotNull = Builder.createLikely(Builder.createICmpNE(
   934|     0|            Builder.createExtractElement(Stack.back(), LLContext.getInt64(1)),
   935|     0|            LLContext.getInt64(0)));
   936|     0|        Builder.createCondBr(IsNotNull, Next,
   937|     0|                             getTrapBB(ErrCode::Value::CastNullToNonNull));
   938|     0|        Builder.positionAtEnd(Next);
   939|     0|        break;
   940| 1.08k|      }
   941|      |
   942|      |      // Reference Instructions (GC proposal)
   943|     0|      case OpCode::Struct__new:
   944|     0|      case OpCode::Struct__new_default: {
   945|     0|        LLVM::Value Args = LLVM::Value::getConstPointerNull(Context.Int8PtrTy);
   946|     0|        assuming(Instr.getTargetIndex() < Context.CompositeTypes.size());
   947|     0|        const auto *CompType = Context.CompositeTypes[Instr.getTargetIndex()];
   948|     0|        assuming(CompType != nullptr && !CompType->isFunc());
   949|     0|        auto ArgSize = CompType->getFieldTypes().size();
   950|     0|        if (Instr.getOpCode() == OpCode::Struct__new) {
   951|     0|          std::vector<LLVM::Value> ArgsVec(ArgSize, nullptr);
   952|     0|          for (size_t I = 0; I < ArgSize; ++I) {
   953|     0|            ArgsVec[ArgSize - I - 1] = stackPop();
   954|     0|          }
   955|     0|          Args = Builder.createArray(ArgSize, kValSize);
   956|     0|          Builder.createArrayPtrStore(ArgsVec, Args, Context.Int8Ty, kValSize);
   957|     0|        } else {
   958|     0|          ArgSize = 0;
   959|     0|        }
   960|     0|        stackPush(Builder.createCall(
   961|     0|            Context.getIntrinsic(
   962|     0|                Builder, Executable::Intrinsics::kStructNew,
   963|     0|                LLVM::Type::getFunctionType(
   964|     0|                    Context.Int64x2Ty,
   965|     0|                    {Context.Int32Ty, Context.Int8PtrTy, Context.Int32Ty},
   966|     0|                    false)),
   967|     0|            {LLContext.getInt32(Instr.getTargetIndex()), Args,
   968|     0|             LLContext.getInt32(static_cast<uint32_t>(ArgSize))}));
   969|     0|        break;
   970|     0|      }
   971|     0|      case OpCode::Struct__get:
   972|     0|      case OpCode::Struct__get_u:
   973|     0|      case OpCode::Struct__get_s: {
   974|     0|        assuming(static_cast<size_t>(Instr.getTargetIndex()) <
   975|     0|                 Context.CompositeTypes.size());
   976|     0|        const auto *CompType = Context.CompositeTypes[Instr.getTargetIndex()];
   977|     0|        assuming(CompType != nullptr && !CompType->isFunc());
   978|     0|        assuming(static_cast<size_t>(Instr.getSourceIndex()) <
   979|     0|                 CompType->getFieldTypes().size());
   980|     0|        const auto &StorageType =
   981|     0|            CompType->getFieldTypes()[Instr.getSourceIndex()].getStorageType();
   982|     0|        auto Ref = stackPop();
   983|     0|        auto IsSigned = (Instr.getOpCode() == OpCode::Struct__get_s)
   984|     0|                            ? LLContext.getInt8(1)
   985|     0|                            : LLContext.getInt8(0);
   986|     0|        LLVM::Value Ret = Builder.createAlloca(Context.Int64x2Ty);
   987|     0|        Builder.createCall(
   988|     0|            Context.getIntrinsic(
   989|     0|                Builder, Executable::Intrinsics::kStructGet,
   990|     0|                LLVM::Type::getFunctionType(Context.VoidTy,
   991|     0|                                            {Context.Int64x2Ty, Context.Int32Ty,
   992|     0|                                             Context.Int32Ty, Context.Int8Ty,
   993|     0|                                             Context.Int8PtrTy},
   994|     0|                                            false)),
   995|     0|            {Ref, LLContext.getInt32(Instr.getTargetIndex()),
   996|     0|             LLContext.getInt32(Instr.getSourceIndex()), IsSigned, Ret});
   997|      |
   998|     0|        switch (StorageType.getCode()) {
   999|     0|        case TypeCode::I8:
  1000|     0|        case TypeCode::I16:
  1001|     0|        case TypeCode::I32: {
  1002|     0|          stackPush(Builder.createValuePtrLoad(Context.Int32Ty, Ret,
  1003|     0|                                               Context.Int64x2Ty));
  1004|     0|          break;
  1005|     0|        }
  1006|     0|        case TypeCode::I64: {
  1007|     0|          stackPush(Builder.createValuePtrLoad(Context.Int64Ty, Ret,
  1008|     0|                                               Context.Int64x2Ty));
  1009|     0|          break;
  1010|     0|        }
  1011|     0|        case TypeCode::F32: {
  1012|     0|          stackPush(Builder.createValuePtrLoad(Context.FloatTy, Ret,
  1013|     0|                                               Context.Int64x2Ty));
  1014|     0|          break;
  1015|     0|        }
  1016|     0|        case TypeCode::F64: {
  1017|     0|          stackPush(Builder.createValuePtrLoad(Context.DoubleTy, Ret,
  1018|     0|                                               Context.Int64x2Ty));
  1019|     0|          break;
  1020|     0|        }
  1021|     0|        case TypeCode::V128:
  1022|     0|        case TypeCode::Ref:
  1023|     0|        case TypeCode::RefNull: {
  1024|     0|          stackPush(Builder.createValuePtrLoad(Context.Int64x2Ty, Ret,
  1025|     0|                                               Context.Int64x2Ty));
  1026|     0|          break;
  1027|     0|        }
  1028|     0|        default:
  1029|     0|          assumingUnreachable();
  1030|     0|        }
  1031|     0|        break;
  1032|     0|      }
  1033|     0|      case OpCode::Struct__set: {
  1034|     0|        auto Val = stackPop();
  1035|     0|        auto Ref = stackPop();
  1036|     0|        LLVM::Value Arg = Builder.createAlloca(Context.Int64x2Ty);
  1037|     0|        Builder.createValuePtrStore(Val, Arg, Context.Int64x2Ty);
  1038|     0|        Builder.createCall(
  1039|     0|            Context.getIntrinsic(Builder, Executable::Intrinsics::kStructSet,
  1040|     0|                                 LLVM::Type::getFunctionType(
  1041|     0|                                     Context.VoidTy,
  1042|     0|                                     {Context.Int64x2Ty, Context.Int32Ty,
  1043|     0|                                      Context.Int32Ty, Context.Int8PtrTy},
  1044|     0|                                     false)),
  1045|     0|            {Ref, LLContext.getInt32(Instr.getTargetIndex()),
  1046|     0|             LLContext.getInt32(Instr.getSourceIndex()), Arg});
  1047|     0|        break;
  1048|     0|      }
  1049|     0|      case OpCode::Array__new: {
  1050|     0|        auto Length = stackPop();
  1051|     0|        auto Val = stackPop();
  1052|     0|        LLVM::Value Arg = Builder.createAlloca(Context.Int64x2Ty);
  1053|     0|        Builder.createValuePtrStore(Val, Arg, Context.Int64x2Ty);
  1054|     0|        stackPush(Builder.createCall(
  1055|     0|            Context.getIntrinsic(Builder, Executable::Intrinsics::kArrayNew,
  1056|     0|                                 LLVM::Type::getFunctionType(
  1057|     0|                                     Context.Int64x2Ty,
  1058|     0|                                     {Context.Int32Ty, Context.Int32Ty,
  1059|     0|                                      Context.Int8PtrTy, Context.Int32Ty},
  1060|     0|                                     false)),
  1061|     0|            {LLContext.getInt32(Instr.getTargetIndex()), Length, Arg,
  1062|     0|             LLContext.getInt32(1)}));
  1063|     0|        break;
  1064|     0|      }
  1065|     0|      case OpCode::Array__new_default: {
  1066|     0|        auto Length = stackPop();
  1067|     0|        LLVM::Value Arg = LLVM::Value::getConstPointerNull(Context.Int8PtrTy);
  1068|     0|        stackPush(Builder.createCall(
  1069|     0|            Context.getIntrinsic(Builder, Executable::Intrinsics::kArrayNew,
  1070|     0|                                 LLVM::Type::getFunctionType(
  1071|     0|                                     Context.Int64x2Ty,
  1072|     0|                                     {Context.Int32Ty, Context.Int32Ty,
  1073|     0|                                      Context.Int8PtrTy, Context.Int32Ty},
  1074|     0|                                     false)),
  1075|     0|            {LLContext.getInt32(Instr.getTargetIndex()), Length, Arg,
  1076|     0|             LLContext.getInt32(0)}));
  1077|     0|        break;
  1078|     0|      }
  1079|     0|      case OpCode::Array__new_fixed: {
  1080|     0|        const auto ArgSize = Instr.getSourceIndex();
  1081|     0|        std::vector<LLVM::Value> ArgsVec(ArgSize, nullptr);
  1082|     0|        for (size_t I = 0; I < ArgSize; ++I) {
  1083|     0|          ArgsVec[ArgSize - I - 1] = stackPop();
  1084|     0|        }
  1085|     0|        LLVM::Value Args = Builder.createArray(ArgSize, kValSize);
  1086|     0|        Builder.createArrayPtrStore(ArgsVec, Args, Context.Int8Ty, kValSize);
  1087|     0|        stackPush(Builder.createCall(
  1088|     0|            Context.getIntrinsic(Builder, Executable::Intrinsics::kArrayNew,
  1089|     0|                                 LLVM::Type::getFunctionType(
  1090|     0|                                     Context.Int64x2Ty,
  1091|     0|                                     {Context.Int32Ty, Context.Int32Ty,
  1092|     0|                                      Context.Int8PtrTy, Context.Int32Ty},
  1093|     0|                                     false)),
  1094|     0|            {LLContext.getInt32(Instr.getTargetIndex()),
  1095|     0|             LLContext.getInt32(ArgSize), Args, LLContext.getInt32(ArgSize)}));
  1096|     0|        break;
  1097|     0|      }
  1098|     0|      case OpCode::Array__new_data:
  1099|     0|      case OpCode::Array__new_elem: {
  1100|     0|        auto Length = stackPop();
  1101|     0|        auto Start = stackPop();
  1102|     0|        stackPush(Builder.createCall(
  1103|     0|            Context.getIntrinsic(
  1104|     0|                Builder,
  1105|     0|                ((Instr.getOpCode() == OpCode::Array__new_data)
  1106|     0|                     ? Executable::Intrinsics::kArrayNewData
  1107|     0|                     : Executable::Intrinsics::kArrayNewElem),
  1108|     0|                LLVM::Type::getFunctionType(Context.Int64x2Ty,
  1109|     0|                                            {Context.Int32Ty, Context.Int32Ty,
  1110|     0|                                             Context.Int32Ty, Context.Int32Ty},
  1111|     0|                                            false)),
  1112|     0|            {LLContext.getInt32(Instr.getTargetIndex()),
  1113|     0|             LLContext.getInt32(Instr.getSourceIndex()), Start, Length}));
  1114|     0|        break;
  1115|     0|      }
  1116|     0|      case OpCode::Array__get:
  1117|     0|      case OpCode::Array__get_u:
  1118|     0|      case OpCode::Array__get_s: {
  1119|     0|        assuming(static_cast<size_t>(Instr.getTargetIndex()) <
  1120|     0|                 Context.CompositeTypes.size());
  1121|     0|        const auto *CompType = Context.CompositeTypes[Instr.getTargetIndex()];
  1122|     0|        assuming(CompType != nullptr && !CompType->isFunc());
  1123|     0|        assuming(static_cast<size_t>(1) == CompType->getFieldTypes().size());
  1124|     0|        const auto &StorageType = CompType->getFieldTypes()[0].getStorageType();
  1125|     0|        auto Idx = stackPop();
  1126|     0|        auto Ref = stackPop();
  1127|     0|        auto IsSigned = (Instr.getOpCode() == OpCode::Array__get_s)
  1128|     0|                            ? LLContext.getInt8(1)
  1129|     0|                            : LLContext.getInt8(0);
  1130|     0|        LLVM::Value Ret = Builder.createAlloca(Context.Int64x2Ty);
  1131|     0|        Builder.createCall(
  1132|     0|            Context.getIntrinsic(
  1133|     0|                Builder, Executable::Intrinsics::kArrayGet,
  1134|     0|                LLVM::Type::getFunctionType(Context.VoidTy,
  1135|     0|                                            {Context.Int64x2Ty, Context.Int32Ty,
  1136|     0|                                             Context.Int32Ty, Context.Int8Ty,
  1137|     0|                                             Context.Int8PtrTy},
  1138|     0|                                            false)),
  1139|     0|            {Ref, LLContext.getInt32(Instr.getTargetIndex()), Idx, IsSigned,
  1140|     0|             Ret});
1141
1142
0
        switch (StorageType.getCode()) {
1143
0
        case TypeCode::I8:
1144
0
        case TypeCode::I16:
1145
0
        case TypeCode::I32: {
1146
0
          stackPush(Builder.createValuePtrLoad(Context.Int32Ty, Ret,
1147
0
                                               Context.Int64x2Ty));
1148
0
          break;
1149
0
        }
1150
0
        case TypeCode::I64: {
1151
0
          stackPush(Builder.createValuePtrLoad(Context.Int64Ty, Ret,
1152
0
                                               Context.Int64x2Ty));
1153
0
          break;
1154
0
        }
1155
0
        case TypeCode::F32: {
1156
0
          stackPush(Builder.createValuePtrLoad(Context.FloatTy, Ret,
1157
0
                                               Context.Int64x2Ty));
1158
0
          break;
1159
0
        }
1160
0
        case TypeCode::F64: {
1161
0
          stackPush(Builder.createValuePtrLoad(Context.DoubleTy, Ret,
1162
0
                                               Context.Int64x2Ty));
1163
0
          break;
1164
0
        }
1165
0
        case TypeCode::V128:
1166
0
        case TypeCode::Ref:
1167
0
        case TypeCode::RefNull: {
1168
0
          stackPush(Builder.createValuePtrLoad(Context.Int64x2Ty, Ret,
1169
0
                                               Context.Int64x2Ty));
1170
0
          break;
1171
0
        }
1172
0
        default:
1173
0
          assumingUnreachable();
1174
0
        }
1175
0
        break;
1176
0
      }
1177
0
      case OpCode::Array__set: {
1178
0
        auto Val = stackPop();
1179
0
        auto Idx = stackPop();
1180
0
        auto Ref = stackPop();
1181
0
        LLVM::Value Arg = Builder.createAlloca(Context.Int64x2Ty);
1182
0
        Builder.createValuePtrStore(Val, Arg, Context.Int64x2Ty);
1183
0
        Builder.createCall(
1184
0
            Context.getIntrinsic(Builder, Executable::Intrinsics::kArraySet,
1185
0
                                 LLVM::Type::getFunctionType(
1186
0
                                     Context.VoidTy,
1187
0
                                     {Context.Int64x2Ty, Context.Int32Ty,
1188
0
                                      Context.Int32Ty, Context.Int8PtrTy},
1189
0
                                     false)),
1190
0
            {Ref, LLContext.getInt32(Instr.getTargetIndex()), Idx, Arg});
1191
0
        break;
1192
0
      }
1193
0
      case OpCode::Array__len: {
1194
0
        auto Ref = stackPop();
1195
0
        stackPush(Builder.createCall(
1196
0
            Context.getIntrinsic(
1197
0
                Builder, Executable::Intrinsics::kArrayLen,
1198
0
                LLVM::Type::getFunctionType(Context.Int32Ty,
1199
0
                                            {Context.Int64x2Ty}, false)),
1200
0
            {Ref}));
1201
0
        break;
1202
0
      }
1203
0
      case OpCode::Array__fill: {
1204
0
        auto Cnt = stackPop();
1205
0
        auto Val = stackPop();
1206
0
        auto Off = stackPop();
1207
0
        auto Ref = stackPop();
1208
0
        LLVM::Value Arg = Builder.createAlloca(Context.Int64x2Ty);
1209
0
        Builder.createValuePtrStore(Val, Arg, Context.Int64x2Ty);
1210
0
        Builder.createCall(
1211
0
            Context.getIntrinsic(
1212
0
                Builder, Executable::Intrinsics::kArrayFill,
1213
0
                LLVM::Type::getFunctionType(Context.VoidTy,
1214
0
                                            {Context.Int64x2Ty, Context.Int32Ty,
1215
0
                                             Context.Int32Ty, Context.Int32Ty,
1216
0
                                             Context.Int8PtrTy},
1217
0
                                            false)),
1218
0
            {Ref, LLContext.getInt32(Instr.getTargetIndex()), Off, Cnt, Arg});
1219
0
        break;
1220
0
      }
1221
0
      case OpCode::Array__copy: {
1222
0
        auto Cnt = stackPop();
1223
0
        auto SrcOff = stackPop();
1224
0
        auto SrcRef = stackPop();
1225
0
        auto DstOff = stackPop();
1226
0
        auto DstRef = stackPop();
1227
0
        Builder.createCall(
1228
0
            Context.getIntrinsic(
1229
0
                Builder, Executable::Intrinsics::kArrayCopy,
1230
0
                LLVM::Type::getFunctionType(Context.VoidTy,
1231
0
                                            {Context.Int64x2Ty, Context.Int32Ty,
1232
0
                                             Context.Int32Ty, Context.Int64x2Ty,
1233
0
                                             Context.Int32Ty, Context.Int32Ty,
1234
0
                                             Context.Int32Ty},
1235
0
                                            false)),
1236
0
            {DstRef, LLContext.getInt32(Instr.getTargetIndex()), DstOff, SrcRef,
1237
0
             LLContext.getInt32(Instr.getSourceIndex()), SrcOff, Cnt});
1238
0
        break;
1239
0
      }
1240
0
      case OpCode::Array__init_data:
1241
0
      case OpCode::Array__init_elem: {
1242
0
        auto Cnt = stackPop();
1243
0
        auto SrcOff = stackPop();
1244
0
        auto DstOff = stackPop();
1245
0
        auto Ref = stackPop();
1246
0
        Builder.createCall(
1247
0
            Context.getIntrinsic(
1248
0
                Builder,
1249
0
                ((Instr.getOpCode() == OpCode::Array__init_data)
1250
0
                     ? Executable::Intrinsics::kArrayInitData
1251
0
                     : Executable::Intrinsics::kArrayInitElem),
1252
0
                LLVM::Type::getFunctionType(Context.VoidTy,
1253
0
                                            {Context.Int64x2Ty, Context.Int32Ty,
1254
0
                                             Context.Int32Ty, Context.Int32Ty,
1255
0
                                             Context.Int32Ty, Context.Int32Ty},
1256
0
                                            false)),
1257
0
            {Ref, LLContext.getInt32(Instr.getTargetIndex()),
1258
0
             LLContext.getInt32(Instr.getSourceIndex()), DstOff, SrcOff, Cnt});
1259
0
        break;
1260
0
      }
1261
0
      case OpCode::Ref__test:
1262
0
      case OpCode::Ref__test_null: {
1263
0
        auto Ref = stackPop();
1264
0
        std::array<uint8_t, 16> Buf = {0};
1265
0
        std::copy_n(Instr.getValType().getRawData().cbegin(), 8, Buf.begin());
1266
0
        auto VType = Builder.createExtractElement(
1267
0
            Builder.createBitCast(LLVM::Value::getConstVector8(LLContext, Buf),
1268
0
                                  Context.Int64x2Ty),
1269
0
            LLContext.getInt64(0));
1270
0
        stackPush(Builder.createCall(
1271
0
            Context.getIntrinsic(Builder, Executable::Intrinsics::kRefTest,
1272
0
                                 LLVM::Type::getFunctionType(
1273
0
                                     Context.Int32Ty,
1274
0
                                     {Context.Int64x2Ty, Context.Int64Ty},
1275
0
                                     false)),
1276
0
            {Ref, VType}));
1277
0
        break;
1278
0
      }
1279
0
      case OpCode::Ref__cast:
1280
0
      case OpCode::Ref__cast_null: {
1281
0
        auto Ref = stackPop();
1282
0
        std::array<uint8_t, 16> Buf = {0};
1283
0
        std::copy_n(Instr.getValType().getRawData().cbegin(), 8, Buf.begin());
1284
0
        auto VType = Builder.createExtractElement(
1285
0
            Builder.createBitCast(LLVM::Value::getConstVector8(LLContext, Buf),
1286
0
                                  Context.Int64x2Ty),
1287
0
            LLContext.getInt64(0));
1288
0
        stackPush(Builder.createCall(
1289
0
            Context.getIntrinsic(Builder, Executable::Intrinsics::kRefCast,
1290
0
                                 LLVM::Type::getFunctionType(
1291
0
                                     Context.Int64x2Ty,
1292
0
                                     {Context.Int64x2Ty, Context.Int64Ty},
1293
0
                                     false)),
1294
0
            {Ref, VType}));
1295
0
        break;
1296
0
      }
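
For ref.test and ref.cast the target heap type is materialized as a scalar: the first 8 raw bytes of the instruction's ValType are copied into a 16-byte constant vector, bitcast to <2 x i64>, and lane 0 is handed to the runtime intrinsic. A rough portable-C++ sketch of that packing (std::memcpy stands in for the bitcast/extract sequence; the helper name packTypeOperand and the byte values are purely illustrative):

#include <array>
#include <cassert>
#include <cstdint>
#include <cstring>

// Packs the first 8 raw ValType bytes into one 64-bit operand, mirroring the
// const-vector bitcast + extractelement pattern used above.
uint64_t packTypeOperand(const std::array<uint8_t, 8> &Raw) {
  uint64_t Out = 0;
  std::memcpy(&Out, Raw.data(), sizeof(Out));
  return Out;
}

int main() {
  std::array<uint8_t, 8> Raw = {0x6F, 0, 0, 0, 0, 0, 0, 0}; // hypothetical raw bytes
  uint64_t Packed = packTypeOperand(Raw);
  uint8_t First = 0;
  std::memcpy(&First, &Packed, 1);
  assert(First == 0x6F); // the byte order of the raw data is preserved
  return 0;
}
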
1297
0
      case OpCode::Any__convert_extern: {
1298
0
        std::array<uint8_t, 16> RawRef = {0};
1299
0
        auto Ref = stackPop();
1300
0
        auto PtrVal = Builder.createExtractElement(Ref, LLContext.getInt64(1));
1301
0
        auto IsNullBB =
1302
0
            LLVM::BasicBlock::create(LLContext, F.Fn, "any_conv_extern.null");
1303
0
        auto NotNullBB = LLVM::BasicBlock::create(LLContext, F.Fn,
1304
0
                                                  "any_conv_extern.not_null");
1305
0
        auto IsExtrefBB = LLVM::BasicBlock::create(LLContext, F.Fn,
1306
0
                                                   "any_conv_extern.is_extref");
1307
0
        auto EndBB =
1308
0
            LLVM::BasicBlock::create(LLContext, F.Fn, "any_conv_extern.end");
1309
0
        auto CondIsNull = Builder.createICmpEQ(PtrVal, LLContext.getInt64(0));
1310
0
        Builder.createCondBr(CondIsNull, IsNullBB, NotNullBB);
1311
1312
0
        Builder.positionAtEnd(IsNullBB);
1313
0
        auto VT = ValType(TypeCode::RefNull, TypeCode::NullRef);
1314
0
        std::copy_n(VT.getRawData().cbegin(), 8, RawRef.begin());
1315
0
        auto Ret1 = Builder.createBitCast(
1316
0
            LLVM::Value::getConstVector8(LLContext, RawRef), Context.Int64x2Ty);
1317
0
        Builder.createBr(EndBB);
1318
1319
0
        Builder.positionAtEnd(NotNullBB);
1320
0
        auto Ret2 = Builder.createBitCast(
1321
0
            Builder.createInsertElement(
1322
0
                Builder.createBitCast(Ref, Context.Int8x16Ty),
1323
0
                LLContext.getInt8(0), LLContext.getInt64(1)),
1324
0
            Context.Int64x2Ty);
1325
0
        auto HType = Builder.createExtractElement(
1326
0
            Builder.createBitCast(Ret2, Context.Int8x16Ty),
1327
0
            LLContext.getInt64(3));
1328
0
        auto CondIsExtref = Builder.createOr(
1329
0
            Builder.createICmpEQ(HType, LLContext.getInt8(static_cast<uint8_t>(
1330
0
                                            TypeCode::ExternRef))),
1331
0
            Builder.createICmpEQ(HType, LLContext.getInt8(static_cast<uint8_t>(
1332
0
                                            TypeCode::NullExternRef))));
1333
0
        Builder.createCondBr(CondIsExtref, IsExtrefBB, EndBB);
1334
1335
0
        Builder.positionAtEnd(IsExtrefBB);
1336
0
        VT = ValType(TypeCode::Ref, TypeCode::AnyRef);
1337
0
        std::copy_n(VT.getRawData().cbegin(), 8, RawRef.begin());
1338
0
        auto Ret3 = Builder.createInsertElement(
1339
0
            Builder.createBitCast(
1340
0
                LLVM::Value::getConstVector8(LLContext, RawRef),
1341
0
                Context.Int64x2Ty),
1342
0
            PtrVal, LLContext.getInt64(1));
1343
0
        Builder.createBr(EndBB);
1344
1345
0
        Builder.positionAtEnd(EndBB);
1346
0
        auto Ret = Builder.createPHI(Context.Int64x2Ty);
1347
0
        Ret.addIncoming(Ret1, IsNullBB);
1348
0
        Ret.addIncoming(Ret2, NotNullBB);
1349
0
        Ret.addIncoming(Ret3, IsExtrefBB);
1350
0
        stackPush(Ret);
1351
0
        break;
1352
0
      }
1353
0
      case OpCode::Extern__convert_any: {
1354
0
        std::array<uint8_t, 16> RawRef = {0};
1355
0
        auto Ref = stackPop();
1356
0
        auto IsNullBB =
1357
0
            LLVM::BasicBlock::create(LLContext, F.Fn, "extern_conv_any.null");
1358
0
        auto NotNullBB = LLVM::BasicBlock::create(LLContext, F.Fn,
1359
0
                                                  "extern_conv_any.not_null");
1360
0
        auto EndBB =
1361
0
            LLVM::BasicBlock::create(LLContext, F.Fn, "extern_conv_any.end");
1362
0
        auto CondIsNull = Builder.createICmpEQ(
1363
0
            Builder.createExtractElement(Ref, LLContext.getInt64(1)),
1364
0
            LLContext.getInt64(0));
1365
0
        Builder.createCondBr(CondIsNull, IsNullBB, NotNullBB);
1366
1367
0
        Builder.positionAtEnd(IsNullBB);
1368
0
        auto VT = ValType(TypeCode::RefNull, TypeCode::NullExternRef);
1369
0
        std::copy_n(VT.getRawData().cbegin(), 8, RawRef.begin());
1370
0
        auto Ret1 = Builder.createBitCast(
1371
0
            LLVM::Value::getConstVector8(LLContext, RawRef), Context.Int64x2Ty);
1372
0
        Builder.createBr(EndBB);
1373
1374
0
        Builder.positionAtEnd(NotNullBB);
1375
0
        auto Ret2 = Builder.createBitCast(
1376
0
            Builder.createInsertElement(
1377
0
                Builder.createBitCast(Ref, Context.Int8x16Ty),
1378
0
                LLContext.getInt8(1), LLContext.getInt64(1)),
1379
0
            Context.Int64x2Ty);
1380
0
        Builder.createBr(EndBB);
1381
1382
0
        Builder.positionAtEnd(EndBB);
1383
0
        auto Ret = Builder.createPHI(Context.Int64x2Ty);
1384
0
        Ret.addIncoming(Ret1, IsNullBB);
1385
0
        Ret.addIncoming(Ret2, NotNullBB);
1386
0
        stackPush(Ret);
1387
0
        break;
1388
0
      }
1389
0
      case OpCode::Ref__i31: {
1390
0
        std::array<uint8_t, 16> RawRef = {0};
1391
0
        auto VT = ValType(TypeCode::Ref, TypeCode::I31Ref);
1392
0
        std::copy_n(VT.getRawData().cbegin(), 8, RawRef.begin());
1393
0
        auto Ref = Builder.createBitCast(
1394
0
            LLVM::Value::getConstVector8(LLContext, RawRef), Context.Int64x2Ty);
1395
0
        auto Val = Builder.createZExt(
1396
0
            Builder.createOr(
1397
0
                Builder.createAnd(stackPop(), LLContext.getInt32(0x7FFFFFFFU)),
1398
0
                LLContext.getInt32(0x80000000U)),
1399
0
            Context.Int64Ty);
1400
0
        stackPush(Builder.createInsertElement(Ref, Val, LLContext.getInt64(1)));
1401
0
        break;
1402
0
      }
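
The ref.i31 lowering above keeps the low 31 bits of the operand, sets bit 31 as a non-null marker, and stores the result in the second 64-bit lane of the reference value. A minimal standalone sketch of that encoding (plain C++, not the LLVM builder API; the helper name encodeI31 is hypothetical):

#include <cassert>
#include <cstdint>

// Sketch of the i31ref payload encoding emitted above: keep the low 31 bits
// of the operand and set bit 31 so that a zero payload is still
// distinguishable from a null reference (whose lane is all zero).
uint64_t encodeI31(uint32_t Value) {
  return static_cast<uint64_t>((Value & 0x7FFFFFFFU) | 0x80000000U);
}

int main() {
  assert(encodeI31(0) == 0x80000000U);           // zero payload, still non-null
  assert(encodeI31(0xFFFFFFFFU) == 0xFFFFFFFFU); // low 31 bits kept, marker bit set
  return 0;
}
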
1403
0
      case OpCode::I31__get_s: {
1404
0
        auto Next = LLVM::BasicBlock::create(LLContext, F.Fn, "i31.get.ok");
1405
0
        auto Ref = Builder.createBitCast(stackPop(), Context.Int64x2Ty);
1406
0
        auto Val = Builder.createTrunc(
1407
0
            Builder.createExtractElement(Ref, LLContext.getInt64(1)),
1408
0
            Context.Int32Ty);
1409
0
        auto IsNotNull = Builder.createLikely(Builder.createICmpNE(
1410
0
            Builder.createAnd(Val, LLContext.getInt32(0x80000000U)),
1411
0
            LLContext.getInt32(0)));
1412
0
        Builder.createCondBr(IsNotNull, Next,
1413
0
                             getTrapBB(ErrCode::Value::AccessNullI31));
1414
0
        Builder.positionAtEnd(Next);
1415
0
        Val = Builder.createAnd(Val, LLContext.getInt32(0x7FFFFFFFU));
1416
0
        stackPush(Builder.createOr(
1417
0
            Val, Builder.createShl(
1418
0
                     Builder.createAnd(Val, LLContext.getInt32(0x40000000U)),
1419
0
                     LLContext.getInt32(1))));
1420
0
        break;
1421
0
      }
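
The i31.get_s case relies on a small bit trick: after clearing the marker bit, bit 30 is the sign bit of the 31-bit payload, and OR-ing in a copy of it shifted left by one replicates it into bit 31, giving a sign-extended 32-bit result without a branch. A sketch of the same computation on a plain integer (hypothetical helper i31GetS, assuming the lane layout used above):

#include <cassert>
#include <cstdint>

// Reproduces the i31.get_s arithmetic above on a plain 32-bit lane value.
int32_t i31GetS(uint32_t Lane) {
  uint32_t Val = Lane & 0x7FFFFFFFU; // drop the non-null marker bit
  // Copy bit 30 into bit 31 (modular unsigned-to-signed conversion,
  // guaranteed two's complement since C++20).
  return static_cast<int32_t>(Val | ((Val & 0x40000000U) << 1));
}

int main() {
  assert(i31GetS(0x80000000U | 5U) == 5);
  assert(i31GetS(0x80000000U | 0x7FFFFFFFU) == -1); // all-ones 31-bit payload is -1
  return 0;
}
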
1422
0
      case OpCode::I31__get_u: {
1423
0
        auto Next = LLVM::BasicBlock::create(LLContext, F.Fn, "i31.get.ok");
1424
0
        auto Ref = Builder.createBitCast(stackPop(), Context.Int64x2Ty);
1425
0
        auto Val = Builder.createTrunc(
1426
0
            Builder.createExtractElement(Ref, LLContext.getInt64(1)),
1427
0
            Context.Int32Ty);
1428
0
        auto IsNotNull = Builder.createLikely(Builder.createICmpNE(
1429
0
            Builder.createAnd(Val, LLContext.getInt32(0x80000000U)),
1430
0
            LLContext.getInt32(0)));
1431
0
        Builder.createCondBr(IsNotNull, Next,
1432
0
                             getTrapBB(ErrCode::Value::AccessNullI31));
1433
0
        Builder.positionAtEnd(Next);
1434
0
        stackPush(Builder.createAnd(Val, LLContext.getInt32(0x7FFFFFFFU)));
1435
0
        break;
1436
0
      }
1437
1438
      // Parametric Instructions
1439
3.23k
      case OpCode::Drop:
1440
3.23k
        stackPop();
1441
3.23k
        break;
1442
622
      case OpCode::Select:
1443
1.05k
      case OpCode::Select_t: {
1444
1.05k
        auto Cond = Builder.createICmpNE(stackPop(), LLContext.getInt32(0));
1445
1.05k
        auto False = stackPop();
1446
1.05k
        auto True = stackPop();
1447
1.05k
        stackPush(Builder.createSelect(Cond, True, False));
1448
1.05k
        break;
1449
622
      }
1450
1451
      // Variable Instructions
1452
10.4k
      case OpCode::Local__get: {
1453
10.4k
        const auto &L = Local[Instr.getTargetIndex()];
1454
10.4k
        stackPush(Builder.createLoad(L.first, L.second));
1455
10.4k
        break;
1456
622
      }
1457
3.52k
      case OpCode::Local__set:
1458
3.52k
        Builder.createStore(stackPop(), Local[Instr.getTargetIndex()].second);
1459
3.52k
        break;
1460
771
      case OpCode::Local__tee:
1461
771
        Builder.createStore(Stack.back(), Local[Instr.getTargetIndex()].second);
1462
771
        break;
1463
302
      case OpCode::Global__get: {
1464
302
        const auto G =
1465
302
            Context.getGlobal(Builder, ExecCtx, Instr.getTargetIndex());
1466
302
        stackPush(Builder.createLoad(G.first, G.second));
1467
302
        break;
1468
622
      }
1469
52
      case OpCode::Global__set:
1470
52
        Builder.createStore(
1471
52
            stackPop(),
1472
52
            Context.getGlobal(Builder, ExecCtx, Instr.getTargetIndex()).second);
1473
52
        break;
1474
1475
      // Table Instructions
1476
33
      case OpCode::Table__get: {
1477
33
        auto Idx = stackPop();
1478
33
        stackPush(Builder.createCall(
1479
33
            Context.getIntrinsic(
1480
33
                Builder, Executable::Intrinsics::kTableGet,
1481
33
                LLVM::Type::getFunctionType(Context.Int64x2Ty,
1482
33
                                            {Context.Int32Ty, Context.Int32Ty},
1483
33
                                            false)),
1484
33
            {LLContext.getInt32(Instr.getTargetIndex()), Idx}));
1485
33
        break;
1486
622
      }
1487
26
      case OpCode::Table__set: {
1488
26
        auto Ref = stackPop();
1489
26
        auto Idx = stackPop();
1490
26
        Builder.createCall(
1491
26
            Context.getIntrinsic(
1492
26
                Builder, Executable::Intrinsics::kTableSet,
1493
26
                LLVM::Type::getFunctionType(
1494
26
                    Context.Int64Ty,
1495
26
                    {Context.Int32Ty, Context.Int32Ty, Context.Int64x2Ty},
1496
26
                    false)),
1497
26
            {LLContext.getInt32(Instr.getTargetIndex()), Idx, Ref});
1498
26
        break;
1499
622
      }
1500
27
      case OpCode::Table__init: {
1501
27
        auto Len = stackPop();
1502
27
        auto Src = stackPop();
1503
27
        auto Dst = stackPop();
1504
27
        Builder.createCall(
1505
27
            Context.getIntrinsic(
1506
27
                Builder, Executable::Intrinsics::kTableInit,
1507
27
                LLVM::Type::getFunctionType(Context.VoidTy,
1508
27
                                            {Context.Int32Ty, Context.Int32Ty,
1509
27
                                             Context.Int32Ty, Context.Int32Ty,
1510
27
                                             Context.Int32Ty},
1511
27
                                            false)),
1512
27
            {LLContext.getInt32(Instr.getTargetIndex()),
1513
27
             LLContext.getInt32(Instr.getSourceIndex()), Dst, Src, Len});
1514
27
        break;
1515
622
      }
1516
33
      case OpCode::Elem__drop: {
1517
33
        Builder.createCall(
1518
33
            Context.getIntrinsic(Builder, Executable::Intrinsics::kElemDrop,
1519
33
                                 LLVM::Type::getFunctionType(
1520
33
                                     Context.VoidTy, {Context.Int32Ty}, false)),
1521
33
            {LLContext.getInt32(Instr.getTargetIndex())});
1522
33
        break;
1523
622
      }
1524
16
      case OpCode::Table__copy: {
1525
16
        auto Len = stackPop();
1526
16
        auto Src = stackPop();
1527
16
        auto Dst = stackPop();
1528
16
        Builder.createCall(
1529
16
            Context.getIntrinsic(
1530
16
                Builder, Executable::Intrinsics::kTableCopy,
1531
16
                LLVM::Type::getFunctionType(Context.VoidTy,
1532
16
                                            {Context.Int32Ty, Context.Int32Ty,
1533
16
                                             Context.Int32Ty, Context.Int32Ty,
1534
16
                                             Context.Int32Ty},
1535
16
                                            false)),
1536
16
            {LLContext.getInt32(Instr.getTargetIndex()),
1537
16
             LLContext.getInt32(Instr.getSourceIndex()), Dst, Src, Len});
1538
16
        break;
1539
622
      }
1540
18
      case OpCode::Table__grow: {
1541
18
        auto NewSize = stackPop();
1542
18
        auto Val = stackPop();
1543
18
        stackPush(Builder.createCall(
1544
18
            Context.getIntrinsic(
1545
18
                Builder, Executable::Intrinsics::kTableGrow,
1546
18
                LLVM::Type::getFunctionType(
1547
18
                    Context.Int32Ty,
1548
18
                    {Context.Int32Ty, Context.Int64x2Ty, Context.Int32Ty},
1549
18
                    false)),
1550
18
            {LLContext.getInt32(Instr.getTargetIndex()), Val, NewSize}));
1551
18
        break;
1552
622
      }
1553
21
      case OpCode::Table__size: {
1554
21
        stackPush(Builder.createCall(
1555
21
            Context.getIntrinsic(Builder, Executable::Intrinsics::kTableSize,
1556
21
                                 LLVM::Type::getFunctionType(Context.Int32Ty,
1557
21
                                                             {Context.Int32Ty},
1558
21
                                                             false)),
1559
21
            {LLContext.getInt32(Instr.getTargetIndex())}));
1560
21
        break;
1561
622
      }
1562
3
      case OpCode::Table__fill: {
1563
3
        auto Len = stackPop();
1564
3
        auto Val = stackPop();
1565
3
        auto Off = stackPop();
1566
3
        Builder.createCall(
1567
3
            Context.getIntrinsic(Builder, Executable::Intrinsics::kTableFill,
1568
3
                                 LLVM::Type::getFunctionType(
1569
3
                                     Context.Int32Ty,
1570
3
                                     {Context.Int32Ty, Context.Int32Ty,
1571
3
                                      Context.Int64x2Ty, Context.Int32Ty},
1572
3
                                     false)),
1573
3
            {LLContext.getInt32(Instr.getTargetIndex()), Off, Val, Len});
1574
3
        break;
1575
622
      }
1576
1577
      // Memory Instructions
1578
1.05k
      case OpCode::I32__load:
1579
1.05k
        compileLoadOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
1580
1.05k
                      Instr.getMemoryAlign(), Context.Int32Ty);
1581
1.05k
        break;
1582
2.87k
      case OpCode::I64__load:
1583
2.87k
        compileLoadOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
1584
2.87k
                      Instr.getMemoryAlign(), Context.Int64Ty);
1585
2.87k
        break;
1586
98
      case OpCode::F32__load:
1587
98
        compileLoadOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
1588
98
                      Instr.getMemoryAlign(), Context.FloatTy);
1589
98
        break;
1590
223
      case OpCode::F64__load:
1591
223
        compileLoadOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
1592
223
                      Instr.getMemoryAlign(), Context.DoubleTy);
1593
223
        break;
1594
457
      case OpCode::I32__load8_s:
1595
457
        compileLoadOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
1596
457
                      Instr.getMemoryAlign(), Context.Int8Ty, Context.Int32Ty,
1597
457
                      true);
1598
457
        break;
1599
171
      case OpCode::I32__load8_u:
1600
171
        compileLoadOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
1601
171
                      Instr.getMemoryAlign(), Context.Int8Ty, Context.Int32Ty,
1602
171
                      false);
1603
171
        break;
1604
321
      case OpCode::I32__load16_s:
1605
321
        compileLoadOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
1606
321
                      Instr.getMemoryAlign(), Context.Int16Ty, Context.Int32Ty,
1607
321
                      true);
1608
321
        break;
1609
1.55k
      case OpCode::I32__load16_u:
1610
1.55k
        compileLoadOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
1611
1.55k
                      Instr.getMemoryAlign(), Context.Int16Ty, Context.Int32Ty,
1612
1.55k
                      false);
1613
1.55k
        break;
1614
646
      case OpCode::I64__load8_s:
1615
646
        compileLoadOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
1616
646
                      Instr.getMemoryAlign(), Context.Int8Ty, Context.Int64Ty,
1617
646
                      true);
1618
646
        break;
1619
412
      case OpCode::I64__load8_u:
1620
412
        compileLoadOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
1621
412
                      Instr.getMemoryAlign(), Context.Int8Ty, Context.Int64Ty,
1622
412
                      false);
1623
412
        break;
1624
355
      case OpCode::I64__load16_s:
1625
355
        compileLoadOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
1626
355
                      Instr.getMemoryAlign(), Context.Int16Ty, Context.Int64Ty,
1627
355
                      true);
1628
355
        break;
1629
560
      case OpCode::I64__load16_u:
1630
560
        compileLoadOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
1631
560
                      Instr.getMemoryAlign(), Context.Int16Ty, Context.Int64Ty,
1632
560
                      false);
1633
560
        break;
1634
355
      case OpCode::I64__load32_s:
1635
355
        compileLoadOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
1636
355
                      Instr.getMemoryAlign(), Context.Int32Ty, Context.Int64Ty,
1637
355
                      true);
1638
355
        break;
1639
435
      case OpCode::I64__load32_u:
1640
435
        compileLoadOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
1641
435
                      Instr.getMemoryAlign(), Context.Int32Ty, Context.Int64Ty,
1642
435
                      false);
1643
435
        break;
1644
449
      case OpCode::I32__store:
1645
449
        compileStoreOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
1646
449
                       Instr.getMemoryAlign(), Context.Int32Ty);
1647
449
        break;
1648
1.34k
      case OpCode::I64__store:
1649
1.34k
        compileStoreOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
1650
1.34k
                       Instr.getMemoryAlign(), Context.Int64Ty);
1651
1.34k
        break;
1652
56
      case OpCode::F32__store:
1653
56
        compileStoreOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
1654
56
                       Instr.getMemoryAlign(), Context.FloatTy);
1655
56
        break;
1656
41
      case OpCode::F64__store:
1657
41
        compileStoreOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
1658
41
                       Instr.getMemoryAlign(), Context.DoubleTy);
1659
41
        break;
1660
300
      case OpCode::I32__store8:
1661
317
      case OpCode::I64__store8:
1662
317
        compileStoreOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
1663
317
                       Instr.getMemoryAlign(), Context.Int8Ty, true);
1664
317
        break;
1665
190
      case OpCode::I32__store16:
1666
240
      case OpCode::I64__store16:
1667
240
        compileStoreOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
1668
240
                       Instr.getMemoryAlign(), Context.Int16Ty, true);
1669
240
        break;
1670
29
      case OpCode::I64__store32:
1671
29
        compileStoreOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
1672
29
                       Instr.getMemoryAlign(), Context.Int32Ty, true);
1673
29
        break;
1674
888
      case OpCode::Memory__size:
1675
888
        stackPush(Builder.createCall(
1676
888
            Context.getIntrinsic(Builder, Executable::Intrinsics::kMemSize,
1677
888
                                 LLVM::Type::getFunctionType(Context.Int32Ty,
1678
888
                                                             {Context.Int32Ty},
1679
888
                                                             false)),
1680
888
            {LLContext.getInt32(Instr.getTargetIndex())}));
1681
888
        break;
1682
487
      case OpCode::Memory__grow: {
1683
487
        auto Diff = stackPop();
1684
487
        stackPush(Builder.createCall(
1685
487
            Context.getIntrinsic(
1686
487
                Builder, Executable::Intrinsics::kMemGrow,
1687
487
                LLVM::Type::getFunctionType(Context.Int32Ty,
1688
487
                                            {Context.Int32Ty, Context.Int32Ty},
1689
487
                                            false)),
1690
487
            {LLContext.getInt32(Instr.getTargetIndex()), Diff}));
1691
487
        break;
1692
190
      }
1693
24
      case OpCode::Memory__init: {
1694
24
        auto Len = stackPop();
1695
24
        auto Src = stackPop();
1696
24
        auto Dst = stackPop();
1697
24
        Builder.createCall(
1698
24
            Context.getIntrinsic(
1699
24
                Builder, Executable::Intrinsics::kMemInit,
1700
24
                LLVM::Type::getFunctionType(Context.VoidTy,
1701
24
                                            {Context.Int32Ty, Context.Int32Ty,
1702
24
                                             Context.Int32Ty, Context.Int32Ty,
1703
24
                                             Context.Int32Ty},
1704
24
                                            false)),
1705
24
            {LLContext.getInt32(Instr.getTargetIndex()),
1706
24
             LLContext.getInt32(Instr.getSourceIndex()), Dst, Src, Len});
1707
24
        break;
1708
190
      }
1709
22
      case OpCode::Data__drop: {
1710
22
        Builder.createCall(
1711
22
            Context.getIntrinsic(Builder, Executable::Intrinsics::kDataDrop,
1712
22
                                 LLVM::Type::getFunctionType(
1713
22
                                     Context.VoidTy, {Context.Int32Ty}, false)),
1714
22
            {LLContext.getInt32(Instr.getTargetIndex())});
1715
22
        break;
1716
190
      }
1717
253
      case OpCode::Memory__copy: {
1718
253
        auto Len = stackPop();
1719
253
        auto Src = stackPop();
1720
253
        auto Dst = stackPop();
1721
253
        Builder.createCall(
1722
253
            Context.getIntrinsic(
1723
253
                Builder, Executable::Intrinsics::kMemCopy,
1724
253
                LLVM::Type::getFunctionType(Context.VoidTy,
1725
253
                                            {Context.Int32Ty, Context.Int32Ty,
1726
253
                                             Context.Int32Ty, Context.Int32Ty,
1727
253
                                             Context.Int32Ty},
1728
253
                                            false)),
1729
253
            {LLContext.getInt32(Instr.getTargetIndex()),
1730
253
             LLContext.getInt32(Instr.getSourceIndex()), Dst, Src, Len});
1731
253
        break;
1732
190
      }
1733
568
      case OpCode::Memory__fill: {
1734
568
        auto Len = stackPop();
1735
568
        auto Val = Builder.createTrunc(stackPop(), Context.Int8Ty);
1736
568
        auto Off = stackPop();
1737
568
        Builder.createCall(
1738
568
            Context.getIntrinsic(
1739
568
                Builder, Executable::Intrinsics::kMemFill,
1740
568
                LLVM::Type::getFunctionType(Context.VoidTy,
1741
568
                                            {Context.Int32Ty, Context.Int32Ty,
1742
568
                                             Context.Int8Ty, Context.Int32Ty},
1743
568
                                            false)),
1744
568
            {LLContext.getInt32(Instr.getTargetIndex()), Off, Val, Len});
1745
568
        break;
1746
190
      }
1747
1748
      // Const Numeric Instructions
1749
504k
      case OpCode::I32__const:
1750
504k
        stackPush(LLContext.getInt32(Instr.getNum().get<uint32_t>()));
1751
504k
        break;
1752
87.3k
      case OpCode::I64__const:
1753
87.3k
        stackPush(LLContext.getInt64(Instr.getNum().get<uint64_t>()));
1754
87.3k
        break;
1755
13.9k
      case OpCode::F32__const:
1756
13.9k
        stackPush(LLContext.getFloat(Instr.getNum().get<float>()));
1757
13.9k
        break;
1758
6.21k
      case OpCode::F64__const:
1759
6.21k
        stackPush(LLContext.getDouble(Instr.getNum().get<double>()));
1760
6.21k
        break;
1761
1762
      // Unary Numeric Instructions
1763
6.73k
      case OpCode::I32__eqz:
1764
6.73k
        stackPush(Builder.createZExt(
1765
6.73k
            Builder.createICmpEQ(stackPop(), LLContext.getInt32(0)),
1766
6.73k
            Context.Int32Ty));
1767
6.73k
        break;
1768
1.28k
      case OpCode::I64__eqz:
1769
1.28k
        stackPush(Builder.createZExt(
1770
1.28k
            Builder.createICmpEQ(stackPop(), LLContext.getInt64(0)),
1771
1.28k
            Context.Int32Ty));
1772
1.28k
        break;
1773
2.06k
      case OpCode::I32__clz:
1774
2.06k
        assuming(LLVM::Core::Ctlz != LLVM::Core::NotIntrinsic);
1775
2.06k
        stackPush(Builder.createIntrinsic(LLVM::Core::Ctlz, {Context.Int32Ty},
1776
2.06k
                                          {stackPop(), LLContext.getFalse()}));
1777
2.06k
        break;
1778
861
      case OpCode::I64__clz:
1779
861
        assuming(LLVM::Core::Ctlz != LLVM::Core::NotIntrinsic);
1780
861
        stackPush(Builder.createIntrinsic(LLVM::Core::Ctlz, {Context.Int64Ty},
1781
861
                                          {stackPop(), LLContext.getFalse()}));
1782
861
        break;
1783
1.67k
      case OpCode::I32__ctz:
1784
1.67k
        assuming(LLVM::Core::Cttz != LLVM::Core::NotIntrinsic);
1785
1.67k
        stackPush(Builder.createIntrinsic(LLVM::Core::Cttz, {Context.Int32Ty},
1786
1.67k
                                          {stackPop(), LLContext.getFalse()}));
1787
1.67k
        break;
1788
417
      case OpCode::I64__ctz:
1789
417
        assuming(LLVM::Core::Cttz != LLVM::Core::NotIntrinsic);
1790
417
        stackPush(Builder.createIntrinsic(LLVM::Core::Cttz, {Context.Int64Ty},
1791
417
                                          {stackPop(), LLContext.getFalse()}));
1792
417
        break;
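
llvm.ctlz and llvm.cttz take a second i1 argument indicating whether a zero input is poison; the calls above pass false, so a zero operand is well defined and yields the full bit width, matching Wasm's clz/ctz semantics. A bit-by-bit reference for the 32-bit case (hypothetical helper clz32, not the compiled code):

#include <cassert>
#include <cstdint>

// Reference for i32.clz with the "zero is defined" behaviour requested above.
uint32_t clz32(uint32_t Value) {
  uint32_t Count = 0;
  for (uint32_t Bit = 0x80000000U; Bit != 0 && (Value & Bit) == 0; Bit >>= 1) {
    ++Count;
  }
  return Count;
}

int main() {
  assert(clz32(0U) == 32U); // defined result for zero, not poison
  assert(clz32(1U) == 31U);
  return 0;
}
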
1793
12.6k
      case OpCode::I32__popcnt:
1794
14.6k
      case OpCode::I64__popcnt:
1795
14.6k
        assuming(LLVM::Core::Ctpop != LLVM::Core::NotIntrinsic);
1796
14.6k
        stackPush(Builder.createUnaryIntrinsic(LLVM::Core::Ctpop, stackPop()));
1797
14.6k
        break;
1798
825
      case OpCode::F32__abs:
1799
1.37k
      case OpCode::F64__abs:
1800
1.37k
        assuming(LLVM::Core::Fabs != LLVM::Core::NotIntrinsic);
1801
1.37k
        stackPush(Builder.createUnaryIntrinsic(LLVM::Core::Fabs, stackPop()));
1802
1.37k
        break;
1803
1.05k
      case OpCode::F32__neg:
1804
1.81k
      case OpCode::F64__neg:
1805
1.81k
        stackPush(Builder.createFNeg(stackPop()));
1806
1.81k
        break;
1807
1.85k
      case OpCode::F32__ceil:
1808
4.43k
      case OpCode::F64__ceil:
1809
4.43k
        assuming(LLVM::Core::Ceil != LLVM::Core::NotIntrinsic);
1810
4.43k
        stackPush(Builder.createUnaryIntrinsic(LLVM::Core::Ceil, stackPop()));
1811
4.43k
        break;
1812
887
      case OpCode::F32__floor:
1813
1.26k
      case OpCode::F64__floor:
1814
1.26k
        assuming(LLVM::Core::Floor != LLVM::Core::NotIntrinsic);
1815
1.26k
        stackPush(Builder.createUnaryIntrinsic(LLVM::Core::Floor, stackPop()));
1816
1.26k
        break;
1817
504
      case OpCode::F32__trunc:
1818
791
      case OpCode::F64__trunc:
1819
791
        assuming(LLVM::Core::Trunc != LLVM::Core::NotIntrinsic);
1820
791
        stackPush(Builder.createUnaryIntrinsic(LLVM::Core::Trunc, stackPop()));
1821
791
        break;
1822
835
      case OpCode::F32__nearest:
1823
1.19k
      case OpCode::F64__nearest: {
1824
1.19k
        const bool IsFloat = Instr.getOpCode() == OpCode::F32__nearest;
1825
1.19k
        LLVM::Value Value = stackPop();
1826
1827
1.19k
#if LLVM_VERSION_MAJOR >= 12 && !defined(__s390x__)
1828
1.19k
        assuming(LLVM::Core::Roundeven != LLVM::Core::NotIntrinsic);
1829
1.19k
        if (LLVM::Core::Roundeven != LLVM::Core::NotIntrinsic) {
1830
1.19k
          stackPush(Builder.createUnaryIntrinsic(LLVM::Core::Roundeven, Value));
1831
1.19k
          break;
1832
1.19k
        }
1833
0
#endif
1834
1835
        // The VectorSize is only used when SSE4_1 or NEON is supported.
1836
0
        [[maybe_unused]] const uint32_t VectorSize = IsFloat ? 4 : 2;
1837
0
#if defined(__x86_64__)
1838
0
        if (Context.SupportSSE4_1) {
1839
0
          auto Zero = LLContext.getInt64(0);
1840
0
          auto VectorTy =
1841
0
              LLVM::Type::getVectorType(Value.getType(), VectorSize);
1842
0
          LLVM::Value Ret = LLVM::Value::getUndef(VectorTy);
1843
0
          Ret = Builder.createInsertElement(Ret, Value, Zero);
1844
0
          auto ID = IsFloat ? LLVM::Core::X86SSE41RoundSs
1845
0
                            : LLVM::Core::X86SSE41RoundSd;
1846
0
          assuming(ID != LLVM::Core::NotIntrinsic);
1847
0
          Ret = Builder.createIntrinsic(ID, {},
1848
0
                                        {Ret, Ret, LLContext.getInt32(8)});
1849
0
          Ret = Builder.createExtractElement(Ret, Zero);
1850
0
          stackPush(Ret);
1851
0
          break;
1852
0
        }
1853
0
#endif
1854
1855
#if defined(__aarch64__)
1856
        if (Context.SupportNEON &&
1857
            LLVM::Core::AArch64NeonFRIntN != LLVM::Core::NotIntrinsic) {
1858
          auto Zero = LLContext.getInt64(0);
1859
          auto VectorTy =
1860
              LLVM::Type::getVectorType(Value.getType(), VectorSize);
1861
          LLVM::Value Ret = LLVM::Value::getUndef(VectorTy);
1862
          Ret = Builder.createInsertElement(Ret, Value, Zero);
1863
          Ret =
1864
              Builder.createUnaryIntrinsic(LLVM::Core::AArch64NeonFRIntN, Ret);
1865
          Ret = Builder.createExtractElement(Ret, Zero);
1866
          stackPush(Ret);
1867
          break;
1868
        }
1869
#endif
1870
1871
        // Fallback case.
1872
        // If SSE4.1 is not supported on the x86_64 platform, or
1873
        // NEON is not supported on the aarch64 platform,
1874
        // then fall back to this nearbyint lowering.
1875
0
        assuming(LLVM::Core::Nearbyint != LLVM::Core::NotIntrinsic);
1876
0
        stackPush(Builder.createUnaryIntrinsic(LLVM::Core::Nearbyint, Value));
1877
0
        break;
1878
0
      }
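
All three paths above implement the same rounding rule: Wasm's f32.nearest/f64.nearest rounds to the nearest integer with ties to even. llvm.roundeven expresses that directly, the SSE4.1 roundss/roundsd path encodes it with immediate 8 (round-to-nearest with exceptions suppressed), and the nearbyint fallback relies on the default FE_TONEAREST rounding mode. A small check of the tie-breaking behaviour (plain C++ sketch, not the compiled code):

#include <cassert>
#include <cfenv>
#include <cmath>

// Ties round to the even neighbour under FE_TONEAREST, matching f64.nearest.
int main() {
  std::fesetround(FE_TONEAREST);
  assert(std::nearbyint(2.5) == 2.0);
  assert(std::nearbyint(3.5) == 4.0);
  assert(std::nearbyint(-2.5) == -2.0);
  return 0;
}
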
1879
398
      case OpCode::F32__sqrt:
1880
3.23k
      case OpCode::F64__sqrt:
1881
3.23k
        assuming(LLVM::Core::Sqrt != LLVM::Core::NotIntrinsic);
1882
3.23k
        stackPush(Builder.createUnaryIntrinsic(LLVM::Core::Sqrt, stackPop()));
1883
3.23k
        break;
1884
296
      case OpCode::I32__wrap_i64:
1885
296
        stackPush(Builder.createTrunc(stackPop(), Context.Int32Ty));
1886
296
        break;
1887
1.31k
      case OpCode::I32__trunc_f32_s:
1888
1.31k
        compileSignedTrunc(Context.Int32Ty);
1889
1.31k
        break;
1890
256
      case OpCode::I32__trunc_f64_s:
1891
256
        compileSignedTrunc(Context.Int32Ty);
1892
256
        break;
1893
165
      case OpCode::I32__trunc_f32_u:
1894
165
        compileUnsignedTrunc(Context.Int32Ty);
1895
165
        break;
1896
1.34k
      case OpCode::I32__trunc_f64_u:
1897
1.34k
        compileUnsignedTrunc(Context.Int32Ty);
1898
1.34k
        break;
1899
1.95k
      case OpCode::I64__extend_i32_s:
1900
1.95k
        stackPush(Builder.createSExt(stackPop(), Context.Int64Ty));
1901
1.95k
        break;
1902
336
      case OpCode::I64__extend_i32_u:
1903
336
        stackPush(Builder.createZExt(stackPop(), Context.Int64Ty));
1904
336
        break;
1905
54
      case OpCode::I64__trunc_f32_s:
1906
54
        compileSignedTrunc(Context.Int64Ty);
1907
54
        break;
1908
426
      case OpCode::I64__trunc_f64_s:
1909
426
        compileSignedTrunc(Context.Int64Ty);
1910
426
        break;
1911
952
      case OpCode::I64__trunc_f32_u:
1912
952
        compileUnsignedTrunc(Context.Int64Ty);
1913
952
        break;
1914
1.29k
      case OpCode::I64__trunc_f64_u:
1915
1.29k
        compileUnsignedTrunc(Context.Int64Ty);
1916
1.29k
        break;
1917
1.58k
      case OpCode::F32__convert_i32_s:
1918
2.01k
      case OpCode::F32__convert_i64_s:
1919
2.01k
        stackPush(Builder.createSIToFP(stackPop(), Context.FloatTy));
1920
2.01k
        break;
1921
637
      case OpCode::F32__convert_i32_u:
1922
1.78k
      case OpCode::F32__convert_i64_u:
1923
1.78k
        stackPush(Builder.createUIToFP(stackPop(), Context.FloatTy));
1924
1.78k
        break;
1925
1.65k
      case OpCode::F64__convert_i32_s:
1926
6.00k
      case OpCode::F64__convert_i64_s:
1927
6.00k
        stackPush(Builder.createSIToFP(stackPop(), Context.DoubleTy));
1928
6.00k
        break;
1929
1.42k
      case OpCode::F64__convert_i32_u:
1930
1.61k
      case OpCode::F64__convert_i64_u:
1931
1.61k
        stackPush(Builder.createUIToFP(stackPop(), Context.DoubleTy));
1932
1.61k
        break;
1933
175
      case OpCode::F32__demote_f64:
1934
175
        stackPush(Builder.createFPTrunc(stackPop(), Context.FloatTy));
1935
175
        break;
1936
92
      case OpCode::F64__promote_f32:
1937
92
        stackPush(Builder.createFPExt(stackPop(), Context.DoubleTy));
1938
92
        break;
1939
524
      case OpCode::I32__reinterpret_f32:
1940
524
        stackPush(Builder.createBitCast(stackPop(), Context.Int32Ty));
1941
524
        break;
1942
711
      case OpCode::I64__reinterpret_f64:
1943
711
        stackPush(Builder.createBitCast(stackPop(), Context.Int64Ty));
1944
711
        break;
1945
4.06k
      case OpCode::F32__reinterpret_i32:
1946
4.06k
        stackPush(Builder.createBitCast(stackPop(), Context.FloatTy));
1947
4.06k
        break;
1948
1.21k
      case OpCode::F64__reinterpret_i64:
1949
1.21k
        stackPush(Builder.createBitCast(stackPop(), Context.DoubleTy));
1950
1.21k
        break;
1951
2.30k
      case OpCode::I32__extend8_s:
1952
2.30k
        stackPush(Builder.createSExt(
1953
2.30k
            Builder.createTrunc(stackPop(), Context.Int8Ty), Context.Int32Ty));
1954
2.30k
        break;
1955
2.99k
      case OpCode::I32__extend16_s:
1956
2.99k
        stackPush(Builder.createSExt(
1957
2.99k
            Builder.createTrunc(stackPop(), Context.Int16Ty), Context.Int32Ty));
1958
2.99k
        break;
1959
372
      case OpCode::I64__extend8_s:
1960
372
        stackPush(Builder.createSExt(
1961
372
            Builder.createTrunc(stackPop(), Context.Int8Ty), Context.Int64Ty));
1962
372
        break;
1963
616
      case OpCode::I64__extend16_s:
1964
616
        stackPush(Builder.createSExt(
1965
616
            Builder.createTrunc(stackPop(), Context.Int16Ty), Context.Int64Ty));
1966
616
        break;
1967
754
      case OpCode::I64__extend32_s:
1968
754
        stackPush(Builder.createSExt(
1969
754
            Builder.createTrunc(stackPop(), Context.Int32Ty), Context.Int64Ty));
1970
754
        break;
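
The extendN_s opcodes above are all compiled as a truncation to the narrow integer type followed by a sign extension back to the operand width, which is exactly what a signed narrowing cast does in C++. A sketch for i32.extend8_s (hypothetical helper extend8S; the modular two's-complement conversion is guaranteed from C++20 on):

#include <cassert>
#include <cstdint>

// Trunc-then-sext, as emitted above for i32.extend8_s.
int32_t extend8S(uint32_t Value) {
  return static_cast<int32_t>(static_cast<int8_t>(Value)); // sign-extend from bit 7
}

int main() {
  assert(extend8S(0x80U) == -128);
  assert(extend8S(0x7FU) == 127);
  assert(extend8S(0x1FFU) == -1); // only the low 8 bits matter
  return 0;
}
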
1971
1972
      // Binary Numeric Instructions
1973
1.12k
      case OpCode::I32__eq:
1974
1.39k
      case OpCode::I64__eq: {
1975
1.39k
        LLVM::Value RHS = stackPop();
1976
1.39k
        LLVM::Value LHS = stackPop();
1977
1.39k
        stackPush(Builder.createZExt(Builder.createICmpEQ(LHS, RHS),
1978
1.39k
                                     Context.Int32Ty));
1979
1.39k
        break;
1980
1.12k
      }
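
Every comparison in this block is wrapped in a ZExt because LLVM's icmp/fcmp produce an i1, while the Wasm value stack expects an i32 that is exactly 0 or 1. In plain C++ the same widening falls out of the bool-to-integer conversion (illustrative helper i32Eq):

#include <cassert>
#include <cstdint>

// The bool -> uint32_t conversion plays the role of the ZExt above.
uint32_t i32Eq(uint32_t LHS, uint32_t RHS) {
  return static_cast<uint32_t>(LHS == RHS);
}

int main() {
  assert(i32Eq(3U, 3U) == 1U);
  assert(i32Eq(3U, 4U) == 0U);
  return 0;
}
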
1981
666
      case OpCode::I32__ne:
1982
693
      case OpCode::I64__ne: {
1983
693
        LLVM::Value RHS = stackPop();
1984
693
        LLVM::Value LHS = stackPop();
1985
693
        stackPush(Builder.createZExt(Builder.createICmpNE(LHS, RHS),
1986
693
                                     Context.Int32Ty));
1987
693
        break;
1988
666
      }
1989
4.29k
      case OpCode::I32__lt_s:
1990
4.95k
      case OpCode::I64__lt_s: {
1991
4.95k
        LLVM::Value RHS = stackPop();
1992
4.95k
        LLVM::Value LHS = stackPop();
1993
4.95k
        stackPush(Builder.createZExt(Builder.createICmpSLT(LHS, RHS),
1994
4.95k
                                     Context.Int32Ty));
1995
4.95k
        break;
1996
4.29k
      }
1997
5.92k
      case OpCode::I32__lt_u:
1998
6.30k
      case OpCode::I64__lt_u: {
1999
6.30k
        LLVM::Value RHS = stackPop();
2000
6.30k
        LLVM::Value LHS = stackPop();
2001
6.30k
        stackPush(Builder.createZExt(Builder.createICmpULT(LHS, RHS),
2002
6.30k
                                     Context.Int32Ty));
2003
6.30k
        break;
2004
5.92k
      }
2005
975
      case OpCode::I32__gt_s:
2006
1.41k
      case OpCode::I64__gt_s: {
2007
1.41k
        LLVM::Value RHS = stackPop();
2008
1.41k
        LLVM::Value LHS = stackPop();
2009
1.41k
        stackPush(Builder.createZExt(Builder.createICmpSGT(LHS, RHS),
2010
1.41k
                                     Context.Int32Ty));
2011
1.41k
        break;
2012
975
      }
2013
6.36k
      case OpCode::I32__gt_u:
2014
6.59k
      case OpCode::I64__gt_u: {
2015
6.59k
        LLVM::Value RHS = stackPop();
2016
6.59k
        LLVM::Value LHS = stackPop();
2017
6.59k
        stackPush(Builder.createZExt(Builder.createICmpUGT(LHS, RHS),
2018
6.59k
                                     Context.Int32Ty));
2019
6.59k
        break;
2020
6.36k
      }
2021
1.83k
      case OpCode::I32__le_s:
2022
2.74k
      case OpCode::I64__le_s: {
2023
2.74k
        LLVM::Value RHS = stackPop();
2024
2.74k
        LLVM::Value LHS = stackPop();
2025
2.74k
        stackPush(Builder.createZExt(Builder.createICmpSLE(LHS, RHS),
2026
2.74k
                                     Context.Int32Ty));
2027
2.74k
        break;
2028
1.83k
      }
2029
438
      case OpCode::I32__le_u:
2030
2.10k
      case OpCode::I64__le_u: {
2031
2.10k
        LLVM::Value RHS = stackPop();
2032
2.10k
        LLVM::Value LHS = stackPop();
2033
2.10k
        stackPush(Builder.createZExt(Builder.createICmpULE(LHS, RHS),
2034
2.10k
                                     Context.Int32Ty));
2035
2.10k
        break;
2036
438
      }
2037
1.13k
      case OpCode::I32__ge_s:
2038
1.16k
      case OpCode::I64__ge_s: {
2039
1.16k
        LLVM::Value RHS = stackPop();
2040
1.16k
        LLVM::Value LHS = stackPop();
2041
1.16k
        stackPush(Builder.createZExt(Builder.createICmpSGE(LHS, RHS),
2042
1.16k
                                     Context.Int32Ty));
2043
1.16k
        break;
2044
1.13k
      }
2045
1.56k
      case OpCode::I32__ge_u:
2046
2.21k
      case OpCode::I64__ge_u: {
2047
2.21k
        LLVM::Value RHS = stackPop();
2048
2.21k
        LLVM::Value LHS = stackPop();
2049
2.21k
        stackPush(Builder.createZExt(Builder.createICmpUGE(LHS, RHS),
2050
2.21k
                                     Context.Int32Ty));
2051
2.21k
        break;
2052
1.56k
      }
2053
158
      case OpCode::F32__eq:
2054
212
      case OpCode::F64__eq: {
2055
212
        LLVM::Value RHS = stackPop();
2056
212
        LLVM::Value LHS = stackPop();
2057
212
        stackPush(Builder.createZExt(Builder.createFCmpOEQ(LHS, RHS),
2058
212
                                     Context.Int32Ty));
2059
212
        break;
2060
158
      }
2061
92
      case OpCode::F32__ne:
2062
123
      case OpCode::F64__ne: {
2063
123
        LLVM::Value RHS = stackPop();
2064
123
        LLVM::Value LHS = stackPop();
2065
123
        stackPush(Builder.createZExt(Builder.createFCmpUNE(LHS, RHS),
2066
123
                                     Context.Int32Ty));
2067
123
        break;
2068
92
      }
2069
193
      case OpCode::F32__lt:
2070
319
      case OpCode::F64__lt: {
2071
319
        LLVM::Value RHS = stackPop();
2072
319
        LLVM::Value LHS = stackPop();
2073
319
        stackPush(Builder.createZExt(Builder.createFCmpOLT(LHS, RHS),
2074
319
                                     Context.Int32Ty));
2075
319
        break;
2076
193
      }
2077
147
      case OpCode::F32__gt:
2078
230
      case OpCode::F64__gt: {
2079
230
        LLVM::Value RHS = stackPop();
2080
230
        LLVM::Value LHS = stackPop();
2081
230
        stackPush(Builder.createZExt(Builder.createFCmpOGT(LHS, RHS),
2082
230
                                     Context.Int32Ty));
2083
230
        break;
2084
147
      }
2085
74
      case OpCode::F32__le:
2086
175
      case OpCode::F64__le: {
2087
175
        LLVM::Value RHS = stackPop();
2088
175
        LLVM::Value LHS = stackPop();
2089
175
        stackPush(Builder.createZExt(Builder.createFCmpOLE(LHS, RHS),
2090
175
                                     Context.Int32Ty));
2091
175
        break;
2092
74
      }
2093
232
      case OpCode::F32__ge:
2094
258
      case OpCode::F64__ge: {
2095
258
        LLVM::Value RHS = stackPop();
2096
258
        LLVM::Value LHS = stackPop();
2097
258
        stackPush(Builder.createZExt(Builder.createFCmpOGE(LHS, RHS),
2098
258
                                     Context.Int32Ty));
2099
258
        break;
2100
232
      }
2101
719
      case OpCode::I32__add:
2102
1.18k
      case OpCode::I64__add: {
2103
1.18k
        LLVM::Value RHS = stackPop();
2104
1.18k
        LLVM::Value LHS = stackPop();
2105
1.18k
        stackPush(Builder.createAdd(LHS, RHS));
2106
1.18k
        break;
2107
719
      }
2108
1.51k
      case OpCode::I32__sub:
2109
1.90k
      case OpCode::I64__sub: {
2110
1.90k
        LLVM::Value RHS = stackPop();
2111
1.90k
        LLVM::Value LHS = stackPop();
2112
2113
1.90k
        stackPush(Builder.createSub(LHS, RHS));
2114
1.90k
        break;
2115
1.51k
      }
2116
610
      case OpCode::I32__mul:
2117
1.04k
      case OpCode::I64__mul: {
2118
1.04k
        LLVM::Value RHS = stackPop();
2119
1.04k
        LLVM::Value LHS = stackPop();
2120
1.04k
        stackPush(Builder.createMul(LHS, RHS));
2121
1.04k
        break;
2122
610
      }
2123
1.37k
      case OpCode::I32__div_s:
2124
1.94k
      case OpCode::I64__div_s: {
2125
1.94k
        LLVM::Value RHS = stackPop();
2126
1.94k
        LLVM::Value LHS = stackPop();
2127
1.94k
        if constexpr (kForceDivCheck) {
2128
1.94k
          const bool Is32 = Instr.getOpCode() == OpCode::I32__div_s;
2129
1.94k
          LLVM::Value IntZero =
2130
1.94k
              Is32 ? LLContext.getInt32(0) : LLContext.getInt64(0);
2131
1.94k
          LLVM::Value IntMinusOne =
2132
1.94k
              Is32 ? LLContext.getInt32(static_cast<uint32_t>(INT32_C(-1)))
2133
1.94k
                   : LLContext.getInt64(static_cast<uint64_t>(INT64_C(-1)));
2134
1.94k
          LLVM::Value IntMin = Is32 ? LLContext.getInt32(static_cast<uint32_t>(
2135
1.37k
                                          std::numeric_limits<int32_t>::min()))
2136
1.94k
                                    : LLContext.getInt64(static_cast<uint64_t>(
2137
570
                                          std::numeric_limits<int64_t>::min()));
2138
2139
1.94k
          auto NoZeroBB =
2140
1.94k
              LLVM::BasicBlock::create(LLContext, F.Fn, "div.nozero");
2141
1.94k
          auto OkBB = LLVM::BasicBlock::create(LLContext, F.Fn, "div.ok");
2142
2143
1.94k
          auto IsNotZero =
2144
1.94k
              Builder.createLikely(Builder.createICmpNE(RHS, IntZero));
2145
1.94k
          Builder.createCondBr(IsNotZero, NoZeroBB,
2146
1.94k
                               getTrapBB(ErrCode::Value::DivideByZero));
2147
2148
1.94k
          Builder.positionAtEnd(NoZeroBB);
2149
1.94k
          auto NotOverflow = Builder.createLikely(
2150
1.94k
              Builder.createOr(Builder.createICmpNE(LHS, IntMin),
2151
1.94k
                               Builder.createICmpNE(RHS, IntMinusOne)));
2152
1.94k
          Builder.createCondBr(NotOverflow, OkBB,
2153
1.94k
                               getTrapBB(ErrCode::Value::IntegerOverflow));
2154
2155
1.94k
          Builder.positionAtEnd(OkBB);
2156
1.94k
        }
2157
1.94k
        stackPush(Builder.createSDiv(LHS, RHS));
2158
1.94k
        break;
2159
1.37k
      }
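
With kForceDivCheck enabled, i32/i64.div_s is guarded by two explicit branches before the native sdiv: a zero divisor traps with DivideByZero, and the single overflowing pair INT_MIN / -1 traps with IntegerOverflow, since those are the only inputs for which a hardware signed division is undefined. A behavioural sketch (hypothetical helper divS; exceptions stand in for the trap blocks):

#include <cassert>
#include <cstdint>
#include <limits>
#include <stdexcept>

// The two pre-checks emitted above for i32.div_s, written as plain C++.
int32_t divS(int32_t LHS, int32_t RHS) {
  if (RHS == 0) {
    throw std::runtime_error("integer divide by zero");
  }
  if (LHS == std::numeric_limits<int32_t>::min() && RHS == -1) {
    throw std::runtime_error("integer overflow");
  }
  return LHS / RHS; // now safe: truncating division, as Wasm requires
}

int main() {
  assert(divS(-7, 2) == -3);
  return 0;
}
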
2160
3.05k
      case OpCode::I32__div_u:
2161
3.37k
      case OpCode::I64__div_u: {
2162
3.37k
        LLVM::Value RHS = stackPop();
2163
3.37k
        LLVM::Value LHS = stackPop();
2164
3.37k
        if constexpr (kForceDivCheck) {
2165
3.37k
          const bool Is32 = Instr.getOpCode() == OpCode::I32__div_u;
2166
3.37k
          LLVM::Value IntZero =
2167
3.37k
              Is32 ? LLContext.getInt32(0) : LLContext.getInt64(0);
2168
3.37k
          auto OkBB = LLVM::BasicBlock::create(LLContext, F.Fn, "div.ok");
2169
2170
3.37k
          auto IsNotZero =
2171
3.37k
              Builder.createLikely(Builder.createICmpNE(RHS, IntZero));
2172
3.37k
          Builder.createCondBr(IsNotZero, OkBB,
2173
3.37k
                               getTrapBB(ErrCode::Value::DivideByZero));
2174
3.37k
          Builder.positionAtEnd(OkBB);
2175
3.37k
        }
2176
3.37k
        stackPush(Builder.createUDiv(LHS, RHS));
2177
3.37k
        break;
2178
3.05k
      }
2179
920
      case OpCode::I32__rem_s:
2180
1.37k
      case OpCode::I64__rem_s: {
2181
1.37k
        LLVM::Value RHS = stackPop();
2182
1.37k
        LLVM::Value LHS = stackPop();
2183
        // handle INT32_MIN % -1
2184
1.37k
        const bool Is32 = Instr.getOpCode() == OpCode::I32__rem_s;
2185
1.37k
        LLVM::Value IntMinusOne =
2186
1.37k
            Is32 ? LLContext.getInt32(static_cast<uint32_t>(INT32_C(-1)))
2187
1.37k
                 : LLContext.getInt64(static_cast<uint64_t>(INT64_C(-1)));
2188
1.37k
        LLVM::Value IntMin = Is32 ? LLContext.getInt32(static_cast<uint32_t>(
2189
920
                                        std::numeric_limits<int32_t>::min()))
2190
1.37k
                                  : LLContext.getInt64(static_cast<uint64_t>(
2191
450
                                        std::numeric_limits<int64_t>::min()));
2192
1.37k
        LLVM::Value IntZero =
2193
1.37k
            Is32 ? LLContext.getInt32(0) : LLContext.getInt64(0);
2194
2195
1.37k
        auto NoOverflowBB =
2196
1.37k
            LLVM::BasicBlock::create(LLContext, F.Fn, "no.overflow");
2197
1.37k
        auto EndBB = LLVM::BasicBlock::create(LLContext, F.Fn, "end.overflow");
2198
2199
1.37k
        if constexpr (kForceDivCheck) {
2200
1.37k
          auto OkBB = LLVM::BasicBlock::create(LLContext, F.Fn, "rem.ok");
2201
2202
1.37k
          auto IsNotZero =
2203
1.37k
              Builder.createLikely(Builder.createICmpNE(RHS, IntZero));
2204
1.37k
          Builder.createCondBr(IsNotZero, OkBB,
2205
1.37k
                               getTrapBB(ErrCode::Value::DivideByZero));
2206
1.37k
          Builder.positionAtEnd(OkBB);
2207
1.37k
        }
2208
2209
1.37k
        auto CurrBB = Builder.getInsertBlock();
2210
2211
1.37k
        auto NotOverflow = Builder.createLikely(
2212
1.37k
            Builder.createOr(Builder.createICmpNE(LHS, IntMin),
2213
1.37k
                             Builder.createICmpNE(RHS, IntMinusOne)));
2214
1.37k
        Builder.createCondBr(NotOverflow, NoOverflowBB, EndBB);
2215
2216
1.37k
        Builder.positionAtEnd(NoOverflowBB);
2217
1.37k
        auto Ret1 = Builder.createSRem(LHS, RHS);
2218
1.37k
        Builder.createBr(EndBB);
2219
2220
1.37k
        Builder.positionAtEnd(EndBB);
2221
1.37k
        auto Ret = Builder.createPHI(Ret1.getType());
2222
1.37k
        Ret.addIncoming(Ret1, NoOverflowBB);
2223
1.37k
        Ret.addIncoming(IntZero, CurrBB);
2224
2225
1.37k
        stackPush(Ret);
2226
1.37k
        break;
2227
920
      }
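
i32/i64.rem_s needs the extra control flow because INT_MIN % -1 would be undefined for a native srem even though Wasm defines the result as 0: the srem only runs on the no-overflow path, and the PHI at the end merges in the constant 0 for the overflowing pair. A behavioural sketch (hypothetical helper remS; the divide-by-zero trap is omitted here since the earlier check handles it):

#include <cassert>
#include <cstdint>
#include <limits>

// The overflow short-circuit that the PHI above implements for i32.rem_s.
int32_t remS(int32_t LHS, int32_t RHS) {
  if (LHS == std::numeric_limits<int32_t>::min() && RHS == -1) {
    return 0; // defined as 0 in Wasm, undefined for a native srem
  }
  return LHS % RHS;
}

int main() {
  assert(remS(std::numeric_limits<int32_t>::min(), -1) == 0);
  assert(remS(-7, 2) == -1); // remainder takes the sign of the dividend
  return 0;
}
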
2228
975
      case OpCode::I32__rem_u:
2229
1.55k
      case OpCode::I64__rem_u: {
2230
1.55k
        LLVM::Value RHS = stackPop();
2231
1.55k
        LLVM::Value LHS = stackPop();
2232
1.55k
        if constexpr (kForceDivCheck) {
2233
1.55k
          LLVM::Value IntZero = Instr.getOpCode() == OpCode::I32__rem_u
2234
1.55k
                                    ? LLContext.getInt32(0)
2235
1.55k
                                    : LLContext.getInt64(0);
2236
1.55k
          auto OkBB = LLVM::BasicBlock::create(LLContext, F.Fn, "rem.ok");
2237
2238
1.55k
          auto IsNotZero =
2239
1.55k
              Builder.createLikely(Builder.createICmpNE(RHS, IntZero));
2240
1.55k
          Builder.createCondBr(IsNotZero, OkBB,
2241
1.55k
                               getTrapBB(ErrCode::Value::DivideByZero));
2242
1.55k
          Builder.positionAtEnd(OkBB);
2243
1.55k
        }
2244
1.55k
        stackPush(Builder.createURem(LHS, RHS));
2245
1.55k
        break;
2246
975
      }
2247
641
      case OpCode::I32__and:
2248
1.98k
      case OpCode::I64__and: {
2249
1.98k
        LLVM::Value RHS = stackPop();
2250
1.98k
        LLVM::Value LHS = stackPop();
2251
1.98k
        stackPush(Builder.createAnd(LHS, RHS));
2252
1.98k
        break;
2253
641
      }
2254
937
      case OpCode::I32__or:
2255
1.28k
      case OpCode::I64__or: {
2256
1.28k
        LLVM::Value RHS = stackPop();
2257
1.28k
        LLVM::Value LHS = stackPop();
2258
1.28k
        stackPush(Builder.createOr(LHS, RHS));
2259
1.28k
        break;
2260
937
      }
2261
1.10k
      case OpCode::I32__xor:
2262
1.60k
      case OpCode::I64__xor: {
2263
1.60k
        LLVM::Value RHS = stackPop();
2264
1.60k
        LLVM::Value LHS = stackPop();
2265
1.60k
        stackPush(Builder.createXor(LHS, RHS));
2266
1.60k
        break;
2267
1.10k
      }
2268
1.48k
      case OpCode::I32__shl:
2269
1.85k
      case OpCode::I64__shl: {
2270
1.85k
        LLVM::Value Mask = Instr.getOpCode() == OpCode::I32__shl
2271
1.85k
                               ? LLContext.getInt32(31)
2272
1.85k
                               : LLContext.getInt64(63);
2273
1.85k
        LLVM::Value RHS = Builder.createAnd(stackPop(), Mask);
2274
1.85k
        LLVM::Value LHS = stackPop();
2275
1.85k
        stackPush(Builder.createShl(LHS, RHS));
2276
1.85k
        break;
2277
1.48k
      }
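
The shift opcodes mask the shift amount with 31 or 63 first because Wasm defines the count modulo the bit width, whereas both LLVM's shl (poison) and C++'s << (undefined behaviour) break down for counts greater than or equal to the width. A sketch for i32.shl (hypothetical helper shlI32):

#include <cassert>
#include <cstdint>

// Mask-then-shift, as emitted above for i32.shl.
uint32_t shlI32(uint32_t LHS, uint32_t RHS) {
  return LHS << (RHS & 31U);
}

int main() {
  assert(shlI32(1U, 33U) == 2U); // 33 mod 32 == 1
  assert(shlI32(1U, 32U) == 1U); // 32 mod 32 == 0
  return 0;
}
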
2278
1.74k
      case OpCode::I32__shr_s:
2279
2.12k
      case OpCode::I64__shr_s: {
2280
2.12k
        LLVM::Value Mask = Instr.getOpCode() == OpCode::I32__shr_s
2281
2.12k
                               ? LLContext.getInt32(31)
2282
2.12k
                               : LLContext.getInt64(63);
2283
2.12k
        LLVM::Value RHS = Builder.createAnd(stackPop(), Mask);
2284
2.12k
        LLVM::Value LHS = stackPop();
2285
2.12k
        stackPush(Builder.createAShr(LHS, RHS));
2286
2.12k
        break;
2287
1.74k
      }
2288
3.87k
      case OpCode::I32__shr_u:
2289
4.16k
      case OpCode::I64__shr_u: {
2290
4.16k
        LLVM::Value Mask = Instr.getOpCode() == OpCode::I32__shr_u
2291
4.16k
                               ? LLContext.getInt32(31)
2292
4.16k
                               : LLContext.getInt64(63);
2293
4.16k
        LLVM::Value RHS = Builder.createAnd(stackPop(), Mask);
2294
4.16k
        LLVM::Value LHS = stackPop();
2295
4.16k
        stackPush(Builder.createLShr(LHS, RHS));
2296
4.16k
        break;
2297
3.87k
      }
2298
2.51k
      case OpCode::I32__rotl: {
2299
2.51k
        LLVM::Value RHS = stackPop();
2300
2.51k
        LLVM::Value LHS = stackPop();
2301
2.51k
        assuming(LLVM::Core::FShl != LLVM::Core::NotIntrinsic);
2302
2.51k
        stackPush(Builder.createIntrinsic(LLVM::Core::FShl, {Context.Int32Ty},
2303
2.51k
                                          {LHS, LHS, RHS}));
2304
2.51k
        break;
2305
2.51k
      }
2306
886
      case OpCode::I32__rotr: {
2307
886
        LLVM::Value RHS = stackPop();
2308
886
        LLVM::Value LHS = stackPop();
2309
886
        assuming(LLVM::Core::FShr != LLVM::Core::NotIntrinsic);
2310
886
        stackPush(Builder.createIntrinsic(LLVM::Core::FShr, {Context.Int32Ty},
2311
886
                                          {LHS, LHS, RHS}));
2312
886
        break;
2313
886
      }
2314
870
      case OpCode::I64__rotl: {
2315
870
        LLVM::Value RHS = stackPop();
2316
870
        LLVM::Value LHS = stackPop();
2317
870
        assuming(LLVM::Core::FShl != LLVM::Core::NotIntrinsic);
2318
870
        stackPush(Builder.createIntrinsic(LLVM::Core::FShl, {Context.Int64Ty},
2319
870
                                          {LHS, LHS, RHS}));
2320
870
        break;
2321
870
      }
2322
1.37k
      case OpCode::I64__rotr: {
2323
1.37k
        LLVM::Value RHS = stackPop();
2324
1.37k
        LLVM::Value LHS = stackPop();
2325
1.37k
        assuming(LLVM::Core::FShr != LLVM::Core::NotIntrinsic);
2326
1.37k
        stackPush(Builder.createIntrinsic(LLVM::Core::FShr, {Context.Int64Ty},
2327
1.37k
                                          {LHS, LHS, RHS}));
2328
1.37k
        break;
2329
1.37k
      }
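
Rotates are lowered to the funnel-shift intrinsics with both data operands equal: llvm.fshl(x, x, n) is a left rotate by n modulo the width, and llvm.fshr(x, x, n) is the matching right rotate. A reference rotate for the 32-bit case (hypothetical helper rotl32):

#include <cassert>
#include <cstdint>

// Left rotate, equivalent to llvm.fshl(x, x, n) on i32.
uint32_t rotl32(uint32_t X, uint32_t N) {
  N &= 31U;
  return N == 0 ? X : (X << N) | (X >> (32U - N));
}

int main() {
  assert(rotl32(0x80000001U, 1U) == 0x00000003U);
  assert(rotl32(0x12345678U, 32U) == 0x12345678U); // count is taken mod 32
  return 0;
}
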
2330
274
      case OpCode::F32__add:
2331
577
      case OpCode::F64__add: {
2332
577
        LLVM::Value RHS = stackPop();
2333
577
        LLVM::Value LHS = stackPop();
2334
577
        stackPush(Builder.createFAdd(LHS, RHS));
2335
577
        break;
2336
274
      }
2337
134
      case OpCode::F32__sub:
2338
427
      case OpCode::F64__sub: {
2339
427
        LLVM::Value RHS = stackPop();
2340
427
        LLVM::Value LHS = stackPop();
2341
427
        stackPush(Builder.createFSub(LHS, RHS));
2342
427
        break;
2343
134
      }
2344
539
      case OpCode::F32__mul:
2345
701
      case OpCode::F64__mul: {
2346
701
        LLVM::Value RHS = stackPop();
2347
701
        LLVM::Value LHS = stackPop();
2348
701
        stackPush(Builder.createFMul(LHS, RHS));
2349
701
        break;
2350
539
      }
2351
226
      case OpCode::F32__div:
2352
569
      case OpCode::F64__div: {
2353
569
        LLVM::Value RHS = stackPop();
2354
569
        LLVM::Value LHS = stackPop();
2355
569
        stackPush(Builder.createFDiv(LHS, RHS));
2356
569
        break;
2357
226
      }
2358
309
      case OpCode::F32__min:
2359
657
      case OpCode::F64__min: {
2360
657
        LLVM::Value RHS = stackPop();
2361
657
        LLVM::Value LHS = stackPop();
2362
657
        auto FpTy = Instr.getOpCode() == OpCode::F32__min ? Context.FloatTy
2363
657
                                                          : Context.DoubleTy;
2364
657
        auto IntTy = Instr.getOpCode() == OpCode::F32__min ? Context.Int32Ty
2365
657
                                                           : Context.Int64Ty;
2366
2367
657
        auto UEQ = Builder.createFCmpUEQ(LHS, RHS);
2368
657
        auto UNO = Builder.createFCmpUNO(LHS, RHS);
2369
2370
657
        auto LHSInt = Builder.createBitCast(LHS, IntTy);
2371
657
        auto RHSInt = Builder.createBitCast(RHS, IntTy);
2372
657
        auto OrInt = Builder.createOr(LHSInt, RHSInt);
2373
657
        auto OrFp = Builder.createBitCast(OrInt, FpTy);
2374
2375
657
        auto AddFp = Builder.createFAdd(LHS, RHS);
2376
2377
657
        assuming(LLVM::Core::MinNum != LLVM::Core::NotIntrinsic);
2378
657
        auto MinFp = Builder.createIntrinsic(LLVM::Core::MinNum,
2379
657
                                             {LHS.getType()}, {LHS, RHS});
2380
2381
657
        auto Ret = Builder.createSelect(
2382
657
            UEQ, Builder.createSelect(UNO, AddFp, OrFp), MinFp);
2383
657
        stackPush(Ret);
2384
657
        break;
2385
657
      }
2386
329
      case OpCode::F32__max:
2387
935
      case OpCode::F64__max: {
2388
935
        LLVM::Value RHS = stackPop();
2389
935
        LLVM::Value LHS = stackPop();
2390
935
        auto FpTy = Instr.getOpCode() == OpCode::F32__max ? Context.FloatTy
2391
935
                                                          : Context.DoubleTy;
2392
935
        auto IntTy = Instr.getOpCode() == OpCode::F32__max ? Context.Int32Ty
2393
935
                                                           : Context.Int64Ty;
2394
2395
935
        auto UEQ = Builder.createFCmpUEQ(LHS, RHS);
2396
935
        auto UNO = Builder.createFCmpUNO(LHS, RHS);
2397
2398
935
        auto LHSInt = Builder.createBitCast(LHS, IntTy);
2399
935
        auto RHSInt = Builder.createBitCast(RHS, IntTy);
2400
935
        auto AndInt = Builder.createAnd(LHSInt, RHSInt);
2401
935
        auto AndFp = Builder.createBitCast(AndInt, FpTy);
2402
2403
935
        auto AddFp = Builder.createFAdd(LHS, RHS);
2404
2405
935
        assuming(LLVM::Core::MaxNum != LLVM::Core::NotIntrinsic);
2406
935
        auto MaxFp = Builder.createIntrinsic(LLVM::Core::MaxNum,
2407
935
                                             {LHS.getType()}, {LHS, RHS});
2408
2409
935
        auto Ret = Builder.createSelect(
2410
935
            UEQ, Builder.createSelect(UNO, AddFp, AndFp), MaxFp);
2411
935
        stackPush(Ret);
2412
935
        break;
2413
935
      }
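Note: the min/max lowering above folds Wasm's NaN and signed-zero rules into two nested selects: if either operand is NaN (the UNO compare) the fadd propagates a NaN; if the operands compare equal (UEQ, which also covers +0 vs. -0) the bitwise OR of the bit patterns yields -0 for min (the AND yields +0 for max); otherwise llvm.minnum/llvm.maxnum gives the ordinary result. A scalar sketch of that decision tree for f32.min, written as a hypothetical stand-alone helper for illustration only:

#include <cmath>
#include <cstdint>
#include <cstring>

// Follows the select tree built for OpCode::F32__min above.
static float wasmF32Min(float LHS, float RHS) noexcept {
  if (std::isnan(LHS) || std::isnan(RHS)) { // UNO: at least one operand is NaN
    return LHS + RHS;                       // the fadd propagates a NaN
  }
  if (LHS == RHS) {                         // ordered-equal, includes +0 vs -0
    uint32_t L, R;
    std::memcpy(&L, &LHS, sizeof(L));
    std::memcpy(&R, &RHS, sizeof(R));
    const uint32_t Bits = L | R;            // OR of the sign bits selects -0
    float Out;
    std::memcpy(&Out, &Bits, sizeof(Out));
    return Out;
  }
  return std::fmin(LHS, RHS);               // the llvm.minnum case
}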
2414
437
      case OpCode::F32__copysign:
2415
841
      case OpCode::F64__copysign: {
2416
841
        LLVM::Value RHS = stackPop();
2417
841
        LLVM::Value LHS = stackPop();
2418
841
        assuming(LLVM::Core::CopySign != LLVM::Core::NotIntrinsic);
2419
841
        stackPush(Builder.createIntrinsic(LLVM::Core::CopySign, {LHS.getType()},
2420
841
                                          {LHS, RHS}));
2421
841
        break;
2422
841
      }
2423
2424
      // Saturating Truncation Numeric Instructions
2425
171
      case OpCode::I32__trunc_sat_f32_s:
2426
171
        compileSignedTruncSat(Context.Int32Ty);
2427
171
        break;
2428
93
      case OpCode::I32__trunc_sat_f32_u:
2429
93
        compileUnsignedTruncSat(Context.Int32Ty);
2430
93
        break;
2431
334
      case OpCode::I32__trunc_sat_f64_s:
2432
334
        compileSignedTruncSat(Context.Int32Ty);
2433
334
        break;
2434
243
      case OpCode::I32__trunc_sat_f64_u:
2435
243
        compileUnsignedTruncSat(Context.Int32Ty);
2436
243
        break;
2437
422
      case OpCode::I64__trunc_sat_f32_s:
2438
422
        compileSignedTruncSat(Context.Int64Ty);
2439
422
        break;
2440
355
      case OpCode::I64__trunc_sat_f32_u:
2441
355
        compileUnsignedTruncSat(Context.Int64Ty);
2442
355
        break;
2443
196
      case OpCode::I64__trunc_sat_f64_s:
2444
196
        compileSignedTruncSat(Context.Int64Ty);
2445
196
        break;
2446
264
      case OpCode::I64__trunc_sat_f64_u:
2447
264
        compileUnsignedTruncSat(Context.Int64Ty);
2448
264
        break;
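Note: compileSignedTruncSat and compileUnsignedTruncSat are defined elsewhere in compiler.cpp and are outside this excerpt; at the Wasm level the saturating forms differ from plain trunc by clamping out-of-range inputs instead of trapping and by mapping NaN to 0. A scalar sketch of i32.trunc_sat_f32_s under that spec-level assumption (not the helper's actual body):

#include <cmath>
#include <cstdint>
#include <limits>

// Spec behaviour of i32.trunc_sat_f32_s: truncate toward zero, saturate at
// the i32 bounds, and turn NaN into 0 instead of trapping.
static int32_t truncSatF32ToI32(float V) noexcept {
  if (std::isnan(V)) {
    return 0;
  }
  if (V <= -2147483648.0f) {
    return std::numeric_limits<int32_t>::min();
  }
  if (V >= 2147483648.0f) {
    return std::numeric_limits<int32_t>::max();
  }
  return static_cast<int32_t>(std::trunc(V));
}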
2449
2450
      // SIMD Memory Instructions
2451
4.63k
      case OpCode::V128__load:
2452
4.63k
        compileVectorLoadOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
2453
4.63k
                            Instr.getMemoryAlign(), Context.Int128x1Ty);
2454
4.63k
        break;
2455
150
      case OpCode::V128__load8x8_s:
2456
150
        compileVectorLoadOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
2457
150
                            Instr.getMemoryAlign(),
2458
150
                            LLVM::Type::getVectorType(Context.Int8Ty, 8),
2459
150
                            Context.Int16x8Ty, true);
2460
150
        break;
2461
45
      case OpCode::V128__load8x8_u:
2462
45
        compileVectorLoadOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
2463
45
                            Instr.getMemoryAlign(),
2464
45
                            LLVM::Type::getVectorType(Context.Int8Ty, 8),
2465
45
                            Context.Int16x8Ty, false);
2466
45
        break;
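Note: v128.load8x8_s/u read only 8 bytes of memory and widen each byte to an i16 lane, which is presumably why the calls above hand compileVectorLoadOp an 8 x i8 load type, the Int16x8Ty result type, and what looks like a signedness flag (the helper body is not in this excerpt). A scalar sketch of the widening-load semantics, with a hypothetical helper name and no bounds checking:

#include <array>
#include <cstdint>

// v128.load8x8_s: load 8 consecutive bytes and sign-extend each to i16.
// Mem must point at at least 8 readable bytes.
static std::array<int16_t, 8> load8x8S(const uint8_t *Mem) noexcept {
  std::array<int16_t, 8> Lanes{};
  for (int I = 0; I < 8; ++I) {
    Lanes[I] = static_cast<int16_t>(static_cast<int8_t>(Mem[I]));
  }
  return Lanes;
}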
2467
365
      case OpCode::V128__load16x4_s:
2468
365
        compileVectorLoadOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
2469
365
                            Instr.getMemoryAlign(),
2470
365
                            LLVM::Type::getVectorType(Context.Int16Ty, 4),
2471
365
                            Context.Int32x4Ty, true);
2472
365
        break;
2473
444
      case OpCode::V128__load16x4_u:
2474
444
        compileVectorLoadOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
2475
444
                            Instr.getMemoryAlign(),
2476
444
                            LLVM::Type::getVectorType(Context.Int16Ty, 4),
2477
444
                            Context.Int32x4Ty, false);
2478
444
        break;
2479
139
      case OpCode::V128__load32x2_s:
2480
139
        compileVectorLoadOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
2481
139
                            Instr.getMemoryAlign(),
2482
139
                            LLVM::Type::getVectorType(Context.Int32Ty, 2),
2483
139
                            Context.Int64x2Ty, true);
2484
139
        break;
2485
132
      case OpCode::V128__load32x2_u:
2486
132
        compileVectorLoadOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
2487
132
                            Instr.getMemoryAlign(),
2488
132
                            LLVM::Type::getVectorType(Context.Int32Ty, 2),
2489
132
                            Context.Int64x2Ty, false);
2490
132
        break;
2491
73
      case OpCode::V128__load8_splat:
2492
73
        compileSplatLoadOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
2493
73
                           Instr.getMemoryAlign(), Context.Int8Ty,
2494
73
                           Context.Int8x16Ty);
2495
73
        break;
2496
125
      case OpCode::V128__load16_splat:
2497
125
        compileSplatLoadOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
2498
125
                           Instr.getMemoryAlign(), Context.Int16Ty,
2499
125
                           Context.Int16x8Ty);
2500
125
        break;
2501
231
      case OpCode::V128__load32_splat:
2502
231
        compileSplatLoadOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
2503
231
                           Instr.getMemoryAlign(), Context.Int32Ty,
2504
231
                           Context.Int32x4Ty);
2505
231
        break;
2506
124
      case OpCode::V128__load64_splat:
2507
124
        compileSplatLoadOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
2508
124
                           Instr.getMemoryAlign(), Context.Int64Ty,
2509
124
                           Context.Int64x2Ty);
2510
124
        break;
2511
81
      case OpCode::V128__load32_zero:
2512
81
        compileVectorLoadOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
2513
81
                            Instr.getMemoryAlign(), Context.Int32Ty,
2514
81
                            Context.Int128Ty, false);
2515
81
        break;
2516
148
      case OpCode::V128__load64_zero:
2517
148
        compileVectorLoadOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
2518
148
                            Instr.getMemoryAlign(), Context.Int64Ty,
2519
148
                            Context.Int128Ty, false);
2520
148
        break;
2521
267
      case OpCode::V128__store:
2522
267
        compileStoreOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
2523
267
                       Instr.getMemoryAlign(), Context.Int128x1Ty, false, true);
2524
267
        break;
2525
139
      case OpCode::V128__load8_lane:
2526
139
        compileLoadLaneOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
2527
139
                          Instr.getMemoryAlign(), Instr.getMemoryLane(),
2528
139
                          Context.Int8Ty, Context.Int8x16Ty);
2529
139
        break;
2530
140
      case OpCode::V128__load16_lane:
2531
140
        compileLoadLaneOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
2532
140
                          Instr.getMemoryAlign(), Instr.getMemoryLane(),
2533
140
                          Context.Int16Ty, Context.Int16x8Ty);
2534
140
        break;
2535
133
      case OpCode::V128__load32_lane:
2536
133
        compileLoadLaneOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
2537
133
                          Instr.getMemoryAlign(), Instr.getMemoryLane(),
2538
133
                          Context.Int32Ty, Context.Int32x4Ty);
2539
133
        break;
2540
22
      case OpCode::V128__load64_lane:
2541
22
        compileLoadLaneOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
2542
22
                          Instr.getMemoryAlign(), Instr.getMemoryLane(),
2543
22
                          Context.Int64Ty, Context.Int64x2Ty);
2544
22
        break;
2545
118
      case OpCode::V128__store8_lane:
2546
118
        compileStoreLaneOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
2547
118
                           Instr.getMemoryAlign(), Instr.getMemoryLane(),
2548
118
                           Context.Int8Ty, Context.Int8x16Ty);
2549
118
        break;
2550
94
      case OpCode::V128__store16_lane:
2551
94
        compileStoreLaneOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
2552
94
                           Instr.getMemoryAlign(), Instr.getMemoryLane(),
2553
94
                           Context.Int16Ty, Context.Int16x8Ty);
2554
94
        break;
2555
91
      case OpCode::V128__store32_lane:
2556
91
        compileStoreLaneOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
2557
91
                           Instr.getMemoryAlign(), Instr.getMemoryLane(),
2558
91
                           Context.Int32Ty, Context.Int32x4Ty);
2559
91
        break;
2560
35
      case OpCode::V128__store64_lane:
2561
35
        compileStoreLaneOp(Instr.getTargetIndex(), Instr.getMemoryOffset(),
2562
35
                           Instr.getMemoryAlign(), Instr.getMemoryLane(),
2563
35
                           Context.Int64Ty, Context.Int64x2Ty);
2564
35
        break;
2565
2566
      // SIMD Const Instructions
2567
372
      case OpCode::V128__const: {
2568
372
        const auto Value = Instr.getNum().get<uint64x2_t>();
2569
372
        auto Vector =
2570
372
            LLVM::Value::getConstVector64(LLContext, {Value[0], Value[1]});
2571
372
        stackPush(Builder.createBitCast(Vector, Context.Int64x2Ty));
2572
372
        break;
2573
841
      }
2574
2575
      // SIMD Shuffle Instructions
2576
16
      case OpCode::I8x16__shuffle: {
2577
16
        auto V2 = Builder.createBitCast(stackPop(), Context.Int8x16Ty);
2578
16
        auto V1 = Builder.createBitCast(stackPop(), Context.Int8x16Ty);
2579
16
        const auto V3 = Instr.getNum().get<uint128_t>();
2580
16
        std::array<uint8_t, 16> Mask;
2581
272
        for (size_t I = 0; I < 16; ++I) {
2582
256
          auto Num = static_cast<uint8_t>(V3 >> (I * 8));
2583
256
          if constexpr (Endian::native == Endian::little) {
2584
256
            Mask[I] = Num;
2585
          } else {
2586
            Mask[15 - I] = Num < 16 ? 15 - Num : 47 - Num;
2587
          }
2588
256
        }
2589
16
        stackPush(Builder.createBitCast(
2590
16
            Builder.createShuffleVector(
2591
16
                V1, V2, LLVM::Value::getConstVector8(LLContext, Mask)),
2592
16
            Context.Int64x2Ty));
2593
16
        break;
2594
841
      }
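Note: the i8x16.shuffle immediate arrives as a single 128-bit value, and the loop above peels one lane index per byte with `V3 >> (I * 8)` (on big-endian hosts it additionally remaps the lane numbers). A sketch of the little-endian extraction, taking the immediate as two 64-bit halves because uint128_t is an internal WasmEdge type:

#include <array>
#include <cstddef>
#include <cstdint>

// Unpacks the 16 byte-sized lane indices of an i8x16.shuffle immediate,
// given here as its low/high 64-bit halves in little-endian lane order.
static std::array<uint8_t, 16> unpackShuffleMask(uint64_t Lo,
                                                 uint64_t Hi) noexcept {
  std::array<uint8_t, 16> Mask{};
  for (std::size_t I = 0; I < 16; ++I) {
    const uint64_t Half = I < 8 ? Lo : Hi;
    Mask[I] = static_cast<uint8_t>(Half >> ((I % 8) * 8));
  }
  return Mask;
}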
2595
2596
      // SIMD Lane Instructions
2597
68
      case OpCode::I8x16__extract_lane_s:
2598
68
        compileExtractLaneOp(Context.Int8x16Ty, Instr.getMemoryLane(),
2599
68
                             Context.Int32Ty, true);
2600
68
        break;
2601
28
      case OpCode::I8x16__extract_lane_u:
2602
28
        compileExtractLaneOp(Context.Int8x16Ty, Instr.getMemoryLane(),
2603
28
                             Context.Int32Ty, false);
2604
28
        break;
2605
155
      case OpCode::I8x16__replace_lane:
2606
155
        compileReplaceLaneOp(Context.Int8x16Ty, Instr.getMemoryLane());
2607
155
        break;
2608
433
      case OpCode::I16x8__extract_lane_s:
2609
433
        compileExtractLaneOp(Context.Int16x8Ty, Instr.getMemoryLane(),
2610
433
                             Context.Int32Ty, true);
2611
433
        break;
2612
455
      case OpCode::I16x8__extract_lane_u:
2613
455
        compileExtractLaneOp(Context.Int16x8Ty, Instr.getMemoryLane(),
2614
455
                             Context.Int32Ty, false);
2615
455
        break;
2616
254
      case OpCode::I16x8__replace_lane:
2617
254
        compileReplaceLaneOp(Context.Int16x8Ty, Instr.getMemoryLane());
2618
254
        break;
2619
67
      case OpCode::I32x4__extract_lane:
2620
67
        compileExtractLaneOp(Context.Int32x4Ty, Instr.getMemoryLane());
2621
67
        break;
2622
216
      case OpCode::I32x4__replace_lane:
2623
216
        compileReplaceLaneOp(Context.Int32x4Ty, Instr.getMemoryLane());
2624
216
        break;
2625
125
      case OpCode::I64x2__extract_lane:
2626
125
        compileExtractLaneOp(Context.Int64x2Ty, Instr.getMemoryLane());
2627
125
        break;
2628
14
      case OpCode::I64x2__replace_lane:
2629
14
        compileReplaceLaneOp(Context.Int64x2Ty, Instr.getMemoryLane());
2630
14
        break;
2631
68
      case OpCode::F32x4__extract_lane:
2632
68
        compileExtractLaneOp(Context.Floatx4Ty, Instr.getMemoryLane());
2633
68
        break;
2634
24
      case OpCode::F32x4__replace_lane:
2635
24
        compileReplaceLaneOp(Context.Floatx4Ty, Instr.getMemoryLane());
2636
24
        break;
2637
74
      case OpCode::F64x2__extract_lane:
2638
74
        compileExtractLaneOp(Context.Doublex2Ty, Instr.getMemoryLane());
2639
74
        break;
2640
8
      case OpCode::F64x2__replace_lane:
2641
8
        compileReplaceLaneOp(Context.Doublex2Ty, Instr.getMemoryLane());
2642
8
        break;
2643
2644
      // SIMD Numeric Instructions
2645
65
      case OpCode::I8x16__swizzle:
2646
65
        compileVectorSwizzle();
2647
65
        break;
2648
32.6k
      case OpCode::I8x16__splat:
2649
32.6k
        compileSplatOp(Context.Int8x16Ty);
2650
32.6k
        break;
2651
9.58k
      case OpCode::I16x8__splat:
2652
9.58k
        compileSplatOp(Context.Int16x8Ty);
2653
9.58k
        break;
2654
1.25k
      case OpCode::I32x4__splat:
2655
1.25k
        compileSplatOp(Context.Int32x4Ty);
2656
1.25k
        break;
2657
400
      case OpCode::I64x2__splat:
2658
400
        compileSplatOp(Context.Int64x2Ty);
2659
400
        break;
2660
355
      case OpCode::F32x4__splat:
2661
355
        compileSplatOp(Context.Floatx4Ty);
2662
355
        break;
2663
64
      case OpCode::F64x2__splat:
2664
64
        compileSplatOp(Context.Doublex2Ty);
2665
64
        break;
2666
106
      case OpCode::I8x16__eq:
2667
106
        compileVectorCompareOp(Context.Int8x16Ty, LLVMIntEQ);
2668
106
        break;
2669
308
      case OpCode::I8x16__ne:
2670
308
        compileVectorCompareOp(Context.Int8x16Ty, LLVMIntNE);
2671
308
        break;
2672
54
      case OpCode::I8x16__lt_s:
2673
54
        compileVectorCompareOp(Context.Int8x16Ty, LLVMIntSLT);
2674
54
        break;
2675
70
      case OpCode::I8x16__lt_u:
2676
70
        compileVectorCompareOp(Context.Int8x16Ty, LLVMIntULT);
2677
70
        break;
2678
137
      case OpCode::I8x16__gt_s:
2679
137
        compileVectorCompareOp(Context.Int8x16Ty, LLVMIntSGT);
2680
137
        break;
2681
226
      case OpCode::I8x16__gt_u:
2682
226
        compileVectorCompareOp(Context.Int8x16Ty, LLVMIntUGT);
2683
226
        break;
2684
90
      case OpCode::I8x16__le_s:
2685
90
        compileVectorCompareOp(Context.Int8x16Ty, LLVMIntSLE);
2686
90
        break;
2687
96
      case OpCode::I8x16__le_u:
2688
96
        compileVectorCompareOp(Context.Int8x16Ty, LLVMIntULE);
2689
96
        break;
2690
551
      case OpCode::I8x16__ge_s:
2691
551
        compileVectorCompareOp(Context.Int8x16Ty, LLVMIntSGE);
2692
551
        break;
2693
118
      case OpCode::I8x16__ge_u:
2694
118
        compileVectorCompareOp(Context.Int8x16Ty, LLVMIntUGE);
2695
118
        break;
2696
71
      case OpCode::I16x8__eq:
2697
71
        compileVectorCompareOp(Context.Int16x8Ty, LLVMIntEQ);
2698
71
        break;
2699
225
      case OpCode::I16x8__ne:
2700
225
        compileVectorCompareOp(Context.Int16x8Ty, LLVMIntNE);
2701
225
        break;
2702
51
      case OpCode::I16x8__lt_s:
2703
51
        compileVectorCompareOp(Context.Int16x8Ty, LLVMIntSLT);
2704
51
        break;
2705
229
      case OpCode::I16x8__lt_u:
2706
229
        compileVectorCompareOp(Context.Int16x8Ty, LLVMIntULT);
2707
229
        break;
2708
228
      case OpCode::I16x8__gt_s:
2709
228
        compileVectorCompareOp(Context.Int16x8Ty, LLVMIntSGT);
2710
228
        break;
2711
131
      case OpCode::I16x8__gt_u:
2712
131
        compileVectorCompareOp(Context.Int16x8Ty, LLVMIntUGT);
2713
131
        break;
2714
76
      case OpCode::I16x8__le_s:
2715
76
        compileVectorCompareOp(Context.Int16x8Ty, LLVMIntSLE);
2716
76
        break;
2717
89
      case OpCode::I16x8__le_u:
2718
89
        compileVectorCompareOp(Context.Int16x8Ty, LLVMIntULE);
2719
89
        break;
2720
151
      case OpCode::I16x8__ge_s:
2721
151
        compileVectorCompareOp(Context.Int16x8Ty, LLVMIntSGE);
2722
151
        break;
2723
67
      case OpCode::I16x8__ge_u:
2724
67
        compileVectorCompareOp(Context.Int16x8Ty, LLVMIntUGE);
2725
67
        break;
2726
70
      case OpCode::I32x4__eq:
2727
70
        compileVectorCompareOp(Context.Int32x4Ty, LLVMIntEQ);
2728
70
        break;
2729
105
      case OpCode::I32x4__ne:
2730
105
        compileVectorCompareOp(Context.Int32x4Ty, LLVMIntNE);
2731
105
        break;
2732
50
      case OpCode::I32x4__lt_s:
2733
50
        compileVectorCompareOp(Context.Int32x4Ty, LLVMIntSLT);
2734
50
        break;
2735
160
      case OpCode::I32x4__lt_u:
2736
160
        compileVectorCompareOp(Context.Int32x4Ty, LLVMIntULT);
2737
160
        break;
2738
112
      case OpCode::I32x4__gt_s:
2739
112
        compileVectorCompareOp(Context.Int32x4Ty, LLVMIntSGT);
2740
112
        break;
2741
220
      case OpCode::I32x4__gt_u:
2742
220
        compileVectorCompareOp(Context.Int32x4Ty, LLVMIntUGT);
2743
220
        break;
2744
299
      case OpCode::I32x4__le_s:
2745
299
        compileVectorCompareOp(Context.Int32x4Ty, LLVMIntSLE);
2746
299
        break;
2747
261
      case OpCode::I32x4__le_u:
2748
261
        compileVectorCompareOp(Context.Int32x4Ty, LLVMIntULE);
2749
261
        break;
2750
61
      case OpCode::I32x4__ge_s:
2751
61
        compileVectorCompareOp(Context.Int32x4Ty, LLVMIntSGE);
2752
61
        break;
2753
98
      case OpCode::I32x4__ge_u:
2754
98
        compileVectorCompareOp(Context.Int32x4Ty, LLVMIntUGE);
2755
98
        break;
2756
102
      case OpCode::I64x2__eq:
2757
102
        compileVectorCompareOp(Context.Int64x2Ty, LLVMIntEQ);
2758
102
        break;
2759
52
      case OpCode::I64x2__ne:
2760
52
        compileVectorCompareOp(Context.Int64x2Ty, LLVMIntNE);
2761
52
        break;
2762
50
      case OpCode::I64x2__lt_s:
2763
50
        compileVectorCompareOp(Context.Int64x2Ty, LLVMIntSLT);
2764
50
        break;
2765
127
      case OpCode::I64x2__gt_s:
2766
127
        compileVectorCompareOp(Context.Int64x2Ty, LLVMIntSGT);
2767
127
        break;
2768
34
      case OpCode::I64x2__le_s:
2769
34
        compileVectorCompareOp(Context.Int64x2Ty, LLVMIntSLE);
2770
34
        break;
2771
42
      case OpCode::I64x2__ge_s:
2772
42
        compileVectorCompareOp(Context.Int64x2Ty, LLVMIntSGE);
2773
42
        break;
2774
1.30k
      case OpCode::F32x4__eq:
2775
1.30k
        compileVectorCompareOp(Context.Floatx4Ty, LLVMRealOEQ,
2776
1.30k
                               Context.Int32x4Ty);
2777
1.30k
        break;
2778
38
      case OpCode::F32x4__ne:
2779
38
        compileVectorCompareOp(Context.Floatx4Ty, LLVMRealUNE,
2780
38
                               Context.Int32x4Ty);
2781
38
        break;
2782
887
      case OpCode::F32x4__lt:
2783
887
        compileVectorCompareOp(Context.Floatx4Ty, LLVMRealOLT,
2784
887
                               Context.Int32x4Ty);
2785
887
        break;
2786
74
      case OpCode::F32x4__gt:
2787
74
        compileVectorCompareOp(Context.Floatx4Ty, LLVMRealOGT,
2788
74
                               Context.Int32x4Ty);
2789
74
        break;
2790
335
      case OpCode::F32x4__le:
2791
335
        compileVectorCompareOp(Context.Floatx4Ty, LLVMRealOLE,
2792
335
                               Context.Int32x4Ty);
2793
335
        break;
2794
76
      case OpCode::F32x4__ge:
2795
76
        compileVectorCompareOp(Context.Floatx4Ty, LLVMRealOGE,
2796
76
                               Context.Int32x4Ty);
2797
76
        break;
2798
58
      case OpCode::F64x2__eq:
2799
58
        compileVectorCompareOp(Context.Doublex2Ty, LLVMRealOEQ,
2800
58
                               Context.Int64x2Ty);
2801
58
        break;
2802
116
      case OpCode::F64x2__ne:
2803
116
        compileVectorCompareOp(Context.Doublex2Ty, LLVMRealUNE,
2804
116
                               Context.Int64x2Ty);
2805
116
        break;
2806
134
      case OpCode::F64x2__lt:
2807
134
        compileVectorCompareOp(Context.Doublex2Ty, LLVMRealOLT,
2808
134
                               Context.Int64x2Ty);
2809
134
        break;
2810
57
      case OpCode::F64x2__gt:
2811
57
        compileVectorCompareOp(Context.Doublex2Ty, LLVMRealOGT,
2812
57
                               Context.Int64x2Ty);
2813
57
        break;
2814
188
      case OpCode::F64x2__le:
2815
188
        compileVectorCompareOp(Context.Doublex2Ty, LLVMRealOLE,
2816
188
                               Context.Int64x2Ty);
2817
188
        break;
2818
87
      case OpCode::F64x2__ge:
2819
87
        compileVectorCompareOp(Context.Doublex2Ty, LLVMRealOGE,
2820
87
                               Context.Int64x2Ty);
2821
87
        break;
2822
136
      case OpCode::V128__not:
2823
136
        Stack.back() = Builder.createNot(Stack.back());
2824
136
        break;
2825
74
      case OpCode::V128__and: {
2826
74
        auto RHS = stackPop();
2827
74
        auto LHS = stackPop();
2828
74
        stackPush(Builder.createAnd(LHS, RHS));
2829
74
        break;
2830
841
      }
2831
92
      case OpCode::V128__andnot: {
2832
92
        auto RHS = stackPop();
2833
92
        auto LHS = stackPop();
2834
92
        stackPush(Builder.createAnd(LHS, Builder.createNot(RHS)));
2835
92
        break;
2836
841
      }
2837
122
      case OpCode::V128__or: {
2838
122
        auto RHS = stackPop();
2839
122
        auto LHS = stackPop();
2840
122
        stackPush(Builder.createOr(LHS, RHS));
2841
122
        break;
2842
841
      }
2843
60
      case OpCode::V128__xor: {
2844
60
        auto RHS = stackPop();
2845
60
        auto LHS = stackPop();
2846
60
        stackPush(Builder.createXor(LHS, RHS));
2847
60
        break;
2848
841
      }
2849
126
      case OpCode::V128__bitselect: {
2850
126
        auto C = stackPop();
2851
126
        auto V2 = stackPop();
2852
126
        auto V1 = stackPop();
2853
126
        stackPush(Builder.createXor(
2854
126
            Builder.createAnd(Builder.createXor(V1, V2), C), V2));
2855
126
        break;
2856
841
      }
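Note: v128.bitselect picks, for every bit, V1 where the mask C is 1 and V2 where it is 0; the lowering above uses the branch-free identity ((V1 ^ V2) & C) ^ V2, which equals (V1 & C) | (V2 & ~C) but avoids materializing ~C. A scalar sketch of the identity on a 64-bit lane, as a hypothetical helper:

#include <cassert>
#include <cstdint>

// Bitwise select: result bit = V1 bit where the C bit is 1, V2 bit otherwise.
static uint64_t bitselect(uint64_t V1, uint64_t V2, uint64_t C) noexcept {
  return ((V1 ^ V2) & C) ^ V2;
}

int main() {
  // Identical to the straightforward (V1 & C) | (V2 & ~C) form.
  const uint64_t V1 = 0x1234567890ABCDEFULL, V2 = 0xFEDCBA0987654321ULL,
                 C = 0x00FF00FF00FF00FFULL;
  assert(bitselect(V1, V2, C) == ((V1 & C) | (V2 & ~C)));
  return 0;
}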
2857
107
      case OpCode::V128__any_true:
2858
107
        compileVectorAnyTrue();
2859
107
        break;
2860
829
      case OpCode::I8x16__abs:
2861
829
        compileVectorAbs(Context.Int8x16Ty);
2862
829
        break;
2863
1.44k
      case OpCode::I8x16__neg:
2864
1.44k
        compileVectorNeg(Context.Int8x16Ty);
2865
1.44k
        break;
2866
130
      case OpCode::I8x16__popcnt:
2867
130
        compileVectorPopcnt();
2868
130
        break;
2869
313
      case OpCode::I8x16__all_true:
2870
313
        compileVectorAllTrue(Context.Int8x16Ty);
2871
313
        break;
2872
487
      case OpCode::I8x16__bitmask:
2873
487
        compileVectorBitMask(Context.Int8x16Ty);
2874
487
        break;
2875
82
      case OpCode::I8x16__narrow_i16x8_s:
2876
82
        compileVectorNarrow(Context.Int16x8Ty, true);
2877
82
        break;
2878
177
      case OpCode::I8x16__narrow_i16x8_u:
2879
177
        compileVectorNarrow(Context.Int16x8Ty, false);
2880
177
        break;
2881
107
      case OpCode::I8x16__shl:
2882
107
        compileVectorShl(Context.Int8x16Ty);
2883
107
        break;
2884
1.04k
      case OpCode::I8x16__shr_s:
2885
1.04k
        compileVectorAShr(Context.Int8x16Ty);
2886
1.04k
        break;
2887
59
      case OpCode::I8x16__shr_u:
2888
59
        compileVectorLShr(Context.Int8x16Ty);
2889
59
        break;
2890
56
      case OpCode::I8x16__add:
2891
56
        compileVectorVectorAdd(Context.Int8x16Ty);
2892
56
        break;
2893
432
      case OpCode::I8x16__add_sat_s:
2894
432
        compileVectorVectorAddSat(Context.Int8x16Ty, true);
2895
432
        break;
2896
74
      case OpCode::I8x16__add_sat_u:
2897
74
        compileVectorVectorAddSat(Context.Int8x16Ty, false);
2898
74
        break;
2899
71
      case OpCode::I8x16__sub:
2900
71
        compileVectorVectorSub(Context.Int8x16Ty);
2901
71
        break;
2902
209
      case OpCode::I8x16__sub_sat_s:
2903
209
        compileVectorVectorSubSat(Context.Int8x16Ty, true);
2904
209
        break;
2905
74
      case OpCode::I8x16__sub_sat_u:
2906
74
        compileVectorVectorSubSat(Context.Int8x16Ty, false);
2907
74
        break;
2908
54
      case OpCode::I8x16__min_s:
2909
54
        compileVectorVectorSMin(Context.Int8x16Ty);
2910
54
        break;
2911
61
      case OpCode::I8x16__min_u:
2912
61
        compileVectorVectorUMin(Context.Int8x16Ty);
2913
61
        break;
2914
256
      case OpCode::I8x16__max_s:
2915
256
        compileVectorVectorSMax(Context.Int8x16Ty);
2916
256
        break;
2917
86
      case OpCode::I8x16__max_u:
2918
86
        compileVectorVectorUMax(Context.Int8x16Ty);
2919
86
        break;
2920
116
      case OpCode::I8x16__avgr_u:
2921
116
        compileVectorVectorUAvgr(Context.Int8x16Ty);
2922
116
        break;
2923
208
      case OpCode::I16x8__abs:
2924
208
        compileVectorAbs(Context.Int16x8Ty);
2925
208
        break;
2926
238
      case OpCode::I16x8__neg:
2927
238
        compileVectorNeg(Context.Int16x8Ty);
2928
238
        break;
2929
110
      case OpCode::I16x8__all_true:
2930
110
        compileVectorAllTrue(Context.Int16x8Ty);
2931
110
        break;
2932
104
      case OpCode::I16x8__bitmask:
2933
104
        compileVectorBitMask(Context.Int16x8Ty);
2934
104
        break;
2935
46
      case OpCode::I16x8__narrow_i32x4_s:
2936
46
        compileVectorNarrow(Context.Int32x4Ty, true);
2937
46
        break;
2938
378
      case OpCode::I16x8__narrow_i32x4_u:
2939
378
        compileVectorNarrow(Context.Int32x4Ty, false);
2940
378
        break;
2941
850
      case OpCode::I16x8__extend_low_i8x16_s:
2942
850
        compileVectorExtend(Context.Int8x16Ty, true, true);
2943
850
        break;
2944
58
      case OpCode::I16x8__extend_high_i8x16_s:
2945
58
        compileVectorExtend(Context.Int8x16Ty, true, false);
2946
58
        break;
2947
472
      case OpCode::I16x8__extend_low_i8x16_u:
2948
472
        compileVectorExtend(Context.Int8x16Ty, false, true);
2949
472
        break;
2950
12
      case OpCode::I16x8__extend_high_i8x16_u:
2951
12
        compileVectorExtend(Context.Int8x16Ty, false, false);
2952
12
        break;
2953
76
      case OpCode::I16x8__shl:
2954
76
        compileVectorShl(Context.Int16x8Ty);
2955
76
        break;
2956
253
      case OpCode::I16x8__shr_s:
2957
253
        compileVectorAShr(Context.Int16x8Ty);
2958
253
        break;
2959
52
      case OpCode::I16x8__shr_u:
2960
52
        compileVectorLShr(Context.Int16x8Ty);
2961
52
        break;
2962
102
      case OpCode::I16x8__add:
2963
102
        compileVectorVectorAdd(Context.Int16x8Ty);
2964
102
        break;
2965
20
      case OpCode::I16x8__add_sat_s:
2966
20
        compileVectorVectorAddSat(Context.Int16x8Ty, true);
2967
20
        break;
2968
649
      case OpCode::I16x8__add_sat_u:
2969
649
        compileVectorVectorAddSat(Context.Int16x8Ty, false);
2970
649
        break;
2971
367
      case OpCode::I16x8__sub:
2972
367
        compileVectorVectorSub(Context.Int16x8Ty);
2973
367
        break;
2974
21
      case OpCode::I16x8__sub_sat_s:
2975
21
        compileVectorVectorSubSat(Context.Int16x8Ty, true);
2976
21
        break;
2977
67
      case OpCode::I16x8__sub_sat_u:
2978
67
        compileVectorVectorSubSat(Context.Int16x8Ty, false);
2979
67
        break;
2980
111
      case OpCode::I16x8__mul:
2981
111
        compileVectorVectorMul(Context.Int16x8Ty);
2982
111
        break;
2983
112
      case OpCode::I16x8__min_s:
2984
112
        compileVectorVectorSMin(Context.Int16x8Ty);
2985
112
        break;
2986
124
      case OpCode::I16x8__min_u:
2987
124
        compileVectorVectorUMin(Context.Int16x8Ty);
2988
124
        break;
2989
79
      case OpCode::I16x8__max_s:
2990
79
        compileVectorVectorSMax(Context.Int16x8Ty);
2991
79
        break;
2992
576
      case OpCode::I16x8__max_u:
2993
576
        compileVectorVectorUMax(Context.Int16x8Ty);
2994
576
        break;
2995
104
      case OpCode::I16x8__avgr_u:
2996
104
        compileVectorVectorUAvgr(Context.Int16x8Ty);
2997
104
        break;
2998
69
      case OpCode::I16x8__extmul_low_i8x16_s:
2999
69
        compileVectorExtMul(Context.Int8x16Ty, true, true);
3000
69
        break;
3001
205
      case OpCode::I16x8__extmul_high_i8x16_s:
3002
205
        compileVectorExtMul(Context.Int8x16Ty, true, false);
3003
205
        break;
3004
113
      case OpCode::I16x8__extmul_low_i8x16_u:
3005
113
        compileVectorExtMul(Context.Int8x16Ty, false, true);
3006
113
        break;
3007
507
      case OpCode::I16x8__extmul_high_i8x16_u:
3008
507
        compileVectorExtMul(Context.Int8x16Ty, false, false);
3009
507
        break;
3010
134
      case OpCode::I16x8__q15mulr_sat_s:
3011
134
        compileVectorVectorQ15MulSat();
3012
134
        break;
3013
303
      case OpCode::I16x8__extadd_pairwise_i8x16_s:
3014
303
        compileVectorExtAddPairwise(Context.Int8x16Ty, true);
3015
303
        break;
3016
329
      case OpCode::I16x8__extadd_pairwise_i8x16_u:
3017
329
        compileVectorExtAddPairwise(Context.Int8x16Ty, false);
3018
329
        break;
3019
59
      case OpCode::I32x4__abs:
3020
59
        compileVectorAbs(Context.Int32x4Ty);
3021
59
        break;
3022
189
      case OpCode::I32x4__neg:
3023
189
        compileVectorNeg(Context.Int32x4Ty);
3024
189
        break;
3025
176
      case OpCode::I32x4__all_true:
3026
176
        compileVectorAllTrue(Context.Int32x4Ty);
3027
176
        break;
3028
86
      case OpCode::I32x4__bitmask:
3029
86
        compileVectorBitMask(Context.Int32x4Ty);
3030
86
        break;
3031
113
      case OpCode::I32x4__extend_low_i16x8_s:
3032
113
        compileVectorExtend(Context.Int16x8Ty, true, true);
3033
113
        break;
3034
509
      case OpCode::I32x4__extend_high_i16x8_s:
3035
509
        compileVectorExtend(Context.Int16x8Ty, true, false);
3036
509
        break;
3037
1.89k
      case OpCode::I32x4__extend_low_i16x8_u:
3038
1.89k
        compileVectorExtend(Context.Int16x8Ty, false, true);
3039
1.89k
        break;
3040
138
      case OpCode::I32x4__extend_high_i16x8_u:
3041
138
        compileVectorExtend(Context.Int16x8Ty, false, false);
3042
138
        break;
3043
968
      case OpCode::I32x4__shl:
3044
968
        compileVectorShl(Context.Int32x4Ty);
3045
968
        break;
3046
172
      case OpCode::I32x4__shr_s:
3047
172
        compileVectorAShr(Context.Int32x4Ty);
3048
172
        break;
3049
97
      case OpCode::I32x4__shr_u:
3050
97
        compileVectorLShr(Context.Int32x4Ty);
3051
97
        break;
3052
106
      case OpCode::I32x4__add:
3053
106
        compileVectorVectorAdd(Context.Int32x4Ty);
3054
106
        break;
3055
156
      case OpCode::I32x4__sub:
3056
156
        compileVectorVectorSub(Context.Int32x4Ty);
3057
156
        break;
3058
239
      case OpCode::I32x4__mul:
3059
239
        compileVectorVectorMul(Context.Int32x4Ty);
3060
239
        break;
3061
97
      case OpCode::I32x4__min_s:
3062
97
        compileVectorVectorSMin(Context.Int32x4Ty);
3063
97
        break;
3064
59
      case OpCode::I32x4__min_u:
3065
59
        compileVectorVectorUMin(Context.Int32x4Ty);
3066
59
        break;
3067
62
      case OpCode::I32x4__max_s:
3068
62
        compileVectorVectorSMax(Context.Int32x4Ty);
3069
62
        break;
3070
92
      case OpCode::I32x4__max_u:
3071
92
        compileVectorVectorUMax(Context.Int32x4Ty);
3072
92
        break;
3073
103
      case OpCode::I32x4__extmul_low_i16x8_s:
3074
103
        compileVectorExtMul(Context.Int16x8Ty, true, true);
3075
103
        break;
3076
48
      case OpCode::I32x4__extmul_high_i16x8_s:
3077
48
        compileVectorExtMul(Context.Int16x8Ty, true, false);
3078
48
        break;
3079
222
      case OpCode::I32x4__extmul_low_i16x8_u:
3080
222
        compileVectorExtMul(Context.Int16x8Ty, false, true);
3081
222
        break;
3082
43
      case OpCode::I32x4__extmul_high_i16x8_u:
3083
43
        compileVectorExtMul(Context.Int16x8Ty, false, false);
3084
43
        break;
3085
1.12k
      case OpCode::I32x4__extadd_pairwise_i16x8_s:
3086
1.12k
        compileVectorExtAddPairwise(Context.Int16x8Ty, true);
3087
1.12k
        break;
3088
369
      case OpCode::I32x4__extadd_pairwise_i16x8_u:
3089
369
        compileVectorExtAddPairwise(Context.Int16x8Ty, false);
3090
369
        break;
3091
114
      case OpCode::I32x4__dot_i16x8_s: {
3092
114
        auto ExtendTy = Context.Int16x8Ty.getExtendedElementVectorType();
3093
114
        auto Undef = LLVM::Value::getUndef(ExtendTy);
3094
114
        auto LHS = Builder.createSExt(
3095
114
            Builder.createBitCast(stackPop(), Context.Int16x8Ty), ExtendTy);
3096
114
        auto RHS = Builder.createSExt(
3097
114
            Builder.createBitCast(stackPop(), Context.Int16x8Ty), ExtendTy);
3098
114
        auto M = Builder.createMul(LHS, RHS);
3099
114
        auto L = Builder.createShuffleVector(
3100
114
            M, Undef,
3101
114
            LLVM::Value::getConstVector32(LLContext, {0U, 2U, 4U, 6U}));
3102
114
        auto R = Builder.createShuffleVector(
3103
114
            M, Undef,
3104
114
            LLVM::Value::getConstVector32(LLContext, {1U, 3U, 5U, 7U}));
3105
114
        auto V = Builder.createAdd(L, R);
3106
114
        stackPush(Builder.createBitCast(V, Context.Int64x2Ty));
3107
114
        break;
3108
841
      }
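Note: i32x4.dot_i16x8_s multiplies corresponding i16 lanes at i32 precision and adds each adjacent pair; the lowering above sign-extends both operands to the extended element vector type, multiplies, then shuffles the even-index and odd-index lanes into separate vectors and adds them. A scalar sketch of the per-lane arithmetic (hypothetical helper, arrays instead of LLVM vectors):

#include <array>
#include <cstdint>

// Per-lane semantics of i32x4.dot_i16x8_s: widen to i32, multiply, and sum
// adjacent pairs (lanes 2k and 2k+1 feed result lane k). The widened
// products always fit in i32; only the (-32768 * -32768) + (-32768 * -32768)
// pair can overflow the add, and it wraps like the plain vector add above.
static std::array<int32_t, 4> dotI16x8S(const std::array<int16_t, 8> &A,
                                        const std::array<int16_t, 8> &B) noexcept {
  std::array<int32_t, 4> R{};
  for (int K = 0; K < 4; ++K) {
    const int64_t Sum = int64_t{A[2 * K]} * B[2 * K] +
                        int64_t{A[2 * K + 1]} * B[2 * K + 1];
    R[K] = static_cast<int32_t>(static_cast<uint32_t>(Sum));
  }
  return R;
}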
3109
882
      case OpCode::I64x2__abs:
3110
882
        compileVectorAbs(Context.Int64x2Ty);
3111
882
        break;
3112
528
      case OpCode::I64x2__neg:
3113
528
        compileVectorNeg(Context.Int64x2Ty);
3114
528
        break;
3115
275
      case OpCode::I64x2__all_true:
3116
275
        compileVectorAllTrue(Context.Int64x2Ty);
3117
275
        break;
3118
229
      case OpCode::I64x2__bitmask:
3119
229
        compileVectorBitMask(Context.Int64x2Ty);
3120
229
        break;
3121
114
      case OpCode::I64x2__extend_low_i32x4_s:
3122
114
        compileVectorExtend(Context.Int32x4Ty, true, true);
3123
114
        break;
3124
682
      case OpCode::I64x2__extend_high_i32x4_s:
3125
682
        compileVectorExtend(Context.Int32x4Ty, true, false);
3126
682
        break;
3127
187
      case OpCode::I64x2__extend_low_i32x4_u:
3128
187
        compileVectorExtend(Context.Int32x4Ty, false, true);
3129
187
        break;
3130
504
      case OpCode::I64x2__extend_high_i32x4_u:
3131
504
        compileVectorExtend(Context.Int32x4Ty, false, false);
3132
504
        break;
3133
86
      case OpCode::I64x2__shl:
3134
86
        compileVectorShl(Context.Int64x2Ty);
3135
86
        break;
3136
272
      case OpCode::I64x2__shr_s:
3137
272
        compileVectorAShr(Context.Int64x2Ty);
3138
272
        break;
3139
60
      case OpCode::I64x2__shr_u:
3140
60
        compileVectorLShr(Context.Int64x2Ty);
3141
60
        break;
3142
35
      case OpCode::I64x2__add:
3143
35
        compileVectorVectorAdd(Context.Int64x2Ty);
3144
35
        break;
3145
233
      case OpCode::I64x2__sub:
3146
233
        compileVectorVectorSub(Context.Int64x2Ty);
3147
233
        break;
3148
74
      case OpCode::I64x2__mul:
3149
74
        compileVectorVectorMul(Context.Int64x2Ty);
3150
74
        break;
3151
41
      case OpCode::I64x2__extmul_low_i32x4_s:
3152
41
        compileVectorExtMul(Context.Int32x4Ty, true, true);
3153
41
        break;
3154
283
      case OpCode::I64x2__extmul_high_i32x4_s:
3155
283
        compileVectorExtMul(Context.Int32x4Ty, true, false);
3156
283
        break;
3157
34
      case OpCode::I64x2__extmul_low_i32x4_u:
3158
34
        compileVectorExtMul(Context.Int32x4Ty, false, true);
3159
34
        break;
3160
118
      case OpCode::I64x2__extmul_high_i32x4_u:
3161
118
        compileVectorExtMul(Context.Int32x4Ty, false, false);
3162
118
        break;
3163
117
      case OpCode::F32x4__abs:
3164
117
        compileVectorFAbs(Context.Floatx4Ty);
3165
117
        break;
3166
141
      case OpCode::F32x4__neg:
3167
141
        compileVectorFNeg(Context.Floatx4Ty);
3168
141
        break;
3169
201
      case OpCode::F32x4__sqrt:
3170
201
        compileVectorFSqrt(Context.Floatx4Ty);
3171
201
        break;
3172
127
      case OpCode::F32x4__add:
3173
127
        compileVectorVectorFAdd(Context.Floatx4Ty);
3174
127
        break;
3175
238
      case OpCode::F32x4__sub:
3176
238
        compileVectorVectorFSub(Context.Floatx4Ty);
3177
238
        break;
3178
40
      case OpCode::F32x4__mul:
3179
40
        compileVectorVectorFMul(Context.Floatx4Ty);
3180
40
        break;
3181
187
      case OpCode::F32x4__div:
3182
187
        compileVectorVectorFDiv(Context.Floatx4Ty);
3183
187
        break;
3184
123
      case OpCode::F32x4__min:
3185
123
        compileVectorVectorFMin(Context.Floatx4Ty);
3186
123
        break;
3187
36
      case OpCode::F32x4__max:
3188
36
        compileVectorVectorFMax(Context.Floatx4Ty);
3189
36
        break;
3190
54
      case OpCode::F32x4__pmin:
3191
54
        compileVectorVectorFPMin(Context.Floatx4Ty);
3192
54
        break;
3193
243
      case OpCode::F32x4__pmax:
3194
243
        compileVectorVectorFPMax(Context.Floatx4Ty);
3195
243
        break;
3196
740
      case OpCode::F32x4__ceil:
3197
740
        compileVectorFCeil(Context.Floatx4Ty);
3198
740
        break;
3199
1.45k
      case OpCode::F32x4__floor:
3200
1.45k
        compileVectorFFloor(Context.Floatx4Ty);
3201
1.45k
        break;
3202
1.51k
      case OpCode::F32x4__trunc:
3203
1.51k
        compileVectorFTrunc(Context.Floatx4Ty);
3204
1.51k
        break;
3205
202
      case OpCode::F32x4__nearest:
3206
202
        compileVectorFNearest(Context.Floatx4Ty);
3207
202
        break;
3208
441
      case OpCode::F64x2__abs:
3209
441
        compileVectorFAbs(Context.Doublex2Ty);
3210
441
        break;
3211
797
      case OpCode::F64x2__neg:
3212
797
        compileVectorFNeg(Context.Doublex2Ty);
3213
797
        break;
3214
100
      case OpCode::F64x2__sqrt:
3215
100
        compileVectorFSqrt(Context.Doublex2Ty);
3216
100
        break;
3217
52
      case OpCode::F64x2__add:
3218
52
        compileVectorVectorFAdd(Context.Doublex2Ty);
3219
52
        break;
3220
211
      case OpCode::F64x2__sub:
3221
211
        compileVectorVectorFSub(Context.Doublex2Ty);
3222
211
        break;
3223
140
      case OpCode::F64x2__mul:
3224
140
        compileVectorVectorFMul(Context.Doublex2Ty);
3225
140
        break;
3226
37
      case OpCode::F64x2__div:
3227
37
        compileVectorVectorFDiv(Context.Doublex2Ty);
3228
37
        break;
3229
162
      case OpCode::F64x2__min:
3230
162
        compileVectorVectorFMin(Context.Doublex2Ty);
3231
162
        break;
3232
160
      case OpCode::F64x2__max:
3233
160
        compileVectorVectorFMax(Context.Doublex2Ty);
3234
160
        break;
3235
244
      case OpCode::F64x2__pmin:
3236
244
        compileVectorVectorFPMin(Context.Doublex2Ty);
3237
244
        break;
3238
68
      case OpCode::F64x2__pmax:
3239
68
        compileVectorVectorFPMax(Context.Doublex2Ty);
3240
68
        break;
3241
521
      case OpCode::F64x2__ceil:
3242
521
        compileVectorFCeil(Context.Doublex2Ty);
3243
521
        break;
3244
629
      case OpCode::F64x2__floor:
3245
629
        compileVectorFFloor(Context.Doublex2Ty);
3246
629
        break;
3247
117
      case OpCode::F64x2__trunc:
3248
117
        compileVectorFTrunc(Context.Doublex2Ty);
3249
117
        break;
3250
152
      case OpCode::F64x2__nearest:
3251
152
        compileVectorFNearest(Context.Doublex2Ty);
3252
152
        break;
3253
161
      case OpCode::I32x4__trunc_sat_f32x4_s:
3254
161
        compileVectorTruncSatS32(Context.Floatx4Ty, false);
3255
161
        break;
3256
3.68k
      case OpCode::I32x4__trunc_sat_f32x4_u:
3257
3.68k
        compileVectorTruncSatU32(Context.Floatx4Ty, false);
3258
3.68k
        break;
3259
315
      case OpCode::F32x4__convert_i32x4_s:
3260
315
        compileVectorConvertS(Context.Int32x4Ty, Context.Floatx4Ty, false);
3261
315
        break;
3262
699
      case OpCode::F32x4__convert_i32x4_u:
3263
699
        compileVectorConvertU(Context.Int32x4Ty, Context.Floatx4Ty, false);
3264
699
        break;
3265
744
      case OpCode::I32x4__trunc_sat_f64x2_s_zero:
3266
744
        compileVectorTruncSatS32(Context.Doublex2Ty, true);
3267
744
        break;
3268
2.10k
      case OpCode::I32x4__trunc_sat_f64x2_u_zero:
3269
2.10k
        compileVectorTruncSatU32(Context.Doublex2Ty, true);
3270
2.10k
        break;
3271
352
      case OpCode::F64x2__convert_low_i32x4_s:
3272
352
        compileVectorConvertS(Context.Int32x4Ty, Context.Doublex2Ty, true);
3273
352
        break;
3274
1.21k
      case OpCode::F64x2__convert_low_i32x4_u:
3275
1.21k
        compileVectorConvertU(Context.Int32x4Ty, Context.Doublex2Ty, true);
3276
1.21k
        break;
3277
567
      case OpCode::F32x4__demote_f64x2_zero:
3278
567
        compileVectorDemote();
3279
567
        break;
3280
554
      case OpCode::F64x2__promote_low_f32x4:
3281
554
        compileVectorPromote();
3282
554
        break;
3283
3284
      // Relaxed SIMD Instructions
3285
0
      case OpCode::I8x16__relaxed_swizzle:
3286
0
        compileVectorSwizzle();
3287
0
        break;
3288
0
      case OpCode::I32x4__relaxed_trunc_f32x4_s:
3289
0
        compileVectorTruncSatS32(Context.Floatx4Ty, false);
3290
0
        break;
3291
0
      case OpCode::I32x4__relaxed_trunc_f32x4_u:
3292
0
        compileVectorTruncSatU32(Context.Floatx4Ty, false);
3293
0
        break;
3294
0
      case OpCode::I32x4__relaxed_trunc_f64x2_s_zero:
3295
0
        compileVectorTruncSatS32(Context.Doublex2Ty, true);
3296
0
        break;
3297
0
      case OpCode::I32x4__relaxed_trunc_f64x2_u_zero:
3298
0
        compileVectorTruncSatU32(Context.Doublex2Ty, true);
3299
0
        break;
3300
0
      case OpCode::F32x4__relaxed_madd:
3301
0
        compileVectorVectorMAdd(Context.Floatx4Ty);
3302
0
        break;
3303
0
      case OpCode::F32x4__relaxed_nmadd:
3304
0
        compileVectorVectorNMAdd(Context.Floatx4Ty);
3305
0
        break;
3306
0
      case OpCode::F64x2__relaxed_madd:
3307
0
        compileVectorVectorMAdd(Context.Doublex2Ty);
3308
0
        break;
3309
0
      case OpCode::F64x2__relaxed_nmadd:
3310
0
        compileVectorVectorNMAdd(Context.Doublex2Ty);
3311
0
        break;
3312
0
      case OpCode::I8x16__relaxed_laneselect:
3313
0
      case OpCode::I16x8__relaxed_laneselect:
3314
0
      case OpCode::I32x4__relaxed_laneselect:
3315
0
      case OpCode::I64x2__relaxed_laneselect: {
3316
0
        auto C = stackPop();
3317
0
        auto V2 = stackPop();
3318
0
        auto V1 = stackPop();
3319
0
        stackPush(Builder.createXor(
3320
0
            Builder.createAnd(Builder.createXor(V1, V2), C), V2));
3321
0
        break;
3322
0
      }
3323
0
      case OpCode::F32x4__relaxed_min:
3324
0
        compileVectorVectorFMin(Context.Floatx4Ty);
3325
0
        break;
3326
0
      case OpCode::F32x4__relaxed_max:
3327
0
        compileVectorVectorFMax(Context.Floatx4Ty);
3328
0
        break;
3329
0
      case OpCode::F64x2__relaxed_min:
3330
0
        compileVectorVectorFMin(Context.Doublex2Ty);
3331
0
        break;
3332
0
      case OpCode::F64x2__relaxed_max:
3333
0
        compileVectorVectorFMax(Context.Doublex2Ty);
3334
0
        break;
3335
0
      case OpCode::I16x8__relaxed_q15mulr_s:
3336
0
        compileVectorVectorQ15MulSat();
3337
0
        break;
3338
0
      case OpCode::I16x8__relaxed_dot_i8x16_i7x16_s:
3339
0
        compileVectorRelaxedIntegerDotProduct();
3340
0
        break;
3341
0
      case OpCode::I32x4__relaxed_dot_i8x16_i7x16_add_s:
3342
0
        compileVectorRelaxedIntegerDotProductAdd();
3343
0
        break;
3344
3345
      // Atomic Instructions
3346
188
      case OpCode::Atomic__fence:
3347
188
        return compileMemoryFence();
3348
33
      case OpCode::Memory__atomic__notify:
3349
33
        return compileAtomicNotify(Instr.getTargetIndex(),
3350
33
                                   Instr.getMemoryOffset());
3351
5
      case OpCode::Memory__atomic__wait32:
3352
5
        return compileAtomicWait(Instr.getTargetIndex(),
3353
5
                                 Instr.getMemoryOffset(), Context.Int32Ty, 32);
3354
2
      case OpCode::Memory__atomic__wait64:
3355
2
        return compileAtomicWait(Instr.getTargetIndex(),
3356
2
                                 Instr.getMemoryOffset(), Context.Int64Ty, 64);
3357
0
      case OpCode::I32__atomic__load:
3358
0
        return compileAtomicLoad(
3359
0
            Instr.getTargetIndex(), Instr.getMemoryOffset(),
3360
0
            Instr.getMemoryAlign(), Context.Int32Ty, Context.Int32Ty, true);
3361
0
      case OpCode::I64__atomic__load:
3362
0
        return compileAtomicLoad(
3363
0
            Instr.getTargetIndex(), Instr.getMemoryOffset(),
3364
0
            Instr.getMemoryAlign(), Context.Int64Ty, Context.Int64Ty, true);
3365
0
      case OpCode::I32__atomic__load8_u:
3366
0
        return compileAtomicLoad(
3367
0
            Instr.getTargetIndex(), Instr.getMemoryOffset(),
3368
0
            Instr.getMemoryAlign(), Context.Int32Ty, Context.Int8Ty);
3369
0
      case OpCode::I32__atomic__load16_u:
3370
0
        return compileAtomicLoad(
3371
0
            Instr.getTargetIndex(), Instr.getMemoryOffset(),
3372
0
            Instr.getMemoryAlign(), Context.Int32Ty, Context.Int16Ty);
3373
0
      case OpCode::I64__atomic__load8_u:
3374
0
        return compileAtomicLoad(
3375
0
            Instr.getTargetIndex(), Instr.getMemoryOffset(),
3376
0
            Instr.getMemoryAlign(), Context.Int64Ty, Context.Int8Ty);
3377
0
      case OpCode::I64__atomic__load16_u:
3378
0
        return compileAtomicLoad(
3379
0
            Instr.getTargetIndex(), Instr.getMemoryOffset(),
3380
0
            Instr.getMemoryAlign(), Context.Int64Ty, Context.Int16Ty);
3381
0
      case OpCode::I64__atomic__load32_u:
3382
0
        return compileAtomicLoad(
3383
0
            Instr.getTargetIndex(), Instr.getMemoryOffset(),
3384
0
            Instr.getMemoryAlign(), Context.Int64Ty, Context.Int32Ty);
3385
0
      case OpCode::I32__atomic__store:
3386
0
        return compileAtomicStore(
3387
0
            Instr.getTargetIndex(), Instr.getMemoryOffset(),
3388
0
            Instr.getMemoryAlign(), Context.Int32Ty, Context.Int32Ty, true);
3389
0
      case OpCode::I64__atomic__store:
3390
0
        return compileAtomicStore(
3391
0
            Instr.getTargetIndex(), Instr.getMemoryOffset(),
3392
0
            Instr.getMemoryAlign(), Context.Int64Ty, Context.Int64Ty, true);
3393
0
      case OpCode::I32__atomic__store8:
3394
0
        return compileAtomicStore(
3395
0
            Instr.getTargetIndex(), Instr.getMemoryOffset(),
3396
0
            Instr.getMemoryAlign(), Context.Int32Ty, Context.Int8Ty, true);
3397
0
      case OpCode::I32__atomic__store16:
3398
0
        return compileAtomicStore(
3399
0
            Instr.getTargetIndex(), Instr.getMemoryOffset(),
3400
0
            Instr.getMemoryAlign(), Context.Int32Ty, Context.Int16Ty, true);
3401
0
      case OpCode::I64__atomic__store8:
3402
0
        return compileAtomicStore(
3403
0
            Instr.getTargetIndex(), Instr.getMemoryOffset(),
3404
0
            Instr.getMemoryAlign(), Context.Int64Ty, Context.Int8Ty, true);
3405
0
      case OpCode::I64__atomic__store16:
3406
0
        return compileAtomicStore(
3407
0
            Instr.getTargetIndex(), Instr.getMemoryOffset(),
3408
0
            Instr.getMemoryAlign(), Context.Int64Ty, Context.Int16Ty, true);
3409
0
      case OpCode::I64__atomic__store32:
3410
0
        return compileAtomicStore(
3411
0
            Instr.getTargetIndex(), Instr.getMemoryOffset(),
3412
0
            Instr.getMemoryAlign(), Context.Int64Ty, Context.Int32Ty, true);
3413
0
      case OpCode::I32__atomic__rmw__add:
3414
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3415
0
                                  Instr.getMemoryOffset(),
3416
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpAdd,
3417
0
                                  Context.Int32Ty, Context.Int32Ty, true);
3418
0
      case OpCode::I64__atomic__rmw__add:
3419
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3420
0
                                  Instr.getMemoryOffset(),
3421
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpAdd,
3422
0
                                  Context.Int64Ty, Context.Int64Ty, true);
3423
0
      case OpCode::I32__atomic__rmw8__add_u:
3424
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3425
0
                                  Instr.getMemoryOffset(),
3426
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpAdd,
3427
0
                                  Context.Int32Ty, Context.Int8Ty);
3428
0
      case OpCode::I32__atomic__rmw16__add_u:
3429
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3430
0
                                  Instr.getMemoryOffset(),
3431
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpAdd,
3432
0
                                  Context.Int32Ty, Context.Int16Ty);
3433
0
      case OpCode::I64__atomic__rmw8__add_u:
3434
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3435
0
                                  Instr.getMemoryOffset(),
3436
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpAdd,
3437
0
                                  Context.Int64Ty, Context.Int8Ty);
3438
0
      case OpCode::I64__atomic__rmw16__add_u:
3439
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3440
0
                                  Instr.getMemoryOffset(),
3441
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpAdd,
3442
0
                                  Context.Int64Ty, Context.Int16Ty);
3443
0
      case OpCode::I64__atomic__rmw32__add_u:
3444
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3445
0
                                  Instr.getMemoryOffset(),
3446
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpAdd,
3447
0
                                  Context.Int64Ty, Context.Int32Ty);
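Note: the atomic rmw cases all funnel into compileAtomicRMWOp with an LLVMAtomicRMWBinOp* selector; the helper body is outside this excerpt, but at the Wasm level i32.atomic.rmw.add is a sequentially consistent fetch-and-add that returns the value previously stored in memory. A scalar sketch of that behaviour with std::atomic, purely as a spec-level illustration:

#include <atomic>
#include <cstdint>

// Wasm i32.atomic.rmw.add: atomically add Val to the cell and return the
// previous contents; Wasm atomics use sequentially consistent ordering.
static uint32_t atomicRmwAdd32(std::atomic<uint32_t> &Cell,
                               uint32_t Val) noexcept {
  return Cell.fetch_add(Val, std::memory_order_seq_cst);
}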
3448
0
      case OpCode::I32__atomic__rmw__sub:
3449
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3450
0
                                  Instr.getMemoryOffset(),
3451
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpSub,
3452
0
                                  Context.Int32Ty, Context.Int32Ty, true);
3453
0
      case OpCode::I64__atomic__rmw__sub:
3454
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3455
0
                                  Instr.getMemoryOffset(),
3456
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpSub,
3457
0
                                  Context.Int64Ty, Context.Int64Ty, true);
3458
0
      case OpCode::I32__atomic__rmw8__sub_u:
3459
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3460
0
                                  Instr.getMemoryOffset(),
3461
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpSub,
3462
0
                                  Context.Int32Ty, Context.Int8Ty);
3463
0
      case OpCode::I32__atomic__rmw16__sub_u:
3464
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3465
0
                                  Instr.getMemoryOffset(),
3466
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpSub,
3467
0
                                  Context.Int32Ty, Context.Int16Ty);
3468
0
      case OpCode::I64__atomic__rmw8__sub_u:
3469
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3470
0
                                  Instr.getMemoryOffset(),
3471
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpSub,
3472
0
                                  Context.Int64Ty, Context.Int8Ty);
3473
0
      case OpCode::I64__atomic__rmw16__sub_u:
3474
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3475
0
                                  Instr.getMemoryOffset(),
3476
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpSub,
3477
0
                                  Context.Int64Ty, Context.Int16Ty);
3478
0
      case OpCode::I64__atomic__rmw32__sub_u:
3479
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3480
0
                                  Instr.getMemoryOffset(),
3481
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpSub,
3482
0
                                  Context.Int64Ty, Context.Int32Ty);
3483
0
      case OpCode::I32__atomic__rmw__and:
3484
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3485
0
                                  Instr.getMemoryOffset(),
3486
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpAnd,
3487
0
                                  Context.Int32Ty, Context.Int32Ty, true);
3488
0
      case OpCode::I64__atomic__rmw__and:
3489
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3490
0
                                  Instr.getMemoryOffset(),
3491
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpAnd,
3492
0
                                  Context.Int64Ty, Context.Int64Ty, true);
3493
0
      case OpCode::I32__atomic__rmw8__and_u:
3494
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3495
0
                                  Instr.getMemoryOffset(),
3496
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpAnd,
3497
0
                                  Context.Int32Ty, Context.Int8Ty);
3498
0
      case OpCode::I32__atomic__rmw16__and_u:
3499
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3500
0
                                  Instr.getMemoryOffset(),
3501
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpAnd,
3502
0
                                  Context.Int32Ty, Context.Int16Ty);
3503
0
      case OpCode::I64__atomic__rmw8__and_u:
3504
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3505
0
                                  Instr.getMemoryOffset(),
3506
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpAnd,
3507
0
                                  Context.Int64Ty, Context.Int8Ty);
3508
0
      case OpCode::I64__atomic__rmw16__and_u:
3509
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3510
0
                                  Instr.getMemoryOffset(),
3511
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpAnd,
3512
0
                                  Context.Int64Ty, Context.Int16Ty);
3513
0
      case OpCode::I64__atomic__rmw32__and_u:
3514
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3515
0
                                  Instr.getMemoryOffset(),
3516
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpAnd,
3517
0
                                  Context.Int64Ty, Context.Int32Ty);
3518
0
      case OpCode::I32__atomic__rmw__or:
3519
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3520
0
                                  Instr.getMemoryOffset(),
3521
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpOr,
3522
0
                                  Context.Int32Ty, Context.Int32Ty, true);
3523
0
      case OpCode::I64__atomic__rmw__or:
3524
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3525
0
                                  Instr.getMemoryOffset(),
3526
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpOr,
3527
0
                                  Context.Int64Ty, Context.Int64Ty, true);
3528
0
      case OpCode::I32__atomic__rmw8__or_u:
3529
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3530
0
                                  Instr.getMemoryOffset(),
3531
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpOr,
3532
0
                                  Context.Int32Ty, Context.Int8Ty);
3533
0
      case OpCode::I32__atomic__rmw16__or_u:
3534
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3535
0
                                  Instr.getMemoryOffset(),
3536
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpOr,
3537
0
                                  Context.Int32Ty, Context.Int16Ty);
3538
0
      case OpCode::I64__atomic__rmw8__or_u:
3539
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3540
0
                                  Instr.getMemoryOffset(),
3541
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpOr,
3542
0
                                  Context.Int64Ty, Context.Int8Ty);
3543
0
      case OpCode::I64__atomic__rmw16__or_u:
3544
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3545
0
                                  Instr.getMemoryOffset(),
3546
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpOr,
3547
0
                                  Context.Int64Ty, Context.Int16Ty);
3548
0
      case OpCode::I64__atomic__rmw32__or_u:
3549
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3550
0
                                  Instr.getMemoryOffset(),
3551
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpOr,
3552
0
                                  Context.Int64Ty, Context.Int32Ty);
3553
0
      case OpCode::I32__atomic__rmw__xor:
3554
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3555
0
                                  Instr.getMemoryOffset(),
3556
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpXor,
3557
0
                                  Context.Int32Ty, Context.Int32Ty, true);
3558
0
      case OpCode::I64__atomic__rmw__xor:
3559
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3560
0
                                  Instr.getMemoryOffset(),
3561
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpXor,
3562
0
                                  Context.Int64Ty, Context.Int64Ty, true);
3563
0
      case OpCode::I32__atomic__rmw8__xor_u:
3564
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3565
0
                                  Instr.getMemoryOffset(),
3566
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpXor,
3567
0
                                  Context.Int32Ty, Context.Int8Ty);
3568
0
      case OpCode::I32__atomic__rmw16__xor_u:
3569
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3570
0
                                  Instr.getMemoryOffset(),
3571
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpXor,
3572
0
                                  Context.Int32Ty, Context.Int16Ty);
3573
0
      case OpCode::I64__atomic__rmw8__xor_u:
3574
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3575
0
                                  Instr.getMemoryOffset(),
3576
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpXor,
3577
0
                                  Context.Int64Ty, Context.Int8Ty);
3578
0
      case OpCode::I64__atomic__rmw16__xor_u:
3579
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3580
0
                                  Instr.getMemoryOffset(),
3581
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpXor,
3582
0
                                  Context.Int64Ty, Context.Int16Ty);
3583
0
      case OpCode::I64__atomic__rmw32__xor_u:
3584
0
        return compileAtomicRMWOp(Instr.getTargetIndex(),
3585
0
                                  Instr.getMemoryOffset(),
3586
0
                                  Instr.getMemoryAlign(), LLVMAtomicRMWBinOpXor,
3587
0
                                  Context.Int64Ty, Context.Int32Ty);
3588
0
      case OpCode::I32__atomic__rmw__xchg:
3589
0
        return compileAtomicRMWOp(
3590
0
            Instr.getTargetIndex(), Instr.getMemoryOffset(),
3591
0
            Instr.getMemoryAlign(), LLVMAtomicRMWBinOpXchg, Context.Int32Ty,
3592
0
            Context.Int32Ty, true);
3593
0
      case OpCode::I64__atomic__rmw__xchg:
3594
0
        return compileAtomicRMWOp(
3595
0
            Instr.getTargetIndex(), Instr.getMemoryOffset(),
3596
0
            Instr.getMemoryAlign(), LLVMAtomicRMWBinOpXchg, Context.Int64Ty,
3597
0
            Context.Int64Ty, true);
3598
0
      case OpCode::I32__atomic__rmw8__xchg_u:
3599
0
        return compileAtomicRMWOp(
3600
0
            Instr.getTargetIndex(), Instr.getMemoryOffset(),
3601
0
            Instr.getMemoryAlign(), LLVMAtomicRMWBinOpXchg, Context.Int32Ty,
3602
0
            Context.Int8Ty);
3603
0
      case OpCode::I32__atomic__rmw16__xchg_u:
3604
0
        return compileAtomicRMWOp(
3605
0
            Instr.getTargetIndex(), Instr.getMemoryOffset(),
3606
0
            Instr.getMemoryAlign(), LLVMAtomicRMWBinOpXchg, Context.Int32Ty,
3607
0
            Context.Int16Ty);
3608
0
      case OpCode::I64__atomic__rmw8__xchg_u:
3609
0
        return compileAtomicRMWOp(
3610
0
            Instr.getTargetIndex(), Instr.getMemoryOffset(),
3611
0
            Instr.getMemoryAlign(), LLVMAtomicRMWBinOpXchg, Context.Int64Ty,
3612
0
            Context.Int8Ty);
3613
0
      case OpCode::I64__atomic__rmw16__xchg_u:
3614
0
        return compileAtomicRMWOp(
3615
0
            Instr.getTargetIndex(), Instr.getMemoryOffset(),
3616
0
            Instr.getMemoryAlign(), LLVMAtomicRMWBinOpXchg, Context.Int64Ty,
3617
0
            Context.Int16Ty);
3618
0
      case OpCode::I64__atomic__rmw32__xchg_u:
3619
0
        return compileAtomicRMWOp(
3620
0
            Instr.getTargetIndex(), Instr.getMemoryOffset(),
3621
0
            Instr.getMemoryAlign(), LLVMAtomicRMWBinOpXchg, Context.Int64Ty,
3622
0
            Context.Int32Ty);
3623
0
      case OpCode::I32__atomic__rmw__cmpxchg:
3624
0
        return compileAtomicCompareExchange(
3625
0
            Instr.getTargetIndex(), Instr.getMemoryOffset(),
3626
0
            Instr.getMemoryAlign(), Context.Int32Ty, Context.Int32Ty, true);
3627
0
      case OpCode::I64__atomic__rmw__cmpxchg:
3628
0
        return compileAtomicCompareExchange(
3629
0
            Instr.getTargetIndex(), Instr.getMemoryOffset(),
3630
0
            Instr.getMemoryAlign(), Context.Int64Ty, Context.Int64Ty, true);
3631
0
      case OpCode::I32__atomic__rmw8__cmpxchg_u:
3632
0
        return compileAtomicCompareExchange(
3633
0
            Instr.getTargetIndex(), Instr.getMemoryOffset(),
3634
0
            Instr.getMemoryAlign(), Context.Int32Ty, Context.Int8Ty);
3635
0
      case OpCode::I32__atomic__rmw16__cmpxchg_u:
3636
0
        return compileAtomicCompareExchange(
3637
0
            Instr.getTargetIndex(), Instr.getMemoryOffset(),
3638
0
            Instr.getMemoryAlign(), Context.Int32Ty, Context.Int16Ty);
3639
0
      case OpCode::I64__atomic__rmw8__cmpxchg_u:
3640
0
        return compileAtomicCompareExchange(
3641
0
            Instr.getTargetIndex(), Instr.getMemoryOffset(),
3642
0
            Instr.getMemoryAlign(), Context.Int64Ty, Context.Int8Ty);
3643
0
      case OpCode::I64__atomic__rmw16__cmpxchg_u:
3644
0
        return compileAtomicCompareExchange(
3645
0
            Instr.getTargetIndex(), Instr.getMemoryOffset(),
3646
0
            Instr.getMemoryAlign(), Context.Int64Ty, Context.Int16Ty);
3647
0
      case OpCode::I64__atomic__rmw32__cmpxchg_u:
3648
0
        return compileAtomicCompareExchange(
3649
0
            Instr.getTargetIndex(), Instr.getMemoryOffset(),
3650
0
            Instr.getMemoryAlign(), Context.Int64Ty, Context.Int32Ty);
3651
3652
0
      default:
3653
0
        assumingUnreachable();
3654
945k
      }
3655
945k
      return;
3656
945k
    };
3657
1.40M
    for (const auto &Instr : Instrs) {
3658
      // Update instruction count
3659
1.40M
      if (LocalInstrCount) {
3660
0
        Builder.createStore(
3661
0
            Builder.createAdd(
3662
0
                Builder.createLoad(Context.Int64Ty, LocalInstrCount),
3663
0
                LLContext.getInt64(1)),
3664
0
            LocalInstrCount);
3665
0
      }
3666
1.40M
      if (LocalGas) {
3667
0
        auto NewGas = Builder.createAdd(
3668
0
            Builder.createLoad(Context.Int64Ty, LocalGas),
3669
0
            Builder.createLoad(
3670
0
                Context.Int64Ty,
3671
0
                Builder.createConstInBoundsGEP2_64(
3672
0
                    LLVM::Type::getArrayType(Context.Int64Ty, UINT16_MAX + 1),
3673
0
                    Context.getCostTable(Builder, ExecCtx), 0,
3674
0
                    uint16_t(Instr.getOpCode()))));
3675
0
        Builder.createStore(NewGas, LocalGas);
3676
0
      }
3677
3678
      // Make the instruction node according to Code.
3679
1.40M
      Dispatch(Instr);
3680
1.40M
    }
3681
9.34k
  }
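The loop above optionally increments a per-block instruction counter and accumulates gas by indexing a cost table with the 16-bit opcode before dispatching each instruction. A minimal host-side sketch of that accumulation step, with illustrative names (accumulateCost and CostTable are not identifiers from this file):

#include <array>
#include <cstdint>

// Illustrative only: mirrors the LocalGas += CostTable[OpCode] update emitted
// once per compiled instruction; updateGas() later flushes the local total.
static void accumulateCost(uint64_t &LocalGas,
                           const std::array<uint64_t, UINT16_MAX + 1> &CostTable,
                           uint16_t OpCode) noexcept {
  LocalGas += CostTable[OpCode];
}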
3682
2.04k
  void compileSignedTrunc(LLVM::Type IntType) noexcept {
3683
2.04k
    auto NormBB = LLVM::BasicBlock::create(LLContext, F.Fn, "strunc.norm");
3684
2.04k
    auto NotMinBB = LLVM::BasicBlock::create(LLContext, F.Fn, "strunc.notmin");
3685
2.04k
    auto NotMaxBB = LLVM::BasicBlock::create(LLContext, F.Fn, "strunc.notmax");
3686
2.04k
    auto Value = stackPop();
3687
2.04k
    const auto [Precise, MinFp, MaxFp] =
3688
2.04k
        [IntType, Value]() -> std::tuple<bool, LLVM::Value, LLVM::Value> {
3689
2.04k
      const auto BitWidth = IntType.getIntegerBitWidth();
3690
2.04k
      const auto [Min, Max] = [BitWidth]() -> std::tuple<int64_t, int64_t> {
3691
2.04k
        switch (BitWidth) {
3692
1.56k
        case 32:
3693
1.56k
          return {std::numeric_limits<int32_t>::min(),
3694
1.56k
                  std::numeric_limits<int32_t>::max()};
3695
480
        case 64:
3696
480
          return {std::numeric_limits<int64_t>::min(),
3697
480
                  std::numeric_limits<int64_t>::max()};
3698
0
        default:
3699
0
          assumingUnreachable();
3700
2.04k
        }
3701
2.04k
      }();
3702
2.04k
      auto FPType = Value.getType();
3703
2.04k
      assuming(FPType.isFloatTy() || FPType.isDoubleTy());
3704
2.04k
      const auto FPWidth = FPType.getFPMantissaWidth();
3705
2.04k
      return {BitWidth <= FPWidth, LLVM::Value::getConstReal(FPType, Min),
3706
2.04k
              LLVM::Value::getConstReal(FPType, Max)};
3707
2.04k
    }();
3708
3709
2.04k
    auto IsNotNan = Builder.createLikely(Builder.createFCmpORD(Value, Value));
3710
2.04k
    Builder.createCondBr(IsNotNan, NormBB,
3711
2.04k
                         getTrapBB(ErrCode::Value::InvalidConvToInt));
3712
3713
2.04k
    Builder.positionAtEnd(NormBB);
3714
2.04k
    assuming(LLVM::Core::Trunc != LLVM::Core::NotIntrinsic);
3715
2.04k
    auto Trunc = Builder.createUnaryIntrinsic(LLVM::Core::Trunc, Value);
3716
2.04k
    auto IsNotUnderflow =
3717
2.04k
        Builder.createLikely(Builder.createFCmpOGE(Trunc, MinFp));
3718
2.04k
    Builder.createCondBr(IsNotUnderflow, NotMinBB,
3719
2.04k
                         getTrapBB(ErrCode::Value::IntegerOverflow));
3720
3721
2.04k
    Builder.positionAtEnd(NotMinBB);
3722
2.04k
    auto IsNotOverflow = Builder.createLikely(
3723
2.04k
        Builder.createFCmp(Precise ? LLVMRealOLE : LLVMRealOLT, Trunc, MaxFp));
3724
2.04k
    Builder.createCondBr(IsNotOverflow, NotMaxBB,
3725
2.04k
                         getTrapBB(ErrCode::Value::IntegerOverflow));
3726
3727
2.04k
    Builder.positionAtEnd(NotMaxBB);
3728
2.04k
    stackPush(Builder.createFPToSI(Trunc, IntType));
3729
2.04k
  }
3730
1.12k
  void compileSignedTruncSat(LLVM::Type IntType) noexcept {
3731
1.12k
    auto CurrBB = Builder.getInsertBlock();
3732
1.12k
    auto NormBB = LLVM::BasicBlock::create(LLContext, F.Fn, "ssat.norm");
3733
1.12k
    auto NotMinBB = LLVM::BasicBlock::create(LLContext, F.Fn, "ssat.notmin");
3734
1.12k
    auto NotMaxBB = LLVM::BasicBlock::create(LLContext, F.Fn, "ssat.notmax");
3735
1.12k
    auto EndBB = LLVM::BasicBlock::create(LLContext, F.Fn, "ssat.end");
3736
1.12k
    auto Value = stackPop();
3737
1.12k
    const auto [Precise, MinInt, MaxInt, MinFp, MaxFp] = [IntType, Value]()
3738
1.12k
        -> std::tuple<bool, uint64_t, uint64_t, LLVM::Value, LLVM::Value> {
3739
1.12k
      const auto BitWidth = IntType.getIntegerBitWidth();
3740
1.12k
      const auto [Min, Max] = [BitWidth]() -> std::tuple<int64_t, int64_t> {
3741
1.12k
        switch (BitWidth) {
3742
505
        case 32:
3743
505
          return {std::numeric_limits<int32_t>::min(),
3744
505
                  std::numeric_limits<int32_t>::max()};
3745
618
        case 64:
3746
618
          return {std::numeric_limits<int64_t>::min(),
3747
618
                  std::numeric_limits<int64_t>::max()};
3748
0
        default:
3749
0
          assumingUnreachable();
3750
1.12k
        }
3751
1.12k
      }();
3752
1.12k
      auto FPType = Value.getType();
3753
1.12k
      assuming(FPType.isFloatTy() || FPType.isDoubleTy());
3754
1.12k
      const auto FPWidth = FPType.getFPMantissaWidth();
3755
1.12k
      return {BitWidth <= FPWidth, static_cast<uint64_t>(Min),
3756
1.12k
              static_cast<uint64_t>(Max),
3757
1.12k
              LLVM::Value::getConstReal(FPType, Min),
3758
1.12k
              LLVM::Value::getConstReal(FPType, Max)};
3759
1.12k
    }();
3760
3761
1.12k
    auto IsNotNan = Builder.createLikely(Builder.createFCmpORD(Value, Value));
3762
1.12k
    Builder.createCondBr(IsNotNan, NormBB, EndBB);
3763
3764
1.12k
    Builder.positionAtEnd(NormBB);
3765
1.12k
    assuming(LLVM::Core::Trunc != LLVM::Core::NotIntrinsic);
3766
1.12k
    auto Trunc = Builder.createUnaryIntrinsic(LLVM::Core::Trunc, Value);
3767
1.12k
    auto IsNotUnderflow =
3768
1.12k
        Builder.createLikely(Builder.createFCmpOGE(Trunc, MinFp));
3769
1.12k
    Builder.createCondBr(IsNotUnderflow, NotMinBB, EndBB);
3770
3771
1.12k
    Builder.positionAtEnd(NotMinBB);
3772
1.12k
    auto IsNotOverflow = Builder.createLikely(
3773
1.12k
        Builder.createFCmp(Precise ? LLVMRealOLE : LLVMRealOLT, Trunc, MaxFp));
3774
1.12k
    Builder.createCondBr(IsNotOverflow, NotMaxBB, EndBB);
3775
3776
1.12k
    Builder.positionAtEnd(NotMaxBB);
3777
1.12k
    auto IntValue = Builder.createFPToSI(Trunc, IntType);
3778
1.12k
    Builder.createBr(EndBB);
3779
3780
1.12k
    Builder.positionAtEnd(EndBB);
3781
1.12k
    auto PHIRet = Builder.createPHI(IntType);
3782
1.12k
    PHIRet.addIncoming(LLVM::Value::getConstInt(IntType, 0, true), CurrBB);
3783
1.12k
    PHIRet.addIncoming(LLVM::Value::getConstInt(IntType, MinInt, true), NormBB);
3784
1.12k
    PHIRet.addIncoming(LLVM::Value::getConstInt(IntType, MaxInt, true),
3785
1.12k
                       NotMinBB);
3786
1.12k
    PHIRet.addIncoming(IntValue, NotMaxBB);
3787
3788
1.12k
    stackPush(PHIRet);
3789
1.12k
  }
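compileSignedTruncSat lowers the saturating truncation to a chain of ordered comparisons whose outcomes feed a PHI: a NaN input yields 0, an underflow yields the integer minimum, an overflow yields the integer maximum, and only in-range values reach the fptosi. The Precise flag switches the upper-bound compare between <= and <, because the integer maximum is exactly representable in the FP type only when the integer is no wider than the mantissa. A minimal scalar sketch of those semantics for the f64-to-i32 case (the helper name is illustrative, not part of this file):

#include <cmath>
#include <cstdint>
#include <limits>

// Illustrative reference for i32.trunc_sat_f64_s: NaN -> 0, clamp out-of-range
// inputs, otherwise truncate toward zero (the fptosi path in the IR above).
static int32_t truncSatF64ToI32(double V) noexcept {
  if (std::isnan(V)) {
    return 0;                                       // PHI incoming from CurrBB
  }
  const double T = std::trunc(V);
  const double Min = static_cast<double>(std::numeric_limits<int32_t>::min());
  const double Max = static_cast<double>(std::numeric_limits<int32_t>::max());
  if (!(T >= Min)) {
    return std::numeric_limits<int32_t>::min();     // PHI incoming from NormBB
  }
  if (!(T <= Max)) {          // <= is safe here because i32 fits in a double mantissa
    return std::numeric_limits<int32_t>::max();     // PHI incoming from NotMinBB
  }
  return static_cast<int32_t>(T);                   // PHI incoming from NotMaxBB
}

compileSignedTrunc walks the same comparison chain but branches to trap blocks (InvalidConvToInt, IntegerOverflow) instead of producing the clamped values.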
3790
3.76k
  void compileUnsignedTrunc(LLVM::Type IntType) noexcept {
3791
3.76k
    auto NormBB = LLVM::BasicBlock::create(LLContext, F.Fn, "utrunc.norm");
3792
3.76k
    auto NotMinBB = LLVM::BasicBlock::create(LLContext, F.Fn, "utrunc.notmin");
3793
3.76k
    auto NotMaxBB = LLVM::BasicBlock::create(LLContext, F.Fn, "utrunc.notmax");
3794
3.76k
    auto Value = stackPop();
3795
3.76k
    const auto [Precise, MinFp, MaxFp] =
3796
3.76k
        [IntType, Value]() -> std::tuple<bool, LLVM::Value, LLVM::Value> {
3797
3.76k
      const auto BitWidth = IntType.getIntegerBitWidth();
3798
3.76k
      const auto [Min, Max] = [BitWidth]() -> std::tuple<uint64_t, uint64_t> {
3799
3.76k
        switch (BitWidth) {
3800
1.51k
        case 32:
3801
1.51k
          return {std::numeric_limits<uint32_t>::min(),
3802
1.51k
                  std::numeric_limits<uint32_t>::max()};
3803
2.24k
        case 64:
3804
2.24k
          return {std::numeric_limits<uint64_t>::min(),
3805
2.24k
                  std::numeric_limits<uint64_t>::max()};
3806
0
        default:
3807
0
          assumingUnreachable();
3808
3.76k
        }
3809
3.76k
      }();
3810
3.76k
      auto FPType = Value.getType();
3811
3.76k
      assuming(FPType.isFloatTy() || FPType.isDoubleTy());
3812
3.76k
      const auto FPWidth = FPType.getFPMantissaWidth();
3813
3.76k
      return {BitWidth <= FPWidth, LLVM::Value::getConstReal(FPType, Min),
3814
3.76k
              LLVM::Value::getConstReal(FPType, Max)};
3815
3.76k
    }();
3816
3817
3.76k
    auto IsNotNan = Builder.createLikely(Builder.createFCmpORD(Value, Value));
3818
3.76k
    Builder.createCondBr(IsNotNan, NormBB,
3819
3.76k
                         getTrapBB(ErrCode::Value::InvalidConvToInt));
3820
3821
3.76k
    Builder.positionAtEnd(NormBB);
3822
3.76k
    assuming(LLVM::Core::Trunc != LLVM::Core::NotIntrinsic);
3823
3.76k
    auto Trunc = Builder.createUnaryIntrinsic(LLVM::Core::Trunc, Value);
3824
3.76k
    auto IsNotUnderflow =
3825
3.76k
        Builder.createLikely(Builder.createFCmpOGE(Trunc, MinFp));
3826
3.76k
    Builder.createCondBr(IsNotUnderflow, NotMinBB,
3827
3.76k
                         getTrapBB(ErrCode::Value::IntegerOverflow));
3828
3829
3.76k
    Builder.positionAtEnd(NotMinBB);
3830
3.76k
    auto IsNotOverflow = Builder.createLikely(
3831
3.76k
        Builder.createFCmp(Precise ? LLVMRealOLE : LLVMRealOLT, Trunc, MaxFp));
3832
3.76k
    Builder.createCondBr(IsNotOverflow, NotMaxBB,
3833
3.76k
                         getTrapBB(ErrCode::Value::IntegerOverflow));
3834
3835
3.76k
    Builder.positionAtEnd(NotMaxBB);
3836
3.76k
    stackPush(Builder.createFPToUI(Trunc, IntType));
3837
3.76k
  }
3838
955
  void compileUnsignedTruncSat(LLVM::Type IntType) noexcept {
3839
955
    auto CurrBB = Builder.getInsertBlock();
3840
955
    auto NormBB = LLVM::BasicBlock::create(LLContext, F.Fn, "usat.norm");
3841
955
    auto NotMaxBB = LLVM::BasicBlock::create(LLContext, F.Fn, "usat.notmax");
3842
955
    auto EndBB = LLVM::BasicBlock::create(LLContext, F.Fn, "usat.end");
3843
955
    auto Value = stackPop();
3844
955
    const auto [Precise, MinInt, MaxInt, MinFp, MaxFp] = [IntType, Value]()
3845
955
        -> std::tuple<bool, uint64_t, uint64_t, LLVM::Value, LLVM::Value> {
3846
955
      const auto BitWidth = IntType.getIntegerBitWidth();
3847
955
      const auto [Min, Max] = [BitWidth]() -> std::tuple<uint64_t, uint64_t> {
3848
955
        switch (BitWidth) {
3849
336
        case 32:
3850
336
          return {std::numeric_limits<uint32_t>::min(),
3851
336
                  std::numeric_limits<uint32_t>::max()};
3852
619
        case 64:
3853
619
          return {std::numeric_limits<uint64_t>::min(),
3854
619
                  std::numeric_limits<uint64_t>::max()};
3855
0
        default:
3856
0
          assumingUnreachable();
3857
955
        }
3858
955
      }();
3859
955
      auto FPType = Value.getType();
3860
955
      assuming(FPType.isFloatTy() || FPType.isDoubleTy());
3861
955
      const auto FPWidth = FPType.getFPMantissaWidth();
3862
955
      return {BitWidth <= FPWidth, Min, Max,
3863
955
              LLVM::Value::getConstReal(FPType, Min),
3864
955
              LLVM::Value::getConstReal(FPType, Max)};
3865
955
    }();
3866
3867
955
    assuming(LLVM::Core::Trunc != LLVM::Core::NotIntrinsic);
3868
955
    auto Trunc = Builder.createUnaryIntrinsic(LLVM::Core::Trunc, Value);
3869
955
    auto IsNotUnderflow =
3870
955
        Builder.createLikely(Builder.createFCmpOGE(Trunc, MinFp));
3871
955
    Builder.createCondBr(IsNotUnderflow, NormBB, EndBB);
3872
3873
955
    Builder.positionAtEnd(NormBB);
3874
955
    auto IsNotOverflow = Builder.createLikely(
3875
955
        Builder.createFCmp(Precise ? LLVMRealOLE : LLVMRealOLT, Trunc, MaxFp));
3876
955
    Builder.createCondBr(IsNotOverflow, NotMaxBB, EndBB);
3877
3878
955
    Builder.positionAtEnd(NotMaxBB);
3879
955
    auto IntValue = Builder.createFPToUI(Trunc, IntType);
3880
955
    Builder.createBr(EndBB);
3881
3882
955
    Builder.positionAtEnd(EndBB);
3883
955
    auto PHIRet = Builder.createPHI(IntType);
3884
955
    PHIRet.addIncoming(LLVM::Value::getConstInt(IntType, MinInt), CurrBB);
3885
955
    PHIRet.addIncoming(LLVM::Value::getConstInt(IntType, MaxInt), NormBB);
3886
955
    PHIRet.addIncoming(IntValue, NotMaxBB);
3887
3888
955
    stackPush(PHIRet);
3889
955
  }
3890
3891
  void compileAtomicCheckOffsetAlignment(LLVM::Value Offset,
3892
40
                                         LLVM::Type IntType) noexcept {
3893
40
    const auto BitWidth = IntType.getIntegerBitWidth();
3894
40
    auto BWMask = LLContext.getInt64((BitWidth >> 3) - 1);
3895
40
    auto Value = Builder.createAnd(Offset, BWMask);
3896
40
    auto OkBB = LLVM::BasicBlock::create(LLContext, F.Fn, "address_align_ok");
3897
40
    auto IsAddressAligned = Builder.createLikely(
3898
40
        Builder.createICmpEQ(Value, LLContext.getInt64(0)));
3899
40
    Builder.createCondBr(IsAddressAligned, OkBB,
3900
40
                         getTrapBB(ErrCode::Value::UnalignedAtomicAccess));
3901
3902
40
    Builder.positionAtEnd(OkBB);
3903
40
  }
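compileAtomicCheckOffsetAlignment masks the effective address with the access width in bytes minus one and traps with UnalignedAtomicAccess when any low bit is set, so atomic accesses must be naturally aligned. A hypothetical host-side equivalent of that predicate (the helper name is not from this file):

#include <cstdint>

// True when EffAddr is a multiple of the access width; false corresponds to the
// branch into getTrapBB(ErrCode::Value::UnalignedAtomicAccess) above.
static bool isNaturallyAligned(uint64_t EffAddr, uint32_t BitWidth) noexcept {
  const uint64_t Mask = (BitWidth >> 3) - 1; // e.g. 0x3 for a 32-bit access
  return (EffAddr & Mask) == 0;
}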
3904
3905
188
  void compileMemoryFence() noexcept {
3906
188
    Builder.createFence(LLVMAtomicOrderingSequentiallyConsistent);
3907
188
  }
3908
  void compileAtomicNotify(unsigned MemoryIndex,
3909
33
                           unsigned MemoryOffset) noexcept {
3910
33
    auto Count = stackPop();
3911
33
    auto Addr = Builder.createZExt(Stack.back(), Context.Int64Ty);
3912
33
    if (MemoryOffset != 0) {
3913
26
      Addr = Builder.createAdd(Addr, LLContext.getInt64(MemoryOffset));
3914
26
    }
3915
33
    compileAtomicCheckOffsetAlignment(Addr, Context.Int32Ty);
3916
33
    auto Offset = stackPop();
3917
3918
33
    stackPush(Builder.createCall(
3919
33
        Context.getIntrinsic(
3920
33
            Builder, Executable::Intrinsics::kMemAtomicNotify,
3921
33
            LLVM::Type::getFunctionType(
3922
33
                Context.Int32Ty,
3923
33
                {Context.Int32Ty, Context.Int32Ty, Context.Int32Ty}, false)),
3924
33
        {LLContext.getInt32(MemoryIndex), Offset, Count}));
3925
33
  }
3926
  void compileAtomicWait(unsigned MemoryIndex, unsigned MemoryOffset,
3927
7
                         LLVM::Type TargetType, uint32_t BitWidth) noexcept {
3928
7
    auto Timeout = stackPop();
3929
7
    auto ExpectedValue = Builder.createZExtOrTrunc(stackPop(), Context.Int64Ty);
3930
7
    auto Addr = Builder.createZExt(Stack.back(), Context.Int64Ty);
3931
7
    if (MemoryOffset != 0) {
3932
3
      Addr = Builder.createAdd(Addr, LLContext.getInt64(MemoryOffset));
3933
3
    }
3934
7
    compileAtomicCheckOffsetAlignment(Addr, TargetType);
3935
7
    auto Offset = stackPop();
3936
3937
7
    stackPush(Builder.createCall(
3938
7
        Context.getIntrinsic(
3939
7
            Builder, Executable::Intrinsics::kMemAtomicWait,
3940
7
            LLVM::Type::getFunctionType(Context.Int32Ty,
3941
7
                                        {Context.Int32Ty, Context.Int32Ty,
3942
7
                                         Context.Int64Ty, Context.Int64Ty,
3943
7
                                         Context.Int32Ty},
3944
7
                                        false)),
3945
7
        {LLContext.getInt32(MemoryIndex), Offset, ExpectedValue, Timeout,
3946
7
         LLContext.getInt32(BitWidth)}));
3947
7
  }
3948
  void compileAtomicLoad(unsigned MemoryIndex, unsigned MemoryOffset,
3949
                         unsigned Alignment, LLVM::Type IntType,
3950
0
                         LLVM::Type TargetType, bool Signed = false) noexcept {
3951
3952
0
    auto Offset = Builder.createZExt(Stack.back(), Context.Int64Ty);
3953
0
    if (MemoryOffset != 0) {
3954
0
      Offset = Builder.createAdd(Offset, LLContext.getInt64(MemoryOffset));
3955
0
    }
3956
0
    compileAtomicCheckOffsetAlignment(Offset, TargetType);
3957
0
    auto VPtr = Builder.createInBoundsGEP1(
3958
0
        Context.Int8Ty, Context.getMemory(Builder, ExecCtx, MemoryIndex),
3959
0
        Offset);
3960
3961
0
    auto Ptr = Builder.createBitCast(VPtr, TargetType.getPointerTo());
3962
0
    auto Load = switchEndian(Builder.createLoad(TargetType, Ptr, true));
3963
0
    Load.setAlignment(1 << Alignment);
3964
0
    Load.setOrdering(LLVMAtomicOrderingSequentiallyConsistent);
3965
3966
0
    if (Signed) {
3967
0
      Stack.back() = Builder.createSExt(Load, IntType);
3968
0
    } else {
3969
0
      Stack.back() = Builder.createZExt(Load, IntType);
3970
0
    }
3971
0
  }
3972
  void compileAtomicStore(unsigned MemoryIndex, unsigned MemoryOffset,
3973
                          unsigned Alignment, LLVM::Type, LLVM::Type TargetType,
3974
0
                          bool Signed = false) noexcept {
3975
0
    auto V = stackPop();
3976
3977
0
    if (Signed) {
3978
0
      V = Builder.createSExtOrTrunc(V, TargetType);
3979
0
    } else {
3980
0
      V = Builder.createZExtOrTrunc(V, TargetType);
3981
0
    }
3982
0
    V = switchEndian(V);
3983
0
    auto Offset = Builder.createZExt(Stack.back(), Context.Int64Ty);
3984
0
    if (MemoryOffset != 0) {
3985
0
      Offset = Builder.createAdd(Offset, LLContext.getInt64(MemoryOffset));
3986
0
    }
3987
0
    compileAtomicCheckOffsetAlignment(Offset, TargetType);
3988
0
    auto VPtr = Builder.createInBoundsGEP1(
3989
0
        Context.Int8Ty, Context.getMemory(Builder, ExecCtx, MemoryIndex),
3990
0
        Offset);
3991
0
    auto Ptr = Builder.createBitCast(VPtr, TargetType.getPointerTo());
3992
0
    auto Store = Builder.createStore(V, Ptr, true);
3993
0
    Store.setAlignment(1 << Alignment);
3994
0
    Store.setOrdering(LLVMAtomicOrderingSequentiallyConsistent);
3995
0
  }
3996
3997
  void compileAtomicRMWOp(unsigned MemoryIndex, unsigned MemoryOffset,
3998
                          [[maybe_unused]] unsigned Alignment,
3999
                          LLVMAtomicRMWBinOp BinOp, LLVM::Type IntType,
4000
0
                          LLVM::Type TargetType, bool Signed = false) noexcept {
4001
0
    auto Value = Builder.createSExtOrTrunc(stackPop(), TargetType);
4002
0
    auto Offset = Builder.createZExt(Stack.back(), Context.Int64Ty);
4003
0
    if (MemoryOffset != 0) {
4004
0
      Offset = Builder.createAdd(Offset, LLContext.getInt64(MemoryOffset));
4005
0
    }
4006
0
    compileAtomicCheckOffsetAlignment(Offset, TargetType);
4007
0
    auto VPtr = Builder.createInBoundsGEP1(
4008
0
        Context.Int8Ty, Context.getMemory(Builder, ExecCtx, MemoryIndex),
4009
0
        Offset);
4010
0
    auto Ptr = Builder.createBitCast(VPtr, TargetType.getPointerTo());
4011
4012
0
    LLVM::Value Ret;
4013
    if constexpr (Endian::native == Endian::big) {
4014
      if (BinOp == LLVMAtomicRMWBinOp::LLVMAtomicRMWBinOpAdd ||
4015
          BinOp == LLVMAtomicRMWBinOp::LLVMAtomicRMWBinOpSub) {
4016
        auto AtomicBB = LLVM::BasicBlock::create(LLContext, F.Fn, "atomic.rmw");
4017
        auto OkBB = LLVM::BasicBlock::create(LLContext, F.Fn, "atomic.rmw.ok");
4018
        Builder.createBr(AtomicBB);
4019
        Builder.positionAtEnd(AtomicBB);
4020
4021
        auto Load = Builder.createLoad(TargetType, Ptr, true);
4022
        Load.setOrdering(LLVMAtomicOrderingMonotonic);
4023
        Load.setAlignment(1 << Alignment);
4024
4025
        LLVM::Value New;
4026
        if (BinOp == LLVMAtomicRMWBinOp::LLVMAtomicRMWBinOpAdd)
4027
          New = Builder.createAdd(switchEndian(Load), Value);
4028
        else if (BinOp == LLVMAtomicRMWBinOp::LLVMAtomicRMWBinOpSub) {
4029
          New = Builder.createSub(switchEndian(Load), Value);
4030
        } else {
4031
          assumingUnreachable();
4032
        }
4033
        New = switchEndian(New);
4034
4035
        auto Exchange = Builder.createAtomicCmpXchg(
4036
            Ptr, Load, New, LLVMAtomicOrderingSequentiallyConsistent,
4037
            LLVMAtomicOrderingSequentiallyConsistent);
4038
4039
        Ret = Builder.createExtractValue(Exchange, 0);
4040
        auto Success = Builder.createExtractValue(Exchange, 1);
4041
        Builder.createCondBr(Success, OkBB, AtomicBB);
4042
        Builder.positionAtEnd(OkBB);
4043
      } else {
4044
        Ret = Builder.createAtomicRMW(BinOp, Ptr, switchEndian(Value),
4045
                                      LLVMAtomicOrderingSequentiallyConsistent);
4046
      }
4047
0
    } else {
4048
0
      Ret = Builder.createAtomicRMW(BinOp, Ptr, switchEndian(Value),
4049
0
                                    LLVMAtomicOrderingSequentiallyConsistent);
4050
0
    }
4051
0
    Ret = switchEndian(Ret);
4052
#if LLVM_VERSION_MAJOR >= 13
4053
    Ret.setAlignment(1 << Alignment);
4054
#endif
4055
0
    if (Signed) {
4056
0
      Stack.back() = Builder.createSExt(Ret, IntType);
4057
0
    } else {
4058
0
      Stack.back() = Builder.createZExt(Ret, IntType);
4059
0
    }
4060
0
  }
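On big-endian hosts, compileAtomicRMWOp cannot hand add and sub to a native atomicrmw: the byte swap applied by switchEndian commutes with and, or, xor and xchg, but not with arithmetic, so those two ops are lowered to a compare-exchange retry loop around the swapped value. A hedged C++ sketch of that loop for a 32-bit add, assuming linear-memory cells stay little-endian on a big-endian host as the surrounding switchEndian calls suggest (helper names are illustrative):

#include <atomic>
#include <cstdint>

static uint32_t bswap32(uint32_t V) noexcept {
  return (V >> 24) | ((V >> 8) & 0xFF00u) | ((V << 8) & 0xFF0000u) | (V << 24);
}

// Returns the previous value in host byte order, like the extractvalue of the
// cmpxchg pair followed by switchEndian in the IR above.
static uint32_t atomicRmwAddLE(std::atomic<uint32_t> &Cell,
                               uint32_t Operand) noexcept {
  uint32_t Old = Cell.load(std::memory_order_relaxed);
  uint32_t New;
  do {
    New = bswap32(bswap32(Old) + Operand); // add in host order, re-encode for memory
  } while (!Cell.compare_exchange_weak(Old, New, std::memory_order_seq_cst));
  return bswap32(Old);
}

The bitwise ops can skip the loop because swapping the operand and swapping the result back is equivalent to operating on the unswapped values.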
4061
  void compileAtomicCompareExchange(unsigned MemoryIndex, unsigned MemoryOffset,
4062
                                    [[maybe_unused]] unsigned Alignment,
4063
                                    LLVM::Type IntType, LLVM::Type TargetType,
4064
0
                                    bool Signed = false) noexcept {
4065
4066
0
    auto Replacement = Builder.createSExtOrTrunc(stackPop(), TargetType);
4067
0
    auto Expected = Builder.createSExtOrTrunc(stackPop(), TargetType);
4068
0
    auto Offset = Builder.createZExt(Stack.back(), Context.Int64Ty);
4069
0
    if (MemoryOffset != 0) {
4070
0
      Offset = Builder.createAdd(Offset, LLContext.getInt64(MemoryOffset));
4071
0
    }
4072
0
    compileAtomicCheckOffsetAlignment(Offset, TargetType);
4073
0
    auto VPtr = Builder.createInBoundsGEP1(
4074
0
        Context.Int8Ty, Context.getMemory(Builder, ExecCtx, MemoryIndex),
4075
0
        Offset);
4076
0
    auto Ptr = Builder.createBitCast(VPtr, TargetType.getPointerTo());
4077
4078
0
    auto Ret = Builder.createAtomicCmpXchg(
4079
0
        Ptr, switchEndian(Expected), switchEndian(Replacement),
4080
0
        LLVMAtomicOrderingSequentiallyConsistent,
4081
0
        LLVMAtomicOrderingSequentiallyConsistent);
4082
#if LLVM_VERSION_MAJOR >= 13
4083
    Ret.setAlignment(1 << Alignment);
4084
#endif
4085
0
    auto OldVal = Builder.createExtractValue(Ret, 0);
4086
0
    OldVal = switchEndian(OldVal);
4087
0
    if (Signed) {
4088
0
      Stack.back() = Builder.createSExt(OldVal, IntType);
4089
0
    } else {
4090
0
      Stack.back() = Builder.createZExt(OldVal, IntType);
4091
0
    }
4092
0
  }
4093
4094
10.0k
  void compileReturn() noexcept {
4095
10.0k
    updateInstrCount();
4096
10.0k
    updateGas();
4097
10.0k
    auto Ty = F.Ty.getReturnType();
4098
10.0k
    if (Ty.isVoidTy()) {
4099
1.83k
      Builder.createRetVoid();
4100
8.18k
    } else if (Ty.isStructTy()) {
4101
295
      const auto Count = Ty.getStructNumElements();
4102
295
      std::vector<LLVM::Value> Ret(Count);
4103
1.11k
      for (unsigned I = 0; I < Count; ++I) {
4104
820
        const unsigned J = Count - 1 - I;
4105
820
        Ret[J] = stackPop();
4106
820
      }
4107
295
      Builder.createAggregateRet(Ret);
4108
7.89k
    } else {
4109
7.89k
      Builder.createRet(stackPop());
4110
7.89k
    }
4111
10.0k
  }
4112
4113
16.7k
  void updateInstrCount() noexcept {
4114
16.7k
    if (LocalInstrCount) {
4115
0
      auto Store [[maybe_unused]] = Builder.createAtomicRMW(
4116
0
          LLVMAtomicRMWBinOpAdd, Context.getInstrCount(Builder, ExecCtx),
4117
0
          Builder.createLoad(Context.Int64Ty, LocalInstrCount),
4118
0
          LLVMAtomicOrderingMonotonic);
4119
#if LLVM_VERSION_MAJOR >= 13
4120
      Store.setAlignment(8);
4121
#endif
4122
0
      Builder.createStore(LLContext.getInt64(0), LocalInstrCount);
4123
0
    }
4124
16.7k
  }
4125
4126
18.5k
  void updateGas() noexcept {
4127
18.5k
    if (LocalGas) {
4128
0
      auto CurrBB = Builder.getInsertBlock();
4129
0
      auto CheckBB = LLVM::BasicBlock::create(LLContext, F.Fn, "gas_check");
4130
0
      auto OkBB = LLVM::BasicBlock::create(LLContext, F.Fn, "gas_ok");
4131
0
      auto EndBB = LLVM::BasicBlock::create(LLContext, F.Fn, "gas_end");
4132
4133
0
      auto Cost = Builder.createLoad(Context.Int64Ty, LocalGas);
4134
0
      Cost.setAlignment(64);
4135
0
      auto GasPtr = Context.getGas(Builder, ExecCtx);
4136
0
      auto GasLimit = Context.getGasLimit(Builder, ExecCtx);
4137
0
      auto Gas = Builder.createLoad(Context.Int64Ty, GasPtr);
4138
0
      Gas.setAlignment(64);
4139
0
      Gas.setOrdering(LLVMAtomicOrderingMonotonic);
4140
0
      Builder.createBr(CheckBB);
4141
0
      Builder.positionAtEnd(CheckBB);
4142
4143
0
      auto PHIOldGas = Builder.createPHI(Context.Int64Ty);
4144
0
      auto NewGas = Builder.createAdd(PHIOldGas, Cost);
4145
0
      auto IsGasRemain =
4146
0
          Builder.createLikely(Builder.createICmpULE(NewGas, GasLimit));
4147
0
      Builder.createCondBr(IsGasRemain, OkBB,
4148
0
                           getTrapBB(ErrCode::Value::CostLimitExceeded));
4149
0
      Builder.positionAtEnd(OkBB);
4150
4151
0
      auto RGasAndSucceed = Builder.createAtomicCmpXchg(
4152
0
          GasPtr, PHIOldGas, NewGas, LLVMAtomicOrderingMonotonic,
4153
0
          LLVMAtomicOrderingMonotonic);
4154
#if LLVM_VERSION_MAJOR >= 13
4155
      RGasAndSucceed.setAlignment(8);
4156
#endif
4157
0
      RGasAndSucceed.setWeak(true);
4158
0
      auto RGas = Builder.createExtractValue(RGasAndSucceed, 0);
4159
0
      auto Succeed = Builder.createExtractValue(RGasAndSucceed, 1);
4160
0
      Builder.createCondBr(Builder.createLikely(Succeed), EndBB, CheckBB);
4161
0
      Builder.positionAtEnd(EndBB);
4162
4163
0
      Builder.createStore(LLContext.getInt64(0), LocalGas);
4164
4165
0
      PHIOldGas.addIncoming(Gas, CurrBB);
4166
0
      PHIOldGas.addIncoming(RGas, OkBB);
4167
0
    }
4168
18.5k
  }
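updateGas flushes the locally accumulated cost into the shared gas counter with a weak compare-exchange loop and traps with CostLimitExceeded as soon as the prospective total would exceed the limit. A minimal sketch of that contract in plain C++ (chargeGas and GasStatus are illustrative names, not part of the runtime API):

#include <atomic>
#include <cstdint>

enum class GasStatus { Ok, CostLimitExceeded };

// Mirrors the CheckBB/OkBB loop above: retry the CAS until the addition lands,
// and bail out before committing when the limit would be crossed.
static GasStatus chargeGas(std::atomic<uint64_t> &Gas, uint64_t GasLimit,
                           uint64_t &LocalGas) noexcept {
  uint64_t Old = Gas.load(std::memory_order_relaxed);
  for (;;) {
    const uint64_t New = Old + LocalGas;
    if (New > GasLimit) {
      return GasStatus::CostLimitExceeded; // getTrapBB(ErrCode::Value::CostLimitExceeded)
    }
    if (Gas.compare_exchange_weak(Old, New, std::memory_order_relaxed)) {
      LocalGas = 0;                        // the store of 0 back into LocalGas
      return GasStatus::Ok;
    }
  }
}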
4169
4170
2.77k
  void updateGasAtTrap() noexcept {
4171
2.77k
    if (LocalGas) {
4172
0
      auto Update [[maybe_unused]] = Builder.createAtomicRMW(
4173
0
          LLVMAtomicRMWBinOpAdd, Context.getGas(Builder, ExecCtx),
4174
0
          Builder.createLoad(Context.Int64Ty, LocalGas),
4175
0
          LLVMAtomicOrderingMonotonic);
4176
#if LLVM_VERSION_MAJOR >= 13
4177
      Update.setAlignment(8);
4178
#endif
4179
0
    }
4180
2.77k
  }
4181
4182
private:
4183
3.29k
  void compileCallOp(const unsigned int FuncIndex) noexcept {
4184
3.29k
    const auto &FuncType =
4185
3.29k
        Context.CompositeTypes[std::get<0>(Context.Functions[FuncIndex])]
4186
3.29k
            ->getFuncType();
4187
3.29k
    const auto &Function = std::get<1>(Context.Functions[FuncIndex]);
4188
3.29k
    const auto &ParamTypes = FuncType.getParamTypes();
4189
4190
3.29k
    std::vector<LLVM::Value> Args(ParamTypes.size() + 1);
4191
3.29k
    Args[0] = F.Fn.getFirstParam();
4192
4.06k
    for (size_t I = 0; I < ParamTypes.size(); ++I) {
4193
775
      const size_t J = ParamTypes.size() - 1 - I;
4194
775
      Args[J + 1] = stackPop();
4195
775
    }
4196
4197
3.29k
    auto Ret = Builder.createCall(Function, Args);
4198
3.29k
    auto Ty = Ret.getType();
4199
3.29k
    if (Ty.isVoidTy()) {
4200
      // nothing to do
4201
1.72k
    } else if (Ty.isStructTy()) {
4202
170
      for (auto Val : unpackStruct(Builder, Ret)) {
4203
170
        stackPush(Val);
4204
170
      }
4205
1.49k
    } else {
4206
1.49k
      stackPush(Ret);
4207
1.49k
    }
4208
3.29k
  }
4209
4210
  void compileIndirectCallOp(const uint32_t TableIndex,
4211
610
                             const uint32_t FuncTypeIndex) noexcept {
4212
610
    auto NotNullBB = LLVM::BasicBlock::create(LLContext, F.Fn, "c_i.not_null");
4213
610
    auto IsNullBB = LLVM::BasicBlock::create(LLContext, F.Fn, "c_i.is_null");
4214
610
    auto EndBB = LLVM::BasicBlock::create(LLContext, F.Fn, "c_i.end");
4215
4216
610
    LLVM::Value FuncIndex = stackPop();
4217
610
    const auto &FuncType = Context.CompositeTypes[FuncTypeIndex]->getFuncType();
4218
610
    auto FTy = toLLVMType(Context.LLContext, Context.ExecCtxPtrTy, FuncType);
4219
610
    auto RTy = FTy.getReturnType();
4220
4221
610
    const size_t ArgSize = FuncType.getParamTypes().size();
4222
610
    const size_t RetSize =
4223
610
        RTy.isVoidTy() ? 0 : FuncType.getReturnTypes().size();
4224
610
    std::vector<LLVM::Value> ArgsVec(ArgSize + 1, nullptr);
4225
610
    ArgsVec[0] = F.Fn.getFirstParam();
4226
1.19k
    for (size_t I = 0; I < ArgSize; ++I) {
4227
587
      const size_t J = ArgSize - I;
4228
587
      ArgsVec[J] = stackPop();
4229
587
    }
4230
4231
610
    std::vector<LLVM::Value> FPtrRetsVec;
4232
610
    FPtrRetsVec.reserve(RetSize);
4233
610
    {
4234
610
      auto FPtr = Builder.createCall(
4235
610
          Context.getIntrinsic(
4236
610
              Builder, Executable::Intrinsics::kTableGetFuncSymbol,
4237
610
              LLVM::Type::getFunctionType(
4238
610
                  FTy.getPointerTo(),
4239
610
                  {Context.Int32Ty, Context.Int32Ty, Context.Int32Ty}, false)),
4240
610
          {LLContext.getInt32(TableIndex), LLContext.getInt32(FuncTypeIndex),
4241
610
           FuncIndex});
4242
610
      Builder.createCondBr(
4243
610
          Builder.createLikely(Builder.createNot(Builder.createIsNull(FPtr))),
4244
610
          NotNullBB, IsNullBB);
4245
610
      Builder.positionAtEnd(NotNullBB);
4246
4247
610
      auto FPtrRet =
4248
610
          Builder.createCall(LLVM::FunctionCallee{FTy, FPtr}, ArgsVec);
4249
610
      if (RetSize == 0) {
4250
        // nothing to do
4251
457
      } else if (RetSize == 1) {
4252
442
        FPtrRetsVec.push_back(FPtrRet);
4253
442
      } else {
4254
30
        for (auto Val : unpackStruct(Builder, FPtrRet)) {
4255
30
          FPtrRetsVec.push_back(Val);
4256
30
        }
4257
15
      }
4258
610
    }
4259
4260
610
    Builder.createBr(EndBB);
4261
610
    Builder.positionAtEnd(IsNullBB);
4262
4263
610
    std::vector<LLVM::Value> RetsVec;
4264
610
    {
4265
610
      LLVM::Value Args = Builder.createArray(ArgSize, kValSize);
4266
610
      LLVM::Value Rets = Builder.createArray(RetSize, kValSize);
4267
610
      Builder.createArrayPtrStore(
4268
610
          Span<LLVM::Value>(ArgsVec.begin() + 1, ArgSize), Args, Context.Int8Ty,
4269
610
          kValSize);
4270
4271
610
      Builder.createCall(
4272
610
          Context.getIntrinsic(
4273
610
              Builder, Executable::Intrinsics::kCallIndirect,
4274
610
              LLVM::Type::getFunctionType(Context.VoidTy,
4275
610
                                          {Context.Int32Ty, Context.Int32Ty,
4276
610
                                           Context.Int32Ty, Context.Int8PtrTy,
4277
610
                                           Context.Int8PtrTy},
4278
610
                                          false)),
4279
610
          {LLContext.getInt32(TableIndex), LLContext.getInt32(FuncTypeIndex),
4280
610
           FuncIndex, Args, Rets});
4281
4282
610
      if (RetSize == 0) {
4283
        // nothing to do
4284
457
      } else if (RetSize == 1) {
4285
442
        RetsVec.push_back(
4286
442
            Builder.createValuePtrLoad(RTy, Rets, Context.Int8Ty));
4287
442
      } else {
4288
15
        RetsVec = Builder.createArrayPtrLoad(RetSize, RTy, Rets, Context.Int8Ty,
4289
15
                                             kValSize);
4290
15
      }
4291
610
      Builder.createBr(EndBB);
4292
610
      Builder.positionAtEnd(EndBB);
4293
610
    }
4294
4295
1.08k
    for (unsigned I = 0; I < RetSize; ++I) {
4296
472
      auto PHIRet = Builder.createPHI(FPtrRetsVec[I].getType());
4297
472
      PHIRet.addIncoming(FPtrRetsVec[I], NotNullBB);
4298
472
      PHIRet.addIncoming(RetsVec[I], IsNullBB);
4299
472
      stackPush(PHIRet);
4300
472
    }
4301
610
  }
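compileIndirectCallOp emits a two-way dispatch: when kTableGetFuncSymbol resolves the table slot to a compiled symbol, the arguments are passed natively; otherwise they are boxed into kValSize-byte slots and the kCallIndirect intrinsic executes the callee, with PHIs merging the results of both paths. A rough C++ picture of that shape for a single i32 argument and result, under the assumption that the fallback fills a boxed result array (every type and helper here is illustrative, and the table/type indices passed to the real intrinsic are omitted):

#include <cstdint>
#include <vector>

using ValSlot = uint64_t;                              // stand-in for a kValSize slot
using CompiledFn = uint32_t (*)(void *ExecCtx, uint32_t Arg);
using IndirectIntrinsic = void (*)(void *ExecCtx, ValSlot *Args, ValSlot *Rets);

static uint32_t callIndirect(void *ExecCtx, CompiledFn Symbol,
                             IndirectIntrinsic Fallback, uint32_t Arg) {
  if (Symbol != nullptr) {
    return Symbol(ExecCtx, Arg);                       // "c_i.not_null" fast path
  }
  std::vector<ValSlot> Args{Arg};                      // boxed argument array
  std::vector<ValSlot> Rets(1);                        // boxed result array
  Fallback(ExecCtx, Args.data(), Rets.data());         // "c_i.is_null" slow path
  return static_cast<uint32_t>(Rets[0]);               // the value the PHI would select
}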
4302
4303
0
  void compileReturnCallOp(const unsigned int FuncIndex) noexcept {
4304
0
    const auto &FuncType =
4305
0
        Context.CompositeTypes[std::get<0>(Context.Functions[FuncIndex])]
4306
0
            ->getFuncType();
4307
0
    const auto &Function = std::get<1>(Context.Functions[FuncIndex]);
4308
0
    const auto &ParamTypes = FuncType.getParamTypes();
4309
4310
0
    std::vector<LLVM::Value> Args(ParamTypes.size() + 1);
4311
0
    Args[0] = F.Fn.getFirstParam();
4312
0
    for (size_t I = 0; I < ParamTypes.size(); ++I) {
4313
0
      const size_t J = ParamTypes.size() - 1 - I;
4314
0
      Args[J + 1] = stackPop();
4315
0
    }
4316
4317
0
    auto Ret = Builder.createCall(Function, Args);
4318
0
    auto Ty = Ret.getType();
4319
0
    if (Ty.isVoidTy()) {
4320
0
      Builder.createRetVoid();
4321
0
    } else {
4322
0
      Builder.createRet(Ret);
4323
0
    }
4324
0
  }
4325
4326
  void compileReturnIndirectCallOp(const uint32_t TableIndex,
4327
0
                                   const uint32_t FuncTypeIndex) noexcept {
4328
0
    auto NotNullBB = LLVM::BasicBlock::create(LLContext, F.Fn, "c_i.not_null");
4329
0
    auto IsNullBB = LLVM::BasicBlock::create(LLContext, F.Fn, "c_i.is_null");
4330
4331
0
    LLVM::Value FuncIndex = stackPop();
4332
0
    const auto &FuncType = Context.CompositeTypes[FuncTypeIndex]->getFuncType();
4333
0
    auto FTy = toLLVMType(Context.LLContext, Context.ExecCtxPtrTy, FuncType);
4334
0
    auto RTy = FTy.getReturnType();
4335
4336
0
    const size_t ArgSize = FuncType.getParamTypes().size();
4337
0
    const size_t RetSize =
4338
0
        RTy.isVoidTy() ? 0 : FuncType.getReturnTypes().size();
4339
0
    std::vector<LLVM::Value> ArgsVec(ArgSize + 1, nullptr);
4340
0
    ArgsVec[0] = F.Fn.getFirstParam();
4341
0
    for (size_t I = 0; I < ArgSize; ++I) {
4342
0
      const size_t J = ArgSize - I;
4343
0
      ArgsVec[J] = stackPop();
4344
0
    }
4345
4346
0
    {
4347
0
      auto FPtr = Builder.createCall(
4348
0
          Context.getIntrinsic(
4349
0
              Builder, Executable::Intrinsics::kTableGetFuncSymbol,
4350
0
              LLVM::Type::getFunctionType(
4351
0
                  FTy.getPointerTo(),
4352
0
                  {Context.Int32Ty, Context.Int32Ty, Context.Int32Ty}, false)),
4353
0
          {LLContext.getInt32(TableIndex), LLContext.getInt32(FuncTypeIndex),
4354
0
           FuncIndex});
4355
0
      Builder.createCondBr(
4356
0
          Builder.createLikely(Builder.createNot(Builder.createIsNull(FPtr))),
4357
0
          NotNullBB, IsNullBB);
4358
0
      Builder.positionAtEnd(NotNullBB);
4359
4360
0
      auto FPtrRet =
4361
0
          Builder.createCall(LLVM::FunctionCallee(FTy, FPtr), ArgsVec);
4362
0
      if (RetSize == 0) {
4363
0
        Builder.createRetVoid();
4364
0
      } else {
4365
0
        Builder.createRet(FPtrRet);
4366
0
      }
4367
0
    }
4368
4369
0
    Builder.positionAtEnd(IsNullBB);
4370
4371
0
    {
4372
0
      LLVM::Value Args = Builder.createArray(ArgSize, kValSize);
4373
0
      LLVM::Value Rets = Builder.createArray(RetSize, kValSize);
4374
0
      Builder.createArrayPtrStore(
4375
0
          Span<LLVM::Value>(ArgsVec.begin() + 1, ArgSize), Args, Context.Int8Ty,
4376
0
          kValSize);
4377
4378
0
      Builder.createCall(
4379
0
          Context.getIntrinsic(
4380
0
              Builder, Executable::Intrinsics::kCallIndirect,
4381
0
              LLVM::Type::getFunctionType(Context.VoidTy,
4382
0
                                          {Context.Int32Ty, Context.Int32Ty,
4383
0
                                           Context.Int32Ty, Context.Int8PtrTy,
4384
0
                                           Context.Int8PtrTy},
4385
0
                                          false)),
4386
0
          {LLContext.getInt32(TableIndex), LLContext.getInt32(FuncTypeIndex),
4387
0
           FuncIndex, Args, Rets});
4388
4389
0
      if (RetSize == 0) {
4390
0
        Builder.createRetVoid();
4391
0
      } else if (RetSize == 1) {
4392
0
        Builder.createRet(
4393
0
            Builder.createValuePtrLoad(RTy, Rets, Context.Int8Ty));
4394
0
      } else {
4395
0
        Builder.createAggregateRet(Builder.createArrayPtrLoad(
4396
0
            RetSize, RTy, Rets, Context.Int8Ty, kValSize));
4397
0
      }
4398
0
    }
4399
0
  }
4400
4401
0
  void compileCallRefOp(const unsigned int TypeIndex) noexcept {
4402
0
    auto NotNullBB = LLVM::BasicBlock::create(LLContext, F.Fn, "c_r.not_null");
4403
0
    auto IsNullBB = LLVM::BasicBlock::create(LLContext, F.Fn, "c_r.is_null");
4404
0
    auto EndBB = LLVM::BasicBlock::create(LLContext, F.Fn, "c_i.end");
4405
4406
0
    auto Ref = Builder.createBitCast(stackPop(), Context.Int64x2Ty);
4407
0
    auto OkBB = LLVM::BasicBlock::create(LLContext, F.Fn, "c_r.ref_not_null");
4408
0
    auto IsRefNotNull = Builder.createLikely(Builder.createICmpNE(
4409
0
        Builder.createExtractElement(Ref, LLContext.getInt64(1)),
4410
0
        LLContext.getInt64(0)));
4411
0
    Builder.createCondBr(IsRefNotNull, OkBB,
4412
0
                         getTrapBB(ErrCode::Value::AccessNullFunc));
4413
0
    Builder.positionAtEnd(OkBB);
4414
4415
0
    const auto &FuncType = Context.CompositeTypes[TypeIndex]->getFuncType();
4416
0
    auto FTy = toLLVMType(Context.LLContext, Context.ExecCtxPtrTy, FuncType);
4417
0
    auto RTy = FTy.getReturnType();
4418
4419
0
    const size_t ArgSize = FuncType.getParamTypes().size();
4420
0
    const size_t RetSize =
4421
0
        RTy.isVoidTy() ? 0 : FuncType.getReturnTypes().size();
4422
0
    std::vector<LLVM::Value> ArgsVec(ArgSize + 1, nullptr);
4423
0
    ArgsVec[0] = F.Fn.getFirstParam();
4424
0
    for (size_t I = 0; I < ArgSize; ++I) {
4425
0
      const size_t J = ArgSize - I;
4426
0
      ArgsVec[J] = stackPop();
4427
0
    }
4428
4429
0
    std::vector<LLVM::Value> FPtrRetsVec;
4430
0
    FPtrRetsVec.reserve(RetSize);
4431
0
    {
4432
0
      auto FPtr = Builder.createCall(
4433
0
          Context.getIntrinsic(
4434
0
              Builder, Executable::Intrinsics::kRefGetFuncSymbol,
4435
0
              LLVM::Type::getFunctionType(FTy.getPointerTo(),
4436
0
                                          {Context.Int64x2Ty}, false)),
4437
0
          {Ref});
4438
0
      Builder.createCondBr(
4439
0
          Builder.createLikely(Builder.createNot(Builder.createIsNull(FPtr))),
4440
0
          NotNullBB, IsNullBB);
4441
0
      Builder.positionAtEnd(NotNullBB);
4442
4443
0
      auto FPtrRet =
4444
0
          Builder.createCall(LLVM::FunctionCallee{FTy, FPtr}, ArgsVec);
4445
0
      if (RetSize == 0) {
4446
        // nothing to do
4447
0
      } else if (RetSize == 1) {
4448
0
        FPtrRetsVec.push_back(FPtrRet);
4449
0
      } else {
4450
0
        for (auto Val : unpackStruct(Builder, FPtrRet)) {
4451
0
          FPtrRetsVec.push_back(Val);
4452
0
        }
4453
0
      }
4454
0
    }
4455
4456
0
    Builder.createBr(EndBB);
4457
0
    Builder.positionAtEnd(IsNullBB);
4458
4459
0
    std::vector<LLVM::Value> RetsVec;
4460
0
    {
4461
0
      LLVM::Value Args = Builder.createArray(ArgSize, kValSize);
4462
0
      LLVM::Value Rets = Builder.createArray(RetSize, kValSize);
4463
0
      Builder.createArrayPtrStore(
4464
0
          Span<LLVM::Value>(ArgsVec.begin() + 1, ArgSize), Args, Context.Int8Ty,
4465
0
          kValSize);
4466
4467
0
      Builder.createCall(
4468
0
          Context.getIntrinsic(
4469
0
              Builder, Executable::Intrinsics::kCallRef,
4470
0
              LLVM::Type::getFunctionType(
4471
0
                  Context.VoidTy,
4472
0
                  {Context.Int64x2Ty, Context.Int8PtrTy, Context.Int8PtrTy},
4473
0
                  false)),
4474
0
          {Ref, Args, Rets});
4475
4476
0
      if (RetSize == 0) {
4477
        // nothing to do
4478
0
      } else if (RetSize == 1) {
4479
0
        RetsVec.push_back(
4480
0
            Builder.createValuePtrLoad(RTy, Rets, Context.Int8Ty));
4481
0
      } else {
4482
0
        RetsVec = Builder.createArrayPtrLoad(RetSize, RTy, Rets, Context.Int8Ty,
4483
0
                                             kValSize);
4484
0
      }
4485
0
      Builder.createBr(EndBB);
4486
0
      Builder.positionAtEnd(EndBB);
4487
0
    }
4488
4489
0
    for (unsigned I = 0; I < RetSize; ++I) {
4490
0
      auto PHIRet = Builder.createPHI(FPtrRetsVec[I].getType());
4491
0
      PHIRet.addIncoming(FPtrRetsVec[I], NotNullBB);
4492
0
      PHIRet.addIncoming(RetsVec[I], IsNullBB);
4493
0
      stackPush(PHIRet);
4494
0
    }
4495
0
  }
4496
4497
0
  void compileReturnCallRefOp(const unsigned int TypeIndex) noexcept {
4498
0
    auto NotNullBB = LLVM::BasicBlock::create(LLContext, F.Fn, "c_r.not_null");
4499
0
    auto IsNullBB = LLVM::BasicBlock::create(LLContext, F.Fn, "c_r.is_null");
4500
4501
0
    auto Ref = Builder.createBitCast(stackPop(), Context.Int64x2Ty);
4502
0
    auto OkBB = LLVM::BasicBlock::create(LLContext, F.Fn, "c_r.ref_not_null");
4503
0
    auto IsRefNotNull = Builder.createLikely(Builder.createICmpNE(
4504
0
        Builder.createExtractElement(Ref, LLContext.getInt64(1)),
4505
0
        LLContext.getInt64(0)));
4506
0
    Builder.createCondBr(IsRefNotNull, OkBB,
4507
0
                         getTrapBB(ErrCode::Value::AccessNullFunc));
4508
0
    Builder.positionAtEnd(OkBB);
4509
4510
0
    const auto &FuncType = Context.CompositeTypes[TypeIndex]->getFuncType();
4511
0
    auto FTy = toLLVMType(Context.LLContext, Context.ExecCtxPtrTy, FuncType);
4512
0
    auto RTy = FTy.getReturnType();
4513
4514
0
    const size_t ArgSize = FuncType.getParamTypes().size();
4515
0
    const size_t RetSize =
4516
0
        RTy.isVoidTy() ? 0 : FuncType.getReturnTypes().size();
4517
0
    std::vector<LLVM::Value> ArgsVec(ArgSize + 1, nullptr);
4518
0
    ArgsVec[0] = F.Fn.getFirstParam();
4519
0
    for (size_t I = 0; I < ArgSize; ++I) {
4520
0
      const size_t J = ArgSize - I;
4521
0
      ArgsVec[J] = stackPop();
4522
0
    }
4523
4524
0
    {
4525
0
      auto FPtr = Builder.createCall(
4526
0
          Context.getIntrinsic(
4527
0
              Builder, Executable::Intrinsics::kRefGetFuncSymbol,
4528
0
              LLVM::Type::getFunctionType(FTy.getPointerTo(),
4529
0
                                          {Context.Int64x2Ty}, false)),
4530
0
          {Ref});
4531
0
      Builder.createCondBr(
4532
0
          Builder.createLikely(Builder.createNot(Builder.createIsNull(FPtr))),
4533
0
          NotNullBB, IsNullBB);
4534
0
      Builder.positionAtEnd(NotNullBB);
4535
4536
0
      auto FPtrRet =
4537
0
          Builder.createCall(LLVM::FunctionCallee(FTy, FPtr), ArgsVec);
4538
0
      if (RetSize == 0) {
4539
0
        Builder.createRetVoid();
4540
0
      } else {
4541
0
        Builder.createRet(FPtrRet);
4542
0
      }
4543
0
    }
4544
4545
0
    Builder.positionAtEnd(IsNullBB);
4546
4547
0
    {
4548
0
      LLVM::Value Args = Builder.createArray(ArgSize, kValSize);
4549
0
      LLVM::Value Rets = Builder.createArray(RetSize, kValSize);
4550
0
      Builder.createArrayPtrStore(
4551
0
          Span<LLVM::Value>(ArgsVec.begin() + 1, ArgSize), Args, Context.Int8Ty,
4552
0
          kValSize);
4553
4554
0
      Builder.createCall(
4555
0
          Context.getIntrinsic(
4556
0
              Builder, Executable::Intrinsics::kCallRef,
4557
0
              LLVM::Type::getFunctionType(
4558
0
                  Context.VoidTy,
4559
0
                  {Context.Int64x2Ty, Context.Int8PtrTy, Context.Int8PtrTy},
4560
0
                  false)),
4561
0
          {Ref, Args, Rets});
4562
4563
0
      if (RetSize == 0) {
4564
0
        Builder.createRetVoid();
4565
0
      } else if (RetSize == 1) {
4566
0
        Builder.createRet(
4567
0
            Builder.createValuePtrLoad(RTy, Rets, Context.Int8Ty));
4568
0
      } else {
4569
0
        Builder.createAggregateRet(Builder.createArrayPtrLoad(
4570
0
            RetSize, RTy, Rets, Context.Int8Ty, kValSize));
4571
0
      }
4572
0
    }
4573
0
  }
4574
4575
  void compileLoadOp(unsigned MemoryIndex, unsigned Offset, unsigned Alignment,
4576
16.6k
                     LLVM::Type LoadTy) noexcept {
4577
16.6k
    if constexpr (kForceUnalignment) {
4578
16.6k
      Alignment = 0;
4579
16.6k
    }
4580
16.6k
    auto Off = Builder.createZExt(stackPop(), Context.Int64Ty);
4581
16.6k
    if (Offset != 0) {
4582
10.6k
      Off = Builder.createAdd(Off, LLContext.getInt64(Offset));
4583
10.6k
    }
4584
4585
16.6k
    auto VPtr = Builder.createInBoundsGEP1(
4586
16.6k
        Context.Int8Ty, Context.getMemory(Builder, ExecCtx, MemoryIndex), Off);
4587
16.6k
    auto Ptr = Builder.createBitCast(VPtr, LoadTy.getPointerTo());
4588
16.6k
    auto LoadInst = Builder.createLoad(LoadTy, Ptr, true);
4589
16.6k
    LoadInst.setAlignment(1 << Alignment);
4590
16.6k
    stackPush(switchEndian(LoadInst));
4591
16.6k
  }
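Because kForceUnalignment is set, compileLoadOp clamps Alignment to 0, so the emitted load is align-1; the address is simply the memory base plus the dynamic offset plus the static one, and switchEndian covers big-endian hosts. A host-side sketch of the equivalent access (the helper name is illustrative):

#include <cstdint>
#include <cstring>

// Align-1 i32 load from linear memory at MemoryBase + DynOffset + StaticOffset.
static uint32_t loadI32(const uint8_t *MemoryBase, uint64_t DynOffset,
                        uint64_t StaticOffset) noexcept {
  uint32_t V;
  std::memcpy(&V, MemoryBase + DynOffset + StaticOffset, sizeof(V));
  return V; // switchEndian would byte-swap this on a big-endian host
}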
4592
  void compileLoadOp(unsigned MemoryIndex, unsigned Offset, unsigned Alignment,
4593
                     LLVM::Type LoadTy, LLVM::Type ExtendTy,
4594
6.77k
                     bool Signed) noexcept {
4595
6.77k
    compileLoadOp(MemoryIndex, Offset, Alignment, LoadTy);
4596
6.77k
    if (Signed) {
4597
2.78k
      Stack.back() = Builder.createSExt(Stack.back(), ExtendTy);
4598
3.98k
    } else {
4599
3.98k
      Stack.back() = Builder.createZExt(Stack.back(), ExtendTy);
4600
3.98k
    }
4601
6.77k
  }
4602
  void compileVectorLoadOp(unsigned MemoryIndex, unsigned Offset,
4603
4.63k
                           unsigned Alignment, LLVM::Type LoadTy) noexcept {
4604
4.63k
    compileLoadOp(MemoryIndex, Offset, Alignment, LoadTy);
4605
4.63k
    Stack.back() = Builder.createBitCast(Stack.back(), Context.Int64x2Ty);
4606
4.63k
  }
4607
  void compileVectorLoadOp(unsigned MemoryIndex, unsigned Offset,
4608
                           unsigned Alignment, LLVM::Type LoadTy,
4609
1.50k
                           LLVM::Type ExtendTy, bool Signed) noexcept {
4610
1.50k
    compileLoadOp(MemoryIndex, Offset, Alignment, LoadTy, ExtendTy, Signed);
4611
1.50k
    Stack.back() = Builder.createBitCast(Stack.back(), Context.Int64x2Ty);
4612
1.50k
  }
4613
  void compileSplatLoadOp(unsigned MemoryIndex, unsigned Offset,
4614
                          unsigned Alignment, LLVM::Type LoadTy,
4615
553
                          LLVM::Type VectorTy) noexcept {
4616
553
    compileLoadOp(MemoryIndex, Offset, Alignment, LoadTy);
4617
553
    compileSplatOp(VectorTy);
4618
553
  }
4619
  void compileLoadLaneOp(unsigned MemoryIndex, unsigned Offset,
4620
                         unsigned Alignment, unsigned Index, LLVM::Type LoadTy,
4621
434
                         LLVM::Type VectorTy) noexcept {
4622
434
    auto Vector = stackPop();
4623
434
    compileLoadOp(MemoryIndex, Offset, Alignment, LoadTy);
4624
    if constexpr (Endian::native == Endian::big) {
4625
      Index = VectorTy.getVectorSize() - 1 - Index;
4626
    }
4627
434
    auto Value = Stack.back();
4628
434
    Stack.back() = Builder.createBitCast(
4629
434
        Builder.createInsertElement(Builder.createBitCast(Vector, VectorTy),
4630
434
                                    Value, LLContext.getInt64(Index)),
4631
434
        Context.Int64x2Ty);
4632
434
  }
4633
  void compileStoreOp(unsigned MemoryIndex, unsigned Offset, unsigned Alignment,
4634
                      LLVM::Type LoadTy, bool Trunc = false,
4635
3.08k
                      bool BitCast = false) noexcept {
4636
3.08k
    if constexpr (kForceUnalignment) {
4637
3.08k
      Alignment = 0;
4638
3.08k
    }
4639
3.08k
    auto V = stackPop();
4640
3.08k
    auto Off = Builder.createZExt(stackPop(), Context.Int64Ty);
4641
3.08k
    if (Offset != 0) {
4642
2.31k
      Off = Builder.createAdd(Off, LLContext.getInt64(Offset));
4643
2.31k
    }
4644
4645
3.08k
    if (Trunc) {
4646
586
      V = Builder.createTrunc(V, LoadTy);
4647
586
    }
4648
3.08k
    if (BitCast) {
4649
267
      V = Builder.createBitCast(V, LoadTy);
4650
267
    }
4651
3.08k
    V = switchEndian(V);
4652
3.08k
    auto VPtr = Builder.createInBoundsGEP1(
4653
3.08k
        Context.Int8Ty, Context.getMemory(Builder, ExecCtx, MemoryIndex), Off);
4654
3.08k
    auto Ptr = Builder.createBitCast(VPtr, LoadTy.getPointerTo());
4655
3.08k
    auto StoreInst = Builder.createStore(V, Ptr, true);
4656
3.08k
    StoreInst.setAlignment(1 << Alignment);
4657
3.08k
  }
4658
  void compileStoreLaneOp(unsigned MemoryIndex, unsigned Offset,
4659
                          unsigned Alignment, unsigned Index, LLVM::Type LoadTy,
4660
338
                          LLVM::Type VectorTy) noexcept {
4661
338
    auto Vector = Stack.back();
4662
    if constexpr (Endian::native == Endian::big) {
4663
      Index = VectorTy.getVectorSize() - Index - 1;
4664
    }
4665
338
    Stack.back() = Builder.createExtractElement(
4666
338
        Builder.createBitCast(Vector, VectorTy), LLContext.getInt64(Index));
4667
338
    compileStoreOp(MemoryIndex, Offset, Alignment, LoadTy);
4668
338
  }
4669
44.8k
  void compileSplatOp(LLVM::Type VectorTy) noexcept {
4670
44.8k
    auto Undef = LLVM::Value::getUndef(VectorTy);
4671
44.8k
    auto Zeros = LLVM::Value::getConstNull(
4672
44.8k
        LLVM::Type::getVectorType(Context.Int32Ty, VectorTy.getVectorSize()));
4673
44.8k
    auto Value = Builder.createTrunc(Stack.back(), VectorTy.getElementType());
4674
44.8k
    auto Vector =
4675
44.8k
        Builder.createInsertElement(Undef, Value, LLContext.getInt64(0));
4676
44.8k
    Vector = Builder.createShuffleVector(Vector, Undef, Zeros);
4677
4678
44.8k
    Stack.back() = Builder.createBitCast(Vector, Context.Int64x2Ty);
4679
44.8k
  }
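compileSplatOp uses the standard LLVM broadcast idiom: truncate the scalar to the element type, insert it into lane 0 of an undef vector, then shuffle with an all-zero index mask so every lane reads lane 0. A scalar sketch of the resulting semantics for i8x16.splat (illustrative only, not part of compiler.cpp):

    #include <array>
    #include <cstdint>

    // Every lane receives the input value truncated to the element width.
    std::array<uint8_t, 16> splatI8x16(uint32_t X) {
      std::array<uint8_t, 16> Out{};
      const auto Elem = static_cast<uint8_t>(X); // trunc to element type
      for (auto &Lane : Out) {
        Lane = Elem; // shuffle with an all-zero mask == broadcast lane 0
      }
      return Out;
    }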
4680
1.31k
  void compileExtractLaneOp(LLVM::Type VectorTy, unsigned Index) noexcept {
4681
1.31k
    auto Vector = Builder.createBitCast(Stack.back(), VectorTy);
4682
    if constexpr (Endian::native == Endian::big) {
4683
      Index = VectorTy.getVectorSize() - Index - 1;
4684
    }
4685
1.31k
    Stack.back() =
4686
1.31k
        Builder.createExtractElement(Vector, LLContext.getInt64(Index));
4687
1.31k
  }
4688
  void compileExtractLaneOp(LLVM::Type VectorTy, unsigned Index,
4689
984
                            LLVM::Type ExtendTy, bool Signed) noexcept {
4690
984
    compileExtractLaneOp(VectorTy, Index);
4691
984
    if (Signed) {
4692
501
      Stack.back() = Builder.createSExt(Stack.back(), ExtendTy);
4693
501
    } else {
4694
483
      Stack.back() = Builder.createZExt(Stack.back(), ExtendTy);
4695
483
    }
4696
984
  }
4697
671
  void compileReplaceLaneOp(LLVM::Type VectorTy, unsigned Index) noexcept {
4698
671
    auto Value = Builder.createTrunc(stackPop(), VectorTy.getElementType());
4699
671
    auto Vector = Stack.back();
4700
    if constexpr (Endian::native == Endian::big) {
4701
      Index = VectorTy.getVectorSize() - Index - 1;
4702
    }
4703
671
    Stack.back() = Builder.createBitCast(
4704
671
        Builder.createInsertElement(Builder.createBitCast(Vector, VectorTy),
4705
671
                                    Value, LLContext.getInt64(Index)),
4706
671
        Context.Int64x2Ty);
4707
671
  }
4708
  void compileVectorCompareOp(LLVM::Type VectorTy,
4709
4.91k
                              LLVMIntPredicate Predicate) noexcept {
4710
4.91k
    auto RHS = stackPop();
4711
4.91k
    auto LHS = stackPop();
4712
4.91k
    auto Result = Builder.createSExt(
4713
4.91k
        Builder.createICmp(Predicate, Builder.createBitCast(LHS, VectorTy),
4714
4.91k
                           Builder.createBitCast(RHS, VectorTy)),
4715
4.91k
        VectorTy);
4716
4.91k
    stackPush(Builder.createBitCast(Result, Context.Int64x2Ty));
4717
4.91k
  }
4718
  void compileVectorCompareOp(LLVM::Type VectorTy, LLVMRealPredicate Predicate,
4719
3.35k
                              LLVM::Type ResultTy) noexcept {
4720
3.35k
    auto RHS = stackPop();
4721
3.35k
    auto LHS = stackPop();
4722
3.35k
    auto Result = Builder.createSExt(
4723
3.35k
        Builder.createFCmp(Predicate, Builder.createBitCast(LHS, VectorTy),
4724
3.35k
                           Builder.createBitCast(RHS, VectorTy)),
4725
3.35k
        ResultTy);
4726
3.35k
    stackPush(Builder.createBitCast(Result, Context.Int64x2Ty));
4727
3.35k
  }
4728
  template <typename Func>
4729
24.1k
  void compileVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4730
24.1k
    auto V = Builder.createBitCast(Stack.back(), VectorTy);
4731
24.1k
    Stack.back() = Builder.createBitCast(Op(V), Context.Int64x2Ty);
4732
24.1k
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorAbs(WasmEdge::LLVM::Type)::{lambda(auto:1)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorAbs(WasmEdge::LLVM::Type)::{lambda(auto:1)#1}&&)
Line
Count
Source
4729
1.97k
  void compileVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4730
1.97k
    auto V = Builder.createBitCast(Stack.back(), VectorTy);
4731
1.97k
    Stack.back() = Builder.createBitCast(Op(V), Context.Int64x2Ty);
4732
1.97k
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorNeg(WasmEdge::LLVM::Type)::{lambda(auto:1)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorNeg(WasmEdge::LLVM::Type)::{lambda(auto:1)#1}&&)
Line
Count
Source
4729
2.40k
  void compileVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4730
2.40k
    auto V = Builder.createBitCast(Stack.back(), VectorTy);
4731
2.40k
    Stack.back() = Builder.createBitCast(Op(V), Context.Int64x2Ty);
4732
2.40k
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorPopcnt()::{lambda(auto:1)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorPopcnt()::{lambda(auto:1)#1}&&)
Line
Count
Source
4729
130
  void compileVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4730
130
    auto V = Builder.createBitCast(Stack.back(), VectorTy);
4731
130
    Stack.back() = Builder.createBitCast(Op(V), Context.Int64x2Ty);
4732
130
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorExtAddPairwise(WasmEdge::LLVM::Type, bool)::{lambda(auto:1)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorExtAddPairwise(WasmEdge::LLVM::Type, bool)::{lambda(auto:1)#1}&&)
Line
Count
Source
4729
2.12k
  void compileVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4730
2.12k
    auto V = Builder.createBitCast(Stack.back(), VectorTy);
4731
2.12k
    Stack.back() = Builder.createBitCast(Op(V), Context.Int64x2Ty);
4732
2.12k
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorFAbs(WasmEdge::LLVM::Type)::{lambda(auto:1)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorFAbs(WasmEdge::LLVM::Type)::{lambda(auto:1)#1}&&)
Line
Count
Source
4729
558
  void compileVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4730
558
    auto V = Builder.createBitCast(Stack.back(), VectorTy);
4731
558
    Stack.back() = Builder.createBitCast(Op(V), Context.Int64x2Ty);
4732
558
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorFNeg(WasmEdge::LLVM::Type)::{lambda(auto:1)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorFNeg(WasmEdge::LLVM::Type)::{lambda(auto:1)#1}&&)
Line
Count
Source
4729
938
  void compileVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4730
938
    auto V = Builder.createBitCast(Stack.back(), VectorTy);
4731
938
    Stack.back() = Builder.createBitCast(Op(V), Context.Int64x2Ty);
4732
938
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorFSqrt(WasmEdge::LLVM::Type)::{lambda(auto:1)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorFSqrt(WasmEdge::LLVM::Type)::{lambda(auto:1)#1}&&)
Line
Count
Source
4729
301
  void compileVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4730
301
    auto V = Builder.createBitCast(Stack.back(), VectorTy);
4731
301
    Stack.back() = Builder.createBitCast(Op(V), Context.Int64x2Ty);
4732
301
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorFCeil(WasmEdge::LLVM::Type)::{lambda(auto:1)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorFCeil(WasmEdge::LLVM::Type)::{lambda(auto:1)#1}&&)
Line
Count
Source
4729
1.26k
  void compileVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4730
1.26k
    auto V = Builder.createBitCast(Stack.back(), VectorTy);
4731
1.26k
    Stack.back() = Builder.createBitCast(Op(V), Context.Int64x2Ty);
4732
1.26k
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorFFloor(WasmEdge::LLVM::Type)::{lambda(auto:1)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorFFloor(WasmEdge::LLVM::Type)::{lambda(auto:1)#1}&&)
Line
Count
Source
4729
2.08k
  void compileVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4730
2.08k
    auto V = Builder.createBitCast(Stack.back(), VectorTy);
4731
2.08k
    Stack.back() = Builder.createBitCast(Op(V), Context.Int64x2Ty);
4732
2.08k
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorFTrunc(WasmEdge::LLVM::Type)::{lambda(auto:1)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorFTrunc(WasmEdge::LLVM::Type)::{lambda(auto:1)#1}&&)
Line
Count
Source
4729
1.63k
  void compileVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4730
1.63k
    auto V = Builder.createBitCast(Stack.back(), VectorTy);
4731
1.63k
    Stack.back() = Builder.createBitCast(Op(V), Context.Int64x2Ty);
4732
1.63k
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorFNearest(WasmEdge::LLVM::Type)::{lambda(auto:1)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorFNearest(WasmEdge::LLVM::Type)::{lambda(auto:1)#1}&&)
Line
Count
Source
4729
354
  void compileVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4730
354
    auto V = Builder.createBitCast(Stack.back(), VectorTy);
4731
354
    Stack.back() = Builder.createBitCast(Op(V), Context.Int64x2Ty);
4732
354
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorTruncSatS32(WasmEdge::LLVM::Type, bool)::{lambda(auto:1)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorTruncSatS32(WasmEdge::LLVM::Type, bool)::{lambda(auto:1)#1}&&)
Line
Count
Source
4729
905
  void compileVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4730
905
    auto V = Builder.createBitCast(Stack.back(), VectorTy);
4731
905
    Stack.back() = Builder.createBitCast(Op(V), Context.Int64x2Ty);
4732
905
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorTruncSatU32(WasmEdge::LLVM::Type, bool)::{lambda(auto:1)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorTruncSatU32(WasmEdge::LLVM::Type, bool)::{lambda(auto:1)#1}&&)
Line
Count
Source
4729
5.78k
  void compileVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4730
5.78k
    auto V = Builder.createBitCast(Stack.back(), VectorTy);
4731
5.78k
    Stack.back() = Builder.createBitCast(Op(V), Context.Int64x2Ty);
4732
5.78k
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorConvertS(WasmEdge::LLVM::Type, WasmEdge::LLVM::Type, bool)::{lambda(auto:1)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorConvertS(WasmEdge::LLVM::Type, WasmEdge::LLVM::Type, bool)::{lambda(auto:1)#1}&&)
Line
Count
Source
4729
667
  void compileVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4730
667
    auto V = Builder.createBitCast(Stack.back(), VectorTy);
4731
667
    Stack.back() = Builder.createBitCast(Op(V), Context.Int64x2Ty);
4732
667
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorConvertU(WasmEdge::LLVM::Type, WasmEdge::LLVM::Type, bool)::{lambda(auto:1)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorConvertU(WasmEdge::LLVM::Type, WasmEdge::LLVM::Type, bool)::{lambda(auto:1)#1}&&)
Line
Count
Source
4729
1.91k
  void compileVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4730
1.91k
    auto V = Builder.createBitCast(Stack.back(), VectorTy);
4731
1.91k
    Stack.back() = Builder.createBitCast(Op(V), Context.Int64x2Ty);
4732
1.91k
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorDemote()::{lambda(auto:1)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorDemote()::{lambda(auto:1)#1}&&)
Line
Count
Source
4729
567
  void compileVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4730
567
    auto V = Builder.createBitCast(Stack.back(), VectorTy);
4731
567
    Stack.back() = Builder.createBitCast(Op(V), Context.Int64x2Ty);
4732
567
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorPromote()::{lambda(auto:1)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorPromote()::{lambda(auto:1)#1}&&)
Line
Count
Source
4729
554
  void compileVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4730
554
    auto V = Builder.createBitCast(Stack.back(), VectorTy);
4731
554
    Stack.back() = Builder.createBitCast(Op(V), Context.Int64x2Ty);
4732
554
  }
4733
1.97k
  void compileVectorAbs(LLVM::Type VectorTy) noexcept {
4734
1.97k
    compileVectorOp(VectorTy, [this, VectorTy](auto V) noexcept {
4735
1.97k
      auto Zero = LLVM::Value::getConstNull(VectorTy);
4736
1.97k
      auto C = Builder.createICmpSLT(V, Zero);
4737
1.97k
      return Builder.createSelect(C, Builder.createNeg(V), V);
4738
1.97k
    });
4739
1.97k
  }
4740
2.40k
  void compileVectorNeg(LLVM::Type VectorTy) noexcept {
4741
2.40k
    compileVectorOp(VectorTy,
4742
2.40k
                    [this](auto V) noexcept { return Builder.createNeg(V); });
4743
2.40k
  }
4744
130
  void compileVectorPopcnt() noexcept {
4745
130
    compileVectorOp(Context.Int8x16Ty, [this](auto V) noexcept {
4746
130
      assuming(LLVM::Core::Ctpop != LLVM::Core::NotIntrinsic);
4747
130
      return Builder.createUnaryIntrinsic(LLVM::Core::Ctpop, V);
4748
130
    });
4749
130
  }
4750
  template <typename Func>
4751
1.88k
  void compileVectorReduceIOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4752
1.88k
    auto V = Builder.createBitCast(Stack.back(), VectorTy);
4753
1.88k
    Stack.back() = Builder.createZExt(Op(V), Context.Int32Ty);
4754
1.88k
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorReduceIOp<(anonymous namespace)::FunctionCompiler::compileVectorAnyTrue()::{lambda(auto:1)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorAnyTrue()::{lambda(auto:1)#1}&&)
Line
Count
Source
4751
107
  void compileVectorReduceIOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4752
107
    auto V = Builder.createBitCast(Stack.back(), VectorTy);
4753
107
    Stack.back() = Builder.createZExt(Op(V), Context.Int32Ty);
4754
107
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorReduceIOp<(anonymous namespace)::FunctionCompiler::compileVectorAllTrue(WasmEdge::LLVM::Type)::{lambda(auto:1)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorAllTrue(WasmEdge::LLVM::Type)::{lambda(auto:1)#1}&&)
Line
Count
Source
4751
874
  void compileVectorReduceIOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4752
874
    auto V = Builder.createBitCast(Stack.back(), VectorTy);
4753
874
    Stack.back() = Builder.createZExt(Op(V), Context.Int32Ty);
4754
874
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorReduceIOp<(anonymous namespace)::FunctionCompiler::compileVectorBitMask(WasmEdge::LLVM::Type)::{lambda(auto:1)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorBitMask(WasmEdge::LLVM::Type)::{lambda(auto:1)#1}&&)
Line
Count
Source
4751
906
  void compileVectorReduceIOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4752
906
    auto V = Builder.createBitCast(Stack.back(), VectorTy);
4753
906
    Stack.back() = Builder.createZExt(Op(V), Context.Int32Ty);
4754
906
  }
4755
107
  void compileVectorAnyTrue() noexcept {
4756
107
    compileVectorReduceIOp(Context.Int128x1Ty, [this](auto V) noexcept {
4757
107
      auto Zero = LLVM::Value::getConstNull(Context.Int128x1Ty);
4758
107
      return Builder.createBitCast(Builder.createICmpNE(V, Zero),
4759
107
                                   LLContext.getInt1Ty());
4760
107
    });
4761
107
  }
4762
874
  void compileVectorAllTrue(LLVM::Type VectorTy) noexcept {
4763
874
    compileVectorReduceIOp(VectorTy, [this, VectorTy](auto V) noexcept {
4764
874
      const auto Size = VectorTy.getVectorSize();
4765
874
      auto IntType = LLContext.getIntNTy(Size);
4766
874
      auto Zero = LLVM::Value::getConstNull(VectorTy);
4767
874
      auto Cmp = Builder.createBitCast(Builder.createICmpEQ(V, Zero), IntType);
4768
874
      auto CmpZero = LLVM::Value::getConstInt(IntType, 0);
4769
874
      return Builder.createICmpEQ(Cmp, CmpZero);
4770
874
    });
4771
874
  }
4772
906
  void compileVectorBitMask(LLVM::Type VectorTy) noexcept {
4773
906
    compileVectorReduceIOp(VectorTy, [this, VectorTy](auto V) noexcept {
4774
906
      const auto Size = VectorTy.getVectorSize();
4775
906
      auto IntType = LLContext.getIntNTy(Size);
4776
906
      auto Zero = LLVM::Value::getConstNull(VectorTy);
4777
906
      return Builder.createBitCast(Builder.createICmpSLT(V, Zero), IntType);
4778
906
    });
4779
906
  }
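compileVectorBitMask compares each lane against zero with a signed less-than, bitcasts the resulting <N x i1> to an N-bit integer, and zero-extends it to i32, so bit I of the result is the sign bit of lane I. A scalar sketch for i8x16.bitmask (illustrative only, not part of compiler.cpp):

    #include <array>
    #include <cstdint>

    uint32_t bitmaskI8x16(const std::array<int8_t, 16> &V) {
      uint32_t Mask = 0;
      for (uint32_t I = 0; I < 16; ++I) {
        if (V[I] < 0) {     // icmp slt lane, 0
          Mask |= 1u << I;  // bitcast <16 x i1> -> i16, then zext to i32
        }
      }
      return Mask;
    }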
4780
  template <typename Func>
4781
3.24k
  void compileVectorShiftOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4782
3.24k
    const bool Trunc = VectorTy.getElementType().getIntegerBitWidth() < 32;
4783
3.24k
    const uint32_t Mask = VectorTy.getElementType().getIntegerBitWidth() - 1;
4784
3.24k
    auto N = Builder.createAnd(stackPop(), LLContext.getInt32(Mask));
4785
3.24k
    auto RHS = Builder.createVectorSplat(
4786
3.24k
        VectorTy.getVectorSize(),
4787
3.24k
        Trunc ? Builder.createTrunc(N, VectorTy.getElementType())
4788
3.24k
              : Builder.createZExtOrTrunc(N, VectorTy.getElementType()));
4789
3.24k
    auto LHS = Builder.createBitCast(stackPop(), VectorTy);
4790
3.24k
    stackPush(Builder.createBitCast(Op(LHS, RHS), Context.Int64x2Ty));
4791
3.24k
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorShiftOp<(anonymous namespace)::FunctionCompiler::compileVectorShl(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorShl(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}&&)
Line
Count
Source
4781
1.23k
  void compileVectorShiftOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4782
1.23k
    const bool Trunc = VectorTy.getElementType().getIntegerBitWidth() < 32;
4783
1.23k
    const uint32_t Mask = VectorTy.getElementType().getIntegerBitWidth() - 1;
4784
1.23k
    auto N = Builder.createAnd(stackPop(), LLContext.getInt32(Mask));
4785
1.23k
    auto RHS = Builder.createVectorSplat(
4786
1.23k
        VectorTy.getVectorSize(),
4787
1.23k
        Trunc ? Builder.createTrunc(N, VectorTy.getElementType())
4788
1.23k
              : Builder.createZExtOrTrunc(N, VectorTy.getElementType()));
4789
1.23k
    auto LHS = Builder.createBitCast(stackPop(), VectorTy);
4790
1.23k
    stackPush(Builder.createBitCast(Op(LHS, RHS), Context.Int64x2Ty));
4791
1.23k
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorShiftOp<(anonymous namespace)::FunctionCompiler::compileVectorAShr(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorAShr(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}&&)
Line
Count
Source
4781
1.74k
  void compileVectorShiftOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4782
1.74k
    const bool Trunc = VectorTy.getElementType().getIntegerBitWidth() < 32;
4783
1.74k
    const uint32_t Mask = VectorTy.getElementType().getIntegerBitWidth() - 1;
4784
1.74k
    auto N = Builder.createAnd(stackPop(), LLContext.getInt32(Mask));
4785
1.74k
    auto RHS = Builder.createVectorSplat(
4786
1.74k
        VectorTy.getVectorSize(),
4787
1.74k
        Trunc ? Builder.createTrunc(N, VectorTy.getElementType())
4788
1.74k
              : Builder.createZExtOrTrunc(N, VectorTy.getElementType()));
4789
1.74k
    auto LHS = Builder.createBitCast(stackPop(), VectorTy);
4790
1.74k
    stackPush(Builder.createBitCast(Op(LHS, RHS), Context.Int64x2Ty));
4791
1.74k
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorShiftOp<(anonymous namespace)::FunctionCompiler::compileVectorLShr(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorLShr(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}&&)
Line
Count
Source
4781
268
  void compileVectorShiftOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4782
268
    const bool Trunc = VectorTy.getElementType().getIntegerBitWidth() < 32;
4783
268
    const uint32_t Mask = VectorTy.getElementType().getIntegerBitWidth() - 1;
4784
268
    auto N = Builder.createAnd(stackPop(), LLContext.getInt32(Mask));
4785
268
    auto RHS = Builder.createVectorSplat(
4786
268
        VectorTy.getVectorSize(),
4787
268
        Trunc ? Builder.createTrunc(N, VectorTy.getElementType())
4788
268
              : Builder.createZExtOrTrunc(N, VectorTy.getElementType()));
4789
268
    auto LHS = Builder.createBitCast(stackPop(), VectorTy);
4790
268
    stackPush(Builder.createBitCast(Op(LHS, RHS), Context.Int64x2Ty));
4791
268
  }
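compileVectorShiftOp masks the scalar shift count with the element bit width minus one, as the Wasm SIMD spec requires, splats it across the vector, and lets the caller's lambda choose shl, lshr, or ashr. A one-lane sketch for i16x8.shl (illustrative only, not part of compiler.cpp):

    #include <cstdint>

    uint16_t shlI16Lane(uint16_t Lane, uint32_t N) {
      const uint32_t Shift = N & 15u; // mask = element bit width - 1
      return static_cast<uint16_t>(Lane << Shift);
    }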
4792
1.23k
  void compileVectorShl(LLVM::Type VectorTy) noexcept {
4793
1.23k
    compileVectorShiftOp(VectorTy, [this](auto LHS, auto RHS) noexcept {
4794
1.23k
      return Builder.createShl(LHS, RHS);
4795
1.23k
    });
4796
1.23k
  }
4797
268
  void compileVectorLShr(LLVM::Type VectorTy) noexcept {
4798
268
    compileVectorShiftOp(VectorTy, [this](auto LHS, auto RHS) noexcept {
4799
268
      return Builder.createLShr(LHS, RHS);
4800
268
    });
4801
268
  }
4802
1.74k
  void compileVectorAShr(LLVM::Type VectorTy) noexcept {
4803
1.74k
    compileVectorShiftOp(VectorTy, [this](auto LHS, auto RHS) noexcept {
4804
1.74k
      return Builder.createAShr(LHS, RHS);
4805
1.74k
    });
4806
1.74k
  }
4807
  template <typename Func>
4808
7.23k
  void compileVectorVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4809
7.23k
    auto RHS = Builder.createBitCast(stackPop(), VectorTy);
4810
7.23k
    auto LHS = Builder.createBitCast(stackPop(), VectorTy);
4811
7.23k
    stackPush(Builder.createBitCast(Op(LHS, RHS), Context.Int64x2Ty));
4812
7.23k
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorVectorAdd(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorVectorAdd(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}&&)
Line
Count
Source
4808
299
  void compileVectorVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4809
299
    auto RHS = Builder.createBitCast(stackPop(), VectorTy);
4810
299
    auto LHS = Builder.createBitCast(stackPop(), VectorTy);
4811
299
    stackPush(Builder.createBitCast(Op(LHS, RHS), Context.Int64x2Ty));
4812
299
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorVectorAddSat(WasmEdge::LLVM::Type, bool)::{lambda(auto:1, auto:2)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorVectorAddSat(WasmEdge::LLVM::Type, bool)::{lambda(auto:1, auto:2)#1}&&)
Line
Count
Source
4808
1.17k
  void compileVectorVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4809
1.17k
    auto RHS = Builder.createBitCast(stackPop(), VectorTy);
4810
1.17k
    auto LHS = Builder.createBitCast(stackPop(), VectorTy);
4811
1.17k
    stackPush(Builder.createBitCast(Op(LHS, RHS), Context.Int64x2Ty));
4812
1.17k
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorVectorSub(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorVectorSub(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}&&)
Line
Count
Source
4808
827
  void compileVectorVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4809
827
    auto RHS = Builder.createBitCast(stackPop(), VectorTy);
4810
827
    auto LHS = Builder.createBitCast(stackPop(), VectorTy);
4811
827
    stackPush(Builder.createBitCast(Op(LHS, RHS), Context.Int64x2Ty));
4812
827
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorVectorSubSat(WasmEdge::LLVM::Type, bool)::{lambda(auto:1, auto:2)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorVectorSubSat(WasmEdge::LLVM::Type, bool)::{lambda(auto:1, auto:2)#1}&&)
Line
Count
Source
4808
371
  void compileVectorVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4809
371
    auto RHS = Builder.createBitCast(stackPop(), VectorTy);
4810
371
    auto LHS = Builder.createBitCast(stackPop(), VectorTy);
4811
371
    stackPush(Builder.createBitCast(Op(LHS, RHS), Context.Int64x2Ty));
4812
371
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorVectorSMin(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorVectorSMin(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}&&)
Line
Count
Source
4808
263
  void compileVectorVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4809
263
    auto RHS = Builder.createBitCast(stackPop(), VectorTy);
4810
263
    auto LHS = Builder.createBitCast(stackPop(), VectorTy);
4811
263
    stackPush(Builder.createBitCast(Op(LHS, RHS), Context.Int64x2Ty));
4812
263
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorVectorUMin(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorVectorUMin(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}&&)
Line
Count
Source
4808
244
  void compileVectorVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4809
244
    auto RHS = Builder.createBitCast(stackPop(), VectorTy);
4810
244
    auto LHS = Builder.createBitCast(stackPop(), VectorTy);
4811
244
    stackPush(Builder.createBitCast(Op(LHS, RHS), Context.Int64x2Ty));
4812
244
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorVectorSMax(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorVectorSMax(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}&&)
Line
Count
Source
4808
397
  void compileVectorVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4809
397
    auto RHS = Builder.createBitCast(stackPop(), VectorTy);
4810
397
    auto LHS = Builder.createBitCast(stackPop(), VectorTy);
4811
397
    stackPush(Builder.createBitCast(Op(LHS, RHS), Context.Int64x2Ty));
4812
397
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorVectorUMax(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorVectorUMax(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}&&)
Line
Count
Source
4808
754
  void compileVectorVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4809
754
    auto RHS = Builder.createBitCast(stackPop(), VectorTy);
4810
754
    auto LHS = Builder.createBitCast(stackPop(), VectorTy);
4811
754
    stackPush(Builder.createBitCast(Op(LHS, RHS), Context.Int64x2Ty));
4812
754
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorVectorUAvgr(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorVectorUAvgr(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}&&)
Line
Count
Source
4808
220
  void compileVectorVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4809
220
    auto RHS = Builder.createBitCast(stackPop(), VectorTy);
4810
220
    auto LHS = Builder.createBitCast(stackPop(), VectorTy);
4811
220
    stackPush(Builder.createBitCast(Op(LHS, RHS), Context.Int64x2Ty));
4812
220
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorVectorMul(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorVectorMul(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}&&)
Line
Count
Source
4808
424
  void compileVectorVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4809
424
    auto RHS = Builder.createBitCast(stackPop(), VectorTy);
4810
424
    auto LHS = Builder.createBitCast(stackPop(), VectorTy);
4811
424
    stackPush(Builder.createBitCast(Op(LHS, RHS), Context.Int64x2Ty));
4812
424
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorVectorQ15MulSat()::{lambda(auto:1, auto:2)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorVectorQ15MulSat()::{lambda(auto:1, auto:2)#1}&&)
Line
Count
Source
4808
134
  void compileVectorVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4809
134
    auto RHS = Builder.createBitCast(stackPop(), VectorTy);
4810
134
    auto LHS = Builder.createBitCast(stackPop(), VectorTy);
4811
134
    stackPush(Builder.createBitCast(Op(LHS, RHS), Context.Int64x2Ty));
4812
134
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorVectorFAdd(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorVectorFAdd(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}&&)
Line
Count
Source
4808
179
  void compileVectorVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4809
179
    auto RHS = Builder.createBitCast(stackPop(), VectorTy);
4810
179
    auto LHS = Builder.createBitCast(stackPop(), VectorTy);
4811
179
    stackPush(Builder.createBitCast(Op(LHS, RHS), Context.Int64x2Ty));
4812
179
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorVectorFSub(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorVectorFSub(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}&&)
Line
Count
Source
4808
449
  void compileVectorVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4809
449
    auto RHS = Builder.createBitCast(stackPop(), VectorTy);
4810
449
    auto LHS = Builder.createBitCast(stackPop(), VectorTy);
4811
449
    stackPush(Builder.createBitCast(Op(LHS, RHS), Context.Int64x2Ty));
4812
449
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorVectorFMul(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorVectorFMul(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}&&)
Line
Count
Source
4808
180
  void compileVectorVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4809
180
    auto RHS = Builder.createBitCast(stackPop(), VectorTy);
4810
180
    auto LHS = Builder.createBitCast(stackPop(), VectorTy);
4811
180
    stackPush(Builder.createBitCast(Op(LHS, RHS), Context.Int64x2Ty));
4812
180
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorVectorFDiv(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorVectorFDiv(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}&&)
Line
Count
Source
4808
224
  void compileVectorVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4809
224
    auto RHS = Builder.createBitCast(stackPop(), VectorTy);
4810
224
    auto LHS = Builder.createBitCast(stackPop(), VectorTy);
4811
224
    stackPush(Builder.createBitCast(Op(LHS, RHS), Context.Int64x2Ty));
4812
224
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorVectorFMin(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorVectorFMin(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}&&)
Line
Count
Source
4808
285
  void compileVectorVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4809
285
    auto RHS = Builder.createBitCast(stackPop(), VectorTy);
4810
285
    auto LHS = Builder.createBitCast(stackPop(), VectorTy);
4811
285
    stackPush(Builder.createBitCast(Op(LHS, RHS), Context.Int64x2Ty));
4812
285
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorVectorFMax(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorVectorFMax(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}&&)
Line
Count
Source
4808
196
  void compileVectorVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4809
196
    auto RHS = Builder.createBitCast(stackPop(), VectorTy);
4810
196
    auto LHS = Builder.createBitCast(stackPop(), VectorTy);
4811
196
    stackPush(Builder.createBitCast(Op(LHS, RHS), Context.Int64x2Ty));
4812
196
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorVectorFPMin(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorVectorFPMin(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}&&)
Line
Count
Source
4808
298
  void compileVectorVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4809
298
    auto RHS = Builder.createBitCast(stackPop(), VectorTy);
4810
298
    auto LHS = Builder.createBitCast(stackPop(), VectorTy);
4811
298
    stackPush(Builder.createBitCast(Op(LHS, RHS), Context.Int64x2Ty));
4812
298
  }
compiler.cpp:void (anonymous namespace)::FunctionCompiler::compileVectorVectorOp<(anonymous namespace)::FunctionCompiler::compileVectorVectorFPMax(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}>(WasmEdge::LLVM::Type, (anonymous namespace)::FunctionCompiler::compileVectorVectorFPMax(WasmEdge::LLVM::Type)::{lambda(auto:1, auto:2)#1}&&)
Line
Count
Source
4808
311
  void compileVectorVectorOp(LLVM::Type VectorTy, Func &&Op) noexcept {
4809
311
    auto RHS = Builder.createBitCast(stackPop(), VectorTy);
4810
311
    auto LHS = Builder.createBitCast(stackPop(), VectorTy);
4811
311
    stackPush(Builder.createBitCast(Op(LHS, RHS), Context.Int64x2Ty));
4812
311
  }
4813
299
  void compileVectorVectorAdd(LLVM::Type VectorTy) noexcept {
4814
299
    compileVectorVectorOp(VectorTy, [this](auto LHS, auto RHS) noexcept {
4815
299
      return Builder.createAdd(LHS, RHS);
4816
299
    });
4817
299
  }
4818
1.17k
  void compileVectorVectorAddSat(LLVM::Type VectorTy, bool Signed) noexcept {
4819
1.17k
    auto ID = Signed ? LLVM::Core::SAddSat : LLVM::Core::UAddSat;
4820
1.17k
    assuming(ID != LLVM::Core::NotIntrinsic);
4821
1.17k
    compileVectorVectorOp(
4822
1.17k
        VectorTy, [this, VectorTy, ID](auto LHS, auto RHS) noexcept {
4823
1.17k
          return Builder.createIntrinsic(ID, {VectorTy}, {LHS, RHS});
4824
1.17k
        });
4825
1.17k
  }
4826
827
  void compileVectorVectorSub(LLVM::Type VectorTy) noexcept {
4827
827
    compileVectorVectorOp(VectorTy, [this](auto LHS, auto RHS) noexcept {
4828
827
      return Builder.createSub(LHS, RHS);
4829
827
    });
4830
827
  }
4831
371
  void compileVectorVectorSubSat(LLVM::Type VectorTy, bool Signed) noexcept {
4832
371
    auto ID = Signed ? LLVM::Core::SSubSat : LLVM::Core::USubSat;
4833
371
    assuming(ID != LLVM::Core::NotIntrinsic);
4834
371
    compileVectorVectorOp(
4835
371
        VectorTy, [this, VectorTy, ID](auto LHS, auto RHS) noexcept {
4836
371
          return Builder.createIntrinsic(ID, {VectorTy}, {LHS, RHS});
4837
371
        });
4838
371
  }
4839
424
  void compileVectorVectorMul(LLVM::Type VectorTy) noexcept {
4840
424
    compileVectorVectorOp(VectorTy, [this](auto LHS, auto RHS) noexcept {
4841
424
      return Builder.createMul(LHS, RHS);
4842
424
    });
4843
424
  }
4844
65
  void compileVectorSwizzle() noexcept {
4845
65
    auto Index = Builder.createBitCast(stackPop(), Context.Int8x16Ty);
4846
65
    auto Vector = Builder.createBitCast(stackPop(), Context.Int8x16Ty);
4847
4848
65
#if defined(__x86_64__)
4849
65
    if (Context.SupportSSSE3) {
4850
65
      auto Magic = Builder.createVectorSplat(16, LLContext.getInt8(112));
4851
65
      auto Added = Builder.createAdd(Index, Magic);
4852
65
      auto NewIndex = Builder.createSelect(
4853
65
          Builder.createICmpUGT(Index, Added),
4854
65
          LLVM::Value::getConstAllOnes(Context.Int8x16Ty), Added);
4855
65
      assuming(LLVM::Core::X86SSSE3PShufB128 != LLVM::Core::NotIntrinsic);
4856
65
      stackPush(Builder.createBitCast(
4857
65
          Builder.createIntrinsic(LLVM::Core::X86SSSE3PShufB128, {},
4858
65
                                  {Vector, NewIndex}),
4859
65
          Context.Int64x2Ty));
4860
65
      return;
4861
65
    }
4862
0
#endif
4863
4864
#if defined(__aarch64__)
4865
    if (Context.SupportNEON) {
4866
      assuming(LLVM::Core::AArch64NeonTbl1 != LLVM::Core::NotIntrinsic);
4867
      stackPush(Builder.createBitCast(
4868
          Builder.createIntrinsic(LLVM::Core::AArch64NeonTbl1,
4869
                                  {Context.Int8x16Ty}, {Vector, Index}),
4870
          Context.Int64x2Ty));
4871
      return;
4872
    }
4873
#endif
4874
4875
0
    auto Mask = Builder.createVectorSplat(16, LLContext.getInt8(15));
4876
0
    auto Zero = Builder.createVectorSplat(16, LLContext.getInt8(0));
4877
4878
#if defined(__s390x__)
4879
    assuming(LLVM::Core::S390VPerm != LLVM::Core::NotIntrinsic);
4880
    auto Exceed = Builder.createICmpULE(Index, Mask);
4881
    Index = Builder.createSub(Mask, Index);
4882
    auto Result = Builder.createIntrinsic(LLVM::Core::S390VPerm, {},
4883
                                          {Vector, Zero, Index});
4884
    Result = Builder.createSelect(Exceed, Result, Zero);
4885
    stackPush(Builder.createBitCast(Result, Context.Int64x2Ty));
4886
    return;
4887
#endif
4888
4889
    // Fallback case.
4890
    // If the SSSE3 is not supported on the x86_64 platform or
4891
    // the NEON is not supported on the aarch64 platform,
4892
    // then fall back to this.
4893
0
    auto IsOver = Builder.createICmpUGT(Index, Mask);
4894
0
    auto InboundIndex = Builder.createAnd(Index, Mask);
4895
0
    auto Array = Builder.createArray(16, 1);
4896
0
    for (size_t I = 0; I < 16; ++I) {
4897
0
      Builder.createStore(
4898
0
          Builder.createExtractElement(Vector, LLContext.getInt64(I)),
4899
0
          Builder.createInBoundsGEP1(Context.Int8Ty, Array,
4900
0
                                     LLContext.getInt64(I)));
4901
0
    }
4902
0
    LLVM::Value Ret = LLVM::Value::getUndef(Context.Int8x16Ty);
4903
0
    for (size_t I = 0; I < 16; ++I) {
4904
0
      auto Idx =
4905
0
          Builder.createExtractElement(InboundIndex, LLContext.getInt64(I));
4906
0
      auto Value = Builder.createLoad(
4907
0
          Context.Int8Ty,
4908
0
          Builder.createInBoundsGEP1(Context.Int8Ty, Array, Idx));
4909
0
      Ret = Builder.createInsertElement(Ret, Value, LLContext.getInt64(I));
4910
0
    }
4911
0
    Ret = Builder.createSelect(IsOver, Zero, Ret);
4912
0
    stackPush(Builder.createBitCast(Ret, Context.Int64x2Ty));
4913
0
  }
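For swizzle, the SSSE3 path adds 112 to every index byte: any index of 16 or more then either sets the high bit of the control byte or wraps around (the unsigned compare against the sum catches the wrap and substitutes 0xFF), and PSHUFB writes zero for lanes whose control byte has the high bit set, which matches the Wasm rule that out-of-range indices select 0. A scalar sketch of those semantics (illustrative only, not part of compiler.cpp):

    #include <array>
    #include <cstdint>

    // Lane I of the result is Vec[Idx[I]] when the index is in range,
    // and 0 otherwise.
    std::array<uint8_t, 16> swizzleI8x16(const std::array<uint8_t, 16> &Vec,
                                         const std::array<uint8_t, 16> &Idx) {
      std::array<uint8_t, 16> Out{};
      for (unsigned I = 0; I < 16; ++I) {
        Out[I] = Idx[I] < 16 ? Vec[Idx[I]] : 0;
      }
      return Out;
    }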
4914
4915
134
  void compileVectorVectorQ15MulSat() noexcept {
4916
134
    compileVectorVectorOp(
4917
134
        Context.Int16x8Ty, [this](auto LHS, auto RHS) noexcept -> LLVM::Value {
4918
134
#if defined(__x86_64__)
4919
134
          if (Context.SupportSSSE3) {
4920
134
            assuming(LLVM::Core::X86SSSE3PMulHrSw128 !=
4921
134
                     LLVM::Core::NotIntrinsic);
4922
134
            auto Result = Builder.createIntrinsic(
4923
134
                LLVM::Core::X86SSSE3PMulHrSw128, {}, {LHS, RHS});
4924
134
            auto IntMaxV = Builder.createVectorSplat(
4925
134
                8, LLContext.getInt16(UINT16_C(0x8000)));
4926
134
            auto NotOver = Builder.createSExt(
4927
134
                Builder.createICmpEQ(Result, IntMaxV), Context.Int16x8Ty);
4928
134
            return Builder.createXor(Result, NotOver);
4929
134
          }
4930
0
#endif
4931
4932
#if defined(__aarch64__)
4933
          if (Context.SupportNEON) {
4934
            assuming(LLVM::Core::AArch64NeonSQRDMulH !=
4935
                     LLVM::Core::NotIntrinsic);
4936
            return Builder.createBinaryIntrinsic(
4937
                LLVM::Core::AArch64NeonSQRDMulH, LHS, RHS);
4938
          }
4939
#endif
4940
4941
          // Fallback case.
4942
          // If the SSSE3 is not supported on the x86_64 platform or
4943
          // the NEON is not supported on the aarch64 platform,
4944
          // then fall back to this.
4945
0
          auto ExtTy = Context.Int16x8Ty.getExtendedElementVectorType();
4946
0
          auto Offset = Builder.createVectorSplat(
4947
0
              8, LLContext.getInt32(UINT32_C(0x4000)));
4948
0
          auto Shift =
4949
0
              Builder.createVectorSplat(8, LLContext.getInt32(UINT32_C(15)));
4950
0
          auto ExtLHS = Builder.createSExt(LHS, ExtTy);
4951
0
          auto ExtRHS = Builder.createSExt(RHS, ExtTy);
4952
0
          auto Result = Builder.createTrunc(
4953
0
              Builder.createAShr(
4954
0
                  Builder.createAdd(Builder.createMul(ExtLHS, ExtRHS), Offset),
4955
0
                  Shift),
4956
0
              Context.Int16x8Ty);
4957
0
          auto IntMaxV = Builder.createVectorSplat(
4958
0
              8, LLContext.getInt16(UINT16_C(0x8000)));
4959
0
          auto NotOver = Builder.createSExt(
4960
0
              Builder.createICmpEQ(Result, IntMaxV), Context.Int16x8Ty);
4961
0
          return Builder.createXor(Result, NotOver);
4962
134
        });
4963
134
  }
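q15mulr_sat_s computes (a * b + 0x4000) >> 15 in a widened type; the only input pair that overflows the i16 range is INT16_MIN * INT16_MIN, which both the PMULHRSW path and the fallback detect as 0x8000 and flip to 0x7FFF via the XOR with the compare mask. A one-lane sketch (illustrative only, not part of compiler.cpp):

    #include <cstdint>

    int16_t q15MulrSat(int16_t A, int16_t B) {
      const int32_t Rounded = (static_cast<int32_t>(A) * B + 0x4000) >> 15;
      return Rounded > INT16_MAX ? INT16_MAX : static_cast<int16_t>(Rounded);
    }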
4964
263
  void compileVectorVectorSMin(LLVM::Type VectorTy) noexcept {
4965
263
    compileVectorVectorOp(VectorTy, [this](auto LHS, auto RHS) noexcept {
4966
263
      auto C = Builder.createICmpSLE(LHS, RHS);
4967
263
      return Builder.createSelect(C, LHS, RHS);
4968
263
    });
4969
263
  }
4970
244
  void compileVectorVectorUMin(LLVM::Type VectorTy) noexcept {
4971
244
    compileVectorVectorOp(VectorTy, [this](auto LHS, auto RHS) noexcept {
4972
244
      auto C = Builder.createICmpULE(LHS, RHS);
4973
244
      return Builder.createSelect(C, LHS, RHS);
4974
244
    });
4975
244
  }
4976
397
  void compileVectorVectorSMax(LLVM::Type VectorTy) noexcept {
4977
397
    compileVectorVectorOp(VectorTy, [this](auto LHS, auto RHS) noexcept {
4978
397
      auto C = Builder.createICmpSGE(LHS, RHS);
4979
397
      return Builder.createSelect(C, LHS, RHS);
4980
397
    });
4981
397
  }
4982
754
  void compileVectorVectorUMax(LLVM::Type VectorTy) noexcept {
4983
754
    compileVectorVectorOp(VectorTy, [this](auto LHS, auto RHS) noexcept {
4984
754
      auto C = Builder.createICmpUGE(LHS, RHS);
4985
754
      return Builder.createSelect(C, LHS, RHS);
4986
754
    });
4987
754
  }
4988
220
  void compileVectorVectorUAvgr(LLVM::Type VectorTy) noexcept {
4989
220
    auto ExtendTy = VectorTy.getExtendedElementVectorType();
4990
220
    compileVectorVectorOp(
4991
220
        VectorTy,
4992
220
        [this, VectorTy, ExtendTy](auto LHS, auto RHS) noexcept -> LLVM::Value {
4993
220
#if defined(__x86_64__)
4994
220
          if (Context.SupportSSE2) {
4995
220
            const auto ID = [VectorTy]() noexcept {
4996
220
              switch (VectorTy.getElementType().getIntegerBitWidth()) {
4997
116
              case 8:
4998
116
                return LLVM::Core::X86SSE2PAvgB;
4999
104
              case 16:
5000
104
                return LLVM::Core::X86SSE2PAvgW;
5001
0
              default:
5002
0
                assumingUnreachable();
5003
220
              }
5004
220
            }();
5005
220
            assuming(ID != LLVM::Core::NotIntrinsic);
5006
220
            return Builder.createIntrinsic(ID, {}, {LHS, RHS});
5007
220
          }
5008
0
#endif
5009
5010
#if defined(__aarch64__)
5011
          if (Context.SupportNEON) {
5012
            assuming(LLVM::Core::AArch64NeonURHAdd != LLVM::Core::NotIntrinsic);
5013
            return Builder.createBinaryIntrinsic(LLVM::Core::AArch64NeonURHAdd,
5014
                                                 LHS, RHS);
5015
          }
5016
#endif
5017
5018
          // Fallback case.
5019
          // If the SSE2 is not supported on the x86_64 platform or
5020
          // the NEON is not supported on the aarch64 platform,
5021
          // then fall back to this.
5022
0
          auto EL = Builder.createZExt(LHS, ExtendTy);
5023
0
          auto ER = Builder.createZExt(RHS, ExtendTy);
5024
0
          auto One = Builder.createZExt(
5025
0
              Builder.createVectorSplat(ExtendTy.getVectorSize(),
5026
0
                                        LLContext.getTrue()),
5027
0
              ExtendTy);
5028
0
          return Builder.createTrunc(
5029
0
              Builder.createLShr(
5030
0
                  Builder.createAdd(Builder.createAdd(EL, ER), One), One),
5031
0
              VectorTy);
5032
220
        });
5033
220
  }
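The unsigned rounding average is (a + b + 1) >> 1; PAVGB/PAVGW and NEON URHADD compute it directly, and the fallback widens the operands first so the +1 cannot overflow. A one-lane sketch for i8x16.avgr_u (illustrative only, not part of compiler.cpp):

    #include <cstdint>

    uint8_t avgrU8(uint8_t A, uint8_t B) {
      // Widen before adding the rounding bit so the sum cannot overflow.
      return static_cast<uint8_t>((static_cast<uint16_t>(A) + B + 1) >> 1);
    }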
5034
683
  void compileVectorNarrow(LLVM::Type FromTy, bool Signed) noexcept {
5035
683
    auto [MinInt,
5036
683
          MaxInt] = [&]() noexcept -> std::tuple<LLVM::Value, LLVM::Value> {
5037
683
      switch (FromTy.getElementType().getIntegerBitWidth()) {
5038
259
      case 16: {
5039
259
        const auto Min =
5040
259
            static_cast<int16_t>(Signed ? std::numeric_limits<int8_t>::min()
5041
259
                                        : std::numeric_limits<uint8_t>::min());
5042
259
        const auto Max =
5043
259
            static_cast<int16_t>(Signed ? std::numeric_limits<int8_t>::max()
5044
259
                                        : std::numeric_limits<uint8_t>::max());
5045
259
        return {LLContext.getInt16(static_cast<uint16_t>(Min)),
5046
259
                LLContext.getInt16(static_cast<uint16_t>(Max))};
5047
0
      }
5048
424
      case 32: {
5049
424
        const auto Min =
5050
424
            static_cast<int32_t>(Signed ? std::numeric_limits<int16_t>::min()
5051
424
                                        : std::numeric_limits<uint16_t>::min());
5052
424
        const auto Max =
5053
424
            static_cast<int32_t>(Signed ? std::numeric_limits<int16_t>::max()
5054
424
                                        : std::numeric_limits<uint16_t>::max());
5055
424
        return {LLContext.getInt32(static_cast<uint32_t>(Min)),
5056
424
                LLContext.getInt32(static_cast<uint32_t>(Max))};
5057
0
      }
5058
0
      default:
5059
0
        assumingUnreachable();
5060
683
      }
5061
683
    }();
5062
683
    const auto Count = FromTy.getVectorSize();
5063
683
    auto VMin = Builder.createVectorSplat(Count, MinInt);
5064
683
    auto VMax = Builder.createVectorSplat(Count, MaxInt);
5065
5066
683
    auto TruncTy = FromTy.getTruncatedElementVectorType();
5067
5068
683
    auto F2 = Builder.createBitCast(stackPop(), FromTy);
5069
683
    F2 = Builder.createSelect(Builder.createICmpSLT(F2, VMin), VMin, F2);
5070
683
    F2 = Builder.createSelect(Builder.createICmpSGT(F2, VMax), VMax, F2);
5071
683
    F2 = Builder.createTrunc(F2, TruncTy);
5072
5073
683
    auto F1 = Builder.createBitCast(stackPop(), FromTy);
5074
683
    F1 = Builder.createSelect(Builder.createICmpSLT(F1, VMin), VMin, F1);
5075
683
    F1 = Builder.createSelect(Builder.createICmpSGT(F1, VMax), VMax, F1);
5076
683
    F1 = Builder.createTrunc(F1, TruncTy);
5077
5078
683
    std::vector<uint32_t> Mask(Count * 2);
5079
683
    std::iota(Mask.begin(), Mask.end(), 0);
5080
683
    auto V = Endian::native == Endian::little
5081
683
                 ? Builder.createShuffleVector(
5082
683
                       F1, F2, LLVM::Value::getConstVector32(LLContext, Mask))
5083
683
                 : Builder.createShuffleVector(
5084
0
                       F2, F1, LLVM::Value::getConstVector32(LLContext, Mask));
5085
683
    stackPush(Builder.createBitCast(V, Context.Int64x2Ty));
5086
683
  }
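compileVectorNarrow clamps every source lane to the destination range (the bounds depend on whether the narrow is signed or unsigned), truncates both operands, and concatenates them with a shuffle, swapping the operand order on big-endian hosts. A one-lane sketch of the signed i32-to-i16 case (illustrative only, not part of compiler.cpp):

    #include <cstdint>

    int16_t narrowI32ToI16S(int32_t X) {
      if (X < INT16_MIN) {
        return INT16_MIN;
      }
      if (X > INT16_MAX) {
        return INT16_MAX;
      }
      return static_cast<int16_t>(X);
    }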
5087
5.53k
  void compileVectorExtend(LLVM::Type FromTy, bool Signed, bool Low) noexcept {
5088
5.53k
    auto ExtTy = FromTy.getExtendedElementVectorType();
5089
5.53k
    const auto Count = FromTy.getVectorSize();
5090
5.53k
    std::vector<uint32_t> Mask(Count / 2);
5091
    if constexpr (Endian::native == Endian::big) {
5092
      Low = !Low;
5093
    }
5094
5.53k
    std::iota(Mask.begin(), Mask.end(), Low ? 0 : Count / 2);
5095
5.53k
    auto R = Builder.createBitCast(Stack.back(), FromTy);
5096
5.53k
    if (Signed) {
5097
2.32k
      R = Builder.createSExt(R, ExtTy);
5098
3.20k
    } else {
5099
3.20k
      R = Builder.createZExt(R, ExtTy);
5100
3.20k
    }
5101
5.53k
    R = Builder.createShuffleVector(
5102
5.53k
        R, LLVM::Value::getUndef(ExtTy),
5103
5.53k
        LLVM::Value::getConstVector32(LLContext, Mask));
5104
5.53k
    Stack.back() = Builder.createBitCast(R, Context.Int64x2Ty);
5105
5.53k
  }
5106
1.78k
  void compileVectorExtMul(LLVM::Type FromTy, bool Signed, bool Low) noexcept {
5107
1.78k
    auto ExtTy = FromTy.getExtendedElementVectorType();
5108
1.78k
    const auto Count = FromTy.getVectorSize();
5109
1.78k
    std::vector<uint32_t> Mask(Count / 2);
5110
1.78k
    std::iota(Mask.begin(), Mask.end(), Low ? 0 : Count / 2);
5111
3.57k
    auto Extend = [this, FromTy, Signed, ExtTy, &Mask](LLVM::Value R) noexcept {
5112
3.57k
      R = Builder.createBitCast(R, FromTy);
5113
3.57k
      if (Signed) {
5114
1.49k
        R = Builder.createSExt(R, ExtTy);
5115
2.07k
      } else {
5116
2.07k
        R = Builder.createZExt(R, ExtTy);
5117
2.07k
      }
5118
3.57k
      return Builder.createShuffleVector(
5119
3.57k
          R, LLVM::Value::getUndef(ExtTy),
5120
3.57k
          LLVM::Value::getConstVector32(LLContext, Mask));
5121
3.57k
    };
5122
1.78k
    auto RHS = Extend(stackPop());
5123
1.78k
    auto LHS = Extend(stackPop());
5124
1.78k
    stackPush(
5125
1.78k
        Builder.createBitCast(Builder.createMul(RHS, LHS), Context.Int64x2Ty));
5126
1.78k
  }
5127
2.12k
  void compileVectorExtAddPairwise(LLVM::Type VectorTy, bool Signed) noexcept {
5128
2.12k
    compileVectorOp(
5129
2.12k
        VectorTy, [this, VectorTy, Signed](auto V) noexcept -> LLVM::Value {
5130
2.12k
          auto ExtTy = VectorTy.getExtendedElementVectorType()
5131
2.12k
                           .getHalfElementsVectorType();
5132
2.12k
#if defined(__x86_64__)
5133
2.12k
          const auto Count = VectorTy.getVectorSize();
5134
2.12k
          if (Context.SupportXOP) {
5135
0
            const auto ID = [Count, Signed]() noexcept {
5136
0
              switch (Count) {
5137
0
              case 8:
5138
0
                return Signed ? LLVM::Core::X86XOpVPHAddWD
5139
0
                              : LLVM::Core::X86XOpVPHAddUWD;
5140
0
              case 16:
5141
0
                return Signed ? LLVM::Core::X86XOpVPHAddBW
5142
0
                              : LLVM::Core::X86XOpVPHAddUBW;
5143
0
              default:
5144
0
                assumingUnreachable();
5145
0
              }
5146
0
            }();
5147
0
            assuming(ID != LLVM::Core::NotIntrinsic);
5148
0
            return Builder.createUnaryIntrinsic(ID, V);
5149
0
          }
5150
2.12k
          if (Context.SupportSSSE3 && Count == 16) {
5151
632
            assuming(LLVM::Core::X86SSSE3PMAddUbSw128 !=
5152
632
                     LLVM::Core::NotIntrinsic);
5153
632
            if (Signed) {
5154
303
              return Builder.createIntrinsic(
5155
303
                  LLVM::Core::X86SSSE3PMAddUbSw128, {},
5156
303
                  {Builder.createVectorSplat(16, LLContext.getInt8(1)), V});
5157
329
            } else {
5158
329
              return Builder.createIntrinsic(
5159
329
                  LLVM::Core::X86SSSE3PMAddUbSw128, {},
5160
329
                  {V, Builder.createVectorSplat(16, LLContext.getInt8(1))});
5161
329
            }
5162
632
          }
5163
1.49k
          if (Context.SupportSSE2 && Count == 8) {
5164
1.49k
            assuming(LLVM::Core::X86SSE2PMAddWd != LLVM::Core::NotIntrinsic);
5165
1.49k
            if (Signed) {
5166
1.12k
              return Builder.createIntrinsic(
5167
1.12k
                  LLVM::Core::X86SSE2PMAddWd, {},
5168
1.12k
                  {V, Builder.createVectorSplat(8, LLContext.getInt16(1))});
5169
1.12k
            } else {
5170
369
              V = Builder.createXor(
5171
369
                  V, Builder.createVectorSplat(8, LLContext.getInt16(0x8000)));
5172
369
              V = Builder.createIntrinsic(
5173
369
                  LLVM::Core::X86SSE2PMAddWd, {},
5174
369
                  {V, Builder.createVectorSplat(8, LLContext.getInt16(1))});
5175
369
              return Builder.createAdd(
5176
369
                  V, Builder.createVectorSplat(4, LLContext.getInt32(0x10000)));
5177
369
            }
5178
1.49k
          }
5179
0
#endif
5180
5181
#if defined(__aarch64__)
5182
          if (Context.SupportNEON) {
5183
            const auto ID = Signed ? LLVM::Core::AArch64NeonSAddLP
5184
                                   : LLVM::Core::AArch64NeonUAddLP;
5185
            assuming(ID != LLVM::Core::NotIntrinsic);
5186
            return Builder.createIntrinsic(ID, {ExtTy, VectorTy}, {V});
5187
          }
5188
#endif
5189
5190
          // Fallback case.
5191
          // If the XOP, SSSE3, or SSE2 is not supported on the x86_64 platform
5192
          // or the NEON is not supported on the aarch64 platform,
5193
          // then fallback to this.
5194
0
          auto Width = LLVM::Value::getConstInt(
5195
0
              ExtTy.getElementType(),
5196
0
              VectorTy.getElementType().getIntegerBitWidth());
5197
0
          Width = Builder.createVectorSplat(ExtTy.getVectorSize(), Width);
5198
0
          auto EV = Builder.createBitCast(V, ExtTy);
5199
0
          LLVM::Value L, R;
5200
0
          if (Signed) {
5201
0
            L = Builder.createAShr(EV, Width);
5202
0
            R = Builder.createAShr(Builder.createShl(EV, Width), Width);
5203
0
          } else {
5204
0
            L = Builder.createLShr(EV, Width);
5205
0
            R = Builder.createLShr(Builder.createShl(EV, Width), Width);
5206
0
          }
5207
0
          return Builder.createAdd(L, R);
5208
1.49k
        });
5209
2.12k
  }
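Pairwise widening add sums adjacent lanes after extending them: the x86 paths emulate it with PMADDUBSW or PMADDWD against a splat of 1 (with a bias correction for the unsigned 16-bit case), NEON uses SADDLP/UADDLP, and the generic fallback isolates the two halves of each widened pair with shifts. A scalar sketch of the signed i8-to-i16 form (illustrative only, not part of compiler.cpp):

    #include <array>
    #include <cstdint>

    // Each output lane is the widened sum of two adjacent input lanes.
    std::array<int16_t, 8> extaddPairwiseI8S(const std::array<int8_t, 16> &V) {
      std::array<int16_t, 8> Out{};
      for (unsigned I = 0; I < 8; ++I) {
        Out[I] = static_cast<int16_t>(V[2 * I] + V[2 * I + 1]);
      }
      return Out;
    }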
5210
558
  void compileVectorFAbs(LLVM::Type VectorTy) noexcept {
5211
558
    compileVectorOp(VectorTy, [this](auto V) noexcept {
5212
558
      assuming(LLVM::Core::Fabs != LLVM::Core::NotIntrinsic);
5213
558
      return Builder.createUnaryIntrinsic(LLVM::Core::Fabs, V);
5214
558
    });
5215
558
  }
5216
938
  void compileVectorFNeg(LLVM::Type VectorTy) noexcept {
5217
938
    compileVectorOp(VectorTy,
5218
938
                    [this](auto V) noexcept { return Builder.createFNeg(V); });
5219
938
  }
5220
301
  void compileVectorFSqrt(LLVM::Type VectorTy) noexcept {
5221
301
    compileVectorOp(VectorTy, [this](auto V) noexcept {
5222
301
      assuming(LLVM::Core::Sqrt != LLVM::Core::NotIntrinsic);
5223
301
      return Builder.createUnaryIntrinsic(LLVM::Core::Sqrt, V);
5224
301
    });
5225
301
  }
5226
1.26k
  void compileVectorFCeil(LLVM::Type VectorTy) noexcept {
5227
1.26k
    compileVectorOp(VectorTy, [this](auto V) noexcept {
5228
1.26k
      assuming(LLVM::Core::Ceil != LLVM::Core::NotIntrinsic);
5229
1.26k
      return Builder.createUnaryIntrinsic(LLVM::Core::Ceil, V);
5230
1.26k
    });
5231
1.26k
  }
5232
2.08k
  void compileVectorFFloor(LLVM::Type VectorTy) noexcept {
5233
2.08k
    compileVectorOp(VectorTy, [this](auto V) noexcept {
5234
2.08k
      assuming(LLVM::Core::Floor != LLVM::Core::NotIntrinsic);
5235
2.08k
      return Builder.createUnaryIntrinsic(LLVM::Core::Floor, V);
5236
2.08k
    });
5237
2.08k
  }
5238
1.63k
  void compileVectorFTrunc(LLVM::Type VectorTy) noexcept {
5239
1.63k
    compileVectorOp(VectorTy, [this](auto V) noexcept {
5240
1.63k
      assuming(LLVM::Core::Trunc != LLVM::Core::NotIntrinsic);
5241
1.63k
      return Builder.createUnaryIntrinsic(LLVM::Core::Trunc, V);
5242
1.63k
    });
5243
1.63k
  }
5244
354
  void compileVectorFNearest(LLVM::Type VectorTy) noexcept {
5245
354
    compileVectorOp(VectorTy, [&](auto V) noexcept {
5246
354
#if LLVM_VERSION_MAJOR >= 12 && !defined(__s390x__)
5247
354
      assuming(LLVM::Core::Roundeven != LLVM::Core::NotIntrinsic);
5248
354
      if (LLVM::Core::Roundeven != LLVM::Core::NotIntrinsic) {
5249
354
        return Builder.createUnaryIntrinsic(LLVM::Core::Roundeven, V);
5250
354
      }
5251
0
#endif
5252
5253
0
#if defined(__x86_64__)
5254
0
      if (Context.SupportSSE4_1) {
5255
0
        const bool IsFloat = VectorTy.getElementType().isFloatTy();
5256
0
        auto ID =
5257
0
            IsFloat ? LLVM::Core::X86SSE41RoundPs : LLVM::Core::X86SSE41RoundPd;
5258
0
        assuming(ID != LLVM::Core::NotIntrinsic);
5259
0
        return Builder.createIntrinsic(ID, {}, {V, LLContext.getInt32(8)});
5260
0
      }
5261
0
#endif
5262
5263
#if defined(__aarch64__)
5264
      if (Context.SupportNEON &&
5265
          LLVM::Core::AArch64NeonFRIntN != LLVM::Core::NotIntrinsic) {
5266
        return Builder.createUnaryIntrinsic(LLVM::Core::AArch64NeonFRIntN, V);
5267
      }
5268
#endif
5269
5270
      // Fallback case.
5271
      // If SSE4.1 is not supported on the x86_64 platform or
5272
      // NEON is not supported on the aarch64 platform,
5273
      // then fall back to this generic implementation.
5274
0
      assuming(LLVM::Core::Nearbyint != LLVM::Core::NotIntrinsic);
5275
0
      return Builder.createUnaryIntrinsic(LLVM::Core::Nearbyint, V);
5276
0
    });
5277
354
  }
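
All of the paths above implement round-to-nearest with ties to even: the roundeven intrinsic does so directly, the immediate 8 passed to the SSE4.1 roundps/roundpd path requests the same mode with exceptions suppressed, frintn is the NEON equivalent, and the nearbyint fallback inherits it from the default floating-point environment. A small standalone illustration of the tie-breaking behaviour (plain C++, not WasmEdge code):

#include <cfenv>
#include <cmath>
#include <cstdio>

// f32x4.nearest / f64x2.nearest round to the nearest integer with ties going
// to the even neighbour; std::nearbyint matches that under FE_TONEAREST,
// which is why the fallback above is valid in the default FP environment.
int main() {
  std::fesetround(FE_TONEAREST);
  std::printf("%.1f %.1f %.1f\n",
              std::nearbyint(2.5),   // 2.0: tie rounds to the even value
              std::nearbyint(3.5),   // 4.0
              std::nearbyint(-2.5)); // -2.0
}
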
5278
179
  void compileVectorVectorFAdd(LLVM::Type VectorTy) noexcept {
5279
179
    compileVectorVectorOp(VectorTy, [this](auto LHS, auto RHS) noexcept {
5280
179
      return Builder.createFAdd(LHS, RHS);
5281
179
    });
5282
179
  }
5283
449
  void compileVectorVectorFSub(LLVM::Type VectorTy) noexcept {
5284
449
    compileVectorVectorOp(VectorTy, [this](auto LHS, auto RHS) noexcept {
5285
449
      return Builder.createFSub(LHS, RHS);
5286
449
    });
5287
449
  }
5288
180
  void compileVectorVectorFMul(LLVM::Type VectorTy) noexcept {
5289
180
    compileVectorVectorOp(VectorTy, [this](auto LHS, auto RHS) noexcept {
5290
180
      return Builder.createFMul(LHS, RHS);
5291
180
    });
5292
180
  }
5293
224
  void compileVectorVectorFDiv(LLVM::Type VectorTy) noexcept {
5294
224
    compileVectorVectorOp(VectorTy, [this](auto LHS, auto RHS) noexcept {
5295
224
      return Builder.createFDiv(LHS, RHS);
5296
224
    });
5297
224
  }
5298
285
  void compileVectorVectorFMin(LLVM::Type VectorTy) noexcept {
5299
285
    compileVectorVectorOp(VectorTy, [this](auto LHS, auto RHS) noexcept {
5300
285
      auto LNaN = Builder.createFCmpUNO(LHS, LHS);
5301
285
      auto RNaN = Builder.createFCmpUNO(RHS, RHS);
5302
285
      auto OLT = Builder.createFCmpOLT(LHS, RHS);
5303
285
      auto OGT = Builder.createFCmpOGT(LHS, RHS);
5304
285
      auto Ret = Builder.createBitCast(
5305
285
          Builder.createOr(Builder.createBitCast(LHS, Context.Int64x2Ty),
5306
285
                           Builder.createBitCast(RHS, Context.Int64x2Ty)),
5307
285
          LHS.getType());
5308
285
      Ret = Builder.createSelect(OGT, RHS, Ret);
5309
285
      Ret = Builder.createSelect(OLT, LHS, Ret);
5310
285
      Ret = Builder.createSelect(RNaN, RHS, Ret);
5311
285
      Ret = Builder.createSelect(LNaN, LHS, Ret);
5312
285
      return Ret;
5313
285
    });
5314
285
  }
5315
196
  void compileVectorVectorFMax(LLVM::Type VectorTy) noexcept {
5316
196
    compileVectorVectorOp(VectorTy, [this](auto LHS, auto RHS) noexcept {
5317
196
      auto LNaN = Builder.createFCmpUNO(LHS, LHS);
5318
196
      auto RNaN = Builder.createFCmpUNO(RHS, RHS);
5319
196
      auto OLT = Builder.createFCmpOLT(LHS, RHS);
5320
196
      auto OGT = Builder.createFCmpOGT(LHS, RHS);
5321
196
      auto Ret = Builder.createBitCast(
5322
196
          Builder.createAnd(Builder.createBitCast(LHS, Context.Int64x2Ty),
5323
196
                            Builder.createBitCast(RHS, Context.Int64x2Ty)),
5324
196
          LHS.getType());
5325
196
      Ret = Builder.createSelect(OLT, RHS, Ret);
5326
196
      Ret = Builder.createSelect(OGT, LHS, Ret);
5327
196
      Ret = Builder.createSelect(RNaN, RHS, Ret);
5328
196
      Ret = Builder.createSelect(LNaN, LHS, Ret);
5329
196
      return Ret;
5330
196
    });
5331
196
  }
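
The two select chains above encode the WebAssembly min/max semantics without branching: NaN operands propagate, and the sign of zero is resolved by OR-ing the raw bit patterns for min (so -0.0 wins) and AND-ing them for max (so +0.0 wins); the ordered comparisons then override that default for distinct values. A scalar sketch of the min case, assuming C++20 for std::bit_cast (not WasmEdge code):

#include <bit>
#include <cmath>
#include <cstdint>
#include <cstdio>

// Scalar model of the f64 lane behaviour built by the min lambda above:
// start from the OR of the bit patterns (decides -0.0 vs +0.0), then let the
// ordered comparisons and the NaN checks override it in the same order as
// the createSelect chain.
static double WasmFMin(double LHS, double RHS) {
  double Ret = std::bit_cast<double>(std::bit_cast<uint64_t>(LHS) |
                                     std::bit_cast<uint64_t>(RHS));
  if (LHS > RHS) { Ret = RHS; }       // OGT -> take RHS
  if (LHS < RHS) { Ret = LHS; }       // OLT -> take LHS
  if (std::isnan(RHS)) { Ret = RHS; } // RNaN -> propagate NaN
  if (std::isnan(LHS)) { Ret = LHS; } // LNaN -> propagate NaN
  return Ret;
}

int main() {
  std::printf("%g\n", WasmFMin(1.0, 2.0));                      // 1
  std::printf("%d\n", std::signbit(WasmFMin(-0.0, 0.0)));       // 1: -0.0 wins
  std::printf("%d\n", std::isnan(WasmFMin(1.0, std::nan("")))); // 1: NaN propagates
}
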
5332
298
  void compileVectorVectorFPMin(LLVM::Type VectorTy) noexcept {
5333
298
    compileVectorVectorOp(VectorTy, [this](auto LHS, auto RHS) noexcept {
5334
298
      auto Cmp = Builder.createFCmpOLT(RHS, LHS);
5335
298
      return Builder.createSelect(Cmp, RHS, LHS);
5336
298
    });
5337
298
  }
5338
311
  void compileVectorVectorFPMax(LLVM::Type VectorTy) noexcept {
5339
311
    compileVectorVectorOp(VectorTy, [this](auto LHS, auto RHS) noexcept {
5340
311
      auto Cmp = Builder.createFCmpOGT(RHS, LHS);
5341
311
      return Builder.createSelect(Cmp, RHS, LHS);
5342
311
    });
5343
311
  }
5344
905
  void compileVectorTruncSatS32(LLVM::Type VectorTy, bool PadZero) noexcept {
5345
905
    compileVectorOp(VectorTy, [this, VectorTy, PadZero](auto V) noexcept {
5346
905
      const auto Size = VectorTy.getVectorSize();
5347
905
      auto FPTy = VectorTy.getElementType();
5348
905
      auto IntMin = LLContext.getInt32(
5349
905
          static_cast<uint32_t>(std::numeric_limits<int32_t>::min()));
5350
905
      auto IntMax = LLContext.getInt32(
5351
905
          static_cast<uint32_t>(std::numeric_limits<int32_t>::max()));
5352
905
      auto IntMinV = Builder.createVectorSplat(Size, IntMin);
5353
905
      auto IntMaxV = Builder.createVectorSplat(Size, IntMax);
5354
905
      auto IntZeroV = LLVM::Value::getConstNull(IntMinV.getType());
5355
905
      auto FPMin = Builder.createSIToFP(IntMin, FPTy);
5356
905
      auto FPMax = Builder.createSIToFP(IntMax, FPTy);
5357
905
      auto FPMinV = Builder.createVectorSplat(Size, FPMin);
5358
905
      auto FPMaxV = Builder.createVectorSplat(Size, FPMax);
5359
5360
905
      auto Normal = Builder.createFCmpORD(V, V);
5361
905
      auto NotUnder = Builder.createFCmpUGE(V, FPMinV);
5362
905
      auto NotOver = Builder.createFCmpULT(V, FPMaxV);
5363
905
      V = Builder.createFPToSI(
5364
905
          V, LLVM::Type::getVectorType(LLContext.getInt32Ty(), Size));
5365
905
      V = Builder.createSelect(Normal, V, IntZeroV);
5366
905
      V = Builder.createSelect(NotUnder, V, IntMinV);
5367
905
      V = Builder.createSelect(NotOver, V, IntMaxV);
5368
905
      if (PadZero) {
5369
744
        std::vector<uint32_t> Mask(Size * 2);
5370
744
        std::iota(Mask.begin(), Mask.end(), 0);
5371
744
        if constexpr (Endian::native == Endian::little) {
5372
744
          V = Builder.createShuffleVector(
5373
744
              V, IntZeroV, LLVM::Value::getConstVector32(LLContext, Mask));
5374
        } else {
5375
          V = Builder.createShuffleVector(
5376
              IntZeroV, V, LLVM::Value::getConstVector32(LLContext, Mask));
5377
        }
5378
744
      }
5379
905
      return V;
5380
905
    });
5381
905
  }
5382
5.78k
  void compileVectorTruncSatU32(LLVM::Type VectorTy, bool PadZero) noexcept {
5383
5.78k
    compileVectorOp(VectorTy, [this, VectorTy, PadZero](auto V) noexcept {
5384
5.78k
      const auto Size = VectorTy.getVectorSize();
5385
5.78k
      auto FPTy = VectorTy.getElementType();
5386
5.78k
      auto IntMin = LLContext.getInt32(std::numeric_limits<uint32_t>::min());
5387
5.78k
      auto IntMax = LLContext.getInt32(std::numeric_limits<uint32_t>::max());
5388
5.78k
      auto IntMinV = Builder.createVectorSplat(Size, IntMin);
5389
5.78k
      auto IntMaxV = Builder.createVectorSplat(Size, IntMax);
5390
5.78k
      auto FPMin = Builder.createUIToFP(IntMin, FPTy);
5391
5.78k
      auto FPMax = Builder.createUIToFP(IntMax, FPTy);
5392
5.78k
      auto FPMinV = Builder.createVectorSplat(Size, FPMin);
5393
5.78k
      auto FPMaxV = Builder.createVectorSplat(Size, FPMax);
5394
5395
5.78k
      auto NotUnder = Builder.createFCmpOGE(V, FPMinV);
5396
5.78k
      auto NotOver = Builder.createFCmpULT(V, FPMaxV);
5397
5.78k
      V = Builder.createFPToUI(
5398
5.78k
          V, LLVM::Type::getVectorType(LLContext.getInt32Ty(), Size));
5399
5.78k
      V = Builder.createSelect(NotUnder, V, IntMinV);
5400
5.78k
      V = Builder.createSelect(NotOver, V, IntMaxV);
5401
5.78k
      if (PadZero) {
5402
2.10k
        auto IntZeroV = LLVM::Value::getConstNull(IntMinV.getType());
5403
2.10k
        std::vector<uint32_t> Mask(Size * 2);
5404
2.10k
        std::iota(Mask.begin(), Mask.end(), 0);
5405
2.10k
        if constexpr (Endian::native == Endian::little) {
5406
2.10k
          V = Builder.createShuffleVector(
5407
2.10k
              V, IntZeroV, LLVM::Value::getConstVector32(LLContext, Mask));
5408
        } else {
5409
          V = Builder.createShuffleVector(
5410
              IntZeroV, V, LLVM::Value::getConstVector32(LLContext, Mask));
5411
        }
5412
2.10k
      }
5413
5.78k
      return V;
5414
5.78k
    });
5415
5.78k
  }
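
Both trunc-sat helpers above follow the same pattern: compare against the representable range in floating point, convert, and then use selects so that NaN becomes 0 and out-of-range inputs clamp to the integer minimum or maximum; PadZero additionally widens the i32x4 result back to a full 128-bit vector by shuffling in zero lanes. A scalar sketch of the signed conversion, assuming plain C++ (not WasmEdge code):

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <limits>

// Scalar model of a signed saturating lane conversion: NaN -> 0, values below
// INT32_MIN clamp low, values not strictly below the FP image of INT32_MAX
// clamp high, and everything in range is a plain truncating conversion.
static int32_t TruncSatF64ToI32(double V) {
  const double FPMin = static_cast<double>(std::numeric_limits<int32_t>::min());
  const double FPMax = static_cast<double>(std::numeric_limits<int32_t>::max());
  if (std::isnan(V)) { return 0; }                                   // "Normal"
  if (!(V >= FPMin)) { return std::numeric_limits<int32_t>::min(); } // "NotUnder"
  if (!(V < FPMax)) { return std::numeric_limits<int32_t>::max(); }  // "NotOver"
  return static_cast<int32_t>(V);
}

int main() {
  std::printf("%d %d %d %d\n", TruncSatF64ToI32(3.9),
              TruncSatF64ToI32(std::nan("")), TruncSatF64ToI32(-1e30),
              TruncSatF64ToI32(1e30));
  // 3 0 -2147483648 2147483647
}
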
5416
  void compileVectorConvertS(LLVM::Type VectorTy, LLVM::Type FPVectorTy,
5417
667
                             bool Low) noexcept {
5418
667
    compileVectorOp(VectorTy,
5419
667
                    [this, VectorTy, FPVectorTy, Low](auto V) noexcept {
5420
667
                      if (Low) {
5421
352
                        const auto Size = VectorTy.getVectorSize() / 2;
5422
352
                        std::vector<uint32_t> Mask(Size);
5423
352
                        if constexpr (Endian::native == Endian::little) {
5424
352
                          std::iota(Mask.begin(), Mask.end(), 0);
5425
                        } else {
5426
                          std::iota(Mask.begin(), Mask.end(), Size);
5427
                        }
5428
352
                        V = Builder.createShuffleVector(
5429
352
                            V, LLVM::Value::getUndef(VectorTy),
5430
352
                            LLVM::Value::getConstVector32(LLContext, Mask));
5431
352
                      }
5432
667
                      return Builder.createSIToFP(V, FPVectorTy);
5433
667
                    });
5434
667
  }
5435
  void compileVectorConvertU(LLVM::Type VectorTy, LLVM::Type FPVectorTy,
5436
1.91k
                             bool Low) noexcept {
5437
1.91k
    compileVectorOp(VectorTy,
5438
1.91k
                    [this, VectorTy, FPVectorTy, Low](auto V) noexcept {
5439
1.91k
                      if (Low) {
5440
1.21k
                        const auto Size = VectorTy.getVectorSize() / 2;
5441
1.21k
                        std::vector<uint32_t> Mask(Size);
5442
1.21k
                        if constexpr (Endian::native == Endian::little) {
5443
1.21k
                          std::iota(Mask.begin(), Mask.end(), 0);
5444
                        } else {
5445
                          std::iota(Mask.begin(), Mask.end(), Size);
5446
                        }
5447
1.21k
                        V = Builder.createShuffleVector(
5448
1.21k
                            V, LLVM::Value::getUndef(VectorTy),
5449
1.21k
                            LLVM::Value::getConstVector32(LLContext, Mask));
5450
1.21k
                      }
5451
1.91k
                      return Builder.createUIToFP(V, FPVectorTy);
5452
1.91k
                    });
5453
1.91k
  }
5454
567
  void compileVectorDemote() noexcept {
5455
567
    compileVectorOp(Context.Doublex2Ty, [this](auto V) noexcept {
5456
567
      auto Demoted = Builder.createFPTrunc(
5457
567
          V, LLVM::Type::getVectorType(Context.FloatTy, 2));
5458
567
      auto ZeroV = LLVM::Value::getConstNull(Demoted.getType());
5459
567
      if constexpr (Endian::native == Endian::little) {
5460
567
        return Builder.createShuffleVector(
5461
567
            Demoted, ZeroV,
5462
567
            LLVM::Value::getConstVector32(LLContext, {0u, 1u, 2u, 3u}));
5463
      } else {
5464
        return Builder.createShuffleVector(
5465
            Demoted, ZeroV,
5466
            LLVM::Value::getConstVector32(LLContext, {3u, 2u, 1u, 0u}));
5467
      }
5468
567
    });
5469
567
  }
5470
554
  void compileVectorPromote() noexcept {
5471
554
    compileVectorOp(Context.Floatx4Ty, [this](auto V) noexcept {
5472
554
      auto UndefV = LLVM::Value::getUndef(V.getType());
5473
554
      auto Low = Builder.createShuffleVector(
5474
554
          V, UndefV, LLVM::Value::getConstVector32(LLContext, {0u, 1u}));
5475
554
      return Builder.createFPExt(
5476
554
          Low, LLVM::Type::getVectorType(Context.DoubleTy, 2));
5477
554
    });
5478
554
  }
5479
5480
0
  void compileVectorVectorMAdd(LLVM::Type VectorTy) noexcept {
5481
0
    auto C = Builder.createBitCast(stackPop(), VectorTy);
5482
0
    auto RHS = Builder.createBitCast(stackPop(), VectorTy);
5483
0
    auto LHS = Builder.createBitCast(stackPop(), VectorTy);
5484
0
    stackPush(Builder.createBitCast(
5485
0
        Builder.createFAdd(Builder.createFMul(LHS, RHS), C),
5486
0
        Context.Int64x2Ty));
5487
0
  }
5488
5489
0
  void compileVectorVectorNMAdd(LLVM::Type VectorTy) noexcept {
5490
0
    auto C = Builder.createBitCast(stackPop(), VectorTy);
5491
0
    auto RHS = Builder.createBitCast(stackPop(), VectorTy);
5492
0
    auto LHS = Builder.createBitCast(stackPop(), VectorTy);
5493
0
    stackPush(Builder.createBitCast(
5494
0
        Builder.createFAdd(Builder.createFMul(Builder.createFNeg(LHS), RHS), C),
5495
0
        Context.Int64x2Ty));
5496
0
  }
5497
5498
0
  void compileVectorRelaxedIntegerDotProduct() noexcept {
5499
0
    auto OriTy = Context.Int8x16Ty;
5500
0
    auto ExtTy = Context.Int16x8Ty;
5501
0
    auto RHS = Builder.createBitCast(stackPop(), OriTy);
5502
0
    auto LHS = Builder.createBitCast(stackPop(), OriTy);
5503
0
#if defined(__x86_64__)
5504
0
    if (Context.SupportSSSE3) {
5505
0
      assuming(LLVM::Core::X86SSSE3PMAddUbSw128 != LLVM::Core::NotIntrinsic);
5506
      // WebAssembly Relaxed SIMD spec: signed(LHS) * unsigned/signed(RHS)
5507
      // But PMAddUbSw128 is unsigned(LHS) * signed(RHS). Therefore, swap both
5508
      // sides to match the WebAssembly spec.
5509
0
      return stackPush(Builder.createBitCast(
5510
0
          Builder.createIntrinsic(LLVM::Core::X86SSSE3PMAddUbSw128, {},
5511
0
                                  {RHS, LHS}),
5512
0
          Context.Int64x2Ty));
5513
0
    }
5514
0
#endif
5515
0
    auto Width = LLVM::Value::getConstInt(
5516
0
        ExtTy.getElementType(), OriTy.getElementType().getIntegerBitWidth());
5517
0
    Width = Builder.createVectorSplat(ExtTy.getVectorSize(), Width);
5518
0
    auto EA = Builder.createBitCast(LHS, ExtTy);
5519
0
    auto EB = Builder.createBitCast(RHS, ExtTy);
5520
5521
0
    LLVM::Value AL, AR, BL, BR;
5522
0
    AL = Builder.createAShr(EA, Width);
5523
0
    AR = Builder.createAShr(Builder.createShl(EA, Width), Width);
5524
0
    BL = Builder.createAShr(EB, Width);
5525
0
    BR = Builder.createAShr(Builder.createShl(EB, Width), Width);
5526
5527
0
    return stackPush(Builder.createBitCast(
5528
0
        Builder.createAdd(Builder.createMul(AL, BL), Builder.createMul(AR, BR)),
5529
0
        Context.Int64x2Ty));
5530
0
  }
5531
5532
0
  void compileVectorRelaxedIntegerDotProductAdd() noexcept {
5533
0
    auto OriTy = Context.Int8x16Ty;
5534
0
    auto ExtTy = Context.Int16x8Ty;
5535
0
    auto FinTy = Context.Int32x4Ty;
5536
0
    auto VC = Builder.createBitCast(stackPop(), FinTy);
5537
0
    auto RHS = Builder.createBitCast(stackPop(), OriTy);
5538
0
    auto LHS = Builder.createBitCast(stackPop(), OriTy);
5539
0
    LLVM::Value IM;
5540
0
#if defined(__x86_64__)
5541
0
    if (Context.SupportSSSE3) {
5542
0
      assuming(LLVM::Core::X86SSSE3PMAddUbSw128 != LLVM::Core::NotIntrinsic);
5543
      // WebAssembly Relaxed SIMD spec: signed(LHS) * unsigned/signed(RHS)
5544
      // But PMAddUbSw128 is unsigned(LHS) * signed(RHS). Therefore, swap both
5545
      // sides to match the WebAssembly spec.
5546
0
      IM = Builder.createIntrinsic(LLVM::Core::X86SSSE3PMAddUbSw128, {},
5547
0
                                   {RHS, LHS});
5548
0
    } else
5549
0
#endif
5550
0
    {
5551
0
      auto Width = LLVM::Value::getConstInt(
5552
0
          ExtTy.getElementType(), OriTy.getElementType().getIntegerBitWidth());
5553
0
      Width = Builder.createVectorSplat(ExtTy.getVectorSize(), Width);
5554
0
      auto EA = Builder.createBitCast(LHS, ExtTy);
5555
0
      auto EB = Builder.createBitCast(RHS, ExtTy);
5556
5557
0
      LLVM::Value AL, AR, BL, BR;
5558
0
      AL = Builder.createAShr(EA, Width);
5559
0
      AR = Builder.createAShr(Builder.createShl(EA, Width), Width);
5560
0
      BL = Builder.createAShr(EB, Width);
5561
0
      BR = Builder.createAShr(Builder.createShl(EB, Width), Width);
5562
0
      IM = Builder.createAdd(Builder.createMul(AL, BL),
5563
0
                             Builder.createMul(AR, BR));
5564
0
    }
5565
5566
0
    auto Width = LLVM::Value::getConstInt(
5567
0
        FinTy.getElementType(), ExtTy.getElementType().getIntegerBitWidth());
5568
0
    Width = Builder.createVectorSplat(FinTy.getVectorSize(), Width);
5569
0
    auto IME = Builder.createBitCast(IM, FinTy);
5570
0
    auto L = Builder.createAShr(IME, Width);
5571
0
    auto R = Builder.createAShr(Builder.createShl(IME, Width), Width);
5572
5573
0
    return stackPush(Builder.createBitCast(
5574
0
        Builder.createAdd(Builder.createAdd(L, R), VC), Context.Int64x2Ty));
5575
0
  }
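
Both relaxed dot-product helpers pick PMAddUbSw128 when SSSE3 is available and otherwise de-interleave the 8-bit elements with the same shl/ashr trick used for the pairwise additions above. The operand swap mentioned in the comments works because the RHS lanes are expected to be 7-bit values, which read the same whether interpreted as signed or unsigned. A scalar sketch of one 16-bit output lane of the generic path (plain C++, not WasmEdge code):

#include <cstdint>
#include <cstdio>

// One output lane of the relaxed i8x16 dot product: two adjacent signed 8-bit
// LHS elements are multiplied with the matching 7-bit RHS elements and the
// products are summed. Because B0/B1 are in 0..127, unsigned(B) * signed(A)
// equals signed(A) * signed(B), which is what passing {RHS, LHS} to the
// PMAddUbSw128 intrinsic exploits.
static int16_t RelaxedDotLane(int8_t A0, int8_t A1, uint8_t B0, uint8_t B1) {
  return static_cast<int16_t>(A0 * B0 + A1 * B1);
}

int main() {
  std::printf("%d\n", RelaxedDotLane(-2, 3, 100, 5)); // -200 + 15 = -185
}
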
5576
5577
  void
5578
  enterBlock(LLVM::BasicBlock JumpBlock, LLVM::BasicBlock NextBlock,
5579
             LLVM::BasicBlock ElseBlock, std::vector<LLVM::Value> Args,
5580
             std::pair<std::vector<ValType>, std::vector<ValType>> Type,
5581
             std::vector<std::tuple<std::vector<LLVM::Value>, LLVM::BasicBlock>>
5582
18.4k
                 ReturnPHI = {}) noexcept {
5583
18.4k
    assuming(Type.first.size() == Args.size());
5584
18.4k
    for (auto &Value : Args) {
5585
3.97k
      stackPush(Value);
5586
3.97k
    }
5587
18.4k
    const auto Unreachable = isUnreachable();
5588
18.4k
    ControlStack.emplace_back(Stack.size() - Args.size(), Unreachable,
5589
18.4k
                              JumpBlock, NextBlock, ElseBlock, std::move(Args),
5590
18.4k
                              std::move(Type), std::move(ReturnPHI));
5591
18.4k
  }
5592
5593
18.4k
  Control leaveBlock() noexcept {
5594
18.4k
    Control Entry = std::move(ControlStack.back());
5595
18.4k
    ControlStack.pop_back();
5596
5597
18.4k
    auto NextBlock = Entry.NextBlock ? Entry.NextBlock : Entry.JumpBlock;
5598
18.4k
    if (!Entry.Unreachable) {
5599
11.6k
      const auto &ReturnType = Entry.Type.second;
5600
11.6k
      if (!ReturnType.empty()) {
5601
8.77k
        std::vector<LLVM::Value> Rets(ReturnType.size());
5602
17.9k
        for (size_t I = 0; I < Rets.size(); ++I) {
5603
9.16k
          const size_t J = Rets.size() - 1 - I;
5604
9.16k
          Rets[J] = stackPop();
5605
9.16k
        }
5606
8.77k
        Entry.ReturnPHI.emplace_back(std::move(Rets), Builder.getInsertBlock());
5607
8.77k
      }
5608
11.6k
      Builder.createBr(NextBlock);
5609
11.6k
    } else {
5610
6.84k
      Builder.createUnreachable();
5611
6.84k
    }
5612
18.4k
    Builder.positionAtEnd(NextBlock);
5613
18.4k
    Stack.erase(Stack.begin() + static_cast<int64_t>(Entry.StackSize),
5614
18.4k
                Stack.end());
5615
18.4k
    return Entry;
5616
18.4k
  }
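
enterBlock() and leaveBlock() keep WebAssembly's structured control flow on a side stack: entering a block records the operand-stack height, the jump targets, and the block type; leaving it pops the block's results in reverse order, forwards them to the merge point, and truncates the operand stack back to the recorded height. A simplified standalone model of that bookkeeping (plain C++, no LLVM, hypothetical names):

#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

// Minimal model of the control-stack discipline above: the operand stack is a
// vector of placeholder values, and each Control entry remembers the stack
// height at block entry plus the block's result arity.
struct Control {
  std::size_t StackSize;   // operand-stack height when the block was entered
  std::size_t ResultArity; // number of results the block yields
};

int main() {
  std::vector<std::string> Stack{"param0"};
  std::vector<Control> ControlStack;

  // enterBlock: remember the current height and the block type.
  ControlStack.push_back({Stack.size(), /*ResultArity=*/1});

  // ...the block body pushes intermediates and finally its result...
  Stack.push_back("tmp");
  Stack.push_back("blockResult");

  // leaveBlock: pop the results last-to-first, then erase everything pushed
  // since block entry, mirroring the Stack.erase(...) call above.
  const Control Entry = ControlStack.back();
  ControlStack.pop_back();
  std::vector<std::string> Results(Entry.ResultArity);
  for (std::size_t I = 0; I < Results.size(); ++I) {
    Results[Results.size() - 1 - I] = Stack.back();
    Stack.pop_back();
  }
  Stack.erase(Stack.begin() + static_cast<std::ptrdiff_t>(Entry.StackSize),
              Stack.end());
  assert(Stack.size() == 1 && Results.front() == "blockResult");
  return 0;
}
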
5617
5618
4.63k
  void checkStop() noexcept {
5619
4.63k
    if (!Interruptible) {
5620
4.63k
      return;
5621
4.63k
    }
5622
0
    auto NotStopBB = LLVM::BasicBlock::create(LLContext, F.Fn, "NotStop");
5623
0
    auto StopToken = Builder.createAtomicRMW(
5624
0
        LLVMAtomicRMWBinOpXchg, Context.getStopToken(Builder, ExecCtx),
5625
0
        LLContext.getInt32(0), LLVMAtomicOrderingMonotonic);
5626
#if LLVM_VERSION_MAJOR >= 13
5627
    StopToken.setAlignment(32);
5628
#endif
5629
0
    auto NotStop = Builder.createLikely(
5630
0
        Builder.createICmpEQ(StopToken, LLContext.getInt32(0)));
5631
0
    Builder.createCondBr(NotStop, NotStopBB,
5632
0
                         getTrapBB(ErrCode::Value::Interrupted));
5633
5634
0
    Builder.positionAtEnd(NotStopBB);
5635
0
  }
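
checkStop() is emitted only for interruptible compilation: it atomically swaps the stop token with 0 and, when the previous value was non-zero, branches to the Interrupted trap; createLikely marks the continue path as the expected one. A standalone model of the token handshake (plain C++; the atomic layout is illustrative, not the real execution-context definition):

#include <atomic>
#include <cstdint>
#include <cstdio>

// Model of the interruption protocol: the execution context holds a 32-bit
// stop token, and compiled code exchanges it with 0 on each check.
int main() {
  std::atomic<uint32_t> StopToken{0};

  const auto CheckStop = [&]() -> bool {
    // Counterpart of the LLVMAtomicRMWBinOpXchg with monotonic ordering.
    return StopToken.exchange(0, std::memory_order_relaxed) != 0;
  };

  std::printf("%d\n", CheckStop()); // 0: keep running
  StopToken.store(1, std::memory_order_relaxed); // host requests a stop
  std::printf("%d\n", CheckStop()); // 1: would trap with ErrCode::Interrupted
}
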
5636
5637
5.03k
  void setUnreachable() noexcept {
5638
5.03k
    if (ControlStack.empty()) {
5639
0
      IsUnreachable = true;
5640
5.03k
    } else {
5641
5.03k
      ControlStack.back().Unreachable = true;
5642
5.03k
    }
5643
5.03k
  }
5644
5645
1.40M
  bool isUnreachable() const noexcept {
5646
1.40M
    if (ControlStack.empty()) {
5647
9.34k
      return IsUnreachable;
5648
1.39M
    } else {
5649
1.39M
      return ControlStack.back().Unreachable;
5650
1.39M
    }
5651
1.40M
  }
5652
5653
  void
5654
  buildPHI(Span<const ValType> RetType,
5655
           Span<const std::tuple<std::vector<LLVM::Value>, LLVM::BasicBlock>>
5656
16.2k
               Incomings) noexcept {
5657
16.2k
    if (isVoidReturn(RetType)) {
5658
4.95k
      return;
5659
4.95k
    }
5660
11.2k
    std::vector<LLVM::Value> Nodes;
5661
11.2k
    if (Incomings.size() == 0) {
5662
2.32k
      const auto &Types = toLLVMTypeVector(LLContext, RetType);
5663
2.32k
      Nodes.reserve(Types.size());
5664
2.63k
      for (LLVM::Type Type : Types) {
5665
2.63k
        Nodes.push_back(LLVM::Value::getUndef(Type));
5666
2.63k
      }
5667
8.95k
    } else if (Incomings.size() == 1) {
5668
7.90k
      Nodes = std::move(std::get<0>(Incomings.front()));
5669
7.90k
    } else {
5670
1.04k
      const auto &Types = toLLVMTypeVector(LLContext, RetType);
5671
1.04k
      Nodes.reserve(Types.size());
5672
2.18k
      for (size_t I = 0; I < Types.size(); ++I) {
5673
1.14k
        auto PHIRet = Builder.createPHI(Types[I]);
5674
3.05k
        for (auto &[Value, BB] : Incomings) {
5675
3.05k
          assuming(Value.size() == Types.size());
5676
3.05k
          PHIRet.addIncoming(Value[I], BB);
5677
3.05k
        }
5678
1.14k
        Nodes.push_back(PHIRet);
5679
1.14k
      }
5680
1.04k
    }
5681
11.9k
    for (auto &Val : Nodes) {
5682
11.9k
      stackPush(Val);
5683
11.9k
    }
5684
11.2k
  }
5685
5686
37.4k
  void setLableJumpPHI(unsigned int Index) noexcept {
5687
37.4k
    assuming(Index < ControlStack.size());
5688
37.4k
    auto &Entry = *(ControlStack.rbegin() + Index);
5689
37.4k
    if (Entry.NextBlock) { // is loop
5690
2.17k
      std::vector<LLVM::Value> Args(Entry.Type.first.size());
5691
4.02k
      for (size_t I = 0; I < Args.size(); ++I) {
5692
1.85k
        const size_t J = Args.size() - 1 - I;
5693
1.85k
        Args[J] = stackPop();
5694
1.85k
      }
5695
4.02k
      for (size_t I = 0; I < Args.size(); ++I) {
5696
1.85k
        Entry.Args[I].addIncoming(Args[I], Builder.getInsertBlock());
5697
1.85k
        stackPush(Args[I]);
5698
1.85k
      }
5699
35.2k
    } else if (!Entry.Type.second.empty()) { // has return value
5700
1.98k
      std::vector<LLVM::Value> Rets(Entry.Type.second.size());
5701
4.09k
      for (size_t I = 0; I < Rets.size(); ++I) {
5702
2.11k
        const size_t J = Rets.size() - 1 - I;
5703
2.11k
        Rets[J] = stackPop();
5704
2.11k
      }
5705
4.09k
      for (size_t I = 0; I < Rets.size(); ++I) {
5706
2.11k
        stackPush(Rets[I]);
5707
2.11k
      }
5708
1.98k
      Entry.ReturnPHI.emplace_back(std::move(Rets), Builder.getInsertBlock());
5709
1.98k
    }
5710
37.4k
  }
5711
5712
37.4k
  LLVM::BasicBlock getLabel(unsigned int Index) const noexcept {
5713
37.4k
    return (ControlStack.rbegin() + Index)->JumpBlock;
5714
37.4k
  }
5715
5716
832k
  void stackPush(LLVM::Value Value) noexcept { Stack.push_back(Value); }
5717
321k
  LLVM::Value stackPop() noexcept {
5718
321k
    assuming(!ControlStack.empty() || !Stack.empty());
5719
321k
    assuming(ControlStack.empty() ||
5720
321k
             Stack.size() > ControlStack.back().StackSize);
5721
321k
    auto Value = Stack.back();
5722
321k
    Stack.pop_back();
5723
321k
    return Value;
5724
321k
  }
5725
5726
19.7k
  LLVM::Value switchEndian(LLVM::Value Value) {
5727
    if constexpr (Endian::native == Endian::big) {
5728
      auto Type = Value.getType();
5729
      if ((Type.isIntegerTy() && Type.getIntegerBitWidth() > 8) ||
5730
          (Type.isVectorTy() && Type.getVectorSize() == 1)) {
5731
        return Builder.createUnaryIntrinsic(LLVM::Core::Bswap, Value);
5732
      }
5733
      if (Type.isVectorTy()) {
5734
        LLVM::Type VecType = Type.getElementType().getIntegerBitWidth() == 128
5735
                                 ? Context.Int128Ty
5736
                                 : Context.Int64Ty;
5737
        Value = Builder.createBitCast(Value, VecType);
5738
        Value = Builder.createUnaryIntrinsic(LLVM::Core::Bswap, Value);
5739
        return Builder.createBitCast(Value, Type);
5740
      }
5741
      if (Type.isFloatTy() || Type.isDoubleTy()) {
5742
        LLVM::Type IntType =
5743
            Type.isFloatTy() ? Context.Int32Ty : Context.Int64Ty;
5744
        Value = Builder.createBitCast(Value, IntType);
5745
        Value = Builder.createUnaryIntrinsic(LLVM::Core::Bswap, Value);
5746
        return Builder.createBitCast(Value, Type);
5747
      }
5748
    }
5749
19.7k
    return Value;
5750
19.7k
  }
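
switchEndian() is a no-op on little-endian hosts; on big-endian hosts it byte-swaps integers, vectors, and the integer images of floats so that values exchanged with linear memory keep the little-endian layout the WebAssembly spec mandates. A sketch of the float case, assuming C++20 and the GCC/Clang __builtin_bswap32 builtin (std::byteswap in C++23); not WasmEdge code:

#include <bit>
#include <cstdint>
#include <cstdio>

// What switchEndian() does for a 32-bit float on a big-endian host: bitcast
// to an integer, byte-swap, and bitcast back, like the Bswap intrinsic
// wrapped between the two createBitCast calls above.
static float SwitchEndianF32(float V) {
  uint32_t Bits = std::bit_cast<uint32_t>(V);
  Bits = __builtin_bswap32(Bits); // std::byteswap(Bits) in C++23
  return std::bit_cast<float>(Bits);
}

int main() {
  // Swapping twice restores the original value regardless of host endianness.
  std::printf("%g\n", SwitchEndianF32(SwitchEndianF32(1.5f))); // 1.5
}
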
5751
5752
  LLVM::Compiler::CompileContext &Context;
5753
  LLVM::Context LLContext;
5754
  std::vector<std::pair<LLVM::Type, LLVM::Value>> Local;
5755
  std::vector<LLVM::Value> Stack;
5756
  LLVM::Value LocalInstrCount = nullptr;
5757
  LLVM::Value LocalGas = nullptr;
5758
  std::unordered_map<ErrCode::Value, LLVM::BasicBlock> TrapBB;
5759
  bool IsUnreachable = false;
5760
  bool Interruptible = false;
5761
  struct Control {
5762
    size_t StackSize;
5763
    bool Unreachable;
5764
    LLVM::BasicBlock JumpBlock;
5765
    LLVM::BasicBlock NextBlock;
5766
    LLVM::BasicBlock ElseBlock;
5767
    std::vector<LLVM::Value> Args;
5768
    std::pair<std::vector<ValType>, std::vector<ValType>> Type;
5769
    std::vector<std::tuple<std::vector<LLVM::Value>, LLVM::BasicBlock>>
5770
        ReturnPHI;
5771
    Control(size_t S, bool U, LLVM::BasicBlock J, LLVM::BasicBlock N,
5772
            LLVM::BasicBlock E, std::vector<LLVM::Value> A,
5773
            std::pair<std::vector<ValType>, std::vector<ValType>> T,
5774
            std::vector<std::tuple<std::vector<LLVM::Value>, LLVM::BasicBlock>>
5775
                R) noexcept
5776
18.4k
        : StackSize(S), Unreachable(U), JumpBlock(J), NextBlock(N),
5777
18.4k
          ElseBlock(E), Args(std::move(A)), Type(std::move(T)),
5778
18.4k
          ReturnPHI(std::move(R)) {}
5779
    Control(const Control &) = default;
5780
23.1k
    Control(Control &&) = default;
5781
    Control &operator=(const Control &) = default;
5782
883
    Control &operator=(Control &&) = default;
5783
  };
5784
  std::vector<Control> ControlStack;
5785
  LLVM::FunctionCallee F;
5786
  LLVM::Value ExecCtx;
5787
  LLVM::Builder Builder;
5788
};
5789
5790
std::vector<LLVM::Value> unpackStruct(LLVM::Builder &Builder,
5791
379
                                      LLVM::Value Struct) noexcept {
5792
379
  const auto N = Struct.getType().getStructNumElements();
5793
379
  std::vector<LLVM::Value> Ret;
5794
379
  Ret.reserve(N);
5795
1.35k
  for (unsigned I = 0; I < N; ++I) {
5796
976
    Ret.push_back(Builder.createExtractValue(Struct, I));
5797
976
  }
5798
379
  return Ret;
5799
379
}
5800
5801
} // namespace
5802
5803
namespace WasmEdge {
5804
namespace LLVM {
5805
5806
1.96k
Expect<void> Compiler::checkConfigure() noexcept {
5807
1.96k
  if (Conf.hasProposal(Proposal::ExceptionHandling)) {
5808
0
    spdlog::error(ErrCode::Value::InvalidConfigure);
5809
0
    spdlog::error(
5810
0
        "    Proposal ExceptionHandling is not yet supported in LLVM backend");
5811
0
    return Unexpect(ErrCode::Value::InvalidConfigure);
5812
0
  }
5813
1.96k
  return {};
5814
1.96k
}
5815
5816
1.96k
Expect<Data> Compiler::compile(const AST::Module &Module) noexcept {
5817
  // Check the module is validated.
5818
1.96k
  if (unlikely(!Module.getIsValidated())) {
5819
0
    spdlog::error(ErrCode::Value::NotValidated);
5820
0
    return Unexpect(ErrCode::Value::NotValidated);
5821
0
  }
5822
5823
1.96k
  std::unique_lock Lock(Mutex);
5824
1.96k
  spdlog::info("compile start"sv);
5825
5826
1.96k
  LLVM::Core::init();
5827
5828
1.96k
  LLVM::Data D;
5829
1.96k
  auto LLContext = D.extract().LLContext();
5830
1.96k
  auto &LLModule = D.extract().LLModule;
5831
1.96k
  LLModule.setTarget(LLVM::getDefaultTargetTriple().unwrap());
5832
1.96k
  LLModule.addFlag(LLVMModuleFlagBehaviorError, "PIC Level"sv, 2);
5833
5834
1.96k
  CompileContext NewContext(LLContext, LLModule,
5835
1.96k
                            Conf.getCompilerConfigure().isGenericBinary());
5836
1.96k
  struct RAIICleanup {
5837
1.96k
    RAIICleanup(CompileContext *&Context, CompileContext &NewContext)
5838
1.96k
        : Context(Context) {
5839
1.96k
      Context = &NewContext;
5840
1.96k
    }
5841
1.96k
    ~RAIICleanup() { Context = nullptr; }
5842
1.96k
    CompileContext *&Context;
5843
1.96k
  };
5844
1.96k
  RAIICleanup Cleanup(Context, NewContext);
5845
5846
  // Compile Function Types
5847
1.96k
  compile(Module.getTypeSection());
5848
  // Compile ImportSection
5849
1.96k
  compile(Module.getImportSection());
5850
  // Compile GlobalSection
5851
1.96k
  compile(Module.getGlobalSection());
5852
  // Compile MemorySection (MemorySec, DataSec)
5853
1.96k
  compile(Module.getMemorySection(), Module.getDataSection());
5854
  // Compile TableSection (TableSec, ElemSec)
5855
1.96k
  compile(Module.getTableSection(), Module.getElementSection());
5856
  // Compile Functions in module. (FunctionSec, CodeSec)
5857
1.96k
  compile(Module.getFunctionSection(), Module.getCodeSection());
5858
  // Compile ExportSection
5859
1.96k
  compile(Module.getExportSection());
5860
  // The StartSection does not need to be compiled.
5861
5862
1.96k
  spdlog::info("verify start"sv);
5863
1.96k
  LLModule.verify(LLVMPrintMessageAction);
5864
5865
1.96k
  spdlog::info("optimize start"sv);
5866
1.96k
  auto &TM = D.extract().TM;
5867
1.96k
  {
5868
1.96k
    auto Triple = LLModule.getTarget();
5869
1.96k
    auto [TheTarget, ErrorMessage] = LLVM::Target::getFromTriple(Triple);
5870
1.96k
    if (ErrorMessage) {
5871
0
      spdlog::error("getFromTriple failed:{}"sv, ErrorMessage.string_view());
5872
0
      return Unexpect(ErrCode::Value::IllegalPath);
5873
1.96k
    } else {
5874
1.96k
      std::string CPUName;
5875
#if defined(__riscv) && __riscv_xlen == 64
5876
      CPUName = "generic-rv64"s;
5877
#else
5878
1.96k
      if (!Conf.getCompilerConfigure().isGenericBinary()) {
5879
1.96k
        CPUName = LLVM::getHostCPUName().string_view();
5880
1.96k
      } else {
5881
0
        CPUName = "generic"s;
5882
0
      }
5883
1.96k
#endif
5884
5885
1.96k
      TM = LLVM::TargetMachine::create(
5886
1.96k
          TheTarget, Triple, CPUName.c_str(),
5887
1.96k
          LLVM::getHostCPUFeatures().unwrap(),
5888
1.96k
          toLLVMCodeGenLevel(
5889
1.96k
              Conf.getCompilerConfigure().getOptimizationLevel()),
5890
1.96k
          LLVMRelocPIC, LLVMCodeModelDefault);
5891
1.96k
    }
5892
5893
#if LLVM_VERSION_MAJOR >= 13
5894
    auto PBO = LLVM::PassBuilderOptions::create();
5895
    if (auto Error = PBO.runPasses(
5896
            LLModule,
5897
            toLLVMLevel(Conf.getCompilerConfigure().getOptimizationLevel()),
5898
            TM)) {
5899
      spdlog::error("{}"sv, Error.message().string_view());
5900
    }
5901
#else
5902
1.96k
    auto FP = LLVM::PassManager::createForModule(LLModule);
5903
1.96k
    auto MP = LLVM::PassManager::create();
5904
5905
1.96k
    TM.addAnalysisPasses(MP);
5906
1.96k
    TM.addAnalysisPasses(FP);
5907
1.96k
    {
5908
1.96k
      auto PMB = LLVM::PassManagerBuilder::create();
5909
1.96k
      auto [OptLevel, SizeLevel] =
5910
1.96k
          toLLVMLevel(Conf.getCompilerConfigure().getOptimizationLevel());
5911
1.96k
      PMB.setOptLevel(OptLevel);
5912
1.96k
      PMB.setSizeLevel(SizeLevel);
5913
1.96k
      PMB.populateFunctionPassManager(FP);
5914
1.96k
      PMB.populateModulePassManager(MP);
5915
1.96k
    }
5916
1.96k
    switch (Conf.getCompilerConfigure().getOptimizationLevel()) {
5917
0
    case CompilerConfigure::OptimizationLevel::O0:
5918
0
    case CompilerConfigure::OptimizationLevel::O1:
5919
0
      FP.addTailCallEliminationPass();
5920
0
      break;
5921
1.96k
    default:
5922
1.96k
      break;
5923
1.96k
    }
5924
5925
1.96k
    FP.initializeFunctionPassManager();
5926
21.2k
    for (auto Fn = LLModule.getFirstFunction(); Fn; Fn = Fn.getNextFunction()) {
5927
19.3k
      FP.runFunctionPassManager(Fn);
5928
19.3k
    }
5929
1.96k
    FP.finalizeFunctionPassManager();
5930
1.96k
    MP.runPassManager(LLModule);
5931
1.96k
#endif
5932
1.96k
  }
5933
5934
  // Set initializer for constant value
5935
1.96k
  if (auto IntrinsicsTable = LLModule.getNamedGlobal("intrinsics")) {
5936
1.12k
    IntrinsicsTable.setInitializer(
5937
1.12k
        LLVM::Value::getConstNull(IntrinsicsTable.getType()));
5938
1.12k
    IntrinsicsTable.setGlobalConstant(false);
5939
1.12k
  } else {
5940
841
    auto IntrinsicsTableTy = LLVM::Type::getArrayType(
5941
841
        LLContext.getInt8Ty().getPointerTo(),
5942
841
        static_cast<uint32_t>(Executable::Intrinsics::kIntrinsicMax));
5943
841
    LLModule.addGlobal(
5944
841
        IntrinsicsTableTy.getPointerTo(), false, LLVMExternalLinkage,
5945
841
        LLVM::Value::getConstNull(IntrinsicsTableTy), "intrinsics");
5946
841
  }
5947
5948
1.96k
  spdlog::info("optimize done"sv);
5949
1.96k
  return Expect<Data>{std::move(D)};
5950
1.96k
}
5951
5952
1.96k
void Compiler::compile(const AST::TypeSection &TypeSec) noexcept {
5953
1.96k
  auto WrapperTy =
5954
1.96k
      LLVM::Type::getFunctionType(Context->VoidTy,
5955
1.96k
                                  {Context->ExecCtxPtrTy, Context->Int8PtrTy,
5956
1.96k
                                   Context->Int8PtrTy, Context->Int8PtrTy},
5957
1.96k
                                  false);
5958
1.96k
  auto SubTypes = TypeSec.getContent();
5959
1.96k
  const auto Size = SubTypes.size();
5960
1.96k
  if (Size == 0) {
5961
110
    return;
5962
110
  }
5963
1.85k
  Context->CompositeTypes.reserve(Size);
5964
1.85k
  Context->FunctionWrappers.reserve(Size);
5965
5966
  // Iterate and compile types.
5967
5.69k
  for (size_t I = 0; I < Size; ++I) {
5968
3.84k
    const auto &CompType = SubTypes[I].getCompositeType();
5969
3.84k
    const auto Name = fmt::format("t{}"sv, Context->CompositeTypes.size());
5970
3.84k
    if (CompType.isFunc()) {
5971
      // Check function type is unique
5972
3.84k
      {
5973
3.84k
        bool Unique = true;
5974
13.8k
        for (size_t J = 0; J < I; ++J) {
5975
10.1k
          if (Context->CompositeTypes[J] &&
5976
10.1k
              Context->CompositeTypes[J]->isFunc()) {
5977
10.1k
            const auto &OldFuncType = Context->CompositeTypes[J]->getFuncType();
5978
10.1k
            if (OldFuncType == CompType.getFuncType()) {
5979
125
              Unique = false;
5980
125
              Context->CompositeTypes.push_back(Context->CompositeTypes[J]);
5981
125
              auto F = Context->FunctionWrappers[J];
5982
125
              Context->FunctionWrappers.push_back(F);
5983
125
              auto A = Context->LLModule.addAlias(WrapperTy, F, Name.c_str());
5984
125
              A.setLinkage(LLVMExternalLinkage);
5985
125
              A.setVisibility(LLVMProtectedVisibility);
5986
125
              A.setDSOLocal(true);
5987
125
              A.setDLLStorageClass(LLVMDLLExportStorageClass);
5988
125
              break;
5989
125
            }
5990
10.1k
          }
5991
10.1k
        }
5992
3.84k
        if (!Unique) {
5993
125
          continue;
5994
125
        }
5995
3.84k
      }
5996
5997
      // Create Wrapper
5998
3.72k
      auto F = Context->LLModule.addFunction(WrapperTy, LLVMExternalLinkage,
5999
3.72k
                                             Name.c_str());
6000
3.72k
      {
6001
3.72k
        F.setVisibility(LLVMProtectedVisibility);
6002
3.72k
        F.setDSOLocal(true);
6003
3.72k
        F.setDLLStorageClass(LLVMDLLExportStorageClass);
6004
3.72k
        F.addFnAttr(Context->NoStackArgProbe);
6005
3.72k
        F.addFnAttr(Context->StrictFP);
6006
3.72k
        F.addFnAttr(Context->UWTable);
6007
3.72k
        F.addParamAttr(0, Context->ReadOnly);
6008
3.72k
        F.addParamAttr(0, Context->NoAlias);
6009
3.72k
        F.addParamAttr(1, Context->NoAlias);
6010
3.72k
        F.addParamAttr(2, Context->NoAlias);
6011
3.72k
        F.addParamAttr(3, Context->NoAlias);
6012
6013
3.72k
        LLVM::Builder Builder(Context->LLContext);
6014
3.72k
        Builder.positionAtEnd(
6015
3.72k
            LLVM::BasicBlock::create(Context->LLContext, F, "entry"));
6016
6017
3.72k
        auto FTy = toLLVMType(Context->LLContext, Context->ExecCtxPtrTy,
6018
3.72k
                              CompType.getFuncType());
6019
3.72k
        auto RTy = FTy.getReturnType();
6020
3.72k
        std::vector<LLVM::Type> FPTy(FTy.getNumParams());
6021
3.72k
        FTy.getParamTypes(FPTy);
6022
6023
3.72k
        const size_t ArgCount = FPTy.size() - 1;
6024
3.72k
        auto ExecCtxPtr = F.getFirstParam();
6025
3.72k
        auto RawFunc = LLVM::FunctionCallee{
6026
3.72k
            FTy, Builder.createBitCast(ExecCtxPtr.getNextParam(),
6027
3.72k
                                       FTy.getPointerTo())};
6028
3.72k
        auto RawArgs = ExecCtxPtr.getNextParam().getNextParam();
6029
3.72k
        auto RawRets = RawArgs.getNextParam();
6030
6031
3.72k
        std::vector<LLVM::Value> Args;
6032
3.72k
        Args.reserve(FTy.getNumParams());
6033
3.72k
        Args.push_back(ExecCtxPtr);
6034
7.80k
        for (size_t J = 0; J < ArgCount; ++J) {
6035
4.08k
          Args.push_back(Builder.createValuePtrLoad(
6036
4.08k
              FPTy[J + 1], RawArgs, Context->Int8Ty, J * kValSize));
6037
4.08k
        }
6038
6039
3.72k
        auto Ret = Builder.createCall(RawFunc, Args);
6040
3.72k
        if (RTy.isVoidTy()) {
6041
          // nothing to do
6042
2.47k
        } else if (RTy.isStructTy()) {
6043
289
          auto Rets = unpackStruct(Builder, Ret);
6044
289
          Builder.createArrayPtrStore(Rets, RawRets, Context->Int8Ty, kValSize);
6045
2.18k
        } else {
6046
2.18k
          Builder.createValuePtrStore(Ret, RawRets, Context->Int8Ty);
6047
2.18k
        }
6048
3.72k
        Builder.createRetVoid();
6049
3.72k
      }
6050
      // Copy wrapper, param and return lists to module instance.
6051
3.72k
      Context->FunctionWrappers.push_back(F);
6052
3.72k
    } else {
6053
      // Non function type case. Create empty wrapper.
6054
0
      auto F = Context->LLModule.addFunction(WrapperTy, LLVMExternalLinkage,
6055
0
                                             Name.c_str());
6056
0
      {
6057
0
        F.setVisibility(LLVMProtectedVisibility);
6058
0
        F.setDSOLocal(true);
6059
0
        F.setDLLStorageClass(LLVMDLLExportStorageClass);
6060
0
        F.addFnAttr(Context->NoStackArgProbe);
6061
0
        F.addFnAttr(Context->StrictFP);
6062
0
        F.addFnAttr(Context->UWTable);
6063
0
        F.addParamAttr(0, Context->ReadOnly);
6064
0
        F.addParamAttr(0, Context->NoAlias);
6065
0
        F.addParamAttr(1, Context->NoAlias);
6066
0
        F.addParamAttr(2, Context->NoAlias);
6067
0
        F.addParamAttr(3, Context->NoAlias);
6068
6069
0
        LLVM::Builder Builder(Context->LLContext);
6070
0
        Builder.positionAtEnd(
6071
0
            LLVM::BasicBlock::create(Context->LLContext, F, "entry"));
6072
0
        Builder.createRetVoid();
6073
0
      }
6074
0
      Context->FunctionWrappers.push_back(F);
6075
0
    }
6076
3.72k
    Context->CompositeTypes.push_back(&CompType);
6077
3.72k
  }
6078
1.85k
}
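
Each function type t{N} gets a wrapper with the fixed signature void(ExecCtx*, function, args, rets): the wrapper loads every parameter from a fixed-stride slot in the args buffer (the kValSize stride above), calls the real function with the execution context prepended, and stores the results back into slots of the same stride. Below is a sketch of that calling convention for an (i32, i32) -> i32 type; all names and the 16-byte slot size are illustrative assumptions, not the real WasmEdge layout:

#include <cstddef>
#include <cstdint>
#include <cstring>

// Sketch of the wrapper ABI: arguments and results travel through arrays of
// fixed-size value slots, while the wrapped function uses a plain native
// signature with the execution context as its first parameter.
struct ExecutionContext; // opaque here
static constexpr std::size_t kSlotSize = 16; // assumed slot size

using RawFnTy = uint32_t (*)(ExecutionContext *, uint32_t, uint32_t);

static void WrapperI32I32ToI32(ExecutionContext *Ctx, void *Fn,
                               uint8_t *RawArgs, uint8_t *RawRets) {
  uint32_t A0 = 0, A1 = 0;
  std::memcpy(&A0, RawArgs + 0 * kSlotSize, sizeof(A0)); // argument slot 0
  std::memcpy(&A1, RawArgs + 1 * kSlotSize, sizeof(A1)); // argument slot 1
  const uint32_t R = reinterpret_cast<RawFnTy>(Fn)(Ctx, A0, A1);
  std::memcpy(RawRets + 0 * kSlotSize, &R, sizeof(R));   // result slot 0
}

int main() {
  uint8_t Args[2 * kSlotSize] = {}, Rets[kSlotSize] = {};
  const uint32_t A = 40, B = 2;
  std::memcpy(Args + 0 * kSlotSize, &A, sizeof(A));
  std::memcpy(Args + 1 * kSlotSize, &B, sizeof(B));
  const RawFnTy Add =
      +[](ExecutionContext *, uint32_t X, uint32_t Y) { return X + Y; };
  WrapperI32I32ToI32(nullptr, reinterpret_cast<void *>(Add), Args, Rets);
  uint32_t R = 0;
  std::memcpy(&R, Rets, sizeof(R));
  return R == 42 ? 0 : 1;
}
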
6079
6080
1.96k
void Compiler::compile(const AST::ImportSection &ImportSec) noexcept {
6081
  // Iterate and compile import descriptions.
6082
1.96k
  for (const auto &ImpDesc : ImportSec.getContent()) {
6083
    // Get data from import description.
6084
364
    const auto &ExtType = ImpDesc.getExternalType();
6085
6086
    // Add the imports into module instance.
6087
364
    switch (ExtType) {
6088
267
    case ExternalType::Function: // Function type index
6089
267
    {
6090
267
      const auto FuncID = static_cast<uint32_t>(Context->Functions.size());
6091
      // Get the function type index in module.
6092
267
      uint32_t TypeIdx = ImpDesc.getExternalFuncTypeIdx();
6093
267
      assuming(TypeIdx < Context->CompositeTypes.size());
6094
267
      assuming(Context->CompositeTypes[TypeIdx]->isFunc());
6095
267
      const auto &FuncType = Context->CompositeTypes[TypeIdx]->getFuncType();
6096
267
      auto FTy =
6097
267
          toLLVMType(Context->LLContext, Context->ExecCtxPtrTy, FuncType);
6098
267
      auto RTy = FTy.getReturnType();
6099
267
      auto F = LLVM::FunctionCallee{
6100
267
          FTy,
6101
267
          Context->LLModule.addFunction(FTy, LLVMInternalLinkage,
6102
267
                                        fmt::format("f{}"sv, FuncID).c_str())};
6103
267
      F.Fn.setDSOLocal(true);
6104
267
      F.Fn.addFnAttr(Context->NoStackArgProbe);
6105
267
      F.Fn.addFnAttr(Context->StrictFP);
6106
267
      F.Fn.addFnAttr(Context->UWTable);
6107
267
      F.Fn.addParamAttr(0, Context->ReadOnly);
6108
267
      F.Fn.addParamAttr(0, Context->NoAlias);
6109
6110
267
      LLVM::Builder Builder(Context->LLContext);
6111
267
      Builder.positionAtEnd(
6112
267
          LLVM::BasicBlock::create(Context->LLContext, F.Fn, "entry"));
6113
6114
267
      const auto ArgSize = FuncType.getParamTypes().size();
6115
267
      const auto RetSize =
6116
267
          RTy.isVoidTy() ? 0 : FuncType.getReturnTypes().size();
6117
6118
267
      LLVM::Value Args = Builder.createArray(ArgSize, kValSize);
6119
267
      LLVM::Value Rets = Builder.createArray(RetSize, kValSize);
6120
6121
267
      auto Arg = F.Fn.getFirstParam();
6122
391
      for (unsigned I = 0; I < ArgSize; ++I) {
6123
124
        Arg = Arg.getNextParam();
6124
124
        Builder.createValuePtrStore(Arg, Args, Context->Int8Ty, I * kValSize);
6125
124
      }
6126
6127
267
      Builder.createCall(
6128
267
          Context->getIntrinsic(
6129
267
              Builder, Executable::Intrinsics::kCall,
6130
267
              LLVM::Type::getFunctionType(
6131
267
                  Context->VoidTy,
6132
267
                  {Context->Int32Ty, Context->Int8PtrTy, Context->Int8PtrTy},
6133
267
                  false)),
6134
267
          {Context->LLContext.getInt32(FuncID), Args, Rets});
6135
6136
267
      if (RetSize == 0) {
6137
152
        Builder.createRetVoid();
6138
152
      } else if (RetSize == 1) {
6139
87
        Builder.createRet(
6140
87
            Builder.createValuePtrLoad(RTy, Rets, Context->Int8Ty));
6141
87
      } else {
6142
28
        Builder.createAggregateRet(Builder.createArrayPtrLoad(
6143
28
            RetSize, RTy, Rets, Context->Int8Ty, kValSize));
6144
28
      }
6145
6146
267
      Context->Functions.emplace_back(TypeIdx, F, nullptr);
6147
267
      break;
6148
267
    }
6149
50
    case ExternalType::Table: // Table type
6150
50
    {
6151
      // Nothing to do.
6152
50
      break;
6153
267
    }
6154
9
    case ExternalType::Memory: // Memory type
6155
9
    {
6156
      // Nothing to do.
6157
9
      break;
6158
267
    }
6159
38
    case ExternalType::Global: // Global type
6160
38
    {
6161
      // Get global type. External type checked in validation.
6162
38
      const auto &GlobType = ImpDesc.getExternalGlobalType();
6163
38
      const auto &ValType = GlobType.getValType();
6164
38
      auto Type = toLLVMType(Context->LLContext, ValType);
6165
38
      Context->Globals.push_back(Type);
6166
38
      break;
6167
267
    }
6168
0
    default:
6169
0
      break;
6170
364
    }
6171
364
  }
6172
1.96k
}
6173
6174
1.96k
void Compiler::compile(const AST::ExportSection &) noexcept {}
6175
6176
1.96k
void Compiler::compile(const AST::GlobalSection &GlobalSec) noexcept {
6177
1.96k
  for (const auto &GlobalSeg : GlobalSec.getContent()) {
6178
109
    const auto &ValType = GlobalSeg.getGlobalType().getValType();
6179
109
    auto Type = toLLVMType(Context->LLContext, ValType);
6180
109
    Context->Globals.push_back(Type);
6181
109
  }
6182
1.96k
}
6183
6184
void Compiler::compile(const AST::MemorySection &,
6185
1.96k
                       const AST::DataSection &) noexcept {}
6186
6187
void Compiler::compile(const AST::TableSection &,
6188
1.96k
                       const AST::ElementSection &) noexcept {}
6189
6190
void Compiler::compile(const AST::FunctionSection &FuncSec,
6191
1.96k
                       const AST::CodeSection &CodeSec) noexcept {
6192
1.96k
  const auto &TypeIdxs = FuncSec.getContent();
6193
1.96k
  const auto &CodeSegs = CodeSec.getContent();
6194
1.96k
  if (TypeIdxs.size() == 0 || CodeSegs.size() == 0) {
6195
188
    return;
6196
188
  }
6197
6198
11.1k
  for (size_t I = 0; I < TypeIdxs.size() && I < CodeSegs.size(); ++I) {
6199
9.34k
    const auto &TypeIdx = TypeIdxs[I];
6200
9.34k
    const auto &Code = CodeSegs[I];
6201
9.34k
    assuming(TypeIdx < Context->CompositeTypes.size());
6202
9.34k
    assuming(Context->CompositeTypes[TypeIdx]->isFunc());
6203
9.34k
    const auto &FuncType = Context->CompositeTypes[TypeIdx]->getFuncType();
6204
9.34k
    const auto FuncID = Context->Functions.size();
6205
9.34k
    auto FTy = toLLVMType(Context->LLContext, Context->ExecCtxPtrTy, FuncType);
6206
9.34k
    LLVM::FunctionCallee F = {FTy, Context->LLModule.addFunction(
6207
9.34k
                                       FTy, LLVMExternalLinkage,
6208
9.34k
                                       fmt::format("f{}"sv, FuncID).c_str())};
6209
9.34k
    F.Fn.setVisibility(LLVMProtectedVisibility);
6210
9.34k
    F.Fn.setDSOLocal(true);
6211
9.34k
    F.Fn.setDLLStorageClass(LLVMDLLExportStorageClass);
6212
9.34k
    F.Fn.addFnAttr(Context->NoStackArgProbe);
6213
9.34k
    F.Fn.addFnAttr(Context->StrictFP);
6214
9.34k
    F.Fn.addFnAttr(Context->UWTable);
6215
9.34k
    F.Fn.addParamAttr(0, Context->ReadOnly);
6216
9.34k
    F.Fn.addParamAttr(0, Context->NoAlias);
6217
6218
9.34k
    Context->Functions.emplace_back(TypeIdx, F, &Code);
6219
9.34k
  }
6220
6221
9.45k
  for (auto [T, F, Code] : Context->Functions) {
6222
9.45k
    if (!Code) {
6223
107
      continue;
6224
107
    }
6225
6226
9.34k
    std::vector<ValType> Locals;
6227
9.34k
    for (const auto &Local : Code->getLocals()) {
6228
2.65M
      for (unsigned I = 0; I < Local.first; ++I) {
6229
2.65M
        Locals.push_back(Local.second);
6230
2.65M
      }
6231
1.42k
    }
6232
9.34k
    FunctionCompiler FC(*Context, F, Locals,
6233
9.34k
                        Conf.getCompilerConfigure().isInterruptible(),
6234
9.34k
                        Conf.getStatisticsConfigure().isInstructionCounting(),
6235
9.34k
                        Conf.getStatisticsConfigure().isCostMeasuring());
6236
9.34k
    auto Type = Context->resolveBlockType(T);
6237
9.34k
    FC.compile(*Code, std::move(Type));
6238
9.34k
    F.Fn.eliminateUnreachableBlocks();
6239
9.34k
  }
6240
1.77k
}
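
The two loops above first declare every function (so calls can be resolved by index) and only then compile each body; locals arrive run-length encoded as (count, type) pairs and are flattened before FunctionCompiler runs. A tiny illustration of that expansion (plain C++, not WasmEdge code):

#include <cstdio>
#include <utility>
#include <vector>

// A Wasm code segment encodes its locals as (count, type) runs; the compiler
// expands them into one entry per local before emitting the function body.
enum class ValKind { I32, I64, F32, F64 };

int main() {
  const std::vector<std::pair<unsigned, ValKind>> Encoded{{2, ValKind::I32},
                                                          {1, ValKind::F64}};
  std::vector<ValKind> Locals;
  for (const auto &Run : Encoded) {
    for (unsigned I = 0; I < Run.first; ++I) {
      Locals.push_back(Run.second);
    }
  }
  std::printf("%zu locals\n", Locals.size()); // prints "3 locals"
}
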
6241
6242
} // namespace LLVM
6243
} // namespace WasmEdge