/src/WasmEdge/lib/executor/engine/engine.cpp
Line | Count | Source |
1 | | // SPDX-License-Identifier: Apache-2.0 |
2 | | // SPDX-FileCopyrightText: 2019-2024 Second State INC |
3 | | |
4 | | #include "common/endian.h" |
5 | | #include "executor/coredump.h" |
6 | | #include "executor/executor.h" |
7 | | #include "system/stacktrace.h" |
8 | | |
9 | | #include <array> |
10 | | #include <cstdint> |
11 | | #include <cstring> |
12 | | |
13 | | using namespace std::literals; |
14 | | |
15 | | namespace WasmEdge { |
16 | | namespace Executor { |
17 | | |
18 | | Expect<void> Executor::runExpression(Runtime::StackManager &StackMgr, |
19 | 0 | AST::InstrView Instrs) { |
20 | 0 | return execute(StackMgr, Instrs.begin(), Instrs.end()); |
21 | 0 | } |
22 | | |
23 | | Expect<void> |
24 | | Executor::runFunction(Runtime::StackManager &StackMgr, |
25 | | const Runtime::Instance::FunctionInstance &Func, |
26 | 0 | Span<const ValVariant> Params) { |
27 | | // Set start time. |
28 | 0 | if (Stat && Conf.getStatisticsConfigure().isTimeMeasuring()) { |
29 | 0 | Stat->startRecordWasm(); |
30 | 0 | } |
31 | | |
32 | | // Reset and push a dummy frame onto the stack. |
33 | 0 | StackMgr.pushFrame(nullptr, AST::InstrView::iterator(), 0, 0); |
34 | | |
35 | | // Push arguments. |
36 | 0 | const auto &PTypes = Func.getFuncType().getParamTypes(); |
37 | 0 | for (uint32_t I = 0; I < Params.size(); I++) { |
38 | | // For references, transform to the non-null reference type if the value |
39 | | // is not null. |
40 | 0 | if (PTypes[I].isRefType() && Params[I].get<RefVariant>().getPtr<void>() && |
41 | 0 | Params[I].get<RefVariant>().getType().isNullableRefType()) { |
42 | 0 | auto Val = Params[I]; |
43 | 0 | Val.get<RefVariant>().getType().toNonNullableRef(); |
44 | 0 | StackMgr.push(Val); |
45 | 0 | } else { |
46 | 0 | StackMgr.push(Params[I]); |
47 | 0 | } |
48 | 0 | } |
49 | | |
50 | | // Enter and execute function. |
51 | 0 | Expect<void> Res = |
52 | 0 | enterFunction(StackMgr, Func, Func.getInstrs().end()) |
53 | 0 | .and_then([&](AST::InstrView::iterator StartIt) { |
54 | | // If not terminated, execute the instructions in interpreter mode. |
55 | | // When entering AOT or host functions, `StartIt` is equal to the end |
56 | | // of the instruction list, therefore the execution will return |
57 | | // immediately. |
58 | 0 | return execute(StackMgr, StartIt, Func.getInstrs().end()); |
59 | 0 | }); |
60 | |
61 | 0 | if (Res) { |
62 | 0 | spdlog::debug(" Execution succeeded."sv); |
63 | 0 | } else if (likely(Res.error() == ErrCode::Value::Terminated)) { |
64 | 0 | spdlog::debug(" Terminated."sv); |
65 | 0 | } |
66 | |
67 | 0 | if (Stat && Conf.getStatisticsConfigure().isTimeMeasuring()) { |
68 | 0 | Stat->stopRecordWasm(); |
69 | 0 | } |
70 | | |
71 | | // If Statistics is enabled, dump it to the log here. |
72 | 0 | if (Stat) { |
73 | 0 | Stat->dumpToLog(Conf); |
74 | 0 | } |
75 | |
76 | 0 | if (!Res && likely(Res.error() == ErrCode::Value::Terminated)) { |
77 | 0 | StackMgr.reset(); |
78 | 0 | } |
79 | |
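// A `Terminated` error is treated as a graceful stop rather than a trap:
// the stack is reset here, but the error code is still returned to the
// caller below so it can distinguish termination from normal success.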
80 | 0 | return Res; |
81 | 0 | } |
82 | | |
83 | | Expect<void> Executor::execute(Runtime::StackManager &StackMgr, |
84 | | const AST::InstrView::iterator Start, |
85 | 0 | const AST::InstrView::iterator End) { |
86 | 0 | AST::InstrView::iterator PC = Start; |
87 | 0 | AST::InstrView::iterator PCEnd = End; |
88 | |
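// `Dispatch` handles exactly one instruction at `PC`: each opcode case
// below either delegates to a run*Op helper or manipulates the stack and
// `PC` inline. The recurring pattern for binary numeric opcodes is "pop the
// right-hand operand, then combine it in place with the new stack top",
// e.g. (the same shape used throughout this switch):
//
//   ValVariant Rhs = StackMgr.pop();
//   return runAddOp<uint32_t>(StackMgr.getTop(), Rhs);
//
// Failures are reported as Unexpect(...) and propagate to the caller.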
89 | 0 | auto Dispatch = [this, &PC, &StackMgr]() -> Expect<void> { |
90 | 0 | const AST::Instruction &Instr = *PC; |
91 | 0 | switch (Instr.getOpCode()) { |
92 | | // Control instructions |
93 | 0 | case OpCode::Unreachable: |
94 | 0 | spdlog::error(ErrCode::Value::Unreachable); |
95 | 0 | spdlog::error( |
96 | 0 | ErrInfo::InfoInstruction(Instr.getOpCode(), Instr.getOffset())); |
97 | 0 | return Unexpect(ErrCode::Value::Unreachable); |
98 | 0 | case OpCode::Nop: |
99 | 0 | return {}; |
100 | 0 | case OpCode::Block: |
101 | 0 | return {}; |
102 | 0 | case OpCode::Loop: |
103 | 0 | return {}; |
104 | 0 | case OpCode::If: |
105 | 0 | return runIfElseOp(StackMgr, Instr, PC); |
106 | 0 | case OpCode::Else: |
107 | 0 | if (Stat && Conf.getStatisticsConfigure().isCostMeasuring()) { |
108 | | // Reaching here means the end of the if-statement. |
109 | 0 | if (unlikely(!Stat->subInstrCost(Instr.getOpCode()))) { |
110 | 0 | spdlog::error(ErrCode::Value::CostLimitExceeded); |
111 | 0 | spdlog::error( |
112 | 0 | ErrInfo::InfoInstruction(Instr.getOpCode(), Instr.getOffset())); |
113 | 0 | return Unexpect(ErrCode::Value::CostLimitExceeded); |
114 | 0 | } |
115 | 0 | if (unlikely(!Stat->addInstrCost(OpCode::End))) { |
116 | 0 | spdlog::error(ErrCode::Value::CostLimitExceeded); |
117 | 0 | spdlog::error( |
118 | 0 | ErrInfo::InfoInstruction(Instr.getOpCode(), Instr.getOffset())); |
119 | 0 | return Unexpect(ErrCode::Value::CostLimitExceeded); |
120 | 0 | } |
121 | 0 | } |
122 | 0 | PC += PC->getJumpEnd() - 1; |
123 | 0 | return {}; |
124 | 0 | case OpCode::End: |
125 | 0 | PC = StackMgr.maybePopFrameOrHandler(PC); |
126 | 0 | return {}; |
127 | | // LEGACY-EH: remove the `Try` cases after deprecating legacy EH. |
128 | 0 | case OpCode::Try: |
129 | 0 | return runTryTableOp(StackMgr, Instr, PC); |
130 | 0 | case OpCode::Throw: |
131 | 0 | return runThrowOp(StackMgr, Instr, PC); |
132 | 0 | case OpCode::Throw_ref: |
133 | 0 | return runThrowRefOp(StackMgr, Instr, PC); |
134 | 0 | case OpCode::Br: |
135 | 0 | return runBrOp(StackMgr, Instr, PC); |
136 | 0 | case OpCode::Br_if: |
137 | 0 | return runBrIfOp(StackMgr, Instr, PC); |
138 | 0 | case OpCode::Br_table: |
139 | 0 | return runBrTableOp(StackMgr, Instr, PC); |
140 | 0 | case OpCode::Br_on_null: |
141 | 0 | return runBrOnNullOp(StackMgr, Instr, PC); |
142 | 0 | case OpCode::Br_on_non_null: |
143 | 0 | return runBrOnNonNullOp(StackMgr, Instr, PC); |
144 | 0 | case OpCode::Br_on_cast: |
145 | 0 | return runBrOnCastOp(StackMgr, Instr, PC); |
146 | 0 | case OpCode::Br_on_cast_fail: |
147 | 0 | return runBrOnCastOp(StackMgr, Instr, PC, true); |
148 | 0 | case OpCode::Return: |
149 | 0 | return runReturnOp(StackMgr, PC); |
150 | 0 | case OpCode::Call: |
151 | 0 | return runCallOp(StackMgr, Instr, PC); |
152 | 0 | case OpCode::Call_indirect: |
153 | 0 | return runCallIndirectOp(StackMgr, Instr, PC); |
154 | 0 | case OpCode::Return_call: |
155 | 0 | return runCallOp(StackMgr, Instr, PC, true); |
156 | 0 | case OpCode::Return_call_indirect: |
157 | 0 | return runCallIndirectOp(StackMgr, Instr, PC, true); |
158 | 0 | case OpCode::Call_ref: |
159 | 0 | return runCallRefOp(StackMgr, Instr, PC); |
160 | 0 | case OpCode::Return_call_ref: |
161 | 0 | return runCallRefOp(StackMgr, Instr, PC, true); |
162 | | // LEGACY-EH: remove the `Catch` cases after deprecating legacy EH. |
163 | 0 | case OpCode::Catch: |
164 | 0 | case OpCode::Catch_all: |
165 | 0 | PC -= Instr.getCatchLegacy().CatchPCOffset; |
166 | 0 | PC += PC->getTryCatch().JumpEnd; |
167 | 0 | return {}; |
168 | 0 | case OpCode::Try_table: |
169 | 0 | return runTryTableOp(StackMgr, Instr, PC); |
170 | | |
171 | | // Reference Instructions |
172 | 0 | case OpCode::Ref__null: |
173 | 0 | return runRefNullOp(StackMgr, Instr.getValType()); |
174 | 0 | case OpCode::Ref__is_null: |
175 | 0 | return runRefIsNullOp(StackMgr.getTop()); |
176 | 0 | case OpCode::Ref__func: |
177 | 0 | return runRefFuncOp(StackMgr, Instr.getTargetIndex()); |
178 | 0 | case OpCode::Ref__eq: { |
179 | 0 | ValVariant Rhs = StackMgr.pop(); |
180 | 0 | return runRefEqOp(StackMgr.getTop(), Rhs); |
181 | 0 | } |
182 | 0 | case OpCode::Ref__as_non_null: |
183 | 0 | return runRefAsNonNullOp(StackMgr.getTop().get<RefVariant>(), Instr); |
184 | | |
185 | | // Reference Instructions (GC proposal) |
186 | 0 | case OpCode::Struct__new: |
187 | 0 | return runStructNewOp(StackMgr, Instr.getTargetIndex()); |
188 | 0 | case OpCode::Struct__new_default: |
189 | 0 | return runStructNewOp(StackMgr, Instr.getTargetIndex(), true); |
190 | 0 | case OpCode::Struct__get: |
191 | 0 | case OpCode::Struct__get_u: |
192 | 0 | return runStructGetOp(StackMgr, Instr.getTargetIndex(), |
193 | 0 | Instr.getSourceIndex(), Instr); |
194 | 0 | case OpCode::Struct__get_s: |
195 | 0 | return runStructGetOp(StackMgr, Instr.getTargetIndex(), |
196 | 0 | Instr.getSourceIndex(), Instr, true); |
197 | 0 | case OpCode::Struct__set: |
198 | 0 | return runStructSetOp(StackMgr, StackMgr.pop(), Instr.getTargetIndex(), |
199 | 0 | Instr.getSourceIndex(), Instr); |
200 | 0 | case OpCode::Array__new: |
201 | 0 | return runArrayNewOp(StackMgr, Instr.getTargetIndex(), 1, |
202 | 0 | StackMgr.pop().get<uint32_t>()); |
203 | 0 | case OpCode::Array__new_default: |
204 | 0 | return runArrayNewOp(StackMgr, Instr.getTargetIndex(), 0, |
205 | 0 | StackMgr.pop().get<uint32_t>()); |
206 | 0 | case OpCode::Array__new_fixed: |
207 | 0 | return runArrayNewOp(StackMgr, Instr.getTargetIndex(), |
208 | 0 | Instr.getSourceIndex(), Instr.getSourceIndex()); |
209 | 0 | case OpCode::Array__new_data: |
210 | 0 | return runArrayNewDataOp(StackMgr, Instr.getTargetIndex(), |
211 | 0 | Instr.getSourceIndex(), Instr); |
212 | 0 | case OpCode::Array__new_elem: |
213 | 0 | return runArrayNewElemOp(StackMgr, Instr.getTargetIndex(), |
214 | 0 | Instr.getSourceIndex(), Instr); |
215 | 0 | case OpCode::Array__get: |
216 | 0 | case OpCode::Array__get_u: |
217 | 0 | return runArrayGetOp(StackMgr, Instr.getTargetIndex(), Instr); |
218 | 0 | case OpCode::Array__get_s: |
219 | 0 | return runArrayGetOp(StackMgr, Instr.getTargetIndex(), Instr, true); |
220 | 0 | case OpCode::Array__set: |
221 | 0 | return runArraySetOp(StackMgr, StackMgr.pop(), Instr.getTargetIndex(), |
222 | 0 | Instr); |
223 | 0 | case OpCode::Array__len: |
224 | 0 | return runArrayLenOp(StackMgr.getTop(), Instr); |
225 | 0 | case OpCode::Array__fill: { |
226 | 0 | const uint32_t Cnt = StackMgr.pop().get<uint32_t>(); |
227 | 0 | return runArrayFillOp(StackMgr, Cnt, StackMgr.pop(), |
228 | 0 | Instr.getTargetIndex(), Instr); |
229 | 0 | } |
230 | 0 | case OpCode::Array__copy: |
231 | 0 | return runArrayCopyOp(StackMgr, StackMgr.pop().get<uint32_t>(), |
232 | 0 | Instr.getTargetIndex(), Instr.getSourceIndex(), |
233 | 0 | Instr); |
234 | 0 | case OpCode::Array__init_data: |
235 | 0 | return runArrayInitDataOp(StackMgr, StackMgr.pop().get<uint32_t>(), |
236 | 0 | Instr.getTargetIndex(), Instr.getSourceIndex(), |
237 | 0 | Instr); |
238 | 0 | case OpCode::Array__init_elem: |
239 | 0 | return runArrayInitElemOp(StackMgr, StackMgr.pop().get<uint32_t>(), |
240 | 0 | Instr.getTargetIndex(), Instr.getSourceIndex(), |
241 | 0 | Instr); |
242 | 0 | case OpCode::Ref__test: |
243 | 0 | case OpCode::Ref__test_null: |
244 | 0 | return runRefTestOp(StackMgr.getModule(), StackMgr.getTop(), Instr); |
245 | 0 | case OpCode::Ref__cast: |
246 | 0 | case OpCode::Ref__cast_null: |
247 | 0 | return runRefTestOp(StackMgr.getModule(), StackMgr.getTop(), Instr, true); |
248 | 0 | case OpCode::Any__convert_extern: |
249 | 0 | return runRefConvOp(StackMgr.getTop().get<RefVariant>(), |
250 | 0 | TypeCode::AnyRef); |
251 | 0 | case OpCode::Extern__convert_any: |
252 | 0 | return runRefConvOp(StackMgr.getTop().get<RefVariant>(), |
253 | 0 | TypeCode::ExternRef); |
254 | 0 | case OpCode::Ref__i31: |
255 | 0 | return runRefI31Op(StackMgr.getTop()); |
256 | 0 | case OpCode::I31__get_s: |
257 | 0 | return runI31GetOp(StackMgr.getTop(), Instr, true); |
258 | 0 | case OpCode::I31__get_u: |
259 | 0 | return runI31GetOp(StackMgr.getTop(), Instr); |
260 | | |
261 | | // Parametric Instructions |
262 | 0 | case OpCode::Drop: |
263 | 0 | StackMgr.pop(); |
264 | 0 | return {}; |
265 | 0 | case OpCode::Select: |
266 | 0 | case OpCode::Select_t: { |
267 | | // Pop the i32 condition value and the two candidate values from the stack. |
268 | 0 | ValVariant CondVal = StackMgr.pop(); |
269 | 0 | ValVariant Val2 = StackMgr.pop(); |
270 | 0 | ValVariant Val1 = StackMgr.pop(); |
271 | | |
272 | | // Select the value. |
273 | 0 | if (CondVal.get<uint32_t>() == 0) { |
274 | 0 | StackMgr.push(Val2); |
275 | 0 | } else { |
276 | 0 | StackMgr.push(Val1); |
277 | 0 | } |
278 | 0 | return {}; |
279 | 0 | } |
280 | | |
281 | | // Variable Instructions |
282 | 0 | case OpCode::Local__get: |
283 | 0 | return runLocalGetOp(StackMgr, Instr.getStackOffset()); |
284 | 0 | case OpCode::Local__set: |
285 | 0 | return runLocalSetOp(StackMgr, Instr.getStackOffset()); |
286 | 0 | case OpCode::Local__tee: |
287 | 0 | return runLocalTeeOp(StackMgr, Instr.getStackOffset()); |
288 | 0 | case OpCode::Global__get: |
289 | 0 | return runGlobalGetOp(StackMgr, Instr.getTargetIndex()); |
290 | 0 | case OpCode::Global__set: |
291 | 0 | return runGlobalSetOp(StackMgr, Instr.getTargetIndex()); |
292 | | |
293 | | // Table Instructions |
294 | 0 | case OpCode::Table__get: |
295 | 0 | return runTableGetOp( |
296 | 0 | StackMgr, *getTabInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
297 | 0 | case OpCode::Table__set: |
298 | 0 | return runTableSetOp( |
299 | 0 | StackMgr, *getTabInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
300 | 0 | case OpCode::Table__init: |
301 | 0 | return runTableInitOp( |
302 | 0 | StackMgr, *getTabInstByIdx(StackMgr, Instr.getTargetIndex()), |
303 | 0 | *getElemInstByIdx(StackMgr, Instr.getSourceIndex()), Instr); |
304 | 0 | case OpCode::Elem__drop: |
305 | 0 | return runElemDropOp(*getElemInstByIdx(StackMgr, Instr.getTargetIndex())); |
306 | 0 | case OpCode::Table__copy: |
307 | 0 | return runTableCopyOp( |
308 | 0 | StackMgr, *getTabInstByIdx(StackMgr, Instr.getTargetIndex()), |
309 | 0 | *getTabInstByIdx(StackMgr, Instr.getSourceIndex()), Instr); |
310 | 0 | case OpCode::Table__grow: |
311 | 0 | return runTableGrowOp(StackMgr, |
312 | 0 | *getTabInstByIdx(StackMgr, Instr.getTargetIndex())); |
313 | 0 | case OpCode::Table__size: |
314 | 0 | return runTableSizeOp(StackMgr, |
315 | 0 | *getTabInstByIdx(StackMgr, Instr.getTargetIndex())); |
316 | 0 | case OpCode::Table__fill: |
317 | 0 | return runTableFillOp( |
318 | 0 | StackMgr, *getTabInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
319 | | |
320 | | // Memory Instructions |
321 | 0 | case OpCode::I32__load: |
322 | 0 | return runLoadOp<uint32_t>( |
323 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
324 | 0 | case OpCode::I64__load: |
325 | 0 | return runLoadOp<uint64_t>( |
326 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
327 | 0 | case OpCode::F32__load: |
328 | 0 | return runLoadOp<float>( |
329 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
330 | 0 | case OpCode::F64__load: |
331 | 0 | return runLoadOp<double>( |
332 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
333 | 0 | case OpCode::I32__load8_s: |
334 | 0 | return runLoadOp<int32_t, 8>( |
335 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
336 | 0 | case OpCode::I32__load8_u: |
337 | 0 | return runLoadOp<uint32_t, 8>( |
338 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
339 | 0 | case OpCode::I32__load16_s: |
340 | 0 | return runLoadOp<int32_t, 16>( |
341 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
342 | 0 | case OpCode::I32__load16_u: |
343 | 0 | return runLoadOp<uint32_t, 16>( |
344 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
345 | 0 | case OpCode::I64__load8_s: |
346 | 0 | return runLoadOp<int64_t, 8>( |
347 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
348 | 0 | case OpCode::I64__load8_u: |
349 | 0 | return runLoadOp<uint64_t, 8>( |
350 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
351 | 0 | case OpCode::I64__load16_s: |
352 | 0 | return runLoadOp<int64_t, 16>( |
353 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
354 | 0 | case OpCode::I64__load16_u: |
355 | 0 | return runLoadOp<uint64_t, 16>( |
356 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
357 | 0 | case OpCode::I64__load32_s: |
358 | 0 | return runLoadOp<int64_t, 32>( |
359 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
360 | 0 | case OpCode::I64__load32_u: |
361 | 0 | return runLoadOp<uint64_t, 32>( |
362 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
363 | 0 | case OpCode::I32__store: |
364 | 0 | return runStoreOp<uint32_t>( |
365 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
366 | 0 | case OpCode::I64__store: |
367 | 0 | return runStoreOp<uint64_t>( |
368 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
369 | 0 | case OpCode::F32__store: |
370 | 0 | return runStoreOp<float>( |
371 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
372 | 0 | case OpCode::F64__store: |
373 | 0 | return runStoreOp<double>( |
374 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
375 | 0 | case OpCode::I32__store8: |
376 | 0 | return runStoreOp<uint32_t, 8>( |
377 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
378 | 0 | case OpCode::I32__store16: |
379 | 0 | return runStoreOp<uint32_t, 16>( |
380 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
381 | 0 | case OpCode::I64__store8: |
382 | 0 | return runStoreOp<uint64_t, 8>( |
383 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
384 | 0 | case OpCode::I64__store16: |
385 | 0 | return runStoreOp<uint64_t, 16>( |
386 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
387 | 0 | case OpCode::I64__store32: |
388 | 0 | return runStoreOp<uint64_t, 32>( |
389 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
390 | 0 | case OpCode::Memory__grow: |
391 | 0 | return runMemoryGrowOp( |
392 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex())); |
393 | 0 | case OpCode::Memory__size: |
394 | 0 | return runMemorySizeOp( |
395 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex())); |
396 | 0 | case OpCode::Memory__init: |
397 | 0 | return runMemoryInitOp( |
398 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), |
399 | 0 | *getDataInstByIdx(StackMgr, Instr.getSourceIndex()), Instr); |
400 | 0 | case OpCode::Data__drop: |
401 | 0 | return runDataDropOp(*getDataInstByIdx(StackMgr, Instr.getTargetIndex())); |
402 | 0 | case OpCode::Memory__copy: |
403 | 0 | return runMemoryCopyOp( |
404 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), |
405 | 0 | *getMemInstByIdx(StackMgr, Instr.getSourceIndex()), Instr); |
406 | 0 | case OpCode::Memory__fill: |
407 | 0 | return runMemoryFillOp( |
408 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
409 | | |
410 | | // Const Numeric Instructions |
411 | 0 | case OpCode::I32__const: |
412 | 0 | case OpCode::I64__const: |
413 | 0 | case OpCode::F32__const: |
414 | 0 | case OpCode::F64__const: |
415 | 0 | StackMgr.push(Instr.getNum()); |
416 | 0 | return {}; |
417 | | |
418 | | // Unary Numeric Instructions |
419 | 0 | case OpCode::I32__eqz: |
420 | 0 | return runEqzOp<uint32_t>(StackMgr.getTop()); |
421 | 0 | case OpCode::I64__eqz: |
422 | 0 | return runEqzOp<uint64_t>(StackMgr.getTop()); |
423 | 0 | case OpCode::I32__clz: |
424 | 0 | return runClzOp<uint32_t>(StackMgr.getTop()); |
425 | 0 | case OpCode::I32__ctz: |
426 | 0 | return runCtzOp<uint32_t>(StackMgr.getTop()); |
427 | 0 | case OpCode::I32__popcnt: |
428 | 0 | return runPopcntOp<uint32_t>(StackMgr.getTop()); |
429 | 0 | case OpCode::I64__clz: |
430 | 0 | return runClzOp<uint64_t>(StackMgr.getTop()); |
431 | 0 | case OpCode::I64__ctz: |
432 | 0 | return runCtzOp<uint64_t>(StackMgr.getTop()); |
433 | 0 | case OpCode::I64__popcnt: |
434 | 0 | return runPopcntOp<uint64_t>(StackMgr.getTop()); |
435 | 0 | case OpCode::F32__abs: |
436 | 0 | return runAbsOp<float>(StackMgr.getTop()); |
437 | 0 | case OpCode::F32__neg: |
438 | 0 | return runNegOp<float>(StackMgr.getTop()); |
439 | 0 | case OpCode::F32__ceil: |
440 | 0 | return runCeilOp<float>(StackMgr.getTop()); |
441 | 0 | case OpCode::F32__floor: |
442 | 0 | return runFloorOp<float>(StackMgr.getTop()); |
443 | 0 | case OpCode::F32__trunc: |
444 | 0 | return runTruncOp<float>(StackMgr.getTop()); |
445 | 0 | case OpCode::F32__nearest: |
446 | 0 | return runNearestOp<float>(StackMgr.getTop()); |
447 | 0 | case OpCode::F32__sqrt: |
448 | 0 | return runSqrtOp<float>(StackMgr.getTop()); |
449 | 0 | case OpCode::F64__abs: |
450 | 0 | return runAbsOp<double>(StackMgr.getTop()); |
451 | 0 | case OpCode::F64__neg: |
452 | 0 | return runNegOp<double>(StackMgr.getTop()); |
453 | 0 | case OpCode::F64__ceil: |
454 | 0 | return runCeilOp<double>(StackMgr.getTop()); |
455 | 0 | case OpCode::F64__floor: |
456 | 0 | return runFloorOp<double>(StackMgr.getTop()); |
457 | 0 | case OpCode::F64__trunc: |
458 | 0 | return runTruncOp<double>(StackMgr.getTop()); |
459 | 0 | case OpCode::F64__nearest: |
460 | 0 | return runNearestOp<double>(StackMgr.getTop()); |
461 | 0 | case OpCode::F64__sqrt: |
462 | 0 | return runSqrtOp<double>(StackMgr.getTop()); |
463 | 0 | case OpCode::I32__wrap_i64: |
464 | 0 | return runWrapOp<uint64_t, uint32_t>(StackMgr.getTop()); |
465 | 0 | case OpCode::I32__trunc_f32_s: |
466 | 0 | return runTruncateOp<float, int32_t>(Instr, StackMgr.getTop()); |
467 | 0 | case OpCode::I32__trunc_f32_u: |
468 | 0 | return runTruncateOp<float, uint32_t>(Instr, StackMgr.getTop()); |
469 | 0 | case OpCode::I32__trunc_f64_s: |
470 | 0 | return runTruncateOp<double, int32_t>(Instr, StackMgr.getTop()); |
471 | 0 | case OpCode::I32__trunc_f64_u: |
472 | 0 | return runTruncateOp<double, uint32_t>(Instr, StackMgr.getTop()); |
473 | 0 | case OpCode::I64__extend_i32_s: |
474 | 0 | return runExtendOp<int32_t, uint64_t>(StackMgr.getTop()); |
475 | 0 | case OpCode::I64__extend_i32_u: |
476 | 0 | return runExtendOp<uint32_t, uint64_t>(StackMgr.getTop()); |
477 | 0 | case OpCode::I64__trunc_f32_s: |
478 | 0 | return runTruncateOp<float, int64_t>(Instr, StackMgr.getTop()); |
479 | 0 | case OpCode::I64__trunc_f32_u: |
480 | 0 | return runTruncateOp<float, uint64_t>(Instr, StackMgr.getTop()); |
481 | 0 | case OpCode::I64__trunc_f64_s: |
482 | 0 | return runTruncateOp<double, int64_t>(Instr, StackMgr.getTop()); |
483 | 0 | case OpCode::I64__trunc_f64_u: |
484 | 0 | return runTruncateOp<double, uint64_t>(Instr, StackMgr.getTop()); |
485 | 0 | case OpCode::F32__convert_i32_s: |
486 | 0 | return runConvertOp<int32_t, float>(StackMgr.getTop()); |
487 | 0 | case OpCode::F32__convert_i32_u: |
488 | 0 | return runConvertOp<uint32_t, float>(StackMgr.getTop()); |
489 | 0 | case OpCode::F32__convert_i64_s: |
490 | 0 | return runConvertOp<int64_t, float>(StackMgr.getTop()); |
491 | 0 | case OpCode::F32__convert_i64_u: |
492 | 0 | return runConvertOp<uint64_t, float>(StackMgr.getTop()); |
493 | 0 | case OpCode::F32__demote_f64: |
494 | 0 | return runDemoteOp<double, float>(StackMgr.getTop()); |
495 | 0 | case OpCode::F64__convert_i32_s: |
496 | 0 | return runConvertOp<int32_t, double>(StackMgr.getTop()); |
497 | 0 | case OpCode::F64__convert_i32_u: |
498 | 0 | return runConvertOp<uint32_t, double>(StackMgr.getTop()); |
499 | 0 | case OpCode::F64__convert_i64_s: |
500 | 0 | return runConvertOp<int64_t, double>(StackMgr.getTop()); |
501 | 0 | case OpCode::F64__convert_i64_u: |
502 | 0 | return runConvertOp<uint64_t, double>(StackMgr.getTop()); |
503 | 0 | case OpCode::F64__promote_f32: |
504 | 0 | return runPromoteOp<float, double>(StackMgr.getTop()); |
505 | 0 | case OpCode::I32__reinterpret_f32: |
506 | 0 | return runReinterpretOp<float, uint32_t>(StackMgr.getTop()); |
507 | 0 | case OpCode::I64__reinterpret_f64: |
508 | 0 | return runReinterpretOp<double, uint64_t>(StackMgr.getTop()); |
509 | 0 | case OpCode::F32__reinterpret_i32: |
510 | 0 | return runReinterpretOp<uint32_t, float>(StackMgr.getTop()); |
511 | 0 | case OpCode::F64__reinterpret_i64: |
512 | 0 | return runReinterpretOp<uint64_t, double>(StackMgr.getTop()); |
513 | 0 | case OpCode::I32__extend8_s: |
514 | 0 | return runExtendOp<int32_t, uint32_t, 8>(StackMgr.getTop()); |
515 | 0 | case OpCode::I32__extend16_s: |
516 | 0 | return runExtendOp<int32_t, uint32_t, 16>(StackMgr.getTop()); |
517 | 0 | case OpCode::I64__extend8_s: |
518 | 0 | return runExtendOp<int64_t, uint64_t, 8>(StackMgr.getTop()); |
519 | 0 | case OpCode::I64__extend16_s: |
520 | 0 | return runExtendOp<int64_t, uint64_t, 16>(StackMgr.getTop()); |
521 | 0 | case OpCode::I64__extend32_s: |
522 | 0 | return runExtendOp<int64_t, uint64_t, 32>(StackMgr.getTop()); |
523 | | |
524 | | // Binary Numeric Instructions |
525 | 0 | case OpCode::I32__eq: { |
526 | 0 | ValVariant Rhs = StackMgr.pop(); |
527 | 0 | return runEqOp<uint32_t>(StackMgr.getTop(), Rhs); |
528 | 0 | } |
529 | 0 | case OpCode::I32__ne: { |
530 | 0 | ValVariant Rhs = StackMgr.pop(); |
531 | 0 | return runNeOp<uint32_t>(StackMgr.getTop(), Rhs); |
532 | 0 | } |
533 | 0 | case OpCode::I32__lt_s: { |
534 | 0 | ValVariant Rhs = StackMgr.pop(); |
535 | 0 | return runLtOp<int32_t>(StackMgr.getTop(), Rhs); |
536 | 0 | } |
537 | 0 | case OpCode::I32__lt_u: { |
538 | 0 | ValVariant Rhs = StackMgr.pop(); |
539 | 0 | return runLtOp<uint32_t>(StackMgr.getTop(), Rhs); |
540 | 0 | } |
541 | 0 | case OpCode::I32__gt_s: { |
542 | 0 | ValVariant Rhs = StackMgr.pop(); |
543 | 0 | return runGtOp<int32_t>(StackMgr.getTop(), Rhs); |
544 | 0 | } |
545 | 0 | case OpCode::I32__gt_u: { |
546 | 0 | ValVariant Rhs = StackMgr.pop(); |
547 | 0 | return runGtOp<uint32_t>(StackMgr.getTop(), Rhs); |
548 | 0 | } |
549 | 0 | case OpCode::I32__le_s: { |
550 | 0 | ValVariant Rhs = StackMgr.pop(); |
551 | 0 | return runLeOp<int32_t>(StackMgr.getTop(), Rhs); |
552 | 0 | } |
553 | 0 | case OpCode::I32__le_u: { |
554 | 0 | ValVariant Rhs = StackMgr.pop(); |
555 | 0 | return runLeOp<uint32_t>(StackMgr.getTop(), Rhs); |
556 | 0 | } |
557 | 0 | case OpCode::I32__ge_s: { |
558 | 0 | ValVariant Rhs = StackMgr.pop(); |
559 | 0 | return runGeOp<int32_t>(StackMgr.getTop(), Rhs); |
560 | 0 | } |
561 | 0 | case OpCode::I32__ge_u: { |
562 | 0 | ValVariant Rhs = StackMgr.pop(); |
563 | 0 | return runGeOp<uint32_t>(StackMgr.getTop(), Rhs); |
564 | 0 | } |
565 | 0 | case OpCode::I64__eq: { |
566 | 0 | ValVariant Rhs = StackMgr.pop(); |
567 | 0 | return runEqOp<uint64_t>(StackMgr.getTop(), Rhs); |
568 | 0 | } |
569 | 0 | case OpCode::I64__ne: { |
570 | 0 | ValVariant Rhs = StackMgr.pop(); |
571 | 0 | return runNeOp<uint64_t>(StackMgr.getTop(), Rhs); |
572 | 0 | } |
573 | 0 | case OpCode::I64__lt_s: { |
574 | 0 | ValVariant Rhs = StackMgr.pop(); |
575 | 0 | return runLtOp<int64_t>(StackMgr.getTop(), Rhs); |
576 | 0 | } |
577 | 0 | case OpCode::I64__lt_u: { |
578 | 0 | ValVariant Rhs = StackMgr.pop(); |
579 | 0 | return runLtOp<uint64_t>(StackMgr.getTop(), Rhs); |
580 | 0 | } |
581 | 0 | case OpCode::I64__gt_s: { |
582 | 0 | ValVariant Rhs = StackMgr.pop(); |
583 | 0 | return runGtOp<int64_t>(StackMgr.getTop(), Rhs); |
584 | 0 | } |
585 | 0 | case OpCode::I64__gt_u: { |
586 | 0 | ValVariant Rhs = StackMgr.pop(); |
587 | 0 | return runGtOp<uint64_t>(StackMgr.getTop(), Rhs); |
588 | 0 | } |
589 | 0 | case OpCode::I64__le_s: { |
590 | 0 | ValVariant Rhs = StackMgr.pop(); |
591 | 0 | return runLeOp<int64_t>(StackMgr.getTop(), Rhs); |
592 | 0 | } |
593 | 0 | case OpCode::I64__le_u: { |
594 | 0 | ValVariant Rhs = StackMgr.pop(); |
595 | 0 | return runLeOp<uint64_t>(StackMgr.getTop(), Rhs); |
596 | 0 | } |
597 | 0 | case OpCode::I64__ge_s: { |
598 | 0 | ValVariant Rhs = StackMgr.pop(); |
599 | 0 | return runGeOp<int64_t>(StackMgr.getTop(), Rhs); |
600 | 0 | } |
601 | 0 | case OpCode::I64__ge_u: { |
602 | 0 | ValVariant Rhs = StackMgr.pop(); |
603 | 0 | return runGeOp<uint64_t>(StackMgr.getTop(), Rhs); |
604 | 0 | } |
605 | 0 | case OpCode::F32__eq: { |
606 | 0 | ValVariant Rhs = StackMgr.pop(); |
607 | 0 | return runEqOp<float>(StackMgr.getTop(), Rhs); |
608 | 0 | } |
609 | 0 | case OpCode::F32__ne: { |
610 | 0 | ValVariant Rhs = StackMgr.pop(); |
611 | 0 | return runNeOp<float>(StackMgr.getTop(), Rhs); |
612 | 0 | } |
613 | 0 | case OpCode::F32__lt: { |
614 | 0 | ValVariant Rhs = StackMgr.pop(); |
615 | 0 | return runLtOp<float>(StackMgr.getTop(), Rhs); |
616 | 0 | } |
617 | 0 | case OpCode::F32__gt: { |
618 | 0 | ValVariant Rhs = StackMgr.pop(); |
619 | 0 | return runGtOp<float>(StackMgr.getTop(), Rhs); |
620 | 0 | } |
621 | 0 | case OpCode::F32__le: { |
622 | 0 | ValVariant Rhs = StackMgr.pop(); |
623 | 0 | return runLeOp<float>(StackMgr.getTop(), Rhs); |
624 | 0 | } |
625 | 0 | case OpCode::F32__ge: { |
626 | 0 | ValVariant Rhs = StackMgr.pop(); |
627 | 0 | return runGeOp<float>(StackMgr.getTop(), Rhs); |
628 | 0 | } |
629 | 0 | case OpCode::F64__eq: { |
630 | 0 | ValVariant Rhs = StackMgr.pop(); |
631 | 0 | return runEqOp<double>(StackMgr.getTop(), Rhs); |
632 | 0 | } |
633 | 0 | case OpCode::F64__ne: { |
634 | 0 | ValVariant Rhs = StackMgr.pop(); |
635 | 0 | return runNeOp<double>(StackMgr.getTop(), Rhs); |
636 | 0 | } |
637 | 0 | case OpCode::F64__lt: { |
638 | 0 | ValVariant Rhs = StackMgr.pop(); |
639 | 0 | return runLtOp<double>(StackMgr.getTop(), Rhs); |
640 | 0 | } |
641 | 0 | case OpCode::F64__gt: { |
642 | 0 | ValVariant Rhs = StackMgr.pop(); |
643 | 0 | return runGtOp<double>(StackMgr.getTop(), Rhs); |
644 | 0 | } |
645 | 0 | case OpCode::F64__le: { |
646 | 0 | ValVariant Rhs = StackMgr.pop(); |
647 | 0 | return runLeOp<double>(StackMgr.getTop(), Rhs); |
648 | 0 | } |
649 | 0 | case OpCode::F64__ge: { |
650 | 0 | ValVariant Rhs = StackMgr.pop(); |
651 | 0 | return runGeOp<double>(StackMgr.getTop(), Rhs); |
652 | 0 | } |
653 | 0 | case OpCode::I32__add: { |
654 | 0 | ValVariant Rhs = StackMgr.pop(); |
655 | 0 | return runAddOp<uint32_t>(StackMgr.getTop(), Rhs); |
656 | 0 | } |
657 | 0 | case OpCode::I32__sub: { |
658 | 0 | ValVariant Rhs = StackMgr.pop(); |
659 | 0 | return runSubOp<uint32_t>(StackMgr.getTop(), Rhs); |
660 | 0 | } |
661 | 0 | case OpCode::I32__mul: { |
662 | 0 | ValVariant Rhs = StackMgr.pop(); |
663 | 0 | return runMulOp<uint32_t>(StackMgr.getTop(), Rhs); |
664 | 0 | } |
665 | 0 | case OpCode::I32__div_s: { |
666 | 0 | ValVariant Rhs = StackMgr.pop(); |
667 | 0 | return runDivOp<int32_t>(Instr, StackMgr.getTop(), Rhs); |
668 | 0 | } |
669 | 0 | case OpCode::I32__div_u: { |
670 | 0 | ValVariant Rhs = StackMgr.pop(); |
671 | 0 | return runDivOp<uint32_t>(Instr, StackMgr.getTop(), Rhs); |
672 | 0 | } |
673 | 0 | case OpCode::I32__rem_s: { |
674 | 0 | ValVariant Rhs = StackMgr.pop(); |
675 | 0 | return runRemOp<int32_t>(Instr, StackMgr.getTop(), Rhs); |
676 | 0 | } |
677 | 0 | case OpCode::I32__rem_u: { |
678 | 0 | ValVariant Rhs = StackMgr.pop(); |
679 | 0 | return runRemOp<uint32_t>(Instr, StackMgr.getTop(), Rhs); |
680 | 0 | } |
681 | 0 | case OpCode::I32__and: { |
682 | 0 | ValVariant Rhs = StackMgr.pop(); |
683 | 0 | return runAndOp<uint32_t>(StackMgr.getTop(), Rhs); |
684 | 0 | } |
685 | 0 | case OpCode::I32__or: { |
686 | 0 | ValVariant Rhs = StackMgr.pop(); |
687 | 0 | return runOrOp<uint32_t>(StackMgr.getTop(), Rhs); |
688 | 0 | } |
689 | 0 | case OpCode::I32__xor: { |
690 | 0 | ValVariant Rhs = StackMgr.pop(); |
691 | 0 | return runXorOp<uint32_t>(StackMgr.getTop(), Rhs); |
692 | 0 | } |
693 | 0 | case OpCode::I32__shl: { |
694 | 0 | ValVariant Rhs = StackMgr.pop(); |
695 | 0 | return runShlOp<uint32_t>(StackMgr.getTop(), Rhs); |
696 | 0 | } |
697 | 0 | case OpCode::I32__shr_s: { |
698 | 0 | ValVariant Rhs = StackMgr.pop(); |
699 | 0 | return runShrOp<int32_t>(StackMgr.getTop(), Rhs); |
700 | 0 | } |
701 | 0 | case OpCode::I32__shr_u: { |
702 | 0 | ValVariant Rhs = StackMgr.pop(); |
703 | 0 | return runShrOp<uint32_t>(StackMgr.getTop(), Rhs); |
704 | 0 | } |
705 | 0 | case OpCode::I32__rotl: { |
706 | 0 | ValVariant Rhs = StackMgr.pop(); |
707 | 0 | return runRotlOp<uint32_t>(StackMgr.getTop(), Rhs); |
708 | 0 | } |
709 | 0 | case OpCode::I32__rotr: { |
710 | 0 | ValVariant Rhs = StackMgr.pop(); |
711 | 0 | return runRotrOp<uint32_t>(StackMgr.getTop(), Rhs); |
712 | 0 | } |
713 | 0 | case OpCode::I64__add: { |
714 | 0 | ValVariant Rhs = StackMgr.pop(); |
715 | 0 | return runAddOp<uint64_t>(StackMgr.getTop(), Rhs); |
716 | 0 | } |
717 | 0 | case OpCode::I64__sub: { |
718 | 0 | ValVariant Rhs = StackMgr.pop(); |
719 | 0 | return runSubOp<uint64_t>(StackMgr.getTop(), Rhs); |
720 | 0 | } |
721 | 0 | case OpCode::I64__mul: { |
722 | 0 | ValVariant Rhs = StackMgr.pop(); |
723 | 0 | return runMulOp<uint64_t>(StackMgr.getTop(), Rhs); |
724 | 0 | } |
725 | 0 | case OpCode::I64__div_s: { |
726 | 0 | ValVariant Rhs = StackMgr.pop(); |
727 | 0 | return runDivOp<int64_t>(Instr, StackMgr.getTop(), Rhs); |
728 | 0 | } |
729 | 0 | case OpCode::I64__div_u: { |
730 | 0 | ValVariant Rhs = StackMgr.pop(); |
731 | 0 | return runDivOp<uint64_t>(Instr, StackMgr.getTop(), Rhs); |
732 | 0 | } |
733 | 0 | case OpCode::I64__rem_s: { |
734 | 0 | ValVariant Rhs = StackMgr.pop(); |
735 | 0 | return runRemOp<int64_t>(Instr, StackMgr.getTop(), Rhs); |
736 | 0 | } |
737 | 0 | case OpCode::I64__rem_u: { |
738 | 0 | ValVariant Rhs = StackMgr.pop(); |
739 | 0 | return runRemOp<uint64_t>(Instr, StackMgr.getTop(), Rhs); |
740 | 0 | } |
741 | 0 | case OpCode::I64__and: { |
742 | 0 | ValVariant Rhs = StackMgr.pop(); |
743 | 0 | return runAndOp<uint64_t>(StackMgr.getTop(), Rhs); |
744 | 0 | } |
745 | 0 | case OpCode::I64__or: { |
746 | 0 | ValVariant Rhs = StackMgr.pop(); |
747 | 0 | return runOrOp<uint64_t>(StackMgr.getTop(), Rhs); |
748 | 0 | } |
749 | 0 | case OpCode::I64__xor: { |
750 | 0 | ValVariant Rhs = StackMgr.pop(); |
751 | 0 | return runXorOp<uint64_t>(StackMgr.getTop(), Rhs); |
752 | 0 | } |
753 | 0 | case OpCode::I64__shl: { |
754 | 0 | ValVariant Rhs = StackMgr.pop(); |
755 | 0 | return runShlOp<uint64_t>(StackMgr.getTop(), Rhs); |
756 | 0 | } |
757 | 0 | case OpCode::I64__shr_s: { |
758 | 0 | ValVariant Rhs = StackMgr.pop(); |
759 | 0 | return runShrOp<int64_t>(StackMgr.getTop(), Rhs); |
760 | 0 | } |
761 | 0 | case OpCode::I64__shr_u: { |
762 | 0 | ValVariant Rhs = StackMgr.pop(); |
763 | 0 | return runShrOp<uint64_t>(StackMgr.getTop(), Rhs); |
764 | 0 | } |
765 | 0 | case OpCode::I64__rotl: { |
766 | 0 | ValVariant Rhs = StackMgr.pop(); |
767 | 0 | return runRotlOp<uint64_t>(StackMgr.getTop(), Rhs); |
768 | 0 | } |
769 | 0 | case OpCode::I64__rotr: { |
770 | 0 | ValVariant Rhs = StackMgr.pop(); |
771 | 0 | return runRotrOp<uint64_t>(StackMgr.getTop(), Rhs); |
772 | 0 | } |
773 | 0 | case OpCode::F32__add: { |
774 | 0 | ValVariant Rhs = StackMgr.pop(); |
775 | 0 | return runAddOp<float>(StackMgr.getTop(), Rhs); |
776 | 0 | } |
777 | 0 | case OpCode::F32__sub: { |
778 | 0 | ValVariant Rhs = StackMgr.pop(); |
779 | 0 | return runSubOp<float>(StackMgr.getTop(), Rhs); |
780 | 0 | } |
781 | 0 | case OpCode::F32__mul: { |
782 | 0 | ValVariant Rhs = StackMgr.pop(); |
783 | 0 | return runMulOp<float>(StackMgr.getTop(), Rhs); |
784 | 0 | } |
785 | 0 | case OpCode::F32__div: { |
786 | 0 | ValVariant Rhs = StackMgr.pop(); |
787 | 0 | return runDivOp<float>(Instr, StackMgr.getTop(), Rhs); |
788 | 0 | } |
789 | 0 | case OpCode::F32__min: { |
790 | 0 | ValVariant Rhs = StackMgr.pop(); |
791 | 0 | return runMinOp<float>(StackMgr.getTop(), Rhs); |
792 | 0 | } |
793 | 0 | case OpCode::F32__max: { |
794 | 0 | ValVariant Rhs = StackMgr.pop(); |
795 | 0 | return runMaxOp<float>(StackMgr.getTop(), Rhs); |
796 | 0 | } |
797 | 0 | case OpCode::F32__copysign: { |
798 | 0 | ValVariant Rhs = StackMgr.pop(); |
799 | 0 | return runCopysignOp<float>(StackMgr.getTop(), Rhs); |
800 | 0 | } |
801 | 0 | case OpCode::F64__add: { |
802 | 0 | ValVariant Rhs = StackMgr.pop(); |
803 | 0 | return runAddOp<double>(StackMgr.getTop(), Rhs); |
804 | 0 | } |
805 | 0 | case OpCode::F64__sub: { |
806 | 0 | ValVariant Rhs = StackMgr.pop(); |
807 | 0 | return runSubOp<double>(StackMgr.getTop(), Rhs); |
808 | 0 | } |
809 | 0 | case OpCode::F64__mul: { |
810 | 0 | ValVariant Rhs = StackMgr.pop(); |
811 | 0 | return runMulOp<double>(StackMgr.getTop(), Rhs); |
812 | 0 | } |
813 | 0 | case OpCode::F64__div: { |
814 | 0 | ValVariant Rhs = StackMgr.pop(); |
815 | 0 | return runDivOp<double>(Instr, StackMgr.getTop(), Rhs); |
816 | 0 | } |
817 | 0 | case OpCode::F64__min: { |
818 | 0 | ValVariant Rhs = StackMgr.pop(); |
819 | 0 | return runMinOp<double>(StackMgr.getTop(), Rhs); |
820 | 0 | } |
821 | 0 | case OpCode::F64__max: { |
822 | 0 | ValVariant Rhs = StackMgr.pop(); |
823 | 0 | return runMaxOp<double>(StackMgr.getTop(), Rhs); |
824 | 0 | } |
825 | 0 | case OpCode::F64__copysign: { |
826 | 0 | ValVariant Rhs = StackMgr.pop(); |
827 | 0 | return runCopysignOp<double>(StackMgr.getTop(), Rhs); |
828 | 0 | } |
829 | | |
830 | | // Saturating Truncation Numeric Instructions |
831 | 0 | case OpCode::I32__trunc_sat_f32_s: |
832 | 0 | return runTruncateSatOp<float, int32_t>(StackMgr.getTop()); |
833 | 0 | case OpCode::I32__trunc_sat_f32_u: |
834 | 0 | return runTruncateSatOp<float, uint32_t>(StackMgr.getTop()); |
835 | 0 | case OpCode::I32__trunc_sat_f64_s: |
836 | 0 | return runTruncateSatOp<double, int32_t>(StackMgr.getTop()); |
837 | 0 | case OpCode::I32__trunc_sat_f64_u: |
838 | 0 | return runTruncateSatOp<double, uint32_t>(StackMgr.getTop()); |
839 | 0 | case OpCode::I64__trunc_sat_f32_s: |
840 | 0 | return runTruncateSatOp<float, int64_t>(StackMgr.getTop()); |
841 | 0 | case OpCode::I64__trunc_sat_f32_u: |
842 | 0 | return runTruncateSatOp<float, uint64_t>(StackMgr.getTop()); |
843 | 0 | case OpCode::I64__trunc_sat_f64_s: |
844 | 0 | return runTruncateSatOp<double, int64_t>(StackMgr.getTop()); |
845 | 0 | case OpCode::I64__trunc_sat_f64_u: |
846 | 0 | return runTruncateSatOp<double, uint64_t>(StackMgr.getTop()); |
847 | | |
848 | | // SIMD Memory Instructions |
849 | 0 | case OpCode::V128__load: |
850 | 0 | return runLoadOp<uint128_t>( |
851 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
852 | 0 | case OpCode::V128__load8x8_s: |
853 | 0 | return runLoadExpandOp<int8_t, int16_t>( |
854 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
855 | 0 | case OpCode::V128__load8x8_u: |
856 | 0 | return runLoadExpandOp<uint8_t, uint16_t>( |
857 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
858 | 0 | case OpCode::V128__load16x4_s: |
859 | 0 | return runLoadExpandOp<int16_t, int32_t>( |
860 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
861 | 0 | case OpCode::V128__load16x4_u: |
862 | 0 | return runLoadExpandOp<uint16_t, uint32_t>( |
863 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
864 | 0 | case OpCode::V128__load32x2_s: |
865 | 0 | return runLoadExpandOp<int32_t, int64_t>( |
866 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
867 | 0 | case OpCode::V128__load32x2_u: |
868 | 0 | return runLoadExpandOp<uint32_t, uint64_t>( |
869 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
870 | 0 | case OpCode::V128__load8_splat: |
871 | 0 | return runLoadSplatOp<uint8_t>( |
872 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
873 | 0 | case OpCode::V128__load16_splat: |
874 | 0 | return runLoadSplatOp<uint16_t>( |
875 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
876 | 0 | case OpCode::V128__load32_splat: |
877 | 0 | return runLoadSplatOp<uint32_t>( |
878 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
879 | 0 | case OpCode::V128__load64_splat: |
880 | 0 | return runLoadSplatOp<uint64_t>( |
881 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
882 | 0 | case OpCode::V128__load32_zero: |
883 | 0 | return runLoadOp<uint128_t, 32>( |
884 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
885 | 0 | case OpCode::V128__load64_zero: |
886 | 0 | return runLoadOp<uint128_t, 64>( |
887 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
888 | 0 | case OpCode::V128__store: |
889 | 0 | return runStoreOp<uint128_t>( |
890 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
891 | 0 | case OpCode::V128__load8_lane: |
892 | 0 | return runLoadLaneOp<uint8_t>( |
893 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
894 | 0 | case OpCode::V128__load16_lane: |
895 | 0 | return runLoadLaneOp<uint16_t>( |
896 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
897 | 0 | case OpCode::V128__load32_lane: |
898 | 0 | return runLoadLaneOp<uint32_t>( |
899 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
900 | 0 | case OpCode::V128__load64_lane: |
901 | 0 | return runLoadLaneOp<uint64_t>( |
902 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
903 | 0 | case OpCode::V128__store8_lane: |
904 | 0 | return runStoreLaneOp<uint8_t>( |
905 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
906 | 0 | case OpCode::V128__store16_lane: |
907 | 0 | return runStoreLaneOp<uint16_t>( |
908 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
909 | 0 | case OpCode::V128__store32_lane: |
910 | 0 | return runStoreLaneOp<uint32_t>( |
911 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
912 | 0 | case OpCode::V128__store64_lane: |
913 | 0 | return runStoreLaneOp<uint64_t>( |
914 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
915 | | |
916 | | // SIMD Const Instructions |
917 | 0 | case OpCode::V128__const: |
918 | 0 | StackMgr.push(Instr.getNum()); |
919 | 0 | return {}; |
920 | | |
921 | | // SIMD Shuffle Instructions |
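// i8x16.shuffle concatenates the two operands into a 32-byte buffer
// (Data[0..15] = first operand, Data[16..31] = second operand) and selects
// Result[I] = Data[lane I of the immediate]. For example, on little-endian
// hosts an immediate lane value of 3 picks byte 3 of the first operand and
// 19 picks byte 3 of the second; the big-endian branch mirrors the byte
// order to produce the same logical result.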
922 | 0 | case OpCode::I8x16__shuffle: { |
923 | 0 | ValVariant Val2 = StackMgr.pop(); |
924 | 0 | ValVariant &Val1 = StackMgr.getTop(); |
925 | 0 | std::array<uint8_t, 32> Data; |
926 | 0 | std::array<uint8_t, 16> Result; |
927 | 0 | std::memcpy(&Data[0], &Val1, 16); |
928 | 0 | std::memcpy(&Data[16], &Val2, 16); |
929 | 0 | const auto V3 = Instr.getNum().get<uint128_t>(); |
930 | 0 | for (size_t I = 0; I < 16; ++I) { |
931 | 0 | const uint8_t Index = static_cast<uint8_t>(V3 >> (I * 8)); |
932 | 0 | if constexpr (Endian::native == Endian::little) { |
933 | 0 | Result[I] = Data[Index]; |
934 | | } else { |
935 | | Result[15 - I] = Index < 16 ? Data[15 - Index] : Data[47 - Index]; |
936 | | } |
937 | 0 | } |
938 | 0 | std::memcpy(&Val1, &Result[0], 16); |
939 | 0 | return {}; |
940 | 0 | } |
941 | | |
942 | | // SIMD Lane Instructions |
943 | 0 | case OpCode::I8x16__extract_lane_s: |
944 | 0 | return runExtractLaneOp<int8_t, int32_t>(StackMgr.getTop(), |
945 | 0 | Instr.getMemoryLane()); |
946 | 0 | case OpCode::I8x16__extract_lane_u: |
947 | 0 | return runExtractLaneOp<uint8_t, uint32_t>(StackMgr.getTop(), |
948 | 0 | Instr.getMemoryLane()); |
949 | 0 | case OpCode::I16x8__extract_lane_s: |
950 | 0 | return runExtractLaneOp<int16_t, int32_t>(StackMgr.getTop(), |
951 | 0 | Instr.getMemoryLane()); |
952 | 0 | case OpCode::I16x8__extract_lane_u: |
953 | 0 | return runExtractLaneOp<uint16_t, uint32_t>(StackMgr.getTop(), |
954 | 0 | Instr.getMemoryLane()); |
955 | 0 | case OpCode::I32x4__extract_lane: |
956 | 0 | return runExtractLaneOp<uint32_t>(StackMgr.getTop(), |
957 | 0 | Instr.getMemoryLane()); |
958 | 0 | case OpCode::I64x2__extract_lane: |
959 | 0 | return runExtractLaneOp<uint64_t>(StackMgr.getTop(), |
960 | 0 | Instr.getMemoryLane()); |
961 | 0 | case OpCode::F32x4__extract_lane: |
962 | 0 | return runExtractLaneOp<float>(StackMgr.getTop(), Instr.getMemoryLane()); |
963 | 0 | case OpCode::F64x2__extract_lane: |
964 | 0 | return runExtractLaneOp<double>(StackMgr.getTop(), Instr.getMemoryLane()); |
965 | 0 | case OpCode::I8x16__replace_lane: { |
966 | 0 | ValVariant Rhs = StackMgr.pop(); |
967 | 0 | return runReplaceLaneOp<uint32_t, uint8_t>(StackMgr.getTop(), Rhs, |
968 | 0 | Instr.getMemoryLane()); |
969 | 0 | } |
970 | 0 | case OpCode::I16x8__replace_lane: { |
971 | 0 | ValVariant Rhs = StackMgr.pop(); |
972 | 0 | return runReplaceLaneOp<uint32_t, uint16_t>(StackMgr.getTop(), Rhs, |
973 | 0 | Instr.getMemoryLane()); |
974 | 0 | } |
975 | 0 | case OpCode::I32x4__replace_lane: { |
976 | 0 | ValVariant Rhs = StackMgr.pop(); |
977 | 0 | return runReplaceLaneOp<uint32_t>(StackMgr.getTop(), Rhs, |
978 | 0 | Instr.getMemoryLane()); |
979 | 0 | } |
980 | 0 | case OpCode::I64x2__replace_lane: { |
981 | 0 | ValVariant Rhs = StackMgr.pop(); |
982 | 0 | return runReplaceLaneOp<uint64_t>(StackMgr.getTop(), Rhs, |
983 | 0 | Instr.getMemoryLane()); |
984 | 0 | } |
985 | 0 | case OpCode::F32x4__replace_lane: { |
986 | 0 | ValVariant Rhs = StackMgr.pop(); |
987 | 0 | return runReplaceLaneOp<float>(StackMgr.getTop(), Rhs, |
988 | 0 | Instr.getMemoryLane()); |
989 | 0 | } |
990 | 0 | case OpCode::F64x2__replace_lane: { |
991 | 0 | ValVariant Rhs = StackMgr.pop(); |
992 | 0 | return runReplaceLaneOp<double>(StackMgr.getTop(), Rhs, |
993 | 0 | Instr.getMemoryLane()); |
994 | 0 | } |
995 | | |
996 | | // SIMD Numeric Instructions |
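// i8x16.swizzle has two implementations: MSVC uses a scalar per-lane loop,
// while GCC/Clang use the vector extensions (__builtin_shuffle /
// __builtin_shufflevector) and then clear any lane whose index is >= 16 via
// detail::vectorSelect. Both paths agree with the spec: out-of-range
// swizzle indices produce 0.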
997 | | #if defined(_MSC_VER) && !defined(__clang__) // MSVC |
998 | | case OpCode::I8x16__swizzle: { |
999 | | const ValVariant Val2 = StackMgr.pop(); |
1000 | | ValVariant &Val1 = StackMgr.getTop(); |
1001 | | const uint8x16_t &Index = Val2.get<uint8x16_t>(); |
1002 | | uint8x16_t &Vector = Val1.get<uint8x16_t>(); |
1003 | | uint8x16_t Result; |
1004 | | for (size_t I = 0; I < 16; ++I) { |
1005 | | const uint8_t SwizzleIndex = Index[I]; |
1006 | | if (SwizzleIndex < 16) { |
1007 | | Result[I] = Vector[SwizzleIndex]; |
1008 | | } else { |
1009 | | Result[I] = 0; |
1010 | | } |
1011 | | } |
1012 | | Vector = Result; |
1013 | | return {}; |
1014 | | } |
1015 | | #else |
1016 | 0 | case OpCode::I8x16__swizzle: { |
1017 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1018 | 0 | ValVariant &Val1 = StackMgr.getTop(); |
1019 | 0 | uint8x16_t Index = Val2.get<uint8x16_t>(); |
1020 | | if constexpr (Endian::native == Endian::big) { |
1021 | | Index = 15 - Index; |
1022 | | } |
1023 | 0 | uint8x16_t &Vector = Val1.get<uint8x16_t>(); |
1024 | 0 | const uint8x16_t Limit = uint8x16_t{} + 16; |
1025 | 0 | const uint8x16_t Zero = uint8x16_t{}; |
1026 | 0 | const uint8x16_t Exceed = (Index >= Limit); |
1027 | 0 | #ifdef __clang__ |
1028 | 0 | uint8x16_t Result = __builtin_shufflevector(Vector, Index); |
1029 | | #else |
1030 | | uint8x16_t Result = __builtin_shuffle(Vector, Index); |
1031 | | #endif |
1032 | 0 | Vector = detail::vectorSelect(Exceed, Zero, Result); |
1033 | 0 | return {}; |
1034 | 0 | } |
1035 | 0 | #endif // MSVC |
1036 | 0 | case OpCode::I8x16__splat: |
1037 | 0 | return runSplatOp<uint32_t, uint8_t>(StackMgr.getTop()); |
1038 | 0 | case OpCode::I16x8__splat: |
1039 | 0 | return runSplatOp<uint32_t, uint16_t>(StackMgr.getTop()); |
1040 | 0 | case OpCode::I32x4__splat: |
1041 | 0 | return runSplatOp<uint32_t>(StackMgr.getTop()); |
1042 | 0 | case OpCode::I64x2__splat: |
1043 | 0 | return runSplatOp<uint64_t>(StackMgr.getTop()); |
1044 | 0 | case OpCode::F32x4__splat: |
1045 | 0 | return runSplatOp<float>(StackMgr.getTop()); |
1046 | 0 | case OpCode::F64x2__splat: |
1047 | 0 | return runSplatOp<double>(StackMgr.getTop()); |
1048 | 0 | case OpCode::I8x16__eq: { |
1049 | 0 | ValVariant Rhs = StackMgr.pop(); |
1050 | 0 | return runVectorEqOp<uint8_t>(StackMgr.getTop(), Rhs); |
1051 | 0 | } |
1052 | 0 | case OpCode::I8x16__ne: { |
1053 | 0 | ValVariant Rhs = StackMgr.pop(); |
1054 | 0 | return runVectorNeOp<uint8_t>(StackMgr.getTop(), Rhs); |
1055 | 0 | } |
1056 | 0 | case OpCode::I8x16__lt_s: { |
1057 | 0 | ValVariant Rhs = StackMgr.pop(); |
1058 | 0 | return runVectorLtOp<int8_t>(StackMgr.getTop(), Rhs); |
1059 | 0 | } |
1060 | 0 | case OpCode::I8x16__lt_u: { |
1061 | 0 | ValVariant Rhs = StackMgr.pop(); |
1062 | 0 | return runVectorLtOp<uint8_t>(StackMgr.getTop(), Rhs); |
1063 | 0 | } |
1064 | 0 | case OpCode::I8x16__gt_s: { |
1065 | 0 | ValVariant Rhs = StackMgr.pop(); |
1066 | 0 | return runVectorGtOp<int8_t>(StackMgr.getTop(), Rhs); |
1067 | 0 | } |
1068 | 0 | case OpCode::I8x16__gt_u: { |
1069 | 0 | ValVariant Rhs = StackMgr.pop(); |
1070 | 0 | return runVectorGtOp<uint8_t>(StackMgr.getTop(), Rhs); |
1071 | 0 | } |
1072 | 0 | case OpCode::I8x16__le_s: { |
1073 | 0 | ValVariant Rhs = StackMgr.pop(); |
1074 | 0 | return runVectorLeOp<int8_t>(StackMgr.getTop(), Rhs); |
1075 | 0 | } |
1076 | 0 | case OpCode::I8x16__le_u: { |
1077 | 0 | ValVariant Rhs = StackMgr.pop(); |
1078 | 0 | return runVectorLeOp<uint8_t>(StackMgr.getTop(), Rhs); |
1079 | 0 | } |
1080 | 0 | case OpCode::I8x16__ge_s: { |
1081 | 0 | ValVariant Rhs = StackMgr.pop(); |
1082 | 0 | return runVectorGeOp<int8_t>(StackMgr.getTop(), Rhs); |
1083 | 0 | } |
1084 | 0 | case OpCode::I8x16__ge_u: { |
1085 | 0 | ValVariant Rhs = StackMgr.pop(); |
1086 | 0 | return runVectorGeOp<uint8_t>(StackMgr.getTop(), Rhs); |
1087 | 0 | } |
1088 | 0 | case OpCode::I16x8__eq: { |
1089 | 0 | ValVariant Rhs = StackMgr.pop(); |
1090 | 0 | return runVectorEqOp<uint16_t>(StackMgr.getTop(), Rhs); |
1091 | 0 | } |
1092 | 0 | case OpCode::I16x8__ne: { |
1093 | 0 | ValVariant Rhs = StackMgr.pop(); |
1094 | 0 | return runVectorNeOp<uint16_t>(StackMgr.getTop(), Rhs); |
1095 | 0 | } |
1096 | 0 | case OpCode::I16x8__lt_s: { |
1097 | 0 | ValVariant Rhs = StackMgr.pop(); |
1098 | 0 | return runVectorLtOp<int16_t>(StackMgr.getTop(), Rhs); |
1099 | 0 | } |
1100 | 0 | case OpCode::I16x8__lt_u: { |
1101 | 0 | ValVariant Rhs = StackMgr.pop(); |
1102 | 0 | return runVectorLtOp<uint16_t>(StackMgr.getTop(), Rhs); |
1103 | 0 | } |
1104 | 0 | case OpCode::I16x8__gt_s: { |
1105 | 0 | ValVariant Rhs = StackMgr.pop(); |
1106 | 0 | return runVectorGtOp<int16_t>(StackMgr.getTop(), Rhs); |
1107 | 0 | } |
1108 | 0 | case OpCode::I16x8__gt_u: { |
1109 | 0 | ValVariant Rhs = StackMgr.pop(); |
1110 | 0 | return runVectorGtOp<uint16_t>(StackMgr.getTop(), Rhs); |
1111 | 0 | } |
1112 | 0 | case OpCode::I16x8__le_s: { |
1113 | 0 | ValVariant Rhs = StackMgr.pop(); |
1114 | 0 | return runVectorLeOp<int16_t>(StackMgr.getTop(), Rhs); |
1115 | 0 | } |
1116 | 0 | case OpCode::I16x8__le_u: { |
1117 | 0 | ValVariant Rhs = StackMgr.pop(); |
1118 | 0 | return runVectorLeOp<uint16_t>(StackMgr.getTop(), Rhs); |
1119 | 0 | } |
1120 | 0 | case OpCode::I16x8__ge_s: { |
1121 | 0 | ValVariant Rhs = StackMgr.pop(); |
1122 | 0 | return runVectorGeOp<int16_t>(StackMgr.getTop(), Rhs); |
1123 | 0 | } |
1124 | 0 | case OpCode::I16x8__ge_u: { |
1125 | 0 | ValVariant Rhs = StackMgr.pop(); |
1126 | 0 | return runVectorGeOp<uint16_t>(StackMgr.getTop(), Rhs); |
1127 | 0 | } |
1128 | 0 | case OpCode::I32x4__eq: { |
1129 | 0 | ValVariant Rhs = StackMgr.pop(); |
1130 | 0 | return runVectorEqOp<uint32_t>(StackMgr.getTop(), Rhs); |
1131 | 0 | } |
1132 | 0 | case OpCode::I32x4__ne: { |
1133 | 0 | ValVariant Rhs = StackMgr.pop(); |
1134 | 0 | return runVectorNeOp<uint32_t>(StackMgr.getTop(), Rhs); |
1135 | 0 | } |
1136 | 0 | case OpCode::I32x4__lt_s: { |
1137 | 0 | ValVariant Rhs = StackMgr.pop(); |
1138 | 0 | return runVectorLtOp<int32_t>(StackMgr.getTop(), Rhs); |
1139 | 0 | } |
1140 | 0 | case OpCode::I32x4__lt_u: { |
1141 | 0 | ValVariant Rhs = StackMgr.pop(); |
1142 | 0 | return runVectorLtOp<uint32_t>(StackMgr.getTop(), Rhs); |
1143 | 0 | } |
1144 | 0 | case OpCode::I32x4__gt_s: { |
1145 | 0 | ValVariant Rhs = StackMgr.pop(); |
1146 | 0 | return runVectorGtOp<int32_t>(StackMgr.getTop(), Rhs); |
1147 | 0 | } |
1148 | 0 | case OpCode::I32x4__gt_u: { |
1149 | 0 | ValVariant Rhs = StackMgr.pop(); |
1150 | 0 | return runVectorGtOp<uint32_t>(StackMgr.getTop(), Rhs); |
1151 | 0 | } |
1152 | 0 | case OpCode::I32x4__le_s: { |
1153 | 0 | ValVariant Rhs = StackMgr.pop(); |
1154 | 0 | return runVectorLeOp<int32_t>(StackMgr.getTop(), Rhs); |
1155 | 0 | } |
1156 | 0 | case OpCode::I32x4__le_u: { |
1157 | 0 | ValVariant Rhs = StackMgr.pop(); |
1158 | 0 | return runVectorLeOp<uint32_t>(StackMgr.getTop(), Rhs); |
1159 | 0 | } |
1160 | 0 | case OpCode::I32x4__ge_s: { |
1161 | 0 | ValVariant Rhs = StackMgr.pop(); |
1162 | 0 | return runVectorGeOp<int32_t>(StackMgr.getTop(), Rhs); |
1163 | 0 | } |
1164 | 0 | case OpCode::I32x4__ge_u: { |
1165 | 0 | ValVariant Rhs = StackMgr.pop(); |
1166 | 0 | return runVectorGeOp<uint32_t>(StackMgr.getTop(), Rhs); |
1167 | 0 | } |
1168 | 0 | case OpCode::I64x2__eq: { |
1169 | 0 | ValVariant Rhs = StackMgr.pop(); |
1170 | 0 | return runVectorEqOp<uint64_t>(StackMgr.getTop(), Rhs); |
1171 | 0 | } |
1172 | 0 | case OpCode::I64x2__ne: { |
1173 | 0 | ValVariant Rhs = StackMgr.pop(); |
1174 | 0 | return runVectorNeOp<uint64_t>(StackMgr.getTop(), Rhs); |
1175 | 0 | } |
1176 | 0 | case OpCode::I64x2__lt_s: { |
1177 | 0 | ValVariant Rhs = StackMgr.pop(); |
1178 | 0 | return runVectorLtOp<int64_t>(StackMgr.getTop(), Rhs); |
1179 | 0 | } |
1180 | 0 | case OpCode::I64x2__gt_s: { |
1181 | 0 | ValVariant Rhs = StackMgr.pop(); |
1182 | 0 | return runVectorGtOp<int64_t>(StackMgr.getTop(), Rhs); |
1183 | 0 | } |
1184 | 0 | case OpCode::I64x2__le_s: { |
1185 | 0 | ValVariant Rhs = StackMgr.pop(); |
1186 | 0 | return runVectorLeOp<int64_t>(StackMgr.getTop(), Rhs); |
1187 | 0 | } |
1188 | 0 | case OpCode::I64x2__ge_s: { |
1189 | 0 | ValVariant Rhs = StackMgr.pop(); |
1190 | 0 | return runVectorGeOp<int64_t>(StackMgr.getTop(), Rhs); |
1191 | 0 | } |
1192 | 0 | case OpCode::F32x4__eq: { |
1193 | 0 | ValVariant Rhs = StackMgr.pop(); |
1194 | 0 | return runVectorEqOp<float>(StackMgr.getTop(), Rhs); |
1195 | 0 | } |
1196 | 0 | case OpCode::F32x4__ne: { |
1197 | 0 | ValVariant Rhs = StackMgr.pop(); |
1198 | 0 | return runVectorNeOp<float>(StackMgr.getTop(), Rhs); |
1199 | 0 | } |
1200 | 0 | case OpCode::F32x4__lt: { |
1201 | 0 | ValVariant Rhs = StackMgr.pop(); |
1202 | 0 | return runVectorLtOp<float>(StackMgr.getTop(), Rhs); |
1203 | 0 | } |
1204 | 0 | case OpCode::F32x4__gt: { |
1205 | 0 | ValVariant Rhs = StackMgr.pop(); |
1206 | 0 | return runVectorGtOp<float>(StackMgr.getTop(), Rhs); |
1207 | 0 | } |
1208 | 0 | case OpCode::F32x4__le: { |
1209 | 0 | ValVariant Rhs = StackMgr.pop(); |
1210 | 0 | return runVectorLeOp<float>(StackMgr.getTop(), Rhs); |
1211 | 0 | } |
1212 | 0 | case OpCode::F32x4__ge: { |
1213 | 0 | ValVariant Rhs = StackMgr.pop(); |
1214 | 0 | return runVectorGeOp<float>(StackMgr.getTop(), Rhs); |
1215 | 0 | } |
1216 | 0 | case OpCode::F64x2__eq: { |
1217 | 0 | ValVariant Rhs = StackMgr.pop(); |
1218 | 0 | return runVectorEqOp<double>(StackMgr.getTop(), Rhs); |
1219 | 0 | } |
1220 | 0 | case OpCode::F64x2__ne: { |
1221 | 0 | ValVariant Rhs = StackMgr.pop(); |
1222 | 0 | return runVectorNeOp<double>(StackMgr.getTop(), Rhs); |
1223 | 0 | } |
1224 | 0 | case OpCode::F64x2__lt: { |
1225 | 0 | ValVariant Rhs = StackMgr.pop(); |
1226 | 0 | return runVectorLtOp<double>(StackMgr.getTop(), Rhs); |
1227 | 0 | } |
1228 | 0 | case OpCode::F64x2__gt: { |
1229 | 0 | ValVariant Rhs = StackMgr.pop(); |
1230 | 0 | return runVectorGtOp<double>(StackMgr.getTop(), Rhs); |
1231 | 0 | } |
1232 | 0 | case OpCode::F64x2__le: { |
1233 | 0 | ValVariant Rhs = StackMgr.pop(); |
1234 | 0 | return runVectorLeOp<double>(StackMgr.getTop(), Rhs); |
1235 | 0 | } |
1236 | 0 | case OpCode::F64x2__ge: { |
1237 | 0 | ValVariant Rhs = StackMgr.pop(); |
1238 | 0 | return runVectorGeOp<double>(StackMgr.getTop(), Rhs); |
1239 | 0 | } |
1240 | 0 | case OpCode::V128__not: { |
1241 | 0 | auto &Val = StackMgr.getTop().get<uint128_t>(); |
1242 | 0 | Val = ~Val; |
1243 | 0 | return {}; |
1244 | 0 | } |
1245 | 0 | case OpCode::V128__and: { |
1246 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1247 | 0 | ValVariant &Val1 = StackMgr.getTop(); |
1248 | | #if defined(_MSC_VER) && !defined(__clang__) // MSVC |
1249 | | auto &Result = Val1.get<uint64x2_t>(); |
1250 | | auto &Vector = Val2.get<uint64x2_t>(); |
1251 | | Result[0] &= Vector[0]; |
1252 | | Result[1] &= Vector[1]; |
1253 | | #else |
1254 | 0 | Val1.get<uint64x2_t>() &= Val2.get<uint64x2_t>(); |
1255 | 0 | #endif // MSVC |
1256 | 0 | return {}; |
1257 | 0 | } |
1258 | 0 | case OpCode::V128__andnot: { |
1259 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1260 | 0 | ValVariant &Val1 = StackMgr.getTop(); |
1261 | | #if defined(_MSC_VER) && !defined(__clang__) // MSVC |
1262 | | auto &Result = Val1.get<uint64x2_t>(); |
1263 | | auto &Vector = Val2.get<uint64x2_t>(); |
1264 | | Result[0] &= ~Vector[0]; |
1265 | | Result[1] &= ~Vector[1]; |
1266 | | #else |
1267 | 0 | Val1.get<uint64x2_t>() &= ~Val2.get<uint64x2_t>(); |
1268 | 0 | #endif // MSVC |
1269 | 0 | return {}; |
1270 | 0 | } |
1271 | 0 | case OpCode::V128__or: { |
1272 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1273 | 0 | ValVariant &Val1 = StackMgr.getTop(); |
1274 | | #if defined(_MSC_VER) && !defined(__clang__) // MSVC |
1275 | | auto &Result = Val1.get<uint64x2_t>(); |
1276 | | auto &Vector = Val2.get<uint64x2_t>(); |
1277 | | Result[0] |= Vector[0]; |
1278 | | Result[1] |= Vector[1]; |
1279 | | #else |
1280 | 0 | Val1.get<uint64x2_t>() |= Val2.get<uint64x2_t>(); |
1281 | 0 | #endif // MSVC |
1282 | 0 | return {}; |
1283 | 0 | } |
1284 | 0 | case OpCode::V128__xor: { |
1285 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1286 | 0 | ValVariant &Val1 = StackMgr.getTop(); |
1287 | | #if defined(_MSC_VER) && !defined(__clang__) // MSVC |
1288 | | auto &Result = Val1.get<uint64x2_t>(); |
1289 | | auto &Vector = Val2.get<uint64x2_t>(); |
1290 | | Result[0] ^= Vector[0]; |
1291 | | Result[1] ^= Vector[1]; |
1292 | | #else |
1293 | 0 | Val1.get<uint64x2_t>() ^= Val2.get<uint64x2_t>(); |
1294 | 0 | #endif // MSVC |
1295 | 0 | return {}; |
1296 | 0 | } |
1297 | 0 | case OpCode::V128__bitselect: { |
1298 | 0 | const uint64x2_t C = StackMgr.pop().get<uint64x2_t>(); |
1299 | 0 | const uint64x2_t Val2 = StackMgr.pop().get<uint64x2_t>(); |
1300 | 0 | uint64x2_t &Val1 = StackMgr.getTop().get<uint64x2_t>(); |
1301 | | #if defined(_MSC_VER) && !defined(__clang__) // MSVC |
1302 | | Val1[0] = (Val1[0] & C[0]) | (Val2[0] & ~C[0]); |
1303 | | Val1[1] = (Val1[1] & C[1]) | (Val2[1] & ~C[1]); |
1304 | | #else |
1305 | 0 | Val1 = (Val1 & C) | (Val2 & ~C); |
1306 | 0 | #endif // MSVC |
1307 | 0 | return {}; |
1308 | 0 | } |
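// Editorial sketch: v128.bitselect is a pure bitwise blend,
//   result = (v1 & c) | (v2 & ~c),
// so each result bit is taken from v1 where the mask bit in c is 1 and from
// v2 where it is 0; e.g. a mask whose top 64 bits are all ones takes the high
// half from v1 and the low half from v2.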
1309 | 0 | case OpCode::V128__any_true: |
1310 | 0 | return runVectorAnyTrueOp(StackMgr.getTop()); |
1311 | 0 | case OpCode::I8x16__abs: |
1312 | 0 | return runVectorAbsOp<int8_t>(StackMgr.getTop()); |
1313 | 0 | case OpCode::I8x16__neg: |
1314 | 0 | return runVectorNegOp<int8_t>(StackMgr.getTop()); |
1315 | 0 | case OpCode::I8x16__popcnt: |
1316 | 0 | return runVectorPopcntOp(StackMgr.getTop()); |
1317 | 0 | case OpCode::I8x16__all_true: |
1318 | 0 | return runVectorAllTrueOp<uint8_t>(StackMgr.getTop()); |
1319 | 0 | case OpCode::I8x16__bitmask: |
1320 | 0 | return runVectorBitMaskOp<uint8_t>(StackMgr.getTop()); |
1321 | 0 | case OpCode::I8x16__narrow_i16x8_s: { |
1322 | 0 | ValVariant Rhs = StackMgr.pop(); |
1323 | 0 | return runVectorNarrowOp<int16_t, int8_t>(StackMgr.getTop(), Rhs); |
1324 | 0 | } |
1325 | 0 | case OpCode::I8x16__narrow_i16x8_u: { |
1326 | 0 | ValVariant Rhs = StackMgr.pop(); |
1327 | 0 | return runVectorNarrowOp<int16_t, uint8_t>(StackMgr.getTop(), Rhs); |
1328 | 0 | } |
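// Editorial note: the narrow ops pack two i16x8 inputs into one i8x16 result
// with saturation, clamping out-of-range lanes to the destination limits
// (e.g. 300 becomes 127 under narrow_i16x8_s, and -1 becomes 0 under
// narrow_i16x8_u).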
1329 | 0 | case OpCode::I8x16__shl: { |
1330 | 0 | ValVariant Rhs = StackMgr.pop(); |
1331 | 0 | return runVectorShlOp<uint8_t>(StackMgr.getTop(), Rhs); |
1332 | 0 | } |
1333 | 0 | case OpCode::I8x16__shr_s: { |
1334 | 0 | ValVariant Rhs = StackMgr.pop(); |
1335 | 0 | return runVectorShrOp<int8_t>(StackMgr.getTop(), Rhs); |
1336 | 0 | } |
1337 | 0 | case OpCode::I8x16__shr_u: { |
1338 | 0 | ValVariant Rhs = StackMgr.pop(); |
1339 | 0 | return runVectorShrOp<uint8_t>(StackMgr.getTop(), Rhs); |
1340 | 0 | } |
1341 | 0 | case OpCode::I8x16__add: { |
1342 | 0 | ValVariant Rhs = StackMgr.pop(); |
1343 | 0 | return runVectorAddOp<uint8_t>(StackMgr.getTop(), Rhs); |
1344 | 0 | } |
1345 | 0 | case OpCode::I8x16__add_sat_s: { |
1346 | 0 | ValVariant Rhs = StackMgr.pop(); |
1347 | 0 | return runVectorAddSatOp<int8_t>(StackMgr.getTop(), Rhs); |
1348 | 0 | } |
1349 | 0 | case OpCode::I8x16__add_sat_u: { |
1350 | 0 | ValVariant Rhs = StackMgr.pop(); |
1351 | 0 | return runVectorAddSatOp<uint8_t>(StackMgr.getTop(), Rhs); |
1352 | 0 | } |
1353 | 0 | case OpCode::I8x16__sub: { |
1354 | 0 | ValVariant Rhs = StackMgr.pop(); |
1355 | 0 | return runVectorSubOp<uint8_t>(StackMgr.getTop(), Rhs); |
1356 | 0 | } |
1357 | 0 | case OpCode::I8x16__sub_sat_s: { |
1358 | 0 | ValVariant Rhs = StackMgr.pop(); |
1359 | 0 | return runVectorSubSatOp<int8_t>(StackMgr.getTop(), Rhs); |
1360 | 0 | } |
1361 | 0 | case OpCode::I8x16__sub_sat_u: { |
1362 | 0 | ValVariant Rhs = StackMgr.pop(); |
1363 | 0 | return runVectorSubSatOp<uint8_t>(StackMgr.getTop(), Rhs); |
1364 | 0 | } |
1365 | 0 | case OpCode::I8x16__min_s: { |
1366 | 0 | ValVariant Rhs = StackMgr.pop(); |
1367 | 0 | return runVectorMinOp<int8_t>(StackMgr.getTop(), Rhs); |
1368 | 0 | } |
1369 | 0 | case OpCode::I8x16__min_u: { |
1370 | 0 | ValVariant Rhs = StackMgr.pop(); |
1371 | 0 | return runVectorMinOp<uint8_t>(StackMgr.getTop(), Rhs); |
1372 | 0 | } |
1373 | 0 | case OpCode::I8x16__max_s: { |
1374 | 0 | ValVariant Rhs = StackMgr.pop(); |
1375 | 0 | return runVectorMaxOp<int8_t>(StackMgr.getTop(), Rhs); |
1376 | 0 | } |
1377 | 0 | case OpCode::I8x16__max_u: { |
1378 | 0 | ValVariant Rhs = StackMgr.pop(); |
1379 | 0 | return runVectorMaxOp<uint8_t>(StackMgr.getTop(), Rhs); |
1380 | 0 | } |
1381 | 0 | case OpCode::I8x16__avgr_u: { |
1382 | 0 | ValVariant Rhs = StackMgr.pop(); |
1383 | 0 | return runVectorAvgrOp<uint8_t, uint16_t>(StackMgr.getTop(), Rhs); |
1384 | 0 | } |
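// Editorial note: avgr_u is the rounding unsigned average; the second template
// argument (uint16_t) is presumably the widened accumulator, so each lane is
// computed as (a + b + 1) / 2 without intermediate overflow, e.g.
// avgr_u(255, 254) == 255 instead of a wrapped 127.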
1385 | 0 | case OpCode::I16x8__abs: |
1386 | 0 | return runVectorAbsOp<int16_t>(StackMgr.getTop()); |
1387 | 0 | case OpCode::I16x8__neg: |
1388 | 0 | return runVectorNegOp<int16_t>(StackMgr.getTop()); |
1389 | 0 | case OpCode::I16x8__all_true: |
1390 | 0 | return runVectorAllTrueOp<uint16_t>(StackMgr.getTop()); |
1391 | 0 | case OpCode::I16x8__bitmask: |
1392 | 0 | return runVectorBitMaskOp<uint16_t>(StackMgr.getTop()); |
1393 | 0 | case OpCode::I16x8__narrow_i32x4_s: { |
1394 | 0 | ValVariant Rhs = StackMgr.pop(); |
1395 | 0 | return runVectorNarrowOp<int32_t, int16_t>(StackMgr.getTop(), Rhs); |
1396 | 0 | } |
1397 | 0 | case OpCode::I16x8__narrow_i32x4_u: { |
1398 | 0 | ValVariant Rhs = StackMgr.pop(); |
1399 | 0 | return runVectorNarrowOp<int32_t, uint16_t>(StackMgr.getTop(), Rhs); |
1400 | 0 | } |
1401 | 0 | case OpCode::I16x8__extend_low_i8x16_s: |
1402 | 0 | return runVectorExtendLowOp<int8_t, int16_t>(StackMgr.getTop()); |
1403 | 0 | case OpCode::I16x8__extend_high_i8x16_s: |
1404 | 0 | return runVectorExtendHighOp<int8_t, int16_t>(StackMgr.getTop()); |
1405 | 0 | case OpCode::I16x8__extend_low_i8x16_u: |
1406 | 0 | return runVectorExtendLowOp<uint8_t, uint16_t>(StackMgr.getTop()); |
1407 | 0 | case OpCode::I16x8__extend_high_i8x16_u: |
1408 | 0 | return runVectorExtendHighOp<uint8_t, uint16_t>(StackMgr.getTop()); |
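// Editorial note: extend_low/extend_high take the lower or upper eight i8
// lanes and widen them to i16, sign-extending for the _s forms and
// zero-extending for the _u forms; the lane count halves while the vector
// stays 128 bits wide.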
1409 | 0 | case OpCode::I16x8__shl: { |
1410 | 0 | ValVariant Rhs = StackMgr.pop(); |
1411 | 0 | return runVectorShlOp<uint16_t>(StackMgr.getTop(), Rhs); |
1412 | 0 | } |
1413 | 0 | case OpCode::I16x8__shr_s: { |
1414 | 0 | ValVariant Rhs = StackMgr.pop(); |
1415 | 0 | return runVectorShrOp<int16_t>(StackMgr.getTop(), Rhs); |
1416 | 0 | } |
1417 | 0 | case OpCode::I16x8__shr_u: { |
1418 | 0 | ValVariant Rhs = StackMgr.pop(); |
1419 | 0 | return runVectorShrOp<uint16_t>(StackMgr.getTop(), Rhs); |
1420 | 0 | } |
1421 | 0 | case OpCode::I16x8__add: { |
1422 | 0 | ValVariant Rhs = StackMgr.pop(); |
1423 | 0 | return runVectorAddOp<uint16_t>(StackMgr.getTop(), Rhs); |
1424 | 0 | } |
1425 | 0 | case OpCode::I16x8__add_sat_s: { |
1426 | 0 | ValVariant Rhs = StackMgr.pop(); |
1427 | 0 | return runVectorAddSatOp<int16_t>(StackMgr.getTop(), Rhs); |
1428 | 0 | } |
1429 | 0 | case OpCode::I16x8__add_sat_u: { |
1430 | 0 | ValVariant Rhs = StackMgr.pop(); |
1431 | 0 | return runVectorAddSatOp<uint16_t>(StackMgr.getTop(), Rhs); |
1432 | 0 | } |
1433 | 0 | case OpCode::I16x8__sub: { |
1434 | 0 | ValVariant Rhs = StackMgr.pop(); |
1435 | 0 | return runVectorSubOp<uint16_t>(StackMgr.getTop(), Rhs); |
1436 | 0 | } |
1437 | 0 | case OpCode::I16x8__sub_sat_s: { |
1438 | 0 | ValVariant Rhs = StackMgr.pop(); |
1439 | 0 | return runVectorSubSatOp<int16_t>(StackMgr.getTop(), Rhs); |
1440 | 0 | } |
1441 | 0 | case OpCode::I16x8__sub_sat_u: { |
1442 | 0 | ValVariant Rhs = StackMgr.pop(); |
1443 | 0 | return runVectorSubSatOp<uint16_t>(StackMgr.getTop(), Rhs); |
1444 | 0 | } |
1445 | 0 | case OpCode::I16x8__mul: { |
1446 | 0 | ValVariant Rhs = StackMgr.pop(); |
1447 | 0 | return runVectorMulOp<uint16_t>(StackMgr.getTop(), Rhs); |
1448 | 0 | } |
1449 | 0 | case OpCode::I16x8__min_s: { |
1450 | 0 | ValVariant Rhs = StackMgr.pop(); |
1451 | 0 | return runVectorMinOp<int16_t>(StackMgr.getTop(), Rhs); |
1452 | 0 | } |
1453 | 0 | case OpCode::I16x8__min_u: { |
1454 | 0 | ValVariant Rhs = StackMgr.pop(); |
1455 | 0 | return runVectorMinOp<uint16_t>(StackMgr.getTop(), Rhs); |
1456 | 0 | } |
1457 | 0 | case OpCode::I16x8__max_s: { |
1458 | 0 | ValVariant Rhs = StackMgr.pop(); |
1459 | 0 | return runVectorMaxOp<int16_t>(StackMgr.getTop(), Rhs); |
1460 | 0 | } |
1461 | 0 | case OpCode::I16x8__max_u: { |
1462 | 0 | ValVariant Rhs = StackMgr.pop(); |
1463 | 0 | return runVectorMaxOp<uint16_t>(StackMgr.getTop(), Rhs); |
1464 | 0 | } |
1465 | 0 | case OpCode::I16x8__avgr_u: { |
1466 | 0 | ValVariant Rhs = StackMgr.pop(); |
1467 | 0 | return runVectorAvgrOp<uint16_t, uint32_t>(StackMgr.getTop(), Rhs); |
1468 | 0 | } |
1469 | 0 | case OpCode::I16x8__extmul_low_i8x16_s: { |
1470 | 0 | ValVariant Rhs = StackMgr.pop(); |
1471 | 0 | return runVectorExtMulLowOp<int8_t, int16_t>(StackMgr.getTop(), Rhs); |
1472 | 0 | } |
1473 | 0 | case OpCode::I16x8__extmul_high_i8x16_s: { |
1474 | 0 | ValVariant Rhs = StackMgr.pop(); |
1475 | 0 | return runVectorExtMulHighOp<int8_t, int16_t>(StackMgr.getTop(), Rhs); |
1476 | 0 | } |
1477 | 0 | case OpCode::I16x8__extmul_low_i8x16_u: { |
1478 | 0 | ValVariant Rhs = StackMgr.pop(); |
1479 | 0 | return runVectorExtMulLowOp<uint8_t, uint16_t>(StackMgr.getTop(), Rhs); |
1480 | 0 | } |
1481 | 0 | case OpCode::I16x8__extmul_high_i8x16_u: { |
1482 | 0 | ValVariant Rhs = StackMgr.pop(); |
1483 | 0 | return runVectorExtMulHighOp<uint8_t, uint16_t>(StackMgr.getTop(), Rhs); |
1484 | 0 | } |
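// Editorial note: the extmul variants are effectively extend_low/extend_high
// followed by a multiply carried out in the wider lane type, so the products
// are exact (e.g. int8_t 127 * 127 == 16129, which fits in int16_t).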
1485 | 0 | case OpCode::I16x8__q15mulr_sat_s: { |
1486 | 0 | ValVariant Rhs = StackMgr.pop(); |
1487 | 0 | return runVectorQ15MulSatOp(StackMgr.getTop(), Rhs); |
1488 | 0 | } |
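// Editorial sketch: q15mulr_sat_s is a Q15 fixed-point multiply with rounding
// and saturation, per lane roughly
//   sat_i16((int32_t(a) * int32_t(b) + 0x4000) >> 15);
// the only input that saturates is INT16_MIN * INT16_MIN, which clamps to
// INT16_MAX.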
1489 | 0 | case OpCode::I16x8__extadd_pairwise_i8x16_s: |
1490 | 0 | return runVectorExtAddPairwiseOp<int8_t, int16_t>(StackMgr.getTop()); |
1491 | 0 | case OpCode::I16x8__extadd_pairwise_i8x16_u: |
1492 | 0 | return runVectorExtAddPairwiseOp<uint8_t, uint16_t>(StackMgr.getTop()); |
1493 | 0 | case OpCode::I32x4__abs: |
1494 | 0 | return runVectorAbsOp<int32_t>(StackMgr.getTop()); |
1495 | 0 | case OpCode::I32x4__neg: |
1496 | 0 | return runVectorNegOp<int32_t>(StackMgr.getTop()); |
1497 | 0 | case OpCode::I32x4__all_true: |
1498 | 0 | return runVectorAllTrueOp<uint32_t>(StackMgr.getTop()); |
1499 | 0 | case OpCode::I32x4__bitmask: |
1500 | 0 | return runVectorBitMaskOp<uint32_t>(StackMgr.getTop()); |
1501 | 0 | case OpCode::I32x4__extend_low_i16x8_s: |
1502 | 0 | return runVectorExtendLowOp<int16_t, int32_t>(StackMgr.getTop()); |
1503 | 0 | case OpCode::I32x4__extend_high_i16x8_s: |
1504 | 0 | return runVectorExtendHighOp<int16_t, int32_t>(StackMgr.getTop()); |
1505 | 0 | case OpCode::I32x4__extend_low_i16x8_u: |
1506 | 0 | return runVectorExtendLowOp<uint16_t, uint32_t>(StackMgr.getTop()); |
1507 | 0 | case OpCode::I32x4__extend_high_i16x8_u: |
1508 | 0 | return runVectorExtendHighOp<uint16_t, uint32_t>(StackMgr.getTop()); |
1509 | 0 | case OpCode::I32x4__shl: { |
1510 | 0 | ValVariant Rhs = StackMgr.pop(); |
1511 | 0 | return runVectorShlOp<uint32_t>(StackMgr.getTop(), Rhs); |
1512 | 0 | } |
1513 | 0 | case OpCode::I32x4__shr_s: { |
1514 | 0 | ValVariant Rhs = StackMgr.pop(); |
1515 | 0 | return runVectorShrOp<int32_t>(StackMgr.getTop(), Rhs); |
1516 | 0 | } |
1517 | 0 | case OpCode::I32x4__shr_u: { |
1518 | 0 | ValVariant Rhs = StackMgr.pop(); |
1519 | 0 | return runVectorShrOp<uint32_t>(StackMgr.getTop(), Rhs); |
1520 | 0 | } |
1521 | 0 | case OpCode::I32x4__add: { |
1522 | 0 | ValVariant Rhs = StackMgr.pop(); |
1523 | 0 | return runVectorAddOp<uint32_t>(StackMgr.getTop(), Rhs); |
1524 | 0 | } |
1525 | 0 | case OpCode::I32x4__sub: { |
1526 | 0 | ValVariant Rhs = StackMgr.pop(); |
1527 | 0 | return runVectorSubOp<uint32_t>(StackMgr.getTop(), Rhs); |
1528 | 0 | } |
1529 | 0 | case OpCode::I32x4__mul: { |
1530 | 0 | ValVariant Rhs = StackMgr.pop(); |
1531 | 0 | return runVectorMulOp<uint32_t>(StackMgr.getTop(), Rhs); |
1532 | 0 | } |
1533 | 0 | case OpCode::I32x4__min_s: { |
1534 | 0 | ValVariant Rhs = StackMgr.pop(); |
1535 | 0 | return runVectorMinOp<int32_t>(StackMgr.getTop(), Rhs); |
1536 | 0 | } |
1537 | 0 | case OpCode::I32x4__min_u: { |
1538 | 0 | ValVariant Rhs = StackMgr.pop(); |
1539 | 0 | return runVectorMinOp<uint32_t>(StackMgr.getTop(), Rhs); |
1540 | 0 | } |
1541 | 0 | case OpCode::I32x4__max_s: { |
1542 | 0 | ValVariant Rhs = StackMgr.pop(); |
1543 | 0 | return runVectorMaxOp<int32_t>(StackMgr.getTop(), Rhs); |
1544 | 0 | } |
1545 | 0 | case OpCode::I32x4__max_u: { |
1546 | 0 | ValVariant Rhs = StackMgr.pop(); |
1547 | 0 | return runVectorMaxOp<uint32_t>(StackMgr.getTop(), Rhs); |
1548 | 0 | } |
1549 | 0 | case OpCode::I32x4__extmul_low_i16x8_s: { |
1550 | 0 | ValVariant Rhs = StackMgr.pop(); |
1551 | 0 | return runVectorExtMulLowOp<int16_t, int32_t>(StackMgr.getTop(), Rhs); |
1552 | 0 | } |
1553 | 0 | case OpCode::I32x4__extmul_high_i16x8_s: { |
1554 | 0 | ValVariant Rhs = StackMgr.pop(); |
1555 | 0 | return runVectorExtMulHighOp<int16_t, int32_t>(StackMgr.getTop(), Rhs); |
1556 | 0 | } |
1557 | 0 | case OpCode::I32x4__extmul_low_i16x8_u: { |
1558 | 0 | ValVariant Rhs = StackMgr.pop(); |
1559 | 0 | return runVectorExtMulLowOp<uint16_t, uint32_t>(StackMgr.getTop(), Rhs); |
1560 | 0 | } |
1561 | 0 | case OpCode::I32x4__extmul_high_i16x8_u: { |
1562 | 0 | ValVariant Rhs = StackMgr.pop(); |
1563 | 0 | return runVectorExtMulHighOp<uint16_t, uint32_t>(StackMgr.getTop(), Rhs); |
1564 | 0 | } |
1565 | 0 | case OpCode::I32x4__extadd_pairwise_i16x8_s: |
1566 | 0 | return runVectorExtAddPairwiseOp<int16_t, int32_t>(StackMgr.getTop()); |
1567 | 0 | case OpCode::I32x4__extadd_pairwise_i16x8_u: |
1568 | 0 | return runVectorExtAddPairwiseOp<uint16_t, uint32_t>(StackMgr.getTop()); |
1569 | 0 | case OpCode::I64x2__abs: |
1570 | 0 | return runVectorAbsOp<int64_t>(StackMgr.getTop()); |
1571 | 0 | case OpCode::I64x2__neg: |
1572 | 0 | return runVectorNegOp<int64_t>(StackMgr.getTop()); |
1573 | 0 | case OpCode::I64x2__all_true: |
1574 | 0 | return runVectorAllTrueOp<uint64_t>(StackMgr.getTop()); |
1575 | 0 | case OpCode::I64x2__bitmask: |
1576 | 0 | return runVectorBitMaskOp<uint64_t>(StackMgr.getTop()); |
1577 | 0 | case OpCode::I64x2__extend_low_i32x4_s: |
1578 | 0 | return runVectorExtendLowOp<int32_t, int64_t>(StackMgr.getTop()); |
1579 | 0 | case OpCode::I64x2__extend_high_i32x4_s: |
1580 | 0 | return runVectorExtendHighOp<int32_t, int64_t>(StackMgr.getTop()); |
1581 | 0 | case OpCode::I64x2__extend_low_i32x4_u: |
1582 | 0 | return runVectorExtendLowOp<uint32_t, uint64_t>(StackMgr.getTop()); |
1583 | 0 | case OpCode::I64x2__extend_high_i32x4_u: |
1584 | 0 | return runVectorExtendHighOp<uint32_t, uint64_t>(StackMgr.getTop()); |
1585 | 0 | case OpCode::I64x2__shl: { |
1586 | 0 | ValVariant Rhs = StackMgr.pop(); |
1587 | 0 | return runVectorShlOp<uint64_t>(StackMgr.getTop(), Rhs); |
1588 | 0 | } |
1589 | 0 | case OpCode::I64x2__shr_s: { |
1590 | 0 | ValVariant Rhs = StackMgr.pop(); |
1591 | 0 | return runVectorShrOp<int64_t>(StackMgr.getTop(), Rhs); |
1592 | 0 | } |
1593 | 0 | case OpCode::I64x2__shr_u: { |
1594 | 0 | ValVariant Rhs = StackMgr.pop(); |
1595 | 0 | return runVectorShrOp<uint64_t>(StackMgr.getTop(), Rhs); |
1596 | 0 | } |
1597 | 0 | case OpCode::I64x2__add: { |
1598 | 0 | ValVariant Rhs = StackMgr.pop(); |
1599 | 0 | return runVectorAddOp<uint64_t>(StackMgr.getTop(), Rhs); |
1600 | 0 | } |
1601 | 0 | case OpCode::I64x2__sub: { |
1602 | 0 | ValVariant Rhs = StackMgr.pop(); |
1603 | 0 | return runVectorSubOp<uint64_t>(StackMgr.getTop(), Rhs); |
1604 | 0 | } |
1605 | 0 | case OpCode::I64x2__mul: { |
1606 | 0 | ValVariant Rhs = StackMgr.pop(); |
1607 | 0 | return runVectorMulOp<uint64_t>(StackMgr.getTop(), Rhs); |
1608 | 0 | } |
1609 | 0 | case OpCode::I64x2__extmul_low_i32x4_s: { |
1610 | 0 | ValVariant Rhs = StackMgr.pop(); |
1611 | 0 | return runVectorExtMulLowOp<int32_t, int64_t>(StackMgr.getTop(), Rhs); |
1612 | 0 | } |
1613 | 0 | case OpCode::I64x2__extmul_high_i32x4_s: { |
1614 | 0 | ValVariant Rhs = StackMgr.pop(); |
1615 | 0 | return runVectorExtMulHighOp<int32_t, int64_t>(StackMgr.getTop(), Rhs); |
1616 | 0 | } |
1617 | 0 | case OpCode::I64x2__extmul_low_i32x4_u: { |
1618 | 0 | ValVariant Rhs = StackMgr.pop(); |
1619 | 0 | return runVectorExtMulLowOp<uint32_t, uint64_t>(StackMgr.getTop(), Rhs); |
1620 | 0 | } |
1621 | 0 | case OpCode::I64x2__extmul_high_i32x4_u: { |
1622 | 0 | ValVariant Rhs = StackMgr.pop(); |
1623 | 0 | return runVectorExtMulHighOp<uint32_t, uint64_t>(StackMgr.getTop(), Rhs); |
1624 | 0 | } |
1625 | 0 | case OpCode::F32x4__abs: |
1626 | 0 | return runVectorAbsOp<float>(StackMgr.getTop()); |
1627 | 0 | case OpCode::F32x4__neg: |
1628 | 0 | return runVectorNegOp<float>(StackMgr.getTop()); |
1629 | 0 | case OpCode::F32x4__sqrt: |
1630 | 0 | return runVectorSqrtOp<float>(StackMgr.getTop()); |
1631 | 0 | case OpCode::F32x4__add: { |
1632 | 0 | ValVariant Rhs = StackMgr.pop(); |
1633 | 0 | return runVectorAddOp<float>(StackMgr.getTop(), Rhs); |
1634 | 0 | } |
1635 | 0 | case OpCode::F32x4__sub: { |
1636 | 0 | ValVariant Rhs = StackMgr.pop(); |
1637 | 0 | return runVectorSubOp<float>(StackMgr.getTop(), Rhs); |
1638 | 0 | } |
1639 | 0 | case OpCode::F32x4__mul: { |
1640 | 0 | ValVariant Rhs = StackMgr.pop(); |
1641 | 0 | return runVectorMulOp<float>(StackMgr.getTop(), Rhs); |
1642 | 0 | } |
1643 | 0 | case OpCode::F32x4__div: { |
1644 | 0 | ValVariant Rhs = StackMgr.pop(); |
1645 | 0 | return runVectorDivOp<float>(StackMgr.getTop(), Rhs); |
1646 | 0 | } |
1647 | 0 | case OpCode::F32x4__min: { |
1648 | 0 | ValVariant Rhs = StackMgr.pop(); |
1649 | 0 | return runVectorFMinOp<float>(StackMgr.getTop(), Rhs); |
1650 | 0 | } |
1651 | 0 | case OpCode::F32x4__max: { |
1652 | 0 | ValVariant Rhs = StackMgr.pop(); |
1653 | 0 | return runVectorFMaxOp<float>(StackMgr.getTop(), Rhs); |
1654 | 0 | } |
1655 | 0 | case OpCode::F32x4__pmin: { |
1656 | 0 | ValVariant Rhs = StackMgr.pop(); |
1657 | 0 | return runVectorMinOp<float>(StackMgr.getTop(), Rhs); |
1658 | 0 | } |
1659 | 0 | case OpCode::F32x4__pmax: { |
1660 | 0 | ValVariant Rhs = StackMgr.pop(); |
1661 | 0 | return runVectorMaxOp<float>(StackMgr.getTop(), Rhs); |
1662 | 0 | } |
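// Editorial note: the split above looks deliberate: min/max go through
// runVectorFMinOp/runVectorFMaxOp, which implement the NaN-propagating Wasm
// f32x4.min/max rules (including min(-0, +0) == -0), while pmin/pmax reuse the
// plain comparison-based helpers, matching the spec's pseudo-minimum
// (b < a ? b : a) and pseudo-maximum (a < b ? b : a).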
1663 | 0 | case OpCode::F64x2__abs: |
1664 | 0 | return runVectorAbsOp<double>(StackMgr.getTop()); |
1665 | 0 | case OpCode::F64x2__neg: |
1666 | 0 | return runVectorNegOp<double>(StackMgr.getTop()); |
1667 | 0 | case OpCode::F64x2__sqrt: |
1668 | 0 | return runVectorSqrtOp<double>(StackMgr.getTop()); |
1669 | 0 | case OpCode::F64x2__add: { |
1670 | 0 | ValVariant Rhs = StackMgr.pop(); |
1671 | 0 | return runVectorAddOp<double>(StackMgr.getTop(), Rhs); |
1672 | 0 | } |
1673 | 0 | case OpCode::F64x2__sub: { |
1674 | 0 | ValVariant Rhs = StackMgr.pop(); |
1675 | 0 | return runVectorSubOp<double>(StackMgr.getTop(), Rhs); |
1676 | 0 | } |
1677 | 0 | case OpCode::F64x2__mul: { |
1678 | 0 | ValVariant Rhs = StackMgr.pop(); |
1679 | 0 | return runVectorMulOp<double>(StackMgr.getTop(), Rhs); |
1680 | 0 | } |
1681 | 0 | case OpCode::F64x2__div: { |
1682 | 0 | ValVariant Rhs = StackMgr.pop(); |
1683 | 0 | return runVectorDivOp<double>(StackMgr.getTop(), Rhs); |
1684 | 0 | } |
1685 | 0 | case OpCode::F64x2__min: { |
1686 | 0 | ValVariant Rhs = StackMgr.pop(); |
1687 | 0 | return runVectorFMinOp<double>(StackMgr.getTop(), Rhs); |
1688 | 0 | } |
1689 | 0 | case OpCode::F64x2__max: { |
1690 | 0 | ValVariant Rhs = StackMgr.pop(); |
1691 | 0 | return runVectorFMaxOp<double>(StackMgr.getTop(), Rhs); |
1692 | 0 | } |
1693 | 0 | case OpCode::F64x2__pmin: { |
1694 | 0 | ValVariant Rhs = StackMgr.pop(); |
1695 | 0 | return runVectorMinOp<double>(StackMgr.getTop(), Rhs); |
1696 | 0 | } |
1697 | 0 | case OpCode::F64x2__pmax: { |
1698 | 0 | ValVariant Rhs = StackMgr.pop(); |
1699 | 0 | return runVectorMaxOp<double>(StackMgr.getTop(), Rhs); |
1700 | 0 | } |
1701 | 0 | case OpCode::I32x4__trunc_sat_f32x4_s: |
1702 | 0 | return runVectorTruncSatOp<float, int32_t>(StackMgr.getTop()); |
1703 | 0 | case OpCode::I32x4__trunc_sat_f32x4_u: |
1704 | 0 | return runVectorTruncSatOp<float, uint32_t>(StackMgr.getTop()); |
1705 | 0 | case OpCode::F32x4__convert_i32x4_s: |
1706 | 0 | return runVectorConvertOp<int32_t, float>(StackMgr.getTop()); |
1707 | 0 | case OpCode::F32x4__convert_i32x4_u: |
1708 | 0 | return runVectorConvertOp<uint32_t, float>(StackMgr.getTop()); |
1709 | 0 | case OpCode::I32x4__trunc_sat_f64x2_s_zero: |
1710 | 0 | return runVectorTruncSatOp<double, int32_t>(StackMgr.getTop()); |
1711 | 0 | case OpCode::I32x4__trunc_sat_f64x2_u_zero: |
1712 | 0 | return runVectorTruncSatOp<double, uint32_t>(StackMgr.getTop()); |
1713 | 0 | case OpCode::F64x2__convert_low_i32x4_s: |
1714 | 0 | return runVectorConvertOp<int32_t, double>(StackMgr.getTop()); |
1715 | 0 | case OpCode::F64x2__convert_low_i32x4_u: |
1716 | 0 | return runVectorConvertOp<uint32_t, double>(StackMgr.getTop()); |
1717 | 0 | case OpCode::F32x4__demote_f64x2_zero: |
1718 | 0 | return runVectorDemoteOp(StackMgr.getTop()); |
1719 | 0 | case OpCode::F64x2__promote_low_f32x4: |
1720 | 0 | return runVectorPromoteOp(StackMgr.getTop()); |
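// Editorial note: trunc_sat converts float lanes to integers with saturation
// (NaN lanes become 0, out-of-range lanes clamp); the _zero and _low variants
// exist because f64x2 has only two lanes, so conversions to or from i32x4 and
// f32x4 either zero-fill the upper half or read only the low half.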
1721 | | |
1722 | | #if defined(_MSC_VER) && !defined(__clang__) // MSVC |
1723 | | case OpCode::I32x4__dot_i16x8_s: { |
1724 | | using int32x8_t = SIMDArray<int32_t, 32>; |
1725 | | const ValVariant Val2 = StackMgr.pop(); |
1726 | | ValVariant &Val1 = StackMgr.getTop(); |
1727 | | |
1728 | | auto &V2 = Val2.get<int16x8_t>(); |
1729 | | auto &V1 = Val1.get<int16x8_t>(); |
1730 | | int32x8_t M; |
1731 | | |
1732 | | for (size_t I = 0; I < 8; ++I) { |
1733 | | M[I] = V1[I] * V2[I]; |
1734 | | } |
1735 | | |
1736 | | int32x4_t Result; |
1737 | | for (size_t I = 0; I < 4; ++I) { |
1738 | | Result[I] = M[I * 2] + M[I * 2 + 1]; |
1739 | | } |
1740 | | Val1.emplace<int32x4_t>(Result); |
1741 | | return {}; |
1742 | | } |
1743 | | #else |
1744 | 0 | case OpCode::I32x4__dot_i16x8_s: { |
1745 | 0 | using int32x8_t [[gnu::vector_size(32)]] = int32_t; |
1746 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1747 | 0 | ValVariant &Val1 = StackMgr.getTop(); |
1748 | |
1749 | 0 | auto &V2 = Val2.get<int16x8_t>(); |
1750 | 0 | auto &V1 = Val1.get<int16x8_t>(); |
1751 | 0 | const auto M = __builtin_convertvector(V1, int32x8_t) * |
1752 | 0 | __builtin_convertvector(V2, int32x8_t); |
1753 | 0 | const int32x4_t L = {M[0], M[2], M[4], M[6]}; |
1754 | 0 | const int32x4_t R = {M[1], M[3], M[5], M[7]}; |
1755 | 0 | Val1.emplace<int32x4_t>(L + R); |
1756 | |
1757 | 0 | return {}; |
1758 | 0 | } |
1759 | 0 | #endif // MSVC |
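// Editorial sketch: both branches of i32x4.dot_i16x8_s above do the same
// thing: widen the i16 lanes to i32, multiply lane-wise, then add adjacent
// product pairs, i.e. Result[i] = V1[2i]*V2[2i] + V1[2i+1]*V2[2i+1]. The only
// pair that can overflow is INT16_MIN * INT16_MIN twice, which wraps to
// INT32_MIN as modular i32 addition allows.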
1760 | 0 | case OpCode::F32x4__ceil: |
1761 | 0 | return runVectorCeilOp<float>(StackMgr.getTop()); |
1762 | 0 | case OpCode::F32x4__floor: |
1763 | 0 | return runVectorFloorOp<float>(StackMgr.getTop()); |
1764 | 0 | case OpCode::F32x4__trunc: |
1765 | 0 | return runVectorTruncOp<float>(StackMgr.getTop()); |
1766 | 0 | case OpCode::F32x4__nearest: |
1767 | 0 | return runVectorNearestOp<float>(StackMgr.getTop()); |
1768 | 0 | case OpCode::F64x2__ceil: |
1769 | 0 | return runVectorCeilOp<double>(StackMgr.getTop()); |
1770 | 0 | case OpCode::F64x2__floor: |
1771 | 0 | return runVectorFloorOp<double>(StackMgr.getTop()); |
1772 | 0 | case OpCode::F64x2__trunc: |
1773 | 0 | return runVectorTruncOp<double>(StackMgr.getTop()); |
1774 | 0 | case OpCode::F64x2__nearest: |
1775 | 0 | return runVectorNearestOp<double>(StackMgr.getTop()); |
1776 | | |
1777 | | // Relaxed SIMD Instructions |
1778 | 0 | case OpCode::I8x16__relaxed_swizzle: { |
1779 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1780 | 0 | ValVariant &Val1 = StackMgr.getTop(); |
1781 | 0 | uint8x16_t Index = Val2.get<uint8x16_t>(); |
1782 | | if constexpr (Endian::native == Endian::big) { |
1783 | | #if defined(_MSC_VER) && !defined(__clang__) |
1784 | | std::for_each(Index.begin(), Index.end(), [](auto &I) { I = 15 - I; }); |
1785 | | #else |
1786 | | Index = 15 - Index; |
1787 | | #endif |
1788 | | } |
1789 | 0 | uint8x16_t &Vector = Val1.get<uint8x16_t>(); |
1790 | 0 | uint8x16_t Result{}; |
1791 | 0 | for (size_t I = 0; I < 16; ++I) { |
1792 | 0 | const uint8_t SwizzleIndex = Index[I]; |
1793 | 0 | if (SwizzleIndex < 16) { |
1794 | 0 | Result[I] = Vector[SwizzleIndex]; |
1795 | 0 | } else { |
1796 | 0 | Result[I] = 0; |
1797 | 0 | } |
1798 | 0 | } |
1799 | 0 | Vector = Result; |
1800 | 0 | return {}; |
1801 | 0 | } |
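// Editorial note: relaxed_swizzle leaves the result for lane indices >= 16
// implementation-defined; this interpreter picks the strict swizzle behaviour
// and writes 0 for such lanes. The big-endian branch appears to mirror the
// indices (I = 15 - I) to compensate for the reversed byte order of v128
// storage on big-endian hosts.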
1802 | 0 | case OpCode::I32x4__relaxed_trunc_f32x4_s: |
1803 | 0 | return runVectorTruncSatOp<float, int32_t>(StackMgr.getTop()); |
1804 | 0 | case OpCode::I32x4__relaxed_trunc_f32x4_u: |
1805 | 0 | return runVectorTruncSatOp<float, uint32_t>(StackMgr.getTop()); |
1806 | 0 | case OpCode::I32x4__relaxed_trunc_f64x2_s_zero: |
1807 | 0 | return runVectorTruncSatOp<double, int32_t>(StackMgr.getTop()); |
1808 | 0 | case OpCode::I32x4__relaxed_trunc_f64x2_u_zero: |
1809 | 0 | return runVectorTruncSatOp<double, uint32_t>(StackMgr.getTop()); |
1810 | 0 | case OpCode::F32x4__relaxed_madd: { |
1811 | 0 | const ValVariant Val3 = StackMgr.pop(); |
1812 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1813 | 0 | runVectorMulOp<float>(StackMgr.getTop(), Val2); |
1814 | 0 | return runVectorAddOp<float>(StackMgr.getTop(), Val3); |
1815 | 0 | } |
1816 | 0 | case OpCode::F32x4__relaxed_nmadd: { |
1817 | 0 | const ValVariant Val3 = StackMgr.pop(); |
1818 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1819 | 0 | runVectorNegOp<float>(StackMgr.getTop()); |
1820 | 0 | runVectorMulOp<float>(StackMgr.getTop(), Val2); |
1821 | 0 | return runVectorAddOp<float>(StackMgr.getTop(), Val3); |
1822 | 0 | } |
1823 | 0 | case OpCode::F64x2__relaxed_madd: { |
1824 | 0 | const ValVariant Val3 = StackMgr.pop(); |
1825 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1826 | 0 | runVectorMulOp<double>(StackMgr.getTop(), Val2); |
1827 | 0 | return runVectorAddOp<double>(StackMgr.getTop(), Val3); |
1828 | 0 | } |
1829 | 0 | case OpCode::F64x2__relaxed_nmadd: { |
1830 | 0 | const ValVariant Val3 = StackMgr.pop(); |
1831 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1832 | 0 | runVectorMulOp<double>(StackMgr.getTop(), Val2); |
1833 | 0 | runVectorNegOp<double>(StackMgr.getTop()); |
1834 | 0 | return runVectorAddOp<double>(StackMgr.getTop(), Val3); |
1835 | 0 | } |
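// Editorial note: relaxed madd/nmadd may be fused or unfused; the code above
// takes the unfused route (a separately rounded multiply followed by an add).
// nmadd computes -(a*b) + c, obtained here by negating the first operand
// before the multiply (f32x4) or by negating the product afterwards (f64x2);
// both are algebraically equivalent.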
1836 | 0 | case OpCode::I8x16__relaxed_laneselect: { |
1837 | 0 | const ValVariant Mask = StackMgr.pop(); |
1838 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1839 | 0 | return runVectorRelaxedLaneselectOp<uint8_t>(StackMgr.getTop(), Val2, |
1840 | 0 | Mask); |
1841 | 0 | } |
1842 | 0 | case OpCode::I16x8__relaxed_laneselect: { |
1843 | 0 | const ValVariant Mask = StackMgr.pop(); |
1844 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1845 | 0 | return runVectorRelaxedLaneselectOp<uint16_t>(StackMgr.getTop(), Val2, |
1846 | 0 | Mask); |
1847 | 0 | } |
1848 | 0 | case OpCode::I32x4__relaxed_laneselect: { |
1849 | 0 | const ValVariant Mask = StackMgr.pop(); |
1850 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1851 | 0 | return runVectorRelaxedLaneselectOp<uint32_t>(StackMgr.getTop(), Val2, |
1852 | 0 | Mask); |
1853 | 0 | } |
1854 | 0 | case OpCode::I64x2__relaxed_laneselect: { |
1855 | 0 | const ValVariant Mask = StackMgr.pop(); |
1856 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1857 | 0 | return runVectorRelaxedLaneselectOp<uint64_t>(StackMgr.getTop(), Val2, |
1858 | 0 | Mask); |
1859 | 0 | } |
1860 | 0 | case OpCode::F32x4__relaxed_min: { |
1861 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1862 | 0 | return runVectorFMinOp<float>(StackMgr.getTop(), Val2); |
1863 | 0 | } |
1864 | 0 | case OpCode::F32x4__relaxed_max: { |
1865 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1866 | 0 | return runVectorFMaxOp<float>(StackMgr.getTop(), Val2); |
1867 | 0 | } |
1868 | 0 | case OpCode::F64x2__relaxed_min: { |
1869 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1870 | 0 | return runVectorFMinOp<double>(StackMgr.getTop(), Val2); |
1871 | 0 | } |
1872 | 0 | case OpCode::F64x2__relaxed_max: { |
1873 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1874 | 0 | return runVectorFMaxOp<double>(StackMgr.getTop(), Val2); |
1875 | 0 | } |
1876 | 0 | case OpCode::I16x8__relaxed_q15mulr_s: { |
1877 | 0 | ValVariant Rhs = StackMgr.pop(); |
1878 | 0 | return runVectorQ15MulSatOp(StackMgr.getTop(), Rhs); |
1879 | 0 | } |
1880 | 0 | case OpCode::I16x8__relaxed_dot_i8x16_i7x16_s: { |
1881 | 0 | ValVariant Rhs = StackMgr.pop(); |
1882 | 0 | return runVectorRelaxedIntegerDotProductOp(StackMgr.getTop(), Rhs); |
1883 | 0 | } |
1884 | 0 | case OpCode::I32x4__relaxed_dot_i8x16_i7x16_add_s: { |
1885 | 0 | ValVariant C = StackMgr.pop(); |
1886 | 0 | ValVariant Rhs = StackMgr.pop(); |
1887 | 0 | return runVectorRelaxedIntegerDotProductOpAdd(StackMgr.getTop(), Rhs, C); |
1888 | 0 | } |
1889 | | |
1890 | | // Atomic Instructions |
1891 | 0 | case OpCode::Atomic__fence: |
1892 | 0 | return runMemoryFenceOp(); |
1893 | 0 | case OpCode::Memory__atomic__notify: |
1894 | 0 | return runAtomicNotifyOp( |
1895 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1896 | 0 | case OpCode::Memory__atomic__wait32: |
1897 | 0 | return runAtomicWaitOp<int32_t>( |
1898 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1899 | 0 | case OpCode::Memory__atomic__wait64: |
1900 | 0 | return runAtomicWaitOp<int64_t>( |
1901 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
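// Editorial note: memory.atomic.wait{32,64} compare the addressed memory cell
// with the expected operand and, if equal, block until a matching
// memory.atomic.notify or until the timeout expires; the i32 result
// distinguishes woken (0), not-equal (1) and timed-out (2). notify returns
// the number of waiters it woke.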
1902 | 0 | case OpCode::I32__atomic__load: |
1903 | 0 | return runAtomicLoadOp<int32_t, uint32_t>( |
1904 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1905 | 0 | case OpCode::I64__atomic__load: |
1906 | 0 | return runAtomicLoadOp<int64_t, uint64_t>( |
1907 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1908 | 0 | case OpCode::I32__atomic__load8_u: |
1909 | 0 | return runAtomicLoadOp<uint32_t, uint8_t>( |
1910 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1911 | 0 | case OpCode::I32__atomic__load16_u: |
1912 | 0 | return runAtomicLoadOp<uint32_t, uint16_t>( |
1913 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1914 | 0 | case OpCode::I64__atomic__load8_u: |
1915 | 0 | return runAtomicLoadOp<uint64_t, uint8_t>( |
1916 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1917 | 0 | case OpCode::I64__atomic__load16_u: |
1918 | 0 | return runAtomicLoadOp<uint64_t, uint16_t>( |
1919 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1920 | 0 | case OpCode::I64__atomic__load32_u: |
1921 | 0 | return runAtomicLoadOp<uint64_t, uint32_t>( |
1922 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1923 | 0 | case OpCode::I32__atomic__store: |
1924 | 0 | return runAtomicStoreOp<int32_t, uint32_t>( |
1925 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1926 | 0 | case OpCode::I64__atomic__store: |
1927 | 0 | return runAtomicStoreOp<int64_t, uint64_t>( |
1928 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1929 | 0 | case OpCode::I32__atomic__store8: |
1930 | 0 | return runAtomicStoreOp<uint32_t, uint8_t>( |
1931 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1932 | 0 | case OpCode::I32__atomic__store16: |
1933 | 0 | return runAtomicStoreOp<uint32_t, uint16_t>( |
1934 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1935 | 0 | case OpCode::I64__atomic__store8: |
1936 | 0 | return runAtomicStoreOp<uint64_t, uint8_t>( |
1937 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1938 | 0 | case OpCode::I64__atomic__store16: |
1939 | 0 | return runAtomicStoreOp<uint64_t, uint16_t>( |
1940 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1941 | 0 | case OpCode::I64__atomic__store32: |
1942 | 0 | return runAtomicStoreOp<uint64_t, uint32_t>( |
1943 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1944 | 0 | case OpCode::I32__atomic__rmw__add: |
1945 | 0 | return runAtomicAddOp<int32_t, uint32_t>( |
1946 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1947 | 0 | case OpCode::I64__atomic__rmw__add: |
1948 | 0 | return runAtomicAddOp<int64_t, uint64_t>( |
1949 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1950 | 0 | case OpCode::I32__atomic__rmw8__add_u: |
1951 | 0 | return runAtomicAddOp<uint32_t, uint8_t>( |
1952 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1953 | 0 | case OpCode::I32__atomic__rmw16__add_u: |
1954 | 0 | return runAtomicAddOp<uint32_t, uint16_t>( |
1955 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1956 | 0 | case OpCode::I64__atomic__rmw8__add_u: |
1957 | 0 | return runAtomicAddOp<uint64_t, uint8_t>( |
1958 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1959 | 0 | case OpCode::I64__atomic__rmw16__add_u: |
1960 | 0 | return runAtomicAddOp<uint64_t, uint16_t>( |
1961 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1962 | 0 | case OpCode::I64__atomic__rmw32__add_u: |
1963 | 0 | return runAtomicAddOp<uint64_t, uint32_t>( |
1964 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1965 | 0 | case OpCode::I32__atomic__rmw__sub: |
1966 | 0 | return runAtomicSubOp<int32_t, uint32_t>( |
1967 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1968 | 0 | case OpCode::I64__atomic__rmw__sub: |
1969 | 0 | return runAtomicSubOp<int64_t, uint64_t>( |
1970 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1971 | 0 | case OpCode::I32__atomic__rmw8__sub_u: |
1972 | 0 | return runAtomicSubOp<uint32_t, uint8_t>( |
1973 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1974 | 0 | case OpCode::I32__atomic__rmw16__sub_u: |
1975 | 0 | return runAtomicSubOp<uint32_t, uint16_t>( |
1976 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1977 | 0 | case OpCode::I64__atomic__rmw8__sub_u: |
1978 | 0 | return runAtomicSubOp<uint64_t, uint8_t>( |
1979 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1980 | 0 | case OpCode::I64__atomic__rmw16__sub_u: |
1981 | 0 | return runAtomicSubOp<uint64_t, uint16_t>( |
1982 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1983 | 0 | case OpCode::I64__atomic__rmw32__sub_u: |
1984 | 0 | return runAtomicSubOp<uint64_t, uint32_t>( |
1985 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1986 | 0 | case OpCode::I32__atomic__rmw__and: |
1987 | 0 | return runAtomicAndOp<int32_t, uint32_t>( |
1988 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1989 | 0 | case OpCode::I64__atomic__rmw__and: |
1990 | 0 | return runAtomicAndOp<int64_t, uint64_t>( |
1991 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1992 | 0 | case OpCode::I32__atomic__rmw8__and_u: |
1993 | 0 | return runAtomicAndOp<uint32_t, uint8_t>( |
1994 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1995 | 0 | case OpCode::I32__atomic__rmw16__and_u: |
1996 | 0 | return runAtomicAndOp<uint32_t, uint16_t>( |
1997 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1998 | 0 | case OpCode::I64__atomic__rmw8__and_u: |
1999 | 0 | return runAtomicAndOp<uint64_t, uint8_t>( |
2000 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2001 | 0 | case OpCode::I64__atomic__rmw16__and_u: |
2002 | 0 | return runAtomicAndOp<uint64_t, uint16_t>( |
2003 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2004 | 0 | case OpCode::I64__atomic__rmw32__and_u: |
2005 | 0 | return runAtomicAndOp<uint64_t, uint32_t>( |
2006 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2007 | 0 | case OpCode::I32__atomic__rmw__or: |
2008 | 0 | return runAtomicOrOp<int32_t, uint32_t>( |
2009 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2010 | 0 | case OpCode::I64__atomic__rmw__or: |
2011 | 0 | return runAtomicOrOp<int64_t, uint64_t>( |
2012 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2013 | 0 | case OpCode::I32__atomic__rmw8__or_u: |
2014 | 0 | return runAtomicOrOp<uint32_t, uint8_t>( |
2015 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2016 | 0 | case OpCode::I32__atomic__rmw16__or_u: |
2017 | 0 | return runAtomicOrOp<uint32_t, uint16_t>( |
2018 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2019 | 0 | case OpCode::I64__atomic__rmw8__or_u: |
2020 | 0 | return runAtomicOrOp<uint64_t, uint8_t>( |
2021 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2022 | 0 | case OpCode::I64__atomic__rmw16__or_u: |
2023 | 0 | return runAtomicOrOp<uint64_t, uint16_t>( |
2024 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2025 | 0 | case OpCode::I64__atomic__rmw32__or_u: |
2026 | 0 | return runAtomicOrOp<uint64_t, uint32_t>( |
2027 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2028 | 0 | case OpCode::I32__atomic__rmw__xor: |
2029 | 0 | return runAtomicXorOp<int32_t, uint32_t>( |
2030 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2031 | 0 | case OpCode::I64__atomic__rmw__xor: |
2032 | 0 | return runAtomicXorOp<int64_t, uint64_t>( |
2033 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2034 | 0 | case OpCode::I32__atomic__rmw8__xor_u: |
2035 | 0 | return runAtomicXorOp<uint32_t, uint8_t>( |
2036 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2037 | 0 | case OpCode::I32__atomic__rmw16__xor_u: |
2038 | 0 | return runAtomicXorOp<uint32_t, uint16_t>( |
2039 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2040 | 0 | case OpCode::I64__atomic__rmw8__xor_u: |
2041 | 0 | return runAtomicXorOp<uint64_t, uint8_t>( |
2042 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2043 | 0 | case OpCode::I64__atomic__rmw16__xor_u: |
2044 | 0 | return runAtomicXorOp<uint64_t, uint16_t>( |
2045 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2046 | 0 | case OpCode::I64__atomic__rmw32__xor_u: |
2047 | 0 | return runAtomicXorOp<uint64_t, uint32_t>( |
2048 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2049 | 0 | case OpCode::I32__atomic__rmw__xchg: |
2050 | 0 | return runAtomicExchangeOp<int32_t, uint32_t>( |
2051 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2052 | 0 | case OpCode::I64__atomic__rmw__xchg: |
2053 | 0 | return runAtomicExchangeOp<int64_t, uint64_t>( |
2054 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2055 | 0 | case OpCode::I32__atomic__rmw8__xchg_u: |
2056 | 0 | return runAtomicExchangeOp<uint32_t, uint8_t>( |
2057 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2058 | 0 | case OpCode::I32__atomic__rmw16__xchg_u: |
2059 | 0 | return runAtomicExchangeOp<uint32_t, uint16_t>( |
2060 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2061 | 0 | case OpCode::I64__atomic__rmw8__xchg_u: |
2062 | 0 | return runAtomicExchangeOp<uint64_t, uint8_t>( |
2063 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2064 | 0 | case OpCode::I64__atomic__rmw16__xchg_u: |
2065 | 0 | return runAtomicExchangeOp<uint64_t, uint16_t>( |
2066 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2067 | 0 | case OpCode::I64__atomic__rmw32__xchg_u: |
2068 | 0 | return runAtomicExchangeOp<uint64_t, uint32_t>( |
2069 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2070 | 0 | case OpCode::I32__atomic__rmw__cmpxchg: |
2071 | 0 | return runAtomicCompareExchangeOp<int32_t, uint32_t>( |
2072 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2073 | 0 | case OpCode::I64__atomic__rmw__cmpxchg: |
2074 | 0 | return runAtomicCompareExchangeOp<int64_t, uint64_t>( |
2075 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2076 | 0 | case OpCode::I32__atomic__rmw8__cmpxchg_u: |
2077 | 0 | return runAtomicCompareExchangeOp<uint32_t, uint8_t>( |
2078 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2079 | 0 | case OpCode::I32__atomic__rmw16__cmpxchg_u: |
2080 | 0 | return runAtomicCompareExchangeOp<uint32_t, uint16_t>( |
2081 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2082 | 0 | case OpCode::I64__atomic__rmw8__cmpxchg_u: |
2083 | 0 | return runAtomicCompareExchangeOp<uint64_t, uint8_t>( |
2084 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2085 | 0 | case OpCode::I64__atomic__rmw16__cmpxchg_u: |
2086 | 0 | return runAtomicCompareExchangeOp<uint64_t, uint16_t>( |
2087 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2088 | 0 | case OpCode::I64__atomic__rmw32__cmpxchg_u: |
2089 | 0 | return runAtomicCompareExchangeOp<uint64_t, uint32_t>( |
2090 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
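// Editorial note: all the atomic RMW cases follow one template convention:
// the first argument is the i32/i64 stack value type and the second the
// memory access width, so e.g. runAtomicAddOp<uint64_t, uint8_t> performs an
// 8-bit read-modify-write and zero-extends the old byte into the 64-bit
// result, which is what the *_u suffix on the narrow opcodes requires.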
2091 | | |
2092 | 0 | default: |
2093 | 0 | return {}; |
2094 | 0 | } |
2095 | 0 | }; |
2096 | |
2097 | 0 | while (PC != PCEnd) { |
2098 | 0 | if (Stat) { |
2099 | 0 | OpCode Code = PC->getOpCode(); |
2100 | 0 | if (Conf.getStatisticsConfigure().isInstructionCounting()) { |
2101 | 0 | Stat->incInstrCount(); |
2102 | 0 | } |
2103 | | // Add the instruction cost. Note: the if-else case needs additional handling. |
2104 | 0 | if (Conf.getStatisticsConfigure().isCostMeasuring()) { |
2105 | 0 | if (unlikely(!Stat->addInstrCost(Code))) { |
2106 | 0 | const AST::Instruction &Instr = *PC; |
2107 | 0 | spdlog::error( |
2108 | 0 | ErrInfo::InfoInstruction(Instr.getOpCode(), Instr.getOffset())); |
2109 | 0 | return Unexpect(ErrCode::Value::CostLimitExceeded); |
2110 | 0 | } |
2111 | 0 | } |
2112 | 0 | } |
2113 | 0 | EXPECTED_TRY(Dispatch().map_error([this, &StackMgr](auto E) { |
2114 | 0 | StackTraceSize = interpreterStackTrace(StackMgr, StackTrace).size(); |
2115 | 0 | if (Conf.getRuntimeConfigure().isEnableCoredump() && |
2116 | 0 | E.getErrCodePhase() == WasmPhase::Execution) { |
2117 | 0 | Coredump::generateCoredump( |
2118 | 0 | StackMgr, Conf.getRuntimeConfigure().isCoredumpWasmgdb()); |
2119 | 0 | } |
2120 | 0 | return E; |
2121 | 0 | })); |
2122 | 0 | PC++; |
2123 | 0 | } |
2124 | 0 | return {}; |
2125 | 0 | } |
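// Editorial note: the loop above is a plain fetch/dispatch interpreter. Per
// instruction it optionally counts the instruction, charges its cost against
// the configured limit (failing with CostLimitExceeded once exhausted), runs
// the big switch via Dispatch, and on error records an interpreter stack
// trace and, when coredumps are enabled and the failure happened during
// execution, emits a coredump before propagating the error; otherwise PC
// simply advances to the next instruction.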
2126 | | |
2127 | | } // namespace Executor |
2128 | | } // namespace WasmEdge |