/src/WasmEdge/lib/executor/engine/engine.cpp
Line | Count | Source |
1 | | // SPDX-License-Identifier: Apache-2.0 |
2 | | // SPDX-FileCopyrightText: 2019-2024 Second State INC |
3 | | |
4 | | #include "executor/coredump.h" |
5 | | #include "executor/executor.h" |
6 | | #include "system/stacktrace.h" |
7 | | |
8 | | #include <array> |
9 | | #include <cstdint> |
10 | | #include <cstring> |
11 | | |
12 | | using namespace std::literals; |
13 | | |
14 | | namespace WasmEdge { |
15 | | namespace Executor { |
16 | | |
17 | | Expect<void> Executor::runExpression(Runtime::StackManager &StackMgr, |
18 | 0 | AST::InstrView Instrs) { |
19 | 0 | return execute(StackMgr, Instrs.begin(), Instrs.end()); |
20 | 0 | } |
21 | | |
22 | | Expect<void> |
23 | | Executor::runFunction(Runtime::StackManager &StackMgr, |
24 | | const Runtime::Instance::FunctionInstance &Func, |
25 | 0 | Span<const ValVariant> Params) { |
26 | | // Set start time. |
27 | 0 | if (Stat && Conf.getStatisticsConfigure().isTimeMeasuring()) { |
28 | 0 | Stat->startRecordWasm(); |
29 | 0 | } |
30 | | |
31 | | // Reset and push a dummy frame into stack. |
32 | 0 | StackMgr.pushFrame(nullptr, AST::InstrView::iterator(), 0, 0); |
33 | | |
34 | | // Push arguments. |
35 | 0 | const auto &PTypes = Func.getFuncType().getParamTypes(); |
36 | 0 | for (uint32_t I = 0; I < Params.size(); I++) { |
37 | | // For references, transform to a non-null reference type if the value |
38 | | // is not null. |
39 | 0 | if (PTypes[I].isRefType() && Params[I].get<RefVariant>().getPtr<void>() && |
40 | 0 | Params[I].get<RefVariant>().getType().isNullableRefType()) { |
41 | 0 | auto Val = Params[I]; |
42 | 0 | Val.get<RefVariant>().getType().toNonNullableRef(); |
43 | 0 | StackMgr.push(Val); |
44 | 0 | } else { |
45 | 0 | StackMgr.push(Params[I]); |
46 | 0 | } |
47 | 0 | } |
48 | | |
49 | | // Enter and execute function. |
50 | 0 | Expect<void> Res = |
51 | 0 | enterFunction(StackMgr, Func, Func.getInstrs().end()) |
52 | 0 | .and_then([&](AST::InstrView::iterator StartIt) { |
53 | | // If not terminated, execute the instructions in interpreter mode. |
54 | | // When entering AOT or host functions, `StartIt` is equal to |
55 | | // the end of the instruction list, so the execution will return |
56 | | // immediately. |
57 | 0 | return execute(StackMgr, StartIt, Func.getInstrs().end()); |
58 | 0 | }); |
59 | |
60 | 0 | if (Res) { |
61 | 0 | spdlog::debug(" Execution succeeded."sv); |
62 | 0 | } else if (likely(Res.error() == ErrCode::Value::Terminated)) { |
63 | 0 | spdlog::debug(" Terminated."sv); |
64 | 0 | } |
65 | |
66 | 0 | if (Stat && Conf.getStatisticsConfigure().isTimeMeasuring()) { |
67 | 0 | Stat->stopRecordWasm(); |
68 | 0 | } |
69 | | |
70 | | // If Statistics is enabled, then dump it here. |
71 | 0 | if (Stat) { |
72 | 0 | Stat->dumpToLog(Conf); |
73 | 0 | } |
74 | |
75 | 0 | if (!Res && likely(Res.error() == ErrCode::Value::Terminated)) { |
76 | 0 | StackMgr.reset(); |
77 | 0 | } |
78 | |
79 | 0 | return Res; |
80 | 0 | } |
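The chained call above relies on WasmEdge's `Expect<T>` monadic interface. As a rough illustration only, the following standalone sketch models the same `and_then` continuation style with C++23 `std::expected` standing in for `Expect`; the names `enter` and `run` are hypothetical stand-ins for `enterFunction` and `execute`, not WasmEdge APIs.

#include <expected>  // C++23
#include <iostream>
#include <string>
#include <vector>

using Iter = std::vector<int>::const_iterator;

// Stand-in for enterFunction: yields the iterator to start interpreting from,
// or an error on failure.
std::expected<Iter, std::string> enter(const std::vector<int> &Instrs) {
  return Instrs.begin();
}

// Stand-in for execute: interprets from Start to End.
std::expected<void, std::string> run(Iter Start, Iter End) {
  for (auto It = Start; It != End; ++It) {
    // ... interpret *It ...
  }
  return {};
}

int main() {
  const std::vector<int> Instrs = {1, 2, 3};
  // Only run the interpreter loop if entering the function succeeded,
  // mirroring the enterFunction(...).and_then(...) chain in runFunction.
  auto Res = enter(Instrs).and_then(
      [&](Iter Start) { return run(Start, Instrs.end()); });
  if (Res) {
    std::cout << "succeeded\n";
  } else {
    std::cout << Res.error() << '\n';
  }
}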
81 | | |
82 | | Expect<void> Executor::execute(Runtime::StackManager &StackMgr, |
83 | | const AST::InstrView::iterator Start, |
84 | 0 | const AST::InstrView::iterator End) { |
85 | 0 | AST::InstrView::iterator PC = Start; |
86 | 0 | AST::InstrView::iterator PCEnd = End; |
87 | |
88 | 0 | auto Dispatch = [this, &PC, &StackMgr]() -> Expect<void> { |
89 | 0 | const AST::Instruction &Instr = *PC; |
90 | 0 | switch (Instr.getOpCode()) { |
91 | | // Control instructions |
92 | 0 | case OpCode::Unreachable: |
93 | 0 | spdlog::error(ErrCode::Value::Unreachable); |
94 | 0 | spdlog::error( |
95 | 0 | ErrInfo::InfoInstruction(Instr.getOpCode(), Instr.getOffset())); |
96 | 0 | return Unexpect(ErrCode::Value::Unreachable); |
97 | 0 | case OpCode::Nop: |
98 | 0 | return {}; |
99 | 0 | case OpCode::Block: |
100 | 0 | return {}; |
101 | 0 | case OpCode::Loop: |
102 | 0 | return {}; |
103 | 0 | case OpCode::If: |
104 | 0 | return runIfElseOp(StackMgr, Instr, PC); |
105 | 0 | case OpCode::Else: |
106 | 0 | if (Stat && Conf.getStatisticsConfigure().isCostMeasuring()) { |
107 | | // Reaching here means the end of the if statement. |
108 | 0 | if (unlikely(!Stat->subInstrCost(Instr.getOpCode()))) { |
109 | 0 | spdlog::error(ErrCode::Value::CostLimitExceeded); |
110 | 0 | spdlog::error( |
111 | 0 | ErrInfo::InfoInstruction(Instr.getOpCode(), Instr.getOffset())); |
112 | 0 | return Unexpect(ErrCode::Value::CostLimitExceeded); |
113 | 0 | } |
114 | 0 | if (unlikely(!Stat->addInstrCost(OpCode::End))) { |
115 | 0 | spdlog::error(ErrCode::Value::CostLimitExceeded); |
116 | 0 | spdlog::error( |
117 | 0 | ErrInfo::InfoInstruction(Instr.getOpCode(), Instr.getOffset())); |
118 | 0 | return Unexpect(ErrCode::Value::CostLimitExceeded); |
119 | 0 | } |
120 | 0 | } |
121 | 0 | PC += PC->getJumpEnd() - 1; |
122 | 0 | return {}; |
123 | 0 | case OpCode::End: |
124 | 0 | PC = StackMgr.maybePopFrameOrHandler(PC); |
125 | 0 | return {}; |
126 | | // LEGACY-EH: remove the `Try` cases after deprecating legacy EH. |
127 | 0 | case OpCode::Try: |
128 | 0 | return runTryTableOp(StackMgr, Instr, PC); |
129 | 0 | case OpCode::Throw: |
130 | 0 | return runThrowOp(StackMgr, Instr, PC); |
131 | 0 | case OpCode::Throw_ref: |
132 | 0 | return runThrowRefOp(StackMgr, Instr, PC); |
133 | 0 | case OpCode::Br: |
134 | 0 | return runBrOp(StackMgr, Instr, PC); |
135 | 0 | case OpCode::Br_if: |
136 | 0 | return runBrIfOp(StackMgr, Instr, PC); |
137 | 0 | case OpCode::Br_table: |
138 | 0 | return runBrTableOp(StackMgr, Instr, PC); |
139 | 0 | case OpCode::Br_on_null: |
140 | 0 | return runBrOnNullOp(StackMgr, Instr, PC); |
141 | 0 | case OpCode::Br_on_non_null: |
142 | 0 | return runBrOnNonNullOp(StackMgr, Instr, PC); |
143 | 0 | case OpCode::Br_on_cast: |
144 | 0 | return runBrOnCastOp(StackMgr, Instr, PC); |
145 | 0 | case OpCode::Br_on_cast_fail: |
146 | 0 | return runBrOnCastOp(StackMgr, Instr, PC, true); |
147 | 0 | case OpCode::Return: |
148 | 0 | return runReturnOp(StackMgr, PC); |
149 | 0 | case OpCode::Call: |
150 | 0 | return runCallOp(StackMgr, Instr, PC); |
151 | 0 | case OpCode::Call_indirect: |
152 | 0 | return runCallIndirectOp(StackMgr, Instr, PC); |
153 | 0 | case OpCode::Return_call: |
154 | 0 | return runCallOp(StackMgr, Instr, PC, true); |
155 | 0 | case OpCode::Return_call_indirect: |
156 | 0 | return runCallIndirectOp(StackMgr, Instr, PC, true); |
157 | 0 | case OpCode::Call_ref: |
158 | 0 | return runCallRefOp(StackMgr, Instr, PC); |
159 | 0 | case OpCode::Return_call_ref: |
160 | 0 | return runCallRefOp(StackMgr, Instr, PC, true); |
161 | | // LEGACY-EH: remove the `Catch` cases after deprecating legacy EH. |
162 | 0 | case OpCode::Catch: |
163 | 0 | case OpCode::Catch_all: |
164 | 0 | PC -= Instr.getCatchLegacy().CatchPCOffset; |
165 | 0 | PC += PC->getTryCatch().JumpEnd; |
166 | 0 | return {}; |
167 | 0 | case OpCode::Try_table: |
168 | 0 | return runTryTableOp(StackMgr, Instr, PC); |
169 | | |
170 | | // Reference Instructions |
171 | 0 | case OpCode::Ref__null: |
172 | 0 | return runRefNullOp(StackMgr, Instr.getValType()); |
173 | 0 | case OpCode::Ref__is_null: |
174 | 0 | return runRefIsNullOp(StackMgr.getTop()); |
175 | 0 | case OpCode::Ref__func: |
176 | 0 | return runRefFuncOp(StackMgr, Instr.getTargetIndex()); |
177 | 0 | case OpCode::Ref__eq: { |
178 | 0 | ValVariant Rhs = StackMgr.pop(); |
179 | 0 | return runRefEqOp(StackMgr.getTop(), Rhs); |
180 | 0 | } |
181 | 0 | case OpCode::Ref__as_non_null: |
182 | 0 | return runRefAsNonNullOp(StackMgr.getTop().get<RefVariant>(), Instr); |
183 | | |
184 | | // Reference Instructions (GC proposal) |
185 | 0 | case OpCode::Struct__new: |
186 | 0 | return runStructNewOp(StackMgr, Instr.getTargetIndex()); |
187 | 0 | case OpCode::Struct__new_default: |
188 | 0 | return runStructNewOp(StackMgr, Instr.getTargetIndex(), true); |
189 | 0 | case OpCode::Struct__get: |
190 | 0 | case OpCode::Struct__get_u: |
191 | 0 | return runStructGetOp(StackMgr, Instr.getTargetIndex(), |
192 | 0 | Instr.getSourceIndex(), Instr); |
193 | 0 | case OpCode::Struct__get_s: |
194 | 0 | return runStructGetOp(StackMgr, Instr.getTargetIndex(), |
195 | 0 | Instr.getSourceIndex(), Instr, true); |
196 | 0 | case OpCode::Struct__set: |
197 | 0 | return runStructSetOp(StackMgr, StackMgr.pop(), Instr.getTargetIndex(), |
198 | 0 | Instr.getSourceIndex(), Instr); |
199 | 0 | case OpCode::Array__new: |
200 | 0 | return runArrayNewOp(StackMgr, Instr.getTargetIndex(), 1, |
201 | 0 | StackMgr.pop().get<uint32_t>()); |
202 | 0 | case OpCode::Array__new_default: |
203 | 0 | return runArrayNewOp(StackMgr, Instr.getTargetIndex(), 0, |
204 | 0 | StackMgr.pop().get<uint32_t>()); |
205 | 0 | case OpCode::Array__new_fixed: |
206 | 0 | return runArrayNewOp(StackMgr, Instr.getTargetIndex(), |
207 | 0 | Instr.getSourceIndex(), Instr.getSourceIndex()); |
208 | 0 | case OpCode::Array__new_data: |
209 | 0 | return runArrayNewDataOp(StackMgr, Instr.getTargetIndex(), |
210 | 0 | Instr.getSourceIndex(), Instr); |
211 | 0 | case OpCode::Array__new_elem: |
212 | 0 | return runArrayNewElemOp(StackMgr, Instr.getTargetIndex(), |
213 | 0 | Instr.getSourceIndex(), Instr); |
214 | 0 | case OpCode::Array__get: |
215 | 0 | case OpCode::Array__get_u: |
216 | 0 | return runArrayGetOp(StackMgr, Instr.getTargetIndex(), Instr); |
217 | 0 | case OpCode::Array__get_s: |
218 | 0 | return runArrayGetOp(StackMgr, Instr.getTargetIndex(), Instr, true); |
219 | 0 | case OpCode::Array__set: |
220 | 0 | return runArraySetOp(StackMgr, StackMgr.pop(), Instr.getTargetIndex(), |
221 | 0 | Instr); |
222 | 0 | case OpCode::Array__len: |
223 | 0 | return runArrayLenOp(StackMgr.getTop(), Instr); |
224 | 0 | case OpCode::Array__fill: { |
225 | 0 | const uint32_t Cnt = StackMgr.pop().get<uint32_t>(); |
226 | 0 | return runArrayFillOp(StackMgr, Cnt, StackMgr.pop(), |
227 | 0 | Instr.getTargetIndex(), Instr); |
228 | 0 | } |
229 | 0 | case OpCode::Array__copy: |
230 | 0 | return runArrayCopyOp(StackMgr, StackMgr.pop().get<uint32_t>(), |
231 | 0 | Instr.getTargetIndex(), Instr.getSourceIndex(), |
232 | 0 | Instr); |
233 | 0 | case OpCode::Array__init_data: |
234 | 0 | return runArrayInitDataOp(StackMgr, StackMgr.pop().get<uint32_t>(), |
235 | 0 | Instr.getTargetIndex(), Instr.getSourceIndex(), |
236 | 0 | Instr); |
237 | 0 | case OpCode::Array__init_elem: |
238 | 0 | return runArrayInitElemOp(StackMgr, StackMgr.pop().get<uint32_t>(), |
239 | 0 | Instr.getTargetIndex(), Instr.getSourceIndex(), |
240 | 0 | Instr); |
241 | 0 | case OpCode::Ref__test: |
242 | 0 | case OpCode::Ref__test_null: |
243 | 0 | return runRefTestOp(StackMgr.getModule(), StackMgr.getTop(), Instr); |
244 | 0 | case OpCode::Ref__cast: |
245 | 0 | case OpCode::Ref__cast_null: |
246 | 0 | return runRefTestOp(StackMgr.getModule(), StackMgr.getTop(), Instr, true); |
247 | 0 | case OpCode::Any__convert_extern: |
248 | 0 | return runRefConvOp(StackMgr.getTop().get<RefVariant>(), |
249 | 0 | TypeCode::AnyRef); |
250 | 0 | case OpCode::Extern__convert_any: |
251 | 0 | return runRefConvOp(StackMgr.getTop().get<RefVariant>(), |
252 | 0 | TypeCode::ExternRef); |
253 | 0 | case OpCode::Ref__i31: |
254 | 0 | return runRefI31Op(StackMgr.getTop()); |
255 | 0 | case OpCode::I31__get_s: |
256 | 0 | return runI31GetOp(StackMgr.getTop(), Instr, true); |
257 | 0 | case OpCode::I31__get_u: |
258 | 0 | return runI31GetOp(StackMgr.getTop(), Instr); |
259 | | |
260 | | // Parametric Instructions |
261 | 0 | case OpCode::Drop: |
262 | 0 | StackMgr.pop(); |
263 | 0 | return {}; |
264 | 0 | case OpCode::Select: |
265 | 0 | case OpCode::Select_t: { |
266 | | // Pop the i32 condition and the two candidate values from the stack. |
267 | 0 | ValVariant CondVal = StackMgr.pop(); |
268 | 0 | ValVariant Val2 = StackMgr.pop(); |
269 | 0 | ValVariant Val1 = StackMgr.pop(); |
270 | | |
271 | | // Select the value. |
272 | 0 | if (CondVal.get<uint32_t>() == 0) { |
273 | 0 | StackMgr.push(Val2); |
274 | 0 | } else { |
275 | 0 | StackMgr.push(Val1); |
276 | 0 | } |
277 | 0 | return {}; |
278 | 0 | } |
279 | | |
280 | | // Variable Instructions |
281 | 0 | case OpCode::Local__get: |
282 | 0 | return runLocalGetOp(StackMgr, Instr.getStackOffset()); |
283 | 0 | case OpCode::Local__set: |
284 | 0 | return runLocalSetOp(StackMgr, Instr.getStackOffset()); |
285 | 0 | case OpCode::Local__tee: |
286 | 0 | return runLocalTeeOp(StackMgr, Instr.getStackOffset()); |
287 | 0 | case OpCode::Global__get: |
288 | 0 | return runGlobalGetOp(StackMgr, Instr.getTargetIndex()); |
289 | 0 | case OpCode::Global__set: |
290 | 0 | return runGlobalSetOp(StackMgr, Instr.getTargetIndex()); |
291 | | |
292 | | // Table Instructions |
293 | 0 | case OpCode::Table__get: |
294 | 0 | return runTableGetOp( |
295 | 0 | StackMgr, *getTabInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
296 | 0 | case OpCode::Table__set: |
297 | 0 | return runTableSetOp( |
298 | 0 | StackMgr, *getTabInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
299 | 0 | case OpCode::Table__init: |
300 | 0 | return runTableInitOp( |
301 | 0 | StackMgr, *getTabInstByIdx(StackMgr, Instr.getTargetIndex()), |
302 | 0 | *getElemInstByIdx(StackMgr, Instr.getSourceIndex()), Instr); |
303 | 0 | case OpCode::Elem__drop: |
304 | 0 | return runElemDropOp(*getElemInstByIdx(StackMgr, Instr.getTargetIndex())); |
305 | 0 | case OpCode::Table__copy: |
306 | 0 | return runTableCopyOp( |
307 | 0 | StackMgr, *getTabInstByIdx(StackMgr, Instr.getTargetIndex()), |
308 | 0 | *getTabInstByIdx(StackMgr, Instr.getSourceIndex()), Instr); |
309 | 0 | case OpCode::Table__grow: |
310 | 0 | return runTableGrowOp(StackMgr, |
311 | 0 | *getTabInstByIdx(StackMgr, Instr.getTargetIndex())); |
312 | 0 | case OpCode::Table__size: |
313 | 0 | return runTableSizeOp(StackMgr, |
314 | 0 | *getTabInstByIdx(StackMgr, Instr.getTargetIndex())); |
315 | 0 | case OpCode::Table__fill: |
316 | 0 | return runTableFillOp( |
317 | 0 | StackMgr, *getTabInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
318 | | |
319 | | // Memory Instructions |
320 | 0 | case OpCode::I32__load: |
321 | 0 | return runLoadOp<uint32_t>( |
322 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
323 | 0 | case OpCode::I64__load: |
324 | 0 | return runLoadOp<uint64_t>( |
325 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
326 | 0 | case OpCode::F32__load: |
327 | 0 | return runLoadOp<float>( |
328 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
329 | 0 | case OpCode::F64__load: |
330 | 0 | return runLoadOp<double>( |
331 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
332 | 0 | case OpCode::I32__load8_s: |
333 | 0 | return runLoadOp<int32_t, 8>( |
334 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
335 | 0 | case OpCode::I32__load8_u: |
336 | 0 | return runLoadOp<uint32_t, 8>( |
337 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
338 | 0 | case OpCode::I32__load16_s: |
339 | 0 | return runLoadOp<int32_t, 16>( |
340 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
341 | 0 | case OpCode::I32__load16_u: |
342 | 0 | return runLoadOp<uint32_t, 16>( |
343 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
344 | 0 | case OpCode::I64__load8_s: |
345 | 0 | return runLoadOp<int64_t, 8>( |
346 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
347 | 0 | case OpCode::I64__load8_u: |
348 | 0 | return runLoadOp<uint64_t, 8>( |
349 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
350 | 0 | case OpCode::I64__load16_s: |
351 | 0 | return runLoadOp<int64_t, 16>( |
352 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
353 | 0 | case OpCode::I64__load16_u: |
354 | 0 | return runLoadOp<uint64_t, 16>( |
355 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
356 | 0 | case OpCode::I64__load32_s: |
357 | 0 | return runLoadOp<int64_t, 32>( |
358 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
359 | 0 | case OpCode::I64__load32_u: |
360 | 0 | return runLoadOp<uint64_t, 32>( |
361 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
362 | 0 | case OpCode::I32__store: |
363 | 0 | return runStoreOp<uint32_t>( |
364 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
365 | 0 | case OpCode::I64__store: |
366 | 0 | return runStoreOp<uint64_t>( |
367 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
368 | 0 | case OpCode::F32__store: |
369 | 0 | return runStoreOp<float>( |
370 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
371 | 0 | case OpCode::F64__store: |
372 | 0 | return runStoreOp<double>( |
373 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
374 | 0 | case OpCode::I32__store8: |
375 | 0 | return runStoreOp<uint32_t, 8>( |
376 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
377 | 0 | case OpCode::I32__store16: |
378 | 0 | return runStoreOp<uint32_t, 16>( |
379 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
380 | 0 | case OpCode::I64__store8: |
381 | 0 | return runStoreOp<uint64_t, 8>( |
382 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
383 | 0 | case OpCode::I64__store16: |
384 | 0 | return runStoreOp<uint64_t, 16>( |
385 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
386 | 0 | case OpCode::I64__store32: |
387 | 0 | return runStoreOp<uint64_t, 32>( |
388 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
389 | 0 | case OpCode::Memory__grow: |
390 | 0 | return runMemoryGrowOp( |
391 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex())); |
392 | 0 | case OpCode::Memory__size: |
393 | 0 | return runMemorySizeOp( |
394 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex())); |
395 | 0 | case OpCode::Memory__init: |
396 | 0 | return runMemoryInitOp( |
397 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), |
398 | 0 | *getDataInstByIdx(StackMgr, Instr.getSourceIndex()), Instr); |
399 | 0 | case OpCode::Data__drop: |
400 | 0 | return runDataDropOp(*getDataInstByIdx(StackMgr, Instr.getTargetIndex())); |
401 | 0 | case OpCode::Memory__copy: |
402 | 0 | return runMemoryCopyOp( |
403 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), |
404 | 0 | *getMemInstByIdx(StackMgr, Instr.getSourceIndex()), Instr); |
405 | 0 | case OpCode::Memory__fill: |
406 | 0 | return runMemoryFillOp( |
407 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
408 | | |
409 | | // Const Numeric Instructions |
410 | 0 | case OpCode::I32__const: |
411 | 0 | case OpCode::I64__const: |
412 | 0 | case OpCode::F32__const: |
413 | 0 | case OpCode::F64__const: |
414 | 0 | StackMgr.push(Instr.getNum()); |
415 | 0 | return {}; |
416 | | |
417 | | // Unary Numeric Instructions |
418 | 0 | case OpCode::I32__eqz: |
419 | 0 | return runEqzOp<uint32_t>(StackMgr.getTop()); |
420 | 0 | case OpCode::I64__eqz: |
421 | 0 | return runEqzOp<uint64_t>(StackMgr.getTop()); |
422 | 0 | case OpCode::I32__clz: |
423 | 0 | return runClzOp<uint32_t>(StackMgr.getTop()); |
424 | 0 | case OpCode::I32__ctz: |
425 | 0 | return runCtzOp<uint32_t>(StackMgr.getTop()); |
426 | 0 | case OpCode::I32__popcnt: |
427 | 0 | return runPopcntOp<uint32_t>(StackMgr.getTop()); |
428 | 0 | case OpCode::I64__clz: |
429 | 0 | return runClzOp<uint64_t>(StackMgr.getTop()); |
430 | 0 | case OpCode::I64__ctz: |
431 | 0 | return runCtzOp<uint64_t>(StackMgr.getTop()); |
432 | 0 | case OpCode::I64__popcnt: |
433 | 0 | return runPopcntOp<uint64_t>(StackMgr.getTop()); |
434 | 0 | case OpCode::F32__abs: |
435 | 0 | return runAbsOp<float>(StackMgr.getTop()); |
436 | 0 | case OpCode::F32__neg: |
437 | 0 | return runNegOp<float>(StackMgr.getTop()); |
438 | 0 | case OpCode::F32__ceil: |
439 | 0 | return runCeilOp<float>(StackMgr.getTop()); |
440 | 0 | case OpCode::F32__floor: |
441 | 0 | return runFloorOp<float>(StackMgr.getTop()); |
442 | 0 | case OpCode::F32__trunc: |
443 | 0 | return runTruncOp<float>(StackMgr.getTop()); |
444 | 0 | case OpCode::F32__nearest: |
445 | 0 | return runNearestOp<float>(StackMgr.getTop()); |
446 | 0 | case OpCode::F32__sqrt: |
447 | 0 | return runSqrtOp<float>(StackMgr.getTop()); |
448 | 0 | case OpCode::F64__abs: |
449 | 0 | return runAbsOp<double>(StackMgr.getTop()); |
450 | 0 | case OpCode::F64__neg: |
451 | 0 | return runNegOp<double>(StackMgr.getTop()); |
452 | 0 | case OpCode::F64__ceil: |
453 | 0 | return runCeilOp<double>(StackMgr.getTop()); |
454 | 0 | case OpCode::F64__floor: |
455 | 0 | return runFloorOp<double>(StackMgr.getTop()); |
456 | 0 | case OpCode::F64__trunc: |
457 | 0 | return runTruncOp<double>(StackMgr.getTop()); |
458 | 0 | case OpCode::F64__nearest: |
459 | 0 | return runNearestOp<double>(StackMgr.getTop()); |
460 | 0 | case OpCode::F64__sqrt: |
461 | 0 | return runSqrtOp<double>(StackMgr.getTop()); |
462 | 0 | case OpCode::I32__wrap_i64: |
463 | 0 | return runWrapOp<uint64_t, uint32_t>(StackMgr.getTop()); |
464 | 0 | case OpCode::I32__trunc_f32_s: |
465 | 0 | return runTruncateOp<float, int32_t>(Instr, StackMgr.getTop()); |
466 | 0 | case OpCode::I32__trunc_f32_u: |
467 | 0 | return runTruncateOp<float, uint32_t>(Instr, StackMgr.getTop()); |
468 | 0 | case OpCode::I32__trunc_f64_s: |
469 | 0 | return runTruncateOp<double, int32_t>(Instr, StackMgr.getTop()); |
470 | 0 | case OpCode::I32__trunc_f64_u: |
471 | 0 | return runTruncateOp<double, uint32_t>(Instr, StackMgr.getTop()); |
472 | 0 | case OpCode::I64__extend_i32_s: |
473 | 0 | return runExtendOp<int32_t, uint64_t>(StackMgr.getTop()); |
474 | 0 | case OpCode::I64__extend_i32_u: |
475 | 0 | return runExtendOp<uint32_t, uint64_t>(StackMgr.getTop()); |
476 | 0 | case OpCode::I64__trunc_f32_s: |
477 | 0 | return runTruncateOp<float, int64_t>(Instr, StackMgr.getTop()); |
478 | 0 | case OpCode::I64__trunc_f32_u: |
479 | 0 | return runTruncateOp<float, uint64_t>(Instr, StackMgr.getTop()); |
480 | 0 | case OpCode::I64__trunc_f64_s: |
481 | 0 | return runTruncateOp<double, int64_t>(Instr, StackMgr.getTop()); |
482 | 0 | case OpCode::I64__trunc_f64_u: |
483 | 0 | return runTruncateOp<double, uint64_t>(Instr, StackMgr.getTop()); |
484 | 0 | case OpCode::F32__convert_i32_s: |
485 | 0 | return runConvertOp<int32_t, float>(StackMgr.getTop()); |
486 | 0 | case OpCode::F32__convert_i32_u: |
487 | 0 | return runConvertOp<uint32_t, float>(StackMgr.getTop()); |
488 | 0 | case OpCode::F32__convert_i64_s: |
489 | 0 | return runConvertOp<int64_t, float>(StackMgr.getTop()); |
490 | 0 | case OpCode::F32__convert_i64_u: |
491 | 0 | return runConvertOp<uint64_t, float>(StackMgr.getTop()); |
492 | 0 | case OpCode::F32__demote_f64: |
493 | 0 | return runDemoteOp<double, float>(StackMgr.getTop()); |
494 | 0 | case OpCode::F64__convert_i32_s: |
495 | 0 | return runConvertOp<int32_t, double>(StackMgr.getTop()); |
496 | 0 | case OpCode::F64__convert_i32_u: |
497 | 0 | return runConvertOp<uint32_t, double>(StackMgr.getTop()); |
498 | 0 | case OpCode::F64__convert_i64_s: |
499 | 0 | return runConvertOp<int64_t, double>(StackMgr.getTop()); |
500 | 0 | case OpCode::F64__convert_i64_u: |
501 | 0 | return runConvertOp<uint64_t, double>(StackMgr.getTop()); |
502 | 0 | case OpCode::F64__promote_f32: |
503 | 0 | return runPromoteOp<float, double>(StackMgr.getTop()); |
504 | 0 | case OpCode::I32__reinterpret_f32: |
505 | 0 | return runReinterpretOp<float, uint32_t>(StackMgr.getTop()); |
506 | 0 | case OpCode::I64__reinterpret_f64: |
507 | 0 | return runReinterpretOp<double, uint64_t>(StackMgr.getTop()); |
508 | 0 | case OpCode::F32__reinterpret_i32: |
509 | 0 | return runReinterpretOp<uint32_t, float>(StackMgr.getTop()); |
510 | 0 | case OpCode::F64__reinterpret_i64: |
511 | 0 | return runReinterpretOp<uint64_t, double>(StackMgr.getTop()); |
512 | 0 | case OpCode::I32__extend8_s: |
513 | 0 | return runExtendOp<int32_t, uint32_t, 8>(StackMgr.getTop()); |
514 | 0 | case OpCode::I32__extend16_s: |
515 | 0 | return runExtendOp<int32_t, uint32_t, 16>(StackMgr.getTop()); |
516 | 0 | case OpCode::I64__extend8_s: |
517 | 0 | return runExtendOp<int64_t, uint64_t, 8>(StackMgr.getTop()); |
518 | 0 | case OpCode::I64__extend16_s: |
519 | 0 | return runExtendOp<int64_t, uint64_t, 16>(StackMgr.getTop()); |
520 | 0 | case OpCode::I64__extend32_s: |
521 | 0 | return runExtendOp<int64_t, uint64_t, 32>(StackMgr.getTop()); |
522 | | |
523 | | // Binary Numeric Instructions |
524 | 0 | case OpCode::I32__eq: { |
525 | 0 | ValVariant Rhs = StackMgr.pop(); |
526 | 0 | return runEqOp<uint32_t>(StackMgr.getTop(), Rhs); |
527 | 0 | } |
528 | 0 | case OpCode::I32__ne: { |
529 | 0 | ValVariant Rhs = StackMgr.pop(); |
530 | 0 | return runNeOp<uint32_t>(StackMgr.getTop(), Rhs); |
531 | 0 | } |
532 | 0 | case OpCode::I32__lt_s: { |
533 | 0 | ValVariant Rhs = StackMgr.pop(); |
534 | 0 | return runLtOp<int32_t>(StackMgr.getTop(), Rhs); |
535 | 0 | } |
536 | 0 | case OpCode::I32__lt_u: { |
537 | 0 | ValVariant Rhs = StackMgr.pop(); |
538 | 0 | return runLtOp<uint32_t>(StackMgr.getTop(), Rhs); |
539 | 0 | } |
540 | 0 | case OpCode::I32__gt_s: { |
541 | 0 | ValVariant Rhs = StackMgr.pop(); |
542 | 0 | return runGtOp<int32_t>(StackMgr.getTop(), Rhs); |
543 | 0 | } |
544 | 0 | case OpCode::I32__gt_u: { |
545 | 0 | ValVariant Rhs = StackMgr.pop(); |
546 | 0 | return runGtOp<uint32_t>(StackMgr.getTop(), Rhs); |
547 | 0 | } |
548 | 0 | case OpCode::I32__le_s: { |
549 | 0 | ValVariant Rhs = StackMgr.pop(); |
550 | 0 | return runLeOp<int32_t>(StackMgr.getTop(), Rhs); |
551 | 0 | } |
552 | 0 | case OpCode::I32__le_u: { |
553 | 0 | ValVariant Rhs = StackMgr.pop(); |
554 | 0 | return runLeOp<uint32_t>(StackMgr.getTop(), Rhs); |
555 | 0 | } |
556 | 0 | case OpCode::I32__ge_s: { |
557 | 0 | ValVariant Rhs = StackMgr.pop(); |
558 | 0 | return runGeOp<int32_t>(StackMgr.getTop(), Rhs); |
559 | 0 | } |
560 | 0 | case OpCode::I32__ge_u: { |
561 | 0 | ValVariant Rhs = StackMgr.pop(); |
562 | 0 | return runGeOp<uint32_t>(StackMgr.getTop(), Rhs); |
563 | 0 | } |
564 | 0 | case OpCode::I64__eq: { |
565 | 0 | ValVariant Rhs = StackMgr.pop(); |
566 | 0 | return runEqOp<uint64_t>(StackMgr.getTop(), Rhs); |
567 | 0 | } |
568 | 0 | case OpCode::I64__ne: { |
569 | 0 | ValVariant Rhs = StackMgr.pop(); |
570 | 0 | return runNeOp<uint64_t>(StackMgr.getTop(), Rhs); |
571 | 0 | } |
572 | 0 | case OpCode::I64__lt_s: { |
573 | 0 | ValVariant Rhs = StackMgr.pop(); |
574 | 0 | return runLtOp<int64_t>(StackMgr.getTop(), Rhs); |
575 | 0 | } |
576 | 0 | case OpCode::I64__lt_u: { |
577 | 0 | ValVariant Rhs = StackMgr.pop(); |
578 | 0 | return runLtOp<uint64_t>(StackMgr.getTop(), Rhs); |
579 | 0 | } |
580 | 0 | case OpCode::I64__gt_s: { |
581 | 0 | ValVariant Rhs = StackMgr.pop(); |
582 | 0 | return runGtOp<int64_t>(StackMgr.getTop(), Rhs); |
583 | 0 | } |
584 | 0 | case OpCode::I64__gt_u: { |
585 | 0 | ValVariant Rhs = StackMgr.pop(); |
586 | 0 | return runGtOp<uint64_t>(StackMgr.getTop(), Rhs); |
587 | 0 | } |
588 | 0 | case OpCode::I64__le_s: { |
589 | 0 | ValVariant Rhs = StackMgr.pop(); |
590 | 0 | return runLeOp<int64_t>(StackMgr.getTop(), Rhs); |
591 | 0 | } |
592 | 0 | case OpCode::I64__le_u: { |
593 | 0 | ValVariant Rhs = StackMgr.pop(); |
594 | 0 | return runLeOp<uint64_t>(StackMgr.getTop(), Rhs); |
595 | 0 | } |
596 | 0 | case OpCode::I64__ge_s: { |
597 | 0 | ValVariant Rhs = StackMgr.pop(); |
598 | 0 | return runGeOp<int64_t>(StackMgr.getTop(), Rhs); |
599 | 0 | } |
600 | 0 | case OpCode::I64__ge_u: { |
601 | 0 | ValVariant Rhs = StackMgr.pop(); |
602 | 0 | return runGeOp<uint64_t>(StackMgr.getTop(), Rhs); |
603 | 0 | } |
604 | 0 | case OpCode::F32__eq: { |
605 | 0 | ValVariant Rhs = StackMgr.pop(); |
606 | 0 | return runEqOp<float>(StackMgr.getTop(), Rhs); |
607 | 0 | } |
608 | 0 | case OpCode::F32__ne: { |
609 | 0 | ValVariant Rhs = StackMgr.pop(); |
610 | 0 | return runNeOp<float>(StackMgr.getTop(), Rhs); |
611 | 0 | } |
612 | 0 | case OpCode::F32__lt: { |
613 | 0 | ValVariant Rhs = StackMgr.pop(); |
614 | 0 | return runLtOp<float>(StackMgr.getTop(), Rhs); |
615 | 0 | } |
616 | 0 | case OpCode::F32__gt: { |
617 | 0 | ValVariant Rhs = StackMgr.pop(); |
618 | 0 | return runGtOp<float>(StackMgr.getTop(), Rhs); |
619 | 0 | } |
620 | 0 | case OpCode::F32__le: { |
621 | 0 | ValVariant Rhs = StackMgr.pop(); |
622 | 0 | return runLeOp<float>(StackMgr.getTop(), Rhs); |
623 | 0 | } |
624 | 0 | case OpCode::F32__ge: { |
625 | 0 | ValVariant Rhs = StackMgr.pop(); |
626 | 0 | return runGeOp<float>(StackMgr.getTop(), Rhs); |
627 | 0 | } |
628 | 0 | case OpCode::F64__eq: { |
629 | 0 | ValVariant Rhs = StackMgr.pop(); |
630 | 0 | return runEqOp<double>(StackMgr.getTop(), Rhs); |
631 | 0 | } |
632 | 0 | case OpCode::F64__ne: { |
633 | 0 | ValVariant Rhs = StackMgr.pop(); |
634 | 0 | return runNeOp<double>(StackMgr.getTop(), Rhs); |
635 | 0 | } |
636 | 0 | case OpCode::F64__lt: { |
637 | 0 | ValVariant Rhs = StackMgr.pop(); |
638 | 0 | return runLtOp<double>(StackMgr.getTop(), Rhs); |
639 | 0 | } |
640 | 0 | case OpCode::F64__gt: { |
641 | 0 | ValVariant Rhs = StackMgr.pop(); |
642 | 0 | return runGtOp<double>(StackMgr.getTop(), Rhs); |
643 | 0 | } |
644 | 0 | case OpCode::F64__le: { |
645 | 0 | ValVariant Rhs = StackMgr.pop(); |
646 | 0 | return runLeOp<double>(StackMgr.getTop(), Rhs); |
647 | 0 | } |
648 | 0 | case OpCode::F64__ge: { |
649 | 0 | ValVariant Rhs = StackMgr.pop(); |
650 | 0 | return runGeOp<double>(StackMgr.getTop(), Rhs); |
651 | 0 | } |
652 | 0 | case OpCode::I32__add: { |
653 | 0 | ValVariant Rhs = StackMgr.pop(); |
654 | 0 | return runAddOp<uint32_t>(StackMgr.getTop(), Rhs); |
655 | 0 | } |
656 | 0 | case OpCode::I32__sub: { |
657 | 0 | ValVariant Rhs = StackMgr.pop(); |
658 | 0 | return runSubOp<uint32_t>(StackMgr.getTop(), Rhs); |
659 | 0 | } |
660 | 0 | case OpCode::I32__mul: { |
661 | 0 | ValVariant Rhs = StackMgr.pop(); |
662 | 0 | return runMulOp<uint32_t>(StackMgr.getTop(), Rhs); |
663 | 0 | } |
664 | 0 | case OpCode::I32__div_s: { |
665 | 0 | ValVariant Rhs = StackMgr.pop(); |
666 | 0 | return runDivOp<int32_t>(Instr, StackMgr.getTop(), Rhs); |
667 | 0 | } |
668 | 0 | case OpCode::I32__div_u: { |
669 | 0 | ValVariant Rhs = StackMgr.pop(); |
670 | 0 | return runDivOp<uint32_t>(Instr, StackMgr.getTop(), Rhs); |
671 | 0 | } |
672 | 0 | case OpCode::I32__rem_s: { |
673 | 0 | ValVariant Rhs = StackMgr.pop(); |
674 | 0 | return runRemOp<int32_t>(Instr, StackMgr.getTop(), Rhs); |
675 | 0 | } |
676 | 0 | case OpCode::I32__rem_u: { |
677 | 0 | ValVariant Rhs = StackMgr.pop(); |
678 | 0 | return runRemOp<uint32_t>(Instr, StackMgr.getTop(), Rhs); |
679 | 0 | } |
680 | 0 | case OpCode::I32__and: { |
681 | 0 | ValVariant Rhs = StackMgr.pop(); |
682 | 0 | return runAndOp<uint32_t>(StackMgr.getTop(), Rhs); |
683 | 0 | } |
684 | 0 | case OpCode::I32__or: { |
685 | 0 | ValVariant Rhs = StackMgr.pop(); |
686 | 0 | return runOrOp<uint32_t>(StackMgr.getTop(), Rhs); |
687 | 0 | } |
688 | 0 | case OpCode::I32__xor: { |
689 | 0 | ValVariant Rhs = StackMgr.pop(); |
690 | 0 | return runXorOp<uint32_t>(StackMgr.getTop(), Rhs); |
691 | 0 | } |
692 | 0 | case OpCode::I32__shl: { |
693 | 0 | ValVariant Rhs = StackMgr.pop(); |
694 | 0 | return runShlOp<uint32_t>(StackMgr.getTop(), Rhs); |
695 | 0 | } |
696 | 0 | case OpCode::I32__shr_s: { |
697 | 0 | ValVariant Rhs = StackMgr.pop(); |
698 | 0 | return runShrOp<int32_t>(StackMgr.getTop(), Rhs); |
699 | 0 | } |
700 | 0 | case OpCode::I32__shr_u: { |
701 | 0 | ValVariant Rhs = StackMgr.pop(); |
702 | 0 | return runShrOp<uint32_t>(StackMgr.getTop(), Rhs); |
703 | 0 | } |
704 | 0 | case OpCode::I32__rotl: { |
705 | 0 | ValVariant Rhs = StackMgr.pop(); |
706 | 0 | return runRotlOp<uint32_t>(StackMgr.getTop(), Rhs); |
707 | 0 | } |
708 | 0 | case OpCode::I32__rotr: { |
709 | 0 | ValVariant Rhs = StackMgr.pop(); |
710 | 0 | return runRotrOp<uint32_t>(StackMgr.getTop(), Rhs); |
711 | 0 | } |
712 | 0 | case OpCode::I64__add: { |
713 | 0 | ValVariant Rhs = StackMgr.pop(); |
714 | 0 | return runAddOp<uint64_t>(StackMgr.getTop(), Rhs); |
715 | 0 | } |
716 | 0 | case OpCode::I64__sub: { |
717 | 0 | ValVariant Rhs = StackMgr.pop(); |
718 | 0 | return runSubOp<uint64_t>(StackMgr.getTop(), Rhs); |
719 | 0 | } |
720 | 0 | case OpCode::I64__mul: { |
721 | 0 | ValVariant Rhs = StackMgr.pop(); |
722 | 0 | return runMulOp<uint64_t>(StackMgr.getTop(), Rhs); |
723 | 0 | } |
724 | 0 | case OpCode::I64__div_s: { |
725 | 0 | ValVariant Rhs = StackMgr.pop(); |
726 | 0 | return runDivOp<int64_t>(Instr, StackMgr.getTop(), Rhs); |
727 | 0 | } |
728 | 0 | case OpCode::I64__div_u: { |
729 | 0 | ValVariant Rhs = StackMgr.pop(); |
730 | 0 | return runDivOp<uint64_t>(Instr, StackMgr.getTop(), Rhs); |
731 | 0 | } |
732 | 0 | case OpCode::I64__rem_s: { |
733 | 0 | ValVariant Rhs = StackMgr.pop(); |
734 | 0 | return runRemOp<int64_t>(Instr, StackMgr.getTop(), Rhs); |
735 | 0 | } |
736 | 0 | case OpCode::I64__rem_u: { |
737 | 0 | ValVariant Rhs = StackMgr.pop(); |
738 | 0 | return runRemOp<uint64_t>(Instr, StackMgr.getTop(), Rhs); |
739 | 0 | } |
740 | 0 | case OpCode::I64__and: { |
741 | 0 | ValVariant Rhs = StackMgr.pop(); |
742 | 0 | return runAndOp<uint64_t>(StackMgr.getTop(), Rhs); |
743 | 0 | } |
744 | 0 | case OpCode::I64__or: { |
745 | 0 | ValVariant Rhs = StackMgr.pop(); |
746 | 0 | return runOrOp<uint64_t>(StackMgr.getTop(), Rhs); |
747 | 0 | } |
748 | 0 | case OpCode::I64__xor: { |
749 | 0 | ValVariant Rhs = StackMgr.pop(); |
750 | 0 | return runXorOp<uint64_t>(StackMgr.getTop(), Rhs); |
751 | 0 | } |
752 | 0 | case OpCode::I64__shl: { |
753 | 0 | ValVariant Rhs = StackMgr.pop(); |
754 | 0 | return runShlOp<uint64_t>(StackMgr.getTop(), Rhs); |
755 | 0 | } |
756 | 0 | case OpCode::I64__shr_s: { |
757 | 0 | ValVariant Rhs = StackMgr.pop(); |
758 | 0 | return runShrOp<int64_t>(StackMgr.getTop(), Rhs); |
759 | 0 | } |
760 | 0 | case OpCode::I64__shr_u: { |
761 | 0 | ValVariant Rhs = StackMgr.pop(); |
762 | 0 | return runShrOp<uint64_t>(StackMgr.getTop(), Rhs); |
763 | 0 | } |
764 | 0 | case OpCode::I64__rotl: { |
765 | 0 | ValVariant Rhs = StackMgr.pop(); |
766 | 0 | return runRotlOp<uint64_t>(StackMgr.getTop(), Rhs); |
767 | 0 | } |
768 | 0 | case OpCode::I64__rotr: { |
769 | 0 | ValVariant Rhs = StackMgr.pop(); |
770 | 0 | return runRotrOp<uint64_t>(StackMgr.getTop(), Rhs); |
771 | 0 | } |
772 | 0 | case OpCode::F32__add: { |
773 | 0 | ValVariant Rhs = StackMgr.pop(); |
774 | 0 | return runAddOp<float>(StackMgr.getTop(), Rhs); |
775 | 0 | } |
776 | 0 | case OpCode::F32__sub: { |
777 | 0 | ValVariant Rhs = StackMgr.pop(); |
778 | 0 | return runSubOp<float>(StackMgr.getTop(), Rhs); |
779 | 0 | } |
780 | 0 | case OpCode::F32__mul: { |
781 | 0 | ValVariant Rhs = StackMgr.pop(); |
782 | 0 | return runMulOp<float>(StackMgr.getTop(), Rhs); |
783 | 0 | } |
784 | 0 | case OpCode::F32__div: { |
785 | 0 | ValVariant Rhs = StackMgr.pop(); |
786 | 0 | return runDivOp<float>(Instr, StackMgr.getTop(), Rhs); |
787 | 0 | } |
788 | 0 | case OpCode::F32__min: { |
789 | 0 | ValVariant Rhs = StackMgr.pop(); |
790 | 0 | return runMinOp<float>(StackMgr.getTop(), Rhs); |
791 | 0 | } |
792 | 0 | case OpCode::F32__max: { |
793 | 0 | ValVariant Rhs = StackMgr.pop(); |
794 | 0 | return runMaxOp<float>(StackMgr.getTop(), Rhs); |
795 | 0 | } |
796 | 0 | case OpCode::F32__copysign: { |
797 | 0 | ValVariant Rhs = StackMgr.pop(); |
798 | 0 | return runCopysignOp<float>(StackMgr.getTop(), Rhs); |
799 | 0 | } |
800 | 0 | case OpCode::F64__add: { |
801 | 0 | ValVariant Rhs = StackMgr.pop(); |
802 | 0 | return runAddOp<double>(StackMgr.getTop(), Rhs); |
803 | 0 | } |
804 | 0 | case OpCode::F64__sub: { |
805 | 0 | ValVariant Rhs = StackMgr.pop(); |
806 | 0 | return runSubOp<double>(StackMgr.getTop(), Rhs); |
807 | 0 | } |
808 | 0 | case OpCode::F64__mul: { |
809 | 0 | ValVariant Rhs = StackMgr.pop(); |
810 | 0 | return runMulOp<double>(StackMgr.getTop(), Rhs); |
811 | 0 | } |
812 | 0 | case OpCode::F64__div: { |
813 | 0 | ValVariant Rhs = StackMgr.pop(); |
814 | 0 | return runDivOp<double>(Instr, StackMgr.getTop(), Rhs); |
815 | 0 | } |
816 | 0 | case OpCode::F64__min: { |
817 | 0 | ValVariant Rhs = StackMgr.pop(); |
818 | 0 | return runMinOp<double>(StackMgr.getTop(), Rhs); |
819 | 0 | } |
820 | 0 | case OpCode::F64__max: { |
821 | 0 | ValVariant Rhs = StackMgr.pop(); |
822 | 0 | return runMaxOp<double>(StackMgr.getTop(), Rhs); |
823 | 0 | } |
824 | 0 | case OpCode::F64__copysign: { |
825 | 0 | ValVariant Rhs = StackMgr.pop(); |
826 | 0 | return runCopysignOp<double>(StackMgr.getTop(), Rhs); |
827 | 0 | } |
828 | | |
829 | | // Saturating Truncation Numeric Instructions |
830 | 0 | case OpCode::I32__trunc_sat_f32_s: |
831 | 0 | return runTruncateSatOp<float, int32_t>(StackMgr.getTop()); |
832 | 0 | case OpCode::I32__trunc_sat_f32_u: |
833 | 0 | return runTruncateSatOp<float, uint32_t>(StackMgr.getTop()); |
834 | 0 | case OpCode::I32__trunc_sat_f64_s: |
835 | 0 | return runTruncateSatOp<double, int32_t>(StackMgr.getTop()); |
836 | 0 | case OpCode::I32__trunc_sat_f64_u: |
837 | 0 | return runTruncateSatOp<double, uint32_t>(StackMgr.getTop()); |
838 | 0 | case OpCode::I64__trunc_sat_f32_s: |
839 | 0 | return runTruncateSatOp<float, int64_t>(StackMgr.getTop()); |
840 | 0 | case OpCode::I64__trunc_sat_f32_u: |
841 | 0 | return runTruncateSatOp<float, uint64_t>(StackMgr.getTop()); |
842 | 0 | case OpCode::I64__trunc_sat_f64_s: |
843 | 0 | return runTruncateSatOp<double, int64_t>(StackMgr.getTop()); |
844 | 0 | case OpCode::I64__trunc_sat_f64_u: |
845 | 0 | return runTruncateSatOp<double, uint64_t>(StackMgr.getTop()); |
846 | | |
847 | | // SIMD Memory Instructions |
848 | 0 | case OpCode::V128__load: |
849 | 0 | return runLoadOp<uint128_t>( |
850 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
851 | 0 | case OpCode::V128__load8x8_s: |
852 | 0 | return runLoadExpandOp<int8_t, int16_t>( |
853 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
854 | 0 | case OpCode::V128__load8x8_u: |
855 | 0 | return runLoadExpandOp<uint8_t, uint16_t>( |
856 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
857 | 0 | case OpCode::V128__load16x4_s: |
858 | 0 | return runLoadExpandOp<int16_t, int32_t>( |
859 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
860 | 0 | case OpCode::V128__load16x4_u: |
861 | 0 | return runLoadExpandOp<uint16_t, uint32_t>( |
862 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
863 | 0 | case OpCode::V128__load32x2_s: |
864 | 0 | return runLoadExpandOp<int32_t, int64_t>( |
865 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
866 | 0 | case OpCode::V128__load32x2_u: |
867 | 0 | return runLoadExpandOp<uint32_t, uint64_t>( |
868 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
869 | 0 | case OpCode::V128__load8_splat: |
870 | 0 | return runLoadSplatOp<uint8_t>( |
871 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
872 | 0 | case OpCode::V128__load16_splat: |
873 | 0 | return runLoadSplatOp<uint16_t>( |
874 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
875 | 0 | case OpCode::V128__load32_splat: |
876 | 0 | return runLoadSplatOp<uint32_t>( |
877 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
878 | 0 | case OpCode::V128__load64_splat: |
879 | 0 | return runLoadSplatOp<uint64_t>( |
880 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
881 | 0 | case OpCode::V128__load32_zero: |
882 | 0 | return runLoadOp<uint128_t, 32>( |
883 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
884 | 0 | case OpCode::V128__load64_zero: |
885 | 0 | return runLoadOp<uint128_t, 64>( |
886 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
887 | 0 | case OpCode::V128__store: |
888 | 0 | return runStoreOp<uint128_t>( |
889 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
890 | 0 | case OpCode::V128__load8_lane: |
891 | 0 | return runLoadLaneOp<uint8_t>( |
892 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
893 | 0 | case OpCode::V128__load16_lane: |
894 | 0 | return runLoadLaneOp<uint16_t>( |
895 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
896 | 0 | case OpCode::V128__load32_lane: |
897 | 0 | return runLoadLaneOp<uint32_t>( |
898 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
899 | 0 | case OpCode::V128__load64_lane: |
900 | 0 | return runLoadLaneOp<uint64_t>( |
901 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
902 | 0 | case OpCode::V128__store8_lane: |
903 | 0 | return runStoreLaneOp<uint8_t>( |
904 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
905 | 0 | case OpCode::V128__store16_lane: |
906 | 0 | return runStoreLaneOp<uint16_t>( |
907 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
908 | 0 | case OpCode::V128__store32_lane: |
909 | 0 | return runStoreLaneOp<uint32_t>( |
910 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
911 | 0 | case OpCode::V128__store64_lane: |
912 | 0 | return runStoreLaneOp<uint64_t>( |
913 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
914 | | |
915 | | // SIMD Const Instructions |
916 | 0 | case OpCode::V128__const: |
917 | 0 | StackMgr.push(Instr.getNum()); |
918 | 0 | return {}; |
919 | | |
920 | | // SIMD Shuffle Instructions |
921 | 0 | case OpCode::I8x16__shuffle: { |
922 | 0 | ValVariant Val2 = StackMgr.pop(); |
923 | 0 | ValVariant &Val1 = StackMgr.getTop(); |
924 | 0 | std::array<uint8_t, 32> Data; |
925 | 0 | std::array<uint8_t, 16> Result; |
926 | 0 | std::memcpy(&Data[0], &Val1, 16); |
927 | 0 | std::memcpy(&Data[16], &Val2, 16); |
928 | 0 | const auto V3 = Instr.getNum().get<uint128_t>(); |
929 | 0 | for (size_t I = 0; I < 16; ++I) { |
930 | 0 | const uint8_t Index = static_cast<uint8_t>(V3 >> (I * 8)); |
931 | 0 | Result[I] = Data[Index]; |
932 | 0 | } |
933 | 0 | std::memcpy(&Val1, &Result[0], 16); |
934 | 0 | return {}; |
935 | 0 | } |
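For reference, the shuffle case above implements the Wasm `i8x16.shuffle` semantics: the two operands are concatenated into a 32-byte buffer and each of the 16 immediate lane indices selects one byte. A standalone scalar sketch (hypothetical helper, not part of this file):

#include <array>
#include <cstddef>
#include <cstdint>

std::array<uint8_t, 16> shuffle16(const std::array<uint8_t, 16> &A,
                                  const std::array<uint8_t, 16> &B,
                                  const std::array<uint8_t, 16> &LaneIdx) {
  std::array<uint8_t, 32> Data{};
  std::array<uint8_t, 16> Result{};
  for (std::size_t I = 0; I < 16; ++I) { // Data = A ++ B, like the memcpy pair above.
    Data[I] = A[I];
    Data[I + 16] = B[I];
  }
  for (std::size_t I = 0; I < 16; ++I) {
    // Validation guarantees each immediate index is < 32; mask defensively here.
    Result[I] = Data[LaneIdx[I] & 0x1F];
  }
  return Result;
}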
936 | | |
937 | | // SIMD Lane Instructions |
938 | 0 | case OpCode::I8x16__extract_lane_s: |
939 | 0 | return runExtractLaneOp<int8_t, int32_t>(StackMgr.getTop(), |
940 | 0 | Instr.getMemoryLane()); |
941 | 0 | case OpCode::I8x16__extract_lane_u: |
942 | 0 | return runExtractLaneOp<uint8_t, uint32_t>(StackMgr.getTop(), |
943 | 0 | Instr.getMemoryLane()); |
944 | 0 | case OpCode::I16x8__extract_lane_s: |
945 | 0 | return runExtractLaneOp<int16_t, int32_t>(StackMgr.getTop(), |
946 | 0 | Instr.getMemoryLane()); |
947 | 0 | case OpCode::I16x8__extract_lane_u: |
948 | 0 | return runExtractLaneOp<uint16_t, uint32_t>(StackMgr.getTop(), |
949 | 0 | Instr.getMemoryLane()); |
950 | 0 | case OpCode::I32x4__extract_lane: |
951 | 0 | return runExtractLaneOp<uint32_t>(StackMgr.getTop(), |
952 | 0 | Instr.getMemoryLane()); |
953 | 0 | case OpCode::I64x2__extract_lane: |
954 | 0 | return runExtractLaneOp<uint64_t>(StackMgr.getTop(), |
955 | 0 | Instr.getMemoryLane()); |
956 | 0 | case OpCode::F32x4__extract_lane: |
957 | 0 | return runExtractLaneOp<float>(StackMgr.getTop(), Instr.getMemoryLane()); |
958 | 0 | case OpCode::F64x2__extract_lane: |
959 | 0 | return runExtractLaneOp<double>(StackMgr.getTop(), Instr.getMemoryLane()); |
960 | 0 | case OpCode::I8x16__replace_lane: { |
961 | 0 | ValVariant Rhs = StackMgr.pop(); |
962 | 0 | return runReplaceLaneOp<uint32_t, uint8_t>(StackMgr.getTop(), Rhs, |
963 | 0 | Instr.getMemoryLane()); |
964 | 0 | } |
965 | 0 | case OpCode::I16x8__replace_lane: { |
966 | 0 | ValVariant Rhs = StackMgr.pop(); |
967 | 0 | return runReplaceLaneOp<uint32_t, uint16_t>(StackMgr.getTop(), Rhs, |
968 | 0 | Instr.getMemoryLane()); |
969 | 0 | } |
970 | 0 | case OpCode::I32x4__replace_lane: { |
971 | 0 | ValVariant Rhs = StackMgr.pop(); |
972 | 0 | return runReplaceLaneOp<uint32_t>(StackMgr.getTop(), Rhs, |
973 | 0 | Instr.getMemoryLane()); |
974 | 0 | } |
975 | 0 | case OpCode::I64x2__replace_lane: { |
976 | 0 | ValVariant Rhs = StackMgr.pop(); |
977 | 0 | return runReplaceLaneOp<uint64_t>(StackMgr.getTop(), Rhs, |
978 | 0 | Instr.getMemoryLane()); |
979 | 0 | } |
980 | 0 | case OpCode::F32x4__replace_lane: { |
981 | 0 | ValVariant Rhs = StackMgr.pop(); |
982 | 0 | return runReplaceLaneOp<float>(StackMgr.getTop(), Rhs, |
983 | 0 | Instr.getMemoryLane()); |
984 | 0 | } |
985 | 0 | case OpCode::F64x2__replace_lane: { |
986 | 0 | ValVariant Rhs = StackMgr.pop(); |
987 | 0 | return runReplaceLaneOp<double>(StackMgr.getTop(), Rhs, |
988 | 0 | Instr.getMemoryLane()); |
989 | 0 | } |
990 | | |
991 | | // SIMD Numeric Instructions |
992 | | #if defined(_MSC_VER) && !defined(__clang__) // MSVC |
993 | | case OpCode::I8x16__swizzle: { |
994 | | const ValVariant Val2 = StackMgr.pop(); |
995 | | ValVariant &Val1 = StackMgr.getTop(); |
996 | | const uint8x16_t &Index = Val2.get<uint8x16_t>(); |
997 | | uint8x16_t &Vector = Val1.get<uint8x16_t>(); |
998 | | uint8x16_t Result; |
999 | | for (size_t I = 0; I < 16; ++I) { |
1000 | | const uint8_t SwizzleIndex = Index[I]; |
1001 | | if (SwizzleIndex < 16) { |
1002 | | Result[I] = Vector[SwizzleIndex]; |
1003 | | } else { |
1004 | | Result[I] = 0; |
1005 | | } |
1006 | | } |
1007 | | Vector = Result; |
1008 | | return {}; |
1009 | | } |
1010 | | #else |
1011 | 0 | case OpCode::I8x16__swizzle: { |
1012 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1013 | 0 | ValVariant &Val1 = StackMgr.getTop(); |
1014 | 0 | const uint8x16_t &Index = Val2.get<uint8x16_t>(); |
1015 | 0 | uint8x16_t &Vector = Val1.get<uint8x16_t>(); |
1016 | 0 | const uint8x16_t Limit = uint8x16_t{} + 16; |
1017 | 0 | const uint8x16_t Zero = uint8x16_t{}; |
1018 | 0 | const uint8x16_t Exceed = (Index >= Limit); |
1019 | 0 | #ifdef __clang__ |
1020 | 0 | uint8x16_t Result = {Vector[Index[0] & 0xF], Vector[Index[1] & 0xF], |
1021 | 0 | Vector[Index[2] & 0xF], Vector[Index[3] & 0xF], |
1022 | 0 | Vector[Index[4] & 0xF], Vector[Index[5] & 0xF], |
1023 | 0 | Vector[Index[6] & 0xF], Vector[Index[7] & 0xF], |
1024 | 0 | Vector[Index[8] & 0xF], Vector[Index[9] & 0xF], |
1025 | 0 | Vector[Index[10] & 0xF], Vector[Index[11] & 0xF], |
1026 | 0 | Vector[Index[12] & 0xF], Vector[Index[13] & 0xF], |
1027 | 0 | Vector[Index[14] & 0xF], Vector[Index[15] & 0xF]}; |
1028 | | #else |
1029 | | uint8x16_t Result = __builtin_shuffle(Vector, Index); |
1030 | | #endif |
1031 | 0 | Vector = detail::vectorSelect(Exceed, Zero, Result); |
1032 | 0 | return {}; |
1033 | 0 | } |
1034 | 0 | #endif // MSVC |
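Both swizzle branches above (the scalar MSVC loop and the vector-extension path) compute the same `i8x16.swizzle` result: lanes whose runtime index is out of range become zero. A portable scalar sketch, for illustration only:

#include <array>
#include <cstddef>
#include <cstdint>

std::array<uint8_t, 16> swizzle16(const std::array<uint8_t, 16> &Vector,
                                  const std::array<uint8_t, 16> &Index) {
  std::array<uint8_t, 16> Result{};
  for (std::size_t I = 0; I < 16; ++I) {
    // Indices 16..255 select nothing and yield zero, per the Wasm SIMD spec.
    Result[I] = Index[I] < 16 ? Vector[Index[I]] : 0;
  }
  return Result;
}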
1035 | 0 | case OpCode::I8x16__splat: |
1036 | 0 | return runSplatOp<uint32_t, uint8_t>(StackMgr.getTop()); |
1037 | 0 | case OpCode::I16x8__splat: |
1038 | 0 | return runSplatOp<uint32_t, uint16_t>(StackMgr.getTop()); |
1039 | 0 | case OpCode::I32x4__splat: |
1040 | 0 | return runSplatOp<uint32_t>(StackMgr.getTop()); |
1041 | 0 | case OpCode::I64x2__splat: |
1042 | 0 | return runSplatOp<uint64_t>(StackMgr.getTop()); |
1043 | 0 | case OpCode::F32x4__splat: |
1044 | 0 | return runSplatOp<float>(StackMgr.getTop()); |
1045 | 0 | case OpCode::F64x2__splat: |
1046 | 0 | return runSplatOp<double>(StackMgr.getTop()); |
1047 | 0 | case OpCode::I8x16__eq: { |
1048 | 0 | ValVariant Rhs = StackMgr.pop(); |
1049 | 0 | return runVectorEqOp<uint8_t>(StackMgr.getTop(), Rhs); |
1050 | 0 | } |
1051 | 0 | case OpCode::I8x16__ne: { |
1052 | 0 | ValVariant Rhs = StackMgr.pop(); |
1053 | 0 | return runVectorNeOp<uint8_t>(StackMgr.getTop(), Rhs); |
1054 | 0 | } |
1055 | 0 | case OpCode::I8x16__lt_s: { |
1056 | 0 | ValVariant Rhs = StackMgr.pop(); |
1057 | 0 | return runVectorLtOp<int8_t>(StackMgr.getTop(), Rhs); |
1058 | 0 | } |
1059 | 0 | case OpCode::I8x16__lt_u: { |
1060 | 0 | ValVariant Rhs = StackMgr.pop(); |
1061 | 0 | return runVectorLtOp<uint8_t>(StackMgr.getTop(), Rhs); |
1062 | 0 | } |
1063 | 0 | case OpCode::I8x16__gt_s: { |
1064 | 0 | ValVariant Rhs = StackMgr.pop(); |
1065 | 0 | return runVectorGtOp<int8_t>(StackMgr.getTop(), Rhs); |
1066 | 0 | } |
1067 | 0 | case OpCode::I8x16__gt_u: { |
1068 | 0 | ValVariant Rhs = StackMgr.pop(); |
1069 | 0 | return runVectorGtOp<uint8_t>(StackMgr.getTop(), Rhs); |
1070 | 0 | } |
1071 | 0 | case OpCode::I8x16__le_s: { |
1072 | 0 | ValVariant Rhs = StackMgr.pop(); |
1073 | 0 | return runVectorLeOp<int8_t>(StackMgr.getTop(), Rhs); |
1074 | 0 | } |
1075 | 0 | case OpCode::I8x16__le_u: { |
1076 | 0 | ValVariant Rhs = StackMgr.pop(); |
1077 | 0 | return runVectorLeOp<uint8_t>(StackMgr.getTop(), Rhs); |
1078 | 0 | } |
1079 | 0 | case OpCode::I8x16__ge_s: { |
1080 | 0 | ValVariant Rhs = StackMgr.pop(); |
1081 | 0 | return runVectorGeOp<int8_t>(StackMgr.getTop(), Rhs); |
1082 | 0 | } |
1083 | 0 | case OpCode::I8x16__ge_u: { |
1084 | 0 | ValVariant Rhs = StackMgr.pop(); |
1085 | 0 | return runVectorGeOp<uint8_t>(StackMgr.getTop(), Rhs); |
1086 | 0 | } |
1087 | 0 | case OpCode::I16x8__eq: { |
1088 | 0 | ValVariant Rhs = StackMgr.pop(); |
1089 | 0 | return runVectorEqOp<uint16_t>(StackMgr.getTop(), Rhs); |
1090 | 0 | } |
1091 | 0 | case OpCode::I16x8__ne: { |
1092 | 0 | ValVariant Rhs = StackMgr.pop(); |
1093 | 0 | return runVectorNeOp<uint16_t>(StackMgr.getTop(), Rhs); |
1094 | 0 | } |
1095 | 0 | case OpCode::I16x8__lt_s: { |
1096 | 0 | ValVariant Rhs = StackMgr.pop(); |
1097 | 0 | return runVectorLtOp<int16_t>(StackMgr.getTop(), Rhs); |
1098 | 0 | } |
1099 | 0 | case OpCode::I16x8__lt_u: { |
1100 | 0 | ValVariant Rhs = StackMgr.pop(); |
1101 | 0 | return runVectorLtOp<uint16_t>(StackMgr.getTop(), Rhs); |
1102 | 0 | } |
1103 | 0 | case OpCode::I16x8__gt_s: { |
1104 | 0 | ValVariant Rhs = StackMgr.pop(); |
1105 | 0 | return runVectorGtOp<int16_t>(StackMgr.getTop(), Rhs); |
1106 | 0 | } |
1107 | 0 | case OpCode::I16x8__gt_u: { |
1108 | 0 | ValVariant Rhs = StackMgr.pop(); |
1109 | 0 | return runVectorGtOp<uint16_t>(StackMgr.getTop(), Rhs); |
1110 | 0 | } |
1111 | 0 | case OpCode::I16x8__le_s: { |
1112 | 0 | ValVariant Rhs = StackMgr.pop(); |
1113 | 0 | return runVectorLeOp<int16_t>(StackMgr.getTop(), Rhs); |
1114 | 0 | } |
1115 | 0 | case OpCode::I16x8__le_u: { |
1116 | 0 | ValVariant Rhs = StackMgr.pop(); |
1117 | 0 | return runVectorLeOp<uint16_t>(StackMgr.getTop(), Rhs); |
1118 | 0 | } |
1119 | 0 | case OpCode::I16x8__ge_s: { |
1120 | 0 | ValVariant Rhs = StackMgr.pop(); |
1121 | 0 | return runVectorGeOp<int16_t>(StackMgr.getTop(), Rhs); |
1122 | 0 | } |
1123 | 0 | case OpCode::I16x8__ge_u: { |
1124 | 0 | ValVariant Rhs = StackMgr.pop(); |
1125 | 0 | return runVectorGeOp<uint16_t>(StackMgr.getTop(), Rhs); |
1126 | 0 | } |
1127 | 0 | case OpCode::I32x4__eq: { |
1128 | 0 | ValVariant Rhs = StackMgr.pop(); |
1129 | 0 | return runVectorEqOp<uint32_t>(StackMgr.getTop(), Rhs); |
1130 | 0 | } |
1131 | 0 | case OpCode::I32x4__ne: { |
1132 | 0 | ValVariant Rhs = StackMgr.pop(); |
1133 | 0 | return runVectorNeOp<uint32_t>(StackMgr.getTop(), Rhs); |
1134 | 0 | } |
1135 | 0 | case OpCode::I32x4__lt_s: { |
1136 | 0 | ValVariant Rhs = StackMgr.pop(); |
1137 | 0 | return runVectorLtOp<int32_t>(StackMgr.getTop(), Rhs); |
1138 | 0 | } |
1139 | 0 | case OpCode::I32x4__lt_u: { |
1140 | 0 | ValVariant Rhs = StackMgr.pop(); |
1141 | 0 | return runVectorLtOp<uint32_t>(StackMgr.getTop(), Rhs); |
1142 | 0 | } |
1143 | 0 | case OpCode::I32x4__gt_s: { |
1144 | 0 | ValVariant Rhs = StackMgr.pop(); |
1145 | 0 | return runVectorGtOp<int32_t>(StackMgr.getTop(), Rhs); |
1146 | 0 | } |
1147 | 0 | case OpCode::I32x4__gt_u: { |
1148 | 0 | ValVariant Rhs = StackMgr.pop(); |
1149 | 0 | return runVectorGtOp<uint32_t>(StackMgr.getTop(), Rhs); |
1150 | 0 | } |
1151 | 0 | case OpCode::I32x4__le_s: { |
1152 | 0 | ValVariant Rhs = StackMgr.pop(); |
1153 | 0 | return runVectorLeOp<int32_t>(StackMgr.getTop(), Rhs); |
1154 | 0 | } |
1155 | 0 | case OpCode::I32x4__le_u: { |
1156 | 0 | ValVariant Rhs = StackMgr.pop(); |
1157 | 0 | return runVectorLeOp<uint32_t>(StackMgr.getTop(), Rhs); |
1158 | 0 | } |
1159 | 0 | case OpCode::I32x4__ge_s: { |
1160 | 0 | ValVariant Rhs = StackMgr.pop(); |
1161 | 0 | return runVectorGeOp<int32_t>(StackMgr.getTop(), Rhs); |
1162 | 0 | } |
1163 | 0 | case OpCode::I32x4__ge_u: { |
1164 | 0 | ValVariant Rhs = StackMgr.pop(); |
1165 | 0 | return runVectorGeOp<uint32_t>(StackMgr.getTop(), Rhs); |
1166 | 0 | } |
1167 | 0 | case OpCode::I64x2__eq: { |
1168 | 0 | ValVariant Rhs = StackMgr.pop(); |
1169 | 0 | return runVectorEqOp<uint64_t>(StackMgr.getTop(), Rhs); |
1170 | 0 | } |
1171 | 0 | case OpCode::I64x2__ne: { |
1172 | 0 | ValVariant Rhs = StackMgr.pop(); |
1173 | 0 | return runVectorNeOp<uint64_t>(StackMgr.getTop(), Rhs); |
1174 | 0 | } |
1175 | 0 | case OpCode::I64x2__lt_s: { |
1176 | 0 | ValVariant Rhs = StackMgr.pop(); |
1177 | 0 | return runVectorLtOp<int64_t>(StackMgr.getTop(), Rhs); |
1178 | 0 | } |
1179 | 0 | case OpCode::I64x2__gt_s: { |
1180 | 0 | ValVariant Rhs = StackMgr.pop(); |
1181 | 0 | return runVectorGtOp<int64_t>(StackMgr.getTop(), Rhs); |
1182 | 0 | } |
1183 | 0 | case OpCode::I64x2__le_s: { |
1184 | 0 | ValVariant Rhs = StackMgr.pop(); |
1185 | 0 | return runVectorLeOp<int64_t>(StackMgr.getTop(), Rhs); |
1186 | 0 | } |
1187 | 0 | case OpCode::I64x2__ge_s: { |
1188 | 0 | ValVariant Rhs = StackMgr.pop(); |
1189 | 0 | return runVectorGeOp<int64_t>(StackMgr.getTop(), Rhs); |
1190 | 0 | } |
1191 | 0 | case OpCode::F32x4__eq: { |
1192 | 0 | ValVariant Rhs = StackMgr.pop(); |
1193 | 0 | return runVectorEqOp<float>(StackMgr.getTop(), Rhs); |
1194 | 0 | } |
1195 | 0 | case OpCode::F32x4__ne: { |
1196 | 0 | ValVariant Rhs = StackMgr.pop(); |
1197 | 0 | return runVectorNeOp<float>(StackMgr.getTop(), Rhs); |
1198 | 0 | } |
1199 | 0 | case OpCode::F32x4__lt: { |
1200 | 0 | ValVariant Rhs = StackMgr.pop(); |
1201 | 0 | return runVectorLtOp<float>(StackMgr.getTop(), Rhs); |
1202 | 0 | } |
1203 | 0 | case OpCode::F32x4__gt: { |
1204 | 0 | ValVariant Rhs = StackMgr.pop(); |
1205 | 0 | return runVectorGtOp<float>(StackMgr.getTop(), Rhs); |
1206 | 0 | } |
1207 | 0 | case OpCode::F32x4__le: { |
1208 | 0 | ValVariant Rhs = StackMgr.pop(); |
1209 | 0 | return runVectorLeOp<float>(StackMgr.getTop(), Rhs); |
1210 | 0 | } |
1211 | 0 | case OpCode::F32x4__ge: { |
1212 | 0 | ValVariant Rhs = StackMgr.pop(); |
1213 | 0 | return runVectorGeOp<float>(StackMgr.getTop(), Rhs); |
1214 | 0 | } |
1215 | 0 | case OpCode::F64x2__eq: { |
1216 | 0 | ValVariant Rhs = StackMgr.pop(); |
1217 | 0 | return runVectorEqOp<double>(StackMgr.getTop(), Rhs); |
1218 | 0 | } |
1219 | 0 | case OpCode::F64x2__ne: { |
1220 | 0 | ValVariant Rhs = StackMgr.pop(); |
1221 | 0 | return runVectorNeOp<double>(StackMgr.getTop(), Rhs); |
1222 | 0 | } |
1223 | 0 | case OpCode::F64x2__lt: { |
1224 | 0 | ValVariant Rhs = StackMgr.pop(); |
1225 | 0 | return runVectorLtOp<double>(StackMgr.getTop(), Rhs); |
1226 | 0 | } |
1227 | 0 | case OpCode::F64x2__gt: { |
1228 | 0 | ValVariant Rhs = StackMgr.pop(); |
1229 | 0 | return runVectorGtOp<double>(StackMgr.getTop(), Rhs); |
1230 | 0 | } |
1231 | 0 | case OpCode::F64x2__le: { |
1232 | 0 | ValVariant Rhs = StackMgr.pop(); |
1233 | 0 | return runVectorLeOp<double>(StackMgr.getTop(), Rhs); |
1234 | 0 | } |
1235 | 0 | case OpCode::F64x2__ge: { |
1236 | 0 | ValVariant Rhs = StackMgr.pop(); |
1237 | 0 | return runVectorGeOp<double>(StackMgr.getTop(), Rhs); |
1238 | 0 | } |
1239 | 0 | case OpCode::V128__not: { |
1240 | 0 | auto &Val = StackMgr.getTop().get<uint128_t>(); |
1241 | 0 | Val = ~Val; |
1242 | 0 | return {}; |
1243 | 0 | } |
1244 | 0 | case OpCode::V128__and: { |
1245 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1246 | 0 | ValVariant &Val1 = StackMgr.getTop(); |
1247 | | #if defined(_MSC_VER) && !defined(__clang__) // MSVC |
1248 | | auto &Result = Val1.get<uint64x2_t>(); |
1249 | | auto &Vector = Val2.get<uint64x2_t>(); |
1250 | | Result[0] &= Vector[0]; |
1251 | | Result[1] &= Vector[1]; |
1252 | | #else |
1253 | 0 | Val1.get<uint64x2_t>() &= Val2.get<uint64x2_t>(); |
1254 | 0 | #endif // MSVC |
1255 | 0 | return {}; |
1256 | 0 | } |
1257 | 0 | case OpCode::V128__andnot: { |
1258 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1259 | 0 | ValVariant &Val1 = StackMgr.getTop(); |
1260 | | #if defined(_MSC_VER) && !defined(__clang__) // MSVC |
1261 | | auto &Result = Val1.get<uint64x2_t>(); |
1262 | | auto &Vector = Val2.get<uint64x2_t>(); |
1263 | | Result[0] &= ~Vector[0]; |
1264 | | Result[1] &= ~Vector[1]; |
1265 | | #else |
1266 | 0 | Val1.get<uint64x2_t>() &= ~Val2.get<uint64x2_t>(); |
1267 | 0 | #endif // MSVC |
1268 | 0 | return {}; |
1269 | 0 | } |
1270 | 0 | case OpCode::V128__or: { |
1271 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1272 | 0 | ValVariant &Val1 = StackMgr.getTop(); |
1273 | | #if defined(_MSC_VER) && !defined(__clang__) // MSVC |
1274 | | auto &Result = Val1.get<uint64x2_t>(); |
1275 | | auto &Vector = Val2.get<uint64x2_t>(); |
1276 | | Result[0] |= Vector[0]; |
1277 | | Result[1] |= Vector[1]; |
1278 | | #else |
1279 | 0 | Val1.get<uint64x2_t>() |= Val2.get<uint64x2_t>(); |
1280 | 0 | #endif // MSVC |
1281 | 0 | return {}; |
1282 | 0 | } |
1283 | 0 | case OpCode::V128__xor: { |
1284 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1285 | 0 | ValVariant &Val1 = StackMgr.getTop(); |
1286 | | #if defined(_MSC_VER) && !defined(__clang__) // MSVC |
1287 | | auto &Result = Val1.get<uint64x2_t>(); |
1288 | | auto &Vector = Val2.get<uint64x2_t>(); |
1289 | | Result[0] ^= Vector[0]; |
1290 | | Result[1] ^= Vector[1]; |
1291 | | #else |
1292 | 0 | Val1.get<uint64x2_t>() ^= Val2.get<uint64x2_t>(); |
1293 | 0 | #endif // MSVC |
1294 | 0 | return {}; |
1295 | 0 | } |
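     |     | // v128.bitselect: take bits from the first operand where the mask bit is
     |     | // set and from the second operand where it is clear.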
1296 | 0 | case OpCode::V128__bitselect: { |
1297 | 0 | const uint64x2_t C = StackMgr.pop().get<uint64x2_t>(); |
1298 | 0 | const uint64x2_t Val2 = StackMgr.pop().get<uint64x2_t>(); |
1299 | 0 | uint64x2_t &Val1 = StackMgr.getTop().get<uint64x2_t>(); |
1300 | | #if defined(_MSC_VER) && !defined(__clang__) // MSVC |
1301 | | Val1[0] = (Val1[0] & C[0]) | (Val2[0] & ~C[0]); |
1302 | | Val1[1] = (Val1[1] & C[1]) | (Val2[1] & ~C[1]); |
1303 | | #else |
1304 | 0 | Val1 = (Val1 & C) | (Val2 & ~C); |
1305 | 0 | #endif // MSVC |
1306 | 0 | return {}; |
1307 | 0 | } |
1308 | 0 | case OpCode::V128__any_true: |
1309 | 0 | return runVectorAnyTrueOp(StackMgr.getTop()); |
1310 | 0 | case OpCode::I8x16__abs: |
1311 | 0 | return runVectorAbsOp<int8_t>(StackMgr.getTop()); |
1312 | 0 | case OpCode::I8x16__neg: |
1313 | 0 | return runVectorNegOp<int8_t>(StackMgr.getTop()); |
1314 | 0 | case OpCode::I8x16__popcnt: |
1315 | 0 | return runVectorPopcntOp(StackMgr.getTop()); |
1316 | 0 | case OpCode::I8x16__all_true: |
1317 | 0 | return runVectorAllTrueOp<uint8_t>(StackMgr.getTop()); |
1318 | 0 | case OpCode::I8x16__bitmask: |
1319 | 0 | return runVectorBitMaskOp<uint8_t>(StackMgr.getTop()); |
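     |     | // Narrowing packs each lane of the wider source type into the narrower
     |     | // destination type with signed or unsigned saturation.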
1320 | 0 | case OpCode::I8x16__narrow_i16x8_s: { |
1321 | 0 | ValVariant Rhs = StackMgr.pop(); |
1322 | 0 | return runVectorNarrowOp<int16_t, int8_t>(StackMgr.getTop(), Rhs); |
1323 | 0 | } |
1324 | 0 | case OpCode::I8x16__narrow_i16x8_u: { |
1325 | 0 | ValVariant Rhs = StackMgr.pop(); |
1326 | 0 | return runVectorNarrowOp<int16_t, uint8_t>(StackMgr.getTop(), Rhs); |
1327 | 0 | } |
1328 | 0 | case OpCode::I8x16__shl: { |
1329 | 0 | ValVariant Rhs = StackMgr.pop(); |
1330 | 0 | return runVectorShlOp<uint8_t>(StackMgr.getTop(), Rhs); |
1331 | 0 | } |
1332 | 0 | case OpCode::I8x16__shr_s: { |
1333 | 0 | ValVariant Rhs = StackMgr.pop(); |
1334 | 0 | return runVectorShrOp<int8_t>(StackMgr.getTop(), Rhs); |
1335 | 0 | } |
1336 | 0 | case OpCode::I8x16__shr_u: { |
1337 | 0 | ValVariant Rhs = StackMgr.pop(); |
1338 | 0 | return runVectorShrOp<uint8_t>(StackMgr.getTop(), Rhs); |
1339 | 0 | } |
1340 | 0 | case OpCode::I8x16__add: { |
1341 | 0 | ValVariant Rhs = StackMgr.pop(); |
1342 | 0 | return runVectorAddOp<uint8_t>(StackMgr.getTop(), Rhs); |
1343 | 0 | } |
1344 | 0 | case OpCode::I8x16__add_sat_s: { |
1345 | 0 | ValVariant Rhs = StackMgr.pop(); |
1346 | 0 | return runVectorAddSatOp<int8_t>(StackMgr.getTop(), Rhs); |
1347 | 0 | } |
1348 | 0 | case OpCode::I8x16__add_sat_u: { |
1349 | 0 | ValVariant Rhs = StackMgr.pop(); |
1350 | 0 | return runVectorAddSatOp<uint8_t>(StackMgr.getTop(), Rhs); |
1351 | 0 | } |
1352 | 0 | case OpCode::I8x16__sub: { |
1353 | 0 | ValVariant Rhs = StackMgr.pop(); |
1354 | 0 | return runVectorSubOp<uint8_t>(StackMgr.getTop(), Rhs); |
1355 | 0 | } |
1356 | 0 | case OpCode::I8x16__sub_sat_s: { |
1357 | 0 | ValVariant Rhs = StackMgr.pop(); |
1358 | 0 | return runVectorSubSatOp<int8_t>(StackMgr.getTop(), Rhs); |
1359 | 0 | } |
1360 | 0 | case OpCode::I8x16__sub_sat_u: { |
1361 | 0 | ValVariant Rhs = StackMgr.pop(); |
1362 | 0 | return runVectorSubSatOp<uint8_t>(StackMgr.getTop(), Rhs); |
1363 | 0 | } |
1364 | 0 | case OpCode::I8x16__min_s: { |
1365 | 0 | ValVariant Rhs = StackMgr.pop(); |
1366 | 0 | return runVectorMinOp<int8_t>(StackMgr.getTop(), Rhs); |
1367 | 0 | } |
1368 | 0 | case OpCode::I8x16__min_u: { |
1369 | 0 | ValVariant Rhs = StackMgr.pop(); |
1370 | 0 | return runVectorMinOp<uint8_t>(StackMgr.getTop(), Rhs); |
1371 | 0 | } |
1372 | 0 | case OpCode::I8x16__max_s: { |
1373 | 0 | ValVariant Rhs = StackMgr.pop(); |
1374 | 0 | return runVectorMaxOp<int8_t>(StackMgr.getTop(), Rhs); |
1375 | 0 | } |
1376 | 0 | case OpCode::I8x16__max_u: { |
1377 | 0 | ValVariant Rhs = StackMgr.pop(); |
1378 | 0 | return runVectorMaxOp<uint8_t>(StackMgr.getTop(), Rhs); |
1379 | 0 | } |
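     |     | // avgr_u is the rounding average (a + b + 1) / 2; the second template
     |     | // parameter is the wider intermediate type used to avoid overflow.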
1380 | 0 | case OpCode::I8x16__avgr_u: { |
1381 | 0 | ValVariant Rhs = StackMgr.pop(); |
1382 | 0 | return runVectorAvgrOp<uint8_t, uint16_t>(StackMgr.getTop(), Rhs); |
1383 | 0 | } |
1384 | 0 | case OpCode::I16x8__abs: |
1385 | 0 | return runVectorAbsOp<int16_t>(StackMgr.getTop()); |
1386 | 0 | case OpCode::I16x8__neg: |
1387 | 0 | return runVectorNegOp<int16_t>(StackMgr.getTop()); |
1388 | 0 | case OpCode::I16x8__all_true: |
1389 | 0 | return runVectorAllTrueOp<uint16_t>(StackMgr.getTop()); |
1390 | 0 | case OpCode::I16x8__bitmask: |
1391 | 0 | return runVectorBitMaskOp<uint16_t>(StackMgr.getTop()); |
1392 | 0 | case OpCode::I16x8__narrow_i32x4_s: { |
1393 | 0 | ValVariant Rhs = StackMgr.pop(); |
1394 | 0 | return runVectorNarrowOp<int32_t, int16_t>(StackMgr.getTop(), Rhs); |
1395 | 0 | } |
1396 | 0 | case OpCode::I16x8__narrow_i32x4_u: { |
1397 | 0 | ValVariant Rhs = StackMgr.pop(); |
1398 | 0 | return runVectorNarrowOp<int32_t, uint16_t>(StackMgr.getTop(), Rhs); |
1399 | 0 | } |
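     |     | // extend_low/high take the lower or upper half of the lanes and sign- or
     |     | // zero-extend them into the wider lane type.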
1400 | 0 | case OpCode::I16x8__extend_low_i8x16_s: |
1401 | 0 | return runVectorExtendLowOp<int8_t, int16_t>(StackMgr.getTop()); |
1402 | 0 | case OpCode::I16x8__extend_high_i8x16_s: |
1403 | 0 | return runVectorExtendHighOp<int8_t, int16_t>(StackMgr.getTop()); |
1404 | 0 | case OpCode::I16x8__extend_low_i8x16_u: |
1405 | 0 | return runVectorExtendLowOp<uint8_t, uint16_t>(StackMgr.getTop()); |
1406 | 0 | case OpCode::I16x8__extend_high_i8x16_u: |
1407 | 0 | return runVectorExtendHighOp<uint8_t, uint16_t>(StackMgr.getTop()); |
1408 | 0 | case OpCode::I16x8__shl: { |
1409 | 0 | ValVariant Rhs = StackMgr.pop(); |
1410 | 0 | return runVectorShlOp<uint16_t>(StackMgr.getTop(), Rhs); |
1411 | 0 | } |
1412 | 0 | case OpCode::I16x8__shr_s: { |
1413 | 0 | ValVariant Rhs = StackMgr.pop(); |
1414 | 0 | return runVectorShrOp<int16_t>(StackMgr.getTop(), Rhs); |
1415 | 0 | } |
1416 | 0 | case OpCode::I16x8__shr_u: { |
1417 | 0 | ValVariant Rhs = StackMgr.pop(); |
1418 | 0 | return runVectorShrOp<uint16_t>(StackMgr.getTop(), Rhs); |
1419 | 0 | } |
1420 | 0 | case OpCode::I16x8__add: { |
1421 | 0 | ValVariant Rhs = StackMgr.pop(); |
1422 | 0 | return runVectorAddOp<uint16_t>(StackMgr.getTop(), Rhs); |
1423 | 0 | } |
1424 | 0 | case OpCode::I16x8__add_sat_s: { |
1425 | 0 | ValVariant Rhs = StackMgr.pop(); |
1426 | 0 | return runVectorAddSatOp<int16_t>(StackMgr.getTop(), Rhs); |
1427 | 0 | } |
1428 | 0 | case OpCode::I16x8__add_sat_u: { |
1429 | 0 | ValVariant Rhs = StackMgr.pop(); |
1430 | 0 | return runVectorAddSatOp<uint16_t>(StackMgr.getTop(), Rhs); |
1431 | 0 | } |
1432 | 0 | case OpCode::I16x8__sub: { |
1433 | 0 | ValVariant Rhs = StackMgr.pop(); |
1434 | 0 | return runVectorSubOp<uint16_t>(StackMgr.getTop(), Rhs); |
1435 | 0 | } |
1436 | 0 | case OpCode::I16x8__sub_sat_s: { |
1437 | 0 | ValVariant Rhs = StackMgr.pop(); |
1438 | 0 | return runVectorSubSatOp<int16_t>(StackMgr.getTop(), Rhs); |
1439 | 0 | } |
1440 | 0 | case OpCode::I16x8__sub_sat_u: { |
1441 | 0 | ValVariant Rhs = StackMgr.pop(); |
1442 | 0 | return runVectorSubSatOp<uint16_t>(StackMgr.getTop(), Rhs); |
1443 | 0 | } |
1444 | 0 | case OpCode::I16x8__mul: { |
1445 | 0 | ValVariant Rhs = StackMgr.pop(); |
1446 | 0 | return runVectorMulOp<uint16_t>(StackMgr.getTop(), Rhs); |
1447 | 0 | } |
1448 | 0 | case OpCode::I16x8__min_s: { |
1449 | 0 | ValVariant Rhs = StackMgr.pop(); |
1450 | 0 | return runVectorMinOp<int16_t>(StackMgr.getTop(), Rhs); |
1451 | 0 | } |
1452 | 0 | case OpCode::I16x8__min_u: { |
1453 | 0 | ValVariant Rhs = StackMgr.pop(); |
1454 | 0 | return runVectorMinOp<uint16_t>(StackMgr.getTop(), Rhs); |
1455 | 0 | } |
1456 | 0 | case OpCode::I16x8__max_s: { |
1457 | 0 | ValVariant Rhs = StackMgr.pop(); |
1458 | 0 | return runVectorMaxOp<int16_t>(StackMgr.getTop(), Rhs); |
1459 | 0 | } |
1460 | 0 | case OpCode::I16x8__max_u: { |
1461 | 0 | ValVariant Rhs = StackMgr.pop(); |
1462 | 0 | return runVectorMaxOp<uint16_t>(StackMgr.getTop(), Rhs); |
1463 | 0 | } |
1464 | 0 | case OpCode::I16x8__avgr_u: { |
1465 | 0 | ValVariant Rhs = StackMgr.pop(); |
1466 | 0 | return runVectorAvgrOp<uint16_t, uint32_t>(StackMgr.getTop(), Rhs); |
1467 | 0 | } |
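     |     | // extmul_low/high extend the selected half of each operand and multiply,
     |     | // producing full-width products in the wider lane type.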
1468 | 0 | case OpCode::I16x8__extmul_low_i8x16_s: { |
1469 | 0 | ValVariant Rhs = StackMgr.pop(); |
1470 | 0 | return runVectorExtMulLowOp<int8_t, int16_t>(StackMgr.getTop(), Rhs); |
1471 | 0 | } |
1472 | 0 | case OpCode::I16x8__extmul_high_i8x16_s: { |
1473 | 0 | ValVariant Rhs = StackMgr.pop(); |
1474 | 0 | return runVectorExtMulHighOp<int8_t, int16_t>(StackMgr.getTop(), Rhs); |
1475 | 0 | } |
1476 | 0 | case OpCode::I16x8__extmul_low_i8x16_u: { |
1477 | 0 | ValVariant Rhs = StackMgr.pop(); |
1478 | 0 | return runVectorExtMulLowOp<uint8_t, uint16_t>(StackMgr.getTop(), Rhs); |
1479 | 0 | } |
1480 | 0 | case OpCode::I16x8__extmul_high_i8x16_u: { |
1481 | 0 | ValVariant Rhs = StackMgr.pop(); |
1482 | 0 | return runVectorExtMulHighOp<uint8_t, uint16_t>(StackMgr.getTop(), Rhs); |
1483 | 0 | } |
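     |     | // q15mulr_sat_s is the rounding Q15 fixed-point multiply:
     |     | // sat16((a * b + 0x4000) >> 15) per lane.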
1484 | 0 | case OpCode::I16x8__q15mulr_sat_s: { |
1485 | 0 | ValVariant Rhs = StackMgr.pop(); |
1486 | 0 | return runVectorQ15MulSatOp(StackMgr.getTop(), Rhs); |
1487 | 0 | } |
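     |     | // extadd_pairwise sums adjacent lane pairs into the wider lane type.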
1488 | 0 | case OpCode::I16x8__extadd_pairwise_i8x16_s: |
1489 | 0 | return runVectorExtAddPairwiseOp<int8_t, int16_t>(StackMgr.getTop()); |
1490 | 0 | case OpCode::I16x8__extadd_pairwise_i8x16_u: |
1491 | 0 | return runVectorExtAddPairwiseOp<uint8_t, uint16_t>(StackMgr.getTop()); |
1492 | 0 | case OpCode::I32x4__abs: |
1493 | 0 | return runVectorAbsOp<int32_t>(StackMgr.getTop()); |
1494 | 0 | case OpCode::I32x4__neg: |
1495 | 0 | return runVectorNegOp<int32_t>(StackMgr.getTop()); |
1496 | 0 | case OpCode::I32x4__all_true: |
1497 | 0 | return runVectorAllTrueOp<uint32_t>(StackMgr.getTop()); |
1498 | 0 | case OpCode::I32x4__bitmask: |
1499 | 0 | return runVectorBitMaskOp<uint32_t>(StackMgr.getTop()); |
1500 | 0 | case OpCode::I32x4__extend_low_i16x8_s: |
1501 | 0 | return runVectorExtendLowOp<int16_t, int32_t>(StackMgr.getTop()); |
1502 | 0 | case OpCode::I32x4__extend_high_i16x8_s: |
1503 | 0 | return runVectorExtendHighOp<int16_t, int32_t>(StackMgr.getTop()); |
1504 | 0 | case OpCode::I32x4__extend_low_i16x8_u: |
1505 | 0 | return runVectorExtendLowOp<uint16_t, uint32_t>(StackMgr.getTop()); |
1506 | 0 | case OpCode::I32x4__extend_high_i16x8_u: |
1507 | 0 | return runVectorExtendHighOp<uint16_t, uint32_t>(StackMgr.getTop()); |
1508 | 0 | case OpCode::I32x4__shl: { |
1509 | 0 | ValVariant Rhs = StackMgr.pop(); |
1510 | 0 | return runVectorShlOp<uint32_t>(StackMgr.getTop(), Rhs); |
1511 | 0 | } |
1512 | 0 | case OpCode::I32x4__shr_s: { |
1513 | 0 | ValVariant Rhs = StackMgr.pop(); |
1514 | 0 | return runVectorShrOp<int32_t>(StackMgr.getTop(), Rhs); |
1515 | 0 | } |
1516 | 0 | case OpCode::I32x4__shr_u: { |
1517 | 0 | ValVariant Rhs = StackMgr.pop(); |
1518 | 0 | return runVectorShrOp<uint32_t>(StackMgr.getTop(), Rhs); |
1519 | 0 | } |
1520 | 0 | case OpCode::I32x4__add: { |
1521 | 0 | ValVariant Rhs = StackMgr.pop(); |
1522 | 0 | return runVectorAddOp<uint32_t>(StackMgr.getTop(), Rhs); |
1523 | 0 | } |
1524 | 0 | case OpCode::I32x4__sub: { |
1525 | 0 | ValVariant Rhs = StackMgr.pop(); |
1526 | 0 | return runVectorSubOp<uint32_t>(StackMgr.getTop(), Rhs); |
1527 | 0 | } |
1528 | 0 | case OpCode::I32x4__mul: { |
1529 | 0 | ValVariant Rhs = StackMgr.pop(); |
1530 | 0 | return runVectorMulOp<uint32_t>(StackMgr.getTop(), Rhs); |
1531 | 0 | } |
1532 | 0 | case OpCode::I32x4__min_s: { |
1533 | 0 | ValVariant Rhs = StackMgr.pop(); |
1534 | 0 | return runVectorMinOp<int32_t>(StackMgr.getTop(), Rhs); |
1535 | 0 | } |
1536 | 0 | case OpCode::I32x4__min_u: { |
1537 | 0 | ValVariant Rhs = StackMgr.pop(); |
1538 | 0 | return runVectorMinOp<uint32_t>(StackMgr.getTop(), Rhs); |
1539 | 0 | } |
1540 | 0 | case OpCode::I32x4__max_s: { |
1541 | 0 | ValVariant Rhs = StackMgr.pop(); |
1542 | 0 | return runVectorMaxOp<int32_t>(StackMgr.getTop(), Rhs); |
1543 | 0 | } |
1544 | 0 | case OpCode::I32x4__max_u: { |
1545 | 0 | ValVariant Rhs = StackMgr.pop(); |
1546 | 0 | return runVectorMaxOp<uint32_t>(StackMgr.getTop(), Rhs); |
1547 | 0 | } |
1548 | 0 | case OpCode::I32x4__extmul_low_i16x8_s: { |
1549 | 0 | ValVariant Rhs = StackMgr.pop(); |
1550 | 0 | return runVectorExtMulLowOp<int16_t, int32_t>(StackMgr.getTop(), Rhs); |
1551 | 0 | } |
1552 | 0 | case OpCode::I32x4__extmul_high_i16x8_s: { |
1553 | 0 | ValVariant Rhs = StackMgr.pop(); |
1554 | 0 | return runVectorExtMulHighOp<int16_t, int32_t>(StackMgr.getTop(), Rhs); |
1555 | 0 | } |
1556 | 0 | case OpCode::I32x4__extmul_low_i16x8_u: { |
1557 | 0 | ValVariant Rhs = StackMgr.pop(); |
1558 | 0 | return runVectorExtMulLowOp<uint16_t, uint32_t>(StackMgr.getTop(), Rhs); |
1559 | 0 | } |
1560 | 0 | case OpCode::I32x4__extmul_high_i16x8_u: { |
1561 | 0 | ValVariant Rhs = StackMgr.pop(); |
1562 | 0 | return runVectorExtMulHighOp<uint16_t, uint32_t>(StackMgr.getTop(), Rhs); |
1563 | 0 | } |
1564 | 0 | case OpCode::I32x4__extadd_pairwise_i16x8_s: |
1565 | 0 | return runVectorExtAddPairwiseOp<int16_t, int32_t>(StackMgr.getTop()); |
1566 | 0 | case OpCode::I32x4__extadd_pairwise_i16x8_u: |
1567 | 0 | return runVectorExtAddPairwiseOp<uint16_t, uint32_t>(StackMgr.getTop()); |
1568 | 0 | case OpCode::I64x2__abs: |
1569 | 0 | return runVectorAbsOp<int64_t>(StackMgr.getTop()); |
1570 | 0 | case OpCode::I64x2__neg: |
1571 | 0 | return runVectorNegOp<int64_t>(StackMgr.getTop()); |
1572 | 0 | case OpCode::I64x2__all_true: |
1573 | 0 | return runVectorAllTrueOp<uint64_t>(StackMgr.getTop()); |
1574 | 0 | case OpCode::I64x2__bitmask: |
1575 | 0 | return runVectorBitMaskOp<uint64_t>(StackMgr.getTop()); |
1576 | 0 | case OpCode::I64x2__extend_low_i32x4_s: |
1577 | 0 | return runVectorExtendLowOp<int32_t, int64_t>(StackMgr.getTop()); |
1578 | 0 | case OpCode::I64x2__extend_high_i32x4_s: |
1579 | 0 | return runVectorExtendHighOp<int32_t, int64_t>(StackMgr.getTop()); |
1580 | 0 | case OpCode::I64x2__extend_low_i32x4_u: |
1581 | 0 | return runVectorExtendLowOp<uint32_t, uint64_t>(StackMgr.getTop()); |
1582 | 0 | case OpCode::I64x2__extend_high_i32x4_u: |
1583 | 0 | return runVectorExtendHighOp<uint32_t, uint64_t>(StackMgr.getTop()); |
1584 | 0 | case OpCode::I64x2__shl: { |
1585 | 0 | ValVariant Rhs = StackMgr.pop(); |
1586 | 0 | return runVectorShlOp<uint64_t>(StackMgr.getTop(), Rhs); |
1587 | 0 | } |
1588 | 0 | case OpCode::I64x2__shr_s: { |
1589 | 0 | ValVariant Rhs = StackMgr.pop(); |
1590 | 0 | return runVectorShrOp<int64_t>(StackMgr.getTop(), Rhs); |
1591 | 0 | } |
1592 | 0 | case OpCode::I64x2__shr_u: { |
1593 | 0 | ValVariant Rhs = StackMgr.pop(); |
1594 | 0 | return runVectorShrOp<uint64_t>(StackMgr.getTop(), Rhs); |
1595 | 0 | } |
1596 | 0 | case OpCode::I64x2__add: { |
1597 | 0 | ValVariant Rhs = StackMgr.pop(); |
1598 | 0 | return runVectorAddOp<uint64_t>(StackMgr.getTop(), Rhs); |
1599 | 0 | } |
1600 | 0 | case OpCode::I64x2__sub: { |
1601 | 0 | ValVariant Rhs = StackMgr.pop(); |
1602 | 0 | return runVectorSubOp<uint64_t>(StackMgr.getTop(), Rhs); |
1603 | 0 | } |
1604 | 0 | case OpCode::I64x2__mul: { |
1605 | 0 | ValVariant Rhs = StackMgr.pop(); |
1606 | 0 | return runVectorMulOp<uint64_t>(StackMgr.getTop(), Rhs); |
1607 | 0 | } |
1608 | 0 | case OpCode::I64x2__extmul_low_i32x4_s: { |
1609 | 0 | ValVariant Rhs = StackMgr.pop(); |
1610 | 0 | return runVectorExtMulLowOp<int32_t, int64_t>(StackMgr.getTop(), Rhs); |
1611 | 0 | } |
1612 | 0 | case OpCode::I64x2__extmul_high_i32x4_s: { |
1613 | 0 | ValVariant Rhs = StackMgr.pop(); |
1614 | 0 | return runVectorExtMulHighOp<int32_t, int64_t>(StackMgr.getTop(), Rhs); |
1615 | 0 | } |
1616 | 0 | case OpCode::I64x2__extmul_low_i32x4_u: { |
1617 | 0 | ValVariant Rhs = StackMgr.pop(); |
1618 | 0 | return runVectorExtMulLowOp<uint32_t, uint64_t>(StackMgr.getTop(), Rhs); |
1619 | 0 | } |
1620 | 0 | case OpCode::I64x2__extmul_high_i32x4_u: { |
1621 | 0 | ValVariant Rhs = StackMgr.pop(); |
1622 | 0 | return runVectorExtMulHighOp<uint32_t, uint64_t>(StackMgr.getTop(), Rhs); |
1623 | 0 | } |
1624 | 0 | case OpCode::F32x4__abs: |
1625 | 0 | return runVectorAbsOp<float>(StackMgr.getTop()); |
1626 | 0 | case OpCode::F32x4__neg: |
1627 | 0 | return runVectorNegOp<float>(StackMgr.getTop()); |
1628 | 0 | case OpCode::F32x4__sqrt: |
1629 | 0 | return runVectorSqrtOp<float>(StackMgr.getTop()); |
1630 | 0 | case OpCode::F32x4__add: { |
1631 | 0 | ValVariant Rhs = StackMgr.pop(); |
1632 | 0 | return runVectorAddOp<float>(StackMgr.getTop(), Rhs); |
1633 | 0 | } |
1634 | 0 | case OpCode::F32x4__sub: { |
1635 | 0 | ValVariant Rhs = StackMgr.pop(); |
1636 | 0 | return runVectorSubOp<float>(StackMgr.getTop(), Rhs); |
1637 | 0 | } |
1638 | 0 | case OpCode::F32x4__mul: { |
1639 | 0 | ValVariant Rhs = StackMgr.pop(); |
1640 | 0 | return runVectorMulOp<float>(StackMgr.getTop(), Rhs); |
1641 | 0 | } |
1642 | 0 | case OpCode::F32x4__div: { |
1643 | 0 | ValVariant Rhs = StackMgr.pop(); |
1644 | 0 | return runVectorDivOp<float>(StackMgr.getTop(), Rhs); |
1645 | 0 | } |
1646 | 0 | case OpCode::F32x4__min: { |
1647 | 0 | ValVariant Rhs = StackMgr.pop(); |
1648 | 0 | return runVectorFMinOp<float>(StackMgr.getTop(), Rhs); |
1649 | 0 | } |
1650 | 0 | case OpCode::F32x4__max: { |
1651 | 0 | ValVariant Rhs = StackMgr.pop(); |
1652 | 0 | return runVectorFMaxOp<float>(StackMgr.getTop(), Rhs); |
1653 | 0 | } |
1654 | 0 | case OpCode::F32x4__pmin: { |
1655 | 0 | ValVariant Rhs = StackMgr.pop(); |
1656 | 0 | return runVectorMinOp<float>(StackMgr.getTop(), Rhs); |
1657 | 0 | } |
1658 | 0 | case OpCode::F32x4__pmax: { |
1659 | 0 | ValVariant Rhs = StackMgr.pop(); |
1660 | 0 | return runVectorMaxOp<float>(StackMgr.getTop(), Rhs); |
1661 | 0 | } |
1662 | 0 | case OpCode::F64x2__abs: |
1663 | 0 | return runVectorAbsOp<double>(StackMgr.getTop()); |
1664 | 0 | case OpCode::F64x2__neg: |
1665 | 0 | return runVectorNegOp<double>(StackMgr.getTop()); |
1666 | 0 | case OpCode::F64x2__sqrt: |
1667 | 0 | return runVectorSqrtOp<double>(StackMgr.getTop()); |
1668 | 0 | case OpCode::F64x2__add: { |
1669 | 0 | ValVariant Rhs = StackMgr.pop(); |
1670 | 0 | return runVectorAddOp<double>(StackMgr.getTop(), Rhs); |
1671 | 0 | } |
1672 | 0 | case OpCode::F64x2__sub: { |
1673 | 0 | ValVariant Rhs = StackMgr.pop(); |
1674 | 0 | return runVectorSubOp<double>(StackMgr.getTop(), Rhs); |
1675 | 0 | } |
1676 | 0 | case OpCode::F64x2__mul: { |
1677 | 0 | ValVariant Rhs = StackMgr.pop(); |
1678 | 0 | return runVectorMulOp<double>(StackMgr.getTop(), Rhs); |
1679 | 0 | } |
1680 | 0 | case OpCode::F64x2__div: { |
1681 | 0 | ValVariant Rhs = StackMgr.pop(); |
1682 | 0 | return runVectorDivOp<double>(StackMgr.getTop(), Rhs); |
1683 | 0 | } |
1684 | 0 | case OpCode::F64x2__min: { |
1685 | 0 | ValVariant Rhs = StackMgr.pop(); |
1686 | 0 | return runVectorFMinOp<double>(StackMgr.getTop(), Rhs); |
1687 | 0 | } |
1688 | 0 | case OpCode::F64x2__max: { |
1689 | 0 | ValVariant Rhs = StackMgr.pop(); |
1690 | 0 | return runVectorFMaxOp<double>(StackMgr.getTop(), Rhs); |
1691 | 0 | } |
1692 | 0 | case OpCode::F64x2__pmin: { |
1693 | 0 | ValVariant Rhs = StackMgr.pop(); |
1694 | 0 | return runVectorMinOp<double>(StackMgr.getTop(), Rhs); |
1695 | 0 | } |
1696 | 0 | case OpCode::F64x2__pmax: { |
1697 | 0 | ValVariant Rhs = StackMgr.pop(); |
1698 | 0 | return runVectorMaxOp<double>(StackMgr.getTop(), Rhs); |
1699 | 0 | } |
1700 | 0 | case OpCode::I32x4__trunc_sat_f32x4_s: |
1701 | 0 | return runVectorTruncSatOp<float, int32_t>(StackMgr.getTop()); |
1702 | 0 | case OpCode::I32x4__trunc_sat_f32x4_u: |
1703 | 0 | return runVectorTruncSatOp<float, uint32_t>(StackMgr.getTop()); |
1704 | 0 | case OpCode::F32x4__convert_i32x4_s: |
1705 | 0 | return runVectorConvertOp<int32_t, float>(StackMgr.getTop()); |
1706 | 0 | case OpCode::F32x4__convert_i32x4_u: |
1707 | 0 | return runVectorConvertOp<uint32_t, float>(StackMgr.getTop()); |
1708 | 0 | case OpCode::I32x4__trunc_sat_f64x2_s_zero: |
1709 | 0 | return runVectorTruncSatOp<double, int32_t>(StackMgr.getTop()); |
1710 | 0 | case OpCode::I32x4__trunc_sat_f64x2_u_zero: |
1711 | 0 | return runVectorTruncSatOp<double, uint32_t>(StackMgr.getTop()); |
1712 | 0 | case OpCode::F64x2__convert_low_i32x4_s: |
1713 | 0 | return runVectorConvertOp<int32_t, double>(StackMgr.getTop()); |
1714 | 0 | case OpCode::F64x2__convert_low_i32x4_u: |
1715 | 0 | return runVectorConvertOp<uint32_t, double>(StackMgr.getTop()); |
1716 | 0 | case OpCode::F32x4__demote_f64x2_zero: |
1717 | 0 | return runVectorDemoteOp(StackMgr.getTop()); |
1718 | 0 | case OpCode::F64x2__promote_low_f32x4: |
1719 | 0 | return runVectorPromoteOp(StackMgr.getTop()); |
1720 | | |
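     |     | // i32x4.dot_i16x8_s: multiply corresponding i16 lanes into i32 and sum
     |     | // adjacent pairs. A scalar sketch of the lane computation (illustrative
     |     | // only, not part of the implementation):
     |     | //   for (size_t I = 0; I < 4; ++I)
     |     | //     R[I] = int32_t(A[2 * I]) * B[2 * I] +
     |     | //            int32_t(A[2 * I + 1]) * B[2 * I + 1];
     |     | // Both branches below implement this; the MSVC branch needs a manual loop.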
1721 | | #if defined(_MSC_VER) && !defined(__clang__) // MSVC |
1722 | | case OpCode::I32x4__dot_i16x8_s: { |
1723 | | using int32x8_t = SIMDArray<int32_t, 32>; |
1724 | | const ValVariant Val2 = StackMgr.pop(); |
1725 | | ValVariant &Val1 = StackMgr.getTop(); |
1726 | | |
1727 | | auto &V2 = Val2.get<int16x8_t>(); |
1728 | | auto &V1 = Val1.get<int16x8_t>(); |
1729 | | int32x8_t M; |
1730 | | |
1731 | | for (size_t I = 0; I < 8; ++I) { |
1732 | | M[I] = V1[I] * V2[I]; |
1733 | | } |
1734 | | |
1735 | | int32x4_t Result; |
1736 | | for (size_t I = 0; I < 4; ++I) { |
1737 | | Result[I] = M[I * 2] + M[I * 2 + 1]; |
1738 | | } |
1739 | | Val1.emplace<int32x4_t>(Result); |
1740 | | return {}; |
1741 | | } |
1742 | | #else |
1743 | 0 | case OpCode::I32x4__dot_i16x8_s: { |
1744 | 0 | using int32x8_t [[gnu::vector_size(32)]] = int32_t; |
1745 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1746 | 0 | ValVariant &Val1 = StackMgr.getTop(); |
1747 |  |
1748 | 0 | auto &V2 = Val2.get<int16x8_t>(); |
1749 | 0 | auto &V1 = Val1.get<int16x8_t>(); |
1750 | 0 | const auto M = __builtin_convertvector(V1, int32x8_t) * |
1751 | 0 | __builtin_convertvector(V2, int32x8_t); |
1752 | 0 | const int32x4_t L = {M[0], M[2], M[4], M[6]}; |
1753 | 0 | const int32x4_t R = {M[1], M[3], M[5], M[7]}; |
1754 | 0 | Val1.emplace<int32x4_t>(L + R); |
1755 |  |
1756 | 0 | return {}; |
1757 | 0 | } |
1758 | 0 | #endif // MSVC |
1759 | 0 | case OpCode::F32x4__ceil: |
1760 | 0 | return runVectorCeilOp<float>(StackMgr.getTop()); |
1761 | 0 | case OpCode::F32x4__floor: |
1762 | 0 | return runVectorFloorOp<float>(StackMgr.getTop()); |
1763 | 0 | case OpCode::F32x4__trunc: |
1764 | 0 | return runVectorTruncOp<float>(StackMgr.getTop()); |
1765 | 0 | case OpCode::F32x4__nearest: |
1766 | 0 | return runVectorNearestOp<float>(StackMgr.getTop()); |
1767 | 0 | case OpCode::F64x2__ceil: |
1768 | 0 | return runVectorCeilOp<double>(StackMgr.getTop()); |
1769 | 0 | case OpCode::F64x2__floor: |
1770 | 0 | return runVectorFloorOp<double>(StackMgr.getTop()); |
1771 | 0 | case OpCode::F64x2__trunc: |
1772 | 0 | return runVectorTruncOp<double>(StackMgr.getTop()); |
1773 | 0 | case OpCode::F64x2__nearest: |
1774 | 0 | return runVectorNearestOp<double>(StackMgr.getTop()); |
1775 | | |
1776 | | // Relaxed SIMD Instructions |
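     |     | // The relaxed-SIMD proposal allows implementation-defined results for these
     |     | // opcodes; this interpreter reuses the deterministic paths (e.g. relaxed
     |     | // truncations map to trunc_sat, relaxed min/max to the IEEE min/max ops,
     |     | // and relaxed swizzle zeroes lanes with out-of-range indices, matching
     |     | // i8x16.swizzle).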
1777 | 0 | case OpCode::I8x16__relaxed_swizzle: { |
1778 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1779 | 0 | ValVariant &Val1 = StackMgr.getTop(); |
1780 | 0 | const uint8x16_t &Index = Val2.get<uint8x16_t>(); |
1781 | 0 | uint8x16_t &Vector = Val1.get<uint8x16_t>(); |
1782 | 0 | uint8x16_t Result{}; |
1783 | 0 | for (size_t I = 0; I < 16; ++I) { |
1784 | 0 | const uint8_t SwizzleIndex = Index[I]; |
1785 | 0 | if (SwizzleIndex < 16) { |
1786 | 0 | Result[I] = Vector[SwizzleIndex]; |
1787 | 0 | } else { |
1788 | 0 | Result[I] = 0; |
1789 | 0 | } |
1790 | 0 | } |
1791 | 0 | Vector = Result; |
1792 | 0 | return {}; |
1793 | 0 | } |
1794 | 0 | case OpCode::I32x4__relaxed_trunc_f32x4_s: |
1795 | 0 | return runVectorTruncSatOp<float, int32_t>(StackMgr.getTop()); |
1796 | 0 | case OpCode::I32x4__relaxed_trunc_f32x4_u: |
1797 | 0 | return runVectorTruncSatOp<float, uint32_t>(StackMgr.getTop()); |
1798 | 0 | case OpCode::I32x4__relaxed_trunc_f64x2_s_zero: |
1799 | 0 | return runVectorTruncSatOp<double, int32_t>(StackMgr.getTop()); |
1800 | 0 | case OpCode::I32x4__relaxed_trunc_f64x2_u_zero: |
1801 | 0 | return runVectorTruncSatOp<double, uint32_t>(StackMgr.getTop()); |
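     |     | // relaxed_madd/nmadd are evaluated as an unfused multiply followed by an
     |     | // add (nmadd negates the product), one of the results the proposal permits.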
1802 | 0 | case OpCode::F32x4__relaxed_madd: { |
1803 | 0 | const ValVariant Val3 = StackMgr.pop(); |
1804 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1805 | 0 | runVectorMulOp<float>(StackMgr.getTop(), Val2); |
1806 | 0 | return runVectorAddOp<float>(StackMgr.getTop(), Val3); |
1807 | 0 | } |
1808 | 0 | case OpCode::F32x4__relaxed_nmadd: { |
1809 | 0 | const ValVariant Val3 = StackMgr.pop(); |
1810 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1811 | 0 | runVectorNegOp<float>(StackMgr.getTop()); |
1812 | 0 | runVectorMulOp<float>(StackMgr.getTop(), Val2); |
1813 | 0 | return runVectorAddOp<float>(StackMgr.getTop(), Val3); |
1814 | 0 | } |
1815 | 0 | case OpCode::F64x2__relaxed_madd: { |
1816 | 0 | const ValVariant Val3 = StackMgr.pop(); |
1817 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1818 | 0 | runVectorMulOp<double>(StackMgr.getTop(), Val2); |
1819 | 0 | return runVectorAddOp<double>(StackMgr.getTop(), Val3); |
1820 | 0 | } |
1821 | 0 | case OpCode::F64x2__relaxed_nmadd: { |
1822 | 0 | const ValVariant Val3 = StackMgr.pop(); |
1823 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1824 | 0 | runVectorMulOp<double>(StackMgr.getTop(), Val2); |
1825 | 0 | runVectorNegOp<double>(StackMgr.getTop()); |
1826 | 0 | return runVectorAddOp<double>(StackMgr.getTop(), Val3); |
1827 | 0 | } |
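     |     | // relaxed_laneselect picks between the two operands under the popped mask;
     |     | // the proposal permits either bitwise or per-lane selection, and the choice
     |     | // is left to runVectorRelaxedLaneselectOp.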
1828 | 0 | case OpCode::I8x16__relaxed_laneselect: { |
1829 | 0 | const ValVariant Mask = StackMgr.pop(); |
1830 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1831 | 0 | return runVectorRelaxedLaneselectOp<uint8_t>(StackMgr.getTop(), Val2, |
1832 | 0 | Mask); |
1833 | 0 | } |
1834 | 0 | case OpCode::I16x8__relaxed_laneselect: { |
1835 | 0 | const ValVariant Mask = StackMgr.pop(); |
1836 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1837 | 0 | return runVectorRelaxedLaneselectOp<uint16_t>(StackMgr.getTop(), Val2, |
1838 | 0 | Mask); |
1839 | 0 | } |
1840 | 0 | case OpCode::I32x4__relaxed_laneselect: { |
1841 | 0 | const ValVariant Mask = StackMgr.pop(); |
1842 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1843 | 0 | return runVectorRelaxedLaneselectOp<uint32_t>(StackMgr.getTop(), Val2, |
1844 | 0 | Mask); |
1845 | 0 | } |
1846 | 0 | case OpCode::I64x2__relaxed_laneselect: { |
1847 | 0 | const ValVariant Mask = StackMgr.pop(); |
1848 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1849 | 0 | return runVectorRelaxedLaneselectOp<uint64_t>(StackMgr.getTop(), Val2, |
1850 | 0 | Mask); |
1851 | 0 | } |
1852 | 0 | case OpCode::F32x4__relaxed_min: { |
1853 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1854 | 0 | return runVectorFMinOp<float>(StackMgr.getTop(), Val2); |
1855 | 0 | } |
1856 | 0 | case OpCode::F32x4__relaxed_max: { |
1857 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1858 | 0 | return runVectorFMaxOp<float>(StackMgr.getTop(), Val2); |
1859 | 0 | } |
1860 | 0 | case OpCode::F64x2__relaxed_min: { |
1861 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1862 | 0 | return runVectorFMinOp<double>(StackMgr.getTop(), Val2); |
1863 | 0 | } |
1864 | 0 | case OpCode::F64x2__relaxed_max: { |
1865 | 0 | const ValVariant Val2 = StackMgr.pop(); |
1866 | 0 | return runVectorFMaxOp<double>(StackMgr.getTop(), Val2); |
1867 | 0 | } |
1868 | 0 | case OpCode::I16x8__relaxed_q15mulr_s: { |
1869 | 0 | ValVariant Rhs = StackMgr.pop(); |
1870 | 0 | return runVectorQ15MulSatOp(StackMgr.getTop(), Rhs); |
1871 | 0 | } |
1872 | 0 | case OpCode::I16x8__relaxed_dot_i8x16_i7x16_s: { |
1873 | 0 | ValVariant Rhs = StackMgr.pop(); |
1874 | 0 | return runVectorRelaxedIntegerDotProductOp(StackMgr.getTop(), Rhs); |
1875 | 0 | } |
1876 | 0 | case OpCode::I32x4__relaxed_dot_i8x16_i7x16_add_s: { |
1877 | 0 | ValVariant C = StackMgr.pop(); |
1878 | 0 | ValVariant Rhs = StackMgr.pop(); |
1879 | 0 | return runVectorRelaxedIntegerDotProductOpAdd(StackMgr.getTop(), Rhs, C); |
1880 | 0 | } |
1881 | | |
1882 | | // Atomic Instructions |
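     |     | // Each atomic opcode resolves its memory instance from the instruction's
     |     | // target index. The first template parameter is the value type on the
     |     | // stack, the second the in-memory access width; the *_u forms zero-extend
     |     | // the narrower in-memory value.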
1883 | 0 | case OpCode::Atomic__fence: |
1884 | 0 | return runMemoryFenceOp(); |
1885 | 0 | case OpCode::Memory__atomic__notify: |
1886 | 0 | return runAtomicNotifyOp( |
1887 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1888 | 0 | case OpCode::Memory__atomic__wait32: |
1889 | 0 | return runAtomicWaitOp<int32_t>( |
1890 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1891 | 0 | case OpCode::Memory__atomic__wait64: |
1892 | 0 | return runAtomicWaitOp<int64_t>( |
1893 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1894 | 0 | case OpCode::I32__atomic__load: |
1895 | 0 | return runAtomicLoadOp<int32_t, uint32_t>( |
1896 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1897 | 0 | case OpCode::I64__atomic__load: |
1898 | 0 | return runAtomicLoadOp<int64_t, uint64_t>( |
1899 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1900 | 0 | case OpCode::I32__atomic__load8_u: |
1901 | 0 | return runAtomicLoadOp<uint32_t, uint8_t>( |
1902 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1903 | 0 | case OpCode::I32__atomic__load16_u: |
1904 | 0 | return runAtomicLoadOp<uint32_t, uint16_t>( |
1905 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1906 | 0 | case OpCode::I64__atomic__load8_u: |
1907 | 0 | return runAtomicLoadOp<uint64_t, uint8_t>( |
1908 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1909 | 0 | case OpCode::I64__atomic__load16_u: |
1910 | 0 | return runAtomicLoadOp<uint64_t, uint16_t>( |
1911 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1912 | 0 | case OpCode::I64__atomic__load32_u: |
1913 | 0 | return runAtomicLoadOp<uint64_t, uint32_t>( |
1914 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1915 | 0 | case OpCode::I32__atomic__store: |
1916 | 0 | return runAtomicStoreOp<int32_t, uint32_t>( |
1917 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1918 | 0 | case OpCode::I64__atomic__store: |
1919 | 0 | return runAtomicStoreOp<int64_t, uint64_t>( |
1920 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1921 | 0 | case OpCode::I32__atomic__store8: |
1922 | 0 | return runAtomicStoreOp<uint32_t, uint8_t>( |
1923 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1924 | 0 | case OpCode::I32__atomic__store16: |
1925 | 0 | return runAtomicStoreOp<uint32_t, uint16_t>( |
1926 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1927 | 0 | case OpCode::I64__atomic__store8: |
1928 | 0 | return runAtomicStoreOp<uint64_t, uint8_t>( |
1929 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1930 | 0 | case OpCode::I64__atomic__store16: |
1931 | 0 | return runAtomicStoreOp<uint64_t, uint16_t>( |
1932 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1933 | 0 | case OpCode::I64__atomic__store32: |
1934 | 0 | return runAtomicStoreOp<uint64_t, uint32_t>( |
1935 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1936 | 0 | case OpCode::I32__atomic__rmw__add: |
1937 | 0 | return runAtomicAddOp<int32_t, uint32_t>( |
1938 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1939 | 0 | case OpCode::I64__atomic__rmw__add: |
1940 | 0 | return runAtomicAddOp<int64_t, uint64_t>( |
1941 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1942 | 0 | case OpCode::I32__atomic__rmw8__add_u: |
1943 | 0 | return runAtomicAddOp<uint32_t, uint8_t>( |
1944 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1945 | 0 | case OpCode::I32__atomic__rmw16__add_u: |
1946 | 0 | return runAtomicAddOp<uint32_t, uint16_t>( |
1947 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1948 | 0 | case OpCode::I64__atomic__rmw8__add_u: |
1949 | 0 | return runAtomicAddOp<uint64_t, uint8_t>( |
1950 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1951 | 0 | case OpCode::I64__atomic__rmw16__add_u: |
1952 | 0 | return runAtomicAddOp<uint64_t, uint16_t>( |
1953 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1954 | 0 | case OpCode::I64__atomic__rmw32__add_u: |
1955 | 0 | return runAtomicAddOp<uint64_t, uint32_t>( |
1956 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1957 | 0 | case OpCode::I32__atomic__rmw__sub: |
1958 | 0 | return runAtomicSubOp<int32_t, uint32_t>( |
1959 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1960 | 0 | case OpCode::I64__atomic__rmw__sub: |
1961 | 0 | return runAtomicSubOp<int64_t, uint64_t>( |
1962 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1963 | 0 | case OpCode::I32__atomic__rmw8__sub_u: |
1964 | 0 | return runAtomicSubOp<uint32_t, uint8_t>( |
1965 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1966 | 0 | case OpCode::I32__atomic__rmw16__sub_u: |
1967 | 0 | return runAtomicSubOp<uint32_t, uint16_t>( |
1968 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1969 | 0 | case OpCode::I64__atomic__rmw8__sub_u: |
1970 | 0 | return runAtomicSubOp<uint64_t, uint8_t>( |
1971 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1972 | 0 | case OpCode::I64__atomic__rmw16__sub_u: |
1973 | 0 | return runAtomicSubOp<uint64_t, uint16_t>( |
1974 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1975 | 0 | case OpCode::I64__atomic__rmw32__sub_u: |
1976 | 0 | return runAtomicSubOp<uint64_t, uint32_t>( |
1977 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1978 | 0 | case OpCode::I32__atomic__rmw__and: |
1979 | 0 | return runAtomicAndOp<int32_t, uint32_t>( |
1980 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1981 | 0 | case OpCode::I64__atomic__rmw__and: |
1982 | 0 | return runAtomicAndOp<int64_t, uint64_t>( |
1983 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1984 | 0 | case OpCode::I32__atomic__rmw8__and_u: |
1985 | 0 | return runAtomicAndOp<uint32_t, uint8_t>( |
1986 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1987 | 0 | case OpCode::I32__atomic__rmw16__and_u: |
1988 | 0 | return runAtomicAndOp<uint32_t, uint16_t>( |
1989 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1990 | 0 | case OpCode::I64__atomic__rmw8__and_u: |
1991 | 0 | return runAtomicAndOp<uint64_t, uint8_t>( |
1992 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1993 | 0 | case OpCode::I64__atomic__rmw16__and_u: |
1994 | 0 | return runAtomicAndOp<uint64_t, uint16_t>( |
1995 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1996 | 0 | case OpCode::I64__atomic__rmw32__and_u: |
1997 | 0 | return runAtomicAndOp<uint64_t, uint32_t>( |
1998 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
1999 | 0 | case OpCode::I32__atomic__rmw__or: |
2000 | 0 | return runAtomicOrOp<int32_t, uint32_t>( |
2001 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2002 | 0 | case OpCode::I64__atomic__rmw__or: |
2003 | 0 | return runAtomicOrOp<int64_t, uint64_t>( |
2004 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2005 | 0 | case OpCode::I32__atomic__rmw8__or_u: |
2006 | 0 | return runAtomicOrOp<uint32_t, uint8_t>( |
2007 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2008 | 0 | case OpCode::I32__atomic__rmw16__or_u: |
2009 | 0 | return runAtomicOrOp<uint32_t, uint16_t>( |
2010 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2011 | 0 | case OpCode::I64__atomic__rmw8__or_u: |
2012 | 0 | return runAtomicOrOp<uint64_t, uint8_t>( |
2013 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2014 | 0 | case OpCode::I64__atomic__rmw16__or_u: |
2015 | 0 | return runAtomicOrOp<uint64_t, uint16_t>( |
2016 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2017 | 0 | case OpCode::I64__atomic__rmw32__or_u: |
2018 | 0 | return runAtomicOrOp<uint64_t, uint32_t>( |
2019 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2020 | 0 | case OpCode::I32__atomic__rmw__xor: |
2021 | 0 | return runAtomicXorOp<int32_t, uint32_t>( |
2022 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2023 | 0 | case OpCode::I64__atomic__rmw__xor: |
2024 | 0 | return runAtomicXorOp<int64_t, uint64_t>( |
2025 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2026 | 0 | case OpCode::I32__atomic__rmw8__xor_u: |
2027 | 0 | return runAtomicXorOp<uint32_t, uint8_t>( |
2028 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2029 | 0 | case OpCode::I32__atomic__rmw16__xor_u: |
2030 | 0 | return runAtomicXorOp<uint32_t, uint16_t>( |
2031 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2032 | 0 | case OpCode::I64__atomic__rmw8__xor_u: |
2033 | 0 | return runAtomicXorOp<uint64_t, uint8_t>( |
2034 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2035 | 0 | case OpCode::I64__atomic__rmw16__xor_u: |
2036 | 0 | return runAtomicXorOp<uint64_t, uint16_t>( |
2037 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2038 | 0 | case OpCode::I64__atomic__rmw32__xor_u: |
2039 | 0 | return runAtomicXorOp<uint64_t, uint32_t>( |
2040 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2041 | 0 | case OpCode::I32__atomic__rmw__xchg: |
2042 | 0 | return runAtomicExchangeOp<int32_t, uint32_t>( |
2043 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2044 | 0 | case OpCode::I64__atomic__rmw__xchg: |
2045 | 0 | return runAtomicExchangeOp<int64_t, uint64_t>( |
2046 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2047 | 0 | case OpCode::I32__atomic__rmw8__xchg_u: |
2048 | 0 | return runAtomicExchangeOp<uint32_t, uint8_t>( |
2049 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2050 | 0 | case OpCode::I32__atomic__rmw16__xchg_u: |
2051 | 0 | return runAtomicExchangeOp<uint32_t, uint16_t>( |
2052 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2053 | 0 | case OpCode::I64__atomic__rmw8__xchg_u: |
2054 | 0 | return runAtomicExchangeOp<uint64_t, uint8_t>( |
2055 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2056 | 0 | case OpCode::I64__atomic__rmw16__xchg_u: |
2057 | 0 | return runAtomicExchangeOp<uint64_t, uint16_t>( |
2058 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2059 | 0 | case OpCode::I64__atomic__rmw32__xchg_u: |
2060 | 0 | return runAtomicExchangeOp<uint64_t, uint32_t>( |
2061 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2062 | 0 | case OpCode::I32__atomic__rmw__cmpxchg: |
2063 | 0 | return runAtomicCompareExchangeOp<int32_t, uint32_t>( |
2064 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2065 | 0 | case OpCode::I64__atomic__rmw__cmpxchg: |
2066 | 0 | return runAtomicCompareExchangeOp<int64_t, uint64_t>( |
2067 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2068 | 0 | case OpCode::I32__atomic__rmw8__cmpxchg_u: |
2069 | 0 | return runAtomicCompareExchangeOp<uint32_t, uint8_t>( |
2070 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2071 | 0 | case OpCode::I32__atomic__rmw16__cmpxchg_u: |
2072 | 0 | return runAtomicCompareExchangeOp<uint32_t, uint16_t>( |
2073 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2074 | 0 | case OpCode::I64__atomic__rmw8__cmpxchg_u: |
2075 | 0 | return runAtomicCompareExchangeOp<uint64_t, uint8_t>( |
2076 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2077 | 0 | case OpCode::I64__atomic__rmw16__cmpxchg_u: |
2078 | 0 | return runAtomicCompareExchangeOp<uint64_t, uint16_t>( |
2079 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2080 | 0 | case OpCode::I64__atomic__rmw32__cmpxchg_u: |
2081 | 0 | return runAtomicCompareExchangeOp<uint64_t, uint32_t>( |
2082 | 0 | StackMgr, *getMemInstByIdx(StackMgr, Instr.getTargetIndex()), Instr); |
2083 | | |
2084 | 0 | default: |
2085 | 0 | return {}; |
2086 | 0 | } |
2087 | 0 | }; |
2088 |  |
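     |     | // Main interpreter loop: update statistics and enforce the cost limit,
     |     | // dispatch one instruction, and on failure capture a stack trace (and
     |     | // optionally a coredump) before propagating the error.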
2089 | 0 | while (PC != PCEnd) { |
2090 | 0 | if (Stat) { |
2091 | 0 | OpCode Code = PC->getOpCode(); |
2092 | 0 | if (Conf.getStatisticsConfigure().isInstructionCounting()) { |
2093 | 0 | Stat->incInstrCount(); |
2094 | 0 | } |
2095 |  |     // Add the instruction cost. Note: the if-else case needs additional handling.
2096 | 0 | if (Conf.getStatisticsConfigure().isCostMeasuring()) { |
2097 | 0 | if (unlikely(!Stat->addInstrCost(Code))) { |
2098 | 0 | const AST::Instruction &Instr = *PC; |
2099 | 0 | spdlog::error( |
2100 | 0 | ErrInfo::InfoInstruction(Instr.getOpCode(), Instr.getOffset())); |
2101 | 0 | return Unexpect(ErrCode::Value::CostLimitExceeded); |
2102 | 0 | } |
2103 | 0 | } |
2104 | 0 | } |
2105 | 0 | EXPECTED_TRY(Dispatch().map_error([this, &StackMgr](auto E) { |
2106 | 0 | StackTraceSize = interpreterStackTrace(StackMgr, StackTrace).size(); |
2107 | 0 | if (Conf.getRuntimeConfigure().isEnableCoredump() && |
2108 | 0 | E.getErrCodePhase() == WasmPhase::Execution) { |
2109 | 0 | Coredump::generateCoredump( |
2110 | 0 | StackMgr, Conf.getRuntimeConfigure().isCoredumpWasmgdb()); |
2111 | 0 | } |
2112 | 0 | return E; |
2113 | 0 | })); |
2114 | 0 | PC++; |
2115 | 0 | } |
2116 | 0 | return {}; |
2117 | 0 | } |
2118 | | |
2119 | | } // namespace Executor |
2120 | | } // namespace WasmEdge |