Coverage Report

Created: 2024-10-16 07:58

/src/wasmer/lib/compiler-singlepass/src/machine.rs
Line
Count
Source (jump to first uncovered line)
1
use crate::common_decl::*;
2
use crate::location::{Location, Reg};
3
use crate::machine_arm64::MachineARM64;
4
use crate::machine_x64::MachineX86_64;
5
use crate::unwind::UnwindInstructions;
6
use dynasmrt::{AssemblyOffset, DynamicLabel};
7
use std::collections::BTreeMap;
8
use std::fmt::Debug;
9
pub use wasmer_compiler::wasmparser::MemArg;
10
use wasmer_compiler::wasmparser::ValType as WpType;
11
use wasmer_types::{
12
    Architecture, CallingConvention, CompileError, CustomSection, FunctionBody, FunctionIndex,
13
    FunctionType, InstructionAddressMap, Relocation, RelocationTarget, Target, TrapCode,
14
    TrapInformation, VMOffsets,
15
};
16
17
pub type Label = DynamicLabel;
18
pub type Offset = AssemblyOffset;
19
20
#[allow(dead_code)]
21
#[derive(Clone, PartialEq)]
22
pub enum Value {
23
    I8(i8),
24
    I32(i32),
25
    I64(i64),
26
    F32(f32),
27
    F64(f64),
28
}
29
30
#[macro_export]
31
macro_rules! codegen_error {
32
    ($($arg:tt)*) => {return Err(CompileError::Codegen(format!($($arg)*)))}
33
}
34
35
pub trait MaybeImmediate {
36
    fn imm_value(&self) -> Option<Value>;
37
0
    fn is_imm(&self) -> bool {
38
0
        self.imm_value().is_some()
39
0
    }
Unexecuted instantiation: <_ as wasmer_compiler_singlepass::machine::MaybeImmediate>::is_imm
Unexecuted instantiation: <_ as wasmer_compiler_singlepass::machine::MaybeImmediate>::is_imm
40
}
41
42
/// A trap table for a `RunnableModuleInfo`.
43
#[derive(Clone, Debug, Default)]
44
pub struct TrapTable {
45
    /// Mappings from offsets in generated machine code to the corresponding trap code.
46
    pub offset_to_code: BTreeMap<usize, TrapCode>,
47
}
48
49
// all machine seems to have a page this size, so not per arch for now
50
pub const NATIVE_PAGE_SIZE: usize = 4096;
51
52
pub struct MachineStackOffset(pub usize);
53
54
pub trait Machine {
55
    type GPR: Copy + Eq + Debug + Reg;
56
    type SIMD: Copy + Eq + Debug + Reg;
57
    /// Get current assembler offset
58
    fn assembler_get_offset(&self) -> Offset;
59
    /// Convert from a GPR register to index register
60
    fn index_from_gpr(&self, x: Self::GPR) -> RegisterIndex;
61
    /// Convert from an SIMD register
62
    fn index_from_simd(&self, x: Self::SIMD) -> RegisterIndex;
63
    /// Get the GPR that hold vmctx
64
    fn get_vmctx_reg(&self) -> Self::GPR;
65
    /// Picks an unused general purpose register for local/stack/argument use.
66
    ///
67
    /// This method does not mark the register as used
68
    fn pick_gpr(&self) -> Option<Self::GPR>;
69
    /// Picks an unused general purpose register for internal temporary use.
70
    ///
71
    /// This method does not mark the register as used
72
    fn pick_temp_gpr(&self) -> Option<Self::GPR>;
73
    /// Get all used GPR
74
    fn get_used_gprs(&self) -> Vec<Self::GPR>;
75
    /// Get all used SIMD regs
76
    fn get_used_simd(&self) -> Vec<Self::SIMD>;
77
    /// Picks an unused general pupose register and mark it as used
78
    fn acquire_temp_gpr(&mut self) -> Option<Self::GPR>;
79
    /// Releases a temporary GPR.
80
    fn release_gpr(&mut self, gpr: Self::GPR);
81
    /// Specify that a given register is in use.
82
    fn reserve_unused_temp_gpr(&mut self, gpr: Self::GPR) -> Self::GPR;
83
    /// reserve a GPR
84
    fn reserve_gpr(&mut self, gpr: Self::GPR);
85
    /// Push used gpr to the stack. Return the bytes taken on the stack
86
    fn push_used_gpr(&mut self, grps: &[Self::GPR]) -> Result<usize, CompileError>;
87
    /// Pop used gpr to the stack
88
    fn pop_used_gpr(&mut self, grps: &[Self::GPR]) -> Result<(), CompileError>;
89
    /// Picks an unused SIMD register.
90
    ///
91
    /// This method does not mark the register as used
92
    fn pick_simd(&self) -> Option<Self::SIMD>;
93
    /// Picks an unused SIMD register for internal temporary use.
94
    ///
95
    /// This method does not mark the register as used
96
    fn pick_temp_simd(&self) -> Option<Self::SIMD>;
97
    /// Acquires a temporary XMM register.
98
    fn acquire_temp_simd(&mut self) -> Option<Self::SIMD>;
99
    /// reserve a SIMD register
100
    fn reserve_simd(&mut self, simd: Self::SIMD);
101
    /// Releases a temporary XMM register.
102
    fn release_simd(&mut self, simd: Self::SIMD);
103
    /// Push used simd regs to the stack. Return bytes taken on the stack
104
    fn push_used_simd(&mut self, simds: &[Self::SIMD]) -> Result<usize, CompileError>;
105
    /// Pop used simd regs to the stack
106
    fn pop_used_simd(&mut self, simds: &[Self::SIMD]) -> Result<(), CompileError>;
107
    /// Return a rounded stack adjustement value (must be multiple of 16bytes on ARM64 for example)
108
    fn round_stack_adjust(&self, value: usize) -> usize;
109
    /// Set the source location of the Wasm to the given offset.
110
    fn set_srcloc(&mut self, offset: u32);
111
    /// Marks each address in the code range emitted by `f` with the trap code `code`.
112
    fn mark_address_range_with_trap_code(&mut self, code: TrapCode, begin: usize, end: usize);
113
    /// Marks one address as trappable with trap code `code`.
114
    fn mark_address_with_trap_code(&mut self, code: TrapCode);
115
    /// Marks the instruction as trappable with trap code `code`. return "begin" offset
116
    fn mark_instruction_with_trap_code(&mut self, code: TrapCode) -> usize;
117
    /// Pushes the instruction to the address map, calculating the offset from a
118
    /// provided beginning address.
119
    fn mark_instruction_address_end(&mut self, begin: usize);
120
    /// Insert a StackOverflow (at offset 0)
121
    fn insert_stackoverflow(&mut self);
122
    /// Get all current TrapInformation
123
    fn collect_trap_information(&self) -> Vec<TrapInformation>;
124
    // Get all intructions address map
125
    fn instructions_address_map(&self) -> Vec<InstructionAddressMap>;
126
    /// Memory location for a local on the stack
127
    /// Like Location::Memory(GPR::RBP, -(self.stack_offset.0 as i32)) for x86_64
128
    fn local_on_stack(&mut self, stack_offset: i32) -> Location<Self::GPR, Self::SIMD>;
129
    /// Adjust stack for locals
130
    /// Like assembler.emit_sub(Size::S64, Location::Imm32(delta_stack_offset as u32), Location::GPR(GPR::RSP))
131
    fn adjust_stack(&mut self, delta_stack_offset: u32) -> Result<(), CompileError>;
132
    /// restore stack
133
    /// Like assembler.emit_add(Size::S64, Location::Imm32(delta_stack_offset as u32), Location::GPR(GPR::RSP))
134
    fn restore_stack(&mut self, delta_stack_offset: u32) -> Result<(), CompileError>;
135
    /// Pop stack of locals
136
    /// Like assembler.emit_add(Size::S64, Location::Imm32(delta_stack_offset as u32), Location::GPR(GPR::RSP))
137
    fn pop_stack_locals(&mut self, delta_stack_offset: u32) -> Result<(), CompileError>;
138
    /// Zero a location taht is 32bits
139
    fn zero_location(
140
        &mut self,
141
        size: Size,
142
        location: Location<Self::GPR, Self::SIMD>,
143
    ) -> Result<(), CompileError>;
144
    /// GPR Reg used for local pointer on the stack
145
    fn local_pointer(&self) -> Self::GPR;
146
    /// push a value on the stack for a native call
147
    fn move_location_for_native(
148
        &mut self,
149
        size: Size,
150
        loc: Location<Self::GPR, Self::SIMD>,
151
        dest: Location<Self::GPR, Self::SIMD>,
152
    ) -> Result<(), CompileError>;
153
    /// Determine whether a local should be allocated on the stack.
154
    fn is_local_on_stack(&self, idx: usize) -> bool;
155
    /// Determine a local's location.
156
    fn get_local_location(
157
        &self,
158
        idx: usize,
159
        callee_saved_regs_size: usize,
160
    ) -> Location<Self::GPR, Self::SIMD>;
161
    /// Move a local to the stack
162
    /// Like emit_mov(Size::S64, location, Location::Memory(GPR::RBP, -(self.stack_offset.0 as i32)));
163
    fn move_local(
164
        &mut self,
165
        stack_offset: i32,
166
        location: Location<Self::GPR, Self::SIMD>,
167
    ) -> Result<(), CompileError>;
168
    /// List of register to save, depending on the CallingConvention
169
    fn list_to_save(
170
        &self,
171
        calling_convention: CallingConvention,
172
    ) -> Vec<Location<Self::GPR, Self::SIMD>>;
173
    /// Get param location (to build a call, using SP for stack args)
174
    fn get_param_location(
175
        &self,
176
        idx: usize,
177
        sz: Size,
178
        stack_offset: &mut usize,
179
        calling_convention: CallingConvention,
180
    ) -> Location<Self::GPR, Self::SIMD>;
181
    /// Get call param location (from a call, using FP for stack args)
182
    fn get_call_param_location(
183
        &self,
184
        idx: usize,
185
        sz: Size,
186
        stack_offset: &mut usize,
187
        calling_convention: CallingConvention,
188
    ) -> Location<Self::GPR, Self::SIMD>;
189
    /// Get simple param location
190
    fn get_simple_param_location(
191
        &self,
192
        idx: usize,
193
        calling_convention: CallingConvention,
194
    ) -> Location<Self::GPR, Self::SIMD>;
195
    /// move a location to another
196
    fn move_location(
197
        &mut self,
198
        size: Size,
199
        source: Location<Self::GPR, Self::SIMD>,
200
        dest: Location<Self::GPR, Self::SIMD>,
201
    ) -> Result<(), CompileError>;
202
    /// move a location to another, with zero or sign extension
203
    fn move_location_extend(
204
        &mut self,
205
        size_val: Size,
206
        signed: bool,
207
        source: Location<Self::GPR, Self::SIMD>,
208
        size_op: Size,
209
        dest: Location<Self::GPR, Self::SIMD>,
210
    ) -> Result<(), CompileError>;
211
    /// Load a memory value to a register, zero extending to 64bits.
212
    /// Panic if gpr is not a Location::GPR or if mem is not a Memory(2)
213
    fn load_address(
214
        &mut self,
215
        size: Size,
216
        gpr: Location<Self::GPR, Self::SIMD>,
217
        mem: Location<Self::GPR, Self::SIMD>,
218
    ) -> Result<(), CompileError>;
219
    /// Init the stack loc counter
220
    fn init_stack_loc(
221
        &mut self,
222
        init_stack_loc_cnt: u64,
223
        last_stack_loc: Location<Self::GPR, Self::SIMD>,
224
    ) -> Result<(), CompileError>;
225
    /// Restore save_area
226
    fn restore_saved_area(&mut self, saved_area_offset: i32) -> Result<(), CompileError>;
227
    /// Pop a location
228
    fn pop_location(
229
        &mut self,
230
        location: Location<Self::GPR, Self::SIMD>,
231
    ) -> Result<(), CompileError>;
232
    /// Create a new `MachineState` with default values.
233
    fn new_machine_state(&self) -> MachineState;
234
235
    /// Finalize the assembler
236
    fn assembler_finalize(self) -> Result<Vec<u8>, CompileError>;
237
238
    /// get_offset of Assembler
239
    fn get_offset(&self) -> Offset;
240
241
    /// finalize a function
242
    fn finalize_function(&mut self) -> Result<(), CompileError>;
243
244
    /// emit native function prolog (depending on the calling Convention, like "PUSH RBP / MOV RSP, RBP")
245
    fn emit_function_prolog(&mut self) -> Result<(), CompileError>;
246
    /// emit native function epilog (depending on the calling Convention, like "MOV RBP, RSP / POP RBP")
247
    fn emit_function_epilog(&mut self) -> Result<(), CompileError>;
248
    /// handle return value, with optionnal cannonicalization if wanted
249
    fn emit_function_return_value(
250
        &mut self,
251
        ty: WpType,
252
        cannonicalize: bool,
253
        loc: Location<Self::GPR, Self::SIMD>,
254
    ) -> Result<(), CompileError>;
255
    /// Handle copy to SIMD register from ret value (if needed by the arch/calling convention)
256
    fn emit_function_return_float(&mut self) -> Result<(), CompileError>;
257
    /// Is NaN canonicalization supported
258
    fn arch_supports_canonicalize_nan(&self) -> bool;
259
    /// Cannonicalize a NaN (or panic if not supported)
260
    fn canonicalize_nan(
261
        &mut self,
262
        sz: Size,
263
        input: Location<Self::GPR, Self::SIMD>,
264
        output: Location<Self::GPR, Self::SIMD>,
265
    ) -> Result<(), CompileError>;
266
267
    /// emit an Illegal Opcode, associated with a trapcode
268
    fn emit_illegal_op(&mut self, trp: TrapCode) -> Result<(), CompileError>;
269
    /// create a new label
270
    fn get_label(&mut self) -> Label;
271
    /// emit a label
272
    fn emit_label(&mut self, label: Label) -> Result<(), CompileError>;
273
274
    /// get the gpr use for call. like RAX on x86_64
275
    fn get_grp_for_call(&self) -> Self::GPR;
276
    /// Emit a call using the value in register
277
    fn emit_call_register(&mut self, register: Self::GPR) -> Result<(), CompileError>;
278
    /// Emit a call to a label
279
    fn emit_call_label(&mut self, label: Label) -> Result<(), CompileError>;
280
    /// Does an trampoline is neededfor indirect call
281
    fn arch_requires_indirect_call_trampoline(&self) -> bool;
282
    /// indirect call with trampoline
283
    fn arch_emit_indirect_call_with_trampoline(
284
        &mut self,
285
        location: Location<Self::GPR, Self::SIMD>,
286
    ) -> Result<(), CompileError>;
287
    /// emit a call to a location
288
    fn emit_call_location(
289
        &mut self,
290
        location: Location<Self::GPR, Self::SIMD>,
291
    ) -> Result<(), CompileError>;
292
    /// get the gpr for the return of generic values
293
    fn get_gpr_for_ret(&self) -> Self::GPR;
294
    /// get the simd for the return of float/double values
295
    fn get_simd_for_ret(&self) -> Self::SIMD;
296
297
    /// Emit a debug breakpoint
298
    fn emit_debug_breakpoint(&mut self) -> Result<(), CompileError>;
299
300
    /// load the address of a memory location (will panic if src is not a memory)
301
    /// like LEA opcode on x86_64
302
    fn location_address(
303
        &mut self,
304
        size: Size,
305
        source: Location<Self::GPR, Self::SIMD>,
306
        dest: Location<Self::GPR, Self::SIMD>,
307
    ) -> Result<(), CompileError>;
308
309
    /// And src & dst -> dst (with or without flags)
310
    fn location_and(
311
        &mut self,
312
        size: Size,
313
        source: Location<Self::GPR, Self::SIMD>,
314
        dest: Location<Self::GPR, Self::SIMD>,
315
        flags: bool,
316
    ) -> Result<(), CompileError>;
317
    /// Xor src & dst -> dst (with or without flags)
318
    fn location_xor(
319
        &mut self,
320
        size: Size,
321
        source: Location<Self::GPR, Self::SIMD>,
322
        dest: Location<Self::GPR, Self::SIMD>,
323
        flags: bool,
324
    ) -> Result<(), CompileError>;
325
    /// Or src & dst -> dst (with or without flags)
326
    fn location_or(
327
        &mut self,
328
        size: Size,
329
        source: Location<Self::GPR, Self::SIMD>,
330
        dest: Location<Self::GPR, Self::SIMD>,
331
        flags: bool,
332
    ) -> Result<(), CompileError>;
333
334
    /// Add src+dst -> dst (with or without flags)
335
    fn location_add(
336
        &mut self,
337
        size: Size,
338
        source: Location<Self::GPR, Self::SIMD>,
339
        dest: Location<Self::GPR, Self::SIMD>,
340
        flags: bool,
341
    ) -> Result<(), CompileError>;
342
    /// Sub dst-src -> dst (with or without flags)
343
    fn location_sub(
344
        &mut self,
345
        size: Size,
346
        source: Location<Self::GPR, Self::SIMD>,
347
        dest: Location<Self::GPR, Self::SIMD>,
348
        flags: bool,
349
    ) -> Result<(), CompileError>;
350
    /// -src -> dst
351
    fn location_neg(
352
        &mut self,
353
        size_val: Size, // size of src
354
        signed: bool,
355
        source: Location<Self::GPR, Self::SIMD>,
356
        size_op: Size,
357
        dest: Location<Self::GPR, Self::SIMD>,
358
    ) -> Result<(), CompileError>;
359
360
    /// Cmp src - dst and set flags
361
    fn location_cmp(
362
        &mut self,
363
        size: Size,
364
        source: Location<Self::GPR, Self::SIMD>,
365
        dest: Location<Self::GPR, Self::SIMD>,
366
    ) -> Result<(), CompileError>;
367
    /// Test src & dst and set flags
368
    fn location_test(
369
        &mut self,
370
        size: Size,
371
        source: Location<Self::GPR, Self::SIMD>,
372
        dest: Location<Self::GPR, Self::SIMD>,
373
    ) -> Result<(), CompileError>;
374
375
    /// jmp without condidtion
376
    fn jmp_unconditionnal(&mut self, label: Label) -> Result<(), CompileError>;
377
    /// jmp on equal (src==dst)
378
    /// like Equal set on x86_64
379
    fn jmp_on_equal(&mut self, label: Label) -> Result<(), CompileError>;
380
    /// jmp on different (src!=dst)
381
    /// like NotEqual set on x86_64
382
    fn jmp_on_different(&mut self, label: Label) -> Result<(), CompileError>;
383
    /// jmp on above (src>dst)
384
    /// like Above set on x86_64
385
    fn jmp_on_above(&mut self, label: Label) -> Result<(), CompileError>;
386
    /// jmp on above (src>=dst)
387
    /// like Above or Equal set on x86_64
388
    fn jmp_on_aboveequal(&mut self, label: Label) -> Result<(), CompileError>;
389
    /// jmp on above (src<=dst)
390
    /// like Below or Equal set on x86_64
391
    fn jmp_on_belowequal(&mut self, label: Label) -> Result<(), CompileError>;
392
    /// jmp on overflow
393
    /// like Carry set on x86_64
394
    fn jmp_on_overflow(&mut self, label: Label) -> Result<(), CompileError>;
395
396
    /// jmp using a jump table at lable with cond as the indice
397
    fn emit_jmp_to_jumptable(
398
        &mut self,
399
        label: Label,
400
        cond: Location<Self::GPR, Self::SIMD>,
401
    ) -> Result<(), CompileError>;
402
403
    /// Align for Loop (may do nothing, depending on the arch)
404
    fn align_for_loop(&mut self) -> Result<(), CompileError>;
405
406
    /// ret (from a Call)
407
    fn emit_ret(&mut self) -> Result<(), CompileError>;
408
409
    /// Stack push of a location
410
    fn emit_push(
411
        &mut self,
412
        size: Size,
413
        loc: Location<Self::GPR, Self::SIMD>,
414
    ) -> Result<(), CompileError>;
415
    /// Stack pop of a location
416
    fn emit_pop(
417
        &mut self,
418
        size: Size,
419
        loc: Location<Self::GPR, Self::SIMD>,
420
    ) -> Result<(), CompileError>;
421
    /// relaxed mov: move from anywhere to anywhere
422
    fn emit_relaxed_mov(
423
        &mut self,
424
        sz: Size,
425
        src: Location<Self::GPR, Self::SIMD>,
426
        dst: Location<Self::GPR, Self::SIMD>,
427
    ) -> Result<(), CompileError>;
428
    /// relaxed cmp: compare from anywhere and anywhere
429
    fn emit_relaxed_cmp(
430
        &mut self,
431
        sz: Size,
432
        src: Location<Self::GPR, Self::SIMD>,
433
        dst: Location<Self::GPR, Self::SIMD>,
434
    ) -> Result<(), CompileError>;
435
    /// Emit a memory fence. Can be nothing for x86_64 or a DMB on ARM64 for example
436
    fn emit_memory_fence(&mut self) -> Result<(), CompileError>;
437
    /// relaxed move with zero extension
438
    fn emit_relaxed_zero_extension(
439
        &mut self,
440
        sz_src: Size,
441
        src: Location<Self::GPR, Self::SIMD>,
442
        sz_dst: Size,
443
        dst: Location<Self::GPR, Self::SIMD>,
444
    ) -> Result<(), CompileError>;
445
    /// relaxed move with sign extension
446
    fn emit_relaxed_sign_extension(
447
        &mut self,
448
        sz_src: Size,
449
        src: Location<Self::GPR, Self::SIMD>,
450
        sz_dst: Size,
451
        dst: Location<Self::GPR, Self::SIMD>,
452
    ) -> Result<(), CompileError>;
453
    /// Multiply location with immediate
454
    fn emit_imul_imm32(
455
        &mut self,
456
        size: Size,
457
        imm32: u32,
458
        gpr: Self::GPR,
459
    ) -> Result<(), CompileError>;
460
    /// Add with location directly from the stack
461
    fn emit_binop_add32(
462
        &mut self,
463
        loc_a: Location<Self::GPR, Self::SIMD>,
464
        loc_b: Location<Self::GPR, Self::SIMD>,
465
        ret: Location<Self::GPR, Self::SIMD>,
466
    ) -> Result<(), CompileError>;
467
    /// Sub with location directly from the stack
468
    fn emit_binop_sub32(
469
        &mut self,
470
        loc_a: Location<Self::GPR, Self::SIMD>,
471
        loc_b: Location<Self::GPR, Self::SIMD>,
472
        ret: Location<Self::GPR, Self::SIMD>,
473
    ) -> Result<(), CompileError>;
474
    /// Multiply with location directly from the stack
475
    fn emit_binop_mul32(
476
        &mut self,
477
        loc_a: Location<Self::GPR, Self::SIMD>,
478
        loc_b: Location<Self::GPR, Self::SIMD>,
479
        ret: Location<Self::GPR, Self::SIMD>,
480
    ) -> Result<(), CompileError>;
481
    /// Unsigned Division with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
482
    fn emit_binop_udiv32(
483
        &mut self,
484
        loc_a: Location<Self::GPR, Self::SIMD>,
485
        loc_b: Location<Self::GPR, Self::SIMD>,
486
        ret: Location<Self::GPR, Self::SIMD>,
487
        integer_division_by_zero: Label,
488
        integer_overflow: Label,
489
    ) -> Result<usize, CompileError>;
490
    /// Signed Division with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
491
    fn emit_binop_sdiv32(
492
        &mut self,
493
        loc_a: Location<Self::GPR, Self::SIMD>,
494
        loc_b: Location<Self::GPR, Self::SIMD>,
495
        ret: Location<Self::GPR, Self::SIMD>,
496
        integer_division_by_zero: Label,
497
        integer_overflow: Label,
498
    ) -> Result<usize, CompileError>;
499
    /// Unsigned Reminder (of a division) with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
500
    fn emit_binop_urem32(
501
        &mut self,
502
        loc_a: Location<Self::GPR, Self::SIMD>,
503
        loc_b: Location<Self::GPR, Self::SIMD>,
504
        ret: Location<Self::GPR, Self::SIMD>,
505
        integer_division_by_zero: Label,
506
        integer_overflow: Label,
507
    ) -> Result<usize, CompileError>;
508
    /// Signed Reminder (of a Division) with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
509
    fn emit_binop_srem32(
510
        &mut self,
511
        loc_a: Location<Self::GPR, Self::SIMD>,
512
        loc_b: Location<Self::GPR, Self::SIMD>,
513
        ret: Location<Self::GPR, Self::SIMD>,
514
        integer_division_by_zero: Label,
515
        integer_overflow: Label,
516
    ) -> Result<usize, CompileError>;
517
    /// And with location directly from the stack
518
    fn emit_binop_and32(
519
        &mut self,
520
        loc_a: Location<Self::GPR, Self::SIMD>,
521
        loc_b: Location<Self::GPR, Self::SIMD>,
522
        ret: Location<Self::GPR, Self::SIMD>,
523
    ) -> Result<(), CompileError>;
524
    /// Or with location directly from the stack
525
    fn emit_binop_or32(
526
        &mut self,
527
        loc_a: Location<Self::GPR, Self::SIMD>,
528
        loc_b: Location<Self::GPR, Self::SIMD>,
529
        ret: Location<Self::GPR, Self::SIMD>,
530
    ) -> Result<(), CompileError>;
531
    /// Xor with location directly from the stack
532
    fn emit_binop_xor32(
533
        &mut self,
534
        loc_a: Location<Self::GPR, Self::SIMD>,
535
        loc_b: Location<Self::GPR, Self::SIMD>,
536
        ret: Location<Self::GPR, Self::SIMD>,
537
    ) -> Result<(), CompileError>;
538
    /// Signed Greater of Equal Compare 2 i32, result in a GPR
539
    fn i32_cmp_ge_s(
540
        &mut self,
541
        loc_a: Location<Self::GPR, Self::SIMD>,
542
        loc_b: Location<Self::GPR, Self::SIMD>,
543
        ret: Location<Self::GPR, Self::SIMD>,
544
    ) -> Result<(), CompileError>;
545
    /// Signed Greater Than Compare 2 i32, result in a GPR
546
    fn i32_cmp_gt_s(
547
        &mut self,
548
        loc_a: Location<Self::GPR, Self::SIMD>,
549
        loc_b: Location<Self::GPR, Self::SIMD>,
550
        ret: Location<Self::GPR, Self::SIMD>,
551
    ) -> Result<(), CompileError>;
552
    /// Signed Less of Equal Compare 2 i32, result in a GPR
553
    fn i32_cmp_le_s(
554
        &mut self,
555
        loc_a: Location<Self::GPR, Self::SIMD>,
556
        loc_b: Location<Self::GPR, Self::SIMD>,
557
        ret: Location<Self::GPR, Self::SIMD>,
558
    ) -> Result<(), CompileError>;
559
    /// Signed Less Than Compare 2 i32, result in a GPR
560
    fn i32_cmp_lt_s(
561
        &mut self,
562
        loc_a: Location<Self::GPR, Self::SIMD>,
563
        loc_b: Location<Self::GPR, Self::SIMD>,
564
        ret: Location<Self::GPR, Self::SIMD>,
565
    ) -> Result<(), CompileError>;
566
    /// Unsigned Greater of Equal Compare 2 i32, result in a GPR
567
    fn i32_cmp_ge_u(
568
        &mut self,
569
        loc_a: Location<Self::GPR, Self::SIMD>,
570
        loc_b: Location<Self::GPR, Self::SIMD>,
571
        ret: Location<Self::GPR, Self::SIMD>,
572
    ) -> Result<(), CompileError>;
573
    /// Unsigned Greater Than Compare 2 i32, result in a GPR
574
    fn i32_cmp_gt_u(
575
        &mut self,
576
        loc_a: Location<Self::GPR, Self::SIMD>,
577
        loc_b: Location<Self::GPR, Self::SIMD>,
578
        ret: Location<Self::GPR, Self::SIMD>,
579
    ) -> Result<(), CompileError>;
580
    /// Unsigned Less of Equal Compare 2 i32, result in a GPR
581
    fn i32_cmp_le_u(
582
        &mut self,
583
        loc_a: Location<Self::GPR, Self::SIMD>,
584
        loc_b: Location<Self::GPR, Self::SIMD>,
585
        ret: Location<Self::GPR, Self::SIMD>,
586
    ) -> Result<(), CompileError>;
587
    /// Unsigned Less Than Compare 2 i32, result in a GPR
588
    fn i32_cmp_lt_u(
589
        &mut self,
590
        loc_a: Location<Self::GPR, Self::SIMD>,
591
        loc_b: Location<Self::GPR, Self::SIMD>,
592
        ret: Location<Self::GPR, Self::SIMD>,
593
    ) -> Result<(), CompileError>;
594
    /// Not Equal Compare 2 i32, result in a GPR
595
    fn i32_cmp_ne(
596
        &mut self,
597
        loc_a: Location<Self::GPR, Self::SIMD>,
598
        loc_b: Location<Self::GPR, Self::SIMD>,
599
        ret: Location<Self::GPR, Self::SIMD>,
600
    ) -> Result<(), CompileError>;
601
    /// Equal Compare 2 i32, result in a GPR
602
    fn i32_cmp_eq(
603
        &mut self,
604
        loc_a: Location<Self::GPR, Self::SIMD>,
605
        loc_b: Location<Self::GPR, Self::SIMD>,
606
        ret: Location<Self::GPR, Self::SIMD>,
607
    ) -> Result<(), CompileError>;
608
    /// Count Leading 0 bit of an i32
609
    fn i32_clz(
610
        &mut self,
611
        loc: Location<Self::GPR, Self::SIMD>,
612
        ret: Location<Self::GPR, Self::SIMD>,
613
    ) -> Result<(), CompileError>;
614
    /// Count Trailling 0 bit of an i32
615
    fn i32_ctz(
616
        &mut self,
617
        loc: Location<Self::GPR, Self::SIMD>,
618
        ret: Location<Self::GPR, Self::SIMD>,
619
    ) -> Result<(), CompileError>;
620
    /// Count the number of 1 bit of an i32
621
    fn i32_popcnt(
622
        &mut self,
623
        loc: Location<Self::GPR, Self::SIMD>,
624
        ret: Location<Self::GPR, Self::SIMD>,
625
    ) -> Result<(), CompileError>;
626
    /// i32 Logical Shift Left
627
    fn i32_shl(
628
        &mut self,
629
        loc_a: Location<Self::GPR, Self::SIMD>,
630
        loc_b: Location<Self::GPR, Self::SIMD>,
631
        ret: Location<Self::GPR, Self::SIMD>,
632
    ) -> Result<(), CompileError>;
633
    /// i32 Logical Shift Right
634
    fn i32_shr(
635
        &mut self,
636
        loc_a: Location<Self::GPR, Self::SIMD>,
637
        loc_b: Location<Self::GPR, Self::SIMD>,
638
        ret: Location<Self::GPR, Self::SIMD>,
639
    ) -> Result<(), CompileError>;
640
    /// i32 Arithmetic Shift Right
641
    fn i32_sar(
642
        &mut self,
643
        loc_a: Location<Self::GPR, Self::SIMD>,
644
        loc_b: Location<Self::GPR, Self::SIMD>,
645
        ret: Location<Self::GPR, Self::SIMD>,
646
    ) -> Result<(), CompileError>;
647
    /// i32 Roll Left
648
    fn i32_rol(
649
        &mut self,
650
        loc_a: Location<Self::GPR, Self::SIMD>,
651
        loc_b: Location<Self::GPR, Self::SIMD>,
652
        ret: Location<Self::GPR, Self::SIMD>,
653
    ) -> Result<(), CompileError>;
654
    /// i32 Roll Right
655
    fn i32_ror(
656
        &mut self,
657
        loc_a: Location<Self::GPR, Self::SIMD>,
658
        loc_b: Location<Self::GPR, Self::SIMD>,
659
        ret: Location<Self::GPR, Self::SIMD>,
660
    ) -> Result<(), CompileError>;
661
    /// i32 load
662
    #[allow(clippy::too_many_arguments)]
663
    fn i32_load(
664
        &mut self,
665
        addr: Location<Self::GPR, Self::SIMD>,
666
        memarg: &MemArg,
667
        ret: Location<Self::GPR, Self::SIMD>,
668
        need_check: bool,
669
        imported_memories: bool,
670
        offset: i32,
671
        heap_access_oob: Label,
672
        unaligned_atomic: Label,
673
    ) -> Result<(), CompileError>;
674
    /// i32 load of an unsigned 8bits
675
    #[allow(clippy::too_many_arguments)]
676
    fn i32_load_8u(
677
        &mut self,
678
        addr: Location<Self::GPR, Self::SIMD>,
679
        memarg: &MemArg,
680
        ret: Location<Self::GPR, Self::SIMD>,
681
        need_check: bool,
682
        imported_memories: bool,
683
        offset: i32,
684
        heap_access_oob: Label,
685
        unaligned_atomic: Label,
686
    ) -> Result<(), CompileError>;
687
    /// i32 load of an signed 8bits
688
    #[allow(clippy::too_many_arguments)]
689
    fn i32_load_8s(
690
        &mut self,
691
        addr: Location<Self::GPR, Self::SIMD>,
692
        memarg: &MemArg,
693
        ret: Location<Self::GPR, Self::SIMD>,
694
        need_check: bool,
695
        imported_memories: bool,
696
        offset: i32,
697
        heap_access_oob: Label,
698
        unaligned_atomic: Label,
699
    ) -> Result<(), CompileError>;
700
    /// i32 load of an unsigned 16bits
    #[allow(clippy::too_many_arguments)]
    fn i32_load_16u(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 load of a signed 16bits
    #[allow(clippy::too_many_arguments)]
    fn i32_load_16s(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic load
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_load(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic load of an unsigned 8bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_load_8u(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic load of an unsigned 16bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_load_16u(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 save
    #[allow(clippy::too_many_arguments)]
    fn i32_save(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 save of the lower 8bits
    #[allow(clippy::too_many_arguments)]
    fn i32_save_8(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 save of the lower 16bits
    #[allow(clippy::too_many_arguments)]
    fn i32_save_16(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic save
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_save(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic save of the lower 8bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_save_8(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic save of the lower 16bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_save_16(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Add with i32
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_add(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Add with unsigned 8bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_add_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Add with unsigned 16bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_add_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Sub with i32
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_sub(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Sub with unsigned 8bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_sub_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Sub with unsigned 16bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_sub_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic And with i32
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_and(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic And with unsigned 8bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_and_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic And with unsigned 16bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_and_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Or with i32
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_or(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Or with unsigned 8bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_or_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Or with unsigned 16bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_or_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Xor with i32
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_xor(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Xor with unsigned 8bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_xor_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Xor with unsigned 16bits
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_xor_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Exchange with i32
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_xchg(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Exchange with u8
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_xchg_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Exchange with u16
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_xchg_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Compare and Exchange with i32
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_cmpxchg(
        &mut self,
        new: Location<Self::GPR, Self::SIMD>,
        cmp: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Compare and Exchange with u8
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_cmpxchg_8u(
        &mut self,
        new: Location<Self::GPR, Self::SIMD>,
        cmp: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i32 atomic Compare and Exchange with u16
    #[allow(clippy::too_many_arguments)]
    fn i32_atomic_cmpxchg_16u(
        &mut self,
        new: Location<Self::GPR, Self::SIMD>,
        cmp: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;

    /// emit a move function address to GPR ready for call, using appropriate relocation
    fn emit_call_with_reloc(
        &mut self,
        calling_convention: CallingConvention,
        reloc_target: RelocationTarget,
    ) -> Result<Vec<Relocation>, CompileError>;
    /// Add with location directly from the stack
    fn emit_binop_add64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Sub with location directly from the stack
    fn emit_binop_sub64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Multiply with location directly from the stack
    fn emit_binop_mul64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Unsigned Division with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
    fn emit_binop_udiv64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        integer_division_by_zero: Label,
        integer_overflow: Label,
    ) -> Result<usize, CompileError>;
    /// Signed Division with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
    fn emit_binop_sdiv64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        integer_division_by_zero: Label,
        integer_overflow: Label,
    ) -> Result<usize, CompileError>;
    /// Unsigned Remainder (of a division) with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
    fn emit_binop_urem64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        integer_division_by_zero: Label,
        integer_overflow: Label,
    ) -> Result<usize, CompileError>;
    /// Signed Remainder (of a Division) with location directly from the stack. return the offset of the DIV opcode, to mark as trappable.
    fn emit_binop_srem64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        integer_division_by_zero: Label,
        integer_overflow: Label,
    ) -> Result<usize, CompileError>;
    /// And with location directly from the stack
    fn emit_binop_and64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Or with location directly from the stack
    fn emit_binop_or64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Xor with location directly from the stack
    fn emit_binop_xor64(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Signed Greater or Equal Compare 2 i64, result in a GPR
    fn i64_cmp_ge_s(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Signed Greater Than Compare 2 i64, result in a GPR
    fn i64_cmp_gt_s(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Signed Less or Equal Compare 2 i64, result in a GPR
    fn i64_cmp_le_s(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Signed Less Than Compare 2 i64, result in a GPR
    fn i64_cmp_lt_s(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Unsigned Greater or Equal Compare 2 i64, result in a GPR
    fn i64_cmp_ge_u(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Unsigned Greater Than Compare 2 i64, result in a GPR
    fn i64_cmp_gt_u(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Unsigned Less or Equal Compare 2 i64, result in a GPR
    fn i64_cmp_le_u(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Unsigned Less Than Compare 2 i64, result in a GPR
    fn i64_cmp_lt_u(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Not Equal Compare 2 i64, result in a GPR
    fn i64_cmp_ne(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Equal Compare 2 i64, result in a GPR
    fn i64_cmp_eq(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Count Leading 0 bit of an i64
    fn i64_clz(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Count Trailing 0 bit of an i64
    fn i64_ctz(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Count the number of 1 bit of an i64
    fn i64_popcnt(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// i64 Logical Shift Left
    fn i64_shl(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// i64 Logical Shift Right
    fn i64_shr(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// i64 Arithmetic Shift Right
    fn i64_sar(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// i64 Rotate Left
    fn i64_rol(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// i64 Rotate Right
    fn i64_ror(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// i64 load
    #[allow(clippy::too_many_arguments)]
    fn i64_load(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 load of an unsigned 8bits
    #[allow(clippy::too_many_arguments)]
    fn i64_load_8u(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 load of a signed 8bits
    #[allow(clippy::too_many_arguments)]
    fn i64_load_8s(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 load of an unsigned 32bits
    #[allow(clippy::too_many_arguments)]
    fn i64_load_32u(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 load of a signed 32bits
    #[allow(clippy::too_many_arguments)]
    fn i64_load_32s(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 load of an unsigned 16bits
    #[allow(clippy::too_many_arguments)]
    fn i64_load_16u(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 load of a signed 16bits
    #[allow(clippy::too_many_arguments)]
    fn i64_load_16s(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic load
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_load(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic load from unsigned 8bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_load_8u(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic load from unsigned 16bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_load_16u(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic load from unsigned 32bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_load_32u(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 save
    #[allow(clippy::too_many_arguments)]
    fn i64_save(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 save of the lower 8bits
    #[allow(clippy::too_many_arguments)]
    fn i64_save_8(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 save of the lower 16bits
    #[allow(clippy::too_many_arguments)]
    fn i64_save_16(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 save of the lower 32bits
    #[allow(clippy::too_many_arguments)]
    fn i64_save_32(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic save
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_save(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic save of the lower 8bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_save_8(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic save of the lower 16bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_save_16(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic save of the lower 32bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_save_32(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Add with i64
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_add(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Add with unsigned 8bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_add_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Add with unsigned 16bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_add_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Add with unsigned 32bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_add_32u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Sub with i64
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_sub(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Sub with unsigned 8bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_sub_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Sub with unsigned 16bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_sub_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Sub with unsigned 32bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_sub_32u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
1707
    /// i64 atomic And with i64
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_and(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic And with unsigned 8bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_and_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic And with unsigned 16bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_and_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic And with unsigned 32bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_and_32u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Or with i64
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_or(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Or with unsigned 8bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_or_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Or with unsigned 16bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_or_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Or with unsigned 32bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_or_32u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
1819
    /// i64 atomic Xor with i64
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_xor(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Xor with unsigned 8bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_xor_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Xor with unsigned 16bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_xor_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Xor with unsigned 32bits
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_xor_32u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Exchange with i64
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_xchg(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Exchange with u8
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_xchg_8u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Exchange with u16
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_xchg_16u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Exchange with u32
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_xchg_32u(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
1931
    /// i64 atomic Compare and Exchange with i64
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_cmpxchg(
        &mut self,
        new: Location<Self::GPR, Self::SIMD>,
        cmp: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Compare and Exchange with u8
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_cmpxchg_8u(
        &mut self,
        new: Location<Self::GPR, Self::SIMD>,
        cmp: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Compare and Exchange with u16
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_cmpxchg_16u(
        &mut self,
        new: Location<Self::GPR, Self::SIMD>,
        cmp: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// i64 atomic Compare and Exchange with u32
    #[allow(clippy::too_many_arguments)]
    fn i64_atomic_cmpxchg_32u(
        &mut self,
        new: Location<Self::GPR, Self::SIMD>,
        cmp: Location<Self::GPR, Self::SIMD>,
        target: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
1991
1992
    /// load an F32
    #[allow(clippy::too_many_arguments)]
    fn f32_load(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// f32 save (with optional NaN canonicalization of the stored value)
    #[allow(clippy::too_many_arguments)]
    fn f32_save(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        canonicalize: bool,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// load an F64
    #[allow(clippy::too_many_arguments)]
    fn f64_load(
        &mut self,
        addr: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        ret: Location<Self::GPR, Self::SIMD>,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
    /// f64 save (with optional NaN canonicalization of the stored value)
    #[allow(clippy::too_many_arguments)]
    fn f64_save(
        &mut self,
        value: Location<Self::GPR, Self::SIMD>,
        memarg: &MemArg,
        addr: Location<Self::GPR, Self::SIMD>,
        canonicalize: bool,
        need_check: bool,
        imported_memories: bool,
        offset: i32,
        heap_access_oob: Label,
        unaligned_atomic: Label,
    ) -> Result<(), CompileError>;
2046
    /// Convert a F64 from I64, signed or unsigned
    fn convert_f64_i64(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Convert a F64 from I32, signed or unsigned
    fn convert_f64_i32(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Convert a F32 from I64, signed or unsigned
    fn convert_f32_i64(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Convert a F32 from I32, signed or unsigned
    fn convert_f32_i32(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Convert a F64 to I64, signed or unsigned, with or without saturation
    fn convert_i64_f64(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        sat: bool,
    ) -> Result<(), CompileError>;
    /// Convert a F64 to I32, signed or unsigned, with or without saturation
    fn convert_i32_f64(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        sat: bool,
    ) -> Result<(), CompileError>;
    /// Convert a F32 to I64, signed or unsigned, with or without saturation
    fn convert_i64_f32(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        sat: bool,
    ) -> Result<(), CompileError>;
    /// Convert a F32 to I32, signed or unsigned, with or without saturation
    fn convert_i32_f32(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
        signed: bool,
        sat: bool,
    ) -> Result<(), CompileError>;
    /// Convert a F32 to F64
    fn convert_f64_f32(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Convert a F64 to F32
    fn convert_f32_f64(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
2118
    /// Negate an F64
    fn f64_neg(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Get the Absolute Value of an F64
    fn f64_abs(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Copy sign (64bit) from tmp1 Self::GPR to tmp2 Self::GPR
    fn emit_i64_copysign(&mut self, tmp1: Self::GPR, tmp2: Self::GPR) -> Result<(), CompileError>;
    /// Get the Square Root of an F64
    fn f64_sqrt(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Trunc of an F64
    fn f64_trunc(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Ceil of an F64
    fn f64_ceil(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Floor of an F64
    fn f64_floor(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Round at nearest int of an F64
    fn f64_nearest(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
2162
    /// Greater or Equal Compare 2 F64, result in a GPR
    fn f64_cmp_ge(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Greater Than Compare 2 F64, result in a GPR
    fn f64_cmp_gt(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Less or Equal Compare 2 F64, result in a GPR
    fn f64_cmp_le(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Less Than Compare 2 F64, result in a GPR
    fn f64_cmp_lt(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Not Equal Compare 2 F64, result in a GPR
    fn f64_cmp_ne(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Equal Compare 2 F64, result in a GPR
    fn f64_cmp_eq(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// get Min for 2 F64 values
    fn f64_min(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// get Max for 2 F64 values
    fn f64_max(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Add 2 F64 values
    fn f64_add(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Sub 2 F64 values
    fn f64_sub(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Multiply 2 F64 values
    fn f64_mul(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Divide 2 F64 values
    fn f64_div(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
2246
    /// Negate an F32
    fn f32_neg(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Get the Absolute Value of an F32
    fn f32_abs(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Copy sign (32bit) from tmp1 Self::GPR to tmp2 Self::GPR
    fn emit_i32_copysign(&mut self, tmp1: Self::GPR, tmp2: Self::GPR) -> Result<(), CompileError>;
    /// Get the Square Root of an F32
    fn f32_sqrt(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Trunc of an F32
    fn f32_trunc(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Ceil of an F32
    fn f32_ceil(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Floor of an F32
    fn f32_floor(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Round at nearest int of an F32
    fn f32_nearest(
        &mut self,
        loc: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
2290
    /// Greater or Equal Compare 2 F32, result in a GPR
    fn f32_cmp_ge(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Greater Than Compare 2 F32, result in a GPR
    fn f32_cmp_gt(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Less or Equal Compare 2 F32, result in a GPR
    fn f32_cmp_le(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Less Than Compare 2 F32, result in a GPR
    fn f32_cmp_lt(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Not Equal Compare 2 F32, result in a GPR
    fn f32_cmp_ne(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Equal Compare 2 F32, result in a GPR
    fn f32_cmp_eq(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// get Min for 2 F32 values
    fn f32_min(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// get Max for 2 F32 values
    fn f32_max(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Add 2 F32 values
    fn f32_add(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Sub 2 F32 values
    fn f32_sub(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Multiply 2 F32 values
    fn f32_mul(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
    /// Divide 2 F32 values
    fn f32_div(
        &mut self,
        loc_a: Location<Self::GPR, Self::SIMD>,
        loc_b: Location<Self::GPR, Self::SIMD>,
        ret: Location<Self::GPR, Self::SIMD>,
    ) -> Result<(), CompileError>;
2374
2375
    /// Standard function Trampoline generation
    fn gen_std_trampoline(
        &self,
        sig: &FunctionType,
        calling_convention: CallingConvention,
    ) -> Result<FunctionBody, CompileError>;
    /// Generates dynamic import function call trampoline for a function type.
    fn gen_std_dynamic_import_trampoline(
        &self,
        vmoffsets: &VMOffsets,
        sig: &FunctionType,
        calling_convention: CallingConvention,
    ) -> Result<FunctionBody, CompileError>;
    /// Singlepass calls import functions through a trampoline.
    fn gen_import_call_trampoline(
        &self,
        vmoffsets: &VMOffsets,
        index: FunctionIndex,
        sig: &FunctionType,
        calling_convention: CallingConvention,
    ) -> Result<CustomSection, CompileError>;
    /// generate eh_frame instructions (or None if not possible / supported)
    fn gen_dwarf_unwind_info(&mut self, code_len: usize) -> Option<UnwindInstructions>;
    /// generate Windows unwind instructions (or None if not possible / supported)
    fn gen_windows_unwind_info(&mut self, code_len: usize) -> Option<Vec<u8>>;
2400
}
2401
2402
/// Standard entry trampoline generation
2403
70.4k
pub fn gen_std_trampoline(
2404
70.4k
    sig: &FunctionType,
2405
70.4k
    target: &Target,
2406
70.4k
    calling_convention: CallingConvention,
2407
70.4k
) -> Result<FunctionBody, CompileError> {
2408
70.4k
    match target.triple().architecture {
2409
        Architecture::X86_64 => {
2410
70.4k
            let machine = MachineX86_64::new(Some(target.clone()))?;
2411
70.4k
            machine.gen_std_trampoline(sig, calling_convention)
2412
        }
2413
        Architecture::Aarch64(_) => {
2414
0
            let machine = MachineARM64::new(Some(target.clone()));
2415
0
            machine.gen_std_trampoline(sig, calling_convention)
2416
        }
2417
0
        _ => Err(CompileError::UnsupportedTarget(
2418
0
            "singlepass unimplemented arch for gen_std_trampoline".to_owned(),
2419
0
        )),
2420
    }
2421
70.4k
}
wasmer_compiler_singlepass::machine::gen_std_trampoline
Line
Count
Source
2403
18.8k
pub fn gen_std_trampoline(
2404
18.8k
    sig: &FunctionType,
2405
18.8k
    target: &Target,
2406
18.8k
    calling_convention: CallingConvention,
2407
18.8k
) -> Result<FunctionBody, CompileError> {
2408
18.8k
    match target.triple().architecture {
2409
        Architecture::X86_64 => {
2410
18.8k
            let machine = MachineX86_64::new(Some(target.clone()))?;
2411
18.8k
            machine.gen_std_trampoline(sig, calling_convention)
2412
        }
2413
        Architecture::Aarch64(_) => {
2414
0
            let machine = MachineARM64::new(Some(target.clone()));
2415
0
            machine.gen_std_trampoline(sig, calling_convention)
2416
        }
2417
0
        _ => Err(CompileError::UnsupportedTarget(
2418
0
            "singlepass unimplemented arch for gen_std_trampoline".to_owned(),
2419
0
        )),
2420
    }
2421
18.8k
}
wasmer_compiler_singlepass::machine::gen_std_trampoline
Line
Count
Source
2403
51.5k
pub fn gen_std_trampoline(
2404
51.5k
    sig: &FunctionType,
2405
51.5k
    target: &Target,
2406
51.5k
    calling_convention: CallingConvention,
2407
51.5k
) -> Result<FunctionBody, CompileError> {
2408
51.5k
    match target.triple().architecture {
2409
        Architecture::X86_64 => {
2410
51.5k
            let machine = MachineX86_64::new(Some(target.clone()))?;
2411
51.5k
            machine.gen_std_trampoline(sig, calling_convention)
2412
        }
2413
        Architecture::Aarch64(_) => {
2414
0
            let machine = MachineARM64::new(Some(target.clone()));
2415
0
            machine.gen_std_trampoline(sig, calling_convention)
2416
        }
2417
0
        _ => Err(CompileError::UnsupportedTarget(
2418
0
            "singlepass unimplemented arch for gen_std_trampoline".to_owned(),
2419
0
        )),
2420
    }
2421
51.5k
}
2422
2423
/// Generates dynamic import function call trampoline for a function type.
2424
0
pub fn gen_std_dynamic_import_trampoline(
2425
0
    vmoffsets: &VMOffsets,
2426
0
    sig: &FunctionType,
2427
0
    target: &Target,
2428
0
    calling_convention: CallingConvention,
2429
0
) -> Result<FunctionBody, CompileError> {
2430
0
    match target.triple().architecture {
2431
        Architecture::X86_64 => {
2432
0
            let machine = MachineX86_64::new(Some(target.clone()))?;
2433
0
            machine.gen_std_dynamic_import_trampoline(vmoffsets, sig, calling_convention)
2434
        }
2435
        Architecture::Aarch64(_) => {
2436
0
            let machine = MachineARM64::new(Some(target.clone()));
2437
0
            machine.gen_std_dynamic_import_trampoline(vmoffsets, sig, calling_convention)
2438
        }
2439
0
        _ => Err(CompileError::UnsupportedTarget(
2440
0
            "singlepass unimplemented arch for gen_std_dynamic_import_trampoline".to_owned(),
2441
0
        )),
2442
    }
2443
0
}
Unexecuted instantiation: wasmer_compiler_singlepass::machine::gen_std_dynamic_import_trampoline
Unexecuted instantiation: wasmer_compiler_singlepass::machine::gen_std_dynamic_import_trampoline
2444
/// Singlepass calls import functions through a trampoline.
2445
0
pub fn gen_import_call_trampoline(
2446
0
    vmoffsets: &VMOffsets,
2447
0
    index: FunctionIndex,
2448
0
    sig: &FunctionType,
2449
0
    target: &Target,
2450
0
    calling_convention: CallingConvention,
2451
0
) -> Result<CustomSection, CompileError> {
2452
0
    match target.triple().architecture {
2453
        Architecture::X86_64 => {
2454
0
            let machine = MachineX86_64::new(Some(target.clone()))?;
2455
0
            machine.gen_import_call_trampoline(vmoffsets, index, sig, calling_convention)
2456
        }
2457
        Architecture::Aarch64(_) => {
2458
0
            let machine = MachineARM64::new(Some(target.clone()));
2459
0
            machine.gen_import_call_trampoline(vmoffsets, index, sig, calling_convention)
2460
        }
2461
0
        _ => Err(CompileError::UnsupportedTarget(
2462
0
            "singlepass unimplemented arch for gen_import_call_trampoline".to_owned(),
2463
0
        )),
2464
    }
2465
0
}
Unexecuted instantiation: wasmer_compiler_singlepass::machine::gen_import_call_trampoline
Unexecuted instantiation: wasmer_compiler_singlepass::machine::gen_import_call_trampoline
2466
2467
// Constants for the bounds of truncation operations. These are the least or
// greatest exact floats in either f32 or f64 representation less-than (for
// least) or greater-than (for greatest) the i32 or i64 or u32 or u64
// min (for least) or max (for greatest), when rounding towards zero.
//
// NOTE(review): presumably these serve as comparison limits when lowering
// float-to-integer truncation (values at or beyond a bound would overflow
// the target integer); usage is not visible in this part of the file —
// confirm against the trunc lowering code. The exact literal values matter:
// each is the nearest representable float strictly outside the integer
// range, so do not "simplify" them.

/// Greatest Exact Float (32 bits) less-than i32::MIN when rounding towards zero.
pub const GEF32_LT_I32_MIN: f32 = -2147483904.0;
/// Least Exact Float (32 bits) greater-than i32::MAX when rounding towards zero.
pub const LEF32_GT_I32_MAX: f32 = 2147483648.0;
/// Greatest Exact Float (32 bits) less-than i64::MIN when rounding towards zero.
pub const GEF32_LT_I64_MIN: f32 = -9223373136366403584.0;
/// Least Exact Float (32 bits) greater-than i64::MAX when rounding towards zero.
pub const LEF32_GT_I64_MAX: f32 = 9223372036854775808.0;
/// Greatest Exact Float (32 bits) less-than u32::MIN when rounding towards zero.
pub const GEF32_LT_U32_MIN: f32 = -1.0;
/// Least Exact Float (32 bits) greater-than u32::MAX when rounding towards zero.
pub const LEF32_GT_U32_MAX: f32 = 4294967296.0;
/// Greatest Exact Float (32 bits) less-than u64::MIN when rounding towards zero.
pub const GEF32_LT_U64_MIN: f32 = -1.0;
/// Least Exact Float (32 bits) greater-than u64::MAX when rounding towards zero.
pub const LEF32_GT_U64_MAX: f32 = 18446744073709551616.0;

/// Greatest Exact Float (64 bits) less-than i32::MIN when rounding towards zero.
pub const GEF64_LT_I32_MIN: f64 = -2147483649.0;
/// Least Exact Float (64 bits) greater-than i32::MAX when rounding towards zero.
pub const LEF64_GT_I32_MAX: f64 = 2147483648.0;
/// Greatest Exact Float (64 bits) less-than i64::MIN when rounding towards zero.
pub const GEF64_LT_I64_MIN: f64 = -9223372036854777856.0;
/// Least Exact Float (64 bits) greater-than i64::MAX when rounding towards zero.
pub const LEF64_GT_I64_MAX: f64 = 9223372036854775808.0;
/// Greatest Exact Float (64 bits) less-than u32::MIN when rounding towards zero.
pub const GEF64_LT_U32_MIN: f64 = -1.0;
/// Least Exact Float (64 bits) greater-than u32::MAX when rounding towards zero.
pub const LEF64_GT_U32_MAX: f64 = 4294967296.0;
/// Greatest Exact Float (64 bits) less-than u64::MIN when rounding towards zero.
pub const GEF64_LT_U64_MIN: f64 = -1.0;
/// Least Exact Float (64 bits) greater-than u64::MAX when rounding towards zero.
pub const LEF64_GT_U64_MAX: f64 = 18446744073709551616.0;