Coverage Report

Created: 2018-09-25 14:53

/src/mozilla-central/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/x86-shared/MacroAssembler-x86-shared.h"

#include "jit/JitFrames.h"
#include "jit/MacroAssembler.h"
#include "jit/MoveEmitter.h"

#include "jit/MacroAssembler-inl.h"

using namespace js;
using namespace js::jit;

// Note: this function clobbers the input register.
// Coverage: 0 hits
void
MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output)
{
    ScratchDoubleScope scratch(*this);
    MOZ_ASSERT(input != scratch);
    Label positive, done;

    // <= 0 or NaN --> 0
    zeroDouble(scratch);
    branchDouble(DoubleGreaterThan, input, scratch, &positive);
    {
        move32(Imm32(0), output);
        jump(&done);
    }

    bind(&positive);

    // Add 0.5 and truncate.
    loadConstantDouble(0.5, scratch);
    addDouble(scratch, input);

    Label outOfRange;

    // Truncate to int32 and ensure the result <= 255. This relies on the
    // processor setting output to a value > 255 for doubles outside the int32
    // range (for instance 0x80000000).
    vcvttsd2si(input, output);
    branch32(Assembler::Above, output, Imm32(255), &outOfRange);
    {
        // Check if we had a tie.
        convertInt32ToDouble(output, scratch);
        branchDouble(DoubleNotEqual, input, scratch, &done);

        // It was a tie. Mask out the ones bit to get an even value.
        // See also js_TypedArray_uint8_clamp_double.
        and32(Imm32(~1), output);
        jump(&done);
    }

    // > 255 --> 255
    bind(&outOfRange);
    {
        move32(Imm32(255), output);
    }

    bind(&done);
}

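// Illustrative sketch (not part of the original file): the scalar semantics
// the assembly above implements, i.e. Uint8ClampedArray's conversion with
// round-half-to-even ties. Function name is hypothetical.
#include <cstdint>

static uint8_t ClampDoubleToUint8Reference(double d)
{
    if (!(d > 0)) {                     // <= 0 or NaN --> 0
        return 0;
    }
    double shifted = d + 0.5;           // add 0.5 and truncate
    if (shifted > 255) {                // > 255 --> 255
        return 255;
    }
    uint8_t result = uint8_t(shifted);
    if (double(result) == shifted) {    // exact .5 tie: clear the ones bit
        result &= ~1;
    }
    return result;
}
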
// Coverage: 0 hits
bool
MacroAssemblerX86Shared::buildOOLFakeExitFrame(void* fakeReturnAddr)
{
    uint32_t descriptor = MakeFrameDescriptor(asMasm().framePushed(), FrameType::IonJS,
                                              ExitFrameLayout::Size());
    asMasm().Push(Imm32(descriptor));
    asMasm().Push(ImmPtr(fakeReturnAddr));
    return true;
}

// Coverage: 0 hits
void
MacroAssemblerX86Shared::branchNegativeZero(FloatRegister reg,
                                            Register scratch,
                                            Label* label,
                                            bool maybeNonZero)
{
    // Determines whether the low double contained in the XMM register reg
    // is equal to -0.0.

#if defined(JS_CODEGEN_X86)
    Label nonZero;

    // If not already compared to zero.
    if (maybeNonZero) {
        ScratchDoubleScope scratchDouble(asMasm());

        // Compare to zero. Lets through {0, -0}.
        zeroDouble(scratchDouble);

        // If reg is non-zero, jump to nonZero.
        asMasm().branchDouble(DoubleNotEqual, reg, scratchDouble, &nonZero);
    }
    // Input register is either zero or negative zero. Retrieve sign of input.
    vmovmskpd(reg, scratch);

    // If scratch is 1 or 3, input is negative zero.
    // If scratch is 0 or 2, input is a normal zero.
    asMasm().branchTest32(NonZero, scratch, Imm32(1), label);

    bind(&nonZero);
#elif defined(JS_CODEGEN_X64)
    vmovq(reg, scratch);
    cmpq(Imm32(1), scratch);
    j(Overflow, label);
#endif
}

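// Illustrative sketch (not part of the original file): why the x64 path
// works. -0.0 is the only double whose bit pattern is 0x8000000000000000,
// which is INT64_MIN as a signed integer, and `cmpq $1, r` computes r - 1,
// which overflows exactly when r == INT64_MIN.
#include <cstdint>
#include <cstring>

static bool IsNegativeZeroReference(double d)
{
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof(bits));    // what vmovq moves into scratch
    return bits == 0x8000000000000000ULL;
}
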
// Coverage: 0 hits
void
MacroAssemblerX86Shared::branchNegativeZeroFloat32(FloatRegister reg,
                                                   Register scratch,
                                                   Label* label)
{
    vmovd(reg, scratch);
    cmp32(scratch, Imm32(1));
    j(Overflow, label);
}

// Coverage: 2 hits
MacroAssembler&
MacroAssemblerX86Shared::asMasm()
{
    return *static_cast<MacroAssembler*>(this);
}

// Coverage: 0 hits
const MacroAssembler&
MacroAssemblerX86Shared::asMasm() const
{
    return *static_cast<const MacroAssembler*>(this);
}

// Coverage: 0 hits
template<class T, class Map>
T*
MacroAssemblerX86Shared::getConstant(const typename T::Pod& value, Map& map,
                                     Vector<T, 0, SystemAllocPolicy>& vec)
{
    typedef typename Map::AddPtr AddPtr;
    size_t index;
    if (AddPtr p = map.lookupForAdd(value)) {
        index = p->value();
    } else {
        index = vec.length();
        enoughMemory_ &= vec.append(T(value));
        if (!enoughMemory_) {
            return nullptr;
        }
        enoughMemory_ &= map.add(p, value, index);
        if (!enoughMemory_) {
            return nullptr;
        }
    }
    return &vec[index];
}
Unexecuted instantiation: js::jit::MacroAssemblerX86Shared::Constant<float>* js::jit::MacroAssemblerX86Shared::getConstant<js::jit::MacroAssemblerX86Shared::Constant<float>, mozilla::HashMap<float, unsigned long, mozilla::DefaultHasher<float>, js::SystemAllocPolicy> >(js::jit::MacroAssemblerX86Shared::Constant<float>::Pod const&, mozilla::HashMap<float, unsigned long, mozilla::DefaultHasher<float>, js::SystemAllocPolicy>&, mozilla::Vector<js::jit::MacroAssemblerX86Shared::Constant<float>, 0ul, js::SystemAllocPolicy>&)
Unexecuted instantiation: js::jit::MacroAssemblerX86Shared::Constant<double>* js::jit::MacroAssemblerX86Shared::getConstant<js::jit::MacroAssemblerX86Shared::Constant<double>, mozilla::HashMap<double, unsigned long, mozilla::DefaultHasher<double>, js::SystemAllocPolicy> >(js::jit::MacroAssemblerX86Shared::Constant<double>::Pod const&, mozilla::HashMap<double, unsigned long, mozilla::DefaultHasher<double>, js::SystemAllocPolicy>&, mozilla::Vector<js::jit::MacroAssemblerX86Shared::Constant<double>, 0ul, js::SystemAllocPolicy>&)
Unexecuted instantiation: js::jit::MacroAssemblerX86Shared::SimdData* js::jit::MacroAssemblerX86Shared::getConstant<js::jit::MacroAssemblerX86Shared::SimdData, mozilla::HashMap<js::jit::SimdConstant, unsigned long, js::jit::SimdConstant, js::SystemAllocPolicy> >(js::jit::MacroAssemblerX86Shared::SimdData::Pod const&, mozilla::HashMap<js::jit::SimdConstant, unsigned long, js::jit::SimdConstant, js::SystemAllocPolicy>&, mozilla::Vector<js::jit::MacroAssemblerX86Shared::SimdData, 0ul, js::SystemAllocPolicy>&)

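// Illustrative sketch (not part of the original file): the same
// lookup-or-append memoization in standard-library terms, with the OOM
// propagation (enoughMemory_) elided. Names here are hypothetical.
#include <cstddef>
#include <unordered_map>
#include <vector>

template <typename T>
static T* GetOrAddConstant(const T& value,
                           std::unordered_map<T, size_t>& map,
                           std::vector<T>& vec)
{
    auto it = map.find(value);
    size_t index;
    if (it != map.end()) {
        index = it->second;      // constant already pooled: reuse its slot
    } else {
        index = vec.size();      // new constant: append and remember it
        vec.push_back(value);
        map.emplace(value, index);
    }
    return &vec[index];
}
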
// Coverage: 0 hits
MacroAssemblerX86Shared::Float*
MacroAssemblerX86Shared::getFloat(float f)
{
    return getConstant<Float, FloatMap>(f, floatMap_, floats_);
}

// Coverage: 0 hits
MacroAssemblerX86Shared::Double*
MacroAssemblerX86Shared::getDouble(double d)
{
    return getConstant<Double, DoubleMap>(d, doubleMap_, doubles_);
}

// Coverage: 0 hits
MacroAssemblerX86Shared::SimdData*
MacroAssemblerX86Shared::getSimdData(const SimdConstant& v)
{
    return getConstant<SimdData, SimdMap>(v, simdMap_, simds_);
}

// Coverage: 0 hits
void
MacroAssemblerX86Shared::minMaxDouble(FloatRegister first, FloatRegister second, bool canBeNaN,
                                      bool isMax)
{
    Label done, nan, minMaxInst;

    // Do a vucomisd to catch equality and NaNs, which both require special
    // handling. If the operands are ordered and unequal, we branch straight to
    // the min/max instruction. If we wanted, we could also branch for less-than
    // or greater-than here instead of using min/max, however these conditions
    // will sometimes be hard on the branch predictor.
    vucomisd(second, first);
    j(Assembler::NotEqual, &minMaxInst);
    if (canBeNaN) {
        j(Assembler::Parity, &nan);
    }

    // Ordered and equal. The operands are bit-identical unless they are zero
    // and negative zero. These instructions merge the sign bits in that
    // case, and are no-ops otherwise.
    if (isMax) {
        vandpd(second, first, first);
    } else {
        vorpd(second, first, first);
    }
    jump(&done);

    // x86's min/max are not symmetric; if either operand is a NaN, they return
    // the read-only operand. We need to return a NaN if either operand is a
    // NaN, so we explicitly check for a NaN in the read-write operand.
    if (canBeNaN) {
        bind(&nan);
        vucomisd(first, first);
        j(Assembler::Parity, &done);
    }

    // When the values are unequal, or second is NaN, x86's min and max will
    // return the value we need.
    bind(&minMaxInst);
    if (isMax) {
        vmaxsd(second, first, first);
    } else {
        vminsd(second, first, first);
    }

    bind(&done);
}

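// Illustrative sketch (not part of the original file): the scalar behavior
// minMaxDouble implements, covering the NaN and {+0, -0} cases that a bare
// vminsd/vmaxsd would get wrong. Function name is hypothetical.
#include <cmath>
#include <limits>

static double MaxDoubleReference(double a, double b)
{
    if (std::isnan(a) || std::isnan(b)) {
        return std::numeric_limits<double>::quiet_NaN();
    }
    if (a == b) {                        // equal, possibly {+0, -0}:
        return std::signbit(a) ? b : a;  // max merges the signs toward +0
    }
    return a > b ? a : b;
}
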
// Coverage: 0 hits
void
MacroAssemblerX86Shared::minMaxFloat32(FloatRegister first, FloatRegister second, bool canBeNaN,
                                       bool isMax)
{
    Label done, nan, minMaxInst;

    // Do a vucomiss to catch equality and NaNs, which both require special
    // handling. If the operands are ordered and unequal, we branch straight to
    // the min/max instruction. If we wanted, we could also branch for less-than
    // or greater-than here instead of using min/max, however these conditions
    // will sometimes be hard on the branch predictor.
    vucomiss(second, first);
    j(Assembler::NotEqual, &minMaxInst);
    if (canBeNaN) {
        j(Assembler::Parity, &nan);
    }

    // Ordered and equal. The operands are bit-identical unless they are zero
    // and negative zero. These instructions merge the sign bits in that
    // case, and are no-ops otherwise.
    if (isMax) {
        vandps(second, first, first);
    } else {
        vorps(second, first, first);
    }
    jump(&done);

    // x86's min/max are not symmetric; if either operand is a NaN, they return
    // the read-only operand. We need to return a NaN if either operand is a
    // NaN, so we explicitly check for a NaN in the read-write operand.
    if (canBeNaN) {
        bind(&nan);
        vucomiss(first, first);
        j(Assembler::Parity, &done);
    }

    // When the values are unequal, or second is NaN, x86's min and max will
    // return the value we need.
    bind(&minMaxInst);
    if (isMax) {
        vmaxss(second, first, first);
    } else {
        vminss(second, first, first);
    }

    bind(&done);
}

//{{{ check_macroassembler_style
// ===============================================================
// MacroAssembler high-level usage.

// Coverage: 0 hits
void
MacroAssembler::flush()
{
}

// Coverage: 70 hits
void
MacroAssembler::comment(const char* msg)
{
    masm.comment(msg);
}

class MOZ_RAII ScopedMoveResolution
{
    MacroAssembler& masm_;
    MoveResolver& resolver_;

  public:
    // Coverage: 0 hits
    explicit ScopedMoveResolution(MacroAssembler& masm)
      : masm_(masm),
        resolver_(masm.moveResolver())
    {
    }

    // Coverage: 0 hits
    void addMove(Register src, Register dest) {
        if (src != dest) {
            masm_.propagateOOM(resolver_.addMove(MoveOperand(src), MoveOperand(dest), MoveOp::GENERAL));
        }
    }

    // Coverage: 0 hits
    ~ScopedMoveResolution() {
        masm_.propagateOOM(resolver_.resolve());
        if (masm_.oom()) {
            return;
        }

        resolver_.sortMemoryToMemoryMoves();

        MoveEmitter emitter(masm_);
        emitter.emit(resolver_);
        emitter.finish();
    }
};

// This operation really consists of five phases, in order to enforce the
// restriction that on x86_shared, srcDest must be eax and edx will be
// clobbered.
//
//     Input: { rhs, lhsOutput }
//
//  [PUSH] Preserve registers
//  [MOVE] Generate moves to specific registers
//
//  [DIV] Input: { regForRhs, EAX }
//  [DIV] extend EAX into EDX
//  [DIV] x86 Division operator
//  [DIV] Output: { EAX, EDX }
//
//  [MOVE] Move specific registers to outputs
//  [POP] Restore registers
//
//    Output: { lhsOutput, remainderOutput }
// Coverage: 0 hits
void
MacroAssembler::flexibleDivMod32(Register rhs, Register lhsOutput, Register remOutput,
                                 bool isUnsigned, const LiveRegisterSet&)
{
    // Currently this helper can't handle this situation.
    MOZ_ASSERT(lhsOutput != rhs);
    MOZ_ASSERT(lhsOutput != remOutput);

    // Choose a register that is neither edx nor eax to hold the rhs;
    // ebx is chosen arbitrarily, and will be preserved if necessary.
    Register regForRhs = (rhs == eax || rhs == edx) ? ebx : rhs;

    // Add registers we will be clobbering as live, but
    // also remove the set we do not restore.
    LiveRegisterSet preserve;
    preserve.add(edx);
    preserve.add(eax);
    preserve.add(regForRhs);

    preserve.takeUnchecked(lhsOutput);
    preserve.takeUnchecked(remOutput);

    PushRegsInMask(preserve);

    // Marshal registers for the operation.
    {
        ScopedMoveResolution resolution(*this);
        resolution.addMove(rhs, regForRhs);
        resolution.addMove(lhsOutput, eax);
    }
    if (oom()) {
        return;
    }

    // Sign extend eax into edx to make (edx:eax): idiv/udiv are 64-bit.
    if (isUnsigned) {
        mov(ImmWord(0), edx);
        udiv(regForRhs);
    } else {
        cdq();
        idiv(regForRhs);
    }

    {
        ScopedMoveResolution resolution(*this);
        resolution.addMove(eax, lhsOutput);
        resolution.addMove(edx, remOutput);
    }
    if (oom()) {
        return;
    }

    PopRegsInMask(preserve);
}

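// Illustrative sketch (not part of the original file): the value-level
// result the [DIV] phase produces. x86 idiv truncates toward zero, so the
// remainder takes the sign of the dividend. Function name is hypothetical.
#include <cstdint>

static void DivMod32Reference(int32_t lhs, int32_t rhs,
                              int32_t* quotient, int32_t* remainder)
{
    // rhs == 0 and INT32_MIN / -1 fault on x86; callers must rule them out.
    *quotient = lhs / rhs;     // C++ division also truncates toward zero
    *remainder = lhs % rhs;    // what edx holds after idiv
}
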
// Coverage: 0 hits
void
MacroAssembler::flexibleQuotient32(Register rhs, Register srcDest, bool isUnsigned,
                                   const LiveRegisterSet& volatileLiveRegs)
{
    // Choose an arbitrary register that isn't eax, edx, rhs or srcDest.
    AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
    regs.takeUnchecked(eax);
    regs.takeUnchecked(edx);
    regs.takeUnchecked(rhs);
    regs.takeUnchecked(srcDest);

    Register remOut = regs.takeAny();
    push(remOut);
    flexibleDivMod32(rhs, srcDest, remOut, isUnsigned, volatileLiveRegs);
    pop(remOut);
}

// Coverage: 0 hits
void
MacroAssembler::flexibleRemainder32(Register rhs, Register srcDest, bool isUnsigned,
                                    const LiveRegisterSet& volatileLiveRegs)
{
    // Choose an arbitrary register that isn't eax, edx, rhs or srcDest.
    AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
    regs.takeUnchecked(eax);
    regs.takeUnchecked(edx);
    regs.takeUnchecked(rhs);
    regs.takeUnchecked(srcDest);

    Register remOut = regs.takeAny();
    push(remOut);
    flexibleDivMod32(rhs, srcDest, remOut, isUnsigned, volatileLiveRegs);
    mov(remOut, srcDest);
    pop(remOut);
}

// ===============================================================
// Stack manipulation functions.

// Coverage: 148 hits (621 gpr pushes, 636 fpu spills across all calls)
void
MacroAssembler::PushRegsInMask(LiveRegisterSet set)
{
    FloatRegisterSet fpuSet(set.fpus().reduceSetForPush());
    unsigned numFpu = fpuSet.size();
    int32_t diffF = fpuSet.getPushSizeInBytes();
    int32_t diffG = set.gprs().size() * sizeof(intptr_t);

    // On x86, always use push to push the integer registers, as it's fast
    // on modern hardware and it's a small instruction.
    for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
        diffG -= sizeof(intptr_t);
        Push(*iter);
    }
    MOZ_ASSERT(diffG == 0);

    reserveStack(diffF);
    for (FloatRegisterBackwardIterator iter(fpuSet); iter.more(); ++iter) {
        FloatRegister reg = *iter;
        diffF -= reg.size();
        numFpu -= 1;
        Address spillAddress(StackPointer, diffF);
        if (reg.isDouble()) {
            storeDouble(reg, spillAddress);
        } else if (reg.isSingle()) {
            storeFloat32(reg, spillAddress);
        } else if (reg.isSimd128()) {
            storeUnalignedSimd128Float(reg, spillAddress);
        } else {
            MOZ_CRASH("Unknown register type.");
        }
    }
    MOZ_ASSERT(numFpu == 0);
    // x64 padding to keep the stack aligned on uintptr_t. Keep in sync with
    // getPushSizeInBytes.
    diffF -= diffF % sizeof(uintptr_t);
    MOZ_ASSERT(diffF == 0);
}

// Coverage: 0 hits
void
MacroAssembler::storeRegsInMask(LiveRegisterSet set, Address dest, Register)
{
    FloatRegisterSet fpuSet(set.fpus().reduceSetForPush());
    unsigned numFpu = fpuSet.size();
    int32_t diffF = fpuSet.getPushSizeInBytes();
    int32_t diffG = set.gprs().size() * sizeof(intptr_t);

    MOZ_ASSERT(dest.offset >= diffG + diffF);

    for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
        diffG -= sizeof(intptr_t);
        dest.offset -= sizeof(intptr_t);
        storePtr(*iter, dest);
    }
    MOZ_ASSERT(diffG == 0);

    for (FloatRegisterBackwardIterator iter(fpuSet); iter.more(); ++iter) {
        FloatRegister reg = *iter;
        diffF -= reg.size();
        numFpu -= 1;
        dest.offset -= reg.size();
        if (reg.isDouble()) {
            storeDouble(reg, dest);
        } else if (reg.isSingle()) {
            storeFloat32(reg, dest);
        } else if (reg.isSimd128()) {
            storeUnalignedSimd128Float(reg, dest);
        } else {
            MOZ_CRASH("Unknown register type.");
        }
    }
    MOZ_ASSERT(numFpu == 0);
    // x64 padding to keep the stack aligned on uintptr_t. Keep in sync with
    // getPushSizeInBytes.
    diffF -= diffF % sizeof(uintptr_t);
    MOZ_ASSERT(diffF == 0);
}

// Coverage: 142 hits (540 fpu reloads across all calls)
void
MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set, LiveRegisterSet ignore)
{
    FloatRegisterSet fpuSet(set.fpus().reduceSetForPush());
    unsigned numFpu = fpuSet.size();
    int32_t diffG = set.gprs().size() * sizeof(intptr_t);
    int32_t diffF = fpuSet.getPushSizeInBytes();
    const int32_t reservedG = diffG;
    const int32_t reservedF = diffF;

    for (FloatRegisterBackwardIterator iter(fpuSet); iter.more(); ++iter) {
        FloatRegister reg = *iter;
        diffF -= reg.size();
        numFpu -= 1;
        if (ignore.has(reg)) {
            continue;
        }

        Address spillAddress(StackPointer, diffF);
        if (reg.isDouble()) {
            loadDouble(spillAddress, reg);
        } else if (reg.isSingle()) {
            loadFloat32(spillAddress, reg);
        } else if (reg.isSimd128()) {
            loadUnalignedSimd128Float(spillAddress, reg);
        } else {
            MOZ_CRASH("Unknown register type.");
        }
    }
    freeStack(reservedF);
    MOZ_ASSERT(numFpu == 0);
    // x64 padding to keep the stack aligned on uintptr_t. Keep in sync with
    // getPushSizeInBytes.
    diffF -= diffF % sizeof(uintptr_t);
    MOZ_ASSERT(diffF == 0);

    // On x86, use pop to pop the integer registers, if we're not going to
    // ignore any slots, as it's fast on modern hardware and it's a small
    // instruction.
    if (ignore.emptyGeneral()) {
        for (GeneralRegisterForwardIterator iter(set.gprs()); iter.more(); ++iter) {
            diffG -= sizeof(intptr_t);
            Pop(*iter);
        }
    } else {
        for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
            diffG -= sizeof(intptr_t);
            if (!ignore.has(*iter)) {
                loadPtr(Address(StackPointer, diffG), *iter);
            }
        }
        freeStack(reservedG);
    }
    MOZ_ASSERT(diffG == 0);
}

// Coverage: 7 hits
void
MacroAssembler::Push(const Operand op)
{
    push(op);
    adjustFrame(sizeof(intptr_t));
}

// Coverage: 931 hits
void
MacroAssembler::Push(Register reg)
{
    push(reg);
    adjustFrame(sizeof(intptr_t));
}

// Coverage: 424 hits
void
MacroAssembler::Push(const Imm32 imm)
{
    push(imm);
    adjustFrame(sizeof(intptr_t));
}

// Coverage: 704 hits
void
MacroAssembler::Push(const ImmWord imm)
{
    push(imm);
    adjustFrame(sizeof(intptr_t));
}

// Coverage: 704 hits
void
MacroAssembler::Push(const ImmPtr imm)
{
    Push(ImmWord(uintptr_t(imm.value)));
}

// Coverage: 82 hits
void
MacroAssembler::Push(const ImmGCPtr ptr)
{
    push(ptr);
    adjustFrame(sizeof(intptr_t));
}

// Coverage: 0 hits
void
MacroAssembler::Push(FloatRegister t)
{
    push(t);
    adjustFrame(sizeof(double));
}

// Coverage: 0 hits
void
MacroAssembler::PushFlags()
{
    pushFlags();
    adjustFrame(sizeof(intptr_t));
}

// Coverage: 10 hits
void
MacroAssembler::Pop(const Operand op)
{
    pop(op);
    implicitPop(sizeof(intptr_t));
}

// Coverage: 394 hits
void
MacroAssembler::Pop(Register reg)
{
    pop(reg);
    implicitPop(sizeof(intptr_t));
}

// Coverage: 0 hits
void
MacroAssembler::Pop(FloatRegister reg)
{
    pop(reg);
    implicitPop(sizeof(double));
}

// Coverage: 228 hits
void
MacroAssembler::Pop(const ValueOperand& val)
{
    popValue(val);
    implicitPop(sizeof(Value));
}

// Coverage: 0 hits
void
MacroAssembler::PopFlags()
{
    popFlags();
    implicitPop(sizeof(intptr_t));
}

// Coverage: 0 hits
void
MacroAssembler::PopStackPtr()
{
    Pop(StackPointer);
}

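// Illustrative usage sketch (not part of the original file): the invariant
// the capitalized Push/Pop wrappers maintain. Each Push pairs the emitted
// instruction with an adjustFrame()/implicitPop() bookkeeping update, so the
// static frame depth used for stack-relative addressing stays correct:
//
//     uint32_t before = masm.framePushed();
//     masm.Push(reg);                  // emits push + adjustFrame(+word)
//     MOZ_ASSERT(masm.framePushed() == before + sizeof(intptr_t));
//     masm.Pop(reg);                   // emits pop + implicitPop(word)
//     MOZ_ASSERT(masm.framePushed() == before);
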
// ===============================================================
// Simple call functions.

// Coverage: 49 hits
CodeOffset
MacroAssembler::call(Register reg)
{
    return Assembler::call(reg);
}

// Coverage: 1 hit
CodeOffset
MacroAssembler::call(Label* label)
{
    return Assembler::call(label);
}

// Coverage: 337 hits
void
MacroAssembler::call(const Address& addr)
{
    Assembler::call(Operand(addr.base, addr.offset));
}

// Coverage: 0 hits
void
MacroAssembler::call(wasm::SymbolicAddress target)
{
    mov(target, eax);
    Assembler::call(eax);
}

// Coverage: 0 hits
void
MacroAssembler::call(ImmWord target)
{
    Assembler::call(target);
}

// Coverage: 995 hits
void
MacroAssembler::call(ImmPtr target)
{
    Assembler::call(target);
}

// Coverage: 0 hits
void
MacroAssembler::call(JitCode* target)
{
    Assembler::call(target);
}

// Coverage: 0 hits
CodeOffset
MacroAssembler::callWithPatch()
{
    return Assembler::callWithPatch();
}

// Coverage: 0 hits
void
MacroAssembler::patchCall(uint32_t callerOffset, uint32_t calleeOffset)
{
    Assembler::patchCall(callerOffset, calleeOffset);
}

// Coverage: 49 hits
void
MacroAssembler::callAndPushReturnAddress(Register reg)
{
    call(reg);
}

// Coverage: 0 hits
void
MacroAssembler::callAndPushReturnAddress(Label* label)
{
    call(label);
}

// ===============================================================
// Patchable near/far jumps.

// Coverage: 0 hits
CodeOffset
MacroAssembler::farJumpWithPatch()
{
    return Assembler::farJumpWithPatch();
}

// Coverage: 0 hits
void
MacroAssembler::patchFarJump(CodeOffset farJump, uint32_t targetOffset)
{
    Assembler::patchFarJump(farJump, targetOffset);
}

// Coverage: 0 hits
CodeOffset
MacroAssembler::nopPatchableToCall(const wasm::CallSiteDesc& desc)
{
    CodeOffset offset(currentOffset());
    masm.nop_five();
    append(desc, CodeOffset(currentOffset()));
    MOZ_ASSERT_IF(!oom(), size() - offset.offset() == ToggledCallSize(nullptr));
    return offset;
}

// Coverage: 0 hits
void
MacroAssembler::patchNopToCall(uint8_t* callsite, uint8_t* target)
{
    Assembler::patchFiveByteNopToCall(callsite, target);
}

// Coverage: 0 hits
void
MacroAssembler::patchCallToNop(uint8_t* callsite)
{
    Assembler::patchCallToFiveByteNop(callsite);
}

// ===============================================================
// Jit Frames.

// Coverage: 14 hits
uint32_t
MacroAssembler::pushFakeReturnAddress(Register scratch)
{
    CodeLabel cl;

    mov(&cl, scratch);
    Push(scratch);
    bind(&cl);
    uint32_t retAddr = currentOffset();

    addCodeLabel(cl);
    return retAddr;
}

// ===============================================================
// WebAssembly

// Coverage: 0 hits
CodeOffset
MacroAssembler::wasmTrapInstruction()
{
    return ud2();
}

// RAII class that generates the jumps to traps when it's destructed, to
// prevent some code duplication in the outOfLineWasmTruncateXtoY methods.
// Coverage: 0 hits
struct MOZ_RAII AutoHandleWasmTruncateToIntErrors
{
    MacroAssembler& masm;
    Label inputIsNaN;
    Label intOverflow;
    wasm::BytecodeOffset off;

    explicit AutoHandleWasmTruncateToIntErrors(MacroAssembler& masm, wasm::BytecodeOffset off)
      : masm(masm), off(off)
    { }

    ~AutoHandleWasmTruncateToIntErrors() {
        // Handle errors. These cases are not in arbitrary order: code will
        // fall through to intOverflow.
        masm.bind(&intOverflow);
        masm.wasmTrap(wasm::Trap::IntegerOverflow, off);

        masm.bind(&inputIsNaN);
        masm.wasmTrap(wasm::Trap::InvalidConversionToInteger, off);
    }
};

// Coverage: 0 hits
void
MacroAssembler::wasmTruncateDoubleToInt32(FloatRegister input, Register output,
                                          bool isSaturating, Label* oolEntry)
{
    vcvttsd2si(input, output);
    cmp32(output, Imm32(1));
    j(Assembler::Overflow, oolEntry);
}

// Coverage: 0 hits
void
MacroAssembler::wasmTruncateFloat32ToInt32(FloatRegister input, Register output,
                                           bool isSaturating, Label* oolEntry)
{
    vcvttss2si(input, output);
    cmp32(output, Imm32(1));
    j(Assembler::Overflow, oolEntry);
}

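// Illustrative sketch (not part of the original file): why `cmp32(output,
// Imm32(1))` plus a jump-on-overflow detects failure. vcvttsd2si and
// vcvttss2si write the sentinel 0x80000000 (INT32_MIN) for NaN and
// out-of-range inputs, and `r - 1` overflows exactly when r == INT32_MIN.
#include <cstdint>

static bool TruncationNeedsOolCheck(int32_t rawResult)
{
    // A genuine INT32_MIN result also lands here; the OOL paths below
    // disambiguate it from a real failure.
    return rawResult == INT32_MIN;
}
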
// Coverage: 0 hits
void
MacroAssembler::oolWasmTruncateCheckF64ToI32(FloatRegister input, Register output,
                                             TruncFlags flags, wasm::BytecodeOffset off,
                                             Label* rejoin)
{
    bool isUnsigned = flags & TRUNC_UNSIGNED;
    bool isSaturating = flags & TRUNC_SATURATING;

    if (isSaturating) {
        if (isUnsigned) {
            // Negative overflow and NaN both are converted to 0, and the only
            // other case is positive overflow which is converted to UINT32_MAX.
            Label nonNegative;
            loadConstantDouble(0.0, ScratchDoubleReg);
            branchDouble(Assembler::DoubleGreaterThanOrEqual, input, ScratchDoubleReg, &nonNegative);
            move32(Imm32(0), output);
            jump(rejoin);
            bind(&nonNegative);

            move32(Imm32(UINT32_MAX), output);
        } else {
            // Negative overflow is already saturated to INT32_MIN, so we only
            // have to handle NaN and positive overflow here.
            Label notNaN;
            branchDouble(Assembler::DoubleOrdered, input, input, &notNaN);
            move32(Imm32(0), output);
            jump(rejoin);
            bind(&notNaN);

            loadConstantDouble(0.0, ScratchDoubleReg);
            branchDouble(Assembler::DoubleLessThan, input, ScratchDoubleReg, rejoin);
            sub32(Imm32(1), output);
        }
        jump(rejoin);
        return;
    }

    AutoHandleWasmTruncateToIntErrors traps(*this, off);

    // Eagerly take care of NaNs.
    branchDouble(Assembler::DoubleUnordered, input, input, &traps.inputIsNaN);

    // For unsigned, fall through to intOverflow failure case.
    if (isUnsigned) {
        return;
    }

    // Handle special values.

    // We've used vcvttsd2si. The only valid double values that can
    // truncate to INT32_MIN are in ]INT32_MIN - 1; INT32_MIN].
    loadConstantDouble(double(INT32_MIN) - 1.0, ScratchDoubleReg);
    branchDouble(Assembler::DoubleLessThanOrEqual, input, ScratchDoubleReg, &traps.intOverflow);

    loadConstantDouble(0.0, ScratchDoubleReg);
    branchDouble(Assembler::DoubleGreaterThan, input, ScratchDoubleReg, &traps.intOverflow);
    jump(rejoin);
}

// Coverage: 0 hits
void
MacroAssembler::oolWasmTruncateCheckF32ToI32(FloatRegister input, Register output,
                                             TruncFlags flags, wasm::BytecodeOffset off,
                                             Label* rejoin)
{
    bool isUnsigned = flags & TRUNC_UNSIGNED;
    bool isSaturating = flags & TRUNC_SATURATING;

    if (isSaturating) {
        if (isUnsigned) {
            // Negative overflow and NaN both are converted to 0, and the only
            // other case is positive overflow which is converted to UINT32_MAX.
            Label nonNegative;
            loadConstantFloat32(0.0f, ScratchFloat32Reg);
            branchFloat(Assembler::DoubleGreaterThanOrEqual, input, ScratchFloat32Reg, &nonNegative);
            move32(Imm32(0), output);
            jump(rejoin);
            bind(&nonNegative);

            move32(Imm32(UINT32_MAX), output);
        } else {
            // Negative overflow is already saturated to INT32_MIN, so we only
            // have to handle NaN and positive overflow here.
            Label notNaN;
            branchFloat(Assembler::DoubleOrdered, input, input, &notNaN);
            move32(Imm32(0), output);
            jump(rejoin);
            bind(&notNaN);

            loadConstantFloat32(0.0f, ScratchFloat32Reg);
            branchFloat(Assembler::DoubleLessThan, input, ScratchFloat32Reg, rejoin);
            sub32(Imm32(1), output);
        }
        jump(rejoin);
        return;
    }

    AutoHandleWasmTruncateToIntErrors traps(*this, off);

    // Eagerly take care of NaNs.
    branchFloat(Assembler::DoubleUnordered, input, input, &traps.inputIsNaN);

    // For unsigned, fall through to intOverflow failure case.
    if (isUnsigned) {
        return;
    }

    // Handle special values.

    // We've used vcvttss2si. Check that the input wasn't
    // float(INT32_MIN), which is the only legitimate input that
    // would truncate to INT32_MIN.
    loadConstantFloat32(float(INT32_MIN), ScratchFloat32Reg);
    branchFloat(Assembler::DoubleNotEqual, input, ScratchFloat32Reg, &traps.intOverflow);
    jump(rejoin);
}

// Coverage: 0 hits
void
MacroAssembler::oolWasmTruncateCheckF64ToI64(FloatRegister input, Register64 output,
                                             TruncFlags flags, wasm::BytecodeOffset off,
                                             Label* rejoin)
{
    bool isUnsigned = flags & TRUNC_UNSIGNED;
    bool isSaturating = flags & TRUNC_SATURATING;

    if (isSaturating) {
        if (isUnsigned) {
            // Negative overflow and NaN both are converted to 0, and the only
            // other case is positive overflow which is converted to UINT64_MAX.
            Label nonNegative;
            loadConstantDouble(0.0, ScratchDoubleReg);
            branchDouble(Assembler::DoubleGreaterThanOrEqual, input, ScratchDoubleReg, &nonNegative);
            move64(Imm64(0), output);
            jump(rejoin);
            bind(&nonNegative);

            move64(Imm64(UINT64_MAX), output);
        } else {
            // Negative overflow is already saturated to INT64_MIN, so we only
            // have to handle NaN and positive overflow here.
            Label notNaN;
            branchDouble(Assembler::DoubleOrdered, input, input, &notNaN);
            move64(Imm64(0), output);
            jump(rejoin);
            bind(&notNaN);

            loadConstantDouble(0.0, ScratchDoubleReg);
            branchDouble(Assembler::DoubleLessThan, input, ScratchDoubleReg, rejoin);
            sub64(Imm64(1), output);
        }
        jump(rejoin);
        return;
    }

    AutoHandleWasmTruncateToIntErrors traps(*this, off);

    // Eagerly take care of NaNs.
    branchDouble(Assembler::DoubleUnordered, input, input, &traps.inputIsNaN);

    // Handle special values.
    if (isUnsigned) {
        loadConstantDouble(0.0, ScratchDoubleReg);
        branchDouble(Assembler::DoubleGreaterThan, input, ScratchDoubleReg, &traps.intOverflow);
        loadConstantDouble(-1.0, ScratchDoubleReg);
        branchDouble(Assembler::DoubleLessThanOrEqual, input, ScratchDoubleReg, &traps.intOverflow);
        jump(rejoin);
        return;
    }

    // We've used vcvttsd2sq. The only legitimate value whose i64
    // truncation is INT64_MIN is double(INT64_MIN): the exponent is so
    // high that the resolution around it is much more than 1.
    loadConstantDouble(double(int64_t(INT64_MIN)), ScratchDoubleReg);
    branchDouble(Assembler::DoubleNotEqual, input, ScratchDoubleReg, &traps.intOverflow);
    jump(rejoin);
}

// Coverage: 0 hits
void
MacroAssembler::oolWasmTruncateCheckF32ToI64(FloatRegister input, Register64 output,
                                             TruncFlags flags, wasm::BytecodeOffset off,
                                             Label* rejoin)
{
    bool isUnsigned = flags & TRUNC_UNSIGNED;
    bool isSaturating = flags & TRUNC_SATURATING;

    if (isSaturating) {
        if (isUnsigned) {
            // Negative overflow and NaN both are converted to 0, and the only
            // other case is positive overflow which is converted to UINT64_MAX.
            Label nonNegative;
            loadConstantFloat32(0.0f, ScratchFloat32Reg);
            branchFloat(Assembler::DoubleGreaterThanOrEqual, input, ScratchFloat32Reg, &nonNegative);
            move64(Imm64(0), output);
            jump(rejoin);
            bind(&nonNegative);

            move64(Imm64(UINT64_MAX), output);
        } else {
            // Negative overflow is already saturated to INT64_MIN, so we only
            // have to handle NaN and positive overflow here.
            Label notNaN;
            branchFloat(Assembler::DoubleOrdered, input, input, &notNaN);
            move64(Imm64(0), output);
            jump(rejoin);
            bind(&notNaN);

            loadConstantFloat32(0.0f, ScratchFloat32Reg);
            branchFloat(Assembler::DoubleLessThan, input, ScratchFloat32Reg, rejoin);
            sub64(Imm64(1), output);
        }
        jump(rejoin);
        return;
    }

    AutoHandleWasmTruncateToIntErrors traps(*this, off);

    // Eagerly take care of NaNs.
    branchFloat(Assembler::DoubleUnordered, input, input, &traps.inputIsNaN);

    // Handle special values.
    if (isUnsigned) {
        loadConstantFloat32(0.0f, ScratchFloat32Reg);
        branchFloat(Assembler::DoubleGreaterThan, input, ScratchFloat32Reg, &traps.intOverflow);
        loadConstantFloat32(-1.0f, ScratchFloat32Reg);
        branchFloat(Assembler::DoubleLessThanOrEqual, input, ScratchFloat32Reg, &traps.intOverflow);
        jump(rejoin);
        return;
    }

    // We've used vcvttss2sq. See comment in oolWasmTruncateCheckF64ToI64.
    loadConstantFloat32(float(int64_t(INT64_MIN)), ScratchFloat32Reg);
    branchFloat(Assembler::DoubleNotEqual, input, ScratchFloat32Reg, &traps.intOverflow);
    jump(rejoin);
}

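// Illustrative sketch (not part of the original file): the end-to-end
// saturating double->uint32 semantics the unsigned branches above implement.
// Function name is hypothetical.
#include <cmath>
#include <cstdint>

static uint32_t SaturatingTruncateDoubleToUint32(double d)
{
    if (std::isnan(d) || d <= -1.0) {
        return 0;                     // NaN and negative overflow --> 0
    }
    if (d >= 4294967296.0) {          // 2^32
        return UINT32_MAX;            // positive overflow --> UINT32_MAX
    }
    return uint32_t(d);               // in range: truncate toward zero
}
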
// Coverage: 0 hits
void
MacroAssembler::enterFakeExitFrameForWasm(Register cxreg, Register scratch, ExitFrameType type)
{
    enterFakeExitFrame(cxreg, scratch, type);
}

// ========================================================================
// Primitive atomic operations.

// Coverage: 0 hits
static void
ExtendTo32(MacroAssembler& masm, Scalar::Type type, Register r)
{
    switch (Scalar::byteSize(type)) {
      case 1:
        if (Scalar::isSignedIntType(type)) {
            masm.movsbl(r, r);
        } else {
            masm.movzbl(r, r);
        }
        break;
      case 2:
        if (Scalar::isSignedIntType(type)) {
            masm.movswl(r, r);
        } else {
            masm.movzwl(r, r);
        }
        break;
      default:
        break;
    }
}

// Coverage: 0 hits
static inline void
CheckBytereg(Register r) {
#ifdef DEBUG
    AllocatableGeneralRegisterSet byteRegs(Registers::SingleByteRegs);
    MOZ_ASSERT(byteRegs.has(r));
#endif
}

// Coverage: 0 hits
static inline void
CheckBytereg(Imm32 r) {
    // Nothing.
}

// Coverage: 0 hits
template<typename T>
static void
CompareExchange(MacroAssembler& masm, const wasm::MemoryAccessDesc* access, Scalar::Type type,
                const T& mem, Register oldval, Register newval, Register output)
{
    MOZ_ASSERT(output == eax);

    if (oldval != output) {
        masm.movl(oldval, output);
    }

    if (access) {
        masm.append(*access, masm.size());
    }

    switch (Scalar::byteSize(type)) {
      case 1:
        CheckBytereg(newval);
        masm.lock_cmpxchgb(newval, Operand(mem));
        break;
      case 2:
        masm.lock_cmpxchgw(newval, Operand(mem));
        break;
      case 4:
        masm.lock_cmpxchgl(newval, Operand(mem));
        break;
    }

    ExtendTo32(masm, type, output);
}
Unexecuted instantiation: Unified_cpp_js_src27.cpp:void CompareExchange<js::jit::Address>(js::jit::MacroAssembler&, js::wasm::MemoryAccessDesc const*, js::Scalar::Type, js::jit::Address const&, js::jit::Register, js::jit::Register, js::jit::Register)
Unexecuted instantiation: Unified_cpp_js_src27.cpp:void CompareExchange<js::jit::BaseIndex>(js::jit::MacroAssembler&, js::wasm::MemoryAccessDesc const*, js::Scalar::Type, js::jit::BaseIndex const&, js::jit::Register, js::jit::Register, js::jit::Register)

// Coverage: 0 hits
void
MacroAssembler::compareExchange(Scalar::Type type, const Synchronization&, const Address& mem,
                                Register oldval, Register newval, Register output)
{
    CompareExchange(*this, nullptr, type, mem, oldval, newval, output);
}

// Coverage: 0 hits
void
MacroAssembler::compareExchange(Scalar::Type type, const Synchronization&, const BaseIndex& mem,
                                Register oldval, Register newval, Register output)
{
    CompareExchange(*this, nullptr, type, mem, oldval, newval, output);
}

// Coverage: 0 hits
void
MacroAssembler::wasmCompareExchange(const wasm::MemoryAccessDesc& access, const Address& mem,
                                    Register oldval, Register newval, Register output)
{
    CompareExchange(*this, &access, access.type(), mem, oldval, newval, output);
}

// Coverage: 0 hits
void
MacroAssembler::wasmCompareExchange(const wasm::MemoryAccessDesc& access, const BaseIndex& mem,
                                    Register oldval, Register newval, Register output)
{
    CompareExchange(*this, &access, access.type(), mem, oldval, newval, output);
}

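// Illustrative sketch (not part of the original file): the semantics of the
// lock cmpxchg sequence above, in std::atomic form. The previous memory
// value always ends up in `output` (eax), whether or not the swap happened.
#include <atomic>
#include <cstdint>

static uint32_t CompareExchangeReference(std::atomic<uint32_t>& mem,
                                         uint32_t oldval, uint32_t newval)
{
    uint32_t expected = oldval;               // cmpxchg compares against eax
    mem.compare_exchange_strong(expected, newval);
    return expected;                          // eax: the value found in memory
}
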
// Coverage: 0 hits
template<typename T>
static void
AtomicExchange(MacroAssembler& masm, const wasm::MemoryAccessDesc* access, Scalar::Type type,
               const T& mem, Register value, Register output)
{
    if (value != output) {
        masm.movl(value, output);
    }

    if (access) {
        masm.append(*access, masm.size());
    }

    switch (Scalar::byteSize(type)) {
      case 1:
        CheckBytereg(output);
        masm.xchgb(output, Operand(mem));
        break;
      case 2:
        masm.xchgw(output, Operand(mem));
        break;
      case 4:
        masm.xchgl(output, Operand(mem));
        break;
      default:
        MOZ_CRASH("Invalid");
    }
    ExtendTo32(masm, type, output);
}
Unexecuted instantiation: Unified_cpp_js_src27.cpp:void AtomicExchange<js::jit::Address>(js::jit::MacroAssembler&, js::wasm::MemoryAccessDesc const*, js::Scalar::Type, js::jit::Address const&, js::jit::Register, js::jit::Register)
Unexecuted instantiation: Unified_cpp_js_src27.cpp:void AtomicExchange<js::jit::BaseIndex>(js::jit::MacroAssembler&, js::wasm::MemoryAccessDesc const*, js::Scalar::Type, js::jit::BaseIndex const&, js::jit::Register, js::jit::Register)

// Coverage: 0 hits
void
MacroAssembler::atomicExchange(Scalar::Type type, const Synchronization&, const Address& mem,
                               Register value, Register output)
{
    AtomicExchange(*this, nullptr, type, mem, value, output);
}

// Coverage: 0 hits
void
MacroAssembler::atomicExchange(Scalar::Type type, const Synchronization&, const BaseIndex& mem,
                               Register value, Register output)
{
    AtomicExchange(*this, nullptr, type, mem, value, output);
}

// Coverage: 0 hits
void
MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access, const Address& mem,
                                   Register value, Register output)
{
    AtomicExchange(*this, &access, access.type(), mem, value, output);
}

// Coverage: 0 hits
void
MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access, const BaseIndex& mem,
                                   Register value, Register output)
{
    AtomicExchange(*this, &access, access.type(), mem, value, output);
}

// Coverage: 0 hits
static void
SetupValue(MacroAssembler& masm, AtomicOp op, Imm32 src, Register output) {
    if (op == AtomicFetchSubOp) {
        masm.movl(Imm32(-src.value), output);
    } else {
        masm.movl(src, output);
    }
}

// Coverage: 0 hits
static void
SetupValue(MacroAssembler& masm, AtomicOp op, Register src, Register output) {
    if (src != output) {
        masm.movl(src, output);
    }
    if (op == AtomicFetchSubOp) {
        masm.negl(output);
    }
}

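// Illustrative sketch (not part of the original file): why SetupValue
// negates for subtraction. x86 has lock xadd but no fetch-and-subtract, so
// AtomicFetchSubOp is lowered to fetch-and-add of the two's-complement
// negation. Function name is hypothetical.
#include <atomic>
#include <cstdint>

static uint32_t FetchSubViaXadd(std::atomic<uint32_t>& mem, uint32_t v)
{
    // fetch_sub(mem, v) == fetch_add(mem, -v) under wraparound arithmetic.
    return mem.fetch_add(uint32_t(0) - v);
}
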
// Coverage: 0 hits
template<typename T, typename V>
static void
AtomicFetchOp(MacroAssembler& masm, const wasm::MemoryAccessDesc* access, Scalar::Type arrayType,
              AtomicOp op, V value, const T& mem, Register temp, Register output)
{
    // Note: value can be an Imm or a Register.

#define ATOMIC_BITOP_BODY(LOAD, OP, LOCK_CMPXCHG)                       \
    do {                                                                \
        MOZ_ASSERT(output != temp);                                     \
        MOZ_ASSERT(output == eax);                                      \
        if (access) masm.append(*access, masm.size());                  \
        masm.LOAD(Operand(mem), eax);                                   \
        Label again;                                                    \
        masm.bind(&again);                                              \
        masm.movl(eax, temp);                                           \
        masm.OP(value, temp);                                           \
        masm.LOCK_CMPXCHG(temp, Operand(mem));                          \
        masm.j(MacroAssembler::NonZero, &again);                        \
    } while (0)

    MOZ_ASSERT_IF(op == AtomicFetchAddOp || op == AtomicFetchSubOp, temp == InvalidReg);

    switch (Scalar::byteSize(arrayType)) {
      case 1:
        CheckBytereg(value);
        CheckBytereg(output);
        switch (op) {
          case AtomicFetchAddOp:
          case AtomicFetchSubOp:
            SetupValue(masm, op, value, output);
            if (access) masm.append(*access, masm.size());
            masm.lock_xaddb(output, Operand(mem));
            break;
          case AtomicFetchAndOp:
            CheckBytereg(temp);
            ATOMIC_BITOP_BODY(movb, andl, lock_cmpxchgb);
            break;
          case AtomicFetchOrOp:
            CheckBytereg(temp);
            ATOMIC_BITOP_BODY(movb, orl, lock_cmpxchgb);
            break;
          case AtomicFetchXorOp:
            CheckBytereg(temp);
            ATOMIC_BITOP_BODY(movb, xorl, lock_cmpxchgb);
            break;
          default:
            MOZ_CRASH();
        }
        break;
      case 2:
        switch (op) {
          case AtomicFetchAddOp:
          case AtomicFetchSubOp:
            SetupValue(masm, op, value, output);
            if (access) masm.append(*access, masm.size());
            masm.lock_xaddw(output, Operand(mem));
            break;
          case AtomicFetchAndOp:
            ATOMIC_BITOP_BODY(movw, andl, lock_cmpxchgw);
            break;
          case AtomicFetchOrOp:
            ATOMIC_BITOP_BODY(movw, orl, lock_cmpxchgw);
            break;
          case AtomicFetchXorOp:
            ATOMIC_BITOP_BODY(movw, xorl, lock_cmpxchgw);
            break;
          default:
            MOZ_CRASH();
        }
        break;
      case 4:
        switch (op) {
          case AtomicFetchAddOp:
          case AtomicFetchSubOp:
            SetupValue(masm, op, value, output);
            if (access) masm.append(*access, masm.size());
            masm.lock_xaddl(output, Operand(mem));
            break;
          case AtomicFetchAndOp:
            ATOMIC_BITOP_BODY(movl, andl, lock_cmpxchgl);
            break;
          case AtomicFetchOrOp:
            ATOMIC_BITOP_BODY(movl, orl, lock_cmpxchgl);
            break;
          case AtomicFetchXorOp:
            ATOMIC_BITOP_BODY(movl, xorl, lock_cmpxchgl);
            break;
          default:
            MOZ_CRASH();
        }
        break;
    }
    ExtendTo32(masm, arrayType, output);

#undef ATOMIC_BITOP_BODY
}
Unexecuted instantiation: Unified_cpp_js_src27.cpp:void AtomicFetchOp<js::jit::BaseIndex, js::jit::Register>(js::jit::MacroAssembler&, js::wasm::MemoryAccessDesc const*, js::Scalar::Type, js::jit::AtomicOp, js::jit::Register, js::jit::BaseIndex const&, js::jit::Register, js::jit::Register)
Unexecuted instantiation: Unified_cpp_js_src27.cpp:void AtomicFetchOp<js::jit::Address, js::jit::Register>(js::jit::MacroAssembler&, js::wasm::MemoryAccessDesc const*, js::Scalar::Type, js::jit::AtomicOp, js::jit::Register, js::jit::Address const&, js::jit::Register, js::jit::Register)
Unexecuted instantiation: Unified_cpp_js_src27.cpp:void AtomicFetchOp<js::jit::BaseIndex, js::jit::Imm32>(js::jit::MacroAssembler&, js::wasm::MemoryAccessDesc const*, js::Scalar::Type, js::jit::AtomicOp, js::jit::Imm32, js::jit::BaseIndex const&, js::jit::Register, js::jit::Register)
Unexecuted instantiation: Unified_cpp_js_src27.cpp:void AtomicFetchOp<js::jit::Address, js::jit::Imm32>(js::jit::MacroAssembler&, js::wasm::MemoryAccessDesc const*, js::Scalar::Type, js::jit::AtomicOp, js::jit::Imm32, js::jit::Address const&, js::jit::Register, js::jit::Register)

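// Illustrative sketch (not part of the original file): the retry loop that
// ATOMIC_BITOP_BODY expands to, in std::atomic form. x86 has xadd for
// fetch-add but no flag-returning fetch-and/or/xor, so the old value is
// loaded and the update re-attempted with cmpxchg until it sticks.
#include <atomic>
#include <cstdint>

static uint32_t FetchAndReference(std::atomic<uint32_t>& mem, uint32_t value)
{
    uint32_t old = mem.load();                            // movl (mem), %eax
    while (!mem.compare_exchange_weak(old, old & value)) {
        // On failure, `old` is refreshed with the current contents; retry.
    }
    return old;                                           // eax: the old value
}
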
// Coverage: 0 hits
void
MacroAssembler::atomicFetchOp(Scalar::Type arrayType, const Synchronization&, AtomicOp op,
                              Register value, const BaseIndex& mem, Register temp, Register output)
{
    AtomicFetchOp(*this, nullptr, arrayType, op, value, mem, temp, output);
}

// Coverage: 0 hits
void
MacroAssembler::atomicFetchOp(Scalar::Type arrayType, const Synchronization&, AtomicOp op,
                              Register value, const Address& mem, Register temp, Register output)
{
    AtomicFetchOp(*this, nullptr, arrayType, op, value, mem, temp, output);
}

// Coverage: 0 hits
void
MacroAssembler::atomicFetchOp(Scalar::Type arrayType, const Synchronization&, AtomicOp op,
                              Imm32 value, const BaseIndex& mem, Register temp, Register output)
{
    AtomicFetchOp(*this, nullptr, arrayType, op, value, mem, temp, output);
}

// Coverage: 0 hits
void
MacroAssembler::atomicFetchOp(Scalar::Type arrayType, const Synchronization&, AtomicOp op,
                              Imm32 value, const Address& mem, Register temp, Register output)
{
    AtomicFetchOp(*this, nullptr, arrayType, op, value, mem, temp, output);
}

// Coverage: 0 hits
void
MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op, Register value,
                                  const Address& mem, Register temp, Register output)
{
    AtomicFetchOp(*this, &access, access.type(), op, value, mem, temp, output);
}

// Coverage: 0 hits
void
MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op, Imm32 value,
                                  const Address& mem, Register temp, Register output)
{
    AtomicFetchOp(*this, &access, access.type(), op, value, mem, temp, output);
}

// Coverage: 0 hits
void
MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op, Register value,
                                  const BaseIndex& mem, Register temp, Register output)
{
    AtomicFetchOp(*this, &access, access.type(), op, value, mem, temp, output);
}

// Coverage: 0 hits
void
MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op, Imm32 value,
                                  const BaseIndex& mem, Register temp, Register output)
{
    AtomicFetchOp(*this, &access, access.type(), op, value, mem, temp, output);
}

// Coverage: 0 hits
template<typename T, typename V>
static void
AtomicEffectOp(MacroAssembler& masm, const wasm::MemoryAccessDesc* access, Scalar::Type arrayType,
               AtomicOp op, V value, const T& mem)
{
    if (access) {
        masm.append(*access, masm.size());
    }

    switch (Scalar::byteSize(arrayType)) {
      case 1:
        switch (op) {
          case AtomicFetchAddOp: masm.lock_addb(value, Operand(mem)); break;
          case AtomicFetchSubOp: masm.lock_subb(value, Operand(mem)); break;
          case AtomicFetchAndOp: masm.lock_andb(value, Operand(mem)); break;
          case AtomicFetchOrOp:  masm.lock_orb(value, Operand(mem)); break;
          case AtomicFetchXorOp: masm.lock_xorb(value, Operand(mem)); break;
          default:
            MOZ_CRASH();
        }
        break;
      case 2:
        switch (op) {
          case AtomicFetchAddOp: masm.lock_addw(value, Operand(mem)); break;
          case AtomicFetchSubOp: masm.lock_subw(value, Operand(mem)); break;
          case AtomicFetchAndOp: masm.lock_andw(value, Operand(mem)); break;
          case AtomicFetchOrOp:  masm.lock_orw(value, Operand(mem)); break;
          case AtomicFetchXorOp: masm.lock_xorw(value, Operand(mem)); break;
          default:
            MOZ_CRASH();
        }
        break;
      case 4:
        switch (op) {
          case AtomicFetchAddOp: masm.lock_addl(value, Operand(mem)); break;
          case AtomicFetchSubOp: masm.lock_subl(value, Operand(mem)); break;
          case AtomicFetchAndOp: masm.lock_andl(value, Operand(mem)); break;
          case AtomicFetchOrOp:  masm.lock_orl(value, Operand(mem)); break;
          case AtomicFetchXorOp: masm.lock_xorl(value, Operand(mem)); break;
          default:
            MOZ_CRASH();
        }
        break;
      default:
        MOZ_CRASH();
    }
}
Unexecuted instantiation: Unified_cpp_js_src27.cpp:void AtomicEffectOp<js::jit::Address, js::jit::Register>(js::jit::MacroAssembler&, js::wasm::MemoryAccessDesc const*, js::Scalar::Type, js::jit::AtomicOp, js::jit::Register, js::jit::Address const&)
Unexecuted instantiation: Unified_cpp_js_src27.cpp:void AtomicEffectOp<js::jit::Address, js::jit::Imm32>(js::jit::MacroAssembler&, js::wasm::MemoryAccessDesc const*, js::Scalar::Type, js::jit::AtomicOp, js::jit::Imm32, js::jit::Address const&)
Unexecuted instantiation: Unified_cpp_js_src27.cpp:void AtomicEffectOp<js::jit::BaseIndex, js::jit::Register>(js::jit::MacroAssembler&, js::wasm::MemoryAccessDesc const*, js::Scalar::Type, js::jit::AtomicOp, js::jit::Register, js::jit::BaseIndex const&)
Unexecuted instantiation: Unified_cpp_js_src27.cpp:void AtomicEffectOp<js::jit::BaseIndex, js::jit::Imm32>(js::jit::MacroAssembler&, js::wasm::MemoryAccessDesc const*, js::Scalar::Type, js::jit::AtomicOp, js::jit::Imm32, js::jit::BaseIndex const&)

// Coverage: 0 hits
void
MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op, Register value,
                                   const Address& mem, Register temp)
{
    MOZ_ASSERT(temp == InvalidReg);
    AtomicEffectOp(*this, &access, access.type(), op, value, mem);
}

// Coverage: 0 hits
void
MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op, Imm32 value,
                                   const Address& mem, Register temp)
{
    MOZ_ASSERT(temp == InvalidReg);
    AtomicEffectOp(*this, &access, access.type(), op, value, mem);
}

// Coverage: 0 hits
void
MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op, Register value,
                                   const BaseIndex& mem, Register temp)
{
    MOZ_ASSERT(temp == InvalidReg);
    AtomicEffectOp(*this, &access, access.type(), op, value, mem);
}

// Coverage: 0 hits
void
MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op, Imm32 value,
                                   const BaseIndex& mem, Register temp)
{
    MOZ_ASSERT(temp == InvalidReg);
    AtomicEffectOp(*this, &access, access.type(), op, value, mem);
}

// ========================================================================
1487
// JS atomic operations.
1488
1489
template<typename T>
1490
static void
1491
CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
1492
                  const T& mem, Register oldval, Register newval, Register temp, AnyRegister output)
1493
0
{
1494
0
    if (arrayType == Scalar::Uint32) {
1495
0
        masm.compareExchange(arrayType, sync, mem, oldval, newval, temp);
1496
0
        masm.convertUInt32ToDouble(temp, output.fpu());
1497
0
    } else {
1498
0
        masm.compareExchange(arrayType, sync, mem, oldval, newval, output.gpr());
1499
0
    }
1500
0
}
Unexecuted instantiation: Unified_cpp_js_src27.cpp:void CompareExchangeJS<js::jit::Address>(js::jit::MacroAssembler&, js::Scalar::Type, js::jit::Synchronization const&, js::jit::Address const&, js::jit::Register, js::jit::Register, js::jit::Register, js::jit::AnyRegister)
Unexecuted instantiation: Unified_cpp_js_src27.cpp:void CompareExchangeJS<js::jit::BaseIndex>(js::jit::MacroAssembler&, js::Scalar::Type, js::jit::Synchronization const&, js::jit::BaseIndex const&, js::jit::Register, js::jit::Register, js::jit::Register, js::jit::AnyRegister)
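Note: the Uint32 special case exists because the raw 32-bit result may exceed INT32_MAX and therefore cannot be represented as an int32 JS value; it is instead materialized as a double (hence the convertUInt32ToDouble into output.fpu()). A minimal illustration of the range issue (names here are illustrative, not from this file):

#include <cstdint>

// 0xFFFFFFFF must reach JS as 4294967295.0, not as the int32 value -1;
// this mirrors the convertUInt32ToDouble step above.
double uint32ResultToJSNumber(uint32_t raw)
{
    return static_cast<double>(raw);
}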
1501
1502
void
1503
MacroAssembler::compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
1504
                                  const Address& mem, Register oldval, Register newval,
1505
                                  Register temp, AnyRegister output)
1506
0
{
1507
0
    CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, temp, output);
1508
0
}
1509
1510
void
1511
MacroAssembler::compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
1512
                                  const BaseIndex& mem, Register oldval, Register newval,
1513
                                  Register temp, AnyRegister output)
1514
0
{
1515
0
    CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, temp, output);
1516
0
}
1517
1518
template<typename T>
1519
static void
1520
AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
1521
                 const T& mem, Register value, Register temp, AnyRegister output)
1522
0
{
1523
0
    if (arrayType == Scalar::Uint32) {
1524
0
        masm.atomicExchange(arrayType, sync, mem, value, temp);
1525
0
        masm.convertUInt32ToDouble(temp, output.fpu());
1526
0
    } else {
1527
0
        masm.atomicExchange(arrayType, sync, mem, value, output.gpr());
1528
0
    }
1529
0
}
Unexecuted instantiation: Unified_cpp_js_src27.cpp:void AtomicExchangeJS<js::jit::Address>(js::jit::MacroAssembler&, js::Scalar::Type, js::jit::Synchronization const&, js::jit::Address const&, js::jit::Register, js::jit::Register, js::jit::AnyRegister)
Unexecuted instantiation: Unified_cpp_js_src27.cpp:void AtomicExchangeJS<js::jit::BaseIndex>(js::jit::MacroAssembler&, js::Scalar::Type, js::jit::Synchronization const&, js::jit::BaseIndex const&, js::jit::Register, js::jit::Register, js::jit::AnyRegister)
1530
1531
void
1532
MacroAssembler::atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
1533
                                 const Address& mem, Register value, Register temp,
1534
                                 AnyRegister output)
1535
0
{
1536
0
    AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
1537
0
}
1538
1539
void
1540
MacroAssembler::atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
1541
                                 const BaseIndex& mem, Register value, Register temp,
1542
                                 AnyRegister output)
1543
0
{
1544
0
    AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
1545
0
}
1546
1547
template<typename T>
1548
static void
1549
AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
1550
                AtomicOp op, Register value, const T& mem, Register temp1, Register temp2,
1551
                AnyRegister output)
1552
0
{
1553
0
    if (arrayType == Scalar::Uint32) {
1554
0
        masm.atomicFetchOp(arrayType, sync, op, value, mem, temp2, temp1);
1555
0
        masm.convertUInt32ToDouble(temp1, output.fpu());
1556
0
    } else {
1557
0
        masm.atomicFetchOp(arrayType, sync, op, value, mem, temp1, output.gpr());
1558
0
    }
1559
0
}
Unexecuted instantiation: Unified_cpp_js_src27.cpp:void AtomicFetchOpJS<js::jit::Address>(js::jit::MacroAssembler&, js::Scalar::Type, js::jit::Synchronization const&, js::jit::AtomicOp, js::jit::Register, js::jit::Address const&, js::jit::Register, js::jit::Register, js::jit::AnyRegister)
Unexecuted instantiation: Unified_cpp_js_src27.cpp:void AtomicFetchOpJS<js::jit::BaseIndex>(js::jit::MacroAssembler&, js::Scalar::Type, js::jit::Synchronization const&, js::jit::AtomicOp, js::jit::Register, js::jit::BaseIndex const&, js::jit::Register, js::jit::Register, js::jit::AnyRegister)
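Note: unlike the effect ops, the fetch variants must return the old value. On x86 only add/sub have a single-instruction fetch form (`lock xadd`, with sub handled by negating the operand); and/or/xor have no such instruction and presumably fall back to a compare-exchange loop, which is what the extra temp registers are for. A sketch of that loop's shape in portable C++ (an assumption-laden analogy, not the emitted code):

#include <atomic>
#include <cstdint>

// The compare-exchange loop shape used when no single fetch instruction
// exists: retry until the OR is applied atomically, then return the old
// value. compare_exchange_weak refreshes `old` on failure.
static uint32_t fetchOr(std::atomic<uint32_t>* mem, uint32_t value)
{
    uint32_t old = mem->load();
    while (!mem->compare_exchange_weak(old, old | value)) {
        // retry with the refreshed `old`
    }
    return old;
}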
1560
1561
void
1562
MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
1563
                                Register value, const Address& mem, Register temp1, Register temp2,
1564
                                AnyRegister output)
1565
0
{
1566
0
    AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
1567
0
}
1568
1569
void
1570
MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
1571
                                Register value, const BaseIndex& mem, Register temp1, Register temp2,
1572
                                AnyRegister output)
1573
0
{
1574
0
    AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
1575
0
}
1576
1577
void
1578
MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization&, AtomicOp op,
1579
                                 Register value, const BaseIndex& mem, Register temp)
1580
0
{
1581
0
    MOZ_ASSERT(temp == InvalidReg);
1582
0
    AtomicEffectOp(*this, nullptr, arrayType, op, value, mem);
1583
0
}
1584
1585
void
1586
MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization&, AtomicOp op,
1587
                                 Register value, const Address& mem, Register temp)
1588
0
{
1589
0
    MOZ_ASSERT(temp == InvalidReg);
1590
0
    AtomicEffectOp(*this, nullptr, arrayType, op, value, mem);
1591
0
}
1592
1593
void
1594
MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization&, AtomicOp op,
1595
                                 Imm32 value, const Address& mem, Register temp)
1596
0
{
1597
0
    MOZ_ASSERT(temp == InvalidReg);
1598
0
    AtomicEffectOp(*this, nullptr, arrayType, op, value, mem);
1599
0
}
1600
1601
void
1602
MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization&, AtomicOp op,
1603
                                 Imm32 value, const BaseIndex& mem, Register temp)
1604
0
{
1605
0
    MOZ_ASSERT(temp == InvalidReg);
1606
0
    AtomicEffectOp(*this, nullptr, arrayType, op, value, mem);
1607
0
}
1608
1609
template<typename T>
1610
static void
1611
AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
1612
                AtomicOp op, Imm32 value, const T& mem, Register temp1, Register temp2,
1613
                AnyRegister output)
1614
0
{
1615
0
    if (arrayType == Scalar::Uint32) {
1616
0
        masm.atomicFetchOp(arrayType, sync, op, value, mem, temp2, temp1);
1617
0
        masm.convertUInt32ToDouble(temp1, output.fpu());
1618
0
    } else {
1619
0
        masm.atomicFetchOp(arrayType, sync, op, value, mem, temp1, output.gpr());
1620
0
    }
1621
0
}
Unexecuted instantiation: Unified_cpp_js_src27.cpp:void AtomicFetchOpJS<js::jit::Address>(js::jit::MacroAssembler&, js::Scalar::Type, js::jit::Synchronization const&, js::jit::AtomicOp, js::jit::Imm32, js::jit::Address const&, js::jit::Register, js::jit::Register, js::jit::AnyRegister)
Unexecuted instantiation: Unified_cpp_js_src27.cpp:void AtomicFetchOpJS<js::jit::BaseIndex>(js::jit::MacroAssembler&, js::Scalar::Type, js::jit::Synchronization const&, js::jit::AtomicOp, js::jit::Imm32, js::jit::BaseIndex const&, js::jit::Register, js::jit::Register, js::jit::AnyRegister)
1622
1623
void
1624
MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
1625
                                Imm32 value, const Address& mem, Register temp1, Register temp2,
1626
                                AnyRegister output)
1627
0
{
1628
0
    AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
1629
0
}
1630
1631
void
1632
MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
1633
                                Imm32 value, const BaseIndex& mem, Register temp1, Register temp2,
1634
                                AnyRegister output)
1635
0
{
1636
0
    AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
1637
0
}
1638
1639
// ========================================================================
1640
// Spectre Mitigations.
1641
1642
void
1643
MacroAssembler::speculationBarrier()
1644
347
{
1645
347
    // The Spectre mitigations recommended by Intel and AMD suggest using
1646
347
    // lfence to force all speculative execution of instructions to end.
1647
347
    MOZ_ASSERT(HasSSE2());
1648
347
    masm.lfence();
1649
347
}
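Note: a typical use of such a barrier is to stop speculative loads from racing past a bounds check. A hedged sketch using the compiler intrinsic (the function and names here are illustrative, not from this file):

#include <immintrin.h>
#include <cstddef>
#include <cstdint>

// Illustrative only: lfence keeps the load from executing speculatively
// before the bounds check has resolved, the same property
// speculationBarrier() provides to jitted code.
uint8_t safeRead(const uint8_t* buf, size_t len, size_t i)
{
    if (i >= len)
        return 0;
    _mm_lfence();   // speculation barrier
    return buf[i];
}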
1650
1651
//}}} check_macroassembler_style