Coverage Report

Created: 2018-09-25 14:53

/src/mozilla-central/js/src/jit/x64/Lowering-x64.cpp
Line
Count
Source (jump to first uncovered line)
1
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2
 * vim: set ts=8 sts=4 et sw=4 tw=99:
3
 * This Source Code Form is subject to the terms of the Mozilla Public
4
 * License, v. 2.0. If a copy of the MPL was not distributed with this
5
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7
#include "jit/x64/Lowering-x64.h"
8
9
#include "jit/Lowering.h"
10
#include "jit/MIR.h"
11
#include "jit/x64/Assembler-x64.h"
12
13
#include "jit/shared/Lowering-shared-inl.h"
14
15
using namespace js;
16
using namespace js::jit;
17
18
// Allocate a use of a boxed Value pinned to a specific register.  On x64 a
// Value fits in a single 64-bit register, so the second Register parameter
// is unused (it exists only for signature parity with 32-bit platforms).
LBoxAllocation
LIRGeneratorX64::useBoxFixed(MDefinition* mir, Register reg1, Register, bool useAtStart)
{
    MOZ_ASSERT(mir->type() == MIRType::Value);

    ensureDefined(mir);
    LUse valueUse(reg1, mir->virtualRegister(), useAtStart);
    return LBoxAllocation(valueUse);
}
26
27
// On x64 every general-purpose register is byte-addressable, so byte
// operations impose no extra constraint: any register works.
LAllocation
LIRGeneratorX64::useByteOpRegister(MDefinition* mir)
{
    return useRegister(mir);
}
32
33
// At-start variant of useByteOpRegister; on x64 no special byte-register
// handling is needed.
LAllocation
LIRGeneratorX64::useByteOpRegisterAtStart(MDefinition* mir)
{
    return useRegisterAtStart(mir);
}
38
39
// Like useByteOpRegister, but permits a non-double constant allocation.
// x64 needs no byte-register restriction, so forward directly.
LAllocation
LIRGeneratorX64::useByteOpRegisterOrNonDoubleConstant(MDefinition* mir)
{
    return useRegisterOrNonDoubleConstant(mir);
}
44
45
// A temporary for byte operations; on x64 any ordinary temp suffices.
LDefinition
LIRGeneratorX64::tempByteOpRegister()
{
    return temp();
}
50
51
// Temporary used while unboxing; a plain GPR temp on x64.
LDefinition
LIRGeneratorX64::tempToUnbox()
{
    return temp();
}
56
57
void
58
LIRGeneratorX64::lowerForALUInt64(LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
59
                                  MDefinition* mir, MDefinition* lhs, MDefinition* rhs)
60
0
{
61
0
    ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
62
0
    ins->setInt64Operand(INT64_PIECES,
63
0
                         lhs != rhs ? useInt64OrConstant(rhs) : useInt64OrConstantAtStart(rhs));
64
0
    defineInt64ReuseInput(ins, mir, 0);
65
0
}
66
67
void
68
LIRGeneratorX64::lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs, MDefinition* rhs)
69
0
{
70
0
    // X64 doesn't need a temp for 64bit multiplication.
71
0
    ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
72
0
    ins->setInt64Operand(INT64_PIECES,
73
0
                         lhs != rhs ? useInt64OrConstant(rhs) : useInt64OrConstantAtStart(rhs));
74
0
    defineInt64ReuseInput(ins, mir, 0);
75
0
}
76
77
void
78
LIRGenerator::visitBox(MBox* box)
79
14
{
80
14
    MDefinition* opd = box->getOperand(0);
81
14
82
14
    // If the operand is a constant, emit near its uses.
83
14
    if (opd->isConstant() && box->canEmitAtUses()) {
84
0
        emitAtUses(box);
85
0
        return;
86
0
    }
87
14
88
14
    if (opd->isConstant()) {
89
0
        define(new(alloc()) LValue(opd->toConstant()->toJSValue()), box, LDefinition(LDefinition::BOX));
90
14
    } else {
91
14
        LBox* ins = new(alloc()) LBox(useRegister(opd), opd->type());
92
14
        define(ins, box, LDefinition(LDefinition::BOX));
93
14
    }
94
14
}
95
96
void
97
LIRGenerator::visitUnbox(MUnbox* unbox)
98
168
{
99
168
    MDefinition* box = unbox->getOperand(0);
100
168
101
168
    if (box->type() == MIRType::ObjectOrNull) {
102
0
        LUnboxObjectOrNull* lir = new(alloc()) LUnboxObjectOrNull(useRegisterAtStart(box));
103
0
        if (unbox->fallible()) {
104
0
            assignSnapshot(lir, unbox->bailoutKind());
105
0
        }
106
0
        defineReuseInput(lir, unbox, 0);
107
0
        return;
108
0
    }
109
168
110
168
    MOZ_ASSERT(box->type() == MIRType::Value);
111
168
112
168
    LUnboxBase* lir;
113
168
    if (IsFloatingPointType(unbox->type())) {
114
0
        lir = new(alloc()) LUnboxFloatingPoint(useRegisterAtStart(box), unbox->type());
115
168
    } else if (unbox->fallible()) {
116
112
        // If the unbox is fallible, load the Value in a register first to
117
112
        // avoid multiple loads.
118
112
        lir = new(alloc()) LUnbox(useRegisterAtStart(box));
119
112
    } else {
120
56
        lir = new(alloc()) LUnbox(useAtStart(box));
121
56
    }
122
168
123
168
    if (unbox->fallible()) {
124
112
        assignSnapshot(lir, unbox->bailoutKind());
125
112
    }
126
168
127
168
    define(lir, unbox);
128
168
}
129
130
void
131
LIRGenerator::visitReturn(MReturn* ret)
132
14
{
133
14
    MDefinition* opd = ret->getOperand(0);
134
14
    MOZ_ASSERT(opd->type() == MIRType::Value);
135
14
136
14
    LReturn* ins = new(alloc()) LReturn;
137
14
    ins->setOperand(0, useFixed(opd, JSReturnReg));
138
14
    add(ins);
139
14
}
140
141
void
142
LIRGeneratorX64::defineUntypedPhi(MPhi* phi, size_t lirIndex)
143
0
{
144
0
    defineTypedPhi(phi, lirIndex);
145
0
}
146
147
void
148
LIRGeneratorX64::lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex)
149
0
{
150
0
    lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
151
0
}
152
153
void
154
LIRGeneratorX64::defineInt64Phi(MPhi* phi, size_t lirIndex)
155
0
{
156
0
    defineTypedPhi(phi, lirIndex);
157
0
}
158
159
void
160
LIRGeneratorX64::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex)
161
0
{
162
0
    lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
163
0
}
164
165
void
166
LIRGenerator::visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins)
167
0
{
168
0
    lowerCompareExchangeTypedArrayElement(ins, /* useI386ByteRegisters = */ false);
169
0
}
170
171
void
172
LIRGenerator::visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins)
173
0
{
174
0
    lowerAtomicExchangeTypedArrayElement(ins, /* useI386ByteRegisters = */ false);
175
0
}
176
177
void
178
LIRGenerator::visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins)
179
0
{
180
0
    lowerAtomicTypedArrayElementBinop(ins, /* useI386ByteRegisters = */ false);
181
0
}
182
183
void
184
LIRGenerator::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins)
185
0
{
186
0
    MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
187
0
    LWasmUint32ToDouble* lir = new(alloc()) LWasmUint32ToDouble(useRegisterAtStart(ins->input()));
188
0
    define(lir, ins);
189
0
}
190
191
void
192
LIRGenerator::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins)
193
0
{
194
0
    MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
195
0
    LWasmUint32ToFloat32* lir = new(alloc()) LWasmUint32ToFloat32(useRegisterAtStart(ins->input()));
196
0
    define(lir, ins);
197
0
}
198
199
void
200
LIRGenerator::visitWasmLoad(MWasmLoad* ins)
201
0
{
202
0
    MDefinition* base = ins->base();
203
0
    MOZ_ASSERT(base->type() == MIRType::Int32);
204
0
205
0
    if (ins->type() != MIRType::Int64) {
206
0
        auto* lir = new(alloc()) LWasmLoad(useRegisterOrZeroAtStart(base));
207
0
        define(lir, ins);
208
0
        return;
209
0
    }
210
0
211
0
    auto* lir = new(alloc()) LWasmLoadI64(useRegisterOrZeroAtStart(base));
212
0
    defineInt64(lir, ins);
213
0
}
214
215
// Lower a wasm heap store.  The value operand's allocation policy depends on
// the access type: small integer stores can encode an immediate, floating
// point stores need a register, and Int64 is special-cased below.
void
LIRGenerator::visitWasmStore(MWasmStore* ins)
{
    MDefinition* base = ins->base();
    MOZ_ASSERT(base->type() == MIRType::Int32);

    MDefinition* value = ins->value();
    LAllocation valueAlloc;
    switch (ins->access().type()) {
      case Scalar::Int8:
      case Scalar::Uint8:
      case Scalar::Int16:
      case Scalar::Uint16:
      case Scalar::Int32:
      case Scalar::Uint32:
        // <= 32-bit integer stores can use an immediate operand.
        valueAlloc = useRegisterOrConstantAtStart(value);
        break;
      case Scalar::Int64:
        // No way to encode an int64-to-memory move on x64.
        if (value->isConstant() && value->type() != MIRType::Int64) {
            valueAlloc = useOrConstantAtStart(value);
        } else {
            valueAlloc = useRegisterAtStart(value);
        }
        break;
      case Scalar::Float32:
      case Scalar::Float64:
        // FP stores always go through a register.
        valueAlloc = useRegisterAtStart(value);
        break;
      case Scalar::Uint8Clamped:
      case Scalar::MaxTypedArrayViewType:
        MOZ_CRASH("unexpected array type");
    }

    // A zero-valued constant base folds into the address mode.
    LAllocation baseAlloc = useRegisterOrZeroAtStart(base);
    auto* lir = new(alloc()) LWasmStore(baseAlloc, valueAlloc);
    add(lir, ins);
}
253
254
void
255
LIRGenerator::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins)
256
0
{
257
0
    MDefinition* base = ins->base();
258
0
    MOZ_ASSERT(base->type() == MIRType::Int32);
259
0
260
0
    define(new(alloc()) LAsmJSLoadHeap(useRegisterOrZeroAtStart(base)), ins);
261
0
}
262
263
// Lower an asm.js heap store.  Integer stores may take a constant value
// operand; floating-point stores require a register.
void
LIRGenerator::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins)
{
    MDefinition* base = ins->base();
    MOZ_ASSERT(base->type() == MIRType::Int32);

    LAsmJSStoreHeap* lir = nullptr;  // initialize to silence GCC warning
    switch (ins->access().type()) {
      case Scalar::Int8:
      case Scalar::Uint8:
      case Scalar::Int16:
      case Scalar::Uint16:
      case Scalar::Int32:
      case Scalar::Uint32:
        // Integer stores can encode the value as an immediate.
        lir = new(alloc()) LAsmJSStoreHeap(useRegisterOrZeroAtStart(base),
                                           useRegisterOrConstantAtStart(ins->value()));
        break;
      case Scalar::Float32:
      case Scalar::Float64:
        // FP stores always need the value in a register.
        lir = new(alloc()) LAsmJSStoreHeap(useRegisterOrZeroAtStart(base),
                                           useRegisterAtStart(ins->value()));
        break;
      case Scalar::Int64:
      case Scalar::Uint8Clamped:
      case Scalar::MaxTypedArrayViewType:
        // Int64 accesses are wasm-only (see visitWasmStore); the rest are
        // not valid asm.js heap types.
        MOZ_CRASH("unexpected array type");
    }
    add(lir, ins);
}
292
293
void
294
LIRGenerator::visitWasmCompareExchangeHeap(MWasmCompareExchangeHeap* ins)
295
0
{
296
0
    MDefinition* base = ins->base();
297
0
    MOZ_ASSERT(base->type() == MIRType::Int32);
298
0
299
0
    // The output may not be used but will be clobbered regardless, so
300
0
    // pin the output to eax.
301
0
    //
302
0
    // The input values must both be in registers.
303
0
304
0
    const LAllocation oldval = useRegister(ins->oldValue());
305
0
    const LAllocation newval = useRegister(ins->newValue());
306
0
307
0
    LWasmCompareExchangeHeap* lir =
308
0
        new(alloc()) LWasmCompareExchangeHeap(useRegister(base), oldval, newval);
309
0
310
0
    defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
311
0
}
312
313
void
314
LIRGenerator::visitWasmAtomicExchangeHeap(MWasmAtomicExchangeHeap* ins)
315
0
{
316
0
    MOZ_ASSERT(ins->base()->type() == MIRType::Int32);
317
0
318
0
    const LAllocation base = useRegister(ins->base());
319
0
    const LAllocation value = useRegister(ins->value());
320
0
321
0
    // The output may not be used but will be clobbered regardless,
322
0
    // so ignore the case where we're not using the value and just
323
0
    // use the output register as a temp.
324
0
325
0
    LWasmAtomicExchangeHeap* lir =
326
0
        new(alloc()) LWasmAtomicExchangeHeap(base, value);
327
0
    define(lir, ins);
328
0
}
329
330
// Lower a wasm atomic read-modify-write.  Three strategies are chosen below
// depending on whether the result is used and which operation is requested;
// the operand-allocation order here is deliberate, so the code is left
// as-is and only annotated.
void
LIRGenerator::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins)
{
    MDefinition* base = ins->base();
    MOZ_ASSERT(base->type() == MIRType::Int32);

    // No support for 64-bit operations with constants at the masm level.

    bool canTakeConstant = ins->access().type() != Scalar::Int64;

    // Case 1: the result of the operation is not used.
    //
    // We'll emit a single instruction: LOCK ADD, LOCK SUB, LOCK AND,
    // LOCK OR, or LOCK XOR.

    if (!ins->hasUses()) {
        LAllocation value = canTakeConstant
                            ? useRegisterOrConstant(ins->value())
                            : useRegister(ins->value());
        LWasmAtomicBinopHeapForEffect* lir =
            new(alloc()) LWasmAtomicBinopHeapForEffect(useRegister(base), value);
        add(lir, ins);
        return;
    }

    // Case 2: the result of the operation is used.
    //
    // For ADD and SUB we'll use XADD with word and byte ops as
    // appropriate.  Any output register can be used and if value is a
    // register it's best if it's the same as output:
    //
    //    movl       value, output  ; if value != output
    //    lock xaddl output, mem
    //
    // For AND/OR/XOR we need to use a CMPXCHG loop, and the output is
    // always in rax:
    //
    //    movl          *mem, rax
    // L: mov           rax, temp
    //    andl          value, temp
    //    lock cmpxchg  temp, mem  ; reads rax also
    //    jnz           L
    //    ; result in rax
    //
    // Note the placement of L, cmpxchg will update rax with *mem if
    // *mem does not have the expected value, so reloading it at the
    // top of the loop would be redundant.

    // bitOp selects the CMPXCHG-loop strategy (AND/OR/XOR); ADD/SUB use XADD.
    bool bitOp = !(ins->operation() == AtomicFetchAddOp || ins->operation() == AtomicFetchSubOp);
    bool reuseInput = false;
    LAllocation value;

    if (bitOp || ins->value()->isConstant()) {
        value = canTakeConstant ? useRegisterOrConstant(ins->value()) : useRegister(ins->value());
    } else {
        // XADD with a register value: reuse it as the output (see above).
        reuseInput = true;
        value = useRegisterAtStart(ins->value());
    }

    // The CMPXCHG loop needs a scratch register for the modified value.
    auto* lir = new(alloc()) LWasmAtomicBinopHeap(useRegister(base),
                                                   value,
                                                   bitOp ? temp() : LDefinition::BogusTemp());

    if (reuseInput) {
        defineReuseInput(lir, ins, LWasmAtomicBinopHeap::valueOp);
    } else if (bitOp) {
        defineFixed(lir, ins, LAllocation(AnyRegister(rax)));
    } else {
        define(lir, ins);
    }
}
401
402
void
403
LIRGenerator::visitSubstr(MSubstr* ins)
404
0
{
405
0
    LSubstr* lir = new (alloc()) LSubstr(useRegister(ins->string()),
406
0
                                         useRegister(ins->begin()),
407
0
                                         useRegister(ins->length()),
408
0
                                         temp(),
409
0
                                         temp(),
410
0
                                         tempByteOpRegister());
411
0
    define(lir, ins);
412
0
    assignSafepoint(lir, ins);
413
0
}
414
415
void
416
LIRGenerator::visitRandom(MRandom* ins)
417
0
{
418
0
    LRandom *lir = new(alloc()) LRandom(temp(),
419
0
                                        temp(),
420
0
                                        temp());
421
0
    defineFixed(lir, ins, LFloatReg(ReturnDoubleReg));
422
0
}
423
424
void
425
LIRGeneratorX64::lowerDivI64(MDiv* div)
426
0
{
427
0
    if (div->isUnsigned()) {
428
0
        lowerUDivI64(div);
429
0
        return;
430
0
    }
431
0
432
0
    LDivOrModI64* lir = new(alloc()) LDivOrModI64(useRegister(div->lhs()), useRegister(div->rhs()),
433
0
                                                  tempFixed(rdx));
434
0
    defineInt64Fixed(lir, div, LInt64Allocation(LAllocation(AnyRegister(rax))));
435
0
}
436
437
void
438
LIRGeneratorX64::lowerModI64(MMod* mod)
439
0
{
440
0
    if (mod->isUnsigned()) {
441
0
        lowerUModI64(mod);
442
0
        return;
443
0
    }
444
0
445
0
    LDivOrModI64* lir = new(alloc()) LDivOrModI64(useRegister(mod->lhs()), useRegister(mod->rhs()),
446
0
                                                  tempFixed(rax));
447
0
    defineInt64Fixed(lir, mod, LInt64Allocation(LAllocation(AnyRegister(rdx))));
448
0
}
449
450
void
451
LIRGeneratorX64::lowerUDivI64(MDiv* div)
452
0
{
453
0
    LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useRegister(div->lhs()),
454
0
                                                    useRegister(div->rhs()),
455
0
                                                    tempFixed(rdx));
456
0
    defineInt64Fixed(lir, div, LInt64Allocation(LAllocation(AnyRegister(rax))));
457
0
}
458
459
void
460
LIRGeneratorX64::lowerUModI64(MMod* mod)
461
0
{
462
0
    LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useRegister(mod->lhs()),
463
0
                                                    useRegister(mod->rhs()),
464
0
                                                    tempFixed(rax));
465
0
    defineInt64Fixed(lir, mod, LInt64Allocation(LAllocation(AnyRegister(rdx))));
466
0
}
467
468
void
469
LIRGenerator::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins)
470
0
{
471
0
    MDefinition* opd = ins->input();
472
0
    MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
473
0
474
0
    LDefinition maybeTemp = ins->isUnsigned() ? tempDouble() : LDefinition::BogusTemp();
475
0
    defineInt64(new(alloc()) LWasmTruncateToInt64(useRegister(opd), maybeTemp), ins);
476
0
}
477
478
void
479
LIRGenerator::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins)
480
0
{
481
0
    MDefinition* opd = ins->input();
482
0
    MOZ_ASSERT(opd->type() == MIRType::Int64);
483
0
    MOZ_ASSERT(IsFloatingPointType(ins->type()));
484
0
485
0
    LDefinition maybeTemp = ins->isUnsigned() ? temp() : LDefinition::BogusTemp();
486
0
    define(new(alloc()) LInt64ToFloatingPoint(useInt64Register(opd), maybeTemp), ins);
487
0
}
488
489
void
490
LIRGenerator::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins)
491
0
{
492
0
    defineInt64(new(alloc()) LExtendInt32ToInt64(useAtStart(ins->input())), ins);
493
0
}
494
495
void
496
LIRGenerator::visitSignExtendInt64(MSignExtendInt64* ins)
497
0
{
498
0
    defineInt64(new(alloc()) LSignExtendInt64(useInt64RegisterAtStart(ins->input())), ins);
499
0
}