/src/mozilla-central/js/src/jit/x86-shared/MoveEmitter-x86-shared.cpp
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/x86-shared/MoveEmitter-x86-shared.h"

#include "jit/MacroAssembler-inl.h"

using namespace js;
using namespace js::jit;

using mozilla::Maybe;

MoveEmitterX86::MoveEmitterX86(MacroAssembler& masm)
  : inCycle_(false),
    masm(masm),
    pushedAtCycle_(-1)
{
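    // Record the frame depth on entry; toAddress() and finish() compute
    // stack offsets relative to this point.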
    pushedAtStart_ = masm.framePushed();
}

// Examine the cycle in moves starting at position i. Determine if it's a
// simple cycle consisting of all register-to-register moves in a single class,
// and whether it can be implemented entirely by swaps.
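// Returns the number of swaps required, or -1 (which wraps to SIZE_MAX in the
// size_t return type) when the cycle cannot be optimized; in that case both
// *allGeneralRegs and *allFloatRegs end up false, so no optimized path is taken.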
size_t
MoveEmitterX86::characterizeCycle(const MoveResolver& moves, size_t i,
                                  bool* allGeneralRegs, bool* allFloatRegs)
{
    size_t swapCount = 0;

    for (size_t j = i; ; j++) {
        const MoveOp& move = moves.getMove(j);

        // If it isn't a cycle of registers of the same kind, we won't be able
        // to optimize it.
        if (!move.to().isGeneralReg()) {
            *allGeneralRegs = false;
        }
        if (!move.to().isFloatReg()) {
            *allFloatRegs = false;
        }
        if (!*allGeneralRegs && !*allFloatRegs) {
            return -1;
        }

        // Stop iterating when we see the last one.
        if (j != i && move.isCycleEnd()) {
            break;
        }

        // Check that this move is actually part of the cycle. This is
        // over-conservative when there are multiple reads from the same source,
        // but that's expected to be rare.
        if (move.from() != moves.getMove(j + 1).to()) {
            *allGeneralRegs = false;
            *allFloatRegs = false;
            return -1;
        }

        swapCount++;
    }

    // Check that the last move cycles back to the first move.
    const MoveOp& move = moves.getMove(i + swapCount);
    if (move.from() != moves.getMove(i).to()) {
        *allGeneralRegs = false;
        *allFloatRegs = false;
        return -1;
    }

    return swapCount;
}

// If we can emit optimized code for the cycle in moves starting at position i,
// do so, and return true.
bool
MoveEmitterX86::maybeEmitOptimizedCycle(const MoveResolver& moves, size_t i,
                                        bool allGeneralRegs, bool allFloatRegs, size_t swapCount)
{
    if (allGeneralRegs && swapCount <= 2) {
        // Use x86's swap-integer-registers instruction if we only have a few
        // swaps. (x86 also has a swap between registers and memory but it's
        // slow.)
        for (size_t k = 0; k < swapCount; k++) {
            masm.xchg(moves.getMove(i + k).to().reg(), moves.getMove(i + k + 1).to().reg());
        }
        return true;
    }

    if (allFloatRegs && swapCount == 1) {
        // There's no xchg for xmm registers, but if we only need a single swap,
        // it's cheap to do an XOR swap.
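        // The three vxorpd instructions below implement the classic XOR swap
        // (b ^= a; a ^= b; b ^= a), exchanging the two registers without
        // needing a scratch register.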
        FloatRegister a = moves.getMove(i).to().floatReg();
        FloatRegister b = moves.getMove(i + 1).to().floatReg();
        masm.vxorpd(a, b, b);
        masm.vxorpd(b, a, a);
        masm.vxorpd(a, b, b);
        return true;
    }

    return false;
}

void
MoveEmitterX86::emit(const MoveResolver& moves)
{
#if defined(JS_CODEGEN_X86) && defined(DEBUG)
    // Clobber any scratch register we have, to make regalloc bugs more visible.
    if (scratchRegister_.isSome()) {
        masm.mov(ImmWord(0xdeadbeef), scratchRegister_.value());
    }
#endif

    for (size_t i = 0; i < moves.numMoves(); i++) {
#if defined(JS_CODEGEN_X86) && defined(DEBUG)
        if (!scratchRegister_.isSome()) {
            Maybe<Register> reg = findScratchRegister(moves, i);
            if (reg.isSome()) {
                masm.mov(ImmWord(0xdeadbeef), reg.value());
            }
        }
#endif

        const MoveOp& move = moves.getMove(i);
        const MoveOperand& from = move.from();
        const MoveOperand& to = move.to();

        if (move.isCycleEnd()) {
            MOZ_ASSERT(inCycle_);
            completeCycle(to, move.type());
            inCycle_ = false;
            continue;
        }

        if (move.isCycleBegin()) {
            MOZ_ASSERT(!inCycle_);

            // Characterize the cycle.
            bool allGeneralRegs = true, allFloatRegs = true;
            size_t swapCount = characterizeCycle(moves, i, &allGeneralRegs, &allFloatRegs);

            // Attempt to optimize it to avoid using the stack.
            if (maybeEmitOptimizedCycle(moves, i, allGeneralRegs, allFloatRegs, swapCount)) {
                i += swapCount;
                continue;
            }

            // Otherwise use the stack.
            breakCycle(to, move.endCycleType());
            inCycle_ = true;
        }

        // A normal move, or the first move of a cycle whose destination was
        // just saved by breakCycle above; either way, perform the move now.
        switch (move.type()) {
          case MoveOp::FLOAT32:
            emitFloat32Move(from, to);
            break;
          case MoveOp::DOUBLE:
            emitDoubleMove(from, to);
            break;
          case MoveOp::INT32:
            emitInt32Move(from, to, moves, i);
            break;
          case MoveOp::GENERAL:
            emitGeneralMove(from, to, moves, i);
            break;
          case MoveOp::SIMD128INT:
            emitSimd128IntMove(from, to);
            break;
          case MoveOp::SIMD128FLOAT:
            emitSimd128FloatMove(from, to);
            break;
          default:
            MOZ_CRASH("Unexpected move type");
        }
    }
}

MoveEmitterX86::~MoveEmitterX86()
{
    assertDone();
}

Address
MoveEmitterX86::cycleSlot()
{
    if (pushedAtCycle_ == -1) {
        // Reserve stack for cycle resolution.
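        // Simd128DataSize is the largest operand breakCycle may spill here,
        // so one slot of that size serves every move type.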
        masm.reserveStack(Simd128DataSize);
        pushedAtCycle_ = masm.framePushed();
    }

    return Address(StackPointer, masm.framePushed() - pushedAtCycle_);
}

Address
MoveEmitterX86::toAddress(const MoveOperand& operand) const
{
    if (operand.base() != StackPointer) {
        return Address(operand.base(), operand.disp());
    }

    MOZ_ASSERT(operand.disp() >= 0);

    // Otherwise, the stack offset may need to be adjusted.
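    // Stack-relative operands are expressed against the frame depth at the
    // time the emitter started; anything pushed since then (e.g. by
    // breakCycle) has moved StackPointer, so compensate by the difference.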
    return Address(StackPointer, operand.disp() + (masm.framePushed() - pushedAtStart_));
}

// Warning: do not use the resulting operand with pop instructions, since they
// compute the effective destination address after altering the stack pointer.
// Use toPopOperand if an Operand is needed for a pop.
Operand
MoveEmitterX86::toOperand(const MoveOperand& operand) const
{
    if (operand.isMemoryOrEffectiveAddress()) {
        return Operand(toAddress(operand));
    }
    if (operand.isGeneralReg()) {
        return Operand(operand.reg());
    }

    MOZ_ASSERT(operand.isFloatReg());
    return Operand(operand.floatReg());
}

// This is the same as toOperand except that it computes an Operand suitable for
// use in a pop.
Operand
MoveEmitterX86::toPopOperand(const MoveOperand& operand) const
{
    if (operand.isMemory()) {
        if (operand.base() != StackPointer) {
            return Operand(operand.base(), operand.disp());
        }

        MOZ_ASSERT(operand.disp() >= 0);

        // Otherwise, the stack offset may need to be adjusted.
        // Note the adjustment by one stack slot here, to compensate for the
        // fact that pop computes its effective address after incrementing the
        // stack pointer.
        return Operand(StackPointer,
                       operand.disp() + (masm.framePushed() - sizeof(void*) - pushedAtStart_));
    }
    if (operand.isGeneralReg()) {
        return Operand(operand.reg());
    }

    MOZ_ASSERT(operand.isFloatReg());
    return Operand(operand.floatReg());
}

void
MoveEmitterX86::breakCycle(const MoveOperand& to, MoveOp::Type type)
{
    // Consider the cycle:
    //   (A -> B)
    //   (B -> A)
    //
    // This case handles (A -> B), which we reach first. We save B, then allow
    // the original move to continue.
    switch (type) {
      case MoveOp::SIMD128INT:
        if (to.isMemory()) {
            ScratchSimd128Scope scratch(masm);
            masm.loadAlignedSimd128Int(toAddress(to), scratch);
            masm.storeAlignedSimd128Int(scratch, cycleSlot());
        } else {
            masm.storeAlignedSimd128Int(to.floatReg(), cycleSlot());
        }
        break;
      case MoveOp::SIMD128FLOAT:
        if (to.isMemory()) {
            ScratchSimd128Scope scratch(masm);
            masm.loadAlignedSimd128Float(toAddress(to), scratch);
            masm.storeAlignedSimd128Float(scratch, cycleSlot());
        } else {
            masm.storeAlignedSimd128Float(to.floatReg(), cycleSlot());
        }
        break;
      case MoveOp::FLOAT32:
        if (to.isMemory()) {
            ScratchFloat32Scope scratch(masm);
            masm.loadFloat32(toAddress(to), scratch);
            masm.storeFloat32(scratch, cycleSlot());
        } else {
            masm.storeFloat32(to.floatReg(), cycleSlot());
        }
        break;
      case MoveOp::DOUBLE:
        if (to.isMemory()) {
            ScratchDoubleScope scratch(masm);
            masm.loadDouble(toAddress(to), scratch);
            masm.storeDouble(scratch, cycleSlot());
        } else {
            masm.storeDouble(to.floatReg(), cycleSlot());
        }
        break;
      case MoveOp::INT32:
#ifdef JS_CODEGEN_X64
        // x64 can't pop to a 32-bit destination, so don't push.
        if (to.isMemory()) {
            masm.load32(toAddress(to), ScratchReg);
            masm.store32(ScratchReg, cycleSlot());
        } else {
            masm.store32(to.reg(), cycleSlot());
        }
        break;
#endif
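        // On x86, an INT32 is pointer-sized, so fall through to the GENERAL
        // path below.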
      case MoveOp::GENERAL:
        masm.Push(toOperand(to));
        break;
      default:
        MOZ_CRASH("Unexpected move type");
    }
}

void
MoveEmitterX86::completeCycle(const MoveOperand& to, MoveOp::Type type)
{
    // Consider the cycle:
    //   (A -> B)
    //   (B -> A)
    //
    // This case handles (B -> A), which we reach last. We emit a move from
    // the saved value of B to A.
    switch (type) {
      case MoveOp::SIMD128INT:
        MOZ_ASSERT(pushedAtCycle_ != -1);
        MOZ_ASSERT(pushedAtCycle_ - pushedAtStart_ >= Simd128DataSize);
        if (to.isMemory()) {
            ScratchSimd128Scope scratch(masm);
            masm.loadAlignedSimd128Int(cycleSlot(), scratch);
            masm.storeAlignedSimd128Int(scratch, toAddress(to));
        } else {
            masm.loadAlignedSimd128Int(cycleSlot(), to.floatReg());
        }
        break;
      case MoveOp::SIMD128FLOAT:
        MOZ_ASSERT(pushedAtCycle_ != -1);
        MOZ_ASSERT(pushedAtCycle_ - pushedAtStart_ >= Simd128DataSize);
        if (to.isMemory()) {
            ScratchSimd128Scope scratch(masm);
            masm.loadAlignedSimd128Float(cycleSlot(), scratch);
            masm.storeAlignedSimd128Float(scratch, toAddress(to));
        } else {
            masm.loadAlignedSimd128Float(cycleSlot(), to.floatReg());
        }
        break;
      case MoveOp::FLOAT32:
        MOZ_ASSERT(pushedAtCycle_ != -1);
        MOZ_ASSERT(pushedAtCycle_ - pushedAtStart_ >= sizeof(float));
        if (to.isMemory()) {
            ScratchFloat32Scope scratch(masm);
            masm.loadFloat32(cycleSlot(), scratch);
            masm.storeFloat32(scratch, toAddress(to));
        } else {
            masm.loadFloat32(cycleSlot(), to.floatReg());
        }
        break;
      case MoveOp::DOUBLE:
        MOZ_ASSERT(pushedAtCycle_ != -1);
        MOZ_ASSERT(pushedAtCycle_ - pushedAtStart_ >= sizeof(double));
        if (to.isMemory()) {
            ScratchDoubleScope scratch(masm);
            masm.loadDouble(cycleSlot(), scratch);
            masm.storeDouble(scratch, toAddress(to));
        } else {
            masm.loadDouble(cycleSlot(), to.floatReg());
        }
        break;
      case MoveOp::INT32:
#ifdef JS_CODEGEN_X64
        MOZ_ASSERT(pushedAtCycle_ != -1);
        MOZ_ASSERT(pushedAtCycle_ - pushedAtStart_ >= sizeof(int32_t));
        // x64 can't pop to a 32-bit destination.
        if (to.isMemory()) {
            masm.load32(cycleSlot(), ScratchReg);
            masm.store32(ScratchReg, toAddress(to));
        } else {
            masm.load32(cycleSlot(), to.reg());
        }
        break;
#endif
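        // On x86, fall through and pop the pointer-sized value that
        // breakCycle pushed.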
      case MoveOp::GENERAL:
        MOZ_ASSERT(masm.framePushed() - pushedAtStart_ >= sizeof(intptr_t));
        masm.Pop(toPopOperand(to));
        break;
      default:
        MOZ_CRASH("Unexpected move type");
    }
}

void
MoveEmitterX86::emitInt32Move(const MoveOperand& from, const MoveOperand& to,
                              const MoveResolver& moves, size_t i)
{
    if (from.isGeneralReg()) {
        masm.move32(from.reg(), toOperand(to));
    } else if (to.isGeneralReg()) {
        MOZ_ASSERT(from.isMemory());
        masm.load32(toAddress(from), to.reg());
    } else {
        // Memory to memory gpr move.
        MOZ_ASSERT(from.isMemory());
        Maybe<Register> reg = findScratchRegister(moves, i);
        if (reg.isSome()) {
            masm.load32(toAddress(from), reg.value());
            masm.move32(reg.value(), toOperand(to));
        } else {
            // No scratch register available; bounce it off the stack.
            masm.Push(toOperand(from));
            masm.Pop(toPopOperand(to));
        }
    }
}

void
MoveEmitterX86::emitGeneralMove(const MoveOperand& from, const MoveOperand& to,
                                const MoveResolver& moves, size_t i)
{
    if (from.isGeneralReg()) {
        masm.mov(from.reg(), toOperand(to));
    } else if (to.isGeneralReg()) {
        MOZ_ASSERT(from.isMemoryOrEffectiveAddress());
        if (from.isMemory()) {
            masm.loadPtr(toAddress(from), to.reg());
        } else {
            masm.lea(toOperand(from), to.reg());
        }
    } else if (from.isMemory()) {
        // Memory to memory gpr move.
        Maybe<Register> reg = findScratchRegister(moves, i);
        if (reg.isSome()) {
            masm.loadPtr(toAddress(from), reg.value());
            masm.mov(reg.value(), toOperand(to));
        } else {
            // No scratch register available; bounce it off the stack.
            masm.Push(toOperand(from));
            masm.Pop(toPopOperand(to));
        }
    } else {
        // Effective address to memory move.
        MOZ_ASSERT(from.isEffectiveAddress());
        Maybe<Register> reg = findScratchRegister(moves, i);
        if (reg.isSome()) {
            masm.lea(toOperand(from), reg.value());
            masm.mov(reg.value(), toOperand(to));
        } else {
            // This is tricky without a scratch reg. We can't do an lea. Bounce the
            // base register off the stack, then add the offset in place. Note that
            // this clobbers FLAGS!
            masm.Push(from.base());
            masm.Pop(toPopOperand(to));
            MOZ_ASSERT(to.isMemoryOrEffectiveAddress());
            masm.addPtr(Imm32(from.disp()), toAddress(to));
        }
    }
}

void
MoveEmitterX86::emitFloat32Move(const MoveOperand& from, const MoveOperand& to)
{
    MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg().isSingle());
    MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg().isSingle());

    if (from.isFloatReg()) {
        if (to.isFloatReg()) {
            masm.moveFloat32(from.floatReg(), to.floatReg());
        } else {
            masm.storeFloat32(from.floatReg(), toAddress(to));
        }
    } else if (to.isFloatReg()) {
        masm.loadFloat32(toAddress(from), to.floatReg());
    } else {
        // Memory to memory move.
        MOZ_ASSERT(from.isMemory());
        ScratchFloat32Scope scratch(masm);
        masm.loadFloat32(toAddress(from), scratch);
        masm.storeFloat32(scratch, toAddress(to));
    }
}

void
MoveEmitterX86::emitDoubleMove(const MoveOperand& from, const MoveOperand& to)
{
    MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg().isDouble());
    MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg().isDouble());

    if (from.isFloatReg()) {
        if (to.isFloatReg()) {
            masm.moveDouble(from.floatReg(), to.floatReg());
        } else {
            masm.storeDouble(from.floatReg(), toAddress(to));
        }
    } else if (to.isFloatReg()) {
        masm.loadDouble(toAddress(from), to.floatReg());
    } else {
        // Memory to memory move.
        MOZ_ASSERT(from.isMemory());
        ScratchDoubleScope scratch(masm);
        masm.loadDouble(toAddress(from), scratch);
        masm.storeDouble(scratch, toAddress(to));
    }
}

void
MoveEmitterX86::emitSimd128IntMove(const MoveOperand& from, const MoveOperand& to)
{
    MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg().isSimd128());
    MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg().isSimd128());

    if (from.isFloatReg()) {
        if (to.isFloatReg()) {
            masm.moveSimd128Int(from.floatReg(), to.floatReg());
        } else {
            masm.storeAlignedSimd128Int(from.floatReg(), toAddress(to));
        }
    } else if (to.isFloatReg()) {
        masm.loadAlignedSimd128Int(toAddress(from), to.floatReg());
    } else {
        // Memory to memory move.
        MOZ_ASSERT(from.isMemory());
        ScratchSimd128Scope scratch(masm);
        masm.loadAlignedSimd128Int(toAddress(from), scratch);
        masm.storeAlignedSimd128Int(scratch, toAddress(to));
    }
}

void
MoveEmitterX86::emitSimd128FloatMove(const MoveOperand& from, const MoveOperand& to)
{
    MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg().isSimd128());
    MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg().isSimd128());

    if (from.isFloatReg()) {
        if (to.isFloatReg()) {
            masm.moveSimd128Float(from.floatReg(), to.floatReg());
        } else {
            masm.storeAlignedSimd128Float(from.floatReg(), toAddress(to));
        }
    } else if (to.isFloatReg()) {
        masm.loadAlignedSimd128Float(toAddress(from), to.floatReg());
    } else {
        // Memory to memory move.
        MOZ_ASSERT(from.isMemory());
        ScratchSimd128Scope scratch(masm);
        masm.loadAlignedSimd128Float(toAddress(from), scratch);
        masm.storeAlignedSimd128Float(scratch, toAddress(to));
    }
}

void
MoveEmitterX86::assertDone()
{
    MOZ_ASSERT(!inCycle_);
}

void
MoveEmitterX86::finish()
{
    assertDone();

    masm.freeStack(masm.framePushed() - pushedAtStart_);
}

Maybe<Register>
MoveEmitterX86::findScratchRegister(const MoveResolver& moves, size_t initial)
{
#ifdef JS_CODEGEN_X86
    if (scratchRegister_.isSome()) {
        return scratchRegister_;
    }

    // All registers are either in use by this move group or are live
    // afterwards. Look through the remaining moves for a register which is
    // clobbered before it is used, and is thus dead at this point.
    AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
    for (size_t i = initial; i < moves.numMoves(); i++) {
        const MoveOp& move = moves.getMove(i);
        if (move.from().isGeneralReg()) {
            regs.takeUnchecked(move.from().reg());
        } else if (move.from().isMemoryOrEffectiveAddress()) {
            regs.takeUnchecked(move.from().base());
        }
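        // A destination register that is still in |regs| has not been read as
        // a source (or used as a base) by any move since |initial|, so it is
        // dead here and can serve as a scratch. Skip the initial move itself
        // and cycle begins, whose destinations are still needed.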
        if (move.to().isGeneralReg()) {
            if (i != initial && !move.isCycleBegin() && regs.has(move.to().reg())) {
                return mozilla::Some(move.to().reg());
            }
            regs.takeUnchecked(move.to().reg());
        } else if (move.to().isMemoryOrEffectiveAddress()) {
            regs.takeUnchecked(move.to().base());
        }
    }

    return mozilla::Nothing();
#else
    return mozilla::Some(ScratchReg);
#endif
}