/src/llvm-project/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
//===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the SystemZTargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "SystemZISelLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZConstantPoolValue.h"
#include "SystemZMachineFunctionInfo.h"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsS390.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/KnownBits.h"
#include <cctype>
#include <optional>

using namespace llvm;

#define DEBUG_TYPE "systemz-lower"

namespace {
// Represents information about a comparison.
struct Comparison {
  Comparison(SDValue Op0In, SDValue Op1In, SDValue ChainIn)
      : Op0(Op0In), Op1(Op1In), Chain(ChainIn),
        Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}

  // The operands to the comparison.
  SDValue Op0, Op1;

  // Chain if this is a strict floating-point comparison.
  SDValue Chain;

  // The opcode that should be used to compare Op0 and Op1.
  unsigned Opcode;

  // A SystemZICMP value. Only used for integer comparisons.
  unsigned ICmpType;

  // The mask of CC values that Opcode can produce.
  unsigned CCValid;

  // The mask of CC values for which the original condition is true.
  unsigned CCMask;
};
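
// As a rough illustration of how these fields combine (illustrative values,
// not an exhaustive description): lowering an integer SETEQ would typically
// fill in Opcode with the SystemZ compare node (e.g. SystemZISD::ICMP),
// CCValid with the full set of CC values an integer compare can produce
// (SystemZ::CCMASK_ICMP), and CCMask with just the "equal" value
// (SystemZ::CCMASK_CMP_EQ).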
} // end anonymous namespace

// Classify VT as either 32 or 64 bit.
static bool is32Bit(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::i32:
    return true;
  case MVT::i64:
    return false;
  default:
    llvm_unreachable("Unsupported type");
  }
}

// Return a version of MachineOperand that can be safely used before the
// final use.
static MachineOperand earlyUseOperand(MachineOperand Op) {
  if (Op.isReg())
    Op.setIsKill(false);
  return Op;
}

SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
                                             const SystemZSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));

  auto *Regs = STI.getSpecialRegisters();

  // Set up the register classes.
  if (Subtarget.hasHighWord())
    addRegisterClass(MVT::i32, &SystemZ::GRX32BitRegClass);
  else
    addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
  addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
  if (!useSoftFloat()) {
    if (Subtarget.hasVector()) {
      addRegisterClass(MVT::f32, &SystemZ::VR32BitRegClass);
      addRegisterClass(MVT::f64, &SystemZ::VR64BitRegClass);
    } else {
      addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass);
      addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass);
    }
    if (Subtarget.hasVectorEnhancements1())
      addRegisterClass(MVT::f128, &SystemZ::VR128BitRegClass);
    else
      addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);

    if (Subtarget.hasVector()) {
      addRegisterClass(MVT::v16i8, &SystemZ::VR128BitRegClass);
      addRegisterClass(MVT::v8i16, &SystemZ::VR128BitRegClass);
      addRegisterClass(MVT::v4i32, &SystemZ::VR128BitRegClass);
      addRegisterClass(MVT::v2i64, &SystemZ::VR128BitRegClass);
      addRegisterClass(MVT::v4f32, &SystemZ::VR128BitRegClass);
      addRegisterClass(MVT::v2f64, &SystemZ::VR128BitRegClass);
    }

    if (Subtarget.hasVector())
      addRegisterClass(MVT::i128, &SystemZ::VR128BitRegClass);
  }

  // Compute derived properties from the register classes
  computeRegisterProperties(Subtarget.getRegisterInfo());

  // Set up special registers.
  setStackPointerRegisterToSaveRestore(Regs->getStackPointerRegister());

  // TODO: It may be better to default to latency-oriented scheduling, however
  // LLVM's current latency-oriented scheduler can't handle physreg definitions
  // such as SystemZ has with CC, so set this to the register-pressure
  // scheduler, because it can.
  setSchedulingPreference(Sched::RegPressure);

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  setMaxAtomicSizeInBitsSupported(128);

  // Instructions are strings of 2-byte aligned 2-byte values.
  setMinFunctionAlignment(Align(2));
  // For performance reasons we prefer 16-byte alignment.
  setPrefFunctionAlignment(Align(16));

  // Handle operations that are handled in a similar way for all types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Lower SET_CC into an IPM-based sequence.
      setOperationAction(ISD::SETCC, VT, Custom);
      setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
      setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);

      // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
      setOperationAction(ISD::SELECT, VT, Expand);

      // Lower SELECT_CC and BR_CC into separate comparisons and branches.
      setOperationAction(ISD::SELECT_CC, VT, Custom);
      setOperationAction(ISD::BR_CC, VT, Custom);
    }
  }

  // Expand jump table branches as address arithmetic followed by an
  // indirect jump.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // Expand BRCOND into a BR_CC (see above).
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // Handle integer types except i128.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT) && VT != MVT::i128) {
      setOperationAction(ISD::ABS, VT, Legal);

      // Expand individual DIV and REMs into DIVREMs.
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Custom);
      setOperationAction(ISD::UDIVREM, VT, Custom);

      // Support addition/subtraction with overflow.
      setOperationAction(ISD::SADDO, VT, Custom);
      setOperationAction(ISD::SSUBO, VT, Custom);

      // Support addition/subtraction with carry.
      setOperationAction(ISD::UADDO, VT, Custom);
      setOperationAction(ISD::USUBO, VT, Custom);

      // Support carry in as value rather than glue.
      setOperationAction(ISD::UADDO_CARRY, VT, Custom);
      setOperationAction(ISD::USUBO_CARRY, VT, Custom);

      // Lower ATOMIC_LOAD and ATOMIC_STORE into normal volatile loads and
      // stores, putting a serialization instruction after the stores.
      setOperationAction(ISD::ATOMIC_LOAD, VT, Custom);
      setOperationAction(ISD::ATOMIC_STORE, VT, Custom);

      // Lower ATOMIC_LOAD_SUB into ATOMIC_LOAD_ADD if LAA and LAAG are
      // available, or if the operand is constant.
      setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);

      // Use POPCNT on z196 and above.
      if (Subtarget.hasPopulationCount())
        setOperationAction(ISD::CTPOP, VT, Custom);
      else
        setOperationAction(ISD::CTPOP, VT, Expand);

      // No special instructions for these.
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Use *MUL_LOHI where possible instead of MULH*.
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Custom);
      setOperationAction(ISD::UMUL_LOHI, VT, Custom);

      // Only z196 and above have native support for conversions to unsigned.
      // On z10, promoting to i64 doesn't generate an inexact condition for
      // values that are outside the i32 range but in the i64 range, so use
      // the default expansion.
      if (!Subtarget.hasFPExtension())
        setOperationAction(ISD::FP_TO_UINT, VT, Expand);

      // Mirror those settings for STRICT_FP_TO_[SU]INT. Note that these all
      // default to Expand, so need to be modified to Legal where appropriate.
      setOperationAction(ISD::STRICT_FP_TO_SINT, VT, Legal);
      if (Subtarget.hasFPExtension())
        setOperationAction(ISD::STRICT_FP_TO_UINT, VT, Legal);

      // And similarly for STRICT_[SU]INT_TO_FP.
      setOperationAction(ISD::STRICT_SINT_TO_FP, VT, Legal);
      if (Subtarget.hasFPExtension())
        setOperationAction(ISD::STRICT_UINT_TO_FP, VT, Legal);
    }
  }

  // Handle i128 if legal.
  if (isTypeLegal(MVT::i128)) {
    // No special instructions for these.
    setOperationAction(ISD::SDIVREM, MVT::i128, Expand);
    setOperationAction(ISD::UDIVREM, MVT::i128, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i128, Expand);
    setOperationAction(ISD::UMUL_LOHI, MVT::i128, Expand);
    setOperationAction(ISD::ROTR, MVT::i128, Expand);
    setOperationAction(ISD::ROTL, MVT::i128, Expand);
    setOperationAction(ISD::MUL, MVT::i128, Expand);
    setOperationAction(ISD::MULHS, MVT::i128, Expand);
    setOperationAction(ISD::MULHU, MVT::i128, Expand);
    setOperationAction(ISD::SDIV, MVT::i128, Expand);
    setOperationAction(ISD::UDIV, MVT::i128, Expand);
    setOperationAction(ISD::SREM, MVT::i128, Expand);
    setOperationAction(ISD::UREM, MVT::i128, Expand);
    setOperationAction(ISD::CTLZ, MVT::i128, Expand);
    setOperationAction(ISD::CTTZ, MVT::i128, Expand);

    // Support addition/subtraction with carry.
    setOperationAction(ISD::UADDO, MVT::i128, Custom);
    setOperationAction(ISD::USUBO, MVT::i128, Custom);
    setOperationAction(ISD::UADDO_CARRY, MVT::i128, Custom);
    setOperationAction(ISD::USUBO_CARRY, MVT::i128, Custom);

    // Use VPOPCT and add up partial results.
    setOperationAction(ISD::CTPOP, MVT::i128, Custom);

    // We have to use libcalls for these.
    setOperationAction(ISD::FP_TO_UINT, MVT::i128, LibCall);
    setOperationAction(ISD::FP_TO_SINT, MVT::i128, LibCall);
    setOperationAction(ISD::UINT_TO_FP, MVT::i128, LibCall);
    setOperationAction(ISD::SINT_TO_FP, MVT::i128, LibCall);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i128, LibCall);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i128, LibCall);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i128, LibCall);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i128, LibCall);
  }

  // Type legalization will convert 8- and 16-bit atomic operations into
  // forms that operate on i32s (but still keeping the original memory VT).
  // Lower them into full i32 operations.
  setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);

  // Whether or not i128 is a legal type, we need to custom lower
  // the atomic operations in order to exploit SystemZ instructions.
  setOperationAction(ISD::ATOMIC_LOAD, MVT::i128, Custom);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i128, Custom);

  // We can use the CC result of compare-and-swap to implement
  // the "success" result of ATOMIC_CMP_SWAP_WITH_SUCCESS.
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // Traps are legal, as we will convert them to "j .+2".
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // z10 has instructions for signed but not unsigned FP conversion.
  // Handle unsigned 32-bit types as signed 64-bit types.
  if (!Subtarget.hasFPExtension()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Expand);
  }

  // We have native support for a 64-bit CTLZ, via FLOGR.
  setOperationAction(ISD::CTLZ, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ, MVT::i64, Legal);

  // On z15 we have native support for a 64-bit CTPOP.
  if (Subtarget.hasMiscellaneousExtensions3()) {
    setOperationAction(ISD::CTPOP, MVT::i32, Promote);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  }

  // Give LowerOperation the chance to replace 64-bit ORs with subregs.
  setOperationAction(ISD::OR, MVT::i64, Custom);

  // Expand 128 bit shifts without using a libcall.
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
  setLibcallName(RTLIB::SRL_I128, nullptr);
  setLibcallName(RTLIB::SHL_I128, nullptr);
  setLibcallName(RTLIB::SRA_I128, nullptr);

  // Also expand 256 bit shifts if i128 is a legal type.
  if (isTypeLegal(MVT::i128)) {
    setOperationAction(ISD::SRL_PARTS, MVT::i128, Expand);
    setOperationAction(ISD::SHL_PARTS, MVT::i128, Expand);
    setOperationAction(ISD::SRA_PARTS, MVT::i128, Expand);
  }

  // Handle bitcast from fp128 to i128.
  if (!isTypeLegal(MVT::i128))
    setOperationAction(ISD::BITCAST, MVT::i128, Custom);

  // We have native instructions for i8, i16 and i32 extensions, but not i1.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
  }

  // Handle the various types of symbolic address.
  setOperationAction(ISD::ConstantPool, PtrVT, Custom);
  setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress, PtrVT, Custom);
  setOperationAction(ISD::JumpTable, PtrVT, Custom);

  // We need to handle dynamic allocations specially because of the
  // 160-byte area at the bottom of the stack.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, PtrVT, Custom);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Custom);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);

  // Handle prefetches with PFD or PFDRL.
  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    // Assume by default that all vector operations need to be expanded.
    for (unsigned Opcode = 0; Opcode < ISD::BUILTIN_OP_END; ++Opcode)
      if (getOperationAction(Opcode, VT) == Legal)
        setOperationAction(Opcode, VT, Expand);

    // Likewise all truncating stores and extending loads.
    for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
      setTruncStoreAction(VT, InnerVT, Expand);
      setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
    }

    if (isTypeLegal(VT)) {
      // These operations are legal for anything that can be stored in a
      // vector register, even if there is no native support for the format
      // as such. In particular, we can do these for v4f32 even though there
      // are no specific instructions for that format.
      setOperationAction(ISD::LOAD, VT, Legal);
      setOperationAction(ISD::STORE, VT, Legal);
      setOperationAction(ISD::VSELECT, VT, Legal);
      setOperationAction(ISD::BITCAST, VT, Legal);
      setOperationAction(ISD::UNDEF, VT, Legal);

      // Likewise, except that we need to replace the nodes with something
      // more specific.
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    }
  }

  // Handle integer vector types.
  for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
    if (isTypeLegal(VT)) {
      // These operations have direct equivalents.
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Legal);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Legal);
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);
      if (VT != MVT::v2i64)
        setOperationAction(ISD::MUL, VT, Legal);
      setOperationAction(ISD::ABS, VT, Legal);
      setOperationAction(ISD::AND, VT, Legal);
      setOperationAction(ISD::OR, VT, Legal);
      setOperationAction(ISD::XOR, VT, Legal);
      if (Subtarget.hasVectorEnhancements1())
        setOperationAction(ISD::CTPOP, VT, Legal);
      else
        setOperationAction(ISD::CTPOP, VT, Custom);
      setOperationAction(ISD::CTTZ, VT, Legal);
      setOperationAction(ISD::CTLZ, VT, Legal);

      // Convert a GPR scalar to a vector by inserting it into element 0.
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);

      // Use a series of unpacks for extensions.
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);

      // Detect shifts/rotates by a scalar amount and convert them into
      // V*_BY_SCALAR.
      setOperationAction(ISD::SHL, VT, Custom);
      setOperationAction(ISD::SRA, VT, Custom);
      setOperationAction(ISD::SRL, VT, Custom);
      setOperationAction(ISD::ROTL, VT, Custom);

      // Map SETCCs onto one of VCE, VCH or VCHL, swapping the operands
      // and inverting the result as necessary.
      setOperationAction(ISD::SETCC, VT, Custom);
    }
  }

  if (Subtarget.hasVector()) {
    // There should be no need to check for float types other than v2f64
    // since <2 x f32> isn't a legal type.
    setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2f64, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2f64, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2f64, Legal);

    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i64, Legal);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i64, Legal);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2f64, Legal);
  }

  if (Subtarget.hasVectorEnhancements2()) {
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4f32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4f32, Legal);

    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4f32, Legal);
  }

  // Handle floating-point types.
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // We can use FI for FRINT.
      setOperationAction(ISD::FRINT, VT, Legal);

      // We can use the extended form of FI for other rounding operations.
      if (Subtarget.hasFPExtension()) {
        setOperationAction(ISD::FNEARBYINT, VT, Legal);
        setOperationAction(ISD::FFLOOR, VT, Legal);
        setOperationAction(ISD::FCEIL, VT, Legal);
        setOperationAction(ISD::FTRUNC, VT, Legal);
        setOperationAction(ISD::FROUND, VT, Legal);
      }

      // No special instructions for these.
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FSINCOS, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);

      // Special treatment.
      setOperationAction(ISD::IS_FPCLASS, VT, Custom);

      // Handle constrained floating-point operations.
      setOperationAction(ISD::STRICT_FADD, VT, Legal);
      setOperationAction(ISD::STRICT_FSUB, VT, Legal);
      setOperationAction(ISD::STRICT_FMUL, VT, Legal);
      setOperationAction(ISD::STRICT_FDIV, VT, Legal);
      setOperationAction(ISD::STRICT_FMA, VT, Legal);
      setOperationAction(ISD::STRICT_FSQRT, VT, Legal);
      setOperationAction(ISD::STRICT_FRINT, VT, Legal);
      setOperationAction(ISD::STRICT_FP_ROUND, VT, Legal);
      setOperationAction(ISD::STRICT_FP_EXTEND, VT, Legal);
      if (Subtarget.hasFPExtension()) {
        setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
        setOperationAction(ISD::STRICT_FFLOOR, VT, Legal);
        setOperationAction(ISD::STRICT_FCEIL, VT, Legal);
        setOperationAction(ISD::STRICT_FROUND, VT, Legal);
        setOperationAction(ISD::STRICT_FTRUNC, VT, Legal);
      }
    }
  }

  // Handle floating-point vector types.
  if (Subtarget.hasVector()) {
    // Scalar-to-vector conversion is just a subreg.
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);

    // Some insertions and extractions can be done directly but others
    // need to go via integers.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    // These operations have direct equivalents.
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FMA, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FABS, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v2f64, Legal);

    // Handle constrained floating-point operations.
    setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FMA, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::v2f64, Legal);

    setOperationAction(ISD::SETCC, MVT::v2f64, Custom);
    setOperationAction(ISD::SETCC, MVT::v4f32, Custom);
    setOperationAction(ISD::STRICT_FSETCC, MVT::v2f64, Custom);
    setOperationAction(ISD::STRICT_FSETCC, MVT::v4f32, Custom);
    if (Subtarget.hasVectorEnhancements1()) {
      setOperationAction(ISD::STRICT_FSETCCS, MVT::v2f64, Custom);
      setOperationAction(ISD::STRICT_FSETCCS, MVT::v4f32, Custom);
    }
  }

  // The vector enhancements facility 1 has instructions for these.
  if (Subtarget.hasVectorEnhancements1()) {
    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FABS, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::f64, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::f64, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::v2f64, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::v2f64, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v2f64, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::v2f64, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::f32, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::v4f32, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f128, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::f128, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f128, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::f128, Legal);

    // Handle constrained floating-point operations.
    setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FMA, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::v4f32, Legal);
    for (auto VT : { MVT::f32, MVT::f64, MVT::f128,
                     MVT::v4f32, MVT::v2f64 }) {
      setOperationAction(ISD::STRICT_FMAXNUM, VT, Legal);
      setOperationAction(ISD::STRICT_FMINNUM, VT, Legal);
      setOperationAction(ISD::STRICT_FMAXIMUM, VT, Legal);
      setOperationAction(ISD::STRICT_FMINIMUM, VT, Legal);
    }
  }

  // We only have fused f128 multiply-addition on vector registers.
  if (!Subtarget.hasVectorEnhancements1()) {
    setOperationAction(ISD::FMA, MVT::f128, Expand);
    setOperationAction(ISD::STRICT_FMA, MVT::f128, Expand);
  }

  // We don't have a copysign instruction on vector registers.
  if (Subtarget.hasVectorEnhancements1())
    setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);

  // Needed so that we don't try to implement f128 constant loads using
  // a load-and-extend of a f80 constant (in cases where the constant
  // would fit in an f80).
  for (MVT VT : MVT::fp_valuetypes())
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand);

  // We don't have extending load instructions on vector registers.
  if (Subtarget.hasVectorEnhancements1()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f64, Expand);
  }

  // Floating-point truncation and stores need to be done separately.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);

  // We have 64-bit FPR<->GPR moves, but need special handling for
  // 32-bit forms.
  if (!Subtarget.hasVector()) {
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::f32, Custom);
  }

  // VASTART and VACOPY need to deal with the SystemZ-specific varargs
  // structure, but VAEND is a no-op.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  setOperationAction(ISD::GET_ROUNDING, MVT::i32, Custom);

  // Codes for which we want to perform some z-specific combinations.
  setTargetDAGCombine({ISD::ZERO_EXTEND,
                       ISD::SIGN_EXTEND,
                       ISD::SIGN_EXTEND_INREG,
                       ISD::LOAD,
                       ISD::STORE,
                       ISD::VECTOR_SHUFFLE,
                       ISD::EXTRACT_VECTOR_ELT,
                       ISD::FP_ROUND,
                       ISD::STRICT_FP_ROUND,
                       ISD::FP_EXTEND,
                       ISD::SINT_TO_FP,
                       ISD::UINT_TO_FP,
                       ISD::STRICT_FP_EXTEND,
                       ISD::BSWAP,
                       ISD::SDIV,
                       ISD::UDIV,
                       ISD::SREM,
                       ISD::UREM,
                       ISD::INTRINSIC_VOID,
                       ISD::INTRINSIC_W_CHAIN});

  // Handle intrinsics.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We want to use MVC in preference to even a single load/store pair.
  MaxStoresPerMemcpy = Subtarget.hasVector() ? 2 : 0;
  MaxStoresPerMemcpyOptSize = 0;

  // The main memset sequence is a byte store followed by an MVC.
  // Two STC or MV..I stores win over that, but the kind of fused stores
  // generated by target-independent code don't when the byte value is
  // variable. E.g. "STC <reg>;MHI <reg>,257;STH <reg>" is not better
  // than "STC;MVC". Handle the choice in target-specific code instead.
  MaxStoresPerMemset = Subtarget.hasVector() ? 2 : 0;
  MaxStoresPerMemsetOptSize = 0;

  // Default to having -disable-strictnode-mutation on
  IsStrictFPEnabled = true;

  if (Subtarget.isTargetzOS()) {
    struct RTLibCallMapping {
      RTLIB::Libcall Code;
      const char *Name;
    };
    static RTLibCallMapping RTLibCallCommon[] = {
#define HANDLE_LIBCALL(code, name) {RTLIB::code, name},
#include "ZOSLibcallNames.def"
    };
    for (auto &E : RTLibCallCommon)
      setLibcallName(E.Code, E.Name);
  }
}

bool SystemZTargetLowering::useSoftFloat() const {
  return Subtarget.hasSoftFloat();
}

EVT SystemZTargetLowering::getSetCCResultType(const DataLayout &DL,
                                              LLVMContext &, EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

bool SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(
    const MachineFunction &MF, EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  case MVT::f128:
    return Subtarget.hasVectorEnhancements1();
  default:
    break;
  }

  return false;
}

// Return true if the constant can be generated with a vector instruction,
// such as VGM, VGMB or VREPI.
bool SystemZVectorConstantInfo::isVectorConstantLegal(
    const SystemZSubtarget &Subtarget) {
  const SystemZInstrInfo *TII = Subtarget.getInstrInfo();
  if (!Subtarget.hasVector() ||
      (isFP128 && !Subtarget.hasVectorEnhancements1()))
    return false;

  // Try using VECTOR GENERATE BYTE MASK. This is the architecturally-
  // preferred way of creating all-zero and all-one vectors so give it
  // priority over other methods below.
  unsigned Mask = 0;
  unsigned I = 0;
  for (; I < SystemZ::VectorBytes; ++I) {
    uint64_t Byte = IntBits.lshr(I * 8).trunc(8).getZExtValue();
    if (Byte == 0xff)
      Mask |= 1ULL << I;
    else if (Byte != 0)
      break;
  }
  if (I == SystemZ::VectorBytes) {
    Opcode = SystemZISD::BYTE_MASK;
    OpVals.push_back(Mask);
    VecVT = MVT::getVectorVT(MVT::getIntegerVT(8), 16);
    return true;
  }

  if (SplatBitSize > 64)
    return false;

  auto tryValue = [&](uint64_t Value) -> bool {
    // Try VECTOR REPLICATE IMMEDIATE
    int64_t SignedValue = SignExtend64(Value, SplatBitSize);
    if (isInt<16>(SignedValue)) {
      OpVals.push_back(((unsigned) SignedValue));
      Opcode = SystemZISD::REPLICATE;
      VecVT = MVT::getVectorVT(MVT::getIntegerVT(SplatBitSize),
                               SystemZ::VectorBits / SplatBitSize);
      return true;
    }
    // Try VECTOR GENERATE MASK
    unsigned Start, End;
    if (TII->isRxSBGMask(Value, SplatBitSize, Start, End)) {
      // isRxSBGMask returns the bit numbers for a full 64-bit value, with 0
      // denoting 1 << 63 and 63 denoting 1. Convert them to bit numbers for
      // an SplatBitSize value, so that 0 denotes 1 << (SplatBitSize-1).
      OpVals.push_back(Start - (64 - SplatBitSize));
      OpVals.push_back(End - (64 - SplatBitSize));
      Opcode = SystemZISD::ROTATE_MASK;
      VecVT = MVT::getVectorVT(MVT::getIntegerVT(SplatBitSize),
                               SystemZ::VectorBits / SplatBitSize);
      return true;
    }
    return false;
  };

  // First try assuming that any undefined bits above the highest set bit
  // and below the lowest set bit are 1s. This increases the likelihood of
  // being able to use a sign-extended element value in VECTOR REPLICATE
  // IMMEDIATE or a wraparound mask in VECTOR GENERATE MASK.
  uint64_t SplatBitsZ = SplatBits.getZExtValue();
  uint64_t SplatUndefZ = SplatUndef.getZExtValue();
  unsigned LowerBits = llvm::countr_zero(SplatBitsZ);
  unsigned UpperBits = llvm::countl_zero(SplatBitsZ);
  uint64_t Lower = SplatUndefZ & maskTrailingOnes<uint64_t>(LowerBits);
  uint64_t Upper = SplatUndefZ & maskLeadingOnes<uint64_t>(UpperBits);
  if (tryValue(SplatBitsZ | Upper | Lower))
    return true;

  // Now try assuming that any undefined bits between the first and
  // last defined set bits are set. This increases the chances of
  // using a non-wraparound mask.
  uint64_t Middle = SplatUndefZ & ~Upper & ~Lower;
  return tryValue(SplatBitsZ | Middle);
}
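
// A worked example of the matching above, with values chosen for
// illustration: the 128-bit constant 0x00000000ffffffff00000000ffffffff
// consists solely of 0x00 and 0xff bytes, so it is matched as BYTE_MASK
// with Mask 0x0f0f (one bit per 0xff byte, counted from the least
// significant byte). A 16-bit splat of 0xfffe sign-extends to -2, which
// fits in 16 bits, so it is matched as REPLICATE over v8i16.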

SystemZVectorConstantInfo::SystemZVectorConstantInfo(APInt IntImm) {
  if (IntImm.isSingleWord()) {
    IntBits = APInt(128, IntImm.getZExtValue());
    IntBits <<= (SystemZ::VectorBits - IntImm.getBitWidth());
  } else
    IntBits = IntImm;
  assert(IntBits.getBitWidth() == 128 && "Unsupported APInt.");

  // Find the smallest splat.
  SplatBits = IntImm;
  unsigned Width = SplatBits.getBitWidth();
  while (Width > 8) {
    unsigned HalfSize = Width / 2;
    APInt HighValue = SplatBits.lshr(HalfSize).trunc(HalfSize);
    APInt LowValue = SplatBits.trunc(HalfSize);

    // If the two halves do not match, stop here.
    if (HighValue != LowValue || 8 > HalfSize)
      break;

    SplatBits = HighValue;
    Width = HalfSize;
  }
  SplatUndef = 0;
  SplatBitSize = Width;
}
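
// An illustrative trace of the halving loop above: starting from the 128-bit
// value 0x00010001...0001, the 64-bit halves match, then the 32-bit halves,
// then the 16-bit halves, leaving SplatBits == 0x0001 and SplatBitSize == 16;
// the 8-bit halves 0x00 and 0x01 differ, so the loop stops there.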

SystemZVectorConstantInfo::SystemZVectorConstantInfo(BuildVectorSDNode *BVN) {
  assert(BVN->isConstant() && "Expected a constant BUILD_VECTOR");
  bool HasAnyUndefs;

  // Get IntBits by finding the 128 bit splat.
  BVN->isConstantSplat(IntBits, SplatUndef, SplatBitSize, HasAnyUndefs, 128,
                       true);

  // Get SplatBits by finding the 8 bit or greater splat.
  BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, 8,
                       true);
}

bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                         bool ForCodeSize) const {
  // We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
  if (Imm.isZero() || Imm.isNegZero())
    return true;

  return SystemZVectorConstantInfo(Imm).isVectorConstantLegal(Subtarget);
}

/// Returns true if stack probing through inline assembly is requested.
bool SystemZTargetLowering::hasInlineStackProbe(const MachineFunction &MF) const {
  // If the function specifically requests inline stack probes, emit them.
  if (MF.getFunction().hasFnAttribute("probe-stack"))
    return MF.getFunction().getFnAttribute("probe-stack").getValueAsString() ==
           "inline-asm";
  return false;
}
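
// For example, a function defined in IR as
//   define void @f() "probe-stack"="inline-asm" { ... }
// gets inline probes; any other attribute value, or no attribute at all,
// falls back to the default probing behavior.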

TargetLowering::AtomicExpansionKind
SystemZTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
  // Don't expand subword operations as they require special treatment.
  if (RMW->getType()->isIntegerTy(8) || RMW->getType()->isIntegerTy(16))
    return AtomicExpansionKind::None;

  // Don't expand if there is a target instruction available.
  if (Subtarget.hasInterlockedAccess1() &&
      (RMW->getType()->isIntegerTy(32) || RMW->getType()->isIntegerTy(64)) &&
      (RMW->getOperation() == AtomicRMWInst::BinOp::Add ||
       RMW->getOperation() == AtomicRMWInst::BinOp::Sub ||
       RMW->getOperation() == AtomicRMWInst::BinOp::And ||
       RMW->getOperation() == AtomicRMWInst::BinOp::Or ||
       RMW->getOperation() == AtomicRMWInst::BinOp::Xor))
    return AtomicExpansionKind::None;

  return AtomicExpansionKind::CmpXChg;
}
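
// Making the three outcomes concrete (a sketch, not an exhaustive list):
// an i16 "atomicrmw add" is left unexpanded for the backend's subword
// handling; an i64 "atomicrmw and" on a machine with the interlocked-access
// facility can use a single load-and-and instruction (LANG) and is also
// left alone; an i32 "atomicrmw nand" has no such instruction and is
// expanded into a compare-and-swap loop.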

bool SystemZTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  // We can use CGFI or CLGFI.
  return isInt<32>(Imm) || isUInt<32>(Imm);
}

bool SystemZTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  // We can use ALGFI or SLGFI.
  return isUInt<32>(Imm) || isUInt<32>(-Imm);
}

bool SystemZTargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned, Align, MachineMemOperand::Flags, unsigned *Fast) const {
  // Unaligned accesses should never be slower than the expanded version.
  // We check specifically for aligned accesses in the few cases where
  // they are required.
  if (Fast)
    *Fast = 1;
  return true;
}

// Information about the addressing mode for a memory access.
struct AddressingMode {
  // True if a long displacement is supported.
  bool LongDisplacement;

  // True if use of index register is supported.
  bool IndexReg;

  AddressingMode(bool LongDispl, bool IdxReg) :
    LongDisplacement(LongDispl), IndexReg(IdxReg) {}
};

// Return the desired addressing mode for a Load which has only one use (in
// the same block) which is a Store.
static AddressingMode getLoadStoreAddrMode(bool HasVector,
                                           Type *Ty) {
  // With vector support a Load->Store combination may be combined to either
  // an MVC or vector operations and it seems to work best to allow the
  // vector addressing mode.
  if (HasVector)
    return AddressingMode(false/*LongDispl*/, true/*IdxReg*/);

  // Otherwise only the MVC case is special.
  bool MVC = Ty->isIntegerTy(8);
  return AddressingMode(!MVC/*LongDispl*/, !MVC/*IdxReg*/);
}

// Return the addressing mode which seems most desirable given an LLVM
// Instruction pointer.
static AddressingMode
supportedAddressingMode(Instruction *I, bool HasVector) {
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      return AddressingMode(false/*LongDispl*/, false/*IdxReg*/);
    }
  }

  if (isa<LoadInst>(I) && I->hasOneUse()) {
    auto *SingleUser = cast<Instruction>(*I->user_begin());
    if (SingleUser->getParent() == I->getParent()) {
      if (isa<ICmpInst>(SingleUser)) {
        if (auto *C = dyn_cast<ConstantInt>(SingleUser->getOperand(1)))
          if (C->getBitWidth() <= 64 &&
              (isInt<16>(C->getSExtValue()) || isUInt<16>(C->getZExtValue())))
            // Comparison of memory with 16 bit signed / unsigned immediate
            return AddressingMode(false/*LongDispl*/, false/*IdxReg*/);
      } else if (isa<StoreInst>(SingleUser))
        // Load->Store
        return getLoadStoreAddrMode(HasVector, I->getType());
    }
  } else if (auto *StoreI = dyn_cast<StoreInst>(I)) {
    if (auto *LoadI = dyn_cast<LoadInst>(StoreI->getValueOperand()))
      if (LoadI->hasOneUse() && LoadI->getParent() == I->getParent())
        // Load->Store
        return getLoadStoreAddrMode(HasVector, LoadI->getType());
  }

  if (HasVector && (isa<LoadInst>(I) || isa<StoreInst>(I))) {

    // * Use LDE instead of LE/LEY for z13 to avoid partial register
    //   dependencies (LDE only supports small offsets).
    // * Utilize the vector registers to hold floating point
    //   values (vector load / store instructions only support small
    //   offsets).

    Type *MemAccessTy = (isa<LoadInst>(I) ? I->getType() :
                         I->getOperand(0)->getType());
    bool IsFPAccess = MemAccessTy->isFloatingPointTy();
    bool IsVectorAccess = MemAccessTy->isVectorTy();

    // A store of an extracted vector element will be combined into a VSTE type
    // instruction.
    if (!IsVectorAccess && isa<StoreInst>(I)) {
      Value *DataOp = I->getOperand(0);
      if (isa<ExtractElementInst>(DataOp))
        IsVectorAccess = true;
    }

    // A load which gets inserted into a vector element will be combined into a
    // VLE type instruction.
    if (!IsVectorAccess && isa<LoadInst>(I) && I->hasOneUse()) {
      User *LoadUser = *I->user_begin();
      if (isa<InsertElementInst>(LoadUser))
        IsVectorAccess = true;
    }

    if (IsFPAccess || IsVectorAccess)
      return AddressingMode(false/*LongDispl*/, true/*IdxReg*/);
  }

  return AddressingMode(true/*LongDispl*/, true/*IdxReg*/);
}
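
// An example of the Load->Store special casing above: an i8 load whose
// single in-block user is a store is an MVC candidate, so without vector
// support it reports (LongDispl=false, IdxReg=false), matching MVC's
// base-plus-12-bit-displacement addressing; with vector support the same
// pattern reports (LongDispl=false, IdxReg=true) instead.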

bool SystemZTargetLowering::isLegalAddressingMode(const DataLayout &DL,
       const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I) const {
  // Punt on globals for now, although they can be used in limited
  // RELATIVE LONG cases.
  if (AM.BaseGV)
    return false;

  // Require a 20-bit signed offset.
  if (!isInt<20>(AM.BaseOffs))
    return false;

  bool RequireD12 = Subtarget.hasVector() && Ty->isVectorTy();
  AddressingMode SupportedAM(!RequireD12, true);
  if (I != nullptr)
    SupportedAM = supportedAddressingMode(I, Subtarget.hasVector());

  if (!SupportedAM.LongDisplacement && !isUInt<12>(AM.BaseOffs))
    return false;

  if (!SupportedAM.IndexReg)
    // No indexing allowed.
    return AM.Scale == 0;
  else
    // Indexing is OK but no scale factor can be applied.
    return AM.Scale == 0 || AM.Scale == 1;
}
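
// Putting numbers on the displacement checks above: an offset of 4095 fits
// the unsigned 12-bit form and is accepted in either mode; 4096 requires
// the signed 20-bit long-displacement form; anything outside
// [-524288, 524287] is rejected outright.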

bool SystemZTargetLowering::findOptimalMemOpLowering(
    std::vector<EVT> &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS,
    unsigned SrcAS, const AttributeList &FuncAttributes) const {
  const int MVCFastLen = 16;

  if (Limit != ~unsigned(0)) {
    // Don't expand Op into scalar loads/stores in these cases:
    if (Op.isMemcpy() && Op.allowOverlap() && Op.size() <= MVCFastLen)
      return false; // Small memcpy: Use MVC
    if (Op.isMemset() && Op.size() - 1 <= MVCFastLen)
      return false; // Small memset (first byte with STC/MVI): Use MVC
    if (Op.isZeroMemset())
      return false; // Memset zero: Use XC
  }

  return TargetLowering::findOptimalMemOpLowering(MemOps, Limit, Op, DstAS,
                                                  SrcAS, FuncAttributes);
}
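
// The practical effect (a sketch): a 16-byte memcpy is left to become a
// single MVC rather than two 8-byte load/store pairs; a memset of up to
// 17 bytes with a variable value stays as an STC of the first byte plus an
// MVC that propagates it; and a memset of zero is kept so that XC can be
// used.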

EVT SystemZTargetLowering::getOptimalMemOpType(const MemOp &Op,
                                   const AttributeList &FuncAttributes) const {
  return Subtarget.hasVector() ? MVT::v2i64 : MVT::Other;
}

bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {
  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
    return false;
  unsigned FromBits = FromType->getPrimitiveSizeInBits().getFixedValue();
  unsigned ToBits = ToType->getPrimitiveSizeInBits().getFixedValue();
  return FromBits > ToBits;
}

bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const {
  if (!FromVT.isInteger() || !ToVT.isInteger())
    return false;
  unsigned FromBits = FromVT.getFixedSizeInBits();
  unsigned ToBits = ToVT.getFixedSizeInBits();
  return FromBits > ToBits;
}

//===----------------------------------------------------------------------===//
// Inline asm support
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
SystemZTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'a': // Address register
    case 'd': // Data register (equivalent to 'r')
    case 'f': // Floating-point register
    case 'h': // High-part register
    case 'r': // General-purpose register
    case 'v': // Vector register
      return C_RegisterClass;

    case 'Q': // Memory with base and unsigned 12-bit displacement
    case 'R': // Likewise, plus an index
    case 'S': // Memory with base and signed 20-bit displacement
    case 'T': // Likewise, plus an index
    case 'm': // Equivalent to 'T'.
      return C_Memory;

    case 'I': // Unsigned 8-bit constant
    case 'J': // Unsigned 12-bit constant
    case 'K': // Signed 16-bit constant
    case 'L': // Signed 20-bit displacement (on all targets we support)
    case 'M': // 0x7fffffff
      return C_Immediate;

    default:
      break;
    }
  } else if (Constraint.size() == 2 && Constraint[0] == 'Z') {
    switch (Constraint[1]) {
    case 'Q': // Address with base and unsigned 12-bit displacement
    case 'R': // Likewise, plus an index
    case 'S': // Address with base and signed 20-bit displacement
    case 'T': // Likewise, plus an index
      return C_Address;

    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight SystemZTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;

  case 'a': // Address register
  case 'd': // Data register (equivalent to 'r')
  case 'h': // High-part register
  case 'r': // General-purpose register
    weight = CallOperandVal->getType()->isIntegerTy() ? CW_Register : CW_Default;
    break;

  case 'f': // Floating-point register
    if (!useSoftFloat())
      weight = type->isFloatingPointTy() ? CW_Register : CW_Default;
    break;

  case 'v': // Vector register
    if (Subtarget.hasVector())
      weight = (type->isVectorTy() || type->isFloatingPointTy()) ? CW_Register
                                                                 : CW_Default;
    break;

  case 'I': // Unsigned 8-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'J': // Unsigned 12-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'K': // Signed 16-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'L': // Signed 20-bit displacement (on all targets we support)
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'M': // 0x7fffffff
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
        weight = CW_Constant;
    break;
  }
  return weight;
}
1241 | | // Parse a "{tNNN}" register constraint for which the register type "t" |
1242 | | // has already been verified. RC is the class associated with "t" and
1243 | | // Map maps 0-based register numbers to LLVM register numbers. |
1244 | | static std::pair<unsigned, const TargetRegisterClass *> |
1245 | | parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC, |
1246 | 0 | const unsigned *Map, unsigned Size) { |
1247 | 0 | assert(*(Constraint.end()-1) == '}' && "Missing '}'"); |
1248 | 0 | if (isdigit(Constraint[2])) { |
1249 | 0 | unsigned Index; |
1250 | 0 | bool Failed = |
1251 | 0 | Constraint.slice(2, Constraint.size() - 1).getAsInteger(10, Index); |
1252 | 0 | if (!Failed && Index < Size && Map[Index]) |
1253 | 0 | return std::make_pair(Map[Index], RC); |
1254 | 0 | } |
1255 | 0 | return std::make_pair(0U, nullptr); |
1256 | 0 | } |
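 | | // A standalone sketch of the same parse (hypothetical, simplified): the
 | | // digits between the type letter and the closing brace select an entry in
 | | // the register map.
 | | //
 | | //   #include <string>
 | | //   unsigned regIndexOf(const std::string &C) {     // e.g. C == "{r11}"
 | | //     return std::stoul(C.substr(2, C.size() - 3)); // -> 11
 | | //   }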
1257 | | |
1258 | | std::pair<unsigned, const TargetRegisterClass *> |
1259 | | SystemZTargetLowering::getRegForInlineAsmConstraint( |
1260 | 0 | const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const { |
1261 | 0 | if (Constraint.size() == 1) { |
1262 | | // GCC Constraint Letters |
1263 | 0 | switch (Constraint[0]) { |
1264 | 0 | default: break; |
1265 | 0 | case 'd': // Data register (equivalent to 'r') |
1266 | 0 | case 'r': // General-purpose register |
1267 | 0 | if (VT.getSizeInBits() == 64) |
1268 | 0 | return std::make_pair(0U, &SystemZ::GR64BitRegClass); |
1269 | 0 | else if (VT.getSizeInBits() == 128) |
1270 | 0 | return std::make_pair(0U, &SystemZ::GR128BitRegClass); |
1271 | 0 | return std::make_pair(0U, &SystemZ::GR32BitRegClass); |
1272 | | |
1273 | 0 | case 'a': // Address register |
1274 | 0 | if (VT == MVT::i64) |
1275 | 0 | return std::make_pair(0U, &SystemZ::ADDR64BitRegClass); |
1276 | 0 | else if (VT == MVT::i128) |
1277 | 0 | return std::make_pair(0U, &SystemZ::ADDR128BitRegClass); |
1278 | 0 | return std::make_pair(0U, &SystemZ::ADDR32BitRegClass); |
1279 | | |
1280 | 0 | case 'h': // High-part register (an LLVM extension) |
1281 | 0 | return std::make_pair(0U, &SystemZ::GRH32BitRegClass); |
1282 | | |
1283 | 0 | case 'f': // Floating-point register |
1284 | 0 | if (!useSoftFloat()) { |
1285 | 0 | if (VT.getSizeInBits() == 64) |
1286 | 0 | return std::make_pair(0U, &SystemZ::FP64BitRegClass); |
1287 | 0 | else if (VT.getSizeInBits() == 128) |
1288 | 0 | return std::make_pair(0U, &SystemZ::FP128BitRegClass); |
1289 | 0 | return std::make_pair(0U, &SystemZ::FP32BitRegClass); |
1290 | 0 | } |
1291 | 0 | break; |
1292 | | |
1293 | 0 | case 'v': // Vector register |
1294 | 0 | if (Subtarget.hasVector()) { |
1295 | 0 | if (VT.getSizeInBits() == 32) |
1296 | 0 | return std::make_pair(0U, &SystemZ::VR32BitRegClass); |
1297 | 0 | if (VT.getSizeInBits() == 64) |
1298 | 0 | return std::make_pair(0U, &SystemZ::VR64BitRegClass); |
1299 | 0 | return std::make_pair(0U, &SystemZ::VR128BitRegClass); |
1300 | 0 | } |
1301 | 0 | break; |
1302 | 0 | } |
1303 | 0 | } |
1304 | 0 | if (Constraint.size() > 0 && Constraint[0] == '{') { |
1305 | | |
1306 | | // A clobber constraint (e.g. ~{f0}) will have MVT::Other, whose size
1307 | | // cannot be queried.
1308 | 0 | auto getVTSizeInBits = [&VT]() { |
1309 | 0 | return VT == MVT::Other ? 0 : VT.getSizeInBits(); |
1310 | 0 | }; |
1311 | | |
1312 | | // We need to override the default register parsing for GPRs and FPRs |
1313 | | // because the interpretation depends on VT. The internal names of |
1314 | | // the registers are also different from the external names |
1315 | | // (F0D and F0S instead of F0, etc.). |
1316 | 0 | if (Constraint[1] == 'r') { |
1317 | 0 | if (getVTSizeInBits() == 32) |
1318 | 0 | return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass, |
1319 | 0 | SystemZMC::GR32Regs, 16); |
1320 | 0 | if (getVTSizeInBits() == 128) |
1321 | 0 | return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass, |
1322 | 0 | SystemZMC::GR128Regs, 16); |
1323 | 0 | return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass, |
1324 | 0 | SystemZMC::GR64Regs, 16); |
1325 | 0 | } |
1326 | 0 | if (Constraint[1] == 'f') { |
1327 | 0 | if (useSoftFloat()) |
1328 | 0 | return std::make_pair( |
1329 | 0 | 0u, static_cast<const TargetRegisterClass *>(nullptr)); |
1330 | 0 | if (getVTSizeInBits() == 32) |
1331 | 0 | return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass, |
1332 | 0 | SystemZMC::FP32Regs, 16); |
1333 | 0 | if (getVTSizeInBits() == 128) |
1334 | 0 | return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass, |
1335 | 0 | SystemZMC::FP128Regs, 16); |
1336 | 0 | return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass, |
1337 | 0 | SystemZMC::FP64Regs, 16); |
1338 | 0 | } |
1339 | 0 | if (Constraint[1] == 'v') { |
1340 | 0 | if (!Subtarget.hasVector()) |
1341 | 0 | return std::make_pair( |
1342 | 0 | 0u, static_cast<const TargetRegisterClass *>(nullptr)); |
1343 | 0 | if (getVTSizeInBits() == 32) |
1344 | 0 | return parseRegisterNumber(Constraint, &SystemZ::VR32BitRegClass, |
1345 | 0 | SystemZMC::VR32Regs, 32); |
1346 | 0 | if (getVTSizeInBits() == 64) |
1347 | 0 | return parseRegisterNumber(Constraint, &SystemZ::VR64BitRegClass, |
1348 | 0 | SystemZMC::VR64Regs, 32); |
1349 | 0 | return parseRegisterNumber(Constraint, &SystemZ::VR128BitRegClass, |
1350 | 0 | SystemZMC::VR128Regs, 32); |
1351 | 0 | } |
1352 | 0 | } |
1353 | 0 | return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); |
1354 | 0 | } |
1355 | | |
1356 | | // FIXME? Maybe this could be a TableGen attribute on some registers and |
1357 | | // this table could be generated automatically from RegInfo. |
1358 | | Register |
1359 | | SystemZTargetLowering::getRegisterByName(const char *RegName, LLT VT, |
1360 | 0 | const MachineFunction &MF) const { |
1361 | 0 | Register Reg = |
1362 | 0 | StringSwitch<Register>(RegName) |
1363 | 0 | .Case("r4", Subtarget.isTargetXPLINK64() ? SystemZ::R4D : 0) |
1364 | 0 | .Case("r15", Subtarget.isTargetELF() ? SystemZ::R15D : 0) |
1365 | 0 | .Default(0); |
1366 | 0 |
1367 | 0 | if (Reg) |
1368 | 0 | return Reg; |
1369 | 0 | report_fatal_error("Invalid register name global variable"); |
1370 | 0 | } |
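 | | // This hook backs llvm.read_register. A source-level sketch (hypothetical):
 | | // a global register variable pinned to the ELF stack pointer accepted above.
 | | //
 | | //   register unsigned long sp asm("r15");
 | | //   unsigned long current_sp(void) { return sp; }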
1371 | | |
1372 | | Register SystemZTargetLowering::getExceptionPointerRegister( |
1373 | 0 | const Constant *PersonalityFn) const { |
1374 | 0 | return Subtarget.isTargetXPLINK64() ? SystemZ::R1D : SystemZ::R6D; |
1375 | 0 | } |
1376 | | |
1377 | | Register SystemZTargetLowering::getExceptionSelectorRegister( |
1378 | 0 | const Constant *PersonalityFn) const { |
1379 | 0 | return Subtarget.isTargetXPLINK64() ? SystemZ::R2D : SystemZ::R7D; |
1380 | 0 | } |
1381 | | |
1382 | | void SystemZTargetLowering::LowerAsmOperandForConstraint( |
1383 | | SDValue Op, StringRef Constraint, std::vector<SDValue> &Ops, |
1384 | 0 | SelectionDAG &DAG) const { |
1385 | | // Only support length 1 constraints for now. |
1386 | 0 | if (Constraint.size() == 1) { |
1387 | 0 | switch (Constraint[0]) { |
1388 | 0 | case 'I': // Unsigned 8-bit constant |
1389 | 0 | if (auto *C = dyn_cast<ConstantSDNode>(Op)) |
1390 | 0 | if (isUInt<8>(C->getZExtValue())) |
1391 | 0 | Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op), |
1392 | 0 | Op.getValueType())); |
1393 | 0 | return; |
1394 | | |
1395 | 0 | case 'J': // Unsigned 12-bit constant |
1396 | 0 | if (auto *C = dyn_cast<ConstantSDNode>(Op)) |
1397 | 0 | if (isUInt<12>(C->getZExtValue())) |
1398 | 0 | Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op), |
1399 | 0 | Op.getValueType())); |
1400 | 0 | return; |
1401 | | |
1402 | 0 | case 'K': // Signed 16-bit constant |
1403 | 0 | if (auto *C = dyn_cast<ConstantSDNode>(Op)) |
1404 | 0 | if (isInt<16>(C->getSExtValue())) |
1405 | 0 | Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op), |
1406 | 0 | Op.getValueType())); |
1407 | 0 | return; |
1408 | | |
1409 | 0 | case 'L': // Signed 20-bit displacement (on all targets we support) |
1410 | 0 | if (auto *C = dyn_cast<ConstantSDNode>(Op)) |
1411 | 0 | if (isInt<20>(C->getSExtValue())) |
1412 | 0 | Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op), |
1413 | 0 | Op.getValueType())); |
1414 | 0 | return; |
1415 | | |
1416 | 0 | case 'M': // 0x7fffffff |
1417 | 0 | if (auto *C = dyn_cast<ConstantSDNode>(Op)) |
1418 | 0 | if (C->getZExtValue() == 0x7fffffff) |
1419 | 0 | Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op), |
1420 | 0 | Op.getValueType())); |
1421 | 0 | return; |
1422 | 0 | } |
1423 | 0 | } |
1424 | 0 | TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); |
1425 | 0 | } |
1426 | | |
1427 | | //===----------------------------------------------------------------------===// |
1428 | | // Calling conventions |
1429 | | //===----------------------------------------------------------------------===// |
1430 | | |
1431 | | #include "SystemZGenCallingConv.inc" |
1432 | | |
1433 | | const MCPhysReg *SystemZTargetLowering::getScratchRegisters( |
1434 | 0 | CallingConv::ID) const { |
1435 | 0 | static const MCPhysReg ScratchRegs[] = { SystemZ::R0D, SystemZ::R1D, |
1436 | 0 | SystemZ::R14D, 0 }; |
1437 | 0 | return ScratchRegs; |
1438 | 0 | } |
1439 | | |
1440 | | bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType, |
1441 | 0 | Type *ToType) const { |
1442 | 0 | return isTruncateFree(FromType, ToType); |
1443 | 0 | } |
1444 | | |
1445 | 0 | bool SystemZTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { |
1446 | 0 | return CI->isTailCall(); |
1447 | 0 | } |
1448 | | |
1449 | | // Value is a value that has been passed to us in the location described by VA |
1450 | | // (and so has type VA.getLocVT()). Convert Value to VA.getValVT(), chaining |
1451 | | // any loads onto Chain. |
1452 | | static SDValue convertLocVTToValVT(SelectionDAG &DAG, const SDLoc &DL, |
1453 | | CCValAssign &VA, SDValue Chain, |
1454 | 0 | SDValue Value) { |
1455 | | // If the argument has been promoted from a smaller type, insert an |
1456 | | // assertion to capture this. |
1457 | 0 | if (VA.getLocInfo() == CCValAssign::SExt) |
1458 | 0 | Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value, |
1459 | 0 | DAG.getValueType(VA.getValVT())); |
1460 | 0 | else if (VA.getLocInfo() == CCValAssign::ZExt) |
1461 | 0 | Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value, |
1462 | 0 | DAG.getValueType(VA.getValVT())); |
1463 | 0 |
1464 | 0 | if (VA.isExtInLoc()) |
1465 | 0 | Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value); |
1466 | 0 | else if (VA.getLocInfo() == CCValAssign::BCvt) { |
1467 | | // If this is a short vector argument loaded from the stack, |
1468 | | // extend from i64 to full vector size and then bitcast. |
1469 | 0 | assert(VA.getLocVT() == MVT::i64); |
1470 | 0 | assert(VA.getValVT().isVector()); |
1471 | 0 | Value = DAG.getBuildVector(MVT::v2i64, DL, {Value, DAG.getUNDEF(MVT::i64)}); |
1472 | 0 | Value = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Value); |
1473 | 0 | } else |
1474 | 0 | assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo"); |
1475 | 0 | return Value; |
1476 | 0 | } |
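 | | // Worked instance: an i32 argument that arrived sign-extended in a 64-bit
 | | // location becomes (truncate (AssertSext %loc, i32)), so later combines can
 | | // rely on the upper bits without re-extending.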
1477 | | |
1478 | | // Value is a value of type VA.getValVT() that we need to copy into |
1479 | | // the location described by VA. Return a copy of Value converted to |
1480 | | // VA.getLocVT(). The caller is responsible for handling indirect values.
1481 | | static SDValue convertValVTToLocVT(SelectionDAG &DAG, const SDLoc &DL, |
1482 | 0 | CCValAssign &VA, SDValue Value) { |
1483 | 0 | switch (VA.getLocInfo()) { |
1484 | 0 | case CCValAssign::SExt: |
1485 | 0 | return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value); |
1486 | 0 | case CCValAssign::ZExt: |
1487 | 0 | return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value); |
1488 | 0 | case CCValAssign::AExt: |
1489 | 0 | return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value); |
1490 | 0 | case CCValAssign::BCvt: { |
1491 | 0 | assert(VA.getLocVT() == MVT::i64 || VA.getLocVT() == MVT::i128); |
1492 | 0 | assert(VA.getValVT().isVector() || VA.getValVT() == MVT::f32 || |
1493 | 0 | VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::f128); |
1494 | | // For an f32 vararg we need to first promote it to an f64 and then |
1495 | | // bitcast it to an i64. |
1496 | 0 | if (VA.getValVT() == MVT::f32 && VA.getLocVT() == MVT::i64) |
1497 | 0 | Value = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f64, Value); |
1498 | 0 | MVT BitCastToType = VA.getValVT().isVector() && VA.getLocVT() == MVT::i64 |
1499 | 0 | ? MVT::v2i64 |
1500 | 0 | : VA.getLocVT(); |
1501 | 0 | Value = DAG.getNode(ISD::BITCAST, DL, BitCastToType, Value); |
1502 | | // For ELF, this is a short vector argument to be stored to the stack, |
1503 | | // bitcast to v2i64 and then extract first element. |
1504 | 0 | if (BitCastToType == MVT::v2i64) |
1505 | 0 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VA.getLocVT(), Value, |
1506 | 0 | DAG.getConstant(0, DL, MVT::i32)); |
1507 | 0 | return Value; |
1508 | 0 | } |
1509 | 0 | case CCValAssign::Full: |
1510 | 0 | return Value; |
1511 | 0 | default: |
1512 | 0 | llvm_unreachable("Unhandled getLocInfo()"); |
1513 | 0 | } |
1514 | 0 | } |
1515 | | |
1516 | 0 | static SDValue lowerI128ToGR128(SelectionDAG &DAG, SDValue In) { |
1517 | 0 | SDLoc DL(In); |
1518 | 0 | SDValue Lo, Hi; |
1519 | 0 | if (DAG.getTargetLoweringInfo().isTypeLegal(MVT::i128)) { |
1520 | 0 | Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i64, In); |
1521 | 0 | Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i64, |
1522 | 0 | DAG.getNode(ISD::SRL, DL, MVT::i128, In, |
1523 | 0 | DAG.getConstant(64, DL, MVT::i32))); |
1524 | 0 | } else { |
1525 | 0 | std::tie(Lo, Hi) = DAG.SplitScalar(In, DL, MVT::i64, MVT::i64); |
1526 | 0 | } |
1527 | 0 |
1528 | 0 | SDNode *Pair = DAG.getMachineNode(SystemZ::PAIR128, DL, |
1529 | 0 | MVT::Untyped, Hi, Lo); |
1530 | 0 | return SDValue(Pair, 0); |
1531 | 0 | } |
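 | | // The same split written as plain C++ (illustrative only): the value is
 | | // decomposed into the (Hi, Lo) halves that the PAIR128 pseudo glues into an
 | | // even/odd GR128 register pair.
 | | //
 | | //   void split128(unsigned __int128 In, uint64_t &Hi, uint64_t &Lo) {
 | | //     Lo = (uint64_t)In;         // TRUNCATE
 | | //     Hi = (uint64_t)(In >> 64); // SRL by 64, then TRUNCATE
 | | //   }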
1532 | | |
1533 | 0 | static SDValue lowerGR128ToI128(SelectionDAG &DAG, SDValue In) { |
1534 | 0 | SDLoc DL(In); |
1535 | 0 | SDValue Hi = DAG.getTargetExtractSubreg(SystemZ::subreg_h64, |
1536 | 0 | DL, MVT::i64, In); |
1537 | 0 | SDValue Lo = DAG.getTargetExtractSubreg(SystemZ::subreg_l64, |
1538 | 0 | DL, MVT::i64, In); |
1539 | 0 |
1540 | 0 | if (DAG.getTargetLoweringInfo().isTypeLegal(MVT::i128)) { |
1541 | 0 | Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i128, Lo); |
1542 | 0 | Hi = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i128, Hi); |
1543 | 0 | Hi = DAG.getNode(ISD::SHL, DL, MVT::i128, Hi, |
1544 | 0 | DAG.getConstant(64, DL, MVT::i32)); |
1545 | 0 | return DAG.getNode(ISD::OR, DL, MVT::i128, Lo, Hi); |
1546 | 0 | } else { |
1547 | 0 | return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i128, Lo, Hi); |
1548 | 0 | } |
1549 | 0 | } |
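 | | // The inverse recombination as plain C++ (illustrative only):
 | | //
 | | //   unsigned __int128 join128(uint64_t Hi, uint64_t Lo) {
 | | //     return ((unsigned __int128)Hi << 64) | Lo; // SHL + OR / BUILD_PAIR
 | | //   }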
1550 | | |
1551 | | bool SystemZTargetLowering::splitValueIntoRegisterParts( |
1552 | | SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, |
1553 | 0 | unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const { |
1554 | 0 | EVT ValueVT = Val.getValueType(); |
1555 | 0 | if (ValueVT.getSizeInBits() == 128 && NumParts == 1 && PartVT == MVT::Untyped) { |
1556 | | // Inline assembly operand. |
1557 | 0 | Parts[0] = lowerI128ToGR128(DAG, DAG.getBitcast(MVT::i128, Val)); |
1558 | 0 | return true; |
1559 | 0 | } |
1560 | | |
1561 | 0 | return false; |
1562 | 0 | } |
1563 | | |
1564 | | SDValue SystemZTargetLowering::joinRegisterPartsIntoValue( |
1565 | | SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, |
1566 | 0 | MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const { |
1567 | 0 | if (ValueVT.getSizeInBits() == 128 && NumParts == 1 && PartVT == MVT::Untyped) { |
1568 | | // Inline assembly operand. |
1569 | 0 | SDValue Res = lowerGR128ToI128(DAG, Parts[0]); |
1570 | 0 | return DAG.getBitcast(ValueVT, Res); |
1571 | 0 | } |
1572 | | |
1573 | 0 | return SDValue(); |
1574 | 0 | } |
1575 | | |
1576 | | SDValue SystemZTargetLowering::LowerFormalArguments( |
1577 | | SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, |
1578 | | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, |
1579 | 0 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { |
1580 | 0 | MachineFunction &MF = DAG.getMachineFunction(); |
1581 | 0 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
1582 | 0 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
1583 | 0 | SystemZMachineFunctionInfo *FuncInfo = |
1584 | 0 | MF.getInfo<SystemZMachineFunctionInfo>(); |
1585 | 0 | auto *TFL = Subtarget.getFrameLowering<SystemZELFFrameLowering>(); |
1586 | 0 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
1587 | | |
1588 | | // Assign locations to all of the incoming arguments. |
1589 | 0 | SmallVector<CCValAssign, 16> ArgLocs; |
1590 | 0 | SystemZCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); |
1591 | 0 | CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ); |
1592 | 0 | FuncInfo->setSizeOfFnParams(CCInfo.getStackSize()); |
1593 | 0 |
1594 | 0 | unsigned NumFixedGPRs = 0; |
1595 | 0 | unsigned NumFixedFPRs = 0; |
1596 | 0 | for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) { |
1597 | 0 | SDValue ArgValue; |
1598 | 0 | CCValAssign &VA = ArgLocs[I]; |
1599 | 0 | EVT LocVT = VA.getLocVT(); |
1600 | 0 | if (VA.isRegLoc()) { |
1601 | | // Arguments passed in registers |
1602 | 0 | const TargetRegisterClass *RC; |
1603 | 0 | switch (LocVT.getSimpleVT().SimpleTy) { |
1604 | 0 | default: |
1605 | | // Integers smaller than i64 should be promoted to i64. |
1606 | 0 | llvm_unreachable("Unexpected argument type"); |
1607 | 0 | case MVT::i32: |
1608 | 0 | NumFixedGPRs += 1; |
1609 | 0 | RC = &SystemZ::GR32BitRegClass; |
1610 | 0 | break; |
1611 | 0 | case MVT::i64: |
1612 | 0 | NumFixedGPRs += 1; |
1613 | 0 | RC = &SystemZ::GR64BitRegClass; |
1614 | 0 | break; |
1615 | 0 | case MVT::f32: |
1616 | 0 | NumFixedFPRs += 1; |
1617 | 0 | RC = &SystemZ::FP32BitRegClass; |
1618 | 0 | break; |
1619 | 0 | case MVT::f64: |
1620 | 0 | NumFixedFPRs += 1; |
1621 | 0 | RC = &SystemZ::FP64BitRegClass; |
1622 | 0 | break; |
1623 | 0 | case MVT::f128: |
1624 | 0 | NumFixedFPRs += 2; |
1625 | 0 | RC = &SystemZ::FP128BitRegClass; |
1626 | 0 | break; |
1627 | 0 | case MVT::v16i8: |
1628 | 0 | case MVT::v8i16: |
1629 | 0 | case MVT::v4i32: |
1630 | 0 | case MVT::v2i64: |
1631 | 0 | case MVT::v4f32: |
1632 | 0 | case MVT::v2f64: |
1633 | 0 | RC = &SystemZ::VR128BitRegClass; |
1634 | 0 | break; |
1635 | 0 | } |
1636 | | |
1637 | 0 | Register VReg = MRI.createVirtualRegister(RC); |
1638 | 0 | MRI.addLiveIn(VA.getLocReg(), VReg); |
1639 | 0 | ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT); |
1640 | 0 | } else { |
1641 | 0 | assert(VA.isMemLoc() && "Argument not register or memory"); |
1642 | | |
1643 | | // Create the frame index object for this incoming parameter. |
1644 | | // FIXME: Pre-include call frame size in the offset, should not |
1645 | | // need to manually add it here. |
1646 | 0 | int64_t ArgSPOffset = VA.getLocMemOffset(); |
1647 | 0 | if (Subtarget.isTargetXPLINK64()) { |
1648 | 0 | auto &XPRegs = |
1649 | 0 | Subtarget.getSpecialRegisters<SystemZXPLINK64Registers>(); |
1650 | 0 | ArgSPOffset += XPRegs.getCallFrameSize(); |
1651 | 0 | } |
1652 | 0 | int FI = |
1653 | 0 | MFI.CreateFixedObject(LocVT.getSizeInBits() / 8, ArgSPOffset, true); |
1654 | | |
1655 | | // Create the SelectionDAG nodes corresponding to a load |
1656 | | // from this parameter. Unpromoted ints and floats are |
1657 | | // passed as right-justified 8-byte values. |
1658 | 0 | SDValue FIN = DAG.getFrameIndex(FI, PtrVT); |
1659 | 0 | if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32) |
1660 | 0 | FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, |
1661 | 0 | DAG.getIntPtrConstant(4, DL)); |
1662 | 0 | ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN, |
1663 | 0 | MachinePointerInfo::getFixedStack(MF, FI)); |
1664 | 0 | } |
1665 | | |
1666 | | // Convert the value of the argument register into the value that's |
1667 | | // being passed. |
1668 | 0 | if (VA.getLocInfo() == CCValAssign::Indirect) { |
1669 | 0 | InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue, |
1670 | 0 | MachinePointerInfo())); |
1671 | | // If the original argument was split (e.g. i128), we need |
1672 | | // to load all parts of it here (using the same address). |
1673 | 0 | unsigned ArgIndex = Ins[I].OrigArgIndex; |
1674 | 0 | assert (Ins[I].PartOffset == 0); |
1675 | 0 | while (I + 1 != E && Ins[I + 1].OrigArgIndex == ArgIndex) { |
1676 | 0 | CCValAssign &PartVA = ArgLocs[I + 1]; |
1677 | 0 | unsigned PartOffset = Ins[I + 1].PartOffset; |
1678 | 0 | SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, |
1679 | 0 | DAG.getIntPtrConstant(PartOffset, DL)); |
1680 | 0 | InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address, |
1681 | 0 | MachinePointerInfo())); |
1682 | 0 | ++I; |
1683 | 0 | } |
1684 | 0 | } else |
1685 | 0 | InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue)); |
1686 | 0 | } |
1687 | | |
1688 | 0 | if (IsVarArg && Subtarget.isTargetXPLINK64()) { |
1689 | | // Save the number of non-varargs registers for later use by va_start, etc. |
1690 | 0 | FuncInfo->setVarArgsFirstGPR(NumFixedGPRs); |
1691 | 0 | FuncInfo->setVarArgsFirstFPR(NumFixedFPRs); |
1692 | 0 |
1693 | 0 | auto *Regs = static_cast<SystemZXPLINK64Registers *>( |
1694 | 0 | Subtarget.getSpecialRegisters()); |
1695 | | |
1696 | | // Likewise the address (in the form of a frame index) of where the |
1697 | | // first stack vararg would be. The 1-byte size here is arbitrary. |
1698 | | // FIXME: Pre-include call frame size in the offset, should not |
1699 | | // need to manually add it here. |
1700 | 0 | int64_t VarArgOffset = CCInfo.getStackSize() + Regs->getCallFrameSize(); |
1701 | 0 | int FI = MFI.CreateFixedObject(1, VarArgOffset, true); |
1702 | 0 | FuncInfo->setVarArgsFrameIndex(FI); |
1703 | 0 | } |
1704 | 0 |
1705 | 0 | if (IsVarArg && Subtarget.isTargetELF()) { |
1706 | | // Save the number of non-varargs registers for later use by va_start, etc. |
1707 | 0 | FuncInfo->setVarArgsFirstGPR(NumFixedGPRs); |
1708 | 0 | FuncInfo->setVarArgsFirstFPR(NumFixedFPRs); |
1709 | | |
1710 | | // Likewise the address (in the form of a frame index) of where the |
1711 | | // first stack vararg would be. The 1-byte size here is arbitrary. |
1712 | 0 | int64_t VarArgsOffset = CCInfo.getStackSize(); |
1713 | 0 | FuncInfo->setVarArgsFrameIndex( |
1714 | 0 | MFI.CreateFixedObject(1, VarArgsOffset, true)); |
1715 | | |
1716 | | // ...and a similar frame index for the caller-allocated save area |
1717 | | // that will be used to store the incoming registers. |
1718 | 0 | int64_t RegSaveOffset = |
1719 | 0 | -SystemZMC::ELFCallFrameSize + TFL->getRegSpillOffset(MF, SystemZ::R2D) - 16; |
1720 | 0 | unsigned RegSaveIndex = MFI.CreateFixedObject(1, RegSaveOffset, true); |
1721 | 0 | FuncInfo->setRegSaveFrameIndex(RegSaveIndex); |
1722 | | |
1723 | | // Store the FPR varargs in the reserved frame slots. (We store the |
1724 | | // GPRs as part of the prologue.) |
1725 | 0 | if (NumFixedFPRs < SystemZ::ELFNumArgFPRs && !useSoftFloat()) { |
1726 | 0 | SDValue MemOps[SystemZ::ELFNumArgFPRs]; |
1727 | 0 | for (unsigned I = NumFixedFPRs; I < SystemZ::ELFNumArgFPRs; ++I) { |
1728 | 0 | unsigned Offset = TFL->getRegSpillOffset(MF, SystemZ::ELFArgFPRs[I]); |
1729 | 0 | int FI = |
1730 | 0 | MFI.CreateFixedObject(8, -SystemZMC::ELFCallFrameSize + Offset, true); |
1731 | 0 | SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); |
1732 | 0 | Register VReg = MF.addLiveIn(SystemZ::ELFArgFPRs[I], |
1733 | 0 | &SystemZ::FP64BitRegClass); |
1734 | 0 | SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64); |
1735 | 0 | MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN, |
1736 | 0 | MachinePointerInfo::getFixedStack(MF, FI)); |
1737 | 0 | } |
1738 | | // Join the stores, which are independent of one another. |
1739 | 0 | Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, |
1740 | 0 | ArrayRef(&MemOps[NumFixedFPRs], |
1741 | 0 | SystemZ::ELFNumArgFPRs - NumFixedFPRs)); |
1742 | 0 | } |
1743 | 0 | } |
1744 | 0 |
1745 | 0 | if (Subtarget.isTargetXPLINK64()) { |
1746 | | // Create a virtual register for handling the incoming "ADA" special register (R5).
1747 | 0 | const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass; |
1748 | 0 | Register ADAvReg = MRI.createVirtualRegister(RC); |
1749 | 0 | auto *Regs = static_cast<SystemZXPLINK64Registers *>( |
1750 | 0 | Subtarget.getSpecialRegisters()); |
1751 | 0 | MRI.addLiveIn(Regs->getADARegister(), ADAvReg); |
1752 | 0 | FuncInfo->setADAVirtualRegister(ADAvReg); |
1753 | 0 | } |
1754 | 0 | return Chain; |
1755 | 0 | } |
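 | | // For orientation (standard s390x ELF ABI, not specific to this function):
 | | // the first integer arguments arrive in r2-r6 and the first floating-point
 | | // arguments in f0, f2, f4 and f6. So for
 | | //
 | | //   long f(long a, long b, double x, long c);
 | | //
 | | // the loop above sees a -> r2, b -> r3, x -> f0 and c -> r4, and any
 | | // further arguments take the VA.isMemLoc() stack path.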
1756 | | |
1757 | | static bool canUseSiblingCall(const CCState &ArgCCInfo, |
1758 | | SmallVectorImpl<CCValAssign> &ArgLocs, |
1759 | 0 | SmallVectorImpl<ISD::OutputArg> &Outs) { |
1760 | | // Punt if there are any indirect or stack arguments, or if the call |
1761 | | // needs the callee-saved argument register R6, or if the call uses |
1762 | | // the callee-saved register arguments SwiftSelf and SwiftError. |
1763 | 0 | for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) { |
1764 | 0 | CCValAssign &VA = ArgLocs[I]; |
1765 | 0 | if (VA.getLocInfo() == CCValAssign::Indirect) |
1766 | 0 | return false; |
1767 | 0 | if (!VA.isRegLoc()) |
1768 | 0 | return false; |
1769 | 0 | Register Reg = VA.getLocReg(); |
1770 | 0 | if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D) |
1771 | 0 | return false; |
1772 | 0 | if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError()) |
1773 | 0 | return false; |
1774 | 0 | } |
1775 | 0 | return true; |
1776 | 0 | } |
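 | | // An illustrative pair of calls (hypothetical source): the first passes
 | | // every check above; the second is rejected because its fifth integer
 | | // argument is assigned to the callee-saved register r6.
 | | //
 | | //   long g1(long);
 | | //   long ok(long x)     { return g1(x); }
 | | //   long g5(long, long, long, long, long);
 | | //   long not_ok(long x) { return g5(x, x, x, x, x); }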
1777 | | |
1778 | | static SDValue getADAEntry(SelectionDAG &DAG, SDValue Val, SDLoc DL, |
1779 | 0 | unsigned Offset, bool LoadAdr = false) { |
1780 | 0 | MachineFunction &MF = DAG.getMachineFunction(); |
1781 | 0 | SystemZMachineFunctionInfo *MFI = MF.getInfo<SystemZMachineFunctionInfo>(); |
1782 | 0 | unsigned ADAvReg = MFI->getADAVirtualRegister(); |
1783 | 0 | EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); |
1784 | 0 |
1785 | 0 | SDValue Reg = DAG.getRegister(ADAvReg, PtrVT); |
1786 | 0 | SDValue Ofs = DAG.getTargetConstant(Offset, DL, PtrVT); |
1787 | 0 |
1788 | 0 | SDValue Result = DAG.getNode(SystemZISD::ADA_ENTRY, DL, PtrVT, Val, Reg, Ofs); |
1789 | 0 | if (!LoadAdr) |
1790 | 0 | Result = DAG.getLoad( |
1791 | 0 | PtrVT, DL, DAG.getEntryNode(), Result, MachinePointerInfo(), Align(8), |
1792 | 0 | MachineMemOperand::MODereferenceable | MachineMemOperand::MOInvariant); |
1793 | 0 |
1794 | 0 | return Result; |
1795 | 0 | } |
1796 | | |
1797 | | // ADA access using a GlobalValue.
1798 | | // Note: for functions, the address of the descriptor is returned.
1799 | | static SDValue getADAEntry(SelectionDAG &DAG, const GlobalValue *GV, SDLoc DL, |
1800 | 0 | EVT PtrVT) { |
1801 | 0 | unsigned ADAtype; |
1802 | 0 | bool LoadAddr = false; |
1803 | 0 | const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV); |
1804 | 0 | bool IsFunction = |
1805 | 0 | (isa<Function>(GV)) || (GA && isa<Function>(GA->getAliaseeObject())); |
1806 | 0 | bool IsInternal = (GV->hasInternalLinkage() || GV->hasPrivateLinkage()); |
1807 | 0 |
1808 | 0 | if (IsFunction) { |
1809 | 0 | if (IsInternal) { |
1810 | 0 | ADAtype = SystemZII::MO_ADA_DIRECT_FUNC_DESC; |
1811 | 0 | LoadAddr = true; |
1812 | 0 | } else |
1813 | 0 | ADAtype = SystemZII::MO_ADA_INDIRECT_FUNC_DESC; |
1814 | 0 | } else { |
1815 | 0 | ADAtype = SystemZII::MO_ADA_DATA_SYMBOL_ADDR; |
1816 | 0 | } |
1817 | 0 | SDValue Val = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, ADAtype); |
1818 | 0 |
1819 | 0 | return getADAEntry(DAG, Val, DL, 0, LoadAddr); |
1820 | 0 | } |
1821 | | |
1822 | | static bool getzOSCalleeAndADA(SelectionDAG &DAG, SDValue &Callee, SDValue &ADA, |
1823 | 0 | SDLoc &DL, SDValue &Chain) { |
1824 | 0 | unsigned ADADelta = 0; // ADA offset in desc. |
1825 | 0 | unsigned EPADelta = 8; // EPA offset in desc. |
1826 | 0 | MachineFunction &MF = DAG.getMachineFunction(); |
1827 | 0 | EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); |
1828 | | |
1829 | | // XPLink calling convention. |
1830 | 0 | if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) { |
1831 | 0 | bool IsInternal = (G->getGlobal()->hasInternalLinkage() || |
1832 | 0 | G->getGlobal()->hasPrivateLinkage()); |
1833 | 0 | if (IsInternal) { |
1834 | 0 | SystemZMachineFunctionInfo *MFI = |
1835 | 0 | MF.getInfo<SystemZMachineFunctionInfo>(); |
1836 | 0 | unsigned ADAvReg = MFI->getADAVirtualRegister(); |
1837 | 0 | ADA = DAG.getCopyFromReg(Chain, DL, ADAvReg, PtrVT); |
1838 | 0 | Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT); |
1839 | 0 | Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee); |
1840 | 0 | return true; |
1841 | 0 | } else { |
1842 | 0 | SDValue GA = DAG.getTargetGlobalAddress( |
1843 | 0 | G->getGlobal(), DL, PtrVT, 0, SystemZII::MO_ADA_DIRECT_FUNC_DESC); |
1844 | 0 | ADA = getADAEntry(DAG, GA, DL, ADADelta); |
1845 | 0 | Callee = getADAEntry(DAG, GA, DL, EPADelta); |
1846 | 0 | } |
1847 | 0 | } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) { |
1848 | 0 | SDValue ES = DAG.getTargetExternalSymbol( |
1849 | 0 | E->getSymbol(), PtrVT, SystemZII::MO_ADA_DIRECT_FUNC_DESC); |
1850 | 0 | ADA = getADAEntry(DAG, ES, DL, ADADelta); |
1851 | 0 | Callee = getADAEntry(DAG, ES, DL, EPADelta); |
1852 | 0 | } else { |
1853 | | // Function pointer case |
1854 | 0 | ADA = DAG.getNode(ISD::ADD, DL, PtrVT, Callee, |
1855 | 0 | DAG.getConstant(ADADelta, DL, PtrVT)); |
1856 | 0 | ADA = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), ADA, |
1857 | 0 | MachinePointerInfo::getGOT(DAG.getMachineFunction())); |
1858 | 0 | Callee = DAG.getNode(ISD::ADD, DL, PtrVT, Callee, |
1859 | 0 | DAG.getConstant(EPADelta, DL, PtrVT)); |
1860 | 0 | Callee = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Callee, |
1861 | 0 | MachinePointerInfo::getGOT(DAG.getMachineFunction())); |
1862 | 0 | } |
1863 | 0 | return false; |
1864 | 0 | } |
1865 | | |
1866 | | SDValue |
1867 | | SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI, |
1868 | 0 | SmallVectorImpl<SDValue> &InVals) const { |
1869 | 0 | SelectionDAG &DAG = CLI.DAG; |
1870 | 0 | SDLoc &DL = CLI.DL; |
1871 | 0 | SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; |
1872 | 0 | SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; |
1873 | 0 | SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; |
1874 | 0 | SDValue Chain = CLI.Chain; |
1875 | 0 | SDValue Callee = CLI.Callee; |
1876 | 0 | bool &IsTailCall = CLI.IsTailCall; |
1877 | 0 | CallingConv::ID CallConv = CLI.CallConv; |
1878 | 0 | bool IsVarArg = CLI.IsVarArg; |
1879 | 0 | MachineFunction &MF = DAG.getMachineFunction(); |
1880 | 0 | EVT PtrVT = getPointerTy(MF.getDataLayout()); |
1881 | 0 | LLVMContext &Ctx = *DAG.getContext(); |
1882 | 0 | SystemZCallingConventionRegisters *Regs = Subtarget.getSpecialRegisters(); |
1883 | | |
1884 | | // FIXME: z/OS support to be added later.
1885 | 0 | if (Subtarget.isTargetXPLINK64()) |
1886 | 0 | IsTailCall = false; |
1887 | | |
1888 | | // Analyze the operands of the call, assigning locations to each operand. |
1889 | 0 | SmallVector<CCValAssign, 16> ArgLocs; |
1890 | 0 | SystemZCCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, Ctx); |
1891 | 0 | ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ); |
1892 | | |
1893 | | // We don't support GuaranteedTailCallOpt, only automatically-detected |
1894 | | // sibling calls. |
1895 | 0 | if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs, Outs)) |
1896 | 0 | IsTailCall = false; |
1897 | | |
1898 | | // Get a count of how many bytes are to be pushed on the stack. |
1899 | 0 | unsigned NumBytes = ArgCCInfo.getStackSize(); |
1900 | | |
1901 | | // Mark the start of the call. |
1902 | 0 | if (!IsTailCall) |
1903 | 0 | Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL); |
1904 | | |
1905 | | // Copy argument values to their designated locations. |
1906 | 0 | SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass; |
1907 | 0 | SmallVector<SDValue, 8> MemOpChains; |
1908 | 0 | SDValue StackPtr; |
1909 | 0 | for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) { |
1910 | 0 | CCValAssign &VA = ArgLocs[I]; |
1911 | 0 | SDValue ArgValue = OutVals[I]; |
1912 | 0 |
1913 | 0 | if (VA.getLocInfo() == CCValAssign::Indirect) { |
1914 | | // Store the argument in a stack slot and pass its address. |
1915 | 0 | unsigned ArgIndex = Outs[I].OrigArgIndex; |
1916 | 0 | EVT SlotVT; |
1917 | 0 | if (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) { |
1918 | | // Allocate the full stack space for a promoted (and split) argument. |
1919 | 0 | Type *OrigArgType = CLI.Args[Outs[I].OrigArgIndex].Ty; |
1920 | 0 | EVT OrigArgVT = getValueType(MF.getDataLayout(), OrigArgType); |
1921 | 0 | MVT PartVT = getRegisterTypeForCallingConv(Ctx, CLI.CallConv, OrigArgVT); |
1922 | 0 | unsigned N = getNumRegistersForCallingConv(Ctx, CLI.CallConv, OrigArgVT); |
1923 | 0 | SlotVT = EVT::getIntegerVT(Ctx, PartVT.getSizeInBits() * N); |
1924 | 0 | } else { |
1925 | 0 | SlotVT = Outs[I].ArgVT; |
1926 | 0 | } |
1927 | 0 | SDValue SpillSlot = DAG.CreateStackTemporary(SlotVT); |
1928 | 0 | int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex(); |
1929 | 0 | MemOpChains.push_back( |
1930 | 0 | DAG.getStore(Chain, DL, ArgValue, SpillSlot, |
1931 | 0 | MachinePointerInfo::getFixedStack(MF, FI))); |
1932 | | // If the original argument was split (e.g. i128), we need |
1933 | | // to store all parts of it here (and pass just one address). |
1934 | 0 | assert (Outs[I].PartOffset == 0); |
1935 | 0 | while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) { |
1936 | 0 | SDValue PartValue = OutVals[I + 1]; |
1937 | 0 | unsigned PartOffset = Outs[I + 1].PartOffset; |
1938 | 0 | SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, |
1939 | 0 | DAG.getIntPtrConstant(PartOffset, DL)); |
1940 | 0 | MemOpChains.push_back( |
1941 | 0 | DAG.getStore(Chain, DL, PartValue, Address, |
1942 | 0 | MachinePointerInfo::getFixedStack(MF, FI))); |
1943 | 0 | assert((PartOffset + PartValue.getValueType().getStoreSize() <= |
1944 | 0 | SlotVT.getStoreSize()) && "Not enough space for argument part!"); |
1945 | 0 | ++I; |
1946 | 0 | } |
1947 | 0 | ArgValue = SpillSlot; |
1948 | 0 | } else |
1949 | 0 | ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue); |
1950 | | |
1951 | 0 | if (VA.isRegLoc()) { |
1952 | | // In XPLINK64, for the 128-bit vararg case, ArgValue is bitcasted to a |
1953 | | // MVT::i128 type. We decompose the 128-bit type to a pair of its high |
1954 | | // and low values. |
1955 | 0 | if (VA.getLocVT() == MVT::i128) |
1956 | 0 | ArgValue = lowerI128ToGR128(DAG, ArgValue); |
1957 | | // Queue up the argument copies and emit them at the end. |
1958 | 0 | RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue)); |
1959 | 0 | } else { |
1960 | 0 | assert(VA.isMemLoc() && "Argument not register or memory"); |
1961 | | |
1962 | | // Work out the address of the stack slot. Unpromoted ints and |
1963 | | // floats are passed as right-justified 8-byte values. |
1964 | 0 | if (!StackPtr.getNode()) |
1965 | 0 | StackPtr = DAG.getCopyFromReg(Chain, DL, |
1966 | 0 | Regs->getStackPointerRegister(), PtrVT); |
1967 | 0 | unsigned Offset = Regs->getStackPointerBias() + Regs->getCallFrameSize() + |
1968 | 0 | VA.getLocMemOffset(); |
1969 | 0 | if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32) |
1970 | 0 | Offset += 4; |
1971 | 0 | SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, |
1972 | 0 | DAG.getIntPtrConstant(Offset, DL)); |
1973 | | |
1974 | | // Emit the store. |
1975 | 0 | MemOpChains.push_back( |
1976 | 0 | DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo())); |
1977 | | |
1978 | | // Although long doubles and vectors are passed on the stack when they
1979 | | // are varargs (non-fixed arguments), if a long double or vector
1980 | | // occupies the third and fourth slots of the argument list, GPR3 should
1981 | | // still shadow the third slot of the argument list.
1982 | 0 | if (Subtarget.isTargetXPLINK64() && VA.needsCustom()) { |
1983 | 0 | SDValue ShadowArgValue = |
1984 | 0 | DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, ArgValue, |
1985 | 0 | DAG.getIntPtrConstant(1, DL)); |
1986 | 0 | RegsToPass.push_back(std::make_pair(SystemZ::R3D, ShadowArgValue)); |
1987 | 0 | } |
1988 | 0 | } |
1989 | 0 | } |
1990 | | |
1991 | | // Join the stores, which are independent of one another. |
1992 | 0 | if (!MemOpChains.empty()) |
1993 | 0 | Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains); |
1994 | | |
1995 | | // Accept direct calls by converting symbolic call addresses to the |
1996 | | // associated Target* opcodes. Force %r1 to be used for indirect |
1997 | | // tail calls. |
1998 | 0 | SDValue Glue; |
1999 | 0 |
2000 | 0 | if (Subtarget.isTargetXPLINK64()) { |
2001 | 0 | SDValue ADA; |
2002 | 0 | bool IsBRASL = getzOSCalleeAndADA(DAG, Callee, ADA, DL, Chain); |
2003 | 0 | if (!IsBRASL) { |
2004 | 0 | unsigned CalleeReg = static_cast<SystemZXPLINK64Registers *>(Regs) |
2005 | 0 | ->getAddressOfCalleeRegister(); |
2006 | 0 | Chain = DAG.getCopyToReg(Chain, DL, CalleeReg, Callee, Glue); |
2007 | 0 | Glue = Chain.getValue(1); |
2008 | 0 | Callee = DAG.getRegister(CalleeReg, Callee.getValueType()); |
2009 | 0 | } |
2010 | 0 | RegsToPass.push_back(std::make_pair( |
2011 | 0 | static_cast<SystemZXPLINK64Registers *>(Regs)->getADARegister(), ADA)); |
2012 | 0 | } else { |
2013 | 0 | if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) { |
2014 | 0 | Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT); |
2015 | 0 | Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee); |
2016 | 0 | } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) { |
2017 | 0 | Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT); |
2018 | 0 | Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee); |
2019 | 0 | } else if (IsTailCall) { |
2020 | 0 | Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue); |
2021 | 0 | Glue = Chain.getValue(1); |
2022 | 0 | Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType()); |
2023 | 0 | } |
2024 | 0 | } |
2025 | | |
2026 | | // Build a sequence of copy-to-reg nodes, chained and glued together. |
2027 | 0 | for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) { |
2028 | 0 | Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first, |
2029 | 0 | RegsToPass[I].second, Glue); |
2030 | 0 | Glue = Chain.getValue(1); |
2031 | 0 | } |
2032 | | |
2033 | | // The first call operand is the chain and the second is the target address. |
2034 | 0 | SmallVector<SDValue, 8> Ops; |
2035 | 0 | Ops.push_back(Chain); |
2036 | 0 | Ops.push_back(Callee); |
2037 | | |
2038 | | // Add argument registers to the end of the list so that they are |
2039 | | // known live into the call. |
2040 | 0 | for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) |
2041 | 0 | Ops.push_back(DAG.getRegister(RegsToPass[I].first, |
2042 | 0 | RegsToPass[I].second.getValueType())); |
2043 | | |
2044 | | // Add a register mask operand representing the call-preserved registers. |
2045 | 0 | const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); |
2046 | 0 | const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv); |
2047 | 0 | assert(Mask && "Missing call preserved mask for calling convention"); |
2048 | 0 | Ops.push_back(DAG.getRegisterMask(Mask)); |
2049 | | |
2050 | | // Glue the call to the argument copies, if any. |
2051 | 0 | if (Glue.getNode()) |
2052 | 0 | Ops.push_back(Glue); |
2053 | | |
2054 | | // Emit the call. |
2055 | 0 | SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); |
2056 | 0 | if (IsTailCall) { |
2057 | 0 | SDValue Ret = DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, Ops); |
2058 | 0 | DAG.addNoMergeSiteInfo(Ret.getNode(), CLI.NoMerge); |
2059 | 0 | return Ret; |
2060 | 0 | } |
2061 | 0 | Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, Ops); |
2062 | 0 | DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge); |
2063 | 0 | Glue = Chain.getValue(1); |
2064 | | |
2065 | | // Mark the end of the call, which is glued to the call itself. |
2066 | 0 | Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, Glue, DL); |
2067 | 0 | Glue = Chain.getValue(1); |
2068 | | |
2069 | | // Assign locations to each value returned by this call. |
2070 | 0 | SmallVector<CCValAssign, 16> RetLocs; |
2071 | 0 | CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, Ctx); |
2072 | 0 | RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ); |
2073 | | |
2074 | | // Copy all of the result registers out of their specified physreg. |
2075 | 0 | for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) { |
2076 | 0 | CCValAssign &VA = RetLocs[I]; |
2077 | | |
2078 | | // Copy the value out, gluing the copy to the end of the call sequence. |
2079 | 0 | SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), |
2080 | 0 | VA.getLocVT(), Glue); |
2081 | 0 | Chain = RetValue.getValue(1); |
2082 | 0 | Glue = RetValue.getValue(2); |
2083 | | |
2084 | | // Convert the value of the return register into the value that's |
2085 | | // being returned. |
2086 | 0 | InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue)); |
2087 | 0 | } |
2088 | 0 |
2089 | 0 | return Chain; |
2090 | 0 | } |
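 | | // One concrete trip through the Indirect path above (hypothetical source):
 | | // an i128 argument is passed by reference on SystemZ, so the caller stores
 | | // both halves to a stack temporary and passes the slot's address instead.
 | | //
 | | //   __int128 callee(__int128);
 | | //   __int128 forward(__int128 v) { return callee(v); }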
2091 | | |
2092 | | // Generate a call taking the given operands as arguments and returning a |
2093 | | // result of type RetVT. |
2094 | | std::pair<SDValue, SDValue> SystemZTargetLowering::makeExternalCall( |
2095 | | SDValue Chain, SelectionDAG &DAG, const char *CalleeName, EVT RetVT, |
2096 | | ArrayRef<SDValue> Ops, CallingConv::ID CallConv, bool IsSigned, SDLoc DL, |
2097 | 0 | bool DoesNotReturn, bool IsReturnValueUsed) const { |
2098 | 0 | TargetLowering::ArgListTy Args; |
2099 | 0 | Args.reserve(Ops.size()); |
2100 | 0 |
2101 | 0 | TargetLowering::ArgListEntry Entry; |
2102 | 0 | for (SDValue Op : Ops) { |
2103 | 0 | Entry.Node = Op; |
2104 | 0 | Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext()); |
2105 | 0 | Entry.IsSExt = shouldSignExtendTypeInLibCall(Op.getValueType(), IsSigned); |
2106 | 0 | Entry.IsZExt = !shouldSignExtendTypeInLibCall(Op.getValueType(), IsSigned); |
2107 | 0 | Args.push_back(Entry); |
2108 | 0 | } |
2109 | 0 |
2110 | 0 | SDValue Callee = |
2111 | 0 | DAG.getExternalSymbol(CalleeName, getPointerTy(DAG.getDataLayout())); |
2112 | 0 |
2113 | 0 | Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext()); |
2114 | 0 | TargetLowering::CallLoweringInfo CLI(DAG); |
2115 | 0 | bool SignExtend = shouldSignExtendTypeInLibCall(RetVT, IsSigned); |
2116 | 0 | CLI.setDebugLoc(DL) |
2117 | 0 | .setChain(Chain) |
2118 | 0 | .setCallee(CallConv, RetTy, Callee, std::move(Args)) |
2119 | 0 | .setNoReturn(DoesNotReturn) |
2120 | 0 | .setDiscardResult(!IsReturnValueUsed) |
2121 | 0 | .setSExtResult(SignExtend) |
2122 | 0 | .setZExtResult(!SignExtend); |
2123 | 0 | return LowerCallTo(CLI); |
2124 | 0 | } |
2125 | | |
2126 | | bool SystemZTargetLowering:: |
2127 | | CanLowerReturn(CallingConv::ID CallConv, |
2128 | | MachineFunction &MF, bool isVarArg, |
2129 | | const SmallVectorImpl<ISD::OutputArg> &Outs, |
2130 | 0 | LLVMContext &Context) const { |
2131 | | // Special case that we cannot easily detect in RetCC_SystemZ since |
2132 | | // i128 may not be a legal type. |
2133 | 0 | for (auto &Out : Outs) |
2134 | 0 | if (Out.ArgVT == MVT::i128) |
2135 | 0 | return false; |
2136 | | |
2137 | 0 | SmallVector<CCValAssign, 16> RetLocs; |
2138 | 0 | CCState RetCCInfo(CallConv, isVarArg, MF, RetLocs, Context); |
2139 | 0 | return RetCCInfo.CheckReturn(Outs, RetCC_SystemZ); |
2140 | 0 | } |
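 | | // Consequence (illustrative): a function returning __int128 fails the check
 | | // above, so its return value is demoted and passed back through a hidden
 | | // sret pointer argument instead of registers.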
2141 | | |
2142 | | SDValue |
2143 | | SystemZTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, |
2144 | | bool IsVarArg, |
2145 | | const SmallVectorImpl<ISD::OutputArg> &Outs, |
2146 | | const SmallVectorImpl<SDValue> &OutVals, |
2147 | 0 | const SDLoc &DL, SelectionDAG &DAG) const { |
2148 | 0 | MachineFunction &MF = DAG.getMachineFunction(); |
2149 | | |
2150 | | // Assign locations to each returned value. |
2151 | 0 | SmallVector<CCValAssign, 16> RetLocs; |
2152 | 0 | CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext()); |
2153 | 0 | RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ); |
2154 | | |
2155 | | // Quick exit for void returns |
2156 | 0 | if (RetLocs.empty()) |
2157 | 0 | return DAG.getNode(SystemZISD::RET_GLUE, DL, MVT::Other, Chain); |
2158 | | |
2159 | 0 | if (CallConv == CallingConv::GHC) |
2160 | 0 | report_fatal_error("GHC functions return void only"); |
2161 | | |
2162 | | // Copy the result values into the output registers. |
2163 | 0 | SDValue Glue; |
2164 | 0 | SmallVector<SDValue, 4> RetOps; |
2165 | 0 | RetOps.push_back(Chain); |
2166 | 0 | for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) { |
2167 | 0 | CCValAssign &VA = RetLocs[I]; |
2168 | 0 | SDValue RetValue = OutVals[I]; |
2169 | | |
2170 | | // Make the return register live on exit. |
2171 | 0 | assert(VA.isRegLoc() && "Can only return in registers!"); |
2172 | | |
2173 | | // Promote the value as required. |
2174 | 0 | RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue); |
2175 | | |
2176 | | // Chain and glue the copies together. |
2177 | 0 | Register Reg = VA.getLocReg(); |
2178 | 0 | Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue); |
2179 | 0 | Glue = Chain.getValue(1); |
2180 | 0 | RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT())); |
2181 | 0 | } |
2182 | | |
2183 | | // Update chain and glue. |
2184 | 0 | RetOps[0] = Chain; |
2185 | 0 | if (Glue.getNode()) |
2186 | 0 | RetOps.push_back(Glue); |
2187 | 0 |
2188 | 0 | return DAG.getNode(SystemZISD::RET_GLUE, DL, MVT::Other, RetOps); |
2189 | 0 | } |
2190 | | |
2191 | | // Return true if Op is an intrinsic node with chain that returns the CC value |
2192 | | // as its only (other) argument. Provide the associated SystemZISD opcode and |
2193 | | // the mask of valid CC values if so. |
2194 | | static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode, |
2195 | 0 | unsigned &CCValid) { |
2196 | 0 | unsigned Id = Op.getConstantOperandVal(1); |
2197 | 0 | switch (Id) { |
2198 | 0 | case Intrinsic::s390_tbegin: |
2199 | 0 | Opcode = SystemZISD::TBEGIN; |
2200 | 0 | CCValid = SystemZ::CCMASK_TBEGIN; |
2201 | 0 | return true; |
2202 | | |
2203 | 0 | case Intrinsic::s390_tbegin_nofloat: |
2204 | 0 | Opcode = SystemZISD::TBEGIN_NOFLOAT; |
2205 | 0 | CCValid = SystemZ::CCMASK_TBEGIN; |
2206 | 0 | return true; |
2207 | | |
2208 | 0 | case Intrinsic::s390_tend: |
2209 | 0 | Opcode = SystemZISD::TEND; |
2210 | 0 | CCValid = SystemZ::CCMASK_TEND; |
2211 | 0 | return true; |
2212 | | |
2213 | 0 | default: |
2214 | 0 | return false; |
2215 | 0 | } |
2216 | 0 | } |
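 | | // Source-level sketch of a producer of one such node (assumes a build with
 | | // transactional execution, e.g. clang -mhtm): __builtin_tbegin lowers to
 | | // Intrinsic::s390_tbegin, whose CC result the TBEGIN node models.
 | | //
 | | //   int try_txn(void) {
 | | //     return __builtin_tbegin(0); // returns the start condition code
 | | //   }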
2217 | | |
2218 | | // Return true if Op is an intrinsic node without chain that returns the |
2219 | | // CC value as its final argument. Provide the associated SystemZISD |
2220 | | // opcode and the mask of valid CC values if so. |
2221 | 0 | static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid) { |
2222 | 0 | unsigned Id = Op.getConstantOperandVal(0); |
2223 | 0 | switch (Id) { |
2224 | 0 | case Intrinsic::s390_vpkshs: |
2225 | 0 | case Intrinsic::s390_vpksfs: |
2226 | 0 | case Intrinsic::s390_vpksgs: |
2227 | 0 | Opcode = SystemZISD::PACKS_CC; |
2228 | 0 | CCValid = SystemZ::CCMASK_VCMP; |
2229 | 0 | return true; |
2230 | | |
2231 | 0 | case Intrinsic::s390_vpklshs: |
2232 | 0 | case Intrinsic::s390_vpklsfs: |
2233 | 0 | case Intrinsic::s390_vpklsgs: |
2234 | 0 | Opcode = SystemZISD::PACKLS_CC; |
2235 | 0 | CCValid = SystemZ::CCMASK_VCMP; |
2236 | 0 | return true; |
2237 | | |
2238 | 0 | case Intrinsic::s390_vceqbs: |
2239 | 0 | case Intrinsic::s390_vceqhs: |
2240 | 0 | case Intrinsic::s390_vceqfs: |
2241 | 0 | case Intrinsic::s390_vceqgs: |
2242 | 0 | Opcode = SystemZISD::VICMPES; |
2243 | 0 | CCValid = SystemZ::CCMASK_VCMP; |
2244 | 0 | return true; |
2245 | | |
2246 | 0 | case Intrinsic::s390_vchbs: |
2247 | 0 | case Intrinsic::s390_vchhs: |
2248 | 0 | case Intrinsic::s390_vchfs: |
2249 | 0 | case Intrinsic::s390_vchgs: |
2250 | 0 | Opcode = SystemZISD::VICMPHS; |
2251 | 0 | CCValid = SystemZ::CCMASK_VCMP; |
2252 | 0 | return true; |
2253 | | |
2254 | 0 | case Intrinsic::s390_vchlbs: |
2255 | 0 | case Intrinsic::s390_vchlhs: |
2256 | 0 | case Intrinsic::s390_vchlfs: |
2257 | 0 | case Intrinsic::s390_vchlgs: |
2258 | 0 | Opcode = SystemZISD::VICMPHLS; |
2259 | 0 | CCValid = SystemZ::CCMASK_VCMP; |
2260 | 0 | return true; |
2261 | | |
2262 | 0 | case Intrinsic::s390_vtm: |
2263 | 0 | Opcode = SystemZISD::VTM; |
2264 | 0 | CCValid = SystemZ::CCMASK_VCMP; |
2265 | 0 | return true; |
2266 | | |
2267 | 0 | case Intrinsic::s390_vfaebs: |
2268 | 0 | case Intrinsic::s390_vfaehs: |
2269 | 0 | case Intrinsic::s390_vfaefs: |
2270 | 0 | Opcode = SystemZISD::VFAE_CC; |
2271 | 0 | CCValid = SystemZ::CCMASK_ANY; |
2272 | 0 | return true; |
2273 | | |
2274 | 0 | case Intrinsic::s390_vfaezbs: |
2275 | 0 | case Intrinsic::s390_vfaezhs: |
2276 | 0 | case Intrinsic::s390_vfaezfs: |
2277 | 0 | Opcode = SystemZISD::VFAEZ_CC; |
2278 | 0 | CCValid = SystemZ::CCMASK_ANY; |
2279 | 0 | return true; |
2280 | | |
2281 | 0 | case Intrinsic::s390_vfeebs: |
2282 | 0 | case Intrinsic::s390_vfeehs: |
2283 | 0 | case Intrinsic::s390_vfeefs: |
2284 | 0 | Opcode = SystemZISD::VFEE_CC; |
2285 | 0 | CCValid = SystemZ::CCMASK_ANY; |
2286 | 0 | return true; |
2287 | | |
2288 | 0 | case Intrinsic::s390_vfeezbs: |
2289 | 0 | case Intrinsic::s390_vfeezhs: |
2290 | 0 | case Intrinsic::s390_vfeezfs: |
2291 | 0 | Opcode = SystemZISD::VFEEZ_CC; |
2292 | 0 | CCValid = SystemZ::CCMASK_ANY; |
2293 | 0 | return true; |
2294 | | |
2295 | 0 | case Intrinsic::s390_vfenebs: |
2296 | 0 | case Intrinsic::s390_vfenehs: |
2297 | 0 | case Intrinsic::s390_vfenefs: |
2298 | 0 | Opcode = SystemZISD::VFENE_CC; |
2299 | 0 | CCValid = SystemZ::CCMASK_ANY; |
2300 | 0 | return true; |
2301 | | |
2302 | 0 | case Intrinsic::s390_vfenezbs: |
2303 | 0 | case Intrinsic::s390_vfenezhs: |
2304 | 0 | case Intrinsic::s390_vfenezfs: |
2305 | 0 | Opcode = SystemZISD::VFENEZ_CC; |
2306 | 0 | CCValid = SystemZ::CCMASK_ANY; |
2307 | 0 | return true; |
2308 | | |
2309 | 0 | case Intrinsic::s390_vistrbs: |
2310 | 0 | case Intrinsic::s390_vistrhs: |
2311 | 0 | case Intrinsic::s390_vistrfs: |
2312 | 0 | Opcode = SystemZISD::VISTR_CC; |
2313 | 0 | CCValid = SystemZ::CCMASK_0 | SystemZ::CCMASK_3; |
2314 | 0 | return true; |
2315 | | |
2316 | 0 | case Intrinsic::s390_vstrcbs: |
2317 | 0 | case Intrinsic::s390_vstrchs: |
2318 | 0 | case Intrinsic::s390_vstrcfs: |
2319 | 0 | Opcode = SystemZISD::VSTRC_CC; |
2320 | 0 | CCValid = SystemZ::CCMASK_ANY; |
2321 | 0 | return true; |
2322 | | |
2323 | 0 | case Intrinsic::s390_vstrczbs: |
2324 | 0 | case Intrinsic::s390_vstrczhs: |
2325 | 0 | case Intrinsic::s390_vstrczfs: |
2326 | 0 | Opcode = SystemZISD::VSTRCZ_CC; |
2327 | 0 | CCValid = SystemZ::CCMASK_ANY; |
2328 | 0 | return true; |
2329 | | |
2330 | 0 | case Intrinsic::s390_vstrsb: |
2331 | 0 | case Intrinsic::s390_vstrsh: |
2332 | 0 | case Intrinsic::s390_vstrsf: |
2333 | 0 | Opcode = SystemZISD::VSTRS_CC; |
2334 | 0 | CCValid = SystemZ::CCMASK_ANY; |
2335 | 0 | return true; |
2336 | | |
2337 | 0 | case Intrinsic::s390_vstrszb: |
2338 | 0 | case Intrinsic::s390_vstrszh: |
2339 | 0 | case Intrinsic::s390_vstrszf: |
2340 | 0 | Opcode = SystemZISD::VSTRSZ_CC; |
2341 | 0 | CCValid = SystemZ::CCMASK_ANY; |
2342 | 0 | return true; |
2343 | | |
2344 | 0 | case Intrinsic::s390_vfcedbs: |
2345 | 0 | case Intrinsic::s390_vfcesbs: |
2346 | 0 | Opcode = SystemZISD::VFCMPES; |
2347 | 0 | CCValid = SystemZ::CCMASK_VCMP; |
2348 | 0 | return true; |
2349 | | |
2350 | 0 | case Intrinsic::s390_vfchdbs: |
2351 | 0 | case Intrinsic::s390_vfchsbs: |
2352 | 0 | Opcode = SystemZISD::VFCMPHS; |
2353 | 0 | CCValid = SystemZ::CCMASK_VCMP; |
2354 | 0 | return true; |
2355 | | |
2356 | 0 | case Intrinsic::s390_vfchedbs: |
2357 | 0 | case Intrinsic::s390_vfchesbs: |
2358 | 0 | Opcode = SystemZISD::VFCMPHES; |
2359 | 0 | CCValid = SystemZ::CCMASK_VCMP; |
2360 | 0 | return true; |
2361 | | |
2362 | 0 | case Intrinsic::s390_vftcidb: |
2363 | 0 | case Intrinsic::s390_vftcisb: |
2364 | 0 | Opcode = SystemZISD::VFTCI; |
2365 | 0 | CCValid = SystemZ::CCMASK_VCMP; |
2366 | 0 | return true; |
2367 | | |
2368 | 0 | case Intrinsic::s390_tdc: |
2369 | 0 | Opcode = SystemZISD::TDC; |
2370 | 0 | CCValid = SystemZ::CCMASK_TDC; |
2371 | 0 | return true; |
2372 | | |
2373 | 0 | default: |
2374 | 0 | return false; |
2375 | 0 | } |
2376 | 0 | } |
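 | | // Source-level sketch (assumes clang's SystemZ vector builtins with -mvx):
 | | // the CC-setting element compare arrives here as Intrinsic::s390_vceqbs and
 | | // becomes a VICMPES node plus a copy of the CC result.
 | | //
 | | //   // a, b and cc declared elsewhere
 | | //   vector signed char eq = __builtin_s390_vceqbs(a, b, &cc);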
2377 | | |
2378 | | // Emit an intrinsic with chain and an explicit CC register result. |
2379 | | static SDNode *emitIntrinsicWithCCAndChain(SelectionDAG &DAG, SDValue Op, |
2380 | 0 | unsigned Opcode) { |
2381 | | // Copy all operands except the intrinsic ID. |
2382 | 0 | unsigned NumOps = Op.getNumOperands(); |
2383 | 0 | SmallVector<SDValue, 6> Ops; |
2384 | 0 | Ops.reserve(NumOps - 1); |
2385 | 0 | Ops.push_back(Op.getOperand(0)); |
2386 | 0 | for (unsigned I = 2; I < NumOps; ++I) |
2387 | 0 | Ops.push_back(Op.getOperand(I)); |
2388 | 0 |
2389 | 0 | assert(Op->getNumValues() == 2 && "Expected only CC result and chain"); |
2390 | 0 | SDVTList RawVTs = DAG.getVTList(MVT::i32, MVT::Other); |
2391 | 0 | SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), RawVTs, Ops); |
2392 | 0 | SDValue OldChain = SDValue(Op.getNode(), 1); |
2393 | 0 | SDValue NewChain = SDValue(Intr.getNode(), 1); |
2394 | 0 | DAG.ReplaceAllUsesOfValueWith(OldChain, NewChain); |
2395 | 0 | return Intr.getNode(); |
2396 | 0 | } |
2397 | | |
2398 | | // Emit an intrinsic with an explicit CC register result. |
2399 | | static SDNode *emitIntrinsicWithCC(SelectionDAG &DAG, SDValue Op, |
2400 | 0 | unsigned Opcode) { |
2401 | | // Copy all operands except the intrinsic ID. |
2402 | 0 | unsigned NumOps = Op.getNumOperands(); |
2403 | 0 | SmallVector<SDValue, 6> Ops; |
2404 | 0 | Ops.reserve(NumOps - 1); |
2405 | 0 | for (unsigned I = 1; I < NumOps; ++I) |
2406 | 0 | Ops.push_back(Op.getOperand(I)); |
2407 | 0 |
2408 | 0 | SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), Op->getVTList(), Ops); |
2409 | 0 | return Intr.getNode(); |
2410 | 0 | } |
2411 | | |
2412 | | // CC is a comparison that will be implemented using an integer or |
2413 | | // floating-point comparison. Return the condition code mask for |
2414 | | // a branch on true. In the integer case, CCMASK_CMP_UO is set for |
2415 | | // unsigned comparisons and clear for signed ones. In the floating-point |
2416 | | // case, CCMASK_CMP_UO has its normal mask meaning (unordered). |
2417 | 0 | static unsigned CCMaskForCondCode(ISD::CondCode CC) { |
2418 | 0 | #define CONV(X) \ |
2419 | 0 | case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \ |
2420 | 0 | case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \ |
2421 | 0 | case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X |
2422 | |
2423 | 0 | switch (CC) { |
2424 | 0 | default: |
2425 | 0 | llvm_unreachable("Invalid integer condition!"); |
2426 | |
2427 | 0 | CONV(EQ); |
2428 | 0 | CONV(NE); |
2429 | 0 | CONV(GT); |
2430 | 0 | CONV(GE); |
2431 | 0 | CONV(LT); |
2432 | 0 | CONV(LE); |
2433 | | |
2434 | 0 | case ISD::SETO: return SystemZ::CCMASK_CMP_O; |
2435 | 0 | case ISD::SETUO: return SystemZ::CCMASK_CMP_UO; |
2436 | 0 | } |
2437 | 0 | #undef CONV |
2438 | 0 | } |
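A concrete reading of the CONV expansion above, as a minimal sketch with stand-in constants (the real SystemZ::CCMASK_* values are defined elsewhere in the backend; the assumption here is the usual SystemZ encoding where CC value n maps to mask bit 3-n, so EQ/LT/GT/UO are 8/4/2/1):

    constexpr unsigned CmpEQ = 1u << 3, CmpLT = 1u << 2,
                       CmpGT = 1u << 1, CmpUO = 1u << 0;
    // SETLT branches on CC 1 only; SETULT adds the unordered bit (CC 3).
    static_assert(CmpLT == 0x4u, "SETLT -> CCMASK_CMP_LT");
    static_assert((CmpUO | CmpLT) == 0x5u, "SETULT -> CCMASK_CMP_UO | CCMASK_CMP_LT");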
2439 | | |
2440 | | // If C can be converted to a comparison against zero, adjust the operands |
2441 | | // as necessary. |
2442 | 0 | static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) { |
2443 | 0 | if (C.ICmpType == SystemZICMP::UnsignedOnly) |
2444 | 0 | return; |
2445 | | |
2446 | 0 | auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode()); |
2447 | 0 | if (!ConstOp1 || ConstOp1->getValueSizeInBits(0) > 64) |
2448 | 0 | return; |
2449 | | |
2450 | 0 | int64_t Value = ConstOp1->getSExtValue(); |
2451 | 0 | if ((Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_GT) || |
2452 | 0 | (Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_LE) || |
2453 | 0 | (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_LT) || |
2454 | 0 | (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_GE)) { |
2455 | 0 | C.CCMask ^= SystemZ::CCMASK_CMP_EQ; |
2456 | 0 | C.Op1 = DAG.getConstant(0, DL, C.Op1.getValueType()); |
2457 | 0 | } |
2458 | 0 | } |
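Why a single XOR with CCMASK_CMP_EQ is the whole rewrite above: under the same assumed encoding as in the previous sketch, GE is EQ|GT and LE is EQ|LT, so toggling the EQ bit turns "x > -1" into the equivalent "x >= 0" and "x < 1" into "x <= 0". Minimal sketch:

    constexpr unsigned CmpEQ = 1u << 3, CmpLT = 1u << 2, CmpGT = 1u << 1;
    static_assert((CmpGT ^ CmpEQ) == (CmpEQ | CmpGT), "GT ^ EQ == GE");
    static_assert((CmpLT ^ CmpEQ) == (CmpEQ | CmpLT), "LT ^ EQ == LE");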
2459 | | |
2460 | | // If a comparison described by C is suitable for CLI(Y), CHHSI or CLHHSI, |
2461 | | // adjust the operands as necessary. |
2462 | | static void adjustSubwordCmp(SelectionDAG &DAG, const SDLoc &DL, |
2463 | 0 | Comparison &C) { |
2464 | | // For us to make any changes, it must be a comparison between a single-use
2465 | | // load and a constant. |
2466 | 0 | if (!C.Op0.hasOneUse() || |
2467 | 0 | C.Op0.getOpcode() != ISD::LOAD || |
2468 | 0 | C.Op1.getOpcode() != ISD::Constant) |
2469 | 0 | return; |
2470 | | |
2471 | | // We must have an 8- or 16-bit load. |
2472 | 0 | auto *Load = cast<LoadSDNode>(C.Op0); |
2473 | 0 | unsigned NumBits = Load->getMemoryVT().getSizeInBits(); |
2474 | 0 | if ((NumBits != 8 && NumBits != 16) || |
2475 | 0 | NumBits != Load->getMemoryVT().getStoreSizeInBits()) |
2476 | 0 | return; |
2477 | | |
2478 | | // The load must be an extending one and the constant must be within the |
2479 | | // range of the unextended value. |
2480 | 0 | auto *ConstOp1 = cast<ConstantSDNode>(C.Op1); |
2481 | 0 | if (!ConstOp1 || ConstOp1->getValueSizeInBits(0) > 64) |
2482 | 0 | return; |
2483 | 0 | uint64_t Value = ConstOp1->getZExtValue(); |
2484 | 0 | uint64_t Mask = (1 << NumBits) - 1; |
2485 | 0 | if (Load->getExtensionType() == ISD::SEXTLOAD) { |
2486 | | // Make sure that ConstOp1 is in range of C.Op0. |
2487 | 0 | int64_t SignedValue = ConstOp1->getSExtValue(); |
2488 | 0 | if (uint64_t(SignedValue) + (uint64_t(1) << (NumBits - 1)) > Mask) |
2489 | 0 | return; |
2490 | 0 | if (C.ICmpType != SystemZICMP::SignedOnly) { |
2491 | | // Unsigned comparison between two sign-extended values is equivalent |
2492 | | // to unsigned comparison between two zero-extended values. |
2493 | 0 | Value &= Mask; |
2494 | 0 | } else if (NumBits == 8) { |
2495 | | // Try to treat the comparison as unsigned, so that we can use CLI. |
2496 | | // Adjust CCMask and Value as necessary. |
2497 | 0 | if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_LT) |
2498 | | // Test whether the high bit of the byte is set. |
2499 | 0 | Value = 127, C.CCMask = SystemZ::CCMASK_CMP_GT; |
2500 | 0 | else if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_GE) |
2501 | | // Test whether the high bit of the byte is clear. |
2502 | 0 | Value = 128, C.CCMask = SystemZ::CCMASK_CMP_LT; |
2503 | 0 | else |
2504 | | // No instruction exists for this combination. |
2505 | 0 | return; |
2506 | 0 | C.ICmpType = SystemZICMP::UnsignedOnly; |
2507 | 0 | } |
2508 | 0 | } else if (Load->getExtensionType() == ISD::ZEXTLOAD) { |
2509 | 0 | if (Value > Mask) |
2510 | 0 | return; |
2511 | | // If the constant is in range, we can use any comparison. |
2512 | 0 | C.ICmpType = SystemZICMP::Any; |
2513 | 0 | } else |
2514 | 0 | return; |
2515 | | |
2516 | | // Make sure that the first operand is an i32 of the right extension type. |
2517 | 0 | ISD::LoadExtType ExtType = (C.ICmpType == SystemZICMP::SignedOnly ? |
2518 | 0 | ISD::SEXTLOAD : |
2519 | 0 | ISD::ZEXTLOAD); |
2520 | 0 | if (C.Op0.getValueType() != MVT::i32 || |
2521 | 0 | Load->getExtensionType() != ExtType) { |
2522 | 0 | C.Op0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32, Load->getChain(), |
2523 | 0 | Load->getBasePtr(), Load->getPointerInfo(), |
2524 | 0 | Load->getMemoryVT(), Load->getAlign(), |
2525 | 0 | Load->getMemOperand()->getFlags()); |
2526 | | // Update the chain uses. |
2527 | 0 | DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), C.Op0.getValue(1)); |
2528 | 0 | } |
2529 | | |
2530 | | // Make sure that the second operand is an i32 with the right value. |
2531 | 0 | if (C.Op1.getValueType() != MVT::i32 || |
2532 | 0 | Value != ConstOp1->getZExtValue()) |
2533 | 0 | C.Op1 = DAG.getConstant(Value, DL, MVT::i32); |
2534 | 0 | } |
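The SEXTLOAD range check above leans on unsigned wraparound: adding 2^(NumBits-1) maps the legal signed range onto [0, Mask]. A self-contained instance for the 8-bit case (fitsSExt8 is a hypothetical helper, two's-complement semantics assumed):

    #include <cstdint>
    constexpr bool fitsSExt8(int64_t V) {   // the check above with NumBits == 8
      return uint64_t(V) + (uint64_t(1) << 7) <= 255;   // Mask == 255
    }
    static_assert(fitsSExt8(-128) && fitsSExt8(127), "inside [-128, 127]");
    static_assert(!fitsSExt8(128) && !fitsSExt8(-129), "outside [-128, 127]");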
2535 | | |
2536 | | // Return true if Op is either an unextended load, or a load suitable |
2537 | | // for integer register-memory comparisons of type ICmpType. |
2538 | 0 | static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) { |
2539 | 0 | auto *Load = dyn_cast<LoadSDNode>(Op.getNode()); |
2540 | 0 | if (Load) { |
2541 | | // There are no instructions to compare a register with a memory byte. |
2542 | 0 | if (Load->getMemoryVT() == MVT::i8) |
2543 | 0 | return false; |
2544 | | // Otherwise decide on extension type. |
2545 | 0 | switch (Load->getExtensionType()) { |
2546 | 0 | case ISD::NON_EXTLOAD: |
2547 | 0 | return true; |
2548 | 0 | case ISD::SEXTLOAD: |
2549 | 0 | return ICmpType != SystemZICMP::UnsignedOnly; |
2550 | 0 | case ISD::ZEXTLOAD: |
2551 | 0 | return ICmpType != SystemZICMP::SignedOnly; |
2552 | 0 | default: |
2553 | 0 | break; |
2554 | 0 | } |
2555 | 0 | } |
2556 | 0 | return false; |
2557 | 0 | } |
2558 | | |
2559 | | // Return true if it is better to swap the operands of C. |
2560 | 0 | static bool shouldSwapCmpOperands(const Comparison &C) { |
2561 | | // Leave i128 and f128 comparisons alone, since they have no memory forms. |
2562 | 0 | if (C.Op0.getValueType() == MVT::i128) |
2563 | 0 | return false; |
2564 | 0 | if (C.Op0.getValueType() == MVT::f128) |
2565 | 0 | return false; |
2566 | | |
2567 | | // Always keep a floating-point constant second, since comparisons with |
2568 | | // zero can use LOAD TEST and comparisons with other constants make a |
2569 | | // natural memory operand. |
2570 | 0 | if (isa<ConstantFPSDNode>(C.Op1)) |
2571 | 0 | return false; |
2572 | | |
2573 | | // Never swap comparisons with zero since there are many ways to optimize |
2574 | | // those later. |
2575 | 0 | auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1); |
2576 | 0 | if (ConstOp1 && ConstOp1->getZExtValue() == 0) |
2577 | 0 | return false; |
2578 | | |
2579 | | // Also keep natural memory operands second if the loaded value is |
2580 | | // only used here. Several comparisons have memory forms. |
2581 | 0 | if (isNaturalMemoryOperand(C.Op1, C.ICmpType) && C.Op1.hasOneUse()) |
2582 | 0 | return false; |
2583 | | |
2584 | | // Look for cases where C.Op0 is a single-use load and C.Op1 isn't.
2585 | | // In that case we generally prefer the memory to be second. |
2586 | 0 | if (isNaturalMemoryOperand(C.Op0, C.ICmpType) && C.Op0.hasOneUse()) { |
2587 | | // The only exceptions are when the second operand is a constant and |
2588 | | // we can use things like CHHSI. |
2589 | 0 | if (!ConstOp1) |
2590 | 0 | return true; |
2591 | | // The unsigned memory-immediate instructions can handle 16-bit |
2592 | | // unsigned integers. |
2593 | 0 | if (C.ICmpType != SystemZICMP::SignedOnly && |
2594 | 0 | isUInt<16>(ConstOp1->getZExtValue())) |
2595 | 0 | return false; |
2596 | | // The signed memory-immediate instructions can handle 16-bit |
2597 | | // signed integers. |
2598 | 0 | if (C.ICmpType != SystemZICMP::UnsignedOnly && |
2599 | 0 | isInt<16>(ConstOp1->getSExtValue())) |
2600 | 0 | return false; |
2601 | 0 | return true; |
2602 | 0 | } |
2603 | | |
2604 | | // Try to promote the use of CGFR and CLGFR. |
2605 | 0 | unsigned Opcode0 = C.Op0.getOpcode(); |
2606 | 0 | if (C.ICmpType != SystemZICMP::UnsignedOnly && Opcode0 == ISD::SIGN_EXTEND) |
2607 | 0 | return true; |
2608 | 0 | if (C.ICmpType != SystemZICMP::SignedOnly && Opcode0 == ISD::ZERO_EXTEND) |
2609 | 0 | return true; |
2610 | 0 | if (C.ICmpType != SystemZICMP::SignedOnly && Opcode0 == ISD::AND && |
2611 | 0 | C.Op0.getOperand(1).getOpcode() == ISD::Constant && |
2612 | 0 | C.Op0.getConstantOperandVal(1) == 0xffffffff) |
2613 | 0 | return true; |
2614 | | |
2615 | 0 | return false; |
2616 | 0 | } |
2617 | | |
2618 | | // Check whether C tests for equality between X and Y and whether X - Y |
2619 | | // or Y - X is also computed. In that case it's better to compare the |
2620 | | // result of the subtraction against zero. |
2621 | | static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL, |
2622 | 0 | Comparison &C) { |
2623 | 0 | if (C.CCMask == SystemZ::CCMASK_CMP_EQ || |
2624 | 0 | C.CCMask == SystemZ::CCMASK_CMP_NE) { |
2625 | 0 | for (SDNode *N : C.Op0->uses()) { |
2626 | 0 | if (N->getOpcode() == ISD::SUB && |
2627 | 0 | ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) || |
2628 | 0 | (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) { |
2629 | | // Disable the nsw and nuw flags: the backend needs to handle |
2630 | | // overflow as well during comparison elimination. |
2631 | 0 | SDNodeFlags Flags = N->getFlags(); |
2632 | 0 | Flags.setNoSignedWrap(false); |
2633 | 0 | Flags.setNoUnsignedWrap(false); |
2634 | 0 | N->setFlags(Flags); |
2635 | 0 | C.Op0 = SDValue(N, 0); |
2636 | 0 | C.Op1 = DAG.getConstant(0, DL, N->getValueType(0)); |
2637 | 0 | return; |
2638 | 0 | } |
2639 | 0 | } |
2640 | 0 | } |
2641 | 0 | } |
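An illustrative source-level shape of the pattern this rewrite targets (subAndTest is hypothetical): when the difference is computed anyway, the equality test can ride on the CC that the subtraction already sets.

    long subAndTest(long X, long Y, long *Out) {
      long Diff = X - Y;   // the SUB sets CC as a side effect
      *Out = Diff;
      return X == Y;       // rewritten above into "Diff == 0", reusing that CC
    }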
2642 | | |
2643 | | // Check whether C compares a floating-point value with zero and whether
2644 | | // that floating-point value is also negated. In that case we can use the
2645 | | // negation to set CC, avoiding separate LOAD AND TEST and
2646 | | // LOAD (NEGATIVE/COMPLEMENT) instructions.
2647 | 0 | static void adjustForFNeg(Comparison &C) { |
2648 | | // This optimization is invalid for strict comparisons, since FNEG |
2649 | | // does not raise any exceptions. |
2650 | 0 | if (C.Chain) |
2651 | 0 | return; |
2652 | 0 | auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1); |
2653 | 0 | if (C1 && C1->isZero()) { |
2654 | 0 | for (SDNode *N : C.Op0->uses()) { |
2655 | 0 | if (N->getOpcode() == ISD::FNEG) { |
2656 | 0 | C.Op0 = SDValue(N, 0); |
2657 | 0 | C.CCMask = SystemZ::reverseCCMask(C.CCMask); |
2658 | 0 | return; |
2659 | 0 | } |
2660 | 0 | } |
2661 | 0 | } |
2662 | 0 | } |
2663 | | |
2664 | | // Check whether C compares (shl X, 32) with 0 and whether X is |
2665 | | // also sign-extended. In that case it is better to test the result |
2666 | | // of the sign extension using LTGFR. |
2667 | | // |
2668 | | // This case is important because InstCombine transforms a comparison |
2669 | | // with (sext (trunc X)) into a comparison with (shl X, 32). |
2670 | 0 | static void adjustForLTGFR(Comparison &C) { |
2671 | | // Check for a comparison between (shl X, 32) and 0. |
2672 | 0 | if (C.Op0.getOpcode() == ISD::SHL && C.Op0.getValueType() == MVT::i64 && |
2673 | 0 | C.Op1.getOpcode() == ISD::Constant && C.Op1->getAsZExtVal() == 0) { |
2674 | 0 | auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1)); |
2675 | 0 | if (C1 && C1->getZExtValue() == 32) { |
2676 | 0 | SDValue ShlOp0 = C.Op0.getOperand(0); |
2677 | | // See whether X has any SIGN_EXTEND_INREG uses. |
2678 | 0 | for (SDNode *N : ShlOp0->uses()) { |
2679 | 0 | if (N->getOpcode() == ISD::SIGN_EXTEND_INREG && |
2680 | 0 | cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) { |
2681 | 0 | C.Op0 = SDValue(N, 0); |
2682 | 0 | return; |
2683 | 0 | } |
2684 | 0 | } |
2685 | 0 | } |
2686 | 0 | } |
2687 | 0 | } |
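The rewrite is sound because a signed comparison of (shl X, 32) against zero depends only on the low 32 bits of X, exactly like comparing the 32-to-64-bit sign extension of X. A sketch under two's-complement semantics (both helpers hypothetical):

    #include <cstdint>
    constexpr bool negViaShl(int64_t X) {   // (shl X, 32) < 0
      return int64_t(uint64_t(X) << 32) < 0;
    }
    constexpr bool negViaSext(int64_t X) {  // sign_extend_inreg(X, i32) < 0
      return int64_t(int32_t(uint32_t(X))) < 0;
    }
    static_assert(negViaShl(0x80000000) == negViaSext(0x80000000), "bit 31 set");
    static_assert(negViaShl(0x7fffffff) == negViaSext(0x7fffffff), "bit 31 clear");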
2688 | | |
2689 | | // If C compares the truncation of an extending load, try to compare |
2690 | | // the untruncated value instead. This exposes more opportunities to |
2691 | | // reuse CC. |
2692 | | static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL, |
2693 | 0 | Comparison &C) { |
2694 | 0 | if (C.Op0.getOpcode() == ISD::TRUNCATE && |
2695 | 0 | C.Op0.getOperand(0).getOpcode() == ISD::LOAD && |
2696 | 0 | C.Op1.getOpcode() == ISD::Constant && |
2697 | 0 | cast<ConstantSDNode>(C.Op1)->getValueSizeInBits(0) <= 64 && |
2698 | 0 | C.Op1->getAsZExtVal() == 0) { |
2699 | 0 | auto *L = cast<LoadSDNode>(C.Op0.getOperand(0)); |
2700 | 0 | if (L->getMemoryVT().getStoreSizeInBits().getFixedValue() <= |
2701 | 0 | C.Op0.getValueSizeInBits().getFixedValue()) { |
2702 | 0 | unsigned Type = L->getExtensionType(); |
2703 | 0 | if ((Type == ISD::ZEXTLOAD && C.ICmpType != SystemZICMP::SignedOnly) || |
2704 | 0 | (Type == ISD::SEXTLOAD && C.ICmpType != SystemZICMP::UnsignedOnly)) { |
2705 | 0 | C.Op0 = C.Op0.getOperand(0); |
2706 | 0 | C.Op1 = DAG.getConstant(0, DL, C.Op0.getValueType()); |
2707 | 0 | } |
2708 | 0 | } |
2709 | 0 | } |
2710 | 0 | } |
2711 | | |
2712 | | // Return true if shift operation N has an in-range constant shift value. |
2713 | | // Store it in ShiftVal if so. |
2714 | 0 | static bool isSimpleShift(SDValue N, unsigned &ShiftVal) { |
2715 | 0 | auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1)); |
2716 | 0 | if (!Shift) |
2717 | 0 | return false; |
2718 | | |
2719 | 0 | uint64_t Amount = Shift->getZExtValue(); |
2720 | 0 | if (Amount >= N.getValueSizeInBits()) |
2721 | 0 | return false; |
2722 | | |
2723 | 0 | ShiftVal = Amount; |
2724 | 0 | return true; |
2725 | 0 | } |
2726 | | |
2727 | | // Check whether an AND with Mask is suitable for a TEST UNDER MASK |
2728 | | // instruction and whether the CC value is descriptive enough to handle |
2729 | | // a comparison of type ICmpType between the AND result and CmpVal.
2730 | | // CCMask says which comparison result is being tested and BitSize is |
2731 | | // the number of bits in the operands. If TEST UNDER MASK can be used, |
2732 | | // return the corresponding CC mask, otherwise return 0. |
2733 | | static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask, |
2734 | | uint64_t Mask, uint64_t CmpVal, |
2735 | 0 | unsigned ICmpType) { |
2736 | 0 | assert(Mask != 0 && "ANDs with zero should have been removed by now"); |
2737 | | |
2738 | | // Check whether the mask is suitable for TMHH, TMHL, TMLH or TMLL. |
2739 | 0 | if (!SystemZ::isImmLL(Mask) && !SystemZ::isImmLH(Mask) && |
2740 | 0 | !SystemZ::isImmHL(Mask) && !SystemZ::isImmHH(Mask)) |
2741 | 0 | return 0; |
2742 | | |
2743 | | // Work out the masks for the lowest and highest bits. |
2744 | 0 | uint64_t High = llvm::bit_floor(Mask); |
2745 | 0 | uint64_t Low = uint64_t(1) << llvm::countr_zero(Mask); |
2746 | | |
2747 | | // Signed ordered comparisons are effectively unsigned if the sign |
2748 | | // bit is dropped. |
2749 | 0 | bool EffectivelyUnsigned = (ICmpType != SystemZICMP::SignedOnly); |
2750 | | |
2751 | | // Check for equality comparisons with 0, or the equivalent. |
2752 | 0 | if (CmpVal == 0) { |
2753 | 0 | if (CCMask == SystemZ::CCMASK_CMP_EQ) |
2754 | 0 | return SystemZ::CCMASK_TM_ALL_0; |
2755 | 0 | if (CCMask == SystemZ::CCMASK_CMP_NE) |
2756 | 0 | return SystemZ::CCMASK_TM_SOME_1; |
2757 | 0 | } |
2758 | 0 | if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <= Low) { |
2759 | 0 | if (CCMask == SystemZ::CCMASK_CMP_LT) |
2760 | 0 | return SystemZ::CCMASK_TM_ALL_0; |
2761 | 0 | if (CCMask == SystemZ::CCMASK_CMP_GE) |
2762 | 0 | return SystemZ::CCMASK_TM_SOME_1; |
2763 | 0 | } |
2764 | 0 | if (EffectivelyUnsigned && CmpVal < Low) { |
2765 | 0 | if (CCMask == SystemZ::CCMASK_CMP_LE) |
2766 | 0 | return SystemZ::CCMASK_TM_ALL_0; |
2767 | 0 | if (CCMask == SystemZ::CCMASK_CMP_GT) |
2768 | 0 | return SystemZ::CCMASK_TM_SOME_1; |
2769 | 0 | } |
2770 | | |
2771 | | // Check for equality comparisons with the mask, or the equivalent. |
2772 | 0 | if (CmpVal == Mask) { |
2773 | 0 | if (CCMask == SystemZ::CCMASK_CMP_EQ) |
2774 | 0 | return SystemZ::CCMASK_TM_ALL_1; |
2775 | 0 | if (CCMask == SystemZ::CCMASK_CMP_NE) |
2776 | 0 | return SystemZ::CCMASK_TM_SOME_0; |
2777 | 0 | } |
2778 | 0 | if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) { |
2779 | 0 | if (CCMask == SystemZ::CCMASK_CMP_GT) |
2780 | 0 | return SystemZ::CCMASK_TM_ALL_1; |
2781 | 0 | if (CCMask == SystemZ::CCMASK_CMP_LE) |
2782 | 0 | return SystemZ::CCMASK_TM_SOME_0; |
2783 | 0 | } |
2784 | 0 | if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) { |
2785 | 0 | if (CCMask == SystemZ::CCMASK_CMP_GE) |
2786 | 0 | return SystemZ::CCMASK_TM_ALL_1; |
2787 | 0 | if (CCMask == SystemZ::CCMASK_CMP_LT) |
2788 | 0 | return SystemZ::CCMASK_TM_SOME_0; |
2789 | 0 | } |
2790 | | |
2791 | | // Check for ordered comparisons with the top bit. |
2792 | 0 | if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) { |
2793 | 0 | if (CCMask == SystemZ::CCMASK_CMP_LE) |
2794 | 0 | return SystemZ::CCMASK_TM_MSB_0; |
2795 | 0 | if (CCMask == SystemZ::CCMASK_CMP_GT) |
2796 | 0 | return SystemZ::CCMASK_TM_MSB_1; |
2797 | 0 | } |
2798 | 0 | if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) { |
2799 | 0 | if (CCMask == SystemZ::CCMASK_CMP_LT) |
2800 | 0 | return SystemZ::CCMASK_TM_MSB_0; |
2801 | 0 | if (CCMask == SystemZ::CCMASK_CMP_GE) |
2802 | 0 | return SystemZ::CCMASK_TM_MSB_1; |
2803 | 0 | } |
2804 | | |
2805 | | // If there are just two bits, we can do equality checks for Low and High |
2806 | | // as well. |
2807 | 0 | if (Mask == Low + High) { |
2808 | 0 | if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == Low) |
2809 | 0 | return SystemZ::CCMASK_TM_MIXED_MSB_0; |
2810 | 0 | if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == Low) |
2811 | 0 | return SystemZ::CCMASK_TM_MIXED_MSB_0 ^ SystemZ::CCMASK_ANY; |
2812 | 0 | if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == High) |
2813 | 0 | return SystemZ::CCMASK_TM_MIXED_MSB_1; |
2814 | 0 | if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == High) |
2815 | 0 | return SystemZ::CCMASK_TM_MIXED_MSB_1 ^ SystemZ::CCMASK_ANY; |
2816 | 0 | } |
2817 | | |
2818 | | // Looks like we've exhausted our options. |
2819 | 0 | return 0; |
2820 | 0 | } |
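A worked instance of the "CmpVal <= Low" rule above: with Mask = 0xF0 the AND result is either 0 or at least Low = 0x10, so an unsigned less-than against 0x10 is exactly the all-masked-bits-zero test that CCMASK_TM_ALL_0 encodes. Illustrative sketch:

    #include <cstdint>
    constexpr bool viaCompare(uint8_t X) { return (X & 0xF0u) < 0x10u; }
    constexpr bool viaTM(uint8_t X) { return (X & 0xF0u) == 0; }   // TM ALL_0
    static_assert(viaCompare(0x0F) == viaTM(0x0F), "no masked bit set");
    static_assert(viaCompare(0x10) == viaTM(0x10), "one masked bit set");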
2821 | | |
2822 | | // See whether C can be implemented as a TEST UNDER MASK instruction. |
2823 | | // Update the arguments with the TM version if so. |
2824 | | static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL, |
2825 | 0 | Comparison &C) { |
2826 | | // Use VECTOR TEST UNDER MASK for i128 operations. |
2827 | 0 | if (C.Op0.getValueType() == MVT::i128) { |
2828 | | // We can use VTM for EQ/NE comparisons of x & y against 0. |
2829 | 0 | if (C.Op0.getOpcode() == ISD::AND && |
2830 | 0 | (C.CCMask == SystemZ::CCMASK_CMP_EQ || |
2831 | 0 | C.CCMask == SystemZ::CCMASK_CMP_NE)) { |
2832 | 0 | auto *Mask = dyn_cast<ConstantSDNode>(C.Op1); |
2833 | 0 | if (Mask && Mask->getAPIntValue() == 0) { |
2834 | 0 | C.Opcode = SystemZISD::VTM; |
2835 | 0 | C.Op1 = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, C.Op0.getOperand(1)); |
2836 | 0 | C.Op0 = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, C.Op0.getOperand(0)); |
2837 | 0 | C.CCValid = SystemZ::CCMASK_VCMP; |
2838 | 0 | if (C.CCMask == SystemZ::CCMASK_CMP_EQ) |
2839 | 0 | C.CCMask = SystemZ::CCMASK_VCMP_ALL; |
2840 | 0 | else |
2841 | 0 | C.CCMask = SystemZ::CCMASK_VCMP_ALL ^ C.CCValid; |
2842 | 0 | } |
2843 | 0 | } |
2844 | 0 | return; |
2845 | 0 | } |
2846 | | |
2847 | | // Check that we have a comparison with a constant. |
2848 | 0 | auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1); |
2849 | 0 | if (!ConstOp1) |
2850 | 0 | return; |
2851 | 0 | uint64_t CmpVal = ConstOp1->getZExtValue(); |
2852 | | |
2853 | | // Check whether the nonconstant input is an AND with a constant mask. |
2854 | 0 | Comparison NewC(C); |
2855 | 0 | uint64_t MaskVal; |
2856 | 0 | ConstantSDNode *Mask = nullptr; |
2857 | 0 | if (C.Op0.getOpcode() == ISD::AND) { |
2858 | 0 | NewC.Op0 = C.Op0.getOperand(0); |
2859 | 0 | NewC.Op1 = C.Op0.getOperand(1); |
2860 | 0 | Mask = dyn_cast<ConstantSDNode>(NewC.Op1); |
2861 | 0 | if (!Mask) |
2862 | 0 | return; |
2863 | 0 | MaskVal = Mask->getZExtValue(); |
2864 | 0 | } else { |
2865 | | // There is no instruction to compare with a 64-bit immediate |
2866 | | // so use TMHH instead if possible. We need an unsigned ordered |
2867 | | // comparison with an i64 immediate. |
2868 | 0 | if (NewC.Op0.getValueType() != MVT::i64 || |
2869 | 0 | NewC.CCMask == SystemZ::CCMASK_CMP_EQ || |
2870 | 0 | NewC.CCMask == SystemZ::CCMASK_CMP_NE || |
2871 | 0 | NewC.ICmpType == SystemZICMP::SignedOnly) |
2872 | 0 | return; |
2873 | | // Convert LE and GT comparisons into LT and GE. |
2874 | 0 | if (NewC.CCMask == SystemZ::CCMASK_CMP_LE || |
2875 | 0 | NewC.CCMask == SystemZ::CCMASK_CMP_GT) { |
2876 | 0 | if (CmpVal == uint64_t(-1)) |
2877 | 0 | return; |
2878 | 0 | CmpVal += 1; |
2879 | 0 | NewC.CCMask ^= SystemZ::CCMASK_CMP_EQ; |
2880 | 0 | } |
2881 | | // If the low N bits of Op1 are zero, then the low N bits of Op0 can
2882 | | // be masked off without changing the result. |
2883 | 0 | MaskVal = -(CmpVal & -CmpVal); |
2884 | 0 | NewC.ICmpType = SystemZICMP::UnsignedOnly; |
2885 | 0 | } |
2886 | 0 | if (!MaskVal) |
2887 | 0 | return; |
2888 | | |
2889 | | // Check whether the combination of mask, comparison value and comparison |
2890 | | // type are suitable. |
2891 | 0 | unsigned BitSize = NewC.Op0.getValueSizeInBits(); |
2892 | 0 | unsigned NewCCMask, ShiftVal; |
2893 | 0 | if (NewC.ICmpType != SystemZICMP::SignedOnly && |
2894 | 0 | NewC.Op0.getOpcode() == ISD::SHL && |
2895 | 0 | isSimpleShift(NewC.Op0, ShiftVal) && |
2896 | 0 | (MaskVal >> ShiftVal != 0) && |
2897 | 0 | ((CmpVal >> ShiftVal) << ShiftVal) == CmpVal && |
2898 | 0 | (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, |
2899 | 0 | MaskVal >> ShiftVal, |
2900 | 0 | CmpVal >> ShiftVal, |
2901 | 0 | SystemZICMP::Any))) { |
2902 | 0 | NewC.Op0 = NewC.Op0.getOperand(0); |
2903 | 0 | MaskVal >>= ShiftVal; |
2904 | 0 | } else if (NewC.ICmpType != SystemZICMP::SignedOnly && |
2905 | 0 | NewC.Op0.getOpcode() == ISD::SRL && |
2906 | 0 | isSimpleShift(NewC.Op0, ShiftVal) && |
2907 | 0 | (MaskVal << ShiftVal != 0) && |
2908 | 0 | ((CmpVal << ShiftVal) >> ShiftVal) == CmpVal && |
2909 | 0 | (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, |
2910 | 0 | MaskVal << ShiftVal, |
2911 | 0 | CmpVal << ShiftVal, |
2912 | 0 | SystemZICMP::UnsignedOnly))) { |
2913 | 0 | NewC.Op0 = NewC.Op0.getOperand(0); |
2914 | 0 | MaskVal <<= ShiftVal; |
2915 | 0 | } else { |
2916 | 0 | NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, MaskVal, CmpVal, |
2917 | 0 | NewC.ICmpType); |
2918 | 0 | if (!NewCCMask) |
2919 | 0 | return; |
2920 | 0 | } |
2921 | | |
2922 | | // Go ahead and make the change. |
2923 | 0 | C.Opcode = SystemZISD::TM; |
2924 | 0 | C.Op0 = NewC.Op0; |
2925 | 0 | if (Mask && Mask->getZExtValue() == MaskVal) |
2926 | 0 | C.Op1 = SDValue(Mask, 0); |
2927 | 0 | else |
2928 | 0 | C.Op1 = DAG.getConstant(MaskVal, DL, C.Op0.getValueType()); |
2929 | 0 | C.CCValid = SystemZ::CCMASK_TM; |
2930 | 0 | C.CCMask = NewCCMask; |
2931 | 0 | } |
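The two's-complement step MaskVal = -(CmpVal & -CmpVal) in isolation: it builds a mask of everything from CmpVal's lowest set bit upward, and clearing the bits below that point can never change an unsigned ordered comparison against CmpVal. Sketch (upperMask is a hypothetical helper):

    #include <cstdint>
    constexpr uint64_t upperMask(uint64_t CmpVal) { return -(CmpVal & -CmpVal); }
    static_assert(upperMask(0x100) == ~uint64_t(0xFF), "low 8 bits dropped");
    // e.g. X >= 0x100 holds iff (X & ~0xFFull) >= 0x100, for any uint64_t X.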
2932 | | |
2933 | | // Implement i128 comparison in vector registers. |
2934 | | static void adjustICmp128(SelectionDAG &DAG, const SDLoc &DL, |
2935 | 0 | Comparison &C) { |
2936 | 0 | if (C.Opcode != SystemZISD::ICMP) |
2937 | 0 | return; |
2938 | 0 | if (C.Op0.getValueType() != MVT::i128) |
2939 | 0 | return; |
2940 | | |
2941 | | // (In-)Equality comparisons can be implemented via VCEQGS. |
2942 | 0 | if (C.CCMask == SystemZ::CCMASK_CMP_EQ || |
2943 | 0 | C.CCMask == SystemZ::CCMASK_CMP_NE) { |
2944 | 0 | C.Opcode = SystemZISD::VICMPES; |
2945 | 0 | C.Op0 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, C.Op0); |
2946 | 0 | C.Op1 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, C.Op1); |
2947 | 0 | C.CCValid = SystemZ::CCMASK_VCMP; |
2948 | 0 | if (C.CCMask == SystemZ::CCMASK_CMP_EQ) |
2949 | 0 | C.CCMask = SystemZ::CCMASK_VCMP_ALL; |
2950 | 0 | else |
2951 | 0 | C.CCMask = SystemZ::CCMASK_VCMP_ALL ^ C.CCValid; |
2952 | 0 | return; |
2953 | 0 | } |
2954 | | |
2955 | | // Normalize other comparisons to GT. |
2956 | 0 | bool Swap = false, Invert = false; |
2957 | 0 | switch (C.CCMask) { |
2958 | 0 | case SystemZ::CCMASK_CMP_GT: break; |
2959 | 0 | case SystemZ::CCMASK_CMP_LT: Swap = true; break; |
2960 | 0 | case SystemZ::CCMASK_CMP_LE: Invert = true; break; |
2961 | 0 | case SystemZ::CCMASK_CMP_GE: Swap = Invert = true; break; |
2962 | 0 | default: llvm_unreachable("Invalid integer condition!"); |
2963 | 0 | } |
2964 | 0 | if (Swap) |
2965 | 0 | std::swap(C.Op0, C.Op1); |
2966 | |
2967 | 0 | if (C.ICmpType == SystemZICMP::UnsignedOnly) |
2968 | 0 | C.Opcode = SystemZISD::UCMP128HI; |
2969 | 0 | else |
2970 | 0 | C.Opcode = SystemZISD::SCMP128HI; |
2971 | 0 | C.CCValid = SystemZ::CCMASK_ANY; |
2972 | 0 | C.CCMask = SystemZ::CCMASK_1; |
2973 | |
2974 | 0 | if (Invert) |
2975 | 0 | C.CCMask ^= C.CCValid; |
2976 | 0 | } |
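The swap/invert normalization above is the usual set of order identities, shown here on scalars:

    constexpr bool gt(long A, long B) { return A > B; }
    static_assert((1L <  2L) ==  gt(2, 1), "LT is swapped GT");
    static_assert((1L <= 2L) == !gt(1, 2), "LE is inverted GT");
    static_assert((1L >= 2L) == !gt(2, 1), "GE is swapped and inverted GT");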
2977 | | |
2978 | | // See whether the comparison argument contains a redundant AND |
2979 | | // and remove it if so. This sometimes happens due to the generic |
2980 | | // BRCOND expansion. |
2981 | | static void adjustForRedundantAnd(SelectionDAG &DAG, const SDLoc &DL, |
2982 | 0 | Comparison &C) { |
2983 | 0 | if (C.Op0.getOpcode() != ISD::AND) |
2984 | 0 | return; |
2985 | 0 | auto *Mask = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1)); |
2986 | 0 | if (!Mask || Mask->getValueSizeInBits(0) > 64) |
2987 | 0 | return; |
2988 | 0 | KnownBits Known = DAG.computeKnownBits(C.Op0.getOperand(0)); |
2989 | 0 | if ((~Known.Zero).getZExtValue() & ~Mask->getZExtValue()) |
2990 | 0 | return; |
2991 | | |
2992 | 0 | C.Op0 = C.Op0.getOperand(0); |
2993 | 0 | } |
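The KnownBits test above in scalar form: if every bit that can possibly be set in the operand (~Known.Zero) already lies inside the mask, the AND cannot clear anything. Sketch with hypothetical names:

    #include <cstdint>
    constexpr bool andIsRedundant(uint64_t PossiblySet, uint64_t Mask) {
      return (PossiblySet & ~Mask) == 0;   // mirrors (~Known.Zero) & ~Mask
    }
    static_assert(andIsRedundant(0x0F, 0xFF), "mask covers every live bit");
    static_assert(!andIsRedundant(0x1F0, 0xFF), "bit 8 would be cleared");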
2994 | | |
2995 | | // Return a Comparison that tests the condition-code result of intrinsic |
2996 | | // node Call against constant integer CC using comparison code Cond. |
2997 | | // Opcode is the opcode of the SystemZISD operation for the intrinsic |
2998 | | // and CCValid is the set of possible condition-code results. |
2999 | | static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode, |
3000 | | SDValue Call, unsigned CCValid, uint64_t CC, |
3001 | 0 | ISD::CondCode Cond) { |
3002 | 0 | Comparison C(Call, SDValue(), SDValue()); |
3003 | 0 | C.Opcode = Opcode; |
3004 | 0 | C.CCValid = CCValid; |
3005 | 0 | if (Cond == ISD::SETEQ) |
3006 | | // bit 3 for CC==0, bit 0 for CC==3, always false for CC>3. |
3007 | 0 | C.CCMask = CC < 4 ? 1 << (3 - CC) : 0; |
3008 | 0 | else if (Cond == ISD::SETNE) |
3009 | | // ...and the inverse of that. |
3010 | 0 | C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1; |
3011 | 0 | else if (Cond == ISD::SETLT || Cond == ISD::SETULT) |
3012 | | // bits above bit 3 for CC==0 (always false), bits above bit 0 for CC==3, |
3013 | | // always true for CC>3. |
3014 | 0 | C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1; |
3015 | 0 | else if (Cond == ISD::SETGE || Cond == ISD::SETUGE) |
3016 | | // ...and the inverse of that. |
3017 | 0 | C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0; |
3018 | 0 | else if (Cond == ISD::SETLE || Cond == ISD::SETULE) |
3019 | | // bit 3 and above for CC==0, bit 0 and above for CC==3 (always true), |
3020 | | // always true for CC>3. |
3021 | 0 | C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1; |
3022 | 0 | else if (Cond == ISD::SETGT || Cond == ISD::SETUGT) |
3023 | | // ...and the inverse of that. |
3024 | 0 | C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0; |
3025 | 0 | else |
3026 | 0 | llvm_unreachable("Unexpected integer comparison type"); |
3027 | 0 | C.CCMask &= CCValid; |
3028 | 0 | return C; |
3029 | 0 | } |
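A worked instance of the bit arithmetic above: the mask bit for CC value c is 1 << (3 - c), so "result < 2" (the SETLT case with CC == 2) must cover CC 0 and CC 1, i.e. bits 8 and 4:

    static_assert(((~0u << (4 - 2)) & 0xFu) == ((1u << 3) | (1u << 2)),
                  "CC < 2 selects exactly the CC 0 and CC 1 mask bits");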
3030 | | |
3031 | | // Decide how to implement a comparison of type Cond between CmpOp0 and CmpOp1.
3032 | | static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1, |
3033 | | ISD::CondCode Cond, const SDLoc &DL, |
3034 | | SDValue Chain = SDValue(), |
3035 | 0 | bool IsSignaling = false) { |
3036 | 0 | if (CmpOp1.getOpcode() == ISD::Constant) { |
3037 | 0 | assert(!Chain); |
3038 | 0 | unsigned Opcode, CCValid; |
3039 | 0 | if (CmpOp0.getOpcode() == ISD::INTRINSIC_W_CHAIN && |
3040 | 0 | CmpOp0.getResNo() == 0 && CmpOp0->hasNUsesOfValue(1, 0) && |
3041 | 0 | isIntrinsicWithCCAndChain(CmpOp0, Opcode, CCValid)) |
3042 | 0 | return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, |
3043 | 0 | CmpOp1->getAsZExtVal(), Cond); |
3044 | 0 | if (CmpOp0.getOpcode() == ISD::INTRINSIC_WO_CHAIN && |
3045 | 0 | CmpOp0.getResNo() == CmpOp0->getNumValues() - 1 && |
3046 | 0 | isIntrinsicWithCC(CmpOp0, Opcode, CCValid)) |
3047 | 0 | return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, |
3048 | 0 | CmpOp1->getAsZExtVal(), Cond); |
3049 | 0 | } |
3050 | 0 | Comparison C(CmpOp0, CmpOp1, Chain); |
3051 | 0 | C.CCMask = CCMaskForCondCode(Cond); |
3052 | 0 | if (C.Op0.getValueType().isFloatingPoint()) { |
3053 | 0 | C.CCValid = SystemZ::CCMASK_FCMP; |
3054 | 0 | if (!C.Chain) |
3055 | 0 | C.Opcode = SystemZISD::FCMP; |
3056 | 0 | else if (!IsSignaling) |
3057 | 0 | C.Opcode = SystemZISD::STRICT_FCMP; |
3058 | 0 | else |
3059 | 0 | C.Opcode = SystemZISD::STRICT_FCMPS; |
3060 | 0 | adjustForFNeg(C); |
3061 | 0 | } else { |
3062 | 0 | assert(!C.Chain); |
3063 | 0 | C.CCValid = SystemZ::CCMASK_ICMP; |
3064 | 0 | C.Opcode = SystemZISD::ICMP; |
3065 | | // Choose the type of comparison. Equality and inequality tests can |
3066 | | // use either signed or unsigned comparisons. The choice also doesn't |
3067 | | // matter if both sign bits are known to be clear. In those cases we |
3068 | | // want to give the main isel code the freedom to choose whichever |
3069 | | // form fits best. |
3070 | 0 | if (C.CCMask == SystemZ::CCMASK_CMP_EQ || |
3071 | 0 | C.CCMask == SystemZ::CCMASK_CMP_NE || |
3072 | 0 | (DAG.SignBitIsZero(C.Op0) && DAG.SignBitIsZero(C.Op1))) |
3073 | 0 | C.ICmpType = SystemZICMP::Any; |
3074 | 0 | else if (C.CCMask & SystemZ::CCMASK_CMP_UO) |
3075 | 0 | C.ICmpType = SystemZICMP::UnsignedOnly; |
3076 | 0 | else |
3077 | 0 | C.ICmpType = SystemZICMP::SignedOnly; |
3078 | 0 | C.CCMask &= ~SystemZ::CCMASK_CMP_UO; |
3079 | 0 | adjustForRedundantAnd(DAG, DL, C); |
3080 | 0 | adjustZeroCmp(DAG, DL, C); |
3081 | 0 | adjustSubwordCmp(DAG, DL, C); |
3082 | 0 | adjustForSubtraction(DAG, DL, C); |
3083 | 0 | adjustForLTGFR(C); |
3084 | 0 | adjustICmpTruncate(DAG, DL, C); |
3085 | 0 | } |
3086 | | |
3087 | 0 | if (shouldSwapCmpOperands(C)) { |
3088 | 0 | std::swap(C.Op0, C.Op1); |
3089 | 0 | C.CCMask = SystemZ::reverseCCMask(C.CCMask); |
3090 | 0 | } |
3091 | |
3092 | 0 | adjustForTestUnderMask(DAG, DL, C); |
3093 | 0 | adjustICmp128(DAG, DL, C); |
3094 | 0 | return C; |
3095 | 0 | } |
3096 | | |
3097 | | // Emit the comparison instruction described by C. |
3098 | 0 | static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) { |
3099 | 0 | if (!C.Op1.getNode()) { |
3100 | 0 | SDNode *Node; |
3101 | 0 | switch (C.Op0.getOpcode()) { |
3102 | 0 | case ISD::INTRINSIC_W_CHAIN: |
3103 | 0 | Node = emitIntrinsicWithCCAndChain(DAG, C.Op0, C.Opcode); |
3104 | 0 | return SDValue(Node, 0); |
3105 | 0 | case ISD::INTRINSIC_WO_CHAIN: |
3106 | 0 | Node = emitIntrinsicWithCC(DAG, C.Op0, C.Opcode); |
3107 | 0 | return SDValue(Node, Node->getNumValues() - 1); |
3108 | 0 | default: |
3109 | 0 | llvm_unreachable("Invalid comparison operands"); |
3110 | 0 | } |
3111 | 0 | } |
3112 | 0 | if (C.Opcode == SystemZISD::ICMP) |
3113 | 0 | return DAG.getNode(SystemZISD::ICMP, DL, MVT::i32, C.Op0, C.Op1, |
3114 | 0 | DAG.getTargetConstant(C.ICmpType, DL, MVT::i32)); |
3115 | 0 | if (C.Opcode == SystemZISD::TM) { |
3116 | 0 | bool RegisterOnly = (bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) != |
3117 | 0 | bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_1)); |
3118 | 0 | return DAG.getNode(SystemZISD::TM, DL, MVT::i32, C.Op0, C.Op1, |
3119 | 0 | DAG.getTargetConstant(RegisterOnly, DL, MVT::i32)); |
3120 | 0 | } |
3121 | 0 | if (C.Opcode == SystemZISD::VICMPES) { |
3122 | 0 | SDVTList VTs = DAG.getVTList(C.Op0.getValueType(), MVT::i32); |
3123 | 0 | SDValue Val = DAG.getNode(C.Opcode, DL, VTs, C.Op0, C.Op1); |
3124 | 0 | return SDValue(Val.getNode(), 1); |
3125 | 0 | } |
3126 | 0 | if (C.Chain) { |
3127 | 0 | SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other); |
3128 | 0 | return DAG.getNode(C.Opcode, DL, VTs, C.Chain, C.Op0, C.Op1); |
3129 | 0 | } |
3130 | 0 | return DAG.getNode(C.Opcode, DL, MVT::i32, C.Op0, C.Op1); |
3131 | 0 | } |
3132 | | |
3133 | | // Implement a 32-bit *MUL_LOHI operation by extending both operands to |
3134 | | // 64 bits. Extend is the extension type to use. Store the high part |
3135 | | // in Hi and the low part in Lo. |
3136 | | static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend, |
3137 | | SDValue Op0, SDValue Op1, SDValue &Hi, |
3138 | 0 | SDValue &Lo) { |
3139 | 0 | Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0); |
3140 | 0 | Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1); |
3141 | 0 | SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1); |
3142 | 0 | Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul, |
3143 | 0 | DAG.getConstant(32, DL, MVT::i64)); |
3144 | 0 | Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi); |
3145 | 0 | Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul); |
3146 | 0 | } |
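The same widening trick in freestanding form (umulLoHi32 is a hypothetical helper): one 64-bit multiply yields both halves, an SRL by 32 plus truncation recovers Hi, and a plain truncation recovers Lo.

    #include <cstdint>
    struct LoHi { uint32_t Hi, Lo; };
    constexpr LoHi umulLoHi32(uint32_t A, uint32_t B) {
      uint64_t P = uint64_t(A) * B;               // zero-extend, one 64-bit MUL
      return { uint32_t(P >> 32), uint32_t(P) };  // SRL 32 + truncate; truncate
    }
    static_assert(umulLoHi32(0xFFFFFFFFu, 2u).Hi == 1u, "2*(2^32-1) carries once");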
3147 | | |
3148 | | // Lower a binary operation that produces two VT results, one in each |
3149 | | // half of a GR128 pair. Op0 and Op1 are the VT operands to the operation, |
3150 | | // and Opcode performs the GR128 operation. Store the even register result |
3151 | | // in Even and the odd register result in Odd. |
3152 | | static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT, |
3153 | | unsigned Opcode, SDValue Op0, SDValue Op1, |
3154 | 0 | SDValue &Even, SDValue &Odd) { |
3155 | 0 | SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped, Op0, Op1); |
3156 | 0 | bool Is32Bit = is32Bit(VT); |
3157 | 0 | Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result); |
3158 | 0 | Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result); |
3159 | 0 | } |
3160 | | |
3161 | | // Return an i32 value that is 1 if the CC value produced by CCReg is |
3162 | | // in the mask CCMask and 0 otherwise. CC is known to have a value |
3163 | | // in CCValid, so other values can be ignored. |
3164 | | static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue CCReg, |
3165 | 0 | unsigned CCValid, unsigned CCMask) { |
3166 | 0 | SDValue Ops[] = {DAG.getConstant(1, DL, MVT::i32), |
3167 | 0 | DAG.getConstant(0, DL, MVT::i32), |
3168 | 0 | DAG.getTargetConstant(CCValid, DL, MVT::i32), |
3169 | 0 | DAG.getTargetConstant(CCMask, DL, MVT::i32), CCReg}; |
3170 | 0 | return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, MVT::i32, Ops); |
3171 | 0 | } |
3172 | | |
3173 | | // Return the SystemZISD vector comparison operation for CC, or 0 if it cannot
3174 | | // be done directly. Mode is CmpMode::Int for integer comparisons, CmpMode::FP |
3175 | | // for regular floating-point comparisons, CmpMode::StrictFP for strict (quiet) |
3176 | | // floating-point comparisons, and CmpMode::SignalingFP for strict signaling |
3177 | | // floating-point comparisons. |
3178 | | enum class CmpMode { Int, FP, StrictFP, SignalingFP }; |
3179 | 0 | static unsigned getVectorComparison(ISD::CondCode CC, CmpMode Mode) { |
3180 | 0 | switch (CC) { |
3181 | 0 | case ISD::SETOEQ: |
3182 | 0 | case ISD::SETEQ: |
3183 | 0 | switch (Mode) { |
3184 | 0 | case CmpMode::Int: return SystemZISD::VICMPE; |
3185 | 0 | case CmpMode::FP: return SystemZISD::VFCMPE; |
3186 | 0 | case CmpMode::StrictFP: return SystemZISD::STRICT_VFCMPE; |
3187 | 0 | case CmpMode::SignalingFP: return SystemZISD::STRICT_VFCMPES; |
3188 | 0 | } |
3189 | 0 | llvm_unreachable("Bad mode"); |
3190 | |
3191 | 0 | case ISD::SETOGE: |
3192 | 0 | case ISD::SETGE: |
3193 | 0 | switch (Mode) { |
3194 | 0 | case CmpMode::Int: return 0; |
3195 | 0 | case CmpMode::FP: return SystemZISD::VFCMPHE; |
3196 | 0 | case CmpMode::StrictFP: return SystemZISD::STRICT_VFCMPHE; |
3197 | 0 | case CmpMode::SignalingFP: return SystemZISD::STRICT_VFCMPHES; |
3198 | 0 | } |
3199 | 0 | llvm_unreachable("Bad mode"); |
3200 | |
3201 | 0 | case ISD::SETOGT: |
3202 | 0 | case ISD::SETGT: |
3203 | 0 | switch (Mode) { |
3204 | 0 | case CmpMode::Int: return SystemZISD::VICMPH; |
3205 | 0 | case CmpMode::FP: return SystemZISD::VFCMPH; |
3206 | 0 | case CmpMode::StrictFP: return SystemZISD::STRICT_VFCMPH; |
3207 | 0 | case CmpMode::SignalingFP: return SystemZISD::STRICT_VFCMPHS; |
3208 | 0 | } |
3209 | 0 | llvm_unreachable("Bad mode"); |
3210 | |
3211 | 0 | case ISD::SETUGT: |
3212 | 0 | switch (Mode) { |
3213 | 0 | case CmpMode::Int: return SystemZISD::VICMPHL; |
3214 | 0 | case CmpMode::FP: return 0; |
3215 | 0 | case CmpMode::StrictFP: return 0; |
3216 | 0 | case CmpMode::SignalingFP: return 0; |
3217 | 0 | } |
3218 | 0 | llvm_unreachable("Bad mode"); |
3219 | |
|
3220 | 0 | default: |
3221 | 0 | return 0; |
3222 | 0 | } |
3223 | 0 | } |
3224 | | |
3225 | | // Return the SystemZISD vector comparison operation for CC or its inverse, |
3226 | | // or 0 if neither can be done directly. Indicate in Invert whether the |
3227 | | // result is for the inverse of CC. Mode is as above. |
3228 | | static unsigned getVectorComparisonOrInvert(ISD::CondCode CC, CmpMode Mode, |
3229 | 0 | bool &Invert) { |
3230 | 0 | if (unsigned Opcode = getVectorComparison(CC, Mode)) { |
3231 | 0 | Invert = false; |
3232 | 0 | return Opcode; |
3233 | 0 | } |
3234 | | |
3235 | 0 | CC = ISD::getSetCCInverse(CC, Mode == CmpMode::Int ? MVT::i32 : MVT::f32); |
3236 | 0 | if (unsigned Opcode = getVectorComparison(CC, Mode)) { |
3237 | 0 | Invert = true; |
3238 | 0 | return Opcode; |
3239 | 0 | } |
3240 | | |
3241 | 0 | return 0; |
3242 | 0 | } |
3243 | | |
3244 | | // Return a v2f64 that contains the extended form of elements Start and Start+1 |
3245 | | // of v4f32 value Op. If Chain is nonnull, return the strict form. |
3246 | | static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL, |
3247 | 0 | SDValue Op, SDValue Chain) { |
3248 | 0 | int Mask[] = { Start, -1, Start + 1, -1 }; |
3249 | 0 | Op = DAG.getVectorShuffle(MVT::v4f32, DL, Op, DAG.getUNDEF(MVT::v4f32), Mask); |
3250 | 0 | if (Chain) { |
3251 | 0 | SDVTList VTs = DAG.getVTList(MVT::v2f64, MVT::Other); |
3252 | 0 | return DAG.getNode(SystemZISD::STRICT_VEXTEND, DL, VTs, Chain, Op); |
3253 | 0 | } |
3254 | 0 | return DAG.getNode(SystemZISD::VEXTEND, DL, MVT::v2f64, Op); |
3255 | 0 | } |
3256 | | |
3257 | | // Build a comparison of vectors CmpOp0 and CmpOp1 using opcode Opcode, |
3258 | | // producing a result of type VT. If Chain is nonnull, return the strict form. |
3259 | | SDValue SystemZTargetLowering::getVectorCmp(SelectionDAG &DAG, unsigned Opcode, |
3260 | | const SDLoc &DL, EVT VT, |
3261 | | SDValue CmpOp0, |
3262 | | SDValue CmpOp1, |
3263 | 0 | SDValue Chain) const { |
3264 | | // There is no hardware support for v4f32 (unless we have the vector |
3265 | | // enhancements facility 1), so extend the vector into two v2f64s |
3266 | | // and compare those. |
3267 | 0 | if (CmpOp0.getValueType() == MVT::v4f32 && |
3268 | 0 | !Subtarget.hasVectorEnhancements1()) { |
3269 | 0 | SDValue H0 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp0, Chain); |
3270 | 0 | SDValue L0 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp0, Chain); |
3271 | 0 | SDValue H1 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp1, Chain); |
3272 | 0 | SDValue L1 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp1, Chain); |
3273 | 0 | if (Chain) { |
3274 | 0 | SDVTList VTs = DAG.getVTList(MVT::v2i64, MVT::Other); |
3275 | 0 | SDValue HRes = DAG.getNode(Opcode, DL, VTs, Chain, H0, H1); |
3276 | 0 | SDValue LRes = DAG.getNode(Opcode, DL, VTs, Chain, L0, L1); |
3277 | 0 | SDValue Res = DAG.getNode(SystemZISD::PACK, DL, VT, HRes, LRes); |
3278 | 0 | SDValue Chains[6] = { H0.getValue(1), L0.getValue(1), |
3279 | 0 | H1.getValue(1), L1.getValue(1), |
3280 | 0 | HRes.getValue(1), LRes.getValue(1) }; |
3281 | 0 | SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); |
3282 | 0 | SDValue Ops[2] = { Res, NewChain }; |
3283 | 0 | return DAG.getMergeValues(Ops, DL); |
3284 | 0 | } |
3285 | 0 | SDValue HRes = DAG.getNode(Opcode, DL, MVT::v2i64, H0, H1); |
3286 | 0 | SDValue LRes = DAG.getNode(Opcode, DL, MVT::v2i64, L0, L1); |
3287 | 0 | return DAG.getNode(SystemZISD::PACK, DL, VT, HRes, LRes); |
3288 | 0 | } |
3289 | 0 | if (Chain) { |
3290 | 0 | SDVTList VTs = DAG.getVTList(VT, MVT::Other); |
3291 | 0 | return DAG.getNode(Opcode, DL, VTs, Chain, CmpOp0, CmpOp1); |
3292 | 0 | } |
3293 | 0 | return DAG.getNode(Opcode, DL, VT, CmpOp0, CmpOp1); |
3294 | 0 | } |
3295 | | |
3296 | | // Lower a vector comparison of type CC between CmpOp0 and CmpOp1, producing |
3297 | | // an integer mask of type VT. If Chain is nonnull, we have a strict |
3298 | | // floating-point comparison. If in addition IsSignaling is true, we have |
3299 | | // a strict signaling floating-point comparison. |
3300 | | SDValue SystemZTargetLowering::lowerVectorSETCC(SelectionDAG &DAG, |
3301 | | const SDLoc &DL, EVT VT, |
3302 | | ISD::CondCode CC, |
3303 | | SDValue CmpOp0, |
3304 | | SDValue CmpOp1, |
3305 | | SDValue Chain, |
3306 | 0 | bool IsSignaling) const { |
3307 | 0 | bool IsFP = CmpOp0.getValueType().isFloatingPoint(); |
3308 | 0 | assert (!Chain || IsFP); |
3309 | 0 | assert (!IsSignaling || Chain); |
3310 | 0 | CmpMode Mode = IsSignaling ? CmpMode::SignalingFP : |
3311 | 0 | Chain ? CmpMode::StrictFP : IsFP ? CmpMode::FP : CmpMode::Int; |
3312 | 0 | bool Invert = false; |
3313 | 0 | SDValue Cmp; |
3314 | 0 | switch (CC) { |
3315 | | // Handle tests for order using (or (ogt y x) (oge x y)). |
3316 | 0 | case ISD::SETUO: |
3317 | 0 | Invert = true; |
3318 | 0 | [[fallthrough]]; |
3319 | 0 | case ISD::SETO: { |
3320 | 0 | assert(IsFP && "Unexpected integer comparison"); |
3321 | 0 | SDValue LT = getVectorCmp(DAG, getVectorComparison(ISD::SETOGT, Mode), |
3322 | 0 | DL, VT, CmpOp1, CmpOp0, Chain); |
3323 | 0 | SDValue GE = getVectorCmp(DAG, getVectorComparison(ISD::SETOGE, Mode), |
3324 | 0 | DL, VT, CmpOp0, CmpOp1, Chain); |
3325 | 0 | Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GE); |
3326 | 0 | if (Chain) |
3327 | 0 | Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, |
3328 | 0 | LT.getValue(1), GE.getValue(1)); |
3329 | 0 | break; |
3330 | 0 | } |
3331 | | |
3332 | | // Handle <> tests using (or (ogt y x) (ogt x y)). |
3333 | 0 | case ISD::SETUEQ: |
3334 | 0 | Invert = true; |
3335 | 0 | [[fallthrough]]; |
3336 | 0 | case ISD::SETONE: { |
3337 | 0 | assert(IsFP && "Unexpected integer comparison"); |
3338 | 0 | SDValue LT = getVectorCmp(DAG, getVectorComparison(ISD::SETOGT, Mode), |
3339 | 0 | DL, VT, CmpOp1, CmpOp0, Chain); |
3340 | 0 | SDValue GT = getVectorCmp(DAG, getVectorComparison(ISD::SETOGT, Mode), |
3341 | 0 | DL, VT, CmpOp0, CmpOp1, Chain); |
3342 | 0 | Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GT); |
3343 | 0 | if (Chain) |
3344 | 0 | Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, |
3345 | 0 | LT.getValue(1), GT.getValue(1)); |
3346 | 0 | break; |
3347 | 0 | } |
3348 | | |
3349 | | // Otherwise a single comparison is enough. It doesn't really |
3350 | | // matter whether we try the inversion or the swap first, since |
3351 | | // there are no cases where both work. |
3352 | 0 | default: |
3353 | 0 | if (unsigned Opcode = getVectorComparisonOrInvert(CC, Mode, Invert)) |
3354 | 0 | Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp0, CmpOp1, Chain); |
3355 | 0 | else { |
3356 | 0 | CC = ISD::getSetCCSwappedOperands(CC); |
3357 | 0 | if (unsigned Opcode = getVectorComparisonOrInvert(CC, Mode, Invert)) |
3358 | 0 | Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp1, CmpOp0, Chain); |
3359 | 0 | else |
3360 | 0 | llvm_unreachable("Unhandled comparison"); |
3361 | 0 | } |
3362 | 0 | if (Chain) |
3363 | 0 | Chain = Cmp.getValue(1); |
3364 | 0 | break; |
3365 | 0 | } |
3366 | 0 | if (Invert) { |
3367 | 0 | SDValue Mask = |
3368 | 0 | DAG.getSplatBuildVector(VT, DL, DAG.getConstant(-1, DL, MVT::i64)); |
3369 | 0 | Cmp = DAG.getNode(ISD::XOR, DL, VT, Cmp, Mask); |
3370 | 0 | } |
3371 | 0 | if (Chain && Chain.getNode() != Cmp.getNode()) { |
3372 | 0 | SDValue Ops[2] = { Cmp, Chain }; |
3373 | 0 | Cmp = DAG.getMergeValues(Ops, DL); |
3374 | 0 | } |
3375 | 0 | return Cmp; |
3376 | 0 | } |
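The SETO expansion above rests on a total-order identity: for any ordered pair either y > x or x >= y holds, while both halves are false as soon as a NaN is involved. Scalar sketch:

    #include <limits>
    constexpr bool ordered(double X, double Y) { return (Y > X) || (X >= Y); }
    static_assert(ordered(1.0, 2.0) && ordered(2.0, 1.0), "ordered inputs");
    static_assert(!ordered(std::numeric_limits<double>::quiet_NaN(), 1.0),
                  "a NaN operand defeats both halves");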
3377 | | |
3378 | | SDValue SystemZTargetLowering::lowerSETCC(SDValue Op, |
3379 | 0 | SelectionDAG &DAG) const { |
3380 | 0 | SDValue CmpOp0 = Op.getOperand(0); |
3381 | 0 | SDValue CmpOp1 = Op.getOperand(1); |
3382 | 0 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); |
3383 | 0 | SDLoc DL(Op); |
3384 | 0 | EVT VT = Op.getValueType(); |
3385 | 0 | if (VT.isVector()) |
3386 | 0 | return lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1); |
3387 | | |
3388 | 0 | Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL)); |
3389 | 0 | SDValue CCReg = emitCmp(DAG, DL, C); |
3390 | 0 | return emitSETCC(DAG, DL, CCReg, C.CCValid, C.CCMask); |
3391 | 0 | } |
3392 | | |
3393 | | SDValue SystemZTargetLowering::lowerSTRICT_FSETCC(SDValue Op, |
3394 | | SelectionDAG &DAG, |
3395 | 0 | bool IsSignaling) const { |
3396 | 0 | SDValue Chain = Op.getOperand(0); |
3397 | 0 | SDValue CmpOp0 = Op.getOperand(1); |
3398 | 0 | SDValue CmpOp1 = Op.getOperand(2); |
3399 | 0 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(3))->get(); |
3400 | 0 | SDLoc DL(Op); |
3401 | 0 | EVT VT = Op.getNode()->getValueType(0); |
3402 | 0 | if (VT.isVector()) { |
3403 | 0 | SDValue Res = lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1, |
3404 | 0 | Chain, IsSignaling); |
3405 | 0 | return Res.getValue(Op.getResNo()); |
3406 | 0 | } |
3407 | | |
3408 | 0 | Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL, Chain, IsSignaling)); |
3409 | 0 | SDValue CCReg = emitCmp(DAG, DL, C); |
3410 | 0 | CCReg->setFlags(Op->getFlags()); |
3411 | 0 | SDValue Result = emitSETCC(DAG, DL, CCReg, C.CCValid, C.CCMask); |
3412 | 0 | SDValue Ops[2] = { Result, CCReg.getValue(1) }; |
3413 | 0 | return DAG.getMergeValues(Ops, DL); |
3414 | 0 | } |
3415 | | |
3416 | 0 | SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const { |
3417 | 0 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); |
3418 | 0 | SDValue CmpOp0 = Op.getOperand(2); |
3419 | 0 | SDValue CmpOp1 = Op.getOperand(3); |
3420 | 0 | SDValue Dest = Op.getOperand(4); |
3421 | 0 | SDLoc DL(Op); |
3422 | |
3423 | 0 | Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL)); |
3424 | 0 | SDValue CCReg = emitCmp(DAG, DL, C); |
3425 | 0 | return DAG.getNode( |
3426 | 0 | SystemZISD::BR_CCMASK, DL, Op.getValueType(), Op.getOperand(0), |
3427 | 0 | DAG.getTargetConstant(C.CCValid, DL, MVT::i32), |
3428 | 0 | DAG.getTargetConstant(C.CCMask, DL, MVT::i32), Dest, CCReg); |
3429 | 0 | } |
3430 | | |
3431 | | // Return true if Pos is CmpOp and Neg is the negative of CmpOp, |
3432 | | // allowing Pos and Neg to be wider than CmpOp. |
3433 | 0 | static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg) { |
3434 | 0 | return (Neg.getOpcode() == ISD::SUB && |
3435 | 0 | Neg.getOperand(0).getOpcode() == ISD::Constant && |
3436 | 0 | Neg.getConstantOperandVal(0) == 0 && Neg.getOperand(1) == Pos && |
3437 | 0 | (Pos == CmpOp || (Pos.getOpcode() == ISD::SIGN_EXTEND && |
3438 | 0 | Pos.getOperand(0) == CmpOp))); |
3439 | 0 | } |
3440 | | |
3441 | | // Return the absolute or negative absolute of Op; IsNegative decides which. |
3442 | | static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op, |
3443 | 0 | bool IsNegative) { |
3444 | 0 | Op = DAG.getNode(ISD::ABS, DL, Op.getValueType(), Op); |
3445 | 0 | if (IsNegative) |
3446 | 0 | Op = DAG.getNode(ISD::SUB, DL, Op.getValueType(), |
3447 | 0 | DAG.getConstant(0, DL, Op.getValueType()), Op); |
3448 | 0 | return Op; |
3449 | 0 | } |
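Source shape of the pattern isAbsolute() matches (absSelect is illustrative): one select arm is an operand, the other its negation via SUB-from-zero, which lowerSELECT_CC below folds into a single ABS, or a negated ABS when the arms are mirrored.

    long absSelect(long X) {
      return X < 0 ? 0 - X : X;   // matched as ABS; swapped arms give -ABS
    }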
3450 | | |
3451 | | SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op, |
3452 | 0 | SelectionDAG &DAG) const { |
3453 | 0 | SDValue CmpOp0 = Op.getOperand(0); |
3454 | 0 | SDValue CmpOp1 = Op.getOperand(1); |
3455 | 0 | SDValue TrueOp = Op.getOperand(2); |
3456 | 0 | SDValue FalseOp = Op.getOperand(3); |
3457 | 0 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); |
3458 | 0 | SDLoc DL(Op); |
3459 | |
3460 | 0 | Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL)); |
3461 | | |
3462 | | // Check for absolute and negative-absolute selections, including those |
3463 | | // where the comparison value is sign-extended (for LPGFR and LNGFR). |
3464 | | // This check supplements the one in DAGCombiner. |
3465 | 0 | if (C.Opcode == SystemZISD::ICMP && C.CCMask != SystemZ::CCMASK_CMP_EQ && |
3466 | 0 | C.CCMask != SystemZ::CCMASK_CMP_NE && |
3467 | 0 | C.Op1.getOpcode() == ISD::Constant && |
3468 | 0 | cast<ConstantSDNode>(C.Op1)->getValueSizeInBits(0) <= 64 && |
3469 | 0 | C.Op1->getAsZExtVal() == 0) { |
3470 | 0 | if (isAbsolute(C.Op0, TrueOp, FalseOp)) |
3471 | 0 | return getAbsolute(DAG, DL, TrueOp, C.CCMask & SystemZ::CCMASK_CMP_LT); |
3472 | 0 | if (isAbsolute(C.Op0, FalseOp, TrueOp)) |
3473 | 0 | return getAbsolute(DAG, DL, FalseOp, C.CCMask & SystemZ::CCMASK_CMP_GT); |
3474 | 0 | } |
3475 | | |
3476 | 0 | SDValue CCReg = emitCmp(DAG, DL, C); |
3477 | 0 | SDValue Ops[] = {TrueOp, FalseOp, |
3478 | 0 | DAG.getTargetConstant(C.CCValid, DL, MVT::i32), |
3479 | 0 | DAG.getTargetConstant(C.CCMask, DL, MVT::i32), CCReg}; |
3480 | |
3481 | 0 | return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, Op.getValueType(), Ops); |
3482 | 0 | } |
3483 | | |
3484 | | SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node, |
3485 | 0 | SelectionDAG &DAG) const { |
3486 | 0 | SDLoc DL(Node); |
3487 | 0 | const GlobalValue *GV = Node->getGlobal(); |
3488 | 0 | int64_t Offset = Node->getOffset(); |
3489 | 0 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
3490 | 0 | CodeModel::Model CM = DAG.getTarget().getCodeModel(); |
3491 | |
3492 | 0 | SDValue Result; |
3493 | 0 | if (Subtarget.isPC32DBLSymbol(GV, CM)) { |
3494 | 0 | if (isInt<32>(Offset)) { |
3495 | | // Assign anchors at 1<<12 byte boundaries. |
3496 | 0 | uint64_t Anchor = Offset & ~uint64_t(0xfff); |
3497 | 0 | Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor); |
3498 | 0 | Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); |
3499 | | |
3500 | | // The offset can be folded into the address if it is aligned to a |
3501 | | // halfword. |
3502 | 0 | Offset -= Anchor; |
3503 | 0 | if (Offset != 0 && (Offset & 1) == 0) { |
3504 | 0 | SDValue Full = |
3505 | 0 | DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor + Offset); |
3506 | 0 | Result = DAG.getNode(SystemZISD::PCREL_OFFSET, DL, PtrVT, Full, Result); |
3507 | 0 | Offset = 0; |
3508 | 0 | } |
3509 | 0 | } else { |
3510 | | // Conservatively load a constant offset greater than 32 bits into a |
3511 | | // register below. |
3512 | 0 | Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT); |
3513 | 0 | Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); |
3514 | 0 | } |
3515 | 0 | } else if (Subtarget.isTargetELF()) { |
3516 | 0 | Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT); |
3517 | 0 | Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); |
3518 | 0 | Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result, |
3519 | 0 | MachinePointerInfo::getGOT(DAG.getMachineFunction())); |
3520 | 0 | } else if (Subtarget.isTargetzOS()) { |
3521 | 0 | Result = getADAEntry(DAG, GV, DL, PtrVT); |
3522 | 0 | } else |
3523 | 0 | llvm_unreachable("Unexpected Subtarget"); |
3524 | | |
3525 | | // If there was a non-zero offset that we didn't fold, create an explicit |
3526 | | // addition for it. |
3527 | 0 | if (Offset != 0) |
3528 | 0 | Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result, |
3529 | 0 | DAG.getConstant(Offset, DL, PtrVT)); |
3530 | |
3531 | 0 | return Result; |
3532 | 0 | } |
3533 | | |
3534 | | SDValue SystemZTargetLowering::lowerTLSGetOffset(GlobalAddressSDNode *Node, |
3535 | | SelectionDAG &DAG, |
3536 | | unsigned Opcode, |
3537 | 0 | SDValue GOTOffset) const { |
3538 | 0 | SDLoc DL(Node); |
3539 | 0 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
3540 | 0 | SDValue Chain = DAG.getEntryNode(); |
3541 | 0 | SDValue Glue; |
3542 | |
3543 | 0 | if (DAG.getMachineFunction().getFunction().getCallingConv() == |
3544 | 0 | CallingConv::GHC) |
3545 | 0 | report_fatal_error("In GHC calling convention TLS is not supported"); |
3546 | | |
3547 | | // __tls_get_offset takes the GOT offset in %r2 and the GOT in %r12. |
3548 | 0 | SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT); |
3549 | 0 | Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R12D, GOT, Glue); |
3550 | 0 | Glue = Chain.getValue(1); |
3551 | 0 | Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue); |
3552 | 0 | Glue = Chain.getValue(1); |
3553 | | |
3554 | | // The first call operand is the chain and the second is the TLS symbol. |
3555 | 0 | SmallVector<SDValue, 8> Ops; |
3556 | 0 | Ops.push_back(Chain); |
3557 | 0 | Ops.push_back(DAG.getTargetGlobalAddress(Node->getGlobal(), DL, |
3558 | 0 | Node->getValueType(0), |
3559 | 0 | 0, 0)); |
3560 | | |
3561 | | // Add argument registers to the end of the list so that they are |
3562 | | // known live into the call. |
3563 | 0 | Ops.push_back(DAG.getRegister(SystemZ::R2D, PtrVT)); |
3564 | 0 | Ops.push_back(DAG.getRegister(SystemZ::R12D, PtrVT)); |
3565 | | |
3566 | | // Add a register mask operand representing the call-preserved registers. |
3567 | 0 | const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); |
3568 | 0 | const uint32_t *Mask = |
3569 | 0 | TRI->getCallPreservedMask(DAG.getMachineFunction(), CallingConv::C); |
3570 | 0 | assert(Mask && "Missing call preserved mask for calling convention"); |
3571 | 0 | Ops.push_back(DAG.getRegisterMask(Mask)); |
3572 | | |
3573 | | // Glue the call to the argument copies. |
3574 | 0 | Ops.push_back(Glue); |
3575 | | |
3576 | | // Emit the call. |
3577 | 0 | SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); |
3578 | 0 | Chain = DAG.getNode(Opcode, DL, NodeTys, Ops); |
3579 | 0 | Glue = Chain.getValue(1); |
3580 | | |
3581 | | // Copy the return value from %r2. |
3582 | 0 | return DAG.getCopyFromReg(Chain, DL, SystemZ::R2D, PtrVT, Glue); |
3583 | 0 | } |
3584 | | |
3585 | | SDValue SystemZTargetLowering::lowerThreadPointer(const SDLoc &DL, |
3586 | 0 | SelectionDAG &DAG) const { |
3587 | 0 | SDValue Chain = DAG.getEntryNode(); |
3588 | 0 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
3589 | | |
3590 | | // The high part of the thread pointer is in access register 0. |
3591 | 0 | SDValue TPHi = DAG.getCopyFromReg(Chain, DL, SystemZ::A0, MVT::i32); |
3592 | 0 | TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi); |
3593 | | |
3594 | | // The low part of the thread pointer is in access register 1. |
3595 | 0 | SDValue TPLo = DAG.getCopyFromReg(Chain, DL, SystemZ::A1, MVT::i32); |
3596 | 0 | TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo); |
3597 | | |
3598 | | // Merge them into a single 64-bit address. |
3599 | 0 | SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi, |
3600 | 0 | DAG.getConstant(32, DL, PtrVT)); |
3601 | 0 | return DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo); |
3602 | 0 | } |
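The final merge step in plain arithmetic (mergeTP is a hypothetical helper): shift the high access-register half up by 32 bits and OR in the zero-extended low half.

    #include <cstdint>
    constexpr uint64_t mergeTP(uint32_t HiA0, uint32_t LoA1) {
      return (uint64_t(HiA0) << 32) | LoA1;   // SHL 32, then OR
    }
    static_assert(mergeTP(0x1u, 0x2u) == 0x100000002ull, "both halves preserved");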
3603 | | |
3604 | | SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node, |
3605 | 0 | SelectionDAG &DAG) const { |
3606 | 0 | if (DAG.getTarget().useEmulatedTLS()) |
3607 | 0 | return LowerToTLSEmulatedModel(Node, DAG); |
3608 | 0 | SDLoc DL(Node); |
3609 | 0 | const GlobalValue *GV = Node->getGlobal(); |
3610 | 0 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
3611 | 0 | TLSModel::Model model = DAG.getTarget().getTLSModel(GV); |
3612 | |
3613 | 0 | if (DAG.getMachineFunction().getFunction().getCallingConv() == |
3614 | 0 | CallingConv::GHC) |
3615 | 0 | report_fatal_error("In GHC calling convention TLS is not supported"); |
3616 | |
3617 | 0 | SDValue TP = lowerThreadPointer(DL, DAG); |
3618 | | |
3619 | | // Get the offset of GA from the thread pointer, based on the TLS model. |
3620 | 0 | SDValue Offset; |
3621 | 0 | switch (model) { |
3622 | 0 | case TLSModel::GeneralDynamic: { |
3623 | | // Load the GOT offset of the tls_index (module ID / per-symbol offset). |
3624 | 0 | SystemZConstantPoolValue *CPV = |
3625 | 0 | SystemZConstantPoolValue::Create(GV, SystemZCP::TLSGD); |
3626 | |
3627 | 0 | Offset = DAG.getConstantPool(CPV, PtrVT, Align(8)); |
3628 | 0 | Offset = DAG.getLoad( |
3629 | 0 | PtrVT, DL, DAG.getEntryNode(), Offset, |
3630 | 0 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
3631 | | |
3632 | | // Call __tls_get_offset to retrieve the offset. |
3633 | 0 | Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_GDCALL, Offset); |
3634 | 0 | break; |
3635 | 0 | } |
3636 | | |
3637 | 0 | case TLSModel::LocalDynamic: { |
3638 | | // Load the GOT offset of the module ID. |
3639 | 0 | SystemZConstantPoolValue *CPV = |
3640 | 0 | SystemZConstantPoolValue::Create(GV, SystemZCP::TLSLDM); |
3641 | |
3642 | 0 | Offset = DAG.getConstantPool(CPV, PtrVT, Align(8)); |
3643 | 0 | Offset = DAG.getLoad( |
3644 | 0 | PtrVT, DL, DAG.getEntryNode(), Offset, |
3645 | 0 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
3646 | | |
3647 | | // Call __tls_get_offset to retrieve the module base offset. |
3648 | 0 | Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_LDCALL, Offset); |
3649 | | |
3650 | | // Note: The SystemZLDCleanupPass will remove redundant computations |
3651 | | // of the module base offset. Count total number of local-dynamic |
3652 | | // accesses to trigger execution of that pass. |
3653 | 0 | SystemZMachineFunctionInfo* MFI = |
3654 | 0 | DAG.getMachineFunction().getInfo<SystemZMachineFunctionInfo>(); |
3655 | 0 | MFI->incNumLocalDynamicTLSAccesses(); |
3656 | | |
3657 | | // Add the per-symbol offset. |
3658 | 0 | CPV = SystemZConstantPoolValue::Create(GV, SystemZCP::DTPOFF); |
3659 | |
3660 | 0 | SDValue DTPOffset = DAG.getConstantPool(CPV, PtrVT, Align(8)); |
3661 | 0 | DTPOffset = DAG.getLoad( |
3662 | 0 | PtrVT, DL, DAG.getEntryNode(), DTPOffset, |
3663 | 0 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
3664 | |
3665 | 0 | Offset = DAG.getNode(ISD::ADD, DL, PtrVT, Offset, DTPOffset); |
3666 | 0 | break; |
3667 | 0 | } |
3668 | | |
3669 | 0 | case TLSModel::InitialExec: { |
3670 | | // Load the offset from the GOT. |
3671 | 0 | Offset = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, |
3672 | 0 | SystemZII::MO_INDNTPOFF); |
3673 | 0 | Offset = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Offset); |
3674 | 0 | Offset = |
3675 | 0 | DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Offset, |
3676 | 0 | MachinePointerInfo::getGOT(DAG.getMachineFunction())); |
3677 | 0 | break; |
3678 | 0 | } |
3679 | | |
3680 | 0 | case TLSModel::LocalExec: { |
3681 | | // Force the offset into the constant pool and load it from there. |
3682 | 0 | SystemZConstantPoolValue *CPV = |
3683 | 0 | SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF); |
3684 | |
3685 | 0 | Offset = DAG.getConstantPool(CPV, PtrVT, Align(8)); |
3686 | 0 | Offset = DAG.getLoad( |
3687 | 0 | PtrVT, DL, DAG.getEntryNode(), Offset, |
3688 | 0 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
3689 | 0 | break; |
3690 | 0 | } |
3691 | 0 | } |
3692 | | |
3693 | | // Add the base and offset together. |
3694 | 0 | return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset); |
3695 | 0 | } |
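 | | // Illustrative summary of the four models handled above; in every case the
 | | // final address is TP + Offset:
 | | //
 | | //   GeneralDynamic: Offset = __tls_get_offset(GOT offset of x@TLSGD)
 | | //   LocalDynamic:   Offset = __tls_get_offset(GOT offset of m@TLSLDM)
 | | //                          + x@DTPOFF
 | | //   InitialExec:    Offset = *(GOT entry addressed via x@INDNTPOFF)
 | | //   LocalExec:      Offset = x@NTPOFF (loaded from the constant pool)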
3696 | | |
3697 | | SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node, |
3698 | 0 | SelectionDAG &DAG) const { |
3699 | 0 | SDLoc DL(Node); |
3700 | 0 | const BlockAddress *BA = Node->getBlockAddress(); |
3701 | 0 | int64_t Offset = Node->getOffset(); |
3702 | 0 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
3703 | |
3704 | 0 | SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset); |
3705 | 0 | Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); |
3706 | 0 | return Result; |
3707 | 0 | } |
3708 | | |
3709 | | SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT, |
3710 | 0 | SelectionDAG &DAG) const { |
3711 | 0 | SDLoc DL(JT); |
3712 | 0 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
3713 | 0 | SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); |
3714 | | |
3715 | | // Use LARL to load the address of the table. |
3716 | 0 | return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); |
3717 | 0 | } |
3718 | | |
3719 | | SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP, |
3720 | 0 | SelectionDAG &DAG) const { |
3721 | 0 | SDLoc DL(CP); |
3722 | 0 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
3723 | |
3724 | 0 | SDValue Result; |
3725 | 0 | if (CP->isMachineConstantPoolEntry()) |
3726 | 0 | Result = |
3727 | 0 | DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, CP->getAlign()); |
3728 | 0 | else |
3729 | 0 | Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlign(), |
3730 | 0 | CP->getOffset()); |
3731 | | |
3732 | | // Use LARL to load the address of the constant pool entry. |
3733 | 0 | return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); |
3734 | 0 | } |
3735 | | |
3736 | | SDValue SystemZTargetLowering::lowerFRAMEADDR(SDValue Op, |
3737 | 0 | SelectionDAG &DAG) const { |
3738 | 0 | auto *TFL = Subtarget.getFrameLowering<SystemZELFFrameLowering>(); |
3739 | 0 | MachineFunction &MF = DAG.getMachineFunction(); |
3740 | 0 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
3741 | 0 | MFI.setFrameAddressIsTaken(true); |
3742 | |
3743 | 0 | SDLoc DL(Op); |
3744 | 0 | unsigned Depth = Op.getConstantOperandVal(0); |
3745 | 0 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
3746 | | |
3747 | | // By definition, the frame address is the address of the backchain. (In
3748 | | // the case of a packed stack without backchain, return the address where
3749 | | // the backchain would have been stored. That slot is either unused space
3750 | | // or contains a saved register.)
3751 | 0 | int BackChainIdx = TFL->getOrCreateFramePointerSaveIndex(MF); |
3752 | 0 | SDValue BackChain = DAG.getFrameIndex(BackChainIdx, PtrVT); |
3753 | |
3754 | 0 | if (Depth > 0) { |
3755 | | // FIXME The frontend should detect this case. |
3756 | 0 | if (!MF.getSubtarget<SystemZSubtarget>().hasBackChain()) |
3757 | 0 | report_fatal_error("Unsupported stack frame traversal count"); |
3758 | |
3759 | 0 | SDValue Offset = DAG.getConstant(TFL->getBackchainOffset(MF), DL, PtrVT); |
3760 | 0 | while (Depth--) { |
3761 | 0 | BackChain = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), BackChain, |
3762 | 0 | MachinePointerInfo()); |
3763 | 0 | BackChain = DAG.getNode(ISD::ADD, DL, PtrVT, BackChain, Offset); |
3764 | 0 | } |
3765 | 0 | } |
3766 | |
3767 | 0 | return BackChain; |
3768 | 0 | } |
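 | | // A sketch of the traversal above, assuming each frame's backchain slot
 | | // holds the caller's stack pointer:
 | | //
 | | //   char *Addr = BackChainSlot;                  // Depth == 0
 | | //   while (Depth--)
 | | //     Addr = *(char **)Addr + BackchainOffset;   // one frame up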
3769 | | |
3770 | | SDValue SystemZTargetLowering::lowerRETURNADDR(SDValue Op, |
3771 | 0 | SelectionDAG &DAG) const { |
3772 | 0 | MachineFunction &MF = DAG.getMachineFunction(); |
3773 | 0 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
3774 | 0 | MFI.setReturnAddressIsTaken(true); |
3775 | |
3776 | 0 | if (verifyReturnAddressArgumentIsConstant(Op, DAG)) |
3777 | 0 | return SDValue(); |
3778 | | |
3779 | 0 | SDLoc DL(Op); |
3780 | 0 | unsigned Depth = Op.getConstantOperandVal(0); |
3781 | 0 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
3782 | |
3783 | 0 | if (Depth > 0) { |
3784 | | // FIXME The frontend should detect this case. |
3785 | 0 | if (!MF.getSubtarget<SystemZSubtarget>().hasBackChain()) |
3786 | 0 | report_fatal_error("Unsupported stack frame traversal count"); |
3787 | |
3788 | 0 | SDValue FrameAddr = lowerFRAMEADDR(Op, DAG); |
3789 | 0 | auto *TFL = Subtarget.getFrameLowering<SystemZELFFrameLowering>(); |
3790 | 0 | int Offset = (TFL->usePackedStack(MF) ? -2 : 14) * |
3791 | 0 | getTargetMachine().getPointerSize(0); |
3792 | 0 | SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, FrameAddr, |
3793 | 0 | DAG.getConstant(Offset, DL, PtrVT)); |
3794 | 0 | return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, |
3795 | 0 | MachinePointerInfo()); |
3796 | 0 | } |
3797 | | |
3798 | | // Return R14D, which has the return address. Mark it an implicit live-in. |
3799 | 0 | Register LinkReg = MF.addLiveIn(SystemZ::R14D, &SystemZ::GR64BitRegClass); |
3800 | 0 | return DAG.getCopyFromReg(DAG.getEntryNode(), DL, LinkReg, PtrVT); |
3801 | 0 | } |
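 | | // A sketch of the Depth > 0 case above: the return address is read from a
 | | // fixed slot relative to the caller's frame address (slot 14 of the 8-byte
 | | // save slots in the standard layout, or -2 slots with packed stack):
 | | //
 | | //   long Offset = (UsePackedStack ? -2 : 14) * 8;
 | | //   void *RetAddr = *(void **)(FrameAddr + Offset);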
3802 | | |
3803 | | SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op, |
3804 | 0 | SelectionDAG &DAG) const { |
3805 | 0 | SDLoc DL(Op); |
3806 | 0 | SDValue In = Op.getOperand(0); |
3807 | 0 | EVT InVT = In.getValueType(); |
3808 | 0 | EVT ResVT = Op.getValueType(); |
3809 | | |
3810 | | // Convert loads directly. This is normally done by DAGCombiner, |
3811 | | // but we need this case for bitcasts that are created during lowering |
3812 | | // and which are then lowered themselves. |
3813 | 0 | if (auto *LoadN = dyn_cast<LoadSDNode>(In)) |
3814 | 0 | if (ISD::isNormalLoad(LoadN)) { |
3815 | 0 | SDValue NewLoad = DAG.getLoad(ResVT, DL, LoadN->getChain(), |
3816 | 0 | LoadN->getBasePtr(), LoadN->getMemOperand()); |
3817 | | // Update the chain uses. |
3818 | 0 | DAG.ReplaceAllUsesOfValueWith(SDValue(LoadN, 1), NewLoad.getValue(1)); |
3819 | 0 | return NewLoad; |
3820 | 0 | } |
3821 | | |
3822 | 0 | if (InVT == MVT::i32 && ResVT == MVT::f32) { |
3823 | 0 | SDValue In64; |
3824 | 0 | if (Subtarget.hasHighWord()) { |
3825 | 0 | SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, |
3826 | 0 | MVT::i64); |
3827 | 0 | In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL, |
3828 | 0 | MVT::i64, SDValue(U64, 0), In); |
3829 | 0 | } else { |
3830 | 0 | In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In); |
3831 | 0 | In64 = DAG.getNode(ISD::SHL, DL, MVT::i64, In64, |
3832 | 0 | DAG.getConstant(32, DL, MVT::i64)); |
3833 | 0 | } |
3834 | 0 | SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, In64); |
3835 | 0 | return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, |
3836 | 0 | DL, MVT::f32, Out64); |
3837 | 0 | } |
3838 | 0 | if (InVT == MVT::f32 && ResVT == MVT::i32) { |
3839 | 0 | SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64); |
3840 | 0 | SDValue In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL, |
3841 | 0 | MVT::f64, SDValue(U64, 0), In); |
3842 | 0 | SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, In64); |
3843 | 0 | if (Subtarget.hasHighWord()) |
3844 | 0 | return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, DL, |
3845 | 0 | MVT::i32, Out64); |
3846 | 0 | SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64, |
3847 | 0 | DAG.getConstant(32, DL, MVT::i64)); |
3848 | 0 | return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift); |
3849 | 0 | } |
3850 | 0 | llvm_unreachable("Unexpected bitcast combination"); |
3851 | 0 | } |
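 | | // Illustrative invariant behind the lowering above: an f32 occupies the
 | | // high 32 bits of its 64-bit FP register, so i32 <-> f32 bitcasts go
 | | // through i64 <-> f64 with a 32-bit shift (or a high-word subreg move
 | | // when high-word support is available):
 | | //
 | | //   Bits64 = (uint64_t)Bits32 << 32;   // i32 -> f32 direction
 | | //   Bits32 = (uint32_t)(Bits64 >> 32); // f32 -> i32 direction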
3852 | | |
3853 | | SDValue SystemZTargetLowering::lowerVASTART(SDValue Op, |
3854 | 0 | SelectionDAG &DAG) const { |
3855 | |
3856 | 0 | if (Subtarget.isTargetXPLINK64()) |
3857 | 0 | return lowerVASTART_XPLINK(Op, DAG); |
3858 | 0 | else |
3859 | 0 | return lowerVASTART_ELF(Op, DAG); |
3860 | 0 | } |
3861 | | |
3862 | | SDValue SystemZTargetLowering::lowerVASTART_XPLINK(SDValue Op, |
3863 | 0 | SelectionDAG &DAG) const { |
3864 | 0 | MachineFunction &MF = DAG.getMachineFunction(); |
3865 | 0 | SystemZMachineFunctionInfo *FuncInfo = |
3866 | 0 | MF.getInfo<SystemZMachineFunctionInfo>(); |
3867 | |
3868 | 0 | SDLoc DL(Op); |
3869 | | |
3870 | | // vastart just stores the address of the VarArgsFrameIndex slot into the |
3871 | | // memory location argument. |
3872 | 0 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
3873 | 0 | SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); |
3874 | 0 | const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); |
3875 | 0 | return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1), |
3876 | 0 | MachinePointerInfo(SV)); |
3877 | 0 | } |
3878 | | |
3879 | | SDValue SystemZTargetLowering::lowerVASTART_ELF(SDValue Op, |
3880 | 0 | SelectionDAG &DAG) const { |
3881 | 0 | MachineFunction &MF = DAG.getMachineFunction(); |
3882 | 0 | SystemZMachineFunctionInfo *FuncInfo = |
3883 | 0 | MF.getInfo<SystemZMachineFunctionInfo>(); |
3884 | 0 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
3885 | |
3886 | 0 | SDValue Chain = Op.getOperand(0); |
3887 | 0 | SDValue Addr = Op.getOperand(1); |
3888 | 0 | const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); |
3889 | 0 | SDLoc DL(Op); |
3890 | | |
3891 | | // The initial values of each field. |
3892 | 0 | const unsigned NumFields = 4; |
3893 | 0 | SDValue Fields[NumFields] = { |
3894 | 0 | DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), DL, PtrVT), |
3895 | 0 | DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), DL, PtrVT), |
3896 | 0 | DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT), |
3897 | 0 | DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT) |
3898 | 0 | }; |
3899 | | |
3900 | | // Store each field into its respective slot. |
3901 | 0 | SDValue MemOps[NumFields]; |
3902 | 0 | unsigned Offset = 0; |
3903 | 0 | for (unsigned I = 0; I < NumFields; ++I) { |
3904 | 0 | SDValue FieldAddr = Addr; |
3905 | 0 | if (Offset != 0) |
3906 | 0 | FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr, |
3907 | 0 | DAG.getIntPtrConstant(Offset, DL)); |
3908 | 0 | MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr, |
3909 | 0 | MachinePointerInfo(SV, Offset)); |
3910 | 0 | Offset += 8; |
3911 | 0 | } |
3912 | 0 | return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps); |
3913 | 0 | } |
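 | | // The four stores above populate the s390x ELF va_list, whose ABI layout
 | | // (assumed here for illustration) uses 8-byte fields at offsets 0, 8, 16
 | | // and 24 -- matching the Offset += 8 loop:
 | | //
 | | //   typedef struct {
 | | //     long __gpr;                 // index of the next GPR argument
 | | //     long __fpr;                 // index of the next FPR argument
 | | //     void *__overflow_arg_area;  // stack (overflow) argument area
 | | //     void *__reg_save_area;      // register save area
 | | //   } va_list[1];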
3914 | | |
3915 | | SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op, |
3916 | 0 | SelectionDAG &DAG) const { |
3917 | 0 | SDValue Chain = Op.getOperand(0); |
3918 | 0 | SDValue DstPtr = Op.getOperand(1); |
3919 | 0 | SDValue SrcPtr = Op.getOperand(2); |
3920 | 0 | const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); |
3921 | 0 | const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); |
3922 | 0 | SDLoc DL(Op); |
3923 | |
3924 | 0 | uint32_t Sz = |
3925 | 0 | Subtarget.isTargetXPLINK64() ? getTargetMachine().getPointerSize(0) : 32; |
3926 | 0 | return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(Sz, DL), |
3927 | 0 | Align(8), /*isVolatile*/ false, /*AlwaysInline*/ false, |
3928 | 0 | /*isTailCall*/ false, MachinePointerInfo(DstSV), |
3929 | 0 | MachinePointerInfo(SrcSV)); |
3930 | 0 | } |
3931 | | |
3932 | | SDValue |
3933 | | SystemZTargetLowering::lowerDYNAMIC_STACKALLOC(SDValue Op, |
3934 | 0 | SelectionDAG &DAG) const { |
3935 | 0 | if (Subtarget.isTargetXPLINK64()) |
3936 | 0 | return lowerDYNAMIC_STACKALLOC_XPLINK(Op, DAG); |
3937 | 0 | else |
3938 | 0 | return lowerDYNAMIC_STACKALLOC_ELF(Op, DAG); |
3939 | 0 | } |
3940 | | |
3941 | | SDValue |
3942 | | SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_XPLINK(SDValue Op, |
3943 | 0 | SelectionDAG &DAG) const { |
3944 | 0 | const TargetFrameLowering *TFI = Subtarget.getFrameLowering(); |
3945 | 0 | MachineFunction &MF = DAG.getMachineFunction(); |
3946 | 0 | bool RealignOpt = !MF.getFunction().hasFnAttribute("no-realign-stack"); |
3947 | 0 | SDValue Chain = Op.getOperand(0); |
3948 | 0 | SDValue Size = Op.getOperand(1); |
3949 | 0 | SDValue Align = Op.getOperand(2); |
3950 | 0 | SDLoc DL(Op); |
3951 | | |
3952 | | // If the user has set the no-realign-stack function attribute,
3953 | | // ignore alloca alignments.
3954 | 0 | uint64_t AlignVal = (RealignOpt ? Align->getAsZExtVal() : 0); |
3955 | |
3956 | 0 | uint64_t StackAlign = TFI->getStackAlignment(); |
3957 | 0 | uint64_t RequiredAlign = std::max(AlignVal, StackAlign); |
3958 | 0 | uint64_t ExtraAlignSpace = RequiredAlign - StackAlign; |
3959 | |
3960 | 0 | SDValue NeededSpace = Size; |
3961 | | |
3962 | | // Add extra space for alignment if needed. |
3963 | 0 | EVT PtrVT = getPointerTy(MF.getDataLayout()); |
3964 | 0 | if (ExtraAlignSpace) |
3965 | 0 | NeededSpace = DAG.getNode(ISD::ADD, DL, PtrVT, NeededSpace, |
3966 | 0 | DAG.getConstant(ExtraAlignSpace, DL, PtrVT)); |
3967 | |
3968 | 0 | bool IsSigned = false; |
3969 | 0 | bool DoesNotReturn = false; |
3970 | 0 | bool IsReturnValueUsed = false; |
3971 | 0 | EVT VT = Op.getValueType(); |
3972 | 0 | SDValue AllocaCall = |
3973 | 0 | makeExternalCall(Chain, DAG, "@@ALCAXP", VT, ArrayRef(NeededSpace), |
3974 | 0 | CallingConv::C, IsSigned, DL, DoesNotReturn, |
3975 | 0 | IsReturnValueUsed) |
3976 | 0 | .first; |
3977 | | |
3978 | | // Perform a CopyFromReg from %GPR4 (the stack pointer register), chained
3979 | | // and glued to the end of the call so that it cannot be scheduled apart
3980 | | // from the call sequence.
3981 | 0 | auto &Regs = Subtarget.getSpecialRegisters<SystemZXPLINK64Registers>(); |
3982 | 0 | Register SPReg = Regs.getStackPointerRegister(); |
3983 | 0 | Chain = AllocaCall.getValue(1); |
3984 | 0 | SDValue Glue = AllocaCall.getValue(2); |
3985 | 0 | SDValue NewSPRegNode = DAG.getCopyFromReg(Chain, DL, SPReg, PtrVT, Glue); |
3986 | 0 | Chain = NewSPRegNode.getValue(1); |
3987 | |
3988 | 0 | MVT PtrMVT = getPointerMemTy(MF.getDataLayout()); |
3989 | 0 | SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, PtrMVT); |
3990 | 0 | SDValue Result = DAG.getNode(ISD::ADD, DL, PtrMVT, NewSPRegNode, ArgAdjust); |
3991 | | |
3992 | | // Dynamically realign if needed. |
3993 | 0 | if (ExtraAlignSpace) { |
3994 | 0 | Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result, |
3995 | 0 | DAG.getConstant(ExtraAlignSpace, DL, PtrVT)); |
3996 | 0 | Result = DAG.getNode(ISD::AND, DL, PtrVT, Result, |
3997 | 0 | DAG.getConstant(~(RequiredAlign - 1), DL, PtrVT)); |
3998 | 0 | } |
3999 | |
4000 | 0 | SDValue Ops[2] = {Result, Chain}; |
4001 | 0 | return DAG.getMergeValues(Ops, DL); |
4002 | 0 | } |
4003 | | |
4004 | | SDValue |
4005 | | SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_ELF(SDValue Op, |
4006 | 0 | SelectionDAG &DAG) const { |
4007 | 0 | const TargetFrameLowering *TFI = Subtarget.getFrameLowering(); |
4008 | 0 | MachineFunction &MF = DAG.getMachineFunction(); |
4009 | 0 | bool RealignOpt = !MF.getFunction().hasFnAttribute("no-realign-stack"); |
4010 | 0 | bool StoreBackchain = MF.getSubtarget<SystemZSubtarget>().hasBackChain(); |
4011 | |
4012 | 0 | SDValue Chain = Op.getOperand(0); |
4013 | 0 | SDValue Size = Op.getOperand(1); |
4014 | 0 | SDValue Align = Op.getOperand(2); |
4015 | 0 | SDLoc DL(Op); |
4016 | | |
4017 | | // If the user has set the no-realign-stack function attribute,
4018 | | // ignore alloca alignments.
4019 | 0 | uint64_t AlignVal = (RealignOpt ? Align->getAsZExtVal() : 0); |
4020 | |
4021 | 0 | uint64_t StackAlign = TFI->getStackAlignment(); |
4022 | 0 | uint64_t RequiredAlign = std::max(AlignVal, StackAlign); |
4023 | 0 | uint64_t ExtraAlignSpace = RequiredAlign - StackAlign; |
4024 | |
4025 | 0 | Register SPReg = getStackPointerRegisterToSaveRestore(); |
4026 | 0 | SDValue NeededSpace = Size; |
4027 | | |
4028 | | // Get a reference to the stack pointer. |
4029 | 0 | SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64); |
4030 | | |
4031 | | // If we need a backchain, save it now. |
4032 | 0 | SDValue Backchain; |
4033 | 0 | if (StoreBackchain) |
4034 | 0 | Backchain = DAG.getLoad(MVT::i64, DL, Chain, getBackchainAddress(OldSP, DAG), |
4035 | 0 | MachinePointerInfo()); |
4036 | | |
4037 | | // Add extra space for alignment if needed. |
4038 | 0 | if (ExtraAlignSpace) |
4039 | 0 | NeededSpace = DAG.getNode(ISD::ADD, DL, MVT::i64, NeededSpace, |
4040 | 0 | DAG.getConstant(ExtraAlignSpace, DL, MVT::i64)); |
4041 | | |
4042 | | // Get the new stack pointer value. |
4043 | 0 | SDValue NewSP; |
4044 | 0 | if (hasInlineStackProbe(MF)) { |
4045 | 0 | NewSP = DAG.getNode(SystemZISD::PROBED_ALLOCA, DL, |
4046 | 0 | DAG.getVTList(MVT::i64, MVT::Other), Chain, OldSP, NeededSpace); |
4047 | 0 | Chain = NewSP.getValue(1); |
4048 | 0 | } |
4049 | 0 | else { |
4050 | 0 | NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, NeededSpace); |
4051 | | // Copy the new stack pointer back. |
4052 | 0 | Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP); |
4053 | 0 | } |
4054 | | |
4055 | | // The allocated data lives above the 160 bytes allocated for the standard |
4056 | | // frame, plus any outgoing stack arguments. We don't know how much that |
4057 | | // amounts to yet, so emit a special ADJDYNALLOC placeholder. |
4058 | 0 | SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64); |
4059 | 0 | SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust); |
4060 | | |
4061 | | // Dynamically realign if needed. |
4062 | 0 | if (RequiredAlign > StackAlign) { |
4063 | 0 | Result = |
4064 | 0 | DAG.getNode(ISD::ADD, DL, MVT::i64, Result, |
4065 | 0 | DAG.getConstant(ExtraAlignSpace, DL, MVT::i64)); |
4066 | 0 | Result = |
4067 | 0 | DAG.getNode(ISD::AND, DL, MVT::i64, Result, |
4068 | 0 | DAG.getConstant(~(RequiredAlign - 1), DL, MVT::i64)); |
4069 | 0 | } |
4070 | |
4071 | 0 | if (StoreBackchain) |
4072 | 0 | Chain = DAG.getStore(Chain, DL, Backchain, getBackchainAddress(NewSP, DAG), |
4073 | 0 | MachinePointerInfo()); |
4074 | |
4075 | 0 | SDValue Ops[2] = { Result, Chain }; |
4076 | 0 | return DAG.getMergeValues(Ops, DL); |
4077 | 0 | } |
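 | | // A sketch of the realignment trick used above: over-allocate by the
 | | // extra space, then mask the result down to the required alignment. Any
 | | // address in [Result, Result + ExtraAlignSpace] lies inside the
 | | // allocation, so rounding down is safe:
 | | //
 | | //   Result = (Result + ExtraAlignSpace) & ~(RequiredAlign - 1);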
4078 | | |
4079 | | SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET( |
4080 | 0 | SDValue Op, SelectionDAG &DAG) const { |
4081 | 0 | SDLoc DL(Op); |
4082 | |
4083 | 0 | return DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64); |
4084 | 0 | } |
4085 | | |
4086 | | SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op, |
4087 | 0 | SelectionDAG &DAG) const { |
4088 | 0 | EVT VT = Op.getValueType(); |
4089 | 0 | SDLoc DL(Op); |
4090 | 0 | SDValue Ops[2]; |
4091 | 0 | if (is32Bit(VT)) |
4092 | | // Just do a normal 64-bit multiplication and extract the results. |
4093 | | // We define this so that it can be used for constant division. |
4094 | 0 | lowerMUL_LOHI32(DAG, DL, ISD::SIGN_EXTEND, Op.getOperand(0), |
4095 | 0 | Op.getOperand(1), Ops[1], Ops[0]); |
4096 | 0 | else if (Subtarget.hasMiscellaneousExtensions2()) |
4097 | | // SystemZISD::SMUL_LOHI returns the low result in the odd register and |
4098 | | // the high result in the even register. ISD::SMUL_LOHI is defined to |
4099 | | // return the low half first, so the results are in reverse order. |
4100 | 0 | lowerGR128Binary(DAG, DL, VT, SystemZISD::SMUL_LOHI, |
4101 | 0 | Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); |
4102 | 0 | else { |
4103 | | // Do a full 128-bit multiplication based on SystemZISD::UMUL_LOHI: |
4104 | | // |
4105 | | // (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64) |
4106 | | // |
4107 | | // but using the fact that the upper halves are either all zeros |
4108 | | // or all ones: |
4109 | | // |
4110 | | // (ll * rl) - ((lh & rl) << 64) - ((ll & rh) << 64) |
4111 | | // |
4112 | | // and grouping the two right-hand terms, since they are cheaper to
4113 | | // compute than the multiplication:
4114 | | // |
4115 | | // (ll * rl) - (((lh & rl) + (ll & rh)) << 64) |
4116 | 0 | SDValue C63 = DAG.getConstant(63, DL, MVT::i64); |
4117 | 0 | SDValue LL = Op.getOperand(0); |
4118 | 0 | SDValue RL = Op.getOperand(1); |
4119 | 0 | SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63); |
4120 | 0 | SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63); |
4121 | | // SystemZISD::UMUL_LOHI returns the low result in the odd register and |
4122 | | // the high result in the even register. ISD::SMUL_LOHI is defined to |
4123 | | // return the low half first, so the results are in reverse order. |
4124 | 0 | lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI, |
4125 | 0 | LL, RL, Ops[1], Ops[0]); |
4126 | 0 | SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH); |
4127 | 0 | SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL); |
4128 | 0 | SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL); |
4129 | 0 | Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum); |
4130 | 0 | } |
4131 | 0 | return DAG.getMergeValues(Ops, DL); |
4132 | 0 | } |
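 | | // A self-contained check of the identity above (illustrative only,
 | | // assuming a host compiler with __int128 support, e.g. GCC or Clang):
 | | //
 | | //   #include <cassert>
 | | //   #include <cstdint>
 | | //   int main() {
 | | //     int64_t L = -7, R = 0x123456789abcdef0;
 | | //     unsigned __int128 P = (unsigned __int128)(uint64_t)L * (uint64_t)R;
 | | //     uint64_t LH = (uint64_t)(L >> 63), RH = (uint64_t)(R >> 63);
 | | //     // Subtract ((lh & rl) + (ll & rh)) << 64; the u64 addition may
 | | //     // wrap, but the lost carry is a multiple of 2^128 and irrelevant.
 | | //     P -= (unsigned __int128)((LH & (uint64_t)R) + ((uint64_t)L & RH)) << 64;
 | | //     assert(P == (unsigned __int128)((__int128)L * R));
 | | //   }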
4133 | | |
4134 | | SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op, |
4135 | 0 | SelectionDAG &DAG) const { |
4136 | 0 | EVT VT = Op.getValueType(); |
4137 | 0 | SDLoc DL(Op); |
4138 | 0 | SDValue Ops[2]; |
4139 | 0 | if (is32Bit(VT)) |
4140 | | // Just do a normal 64-bit multiplication and extract the results. |
4141 | | // We define this so that it can be used for constant division. |
4142 | 0 | lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0), |
4143 | 0 | Op.getOperand(1), Ops[1], Ops[0]); |
4144 | 0 | else |
4145 | | // SystemZISD::UMUL_LOHI returns the low result in the odd register and |
4146 | | // the high result in the even register. ISD::UMUL_LOHI is defined to |
4147 | | // return the low half first, so the results are in reverse order. |
4148 | 0 | lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI, |
4149 | 0 | Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); |
4150 | 0 | return DAG.getMergeValues(Ops, DL); |
4151 | 0 | } |
4152 | | |
4153 | | SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op, |
4154 | 0 | SelectionDAG &DAG) const { |
4155 | 0 | SDValue Op0 = Op.getOperand(0); |
4156 | 0 | SDValue Op1 = Op.getOperand(1); |
4157 | 0 | EVT VT = Op.getValueType(); |
4158 | 0 | SDLoc DL(Op); |
4159 | | |
4160 | | // We use DSGF for 32-bit division. This means the first operand must |
4161 | | // always be 64-bit, and the second operand should be 32-bit whenever |
4162 | | // that is possible, to improve performance. |
4163 | 0 | if (is32Bit(VT)) |
4164 | 0 | Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0); |
4165 | 0 | else if (DAG.ComputeNumSignBits(Op1) > 32) |
4166 | 0 | Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1); |
4167 | | |
4168 | | // DSG(F) returns the remainder in the even register and the |
4169 | | // quotient in the odd register. |
4170 | 0 | SDValue Ops[2]; |
4171 | 0 | lowerGR128Binary(DAG, DL, VT, SystemZISD::SDIVREM, Op0, Op1, Ops[1], Ops[0]); |
4172 | 0 | return DAG.getMergeValues(Ops, DL); |
4173 | 0 | } |
4174 | | |
4175 | | SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op, |
4176 | 0 | SelectionDAG &DAG) const { |
4177 | 0 | EVT VT = Op.getValueType(); |
4178 | 0 | SDLoc DL(Op); |
4179 | | |
4180 | | // DL(G) returns the remainder in the even register and the |
4181 | | // quotient in the odd register. |
4182 | 0 | SDValue Ops[2]; |
4183 | 0 | lowerGR128Binary(DAG, DL, VT, SystemZISD::UDIVREM, |
4184 | 0 | Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); |
4185 | 0 | return DAG.getMergeValues(Ops, DL); |
4186 | 0 | } |
4187 | | |
4188 | 0 | SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const { |
4189 | 0 | assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation"); |
4190 | | |
4191 | | // Get the known-zero masks for each operand. |
4192 | 0 | SDValue Ops[] = {Op.getOperand(0), Op.getOperand(1)}; |
4193 | 0 | KnownBits Known[2] = {DAG.computeKnownBits(Ops[0]), |
4194 | 0 | DAG.computeKnownBits(Ops[1])}; |
4195 | | |
4196 | | // See if the upper 32 bits of one operand and the lower 32 bits of the |
4197 | | // other are known zero. They are the low and high operands respectively. |
4198 | 0 | uint64_t Masks[] = { Known[0].Zero.getZExtValue(), |
4199 | 0 | Known[1].Zero.getZExtValue() }; |
4200 | 0 | unsigned High, Low; |
4201 | 0 | if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff) |
4202 | 0 | High = 1, Low = 0; |
4203 | 0 | else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff) |
4204 | 0 | High = 0, Low = 1; |
4205 | 0 | else |
4206 | 0 | return Op; |
4207 | | |
4208 | 0 | SDValue LowOp = Ops[Low]; |
4209 | 0 | SDValue HighOp = Ops[High]; |
4210 | | |
4211 | | // If the high part is a constant, we're better off using IILH. |
4212 | 0 | if (HighOp.getOpcode() == ISD::Constant) |
4213 | 0 | return Op; |
4214 | | |
4215 | | // If the low part is a constant that is outside the range of LHI, |
4216 | | // then we're better off using IILF. |
4217 | 0 | if (LowOp.getOpcode() == ISD::Constant) { |
4218 | 0 | int64_t Value = int32_t(LowOp->getAsZExtVal()); |
4219 | 0 | if (!isInt<16>(Value)) |
4220 | 0 | return Op; |
4221 | 0 | } |
4222 | | |
4223 | | // Check whether the high part is an AND that doesn't change the |
4224 | | // high 32 bits and just masks out low bits. We can skip it if so. |
4225 | 0 | if (HighOp.getOpcode() == ISD::AND && |
4226 | 0 | HighOp.getOperand(1).getOpcode() == ISD::Constant) { |
4227 | 0 | SDValue HighOp0 = HighOp.getOperand(0); |
4228 | 0 | uint64_t Mask = HighOp.getConstantOperandVal(1); |
4229 | 0 | if (DAG.MaskedValueIsZero(HighOp0, APInt(64, ~(Mask | 0xffffffff)))) |
4230 | 0 | HighOp = HighOp0; |
4231 | 0 | } |
4232 | | |
4233 | | // Take advantage of the fact that all GR32 operations only change the |
4234 | | // low 32 bits by truncating Low to an i32 and inserting it directly |
4235 | | // using a subreg. The interesting cases are those where the truncation |
4236 | | // can be folded. |
4237 | 0 | SDLoc DL(Op); |
4238 | 0 | SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp); |
4239 | 0 | return DAG.getTargetInsertSubreg(SystemZ::subreg_l32, DL, |
4240 | 0 | MVT::i64, HighOp, Low32); |
4241 | 0 | } |
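 | | // A sketch of the transformation above: when one operand contributes only
 | | // the high 32 bits and the other only the low 32 bits, the OR is a plain
 | | // low-word insertion, which maps to a GR32 move into subreg_l32 of the
 | | // high operand:
 | | //
 | | //   Result = (High & 0xffffffff00000000ULL) | (uint32_t)Low;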
4242 | | |
4243 | | // Lower SADDO/SSUBO/UADDO/USUBO nodes. |
4244 | | SDValue SystemZTargetLowering::lowerXALUO(SDValue Op, |
4245 | 0 | SelectionDAG &DAG) const { |
4246 | 0 | SDNode *N = Op.getNode(); |
4247 | 0 | SDValue LHS = N->getOperand(0); |
4248 | 0 | SDValue RHS = N->getOperand(1); |
4249 | 0 | SDLoc DL(N); |
4250 | |
4251 | 0 | if (N->getValueType(0) == MVT::i128) { |
4252 | 0 | unsigned BaseOp = 0; |
4253 | 0 | unsigned FlagOp = 0; |
4254 | 0 | switch (Op.getOpcode()) { |
4255 | 0 | default: llvm_unreachable("Unknown instruction!"); |
4256 | 0 | case ISD::UADDO: |
4257 | 0 | BaseOp = ISD::ADD; |
4258 | 0 | FlagOp = SystemZISD::VACC; |
4259 | 0 | break; |
4260 | 0 | case ISD::USUBO: |
4261 | 0 | BaseOp = ISD::SUB; |
4262 | 0 | FlagOp = SystemZISD::VSCBI; |
4263 | 0 | break; |
4264 | 0 | } |
4265 | 0 | SDValue Result = DAG.getNode(BaseOp, DL, MVT::i128, LHS, RHS); |
4266 | 0 | SDValue Flag = DAG.getNode(FlagOp, DL, MVT::i128, LHS, RHS); |
4267 | 0 | Flag = DAG.getNode(ISD::AssertZext, DL, MVT::i128, Flag, |
4268 | 0 | DAG.getValueType(MVT::i1)); |
4269 | 0 | Flag = DAG.getZExtOrTrunc(Flag, DL, N->getValueType(1)); |
4270 | 0 | return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, Flag); |
4271 | 0 | } |
4272 | | |
4273 | 0 | unsigned BaseOp = 0; |
4274 | 0 | unsigned CCValid = 0; |
4275 | 0 | unsigned CCMask = 0; |
4276 | |
4277 | 0 | switch (Op.getOpcode()) { |
4278 | 0 | default: llvm_unreachable("Unknown instruction!"); |
4279 | 0 | case ISD::SADDO: |
4280 | 0 | BaseOp = SystemZISD::SADDO; |
4281 | 0 | CCValid = SystemZ::CCMASK_ARITH; |
4282 | 0 | CCMask = SystemZ::CCMASK_ARITH_OVERFLOW; |
4283 | 0 | break; |
4284 | 0 | case ISD::SSUBO: |
4285 | 0 | BaseOp = SystemZISD::SSUBO; |
4286 | 0 | CCValid = SystemZ::CCMASK_ARITH; |
4287 | 0 | CCMask = SystemZ::CCMASK_ARITH_OVERFLOW; |
4288 | 0 | break; |
4289 | 0 | case ISD::UADDO: |
4290 | 0 | BaseOp = SystemZISD::UADDO; |
4291 | 0 | CCValid = SystemZ::CCMASK_LOGICAL; |
4292 | 0 | CCMask = SystemZ::CCMASK_LOGICAL_CARRY; |
4293 | 0 | break; |
4294 | 0 | case ISD::USUBO: |
4295 | 0 | BaseOp = SystemZISD::USUBO; |
4296 | 0 | CCValid = SystemZ::CCMASK_LOGICAL; |
4297 | 0 | CCMask = SystemZ::CCMASK_LOGICAL_BORROW; |
4298 | 0 | break; |
4299 | 0 | } |
4300 | | |
4301 | 0 | SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32); |
4302 | 0 | SDValue Result = DAG.getNode(BaseOp, DL, VTs, LHS, RHS); |
4303 | |
4304 | 0 | SDValue SetCC = emitSETCC(DAG, DL, Result.getValue(1), CCValid, CCMask); |
4305 | 0 | if (N->getValueType(1) == MVT::i1) |
4306 | 0 | SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC); |
4307 | |
4308 | 0 | return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, SetCC); |
4309 | 0 | } |
4310 | | |
4311 | 0 | static bool isAddCarryChain(SDValue Carry) { |
4312 | 0 | while (Carry.getOpcode() == ISD::UADDO_CARRY) |
4313 | 0 | Carry = Carry.getOperand(2); |
4314 | 0 | return Carry.getOpcode() == ISD::UADDO; |
4315 | 0 | } |
4316 | | |
4317 | 0 | static bool isSubBorrowChain(SDValue Carry) { |
4318 | 0 | while (Carry.getOpcode() == ISD::USUBO_CARRY) |
4319 | 0 | Carry = Carry.getOperand(2); |
4320 | 0 | return Carry.getOpcode() == ISD::USUBO; |
4321 | 0 | } |
4322 | | |
4323 | | // Lower UADDO_CARRY/USUBO_CARRY nodes. |
4324 | | SDValue SystemZTargetLowering::lowerUADDSUBO_CARRY(SDValue Op, |
4325 | 0 | SelectionDAG &DAG) const { |
4326 | |
4327 | 0 | SDNode *N = Op.getNode(); |
4328 | 0 | MVT VT = N->getSimpleValueType(0); |
4329 | | |
4330 | | // Let legalize expand this if it isn't a legal type yet. |
4331 | 0 | if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) |
4332 | 0 | return SDValue(); |
4333 | | |
4334 | 0 | SDValue LHS = N->getOperand(0); |
4335 | 0 | SDValue RHS = N->getOperand(1); |
4336 | 0 | SDValue Carry = Op.getOperand(2); |
4337 | 0 | SDLoc DL(N); |
4338 | |
4339 | 0 | if (VT == MVT::i128) { |
4340 | 0 | unsigned BaseOp = 0; |
4341 | 0 | unsigned FlagOp = 0; |
4342 | 0 | switch (Op.getOpcode()) { |
4343 | 0 | default: llvm_unreachable("Unknown instruction!"); |
4344 | 0 | case ISD::UADDO_CARRY: |
4345 | 0 | BaseOp = SystemZISD::VAC; |
4346 | 0 | FlagOp = SystemZISD::VACCC; |
4347 | 0 | break; |
4348 | 0 | case ISD::USUBO_CARRY: |
4349 | 0 | BaseOp = SystemZISD::VSBI; |
4350 | 0 | FlagOp = SystemZISD::VSBCBI; |
4351 | 0 | break; |
4352 | 0 | } |
4353 | 0 | Carry = DAG.getZExtOrTrunc(Carry, DL, MVT::i128); |
4354 | 0 | SDValue Result = DAG.getNode(BaseOp, DL, MVT::i128, LHS, RHS, Carry); |
4355 | 0 | SDValue Flag = DAG.getNode(FlagOp, DL, MVT::i128, LHS, RHS, Carry); |
4356 | 0 | Flag = DAG.getNode(ISD::AssertZext, DL, MVT::i128, Flag, |
4357 | 0 | DAG.getValueType(MVT::i1)); |
4358 | 0 | Flag = DAG.getZExtOrTrunc(Flag, DL, N->getValueType(1)); |
4359 | 0 | return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, Flag); |
4360 | 0 | } |
4361 | | |
4362 | 0 | unsigned BaseOp = 0; |
4363 | 0 | unsigned CCValid = 0; |
4364 | 0 | unsigned CCMask = 0; |
4365 | |
4366 | 0 | switch (Op.getOpcode()) { |
4367 | 0 | default: llvm_unreachable("Unknown instruction!"); |
4368 | 0 | case ISD::UADDO_CARRY: |
4369 | 0 | if (!isAddCarryChain(Carry)) |
4370 | 0 | return SDValue(); |
4371 | | |
4372 | 0 | BaseOp = SystemZISD::ADDCARRY; |
4373 | 0 | CCValid = SystemZ::CCMASK_LOGICAL; |
4374 | 0 | CCMask = SystemZ::CCMASK_LOGICAL_CARRY; |
4375 | 0 | break; |
4376 | 0 | case ISD::USUBO_CARRY: |
4377 | 0 | if (!isSubBorrowChain(Carry)) |
4378 | 0 | return SDValue(); |
4379 | | |
4380 | 0 | BaseOp = SystemZISD::SUBCARRY; |
4381 | 0 | CCValid = SystemZ::CCMASK_LOGICAL; |
4382 | 0 | CCMask = SystemZ::CCMASK_LOGICAL_BORROW; |
4383 | 0 | break; |
4384 | 0 | } |
4385 | | |
4386 | | // Set the condition code from the carry flag. |
4387 | 0 | Carry = DAG.getNode(SystemZISD::GET_CCMASK, DL, MVT::i32, Carry, |
4388 | 0 | DAG.getConstant(CCValid, DL, MVT::i32), |
4389 | 0 | DAG.getConstant(CCMask, DL, MVT::i32)); |
4390 | |
4391 | 0 | SDVTList VTs = DAG.getVTList(VT, MVT::i32); |
4392 | 0 | SDValue Result = DAG.getNode(BaseOp, DL, VTs, LHS, RHS, Carry); |
4393 | |
4394 | 0 | SDValue SetCC = emitSETCC(DAG, DL, Result.getValue(1), CCValid, CCMask); |
4395 | 0 | if (N->getValueType(1) == MVT::i1) |
4396 | 0 | SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC); |
4397 | |
4398 | 0 | return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, SetCC); |
4399 | 0 | } |
4400 | | |
4401 | | SDValue SystemZTargetLowering::lowerCTPOP(SDValue Op, |
4402 | 0 | SelectionDAG &DAG) const { |
4403 | 0 | EVT VT = Op.getValueType(); |
4404 | 0 | SDLoc DL(Op); |
4405 | 0 | Op = Op.getOperand(0); |
4406 | |
4407 | 0 | if (VT.getScalarSizeInBits() == 128) { |
4408 | 0 | Op = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op); |
4409 | 0 | Op = DAG.getNode(ISD::CTPOP, DL, MVT::v2i64, Op); |
4410 | 0 | SDValue Tmp = DAG.getSplatBuildVector(MVT::v2i64, DL, |
4411 | 0 | DAG.getConstant(0, DL, MVT::i64)); |
4412 | 0 | Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp); |
4413 | 0 | return Op; |
4414 | 0 | } |
4415 | | |
4416 | | // Handle vector types via VPOPCT. |
4417 | 0 | if (VT.isVector()) { |
4418 | 0 | Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Op); |
4419 | 0 | Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::v16i8, Op); |
4420 | 0 | switch (VT.getScalarSizeInBits()) { |
4421 | 0 | case 8: |
4422 | 0 | break; |
4423 | 0 | case 16: { |
4424 | 0 | Op = DAG.getNode(ISD::BITCAST, DL, VT, Op); |
4425 | 0 | SDValue Shift = DAG.getConstant(8, DL, MVT::i32); |
4426 | 0 | SDValue Tmp = DAG.getNode(SystemZISD::VSHL_BY_SCALAR, DL, VT, Op, Shift); |
4427 | 0 | Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp); |
4428 | 0 | Op = DAG.getNode(SystemZISD::VSRL_BY_SCALAR, DL, VT, Op, Shift); |
4429 | 0 | break; |
4430 | 0 | } |
4431 | 0 | case 32: { |
4432 | 0 | SDValue Tmp = DAG.getSplatBuildVector(MVT::v16i8, DL, |
4433 | 0 | DAG.getConstant(0, DL, MVT::i32)); |
4434 | 0 | Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp); |
4435 | 0 | break; |
4436 | 0 | } |
4437 | 0 | case 64: { |
4438 | 0 | SDValue Tmp = DAG.getSplatBuildVector(MVT::v16i8, DL, |
4439 | 0 | DAG.getConstant(0, DL, MVT::i32)); |
4440 | 0 | Op = DAG.getNode(SystemZISD::VSUM, DL, MVT::v4i32, Op, Tmp); |
4441 | 0 | Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp); |
4442 | 0 | break; |
4443 | 0 | } |
4444 | 0 | default: |
4445 | 0 | llvm_unreachable("Unexpected type"); |
4446 | 0 | } |
4447 | 0 | return Op; |
4448 | 0 | } |
4449 | | |
4450 | | // Get the known-zero mask for the operand. |
4451 | 0 | KnownBits Known = DAG.computeKnownBits(Op); |
4452 | 0 | unsigned NumSignificantBits = Known.getMaxValue().getActiveBits(); |
4453 | 0 | if (NumSignificantBits == 0) |
4454 | 0 | return DAG.getConstant(0, DL, VT); |
4455 | | |
4456 | | // Skip known-zero high parts of the operand. |
4457 | 0 | int64_t OrigBitSize = VT.getSizeInBits(); |
4458 | 0 | int64_t BitSize = llvm::bit_ceil(NumSignificantBits); |
4459 | 0 | BitSize = std::min(BitSize, OrigBitSize); |
4460 | | |
4461 | | // The POPCNT instruction counts the number of bits in each byte. |
4462 | 0 | Op = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op); |
4463 | 0 | Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::i64, Op); |
4464 | 0 | Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op); |
4465 | | |
4466 | | // Add up per-byte counts in a binary tree. All bits of Op at |
4467 | | // position larger than BitSize remain zero throughout. |
4468 | 0 | for (int64_t I = BitSize / 2; I >= 8; I = I / 2) { |
4469 | 0 | SDValue Tmp = DAG.getNode(ISD::SHL, DL, VT, Op, DAG.getConstant(I, DL, VT)); |
4470 | 0 | if (BitSize != OrigBitSize) |
4471 | 0 | Tmp = DAG.getNode(ISD::AND, DL, VT, Tmp, |
4472 | 0 | DAG.getConstant(((uint64_t)1 << BitSize) - 1, DL, VT)); |
4473 | 0 | Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp); |
4474 | 0 | } |
4475 | | |
4476 | | // Extract overall result from high byte. |
4477 | 0 | if (BitSize > 8) |
4478 | 0 | Op = DAG.getNode(ISD::SRL, DL, VT, Op, |
4479 | 0 | DAG.getConstant(BitSize - 8, DL, VT)); |
4480 | |
4481 | 0 | return Op; |
4482 | 0 | } |
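 | | // Illustrative expansion of the tree above for a full 64-bit input
 | | // (BitSize == 64), after POPCNT has left a per-byte population count in
 | | // each byte of X:
 | | //
 | | //   X += X << 32;    // I = 32
 | | //   X += X << 16;    // I = 16
 | | //   X += X << 8;     // I = 8
 | | //   return X >> 56;  // BitSize - 8: the total collects in the top byte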
4483 | | |
4484 | | SDValue SystemZTargetLowering::lowerATOMIC_FENCE(SDValue Op, |
4485 | 0 | SelectionDAG &DAG) const { |
4486 | 0 | SDLoc DL(Op); |
4487 | 0 | AtomicOrdering FenceOrdering = |
4488 | 0 | static_cast<AtomicOrdering>(Op.getConstantOperandVal(1)); |
4489 | 0 | SyncScope::ID FenceSSID = |
4490 | 0 | static_cast<SyncScope::ID>(Op.getConstantOperandVal(2)); |
4491 | | |
4492 | | // The only fence that needs an instruction is a sequentially-consistent |
4493 | | // cross-thread fence. |
4494 | 0 | if (FenceOrdering == AtomicOrdering::SequentiallyConsistent && |
4495 | 0 | FenceSSID == SyncScope::System) { |
4496 | 0 | return SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, MVT::Other, |
4497 | 0 | Op.getOperand(0)), |
4498 | 0 | 0); |
4499 | 0 | } |
4500 | | |
4501 | | // MEMBARRIER is a compiler barrier; it codegens to a no-op. |
4502 | 0 | return DAG.getNode(ISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0)); |
4503 | 0 | } |
4504 | | |
4505 | | // Op is an atomic load. Lower it into a normal volatile load. |
4506 | | SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op, |
4507 | 0 | SelectionDAG &DAG) const { |
4508 | 0 | auto *Node = cast<AtomicSDNode>(Op.getNode()); |
4509 | 0 | if (Node->getMemoryVT() == MVT::i128) { |
4510 | | // Use same code to handle both legal and non-legal i128 types. |
4511 | 0 | SmallVector<SDValue, 2> Results; |
4512 | 0 | LowerOperationWrapper(Node, Results, DAG); |
4513 | 0 | return DAG.getMergeValues(Results, SDLoc(Op)); |
4514 | 0 | } |
4515 | 0 | return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), Op.getValueType(), |
4516 | 0 | Node->getChain(), Node->getBasePtr(), |
4517 | 0 | Node->getMemoryVT(), Node->getMemOperand()); |
4518 | 0 | } |
4519 | | |
4520 | | // Op is an atomic store. Lower it into a normal volatile store. |
4521 | | SDValue SystemZTargetLowering::lowerATOMIC_STORE(SDValue Op, |
4522 | 0 | SelectionDAG &DAG) const { |
4523 | 0 | auto *Node = cast<AtomicSDNode>(Op.getNode()); |
4524 | 0 | if (Node->getMemoryVT() == MVT::i128) { |
4525 | | // Use same code to handle both legal and non-legal i128 types. |
4526 | 0 | SmallVector<SDValue, 1> Results; |
4527 | 0 | LowerOperationWrapper(Node, Results, DAG); |
4528 | 0 | return DAG.getMergeValues(Results, SDLoc(Op)); |
4529 | 0 | } |
4530 | 0 | SDValue Chain = DAG.getTruncStore(Node->getChain(), SDLoc(Op), Node->getVal(), |
4531 | 0 | Node->getBasePtr(), Node->getMemoryVT(), |
4532 | 0 | Node->getMemOperand()); |
4533 | | // We have to enforce sequential consistency by performing a |
4534 | | // serialization operation after the store. |
4535 | 0 | if (Node->getSuccessOrdering() == AtomicOrdering::SequentiallyConsistent) |
4536 | 0 | Chain = SDValue(DAG.getMachineNode(SystemZ::Serialize, SDLoc(Op), |
4537 | 0 | MVT::Other, Chain), 0); |
4538 | 0 | return Chain; |
4539 | 0 | } |
4540 | | |
4541 | | // Prepare for a compare-and-swap of a subword operation, which must be
4542 | | // done on the naturally aligned 4-byte word containing the field.
4543 | | static void getCSAddressAndShifts(SDValue Addr, SelectionDAG &DAG, SDLoc DL, |
4544 | | SDValue &AlignedAddr, SDValue &BitShift, |
4545 | 0 | SDValue &NegBitShift) { |
4546 | 0 | EVT PtrVT = Addr.getValueType(); |
4547 | 0 | EVT WideVT = MVT::i32; |
4548 | | |
4549 | | // Get the address of the containing word. |
4550 | 0 | AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr, |
4551 | 0 | DAG.getConstant(-4, DL, PtrVT)); |
4552 | | |
4553 | | // Get the number of bits that the word must be rotated left in order |
4554 | | // to bring the field to the top bits of a GR32. |
4555 | 0 | BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr, |
4556 | 0 | DAG.getConstant(3, DL, PtrVT)); |
4557 | 0 | BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift); |
4558 | | |
4559 | | // Get the complementing shift amount, for rotating a field in the top |
4560 | | // bits back to its proper position. |
4561 | 0 | NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT, |
4562 | 0 | DAG.getConstant(0, DL, WideVT), BitShift); |
4563 | |
4564 | 0 | } |
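 | | // A sketch of the values computed above for the big-endian layout, where
 | | // the rotate instructions use the shift amount modulo 32 (so Addr * 8 is
 | | // equivalent to (Addr & 3) * 8):
 | | //
 | | //   AlignedAddr = Addr & ~3;       // containing 32-bit word
 | | //   BitShift    = (Addr & 3) * 8;  // rotate field to the top of a GR32
 | | //   NegBitShift = -BitShift;       // rotate it back down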
4565 | | |
4566 | | // Op is an 8-, 16- or 32-bit ATOMIC_LOAD_* operation. Lower the first
4567 | | // two into the fullword ATOMIC_LOADW_* operation given by Opcode. |
4568 | | SDValue SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op, |
4569 | | SelectionDAG &DAG, |
4570 | 0 | unsigned Opcode) const { |
4571 | 0 | auto *Node = cast<AtomicSDNode>(Op.getNode()); |
4572 | | |
4573 | | // 32-bit operations need no special handling. |
4574 | 0 | EVT NarrowVT = Node->getMemoryVT(); |
4575 | 0 | EVT WideVT = MVT::i32; |
4576 | 0 | if (NarrowVT == WideVT) |
4577 | 0 | return Op; |
4578 | | |
4579 | 0 | int64_t BitSize = NarrowVT.getSizeInBits(); |
4580 | 0 | SDValue ChainIn = Node->getChain(); |
4581 | 0 | SDValue Addr = Node->getBasePtr(); |
4582 | 0 | SDValue Src2 = Node->getVal(); |
4583 | 0 | MachineMemOperand *MMO = Node->getMemOperand(); |
4584 | 0 | SDLoc DL(Node); |
4585 | | |
4586 | | // Convert atomic subtracts of constants into additions. |
4587 | 0 | if (Opcode == SystemZISD::ATOMIC_LOADW_SUB) |
4588 | 0 | if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) { |
4589 | 0 | Opcode = SystemZISD::ATOMIC_LOADW_ADD; |
4590 | 0 | Src2 = DAG.getConstant(-Const->getSExtValue(), DL, Src2.getValueType()); |
4591 | 0 | } |
4592 | |
4593 | 0 | SDValue AlignedAddr, BitShift, NegBitShift; |
4594 | 0 | getCSAddressAndShifts(Addr, DAG, DL, AlignedAddr, BitShift, NegBitShift); |
4595 | | |
4596 | | // Extend the source operand to 32 bits and prepare it for the inner loop. |
4597 | | // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other |
4598 | | // operations require the source to be shifted in advance. (This shift |
4599 | | // can be folded if the source is constant.) For AND and NAND, the lower |
4600 | | // bits must be set, while for other opcodes they should be left clear. |
4601 | 0 | if (Opcode != SystemZISD::ATOMIC_SWAPW) |
4602 | 0 | Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2, |
4603 | 0 | DAG.getConstant(32 - BitSize, DL, WideVT)); |
4604 | 0 | if (Opcode == SystemZISD::ATOMIC_LOADW_AND || |
4605 | 0 | Opcode == SystemZISD::ATOMIC_LOADW_NAND) |
4606 | 0 | Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2, |
4607 | 0 | DAG.getConstant(uint32_t(-1) >> BitSize, DL, WideVT)); |
4608 | | |
4609 | | // Construct the ATOMIC_LOADW_* node. |
4610 | 0 | SDVTList VTList = DAG.getVTList(WideVT, MVT::Other); |
4611 | 0 | SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift, |
4612 | 0 | DAG.getConstant(BitSize, DL, WideVT) }; |
4613 | 0 | SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops, |
4614 | 0 | NarrowVT, MMO); |
4615 | | |
4616 | | // Rotate the result of the final CS so that the field is in the lower |
4617 | | // bits of a GR32, then truncate it. |
4618 | 0 | SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift, |
4619 | 0 | DAG.getConstant(BitSize, DL, WideVT)); |
4620 | 0 | SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift); |
4621 | |
|
4622 | 0 | SDValue RetOps[2] = { Result, AtomicOp.getValue(1) }; |
4623 | 0 | return DAG.getMergeValues(RetOps, DL); |
4624 | 0 | } |
4625 | | |
4626 | | // Op is an ATOMIC_LOAD_SUB operation. Lower 8- and 16-bit operations into |
4627 | | // ATOMIC_LOADW_SUBs and convert 32- and 64-bit operations into additions. |
4628 | | SDValue SystemZTargetLowering::lowerATOMIC_LOAD_SUB(SDValue Op, |
4629 | 0 | SelectionDAG &DAG) const { |
4630 | 0 | auto *Node = cast<AtomicSDNode>(Op.getNode()); |
4631 | 0 | EVT MemVT = Node->getMemoryVT(); |
4632 | 0 | if (MemVT == MVT::i32 || MemVT == MVT::i64) { |
4633 | | // A full-width operation: negate and use LAA(G). |
4634 | 0 | assert(Op.getValueType() == MemVT && "Mismatched VTs"); |
4635 | 0 | assert(Subtarget.hasInterlockedAccess1() && |
4636 | 0 | "Should have been expanded by AtomicExpand pass."); |
4637 | 0 | SDValue Src2 = Node->getVal(); |
4638 | 0 | SDLoc DL(Src2); |
4639 | 0 | SDValue NegSrc2 = |
4640 | 0 | DAG.getNode(ISD::SUB, DL, MemVT, DAG.getConstant(0, DL, MemVT), Src2); |
4641 | 0 | return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, MemVT, |
4642 | 0 | Node->getChain(), Node->getBasePtr(), NegSrc2, |
4643 | 0 | Node->getMemOperand()); |
4644 | 0 | } |
4645 | | |
4646 | 0 | return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB); |
4647 | 0 | } |
4648 | | |
4649 | | // Lower 8/16/32/64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS node. |
4650 | | SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op, |
4651 | 0 | SelectionDAG &DAG) const { |
4652 | 0 | auto *Node = cast<AtomicSDNode>(Op.getNode()); |
4653 | 0 | SDValue ChainIn = Node->getOperand(0); |
4654 | 0 | SDValue Addr = Node->getOperand(1); |
4655 | 0 | SDValue CmpVal = Node->getOperand(2); |
4656 | 0 | SDValue SwapVal = Node->getOperand(3); |
4657 | 0 | MachineMemOperand *MMO = Node->getMemOperand(); |
4658 | 0 | SDLoc DL(Node); |
4659 | |
4660 | 0 | if (Node->getMemoryVT() == MVT::i128) { |
4661 | | // Use same code to handle both legal and non-legal i128 types. |
4662 | 0 | SmallVector<SDValue, 3> Results; |
4663 | 0 | LowerOperationWrapper(Node, Results, DAG); |
4664 | 0 | return DAG.getMergeValues(Results, DL); |
4665 | 0 | } |
4666 | | |
4667 | | // We have native support for 32-bit and 64-bit compare and swap, but we |
4668 | | // still need to expand extracting the "success" result from the CC. |
4669 | 0 | EVT NarrowVT = Node->getMemoryVT(); |
4670 | 0 | EVT WideVT = NarrowVT == MVT::i64 ? MVT::i64 : MVT::i32; |
4671 | 0 | if (NarrowVT == WideVT) { |
4672 | 0 | SDVTList Tys = DAG.getVTList(WideVT, MVT::i32, MVT::Other); |
4673 | 0 | SDValue Ops[] = { ChainIn, Addr, CmpVal, SwapVal }; |
4674 | 0 | SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAP, |
4675 | 0 | DL, Tys, Ops, NarrowVT, MMO); |
4676 | 0 | SDValue Success = emitSETCC(DAG, DL, AtomicOp.getValue(1), |
4677 | 0 | SystemZ::CCMASK_CS, SystemZ::CCMASK_CS_EQ); |
4678 | |
4679 | 0 | DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), AtomicOp.getValue(0)); |
4680 | 0 | DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success); |
4681 | 0 | DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), AtomicOp.getValue(2)); |
4682 | 0 | return SDValue(); |
4683 | 0 | } |
4684 | | |
4685 | | // Convert 8-bit and 16-bit compare and swap to a loop, implemented |
4686 | | // via a fullword ATOMIC_CMP_SWAPW operation. |
4687 | 0 | int64_t BitSize = NarrowVT.getSizeInBits(); |
4688 | |
4689 | 0 | SDValue AlignedAddr, BitShift, NegBitShift; |
4690 | 0 | getCSAddressAndShifts(Addr, DAG, DL, AlignedAddr, BitShift, NegBitShift); |
4691 | | |
4692 | | // Construct the ATOMIC_CMP_SWAPW node. |
4693 | 0 | SDVTList VTList = DAG.getVTList(WideVT, MVT::i32, MVT::Other); |
4694 | 0 | SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift, |
4695 | 0 | NegBitShift, DAG.getConstant(BitSize, DL, WideVT) }; |
4696 | 0 | SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL, |
4697 | 0 | VTList, Ops, NarrowVT, MMO); |
4698 | 0 | SDValue Success = emitSETCC(DAG, DL, AtomicOp.getValue(1), |
4699 | 0 | SystemZ::CCMASK_ICMP, SystemZ::CCMASK_CMP_EQ); |
4700 | | |
4701 | | // emitAtomicCmpSwapW() will zero-extend the result (original value).
4702 | 0 | SDValue OrigVal = DAG.getNode(ISD::AssertZext, DL, WideVT, AtomicOp.getValue(0), |
4703 | 0 | DAG.getValueType(NarrowVT)); |
4704 | 0 | DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), OrigVal); |
4705 | 0 | DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success); |
4706 | 0 | DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), AtomicOp.getValue(2)); |
4707 | 0 | return SDValue(); |
4708 | 0 | } |
4709 | | |
4710 | | MachineMemOperand::Flags |
4711 | 0 | SystemZTargetLowering::getTargetMMOFlags(const Instruction &I) const { |
4712 | | // Because of how we convert atomic_load and atomic_store to normal loads and |
4713 | | // stores in the DAG, we need to ensure that the MMOs are marked volatile |
4714 | | // since DAGCombine hasn't been updated to account for atomic but
4715 | | // non-volatile loads. (See D57601)
4716 | 0 | if (auto *SI = dyn_cast<StoreInst>(&I)) |
4717 | 0 | if (SI->isAtomic()) |
4718 | 0 | return MachineMemOperand::MOVolatile; |
4719 | 0 | if (auto *LI = dyn_cast<LoadInst>(&I)) |
4720 | 0 | if (LI->isAtomic()) |
4721 | 0 | return MachineMemOperand::MOVolatile; |
4722 | 0 | if (auto *AI = dyn_cast<AtomicRMWInst>(&I)) |
4723 | 0 | if (AI->isAtomic()) |
4724 | 0 | return MachineMemOperand::MOVolatile; |
4725 | 0 | if (auto *AI = dyn_cast<AtomicCmpXchgInst>(&I)) |
4726 | 0 | if (AI->isAtomic()) |
4727 | 0 | return MachineMemOperand::MOVolatile; |
4728 | 0 | return MachineMemOperand::MONone; |
4729 | 0 | } |
4730 | | |
4731 | | SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op, |
4732 | 0 | SelectionDAG &DAG) const { |
4733 | 0 | MachineFunction &MF = DAG.getMachineFunction(); |
4734 | 0 | auto *Regs = Subtarget.getSpecialRegisters(); |
4735 | 0 | if (MF.getFunction().getCallingConv() == CallingConv::GHC) |
4736 | 0 | report_fatal_error("Variable-sized stack allocations are not supported " |
4737 | 0 | "in GHC calling convention"); |
4738 | 0 | return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op), |
4739 | 0 | Regs->getStackPointerRegister(), Op.getValueType()); |
4740 | 0 | } |
4741 | | |
4742 | | SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op, |
4743 | 0 | SelectionDAG &DAG) const { |
4744 | 0 | MachineFunction &MF = DAG.getMachineFunction(); |
4745 | 0 | auto *Regs = Subtarget.getSpecialRegisters(); |
4746 | 0 | bool StoreBackchain = MF.getSubtarget<SystemZSubtarget>().hasBackChain(); |
4747 | |
4748 | 0 | if (MF.getFunction().getCallingConv() == CallingConv::GHC) |
4749 | 0 | report_fatal_error("Variable-sized stack allocations are not supported " |
4750 | 0 | "in GHC calling convention"); |
4751 | |
4752 | 0 | SDValue Chain = Op.getOperand(0); |
4753 | 0 | SDValue NewSP = Op.getOperand(1); |
4754 | 0 | SDValue Backchain; |
4755 | 0 | SDLoc DL(Op); |
4756 | |
4757 | 0 | if (StoreBackchain) { |
4758 | 0 | SDValue OldSP = DAG.getCopyFromReg( |
4759 | 0 | Chain, DL, Regs->getStackPointerRegister(), MVT::i64); |
4760 | 0 | Backchain = DAG.getLoad(MVT::i64, DL, Chain, getBackchainAddress(OldSP, DAG), |
4761 | 0 | MachinePointerInfo()); |
4762 | 0 | } |
4763 | |
4764 | 0 | Chain = DAG.getCopyToReg(Chain, DL, Regs->getStackPointerRegister(), NewSP); |
4765 | |
4766 | 0 | if (StoreBackchain) |
4767 | 0 | Chain = DAG.getStore(Chain, DL, Backchain, getBackchainAddress(NewSP, DAG), |
4768 | 0 | MachinePointerInfo()); |
4769 | |
4770 | 0 | return Chain; |
4771 | 0 | } |
4772 | | |
4773 | | SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op, |
4774 | 0 | SelectionDAG &DAG) const { |
4775 | 0 | bool IsData = Op.getConstantOperandVal(4); |
4776 | 0 | if (!IsData) |
4777 | | // Just preserve the chain. |
4778 | 0 | return Op.getOperand(0); |
4779 | | |
4780 | 0 | SDLoc DL(Op); |
4781 | 0 | bool IsWrite = Op.getConstantOperandVal(2); |
4782 | 0 | unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ; |
4783 | 0 | auto *Node = cast<MemIntrinsicSDNode>(Op.getNode()); |
4784 | 0 | SDValue Ops[] = {Op.getOperand(0), DAG.getTargetConstant(Code, DL, MVT::i32), |
4785 | 0 | Op.getOperand(1)}; |
4786 | 0 | return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, DL, |
4787 | 0 | Node->getVTList(), Ops, |
4788 | 0 | Node->getMemoryVT(), Node->getMemOperand()); |
4789 | 0 | } |
4790 | | |
4791 | | // Convert condition code in CCReg to an i32 value. |
4792 | 0 | static SDValue getCCResult(SelectionDAG &DAG, SDValue CCReg) { |
4793 | 0 | SDLoc DL(CCReg); |
4794 | 0 | SDValue IPM = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, CCReg); |
4795 | 0 | return DAG.getNode(ISD::SRL, DL, MVT::i32, IPM, |
4796 | 0 | DAG.getConstant(SystemZ::IPM_CC, DL, MVT::i32)); |
4797 | 0 | } |
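 | | // A sketch of the extraction above: IPM deposits the 2-bit condition
 | | // code at bit position SystemZ::IPM_CC (28) and zeroes the two bits
 | | // above it, so a plain logical shift right yields CC as an i32:
 | | //
 | | //   CC = IPM(CCReg) >> 28;  // 0 <= CC <= 3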
4798 | | |
4799 | | SDValue |
4800 | | SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op, |
4801 | 0 | SelectionDAG &DAG) const { |
4802 | 0 | unsigned Opcode, CCValid; |
4803 | 0 | if (isIntrinsicWithCCAndChain(Op, Opcode, CCValid)) { |
4804 | 0 | assert(Op->getNumValues() == 2 && "Expected only CC result and chain"); |
4805 | 0 | SDNode *Node = emitIntrinsicWithCCAndChain(DAG, Op, Opcode); |
4806 | 0 | SDValue CC = getCCResult(DAG, SDValue(Node, 0)); |
4807 | 0 | DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), CC); |
4808 | 0 | return SDValue(); |
4809 | 0 | } |
4810 | | |
4811 | 0 | return SDValue(); |
4812 | 0 | } |
4813 | | |
4814 | | SDValue |
4815 | | SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op, |
4816 | 0 | SelectionDAG &DAG) const { |
4817 | 0 | unsigned Opcode, CCValid; |
4818 | 0 | if (isIntrinsicWithCC(Op, Opcode, CCValid)) { |
4819 | 0 | SDNode *Node = emitIntrinsicWithCC(DAG, Op, Opcode); |
4820 | 0 | if (Op->getNumValues() == 1) |
4821 | 0 | return getCCResult(DAG, SDValue(Node, 0)); |
4822 | 0 | assert(Op->getNumValues() == 2 && "Expected a CC and non-CC result"); |
4823 | 0 | return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op), Op->getVTList(), |
4824 | 0 | SDValue(Node, 0), getCCResult(DAG, SDValue(Node, 1))); |
4825 | 0 | } |
4826 | | |
4827 | 0 | unsigned Id = Op.getConstantOperandVal(0); |
4828 | 0 | switch (Id) { |
4829 | 0 | case Intrinsic::thread_pointer: |
4830 | 0 | return lowerThreadPointer(SDLoc(Op), DAG); |
4831 | | |
4832 | 0 | case Intrinsic::s390_vpdi: |
4833 | 0 | return DAG.getNode(SystemZISD::PERMUTE_DWORDS, SDLoc(Op), Op.getValueType(), |
4834 | 0 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); |
4835 | | |
4836 | 0 | case Intrinsic::s390_vperm: |
4837 | 0 | return DAG.getNode(SystemZISD::PERMUTE, SDLoc(Op), Op.getValueType(), |
4838 | 0 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); |
4839 | | |
4840 | 0 | case Intrinsic::s390_vuphb: |
4841 | 0 | case Intrinsic::s390_vuphh: |
4842 | 0 | case Intrinsic::s390_vuphf: |
4843 | 0 | return DAG.getNode(SystemZISD::UNPACK_HIGH, SDLoc(Op), Op.getValueType(), |
4844 | 0 | Op.getOperand(1)); |
4845 | | |
4846 | 0 | case Intrinsic::s390_vuplhb: |
4847 | 0 | case Intrinsic::s390_vuplhh: |
4848 | 0 | case Intrinsic::s390_vuplhf: |
4849 | 0 | return DAG.getNode(SystemZISD::UNPACKL_HIGH, SDLoc(Op), Op.getValueType(), |
4850 | 0 | Op.getOperand(1)); |
4851 | | |
4852 | 0 | case Intrinsic::s390_vuplb: |
4853 | 0 | case Intrinsic::s390_vuplhw: |
4854 | 0 | case Intrinsic::s390_vuplf: |
4855 | 0 | return DAG.getNode(SystemZISD::UNPACK_LOW, SDLoc(Op), Op.getValueType(), |
4856 | 0 | Op.getOperand(1)); |
4857 | | |
4858 | 0 | case Intrinsic::s390_vupllb: |
4859 | 0 | case Intrinsic::s390_vupllh: |
4860 | 0 | case Intrinsic::s390_vupllf: |
4861 | 0 | return DAG.getNode(SystemZISD::UNPACKL_LOW, SDLoc(Op), Op.getValueType(), |
4862 | 0 | Op.getOperand(1)); |
4863 | | |
4864 | 0 | case Intrinsic::s390_vsumb: |
4865 | 0 | case Intrinsic::s390_vsumh: |
4866 | 0 | case Intrinsic::s390_vsumgh: |
4867 | 0 | case Intrinsic::s390_vsumgf: |
4868 | 0 | case Intrinsic::s390_vsumqf: |
4869 | 0 | case Intrinsic::s390_vsumqg: |
4870 | 0 | return DAG.getNode(SystemZISD::VSUM, SDLoc(Op), Op.getValueType(), |
4871 | 0 | Op.getOperand(1), Op.getOperand(2)); |
4872 | | |
4873 | 0 | case Intrinsic::s390_vaq: |
4874 | 0 | return DAG.getNode(ISD::ADD, SDLoc(Op), Op.getValueType(), |
4875 | 0 | Op.getOperand(1), Op.getOperand(2)); |
4876 | 0 | case Intrinsic::s390_vaccb: |
4877 | 0 | case Intrinsic::s390_vacch: |
4878 | 0 | case Intrinsic::s390_vaccf: |
4879 | 0 | case Intrinsic::s390_vaccg: |
4880 | 0 | case Intrinsic::s390_vaccq: |
4881 | 0 | return DAG.getNode(SystemZISD::VACC, SDLoc(Op), Op.getValueType(), |
4882 | 0 | Op.getOperand(1), Op.getOperand(2)); |
4883 | 0 | case Intrinsic::s390_vacq: |
4884 | 0 | return DAG.getNode(SystemZISD::VAC, SDLoc(Op), Op.getValueType(), |
4885 | 0 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); |
4886 | 0 | case Intrinsic::s390_vacccq: |
4887 | 0 | return DAG.getNode(SystemZISD::VACCC, SDLoc(Op), Op.getValueType(), |
4888 | 0 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); |
4889 | | |
4890 | 0 | case Intrinsic::s390_vsq: |
4891 | 0 | return DAG.getNode(ISD::SUB, SDLoc(Op), Op.getValueType(), |
4892 | 0 | Op.getOperand(1), Op.getOperand(2)); |
4893 | 0 | case Intrinsic::s390_vscbib: |
4894 | 0 | case Intrinsic::s390_vscbih: |
4895 | 0 | case Intrinsic::s390_vscbif: |
4896 | 0 | case Intrinsic::s390_vscbig: |
4897 | 0 | case Intrinsic::s390_vscbiq: |
4898 | 0 | return DAG.getNode(SystemZISD::VSCBI, SDLoc(Op), Op.getValueType(), |
4899 | 0 | Op.getOperand(1), Op.getOperand(2)); |
4900 | 0 | case Intrinsic::s390_vsbiq: |
4901 | 0 | return DAG.getNode(SystemZISD::VSBI, SDLoc(Op), Op.getValueType(), |
4902 | 0 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); |
4903 | 0 | case Intrinsic::s390_vsbcbiq: |
4904 | 0 | return DAG.getNode(SystemZISD::VSBCBI, SDLoc(Op), Op.getValueType(), |
4905 | 0 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); |
4906 | 0 | } |
4907 | | |
4908 | 0 | return SDValue(); |
4909 | 0 | } |
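 | | // For illustration: most cases above map an intrinsic directly onto a
 | | // target node (e.g. s390_vperm -> SystemZISD::PERMUTE), but the
 | | // full-width arithmetic intrinsics use generic opcodes instead, so
 | | // s390_vaq(x, y) becomes (ISD::ADD x, y) on its 128-bit result type and
 | | // can participate in ordinary DAG combines before being selected to VAQ.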
4910 | | |
4911 | | namespace { |
4912 | | // Says that SystemZISD operation Opcode can be used to perform the equivalent |
4913 | | // of a VPERM with permute vector Bytes. If Opcode takes three operands, |
4914 | | // Operand is the constant third operand, otherwise it is the number of |
4915 | | // bytes in each element of the result. |
4916 | | struct Permute { |
4917 | | unsigned Opcode; |
4918 | | unsigned Operand; |
4919 | | unsigned char Bytes[SystemZ::VectorBytes]; |
4920 | | }; |
4921 | | } // end anonymous namespace
4922 | | |
4923 | | static const Permute PermuteForms[] = { |
4924 | | // VMRHG |
4925 | | { SystemZISD::MERGE_HIGH, 8, |
4926 | | { 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 } }, |
4927 | | // VMRHF |
4928 | | { SystemZISD::MERGE_HIGH, 4, |
4929 | | { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } }, |
4930 | | // VMRHH |
4931 | | { SystemZISD::MERGE_HIGH, 2, |
4932 | | { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } }, |
4933 | | // VMRHB |
4934 | | { SystemZISD::MERGE_HIGH, 1, |
4935 | | { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } }, |
4936 | | // VMRLG |
4937 | | { SystemZISD::MERGE_LOW, 8, |
4938 | | { 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 } }, |
4939 | | // VMRLF |
4940 | | { SystemZISD::MERGE_LOW, 4, |
4941 | | { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } }, |
4942 | | // VMRLH |
4943 | | { SystemZISD::MERGE_LOW, 2, |
4944 | | { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } }, |
4945 | | // VMRLB |
4946 | | { SystemZISD::MERGE_LOW, 1, |
4947 | | { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } }, |
4948 | | // VPKG |
4949 | | { SystemZISD::PACK, 4, |
4950 | | { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 } }, |
4951 | | // VPKF |
4952 | | { SystemZISD::PACK, 2, |
4953 | | { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } }, |
4954 | | // VPKH |
4955 | | { SystemZISD::PACK, 1, |
4956 | | { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } }, |
4957 | | // VPDI V1, V2, 4 (low half of V1, high half of V2) |
4958 | | { SystemZISD::PERMUTE_DWORDS, 4, |
4959 | | { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } }, |
4960 | | // VPDI V1, V2, 1 (high half of V1, low half of V2) |
4961 | | { SystemZISD::PERMUTE_DWORDS, 1, |
4962 | | { 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 } } |
4963 | | }; |
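 | | // Encoding note (for illustration): as in VPERM itself, byte selectors
 | | // 0-15 name bytes of the first operand and 16-31 bytes of the second.
 | | // The VMRHB entry { 0, 16, 1, 17, ... } therefore describes a high-half
 | | // merge that interleaves bytes 0..7 of operand 0 with bytes 0..7
 | | // (selectors 16..23) of operand 1.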
4964 | | |
4965 | | // Called after matching a vector shuffle against a particular pattern. |
4966 | | // Both the original shuffle and the pattern have two vector operands. |
4967 | | // OpNos[0] is the operand of the original shuffle that should be used for |
4968 | | // operand 0 of the pattern, or -1 if operand 0 of the pattern can be anything. |
4969 | | // OpNos[1] is the same for operand 1 of the pattern. Resolve these -1s and |
4970 | | // set OpNo0 and OpNo1 to the shuffle operands that should actually be used |
4971 | | // for operands 0 and 1 of the pattern. |
4972 | 0 | static bool chooseShuffleOpNos(int *OpNos, unsigned &OpNo0, unsigned &OpNo1) { |
4973 | 0 | if (OpNos[0] < 0) { |
4974 | 0 | if (OpNos[1] < 0) |
4975 | 0 | return false; |
4976 | 0 | OpNo0 = OpNo1 = OpNos[1]; |
4977 | 0 | } else if (OpNos[1] < 0) { |
4978 | 0 | OpNo0 = OpNo1 = OpNos[0]; |
4979 | 0 | } else { |
4980 | 0 | OpNo0 = OpNos[0]; |
4981 | 0 | OpNo1 = OpNos[1]; |
4982 | 0 | } |
4983 | 0 | return true; |
4984 | 0 | } |
4985 | | |
4986 | | // Bytes is a VPERM-like permute vector, except that -1 is used for |
4987 | | // undefined bytes. Return true if the VPERM can be implemented using P. |
4988 | | // When returning true set OpNo0 to the VPERM operand that should be |
4989 | | // used for operand 0 of P and likewise OpNo1 for operand 1 of P. |
4990 | | // |
4991 | | // For example, if swapping the VPERM operands allows P to match, OpNo0 |
4992 | | // will be 1 and OpNo1 will be 0. If instead Bytes only refers to one |
4993 | | // operand, but rewriting it to use two duplicated operands allows it to |
4994 | | // match P, then OpNo0 and OpNo1 will be the same. |
4995 | | static bool matchPermute(const SmallVectorImpl<int> &Bytes, const Permute &P, |
4996 | 0 | unsigned &OpNo0, unsigned &OpNo1) { |
4997 | 0 | int OpNos[] = { -1, -1 }; |
4998 | 0 | for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) { |
4999 | 0 | int Elt = Bytes[I]; |
5000 | 0 | if (Elt >= 0) { |
5001 | | // Make sure that the two permute vectors use the same suboperand |
5002 | | // byte number. Only the operand numbers (the high bits) are |
5003 | | // allowed to differ. |
5004 | 0 | if ((Elt ^ P.Bytes[I]) & (SystemZ::VectorBytes - 1)) |
5005 | 0 | return false; |
5006 | 0 | int ModelOpNo = P.Bytes[I] / SystemZ::VectorBytes; |
5007 | 0 | int RealOpNo = unsigned(Elt) / SystemZ::VectorBytes; |
5008 | | // Make sure that the operand mappings are consistent with previous |
5009 | | // elements. |
5010 | 0 | if (OpNos[ModelOpNo] == 1 - RealOpNo) |
5011 | 0 | return false; |
5012 | 0 | OpNos[ModelOpNo] = RealOpNo; |
5013 | 0 | } |
5014 | 0 | } |
5015 | 0 | return chooseShuffleOpNos(OpNos, OpNo0, OpNo1); |
5016 | 0 | } |
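 | | // Worked example (illustrative): Bytes = { 16, 0, 17, 1, ... } agrees
 | | // with the VMRHB pattern { 0, 16, 1, 17, ... } in the low four bits of
 | | // every byte and only the operand numbers differ, so the match succeeds
 | | // with OpNo0 = 1 and OpNo1 = 0: a VMRHB with its inputs swapped.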
5017 | | |
5018 | | // As above, but search for a matching permute. |
5019 | | static const Permute *matchPermute(const SmallVectorImpl<int> &Bytes, |
5020 | 0 | unsigned &OpNo0, unsigned &OpNo1) { |
5021 | 0 | for (auto &P : PermuteForms) |
5022 | 0 | if (matchPermute(Bytes, P, OpNo0, OpNo1)) |
5023 | 0 | return &P; |
5024 | 0 | return nullptr; |
5025 | 0 | } |
5026 | | |
5027 | | // Bytes is a VPERM-like permute vector, except that -1 is used for |
5028 | | // undefined bytes. This permute is an operand of an outer permute. |
5029 | | // See whether redistributing the -1 bytes gives a shuffle that can be |
5030 | | // implemented using P. If so, set Transform to a VPERM-like permute vector |
5031 | | // that, when applied to the result of P, gives the original permute in Bytes. |
5032 | | static bool matchDoublePermute(const SmallVectorImpl<int> &Bytes, |
5033 | | const Permute &P, |
5034 | 0 | SmallVectorImpl<int> &Transform) { |
5035 | 0 | unsigned To = 0; |
5036 | 0 | for (unsigned From = 0; From < SystemZ::VectorBytes; ++From) { |
5037 | 0 | int Elt = Bytes[From]; |
5038 | 0 | if (Elt < 0) |
5039 | | // Byte number From of the result is undefined. |
5040 | 0 | Transform[From] = -1; |
5041 | 0 | else { |
5042 | 0 | while (P.Bytes[To] != Elt) { |
5043 | 0 | To += 1; |
5044 | 0 | if (To == SystemZ::VectorBytes) |
5045 | 0 | return false; |
5046 | 0 | } |
5047 | 0 | Transform[From] = To; |
5048 | 0 | } |
5049 | 0 | } |
5050 | 0 | return true; |
5051 | 0 | } |
5052 | | |
5053 | | // As above, but search for a matching permute. |
5054 | | static const Permute *matchDoublePermute(const SmallVectorImpl<int> &Bytes, |
5055 | 0 | SmallVectorImpl<int> &Transform) { |
5056 | 0 | for (auto &P : PermuteForms) |
5057 | 0 | if (matchDoublePermute(Bytes, P, Transform)) |
5058 | 0 | return &P; |
5059 | 0 | return nullptr; |
5060 | 0 | } |
5061 | | |
5062 | | // Convert the mask of the given shuffle op into a byte-level mask, |
5063 | | // as if it had type vNi8. |
5064 | | static bool getVPermMask(SDValue ShuffleOp, |
5065 | 0 | SmallVectorImpl<int> &Bytes) { |
5066 | 0 | EVT VT = ShuffleOp.getValueType(); |
5067 | 0 | unsigned NumElements = VT.getVectorNumElements(); |
5068 | 0 | unsigned BytesPerElement = VT.getVectorElementType().getStoreSize(); |
5069 | |
5070 | 0 | if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(ShuffleOp)) { |
5071 | 0 | Bytes.resize(NumElements * BytesPerElement, -1); |
5072 | 0 | for (unsigned I = 0; I < NumElements; ++I) { |
5073 | 0 | int Index = VSN->getMaskElt(I); |
5074 | 0 | if (Index >= 0) |
5075 | 0 | for (unsigned J = 0; J < BytesPerElement; ++J) |
5076 | 0 | Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J; |
5077 | 0 | } |
5078 | 0 | return true; |
5079 | 0 | } |
5080 | 0 | if (SystemZISD::SPLAT == ShuffleOp.getOpcode() && |
5081 | 0 | isa<ConstantSDNode>(ShuffleOp.getOperand(1))) { |
5082 | 0 | unsigned Index = ShuffleOp.getConstantOperandVal(1); |
5083 | 0 | Bytes.resize(NumElements * BytesPerElement, -1); |
5084 | 0 | for (unsigned I = 0; I < NumElements; ++I) |
5085 | 0 | for (unsigned J = 0; J < BytesPerElement; ++J) |
5086 | 0 | Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J; |
5087 | 0 | return true; |
5088 | 0 | } |
5089 | 0 | return false; |
5090 | 0 | } |
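 | | // Worked example (illustrative): a v4i32 shuffle mask <5, 1, 6, 2>
 | | // expands to the byte mask
 | | //   { 20,21,22,23, 4,5,6,7, 24,25,26,27, 8,9,10,11 }
 | | // since each element index Index becomes the byte selectors
 | | // Index*4 .. Index*4+3.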
5091 | | |
5092 | | // Bytes is a VPERM-like permute vector, except that -1 is used for |
5093 | | // undefined bytes. See whether bytes [Start, Start + BytesPerElement) of |
5094 | | // the result come from a contiguous sequence of bytes from one input. |
5095 | | // Set Base to the selector for the first byte if so. |
5096 | | static bool getShuffleInput(const SmallVectorImpl<int> &Bytes, unsigned Start, |
5097 | 0 | unsigned BytesPerElement, int &Base) { |
5098 | 0 | Base = -1; |
5099 | 0 | for (unsigned I = 0; I < BytesPerElement; ++I) { |
5100 | 0 | if (Bytes[Start + I] >= 0) { |
5101 | 0 | unsigned Elem = Bytes[Start + I]; |
5102 | 0 | if (Base < 0) { |
5103 | 0 | Base = Elem - I; |
5104 | | // Make sure the bytes would come from one input operand. |
5105 | 0 | if (unsigned(Base) % Bytes.size() + BytesPerElement > Bytes.size()) |
5106 | 0 | return false; |
5107 | 0 | } else if (unsigned(Base) != Elem - I) |
5108 | 0 | return false; |
5109 | 0 | } |
5110 | 0 | } |
5111 | 0 | return true; |
5112 | 0 | } |
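 | | // For illustration, with BytesPerElement == 4: bytes { 6, 7, 8, 9 } are
 | | // contiguous with Base == 6 and pass the check, whereas { 14, 15, 16, 17 }
 | | // would straddle the boundary between the two inputs (14 % 16 + 4 > 16)
 | | // and is rejected.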
5113 | | |
5114 | | // Bytes is a VPERM-like permute vector, except that -1 is used for |
5115 | | // undefined bytes. Return true if it can be performed using VSLDB. |
5116 | | // When returning true, set StartIndex to the shift amount and OpNo0 |
5117 | | // and OpNo1 to the VPERM operands that should be used as the first |
5118 | | // and second shift operand respectively. |
5119 | | static bool isShlDoublePermute(const SmallVectorImpl<int> &Bytes, |
5120 | | unsigned &StartIndex, unsigned &OpNo0, |
5121 | 0 | unsigned &OpNo1) { |
5122 | 0 | int OpNos[] = { -1, -1 }; |
5123 | 0 | int Shift = -1; |
5124 | 0 | for (unsigned I = 0; I < 16; ++I) { |
5125 | 0 | int Index = Bytes[I]; |
5126 | 0 | if (Index >= 0) { |
5127 | 0 | int ExpectedShift = (Index - I) % SystemZ::VectorBytes; |
5128 | 0 | int ModelOpNo = unsigned(ExpectedShift + I) / SystemZ::VectorBytes; |
5129 | 0 | int RealOpNo = unsigned(Index) / SystemZ::VectorBytes; |
5130 | 0 | if (Shift < 0) |
5131 | 0 | Shift = ExpectedShift; |
5132 | 0 | else if (Shift != ExpectedShift) |
5133 | 0 | return false; |
5134 | | // Make sure that the operand mappings are consistent with previous |
5135 | | // elements. |
5136 | 0 | if (OpNos[ModelOpNo] == 1 - RealOpNo) |
5137 | 0 | return false; |
5138 | 0 | OpNos[ModelOpNo] = RealOpNo; |
5139 | 0 | } |
5140 | 0 | } |
5141 | 0 | StartIndex = Shift; |
5142 | 0 | return chooseShuffleOpNos(OpNos, OpNo0, OpNo1); |
5143 | 0 | } |
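 | | // Worked example (illustrative): Bytes = { 1, 2, ..., 15, 16 } yields
 | | // ExpectedShift == 1 for every defined byte, with bytes 0..14 taken from
 | | // operand 0 and byte 15 from operand 1, which is exactly a VSLDB (shift
 | | // left double by byte) with shift amount 1, so StartIndex is set to 1.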
5144 | | |
5145 | | // Create a node that performs P on operands Op0 and Op1, casting the |
5146 | | // operands to the appropriate type. The type of the result is determined by P. |
5147 | | static SDValue getPermuteNode(SelectionDAG &DAG, const SDLoc &DL, |
5148 | 0 | const Permute &P, SDValue Op0, SDValue Op1) { |
5149 | | // VPDI (PERMUTE_DWORDS) always operates on v2i64s. The input |
5150 | | // elements of a PACK are twice as wide as the outputs. |
5151 | 0 | unsigned InBytes = (P.Opcode == SystemZISD::PERMUTE_DWORDS ? 8 : |
5152 | 0 | P.Opcode == SystemZISD::PACK ? P.Operand * 2 : |
5153 | 0 | P.Operand); |
5154 | | // Cast both operands to the appropriate type. |
5155 | 0 | MVT InVT = MVT::getVectorVT(MVT::getIntegerVT(InBytes * 8), |
5156 | 0 | SystemZ::VectorBytes / InBytes); |
5157 | 0 | Op0 = DAG.getNode(ISD::BITCAST, DL, InVT, Op0); |
5158 | 0 | Op1 = DAG.getNode(ISD::BITCAST, DL, InVT, Op1); |
5159 | 0 | SDValue Op; |
5160 | 0 | if (P.Opcode == SystemZISD::PERMUTE_DWORDS) { |
5161 | 0 | SDValue Op2 = DAG.getTargetConstant(P.Operand, DL, MVT::i32); |
5162 | 0 | Op = DAG.getNode(SystemZISD::PERMUTE_DWORDS, DL, InVT, Op0, Op1, Op2); |
5163 | 0 | } else if (P.Opcode == SystemZISD::PACK) { |
5164 | 0 | MVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(P.Operand * 8), |
5165 | 0 | SystemZ::VectorBytes / P.Operand); |
5166 | 0 | Op = DAG.getNode(SystemZISD::PACK, DL, OutVT, Op0, Op1); |
5167 | 0 | } else { |
5168 | 0 | Op = DAG.getNode(P.Opcode, DL, InVT, Op0, Op1); |
5169 | 0 | } |
5170 | 0 | return Op; |
5171 | 0 | } |
5172 | | |
5173 | 0 | static bool isZeroVector(SDValue N) { |
5174 | 0 | if (N->getOpcode() == ISD::BITCAST) |
5175 | 0 | N = N->getOperand(0); |
5176 | 0 | if (N->getOpcode() == ISD::SPLAT_VECTOR) |
5177 | 0 | if (auto *Op = dyn_cast<ConstantSDNode>(N->getOperand(0))) |
5178 | 0 | return Op->getZExtValue() == 0; |
5179 | 0 | return ISD::isBuildVectorAllZeros(N.getNode()); |
5180 | 0 | } |
5181 | | |
5182 | | // Return the index of the zero/undef vector, or UINT32_MAX if not found. |
5183 | 0 | static uint32_t findZeroVectorIdx(SDValue *Ops, unsigned Num) { |
5184 | 0 | for (unsigned I = 0; I < Num; I++)
5185 | 0 | if (isZeroVector(Ops[I])) |
5186 | 0 | return I; |
5187 | 0 | return UINT32_MAX; |
5188 | 0 | } |
5189 | | |
5190 | | // Bytes is a VPERM-like permute vector, except that -1 is used for |
5191 | | // undefined bytes. Implement it on operands Ops[0] and Ops[1] using |
5192 | | // VSLDB or VPERM. |
5193 | | static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL, |
5194 | | SDValue *Ops, |
5195 | 0 | const SmallVectorImpl<int> &Bytes) { |
5196 | 0 | for (unsigned I = 0; I < 2; ++I) |
5197 | 0 | Ops[I] = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Ops[I]); |
5198 | | |
5199 | | // First see whether VSLDB can be used. |
5200 | 0 | unsigned StartIndex, OpNo0, OpNo1; |
5201 | 0 | if (isShlDoublePermute(Bytes, StartIndex, OpNo0, OpNo1)) |
5202 | 0 | return DAG.getNode(SystemZISD::SHL_DOUBLE, DL, MVT::v16i8, Ops[OpNo0], |
5203 | 0 | Ops[OpNo1], |
5204 | 0 | DAG.getTargetConstant(StartIndex, DL, MVT::i32)); |
5205 | | |
5206 | | // Fall back on VPERM. Construct an SDNode for the permute vector. Try to |
5207 | | // eliminate a zero vector by reusing any zero index in the permute vector. |
5208 | 0 | unsigned ZeroVecIdx = findZeroVectorIdx(&Ops[0], 2); |
5209 | 0 | if (ZeroVecIdx != UINT32_MAX) { |
5210 | 0 | bool MaskFirst = true; |
5211 | 0 | int ZeroIdx = -1; |
5212 | 0 | for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) { |
5213 | 0 | unsigned OpNo = unsigned(Bytes[I]) / SystemZ::VectorBytes; |
5214 | 0 | unsigned Byte = unsigned(Bytes[I]) % SystemZ::VectorBytes; |
5215 | 0 | if (OpNo == ZeroVecIdx && I == 0) { |
5216 | | // If the first byte is zero, use mask as first operand. |
5217 | 0 | ZeroIdx = 0; |
5218 | 0 | break; |
5219 | 0 | } |
5220 | 0 | if (OpNo != ZeroVecIdx && Byte == 0) { |
5221 | | // If mask contains a zero, use it by placing that vector first. |
5222 | 0 | ZeroIdx = I + SystemZ::VectorBytes; |
5223 | 0 | MaskFirst = false; |
5224 | 0 | break; |
5225 | 0 | } |
5226 | 0 | } |
5227 | 0 | if (ZeroIdx != -1) { |
5228 | 0 | SDValue IndexNodes[SystemZ::VectorBytes]; |
5229 | 0 | for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) { |
5230 | 0 | if (Bytes[I] >= 0) { |
5231 | 0 | unsigned OpNo = unsigned(Bytes[I]) / SystemZ::VectorBytes; |
5232 | 0 | unsigned Byte = unsigned(Bytes[I]) % SystemZ::VectorBytes; |
5233 | 0 | if (OpNo == ZeroVecIdx) |
5234 | 0 | IndexNodes[I] = DAG.getConstant(ZeroIdx, DL, MVT::i32); |
5235 | 0 | else { |
5236 | 0 | unsigned BIdx = MaskFirst ? Byte + SystemZ::VectorBytes : Byte; |
5237 | 0 | IndexNodes[I] = DAG.getConstant(BIdx, DL, MVT::i32); |
5238 | 0 | } |
5239 | 0 | } else |
5240 | 0 | IndexNodes[I] = DAG.getUNDEF(MVT::i32); |
5241 | 0 | } |
5242 | 0 | SDValue Mask = DAG.getBuildVector(MVT::v16i8, DL, IndexNodes); |
5243 | 0 | SDValue Src = ZeroVecIdx == 0 ? Ops[1] : Ops[0]; |
5244 | 0 | if (MaskFirst) |
5245 | 0 | return DAG.getNode(SystemZISD::PERMUTE, DL, MVT::v16i8, Mask, Src, |
5246 | 0 | Mask); |
5247 | 0 | else |
5248 | 0 | return DAG.getNode(SystemZISD::PERMUTE, DL, MVT::v16i8, Src, Mask, |
5249 | 0 | Mask); |
5250 | 0 | } |
5251 | 0 | } |
5252 | | |
5253 | 0 | SDValue IndexNodes[SystemZ::VectorBytes]; |
5254 | 0 | for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) |
5255 | 0 | if (Bytes[I] >= 0) |
5256 | 0 | IndexNodes[I] = DAG.getConstant(Bytes[I], DL, MVT::i32); |
5257 | 0 | else |
5258 | 0 | IndexNodes[I] = DAG.getUNDEF(MVT::i32); |
5259 | 0 | SDValue Op2 = DAG.getBuildVector(MVT::v16i8, DL, IndexNodes); |
5260 | 0 | return DAG.getNode(SystemZISD::PERMUTE, DL, MVT::v16i8, Ops[0], |
5261 | 0 | (!Ops[1].isUndef() ? Ops[1] : Ops[0]), Op2); |
5262 | 0 | } |
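 | | // For illustration of the zero-vector case above: VPERM takes its mask
 | | // from a register, so when one input is known to be all zeros the mask
 | | // vector itself is reused as that input (PERMUTE Mask,Src,Mask or
 | | // PERMUTE Src,Mask,Mask). Result bytes that must be zero are given a
 | | // selector pointing at a mask byte whose own value is zero, so no
 | | // separate zero register needs to be materialized.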
5263 | | |
5264 | | namespace { |
5265 | | // Describes a general N-operand vector shuffle. |
5266 | | struct GeneralShuffle { |
5267 | 0 | GeneralShuffle(EVT vt) : VT(vt), UnpackFromEltSize(UINT_MAX) {} |
5268 | | void addUndef(); |
5269 | | bool add(SDValue, unsigned); |
5270 | | SDValue getNode(SelectionDAG &, const SDLoc &); |
5271 | | void tryPrepareForUnpack(); |
5272 | 0 | bool unpackWasPrepared() { return UnpackFromEltSize <= 4; } |
5273 | | SDValue insertUnpackIfPrepared(SelectionDAG &DAG, const SDLoc &DL, SDValue Op); |
5274 | | |
5275 | | // The operands of the shuffle. |
5276 | | SmallVector<SDValue, SystemZ::VectorBytes> Ops; |
5277 | | |
5278 | | // Index I is -1 if byte I of the result is undefined. Otherwise the |
5279 | | // result comes from byte Bytes[I] % SystemZ::VectorBytes of operand |
5280 | | // Bytes[I] / SystemZ::VectorBytes. |
5281 | | SmallVector<int, SystemZ::VectorBytes> Bytes; |
5282 | | |
5283 | | // The type of the shuffle result. |
5284 | | EVT VT; |
5285 | | |
5286 | | // Holds a value of 1, 2 or 4 if a final unpack has been prepared for. |
5287 | | unsigned UnpackFromEltSize; |
5288 | | }; |
5289 | | } // end anonymous namespace
5290 | | |
5291 | | // Add an extra undefined element to the shuffle. |
5292 | 0 | void GeneralShuffle::addUndef() { |
5293 | 0 | unsigned BytesPerElement = VT.getVectorElementType().getStoreSize(); |
5294 | 0 | for (unsigned I = 0; I < BytesPerElement; ++I) |
5295 | 0 | Bytes.push_back(-1); |
5296 | 0 | } |
5297 | | |
5298 | | // Add an extra element to the shuffle, taking it from element Elem of Op. |
5299 | | // A null Op indicates a vector input whose value will be calculated later; |
5300 | | // there is at most one such input per shuffle and it always has the same |
5301 | | // type as the result. Aborts and returns false if the source vector elements |
5302 | | // of an EXTRACT_VECTOR_ELT are smaller than the destination elements; per
5303 | | // LLVM semantics those are implicitly extended, a rare case left unoptimized.
5304 | 0 | bool GeneralShuffle::add(SDValue Op, unsigned Elem) { |
5305 | 0 | unsigned BytesPerElement = VT.getVectorElementType().getStoreSize(); |
5306 | | |
5307 | | // The source vector can have wider elements than the result, |
5308 | | // either through an explicit TRUNCATE or because of type legalization. |
5309 | | // We want the least significant part. |
5310 | 0 | EVT FromVT = Op.getNode() ? Op.getValueType() : VT; |
5311 | 0 | unsigned FromBytesPerElement = FromVT.getVectorElementType().getStoreSize(); |
5312 | | |
5313 | | // Return false if the source elements are smaller than their destination |
5314 | | // elements. |
5315 | 0 | if (FromBytesPerElement < BytesPerElement) |
5316 | 0 | return false; |
5317 | | |
5318 | 0 | unsigned Byte = ((Elem * FromBytesPerElement) % SystemZ::VectorBytes + |
5319 | 0 | (FromBytesPerElement - BytesPerElement)); |
5320 | | |
5321 | | // Look through things like shuffles and bitcasts. |
5322 | 0 | while (Op.getNode()) { |
5323 | 0 | if (Op.getOpcode() == ISD::BITCAST) |
5324 | 0 | Op = Op.getOperand(0); |
5325 | 0 | else if (Op.getOpcode() == ISD::VECTOR_SHUFFLE && Op.hasOneUse()) { |
5326 | | // See whether the bytes we need come from a contiguous part of one |
5327 | | // operand. |
5328 | 0 | SmallVector<int, SystemZ::VectorBytes> OpBytes; |
5329 | 0 | if (!getVPermMask(Op, OpBytes)) |
5330 | 0 | break; |
5331 | 0 | int NewByte; |
5332 | 0 | if (!getShuffleInput(OpBytes, Byte, BytesPerElement, NewByte)) |
5333 | 0 | break; |
5334 | 0 | if (NewByte < 0) { |
5335 | 0 | addUndef(); |
5336 | 0 | return true; |
5337 | 0 | } |
5338 | 0 | Op = Op.getOperand(unsigned(NewByte) / SystemZ::VectorBytes); |
5339 | 0 | Byte = unsigned(NewByte) % SystemZ::VectorBytes; |
5340 | 0 | } else if (Op.isUndef()) { |
5341 | 0 | addUndef(); |
5342 | 0 | return true; |
5343 | 0 | } else |
5344 | 0 | break; |
5345 | 0 | } |
5346 | | |
5347 | | // Make sure that the source of the extraction is in Ops. |
5348 | 0 | unsigned OpNo = 0; |
5349 | 0 | for (; OpNo < Ops.size(); ++OpNo) |
5350 | 0 | if (Ops[OpNo] == Op) |
5351 | 0 | break; |
5352 | 0 | if (OpNo == Ops.size()) |
5353 | 0 | Ops.push_back(Op); |
5354 | | |
5355 | | // Add the element to Bytes. |
5356 | 0 | unsigned Base = OpNo * SystemZ::VectorBytes + Byte; |
5357 | 0 | for (unsigned I = 0; I < BytesPerElement; ++I) |
5358 | 0 | Bytes.push_back(Base + I); |
5359 | |
5360 | 0 | return true; |
5361 | 0 | } |
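 | | // Worked example (illustrative) of the Byte computation above: taking
 | | // element 1 of a v4i32 source for a v8i16 result gives
 | | //   Byte = (1 * 4) % 16 + (4 - 2) = 6,
 | | // i.e. the two least significant (on big-endian, last) bytes of that
 | | // i32 element, matching "we want the least significant part" above.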
5362 | | |
5363 | | // Return SDNodes for the completed shuffle. |
5364 | 0 | SDValue GeneralShuffle::getNode(SelectionDAG &DAG, const SDLoc &DL) { |
5365 | 0 | assert(Bytes.size() == SystemZ::VectorBytes && "Incomplete vector"); |
5366 | | |
5367 | 0 | if (Ops.size() == 0) |
5368 | 0 | return DAG.getUNDEF(VT); |
5369 | | |
5370 | | // Use a single unpack if possible as the last operation. |
5371 | 0 | tryPrepareForUnpack(); |
5372 | | |
5373 | | // Make sure that there are at least two shuffle operands. |
5374 | 0 | if (Ops.size() == 1) |
5375 | 0 | Ops.push_back(DAG.getUNDEF(MVT::v16i8)); |
5376 | | |
5377 | | // Create a tree of shuffles, deferring root node until after the loop. |
5378 | | // Try to redistribute the undefined elements of non-root nodes so that |
5379 | | // the non-root shuffles match something like a pack or merge, then adjust |
5380 | | // the parent node's permute vector to compensate for the new order. |
5381 | | // Among other things, this copes with vectors like <2 x i16> that were |
5382 | | // padded with undefined elements during type legalization. |
5383 | | // |
5384 | | // In the best case this redistribution will lead to the whole tree |
5385 | | // using packs and merges. It should rarely be a loss in other cases. |
5386 | 0 | unsigned Stride = 1; |
5387 | 0 | for (; Stride * 2 < Ops.size(); Stride *= 2) { |
5388 | 0 | for (unsigned I = 0; I < Ops.size() - Stride; I += Stride * 2) { |
5389 | 0 | SDValue SubOps[] = { Ops[I], Ops[I + Stride] }; |
5390 | | |
5391 | | // Create a mask for just these two operands. |
5392 | 0 | SmallVector<int, SystemZ::VectorBytes> NewBytes(SystemZ::VectorBytes); |
5393 | 0 | for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) { |
5394 | 0 | unsigned OpNo = unsigned(Bytes[J]) / SystemZ::VectorBytes; |
5395 | 0 | unsigned Byte = unsigned(Bytes[J]) % SystemZ::VectorBytes; |
5396 | 0 | if (OpNo == I) |
5397 | 0 | NewBytes[J] = Byte; |
5398 | 0 | else if (OpNo == I + Stride) |
5399 | 0 | NewBytes[J] = SystemZ::VectorBytes + Byte; |
5400 | 0 | else |
5401 | 0 | NewBytes[J] = -1; |
5402 | 0 | } |
5403 | | // See if it would be better to reorganize NewMask to avoid using VPERM. |
5404 | 0 | SmallVector<int, SystemZ::VectorBytes> NewBytesMap(SystemZ::VectorBytes); |
5405 | 0 | if (const Permute *P = matchDoublePermute(NewBytes, NewBytesMap)) { |
5406 | 0 | Ops[I] = getPermuteNode(DAG, DL, *P, SubOps[0], SubOps[1]); |
5407 | | // Applying NewBytesMap to Ops[I] gets back to NewBytes. |
5408 | 0 | for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) { |
5409 | 0 | if (NewBytes[J] >= 0) { |
5410 | 0 | assert(unsigned(NewBytesMap[J]) < SystemZ::VectorBytes && |
5411 | 0 | "Invalid double permute"); |
5412 | 0 | Bytes[J] = I * SystemZ::VectorBytes + NewBytesMap[J]; |
5413 | 0 | } else |
5414 | 0 | assert(NewBytesMap[J] < 0 && "Invalid double permute"); |
5415 | 0 | } |
5416 | 0 | } else { |
5417 | | // Just use NewBytes on the operands. |
5418 | 0 | Ops[I] = getGeneralPermuteNode(DAG, DL, SubOps, NewBytes); |
5419 | 0 | for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) |
5420 | 0 | if (NewBytes[J] >= 0) |
5421 | 0 | Bytes[J] = I * SystemZ::VectorBytes + J; |
5422 | 0 | } |
5423 | 0 | } |
5424 | 0 | } |
5425 | | |
5426 | | // Now we just have 2 inputs. Put the second operand in Ops[1]. |
5427 | 0 | if (Stride > 1) { |
5428 | 0 | Ops[1] = Ops[Stride]; |
5429 | 0 | for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) |
5430 | 0 | if (Bytes[I] >= int(SystemZ::VectorBytes)) |
5431 | 0 | Bytes[I] -= (Stride - 1) * SystemZ::VectorBytes; |
5432 | 0 | } |
5433 | | |
5434 | | // Look for an instruction that can do the permute without resorting |
5435 | | // to VPERM. |
5436 | 0 | unsigned OpNo0, OpNo1; |
5437 | 0 | SDValue Op; |
5438 | 0 | if (unpackWasPrepared() && Ops[1].isUndef()) |
5439 | 0 | Op = Ops[0]; |
5440 | 0 | else if (const Permute *P = matchPermute(Bytes, OpNo0, OpNo1)) |
5441 | 0 | Op = getPermuteNode(DAG, DL, *P, Ops[OpNo0], Ops[OpNo1]); |
5442 | 0 | else |
5443 | 0 | Op = getGeneralPermuteNode(DAG, DL, &Ops[0], Bytes); |
5444 | |
5445 | 0 | Op = insertUnpackIfPrepared(DAG, DL, Op); |
5446 | |
5447 | 0 | return DAG.getNode(ISD::BITCAST, DL, VT, Op); |
5448 | 0 | } |
5449 | | |
5450 | | #ifndef NDEBUG |
5451 | 0 | static void dumpBytes(const SmallVectorImpl<int> &Bytes, const std::string &Msg) {
5452 | 0 |   dbgs() << Msg << " { ";
5453 | 0 | for (unsigned i = 0; i < Bytes.size(); i++) |
5454 | 0 | dbgs() << Bytes[i] << " "; |
5455 | 0 | dbgs() << "}\n"; |
5456 | 0 | } |
5457 | | #endif |
5458 | | |
5459 | | // If the Bytes vector matches an unpack operation, prepare to do the unpack
5460 | | // as the final step by removing the zero vector here and undoing the effect
5461 | | // of the unpack on Bytes.
5462 | 0 | void GeneralShuffle::tryPrepareForUnpack() { |
5463 | 0 | uint32_t ZeroVecOpNo = findZeroVectorIdx(&Ops[0], Ops.size()); |
5464 | 0 | if (ZeroVecOpNo == UINT32_MAX || Ops.size() == 1) |
5465 | 0 | return; |
5466 | | |
5467 | | // Only do this if removing the zero vector reduces the depth, otherwise |
5468 | | // the critical path will increase with the final unpack. |
5469 | 0 | if (Ops.size() > 2 && |
5470 | 0 | Log2_32_Ceil(Ops.size()) == Log2_32_Ceil(Ops.size() - 1)) |
5471 | 0 | return; |
5472 | | |
5473 | | // Find an unpack that would allow removing the zero vector from Ops. |
5474 | 0 | UnpackFromEltSize = 1; |
5475 | 0 | for (; UnpackFromEltSize <= 4; UnpackFromEltSize *= 2) { |
5476 | 0 | bool MatchUnpack = true; |
5477 | 0 | SmallVector<int, SystemZ::VectorBytes> SrcBytes; |
5478 | 0 | for (unsigned Elt = 0; Elt < SystemZ::VectorBytes; Elt++) { |
5479 | 0 | unsigned ToEltSize = UnpackFromEltSize * 2; |
5480 | 0 | bool IsZextByte = (Elt % ToEltSize) < UnpackFromEltSize; |
5481 | 0 | if (!IsZextByte) |
5482 | 0 | SrcBytes.push_back(Bytes[Elt]); |
5483 | 0 | if (Bytes[Elt] != -1) { |
5484 | 0 | unsigned OpNo = unsigned(Bytes[Elt]) / SystemZ::VectorBytes; |
5485 | 0 | if (IsZextByte != (OpNo == ZeroVecOpNo)) { |
5486 | 0 | MatchUnpack = false; |
5487 | 0 | break; |
5488 | 0 | } |
5489 | 0 | } |
5490 | 0 | } |
5491 | 0 | if (MatchUnpack) { |
5492 | 0 | if (Ops.size() == 2) { |
5493 | | // Don't use unpack if a single source operand needs rearrangement. |
5494 | 0 | for (unsigned i = 0; i < SystemZ::VectorBytes / 2; i++) |
5495 | 0 | if (SrcBytes[i] != -1 && SrcBytes[i] % 16 != int(i)) { |
5496 | 0 | UnpackFromEltSize = UINT_MAX; |
5497 | 0 | return; |
5498 | 0 | } |
5499 | 0 | } |
5500 | 0 | break; |
5501 | 0 | } |
5502 | 0 | } |
5503 | 0 | if (UnpackFromEltSize > 4) |
5504 | 0 | return; |
5505 | | |
5506 | 0 | LLVM_DEBUG(dbgs() << "Preparing for final unpack of element size " |
5507 | 0 | << UnpackFromEltSize << ". Zero vector is Op#" << ZeroVecOpNo |
5508 | 0 | << ".\n"; |
5509 | 0 | dumpBytes(Bytes, "Original Bytes vector:");); |
5510 | | |
5511 | | // Apply the unpack in reverse to the Bytes array. |
5512 | 0 | unsigned B = 0; |
5513 | 0 | for (unsigned Elt = 0; Elt < SystemZ::VectorBytes;) { |
5514 | 0 | Elt += UnpackFromEltSize; |
5515 | 0 | for (unsigned i = 0; i < UnpackFromEltSize; i++, Elt++, B++) |
5516 | 0 | Bytes[B] = Bytes[Elt]; |
5517 | 0 | } |
5518 | 0 | while (B < SystemZ::VectorBytes) |
5519 | 0 | Bytes[B++] = -1; |
5520 | | |
5521 | | // Remove the zero vector from Ops.
5522 | 0 | Ops.erase(&Ops[ZeroVecOpNo]); |
5523 | 0 | for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) |
5524 | 0 | if (Bytes[I] >= 0) { |
5525 | 0 | unsigned OpNo = unsigned(Bytes[I]) / SystemZ::VectorBytes; |
5526 | 0 | if (OpNo > ZeroVecOpNo) |
5527 | 0 | Bytes[I] -= SystemZ::VectorBytes; |
5528 | 0 | } |
5529 | |
5530 | 0 | LLVM_DEBUG(dumpBytes(Bytes, "Resulting Bytes vector, zero vector removed:"); |
5531 | 0 | dbgs() << "\n";); |
5532 | 0 | } |
5533 | | |
5534 | | SDValue GeneralShuffle::insertUnpackIfPrepared(SelectionDAG &DAG, |
5535 | | const SDLoc &DL, |
5536 | 0 | SDValue Op) { |
5537 | 0 | if (!unpackWasPrepared()) |
5538 | 0 | return Op; |
5539 | 0 | unsigned InBits = UnpackFromEltSize * 8; |
5540 | 0 | EVT InVT = MVT::getVectorVT(MVT::getIntegerVT(InBits), |
5541 | 0 | SystemZ::VectorBits / InBits); |
5542 | 0 | SDValue PackedOp = DAG.getNode(ISD::BITCAST, DL, InVT, Op); |
5543 | 0 | unsigned OutBits = InBits * 2; |
5544 | 0 | EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(OutBits), |
5545 | 0 | SystemZ::VectorBits / OutBits); |
5546 | 0 | return DAG.getNode(SystemZISD::UNPACKL_HIGH, DL, OutVT, PackedOp); |
5547 | 0 | } |
5548 | | |
5549 | | // Return true if the given BUILD_VECTOR is a scalar-to-vector conversion. |
5550 | 0 | static bool isScalarToVector(SDValue Op) { |
5551 | 0 | for (unsigned I = 1, E = Op.getNumOperands(); I != E; ++I) |
5552 | 0 | if (!Op.getOperand(I).isUndef()) |
5553 | 0 | return false; |
5554 | 0 | return true; |
5555 | 0 | } |
5556 | | |
5557 | | // Return a vector of type VT that contains Value in the first element. |
5558 | | // The other elements don't matter. |
5559 | | static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT, |
5560 | 0 | SDValue Value) { |
5561 | | // If we have a constant, replicate it to all elements and let the |
5562 | | // BUILD_VECTOR lowering take care of it. |
5563 | 0 | if (Value.getOpcode() == ISD::Constant || |
5564 | 0 | Value.getOpcode() == ISD::ConstantFP) { |
5565 | 0 | SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Value); |
5566 | 0 | return DAG.getBuildVector(VT, DL, Ops); |
5567 | 0 | } |
5568 | 0 | if (Value.isUndef()) |
5569 | 0 | return DAG.getUNDEF(VT); |
5570 | 0 | return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value); |
5571 | 0 | } |
5572 | | |
5573 | | // Return a vector of type VT in which Op0 is in element 0 and Op1 is in |
5574 | | // element 1. Used for cases in which replication is cheap. |
5575 | | static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT, |
5576 | 0 | SDValue Op0, SDValue Op1) { |
5577 | 0 | if (Op0.isUndef()) { |
5578 | 0 | if (Op1.isUndef()) |
5579 | 0 | return DAG.getUNDEF(VT); |
5580 | 0 | return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op1); |
5581 | 0 | } |
5582 | 0 | if (Op1.isUndef()) |
5583 | 0 | return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0); |
5584 | 0 | return DAG.getNode(SystemZISD::MERGE_HIGH, DL, VT, |
5585 | 0 | buildScalarToVector(DAG, DL, VT, Op0), |
5586 | 0 | buildScalarToVector(DAG, DL, VT, Op1)); |
5587 | 0 | } |
5588 | | |
5589 | | // Extend GPR scalars Op0 and Op1 to doublewords and return a v2i64 |
5590 | | // vector for them. |
5591 | | static SDValue joinDwords(SelectionDAG &DAG, const SDLoc &DL, SDValue Op0, |
5592 | 0 | SDValue Op1) { |
5593 | 0 | if (Op0.isUndef() && Op1.isUndef()) |
5594 | 0 | return DAG.getUNDEF(MVT::v2i64); |
5595 | | // If one of the two inputs is undefined then replicate the other one, |
5596 | | // in order to avoid using another register unnecessarily. |
5597 | 0 | if (Op0.isUndef()) |
5598 | 0 | Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1); |
5599 | 0 | else if (Op1.isUndef()) |
5600 | 0 | Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0); |
5601 | 0 | else { |
5602 | 0 | Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0); |
5603 | 0 | Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1); |
5604 | 0 | } |
5605 | 0 | return DAG.getNode(SystemZISD::JOIN_DWORDS, DL, MVT::v2i64, Op0, Op1); |
5606 | 0 | } |
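 | | // For illustration: with two defined i32 inputs a and b, this emits
 | | //   (v2i64 JOIN_DWORDS (any_extend a), (any_extend b))
 | | // which selects to a single VLVGP. If instead one input is undef, the
 | | // other is replicated into both doublewords so that no second GPR is
 | | // consumed just to fill the unused half.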
5607 | | |
5608 | | // If a BUILD_VECTOR contains some EXTRACT_VECTOR_ELTs, it's usually |
5609 | | // better to use VECTOR_SHUFFLEs on them, only using BUILD_VECTOR for |
5610 | | // the non-EXTRACT_VECTOR_ELT elements. See if the given BUILD_VECTOR |
5611 | | // would benefit from this representation and return it if so. |
5612 | | static SDValue tryBuildVectorShuffle(SelectionDAG &DAG, |
5613 | 0 | BuildVectorSDNode *BVN) { |
5614 | 0 | EVT VT = BVN->getValueType(0); |
5615 | 0 | unsigned NumElements = VT.getVectorNumElements(); |
5616 | | |
5617 | | // Represent the BUILD_VECTOR as an N-operand VECTOR_SHUFFLE-like operation |
5618 | | // on byte vectors. If there are non-EXTRACT_VECTOR_ELT elements that still |
5619 | | // need a BUILD_VECTOR, add an additional placeholder operand for that |
5620 | | // BUILD_VECTOR and store its operands in ResidueOps. |
5621 | 0 | GeneralShuffle GS(VT); |
5622 | 0 | SmallVector<SDValue, SystemZ::VectorBytes> ResidueOps; |
5623 | 0 | bool FoundOne = false; |
5624 | 0 | for (unsigned I = 0; I < NumElements; ++I) { |
5625 | 0 | SDValue Op = BVN->getOperand(I); |
5626 | 0 | if (Op.getOpcode() == ISD::TRUNCATE) |
5627 | 0 | Op = Op.getOperand(0); |
5628 | 0 | if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
5629 | 0 | Op.getOperand(1).getOpcode() == ISD::Constant) { |
5630 | 0 | unsigned Elem = Op.getConstantOperandVal(1); |
5631 | 0 | if (!GS.add(Op.getOperand(0), Elem)) |
5632 | 0 | return SDValue(); |
5633 | 0 | FoundOne = true; |
5634 | 0 | } else if (Op.isUndef()) { |
5635 | 0 | GS.addUndef(); |
5636 | 0 | } else { |
5637 | 0 | if (!GS.add(SDValue(), ResidueOps.size())) |
5638 | 0 | return SDValue(); |
5639 | 0 | ResidueOps.push_back(BVN->getOperand(I)); |
5640 | 0 | } |
5641 | 0 | } |
5642 | | |
5643 | | // Nothing to do if there are no EXTRACT_VECTOR_ELTs. |
5644 | 0 | if (!FoundOne) |
5645 | 0 | return SDValue(); |
5646 | | |
5647 | | // Create the BUILD_VECTOR for the remaining elements, if any. |
5648 | 0 | if (!ResidueOps.empty()) { |
5649 | 0 | while (ResidueOps.size() < NumElements) |
5650 | 0 | ResidueOps.push_back(DAG.getUNDEF(ResidueOps[0].getValueType())); |
5651 | 0 | for (auto &Op : GS.Ops) { |
5652 | 0 | if (!Op.getNode()) { |
5653 | 0 | Op = DAG.getBuildVector(VT, SDLoc(BVN), ResidueOps); |
5654 | 0 | break; |
5655 | 0 | } |
5656 | 0 | } |
5657 | 0 | } |
5658 | 0 | return GS.getNode(DAG, SDLoc(BVN)); |
5659 | 0 | } |
5660 | | |
5661 | 0 | bool SystemZTargetLowering::isVectorElementLoad(SDValue Op) const { |
5662 | 0 | if (Op.getOpcode() == ISD::LOAD && cast<LoadSDNode>(Op)->isUnindexed()) |
5663 | 0 | return true; |
5664 | 0 | if (Subtarget.hasVectorEnhancements2() && Op.getOpcode() == SystemZISD::LRV) |
5665 | 0 | return true; |
5666 | 0 | return false; |
5667 | 0 | } |
5668 | | |
5669 | | // Combine GPR scalar values Elems into a vector of type VT. |
5670 | | SDValue |
5671 | | SystemZTargetLowering::buildVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT, |
5672 | 0 | SmallVectorImpl<SDValue> &Elems) const { |
5673 | | // See whether there is a single replicated value. |
5674 | 0 | SDValue Single; |
5675 | 0 | unsigned int NumElements = Elems.size(); |
5676 | 0 | unsigned int Count = 0; |
5677 | 0 | for (auto Elem : Elems) { |
5678 | 0 | if (!Elem.isUndef()) { |
5679 | 0 | if (!Single.getNode()) |
5680 | 0 | Single = Elem; |
5681 | 0 | else if (Elem != Single) { |
5682 | 0 | Single = SDValue(); |
5683 | 0 | break; |
5684 | 0 | } |
5685 | 0 | Count += 1; |
5686 | 0 | } |
5687 | 0 | } |
5688 | | // There are three cases here: |
5689 | | // |
5690 | | // - if the only defined element is a loaded one, the best sequence |
5691 | | // is a replicating load. |
5692 | | // |
5693 | | // - otherwise, if the only defined element is an i64 value, we will |
5694 | | // end up with the same VLVGP sequence regardless of whether we short-cut |
5695 | | // for replication or fall through to the later code. |
5696 | | // |
5697 | | // - otherwise, if the only defined element is an i32 or smaller value, |
5698 | | // we would need 2 instructions to replicate it: VLVGP followed by VREPx. |
5699 | | // This is only a win if the single defined element is used more than once. |
5700 | | // In other cases we're better off using a single VLVGx. |
5701 | 0 | if (Single.getNode() && (Count > 1 || isVectorElementLoad(Single))) |
5702 | 0 | return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Single); |
5703 | | |
5704 | | // If all elements are loads, use VLREP/VLEs (below). |
5705 | 0 | bool AllLoads = true; |
5706 | 0 | for (auto Elem : Elems) |
5707 | 0 | if (!isVectorElementLoad(Elem)) { |
5708 | 0 | AllLoads = false; |
5709 | 0 | break; |
5710 | 0 | } |
5711 | | |
5712 | | // The best way of building a v2i64 from two i64s is to use VLVGP. |
5713 | 0 | if (VT == MVT::v2i64 && !AllLoads) |
5714 | 0 | return joinDwords(DAG, DL, Elems[0], Elems[1]); |
5715 | | |
5716 | | // Use a 64-bit merge high to combine two doubles. |
5717 | 0 | if (VT == MVT::v2f64 && !AllLoads) |
5718 | 0 | return buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]); |
5719 | | |
5720 | | // Build v4f32 values directly from the FPRs: |
5721 | | // |
5722 | | //   <Axxx> <Bxxx> <Cxxx> <Dxxx>
5723 | | //       V            V          VMRHF
5724 | | //    <ABxx>       <CDxx>
5725 | | //         V     V               VMRHG
5726 | | //        <ABCD>
5727 | 0 | if (VT == MVT::v4f32 && !AllLoads) { |
5728 | 0 | SDValue Op01 = buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]); |
5729 | 0 | SDValue Op23 = buildMergeScalars(DAG, DL, VT, Elems[2], Elems[3]); |
5730 | | // Avoid unnecessary undefs by reusing the other operand. |
5731 | 0 | if (Op01.isUndef()) |
5732 | 0 | Op01 = Op23; |
5733 | 0 | else if (Op23.isUndef()) |
5734 | 0 | Op23 = Op01; |
5735 | | // Merging identical replications is a no-op. |
5736 | 0 | if (Op01.getOpcode() == SystemZISD::REPLICATE && Op01 == Op23) |
5737 | 0 | return Op01; |
5738 | 0 | Op01 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op01); |
5739 | 0 | Op23 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op23); |
5740 | 0 | SDValue Op = DAG.getNode(SystemZISD::MERGE_HIGH, |
5741 | 0 | DL, MVT::v2i64, Op01, Op23); |
5742 | 0 | return DAG.getNode(ISD::BITCAST, DL, VT, Op); |
5743 | 0 | } |
5744 | | |
5745 | | // Collect the constant terms. |
5746 | 0 | SmallVector<SDValue, SystemZ::VectorBytes> Constants(NumElements, SDValue()); |
5747 | 0 | SmallVector<bool, SystemZ::VectorBytes> Done(NumElements, false); |
5748 | |
5749 | 0 | unsigned NumConstants = 0; |
5750 | 0 | for (unsigned I = 0; I < NumElements; ++I) { |
5751 | 0 | SDValue Elem = Elems[I]; |
5752 | 0 | if (Elem.getOpcode() == ISD::Constant || |
5753 | 0 | Elem.getOpcode() == ISD::ConstantFP) { |
5754 | 0 | NumConstants += 1; |
5755 | 0 | Constants[I] = Elem; |
5756 | 0 | Done[I] = true; |
5757 | 0 | } |
5758 | 0 | } |
5759 | | // If there was at least one constant, fill in the other elements of |
5760 | | // Constants with undefs to get a full vector constant and use that |
5761 | | // as the starting point. |
5762 | 0 | SDValue Result; |
5763 | 0 | SDValue ReplicatedVal; |
5764 | 0 | if (NumConstants > 0) { |
5765 | 0 | for (unsigned I = 0; I < NumElements; ++I) |
5766 | 0 | if (!Constants[I].getNode()) |
5767 | 0 | Constants[I] = DAG.getUNDEF(Elems[I].getValueType()); |
5768 | 0 | Result = DAG.getBuildVector(VT, DL, Constants); |
5769 | 0 | } else { |
5770 | | // Otherwise try to use VLREP or VLVGP to start the sequence in order to |
5771 | | // avoid a false dependency on any previous contents of the vector |
5772 | | // register. |
5773 | | |
5774 | | // Use a VLREP if at least one element is a load. Make sure to replicate |
5775 | | // the load with the most elements having its value. |
5776 | 0 | std::map<const SDNode*, unsigned> UseCounts; |
5777 | 0 | SDNode *LoadMaxUses = nullptr; |
5778 | 0 | for (unsigned I = 0; I < NumElements; ++I) |
5779 | 0 | if (isVectorElementLoad(Elems[I])) { |
5780 | 0 | SDNode *Ld = Elems[I].getNode(); |
5781 | 0 | UseCounts[Ld]++; |
5782 | 0 | if (LoadMaxUses == nullptr || UseCounts[LoadMaxUses] < UseCounts[Ld]) |
5783 | 0 | LoadMaxUses = Ld; |
5784 | 0 | } |
5785 | 0 | if (LoadMaxUses != nullptr) { |
5786 | 0 | ReplicatedVal = SDValue(LoadMaxUses, 0); |
5787 | 0 | Result = DAG.getNode(SystemZISD::REPLICATE, DL, VT, ReplicatedVal); |
5788 | 0 | } else { |
5789 | | // Try to use VLVGP. |
5790 | 0 | unsigned I1 = NumElements / 2 - 1; |
5791 | 0 | unsigned I2 = NumElements - 1; |
5792 | 0 | bool Def1 = !Elems[I1].isUndef(); |
5793 | 0 | bool Def2 = !Elems[I2].isUndef(); |
5794 | 0 | if (Def1 || Def2) { |
5795 | 0 | SDValue Elem1 = Elems[Def1 ? I1 : I2]; |
5796 | 0 | SDValue Elem2 = Elems[Def2 ? I2 : I1]; |
5797 | 0 | Result = DAG.getNode(ISD::BITCAST, DL, VT, |
5798 | 0 | joinDwords(DAG, DL, Elem1, Elem2)); |
5799 | 0 | Done[I1] = true; |
5800 | 0 | Done[I2] = true; |
5801 | 0 | } else |
5802 | 0 | Result = DAG.getUNDEF(VT); |
5803 | 0 | } |
5804 | 0 | } |
5805 | | |
5806 | | // Use VLVGx to insert the other elements. |
5807 | 0 | for (unsigned I = 0; I < NumElements; ++I) |
5808 | 0 | if (!Done[I] && !Elems[I].isUndef() && Elems[I] != ReplicatedVal) |
5809 | 0 | Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Result, Elems[I], |
5810 | 0 | DAG.getConstant(I, DL, MVT::i32)); |
5811 | 0 | return Result; |
5812 | 0 | } |
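 | | // Worked example (illustrative): building a v4i32 from four distinct
 | | // non-constant, non-load GPR values {a, b, c, d} takes the VLVGP path
 | | // with I1 = 1 and I2 = 3: VLVGP fills both doublewords (placing b in
 | | // element 1 and d in element 3), after which elements 0 and 2 are
 | | // inserted with VLVGF, roughly three instructions in total.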
5813 | | |
5814 | | SDValue SystemZTargetLowering::lowerBUILD_VECTOR(SDValue Op, |
5815 | 0 | SelectionDAG &DAG) const { |
5816 | 0 | auto *BVN = cast<BuildVectorSDNode>(Op.getNode()); |
5817 | 0 | SDLoc DL(Op); |
5818 | 0 | EVT VT = Op.getValueType(); |
5819 | |
5820 | 0 | if (BVN->isConstant()) { |
5821 | 0 | if (SystemZVectorConstantInfo(BVN).isVectorConstantLegal(Subtarget)) |
5822 | 0 | return Op; |
5823 | | |
5824 | | // Fall back to loading it from memory. |
5825 | 0 | return SDValue(); |
5826 | 0 | } |
5827 | | |
5828 | | // See if we should use shuffles to construct the vector from other vectors. |
5829 | 0 | if (SDValue Res = tryBuildVectorShuffle(DAG, BVN)) |
5830 | 0 | return Res; |
5831 | | |
5832 | | // Detect SCALAR_TO_VECTOR conversions. |
5833 | 0 | if (isOperationLegal(ISD::SCALAR_TO_VECTOR, VT) && isScalarToVector(Op)) |
5834 | 0 | return buildScalarToVector(DAG, DL, VT, Op.getOperand(0)); |
5835 | | |
5836 | | // Otherwise use buildVector to build the vector up from GPRs. |
5837 | 0 | unsigned NumElements = Op.getNumOperands(); |
5838 | 0 | SmallVector<SDValue, SystemZ::VectorBytes> Ops(NumElements); |
5839 | 0 | for (unsigned I = 0; I < NumElements; ++I) |
5840 | 0 | Ops[I] = Op.getOperand(I); |
5841 | 0 | return buildVector(DAG, DL, VT, Ops); |
5842 | 0 | } |
5843 | | |
5844 | | SDValue SystemZTargetLowering::lowerVECTOR_SHUFFLE(SDValue Op, |
5845 | 0 | SelectionDAG &DAG) const { |
5846 | 0 | auto *VSN = cast<ShuffleVectorSDNode>(Op.getNode()); |
5847 | 0 | SDLoc DL(Op); |
5848 | 0 | EVT VT = Op.getValueType(); |
5849 | 0 | unsigned NumElements = VT.getVectorNumElements(); |
5850 | |
5851 | 0 | if (VSN->isSplat()) { |
5852 | 0 | SDValue Op0 = Op.getOperand(0); |
5853 | 0 | unsigned Index = VSN->getSplatIndex(); |
5854 | 0 | assert(Index < VT.getVectorNumElements() && |
5855 | 0 | "Splat index should be defined and in first operand"); |
5856 | | // See whether the value we're splatting is directly available as a scalar. |
5857 | 0 | if ((Index == 0 && Op0.getOpcode() == ISD::SCALAR_TO_VECTOR) || |
5858 | 0 | Op0.getOpcode() == ISD::BUILD_VECTOR) |
5859 | 0 | return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0.getOperand(Index)); |
5860 | | // Otherwise keep it as a vector-to-vector operation. |
5861 | 0 | return DAG.getNode(SystemZISD::SPLAT, DL, VT, Op.getOperand(0), |
5862 | 0 | DAG.getTargetConstant(Index, DL, MVT::i32)); |
5863 | 0 | } |
5864 | | |
5865 | 0 | GeneralShuffle GS(VT); |
5866 | 0 | for (unsigned I = 0; I < NumElements; ++I) { |
5867 | 0 | int Elt = VSN->getMaskElt(I); |
5868 | 0 | if (Elt < 0) |
5869 | 0 | GS.addUndef(); |
5870 | 0 | else if (!GS.add(Op.getOperand(unsigned(Elt) / NumElements), |
5871 | 0 | unsigned(Elt) % NumElements)) |
5872 | 0 | return SDValue(); |
5873 | 0 | } |
5874 | 0 | return GS.getNode(DAG, SDLoc(VSN)); |
5875 | 0 | } |
5876 | | |
5877 | | SDValue SystemZTargetLowering::lowerSCALAR_TO_VECTOR(SDValue Op, |
5878 | 0 | SelectionDAG &DAG) const { |
5879 | 0 | SDLoc DL(Op); |
5880 | | // Just insert the scalar into element 0 of an undefined vector. |
5881 | 0 | return DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, |
5882 | 0 | Op.getValueType(), DAG.getUNDEF(Op.getValueType()), |
5883 | 0 | Op.getOperand(0), DAG.getConstant(0, DL, MVT::i32)); |
5884 | 0 | } |
5885 | | |
5886 | | SDValue SystemZTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op, |
5887 | 0 | SelectionDAG &DAG) const { |
5888 | | // Handle insertions of floating-point values. |
5889 | 0 | SDLoc DL(Op); |
5890 | 0 | SDValue Op0 = Op.getOperand(0); |
5891 | 0 | SDValue Op1 = Op.getOperand(1); |
5892 | 0 | SDValue Op2 = Op.getOperand(2); |
5893 | 0 | EVT VT = Op.getValueType(); |
5894 | | |
5895 | | // Insertions into constant indices of a v2f64 can be done using VPDI. |
5896 | | // However, if the inserted value is a bitcast or a constant then it's |
5897 | | // better to use GPRs, as below. |
5898 | 0 | if (VT == MVT::v2f64 && |
5899 | 0 | Op1.getOpcode() != ISD::BITCAST && |
5900 | 0 | Op1.getOpcode() != ISD::ConstantFP && |
5901 | 0 | Op2.getOpcode() == ISD::Constant) { |
5902 | 0 | uint64_t Index = Op2->getAsZExtVal(); |
5903 | 0 | unsigned Mask = VT.getVectorNumElements() - 1; |
5904 | 0 | if (Index <= Mask) |
5905 | 0 | return Op; |
5906 | 0 | } |
5907 | | |
5908 | | // Otherwise bitcast to the equivalent integer form and insert via a GPR. |
5909 | 0 | MVT IntVT = MVT::getIntegerVT(VT.getScalarSizeInBits()); |
5910 | 0 | MVT IntVecVT = MVT::getVectorVT(IntVT, VT.getVectorNumElements()); |
5911 | 0 | SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntVecVT, |
5912 | 0 | DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0), |
5913 | 0 | DAG.getNode(ISD::BITCAST, DL, IntVT, Op1), Op2); |
5914 | 0 | return DAG.getNode(ISD::BITCAST, DL, VT, Res); |
5915 | 0 | } |
5916 | | |
5917 | | SDValue |
5918 | | SystemZTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op, |
5919 | 0 | SelectionDAG &DAG) const { |
5920 | | // Handle extractions of floating-point values. |
5921 | 0 | SDLoc DL(Op); |
5922 | 0 | SDValue Op0 = Op.getOperand(0); |
5923 | 0 | SDValue Op1 = Op.getOperand(1); |
5924 | 0 | EVT VT = Op.getValueType(); |
5925 | 0 | EVT VecVT = Op0.getValueType(); |
5926 | | |
5927 | | // Extractions of constant indices can be done directly. |
5928 | 0 | if (auto *CIndexN = dyn_cast<ConstantSDNode>(Op1)) { |
5929 | 0 | uint64_t Index = CIndexN->getZExtValue(); |
5930 | 0 | unsigned Mask = VecVT.getVectorNumElements() - 1; |
5931 | 0 | if (Index <= Mask) |
5932 | 0 | return Op; |
5933 | 0 | } |
5934 | | |
5935 | | // Otherwise bitcast to the equivalent integer form and extract via a GPR. |
5936 | 0 | MVT IntVT = MVT::getIntegerVT(VT.getSizeInBits()); |
5937 | 0 | MVT IntVecVT = MVT::getVectorVT(IntVT, VecVT.getVectorNumElements()); |
5938 | 0 | SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntVT, |
5939 | 0 | DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0), Op1); |
5940 | 0 | return DAG.getNode(ISD::BITCAST, DL, VT, Res); |
5941 | 0 | } |
5942 | | |
5943 | | SDValue SystemZTargetLowering:: |
5944 | 0 | lowerSIGN_EXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const { |
5945 | 0 | SDValue PackedOp = Op.getOperand(0); |
5946 | 0 | EVT OutVT = Op.getValueType(); |
5947 | 0 | EVT InVT = PackedOp.getValueType(); |
5948 | 0 | unsigned ToBits = OutVT.getScalarSizeInBits(); |
5949 | 0 | unsigned FromBits = InVT.getScalarSizeInBits(); |
5950 | 0 | do { |
5951 | 0 | FromBits *= 2; |
5952 | 0 | EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(FromBits), |
5953 | 0 | SystemZ::VectorBits / FromBits); |
5954 | 0 | PackedOp = |
5955 | 0 | DAG.getNode(SystemZISD::UNPACK_HIGH, SDLoc(PackedOp), OutVT, PackedOp); |
5956 | 0 | } while (FromBits != ToBits); |
5957 | 0 | return PackedOp; |
5958 | 0 | } |
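 | | // Worked example (illustrative): extending v16i8 in-register to v4i32
 | | // doubles FromBits twice, emitting UNPACK_HIGH (VUPHB, v16i8 -> v8i16)
 | | // and then UNPACK_HIGH again (VUPHH, v8i16 -> v4i32). Each unpack
 | | // sign-extends elements 0 .. N/2-1, the leftmost ("high") elements,
 | | // which is what the *_VECTOR_INREG semantics need on a big-endian
 | | // target.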
5959 | | |
5960 | | // Lower a ZERO_EXTEND_VECTOR_INREG to a vector shuffle with a zero vector. |
5961 | | SDValue SystemZTargetLowering:: |
5962 | 0 | lowerZERO_EXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const { |
5963 | 0 | SDValue PackedOp = Op.getOperand(0); |
5964 | 0 | SDLoc DL(Op); |
5965 | 0 | EVT OutVT = Op.getValueType(); |
5966 | 0 | EVT InVT = PackedOp.getValueType(); |
5967 | 0 | unsigned InNumElts = InVT.getVectorNumElements(); |
5968 | 0 | unsigned OutNumElts = OutVT.getVectorNumElements(); |
5969 | 0 | unsigned NumInPerOut = InNumElts / OutNumElts; |
5970 | |
5971 | 0 | SDValue ZeroVec = |
5972 | 0 | DAG.getSplatVector(InVT, DL, DAG.getConstant(0, DL, InVT.getScalarType())); |
5973 | |
5974 | 0 | SmallVector<int, 16> Mask(InNumElts); |
5975 | 0 | unsigned ZeroVecElt = InNumElts; |
5976 | 0 | for (unsigned PackedElt = 0; PackedElt < OutNumElts; PackedElt++) { |
5977 | 0 | unsigned MaskElt = PackedElt * NumInPerOut; |
5978 | 0 | unsigned End = MaskElt + NumInPerOut - 1; |
5979 | 0 | for (; MaskElt < End; MaskElt++) |
5980 | 0 | Mask[MaskElt] = ZeroVecElt++; |
5981 | 0 | Mask[MaskElt] = PackedElt; |
5982 | 0 | } |
5983 | 0 | SDValue Shuf = DAG.getVectorShuffle(InVT, DL, PackedOp, ZeroVec, Mask); |
5984 | 0 | return DAG.getNode(ISD::BITCAST, DL, OutVT, Shuf); |
5985 | 0 | } |
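 | | // Worked example (illustrative): for v8i16 -> v4i32, NumInPerOut == 2
 | | // and the loop builds Mask = { 8, 0, 9, 1, 10, 2, 11, 3 }: each output
 | | // i32 is a zero i16 (selector >= 8, from ZeroVec) followed by the data
 | | // i16, putting the zero half first as big-endian zero-extension
 | | // requires. The shuffle lowering can then match this as a merge-high
 | | // (VMRHH) of ZeroVec with PackedOp.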
5986 | | |
5987 | | SDValue SystemZTargetLowering::lowerShift(SDValue Op, SelectionDAG &DAG, |
5988 | 0 | unsigned ByScalar) const { |
5989 | | // Look for cases where a vector shift can use the *_BY_SCALAR form. |
5990 | 0 | SDValue Op0 = Op.getOperand(0); |
5991 | 0 | SDValue Op1 = Op.getOperand(1); |
5992 | 0 | SDLoc DL(Op); |
5993 | 0 | EVT VT = Op.getValueType(); |
5994 | 0 | unsigned ElemBitSize = VT.getScalarSizeInBits(); |
5995 | | |
5996 | | // See whether the shift vector is a splat represented as BUILD_VECTOR. |
5997 | 0 | if (auto *BVN = dyn_cast<BuildVectorSDNode>(Op1)) { |
5998 | 0 | APInt SplatBits, SplatUndef; |
5999 | 0 | unsigned SplatBitSize; |
6000 | 0 | bool HasAnyUndefs; |
6001 | | // Check for constant splats. Use ElemBitSize as the minimum element |
6002 | | // width and reject splats that need wider elements. |
6003 | 0 | if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, |
6004 | 0 | ElemBitSize, true) && |
6005 | 0 | SplatBitSize == ElemBitSize) { |
6006 | 0 | SDValue Shift = DAG.getConstant(SplatBits.getZExtValue() & 0xfff, |
6007 | 0 | DL, MVT::i32); |
6008 | 0 | return DAG.getNode(ByScalar, DL, VT, Op0, Shift); |
6009 | 0 | } |
6010 | | // Check for variable splats. |
6011 | 0 | BitVector UndefElements; |
6012 | 0 | SDValue Splat = BVN->getSplatValue(&UndefElements); |
6013 | 0 | if (Splat) { |
6014 | | // Since i32 is the smallest legal type, we either need a no-op |
6015 | | // or a truncation. |
6016 | 0 | SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Splat); |
6017 | 0 | return DAG.getNode(ByScalar, DL, VT, Op0, Shift); |
6018 | 0 | } |
6019 | 0 | } |
6020 | | |
6021 | | // See whether the shift vector is a splat represented as SHUFFLE_VECTOR, |
6022 | | // and the shift amount is directly available in a GPR. |
6023 | 0 | if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(Op1)) { |
6024 | 0 | if (VSN->isSplat()) { |
6025 | 0 | SDValue VSNOp0 = VSN->getOperand(0); |
6026 | 0 | unsigned Index = VSN->getSplatIndex(); |
6027 | 0 | assert(Index < VT.getVectorNumElements() && |
6028 | 0 | "Splat index should be defined and in first operand"); |
6029 | 0 | if ((Index == 0 && VSNOp0.getOpcode() == ISD::SCALAR_TO_VECTOR) || |
6030 | 0 | VSNOp0.getOpcode() == ISD::BUILD_VECTOR) { |
6031 | | // Since i32 is the smallest legal type, we either need a no-op |
6032 | | // or a truncation. |
6033 | 0 | SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, |
6034 | 0 | VSNOp0.getOperand(Index)); |
6035 | 0 | return DAG.getNode(ByScalar, DL, VT, Op0, Shift); |
6036 | 0 | } |
6037 | 0 | } |
6038 | 0 | } |
6039 | | |
6040 | | // Otherwise just treat the current form as legal. |
6041 | 0 | return Op; |
6042 | 0 | } |
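 | | // Worked example (illustrative): for (shl (v4i32 x), (splat 5)) the
 | | // constant-splat path produces
 | | //   DAG.getNode(ByScalar, DL, MVT::v4i32, x, DAG.getConstant(5, DL, MVT::i32))
 | | // where ByScalar would be SystemZISD::VSHL_BY_SCALAR for SHL; that
 | | // selects to a single VESLF, since all lanes share one shift amount.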
6043 | | |
6044 | | SDValue SystemZTargetLowering::lowerIS_FPCLASS(SDValue Op, |
6045 | 0 | SelectionDAG &DAG) const { |
6046 | 0 | SDLoc DL(Op); |
6047 | 0 | MVT ResultVT = Op.getSimpleValueType(); |
6048 | 0 | SDValue Arg = Op.getOperand(0); |
6049 | 0 | unsigned Check = Op.getConstantOperandVal(1); |
6050 | |
6051 | 0 | unsigned TDCMask = 0; |
6052 | 0 | if (Check & fcSNan) |
6053 | 0 | TDCMask |= SystemZ::TDCMASK_SNAN_PLUS | SystemZ::TDCMASK_SNAN_MINUS; |
6054 | 0 | if (Check & fcQNan) |
6055 | 0 | TDCMask |= SystemZ::TDCMASK_QNAN_PLUS | SystemZ::TDCMASK_QNAN_MINUS; |
6056 | 0 | if (Check & fcPosInf) |
6057 | 0 | TDCMask |= SystemZ::TDCMASK_INFINITY_PLUS; |
6058 | 0 | if (Check & fcNegInf) |
6059 | 0 | TDCMask |= SystemZ::TDCMASK_INFINITY_MINUS; |
6060 | 0 | if (Check & fcPosNormal) |
6061 | 0 | TDCMask |= SystemZ::TDCMASK_NORMAL_PLUS; |
6062 | 0 | if (Check & fcNegNormal) |
6063 | 0 | TDCMask |= SystemZ::TDCMASK_NORMAL_MINUS; |
6064 | 0 | if (Check & fcPosSubnormal) |
6065 | 0 | TDCMask |= SystemZ::TDCMASK_SUBNORMAL_PLUS; |
6066 | 0 | if (Check & fcNegSubnormal) |
6067 | 0 | TDCMask |= SystemZ::TDCMASK_SUBNORMAL_MINUS; |
6068 | 0 | if (Check & fcPosZero) |
6069 | 0 | TDCMask |= SystemZ::TDCMASK_ZERO_PLUS; |
6070 | 0 | if (Check & fcNegZero) |
6071 | 0 | TDCMask |= SystemZ::TDCMASK_ZERO_MINUS; |
6072 | 0 | SDValue TDCMaskV = DAG.getConstant(TDCMask, DL, MVT::i64); |
6073 | |
6074 | 0 | SDValue Intr = DAG.getNode(SystemZISD::TDC, DL, ResultVT, Arg, TDCMaskV); |
6075 | 0 | return getCCResult(DAG, Intr); |
6076 | 0 | } |
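| | // A minimal standalone sketch (not part of this file) of the fcXXX -> TDC
| | // translation above, written table-driven instead of as an if-chain. The
| | // bit values are illustrative placeholders, not the real fcXXX enumerators
| | // or the SystemZ::TDCMASK_* constants.
| | #include <utility>
| |
| | static constexpr std::pair<unsigned, unsigned> ClassToTDC[] = {
| |     {0x0001 /*fcSNan (placeholder)*/, 0x0003 /*SNAN_PLUS | SNAN_MINUS*/},
| |     {0x0002 /*fcQNan (placeholder)*/, 0x000c /*QNAN_PLUS | QNAN_MINUS*/},
| |     // ... one row per remaining class bit, mirroring the if-chain above.
| | };
| |
| | unsigned classTestToTDCMask(unsigned Check) {
| |   unsigned TDCMask = 0;
| |   for (const auto &Row : ClassToTDC)
| |     if (Check & Row.first)
| |       TDCMask |= Row.second;
| |   return TDCMask;
| | }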
6077 | | |
6078 | | SDValue SystemZTargetLowering::LowerOperation(SDValue Op, |
6079 | 0 | SelectionDAG &DAG) const { |
6080 | 0 | switch (Op.getOpcode()) { |
6081 | 0 | case ISD::FRAMEADDR: |
6082 | 0 | return lowerFRAMEADDR(Op, DAG); |
6083 | 0 | case ISD::RETURNADDR: |
6084 | 0 | return lowerRETURNADDR(Op, DAG); |
6085 | 0 | case ISD::BR_CC: |
6086 | 0 | return lowerBR_CC(Op, DAG); |
6087 | 0 | case ISD::SELECT_CC: |
6088 | 0 | return lowerSELECT_CC(Op, DAG); |
6089 | 0 | case ISD::SETCC: |
6090 | 0 | return lowerSETCC(Op, DAG); |
6091 | 0 | case ISD::STRICT_FSETCC: |
6092 | 0 | return lowerSTRICT_FSETCC(Op, DAG, false); |
6093 | 0 | case ISD::STRICT_FSETCCS: |
6094 | 0 | return lowerSTRICT_FSETCC(Op, DAG, true); |
6095 | 0 | case ISD::GlobalAddress: |
6096 | 0 | return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG); |
6097 | 0 | case ISD::GlobalTLSAddress: |
6098 | 0 | return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG); |
6099 | 0 | case ISD::BlockAddress: |
6100 | 0 | return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG); |
6101 | 0 | case ISD::JumpTable: |
6102 | 0 | return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG); |
6103 | 0 | case ISD::ConstantPool: |
6104 | 0 | return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG); |
6105 | 0 | case ISD::BITCAST: |
6106 | 0 | return lowerBITCAST(Op, DAG); |
6107 | 0 | case ISD::VASTART: |
6108 | 0 | return lowerVASTART(Op, DAG); |
6109 | 0 | case ISD::VACOPY: |
6110 | 0 | return lowerVACOPY(Op, DAG); |
6111 | 0 | case ISD::DYNAMIC_STACKALLOC: |
6112 | 0 | return lowerDYNAMIC_STACKALLOC(Op, DAG); |
6113 | 0 | case ISD::GET_DYNAMIC_AREA_OFFSET: |
6114 | 0 | return lowerGET_DYNAMIC_AREA_OFFSET(Op, DAG); |
6115 | 0 | case ISD::SMUL_LOHI: |
6116 | 0 | return lowerSMUL_LOHI(Op, DAG); |
6117 | 0 | case ISD::UMUL_LOHI: |
6118 | 0 | return lowerUMUL_LOHI(Op, DAG); |
6119 | 0 | case ISD::SDIVREM: |
6120 | 0 | return lowerSDIVREM(Op, DAG); |
6121 | 0 | case ISD::UDIVREM: |
6122 | 0 | return lowerUDIVREM(Op, DAG); |
6123 | 0 | case ISD::SADDO: |
6124 | 0 | case ISD::SSUBO: |
6125 | 0 | case ISD::UADDO: |
6126 | 0 | case ISD::USUBO: |
6127 | 0 | return lowerXALUO(Op, DAG); |
6128 | 0 | case ISD::UADDO_CARRY: |
6129 | 0 | case ISD::USUBO_CARRY: |
6130 | 0 | return lowerUADDSUBO_CARRY(Op, DAG); |
6131 | 0 | case ISD::OR: |
6132 | 0 | return lowerOR(Op, DAG); |
6133 | 0 | case ISD::CTPOP: |
6134 | 0 | return lowerCTPOP(Op, DAG); |
6135 | 0 | case ISD::ATOMIC_FENCE: |
6136 | 0 | return lowerATOMIC_FENCE(Op, DAG); |
6137 | 0 | case ISD::ATOMIC_SWAP: |
6138 | 0 | return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_SWAPW); |
6139 | 0 | case ISD::ATOMIC_STORE: |
6140 | 0 | return lowerATOMIC_STORE(Op, DAG); |
6141 | 0 | case ISD::ATOMIC_LOAD: |
6142 | 0 | return lowerATOMIC_LOAD(Op, DAG); |
6143 | 0 | case ISD::ATOMIC_LOAD_ADD: |
6144 | 0 | return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD); |
6145 | 0 | case ISD::ATOMIC_LOAD_SUB: |
6146 | 0 | return lowerATOMIC_LOAD_SUB(Op, DAG); |
6147 | 0 | case ISD::ATOMIC_LOAD_AND: |
6148 | 0 | return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_AND); |
6149 | 0 | case ISD::ATOMIC_LOAD_OR: |
6150 | 0 | return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_OR); |
6151 | 0 | case ISD::ATOMIC_LOAD_XOR: |
6152 | 0 | return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR); |
6153 | 0 | case ISD::ATOMIC_LOAD_NAND: |
6154 | 0 | return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND); |
6155 | 0 | case ISD::ATOMIC_LOAD_MIN: |
6156 | 0 | return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN); |
6157 | 0 | case ISD::ATOMIC_LOAD_MAX: |
6158 | 0 | return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX); |
6159 | 0 | case ISD::ATOMIC_LOAD_UMIN: |
6160 | 0 | return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN); |
6161 | 0 | case ISD::ATOMIC_LOAD_UMAX: |
6162 | 0 | return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX); |
6163 | 0 | case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: |
6164 | 0 | return lowerATOMIC_CMP_SWAP(Op, DAG); |
6165 | 0 | case ISD::STACKSAVE: |
6166 | 0 | return lowerSTACKSAVE(Op, DAG); |
6167 | 0 | case ISD::STACKRESTORE: |
6168 | 0 | return lowerSTACKRESTORE(Op, DAG); |
6169 | 0 | case ISD::PREFETCH: |
6170 | 0 | return lowerPREFETCH(Op, DAG); |
6171 | 0 | case ISD::INTRINSIC_W_CHAIN: |
6172 | 0 | return lowerINTRINSIC_W_CHAIN(Op, DAG); |
6173 | 0 | case ISD::INTRINSIC_WO_CHAIN: |
6174 | 0 | return lowerINTRINSIC_WO_CHAIN(Op, DAG); |
6175 | 0 | case ISD::BUILD_VECTOR: |
6176 | 0 | return lowerBUILD_VECTOR(Op, DAG); |
6177 | 0 | case ISD::VECTOR_SHUFFLE: |
6178 | 0 | return lowerVECTOR_SHUFFLE(Op, DAG); |
6179 | 0 | case ISD::SCALAR_TO_VECTOR: |
6180 | 0 | return lowerSCALAR_TO_VECTOR(Op, DAG); |
6181 | 0 | case ISD::INSERT_VECTOR_ELT: |
6182 | 0 | return lowerINSERT_VECTOR_ELT(Op, DAG); |
6183 | 0 | case ISD::EXTRACT_VECTOR_ELT: |
6184 | 0 | return lowerEXTRACT_VECTOR_ELT(Op, DAG); |
6185 | 0 | case ISD::SIGN_EXTEND_VECTOR_INREG: |
6186 | 0 | return lowerSIGN_EXTEND_VECTOR_INREG(Op, DAG); |
6187 | 0 | case ISD::ZERO_EXTEND_VECTOR_INREG: |
6188 | 0 | return lowerZERO_EXTEND_VECTOR_INREG(Op, DAG); |
6189 | 0 | case ISD::SHL: |
6190 | 0 | return lowerShift(Op, DAG, SystemZISD::VSHL_BY_SCALAR); |
6191 | 0 | case ISD::SRL: |
6192 | 0 | return lowerShift(Op, DAG, SystemZISD::VSRL_BY_SCALAR); |
6193 | 0 | case ISD::SRA: |
6194 | 0 | return lowerShift(Op, DAG, SystemZISD::VSRA_BY_SCALAR); |
6195 | 0 | case ISD::ROTL: |
6196 | 0 | return lowerShift(Op, DAG, SystemZISD::VROTL_BY_SCALAR); |
6197 | 0 | case ISD::IS_FPCLASS: |
6198 | 0 | return lowerIS_FPCLASS(Op, DAG); |
6199 | 0 | case ISD::GET_ROUNDING: |
6200 | 0 | return lowerGET_ROUNDING(Op, DAG); |
6201 | 0 | default: |
6202 | 0 | llvm_unreachable("Unexpected node to lower"); |
6203 | 0 | } |
6204 | 0 | } |
6205 | | |
6206 | | // Lower operations with invalid operand or result types (currently used |
6207 | | // only for 128-bit integer types). |
6208 | | void |
6209 | | SystemZTargetLowering::LowerOperationWrapper(SDNode *N, |
6210 | | SmallVectorImpl<SDValue> &Results, |
6211 | 0 | SelectionDAG &DAG) const { |
6212 | 0 | switch (N->getOpcode()) { |
6213 | 0 | case ISD::ATOMIC_LOAD: { |
6214 | 0 | SDLoc DL(N); |
6215 | 0 | SDVTList Tys = DAG.getVTList(MVT::Untyped, MVT::Other); |
6216 | 0 | SDValue Ops[] = { N->getOperand(0), N->getOperand(1) }; |
6217 | 0 | MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); |
6218 | 0 | SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_LOAD_128, |
6219 | 0 | DL, Tys, Ops, MVT::i128, MMO); |
6220 | 0 | Results.push_back(lowerGR128ToI128(DAG, Res)); |
6221 | 0 | Results.push_back(Res.getValue(1)); |
6222 | 0 | break; |
6223 | 0 | } |
6224 | 0 | case ISD::ATOMIC_STORE: { |
6225 | 0 | SDLoc DL(N); |
6226 | 0 | SDVTList Tys = DAG.getVTList(MVT::Other); |
6227 | 0 | SDValue Ops[] = {N->getOperand(0), lowerI128ToGR128(DAG, N->getOperand(1)), |
6228 | 0 | N->getOperand(2)}; |
6229 | 0 | MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); |
6230 | 0 | SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_STORE_128, |
6231 | 0 | DL, Tys, Ops, MVT::i128, MMO); |
6232 | | // We have to enforce sequential consistency by performing a |
6233 | | // serialization operation after the store. |
6234 | 0 | if (cast<AtomicSDNode>(N)->getSuccessOrdering() == |
6235 | 0 | AtomicOrdering::SequentiallyConsistent) |
6236 | 0 | Res = SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, |
6237 | 0 | MVT::Other, Res), 0); |
6238 | 0 | Results.push_back(Res); |
6239 | 0 | break; |
6240 | 0 | } |
6241 | 0 | case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: { |
6242 | 0 | SDLoc DL(N); |
6243 | 0 | SDVTList Tys = DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other); |
6244 | 0 | SDValue Ops[] = { N->getOperand(0), N->getOperand(1), |
6245 | 0 | lowerI128ToGR128(DAG, N->getOperand(2)), |
6246 | 0 | lowerI128ToGR128(DAG, N->getOperand(3)) }; |
6247 | 0 | MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); |
6248 | 0 | SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAP_128, |
6249 | 0 | DL, Tys, Ops, MVT::i128, MMO); |
6250 | 0 | SDValue Success = emitSETCC(DAG, DL, Res.getValue(1), |
6251 | 0 | SystemZ::CCMASK_CS, SystemZ::CCMASK_CS_EQ); |
6252 | 0 | Success = DAG.getZExtOrTrunc(Success, DL, N->getValueType(1)); |
6253 | 0 | Results.push_back(lowerGR128ToI128(DAG, Res)); |
6254 | 0 | Results.push_back(Success); |
6255 | 0 | Results.push_back(Res.getValue(2)); |
6256 | 0 | break; |
6257 | 0 | } |
6258 | 0 | case ISD::BITCAST: { |
6259 | 0 | SDValue Src = N->getOperand(0); |
6260 | 0 | if (N->getValueType(0) == MVT::i128 && Src.getValueType() == MVT::f128 && |
6261 | 0 | !useSoftFloat()) { |
6262 | 0 | SDLoc DL(N); |
6263 | 0 | SDValue Lo, Hi; |
6264 | 0 | if (getRepRegClassFor(MVT::f128) == &SystemZ::VR128BitRegClass) { |
6265 | 0 | SDValue VecBC = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Src); |
6266 | 0 | Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, VecBC, |
6267 | 0 | DAG.getConstant(1, DL, MVT::i32)); |
6268 | 0 | Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, VecBC, |
6269 | 0 | DAG.getConstant(0, DL, MVT::i32)); |
6270 | 0 | } else { |
6271 | 0 | assert(getRepRegClassFor(MVT::f128) == &SystemZ::FP128BitRegClass && |
6272 | 0 | "Unrecognized register class for f128."); |
6273 | 0 | SDValue LoFP = DAG.getTargetExtractSubreg(SystemZ::subreg_l64, |
6274 | 0 | DL, MVT::f64, Src); |
6275 | 0 | SDValue HiFP = DAG.getTargetExtractSubreg(SystemZ::subreg_h64, |
6276 | 0 | DL, MVT::f64, Src); |
6277 | 0 | Lo = DAG.getNode(ISD::BITCAST, DL, MVT::i64, LoFP); |
6278 | 0 | Hi = DAG.getNode(ISD::BITCAST, DL, MVT::i64, HiFP); |
6279 | 0 | } |
6280 | 0 | Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i128, Lo, Hi)); |
6281 | 0 | } |
6282 | 0 | break; |
6283 | 0 | } |
6284 | 0 | default: |
6285 | 0 | llvm_unreachable("Unexpected node to lower"); |
6286 | 0 | } |
6287 | 0 | } |
6288 | | |
6289 | | void |
6290 | | SystemZTargetLowering::ReplaceNodeResults(SDNode *N, |
6291 | | SmallVectorImpl<SDValue> &Results, |
6292 | 0 | SelectionDAG &DAG) const { |
6293 | 0 | return LowerOperationWrapper(N, Results, DAG); |
6294 | 0 | } |
6295 | | |
6296 | 0 | const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const { |
6297 | 0 | #define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME |
6298 | 0 | switch ((SystemZISD::NodeType)Opcode) { |
6299 | 0 | case SystemZISD::FIRST_NUMBER: break; |
6300 | 0 | OPCODE(RET_GLUE); |
6301 | 0 | OPCODE(CALL); |
6302 | 0 | OPCODE(SIBCALL); |
6303 | 0 | OPCODE(TLS_GDCALL); |
6304 | 0 | OPCODE(TLS_LDCALL); |
6305 | 0 | OPCODE(PCREL_WRAPPER); |
6306 | 0 | OPCODE(PCREL_OFFSET); |
6307 | 0 | OPCODE(ICMP); |
6308 | 0 | OPCODE(FCMP); |
6309 | 0 | OPCODE(STRICT_FCMP); |
6310 | 0 | OPCODE(STRICT_FCMPS); |
6311 | 0 | OPCODE(TM); |
6312 | 0 | OPCODE(BR_CCMASK); |
6313 | 0 | OPCODE(SELECT_CCMASK); |
6314 | 0 | OPCODE(ADJDYNALLOC); |
6315 | 0 | OPCODE(PROBED_ALLOCA); |
6316 | 0 | OPCODE(POPCNT); |
6317 | 0 | OPCODE(SMUL_LOHI); |
6318 | 0 | OPCODE(UMUL_LOHI); |
6319 | 0 | OPCODE(SDIVREM); |
6320 | 0 | OPCODE(UDIVREM); |
6321 | 0 | OPCODE(SADDO); |
6322 | 0 | OPCODE(SSUBO); |
6323 | 0 | OPCODE(UADDO); |
6324 | 0 | OPCODE(USUBO); |
6325 | 0 | OPCODE(ADDCARRY); |
6326 | 0 | OPCODE(SUBCARRY); |
6327 | 0 | OPCODE(GET_CCMASK); |
6328 | 0 | OPCODE(MVC); |
6329 | 0 | OPCODE(NC); |
6330 | 0 | OPCODE(OC); |
6331 | 0 | OPCODE(XC); |
6332 | 0 | OPCODE(CLC); |
6333 | 0 | OPCODE(MEMSET_MVC); |
6334 | 0 | OPCODE(STPCPY); |
6335 | 0 | OPCODE(STRCMP); |
6336 | 0 | OPCODE(SEARCH_STRING); |
6337 | 0 | OPCODE(IPM); |
6338 | 0 | OPCODE(TBEGIN); |
6339 | 0 | OPCODE(TBEGIN_NOFLOAT); |
6340 | 0 | OPCODE(TEND); |
6341 | 0 | OPCODE(BYTE_MASK); |
6342 | 0 | OPCODE(ROTATE_MASK); |
6343 | 0 | OPCODE(REPLICATE); |
6344 | 0 | OPCODE(JOIN_DWORDS); |
6345 | 0 | OPCODE(SPLAT); |
6346 | 0 | OPCODE(MERGE_HIGH); |
6347 | 0 | OPCODE(MERGE_LOW); |
6348 | 0 | OPCODE(SHL_DOUBLE); |
6349 | 0 | OPCODE(PERMUTE_DWORDS); |
6350 | 0 | OPCODE(PERMUTE); |
6351 | 0 | OPCODE(PACK); |
6352 | 0 | OPCODE(PACKS_CC); |
6353 | 0 | OPCODE(PACKLS_CC); |
6354 | 0 | OPCODE(UNPACK_HIGH); |
6355 | 0 | OPCODE(UNPACKL_HIGH); |
6356 | 0 | OPCODE(UNPACK_LOW); |
6357 | 0 | OPCODE(UNPACKL_LOW); |
6358 | 0 | OPCODE(VSHL_BY_SCALAR); |
6359 | 0 | OPCODE(VSRL_BY_SCALAR); |
6360 | 0 | OPCODE(VSRA_BY_SCALAR); |
6361 | 0 | OPCODE(VROTL_BY_SCALAR); |
6362 | 0 | OPCODE(VSUM); |
6363 | 0 | OPCODE(VACC); |
6364 | 0 | OPCODE(VSCBI); |
6365 | 0 | OPCODE(VAC); |
6366 | 0 | OPCODE(VSBI); |
6367 | 0 | OPCODE(VACCC); |
6368 | 0 | OPCODE(VSBCBI); |
6369 | 0 | OPCODE(VICMPE); |
6370 | 0 | OPCODE(VICMPH); |
6371 | 0 | OPCODE(VICMPHL); |
6372 | 0 | OPCODE(VICMPES); |
6373 | 0 | OPCODE(VICMPHS); |
6374 | 0 | OPCODE(VICMPHLS); |
6375 | 0 | OPCODE(VFCMPE); |
6376 | 0 | OPCODE(STRICT_VFCMPE); |
6377 | 0 | OPCODE(STRICT_VFCMPES); |
6378 | 0 | OPCODE(VFCMPH); |
6379 | 0 | OPCODE(STRICT_VFCMPH); |
6380 | 0 | OPCODE(STRICT_VFCMPHS); |
6381 | 0 | OPCODE(VFCMPHE); |
6382 | 0 | OPCODE(STRICT_VFCMPHE); |
6383 | 0 | OPCODE(STRICT_VFCMPHES); |
6384 | 0 | OPCODE(VFCMPES); |
6385 | 0 | OPCODE(VFCMPHS); |
6386 | 0 | OPCODE(VFCMPHES); |
6387 | 0 | OPCODE(VFTCI); |
6388 | 0 | OPCODE(VEXTEND); |
6389 | 0 | OPCODE(STRICT_VEXTEND); |
6390 | 0 | OPCODE(VROUND); |
6391 | 0 | OPCODE(STRICT_VROUND); |
6392 | 0 | OPCODE(VTM); |
6393 | 0 | OPCODE(SCMP128HI); |
6394 | 0 | OPCODE(UCMP128HI); |
6395 | 0 | OPCODE(VFAE_CC); |
6396 | 0 | OPCODE(VFAEZ_CC); |
6397 | 0 | OPCODE(VFEE_CC); |
6398 | 0 | OPCODE(VFEEZ_CC); |
6399 | 0 | OPCODE(VFENE_CC); |
6400 | 0 | OPCODE(VFENEZ_CC); |
6401 | 0 | OPCODE(VISTR_CC); |
6402 | 0 | OPCODE(VSTRC_CC); |
6403 | 0 | OPCODE(VSTRCZ_CC); |
6404 | 0 | OPCODE(VSTRS_CC); |
6405 | 0 | OPCODE(VSTRSZ_CC); |
6406 | 0 | OPCODE(TDC); |
6407 | 0 | OPCODE(ATOMIC_SWAPW); |
6408 | 0 | OPCODE(ATOMIC_LOADW_ADD); |
6409 | 0 | OPCODE(ATOMIC_LOADW_SUB); |
6410 | 0 | OPCODE(ATOMIC_LOADW_AND); |
6411 | 0 | OPCODE(ATOMIC_LOADW_OR); |
6412 | 0 | OPCODE(ATOMIC_LOADW_XOR); |
6413 | 0 | OPCODE(ATOMIC_LOADW_NAND); |
6414 | 0 | OPCODE(ATOMIC_LOADW_MIN); |
6415 | 0 | OPCODE(ATOMIC_LOADW_MAX); |
6416 | 0 | OPCODE(ATOMIC_LOADW_UMIN); |
6417 | 0 | OPCODE(ATOMIC_LOADW_UMAX); |
6418 | 0 | OPCODE(ATOMIC_CMP_SWAPW); |
6419 | 0 | OPCODE(ATOMIC_CMP_SWAP); |
6420 | 0 | OPCODE(ATOMIC_LOAD_128); |
6421 | 0 | OPCODE(ATOMIC_STORE_128); |
6422 | 0 | OPCODE(ATOMIC_CMP_SWAP_128); |
6423 | 0 | OPCODE(LRV); |
6424 | 0 | OPCODE(STRV); |
6425 | 0 | OPCODE(VLER); |
6426 | 0 | OPCODE(VSTER); |
6427 | 0 | OPCODE(PREFETCH); |
6428 | 0 | OPCODE(ADA_ENTRY); |
6429 | 0 | } |
6430 | 0 | return nullptr; |
6431 | 0 | #undef OPCODE |
6432 | 0 | } |
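| | // The OPCODE macro above is the standard stringizing idiom for mapping
| | // enum values to their spellings. A minimal standalone version of the same
| | // pattern (the names here are invented for illustration):
| | #include <cstdio>
| |
| | enum class Node { Call, Ret };
| |
| | const char *nodeName(Node N) {
| | #define NAME_CASE(NAME) case Node::NAME: return #NAME
| |   switch (N) {
| |     NAME_CASE(Call);
| |     NAME_CASE(Ret);
| |   }
| | #undef NAME_CASE
| |   return nullptr;
| | }
| |
| | int main() { std::puts(nodeName(Node::Ret)); } // prints "Ret"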
6433 | | |
6434 | | // Return true if VT is a vector whose elements are a whole number of bytes |
6435 | | // in width. Also check for presence of vector support. |
6436 | 0 | bool SystemZTargetLowering::canTreatAsByteVector(EVT VT) const { |
6437 | 0 | if (!Subtarget.hasVector()) |
6438 | 0 | return false; |
6439 | | |
6440 | 0 | return VT.isVector() && VT.getScalarSizeInBits() % 8 == 0 && VT.isSimple(); |
6441 | 0 | } |
6442 | | |
6443 | | // Try to simplify an EXTRACT_VECTOR_ELT from a vector of type VecVT |
6444 | | // producing a result of type ResVT. Op is a possibly bitcast version |
6445 | | // of the input vector and Index is the index (based on type VecVT) that |
6446 | | // should be extracted. Return the new extraction if a simplification |
6447 | | // was possible or if Force is true. |
6448 | | SDValue SystemZTargetLowering::combineExtract(const SDLoc &DL, EVT ResVT, |
6449 | | EVT VecVT, SDValue Op, |
6450 | | unsigned Index, |
6451 | | DAGCombinerInfo &DCI, |
6452 | 0 | bool Force) const { |
6453 | 0 | SelectionDAG &DAG = DCI.DAG; |
6454 | | |
6455 | | // The number of bytes being extracted. |
6456 | 0 | unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize(); |
6457 | |
6458 | 0 | for (;;) { |
6459 | 0 | unsigned Opcode = Op.getOpcode(); |
6460 | 0 | if (Opcode == ISD::BITCAST) |
6461 | | // Look through bitcasts. |
6462 | 0 | Op = Op.getOperand(0); |
6463 | 0 | else if ((Opcode == ISD::VECTOR_SHUFFLE || Opcode == SystemZISD::SPLAT) && |
6464 | 0 | canTreatAsByteVector(Op.getValueType())) { |
6465 | | // Get a VPERM-like permute mask and see whether the bytes covered |
6466 | | // by the extracted element are a contiguous sequence from one |
6467 | | // source operand. |
6468 | 0 | SmallVector<int, SystemZ::VectorBytes> Bytes; |
6469 | 0 | if (!getVPermMask(Op, Bytes)) |
6470 | 0 | break; |
6471 | 0 | int First; |
6472 | 0 | if (!getShuffleInput(Bytes, Index * BytesPerElement, |
6473 | 0 | BytesPerElement, First)) |
6474 | 0 | break; |
6475 | 0 | if (First < 0) |
6476 | 0 | return DAG.getUNDEF(ResVT); |
6477 | | // Make sure the contiguous sequence starts at a multiple of the |
6478 | | // original element size. |
6479 | 0 | unsigned Byte = unsigned(First) % Bytes.size(); |
6480 | 0 | if (Byte % BytesPerElement != 0) |
6481 | 0 | break; |
6482 | | // We can get the extracted value directly from an input. |
6483 | 0 | Index = Byte / BytesPerElement; |
6484 | 0 | Op = Op.getOperand(unsigned(First) / Bytes.size()); |
6485 | 0 | Force = true; |
6486 | 0 | } else if (Opcode == ISD::BUILD_VECTOR && |
6487 | 0 | canTreatAsByteVector(Op.getValueType())) { |
6488 | | // We can only optimize this case if the BUILD_VECTOR elements are |
6489 | | // at least as wide as the extracted value. |
6490 | 0 | EVT OpVT = Op.getValueType(); |
6491 | 0 | unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize(); |
6492 | 0 | if (OpBytesPerElement < BytesPerElement) |
6493 | 0 | break; |
6494 | | // Make sure that the least-significant bit of the extracted value |
6495 | | // is the least significant bit of an input. |
6496 | 0 | unsigned End = (Index + 1) * BytesPerElement; |
6497 | 0 | if (End % OpBytesPerElement != 0) |
6498 | 0 | break; |
6499 | | // We're extracting the low part of one operand of the BUILD_VECTOR. |
6500 | 0 | Op = Op.getOperand(End / OpBytesPerElement - 1); |
6501 | 0 | if (!Op.getValueType().isInteger()) { |
6502 | 0 | EVT VT = MVT::getIntegerVT(Op.getValueSizeInBits()); |
6503 | 0 | Op = DAG.getNode(ISD::BITCAST, DL, VT, Op); |
6504 | 0 | DCI.AddToWorklist(Op.getNode()); |
6505 | 0 | } |
6506 | 0 | EVT VT = MVT::getIntegerVT(ResVT.getSizeInBits()); |
6507 | 0 | Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op); |
6508 | 0 | if (VT != ResVT) { |
6509 | 0 | DCI.AddToWorklist(Op.getNode()); |
6510 | 0 | Op = DAG.getNode(ISD::BITCAST, DL, ResVT, Op); |
6511 | 0 | } |
6512 | 0 | return Op; |
6513 | 0 | } else if ((Opcode == ISD::SIGN_EXTEND_VECTOR_INREG || |
6514 | 0 | Opcode == ISD::ZERO_EXTEND_VECTOR_INREG || |
6515 | 0 | Opcode == ISD::ANY_EXTEND_VECTOR_INREG) && |
6516 | 0 | canTreatAsByteVector(Op.getValueType()) && |
6517 | 0 | canTreatAsByteVector(Op.getOperand(0).getValueType())) { |
6518 | | // Make sure that only the unextended bits are significant. |
6519 | 0 | EVT ExtVT = Op.getValueType(); |
6520 | 0 | EVT OpVT = Op.getOperand(0).getValueType(); |
6521 | 0 | unsigned ExtBytesPerElement = ExtVT.getVectorElementType().getStoreSize(); |
6522 | 0 | unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize(); |
6523 | 0 | unsigned Byte = Index * BytesPerElement; |
6524 | 0 | unsigned SubByte = Byte % ExtBytesPerElement; |
6525 | 0 | unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement; |
6526 | 0 | if (SubByte < MinSubByte || |
6527 | 0 | SubByte + BytesPerElement > ExtBytesPerElement) |
6528 | 0 | break; |
6529 | | // Get the byte offset of the unextended element...
6530 | 0 | Byte = Byte / ExtBytesPerElement * OpBytesPerElement; |
6531 | | // ...then add the byte offset relative to that element. |
6532 | 0 | Byte += SubByte - MinSubByte; |
6533 | 0 | if (Byte % BytesPerElement != 0) |
6534 | 0 | break; |
6535 | 0 | Op = Op.getOperand(0); |
6536 | 0 | Index = Byte / BytesPerElement; |
6537 | 0 | Force = true; |
6538 | 0 | } else |
6539 | 0 | break; |
6540 | 0 | } |
6541 | 0 | if (Force) { |
6542 | 0 | if (Op.getValueType() != VecVT) { |
6543 | 0 | Op = DAG.getNode(ISD::BITCAST, DL, VecVT, Op); |
6544 | 0 | DCI.AddToWorklist(Op.getNode()); |
6545 | 0 | } |
6546 | 0 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Op, |
6547 | 0 | DAG.getConstant(Index, DL, MVT::i32)); |
6548 | 0 | } |
6549 | 0 | return SDValue(); |
6550 | 0 | } |
6551 | | |
6552 | | // Optimize vector operations in scalar value Op on the basis that Op |
6553 | | // is truncated to TruncVT. |
6554 | | SDValue SystemZTargetLowering::combineTruncateExtract( |
6555 | 0 | const SDLoc &DL, EVT TruncVT, SDValue Op, DAGCombinerInfo &DCI) const { |
6556 | | // If we have (trunc (extract_vector_elt X, Y)), try to turn it into |
6557 | | // (extract_vector_elt (bitcast X), Y'), where (bitcast X) has elements |
6558 | | // of type TruncVT. |
6559 | 0 | if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
6560 | 0 | TruncVT.getSizeInBits() % 8 == 0) { |
6561 | 0 | SDValue Vec = Op.getOperand(0); |
6562 | 0 | EVT VecVT = Vec.getValueType(); |
6563 | 0 | if (canTreatAsByteVector(VecVT)) { |
6564 | 0 | if (auto *IndexN = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { |
6565 | 0 | unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize(); |
6566 | 0 | unsigned TruncBytes = TruncVT.getStoreSize(); |
6567 | 0 | if (BytesPerElement % TruncBytes == 0) { |
6568 | | // Calculate the value of Y' in the above description. We are |
6569 | | // splitting the original elements into Scale equal-sized pieces |
6570 | | // and for truncation purposes want the last (least-significant) |
6571 | | // of these pieces for IndexN. This is easiest to do by calculating |
6572 | | // the start index of the following element and then subtracting 1. |
6573 | 0 | unsigned Scale = BytesPerElement / TruncBytes; |
6574 | 0 | unsigned NewIndex = (IndexN->getZExtValue() + 1) * Scale - 1; |
6575 | | |
6576 | | // Defer the creation of the bitcast from X to combineExtract, |
6577 | | // which might be able to optimize the extraction. |
6578 | 0 | VecVT = MVT::getVectorVT(MVT::getIntegerVT(TruncBytes * 8), |
6579 | 0 | VecVT.getStoreSize() / TruncBytes); |
6580 | 0 | EVT ResVT = (TruncBytes < 4 ? MVT::i32 : TruncVT); |
6581 | 0 | return combineExtract(DL, ResVT, VecVT, Vec, NewIndex, DCI, true); |
6582 | 0 | } |
6583 | 0 | } |
6584 | 0 | } |
6585 | 0 | } |
6586 | 0 | return SDValue(); |
6587 | 0 | } |
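| | // Worked example of the Y' arithmetic above: truncating element 1 of a
| | // v4i32 to i8 reinterprets the vector as v16i8, so Scale = 4 / 1 = 4 and
| | // NewIndex = (1 + 1) * 4 - 1 = 7 -- byte 7, the last (least-significant,
| | // given the big-endian element layout) byte of original element 1.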
6588 | | |
6589 | | SDValue SystemZTargetLowering::combineZERO_EXTEND( |
6590 | 0 | SDNode *N, DAGCombinerInfo &DCI) const { |
6591 | | // Convert (zext (select_ccmask C1, C2)) into (select_ccmask C1', C2') |
6592 | 0 | SelectionDAG &DAG = DCI.DAG; |
6593 | 0 | SDValue N0 = N->getOperand(0); |
6594 | 0 | EVT VT = N->getValueType(0); |
6595 | 0 | if (N0.getOpcode() == SystemZISD::SELECT_CCMASK) { |
6596 | 0 | auto *TrueOp = dyn_cast<ConstantSDNode>(N0.getOperand(0)); |
6597 | 0 | auto *FalseOp = dyn_cast<ConstantSDNode>(N0.getOperand(1)); |
6598 | 0 | if (TrueOp && FalseOp) { |
6599 | 0 | SDLoc DL(N0); |
6600 | 0 | SDValue Ops[] = { DAG.getConstant(TrueOp->getZExtValue(), DL, VT), |
6601 | 0 | DAG.getConstant(FalseOp->getZExtValue(), DL, VT), |
6602 | 0 | N0.getOperand(2), N0.getOperand(3), N0.getOperand(4) }; |
6603 | 0 | SDValue NewSelect = DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VT, Ops); |
6604 | | // If N0 has multiple uses, change other uses as well. |
6605 | 0 | if (!N0.hasOneUse()) { |
6606 | 0 | SDValue TruncSelect = |
6607 | 0 | DAG.getNode(ISD::TRUNCATE, DL, N0.getValueType(), NewSelect); |
6608 | 0 | DCI.CombineTo(N0.getNode(), TruncSelect); |
6609 | 0 | } |
6610 | 0 | return NewSelect; |
6611 | 0 | } |
6612 | 0 | } |
6613 | 0 | return SDValue(); |
6614 | 0 | } |
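| | // Example of the transform above: (zext i32 -> i64 (select_ccmask 1, 0,
| | // CCValid, CCMask, CC)) becomes an i64 (select_ccmask 1, 0, CCValid,
| | // CCMask, CC) directly; any other users of the original i32 select are
| | // rewritten to use a truncate of the new node, so only one select remains.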
6615 | | |
6616 | | SDValue SystemZTargetLowering::combineSIGN_EXTEND_INREG( |
6617 | 0 | SDNode *N, DAGCombinerInfo &DCI) const { |
6618 | | // Convert (sext_in_reg (setcc LHS, RHS, COND), i1) |
6619 | | // and (sext_in_reg (any_extend (setcc LHS, RHS, COND)), i1) |
6620 | | // into (select_cc LHS, RHS, -1, 0, COND) |
6621 | 0 | SelectionDAG &DAG = DCI.DAG; |
6622 | 0 | SDValue N0 = N->getOperand(0); |
6623 | 0 | EVT VT = N->getValueType(0); |
6624 | 0 | EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT(); |
6625 | 0 | if (N0.hasOneUse() && N0.getOpcode() == ISD::ANY_EXTEND) |
6626 | 0 | N0 = N0.getOperand(0); |
6627 | 0 | if (EVT == MVT::i1 && N0.hasOneUse() && N0.getOpcode() == ISD::SETCC) { |
6628 | 0 | SDLoc DL(N0); |
6629 | 0 | SDValue Ops[] = { N0.getOperand(0), N0.getOperand(1), |
6630 | 0 | DAG.getConstant(-1, DL, VT), DAG.getConstant(0, DL, VT), |
6631 | 0 | N0.getOperand(2) }; |
6632 | 0 | return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops); |
6633 | 0 | } |
6634 | 0 | return SDValue(); |
6635 | 0 | } |
6636 | | |
6637 | | SDValue SystemZTargetLowering::combineSIGN_EXTEND( |
6638 | 0 | SDNode *N, DAGCombinerInfo &DCI) const { |
6639 | | // Convert (sext (ashr (shl X, C1), C2)) to |
6640 | | // (ashr (shl (anyext X), C1'), C2')), since wider shifts are as |
6641 | | // cheap as narrower ones. |
6642 | 0 | SelectionDAG &DAG = DCI.DAG; |
6643 | 0 | SDValue N0 = N->getOperand(0); |
6644 | 0 | EVT VT = N->getValueType(0); |
6645 | 0 | if (N0.hasOneUse() && N0.getOpcode() == ISD::SRA) { |
6646 | 0 | auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1)); |
6647 | 0 | SDValue Inner = N0.getOperand(0); |
6648 | 0 | if (SraAmt && Inner.hasOneUse() && Inner.getOpcode() == ISD::SHL) { |
6649 | 0 | if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) { |
6650 | 0 | unsigned Extra = (VT.getSizeInBits() - N0.getValueSizeInBits()); |
6651 | 0 | unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra; |
6652 | 0 | unsigned NewSraAmt = SraAmt->getZExtValue() + Extra; |
6653 | 0 | EVT ShiftVT = N0.getOperand(1).getValueType(); |
6654 | 0 | SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SDLoc(Inner), VT, |
6655 | 0 | Inner.getOperand(0)); |
6656 | 0 | SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(Inner), VT, Ext, |
6657 | 0 | DAG.getConstant(NewShlAmt, SDLoc(Inner), |
6658 | 0 | ShiftVT)); |
6659 | 0 | return DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl, |
6660 | 0 | DAG.getConstant(NewSraAmt, SDLoc(N0), ShiftVT)); |
6661 | 0 | } |
6662 | 0 | } |
6663 | 0 | } |
6664 | 0 | return SDValue(); |
6665 | 0 | } |
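| | // A standalone sketch (not part of this file) checking the identity behind
| | // the combine above on concrete values: widening first and bumping both
| | // shift amounts by Extra (here 64 - 32 = 32) preserves the result. It
| | // assumes the usual two's-complement wrap when converting out-of-range
| | // unsigned values to a signed type.
| | #include <cassert>
| | #include <cstdint>
| |
| | int64_t narrowThenExtend(int32_t X) {
| |   int32_t Inner = (int32_t)((uint32_t)X << 24) >> 24; // (ashr (shl X, 24), 24)
| |   return (int64_t)Inner;                              // then sext to i64
| | }
| |
| | int64_t widenThenShift(int32_t X) {
| |   int64_t Ext = (int64_t)(uint32_t)X;          // any_extend: any fill works
| |   return (int64_t)((uint64_t)Ext << 56) >> 56; // both amounts + Extra = 32
| | }
| |
| | int main() {
| |   for (int32_t X : {0, 1, -1, 127, 128, 255, 123456, -123456})
| |     assert(narrowThenExtend(X) == widenThenShift(X));
| | }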
6666 | | |
6667 | | SDValue SystemZTargetLowering::combineMERGE( |
6668 | 0 | SDNode *N, DAGCombinerInfo &DCI) const { |
6669 | 0 | SelectionDAG &DAG = DCI.DAG; |
6670 | 0 | unsigned Opcode = N->getOpcode(); |
6671 | 0 | SDValue Op0 = N->getOperand(0); |
6672 | 0 | SDValue Op1 = N->getOperand(1); |
6673 | 0 | if (Op0.getOpcode() == ISD::BITCAST) |
6674 | 0 | Op0 = Op0.getOperand(0); |
6675 | 0 | if (ISD::isBuildVectorAllZeros(Op0.getNode())) { |
6676 | | // (z_merge_* 0, 0) -> 0. This is mostly useful for using VLLEZF |
6677 | | // for v4f32. |
6678 | 0 | if (Op1 == N->getOperand(0)) |
6679 | 0 | return Op1; |
6680 | | // (z_merge_? 0, X) -> (z_unpackl_? 0, X). |
6681 | 0 | EVT VT = Op1.getValueType(); |
6682 | 0 | unsigned ElemBytes = VT.getVectorElementType().getStoreSize(); |
6683 | 0 | if (ElemBytes <= 4) { |
6684 | 0 | Opcode = (Opcode == SystemZISD::MERGE_HIGH ? |
6685 | 0 | SystemZISD::UNPACKL_HIGH : SystemZISD::UNPACKL_LOW); |
6686 | 0 | EVT InVT = VT.changeVectorElementTypeToInteger(); |
6687 | 0 | EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(ElemBytes * 16), |
6688 | 0 | SystemZ::VectorBytes / ElemBytes / 2); |
6689 | 0 | if (VT != InVT) { |
6690 | 0 | Op1 = DAG.getNode(ISD::BITCAST, SDLoc(N), InVT, Op1); |
6691 | 0 | DCI.AddToWorklist(Op1.getNode()); |
6692 | 0 | } |
6693 | 0 | SDValue Op = DAG.getNode(Opcode, SDLoc(N), OutVT, Op1); |
6694 | 0 | DCI.AddToWorklist(Op.getNode()); |
6695 | 0 | return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op); |
6696 | 0 | } |
6697 | 0 | } |
6698 | 0 | return SDValue(); |
6699 | 0 | } |
6700 | | |
6701 | | SDValue SystemZTargetLowering::combineLOAD( |
6702 | 0 | SDNode *N, DAGCombinerInfo &DCI) const { |
6703 | 0 | SelectionDAG &DAG = DCI.DAG; |
6704 | 0 | EVT LdVT = N->getValueType(0); |
6705 | 0 | SDLoc DL(N); |
6706 | | |
6707 | | // Replace an i128 load that is used solely to move its value into GPRs |
6708 | | // by separate loads of both halves. |
6709 | 0 | if (LdVT == MVT::i128) { |
6710 | 0 | LoadSDNode *LD = cast<LoadSDNode>(N); |
6711 | 0 | if (!LD->isSimple() || !ISD::isNormalLoad(LD)) |
6712 | 0 | return SDValue(); |
6713 | | |
6714 | | // Scan through all users. |
6715 | 0 | SmallVector<std::pair<SDNode *, int>, 2> Users; |
6716 | 0 | int UsedElements = 0; |
6717 | 0 | for (SDNode::use_iterator UI = LD->use_begin(), UIEnd = LD->use_end(); |
6718 | 0 | UI != UIEnd; ++UI) { |
6719 | | // Skip the uses of the chain. |
6720 | 0 | if (UI.getUse().getResNo() != 0) |
6721 | 0 | continue; |
6722 | | |
6723 | | // Verify every user is a TRUNCATE to i64 of the low or high half ... |
6724 | 0 | SDNode *User = *UI; |
6725 | 0 | int Index = 1; |
6726 | 0 | if (User->getOpcode() == ISD::SRL && |
6727 | 0 | User->getOperand(1).getOpcode() == ISD::Constant && |
6728 | 0 | User->getConstantOperandVal(1) == 64 && User->hasOneUse()) { |
6729 | 0 | User = *User->use_begin(); |
6730 | 0 | Index = 0; |
6731 | 0 | } |
6732 | 0 | if (User->getOpcode() != ISD::TRUNCATE || |
6733 | 0 | User->getValueType(0) != MVT::i64) |
6734 | 0 | return SDValue(); |
6735 | | |
6736 | | // ... and no half is extracted twice. |
6737 | 0 | if (UsedElements & (1 << Index)) |
6738 | 0 | return SDValue(); |
6739 | | |
6740 | 0 | UsedElements |= 1 << Index; |
6741 | 0 | Users.push_back(std::make_pair(User, Index)); |
6742 | 0 | } |
6743 | | |
6744 | | // Rewrite each extraction as an independent load. |
6745 | 0 | SmallVector<SDValue, 2> ArgChains; |
6746 | 0 | for (auto UserAndIndex : Users) { |
6747 | 0 | SDNode *User = UserAndIndex.first; |
6748 | 0 | unsigned Offset = User->getValueType(0).getStoreSize() * UserAndIndex.second; |
6749 | 0 | SDValue Ptr = |
6750 | 0 | DAG.getMemBasePlusOffset(LD->getBasePtr(), TypeSize::getFixed(Offset), DL); |
6751 | 0 | SDValue EltLoad = |
6752 | 0 | DAG.getLoad(User->getValueType(0), DL, LD->getChain(), Ptr, |
6753 | 0 | LD->getPointerInfo().getWithOffset(Offset), |
6754 | 0 | LD->getOriginalAlign(), LD->getMemOperand()->getFlags(), |
6755 | 0 | LD->getAAInfo()); |
6756 | |
|
6757 | 0 | DCI.CombineTo(User, EltLoad, true); |
6758 | 0 | ArgChains.push_back(EltLoad.getValue(1)); |
6759 | 0 | } |
6760 | | |
6761 | | // Collect all chains via TokenFactor. |
6762 | 0 | SDValue Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, |
6763 | 0 | ArgChains); |
6764 | 0 | DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Chain); |
6765 | 0 | DCI.AddToWorklist(Chain.getNode()); |
6766 | 0 | return SDValue(N, 0); |
6767 | 0 | } |
6768 | | |
6769 | 0 | if (LdVT.isVector() || LdVT.isInteger()) |
6770 | 0 | return SDValue(); |
6771 | | // Transform a scalar load that is REPLICATEd and also has other
6772 | | // use(s) so that the other use(s) use the first element of the
6773 | | // REPLICATE instead of the load. Otherwise instruction selection will
6774 | | // not produce a VLREP. Avoid extracting to a GPR, so only do this for
6775 | | // floating-point loads.
6776 | | |
6777 | 0 | SDValue Replicate; |
6778 | 0 | SmallVector<SDNode*, 8> OtherUses; |
6779 | 0 | for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end(); |
6780 | 0 | UI != UE; ++UI) { |
6781 | 0 | if (UI->getOpcode() == SystemZISD::REPLICATE) { |
6782 | 0 | if (Replicate) |
6783 | 0 | return SDValue(); // Should never happen |
6784 | 0 | Replicate = SDValue(*UI, 0); |
6785 | 0 | } |
6786 | 0 | else if (UI.getUse().getResNo() == 0) |
6787 | 0 | OtherUses.push_back(*UI); |
6788 | 0 | } |
6789 | 0 | if (!Replicate || OtherUses.empty()) |
6790 | 0 | return SDValue(); |
6791 | | |
6792 | 0 | SDValue Extract0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, LdVT, |
6793 | 0 | Replicate, DAG.getConstant(0, DL, MVT::i32)); |
6794 | | // Update uses of the loaded Value while preserving old chains. |
6795 | 0 | for (SDNode *U : OtherUses) { |
6796 | 0 | SmallVector<SDValue, 8> Ops; |
6797 | 0 | for (SDValue Op : U->ops()) |
6798 | 0 | Ops.push_back((Op.getNode() == N && Op.getResNo() == 0) ? Extract0 : Op); |
6799 | 0 | DAG.UpdateNodeOperands(U, Ops); |
6800 | 0 | } |
6801 | 0 | return SDValue(N, 0); |
6802 | 0 | } |
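| | // Worked example for the i128 split above (SystemZ is big-endian):
| | //   %v  = load i128, ptr %p
| | //   %hi = trunc i128 (lshr %v, 64) to i64   ; Index 0 -> offset 0
| | //   %lo = trunc i128 %v to i64              ; Index 1 -> offset 8
| | // Both truncates are replaced by independent i64 loads from %p and
| | // %p + 8, and the two load chains are joined with a TokenFactor.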
6803 | | |
6804 | 0 | bool SystemZTargetLowering::canLoadStoreByteSwapped(EVT VT) const { |
6805 | 0 | if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) |
6806 | 0 | return true; |
6807 | 0 | if (Subtarget.hasVectorEnhancements2()) |
6808 | 0 | if (VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v2i64 || VT == MVT::i128) |
6809 | 0 | return true; |
6810 | 0 | return false; |
6811 | 0 | } |
6812 | | |
6813 | 0 | static bool isVectorElementSwap(ArrayRef<int> M, EVT VT) { |
6814 | 0 | if (!VT.isVector() || !VT.isSimple() || |
6815 | 0 | VT.getSizeInBits() != 128 || |
6816 | 0 | VT.getScalarSizeInBits() % 8 != 0) |
6817 | 0 | return false; |
6818 | | |
6819 | 0 | unsigned NumElts = VT.getVectorNumElements(); |
6820 | 0 | for (unsigned i = 0; i < NumElts; ++i) { |
6821 | 0 | if (M[i] < 0) continue; // ignore UNDEF indices |
6822 | 0 | if ((unsigned) M[i] != NumElts - 1 - i) |
6823 | 0 | return false; |
6824 | 0 | } |
6825 | | |
6826 | 0 | return true; |
6827 | 0 | } |
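| | // Example: for v4i32 the only mask accepted above is <3,2,1,0>, with -1
| | // allowed in any position as UNDEF; <0,3,2,1> is rejected at i = 0
| | // because M[0] would have to equal NumElts - 1 - 0 = 3.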
6828 | | |
6829 | 0 | static bool isOnlyUsedByStores(SDValue StoredVal, SelectionDAG &DAG) { |
6830 | 0 | for (auto *U : StoredVal->uses()) { |
6831 | 0 | if (StoreSDNode *ST = dyn_cast<StoreSDNode>(U)) { |
6832 | 0 | EVT CurrMemVT = ST->getMemoryVT().getScalarType(); |
6833 | 0 | if (CurrMemVT.isRound() && CurrMemVT.getStoreSize() <= 16) |
6834 | 0 | continue; |
6835 | 0 | } else if (isa<BuildVectorSDNode>(U)) { |
6836 | 0 | SDValue BuildVector = SDValue(U, 0); |
6837 | 0 | if (DAG.isSplatValue(BuildVector, true/*AllowUndefs*/) && |
6838 | 0 | isOnlyUsedByStores(BuildVector, DAG)) |
6839 | 0 | continue; |
6840 | 0 | } |
6841 | 0 | return false; |
6842 | 0 | } |
6843 | 0 | return true; |
6844 | 0 | } |
6845 | | |
6846 | 0 | static bool isMovedFromParts(SDValue Val, SDValue &LoPart, SDValue &HiPart) { |
6847 | 0 | if (Val.getOpcode() != ISD::OR || !Val.getNode()->hasOneUse()) |
6848 | 0 | return false; |
6849 | | |
6850 | 0 | SDValue Op0 = Val.getOperand(0); |
6851 | 0 | SDValue Op1 = Val.getOperand(1); |
6852 | |
6853 | 0 | if (Op0.getOpcode() == ISD::SHL) |
6854 | 0 | std::swap(Op0, Op1); |
6855 | 0 | if (Op1.getOpcode() != ISD::SHL || !Op1.getNode()->hasOneUse() || |
6856 | 0 | Op1.getOperand(1).getOpcode() != ISD::Constant || |
6857 | 0 | Op1.getConstantOperandVal(1) != 64) |
6858 | 0 | return false; |
6859 | 0 | Op1 = Op1.getOperand(0); |
6860 | |
6861 | 0 | if (Op0.getOpcode() != ISD::ZERO_EXTEND || !Op0.getNode()->hasOneUse() || |
6862 | 0 | Op0.getOperand(0).getValueType() != MVT::i64) |
6863 | 0 | return false; |
6864 | 0 | if (Op1.getOpcode() != ISD::ANY_EXTEND || !Op1.getNode()->hasOneUse() || |
6865 | 0 | Op1.getOperand(0).getValueType() != MVT::i64) |
6866 | 0 | return false; |
6867 | | |
6868 | 0 | LoPart = Op0.getOperand(0); |
6869 | 0 | HiPart = Op1.getOperand(0); |
6870 | 0 | return true; |
6871 | 0 | } |
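| | // The pattern matched above, as it appears in the DAG:
| | //   (or (zero_extend i128, i64:Lo), (shl (any_extend i128, i64:Hi), 64))
| | // with every intermediate node having a single use; on success, LoPart
| | // and HiPart are the two i64 halves to be stored separately.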
6872 | | |
6873 | | SDValue SystemZTargetLowering::combineSTORE( |
6874 | 0 | SDNode *N, DAGCombinerInfo &DCI) const { |
6875 | 0 | SelectionDAG &DAG = DCI.DAG; |
6876 | 0 | auto *SN = cast<StoreSDNode>(N); |
6877 | 0 | auto &Op1 = N->getOperand(1); |
6878 | 0 | EVT MemVT = SN->getMemoryVT(); |
6879 | | // If we have (truncstoreiN (extract_vector_elt X, Y), Z) then it is better |
6880 | | // for the extraction to be done on a vMiN value, so that we can use VSTE. |
6881 | | // If X has wider elements then convert it to: |
6882 | | // (truncstoreiN (extract_vector_elt (bitcast X), Y2), Z). |
6883 | 0 | if (MemVT.isInteger() && SN->isTruncatingStore()) { |
6884 | 0 | if (SDValue Value = |
6885 | 0 | combineTruncateExtract(SDLoc(N), MemVT, SN->getValue(), DCI)) { |
6886 | 0 | DCI.AddToWorklist(Value.getNode()); |
6887 | | |
6888 | | // Rewrite the store with the new form of stored value. |
6889 | 0 | return DAG.getTruncStore(SN->getChain(), SDLoc(SN), Value, |
6890 | 0 | SN->getBasePtr(), SN->getMemoryVT(), |
6891 | 0 | SN->getMemOperand()); |
6892 | 0 | } |
6893 | 0 | } |
6894 | | // Combine STORE (BSWAP) into STRVH/STRV/STRVG/VSTBR |
6895 | 0 | if (!SN->isTruncatingStore() && |
6896 | 0 | Op1.getOpcode() == ISD::BSWAP && |
6897 | 0 | Op1.getNode()->hasOneUse() && |
6898 | 0 | canLoadStoreByteSwapped(Op1.getValueType())) { |
6899 | |
6900 | 0 | SDValue BSwapOp = Op1.getOperand(0); |
6901 | |
6902 | 0 | if (BSwapOp.getValueType() == MVT::i16) |
6903 | 0 | BSwapOp = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), MVT::i32, BSwapOp); |
6904 | |
6905 | 0 | SDValue Ops[] = { |
6906 | 0 | N->getOperand(0), BSwapOp, N->getOperand(2) |
6907 | 0 | }; |
6908 | |
6909 | 0 | return |
6910 | 0 | DAG.getMemIntrinsicNode(SystemZISD::STRV, SDLoc(N), DAG.getVTList(MVT::Other), |
6911 | 0 | Ops, MemVT, SN->getMemOperand()); |
6912 | 0 | } |
6913 | | // Combine STORE (element-swap) into VSTER |
6914 | 0 | if (!SN->isTruncatingStore() && |
6915 | 0 | Op1.getOpcode() == ISD::VECTOR_SHUFFLE && |
6916 | 0 | Op1.getNode()->hasOneUse() && |
6917 | 0 | Subtarget.hasVectorEnhancements2()) { |
6918 | 0 | ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op1.getNode()); |
6919 | 0 | ArrayRef<int> ShuffleMask = SVN->getMask(); |
6920 | 0 | if (isVectorElementSwap(ShuffleMask, Op1.getValueType())) { |
6921 | 0 | SDValue Ops[] = { |
6922 | 0 | N->getOperand(0), Op1.getOperand(0), N->getOperand(2) |
6923 | 0 | }; |
6924 | |
6925 | 0 | return DAG.getMemIntrinsicNode(SystemZISD::VSTER, SDLoc(N), |
6926 | 0 | DAG.getVTList(MVT::Other), |
6927 | 0 | Ops, MemVT, SN->getMemOperand()); |
6928 | 0 | } |
6929 | 0 | } |
6930 | | |
6931 | | // Transform a store of an i128 moved from GPRs into two separate stores. |
6932 | 0 | if (MemVT == MVT::i128 && SN->isSimple() && ISD::isNormalStore(SN)) { |
6933 | 0 | SDValue LoPart, HiPart; |
6934 | 0 | if (isMovedFromParts(Op1, LoPart, HiPart)) { |
6935 | 0 | SDLoc DL(SN); |
6936 | 0 | SDValue Chain0 = |
6937 | 0 | DAG.getStore(SN->getChain(), DL, HiPart, SN->getBasePtr(), |
6938 | 0 | SN->getPointerInfo(), SN->getOriginalAlign(), |
6939 | 0 | SN->getMemOperand()->getFlags(), SN->getAAInfo()); |
6940 | 0 | SDValue Chain1 = |
6941 | 0 | DAG.getStore(SN->getChain(), DL, LoPart, |
6942 | 0 | DAG.getObjectPtrOffset(DL, SN->getBasePtr(), |
6943 | 0 | TypeSize::getFixed(8)), |
6944 | 0 | SN->getPointerInfo().getWithOffset(8), |
6945 | 0 | SN->getOriginalAlign(), |
6946 | 0 | SN->getMemOperand()->getFlags(), SN->getAAInfo()); |
6947 | |
6948 | 0 | return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chain0, Chain1); |
6949 | 0 | } |
6950 | 0 | } |
6951 | | |
6952 | | // Replicate a reg or immediate with VREP instead of scalar multiply or
6953 | | // immediate load. It seems best to do this during the first DAGCombine,
6954 | | // as it is straightforward to handle the zero-extend node in the initial
6955 | | // DAG, and also not to worry about keeping the new MemVT legal (e.g.
6956 | | // when extracting an i16 element from a v16i8 vector).
6957 | 0 | if (Subtarget.hasVector() && DCI.Level == BeforeLegalizeTypes && |
6958 | 0 | isOnlyUsedByStores(Op1, DAG)) { |
6959 | 0 | SDValue Word = SDValue(); |
6960 | 0 | EVT WordVT; |
6961 | | |
6962 | | // Find a replicated immediate and return it if found in Word and its |
6963 | | // type in WordVT. |
6964 | 0 | auto FindReplicatedImm = [&](ConstantSDNode *C, unsigned TotBytes) { |
6965 | | // Some constants are better handled with a scalar store. |
6966 | 0 | if (C->getAPIntValue().getBitWidth() > 64 || C->isAllOnes() || |
6967 | 0 | isInt<16>(C->getSExtValue()) || MemVT.getStoreSize() <= 2) |
6968 | 0 | return; |
6969 | 0 | SystemZVectorConstantInfo VCI(APInt(TotBytes * 8, C->getZExtValue())); |
6970 | 0 | if (VCI.isVectorConstantLegal(Subtarget) && |
6971 | 0 | VCI.Opcode == SystemZISD::REPLICATE) { |
6972 | 0 | Word = DAG.getConstant(VCI.OpVals[0], SDLoc(SN), MVT::i32); |
6973 | 0 | WordVT = VCI.VecVT.getScalarType(); |
6974 | 0 | } |
6975 | 0 | }; |
6976 | | |
6977 | | // Find a replicated register and return it if found in Word and its type |
6978 | | // in WordVT. |
6979 | 0 | auto FindReplicatedReg = [&](SDValue MulOp) { |
6980 | 0 | EVT MulVT = MulOp.getValueType(); |
6981 | 0 | if (MulOp->getOpcode() == ISD::MUL && |
6982 | 0 | (MulVT == MVT::i16 || MulVT == MVT::i32 || MulVT == MVT::i64)) { |
6983 | | // Find a zero extended value and its type. |
6984 | 0 | SDValue LHS = MulOp->getOperand(0); |
6985 | 0 | if (LHS->getOpcode() == ISD::ZERO_EXTEND) |
6986 | 0 | WordVT = LHS->getOperand(0).getValueType(); |
6987 | 0 | else if (LHS->getOpcode() == ISD::AssertZext) |
6988 | 0 | WordVT = cast<VTSDNode>(LHS->getOperand(1))->getVT(); |
6989 | 0 | else |
6990 | 0 | return; |
6991 | | // Find a replicating constant, e.g. 0x00010001. |
6992 | 0 | if (auto *C = dyn_cast<ConstantSDNode>(MulOp->getOperand(1))) { |
6993 | 0 | SystemZVectorConstantInfo VCI( |
6994 | 0 | APInt(MulVT.getSizeInBits(), C->getZExtValue())); |
6995 | 0 | if (VCI.isVectorConstantLegal(Subtarget) && |
6996 | 0 | VCI.Opcode == SystemZISD::REPLICATE && VCI.OpVals[0] == 1 && |
6997 | 0 | WordVT == VCI.VecVT.getScalarType()) |
6998 | 0 | Word = DAG.getZExtOrTrunc(LHS->getOperand(0), SDLoc(SN), WordVT); |
6999 | 0 | } |
7000 | 0 | } |
7001 | 0 | }; |
7002 | |
7003 | 0 | if (isa<BuildVectorSDNode>(Op1) && |
7004 | 0 | DAG.isSplatValue(Op1, true/*AllowUndefs*/)) { |
7005 | 0 | SDValue SplatVal = Op1->getOperand(0); |
7006 | 0 | if (auto *C = dyn_cast<ConstantSDNode>(SplatVal)) |
7007 | 0 | FindReplicatedImm(C, SplatVal.getValueType().getStoreSize()); |
7008 | 0 | else |
7009 | 0 | FindReplicatedReg(SplatVal); |
7010 | 0 | } else { |
7011 | 0 | if (auto *C = dyn_cast<ConstantSDNode>(Op1)) |
7012 | 0 | FindReplicatedImm(C, MemVT.getStoreSize()); |
7013 | 0 | else |
7014 | 0 | FindReplicatedReg(Op1); |
7015 | 0 | } |
7016 | |
7017 | 0 | if (Word != SDValue()) { |
7018 | 0 | assert(MemVT.getSizeInBits() % WordVT.getSizeInBits() == 0 && |
7019 | 0 | "Bad type handling"); |
7020 | 0 | unsigned NumElts = MemVT.getSizeInBits() / WordVT.getSizeInBits(); |
7021 | 0 | EVT SplatVT = EVT::getVectorVT(*DAG.getContext(), WordVT, NumElts); |
7022 | 0 | SDValue SplatVal = DAG.getSplatVector(SplatVT, SDLoc(SN), Word); |
7023 | 0 | return DAG.getStore(SN->getChain(), SDLoc(SN), SplatVal, |
7024 | 0 | SN->getBasePtr(), SN->getMemOperand()); |
7025 | 0 | } |
7026 | 0 | } |
7027 | | |
7028 | 0 | return SDValue(); |
7029 | 0 | } |
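| | // Worked example for the replicate path above: storing the i64 constant
| | // 0x0001000100010001 yields a legal REPLICATE with OpVals[0] = 1 and
| | // WordVT = i16, so the store is rewritten as a v4i16 splat of 1 -- a
| | // VREP-style replicate -- instead of loading the immediate into a GPR.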
7030 | | |
7031 | | SDValue SystemZTargetLowering::combineVECTOR_SHUFFLE( |
7032 | 0 | SDNode *N, DAGCombinerInfo &DCI) const { |
7033 | 0 | SelectionDAG &DAG = DCI.DAG; |
7034 | | // Combine element-swap (LOAD) into VLER |
7035 | 0 | if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) && |
7036 | 0 | N->getOperand(0).hasOneUse() && |
7037 | 0 | Subtarget.hasVectorEnhancements2()) { |
7038 | 0 | ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); |
7039 | 0 | ArrayRef<int> ShuffleMask = SVN->getMask(); |
7040 | 0 | if (isVectorElementSwap(ShuffleMask, N->getValueType(0))) { |
7041 | 0 | SDValue Load = N->getOperand(0); |
7042 | 0 | LoadSDNode *LD = cast<LoadSDNode>(Load); |
7043 | | |
7044 | | // Create the element-swapping load. |
7045 | 0 | SDValue Ops[] = { |
7046 | 0 | LD->getChain(), // Chain |
7047 | 0 | LD->getBasePtr() // Ptr |
7048 | 0 | }; |
7049 | 0 | SDValue ESLoad = |
7050 | 0 | DAG.getMemIntrinsicNode(SystemZISD::VLER, SDLoc(N), |
7051 | 0 | DAG.getVTList(LD->getValueType(0), MVT::Other), |
7052 | 0 | Ops, LD->getMemoryVT(), LD->getMemOperand()); |
7053 | | |
7054 | | // First, combine the VECTOR_SHUFFLE away. This makes the value produced |
7055 | | // by the load dead. |
7056 | 0 | DCI.CombineTo(N, ESLoad); |
7057 | | |
7058 | | // Next, combine the load away; we give it a bogus result value but a real
7059 | | // chain result. The result value is dead because the shuffle is dead. |
7060 | 0 | DCI.CombineTo(Load.getNode(), ESLoad, ESLoad.getValue(1)); |
7061 | | |
7062 | | // Return N so it doesn't get rechecked! |
7063 | 0 | return SDValue(N, 0); |
7064 | 0 | } |
7065 | 0 | } |
7066 | | |
7067 | 0 | return SDValue(); |
7068 | 0 | } |
7069 | | |
7070 | | SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT( |
7071 | 0 | SDNode *N, DAGCombinerInfo &DCI) const { |
7072 | 0 | SelectionDAG &DAG = DCI.DAG; |
7073 | |
7074 | 0 | if (!Subtarget.hasVector()) |
7075 | 0 | return SDValue(); |
7076 | | |
7077 | | // Look through bitcasts that retain the number of vector elements. |
7078 | 0 | SDValue Op = N->getOperand(0); |
7079 | 0 | if (Op.getOpcode() == ISD::BITCAST && |
7080 | 0 | Op.getValueType().isVector() && |
7081 | 0 | Op.getOperand(0).getValueType().isVector() && |
7082 | 0 | Op.getValueType().getVectorNumElements() == |
7083 | 0 | Op.getOperand(0).getValueType().getVectorNumElements()) |
7084 | 0 | Op = Op.getOperand(0); |
7085 | | |
7086 | | // Pull BSWAP out of a vector extraction. |
7087 | 0 | if (Op.getOpcode() == ISD::BSWAP && Op.hasOneUse()) { |
7088 | 0 | EVT VecVT = Op.getValueType(); |
7089 | 0 | EVT EltVT = VecVT.getVectorElementType(); |
7090 | 0 | Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), EltVT, |
7091 | 0 | Op.getOperand(0), N->getOperand(1)); |
7092 | 0 | DCI.AddToWorklist(Op.getNode()); |
7093 | 0 | Op = DAG.getNode(ISD::BSWAP, SDLoc(N), EltVT, Op); |
7094 | 0 | if (EltVT != N->getValueType(0)) { |
7095 | 0 | DCI.AddToWorklist(Op.getNode()); |
7096 | 0 | Op = DAG.getNode(ISD::BITCAST, SDLoc(N), N->getValueType(0), Op); |
7097 | 0 | } |
7098 | 0 | return Op; |
7099 | 0 | } |
7100 | | |
7101 | | // Try to simplify a vector extraction. |
7102 | 0 | if (auto *IndexN = dyn_cast<ConstantSDNode>(N->getOperand(1))) { |
7103 | 0 | SDValue Op0 = N->getOperand(0); |
7104 | 0 | EVT VecVT = Op0.getValueType(); |
7105 | 0 | return combineExtract(SDLoc(N), N->getValueType(0), VecVT, Op0, |
7106 | 0 | IndexN->getZExtValue(), DCI, false); |
7107 | 0 | } |
7108 | 0 | return SDValue(); |
7109 | 0 | } |
7110 | | |
7111 | | SDValue SystemZTargetLowering::combineJOIN_DWORDS( |
7112 | 0 | SDNode *N, DAGCombinerInfo &DCI) const { |
7113 | 0 | SelectionDAG &DAG = DCI.DAG; |
7114 | | // (join_dwords X, X) == (replicate X) |
7115 | 0 | if (N->getOperand(0) == N->getOperand(1)) |
7116 | 0 | return DAG.getNode(SystemZISD::REPLICATE, SDLoc(N), N->getValueType(0), |
7117 | 0 | N->getOperand(0)); |
7118 | 0 | return SDValue(); |
7119 | 0 | } |
7120 | | |
7121 | 0 | static SDValue MergeInputChains(SDNode *N1, SDNode *N2) { |
7122 | 0 | SDValue Chain1 = N1->getOperand(0); |
7123 | 0 | SDValue Chain2 = N2->getOperand(0); |
7124 | | |
7125 | | // Trivial case: both nodes take the same chain. |
7126 | 0 | if (Chain1 == Chain2) |
7127 | 0 | return Chain1; |
7128 | | |
7129 | | // FIXME - we could handle more complex cases via TokenFactor, |
7130 | | // assuming we can verify that this would not create a cycle. |
7131 | 0 | return SDValue(); |
7132 | 0 | } |
7133 | | |
7134 | | SDValue SystemZTargetLowering::combineFP_ROUND( |
7135 | 0 | SDNode *N, DAGCombinerInfo &DCI) const { |
7136 | |
7137 | 0 | if (!Subtarget.hasVector()) |
7138 | 0 | return SDValue(); |
7139 | | |
7140 | | // (fpround (extract_vector_elt X 0)) |
7141 | | // (fpround (extract_vector_elt X 1)) -> |
7142 | | // (extract_vector_elt (VROUND X) 0) |
7143 | | // (extract_vector_elt (VROUND X) 2) |
7144 | | // |
7145 | | // This is a special case since the target doesn't really support v2f32s. |
7146 | 0 | unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0; |
7147 | 0 | SelectionDAG &DAG = DCI.DAG; |
7148 | 0 | SDValue Op0 = N->getOperand(OpNo); |
7149 | 0 | if (N->getValueType(0) == MVT::f32 && Op0.hasOneUse() && |
7150 | 0 | Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
7151 | 0 | Op0.getOperand(0).getValueType() == MVT::v2f64 && |
7152 | 0 | Op0.getOperand(1).getOpcode() == ISD::Constant && |
7153 | 0 | Op0.getConstantOperandVal(1) == 0) { |
7154 | 0 | SDValue Vec = Op0.getOperand(0); |
7155 | 0 | for (auto *U : Vec->uses()) { |
7156 | 0 | if (U != Op0.getNode() && U->hasOneUse() && |
7157 | 0 | U->getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
7158 | 0 | U->getOperand(0) == Vec && |
7159 | 0 | U->getOperand(1).getOpcode() == ISD::Constant && |
7160 | 0 | U->getConstantOperandVal(1) == 1) { |
7161 | 0 | SDValue OtherRound = SDValue(*U->use_begin(), 0); |
7162 | 0 | if (OtherRound.getOpcode() == N->getOpcode() && |
7163 | 0 | OtherRound.getOperand(OpNo) == SDValue(U, 0) && |
7164 | 0 | OtherRound.getValueType() == MVT::f32) { |
7165 | 0 | SDValue VRound, Chain; |
7166 | 0 | if (N->isStrictFPOpcode()) { |
7167 | 0 | Chain = MergeInputChains(N, OtherRound.getNode()); |
7168 | 0 | if (!Chain) |
7169 | 0 | continue; |
7170 | 0 | VRound = DAG.getNode(SystemZISD::STRICT_VROUND, SDLoc(N), |
7171 | 0 | {MVT::v4f32, MVT::Other}, {Chain, Vec}); |
7172 | 0 | Chain = VRound.getValue(1); |
7173 | 0 | } else |
7174 | 0 | VRound = DAG.getNode(SystemZISD::VROUND, SDLoc(N), |
7175 | 0 | MVT::v4f32, Vec); |
7176 | 0 | DCI.AddToWorklist(VRound.getNode()); |
7177 | 0 | SDValue Extract1 = |
7178 | 0 | DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(U), MVT::f32, |
7179 | 0 | VRound, DAG.getConstant(2, SDLoc(U), MVT::i32)); |
7180 | 0 | DCI.AddToWorklist(Extract1.getNode()); |
7181 | 0 | DAG.ReplaceAllUsesOfValueWith(OtherRound, Extract1); |
7182 | 0 | if (Chain) |
7183 | 0 | DAG.ReplaceAllUsesOfValueWith(OtherRound.getValue(1), Chain); |
7184 | 0 | SDValue Extract0 = |
7185 | 0 | DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op0), MVT::f32, |
7186 | 0 | VRound, DAG.getConstant(0, SDLoc(Op0), MVT::i32)); |
7187 | 0 | if (Chain) |
7188 | 0 | return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op0), |
7189 | 0 | N->getVTList(), Extract0, Chain); |
7190 | 0 | return Extract0; |
7191 | 0 | } |
7192 | 0 | } |
7193 | 0 | } |
7194 | 0 | } |
7195 | 0 | return SDValue(); |
7196 | 0 | } |
7197 | | |
7198 | | SDValue SystemZTargetLowering::combineFP_EXTEND( |
7199 | 0 | SDNode *N, DAGCombinerInfo &DCI) const { |
7200 | |
7201 | 0 | if (!Subtarget.hasVector()) |
7202 | 0 | return SDValue(); |
7203 | | |
7204 | | // (fpextend (extract_vector_elt X 0)) |
7205 | | // (fpextend (extract_vector_elt X 2)) -> |
7206 | | // (extract_vector_elt (VEXTEND X) 0) |
7207 | | // (extract_vector_elt (VEXTEND X) 1) |
7208 | | // |
7209 | | // This is a special case since the target doesn't really support v2f32s. |
7210 | 0 | unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0; |
7211 | 0 | SelectionDAG &DAG = DCI.DAG; |
7212 | 0 | SDValue Op0 = N->getOperand(OpNo); |
7213 | 0 | if (N->getValueType(0) == MVT::f64 && Op0.hasOneUse() && |
7214 | 0 | Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
7215 | 0 | Op0.getOperand(0).getValueType() == MVT::v4f32 && |
7216 | 0 | Op0.getOperand(1).getOpcode() == ISD::Constant && |
7217 | 0 | Op0.getConstantOperandVal(1) == 0) { |
7218 | 0 | SDValue Vec = Op0.getOperand(0); |
7219 | 0 | for (auto *U : Vec->uses()) { |
7220 | 0 | if (U != Op0.getNode() && U->hasOneUse() && |
7221 | 0 | U->getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
7222 | 0 | U->getOperand(0) == Vec && |
7223 | 0 | U->getOperand(1).getOpcode() == ISD::Constant && |
7224 | 0 | U->getConstantOperandVal(1) == 2) { |
7225 | 0 | SDValue OtherExtend = SDValue(*U->use_begin(), 0); |
7226 | 0 | if (OtherExtend.getOpcode() == N->getOpcode() && |
7227 | 0 | OtherExtend.getOperand(OpNo) == SDValue(U, 0) && |
7228 | 0 | OtherExtend.getValueType() == MVT::f64) { |
7229 | 0 | SDValue VExtend, Chain; |
7230 | 0 | if (N->isStrictFPOpcode()) { |
7231 | 0 | Chain = MergeInputChains(N, OtherExtend.getNode()); |
7232 | 0 | if (!Chain) |
7233 | 0 | continue; |
7234 | 0 | VExtend = DAG.getNode(SystemZISD::STRICT_VEXTEND, SDLoc(N), |
7235 | 0 | {MVT::v2f64, MVT::Other}, {Chain, Vec}); |
7236 | 0 | Chain = VExtend.getValue(1); |
7237 | 0 | } else |
7238 | 0 | VExtend = DAG.getNode(SystemZISD::VEXTEND, SDLoc(N), |
7239 | 0 | MVT::v2f64, Vec); |
7240 | 0 | DCI.AddToWorklist(VExtend.getNode()); |
7241 | 0 | SDValue Extract1 = |
7242 | 0 | DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(U), MVT::f64, |
7243 | 0 | VExtend, DAG.getConstant(1, SDLoc(U), MVT::i32)); |
7244 | 0 | DCI.AddToWorklist(Extract1.getNode()); |
7245 | 0 | DAG.ReplaceAllUsesOfValueWith(OtherExtend, Extract1); |
7246 | 0 | if (Chain) |
7247 | 0 | DAG.ReplaceAllUsesOfValueWith(OtherExtend.getValue(1), Chain); |
7248 | 0 | SDValue Extract0 = |
7249 | 0 | DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op0), MVT::f64, |
7250 | 0 | VExtend, DAG.getConstant(0, SDLoc(Op0), MVT::i32)); |
7251 | 0 | if (Chain) |
7252 | 0 | return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op0), |
7253 | 0 | N->getVTList(), Extract0, Chain); |
7254 | 0 | return Extract0; |
7255 | 0 | } |
7256 | 0 | } |
7257 | 0 | } |
7258 | 0 | } |
7259 | 0 | return SDValue(); |
7260 | 0 | } |
7261 | | |
7262 | | SDValue SystemZTargetLowering::combineINT_TO_FP( |
7263 | 0 | SDNode *N, DAGCombinerInfo &DCI) const { |
7264 | 0 | if (DCI.Level != BeforeLegalizeTypes) |
7265 | 0 | return SDValue(); |
7266 | 0 | SelectionDAG &DAG = DCI.DAG; |
7267 | 0 | LLVMContext &Ctx = *DAG.getContext(); |
7268 | 0 | unsigned Opcode = N->getOpcode(); |
7269 | 0 | EVT OutVT = N->getValueType(0); |
7270 | 0 | Type *OutLLVMTy = OutVT.getTypeForEVT(Ctx); |
7271 | 0 | SDValue Op = N->getOperand(0); |
7272 | 0 | unsigned OutScalarBits = OutLLVMTy->getScalarSizeInBits(); |
7273 | 0 | unsigned InScalarBits = Op->getValueType(0).getScalarSizeInBits(); |
7274 | | |
7275 | | // Insert an extension before type-legalization to avoid scalarization, e.g.: |
7276 | | // v2f64 = uint_to_fp v2i16 |
7277 | | // => |
7278 | | // v2f64 = uint_to_fp (v2i64 zero_extend v2i16) |
7279 | 0 | if (OutLLVMTy->isVectorTy() && OutScalarBits > InScalarBits && |
7280 | 0 | OutScalarBits <= 64) { |
7281 | 0 | unsigned NumElts = cast<FixedVectorType>(OutLLVMTy)->getNumElements(); |
7282 | 0 | EVT ExtVT = EVT::getVectorVT( |
7283 | 0 | Ctx, EVT::getIntegerVT(Ctx, OutLLVMTy->getScalarSizeInBits()), NumElts); |
7284 | 0 | unsigned ExtOpcode = |
7285 | 0 | (Opcode == ISD::UINT_TO_FP ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND); |
7286 | 0 | SDValue ExtOp = DAG.getNode(ExtOpcode, SDLoc(N), ExtVT, Op); |
7287 | 0 | return DAG.getNode(Opcode, SDLoc(N), OutVT, ExtOp); |
7288 | 0 | } |
7289 | 0 | return SDValue(); |
7290 | 0 | } |
7291 | | |
7292 | | SDValue SystemZTargetLowering::combineBSWAP( |
7293 | 0 | SDNode *N, DAGCombinerInfo &DCI) const { |
7294 | 0 | SelectionDAG &DAG = DCI.DAG; |
7295 | | // Combine BSWAP (LOAD) into LRVH/LRV/LRVG/VLBR |
7296 | 0 | if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) && |
7297 | 0 | N->getOperand(0).hasOneUse() && |
7298 | 0 | canLoadStoreByteSwapped(N->getValueType(0))) { |
7299 | 0 | SDValue Load = N->getOperand(0); |
7300 | 0 | LoadSDNode *LD = cast<LoadSDNode>(Load); |
7301 | | |
7302 | | // Create the byte-swapping load. |
7303 | 0 | SDValue Ops[] = { |
7304 | 0 | LD->getChain(), // Chain |
7305 | 0 | LD->getBasePtr() // Ptr |
7306 | 0 | }; |
7307 | 0 | EVT LoadVT = N->getValueType(0); |
7308 | 0 | if (LoadVT == MVT::i16) |
7309 | 0 | LoadVT = MVT::i32; |
7310 | 0 | SDValue BSLoad = |
7311 | 0 | DAG.getMemIntrinsicNode(SystemZISD::LRV, SDLoc(N), |
7312 | 0 | DAG.getVTList(LoadVT, MVT::Other), |
7313 | 0 | Ops, LD->getMemoryVT(), LD->getMemOperand()); |
7314 | | |
7315 | | // If this is an i16 load, insert the truncate. |
7316 | 0 | SDValue ResVal = BSLoad; |
7317 | 0 | if (N->getValueType(0) == MVT::i16) |
7318 | 0 | ResVal = DAG.getNode(ISD::TRUNCATE, SDLoc(N), MVT::i16, BSLoad); |
7319 | | |
7320 | | // First, combine the bswap away. This makes the value produced by the |
7321 | | // load dead. |
7322 | 0 | DCI.CombineTo(N, ResVal); |
7323 | | |
7324 | | // Next, combine the load away; we give it a bogus result value but a real
7325 | | // chain result. The result value is dead because the bswap is dead. |
7326 | 0 | DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1)); |
7327 | | |
7328 | | // Return N so it doesn't get rechecked! |
7329 | 0 | return SDValue(N, 0); |
7330 | 0 | } |
7331 | | |
7332 | | // Look through bitcasts that retain the number of vector elements. |
7333 | 0 | SDValue Op = N->getOperand(0); |
7334 | 0 | if (Op.getOpcode() == ISD::BITCAST && |
7335 | 0 | Op.getValueType().isVector() && |
7336 | 0 | Op.getOperand(0).getValueType().isVector() && |
7337 | 0 | Op.getValueType().getVectorNumElements() == |
7338 | 0 | Op.getOperand(0).getValueType().getVectorNumElements()) |
7339 | 0 | Op = Op.getOperand(0); |
7340 | | |
7341 | | // Push BSWAP into a vector insertion if at least one side then simplifies. |
7342 | 0 | if (Op.getOpcode() == ISD::INSERT_VECTOR_ELT && Op.hasOneUse()) { |
7343 | 0 | SDValue Vec = Op.getOperand(0); |
7344 | 0 | SDValue Elt = Op.getOperand(1); |
7345 | 0 | SDValue Idx = Op.getOperand(2); |
7346 | |
7347 | 0 | if (DAG.isConstantIntBuildVectorOrConstantInt(Vec) || |
7348 | 0 | Vec.getOpcode() == ISD::BSWAP || Vec.isUndef() || |
7349 | 0 | DAG.isConstantIntBuildVectorOrConstantInt(Elt) || |
7350 | 0 | Elt.getOpcode() == ISD::BSWAP || Elt.isUndef() || |
7351 | 0 | (canLoadStoreByteSwapped(N->getValueType(0)) && |
7352 | 0 | ISD::isNON_EXTLoad(Elt.getNode()) && Elt.hasOneUse())) { |
7353 | 0 | EVT VecVT = N->getValueType(0); |
7354 | 0 | EVT EltVT = N->getValueType(0).getVectorElementType(); |
7355 | 0 | if (VecVT != Vec.getValueType()) { |
7356 | 0 | Vec = DAG.getNode(ISD::BITCAST, SDLoc(N), VecVT, Vec); |
7357 | 0 | DCI.AddToWorklist(Vec.getNode()); |
7358 | 0 | } |
7359 | 0 | if (EltVT != Elt.getValueType()) { |
7360 | 0 | Elt = DAG.getNode(ISD::BITCAST, SDLoc(N), EltVT, Elt); |
7361 | 0 | DCI.AddToWorklist(Elt.getNode()); |
7362 | 0 | } |
7363 | 0 | Vec = DAG.getNode(ISD::BSWAP, SDLoc(N), VecVT, Vec); |
7364 | 0 | DCI.AddToWorklist(Vec.getNode()); |
7365 | 0 | Elt = DAG.getNode(ISD::BSWAP, SDLoc(N), EltVT, Elt); |
7366 | 0 | DCI.AddToWorklist(Elt.getNode()); |
7367 | 0 | return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(N), VecVT, |
7368 | 0 | Vec, Elt, Idx); |
7369 | 0 | } |
7370 | 0 | } |
7371 | | |
7372 | | // Push BSWAP into a vector shuffle if at least one side then simplifies. |
7373 | 0 | ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(Op); |
7374 | 0 | if (SV && Op.hasOneUse()) { |
7375 | 0 | SDValue Op0 = Op.getOperand(0); |
7376 | 0 | SDValue Op1 = Op.getOperand(1); |
7377 | |
7378 | 0 | if (DAG.isConstantIntBuildVectorOrConstantInt(Op0) || |
7379 | 0 | Op0.getOpcode() == ISD::BSWAP || Op0.isUndef() || |
7380 | 0 | DAG.isConstantIntBuildVectorOrConstantInt(Op1) || |
7381 | 0 | Op1.getOpcode() == ISD::BSWAP || Op1.isUndef()) { |
7382 | 0 | EVT VecVT = N->getValueType(0); |
7383 | 0 | if (VecVT != Op0.getValueType()) { |
7384 | 0 | Op0 = DAG.getNode(ISD::BITCAST, SDLoc(N), VecVT, Op0); |
7385 | 0 | DCI.AddToWorklist(Op0.getNode()); |
7386 | 0 | } |
7387 | 0 | if (VecVT != Op1.getValueType()) { |
7388 | 0 | Op1 = DAG.getNode(ISD::BITCAST, SDLoc(N), VecVT, Op1); |
7389 | 0 | DCI.AddToWorklist(Op1.getNode()); |
7390 | 0 | } |
7391 | 0 | Op0 = DAG.getNode(ISD::BSWAP, SDLoc(N), VecVT, Op0); |
7392 | 0 | DCI.AddToWorklist(Op0.getNode()); |
7393 | 0 | Op1 = DAG.getNode(ISD::BSWAP, SDLoc(N), VecVT, Op1); |
7394 | 0 | DCI.AddToWorklist(Op1.getNode()); |
7395 | 0 | return DAG.getVectorShuffle(VecVT, SDLoc(N), Op0, Op1, SV->getMask()); |
7396 | 0 | } |
7397 | 0 | } |
7398 | | |
7399 | 0 | return SDValue(); |
7400 | 0 | } |
7401 | | |
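// Plain C++ sketch of the equivalence the LRVH/LRV/LRVG/VLBR combine above
// relies on: a byte-reversing load computes the same value as a normal load
// followed by a byte swap (std::byteswap needs C++23; names are illustrative):
#include <bit>
#include <cstdint>
#include <cstring>
static uint32_t loadReversed(const void *Src) {
  uint32_t V;
  std::memcpy(&V, Src, sizeof V); // the normal load
  return std::byteswap(V);        // the bswap that LRV folds into the load
}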
7402 | 0 | static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) { |
7403 | | // We have a SELECT_CCMASK or BR_CCMASK comparing the condition code |
7404 | | // set by the CCReg instruction using the CCValid / CCMask masks.
7405 | | // If the CCReg instruction is itself an ICMP testing the condition
7406 | | // code set by some other instruction, see whether we can directly |
7407 | | // use that condition code. |
7408 | | |
7409 | | // Verify that we have an ICMP against some constant. |
7410 | 0 | if (CCValid != SystemZ::CCMASK_ICMP) |
7411 | 0 | return false; |
7412 | 0 | auto *ICmp = CCReg.getNode(); |
7413 | 0 | if (ICmp->getOpcode() != SystemZISD::ICMP) |
7414 | 0 | return false; |
7415 | 0 | auto *CompareLHS = ICmp->getOperand(0).getNode(); |
7416 | 0 | auto *CompareRHS = dyn_cast<ConstantSDNode>(ICmp->getOperand(1)); |
7417 | 0 | if (!CompareRHS) |
7418 | 0 | return false; |
7419 | | |
7420 | | // Optimize the case where CompareLHS is a SELECT_CCMASK. |
7421 | 0 | if (CompareLHS->getOpcode() == SystemZISD::SELECT_CCMASK) { |
7422 | | // Verify that we have an appropriate mask for an EQ or NE comparison.
7423 | 0 | bool Invert = false; |
7424 | 0 | if (CCMask == SystemZ::CCMASK_CMP_NE) |
7425 | 0 | Invert = !Invert; |
7426 | 0 | else if (CCMask != SystemZ::CCMASK_CMP_EQ) |
7427 | 0 | return false; |
7428 | | |
7429 | | // Verify that the ICMP compares against one of the select values.
7430 | 0 | auto *TrueVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(0)); |
7431 | 0 | if (!TrueVal) |
7432 | 0 | return false; |
7433 | 0 | auto *FalseVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1)); |
7434 | 0 | if (!FalseVal) |
7435 | 0 | return false; |
7436 | 0 | if (CompareRHS->getZExtValue() == FalseVal->getZExtValue()) |
7437 | 0 | Invert = !Invert; |
7438 | 0 | else if (CompareRHS->getZExtValue() != TrueVal->getZExtValue()) |
7439 | 0 | return false; |
7440 | | |
7441 | | // Compute the effective CC mask for the new branch or select. |
7442 | 0 | auto *NewCCValid = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(2)); |
7443 | 0 | auto *NewCCMask = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(3)); |
7444 | 0 | if (!NewCCValid || !NewCCMask) |
7445 | 0 | return false; |
7446 | 0 | CCValid = NewCCValid->getZExtValue(); |
7447 | 0 | CCMask = NewCCMask->getZExtValue(); |
7448 | 0 | if (Invert) |
7449 | 0 | CCMask ^= CCValid; |
7450 | | |
7451 | | // Return the updated CCReg link. |
7452 | 0 | CCReg = CompareLHS->getOperand(4); |
7453 | 0 | return true; |
7454 | 0 | } |
7455 | | |
7456 | | // Optimize the case where CompareLHS is (SRA (SHL (IPM))).
7457 | 0 | if (CompareLHS->getOpcode() == ISD::SRA) { |
7458 | 0 | auto *SRACount = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1)); |
7459 | 0 | if (!SRACount || SRACount->getZExtValue() != 30) |
7460 | 0 | return false; |
7461 | 0 | auto *SHL = CompareLHS->getOperand(0).getNode(); |
7462 | 0 | if (SHL->getOpcode() != ISD::SHL) |
7463 | 0 | return false; |
7464 | 0 | auto *SHLCount = dyn_cast<ConstantSDNode>(SHL->getOperand(1)); |
7465 | 0 | if (!SHLCount || SHLCount->getZExtValue() != 30 - SystemZ::IPM_CC) |
7466 | 0 | return false; |
7467 | 0 | auto *IPM = SHL->getOperand(0).getNode(); |
7468 | 0 | if (IPM->getOpcode() != SystemZISD::IPM) |
7469 | 0 | return false; |
7470 | | |
7471 | | // Avoid introducing CC spills (because SRA would clobber CC). |
7472 | 0 | if (!CompareLHS->hasOneUse()) |
7473 | 0 | return false; |
7474 | | // Verify that the ICMP compares against zero. |
7475 | 0 | if (CompareRHS->getZExtValue() != 0) |
7476 | 0 | return false; |
7477 | | |
7478 | | // Compute the effective CC mask for the new branch or select. |
7479 | 0 | CCMask = SystemZ::reverseCCMask(CCMask); |
7480 | | |
7481 | | // Return the updated CCReg link. |
7482 | 0 | CCReg = IPM->getOperand(0); |
7483 | 0 | return true; |
7484 | 0 | } |
7485 | | |
7486 | 0 | return false; |
7487 | 0 | } |
7488 | | |
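// The shift arithmetic behind the (SRA (SHL (IPM))) case above, as a sketch
// assuming SystemZ::IPM_CC == 28, i.e. IPM places the 2-bit CC in bits 29:28:
#include <cstdint>
static int32_t decodeIPM(uint32_t IPMResult) {
  int32_t Shifted = (int32_t)(IPMResult << 2); // 30 - IPM_CC == 2; CC in 31:30
  return Shifted >> 30;                        // CC 0,1,2 -> 0,1,-2
}
// With CCValid == CCMASK_ICMP the producer's CC is 0 (eq), 1 (lt) or 2 (gt),
// decoded to 0, 1 and -2, so comparing the decoded value against zero swaps
// the "less than" and "greater than" cases; hence reverseCCMask above.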
7489 | | SDValue SystemZTargetLowering::combineBR_CCMASK( |
7490 | 0 | SDNode *N, DAGCombinerInfo &DCI) const { |
7491 | 0 | SelectionDAG &DAG = DCI.DAG; |
7492 | | |
7493 | | // Combine BR_CCMASK (ICMP (SELECT_CCMASK)) into a single BR_CCMASK. |
7494 | 0 | auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1)); |
7495 | 0 | auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2)); |
7496 | 0 | if (!CCValid || !CCMask) |
7497 | 0 | return SDValue(); |
7498 | | |
7499 | 0 | int CCValidVal = CCValid->getZExtValue(); |
7500 | 0 | int CCMaskVal = CCMask->getZExtValue(); |
7501 | 0 | SDValue Chain = N->getOperand(0); |
7502 | 0 | SDValue CCReg = N->getOperand(4); |
7503 | |
7504 | 0 | if (combineCCMask(CCReg, CCValidVal, CCMaskVal)) |
7505 | 0 | return DAG.getNode(SystemZISD::BR_CCMASK, SDLoc(N), N->getValueType(0), |
7506 | 0 | Chain, |
7507 | 0 | DAG.getTargetConstant(CCValidVal, SDLoc(N), MVT::i32), |
7508 | 0 | DAG.getTargetConstant(CCMaskVal, SDLoc(N), MVT::i32), |
7509 | 0 | N->getOperand(3), CCReg); |
7510 | 0 | return SDValue(); |
7511 | 0 | } |
7512 | | |
7513 | | SDValue SystemZTargetLowering::combineSELECT_CCMASK( |
7514 | 0 | SDNode *N, DAGCombinerInfo &DCI) const { |
7515 | 0 | SelectionDAG &DAG = DCI.DAG; |
7516 | | |
7517 | | // Combine SELECT_CCMASK (ICMP (SELECT_CCMASK)) into a single SELECT_CCMASK. |
7518 | 0 | auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(2)); |
7519 | 0 | auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(3)); |
7520 | 0 | if (!CCValid || !CCMask) |
7521 | 0 | return SDValue(); |
7522 | | |
7523 | 0 | int CCValidVal = CCValid->getZExtValue(); |
7524 | 0 | int CCMaskVal = CCMask->getZExtValue(); |
7525 | 0 | SDValue CCReg = N->getOperand(4); |
7526 | |
7527 | 0 | if (combineCCMask(CCReg, CCValidVal, CCMaskVal)) |
7528 | 0 | return DAG.getNode(SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0), |
7529 | 0 | N->getOperand(0), N->getOperand(1), |
7530 | 0 | DAG.getTargetConstant(CCValidVal, SDLoc(N), MVT::i32), |
7531 | 0 | DAG.getTargetConstant(CCMaskVal, SDLoc(N), MVT::i32), |
7532 | 0 | CCReg); |
7533 | 0 | return SDValue(); |
7534 | 0 | } |
7535 | | |
7536 | | |
7537 | | SDValue SystemZTargetLowering::combineGET_CCMASK( |
7538 | 0 | SDNode *N, DAGCombinerInfo &DCI) const { |
7539 | | |
7540 | | // Optimize away GET_CCMASK (SELECT_CCMASK) if the CC masks are compatible |
7541 | 0 | auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1)); |
7542 | 0 | auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2)); |
7543 | 0 | if (!CCValid || !CCMask) |
7544 | 0 | return SDValue(); |
7545 | 0 | int CCValidVal = CCValid->getZExtValue(); |
7546 | 0 | int CCMaskVal = CCMask->getZExtValue(); |
7547 | |
7548 | 0 | SDValue Select = N->getOperand(0); |
7549 | 0 | if (Select->getOpcode() == ISD::TRUNCATE) |
7550 | 0 | Select = Select->getOperand(0); |
7551 | 0 | if (Select->getOpcode() != SystemZISD::SELECT_CCMASK) |
7552 | 0 | return SDValue(); |
7553 | | |
7554 | 0 | auto *SelectCCValid = dyn_cast<ConstantSDNode>(Select->getOperand(2)); |
7555 | 0 | auto *SelectCCMask = dyn_cast<ConstantSDNode>(Select->getOperand(3)); |
7556 | 0 | if (!SelectCCValid || !SelectCCMask) |
7557 | 0 | return SDValue(); |
7558 | 0 | int SelectCCValidVal = SelectCCValid->getZExtValue(); |
7559 | 0 | int SelectCCMaskVal = SelectCCMask->getZExtValue(); |
7560 | |
7561 | 0 | auto *TrueVal = dyn_cast<ConstantSDNode>(Select->getOperand(0)); |
7562 | 0 | auto *FalseVal = dyn_cast<ConstantSDNode>(Select->getOperand(1)); |
7563 | 0 | if (!TrueVal || !FalseVal) |
7564 | 0 | return SDValue(); |
7565 | 0 | if (TrueVal->getZExtValue() == 1 && FalseVal->getZExtValue() == 0) |
7566 | 0 | ; |
7567 | 0 | else if (TrueVal->getZExtValue() == 0 && FalseVal->getZExtValue() == 1) |
7568 | 0 | SelectCCMaskVal ^= SelectCCValidVal; |
7569 | 0 | else |
7570 | 0 | return SDValue(); |
7571 | | |
7572 | 0 | if (SelectCCValidVal & ~CCValidVal) |
7573 | 0 | return SDValue(); |
7574 | 0 | if (SelectCCMaskVal != (CCMaskVal & SelectCCValidVal)) |
7575 | 0 | return SDValue(); |
7576 | | |
7577 | 0 | return Select->getOperand(4); |
7578 | 0 | } |
7579 | | |
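// Mask arithmetic used by combineGET_CCMASK above, in miniature: swapping a
// boolean select's 1/0 outputs is the same as inverting its CC mask within
// the set of valid CC bits (a sketch, not part of this file):
static unsigned invertCCMask(unsigned CCMask, unsigned CCValid) {
  return CCMask ^ CCValid; // select(0, 1, ...) tests !select(1, 0, ...)
}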
7580 | | SDValue SystemZTargetLowering::combineIntDIVREM( |
7581 | 0 | SDNode *N, DAGCombinerInfo &DCI) const { |
7582 | 0 | SelectionDAG &DAG = DCI.DAG; |
7583 | 0 | EVT VT = N->getValueType(0); |
7584 | | // In the case where the divisor is a vector of constants a cheaper |
7585 | | // sequence of instructions can replace the divide. BuildSDIV is called to |
7586 | | // do this during DAG combining, but it only succeeds when it can build a |
7587 | | // multiplication node. The only option for SystemZ is ISD::SMUL_LOHI, and |
7588 | | // since it is not Legal but Custom it can only happen before |
7589 | | // legalization. Therefore we must scalarize this early, before the first
7590 | | // DAG combine. For widened vectors, this is already the result of type legalization.
7591 | 0 | if (DCI.Level == BeforeLegalizeTypes && VT.isVector() && isTypeLegal(VT) && |
7592 | 0 | DAG.isConstantIntBuildVectorOrConstantInt(N->getOperand(1))) |
7593 | 0 | return DAG.UnrollVectorOp(N); |
7594 | 0 | return SDValue(); |
7595 | 0 | } |
7596 | | |
7597 | | SDValue SystemZTargetLowering::combineINTRINSIC( |
7598 | 0 | SDNode *N, DAGCombinerInfo &DCI) const { |
7599 | 0 | SelectionDAG &DAG = DCI.DAG; |
7600 | |
|
7601 | 0 | unsigned Id = N->getConstantOperandVal(1); |
7602 | 0 | switch (Id) { |
7603 | | // VECTOR LOAD (RIGHTMOST) WITH LENGTH with a length operand of 15 |
7604 | | // or larger is simply a vector load. |
7605 | 0 | case Intrinsic::s390_vll: |
7606 | 0 | case Intrinsic::s390_vlrl: |
7607 | 0 | if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(2))) |
7608 | 0 | if (C->getZExtValue() >= 15) |
7609 | 0 | return DAG.getLoad(N->getValueType(0), SDLoc(N), N->getOperand(0), |
7610 | 0 | N->getOperand(3), MachinePointerInfo()); |
7611 | 0 | break; |
7612 | | // Likewise for VECTOR STORE (RIGHTMOST) WITH LENGTH. |
7613 | 0 | case Intrinsic::s390_vstl: |
7614 | 0 | case Intrinsic::s390_vstrl: |
7615 | 0 | if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(3))) |
7616 | 0 | if (C->getZExtValue() >= 15) |
7617 | 0 | return DAG.getStore(N->getOperand(0), SDLoc(N), N->getOperand(2), |
7618 | 0 | N->getOperand(4), MachinePointerInfo()); |
7619 | 0 | break; |
7620 | 0 | } |
7621 | | |
7622 | 0 | return SDValue(); |
7623 | 0 | } |
7624 | | |
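// Why a length operand >= 15 degenerates into a plain 16-byte vector load:
// the VLL/VSTL length operand is the highest byte index transferred, so
// min(Len, 15) + 1 bytes move. A hedged C++ sketch of the load semantics:
#include <algorithm>
#include <cstring>
static void vllSketch(unsigned char Dst[16], const unsigned char *Src,
                      unsigned Len) {
  unsigned N = std::min(Len, 15u) + 1; // any Len >= 15 covers all 16 bytes
  std::memcpy(Dst, Src, N);            // bytes past N are left unspecified
}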
7625 | 0 | SDValue SystemZTargetLowering::unwrapAddress(SDValue N) const { |
7626 | 0 | if (N->getOpcode() == SystemZISD::PCREL_WRAPPER) |
7627 | 0 | return N->getOperand(0); |
7628 | 0 | return N; |
7629 | 0 | } |
7630 | | |
7631 | | SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N, |
7632 | 0 | DAGCombinerInfo &DCI) const { |
7633 | 0 | switch(N->getOpcode()) { |
7634 | 0 | default: break; |
7635 | 0 | case ISD::ZERO_EXTEND: return combineZERO_EXTEND(N, DCI); |
7636 | 0 | case ISD::SIGN_EXTEND: return combineSIGN_EXTEND(N, DCI); |
7637 | 0 | case ISD::SIGN_EXTEND_INREG: return combineSIGN_EXTEND_INREG(N, DCI); |
7638 | 0 | case SystemZISD::MERGE_HIGH: |
7639 | 0 | case SystemZISD::MERGE_LOW: return combineMERGE(N, DCI); |
7640 | 0 | case ISD::LOAD: return combineLOAD(N, DCI); |
7641 | 0 | case ISD::STORE: return combineSTORE(N, DCI); |
7642 | 0 | case ISD::VECTOR_SHUFFLE: return combineVECTOR_SHUFFLE(N, DCI); |
7643 | 0 | case ISD::EXTRACT_VECTOR_ELT: return combineEXTRACT_VECTOR_ELT(N, DCI); |
7644 | 0 | case SystemZISD::JOIN_DWORDS: return combineJOIN_DWORDS(N, DCI); |
7645 | 0 | case ISD::STRICT_FP_ROUND: |
7646 | 0 | case ISD::FP_ROUND: return combineFP_ROUND(N, DCI); |
7647 | 0 | case ISD::STRICT_FP_EXTEND: |
7648 | 0 | case ISD::FP_EXTEND: return combineFP_EXTEND(N, DCI); |
7649 | 0 | case ISD::SINT_TO_FP: |
7650 | 0 | case ISD::UINT_TO_FP: return combineINT_TO_FP(N, DCI); |
7651 | 0 | case ISD::BSWAP: return combineBSWAP(N, DCI); |
7652 | 0 | case SystemZISD::BR_CCMASK: return combineBR_CCMASK(N, DCI); |
7653 | 0 | case SystemZISD::SELECT_CCMASK: return combineSELECT_CCMASK(N, DCI); |
7654 | 0 | case SystemZISD::GET_CCMASK: return combineGET_CCMASK(N, DCI); |
7655 | 0 | case ISD::SDIV: |
7656 | 0 | case ISD::UDIV: |
7657 | 0 | case ISD::SREM: |
7658 | 0 | case ISD::UREM: return combineIntDIVREM(N, DCI); |
7659 | 0 | case ISD::INTRINSIC_W_CHAIN: |
7660 | 0 | case ISD::INTRINSIC_VOID: return combineINTRINSIC(N, DCI); |
7661 | 0 | } |
7662 | | |
7663 | 0 | return SDValue(); |
7664 | 0 | } |
7665 | | |
7666 | | // Return the demanded elements for the OpNo source operand of Op. DemandedElts |
7667 | | // are for Op. |
7668 | | static APInt getDemandedSrcElements(SDValue Op, const APInt &DemandedElts, |
7669 | 0 | unsigned OpNo) { |
7670 | 0 | EVT VT = Op.getValueType(); |
7671 | 0 | unsigned NumElts = (VT.isVector() ? VT.getVectorNumElements() : 1); |
7672 | 0 | APInt SrcDemE; |
7673 | 0 | unsigned Opcode = Op.getOpcode(); |
7674 | 0 | if (Opcode == ISD::INTRINSIC_WO_CHAIN) { |
7675 | 0 | unsigned Id = Op.getConstantOperandVal(0); |
7676 | 0 | switch (Id) { |
7677 | 0 | case Intrinsic::s390_vpksh: // PACKS |
7678 | 0 | case Intrinsic::s390_vpksf: |
7679 | 0 | case Intrinsic::s390_vpksg: |
7680 | 0 | case Intrinsic::s390_vpkshs: // PACKS_CC |
7681 | 0 | case Intrinsic::s390_vpksfs: |
7682 | 0 | case Intrinsic::s390_vpksgs: |
7683 | 0 | case Intrinsic::s390_vpklsh: // PACKLS |
7684 | 0 | case Intrinsic::s390_vpklsf: |
7685 | 0 | case Intrinsic::s390_vpklsg: |
7686 | 0 | case Intrinsic::s390_vpklshs: // PACKLS_CC |
7687 | 0 | case Intrinsic::s390_vpklsfs: |
7688 | 0 | case Intrinsic::s390_vpklsgs: |
7689 | | // VECTOR PACK truncates the elements of two source vectors into one. |
7690 | 0 | SrcDemE = DemandedElts; |
7691 | 0 | if (OpNo == 2) |
7692 | 0 | SrcDemE.lshrInPlace(NumElts / 2); |
7693 | 0 | SrcDemE = SrcDemE.trunc(NumElts / 2); |
7694 | 0 | break; |
7695 | | // VECTOR UNPACK extends half the elements of the source vector. |
7696 | 0 | case Intrinsic::s390_vuphb: // VECTOR UNPACK HIGH |
7697 | 0 | case Intrinsic::s390_vuphh: |
7698 | 0 | case Intrinsic::s390_vuphf: |
7699 | 0 | case Intrinsic::s390_vuplhb: // VECTOR UNPACK LOGICAL HIGH |
7700 | 0 | case Intrinsic::s390_vuplhh: |
7701 | 0 | case Intrinsic::s390_vuplhf: |
7702 | 0 | SrcDemE = APInt(NumElts * 2, 0); |
7703 | 0 | SrcDemE.insertBits(DemandedElts, 0); |
7704 | 0 | break; |
7705 | 0 | case Intrinsic::s390_vuplb: // VECTOR UNPACK LOW |
7706 | 0 | case Intrinsic::s390_vuplhw: |
7707 | 0 | case Intrinsic::s390_vuplf: |
7708 | 0 | case Intrinsic::s390_vupllb: // VECTOR UNPACK LOGICAL LOW |
7709 | 0 | case Intrinsic::s390_vupllh: |
7710 | 0 | case Intrinsic::s390_vupllf: |
7711 | 0 | SrcDemE = APInt(NumElts * 2, 0); |
7712 | 0 | SrcDemE.insertBits(DemandedElts, NumElts); |
7713 | 0 | break; |
7714 | 0 | case Intrinsic::s390_vpdi: { |
7715 | | // VECTOR PERMUTE DWORD IMMEDIATE selects one element from each source. |
7716 | 0 | SrcDemE = APInt(NumElts, 0); |
7717 | 0 | if (!DemandedElts[OpNo - 1]) |
7718 | 0 | break; |
7719 | 0 | unsigned Mask = Op.getConstantOperandVal(3); |
7720 | 0 | unsigned MaskBit = ((OpNo - 1) ? 1 : 4); |
7721 | | // Demand input element 0 or 1, given by the mask bit value. |
7722 | 0 | SrcDemE.setBit((Mask & MaskBit)? 1 : 0); |
7723 | 0 | break; |
7724 | 0 | } |
7725 | 0 | case Intrinsic::s390_vsldb: { |
7726 | | // VECTOR SHIFT LEFT DOUBLE BY BYTE |
7727 | 0 | assert(VT == MVT::v16i8 && "Unexpected type."); |
7728 | 0 | unsigned FirstIdx = Op.getConstantOperandVal(3); |
7729 | 0 | assert (FirstIdx > 0 && FirstIdx < 16 && "Unused operand."); |
7730 | 0 | unsigned NumSrc0Els = 16 - FirstIdx; |
7731 | 0 | SrcDemE = APInt(NumElts, 0); |
7732 | 0 | if (OpNo == 1) { |
7733 | 0 | APInt DemEls = DemandedElts.trunc(NumSrc0Els); |
7734 | 0 | SrcDemE.insertBits(DemEls, FirstIdx); |
7735 | 0 | } else { |
7736 | 0 | APInt DemEls = DemandedElts.lshr(NumSrc0Els); |
7737 | 0 | SrcDemE.insertBits(DemEls, 0); |
7738 | 0 | } |
7739 | 0 | break; |
7740 | 0 | } |
7741 | 0 | case Intrinsic::s390_vperm: |
7742 | 0 | SrcDemE = APInt(NumElts, 1); |
7743 | 0 | break; |
7744 | 0 | default: |
7745 | 0 | llvm_unreachable("Unhandled intrinsic."); |
7746 | 0 | break; |
7747 | 0 | } |
7748 | 0 | } else { |
7749 | 0 | switch (Opcode) { |
7750 | 0 | case SystemZISD::JOIN_DWORDS: |
7751 | | // Scalar operand. |
7752 | 0 | SrcDemE = APInt(1, 1); |
7753 | 0 | break; |
7754 | 0 | case SystemZISD::SELECT_CCMASK: |
7755 | 0 | SrcDemE = DemandedElts; |
7756 | 0 | break; |
7757 | 0 | default: |
7758 | 0 | llvm_unreachable("Unhandled opcode."); |
7759 | 0 | break; |
7760 | 0 | } |
7761 | 0 | } |
7762 | 0 | return SrcDemE; |
7763 | 0 | } |
7764 | | |
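// Worked example of the PACK mapping above, assuming a v16i8 result packed
// from two v8i16 sources: demanded result element 9 lives in the second
// source's half and maps to element 1 of that source.
#include "llvm/ADT/APInt.h"
static llvm::APInt packDemandedExample() {
  llvm::APInt DemandedElts(16, 0);
  DemandedElts.setBit(9);            // result element 9 is demanded
  llvm::APInt SrcDemE = DemandedElts;
  SrcDemE.lshrInPlace(8);            // OpNo == 2: skip the first source's half
  return SrcDemE.trunc(8);           // bit 1 set: element 1 of the v8i16 source
}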
7765 | | static void computeKnownBitsBinOp(const SDValue Op, KnownBits &Known, |
7766 | | const APInt &DemandedElts, |
7767 | | const SelectionDAG &DAG, unsigned Depth, |
7768 | 0 | unsigned OpNo) { |
7769 | 0 | APInt Src0DemE = getDemandedSrcElements(Op, DemandedElts, OpNo); |
7770 | 0 | APInt Src1DemE = getDemandedSrcElements(Op, DemandedElts, OpNo + 1); |
7771 | 0 | KnownBits LHSKnown = |
7772 | 0 | DAG.computeKnownBits(Op.getOperand(OpNo), Src0DemE, Depth + 1); |
7773 | 0 | KnownBits RHSKnown = |
7774 | 0 | DAG.computeKnownBits(Op.getOperand(OpNo + 1), Src1DemE, Depth + 1); |
7775 | 0 | Known = LHSKnown.intersectWith(RHSKnown); |
7776 | 0 | } |
7777 | | |
7778 | | void |
7779 | | SystemZTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, |
7780 | | KnownBits &Known, |
7781 | | const APInt &DemandedElts, |
7782 | | const SelectionDAG &DAG, |
7783 | 0 | unsigned Depth) const { |
7784 | 0 | Known.resetAll(); |
7785 | | |
7786 | | // Intrinsic CC result is returned in the two low bits. |
7787 | 0 | unsigned tmp0, tmp1; // not used |
7788 | 0 | if (Op.getResNo() == 1 && isIntrinsicWithCC(Op, tmp0, tmp1)) { |
7789 | 0 | Known.Zero.setBitsFrom(2); |
7790 | 0 | return; |
7791 | 0 | } |
7792 | 0 | EVT VT = Op.getValueType(); |
7793 | 0 | if (Op.getResNo() != 0 || VT == MVT::Untyped) |
7794 | 0 | return; |
7795 | 0 | assert (Known.getBitWidth() == VT.getScalarSizeInBits() && |
7796 | 0 | "KnownBits does not match VT in bitwidth"); |
7797 | 0 | assert ((!VT.isVector() || |
7798 | 0 | (DemandedElts.getBitWidth() == VT.getVectorNumElements())) && |
7799 | 0 | "DemandedElts does not match VT number of elements"); |
7800 | 0 | unsigned BitWidth = Known.getBitWidth(); |
7801 | 0 | unsigned Opcode = Op.getOpcode(); |
7802 | 0 | if (Opcode == ISD::INTRINSIC_WO_CHAIN) { |
7803 | 0 | bool IsLogical = false; |
7804 | 0 | unsigned Id = Op.getConstantOperandVal(0); |
7805 | 0 | switch (Id) { |
7806 | 0 | case Intrinsic::s390_vpksh: // PACKS |
7807 | 0 | case Intrinsic::s390_vpksf: |
7808 | 0 | case Intrinsic::s390_vpksg: |
7809 | 0 | case Intrinsic::s390_vpkshs: // PACKS_CC |
7810 | 0 | case Intrinsic::s390_vpksfs: |
7811 | 0 | case Intrinsic::s390_vpksgs: |
7812 | 0 | case Intrinsic::s390_vpklsh: // PACKLS |
7813 | 0 | case Intrinsic::s390_vpklsf: |
7814 | 0 | case Intrinsic::s390_vpklsg: |
7815 | 0 | case Intrinsic::s390_vpklshs: // PACKLS_CC |
7816 | 0 | case Intrinsic::s390_vpklsfs: |
7817 | 0 | case Intrinsic::s390_vpklsgs: |
7818 | 0 | case Intrinsic::s390_vpdi: |
7819 | 0 | case Intrinsic::s390_vsldb: |
7820 | 0 | case Intrinsic::s390_vperm: |
7821 | 0 | computeKnownBitsBinOp(Op, Known, DemandedElts, DAG, Depth, 1); |
7822 | 0 | break; |
7823 | 0 | case Intrinsic::s390_vuplhb: // VECTOR UNPACK LOGICAL HIGH |
7824 | 0 | case Intrinsic::s390_vuplhh: |
7825 | 0 | case Intrinsic::s390_vuplhf: |
7826 | 0 | case Intrinsic::s390_vupllb: // VECTOR UNPACK LOGICAL LOW |
7827 | 0 | case Intrinsic::s390_vupllh: |
7828 | 0 | case Intrinsic::s390_vupllf: |
7829 | 0 | IsLogical = true; |
7830 | 0 | [[fallthrough]]; |
7831 | 0 | case Intrinsic::s390_vuphb: // VECTOR UNPACK HIGH |
7832 | 0 | case Intrinsic::s390_vuphh: |
7833 | 0 | case Intrinsic::s390_vuphf: |
7834 | 0 | case Intrinsic::s390_vuplb: // VECTOR UNPACK LOW |
7835 | 0 | case Intrinsic::s390_vuplhw: |
7836 | 0 | case Intrinsic::s390_vuplf: { |
7837 | 0 | SDValue SrcOp = Op.getOperand(1); |
7838 | 0 | APInt SrcDemE = getDemandedSrcElements(Op, DemandedElts, 0); |
7839 | 0 | Known = DAG.computeKnownBits(SrcOp, SrcDemE, Depth + 1); |
7840 | 0 | if (IsLogical) { |
7841 | 0 | Known = Known.zext(BitWidth); |
7842 | 0 | } else |
7843 | 0 | Known = Known.sext(BitWidth); |
7844 | 0 | break; |
7845 | 0 | } |
7846 | 0 | default: |
7847 | 0 | break; |
7848 | 0 | } |
7849 | 0 | } else { |
7850 | 0 | switch (Opcode) { |
7851 | 0 | case SystemZISD::JOIN_DWORDS: |
7852 | 0 | case SystemZISD::SELECT_CCMASK: |
7853 | 0 | computeKnownBitsBinOp(Op, Known, DemandedElts, DAG, Depth, 0); |
7854 | 0 | break; |
7855 | 0 | case SystemZISD::REPLICATE: { |
7856 | 0 | SDValue SrcOp = Op.getOperand(0); |
7857 | 0 | Known = DAG.computeKnownBits(SrcOp, Depth + 1); |
7858 | 0 | if (Known.getBitWidth() < BitWidth && isa<ConstantSDNode>(SrcOp)) |
7859 | 0 | Known = Known.sext(BitWidth); // VREPI sign extends the immediate.
7860 | 0 | break; |
7861 | 0 | } |
7862 | 0 | default: |
7863 | 0 | break; |
7864 | 0 | } |
7865 | 0 | } |
7866 | | |
7867 | | // Known has the width of the source operand(s). Adjust if needed to match |
7868 | | // the passed bitwidth. |
7869 | 0 | if (Known.getBitWidth() != BitWidth) |
7870 | 0 | Known = Known.anyextOrTrunc(BitWidth); |
7871 | 0 | } |
7872 | | |
7873 | | static unsigned computeNumSignBitsBinOp(SDValue Op, const APInt &DemandedElts, |
7874 | | const SelectionDAG &DAG, unsigned Depth, |
7875 | 0 | unsigned OpNo) { |
7876 | 0 | APInt Src0DemE = getDemandedSrcElements(Op, DemandedElts, OpNo); |
7877 | 0 | unsigned LHS = DAG.ComputeNumSignBits(Op.getOperand(OpNo), Src0DemE, Depth + 1); |
7878 | 0 | if (LHS == 1) return 1; // Early out. |
7879 | 0 | APInt Src1DemE = getDemandedSrcElements(Op, DemandedElts, OpNo + 1); |
7880 | 0 | unsigned RHS = DAG.ComputeNumSignBits(Op.getOperand(OpNo + 1), Src1DemE, Depth + 1); |
7881 | 0 | if (RHS == 1) return 1; // Early out. |
7882 | 0 | unsigned Common = std::min(LHS, RHS); |
7883 | 0 | unsigned SrcBitWidth = Op.getOperand(OpNo).getScalarValueSizeInBits(); |
7884 | 0 | EVT VT = Op.getValueType(); |
7885 | 0 | unsigned VTBits = VT.getScalarSizeInBits(); |
7886 | 0 | if (SrcBitWidth > VTBits) { // PACK |
7887 | 0 | unsigned SrcExtraBits = SrcBitWidth - VTBits; |
7888 | 0 | if (Common > SrcExtraBits) |
7889 | 0 | return (Common - SrcExtraBits); |
7890 | 0 | return 1; |
7891 | 0 | } |
7892 | 0 | assert (SrcBitWidth == VTBits && "Expected operands of same bitwidth."); |
7893 | 0 | return Common; |
7894 | 0 | } |
7895 | | |
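// Sign-bit accounting for the PACK case above, as a sketch: if both v8i16
// sources are known to have at least 10 sign bits, the packed v16i8 result
// keeps 10 - (16 - 8) == 2 of them; anything at or below the truncation
// boundary falls back to the conservative answer of 1.
static unsigned packSignBits(unsigned Common, unsigned SrcBits,
                             unsigned DstBits) {
  unsigned Extra = SrcBits - DstBits; // high bits dropped by the truncation
  return Common > Extra ? Common - Extra : 1;
}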
7896 | | unsigned |
7897 | | SystemZTargetLowering::ComputeNumSignBitsForTargetNode( |
7898 | | SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, |
7899 | 0 | unsigned Depth) const { |
7900 | 0 | if (Op.getResNo() != 0) |
7901 | 0 | return 1; |
7902 | 0 | unsigned Opcode = Op.getOpcode(); |
7903 | 0 | if (Opcode == ISD::INTRINSIC_WO_CHAIN) { |
7904 | 0 | unsigned Id = Op.getConstantOperandVal(0); |
7905 | 0 | switch (Id) { |
7906 | 0 | case Intrinsic::s390_vpksh: // PACKS |
7907 | 0 | case Intrinsic::s390_vpksf: |
7908 | 0 | case Intrinsic::s390_vpksg: |
7909 | 0 | case Intrinsic::s390_vpkshs: // PACKS_CC |
7910 | 0 | case Intrinsic::s390_vpksfs: |
7911 | 0 | case Intrinsic::s390_vpksgs: |
7912 | 0 | case Intrinsic::s390_vpklsh: // PACKLS |
7913 | 0 | case Intrinsic::s390_vpklsf: |
7914 | 0 | case Intrinsic::s390_vpklsg: |
7915 | 0 | case Intrinsic::s390_vpklshs: // PACKLS_CC |
7916 | 0 | case Intrinsic::s390_vpklsfs: |
7917 | 0 | case Intrinsic::s390_vpklsgs: |
7918 | 0 | case Intrinsic::s390_vpdi: |
7919 | 0 | case Intrinsic::s390_vsldb: |
7920 | 0 | case Intrinsic::s390_vperm: |
7921 | 0 | return computeNumSignBitsBinOp(Op, DemandedElts, DAG, Depth, 1); |
7922 | 0 | case Intrinsic::s390_vuphb: // VECTOR UNPACK HIGH |
7923 | 0 | case Intrinsic::s390_vuphh: |
7924 | 0 | case Intrinsic::s390_vuphf: |
7925 | 0 | case Intrinsic::s390_vuplb: // VECTOR UNPACK LOW |
7926 | 0 | case Intrinsic::s390_vuplhw: |
7927 | 0 | case Intrinsic::s390_vuplf: { |
7928 | 0 | SDValue PackedOp = Op.getOperand(1); |
7929 | 0 | APInt SrcDemE = getDemandedSrcElements(Op, DemandedElts, 1); |
7930 | 0 | unsigned Tmp = DAG.ComputeNumSignBits(PackedOp, SrcDemE, Depth + 1); |
7931 | 0 | EVT VT = Op.getValueType(); |
7932 | 0 | unsigned VTBits = VT.getScalarSizeInBits(); |
7933 | 0 | Tmp += VTBits - PackedOp.getScalarValueSizeInBits(); |
7934 | 0 | return Tmp; |
7935 | 0 | } |
7936 | 0 | default: |
7937 | 0 | break; |
7938 | 0 | } |
7939 | 0 | } else { |
7940 | 0 | switch (Opcode) { |
7941 | 0 | case SystemZISD::SELECT_CCMASK: |
7942 | 0 | return computeNumSignBitsBinOp(Op, DemandedElts, DAG, Depth, 0); |
7943 | 0 | default: |
7944 | 0 | break; |
7945 | 0 | } |
7946 | 0 | } |
7947 | | |
7948 | 0 | return 1; |
7949 | 0 | } |
7950 | | |
7951 | | bool SystemZTargetLowering:: |
7952 | | isGuaranteedNotToBeUndefOrPoisonForTargetNode(SDValue Op, |
7953 | | const APInt &DemandedElts, const SelectionDAG &DAG, |
7954 | 0 | bool PoisonOnly, unsigned Depth) const { |
7955 | 0 | switch (Op->getOpcode()) { |
7956 | 0 | case SystemZISD::PCREL_WRAPPER: |
7957 | 0 | case SystemZISD::PCREL_OFFSET: |
7958 | 0 | return true; |
7959 | 0 | } |
7960 | 0 | return false; |
7961 | 0 | } |
7962 | | |
7963 | | unsigned |
7964 | 0 | SystemZTargetLowering::getStackProbeSize(const MachineFunction &MF) const { |
7965 | 0 | const TargetFrameLowering *TFI = Subtarget.getFrameLowering(); |
7966 | 0 | unsigned StackAlign = TFI->getStackAlignment(); |
7967 | 0 | assert(StackAlign >= 1 && isPowerOf2_32(StackAlign) &&
7968 | 0 | "Unexpected stack alignment"); |
7969 | | // The default stack probe size is 4096 if the function has no |
7970 | | // stack-probe-size attribute. |
7971 | 0 | unsigned StackProbeSize = |
7972 | 0 | MF.getFunction().getFnAttributeAsParsedInteger("stack-probe-size", 4096); |
7973 | | // Round down to the stack alignment. |
7974 | 0 | StackProbeSize &= ~(StackAlign - 1); |
7975 | 0 | return StackProbeSize ? StackProbeSize : StackAlign; |
7976 | 0 | } |
7977 | | |
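// The power-of-two round-down used by getStackProbeSize above: for example,
// a "stack-probe-size" of 4100 with 8-byte stack alignment yields 4096.
static unsigned roundDownToAlign(unsigned Size, unsigned Align) {
  return Size & ~(Align - 1); // Align must be a power of two
}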
7978 | | //===----------------------------------------------------------------------===// |
7979 | | // Custom insertion |
7980 | | //===----------------------------------------------------------------------===// |
7981 | | |
7982 | | // Force base value Base into a register before MI. Return the register. |
7983 | | static Register forceReg(MachineInstr &MI, MachineOperand &Base, |
7984 | 0 | const SystemZInstrInfo *TII) { |
7985 | 0 | MachineBasicBlock *MBB = MI.getParent(); |
7986 | 0 | MachineFunction &MF = *MBB->getParent(); |
7987 | 0 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
7988 | |
7989 | 0 | if (Base.isReg()) { |
7990 | | // Copy Base into a new virtual register to help register coalescing in |
7991 | | // cases with multiple uses. |
7992 | 0 | Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); |
7993 | 0 | BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::COPY), Reg) |
7994 | 0 | .add(Base); |
7995 | 0 | return Reg; |
7996 | 0 | } |
7997 | | |
7998 | 0 | Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); |
7999 | 0 | BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LA), Reg) |
8000 | 0 | .add(Base) |
8001 | 0 | .addImm(0) |
8002 | 0 | .addReg(0); |
8003 | 0 | return Reg; |
8004 | 0 | } |
8005 | | |
8006 | | // The CC operand of MI might be missing a kill marker because there |
8007 | | // were multiple uses of CC, and ISel didn't know which to mark. |
8008 | | // Figure out whether MI should have had a kill marker. |
8009 | 0 | static bool checkCCKill(MachineInstr &MI, MachineBasicBlock *MBB) { |
8010 | | // Scan forward through BB for a use/def of CC. |
8011 | 0 | MachineBasicBlock::iterator miI(std::next(MachineBasicBlock::iterator(MI))); |
8012 | 0 | for (MachineBasicBlock::iterator miE = MBB->end(); miI != miE; ++miI) { |
8013 | 0 | const MachineInstr& mi = *miI; |
8014 | 0 | if (mi.readsRegister(SystemZ::CC)) |
8015 | 0 | return false; |
8016 | 0 | if (mi.definesRegister(SystemZ::CC)) |
8017 | 0 | break; // Should have kill-flag - update below. |
8018 | 0 | } |
8019 | | |
8020 | | // If we hit the end of the block, check whether CC is live into a |
8021 | | // successor. |
8022 | 0 | if (miI == MBB->end()) { |
8023 | 0 | for (const MachineBasicBlock *Succ : MBB->successors()) |
8024 | 0 | if (Succ->isLiveIn(SystemZ::CC)) |
8025 | 0 | return false; |
8026 | 0 | } |
8027 | | |
8028 | 0 | return true; |
8029 | 0 | } |
8030 | | |
8031 | | // Return true if it is OK for this Select pseudo-opcode to be cascaded |
8032 | | // together with other Select pseudo-opcodes into a single basic-block with |
8033 | | // a conditional jump around it. |
8034 | 0 | static bool isSelectPseudo(MachineInstr &MI) { |
8035 | 0 | switch (MI.getOpcode()) { |
8036 | 0 | case SystemZ::Select32: |
8037 | 0 | case SystemZ::Select64: |
8038 | 0 | case SystemZ::Select128: |
8039 | 0 | case SystemZ::SelectF32: |
8040 | 0 | case SystemZ::SelectF64: |
8041 | 0 | case SystemZ::SelectF128: |
8042 | 0 | case SystemZ::SelectVR32: |
8043 | 0 | case SystemZ::SelectVR64: |
8044 | 0 | case SystemZ::SelectVR128: |
8045 | 0 | return true; |
8046 | | |
8047 | 0 | default: |
8048 | 0 | return false; |
8049 | 0 | } |
8050 | 0 | } |
8051 | | |
8052 | | // Helper function, which inserts PHI functions into SinkMBB: |
8053 | | // %Result(i) = phi [ %FalseValue(i), FalseMBB ], [ %TrueValue(i), TrueMBB ], |
8054 | | // where %FalseValue(i) and %TrueValue(i) are taken from Selects. |
8055 | | static void createPHIsForSelects(SmallVector<MachineInstr*, 8> &Selects, |
8056 | | MachineBasicBlock *TrueMBB, |
8057 | | MachineBasicBlock *FalseMBB, |
8058 | 0 | MachineBasicBlock *SinkMBB) { |
8059 | 0 | MachineFunction *MF = TrueMBB->getParent(); |
8060 | 0 | const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo(); |
8061 | |
8062 | 0 | MachineInstr *FirstMI = Selects.front(); |
8063 | 0 | unsigned CCValid = FirstMI->getOperand(3).getImm(); |
8064 | 0 | unsigned CCMask = FirstMI->getOperand(4).getImm(); |
8065 | |
8066 | 0 | MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin(); |
8067 | | |
8068 | | // As we are creating the PHIs, we have to be careful if there is more than |
8069 | | // one. Later Selects may reference the results of earlier Selects, but later |
8070 | | // PHIs have to reference the individual true/false inputs from earlier PHIs. |
8071 | | // That also means that PHI construction must work forward from earlier to |
8072 | | // later, and that the code must maintain a mapping from each earlier PHI's
8073 | | // destination register to the registers that went into that PHI.
8074 | 0 | DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable; |
8075 | |
8076 | 0 | for (auto *MI : Selects) { |
8077 | 0 | Register DestReg = MI->getOperand(0).getReg(); |
8078 | 0 | Register TrueReg = MI->getOperand(1).getReg(); |
8079 | 0 | Register FalseReg = MI->getOperand(2).getReg(); |
8080 | | |
8081 | | // If this Select we are generating is the opposite condition from |
8082 | | // the jump we generated, then we have to swap the operands for the |
8083 | | // PHI that is going to be generated. |
8084 | 0 | if (MI->getOperand(4).getImm() == (CCValid ^ CCMask)) |
8085 | 0 | std::swap(TrueReg, FalseReg); |
8086 | |
8087 | 0 | if (RegRewriteTable.contains(TrueReg)) |
8088 | 0 | TrueReg = RegRewriteTable[TrueReg].first; |
8089 | |
8090 | 0 | if (RegRewriteTable.contains(FalseReg)) |
8091 | 0 | FalseReg = RegRewriteTable[FalseReg].second; |
8092 | |
|
8093 | 0 | DebugLoc DL = MI->getDebugLoc(); |
8094 | 0 | BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(SystemZ::PHI), DestReg) |
8095 | 0 | .addReg(TrueReg).addMBB(TrueMBB) |
8096 | 0 | .addReg(FalseReg).addMBB(FalseMBB); |
8097 | | |
8098 | | // Add this PHI to the rewrite table. |
8099 | 0 | RegRewriteTable[DestReg] = std::make_pair(TrueReg, FalseReg); |
8100 | 0 | } |
8101 | |
8102 | 0 | MF->getProperties().reset(MachineFunctionProperties::Property::NoPHIs); |
8103 | 0 | } |
8104 | | |
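// Cascading example for the rewrite table above (hypothetical registers):
//   %a = Select %t1, %f1  becomes  %a = phi [ %t1, TrueMBB ], [ %f1, FalseMBB ]
//   %b = Select %a,  %f2  becomes  %b = phi [ %t1, TrueMBB ], [ %f2, FalseMBB ]
// The second PHI cannot reference %a directly, since %a is only defined in
// SinkMBB itself, so RegRewriteTable maps %a back to its (%t1, %f1) inputs.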
8105 | | // Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI. |
8106 | | MachineBasicBlock * |
8107 | | SystemZTargetLowering::emitSelect(MachineInstr &MI, |
8108 | 0 | MachineBasicBlock *MBB) const { |
8109 | 0 | assert(isSelectPseudo(MI) && "Bad call to emitSelect()"); |
8110 | 0 | const SystemZInstrInfo *TII = Subtarget.getInstrInfo(); |
8111 | |
|
8112 | 0 | unsigned CCValid = MI.getOperand(3).getImm(); |
8113 | 0 | unsigned CCMask = MI.getOperand(4).getImm(); |
8114 | | |
8115 | | // If we have a sequence of Select* pseudo instructions using the |
8116 | | // same condition code value, we want to expand all of them into |
8117 | | // a single pair of basic blocks using the same condition. |
8118 | 0 | SmallVector<MachineInstr*, 8> Selects; |
8119 | 0 | SmallVector<MachineInstr*, 8> DbgValues; |
8120 | 0 | Selects.push_back(&MI); |
8121 | 0 | unsigned Count = 0; |
8122 | 0 | for (MachineInstr &NextMI : llvm::make_range( |
8123 | 0 | std::next(MachineBasicBlock::iterator(MI)), MBB->end())) { |
8124 | 0 | if (isSelectPseudo(NextMI)) { |
8125 | 0 | assert(NextMI.getOperand(3).getImm() == CCValid && |
8126 | 0 | "Bad CCValid operands since CC was not redefined."); |
8127 | 0 | if (NextMI.getOperand(4).getImm() == CCMask || |
8128 | 0 | NextMI.getOperand(4).getImm() == (CCValid ^ CCMask)) { |
8129 | 0 | Selects.push_back(&NextMI); |
8130 | 0 | continue; |
8131 | 0 | } |
8132 | 0 | break; |
8133 | 0 | } |
8134 | 0 | if (NextMI.definesRegister(SystemZ::CC) || NextMI.usesCustomInsertionHook()) |
8135 | 0 | break; |
8136 | 0 | bool User = false; |
8137 | 0 | for (auto *SelMI : Selects) |
8138 | 0 | if (NextMI.readsVirtualRegister(SelMI->getOperand(0).getReg())) { |
8139 | 0 | User = true; |
8140 | 0 | break; |
8141 | 0 | } |
8142 | 0 | if (NextMI.isDebugInstr()) { |
8143 | 0 | if (User) { |
8144 | 0 | assert(NextMI.isDebugValue() && "Unhandled debug opcode."); |
8145 | 0 | DbgValues.push_back(&NextMI); |
8146 | 0 | } |
8147 | 0 | } else if (User || ++Count > 20) |
8148 | 0 | break; |
8149 | 0 | } |
8150 | |
8151 | 0 | MachineInstr *LastMI = Selects.back(); |
8152 | 0 | bool CCKilled = |
8153 | 0 | (LastMI->killsRegister(SystemZ::CC) || checkCCKill(*LastMI, MBB)); |
8154 | 0 | MachineBasicBlock *StartMBB = MBB; |
8155 | 0 | MachineBasicBlock *JoinMBB = SystemZ::splitBlockAfter(LastMI, MBB); |
8156 | 0 | MachineBasicBlock *FalseMBB = SystemZ::emitBlockAfter(StartMBB); |
8157 | | |
8158 | | // Unless CC was killed in the last Select instruction, mark it as |
8159 | | // live-in to both FalseMBB and JoinMBB. |
8160 | 0 | if (!CCKilled) { |
8161 | 0 | FalseMBB->addLiveIn(SystemZ::CC); |
8162 | 0 | JoinMBB->addLiveIn(SystemZ::CC); |
8163 | 0 | } |
8164 | | |
8165 | | // StartMBB: |
8166 | | // BRC CCMask, JoinMBB |
8167 | | // # fallthrough to FalseMBB |
8168 | 0 | MBB = StartMBB; |
8169 | 0 | BuildMI(MBB, MI.getDebugLoc(), TII->get(SystemZ::BRC)) |
8170 | 0 | .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB); |
8171 | 0 | MBB->addSuccessor(JoinMBB); |
8172 | 0 | MBB->addSuccessor(FalseMBB); |
8173 | | |
8174 | | // FalseMBB: |
8175 | | // # fallthrough to JoinMBB |
8176 | 0 | MBB = FalseMBB; |
8177 | 0 | MBB->addSuccessor(JoinMBB); |
8178 | | |
8179 | | // JoinMBB: |
8180 | | // %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ] |
8181 | | // ... |
8182 | 0 | MBB = JoinMBB; |
8183 | 0 | createPHIsForSelects(Selects, StartMBB, FalseMBB, MBB); |
8184 | 0 | for (auto *SelMI : Selects) |
8185 | 0 | SelMI->eraseFromParent(); |
8186 | |
8187 | 0 | MachineBasicBlock::iterator InsertPos = MBB->getFirstNonPHI(); |
8188 | 0 | for (auto *DbgMI : DbgValues) |
8189 | 0 | MBB->splice(InsertPos, StartMBB, DbgMI); |
8190 | |
8191 | 0 | return JoinMBB; |
8192 | 0 | } |
8193 | | |
8194 | | // Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI. |
8195 | | // StoreOpcode is the store to use and Invert says whether the store should |
8196 | | // happen when the condition is false rather than true. If a STORE ON |
8197 | | // CONDITION is available, STOCOpcode is its opcode, otherwise it is 0. |
8198 | | MachineBasicBlock *SystemZTargetLowering::emitCondStore(MachineInstr &MI, |
8199 | | MachineBasicBlock *MBB, |
8200 | | unsigned StoreOpcode, |
8201 | | unsigned STOCOpcode, |
8202 | 0 | bool Invert) const { |
8203 | 0 | const SystemZInstrInfo *TII = Subtarget.getInstrInfo(); |
8204 | |
8205 | 0 | Register SrcReg = MI.getOperand(0).getReg(); |
8206 | 0 | MachineOperand Base = MI.getOperand(1); |
8207 | 0 | int64_t Disp = MI.getOperand(2).getImm(); |
8208 | 0 | Register IndexReg = MI.getOperand(3).getReg(); |
8209 | 0 | unsigned CCValid = MI.getOperand(4).getImm(); |
8210 | 0 | unsigned CCMask = MI.getOperand(5).getImm(); |
8211 | 0 | DebugLoc DL = MI.getDebugLoc(); |
8212 | |
8213 | 0 | StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp); |
8214 | | |
8215 | | // ISel pattern matching also adds a load memory operand of the same |
8216 | | // address, so take special care to find the storing memory operand. |
8217 | 0 | MachineMemOperand *MMO = nullptr; |
8218 | 0 | for (auto *I : MI.memoperands()) |
8219 | 0 | if (I->isStore()) { |
8220 | 0 | MMO = I; |
8221 | 0 | break; |
8222 | 0 | } |
8223 | | |
8224 | | // Use STOCOpcode if possible. We could use different store patterns in |
8225 | | // order to avoid matching the index register, but the performance trade-offs |
8226 | | // might be more complicated in that case. |
8227 | 0 | if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) { |
8228 | 0 | if (Invert) |
8229 | 0 | CCMask ^= CCValid; |
8230 | |
8231 | 0 | BuildMI(*MBB, MI, DL, TII->get(STOCOpcode)) |
8232 | 0 | .addReg(SrcReg) |
8233 | 0 | .add(Base) |
8234 | 0 | .addImm(Disp) |
8235 | 0 | .addImm(CCValid) |
8236 | 0 | .addImm(CCMask) |
8237 | 0 | .addMemOperand(MMO); |
8238 | |
8239 | 0 | MI.eraseFromParent(); |
8240 | 0 | return MBB; |
8241 | 0 | } |
8242 | | |
8243 | | // Get the condition needed to branch around the store. |
8244 | 0 | if (!Invert) |
8245 | 0 | CCMask ^= CCValid; |
8246 | |
8247 | 0 | MachineBasicBlock *StartMBB = MBB; |
8248 | 0 | MachineBasicBlock *JoinMBB = SystemZ::splitBlockBefore(MI, MBB); |
8249 | 0 | MachineBasicBlock *FalseMBB = SystemZ::emitBlockAfter(StartMBB); |
8250 | | |
8251 | | // Unless CC was killed in the CondStore instruction, mark it as |
8252 | | // live-in to both FalseMBB and JoinMBB. |
8253 | 0 | if (!MI.killsRegister(SystemZ::CC) && !checkCCKill(MI, JoinMBB)) { |
8254 | 0 | FalseMBB->addLiveIn(SystemZ::CC); |
8255 | 0 | JoinMBB->addLiveIn(SystemZ::CC); |
8256 | 0 | } |
8257 | | |
8258 | | // StartMBB: |
8259 | | // BRC CCMask, JoinMBB |
8260 | | // # fallthrough to FalseMBB |
8261 | 0 | MBB = StartMBB; |
8262 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) |
8263 | 0 | .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB); |
8264 | 0 | MBB->addSuccessor(JoinMBB); |
8265 | 0 | MBB->addSuccessor(FalseMBB); |
8266 | | |
8267 | | // FalseMBB: |
8268 | | // store %SrcReg, %Disp(%Index,%Base) |
8269 | | // # fallthrough to JoinMBB |
8270 | 0 | MBB = FalseMBB; |
8271 | 0 | BuildMI(MBB, DL, TII->get(StoreOpcode)) |
8272 | 0 | .addReg(SrcReg) |
8273 | 0 | .add(Base) |
8274 | 0 | .addImm(Disp) |
8275 | 0 | .addReg(IndexReg) |
8276 | 0 | .addMemOperand(MMO); |
8277 | 0 | MBB->addSuccessor(JoinMBB); |
8278 | |
8279 | 0 | MI.eraseFromParent(); |
8280 | 0 | return JoinMBB; |
8281 | 0 | } |
8282 | | |
8283 | | // Implement EmitInstrWithCustomInserter for pseudo [SU]Cmp128Hi instruction MI. |
8284 | | MachineBasicBlock * |
8285 | | SystemZTargetLowering::emitICmp128Hi(MachineInstr &MI, |
8286 | | MachineBasicBlock *MBB, |
8287 | 0 | bool Unsigned) const { |
8288 | 0 | MachineFunction &MF = *MBB->getParent(); |
8289 | 0 | const SystemZInstrInfo *TII = Subtarget.getInstrInfo(); |
8290 | 0 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
8291 | | |
8292 | | // Synthetic instruction to compare 128-bit values. |
8293 | | // Sets CC 1 if Op0 > Op1, sets a different CC otherwise. |
8294 | 0 | Register Op0 = MI.getOperand(0).getReg(); |
8295 | 0 | Register Op1 = MI.getOperand(1).getReg(); |
8296 | |
8297 | 0 | MachineBasicBlock *StartMBB = MBB; |
8298 | 0 | MachineBasicBlock *JoinMBB = SystemZ::splitBlockAfter(MI, MBB); |
8299 | 0 | MachineBasicBlock *HiEqMBB = SystemZ::emitBlockAfter(StartMBB); |
8300 | | |
8301 | | // StartMBB: |
8302 | | // |
8303 | | // Use VECTOR ELEMENT COMPARE [LOGICAL] to compare the high parts. |
8304 | | // Swap the inputs to get: |
8305 | | // CC 1 if high(Op0) > high(Op1) |
8306 | | // CC 2 if high(Op0) < high(Op1) |
8307 | | // CC 0 if high(Op0) == high(Op1) |
8308 | | // |
8309 | | // If CC != 0, we're done, so jump over the next instruction.
8310 | | // |
8311 | | // VEC[L]G Op1, Op0 |
8312 | | // JNE JoinMBB |
8313 | | // # fallthrough to HiEqMBB |
8314 | 0 | MBB = StartMBB; |
8315 | 0 | int HiOpcode = Unsigned? SystemZ::VECLG : SystemZ::VECG; |
8316 | 0 | BuildMI(MBB, MI.getDebugLoc(), TII->get(HiOpcode)) |
8317 | 0 | .addReg(Op1).addReg(Op0); |
8318 | 0 | BuildMI(MBB, MI.getDebugLoc(), TII->get(SystemZ::BRC)) |
8319 | 0 | .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE).addMBB(JoinMBB); |
8320 | 0 | MBB->addSuccessor(JoinMBB); |
8321 | 0 | MBB->addSuccessor(HiEqMBB); |
8322 | | |
8323 | | // HiEqMBB: |
8324 | | // |
8325 | | // Otherwise, use VECTOR COMPARE HIGH LOGICAL. |
8326 | | // Since we already know the high parts are equal, the CC |
8327 | | // result will only depend on the low parts: |
8328 | | // CC 1 if low(Op0) > low(Op1) |
8329 | | // CC 3 if low(Op0) <= low(Op1) |
8330 | | // |
8331 | | // VCHLGS Tmp, Op0, Op1 |
8332 | | // # fallthrough to JoinMBB |
8333 | 0 | MBB = HiEqMBB; |
8334 | 0 | Register Temp = MRI.createVirtualRegister(&SystemZ::VR128BitRegClass); |
8335 | 0 | BuildMI(MBB, MI.getDebugLoc(), TII->get(SystemZ::VCHLGS), Temp) |
8336 | 0 | .addReg(Op0).addReg(Op1); |
8337 | 0 | MBB->addSuccessor(JoinMBB); |
8338 | | |
8339 | | // Mark CC as live-in to JoinMBB. |
8340 | 0 | JoinMBB->addLiveIn(SystemZ::CC); |
8341 | |
8342 | 0 | MI.eraseFromParent(); |
8343 | 0 | return JoinMBB; |
8344 | 0 | } |
8345 | | |
8346 | | // Implement EmitInstrWithCustomInserter for subword pseudo ATOMIC_LOADW_* or |
8347 | | // ATOMIC_SWAPW instruction MI. BinOpcode is the instruction that performs |
8348 | | // the binary operation elided by "*", or 0 for ATOMIC_SWAPW. Invert says |
8349 | | // whether the field should be inverted after performing BinOpcode (e.g. for |
8350 | | // NAND). |
8351 | | MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary( |
8352 | | MachineInstr &MI, MachineBasicBlock *MBB, unsigned BinOpcode, |
8353 | 0 | bool Invert) const { |
8354 | 0 | MachineFunction &MF = *MBB->getParent(); |
8355 | 0 | const SystemZInstrInfo *TII = Subtarget.getInstrInfo(); |
8356 | 0 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
8357 | | |
8358 | | // Extract the operands. Base can be a register or a frame index. |
8359 | | // Src2 can be a register or immediate. |
8360 | 0 | Register Dest = MI.getOperand(0).getReg(); |
8361 | 0 | MachineOperand Base = earlyUseOperand(MI.getOperand(1)); |
8362 | 0 | int64_t Disp = MI.getOperand(2).getImm(); |
8363 | 0 | MachineOperand Src2 = earlyUseOperand(MI.getOperand(3)); |
8364 | 0 | Register BitShift = MI.getOperand(4).getReg(); |
8365 | 0 | Register NegBitShift = MI.getOperand(5).getReg(); |
8366 | 0 | unsigned BitSize = MI.getOperand(6).getImm(); |
8367 | 0 | DebugLoc DL = MI.getDebugLoc(); |
8368 | | |
8369 | | // Get the right opcodes for the displacement. |
8370 | 0 | unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp); |
8371 | 0 | unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp); |
8372 | 0 | assert(LOpcode && CSOpcode && "Displacement out of range"); |
8373 | | |
8374 | | // Create virtual registers for temporary results. |
8375 | 0 | Register OrigVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass); |
8376 | 0 | Register OldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass); |
8377 | 0 | Register NewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass); |
8378 | 0 | Register RotatedOldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass); |
8379 | 0 | Register RotatedNewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass); |
8380 | | |
8381 | | // Insert a basic block for the main loop. |
8382 | 0 | MachineBasicBlock *StartMBB = MBB; |
8383 | 0 | MachineBasicBlock *DoneMBB = SystemZ::splitBlockBefore(MI, MBB); |
8384 | 0 | MachineBasicBlock *LoopMBB = SystemZ::emitBlockAfter(StartMBB); |
8385 | | |
8386 | | // StartMBB: |
8387 | | // ... |
8388 | | // %OrigVal = L Disp(%Base) |
8389 | | // # fall through to LoopMBB |
8390 | 0 | MBB = StartMBB; |
8391 | 0 | BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0); |
8392 | 0 | MBB->addSuccessor(LoopMBB); |
8393 | | |
8394 | | // LoopMBB: |
8395 | | // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, LoopMBB ] |
8396 | | // %RotatedOldVal = RLL %OldVal, 0(%BitShift) |
8397 | | // %RotatedNewVal = OP %RotatedOldVal, %Src2 |
8398 | | // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift) |
8399 | | // %Dest = CS %OldVal, %NewVal, Disp(%Base) |
8400 | | // JNE LoopMBB |
8401 | | // # fall through to DoneMBB |
8402 | 0 | MBB = LoopMBB; |
8403 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal) |
8404 | 0 | .addReg(OrigVal).addMBB(StartMBB) |
8405 | 0 | .addReg(Dest).addMBB(LoopMBB); |
8406 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal) |
8407 | 0 | .addReg(OldVal).addReg(BitShift).addImm(0); |
8408 | 0 | if (Invert) { |
8409 | | // Perform the operation normally and then invert every bit of the field. |
8410 | 0 | Register Tmp = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass); |
8411 | 0 | BuildMI(MBB, DL, TII->get(BinOpcode), Tmp).addReg(RotatedOldVal).add(Src2); |
8412 | | // XILF with the upper BitSize bits set. |
8413 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal) |
8414 | 0 | .addReg(Tmp).addImm(-1U << (32 - BitSize)); |
8415 | 0 | } else if (BinOpcode) |
8416 | | // A simple binary operation.
8417 | 0 | BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal) |
8418 | 0 | .addReg(RotatedOldVal) |
8419 | 0 | .add(Src2); |
8420 | 0 | else |
8421 | | // Use RISBG to rotate Src2 into position and use it to replace the |
8422 | | // field in RotatedOldVal. |
8423 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal) |
8424 | 0 | .addReg(RotatedOldVal).addReg(Src2.getReg()) |
8425 | 0 | .addImm(32).addImm(31 + BitSize).addImm(32 - BitSize); |
8426 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal) |
8427 | 0 | .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0); |
8428 | 0 | BuildMI(MBB, DL, TII->get(CSOpcode), Dest) |
8429 | 0 | .addReg(OldVal) |
8430 | 0 | .addReg(NewVal) |
8431 | 0 | .add(Base) |
8432 | 0 | .addImm(Disp); |
8433 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) |
8434 | 0 | .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); |
8435 | 0 | MBB->addSuccessor(LoopMBB); |
8436 | 0 | MBB->addSuccessor(DoneMBB); |
8437 | |
8438 | 0 | MI.eraseFromParent(); |
8439 | 0 | return DoneMBB; |
8440 | 0 | } |
8441 | | |
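// C++-level sketch of the compare-and-swap loop emitted above, ignoring the
// rotations that isolate the subword field within its aligned 32-bit word:
#include <atomic>
#include <cstdint>
static uint32_t atomicRMWSketch(std::atomic<uint32_t> &Word, uint32_t Src2,
                                uint32_t (*BinOp)(uint32_t, uint32_t)) {
  uint32_t Old = Word.load();                      // the initial L
  uint32_t New;
  do {
    New = BinOp(Old, Src2);                        // OP on the rotated field
  } while (!Word.compare_exchange_weak(Old, New)); // CS; JNE retries the loop
  return Old;                                      // the value CS loaded
}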
8442 | | // Implement EmitInstrWithCustomInserter for subword pseudo |
8443 | | // ATOMIC_LOADW_{,U}{MIN,MAX} instruction MI. CompareOpcode is the |
8444 | | // instruction that should be used to compare the current field with the |
8445 | | // minimum or maximum value. KeepOldMask is the BRC condition-code mask |
8446 | | // for when the current field should be kept. |
8447 | | MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadMinMax( |
8448 | | MachineInstr &MI, MachineBasicBlock *MBB, unsigned CompareOpcode, |
8449 | 0 | unsigned KeepOldMask) const { |
8450 | 0 | MachineFunction &MF = *MBB->getParent(); |
8451 | 0 | const SystemZInstrInfo *TII = Subtarget.getInstrInfo(); |
8452 | 0 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
8453 | | |
8454 | | // Extract the operands. Base can be a register or a frame index. |
8455 | 0 | Register Dest = MI.getOperand(0).getReg(); |
8456 | 0 | MachineOperand Base = earlyUseOperand(MI.getOperand(1)); |
8457 | 0 | int64_t Disp = MI.getOperand(2).getImm(); |
8458 | 0 | Register Src2 = MI.getOperand(3).getReg(); |
8459 | 0 | Register BitShift = MI.getOperand(4).getReg(); |
8460 | 0 | Register NegBitShift = MI.getOperand(5).getReg(); |
8461 | 0 | unsigned BitSize = MI.getOperand(6).getImm(); |
8462 | 0 | DebugLoc DL = MI.getDebugLoc(); |
8463 | | |
8464 | | // Get the right opcodes for the displacement. |
8465 | 0 | unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp); |
8466 | 0 | unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp); |
8467 | 0 | assert(LOpcode && CSOpcode && "Displacement out of range"); |
8468 | | |
8469 | | // Create virtual registers for temporary results. |
8470 | 0 | Register OrigVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass); |
8471 | 0 | Register OldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass); |
8472 | 0 | Register NewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass); |
8473 | 0 | Register RotatedOldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass); |
8474 | 0 | Register RotatedAltVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass); |
8475 | 0 | Register RotatedNewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass); |
8476 | | |
8477 | | // Insert 3 basic blocks for the loop. |
8478 | 0 | MachineBasicBlock *StartMBB = MBB; |
8479 | 0 | MachineBasicBlock *DoneMBB = SystemZ::splitBlockBefore(MI, MBB); |
8480 | 0 | MachineBasicBlock *LoopMBB = SystemZ::emitBlockAfter(StartMBB); |
8481 | 0 | MachineBasicBlock *UseAltMBB = SystemZ::emitBlockAfter(LoopMBB); |
8482 | 0 | MachineBasicBlock *UpdateMBB = SystemZ::emitBlockAfter(UseAltMBB); |
8483 | | |
8484 | | // StartMBB: |
8485 | | // ... |
8486 | | // %OrigVal = L Disp(%Base) |
8487 | | // # fall through to LoopMBB |
8488 | 0 | MBB = StartMBB; |
8489 | 0 | BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0); |
8490 | 0 | MBB->addSuccessor(LoopMBB); |
8491 | | |
8492 | | // LoopMBB: |
8493 | | // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, UpdateMBB ] |
8494 | | // %RotatedOldVal = RLL %OldVal, 0(%BitShift) |
8495 | | // CompareOpcode %RotatedOldVal, %Src2 |
8496 | | // BRC KeepOldMask, UpdateMBB |
8497 | 0 | MBB = LoopMBB; |
8498 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal) |
8499 | 0 | .addReg(OrigVal).addMBB(StartMBB) |
8500 | 0 | .addReg(Dest).addMBB(UpdateMBB); |
8501 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal) |
8502 | 0 | .addReg(OldVal).addReg(BitShift).addImm(0); |
8503 | 0 | BuildMI(MBB, DL, TII->get(CompareOpcode)) |
8504 | 0 | .addReg(RotatedOldVal).addReg(Src2); |
8505 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) |
8506 | 0 | .addImm(SystemZ::CCMASK_ICMP).addImm(KeepOldMask).addMBB(UpdateMBB); |
8507 | 0 | MBB->addSuccessor(UpdateMBB); |
8508 | 0 | MBB->addSuccessor(UseAltMBB); |
8509 | | |
8510 | | // UseAltMBB: |
8511 | | // %RotatedAltVal = RISBG %RotatedOldVal, %Src2, 32, 31 + BitSize, 0 |
8512 | | // # fall through to UpdateMBB |
8513 | 0 | MBB = UseAltMBB; |
8514 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal) |
8515 | 0 | .addReg(RotatedOldVal).addReg(Src2) |
8516 | 0 | .addImm(32).addImm(31 + BitSize).addImm(0); |
8517 | 0 | MBB->addSuccessor(UpdateMBB); |
8518 | | |
8519 | | // UpdateMBB: |
8520 | | // %RotatedNewVal = PHI [ %RotatedOldVal, LoopMBB ], |
8521 | | // [ %RotatedAltVal, UseAltMBB ] |
8522 | | // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift) |
8523 | | // %Dest = CS %OldVal, %NewVal, Disp(%Base) |
8524 | | // JNE LoopMBB |
8525 | | // # fall through to DoneMBB |
8526 | 0 | MBB = UpdateMBB; |
8527 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal) |
8528 | 0 | .addReg(RotatedOldVal).addMBB(LoopMBB) |
8529 | 0 | .addReg(RotatedAltVal).addMBB(UseAltMBB); |
8530 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal) |
8531 | 0 | .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0); |
8532 | 0 | BuildMI(MBB, DL, TII->get(CSOpcode), Dest) |
8533 | 0 | .addReg(OldVal) |
8534 | 0 | .addReg(NewVal) |
8535 | 0 | .add(Base) |
8536 | 0 | .addImm(Disp); |
8537 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) |
8538 | 0 | .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); |
8539 | 0 | MBB->addSuccessor(LoopMBB); |
8540 | 0 | MBB->addSuccessor(DoneMBB); |
8541 | |
8542 | 0 | MI.eraseFromParent(); |
8543 | 0 | return DoneMBB; |
8544 | 0 | } |
8545 | | |
8546 | | // Implement EmitInstrWithCustomInserter for subword pseudo ATOMIC_CMP_SWAPW |
8547 | | // instruction MI. |
8548 | | MachineBasicBlock * |
8549 | | SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr &MI, |
8550 | 0 | MachineBasicBlock *MBB) const { |
8551 | 0 | MachineFunction &MF = *MBB->getParent(); |
8552 | 0 | const SystemZInstrInfo *TII = Subtarget.getInstrInfo(); |
8553 | 0 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
8554 | | |
8555 | | // Extract the operands. Base can be a register or a frame index. |
8556 | 0 | Register Dest = MI.getOperand(0).getReg(); |
8557 | 0 | MachineOperand Base = earlyUseOperand(MI.getOperand(1)); |
8558 | 0 | int64_t Disp = MI.getOperand(2).getImm(); |
8559 | 0 | Register CmpVal = MI.getOperand(3).getReg(); |
8560 | 0 | Register OrigSwapVal = MI.getOperand(4).getReg(); |
8561 | 0 | Register BitShift = MI.getOperand(5).getReg(); |
8562 | 0 | Register NegBitShift = MI.getOperand(6).getReg(); |
8563 | 0 | int64_t BitSize = MI.getOperand(7).getImm(); |
8564 | 0 | DebugLoc DL = MI.getDebugLoc(); |
8565 | |
8566 | 0 | const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass; |
8567 | | |
8568 | | // Get the right opcodes for the displacement and zero-extension. |
8569 | 0 | unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp); |
8570 | 0 | unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp); |
8571 | 0 | unsigned ZExtOpcode = BitSize == 8 ? SystemZ::LLCR : SystemZ::LLHR; |
8572 | 0 | assert(LOpcode && CSOpcode && "Displacement out of range"); |
8573 | | |
8574 | | // Create virtual registers for temporary results. |
8575 | 0 | Register OrigOldVal = MRI.createVirtualRegister(RC); |
8576 | 0 | Register OldVal = MRI.createVirtualRegister(RC); |
8577 | 0 | Register SwapVal = MRI.createVirtualRegister(RC); |
8578 | 0 | Register StoreVal = MRI.createVirtualRegister(RC); |
8579 | 0 | Register OldValRot = MRI.createVirtualRegister(RC); |
8580 | 0 | Register RetryOldVal = MRI.createVirtualRegister(RC); |
8581 | 0 | Register RetrySwapVal = MRI.createVirtualRegister(RC); |
8582 | | |
8583 | | // Insert 2 basic blocks for the loop. |
8584 | 0 | MachineBasicBlock *StartMBB = MBB; |
8585 | 0 | MachineBasicBlock *DoneMBB = SystemZ::splitBlockBefore(MI, MBB); |
8586 | 0 | MachineBasicBlock *LoopMBB = SystemZ::emitBlockAfter(StartMBB); |
8587 | 0 | MachineBasicBlock *SetMBB = SystemZ::emitBlockAfter(LoopMBB); |
8588 | | |
8589 | | // StartMBB: |
8590 | | // ... |
8591 | | // %OrigOldVal = L Disp(%Base) |
8592 | | // # fall through to LoopMBB |
8593 | 0 | MBB = StartMBB; |
8594 | 0 | BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal) |
8595 | 0 | .add(Base) |
8596 | 0 | .addImm(Disp) |
8597 | 0 | .addReg(0); |
8598 | 0 | MBB->addSuccessor(LoopMBB); |
8599 | | |
8600 | | // LoopMBB: |
8601 | | // %OldVal = phi [ %OrigOldVal, StartMBB ], [ %RetryOldVal, SetMBB ]
8602 | | // %SwapVal = phi [ %OrigSwapVal, StartMBB ], [ %RetrySwapVal, SetMBB ]
8603 | | // %OldValRot = RLL %OldVal, BitSize(%BitShift) |
8604 | | // ^^ The low BitSize bits contain the field |
8605 | | // of interest. |
8606 | | // %RetrySwapVal = RISBG32 %SwapVal, %OldValRot, 32, 63-BitSize, 0 |
8607 | | // ^^ Replace the upper 32-BitSize bits of the |
8608 | | // swap value with those that we loaded and rotated. |
8609 | | // %Dest = LL[CH] %OldValRot |
8610 | | // CR %Dest, %CmpVal |
8611 | | // JNE DoneMBB |
8612 | | // # fall through to SetMBB
8613 | 0 | MBB = LoopMBB; |
8614 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal) |
8615 | 0 | .addReg(OrigOldVal).addMBB(StartMBB) |
8616 | 0 | .addReg(RetryOldVal).addMBB(SetMBB); |
8617 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal) |
8618 | 0 | .addReg(OrigSwapVal).addMBB(StartMBB) |
8619 | 0 | .addReg(RetrySwapVal).addMBB(SetMBB); |
8620 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::RLL), OldValRot) |
8621 | 0 | .addReg(OldVal).addReg(BitShift).addImm(BitSize); |
8622 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal) |
8623 | 0 | .addReg(SwapVal).addReg(OldValRot).addImm(32).addImm(63 - BitSize).addImm(0); |
8624 | 0 | BuildMI(MBB, DL, TII->get(ZExtOpcode), Dest) |
8625 | 0 | .addReg(OldValRot); |
8626 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::CR)) |
8627 | 0 | .addReg(Dest).addReg(CmpVal); |
8628 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) |
8629 | 0 | .addImm(SystemZ::CCMASK_ICMP) |
8630 | 0 | .addImm(SystemZ::CCMASK_CMP_NE).addMBB(DoneMBB); |
8631 | 0 | MBB->addSuccessor(DoneMBB); |
8632 | 0 | MBB->addSuccessor(SetMBB); |
8633 | | |
8634 | | // SetMBB: |
8635 | | // %StoreVal = RLL %RetrySwapVal, -BitSize(%NegBitShift) |
8636 | | // ^^ Rotate the new field to its proper position. |
8637 | | // %RetryOldVal = CS %OldVal, %StoreVal, Disp(%Base) |
8638 | | // JNE LoopMBB |
8639 | | // # fall through to DoneMBB
8640 | 0 | MBB = SetMBB; |
8641 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal) |
8642 | 0 | .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize); |
8643 | 0 | BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal) |
8644 | 0 | .addReg(OldVal) |
8645 | 0 | .addReg(StoreVal) |
8646 | 0 | .add(Base) |
8647 | 0 | .addImm(Disp); |
8648 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) |
8649 | 0 | .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); |
8650 | 0 | MBB->addSuccessor(LoopMBB); |
8651 | 0 | MBB->addSuccessor(DoneMBB); |
8652 | | |
8653 | | // If the CC def wasn't dead in the ATOMIC_CMP_SWAPW, mark CC as live-in |
8654 | | // to the block after the loop. At this point, CC may have been defined |
8655 | | // either by the CR in LoopMBB or by the CS in SetMBB. |
8656 | 0 | if (!MI.registerDefIsDead(SystemZ::CC)) |
8657 | 0 | DoneMBB->addLiveIn(SystemZ::CC); |
8658 | |
8659 | 0 | MI.eraseFromParent(); |
8660 | 0 | return DoneMBB; |
8661 | 0 | } |
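// For reference, the same subword compare-and-swap can be modelled in
// portable C++: do a full-word CAS on the aligned 32-bit word containing the
// byte, which is what the RLL/RISBG32/CS loop above does with rotates. A
// hedged sketch (our own names; the byte position is computed for a
// little-endian layout, whereas z/Architecture is big-endian; it returns
// success rather than the zero-extended old field for brevity):
#include <atomic>
#include <cstdint>

static bool cmpSwapByte(uint8_t *Ptr, uint8_t CmpVal, uint8_t SwapVal) {
  uintptr_t Addr = reinterpret_cast<uintptr_t>(Ptr);
  auto *Word = reinterpret_cast<std::atomic<uint32_t> *>(Addr & ~uintptr_t(3));
  unsigned Shift = (Addr & 3) * 8;                 // BitShift analogue
  uint32_t OldVal = Word->load();                  // %OrigOldVal = L ...
  for (;;) {
    if (uint8_t(OldVal >> Shift) != CmpVal)        // CR %Dest, %CmpVal
      return false;                                // JNE DoneMBB
    uint32_t StoreVal =                            // RISBG32 field insert
        (OldVal & ~(uint32_t(0xff) << Shift)) | (uint32_t(SwapVal) << Shift);
    if (Word->compare_exchange_weak(OldVal, StoreVal)) // CS Disp(%Base)
      return true;
    // CS failed: OldVal now holds fresh memory; retry (JNE LoopMBB).
  }
}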
8662 | | |
8663 | | // Emit a move from two GR64s to a GR128. |
8664 | | MachineBasicBlock * |
8665 | | SystemZTargetLowering::emitPair128(MachineInstr &MI, |
8666 | 0 | MachineBasicBlock *MBB) const { |
8667 | 0 | MachineFunction &MF = *MBB->getParent(); |
8668 | 0 | const SystemZInstrInfo *TII = Subtarget.getInstrInfo(); |
8669 | 0 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
8670 | 0 | DebugLoc DL = MI.getDebugLoc(); |
8671 | |
8672 | 0 | Register Dest = MI.getOperand(0).getReg(); |
8673 | 0 | Register Hi = MI.getOperand(1).getReg(); |
8674 | 0 | Register Lo = MI.getOperand(2).getReg(); |
8675 | 0 | Register Tmp1 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); |
8676 | 0 | Register Tmp2 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); |
8677 | |
8678 | 0 | BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), Tmp1); |
8679 | 0 | BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Tmp2) |
8680 | 0 | .addReg(Tmp1).addReg(Hi).addImm(SystemZ::subreg_h64); |
8681 | 0 | BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest) |
8682 | 0 | .addReg(Tmp2).addReg(Lo).addImm(SystemZ::subreg_l64); |
8683 | |
8684 | 0 | MI.eraseFromParent(); |
8685 | 0 | return MBB; |
8686 | 0 | } |
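// Functionally, PAIR128 is just the concatenation of two GR64s. In
// compiler-extension C++ (unsigned __int128, GCC/Clang) the value built by
// the two INSERT_SUBREGs above is:
#include <cstdint>

static unsigned __int128 pair128(uint64_t Hi, uint64_t Lo) {
  return (static_cast<unsigned __int128>(Hi) << 64) | Lo; // Dest = Hi:Lo
}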
8687 | | |
8688 | | // Emit an extension from a GR64 to a GR128. ClearEven is true |
8689 | | // if the high register of the GR128 value must be cleared or false if |
8690 | | // it's "don't care". |
8691 | | MachineBasicBlock *SystemZTargetLowering::emitExt128(MachineInstr &MI, |
8692 | | MachineBasicBlock *MBB, |
8693 | 0 | bool ClearEven) const { |
8694 | 0 | MachineFunction &MF = *MBB->getParent(); |
8695 | 0 | const SystemZInstrInfo *TII = Subtarget.getInstrInfo(); |
8696 | 0 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
8697 | 0 | DebugLoc DL = MI.getDebugLoc(); |
8698 | |
8699 | 0 | Register Dest = MI.getOperand(0).getReg(); |
8700 | 0 | Register Src = MI.getOperand(1).getReg(); |
8701 | 0 | Register In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); |
8702 | |
8703 | 0 | BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128); |
8704 | 0 | if (ClearEven) { |
8705 | 0 | Register NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); |
8706 | 0 | Register Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass); |
8707 | |
8708 | 0 | BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64) |
8709 | 0 | .addImm(0); |
8710 | 0 | BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128) |
8711 | 0 | .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_h64); |
8712 | 0 | In128 = NewIn128; |
8713 | 0 | } |
8714 | 0 | BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest) |
8715 | 0 | .addReg(In128).addReg(Src).addImm(SystemZ::subreg_l64); |
8716 | |
8717 | 0 | MI.eraseFromParent(); |
8718 | 0 | return MBB; |
8719 | 0 | } |
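// In the same notation, a sketch of what the extension above produces:
// ZEXT128 (ClearEven) zeroes the high half via LLILL 0 + INSERT_SUBREG,
// while AEXT128 leaves it undefined (IMPLICIT_DEF only).
#include <cstdint>

static unsigned __int128 zext128(uint64_t Src) {
  return static_cast<unsigned __int128>(Src); // high 64 bits cleared
}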
8720 | | |
8721 | | MachineBasicBlock * |
8722 | | SystemZTargetLowering::emitMemMemWrapper(MachineInstr &MI, |
8723 | | MachineBasicBlock *MBB, |
8724 | 0 | unsigned Opcode, bool IsMemset) const { |
8725 | 0 | MachineFunction &MF = *MBB->getParent(); |
8726 | 0 | const SystemZInstrInfo *TII = Subtarget.getInstrInfo(); |
8727 | 0 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
8728 | 0 | DebugLoc DL = MI.getDebugLoc(); |
8729 | |
8730 | 0 | MachineOperand DestBase = earlyUseOperand(MI.getOperand(0)); |
8731 | 0 | uint64_t DestDisp = MI.getOperand(1).getImm(); |
8732 | 0 | MachineOperand SrcBase = MachineOperand::CreateReg(0U, false); |
8733 | 0 | uint64_t SrcDisp; |
8734 | | |
8735 | | // Fold the displacement Disp if it is out of range. |
8736 | 0 | auto foldDisplIfNeeded = [&](MachineOperand &Base, uint64_t &Disp) -> void { |
8737 | 0 | if (!isUInt<12>(Disp)) { |
8738 | 0 | Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); |
8739 | 0 | unsigned Opcode = TII->getOpcodeForOffset(SystemZ::LA, Disp); |
8740 | 0 | BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(Opcode), Reg) |
8741 | 0 | .add(Base).addImm(Disp).addReg(0); |
8742 | 0 | Base = MachineOperand::CreateReg(Reg, false); |
8743 | 0 | Disp = 0; |
8744 | 0 | } |
8745 | 0 | }; |
8746 | |
8747 | 0 | if (!IsMemset) { |
8748 | 0 | SrcBase = earlyUseOperand(MI.getOperand(2)); |
8749 | 0 | SrcDisp = MI.getOperand(3).getImm(); |
8750 | 0 | } else { |
8751 | 0 | SrcBase = DestBase; |
8752 | 0 | SrcDisp = DestDisp++; |
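    // Note: for memset the source is the destination itself, one byte
    // lower. After MVI/STC stores the fill byte at Dest+0, an overlapping
    // MVC from Dest+0 to Dest+1 propagates that byte through the range.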
8753 | 0 | foldDisplIfNeeded(DestBase, DestDisp); |
8754 | 0 | } |
8755 | |
8756 | 0 | MachineOperand &LengthMO = MI.getOperand(IsMemset ? 2 : 4); |
8757 | 0 | bool IsImmForm = LengthMO.isImm(); |
8758 | 0 | bool IsRegForm = !IsImmForm; |
8759 | | |
8760 | | // Build and insert one Opcode instruction covering Length bytes, with
8760 | | // special treatment for memset.
8761 | 0 | auto insertMemMemOp = [&](MachineBasicBlock *InsMBB, |
8762 | 0 | MachineBasicBlock::iterator InsPos, |
8763 | 0 | MachineOperand DBase, uint64_t DDisp, |
8764 | 0 | MachineOperand SBase, uint64_t SDisp, |
8765 | 0 | unsigned Length) -> void { |
8766 | 0 | assert(Length > 0 && Length <= 256 && "Building memory op with bad length."); |
8767 | 0 | if (IsMemset) { |
8768 | 0 | MachineOperand ByteMO = earlyUseOperand(MI.getOperand(3)); |
8769 | 0 | if (ByteMO.isImm()) |
8770 | 0 | BuildMI(*InsMBB, InsPos, DL, TII->get(SystemZ::MVI)) |
8771 | 0 | .add(SBase).addImm(SDisp).add(ByteMO); |
8772 | 0 | else |
8773 | 0 | BuildMI(*InsMBB, InsPos, DL, TII->get(SystemZ::STC)) |
8774 | 0 | .add(ByteMO).add(SBase).addImm(SDisp).addReg(0); |
8775 | 0 | if (--Length == 0) |
8776 | 0 | return; |
8777 | 0 | } |
8778 | 0 | BuildMI(*InsMBB, InsPos, DL, TII->get(Opcode))
8779 | 0 | .add(DBase).addImm(DDisp).addImm(Length) |
8780 | 0 | .add(SBase).addImm(SDisp) |
8781 | 0 | .setMemRefs(MI.memoperands()); |
8782 | 0 | }; |
8783 | |
8784 | 0 | bool NeedsLoop = false; |
8785 | 0 | uint64_t ImmLength = 0; |
8786 | 0 | Register LenAdjReg = SystemZ::NoRegister; |
8787 | 0 | if (IsImmForm) { |
8788 | 0 | ImmLength = LengthMO.getImm(); |
8789 | 0 | ImmLength += IsMemset ? 2 : 1; // Add back the subtracted adjustment. |
8790 | 0 | if (ImmLength == 0) { |
8791 | 0 | MI.eraseFromParent(); |
8792 | 0 | return MBB; |
8793 | 0 | } |
8794 | 0 | if (Opcode == SystemZ::CLC) { |
8795 | 0 | if (ImmLength > 3 * 256) |
8796 | | // A two-CLC sequence is a clear win over a loop, not least because |
8797 | | // it needs only one branch. A three-CLC sequence needs the same |
8798 | | // number of branches as a loop (i.e. 2), but is shorter. That |
8799 | | // brings us to lengths greater than 768 bytes. It seems relatively |
8800 | | // likely that a difference will be found within the first 768 bytes, |
8801 | | // so we just optimize for the smallest number of branch |
8802 | | // instructions, in order to avoid polluting the prediction buffer |
8803 | | // too much. |
8804 | 0 | NeedsLoop = true; |
8805 | 0 | } else if (ImmLength > 6 * 256) |
8806 | | // The heuristic we use is to prefer loops for anything that would |
8807 | | // require 7 or more MVCs. With these kinds of sizes there isn't much |
8808 | | // to choose between straight-line code and looping code, since the |
8809 | | // time will be dominated by the MVCs themselves. |
8810 | 0 | NeedsLoop = true; |
8811 | 0 | } else { |
8812 | 0 | NeedsLoop = true; |
8813 | 0 | LenAdjReg = LengthMO.getReg(); |
8814 | 0 | } |
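    // Worked example of the thresholds above (illustrative): a 700-byte CLC
    // (<= 3 * 256 = 768) is emitted inline as three CLCs of 256, 256 and
    // 188 bytes, while a 1537-byte MVC would need ceil(1537/256) = 7 MVCs
    // (> 6 * 256 = 1536) and becomes a loop instead.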
8815 | | |
8816 | | // When generating more than one CLC, all but the last will need to |
8817 | | // branch to the end when a difference is found. |
8818 | 0 | MachineBasicBlock *EndMBB = |
8819 | 0 | (Opcode == SystemZ::CLC && (ImmLength > 256 || NeedsLoop) |
8820 | 0 | ? SystemZ::splitBlockAfter(MI, MBB) |
8821 | 0 | : nullptr); |
8822 | |
8823 | 0 | if (NeedsLoop) { |
8824 | 0 | Register StartCountReg = |
8825 | 0 | MRI.createVirtualRegister(&SystemZ::GR64BitRegClass); |
8826 | 0 | if (IsImmForm) { |
8827 | 0 | TII->loadImmediate(*MBB, MI, StartCountReg, ImmLength / 256); |
8828 | 0 | ImmLength &= 255; |
8829 | 0 | } else { |
8830 | 0 | BuildMI(*MBB, MI, DL, TII->get(SystemZ::SRLG), StartCountReg) |
8831 | 0 | .addReg(LenAdjReg) |
8832 | 0 | .addReg(0) |
8833 | 0 | .addImm(8); |
8834 | 0 | } |
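    // Illustrative arithmetic: for a (non-memset) register length of 600
    // bytes, LenAdjReg holds 599, so StartCountReg = 599 >> 8 = 2 full
    // 256-byte iterations, and the EXRL tail below covers the remaining
    // (599 & 0xff) + 1 = 88 bytes.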
8835 | |
8836 | 0 | bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase); |
8837 | 0 | auto loadZeroAddress = [&]() -> MachineOperand { |
8838 | 0 | Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); |
8839 | 0 | BuildMI(*MBB, MI, DL, TII->get(SystemZ::LGHI), Reg).addImm(0); |
8840 | 0 | return MachineOperand::CreateReg(Reg, false); |
8841 | 0 | }; |
8842 | 0 | if (DestBase.isReg() && DestBase.getReg() == SystemZ::NoRegister) |
8843 | 0 | DestBase = loadZeroAddress(); |
8844 | 0 | if (SrcBase.isReg() && SrcBase.getReg() == SystemZ::NoRegister) |
8845 | 0 | SrcBase = HaveSingleBase ? DestBase : loadZeroAddress(); |
8846 | |
8847 | 0 | MachineBasicBlock *StartMBB = nullptr; |
8848 | 0 | MachineBasicBlock *LoopMBB = nullptr; |
8849 | 0 | MachineBasicBlock *NextMBB = nullptr; |
8850 | 0 | MachineBasicBlock *DoneMBB = nullptr; |
8851 | 0 | MachineBasicBlock *AllDoneMBB = nullptr; |
8852 | |
8853 | 0 | Register StartSrcReg = forceReg(MI, SrcBase, TII); |
8854 | 0 | Register StartDestReg = |
8855 | 0 | (HaveSingleBase ? StartSrcReg : forceReg(MI, DestBase, TII)); |
8856 | |
|
8857 | 0 | const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass; |
8858 | 0 | Register ThisSrcReg = MRI.createVirtualRegister(RC); |
8859 | 0 | Register ThisDestReg = |
8860 | 0 | (HaveSingleBase ? ThisSrcReg : MRI.createVirtualRegister(RC)); |
8861 | 0 | Register NextSrcReg = MRI.createVirtualRegister(RC); |
8862 | 0 | Register NextDestReg = |
8863 | 0 | (HaveSingleBase ? NextSrcReg : MRI.createVirtualRegister(RC)); |
8864 | 0 | RC = &SystemZ::GR64BitRegClass; |
8865 | 0 | Register ThisCountReg = MRI.createVirtualRegister(RC); |
8866 | 0 | Register NextCountReg = MRI.createVirtualRegister(RC); |
8867 | |
8868 | 0 | if (IsRegForm) { |
8869 | 0 | AllDoneMBB = SystemZ::splitBlockBefore(MI, MBB); |
8870 | 0 | StartMBB = SystemZ::emitBlockAfter(MBB); |
8871 | 0 | LoopMBB = SystemZ::emitBlockAfter(StartMBB); |
8872 | 0 | NextMBB = (EndMBB ? SystemZ::emitBlockAfter(LoopMBB) : LoopMBB); |
8873 | 0 | DoneMBB = SystemZ::emitBlockAfter(NextMBB); |
8874 | | |
8875 | | // MBB: |
8876 | | // # Jump to AllDoneMBB if LenAdjReg encodes a length of 0, or fall thru to StartMBB.
8877 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::CGHI)) |
8878 | 0 | .addReg(LenAdjReg).addImm(IsMemset ? -2 : -1); |
8879 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) |
8880 | 0 | .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_EQ) |
8881 | 0 | .addMBB(AllDoneMBB); |
8882 | 0 | MBB->addSuccessor(AllDoneMBB); |
8883 | 0 | if (!IsMemset) |
8884 | 0 | MBB->addSuccessor(StartMBB); |
8885 | 0 | else { |
8886 | | // MemsetOneCheckMBB: |
8887 | | // # Jump to MemsetOneMBB for a memset of length 1, or |
8888 | | // # fall thru to StartMBB. |
8889 | 0 | MachineBasicBlock *MemsetOneCheckMBB = SystemZ::emitBlockAfter(MBB); |
8890 | 0 | MachineBasicBlock *MemsetOneMBB = SystemZ::emitBlockAfter(&*MF.rbegin()); |
8891 | 0 | MBB->addSuccessor(MemsetOneCheckMBB); |
8892 | 0 | MBB = MemsetOneCheckMBB; |
8893 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::CGHI)) |
8894 | 0 | .addReg(LenAdjReg).addImm(-1); |
8895 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) |
8896 | 0 | .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_EQ) |
8897 | 0 | .addMBB(MemsetOneMBB); |
8898 | 0 | MBB->addSuccessor(MemsetOneMBB, {10, 100}); |
8899 | 0 | MBB->addSuccessor(StartMBB, {90, 100}); |
8900 | | |
8901 | | // MemsetOneMBB: |
8902 | | // # Jump back to AllDoneMBB after a single MVI or STC. |
8903 | 0 | MBB = MemsetOneMBB; |
8904 | 0 | insertMemMemOp(MBB, MBB->end(), |
8905 | 0 | MachineOperand::CreateReg(StartDestReg, false), DestDisp, |
8906 | 0 | MachineOperand::CreateReg(StartSrcReg, false), SrcDisp, |
8907 | 0 | 1); |
8908 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::J)).addMBB(AllDoneMBB); |
8909 | 0 | MBB->addSuccessor(AllDoneMBB); |
8910 | 0 | } |
8911 | | |
8912 | | // StartMBB: |
8913 | | // # Jump to DoneMBB if %StartCountReg is zero, or fall through to LoopMBB. |
8914 | 0 | MBB = StartMBB; |
8915 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::CGHI)) |
8916 | 0 | .addReg(StartCountReg).addImm(0); |
8917 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) |
8918 | 0 | .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_EQ) |
8919 | 0 | .addMBB(DoneMBB); |
8920 | 0 | MBB->addSuccessor(DoneMBB); |
8921 | 0 | MBB->addSuccessor(LoopMBB); |
8922 | 0 | } |
8923 | 0 | else { |
8924 | 0 | StartMBB = MBB; |
8925 | 0 | DoneMBB = SystemZ::splitBlockBefore(MI, MBB); |
8926 | 0 | LoopMBB = SystemZ::emitBlockAfter(StartMBB); |
8927 | 0 | NextMBB = (EndMBB ? SystemZ::emitBlockAfter(LoopMBB) : LoopMBB); |
8928 | | |
8929 | | // StartMBB: |
8930 | | // # fall through to LoopMBB |
8931 | 0 | MBB->addSuccessor(LoopMBB); |
8932 | |
8933 | 0 | DestBase = MachineOperand::CreateReg(NextDestReg, false); |
8934 | 0 | SrcBase = MachineOperand::CreateReg(NextSrcReg, false); |
8935 | 0 | if (EndMBB && !ImmLength) |
8936 | | // If the loop handled the whole CLC range, DoneMBB will be empty with |
8937 | | // CC live-through into EndMBB, so add it as live-in. |
8938 | 0 | DoneMBB->addLiveIn(SystemZ::CC); |
8939 | 0 | } |
8940 | | |
8941 | | // LoopMBB: |
8942 | | // %ThisDestReg = phi [ %StartDestReg, StartMBB ], |
8943 | | // [ %NextDestReg, NextMBB ] |
8944 | | // %ThisSrcReg = phi [ %StartSrcReg, StartMBB ], |
8945 | | // [ %NextSrcReg, NextMBB ] |
8946 | | // %ThisCountReg = phi [ %StartCountReg, StartMBB ], |
8947 | | // [ %NextCountReg, NextMBB ] |
8948 | | // ( PFD 2, 768+DestDisp(%ThisDestReg) ) |
8949 | | // Opcode DestDisp(256,%ThisDestReg), SrcDisp(%ThisSrcReg) |
8950 | | // ( JLH EndMBB ) |
8951 | | // |
8952 | | // The prefetch is used only for MVC. The JLH is used only for CLC. |
8953 | 0 | MBB = LoopMBB; |
8954 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisDestReg) |
8955 | 0 | .addReg(StartDestReg).addMBB(StartMBB) |
8956 | 0 | .addReg(NextDestReg).addMBB(NextMBB); |
8957 | 0 | if (!HaveSingleBase) |
8958 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisSrcReg) |
8959 | 0 | .addReg(StartSrcReg).addMBB(StartMBB) |
8960 | 0 | .addReg(NextSrcReg).addMBB(NextMBB); |
8961 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisCountReg) |
8962 | 0 | .addReg(StartCountReg).addMBB(StartMBB) |
8963 | 0 | .addReg(NextCountReg).addMBB(NextMBB); |
8964 | 0 | if (Opcode == SystemZ::MVC) |
8965 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::PFD)) |
8966 | 0 | .addImm(SystemZ::PFD_WRITE) |
8967 | 0 | .addReg(ThisDestReg).addImm(DestDisp - IsMemset + 768).addReg(0); |
8968 | 0 | insertMemMemOp(MBB, MBB->end(), |
8969 | 0 | MachineOperand::CreateReg(ThisDestReg, false), DestDisp, |
8970 | 0 | MachineOperand::CreateReg(ThisSrcReg, false), SrcDisp, 256); |
8971 | 0 | if (EndMBB) { |
8972 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) |
8973 | 0 | .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE) |
8974 | 0 | .addMBB(EndMBB); |
8975 | 0 | MBB->addSuccessor(EndMBB); |
8976 | 0 | MBB->addSuccessor(NextMBB); |
8977 | 0 | } |
8978 | | |
8979 | | // NextMBB: |
8980 | | // %NextDestReg = LA 256(%ThisDestReg) |
8981 | | // %NextSrcReg = LA 256(%ThisSrcReg) |
8982 | | // %NextCountReg = AGHI %ThisCountReg, -1 |
8983 | | // CGHI %NextCountReg, 0 |
8984 | | // JLH LoopMBB |
8985 | | // # fall through to DoneMBB |
8986 | | // |
8987 | | // The AGHI, CGHI and JLH should be converted to BRCTG by later passes. |
8988 | 0 | MBB = NextMBB; |
8989 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::LA), NextDestReg) |
8990 | 0 | .addReg(ThisDestReg).addImm(256).addReg(0); |
8991 | 0 | if (!HaveSingleBase) |
8992 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::LA), NextSrcReg) |
8993 | 0 | .addReg(ThisSrcReg).addImm(256).addReg(0); |
8994 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::AGHI), NextCountReg) |
8995 | 0 | .addReg(ThisCountReg).addImm(-1); |
8996 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::CGHI)) |
8997 | 0 | .addReg(NextCountReg).addImm(0); |
8998 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) |
8999 | 0 | .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE) |
9000 | 0 | .addMBB(LoopMBB); |
9001 | 0 | MBB->addSuccessor(LoopMBB); |
9002 | 0 | MBB->addSuccessor(DoneMBB); |
9003 | |
9004 | 0 | MBB = DoneMBB; |
9005 | 0 | if (IsRegForm) { |
9006 | | // DoneMBB: |
9007 | | // # Make PHIs for RemDestReg/RemSrcReg as the loop may or may not run. |
9008 | | // # Use EXecute Relative Long for the remainder of the bytes. The target |
9009 | | // instruction of the EXRL will have a length field of 1 since 0 is an |
9010 | | // illegal value. The number of bytes processed becomes (%LenAdjReg & |
9011 | | // 0xff) + 1. |
9012 | | // # Fall through to AllDoneMBB. |
9013 | 0 | Register RemSrcReg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); |
9014 | 0 | Register RemDestReg = HaveSingleBase ? RemSrcReg |
9015 | 0 | : MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); |
9016 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), RemDestReg) |
9017 | 0 | .addReg(StartDestReg).addMBB(StartMBB) |
9018 | 0 | .addReg(NextDestReg).addMBB(NextMBB); |
9019 | 0 | if (!HaveSingleBase) |
9020 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), RemSrcReg) |
9021 | 0 | .addReg(StartSrcReg).addMBB(StartMBB) |
9022 | 0 | .addReg(NextSrcReg).addMBB(NextMBB); |
9023 | 0 | if (IsMemset) |
9024 | 0 | insertMemMemOp(MBB, MBB->end(), |
9025 | 0 | MachineOperand::CreateReg(RemDestReg, false), DestDisp, |
9026 | 0 | MachineOperand::CreateReg(RemSrcReg, false), SrcDisp, 1); |
9027 | 0 | MachineInstrBuilder EXRL_MIB = |
9028 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::EXRL_Pseudo)) |
9029 | 0 | .addImm(Opcode) |
9030 | 0 | .addReg(LenAdjReg) |
9031 | 0 | .addReg(RemDestReg).addImm(DestDisp) |
9032 | 0 | .addReg(RemSrcReg).addImm(SrcDisp); |
9033 | 0 | MBB->addSuccessor(AllDoneMBB); |
9034 | 0 | MBB = AllDoneMBB; |
9035 | 0 | if (Opcode != SystemZ::MVC) { |
9036 | 0 | EXRL_MIB.addReg(SystemZ::CC, RegState::ImplicitDefine); |
9037 | 0 | if (EndMBB) |
9038 | 0 | MBB->addLiveIn(SystemZ::CC); |
9039 | 0 | } |
9040 | 0 | } |
9041 | 0 | MF.getProperties().reset(MachineFunctionProperties::Property::NoPHIs); |
9042 | 0 | } |
9043 | | |
9044 | | // Handle any remaining bytes with straight-line code. |
9045 | 0 | while (ImmLength > 0) { |
9046 | 0 | uint64_t ThisLength = std::min(ImmLength, uint64_t(256)); |
9047 | | // The previous iteration might have created out-of-range displacements. |
9048 | | // Apply them using LA/LAY if so. |
9049 | 0 | foldDisplIfNeeded(DestBase, DestDisp); |
9050 | 0 | foldDisplIfNeeded(SrcBase, SrcDisp); |
9051 | 0 | insertMemMemOp(MBB, MI, DestBase, DestDisp, SrcBase, SrcDisp, ThisLength); |
9052 | 0 | DestDisp += ThisLength; |
9053 | 0 | SrcDisp += ThisLength; |
9054 | 0 | ImmLength -= ThisLength; |
9055 | | // If there's another CLC to go, branch to the end if a difference |
9056 | | // was found. |
9057 | 0 | if (EndMBB && ImmLength > 0) { |
9058 | 0 | MachineBasicBlock *NextMBB = SystemZ::splitBlockBefore(MI, MBB); |
9059 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) |
9060 | 0 | .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE) |
9061 | 0 | .addMBB(EndMBB); |
9062 | 0 | MBB->addSuccessor(EndMBB); |
9063 | 0 | MBB->addSuccessor(NextMBB); |
9064 | 0 | MBB = NextMBB; |
9065 | 0 | } |
9066 | 0 | } |
9067 | 0 | if (EndMBB) { |
9068 | 0 | MBB->addSuccessor(EndMBB); |
9069 | 0 | MBB = EndMBB; |
9070 | 0 | MBB->addLiveIn(SystemZ::CC); |
9071 | 0 | } |
9072 | |
9073 | 0 | MI.eraseFromParent(); |
9074 | 0 | return MBB; |
9075 | 0 | } |
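// Putting the pieces together, the register-length (non-memset) MVC
// expansion above has this C++ shape -- a sketch under our own names, where
// memcpy stands in for one MVC of up to 256 bytes and the tail models the
// EXRL with its (LenAdjReg & 0xff) + 1 semantics:
#include <cstdint>
#include <cstring>

static void expandedMemcpy(char *Dst, const char *Src, uint64_t LenAdj) {
  if (LenAdj == uint64_t(-1))                   // CGHI LenAdjReg, -1
    return;                                     // length 0: AllDoneMBB
  for (uint64_t I = LenAdj >> 8; I != 0; --I) { // StartCountReg iterations
    std::memcpy(Dst, Src, 256);                 // one 256-byte MVC (LoopMBB)
    Dst += 256;                                 // LA 256(%ThisDestReg)
    Src += 256;                                 // LA 256(%ThisSrcReg)
  }
  std::memcpy(Dst, Src, (LenAdj & 0xff) + 1);   // EXRL remainder (DoneMBB)
}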
9076 | | |
9077 | | // Decompose string pseudo-instruction MI into a loop that continually performs |
9078 | | // Opcode until CC != 3. |
9079 | | MachineBasicBlock *SystemZTargetLowering::emitStringWrapper( |
9080 | 0 | MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const { |
9081 | 0 | MachineFunction &MF = *MBB->getParent(); |
9082 | 0 | const SystemZInstrInfo *TII = Subtarget.getInstrInfo(); |
9083 | 0 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
9084 | 0 | DebugLoc DL = MI.getDebugLoc(); |
9085 | |
9086 | 0 | Register End1Reg = MI.getOperand(0).getReg();
9087 | 0 | Register Start1Reg = MI.getOperand(1).getReg();
9088 | 0 | Register Start2Reg = MI.getOperand(2).getReg();
9089 | 0 | Register CharReg = MI.getOperand(3).getReg();
9090 | |
9091 | 0 | const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass; |
9092 | 0 | Register This1Reg = MRI.createVirtualRegister(RC);
9093 | 0 | Register This2Reg = MRI.createVirtualRegister(RC);
9094 | 0 | Register End2Reg = MRI.createVirtualRegister(RC);
9095 | |
9096 | 0 | MachineBasicBlock *StartMBB = MBB; |
9097 | 0 | MachineBasicBlock *DoneMBB = SystemZ::splitBlockBefore(MI, MBB); |
9098 | 0 | MachineBasicBlock *LoopMBB = SystemZ::emitBlockAfter(StartMBB); |
9099 | | |
9100 | | // StartMBB: |
9101 | | // # fall through to LoopMBB |
9102 | 0 | MBB->addSuccessor(LoopMBB); |
9103 | | |
9104 | | // LoopMBB: |
9105 | | // %This1Reg = phi [ %Start1Reg, StartMBB ], [ %End1Reg, LoopMBB ] |
9106 | | // %This2Reg = phi [ %Start2Reg, StartMBB ], [ %End2Reg, LoopMBB ] |
9107 | | // R0L = %CharReg |
9108 | | // %End1Reg, %End2Reg = CLST %This1Reg, %This2Reg -- uses R0L |
9109 | | // JO LoopMBB |
9110 | | // # fall through to DoneMBB |
9111 | | // |
9112 | | // The load of R0L can be hoisted by post-RA LICM. |
9113 | 0 | MBB = LoopMBB; |
9114 | |
9115 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), This1Reg) |
9116 | 0 | .addReg(Start1Reg).addMBB(StartMBB) |
9117 | 0 | .addReg(End1Reg).addMBB(LoopMBB); |
9118 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), This2Reg) |
9119 | 0 | .addReg(Start2Reg).addMBB(StartMBB) |
9120 | 0 | .addReg(End2Reg).addMBB(LoopMBB); |
9121 | 0 | BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), SystemZ::R0L).addReg(CharReg); |
9122 | 0 | BuildMI(MBB, DL, TII->get(Opcode)) |
9123 | 0 | .addReg(End1Reg, RegState::Define).addReg(End2Reg, RegState::Define) |
9124 | 0 | .addReg(This1Reg).addReg(This2Reg); |
9125 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) |
9126 | 0 | .addImm(SystemZ::CCMASK_ANY).addImm(SystemZ::CCMASK_3).addMBB(LoopMBB); |
9127 | 0 | MBB->addSuccessor(LoopMBB); |
9128 | 0 | MBB->addSuccessor(DoneMBB); |
9129 | |
9130 | 0 | DoneMBB->addLiveIn(SystemZ::CC); |
9131 | |
9132 | 0 | MI.eraseFromParent(); |
9133 | 0 | return DoneMBB; |
9134 | 0 | } |
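// The contract of the loop above, modelled in plain C++: one CLST step
// compares bounded chunks and reports CC == 3 when it stopped before
// reaching a decision, so the expansion re-issues it from the updated
// addresses. clstStep below is an entirely hypothetical software model of
// one step (the real bound is CPU-determined; the terminator rides in R0L):
#include <cstddef>

struct ClstResult { const char *End1, *End2; int CC; };

static ClstResult clstStep(const char *P1, const char *P2, char Term,
                           size_t Bound = 256) {
  for (size_t I = 0; I < Bound; ++I, ++P1, ++P2) {
    unsigned char C1 = *P1, C2 = *P2, T = Term;
    if (C1 == T && C2 == T) return {P1, P2, 0}; // strings equal
    if (C1 == T)            return {P1, P2, 1}; // first operand low
    if (C2 == T)            return {P1, P2, 2}; // first operand high
    if (C1 < C2)            return {P1, P2, 1};
    if (C1 > C2)            return {P1, P2, 2};
  }
  return {P1, P2, 3};                           // undecided: CC == 3
}

// The emitted loop: JO (branch on CC == 3) back to LoopMBB.
static int expandedClst(const char *S1, const char *S2, char Term = '\0') {
  ClstResult R = clstStep(S1, S2, Term);
  while (R.CC == 3)
    R = clstStep(R.End1, R.End2, Term);
  return R.CC;
}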
9135 | | |
9136 | | // Update TBEGIN instruction with final opcode and register clobbers. |
9137 | | MachineBasicBlock *SystemZTargetLowering::emitTransactionBegin( |
9138 | | MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode, |
9139 | 0 | bool NoFloat) const { |
9140 | 0 | MachineFunction &MF = *MBB->getParent(); |
9141 | 0 | const TargetFrameLowering *TFI = Subtarget.getFrameLowering(); |
9142 | 0 | const SystemZInstrInfo *TII = Subtarget.getInstrInfo(); |
9143 | | |
9144 | | // Update opcode. |
9145 | 0 | MI.setDesc(TII->get(Opcode)); |
9146 | | |
9147 | | // We cannot handle a TBEGIN that clobbers the stack or frame pointer. |
9148 | | // Make sure to add the corresponding GRSM bits if they are missing. |
9149 | 0 | uint64_t Control = MI.getOperand(2).getImm(); |
9150 | 0 | static const unsigned GPRControlBit[16] = { |
9151 | 0 | 0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000, |
9152 | 0 | 0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100 |
9153 | 0 | }; |
9154 | 0 | Control |= GPRControlBit[15]; |
9155 | 0 | if (TFI->hasFP(MF)) |
9156 | 0 | Control |= GPRControlBit[11]; |
9157 | 0 | MI.getOperand(2).setImm(Control); |
9158 | | |
9159 | | // Add GPR clobbers. |
9160 | 0 | for (int I = 0; I < 16; I++) { |
9161 | 0 | if ((Control & GPRControlBit[I]) == 0) { |
9162 | 0 | unsigned Reg = SystemZMC::GR64Regs[I]; |
9163 | 0 | MI.addOperand(MachineOperand::CreateReg(Reg, true, true)); |
9164 | 0 | } |
9165 | 0 | } |
9166 | | |
9167 | | // Add FPR/VR clobbers. |
9168 | 0 | if (!NoFloat && (Control & 4) != 0) { |
9169 | 0 | if (Subtarget.hasVector()) { |
9170 | 0 | for (unsigned Reg : SystemZMC::VR128Regs) { |
9171 | 0 | MI.addOperand(MachineOperand::CreateReg(Reg, true, true)); |
9172 | 0 | } |
9173 | 0 | } else { |
9174 | 0 | for (unsigned Reg : SystemZMC::FP64Regs) { |
9175 | 0 | MI.addOperand(MachineOperand::CreateReg(Reg, true, true)); |
9176 | 0 | } |
9177 | 0 | } |
9178 | 0 | } |
9179 | |
9180 | 0 | return MBB; |
9181 | 0 | } |
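// The GPRControlBit table repeats each mask because one GRSM bit governs an
// even/odd register pair. Equivalently (hypothetical helper, not in the
// source):
static constexpr unsigned grsmBit(unsigned I) { return 0x8000u >> (I / 2); }
static_assert(grsmBit(15) == 0x0100, "r14/r15 pair, always marked used");
static_assert(grsmBit(11) == 0x0400, "r10/r11 pair, marked when FP is used");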
9182 | | |
9183 | | MachineBasicBlock *SystemZTargetLowering::emitLoadAndTestCmp0( |
9184 | 0 | MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const { |
9185 | 0 | MachineFunction &MF = *MBB->getParent(); |
9186 | 0 | MachineRegisterInfo *MRI = &MF.getRegInfo(); |
9187 | 0 | const SystemZInstrInfo *TII = Subtarget.getInstrInfo(); |
9188 | 0 | DebugLoc DL = MI.getDebugLoc(); |
9189 | |
9190 | 0 | Register SrcReg = MI.getOperand(0).getReg(); |
9191 | | |
9192 | | // Create new virtual register of the same class as source. |
9193 | 0 | const TargetRegisterClass *RC = MRI->getRegClass(SrcReg); |
9194 | 0 | Register DstReg = MRI->createVirtualRegister(RC); |
9195 | | |
9196 | | // Replace pseudo with a normal load-and-test that models the def as |
9197 | | // well. |
9198 | 0 | BuildMI(*MBB, MI, DL, TII->get(Opcode), DstReg) |
9199 | 0 | .addReg(SrcReg) |
9200 | 0 | .setMIFlags(MI.getFlags()); |
9201 | 0 | MI.eraseFromParent(); |
9202 | |
9203 | 0 | return MBB; |
9204 | 0 | } |
9205 | | |
9206 | | MachineBasicBlock *SystemZTargetLowering::emitProbedAlloca( |
9207 | 0 | MachineInstr &MI, MachineBasicBlock *MBB) const { |
9208 | 0 | MachineFunction &MF = *MBB->getParent(); |
9209 | 0 | MachineRegisterInfo *MRI = &MF.getRegInfo(); |
9210 | 0 | const SystemZInstrInfo *TII = Subtarget.getInstrInfo(); |
9211 | 0 | DebugLoc DL = MI.getDebugLoc(); |
9212 | 0 | const unsigned ProbeSize = getStackProbeSize(MF); |
9213 | 0 | Register DstReg = MI.getOperand(0).getReg(); |
9214 | 0 | Register SizeReg = MI.getOperand(2).getReg(); |
9215 | |
9216 | 0 | MachineBasicBlock *StartMBB = MBB; |
9217 | 0 | MachineBasicBlock *DoneMBB = SystemZ::splitBlockAfter(MI, MBB); |
9218 | 0 | MachineBasicBlock *LoopTestMBB = SystemZ::emitBlockAfter(StartMBB); |
9219 | 0 | MachineBasicBlock *LoopBodyMBB = SystemZ::emitBlockAfter(LoopTestMBB); |
9220 | 0 | MachineBasicBlock *TailTestMBB = SystemZ::emitBlockAfter(LoopBodyMBB); |
9221 | 0 | MachineBasicBlock *TailMBB = SystemZ::emitBlockAfter(TailTestMBB); |
9222 | |
9223 | 0 | MachineMemOperand *VolLdMMO = MF.getMachineMemOperand(MachinePointerInfo(), |
9224 | 0 | MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad, 8, Align(1)); |
9225 | |
|
9226 | 0 | Register PHIReg = MRI->createVirtualRegister(&SystemZ::ADDR64BitRegClass); |
9227 | 0 | Register IncReg = MRI->createVirtualRegister(&SystemZ::ADDR64BitRegClass); |
9228 | | |
9229 | | // LoopTestMBB |
9230 | | // BRC TailTestMBB |
9231 | | // # fallthrough to LoopBodyMBB |
9232 | 0 | StartMBB->addSuccessor(LoopTestMBB); |
9233 | 0 | MBB = LoopTestMBB; |
9234 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::PHI), PHIReg) |
9235 | 0 | .addReg(SizeReg) |
9236 | 0 | .addMBB(StartMBB) |
9237 | 0 | .addReg(IncReg) |
9238 | 0 | .addMBB(LoopBodyMBB); |
9239 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::CLGFI)) |
9240 | 0 | .addReg(PHIReg) |
9241 | 0 | .addImm(ProbeSize); |
9242 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) |
9243 | 0 | .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_LT) |
9244 | 0 | .addMBB(TailTestMBB); |
9245 | 0 | MBB->addSuccessor(LoopBodyMBB); |
9246 | 0 | MBB->addSuccessor(TailTestMBB); |
9247 | | |
9248 | | // LoopBodyMBB: Allocate and probe by means of a volatile compare. |
9249 | | // J LoopTestMBB |
9250 | 0 | MBB = LoopBodyMBB; |
9251 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::SLGFI), IncReg) |
9252 | 0 | .addReg(PHIReg) |
9253 | 0 | .addImm(ProbeSize); |
9254 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::SLGFI), SystemZ::R15D) |
9255 | 0 | .addReg(SystemZ::R15D) |
9256 | 0 | .addImm(ProbeSize); |
9257 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::CG)).addReg(SystemZ::R15D) |
9258 | 0 | .addReg(SystemZ::R15D).addImm(ProbeSize - 8).addReg(0) |
9259 | 0 | .setMemRefs(VolLdMMO); |
9260 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::J)).addMBB(LoopTestMBB); |
9261 | 0 | MBB->addSuccessor(LoopTestMBB); |
9262 | | |
9263 | | // TailTestMBB |
9264 | | // BRC DoneMBB |
9265 | | // # fallthrough to TailMBB |
9266 | 0 | MBB = TailTestMBB; |
9267 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::CGHI)) |
9268 | 0 | .addReg(PHIReg) |
9269 | 0 | .addImm(0); |
9270 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::BRC)) |
9271 | 0 | .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_EQ) |
9272 | 0 | .addMBB(DoneMBB); |
9273 | 0 | MBB->addSuccessor(TailMBB); |
9274 | 0 | MBB->addSuccessor(DoneMBB); |
9275 | | |
9276 | | // TailMBB |
9277 | | // # fallthrough to DoneMBB |
9278 | 0 | MBB = TailMBB; |
9279 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::SLGR), SystemZ::R15D) |
9280 | 0 | .addReg(SystemZ::R15D) |
9281 | 0 | .addReg(PHIReg); |
9282 | 0 | BuildMI(MBB, DL, TII->get(SystemZ::CG)).addReg(SystemZ::R15D) |
9283 | 0 | .addReg(SystemZ::R15D).addImm(-8).addReg(PHIReg) |
9284 | 0 | .setMemRefs(VolLdMMO); |
9285 | 0 | MBB->addSuccessor(DoneMBB); |
9286 | | |
9287 | | // DoneMBB |
9288 | 0 | MBB = DoneMBB; |
9289 | 0 | BuildMI(*MBB, MBB->begin(), DL, TII->get(TargetOpcode::COPY), DstReg) |
9290 | 0 | .addReg(SystemZ::R15D); |
9291 | |
9292 | 0 | MI.eraseFromParent(); |
9293 | 0 | return DoneMBB; |
9294 | 0 | } |
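// Read as ordinary code, the probing CFG above moves the stack pointer down
// one ProbeSize page at a time, touching each new page with a volatile
// 8-byte load, then handles the sub-page tail. A minimal sketch under our
// own names (the CG compare is used purely for its memory access):
#include <cstdint>

static uintptr_t probedAlloca(uintptr_t SP, uint64_t Size,
                              uint64_t ProbeSize) {
  auto probe = [](uintptr_t Addr) {             // the volatile CG load
    (void)*reinterpret_cast<volatile uint64_t *>(Addr);
  };
  while (Size >= ProbeSize) {                   // LoopTestMBB: CLGFI/BRC
    Size -= ProbeSize;                          // SLGFI IncReg
    SP -= ProbeSize;                            // SLGFI %r15
    probe(SP + ProbeSize - 8);                  // CG ProbeSize-8(%r15)
  }
  if (Size != 0) {                              // TailTestMBB: CGHI/BRC
    SP -= Size;                                 // TailMBB: SLGR %r15
    probe(SP + Size - 8);                       // CG -8(%PHIReg,%r15)
  }
  return SP;                                    // DoneMBB: DstReg = %r15
}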
9295 | | |
9296 | | SDValue SystemZTargetLowering:: |
9297 | 0 | getBackchainAddress(SDValue SP, SelectionDAG &DAG) const { |
9298 | 0 | MachineFunction &MF = DAG.getMachineFunction(); |
9299 | 0 | auto *TFL = Subtarget.getFrameLowering<SystemZELFFrameLowering>(); |
9300 | 0 | SDLoc DL(SP); |
9301 | 0 | return DAG.getNode(ISD::ADD, DL, MVT::i64, SP, |
9302 | 0 | DAG.getIntPtrConstant(TFL->getBackchainOffset(MF), DL)); |
9303 | 0 | } |
9304 | | |
9305 | | MachineBasicBlock *SystemZTargetLowering::EmitInstrWithCustomInserter( |
9306 | 0 | MachineInstr &MI, MachineBasicBlock *MBB) const { |
9307 | 0 | switch (MI.getOpcode()) { |
9308 | 0 | case SystemZ::Select32: |
9309 | 0 | case SystemZ::Select64: |
9310 | 0 | case SystemZ::Select128: |
9311 | 0 | case SystemZ::SelectF32: |
9312 | 0 | case SystemZ::SelectF64: |
9313 | 0 | case SystemZ::SelectF128: |
9314 | 0 | case SystemZ::SelectVR32: |
9315 | 0 | case SystemZ::SelectVR64: |
9316 | 0 | case SystemZ::SelectVR128: |
9317 | 0 | return emitSelect(MI, MBB); |
9318 | | |
9319 | 0 | case SystemZ::CondStore8Mux: |
9320 | 0 | return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false); |
9321 | 0 | case SystemZ::CondStore8MuxInv: |
9322 | 0 | return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true); |
9323 | 0 | case SystemZ::CondStore16Mux: |
9324 | 0 | return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false); |
9325 | 0 | case SystemZ::CondStore16MuxInv: |
9326 | 0 | return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true); |
9327 | 0 | case SystemZ::CondStore32Mux: |
9328 | 0 | return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, false); |
9329 | 0 | case SystemZ::CondStore32MuxInv: |
9330 | 0 | return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, true); |
9331 | 0 | case SystemZ::CondStore8: |
9332 | 0 | return emitCondStore(MI, MBB, SystemZ::STC, 0, false); |
9333 | 0 | case SystemZ::CondStore8Inv: |
9334 | 0 | return emitCondStore(MI, MBB, SystemZ::STC, 0, true); |
9335 | 0 | case SystemZ::CondStore16: |
9336 | 0 | return emitCondStore(MI, MBB, SystemZ::STH, 0, false); |
9337 | 0 | case SystemZ::CondStore16Inv: |
9338 | 0 | return emitCondStore(MI, MBB, SystemZ::STH, 0, true); |
9339 | 0 | case SystemZ::CondStore32: |
9340 | 0 | return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false); |
9341 | 0 | case SystemZ::CondStore32Inv: |
9342 | 0 | return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true); |
9343 | 0 | case SystemZ::CondStore64: |
9344 | 0 | return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false); |
9345 | 0 | case SystemZ::CondStore64Inv: |
9346 | 0 | return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true); |
9347 | 0 | case SystemZ::CondStoreF32: |
9348 | 0 | return emitCondStore(MI, MBB, SystemZ::STE, 0, false); |
9349 | 0 | case SystemZ::CondStoreF32Inv: |
9350 | 0 | return emitCondStore(MI, MBB, SystemZ::STE, 0, true); |
9351 | 0 | case SystemZ::CondStoreF64: |
9352 | 0 | return emitCondStore(MI, MBB, SystemZ::STD, 0, false); |
9353 | 0 | case SystemZ::CondStoreF64Inv: |
9354 | 0 | return emitCondStore(MI, MBB, SystemZ::STD, 0, true); |
9355 | | |
9356 | 0 | case SystemZ::SCmp128Hi: |
9357 | 0 | return emitICmp128Hi(MI, MBB, false); |
9358 | 0 | case SystemZ::UCmp128Hi: |
9359 | 0 | return emitICmp128Hi(MI, MBB, true); |
9360 | | |
9361 | 0 | case SystemZ::PAIR128: |
9362 | 0 | return emitPair128(MI, MBB); |
9363 | 0 | case SystemZ::AEXT128: |
9364 | 0 | return emitExt128(MI, MBB, false); |
9365 | 0 | case SystemZ::ZEXT128: |
9366 | 0 | return emitExt128(MI, MBB, true); |
9367 | | |
9368 | 0 | case SystemZ::ATOMIC_SWAPW: |
9369 | 0 | return emitAtomicLoadBinary(MI, MBB, 0); |
9370 | | |
9371 | 0 | case SystemZ::ATOMIC_LOADW_AR: |
9372 | 0 | return emitAtomicLoadBinary(MI, MBB, SystemZ::AR); |
9373 | 0 | case SystemZ::ATOMIC_LOADW_AFI: |
9374 | 0 | return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI); |
9375 | | |
9376 | 0 | case SystemZ::ATOMIC_LOADW_SR: |
9377 | 0 | return emitAtomicLoadBinary(MI, MBB, SystemZ::SR); |
9378 | | |
9379 | 0 | case SystemZ::ATOMIC_LOADW_NR: |
9380 | 0 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NR); |
9381 | 0 | case SystemZ::ATOMIC_LOADW_NILH: |
9382 | 0 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH); |
9383 | | |
9384 | 0 | case SystemZ::ATOMIC_LOADW_OR: |
9385 | 0 | return emitAtomicLoadBinary(MI, MBB, SystemZ::OR); |
9386 | 0 | case SystemZ::ATOMIC_LOADW_OILH: |
9387 | 0 | return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH); |
9388 | | |
9389 | 0 | case SystemZ::ATOMIC_LOADW_XR: |
9390 | 0 | return emitAtomicLoadBinary(MI, MBB, SystemZ::XR); |
9391 | 0 | case SystemZ::ATOMIC_LOADW_XILF: |
9392 | 0 | return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF); |
9393 | | |
9394 | 0 | case SystemZ::ATOMIC_LOADW_NRi: |
9395 | 0 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, true); |
9396 | 0 | case SystemZ::ATOMIC_LOADW_NILHi: |
9397 | 0 | return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, true); |
9398 | | |
9399 | 0 | case SystemZ::ATOMIC_LOADW_MIN: |
9400 | 0 | return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR, SystemZ::CCMASK_CMP_LE); |
9401 | 0 | case SystemZ::ATOMIC_LOADW_MAX: |
9402 | 0 | return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR, SystemZ::CCMASK_CMP_GE); |
9403 | 0 | case SystemZ::ATOMIC_LOADW_UMIN: |
9404 | 0 | return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR, SystemZ::CCMASK_CMP_LE); |
9405 | 0 | case SystemZ::ATOMIC_LOADW_UMAX: |
9406 | 0 | return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR, SystemZ::CCMASK_CMP_GE); |
9407 | | |
9408 | 0 | case SystemZ::ATOMIC_CMP_SWAPW: |
9409 | 0 | return emitAtomicCmpSwapW(MI, MBB); |
9410 | 0 | case SystemZ::MVCImm: |
9411 | 0 | case SystemZ::MVCReg: |
9412 | 0 | return emitMemMemWrapper(MI, MBB, SystemZ::MVC); |
9413 | 0 | case SystemZ::NCImm: |
9414 | 0 | return emitMemMemWrapper(MI, MBB, SystemZ::NC); |
9415 | 0 | case SystemZ::OCImm: |
9416 | 0 | return emitMemMemWrapper(MI, MBB, SystemZ::OC); |
9417 | 0 | case SystemZ::XCImm: |
9418 | 0 | case SystemZ::XCReg: |
9419 | 0 | return emitMemMemWrapper(MI, MBB, SystemZ::XC); |
9420 | 0 | case SystemZ::CLCImm: |
9421 | 0 | case SystemZ::CLCReg: |
9422 | 0 | return emitMemMemWrapper(MI, MBB, SystemZ::CLC); |
9423 | 0 | case SystemZ::MemsetImmImm: |
9424 | 0 | case SystemZ::MemsetImmReg: |
9425 | 0 | case SystemZ::MemsetRegImm: |
9426 | 0 | case SystemZ::MemsetRegReg: |
9427 | 0 | return emitMemMemWrapper(MI, MBB, SystemZ::MVC, true/*IsMemset*/); |
9428 | 0 | case SystemZ::CLSTLoop: |
9429 | 0 | return emitStringWrapper(MI, MBB, SystemZ::CLST); |
9430 | 0 | case SystemZ::MVSTLoop: |
9431 | 0 | return emitStringWrapper(MI, MBB, SystemZ::MVST); |
9432 | 0 | case SystemZ::SRSTLoop: |
9433 | 0 | return emitStringWrapper(MI, MBB, SystemZ::SRST); |
9434 | 0 | case SystemZ::TBEGIN: |
9435 | 0 | return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, false); |
9436 | 0 | case SystemZ::TBEGIN_nofloat: |
9437 | 0 | return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, true); |
9438 | 0 | case SystemZ::TBEGINC: |
9439 | 0 | return emitTransactionBegin(MI, MBB, SystemZ::TBEGINC, true); |
9440 | 0 | case SystemZ::LTEBRCompare_Pseudo: |
9441 | 0 | return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTEBR); |
9442 | 0 | case SystemZ::LTDBRCompare_Pseudo: |
9443 | 0 | return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTDBR); |
9444 | 0 | case SystemZ::LTXBRCompare_Pseudo: |
9445 | 0 | return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTXBR); |
9446 | | |
9447 | 0 | case SystemZ::PROBED_ALLOCA: |
9448 | 0 | return emitProbedAlloca(MI, MBB); |
9449 | | |
9450 | 0 | case TargetOpcode::STACKMAP: |
9451 | 0 | case TargetOpcode::PATCHPOINT: |
9452 | 0 | return emitPatchPoint(MI, MBB); |
9453 | | |
9454 | 0 | default: |
9455 | 0 | llvm_unreachable("Unexpected instr type to insert"); |
9456 | 0 | } |
9457 | 0 | } |
9458 | | |
9459 | | // This is only used by the isel schedulers, and is needed only to prevent |
9460 | | // the compiler from crashing when list-ilp is used.
9461 | | const TargetRegisterClass * |
9462 | 0 | SystemZTargetLowering::getRepRegClassFor(MVT VT) const { |
9463 | 0 | if (VT == MVT::Untyped) |
9464 | 0 | return &SystemZ::ADDR128BitRegClass; |
9465 | 0 | return TargetLowering::getRepRegClassFor(VT); |
9466 | 0 | } |
9467 | | |
9468 | | SDValue SystemZTargetLowering::lowerGET_ROUNDING(SDValue Op, |
9469 | 0 | SelectionDAG &DAG) const { |
9470 | 0 | SDLoc dl(Op); |
9471 | | /* |
9472 | | The rounding method is in FPC Byte 3 bits 6-7, and has the following |
9473 | | settings: |
9474 | | 00 Round to nearest |
9475 | | 01 Round to 0 |
9476 | | 10 Round to +inf |
9477 | | 11 Round to -inf |
9478 | | |
9479 | | FLT_ROUNDS, on the other hand, expects the following: |
9480 | | -1 Undefined |
9481 | | 0 Round to 0 |
9482 | | 1 Round to nearest |
9483 | | 2 Round to +inf |
9484 | | 3 Round to -inf |
9485 | | */ |
9486 | | |
9487 | | // Save FPC to register. |
9488 | 0 | SDValue Chain = Op.getOperand(0); |
9489 | 0 | SDValue EFPC( |
9490 | 0 | DAG.getMachineNode(SystemZ::EFPC, dl, {MVT::i32, MVT::Other}, Chain), 0); |
9491 | 0 | Chain = EFPC.getValue(1); |
9492 | | |
9493 | | // Transform as necessary |
9494 | 0 | SDValue CWD1 = DAG.getNode(ISD::AND, dl, MVT::i32, EFPC, |
9495 | 0 | DAG.getConstant(3, dl, MVT::i32)); |
9496 | | // RetVal = (CWD1 ^ (CWD1 >> 1)) ^ 1 |
9497 | 0 | SDValue CWD2 = DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, |
9498 | 0 | DAG.getNode(ISD::SRL, dl, MVT::i32, CWD1, |
9499 | 0 | DAG.getConstant(1, dl, MVT::i32))); |
9500 | |
9501 | 0 | SDValue RetVal = DAG.getNode(ISD::XOR, dl, MVT::i32, CWD2, |
9502 | 0 | DAG.getConstant(1, dl, MVT::i32)); |
9503 | 0 | RetVal = DAG.getZExtOrTrunc(RetVal, dl, Op.getValueType()); |
9504 | |
9505 | 0 | return DAG.getMergeValues({RetVal, Chain}, dl); |
9506 | 0 | } |
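// A compile-time check that the two XORs implement the table in the comment
// (assuming the FPC and FLT_ROUNDS encodings quoted above):
// RetVal = ((FPC & 3) ^ ((FPC & 3) >> 1)) ^ 1
static_assert(((0 ^ (0 >> 1)) ^ 1) == 1, "00 nearest -> 1");
static_assert(((1 ^ (1 >> 1)) ^ 1) == 0, "01 to zero -> 0");
static_assert(((2 ^ (2 >> 1)) ^ 1) == 2, "10 to +inf -> 2");
static_assert(((3 ^ (3 >> 1)) ^ 1) == 3, "11 to -inf -> 3");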