/src/llvm-project/llvm/lib/Target/Hexagon/HexagonISelLowering.h
//===-- HexagonISelLowering.h - Hexagon DAG Lowering Interface --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that Hexagon uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_HEXAGON_HEXAGONISELLOWERING_H
#define LLVM_LIB_TARGET_HEXAGON_HEXAGONISELLOWERING_H

#include "Hexagon.h"
#include "MCTargetDesc/HexagonMCTargetDesc.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/InlineAsm.h"
#include <cstdint>
#include <utility>

namespace llvm {

namespace HexagonISD {

enum NodeType : unsigned {
  OP_BEGIN = ISD::BUILTIN_OP_END,

  CONST32 = OP_BEGIN,
  CONST32_GP, // For marking data present in GP.
  ADDC,       // Add with carry: (X, Y, Cin) -> (X+Y, Cout).
  SUBC,       // Sub with carry: (X, Y, Cin) -> (X+~Y+Cin, Cout).
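              // For example, the lowest step of a wide X - Y is computed with
              // Cin = 1 (since X - Y == X + ~Y + 1); a Cout of 0 then signals
              // a borrow into the next, more significant step.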
  ALLOCA,

  AT_GOT,     // Index in GOT.
  AT_PCREL,   // Offset relative to PC.

  CALL,       // Function call.
  CALLnr,     // Function call that does not return.
  CALLR,

  RET_GLUE,   // Return with a glue operand.
  BARRIER,    // Memory barrier.
  JT,         // Jump table.
  CP,         // Constant pool.

  COMBINE,
  VASL,       // Vector shifts by a scalar value.
  VASR,
  VLSR,
  MFSHL,      // Funnel shifts with the shift amount guaranteed to be
  MFSHR,      // within the range of the bit width of the element.
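              // For example, for 0 < S < BitWidth, a funnel shift left of
              // (A, B) by S produces (A << S) | (B >> (BitWidth - S)).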

  SSAT,       // Signed saturate.
  USAT,       // Unsigned saturate.
  SMUL_LOHI,  // Same as ISD::SMUL_LOHI, but opaque to the combiner.
  UMUL_LOHI,  // Same as ISD::UMUL_LOHI, but opaque to the combiner.
              // We want to legalize MULH[SU] to [SU]MUL_LOHI, but the
              // combiner will keep rewriting it back to MULH[SU].
  USMUL_LOHI, // Like SMUL_LOHI, but unsigned*signed.
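              // For example, on i32 operands each of these returns two i32
              // results: the low and the high half of the full 64-bit
              // product; USMUL_LOHI forms the product of one unsigned and
              // one signed operand.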

  TSTBIT,
  INSERT,
  EXTRACTU,
  VEXTRACTW,
  VINSERTW0,
  VROR,
  TC_RETURN,
  EH_RETURN,
  DCFETCH,
  READCYCLE,
  PTRUE,
  PFALSE,
  D2P,        // Convert 8-byte value to 8-bit predicate register. [*]
  P2D,        // Convert 8-bit predicate register to 8-byte value. [*]
  V2Q,        // Convert HVX vector to a vector predicate reg. [*]
  Q2V,        // Convert vector predicate to an HVX vector. [*]
              // [*] The equivalence is defined as "Q <=> (V != 0)",
              // where the != operation compares bytes.
              // Note: V != 0 is implemented as V >u 0.
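              // For example, bit i of the predicate produced by D2P or V2Q
              // is set exactly when byte i of the source value is nonzero.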
  QCAT,
  QTRUE,
  QFALSE,

  TL_EXTEND,   // Wrappers for ISD::*_EXTEND and ISD::TRUNCATE to prevent DAG
  TL_TRUNCATE, // from auto-folding operations, e.g.
               // (i32 ext (i16 ext i8)) would be folded to (i32 ext i8).
               // To simplify the type legalization, we want to keep these
               // single steps separate during type legalization.
               // TL_[EXTEND|TRUNCATE] Inp, i128 _, i32 Opc
               // * Inp is the original input to extend/truncate,
               // * _ is a dummy operand with an illegal type (can be undef),
               // * Opc is the original opcode.
               // The legalization process (in Hexagon lowering code) will
               // first deal with the "real" types (i.e. Inp and the result),
               // and once all of them are processed, the wrapper node will
               // be replaced with the original ISD node. The dummy illegal
               // operand is there to make sure that the legalization hooks
               // are called again after everything else is legal, giving
               // us the opportunity to undo the wrapping.
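               // An illustrative instance (the vector types are only an
               // example): TL_EXTEND (v32i16 Inp), (i128 undef),
               // (i32 ISD::SIGN_EXTEND) stands in for a v32i16 -> v32i32
               // sign extension until both vector types have been legalized.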

  TYPECAST,   // No-op that's used to convert between different legal
              // types in a register.
  VALIGN,     // Align two vectors (in Op0, Op1) to one that would have
              // been loaded from address in Op2.
  VALIGNADDR, // Align vector address: Op0 & -Op1, except when it is
              // an address in a vector load, then it's a no-op.
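              // For example, with Op1 == 128 (a vector length in bytes),
              // Op0 & -Op1 clears the low seven bits of the address.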
  ISEL,       // Marker for nodes that were created during ISel, and
              // which need explicit selection (would have been left
              // unselected otherwise).
  OP_END
};

} // end namespace HexagonISD

class HexagonSubtarget;

class HexagonTargetLowering : public TargetLowering {
  int VarArgsFrameOffset; // Frame offset to start of varargs area.
  const HexagonTargetMachine &HTM;
  const HexagonSubtarget &Subtarget;

public:
  explicit HexagonTargetLowering(const TargetMachine &TM,
                                 const HexagonSubtarget &ST);

  /// IsEligibleForTailCallOptimization - Check whether the call is eligible
  /// for tail call optimization. Targets which want to do tail call
  /// optimization should implement this function.
  bool IsEligibleForTailCallOptimization(SDValue Callee,
      CallingConv::ID CalleeCC, bool isVarArg, bool isCalleeStructRet,
      bool isCallerStructRet, const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG& DAG) const;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isCheapToSpeculateCttz(Type *) const override { return true; }
  bool isCheapToSpeculateCtlz(Type *) const override { return true; }
  bool isCtlzFast() const override { return true; }

  bool hasBitTest(SDValue X, SDValue Y) const override;

  bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;

  /// Return true if an FMA operation is faster than a pair of mul and add
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this
  /// method returns true (and FMAs are legal), otherwise fmuladd is
  /// expanded to mul + add.
  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &,
                                  EVT) const override;

  // Should we expand the build vector with shuffles?
  bool shouldExpandBuildVectorWithShuffles(EVT VT,
      unsigned DefinedValues) const override;
  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                               unsigned Index) const override;

  bool isTargetCanonicalConstantNode(SDValue Op) const override;

  bool isShuffleMaskLegal(ArrayRef<int> Mask, EVT VT) const override;
  LegalizeTypeAction getPreferredVectorAction(MVT VT) const override;
  LegalizeAction getCustomOperationAction(SDNode &Op) const override;

  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
  void LowerOperationWrapper(SDNode *N, SmallVectorImpl<SDValue> &Results,
                             SelectionDAG &DAG) const override;
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerROTL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerANY_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerZERO_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerLoad(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerStore(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerUAddSubO(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerUAddSubOCarry(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEH_LABEL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
  SDValue
  LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                       const SmallVectorImpl<ISD::InputArg> &Ins,
                       const SDLoc &dl, SelectionDAG &DAG,
                       SmallVectorImpl<SDValue> &InVals) const override;
  SDValue LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                        SelectionDAG &DAG) const;
  SDValue LowerToTLSInitialExecModel(GlobalAddressSDNode *GA,
                                     SelectionDAG &DAG) const;
  SDValue LowerToTLSLocalExecModel(GlobalAddressSDNode *GA,
                                   SelectionDAG &DAG) const;
  SDValue GetDynamicTLSAddr(SelectionDAG &DAG, SDValue Chain,
                            GlobalAddressSDNode *GA, SDValue InGlue, EVT PtrVT,
                            unsigned ReturnReg,
                            unsigned char OperandGlues) const;
  SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;
  SDValue LowerCallResult(SDValue Chain, SDValue InGlue,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const SDLoc &dl, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals,
                          const SmallVectorImpl<SDValue> &OutVals,
                          SDValue Callee) const;

  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG& DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;

  bool CanLowerReturn(CallingConv::ID CallConv,
                      MachineFunction &MF, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals,
                      const SDLoc &dl, SelectionDAG &DAG) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;

  Register getRegisterByName(const char* RegName, LLT VT,
                             const MachineFunction &MF) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override {
    return Hexagon::R0;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
    return Hexagon::R1;
  }

  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;

  EVT getSetCCResultType(const DataLayout &, LLVMContext &C,
                         EVT VT) const override {
    if (!VT.isVector())
      return MVT::i1;
    else
      return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());
  }

  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                  SDValue &Base, SDValue &Offset,
                                  ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  // Intrinsics
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
  /// isLegalAddressingMode - Return true if the addressing mode represented
  /// by AM is legal for this target, for a load/store of the specified type.
  /// The type may be VoidTy, in which case only return true if the addressing
  /// mode is legal for a load/store of any legal type.
  /// TODO: Handle pre/postinc as well.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                             Type *Ty, unsigned AS,
                             Instruction *I = nullptr) const override;
  /// Return true if folding a constant offset with the given GlobalAddress
  /// is legal. It is frequently not legal in PIC relocation models.
  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;

  /// isLegalICmpImmediate - Return true if the specified immediate is a legal
  /// icmp immediate, that is, the target has icmp instructions that can
  /// compare a register against the immediate without having to materialize
  /// it into a register.
  bool isLegalICmpImmediate(int64_t Imm) const override;

  EVT getOptimalMemOpType(const MemOp &Op,
                          const AttributeList &FuncAttributes) const override;

  bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
                          unsigned AddrSpace, Align Alignment,
                          MachineMemOperand::Flags Flags,
                          unsigned *Fast) const override;

  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
                                      Align Alignment,
                                      MachineMemOperand::Flags Flags,
                                      unsigned *Fast) const override;

  /// Returns relocation base for the given PIC jumptable.
  SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG)
      const override;

  bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
                             EVT NewVT) const override;

  void AdjustInstrPostInstrSelection(MachineInstr &MI,
                                     SDNode *Node) const override;

  // Handling of atomic RMW instructions.
  Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr,
                              AtomicOrdering Ord) const override;
  AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override {
    return AtomicExpansionKind::LLSC;
  }

private:
  void initializeHVXLowering();
  unsigned getPreferredHvxVectorAction(MVT VecTy) const;
  unsigned getCustomHvxOperationAction(SDNode &Op) const;

  bool validateConstPtrAlignment(SDValue Ptr, Align NeedAlign, const SDLoc &dl,
                                 SelectionDAG &DAG) const;
  SDValue replaceMemWithUndef(SDValue Op, SelectionDAG &DAG) const;

  std::pair<SDValue, int> getBaseAndOffset(SDValue Addr) const;

  bool getBuildVectorConstInts(ArrayRef<SDValue> Values, MVT VecTy,
                               SelectionDAG &DAG,
                               MutableArrayRef<ConstantInt*> Consts) const;
  SDValue buildVector32(ArrayRef<SDValue> Elem, const SDLoc &dl, MVT VecTy,
                        SelectionDAG &DAG) const;
  SDValue buildVector64(ArrayRef<SDValue> Elem, const SDLoc &dl, MVT VecTy,
                        SelectionDAG &DAG) const;
  SDValue extractVector(SDValue VecV, SDValue IdxV, const SDLoc &dl,
                        MVT ValTy, MVT ResTy, SelectionDAG &DAG) const;
  SDValue extractVectorPred(SDValue VecV, SDValue IdxV, const SDLoc &dl,
                            MVT ValTy, MVT ResTy, SelectionDAG &DAG) const;
  SDValue insertVector(SDValue VecV, SDValue ValV, SDValue IdxV,
                       const SDLoc &dl, MVT ValTy, SelectionDAG &DAG) const;
  SDValue insertVectorPred(SDValue VecV, SDValue ValV, SDValue IdxV,
                           const SDLoc &dl, MVT ValTy, SelectionDAG &DAG) const;
  SDValue expandPredicate(SDValue Vec32, const SDLoc &dl,
                          SelectionDAG &DAG) const;
  SDValue contractPredicate(SDValue Vec64, const SDLoc &dl,
                            SelectionDAG &DAG) const;
  SDValue getSplatValue(SDValue Op, SelectionDAG &DAG) const;
  SDValue getVectorShiftByInt(SDValue Op, SelectionDAG &DAG) const;
  SDValue appendUndef(SDValue Val, MVT ResTy, SelectionDAG &DAG) const;
  SDValue getCombine(SDValue Hi, SDValue Lo, const SDLoc &dl, MVT ResTy,
                     SelectionDAG &DAG) const;

  bool isUndef(SDValue Op) const {
    if (Op.isMachineOpcode())
      return Op.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF;
    return Op.getOpcode() == ISD::UNDEF;
  }
  SDValue getInstr(unsigned MachineOpc, const SDLoc &dl, MVT Ty,
                   ArrayRef<SDValue> Ops, SelectionDAG &DAG) const {
    SDNode *N = DAG.getMachineNode(MachineOpc, dl, Ty, Ops);
    return SDValue(N, 0);
  }
  SDValue getZero(const SDLoc &dl, MVT Ty, SelectionDAG &DAG) const;

  using VectorPair = std::pair<SDValue, SDValue>;
  using TypePair = std::pair<MVT, MVT>;

  SDValue getInt(unsigned IntId, MVT ResTy, ArrayRef<SDValue> Ops,
                 const SDLoc &dl, SelectionDAG &DAG) const;

  MVT ty(SDValue Op) const {
    return Op.getValueType().getSimpleVT();
  }
  TypePair ty(const VectorPair &Ops) const {
    return { Ops.first.getValueType().getSimpleVT(),
             Ops.second.getValueType().getSimpleVT() };
  }
  MVT tyScalar(MVT Ty) const {
    if (!Ty.isVector())
      return Ty;
    return MVT::getIntegerVT(Ty.getSizeInBits());
  }
  MVT tyVector(MVT Ty, MVT ElemTy) const {
    if (Ty.isVector() && Ty.getVectorElementType() == ElemTy)
      return Ty;
    unsigned TyWidth = Ty.getSizeInBits();
    unsigned ElemWidth = ElemTy.getSizeInBits();
    assert((TyWidth % ElemWidth) == 0);
    return MVT::getVectorVT(ElemTy, TyWidth/ElemWidth);
  }
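  // For example, tyScalar(MVT::v4i16) is MVT::i64, and
  // tyVector(MVT::v4i16, MVT::i8) is MVT::v8i8 (same total width, i8 lanes).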

  MVT typeJoin(const TypePair &Tys) const;
  TypePair typeSplit(MVT Ty) const;
  MVT typeExtElem(MVT VecTy, unsigned Factor) const;
  MVT typeTruncElem(MVT VecTy, unsigned Factor) const;
  TypePair typeExtendToWider(MVT Ty0, MVT Ty1) const;
  TypePair typeWidenToWider(MVT Ty0, MVT Ty1) const;
  MVT typeLegalize(MVT Ty, SelectionDAG &DAG) const;
  MVT typeWidenToHvx(MVT Ty) const;

  SDValue opJoin(const VectorPair &Ops, const SDLoc &dl,
                 SelectionDAG &DAG) const;
  VectorPair opSplit(SDValue Vec, const SDLoc &dl, SelectionDAG &DAG) const;
  SDValue opCastElem(SDValue Vec, MVT ElemTy, SelectionDAG &DAG) const;

  SDValue LoHalf(SDValue V, SelectionDAG &DAG) const {
    MVT Ty = ty(V);
    const SDLoc &dl(V);
    if (!Ty.isVector()) {
      assert(Ty.getSizeInBits() == 64);
      return DAG.getTargetExtractSubreg(Hexagon::isub_lo, dl, MVT::i32, V);
    }
    MVT HalfTy = typeSplit(Ty).first;
    SDValue Idx = getZero(dl, MVT::i32, DAG);
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, HalfTy, V, Idx);
  }
  SDValue HiHalf(SDValue V, SelectionDAG &DAG) const {
    MVT Ty = ty(V);
    const SDLoc &dl(V);
    if (!Ty.isVector()) {
      assert(Ty.getSizeInBits() == 64);
      return DAG.getTargetExtractSubreg(Hexagon::isub_hi, dl, MVT::i32, V);
    }
    MVT HalfTy = typeSplit(Ty).first;
    SDValue Idx = DAG.getConstant(HalfTy.getVectorNumElements(), dl, MVT::i32);
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, HalfTy, V, Idx);
  }
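  // For example, on an i64 value these extract the isub_lo/isub_hi i32
  // subregisters; on a vector value they return the lower or upper half
  // produced by typeSplit (e.g. v8i16 splits into two v4i16 halves).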

  bool allowsHvxMemoryAccess(MVT VecTy, MachineMemOperand::Flags Flags,
                             unsigned *Fast) const;
  bool allowsHvxMisalignedMemoryAccesses(MVT VecTy,
                                         MachineMemOperand::Flags Flags,
                                         unsigned *Fast) const;
  void AdjustHvxInstrPostInstrSelection(MachineInstr &MI, SDNode *Node) const;

  bool isHvxSingleTy(MVT Ty) const;
  bool isHvxPairTy(MVT Ty) const;
  bool isHvxBoolTy(MVT Ty) const;
  SDValue convertToByteIndex(SDValue ElemIdx, MVT ElemTy,
                             SelectionDAG &DAG) const;
  SDValue getIndexInWord32(SDValue Idx, MVT ElemTy, SelectionDAG &DAG) const;
  SDValue getByteShuffle(const SDLoc &dl, SDValue Op0, SDValue Op1,
                         ArrayRef<int> Mask, SelectionDAG &DAG) const;

  SDValue buildHvxVectorReg(ArrayRef<SDValue> Values, const SDLoc &dl,
                            MVT VecTy, SelectionDAG &DAG) const;
  SDValue buildHvxVectorPred(ArrayRef<SDValue> Values, const SDLoc &dl,
                             MVT VecTy, SelectionDAG &DAG) const;
  SDValue createHvxPrefixPred(SDValue PredV, const SDLoc &dl,
                              unsigned BitBytes, bool ZeroFill,
                              SelectionDAG &DAG) const;
  SDValue extractHvxElementReg(SDValue VecV, SDValue IdxV, const SDLoc &dl,
                               MVT ResTy, SelectionDAG &DAG) const;
  SDValue extractHvxElementPred(SDValue VecV, SDValue IdxV, const SDLoc &dl,
                                MVT ResTy, SelectionDAG &DAG) const;
  SDValue insertHvxElementReg(SDValue VecV, SDValue IdxV, SDValue ValV,
                              const SDLoc &dl, SelectionDAG &DAG) const;
  SDValue insertHvxElementPred(SDValue VecV, SDValue IdxV, SDValue ValV,
                               const SDLoc &dl, SelectionDAG &DAG) const;
  SDValue extractHvxSubvectorReg(SDValue OrigOp, SDValue VecV, SDValue IdxV,
                                 const SDLoc &dl, MVT ResTy,
                                 SelectionDAG &DAG) const;
  SDValue extractHvxSubvectorPred(SDValue VecV, SDValue IdxV, const SDLoc &dl,
                                  MVT ResTy, SelectionDAG &DAG) const;
  SDValue insertHvxSubvectorReg(SDValue VecV, SDValue SubV, SDValue IdxV,
                                const SDLoc &dl, SelectionDAG &DAG) const;
  SDValue insertHvxSubvectorPred(SDValue VecV, SDValue SubV, SDValue IdxV,
                                 const SDLoc &dl, SelectionDAG &DAG) const;
  SDValue extendHvxVectorPred(SDValue VecV, const SDLoc &dl, MVT ResTy,
                              bool ZeroExt, SelectionDAG &DAG) const;
  SDValue compressHvxPred(SDValue VecQ, const SDLoc &dl, MVT ResTy,
                          SelectionDAG &DAG) const;
  SDValue resizeToWidth(SDValue VecV, MVT ResTy, bool Signed, const SDLoc &dl,
                        SelectionDAG &DAG) const;
  SDValue extractSubvector(SDValue Vec, MVT SubTy, unsigned SubIdx,
                           SelectionDAG &DAG) const;
  VectorPair emitHvxAddWithOverflow(SDValue A, SDValue B, const SDLoc &dl,
                                    bool Signed, SelectionDAG &DAG) const;
  VectorPair emitHvxShiftRightRnd(SDValue Val, unsigned Amt, bool Signed,
                                  SelectionDAG &DAG) const;
  SDValue emitHvxMulHsV60(SDValue A, SDValue B, const SDLoc &dl,
                          SelectionDAG &DAG) const;
  SDValue emitHvxMulLoHiV60(SDValue A, bool SignedA, SDValue B, bool SignedB,
                            const SDLoc &dl, SelectionDAG &DAG) const;
  SDValue emitHvxMulLoHiV62(SDValue A, bool SignedA, SDValue B, bool SignedB,
                            const SDLoc &dl, SelectionDAG &DAG) const;

  SDValue LowerHvxBuildVector(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxSplatVector(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxConcatVectors(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxExtractElement(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxInsertElement(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxExtractSubvector(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxInsertSubvector(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxBitcast(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxAnyExt(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxSignExt(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxZeroExt(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxCttz(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxMulh(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxMulLoHi(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxExtend(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxSelect(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxShift(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxFunnelShift(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxIntrinsic(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxMaskedOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxFpExtend(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxFpToInt(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxIntToFp(SDValue Op, SelectionDAG &DAG) const;
  SDValue ExpandHvxFpToInt(SDValue Op, SelectionDAG &DAG) const;
  SDValue ExpandHvxIntToFp(SDValue Op, SelectionDAG &DAG) const;

  VectorPair SplitVectorOp(SDValue Op, SelectionDAG &DAG) const;

  SDValue SplitHvxMemOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue WidenHvxLoad(SDValue Op, SelectionDAG &DAG) const;
  SDValue WidenHvxStore(SDValue Op, SelectionDAG &DAG) const;
  SDValue WidenHvxSetCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LegalizeHvxResize(SDValue Op, SelectionDAG &DAG) const;
  SDValue ExpandHvxResizeIntoSteps(SDValue Op, SelectionDAG &DAG) const;
  SDValue EqualizeFpIntConversion(SDValue Op, SelectionDAG &DAG) const;

  SDValue CreateTLWrapper(SDValue Op, SelectionDAG &DAG) const;
  SDValue RemoveTLWrapper(SDValue Op, SelectionDAG &DAG) const;

  std::pair<const TargetRegisterClass*, uint8_t>
  findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT)
      const override;

  bool shouldSplitToHvx(MVT Ty, SelectionDAG &DAG) const;
  bool shouldWidenToHvx(MVT Ty, SelectionDAG &DAG) const;
  bool isHvxOperation(SDNode *N, SelectionDAG &DAG) const;
  SDValue LowerHvxOperation(SDValue Op, SelectionDAG &DAG) const;
  void LowerHvxOperationWrapper(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                SelectionDAG &DAG) const;
  void ReplaceHvxNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                             SelectionDAG &DAG) const;

  SDValue combineTruncateBeforeLegal(SDValue Op, DAGCombinerInfo &DCI) const;
  SDValue combineConcatVectorsBeforeLegal(SDValue Op,
                                          DAGCombinerInfo &DCI) const;
  SDValue combineVectorShuffleBeforeLegal(SDValue Op,
                                          DAGCombinerInfo &DCI) const;

  SDValue PerformHvxDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_HEXAGON_HEXAGONISELLOWERING_H