/src/llvm-project/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
Line | Count | Source |
1 | | //===- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ----------===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | |
9 | | #include "AMDKernelCodeT.h" |
10 | | #include "MCTargetDesc/AMDGPUMCTargetDesc.h" |
11 | | #include "MCTargetDesc/AMDGPUTargetStreamer.h" |
12 | | #include "SIDefines.h" |
13 | | #include "SIInstrInfo.h" |
14 | | #include "SIRegisterInfo.h" |
15 | | #include "TargetInfo/AMDGPUTargetInfo.h" |
16 | | #include "Utils/AMDGPUAsmUtils.h" |
17 | | #include "Utils/AMDGPUBaseInfo.h" |
18 | | #include "Utils/AMDKernelCodeTUtils.h" |
19 | | #include "llvm/ADT/APFloat.h" |
20 | | #include "llvm/ADT/SmallBitVector.h" |
21 | | #include "llvm/ADT/StringSet.h" |
22 | | #include "llvm/ADT/Twine.h" |
23 | | #include "llvm/BinaryFormat/ELF.h" |
24 | | #include "llvm/CodeGen/MachineValueType.h" |
25 | | #include "llvm/MC/MCAsmInfo.h" |
26 | | #include "llvm/MC/MCContext.h" |
27 | | #include "llvm/MC/MCExpr.h" |
28 | | #include "llvm/MC/MCInst.h" |
29 | | #include "llvm/MC/MCInstrDesc.h" |
30 | | #include "llvm/MC/MCParser/MCAsmLexer.h" |
31 | | #include "llvm/MC/MCParser/MCAsmParser.h" |
32 | | #include "llvm/MC/MCParser/MCParsedAsmOperand.h" |
33 | | #include "llvm/MC/MCParser/MCTargetAsmParser.h" |
34 | | #include "llvm/MC/MCSymbol.h" |
35 | | #include "llvm/MC/TargetRegistry.h" |
36 | | #include "llvm/Support/AMDGPUMetadata.h" |
37 | | #include "llvm/Support/AMDHSAKernelDescriptor.h" |
38 | | #include "llvm/Support/Casting.h" |
39 | | #include "llvm/Support/MathExtras.h" |
40 | | #include "llvm/TargetParser/TargetParser.h" |
41 | | #include <optional> |
42 | | |
43 | | using namespace llvm; |
44 | | using namespace llvm::AMDGPU; |
45 | | using namespace llvm::amdhsa; |
46 | | |
47 | | namespace { |
48 | | |
49 | | class AMDGPUAsmParser; |
50 | | |
51 | | enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_AGPR, IS_TTMP, IS_SPECIAL }; |
52 | | |
53 | | //===----------------------------------------------------------------------===// |
54 | | // Operand |
55 | | //===----------------------------------------------------------------------===// |
56 | | |
57 | | class AMDGPUOperand : public MCParsedAsmOperand { |
58 | | enum KindTy { |
59 | | Token, |
60 | | Immediate, |
61 | | Register, |
62 | | Expression |
63 | | } Kind; |
64 | | |
65 | | SMLoc StartLoc, EndLoc; |
66 | | const AMDGPUAsmParser *AsmParser; |
67 | | |
68 | | public: |
69 | | AMDGPUOperand(KindTy Kind_, const AMDGPUAsmParser *AsmParser_) |
70 | 0 | : Kind(Kind_), AsmParser(AsmParser_) {} |
71 | | |
72 | | using Ptr = std::unique_ptr<AMDGPUOperand>; |
73 | | |
74 | | struct Modifiers { |
75 | | bool Abs = false; |
76 | | bool Neg = false; |
77 | | bool Sext = false; |
78 | | bool Lit = false; |
79 | | |
80 | 0 | bool hasFPModifiers() const { return Abs || Neg; } |
81 | 0 | bool hasIntModifiers() const { return Sext; } |
82 | 0 | bool hasModifiers() const { return hasFPModifiers() || hasIntModifiers(); } |
83 | | |
84 | 0 | int64_t getFPModifiersOperand() const { |
85 | 0 | int64_t Operand = 0; |
86 | 0 | Operand |= Abs ? SISrcMods::ABS : 0u; |
87 | 0 | Operand |= Neg ? SISrcMods::NEG : 0u; |
88 | 0 | return Operand; |
89 | 0 | } |
90 | | |
91 | 0 | int64_t getIntModifiersOperand() const { |
92 | 0 | int64_t Operand = 0; |
93 | 0 | Operand |= Sext ? SISrcMods::SEXT : 0u; |
94 | 0 | return Operand; |
95 | 0 | } |
96 | | |
97 | 0 | int64_t getModifiersOperand() const { |
98 | 0 | assert(!(hasFPModifiers() && hasIntModifiers()) |
99 | 0 | && "fp and int modifiers should not be used simultaneously"); |
100 | 0 | if (hasFPModifiers()) { |
101 | 0 | return getFPModifiersOperand(); |
102 | 0 | } else if (hasIntModifiers()) { |
103 | 0 | return getIntModifiersOperand(); |
104 | 0 | } else { |
105 | 0 | return 0; |
106 | 0 | } |
107 | 0 | } |
108 | | |
109 | | friend raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods); |
110 | | }; |
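// Editorial illustration, not part of the upstream source or of the coverage
// listing: for a source operand written as "-|v0|" the parser records
// Modifiers{Abs = true, Neg = true}, and getModifiersOperand() then yields
// SISrcMods::ABS | SISrcMods::NEG for the src*_modifiers field of the MCInst;
// for "sext(v0)" only Sext is set and SISrcMods::SEXT is returned instead.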
111 | | |
112 | | enum ImmTy { |
113 | | ImmTyNone, |
114 | | ImmTyGDS, |
115 | | ImmTyLDS, |
116 | | ImmTyOffen, |
117 | | ImmTyIdxen, |
118 | | ImmTyAddr64, |
119 | | ImmTyOffset, |
120 | | ImmTyInstOffset, |
121 | | ImmTyOffset0, |
122 | | ImmTyOffset1, |
123 | | ImmTySMEMOffsetMod, |
124 | | ImmTyCPol, |
125 | | ImmTyTFE, |
126 | | ImmTyD16, |
127 | | ImmTyClampSI, |
128 | | ImmTyOModSI, |
129 | | ImmTySDWADstSel, |
130 | | ImmTySDWASrc0Sel, |
131 | | ImmTySDWASrc1Sel, |
132 | | ImmTySDWADstUnused, |
133 | | ImmTyDMask, |
134 | | ImmTyDim, |
135 | | ImmTyUNorm, |
136 | | ImmTyDA, |
137 | | ImmTyR128A16, |
138 | | ImmTyA16, |
139 | | ImmTyLWE, |
140 | | ImmTyExpTgt, |
141 | | ImmTyExpCompr, |
142 | | ImmTyExpVM, |
143 | | ImmTyFORMAT, |
144 | | ImmTyHwreg, |
145 | | ImmTyOff, |
146 | | ImmTySendMsg, |
147 | | ImmTyInterpSlot, |
148 | | ImmTyInterpAttr, |
149 | | ImmTyInterpAttrChan, |
150 | | ImmTyOpSel, |
151 | | ImmTyOpSelHi, |
152 | | ImmTyNegLo, |
153 | | ImmTyNegHi, |
154 | | ImmTyDPP8, |
155 | | ImmTyDppCtrl, |
156 | | ImmTyDppRowMask, |
157 | | ImmTyDppBankMask, |
158 | | ImmTyDppBoundCtrl, |
159 | | ImmTyDppFI, |
160 | | ImmTySwizzle, |
161 | | ImmTyGprIdxMode, |
162 | | ImmTyHigh, |
163 | | ImmTyBLGP, |
164 | | ImmTyCBSZ, |
165 | | ImmTyABID, |
166 | | ImmTyEndpgm, |
167 | | ImmTyWaitVDST, |
168 | | ImmTyWaitEXP, |
169 | | ImmTyWaitVAVDst, |
170 | | ImmTyWaitVMVSrc, |
171 | | }; |
172 | | |
173 | | // Immediate operand kind. |
174 | | // It helps to identify the location of an offending operand after an error. |
175 | | // Note that regular literals and mandatory literals (KImm) must be handled |
176 | | // differently. When looking for an offending operand, we should usually |
177 | | // ignore mandatory literals because they are part of the instruction and |
178 | | // cannot be changed. Report location of mandatory operands only for VOPD, |
179 | | // when both OpX and OpY have a KImm and there are no other literals. |
180 | | enum ImmKindTy { |
181 | | ImmKindTyNone, |
182 | | ImmKindTyLiteral, |
183 | | ImmKindTyMandatoryLiteral, |
184 | | ImmKindTyConst, |
185 | | }; |
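// Editorial illustration, not part of the upstream source: in
// "v_add_f32 v0, 0x3f000000, v1" the 0x3f000000 operand is an ordinary 32-bit
// literal and would be tagged ImmKindTyLiteral, whereas the immediate of
// "v_madmk_f32 v0, v1, 0x41200000, v2" is a fixed field of that encoding (a
// KImm) and would be tagged ImmKindTyMandatoryLiteral, so it is normally
// skipped when locating an offending operand, as the comment above explains.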
186 | | |
187 | | private: |
188 | | struct TokOp { |
189 | | const char *Data; |
190 | | unsigned Length; |
191 | | }; |
192 | | |
193 | | struct ImmOp { |
194 | | int64_t Val; |
195 | | ImmTy Type; |
196 | | bool IsFPImm; |
197 | | mutable ImmKindTy Kind; |
198 | | Modifiers Mods; |
199 | | }; |
200 | | |
201 | | struct RegOp { |
202 | | unsigned RegNo; |
203 | | Modifiers Mods; |
204 | | }; |
205 | | |
206 | | union { |
207 | | TokOp Tok; |
208 | | ImmOp Imm; |
209 | | RegOp Reg; |
210 | | const MCExpr *Expr; |
211 | | }; |
212 | | |
213 | | public: |
214 | 0 | bool isToken() const override { return Kind == Token; } |
215 | | |
216 | 0 | bool isSymbolRefExpr() const { |
217 | 0 | return isExpr() && Expr && isa<MCSymbolRefExpr>(Expr); |
218 | 0 | } |
219 | | |
220 | 0 | bool isImm() const override { |
221 | 0 | return Kind == Immediate; |
222 | 0 | } |
223 | | |
224 | 0 | void setImmKindNone() const { |
225 | 0 | assert(isImm()); |
226 | 0 | Imm.Kind = ImmKindTyNone; |
227 | 0 | } |
228 | | |
229 | 0 | void setImmKindLiteral() const { |
230 | 0 | assert(isImm()); |
231 | 0 | Imm.Kind = ImmKindTyLiteral; |
232 | 0 | } |
233 | | |
234 | 0 | void setImmKindMandatoryLiteral() const { |
235 | 0 | assert(isImm()); |
236 | 0 | Imm.Kind = ImmKindTyMandatoryLiteral; |
237 | 0 | } |
238 | | |
239 | 0 | void setImmKindConst() const { |
240 | 0 | assert(isImm()); |
241 | 0 | Imm.Kind = ImmKindTyConst; |
242 | 0 | } |
243 | | |
244 | 0 | bool IsImmKindLiteral() const { |
245 | 0 | return isImm() && Imm.Kind == ImmKindTyLiteral; |
246 | 0 | } |
247 | | |
248 | 0 | bool IsImmKindMandatoryLiteral() const { |
249 | 0 | return isImm() && Imm.Kind == ImmKindTyMandatoryLiteral; |
250 | 0 | } |
251 | | |
252 | 0 | bool isImmKindConst() const { |
253 | 0 | return isImm() && Imm.Kind == ImmKindTyConst; |
254 | 0 | } |
255 | | |
256 | | bool isInlinableImm(MVT type) const; |
257 | | bool isLiteralImm(MVT type) const; |
258 | | |
259 | 0 | bool isRegKind() const { |
260 | 0 | return Kind == Register; |
261 | 0 | } |
262 | | |
263 | 0 | bool isReg() const override { |
264 | 0 | return isRegKind() && !hasModifiers(); |
265 | 0 | } |
266 | | |
267 | 0 | bool isRegOrInline(unsigned RCID, MVT type) const { |
268 | 0 | return isRegClass(RCID) || isInlinableImm(type); |
269 | 0 | } |
270 | | |
271 | 0 | bool isRegOrImmWithInputMods(unsigned RCID, MVT type) const { |
272 | 0 | return isRegOrInline(RCID, type) || isLiteralImm(type); |
273 | 0 | } |
274 | | |
275 | 0 | bool isRegOrImmWithInt16InputMods() const { |
276 | 0 | return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::i16); |
277 | 0 | } |
278 | | |
279 | 0 | bool isRegOrImmWithIntT16InputMods() const { |
280 | 0 | return isRegOrImmWithInputMods(AMDGPU::VS_16RegClassID, MVT::i16); |
281 | 0 | } |
282 | | |
283 | 0 | bool isRegOrImmWithInt32InputMods() const { |
284 | 0 | return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::i32); |
285 | 0 | } |
286 | | |
287 | 0 | bool isRegOrInlineImmWithInt16InputMods() const { |
288 | 0 | return isRegOrInline(AMDGPU::VS_32RegClassID, MVT::i16); |
289 | 0 | } |
290 | | |
291 | 0 | bool isRegOrInlineImmWithInt32InputMods() const { |
292 | 0 | return isRegOrInline(AMDGPU::VS_32RegClassID, MVT::i32); |
293 | 0 | } |
294 | | |
295 | 0 | bool isRegOrImmWithInt64InputMods() const { |
296 | 0 | return isRegOrImmWithInputMods(AMDGPU::VS_64RegClassID, MVT::i64); |
297 | 0 | } |
298 | | |
299 | 0 | bool isRegOrImmWithFP16InputMods() const { |
300 | 0 | return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::f16); |
301 | 0 | } |
302 | | |
303 | 0 | bool isRegOrImmWithFPT16InputMods() const { |
304 | 0 | return isRegOrImmWithInputMods(AMDGPU::VS_16RegClassID, MVT::f16); |
305 | 0 | } |
306 | | |
307 | 0 | bool isRegOrImmWithFP32InputMods() const { |
308 | 0 | return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::f32); |
309 | 0 | } |
310 | | |
311 | 0 | bool isRegOrImmWithFP64InputMods() const { |
312 | 0 | return isRegOrImmWithInputMods(AMDGPU::VS_64RegClassID, MVT::f64); |
313 | 0 | } |
314 | | |
315 | 0 | bool isRegOrInlineImmWithFP16InputMods() const { |
316 | 0 | return isRegOrInline(AMDGPU::VS_32RegClassID, MVT::f16); |
317 | 0 | } |
318 | | |
319 | 0 | bool isRegOrInlineImmWithFP32InputMods() const { |
320 | 0 | return isRegOrInline(AMDGPU::VS_32RegClassID, MVT::f32); |
321 | 0 | } |
322 | | |
323 | | |
324 | 0 | bool isVReg() const { |
325 | 0 | return isRegClass(AMDGPU::VGPR_32RegClassID) || |
326 | 0 | isRegClass(AMDGPU::VReg_64RegClassID) || |
327 | 0 | isRegClass(AMDGPU::VReg_96RegClassID) || |
328 | 0 | isRegClass(AMDGPU::VReg_128RegClassID) || |
329 | 0 | isRegClass(AMDGPU::VReg_160RegClassID) || |
330 | 0 | isRegClass(AMDGPU::VReg_192RegClassID) || |
331 | 0 | isRegClass(AMDGPU::VReg_256RegClassID) || |
332 | 0 | isRegClass(AMDGPU::VReg_512RegClassID) || |
333 | 0 | isRegClass(AMDGPU::VReg_1024RegClassID); |
334 | 0 | } |
335 | | |
336 | 0 | bool isVReg32() const { |
337 | 0 | return isRegClass(AMDGPU::VGPR_32RegClassID); |
338 | 0 | } |
339 | | |
340 | 0 | bool isVReg32OrOff() const { |
341 | 0 | return isOff() || isVReg32(); |
342 | 0 | } |
343 | | |
344 | 0 | bool isNull() const { |
345 | 0 | return isRegKind() && getReg() == AMDGPU::SGPR_NULL; |
346 | 0 | } |
347 | | |
348 | | bool isVRegWithInputMods() const; |
349 | | bool isT16VRegWithInputMods() const; |
350 | | |
351 | | bool isSDWAOperand(MVT type) const; |
352 | | bool isSDWAFP16Operand() const; |
353 | | bool isSDWAFP32Operand() const; |
354 | | bool isSDWAInt16Operand() const; |
355 | | bool isSDWAInt32Operand() const; |
356 | | |
357 | 0 | bool isImmTy(ImmTy ImmT) const { |
358 | 0 | return isImm() && Imm.Type == ImmT; |
359 | 0 | } |
360 | | |
361 | 0 | template <ImmTy Ty> bool isImmTy() const { return isImmTy(Ty); } |
362 | | |
363 | 0 | bool isImmLiteral() const { return isImmTy(ImmTyNone); } |
364 | | |
365 | 0 | bool isImmModifier() const { |
366 | 0 | return isImm() && Imm.Type != ImmTyNone; |
367 | 0 | } |
368 | | |
369 | 0 | bool isOModSI() const { return isImmTy(ImmTyOModSI); } |
370 | 0 | bool isDMask() const { return isImmTy(ImmTyDMask); } |
371 | 0 | bool isDim() const { return isImmTy(ImmTyDim); } |
372 | 0 | bool isR128A16() const { return isImmTy(ImmTyR128A16); } |
373 | 0 | bool isOff() const { return isImmTy(ImmTyOff); } |
374 | 0 | bool isExpTgt() const { return isImmTy(ImmTyExpTgt); } |
375 | 0 | bool isOffen() const { return isImmTy(ImmTyOffen); } |
376 | 0 | bool isIdxen() const { return isImmTy(ImmTyIdxen); } |
377 | 0 | bool isAddr64() const { return isImmTy(ImmTyAddr64); } |
378 | 0 | bool isOffset() const { return isImmTy(ImmTyOffset); } |
379 | 0 | bool isOffset0() const { return isImmTy(ImmTyOffset0) && isUInt<8>(getImm()); } |
380 | 0 | bool isOffset1() const { return isImmTy(ImmTyOffset1) && isUInt<8>(getImm()); } |
381 | 0 | bool isSMEMOffsetMod() const { return isImmTy(ImmTySMEMOffsetMod); } |
382 | 0 | bool isFlatOffset() const { return isImmTy(ImmTyOffset) || isImmTy(ImmTyInstOffset); } |
383 | 0 | bool isGDS() const { return isImmTy(ImmTyGDS); } |
384 | 0 | bool isLDS() const { return isImmTy(ImmTyLDS); } |
385 | 0 | bool isCPol() const { return isImmTy(ImmTyCPol); } |
386 | 0 | bool isTFE() const { return isImmTy(ImmTyTFE); } |
387 | 0 | bool isFORMAT() const { return isImmTy(ImmTyFORMAT) && isUInt<7>(getImm()); } |
388 | 0 | bool isDppBankMask() const { return isImmTy(ImmTyDppBankMask); } |
389 | 0 | bool isDppRowMask() const { return isImmTy(ImmTyDppRowMask); } |
390 | 0 | bool isDppBoundCtrl() const { return isImmTy(ImmTyDppBoundCtrl); } |
391 | 0 | bool isDppFI() const { return isImmTy(ImmTyDppFI); } |
392 | 0 | bool isSDWADstSel() const { return isImmTy(ImmTySDWADstSel); } |
393 | 0 | bool isSDWASrc0Sel() const { return isImmTy(ImmTySDWASrc0Sel); } |
394 | 0 | bool isSDWASrc1Sel() const { return isImmTy(ImmTySDWASrc1Sel); } |
395 | 0 | bool isSDWADstUnused() const { return isImmTy(ImmTySDWADstUnused); } |
396 | 0 | bool isInterpSlot() const { return isImmTy(ImmTyInterpSlot); } |
397 | 0 | bool isInterpAttr() const { return isImmTy(ImmTyInterpAttr); } |
398 | 0 | bool isInterpAttrChan() const { return isImmTy(ImmTyInterpAttrChan); } |
399 | 0 | bool isOpSel() const { return isImmTy(ImmTyOpSel); } |
400 | 0 | bool isOpSelHi() const { return isImmTy(ImmTyOpSelHi); } |
401 | 0 | bool isNegLo() const { return isImmTy(ImmTyNegLo); } |
402 | 0 | bool isNegHi() const { return isImmTy(ImmTyNegHi); } |
403 | | |
404 | 0 | bool isRegOrImm() const { |
405 | 0 | return isReg() || isImm(); |
406 | 0 | } |
407 | | |
408 | | bool isRegClass(unsigned RCID) const; |
409 | | |
410 | | bool isInlineValue() const; |
411 | | |
412 | 0 | bool isRegOrInlineNoMods(unsigned RCID, MVT type) const { |
413 | 0 | return isRegOrInline(RCID, type) && !hasModifiers(); |
414 | 0 | } |
415 | | |
416 | 0 | bool isSCSrcB16() const { |
417 | 0 | return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i16); |
418 | 0 | } |
419 | | |
420 | 0 | bool isSCSrcV2B16() const { |
421 | 0 | return isSCSrcB16(); |
422 | 0 | } |
423 | | |
424 | 0 | bool isSCSrcB32() const { |
425 | 0 | return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i32); |
426 | 0 | } |
427 | | |
428 | 0 | bool isSCSrcB64() const { |
429 | 0 | return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::i64); |
430 | 0 | } |
431 | | |
432 | | bool isBoolReg() const; |
433 | | |
434 | 0 | bool isSCSrcF16() const { |
435 | 0 | return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f16); |
436 | 0 | } |
437 | | |
438 | 0 | bool isSCSrcV2F16() const { |
439 | 0 | return isSCSrcF16(); |
440 | 0 | } |
441 | | |
442 | 0 | bool isSCSrcF32() const { |
443 | 0 | return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f32); |
444 | 0 | } |
445 | | |
446 | 0 | bool isSCSrcF64() const { |
447 | 0 | return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::f64); |
448 | 0 | } |
449 | | |
450 | 0 | bool isSSrcB32() const { |
451 | 0 | return isSCSrcB32() || isLiteralImm(MVT::i32) || isExpr(); |
452 | 0 | } |
453 | | |
454 | 0 | bool isSSrcB16() const { |
455 | 0 | return isSCSrcB16() || isLiteralImm(MVT::i16); |
456 | 0 | } |
457 | | |
458 | 0 | bool isSSrcV2B16() const { |
459 | 0 | llvm_unreachable("cannot happen"); |
460 | 0 | return isSSrcB16(); |
461 | 0 | } |
462 | | |
463 | 0 | bool isSSrcB64() const { |
464 | | // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits. |
465 | | // See isVSrc64(). |
466 | 0 | return isSCSrcB64() || isLiteralImm(MVT::i64); |
467 | 0 | } |
468 | | |
469 | 0 | bool isSSrcF32() const { |
470 | 0 | return isSCSrcB32() || isLiteralImm(MVT::f32) || isExpr(); |
471 | 0 | } |
472 | | |
473 | 0 | bool isSSrcF64() const { |
474 | 0 | return isSCSrcB64() || isLiteralImm(MVT::f64); |
475 | 0 | } |
476 | | |
477 | 0 | bool isSSrcF16() const { |
478 | 0 | return isSCSrcB16() || isLiteralImm(MVT::f16); |
479 | 0 | } |
480 | | |
481 | 0 | bool isSSrcV2F16() const { |
482 | 0 | llvm_unreachable("cannot happen"); |
483 | 0 | return isSSrcF16(); |
484 | 0 | } |
485 | | |
486 | 0 | bool isSSrcV2FP32() const { |
487 | 0 | llvm_unreachable("cannot happen"); |
488 | 0 | return isSSrcF32(); |
489 | 0 | } |
490 | | |
491 | 0 | bool isSCSrcV2FP32() const { |
492 | 0 | llvm_unreachable("cannot happen"); |
493 | 0 | return isSCSrcF32(); |
494 | 0 | } |
495 | | |
496 | 0 | bool isSSrcV2INT32() const { |
497 | 0 | llvm_unreachable("cannot happen"); |
498 | 0 | return isSSrcB32(); |
499 | 0 | } |
500 | | |
501 | 0 | bool isSCSrcV2INT32() const { |
502 | 0 | llvm_unreachable("cannot happen"); |
503 | 0 | return isSCSrcB32(); |
504 | 0 | } |
505 | | |
506 | 0 | bool isSSrcOrLdsB32() const { |
507 | 0 | return isRegOrInlineNoMods(AMDGPU::SRegOrLds_32RegClassID, MVT::i32) || |
508 | 0 | isLiteralImm(MVT::i32) || isExpr(); |
509 | 0 | } |
510 | | |
511 | 0 | bool isVCSrcB32() const { |
512 | 0 | return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i32); |
513 | 0 | } |
514 | | |
515 | 0 | bool isVCSrcB64() const { |
516 | 0 | return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::i64); |
517 | 0 | } |
518 | | |
519 | 0 | bool isVCSrcTB16() const { |
520 | 0 | return isRegOrInlineNoMods(AMDGPU::VS_16RegClassID, MVT::i16); |
521 | 0 | } |
522 | | |
523 | 0 | bool isVCSrcTB16_Lo128() const { |
524 | 0 | return isRegOrInlineNoMods(AMDGPU::VS_16_Lo128RegClassID, MVT::i16); |
525 | 0 | } |
526 | | |
527 | 0 | bool isVCSrcFake16B16_Lo128() const { |
528 | 0 | return isRegOrInlineNoMods(AMDGPU::VS_32_Lo128RegClassID, MVT::i16); |
529 | 0 | } |
530 | | |
531 | 0 | bool isVCSrcB16() const { |
532 | 0 | return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i16); |
533 | 0 | } |
534 | | |
535 | 0 | bool isVCSrcV2B16() const { |
536 | 0 | return isVCSrcB16(); |
537 | 0 | } |
538 | | |
539 | 0 | bool isVCSrcF32() const { |
540 | 0 | return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f32); |
541 | 0 | } |
542 | | |
543 | 0 | bool isVCSrcF64() const { |
544 | 0 | return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::f64); |
545 | 0 | } |
546 | | |
547 | 0 | bool isVCSrcTF16() const { |
548 | 0 | return isRegOrInlineNoMods(AMDGPU::VS_16RegClassID, MVT::f16); |
549 | 0 | } |
550 | | |
551 | 0 | bool isVCSrcTF16_Lo128() const { |
552 | 0 | return isRegOrInlineNoMods(AMDGPU::VS_16_Lo128RegClassID, MVT::f16); |
553 | 0 | } |
554 | | |
555 | 0 | bool isVCSrcFake16F16_Lo128() const { |
556 | 0 | return isRegOrInlineNoMods(AMDGPU::VS_32_Lo128RegClassID, MVT::f16); |
557 | 0 | } |
558 | | |
559 | 0 | bool isVCSrcF16() const { |
560 | 0 | return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f16); |
561 | 0 | } |
562 | | |
563 | 0 | bool isVCSrcV2F16() const { |
564 | 0 | return isVCSrcF16(); |
565 | 0 | } |
566 | | |
567 | 0 | bool isVSrcB32() const { |
568 | 0 | return isVCSrcF32() || isLiteralImm(MVT::i32) || isExpr(); |
569 | 0 | } |
570 | | |
571 | 0 | bool isVSrcB64() const { |
572 | 0 | return isVCSrcF64() || isLiteralImm(MVT::i64); |
573 | 0 | } |
574 | | |
575 | 0 | bool isVSrcTB16() const { return isVCSrcTB16() || isLiteralImm(MVT::i16); } |
576 | | |
577 | 0 | bool isVSrcTB16_Lo128() const { |
578 | 0 | return isVCSrcTB16_Lo128() || isLiteralImm(MVT::i16); |
579 | 0 | } |
580 | | |
581 | 0 | bool isVSrcFake16B16_Lo128() const { |
582 | 0 | return isVCSrcFake16B16_Lo128() || isLiteralImm(MVT::i16); |
583 | 0 | } |
584 | | |
585 | 0 | bool isVSrcB16() const { |
586 | 0 | return isVCSrcB16() || isLiteralImm(MVT::i16); |
587 | 0 | } |
588 | | |
589 | 0 | bool isVSrcV2B16() const { |
590 | 0 | return isVSrcB16() || isLiteralImm(MVT::v2i16); |
591 | 0 | } |
592 | | |
593 | 0 | bool isVCSrcV2FP32() const { |
594 | 0 | return isVCSrcF64(); |
595 | 0 | } |
596 | | |
597 | 0 | bool isVSrcV2FP32() const { |
598 | 0 | return isVSrcF64() || isLiteralImm(MVT::v2f32); |
599 | 0 | } |
600 | | |
601 | 0 | bool isVCSrcV2INT32() const { |
602 | 0 | return isVCSrcB64(); |
603 | 0 | } |
604 | | |
605 | 0 | bool isVSrcV2INT32() const { |
606 | 0 | return isVSrcB64() || isLiteralImm(MVT::v2i32); |
607 | 0 | } |
608 | | |
609 | 0 | bool isVSrcF32() const { |
610 | 0 | return isVCSrcF32() || isLiteralImm(MVT::f32) || isExpr(); |
611 | 0 | } |
612 | | |
613 | 0 | bool isVSrcF64() const { |
614 | 0 | return isVCSrcF64() || isLiteralImm(MVT::f64); |
615 | 0 | } |
616 | | |
617 | 0 | bool isVSrcTF16() const { return isVCSrcTF16() || isLiteralImm(MVT::f16); } |
618 | | |
619 | 0 | bool isVSrcTF16_Lo128() const { |
620 | 0 | return isVCSrcTF16_Lo128() || isLiteralImm(MVT::f16); |
621 | 0 | } |
622 | | |
623 | 0 | bool isVSrcFake16F16_Lo128() const { |
624 | 0 | return isVCSrcFake16F16_Lo128() || isLiteralImm(MVT::f16); |
625 | 0 | } |
626 | | |
627 | 0 | bool isVSrcF16() const { |
628 | 0 | return isVCSrcF16() || isLiteralImm(MVT::f16); |
629 | 0 | } |
630 | | |
631 | 0 | bool isVSrcV2F16() const { |
632 | 0 | return isVSrcF16() || isLiteralImm(MVT::v2f16); |
633 | 0 | } |
634 | | |
635 | 0 | bool isVISrcB32() const { |
636 | 0 | return isRegOrInlineNoMods(AMDGPU::VGPR_32RegClassID, MVT::i32); |
637 | 0 | } |
638 | | |
639 | 0 | bool isVISrcB16() const { |
640 | 0 | return isRegOrInlineNoMods(AMDGPU::VGPR_32RegClassID, MVT::i16); |
641 | 0 | } |
642 | | |
643 | 0 | bool isVISrcV2B16() const { |
644 | 0 | return isVISrcB16(); |
645 | 0 | } |
646 | | |
647 | 0 | bool isVISrcF32() const { |
648 | 0 | return isRegOrInlineNoMods(AMDGPU::VGPR_32RegClassID, MVT::f32); |
649 | 0 | } |
650 | | |
651 | 0 | bool isVISrcF16() const { |
652 | 0 | return isRegOrInlineNoMods(AMDGPU::VGPR_32RegClassID, MVT::f16); |
653 | 0 | } |
654 | | |
655 | 0 | bool isVISrcV2F16() const { |
656 | 0 | return isVISrcF16() || isVISrcB32(); |
657 | 0 | } |
658 | | |
659 | 0 | bool isVISrc_64B64() const { |
660 | 0 | return isRegOrInlineNoMods(AMDGPU::VReg_64RegClassID, MVT::i64); |
661 | 0 | } |
662 | | |
663 | 0 | bool isVISrc_64F64() const { |
664 | 0 | return isRegOrInlineNoMods(AMDGPU::VReg_64RegClassID, MVT::f64); |
665 | 0 | } |
666 | | |
667 | 0 | bool isVISrc_64V2FP32() const { |
668 | 0 | return isRegOrInlineNoMods(AMDGPU::VReg_64RegClassID, MVT::f32); |
669 | 0 | } |
670 | | |
671 | 0 | bool isVISrc_64V2INT32() const { |
672 | 0 | return isRegOrInlineNoMods(AMDGPU::VReg_64RegClassID, MVT::i32); |
673 | 0 | } |
674 | | |
675 | 0 | bool isVISrc_256B64() const { |
676 | 0 | return isRegOrInlineNoMods(AMDGPU::VReg_256RegClassID, MVT::i64); |
677 | 0 | } |
678 | | |
679 | 0 | bool isVISrc_256F64() const { |
680 | 0 | return isRegOrInlineNoMods(AMDGPU::VReg_256RegClassID, MVT::f64); |
681 | 0 | } |
682 | | |
683 | 0 | bool isVISrc_128B16() const { |
684 | 0 | return isRegOrInlineNoMods(AMDGPU::VReg_128RegClassID, MVT::i16); |
685 | 0 | } |
686 | | |
687 | 0 | bool isVISrc_128V2B16() const { |
688 | 0 | return isVISrc_128B16(); |
689 | 0 | } |
690 | | |
691 | 0 | bool isVISrc_128B32() const { |
692 | 0 | return isRegOrInlineNoMods(AMDGPU::VReg_128RegClassID, MVT::i32); |
693 | 0 | } |
694 | | |
695 | 0 | bool isVISrc_128F32() const { |
696 | 0 | return isRegOrInlineNoMods(AMDGPU::VReg_128RegClassID, MVT::f32); |
697 | 0 | } |
698 | | |
699 | 0 | bool isVISrc_256V2FP32() const { |
700 | 0 | return isRegOrInlineNoMods(AMDGPU::VReg_256RegClassID, MVT::f32); |
701 | 0 | } |
702 | | |
703 | 0 | bool isVISrc_256V2INT32() const { |
704 | 0 | return isRegOrInlineNoMods(AMDGPU::VReg_256RegClassID, MVT::i32); |
705 | 0 | } |
706 | | |
707 | 0 | bool isVISrc_512B32() const { |
708 | 0 | return isRegOrInlineNoMods(AMDGPU::VReg_512RegClassID, MVT::i32); |
709 | 0 | } |
710 | | |
711 | 0 | bool isVISrc_512B16() const { |
712 | 0 | return isRegOrInlineNoMods(AMDGPU::VReg_512RegClassID, MVT::i16); |
713 | 0 | } |
714 | | |
715 | 0 | bool isVISrc_512V2B16() const { |
716 | 0 | return isVISrc_512B16(); |
717 | 0 | } |
718 | | |
719 | 0 | bool isVISrc_512F32() const { |
720 | 0 | return isRegOrInlineNoMods(AMDGPU::VReg_512RegClassID, MVT::f32); |
721 | 0 | } |
722 | | |
723 | 0 | bool isVISrc_512F16() const { |
724 | 0 | return isRegOrInlineNoMods(AMDGPU::VReg_512RegClassID, MVT::f16); |
725 | 0 | } |
726 | | |
727 | 0 | bool isVISrc_512V2F16() const { |
728 | 0 | return isVISrc_512F16() || isVISrc_512B32(); |
729 | 0 | } |
730 | | |
731 | 0 | bool isVISrc_1024B32() const { |
732 | 0 | return isRegOrInlineNoMods(AMDGPU::VReg_1024RegClassID, MVT::i32); |
733 | 0 | } |
734 | | |
735 | 0 | bool isVISrc_1024B16() const { |
736 | 0 | return isRegOrInlineNoMods(AMDGPU::VReg_1024RegClassID, MVT::i16); |
737 | 0 | } |
738 | | |
739 | 0 | bool isVISrc_1024V2B16() const { |
740 | 0 | return isVISrc_1024B16(); |
741 | 0 | } |
742 | | |
743 | 0 | bool isVISrc_1024F32() const { |
744 | 0 | return isRegOrInlineNoMods(AMDGPU::VReg_1024RegClassID, MVT::f32); |
745 | 0 | } |
746 | | |
747 | 0 | bool isVISrc_1024F16() const { |
748 | 0 | return isRegOrInlineNoMods(AMDGPU::VReg_1024RegClassID, MVT::f16); |
749 | 0 | } |
750 | | |
751 | 0 | bool isVISrc_1024V2F16() const { |
752 | 0 | return isVISrc_1024F16() || isVISrc_1024B32(); |
753 | 0 | } |
754 | | |
755 | 0 | bool isAISrcB32() const { |
756 | 0 | return isRegOrInlineNoMods(AMDGPU::AGPR_32RegClassID, MVT::i32); |
757 | 0 | } |
758 | | |
759 | 0 | bool isAISrcB16() const { |
760 | 0 | return isRegOrInlineNoMods(AMDGPU::AGPR_32RegClassID, MVT::i16); |
761 | 0 | } |
762 | | |
763 | 0 | bool isAISrcV2B16() const { |
764 | 0 | return isAISrcB16(); |
765 | 0 | } |
766 | | |
767 | 0 | bool isAISrcF32() const { |
768 | 0 | return isRegOrInlineNoMods(AMDGPU::AGPR_32RegClassID, MVT::f32); |
769 | 0 | } |
770 | | |
771 | 0 | bool isAISrcF16() const { |
772 | 0 | return isRegOrInlineNoMods(AMDGPU::AGPR_32RegClassID, MVT::f16); |
773 | 0 | } |
774 | | |
775 | 0 | bool isAISrcV2F16() const { |
776 | 0 | return isAISrcF16() || isAISrcB32(); |
777 | 0 | } |
778 | | |
779 | 0 | bool isAISrc_64B64() const { |
780 | 0 | return isRegOrInlineNoMods(AMDGPU::AReg_64RegClassID, MVT::i64); |
781 | 0 | } |
782 | | |
783 | 0 | bool isAISrc_64F64() const { |
784 | 0 | return isRegOrInlineNoMods(AMDGPU::AReg_64RegClassID, MVT::f64); |
785 | 0 | } |
786 | | |
787 | 0 | bool isAISrc_128B32() const { |
788 | 0 | return isRegOrInlineNoMods(AMDGPU::AReg_128RegClassID, MVT::i32); |
789 | 0 | } |
790 | | |
791 | 0 | bool isAISrc_128B16() const { |
792 | 0 | return isRegOrInlineNoMods(AMDGPU::AReg_128RegClassID, MVT::i16); |
793 | 0 | } |
794 | | |
795 | 0 | bool isAISrc_128V2B16() const { |
796 | 0 | return isAISrc_128B16(); |
797 | 0 | } |
798 | | |
799 | 0 | bool isAISrc_128F32() const { |
800 | 0 | return isRegOrInlineNoMods(AMDGPU::AReg_128RegClassID, MVT::f32); |
801 | 0 | } |
802 | | |
803 | 0 | bool isAISrc_128F16() const { |
804 | 0 | return isRegOrInlineNoMods(AMDGPU::AReg_128RegClassID, MVT::f16); |
805 | 0 | } |
806 | | |
807 | 0 | bool isAISrc_128V2F16() const { |
808 | 0 | return isAISrc_128F16() || isAISrc_128B32(); |
809 | 0 | } |
810 | | |
811 | 0 | bool isVISrc_128F16() const { |
812 | 0 | return isRegOrInlineNoMods(AMDGPU::VReg_128RegClassID, MVT::f16); |
813 | 0 | } |
814 | | |
815 | 0 | bool isVISrc_128V2F16() const { |
816 | 0 | return isVISrc_128F16() || isVISrc_128B32(); |
817 | 0 | } |
818 | | |
819 | 0 | bool isAISrc_256B64() const { |
820 | 0 | return isRegOrInlineNoMods(AMDGPU::AReg_256RegClassID, MVT::i64); |
821 | 0 | } |
822 | | |
823 | 0 | bool isAISrc_256F64() const { |
824 | 0 | return isRegOrInlineNoMods(AMDGPU::AReg_256RegClassID, MVT::f64); |
825 | 0 | } |
826 | | |
827 | 0 | bool isAISrc_512B32() const { |
828 | 0 | return isRegOrInlineNoMods(AMDGPU::AReg_512RegClassID, MVT::i32); |
829 | 0 | } |
830 | | |
831 | 0 | bool isAISrc_512B16() const { |
832 | 0 | return isRegOrInlineNoMods(AMDGPU::AReg_512RegClassID, MVT::i16); |
833 | 0 | } |
834 | | |
835 | 0 | bool isAISrc_512V2B16() const { |
836 | 0 | return isAISrc_512B16(); |
837 | 0 | } |
838 | | |
839 | 0 | bool isAISrc_512F32() const { |
840 | 0 | return isRegOrInlineNoMods(AMDGPU::AReg_512RegClassID, MVT::f32); |
841 | 0 | } |
842 | | |
843 | 0 | bool isAISrc_512F16() const { |
844 | 0 | return isRegOrInlineNoMods(AMDGPU::AReg_512RegClassID, MVT::f16); |
845 | 0 | } |
846 | | |
847 | 0 | bool isAISrc_512V2F16() const { |
848 | 0 | return isAISrc_512F16() || isAISrc_512B32(); |
849 | 0 | } |
850 | | |
851 | 0 | bool isAISrc_1024B32() const { |
852 | 0 | return isRegOrInlineNoMods(AMDGPU::AReg_1024RegClassID, MVT::i32); |
853 | 0 | } |
854 | | |
855 | 0 | bool isAISrc_1024B16() const { |
856 | 0 | return isRegOrInlineNoMods(AMDGPU::AReg_1024RegClassID, MVT::i16); |
857 | 0 | } |
858 | | |
859 | 0 | bool isAISrc_1024V2B16() const { |
860 | 0 | return isAISrc_1024B16(); |
861 | 0 | } |
862 | | |
863 | 0 | bool isAISrc_1024F32() const { |
864 | 0 | return isRegOrInlineNoMods(AMDGPU::AReg_1024RegClassID, MVT::f32); |
865 | 0 | } |
866 | | |
867 | 0 | bool isAISrc_1024F16() const { |
868 | 0 | return isRegOrInlineNoMods(AMDGPU::AReg_1024RegClassID, MVT::f16); |
869 | 0 | } |
870 | | |
871 | 0 | bool isAISrc_1024V2F16() const { |
872 | 0 | return isAISrc_1024F16() || isAISrc_1024B32(); |
873 | 0 | } |
874 | | |
875 | 0 | bool isKImmFP32() const { |
876 | 0 | return isLiteralImm(MVT::f32); |
877 | 0 | } |
878 | | |
879 | 0 | bool isKImmFP16() const { |
880 | 0 | return isLiteralImm(MVT::f16); |
881 | 0 | } |
882 | | |
883 | 0 | bool isMem() const override { |
884 | 0 | return false; |
885 | 0 | } |
886 | | |
887 | 0 | bool isExpr() const { |
888 | 0 | return Kind == Expression; |
889 | 0 | } |
890 | | |
891 | 0 | bool isSOPPBrTarget() const { return isExpr() || isImm(); } |
892 | | |
893 | | bool isSWaitCnt() const; |
894 | | bool isDepCtr() const; |
895 | | bool isSDelayALU() const; |
896 | | bool isHwreg() const; |
897 | | bool isSendMsg() const; |
898 | | bool isSplitBarrier() const; |
899 | | bool isSwizzle() const; |
900 | | bool isSMRDOffset8() const; |
901 | | bool isSMEMOffset() const; |
902 | | bool isSMRDLiteralOffset() const; |
903 | | bool isDPP8() const; |
904 | | bool isDPPCtrl() const; |
905 | | bool isBLGP() const; |
906 | | bool isCBSZ() const; |
907 | | bool isABID() const; |
908 | | bool isGPRIdxMode() const; |
909 | | bool isS16Imm() const; |
910 | | bool isU16Imm() const; |
911 | | bool isEndpgm() const; |
912 | | bool isWaitVDST() const; |
913 | | bool isWaitEXP() const; |
914 | | bool isWaitVAVDst() const; |
915 | | bool isWaitVMVSrc() const; |
916 | | |
917 | 0 | auto getPredicate(std::function<bool(const AMDGPUOperand &Op)> P) const { |
918 | 0 | return std::bind(P, *this); |
919 | 0 | } |
920 | | |
921 | 0 | StringRef getToken() const { |
922 | 0 | assert(isToken()); |
923 | 0 | return StringRef(Tok.Data, Tok.Length); |
924 | 0 | } |
925 | | |
926 | 0 | int64_t getImm() const { |
927 | 0 | assert(isImm()); |
928 | 0 | return Imm.Val; |
929 | 0 | } |
930 | | |
931 | 0 | void setImm(int64_t Val) { |
932 | 0 | assert(isImm()); |
933 | 0 | Imm.Val = Val; |
934 | 0 | } |
935 | | |
936 | 0 | ImmTy getImmTy() const { |
937 | 0 | assert(isImm()); |
938 | 0 | return Imm.Type; |
939 | 0 | } |
940 | | |
941 | 0 | unsigned getReg() const override { |
942 | 0 | assert(isRegKind()); |
943 | 0 | return Reg.RegNo; |
944 | 0 | } |
945 | | |
946 | 0 | SMLoc getStartLoc() const override { |
947 | 0 | return StartLoc; |
948 | 0 | } |
949 | | |
950 | 0 | SMLoc getEndLoc() const override { |
951 | 0 | return EndLoc; |
952 | 0 | } |
953 | | |
954 | 0 | SMRange getLocRange() const { |
955 | 0 | return SMRange(StartLoc, EndLoc); |
956 | 0 | } |
957 | | |
958 | 0 | Modifiers getModifiers() const { |
959 | 0 | assert(isRegKind() || isImmTy(ImmTyNone)); |
960 | 0 | return isRegKind() ? Reg.Mods : Imm.Mods; |
961 | 0 | } |
962 | | |
963 | 0 | void setModifiers(Modifiers Mods) { |
964 | 0 | assert(isRegKind() || isImmTy(ImmTyNone)); |
965 | 0 | if (isRegKind()) |
966 | 0 | Reg.Mods = Mods; |
967 | 0 | else |
968 | 0 | Imm.Mods = Mods; |
969 | 0 | } |
970 | | |
971 | 0 | bool hasModifiers() const { |
972 | 0 | return getModifiers().hasModifiers(); |
973 | 0 | } |
974 | | |
975 | 0 | bool hasFPModifiers() const { |
976 | 0 | return getModifiers().hasFPModifiers(); |
977 | 0 | } |
978 | | |
979 | 0 | bool hasIntModifiers() const { |
980 | 0 | return getModifiers().hasIntModifiers(); |
981 | 0 | } |
982 | | |
983 | | uint64_t applyInputFPModifiers(uint64_t Val, unsigned Size) const; |
984 | | |
985 | | void addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers = true) const; |
986 | | |
987 | | void addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyModifiers) const; |
988 | | |
989 | | void addRegOperands(MCInst &Inst, unsigned N) const; |
990 | | |
991 | 0 | void addRegOrImmOperands(MCInst &Inst, unsigned N) const { |
992 | 0 | if (isRegKind()) |
993 | 0 | addRegOperands(Inst, N); |
994 | 0 | else |
995 | 0 | addImmOperands(Inst, N); |
996 | 0 | } |
997 | | |
998 | 0 | void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const { |
999 | 0 | Modifiers Mods = getModifiers(); |
1000 | 0 | Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand())); |
1001 | 0 | if (isRegKind()) { |
1002 | 0 | addRegOperands(Inst, N); |
1003 | 0 | } else { |
1004 | 0 | addImmOperands(Inst, N, false); |
1005 | 0 | } |
1006 | 0 | } |
1007 | | |
1008 | 0 | void addRegOrImmWithFPInputModsOperands(MCInst &Inst, unsigned N) const { |
1009 | 0 | assert(!hasIntModifiers()); |
1010 | 0 | addRegOrImmWithInputModsOperands(Inst, N); |
1011 | 0 | } |
1012 | | |
1013 | 0 | void addRegOrImmWithIntInputModsOperands(MCInst &Inst, unsigned N) const { |
1014 | 0 | assert(!hasFPModifiers()); |
1015 | 0 | addRegOrImmWithInputModsOperands(Inst, N); |
1016 | 0 | } |
1017 | | |
1018 | 0 | void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const { |
1019 | 0 | Modifiers Mods = getModifiers(); |
1020 | 0 | Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand())); |
1021 | 0 | assert(isRegKind()); |
1022 | 0 | addRegOperands(Inst, N); |
1023 | 0 | } |
1024 | | |
1025 | 0 | void addRegWithFPInputModsOperands(MCInst &Inst, unsigned N) const { |
1026 | 0 | assert(!hasIntModifiers()); |
1027 | 0 | addRegWithInputModsOperands(Inst, N); |
1028 | 0 | } |
1029 | | |
1030 | 0 | void addRegWithIntInputModsOperands(MCInst &Inst, unsigned N) const { |
1031 | 0 | assert(!hasFPModifiers()); |
1032 | 0 | addRegWithInputModsOperands(Inst, N); |
1033 | 0 | } |
1034 | | |
1035 | 0 | static void printImmTy(raw_ostream& OS, ImmTy Type) { |
1036 | | // clang-format off |
1037 | 0 | switch (Type) { |
1038 | 0 | case ImmTyNone: OS << "None"; break; |
1039 | 0 | case ImmTyGDS: OS << "GDS"; break; |
1040 | 0 | case ImmTyLDS: OS << "LDS"; break; |
1041 | 0 | case ImmTyOffen: OS << "Offen"; break; |
1042 | 0 | case ImmTyIdxen: OS << "Idxen"; break; |
1043 | 0 | case ImmTyAddr64: OS << "Addr64"; break; |
1044 | 0 | case ImmTyOffset: OS << "Offset"; break; |
1045 | 0 | case ImmTyInstOffset: OS << "InstOffset"; break; |
1046 | 0 | case ImmTyOffset0: OS << "Offset0"; break; |
1047 | 0 | case ImmTyOffset1: OS << "Offset1"; break; |
1048 | 0 | case ImmTySMEMOffsetMod: OS << "SMEMOffsetMod"; break; |
1049 | 0 | case ImmTyCPol: OS << "CPol"; break; |
1050 | 0 | case ImmTyTFE: OS << "TFE"; break; |
1051 | 0 | case ImmTyD16: OS << "D16"; break; |
1052 | 0 | case ImmTyFORMAT: OS << "FORMAT"; break; |
1053 | 0 | case ImmTyClampSI: OS << "ClampSI"; break; |
1054 | 0 | case ImmTyOModSI: OS << "OModSI"; break; |
1055 | 0 | case ImmTyDPP8: OS << "DPP8"; break; |
1056 | 0 | case ImmTyDppCtrl: OS << "DppCtrl"; break; |
1057 | 0 | case ImmTyDppRowMask: OS << "DppRowMask"; break; |
1058 | 0 | case ImmTyDppBankMask: OS << "DppBankMask"; break; |
1059 | 0 | case ImmTyDppBoundCtrl: OS << "DppBoundCtrl"; break; |
1060 | 0 | case ImmTyDppFI: OS << "DppFI"; break; |
1061 | 0 | case ImmTySDWADstSel: OS << "SDWADstSel"; break; |
1062 | 0 | case ImmTySDWASrc0Sel: OS << "SDWASrc0Sel"; break; |
1063 | 0 | case ImmTySDWASrc1Sel: OS << "SDWASrc1Sel"; break; |
1064 | 0 | case ImmTySDWADstUnused: OS << "SDWADstUnused"; break; |
1065 | 0 | case ImmTyDMask: OS << "DMask"; break; |
1066 | 0 | case ImmTyDim: OS << "Dim"; break; |
1067 | 0 | case ImmTyUNorm: OS << "UNorm"; break; |
1068 | 0 | case ImmTyDA: OS << "DA"; break; |
1069 | 0 | case ImmTyR128A16: OS << "R128A16"; break; |
1070 | 0 | case ImmTyA16: OS << "A16"; break; |
1071 | 0 | case ImmTyLWE: OS << "LWE"; break; |
1072 | 0 | case ImmTyOff: OS << "Off"; break; |
1073 | 0 | case ImmTyExpTgt: OS << "ExpTgt"; break; |
1074 | 0 | case ImmTyExpCompr: OS << "ExpCompr"; break; |
1075 | 0 | case ImmTyExpVM: OS << "ExpVM"; break; |
1076 | 0 | case ImmTyHwreg: OS << "Hwreg"; break; |
1077 | 0 | case ImmTySendMsg: OS << "SendMsg"; break; |
1078 | 0 | case ImmTyInterpSlot: OS << "InterpSlot"; break; |
1079 | 0 | case ImmTyInterpAttr: OS << "InterpAttr"; break; |
1080 | 0 | case ImmTyInterpAttrChan: OS << "InterpAttrChan"; break; |
1081 | 0 | case ImmTyOpSel: OS << "OpSel"; break; |
1082 | 0 | case ImmTyOpSelHi: OS << "OpSelHi"; break; |
1083 | 0 | case ImmTyNegLo: OS << "NegLo"; break; |
1084 | 0 | case ImmTyNegHi: OS << "NegHi"; break; |
1085 | 0 | case ImmTySwizzle: OS << "Swizzle"; break; |
1086 | 0 | case ImmTyGprIdxMode: OS << "GprIdxMode"; break; |
1087 | 0 | case ImmTyHigh: OS << "High"; break; |
1088 | 0 | case ImmTyBLGP: OS << "BLGP"; break; |
1089 | 0 | case ImmTyCBSZ: OS << "CBSZ"; break; |
1090 | 0 | case ImmTyABID: OS << "ABID"; break; |
1091 | 0 | case ImmTyEndpgm: OS << "Endpgm"; break; |
1092 | 0 | case ImmTyWaitVDST: OS << "WaitVDST"; break; |
1093 | 0 | case ImmTyWaitEXP: OS << "WaitEXP"; break; |
1094 | 0 | case ImmTyWaitVAVDst: OS << "WaitVAVDst"; break; |
1095 | 0 | case ImmTyWaitVMVSrc: OS << "WaitVMVSrc"; break; |
1096 | 0 | } |
1097 | | // clang-format on |
1098 | 0 | } |
1099 | | |
1100 | 0 | void print(raw_ostream &OS) const override { |
1101 | 0 | switch (Kind) { |
1102 | 0 | case Register: |
1103 | 0 | OS << "<register " << getReg() << " mods: " << Reg.Mods << '>'; |
1104 | 0 | break; |
1105 | 0 | case Immediate: |
1106 | 0 | OS << '<' << getImm(); |
1107 | 0 | if (getImmTy() != ImmTyNone) { |
1108 | 0 | OS << " type: "; printImmTy(OS, getImmTy()); |
1109 | 0 | } |
1110 | 0 | OS << " mods: " << Imm.Mods << '>'; |
1111 | 0 | break; |
1112 | 0 | case Token: |
1113 | 0 | OS << '\'' << getToken() << '\''; |
1114 | 0 | break; |
1115 | 0 | case Expression: |
1116 | 0 | OS << "<expr " << *Expr << '>'; |
1117 | 0 | break; |
1118 | 0 | } |
1119 | 0 | } |
1120 | | |
1121 | | static AMDGPUOperand::Ptr CreateImm(const AMDGPUAsmParser *AsmParser, |
1122 | | int64_t Val, SMLoc Loc, |
1123 | | ImmTy Type = ImmTyNone, |
1124 | 0 | bool IsFPImm = false) { |
1125 | 0 | auto Op = std::make_unique<AMDGPUOperand>(Immediate, AsmParser); |
1126 | 0 | Op->Imm.Val = Val; |
1127 | 0 | Op->Imm.IsFPImm = IsFPImm; |
1128 | 0 | Op->Imm.Kind = ImmKindTyNone; |
1129 | 0 | Op->Imm.Type = Type; |
1130 | 0 | Op->Imm.Mods = Modifiers(); |
1131 | 0 | Op->StartLoc = Loc; |
1132 | 0 | Op->EndLoc = Loc; |
1133 | 0 | return Op; |
1134 | 0 | } |
1135 | | |
1136 | | static AMDGPUOperand::Ptr CreateToken(const AMDGPUAsmParser *AsmParser, |
1137 | | StringRef Str, SMLoc Loc, |
1138 | 0 | bool HasExplicitEncodingSize = true) { |
1139 | 0 | auto Res = std::make_unique<AMDGPUOperand>(Token, AsmParser); |
1140 | 0 | Res->Tok.Data = Str.data(); |
1141 | 0 | Res->Tok.Length = Str.size(); |
1142 | 0 | Res->StartLoc = Loc; |
1143 | 0 | Res->EndLoc = Loc; |
1144 | 0 | return Res; |
1145 | 0 | } |
1146 | | |
1147 | | static AMDGPUOperand::Ptr CreateReg(const AMDGPUAsmParser *AsmParser, |
1148 | | unsigned RegNo, SMLoc S, |
1149 | 0 | SMLoc E) { |
1150 | 0 | auto Op = std::make_unique<AMDGPUOperand>(Register, AsmParser); |
1151 | 0 | Op->Reg.RegNo = RegNo; |
1152 | 0 | Op->Reg.Mods = Modifiers(); |
1153 | 0 | Op->StartLoc = S; |
1154 | 0 | Op->EndLoc = E; |
1155 | 0 | return Op; |
1156 | 0 | } |
1157 | | |
1158 | | static AMDGPUOperand::Ptr CreateExpr(const AMDGPUAsmParser *AsmParser, |
1159 | 0 | const class MCExpr *Expr, SMLoc S) { |
1160 | 0 | auto Op = std::make_unique<AMDGPUOperand>(Expression, AsmParser); |
1161 | 0 | Op->Expr = Expr; |
1162 | 0 | Op->StartLoc = S; |
1163 | 0 | Op->EndLoc = S; |
1164 | 0 | return Op; |
1165 | 0 | } |
1166 | | }; |
1167 | | |
1168 | 0 | raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods) { |
1169 | 0 | OS << "abs:" << Mods.Abs << " neg: " << Mods.Neg << " sext:" << Mods.Sext; |
1170 | 0 | return OS; |
1171 | 0 | } |
1172 | | |
1173 | | //===----------------------------------------------------------------------===// |
1174 | | // AsmParser |
1175 | | //===----------------------------------------------------------------------===// |
1176 | | |
1177 | | // Holds info related to the current kernel, e.g. count of SGPRs used. |
1178 | | // Kernel scope begins at .amdgpu_hsa_kernel directive, ends at next |
1179 | | // .amdgpu_hsa_kernel or at EOF. |
1180 | | class KernelScopeInfo { |
1181 | | int SgprIndexUnusedMin = -1; |
1182 | | int VgprIndexUnusedMin = -1; |
1183 | | int AgprIndexUnusedMin = -1; |
1184 | | MCContext *Ctx = nullptr; |
1185 | | MCSubtargetInfo const *MSTI = nullptr; |
1186 | | |
1187 | 0 | void usesSgprAt(int i) { |
1188 | 0 | if (i >= SgprIndexUnusedMin) { |
1189 | 0 | SgprIndexUnusedMin = ++i; |
1190 | 0 | if (Ctx) { |
1191 | 0 | MCSymbol* const Sym = |
1192 | 0 | Ctx->getOrCreateSymbol(Twine(".kernel.sgpr_count")); |
1193 | 0 | Sym->setVariableValue(MCConstantExpr::create(SgprIndexUnusedMin, *Ctx)); |
1194 | 0 | } |
1195 | 0 | } |
1196 | 0 | } |
1197 | | |
1198 | 0 | void usesVgprAt(int i) { |
1199 | 0 | if (i >= VgprIndexUnusedMin) { |
1200 | 0 | VgprIndexUnusedMin = ++i; |
1201 | 0 | if (Ctx) { |
1202 | 0 | MCSymbol* const Sym = |
1203 | 0 | Ctx->getOrCreateSymbol(Twine(".kernel.vgpr_count")); |
1204 | 0 | int totalVGPR = getTotalNumVGPRs(isGFX90A(*MSTI), AgprIndexUnusedMin, |
1205 | 0 | VgprIndexUnusedMin); |
1206 | 0 | Sym->setVariableValue(MCConstantExpr::create(totalVGPR, *Ctx)); |
1207 | 0 | } |
1208 | 0 | } |
1209 | 0 | } |
1210 | | |
1211 | 0 | void usesAgprAt(int i) { |
1212 | | // Instruction will error in AMDGPUAsmParser::MatchAndEmitInstruction |
1213 | 0 | if (!hasMAIInsts(*MSTI)) |
1214 | 0 | return; |
1215 | | |
1216 | 0 | if (i >= AgprIndexUnusedMin) { |
1217 | 0 | AgprIndexUnusedMin = ++i; |
1218 | 0 | if (Ctx) { |
1219 | 0 | MCSymbol* const Sym = |
1220 | 0 | Ctx->getOrCreateSymbol(Twine(".kernel.agpr_count")); |
1221 | 0 | Sym->setVariableValue(MCConstantExpr::create(AgprIndexUnusedMin, *Ctx)); |
1222 | | |
1223 | | // Also update vgpr_count (dependent on agpr_count for gfx908/gfx90a) |
1224 | 0 | MCSymbol* const vSym = |
1225 | 0 | Ctx->getOrCreateSymbol(Twine(".kernel.vgpr_count")); |
1226 | 0 | int totalVGPR = getTotalNumVGPRs(isGFX90A(*MSTI), AgprIndexUnusedMin, |
1227 | 0 | VgprIndexUnusedMin); |
1228 | 0 | vSym->setVariableValue(MCConstantExpr::create(totalVGPR, *Ctx)); |
1229 | 0 | } |
1230 | 0 | } |
1231 | 0 | } |
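// Editorial note (a sketch of the rationale, not upstream text): on gfx90a the
// AGPRs and VGPRs share one physical register file, so the reported total is
// effectively the combined count, while on gfx908 the files are separate and
// the larger of the two counts matters; that is why .kernel.vgpr_count is
// refreshed here whenever the AGPR high-water mark moves.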
1232 | | |
1233 | | public: |
1234 | 0 | KernelScopeInfo() = default; |
1235 | | |
1236 | 0 | void initialize(MCContext &Context) { |
1237 | 0 | Ctx = &Context; |
1238 | 0 | MSTI = Ctx->getSubtargetInfo(); |
1239 | |
1240 | 0 | usesSgprAt(SgprIndexUnusedMin = -1); |
1241 | 0 | usesVgprAt(VgprIndexUnusedMin = -1); |
1242 | 0 | if (hasMAIInsts(*MSTI)) { |
1243 | 0 | usesAgprAt(AgprIndexUnusedMin = -1); |
1244 | 0 | } |
1245 | 0 | } |
1246 | | |
1247 | | void usesRegister(RegisterKind RegKind, unsigned DwordRegIndex, |
1248 | 0 | unsigned RegWidth) { |
1249 | 0 | switch (RegKind) { |
1250 | 0 | case IS_SGPR: |
1251 | 0 | usesSgprAt(DwordRegIndex + divideCeil(RegWidth, 32) - 1); |
1252 | 0 | break; |
1253 | 0 | case IS_AGPR: |
1254 | 0 | usesAgprAt(DwordRegIndex + divideCeil(RegWidth, 32) - 1); |
1255 | 0 | break; |
1256 | 0 | case IS_VGPR: |
1257 | 0 | usesVgprAt(DwordRegIndex + divideCeil(RegWidth, 32) - 1); |
1258 | 0 | break; |
1259 | 0 | default: |
1260 | 0 | break; |
1261 | 0 | } |
1262 | 0 | } |
1263 | | }; |
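// Editorial worked example, not part of the upstream source: after parsing a
// register operand such as v[4:5], the register parser reports it via
// usesRegister(IS_VGPR, /*DwordRegIndex=*/4, /*RegWidth=*/64); that marks VGPR
// index 4 + divideCeil(64, 32) - 1 = 5 as used, so VgprIndexUnusedMin becomes
// 6 and the .kernel.vgpr_count symbol is updated through getTotalNumVGPRs().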
1264 | | |
1265 | | class AMDGPUAsmParser : public MCTargetAsmParser { |
1266 | | MCAsmParser &Parser; |
1267 | | |
1268 | | unsigned ForcedEncodingSize = 0; |
1269 | | bool ForcedDPP = false; |
1270 | | bool ForcedSDWA = false; |
1271 | | KernelScopeInfo KernelScope; |
1272 | | |
1273 | | /// @name Auto-generated Match Functions |
1274 | | /// { |
1275 | | |
1276 | | #define GET_ASSEMBLER_HEADER |
1277 | | #include "AMDGPUGenAsmMatcher.inc" |
1278 | | |
1279 | | /// } |
1280 | | |
1281 | | private: |
1282 | | bool ParseAsAbsoluteExpression(uint32_t &Ret); |
1283 | | bool OutOfRangeError(SMRange Range); |
1284 | | /// Calculate VGPR/SGPR blocks required for the given target, reserved |
1285 | | /// registers, and user-specified NextFreeXGPR values. |
1286 | | /// |
1287 | | /// \param Features [in] Target features, used for bug corrections. |
1288 | | /// \param VCCUsed [in] Whether VCC special SGPR is reserved. |
1289 | | /// \param FlatScrUsed [in] Whether FLAT_SCRATCH special SGPR is reserved. |
1290 | | /// \param XNACKUsed [in] Whether XNACK_MASK special SGPR is reserved. |
1291 | | /// \param EnableWavefrontSize32 [in] Value of ENABLE_WAVEFRONT_SIZE32 kernel |
1292 | | /// descriptor field, if valid. |
1293 | | /// \param NextFreeVGPR [in] Max VGPR number referenced, plus one. |
1294 | | /// \param VGPRRange [in] Token range, used for VGPR diagnostics. |
1295 | | /// \param NextFreeSGPR [in] Max SGPR number referenced, plus one. |
1296 | | /// \param SGPRRange [in] Token range, used for SGPR diagnostics. |
1297 | | /// \param VGPRBlocks [out] Result VGPR block count. |
1298 | | /// \param SGPRBlocks [out] Result SGPR block count. |
1299 | | bool calculateGPRBlocks(const FeatureBitset &Features, bool VCCUsed, |
1300 | | bool FlatScrUsed, bool XNACKUsed, |
1301 | | std::optional<bool> EnableWavefrontSize32, |
1302 | | unsigned NextFreeVGPR, SMRange VGPRRange, |
1303 | | unsigned NextFreeSGPR, SMRange SGPRRange, |
1304 | | unsigned &VGPRBlocks, unsigned &SGPRBlocks); |
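// Editorial sketch of the block arithmetic (granule size assumed, not taken
// from this excerpt): the *Blocks outputs are the granulated counts stored in
// the kernel descriptor. With a VGPR allocation granule of 4 (typical for
// wave64 targets) and NextFreeVGPR = 10, VGPRBlocks would come out as
// ceil(10 / 4) - 1 = 2; SGPRs are granulated the same way using their own
// per-target granule.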
1305 | | bool ParseDirectiveAMDGCNTarget(); |
1306 | | bool ParseDirectiveAMDHSAKernel(); |
1307 | | bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor); |
1308 | | bool ParseDirectiveHSACodeObjectVersion(); |
1309 | | bool ParseDirectiveHSACodeObjectISA(); |
1310 | | bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header); |
1311 | | bool ParseDirectiveAMDKernelCodeT(); |
1312 | | // TODO: Possibly make subtargetHasRegister const. |
1313 | | bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo); |
1314 | | bool ParseDirectiveAMDGPUHsaKernel(); |
1315 | | |
1316 | | bool ParseDirectiveISAVersion(); |
1317 | | bool ParseDirectiveHSAMetadata(); |
1318 | | bool ParseDirectivePALMetadataBegin(); |
1319 | | bool ParseDirectivePALMetadata(); |
1320 | | bool ParseDirectiveAMDGPULDS(); |
1321 | | |
1322 | | /// Common code to parse out a block of text (typically YAML) between start and |
1323 | | /// end directives. |
1324 | | bool ParseToEndDirective(const char *AssemblerDirectiveBegin, |
1325 | | const char *AssemblerDirectiveEnd, |
1326 | | std::string &CollectString); |
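// Editorial illustration (directive names assumed from AMDGPU assembler
// conventions, not shown in this excerpt): this helper backs paired block
// directives such as
//   .amdgpu_metadata
//   amdhsa.version:
//     - 1
//     - 2
//   .end_amdgpu_metadata
// where the raw text between the begin and end directives is accumulated into
// CollectString before being handed to the target streamer.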
1327 | | |
1328 | | bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, |
1329 | | RegisterKind RegKind, unsigned Reg1, SMLoc Loc); |
1330 | | bool ParseAMDGPURegister(RegisterKind &RegKind, unsigned &Reg, |
1331 | | unsigned &RegNum, unsigned &RegWidth, |
1332 | | bool RestoreOnFailure = false); |
1333 | | bool ParseAMDGPURegister(RegisterKind &RegKind, unsigned &Reg, |
1334 | | unsigned &RegNum, unsigned &RegWidth, |
1335 | | SmallVectorImpl<AsmToken> &Tokens); |
1336 | | unsigned ParseRegularReg(RegisterKind &RegKind, unsigned &RegNum, |
1337 | | unsigned &RegWidth, |
1338 | | SmallVectorImpl<AsmToken> &Tokens); |
1339 | | unsigned ParseSpecialReg(RegisterKind &RegKind, unsigned &RegNum, |
1340 | | unsigned &RegWidth, |
1341 | | SmallVectorImpl<AsmToken> &Tokens); |
1342 | | unsigned ParseRegList(RegisterKind &RegKind, unsigned &RegNum, |
1343 | | unsigned &RegWidth, SmallVectorImpl<AsmToken> &Tokens); |
1344 | | bool ParseRegRange(unsigned& Num, unsigned& Width); |
1345 | | unsigned getRegularReg(RegisterKind RegKind, unsigned RegNum, unsigned SubReg, |
1346 | | unsigned RegWidth, SMLoc Loc); |
1347 | | |
1348 | | bool isRegister(); |
1349 | | bool isRegister(const AsmToken &Token, const AsmToken &NextToken) const; |
1350 | | std::optional<StringRef> getGprCountSymbolName(RegisterKind RegKind); |
1351 | | void initializeGprCountSymbol(RegisterKind RegKind); |
1352 | | bool updateGprCountSymbols(RegisterKind RegKind, unsigned DwordRegIndex, |
1353 | | unsigned RegWidth); |
1354 | | void cvtMubufImpl(MCInst &Inst, const OperandVector &Operands, |
1355 | | bool IsAtomic); |
1356 | | |
1357 | | public: |
1358 | | enum AMDGPUMatchResultTy { |
1359 | | Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY |
1360 | | }; |
1361 | | enum OperandMode { |
1362 | | OperandMode_Default, |
1363 | | OperandMode_NSA, |
1364 | | }; |
1365 | | |
1366 | | using OptionalImmIndexMap = std::map<AMDGPUOperand::ImmTy, unsigned>; |
1367 | | |
1368 | | AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser, |
1369 | | const MCInstrInfo &MII, |
1370 | | const MCTargetOptions &Options) |
1371 | 0 | : MCTargetAsmParser(Options, STI, MII), Parser(_Parser) { |
1372 | 0 | MCAsmParserExtension::Initialize(Parser); |
1373 | |
|
1374 | 0 | if (getFeatureBits().none()) { |
1375 | | // Set default features. |
1376 | 0 | copySTI().ToggleFeature("southern-islands"); |
1377 | 0 | } |
1378 | |
1379 | 0 | setAvailableFeatures(ComputeAvailableFeatures(getFeatureBits())); |
1380 | |
1381 | 0 | { |
1382 | | // TODO: make those pre-defined variables read-only. |
1383 | | // Currently there is no suitable machinery in the core llvm-mc for this. |
1384 | | // MCSymbol::isRedefinable is intended for another purpose, and |
1385 | | // AsmParser::parseDirectiveSet() cannot be specialized for a specific target. |
1386 | 0 | AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(getSTI().getCPU()); |
1387 | 0 | MCContext &Ctx = getContext(); |
1388 | 0 | if (ISA.Major >= 6 && isHsaAbi(getSTI())) { |
1389 | 0 | MCSymbol *Sym = |
1390 | 0 | Ctx.getOrCreateSymbol(Twine(".amdgcn.gfx_generation_number")); |
1391 | 0 | Sym->setVariableValue(MCConstantExpr::create(ISA.Major, Ctx)); |
1392 | 0 | Sym = Ctx.getOrCreateSymbol(Twine(".amdgcn.gfx_generation_minor")); |
1393 | 0 | Sym->setVariableValue(MCConstantExpr::create(ISA.Minor, Ctx)); |
1394 | 0 | Sym = Ctx.getOrCreateSymbol(Twine(".amdgcn.gfx_generation_stepping")); |
1395 | 0 | Sym->setVariableValue(MCConstantExpr::create(ISA.Stepping, Ctx)); |
1396 | 0 | } else { |
1397 | 0 | MCSymbol *Sym = |
1398 | 0 | Ctx.getOrCreateSymbol(Twine(".option.machine_version_major")); |
1399 | 0 | Sym->setVariableValue(MCConstantExpr::create(ISA.Major, Ctx)); |
1400 | 0 | Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_minor")); |
1401 | 0 | Sym->setVariableValue(MCConstantExpr::create(ISA.Minor, Ctx)); |
1402 | 0 | Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_stepping")); |
1403 | 0 | Sym->setVariableValue(MCConstantExpr::create(ISA.Stepping, Ctx)); |
1404 | 0 | } |
1405 | 0 | if (ISA.Major >= 6 && isHsaAbi(getSTI())) { |
1406 | 0 | initializeGprCountSymbol(IS_VGPR); |
1407 | 0 | initializeGprCountSymbol(IS_SGPR); |
1408 | 0 | } else |
1409 | 0 | KernelScope.initialize(getContext()); |
1410 | 0 | } |
1411 | 0 | } |
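// Editorial usage note (assumed example, not from the upstream source): the
// pre-defined variable symbols created above let assembly input query the
// target, e.g.
//   .if .amdgcn.gfx_generation_number >= 9
//     s_nop 0
//   .endif
// via the generic MC ".if" directive (or the .option.machine_version_* symbols
// when the HSA OS ABI is not in use), since each symbol holds a constant value
// derived from the subtarget's ISA version.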
1412 | | |
1413 | 0 | bool hasMIMG_R128() const { |
1414 | 0 | return AMDGPU::hasMIMG_R128(getSTI()); |
1415 | 0 | } |
1416 | | |
1417 | 0 | bool hasPackedD16() const { |
1418 | 0 | return AMDGPU::hasPackedD16(getSTI()); |
1419 | 0 | } |
1420 | | |
1421 | 0 | bool hasA16() const { return AMDGPU::hasA16(getSTI()); } |
1422 | | |
1423 | 0 | bool hasG16() const { return AMDGPU::hasG16(getSTI()); } |
1424 | | |
1425 | 0 | bool hasGDS() const { return AMDGPU::hasGDS(getSTI()); } |
1426 | | |
1427 | 0 | bool isSI() const { |
1428 | 0 | return AMDGPU::isSI(getSTI()); |
1429 | 0 | } |
1430 | | |
1431 | 0 | bool isCI() const { |
1432 | 0 | return AMDGPU::isCI(getSTI()); |
1433 | 0 | } |
1434 | | |
1435 | 0 | bool isVI() const { |
1436 | 0 | return AMDGPU::isVI(getSTI()); |
1437 | 0 | } |
1438 | | |
1439 | 0 | bool isGFX9() const { |
1440 | 0 | return AMDGPU::isGFX9(getSTI()); |
1441 | 0 | } |
1442 | | |
1443 | | // TODO: isGFX90A is also true for GFX940. We need to clean this up. |
1444 | 0 | bool isGFX90A() const { |
1445 | 0 | return AMDGPU::isGFX90A(getSTI()); |
1446 | 0 | } |
1447 | | |
1448 | 0 | bool isGFX940() const { |
1449 | 0 | return AMDGPU::isGFX940(getSTI()); |
1450 | 0 | } |
1451 | | |
1452 | 0 | bool isGFX9Plus() const { |
1453 | 0 | return AMDGPU::isGFX9Plus(getSTI()); |
1454 | 0 | } |
1455 | | |
1456 | 0 | bool isGFX10() const { |
1457 | 0 | return AMDGPU::isGFX10(getSTI()); |
1458 | 0 | } |
1459 | | |
1460 | 0 | bool isGFX10Plus() const { return AMDGPU::isGFX10Plus(getSTI()); } |
1461 | | |
1462 | 0 | bool isGFX11() const { |
1463 | 0 | return AMDGPU::isGFX11(getSTI()); |
1464 | 0 | } |
1465 | | |
1466 | 0 | bool isGFX11Plus() const { |
1467 | 0 | return AMDGPU::isGFX11Plus(getSTI()); |
1468 | 0 | } |
1469 | | |
1470 | 0 | bool isGFX12() const { return AMDGPU::isGFX12(getSTI()); } |
1471 | | |
1472 | 0 | bool isGFX12Plus() const { return AMDGPU::isGFX12Plus(getSTI()); } |
1473 | | |
1474 | 0 | bool isGFX10_AEncoding() const { return AMDGPU::isGFX10_AEncoding(getSTI()); } |
1475 | | |
1476 | 0 | bool isGFX10_BEncoding() const { |
1477 | 0 | return AMDGPU::isGFX10_BEncoding(getSTI()); |
1478 | 0 | } |
1479 | | |
1480 | 0 | bool hasInv2PiInlineImm() const { |
1481 | 0 | return getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm]; |
1482 | 0 | } |
1483 | | |
1484 | 0 | bool hasFlatOffsets() const { |
1485 | 0 | return getFeatureBits()[AMDGPU::FeatureFlatInstOffsets]; |
1486 | 0 | } |
1487 | | |
1488 | 0 | bool hasArchitectedFlatScratch() const { |
1489 | 0 | return getFeatureBits()[AMDGPU::FeatureArchitectedFlatScratch]; |
1490 | 0 | } |
1491 | | |
1492 | 0 | bool hasSGPR102_SGPR103() const { |
1493 | 0 | return !isVI() && !isGFX9(); |
1494 | 0 | } |
1495 | | |
1496 | 0 | bool hasSGPR104_SGPR105() const { return isGFX10Plus(); } |
1497 | | |
1498 | 0 | bool hasIntClamp() const { |
1499 | 0 | return getFeatureBits()[AMDGPU::FeatureIntClamp]; |
1500 | 0 | } |
1501 | | |
1502 | 0 | bool hasPartialNSAEncoding() const { |
1503 | 0 | return getFeatureBits()[AMDGPU::FeaturePartialNSAEncoding]; |
1504 | 0 | } |
1505 | | |
1506 | 0 | unsigned getNSAMaxSize(bool HasSampler = false) const { |
1507 | 0 | return AMDGPU::getNSAMaxSize(getSTI(), HasSampler); |
1508 | 0 | } |
1509 | | |
1510 | 0 | unsigned getMaxNumUserSGPRs() const { |
1511 | 0 | return AMDGPU::getMaxNumUserSGPRs(getSTI()); |
1512 | 0 | } |
1513 | | |
1514 | 0 | bool hasKernargPreload() const { return AMDGPU::hasKernargPreload(getSTI()); } |
1515 | | |
1516 | 0 | AMDGPUTargetStreamer &getTargetStreamer() { |
1517 | 0 | MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer(); |
1518 | 0 | return static_cast<AMDGPUTargetStreamer &>(TS); |
1519 | 0 | } |
1520 | | |
1521 | 0 | const MCRegisterInfo *getMRI() const { |
1522 | | // We need this const_cast because for some reason getContext() is not const |
1523 | | // in MCAsmParser. |
1524 | 0 | return const_cast<AMDGPUAsmParser*>(this)->getContext().getRegisterInfo(); |
1525 | 0 | } |
1526 | | |
1527 | 0 | const MCInstrInfo *getMII() const { |
1528 | 0 | return &MII; |
1529 | 0 | } |
1530 | | |
1531 | 0 | const FeatureBitset &getFeatureBits() const { |
1532 | 0 | return getSTI().getFeatureBits(); |
1533 | 0 | } |
1534 | | |
1535 | 0 | void setForcedEncodingSize(unsigned Size) { ForcedEncodingSize = Size; } |
1536 | 0 | void setForcedDPP(bool ForceDPP_) { ForcedDPP = ForceDPP_; } |
1537 | 0 | void setForcedSDWA(bool ForceSDWA_) { ForcedSDWA = ForceSDWA_; } |
1538 | | |
1539 | 0 | unsigned getForcedEncodingSize() const { return ForcedEncodingSize; } |
1540 | 0 | bool isForcedVOP3() const { return ForcedEncodingSize == 64; } |
1541 | 0 | bool isForcedDPP() const { return ForcedDPP; } |
1542 | 0 | bool isForcedSDWA() const { return ForcedSDWA; } |
1543 | | ArrayRef<unsigned> getMatchedVariants() const; |
1544 | | StringRef getMatchedVariantName() const; |
1545 | | |
1546 | | std::unique_ptr<AMDGPUOperand> parseRegister(bool RestoreOnFailure = false); |
1547 | | bool ParseRegister(MCRegister &RegNo, SMLoc &StartLoc, SMLoc &EndLoc, |
1548 | | bool RestoreOnFailure); |
1549 | | bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override; |
1550 | | ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc, |
1551 | | SMLoc &EndLoc) override; |
1552 | | unsigned checkTargetMatchPredicate(MCInst &Inst) override; |
1553 | | unsigned validateTargetOperandClass(MCParsedAsmOperand &Op, |
1554 | | unsigned Kind) override; |
1555 | | bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, |
1556 | | OperandVector &Operands, MCStreamer &Out, |
1557 | | uint64_t &ErrorInfo, |
1558 | | bool MatchingInlineAsm) override; |
1559 | | bool ParseDirective(AsmToken DirectiveID) override; |
1560 | | ParseStatus parseOperand(OperandVector &Operands, StringRef Mnemonic, |
1561 | | OperandMode Mode = OperandMode_Default); |
1562 | | StringRef parseMnemonicSuffix(StringRef Name); |
1563 | | bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name, |
1564 | | SMLoc NameLoc, OperandVector &Operands) override; |
1565 | | //bool ProcessInstruction(MCInst &Inst); |
1566 | | |
1567 | | ParseStatus parseTokenOp(StringRef Name, OperandVector &Operands); |
1568 | | |
1569 | | ParseStatus parseIntWithPrefix(const char *Prefix, int64_t &Int); |
1570 | | |
1571 | | ParseStatus |
1572 | | parseIntWithPrefix(const char *Prefix, OperandVector &Operands, |
1573 | | AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone, |
1574 | | std::function<bool(int64_t &)> ConvertResult = nullptr); |
1575 | | |
1576 | | ParseStatus parseOperandArrayWithPrefix( |
1577 | | const char *Prefix, OperandVector &Operands, |
1578 | | AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone, |
1579 | | bool (*ConvertResult)(int64_t &) = nullptr); |
1580 | | |
1581 | | ParseStatus |
1582 | | parseNamedBit(StringRef Name, OperandVector &Operands, |
1583 | | AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone); |
1584 | | unsigned getCPolKind(StringRef Id, StringRef Mnemo, bool &Disabling) const; |
1585 | | ParseStatus parseCPol(OperandVector &Operands); |
1586 | | ParseStatus parseScope(OperandVector &Operands, int64_t &Scope); |
1587 | | ParseStatus parseTH(OperandVector &Operands, int64_t &TH); |
1588 | | ParseStatus parseStringWithPrefix(StringRef Prefix, StringRef &Value, |
1589 | | SMLoc &StringLoc); |
1590 | | |
1591 | | bool isModifier(); |
1592 | | bool isOperandModifier(const AsmToken &Token, const AsmToken &NextToken) const; |
1593 | | bool isRegOrOperandModifier(const AsmToken &Token, const AsmToken &NextToken) const; |
1594 | | bool isNamedOperandModifier(const AsmToken &Token, const AsmToken &NextToken) const; |
1595 | | bool isOpcodeModifierWithVal(const AsmToken &Token, const AsmToken &NextToken) const; |
1596 | | bool parseSP3NegModifier(); |
1597 | | ParseStatus parseImm(OperandVector &Operands, bool HasSP3AbsModifier = false, |
1598 | | bool HasLit = false); |
1599 | | ParseStatus parseReg(OperandVector &Operands); |
1600 | | ParseStatus parseRegOrImm(OperandVector &Operands, bool HasSP3AbsMod = false, |
1601 | | bool HasLit = false); |
1602 | | ParseStatus parseRegOrImmWithFPInputMods(OperandVector &Operands, |
1603 | | bool AllowImm = true); |
1604 | | ParseStatus parseRegOrImmWithIntInputMods(OperandVector &Operands, |
1605 | | bool AllowImm = true); |
1606 | | ParseStatus parseRegWithFPInputMods(OperandVector &Operands); |
1607 | | ParseStatus parseRegWithIntInputMods(OperandVector &Operands); |
1608 | | ParseStatus parseVReg32OrOff(OperandVector &Operands); |
1609 | | ParseStatus parseDfmtNfmt(int64_t &Format); |
1610 | | ParseStatus parseUfmt(int64_t &Format); |
1611 | | ParseStatus parseSymbolicSplitFormat(StringRef FormatStr, SMLoc Loc, |
1612 | | int64_t &Format); |
1613 | | ParseStatus parseSymbolicUnifiedFormat(StringRef FormatStr, SMLoc Loc, |
1614 | | int64_t &Format); |
1615 | | ParseStatus parseFORMAT(OperandVector &Operands); |
1616 | | ParseStatus parseSymbolicOrNumericFormat(int64_t &Format); |
1617 | | ParseStatus parseNumericFormat(int64_t &Format); |
1618 | | ParseStatus parseFlatOffset(OperandVector &Operands); |
1619 | | ParseStatus parseR128A16(OperandVector &Operands); |
1620 | | ParseStatus parseBLGP(OperandVector &Operands); |
1621 | | bool tryParseFmt(const char *Pref, int64_t MaxVal, int64_t &Val); |
1622 | | bool matchDfmtNfmt(int64_t &Dfmt, int64_t &Nfmt, StringRef FormatStr, SMLoc Loc); |
1623 | | |
1624 | | void cvtExp(MCInst &Inst, const OperandVector &Operands); |
1625 | | |
1626 | | bool parseCnt(int64_t &IntVal); |
1627 | | ParseStatus parseSWaitCnt(OperandVector &Operands); |
1628 | | |
1629 | | bool parseDepCtr(int64_t &IntVal, unsigned &Mask); |
1630 | | void depCtrError(SMLoc Loc, int ErrorId, StringRef DepCtrName); |
1631 | | ParseStatus parseDepCtr(OperandVector &Operands); |
1632 | | |
1633 | | bool parseDelay(int64_t &Delay); |
1634 | | ParseStatus parseSDelayALU(OperandVector &Operands); |
1635 | | |
1636 | | ParseStatus parseHwreg(OperandVector &Operands); |
1637 | | |
1638 | | private: |
1639 | | struct OperandInfoTy { |
1640 | | SMLoc Loc; |
1641 | | int64_t Id; |
1642 | | bool IsSymbolic = false; |
1643 | | bool IsDefined = false; |
1644 | | |
1645 | 0 | OperandInfoTy(int64_t Id_) : Id(Id_) {} |
1646 | | }; |
1647 | | |
1648 | | bool parseSendMsgBody(OperandInfoTy &Msg, OperandInfoTy &Op, OperandInfoTy &Stream); |
1649 | | bool validateSendMsg(const OperandInfoTy &Msg, |
1650 | | const OperandInfoTy &Op, |
1651 | | const OperandInfoTy &Stream); |
1652 | | |
1653 | | bool parseHwregBody(OperandInfoTy &HwReg, |
1654 | | OperandInfoTy &Offset, |
1655 | | OperandInfoTy &Width); |
1656 | | bool validateHwreg(const OperandInfoTy &HwReg, |
1657 | | const OperandInfoTy &Offset, |
1658 | | const OperandInfoTy &Width); |
1659 | | |
1660 | | SMLoc getFlatOffsetLoc(const OperandVector &Operands) const; |
1661 | | SMLoc getSMEMOffsetLoc(const OperandVector &Operands) const; |
1662 | | SMLoc getBLGPLoc(const OperandVector &Operands) const; |
1663 | | |
1664 | | SMLoc getOperandLoc(std::function<bool(const AMDGPUOperand&)> Test, |
1665 | | const OperandVector &Operands) const; |
1666 | | SMLoc getImmLoc(AMDGPUOperand::ImmTy Type, const OperandVector &Operands) const; |
1667 | | SMLoc getRegLoc(unsigned Reg, const OperandVector &Operands) const; |
1668 | | SMLoc getLitLoc(const OperandVector &Operands, |
1669 | | bool SearchMandatoryLiterals = false) const; |
1670 | | SMLoc getMandatoryLitLoc(const OperandVector &Operands) const; |
1671 | | SMLoc getConstLoc(const OperandVector &Operands) const; |
1672 | | SMLoc getInstLoc(const OperandVector &Operands) const; |
1673 | | |
1674 | | bool validateInstruction(const MCInst &Inst, const SMLoc &IDLoc, const OperandVector &Operands); |
1675 | | bool validateOffset(const MCInst &Inst, const OperandVector &Operands); |
1676 | | bool validateFlatOffset(const MCInst &Inst, const OperandVector &Operands); |
1677 | | bool validateSMEMOffset(const MCInst &Inst, const OperandVector &Operands); |
1678 | | bool validateSOPLiteral(const MCInst &Inst) const; |
1679 | | bool validateConstantBusLimitations(const MCInst &Inst, const OperandVector &Operands); |
1680 | | bool validateVOPDRegBankConstraints(const MCInst &Inst, |
1681 | | const OperandVector &Operands); |
1682 | | bool validateIntClampSupported(const MCInst &Inst); |
1683 | | bool validateMIMGAtomicDMask(const MCInst &Inst); |
1684 | | bool validateMIMGGatherDMask(const MCInst &Inst); |
1685 | | bool validateMovrels(const MCInst &Inst, const OperandVector &Operands); |
1686 | | bool validateMIMGDataSize(const MCInst &Inst, const SMLoc &IDLoc); |
1687 | | bool validateMIMGAddrSize(const MCInst &Inst, const SMLoc &IDLoc); |
1688 | | bool validateMIMGD16(const MCInst &Inst); |
1689 | | bool validateMIMGMSAA(const MCInst &Inst); |
1690 | | bool validateOpSel(const MCInst &Inst); |
1691 | | bool validateDPP(const MCInst &Inst, const OperandVector &Operands); |
1692 | | bool validateVccOperand(unsigned Reg) const; |
1693 | | bool validateVOPLiteral(const MCInst &Inst, const OperandVector &Operands); |
1694 | | bool validateMAIAccWrite(const MCInst &Inst, const OperandVector &Operands); |
1695 | | bool validateMAISrc2(const MCInst &Inst, const OperandVector &Operands); |
1696 | | bool validateMFMA(const MCInst &Inst, const OperandVector &Operands); |
1697 | | bool validateAGPRLdSt(const MCInst &Inst) const; |
1698 | | bool validateVGPRAlign(const MCInst &Inst) const; |
1699 | | bool validateBLGP(const MCInst &Inst, const OperandVector &Operands); |
1700 | | bool validateDS(const MCInst &Inst, const OperandVector &Operands); |
1701 | | bool validateGWS(const MCInst &Inst, const OperandVector &Operands); |
1702 | | bool validateDivScale(const MCInst &Inst); |
1703 | | bool validateWaitCnt(const MCInst &Inst, const OperandVector &Operands); |
1704 | | bool validateCoherencyBits(const MCInst &Inst, const OperandVector &Operands, |
1705 | | const SMLoc &IDLoc); |
1706 | | bool validateTHAndScopeBits(const MCInst &Inst, const OperandVector &Operands, |
1707 | | const unsigned CPol); |
1708 | | bool validateExeczVcczOperands(const OperandVector &Operands); |
1709 | | bool validateTFE(const MCInst &Inst, const OperandVector &Operands); |
1710 | | std::optional<StringRef> validateLdsDirect(const MCInst &Inst); |
1711 | | unsigned getConstantBusLimit(unsigned Opcode) const; |
1712 | | bool usesConstantBus(const MCInst &Inst, unsigned OpIdx); |
1713 | | bool isInlineConstant(const MCInst &Inst, unsigned OpIdx) const; |
1714 | | unsigned findImplicitSGPRReadInVOP(const MCInst &Inst) const; |
1715 | | |
1716 | | bool isSupportedMnemo(StringRef Mnemo, |
1717 | | const FeatureBitset &FBS); |
1718 | | bool isSupportedMnemo(StringRef Mnemo, |
1719 | | const FeatureBitset &FBS, |
1720 | | ArrayRef<unsigned> Variants); |
1721 | | bool checkUnsupportedInstruction(StringRef Name, const SMLoc &IDLoc); |
1722 | | |
1723 | | bool isId(const StringRef Id) const; |
1724 | | bool isId(const AsmToken &Token, const StringRef Id) const; |
1725 | | bool isToken(const AsmToken::TokenKind Kind) const; |
1726 | | StringRef getId() const; |
1727 | | bool trySkipId(const StringRef Id); |
1728 | | bool trySkipId(const StringRef Pref, const StringRef Id); |
1729 | | bool trySkipId(const StringRef Id, const AsmToken::TokenKind Kind); |
1730 | | bool trySkipToken(const AsmToken::TokenKind Kind); |
1731 | | bool skipToken(const AsmToken::TokenKind Kind, const StringRef ErrMsg); |
1732 | | bool parseString(StringRef &Val, const StringRef ErrMsg = "expected a string"); |
1733 | | bool parseId(StringRef &Val, const StringRef ErrMsg = ""); |
1734 | | |
1735 | | void peekTokens(MutableArrayRef<AsmToken> Tokens); |
1736 | | AsmToken::TokenKind getTokenKind() const; |
1737 | | bool parseExpr(int64_t &Imm, StringRef Expected = ""); |
1738 | | bool parseExpr(OperandVector &Operands); |
1739 | | StringRef getTokenStr() const; |
1740 | | AsmToken peekToken(bool ShouldSkipSpace = true); |
1741 | | AsmToken getToken() const; |
1742 | | SMLoc getLoc() const; |
1743 | | void lex(); |
1744 | | |
1745 | | public: |
1746 | | void onBeginOfFile() override; |
1747 | | |
1748 | | ParseStatus parseCustomOperand(OperandVector &Operands, unsigned MCK); |
1749 | | |
1750 | | ParseStatus parseExpTgt(OperandVector &Operands); |
1751 | | ParseStatus parseSendMsg(OperandVector &Operands); |
1752 | | ParseStatus parseInterpSlot(OperandVector &Operands); |
1753 | | ParseStatus parseInterpAttr(OperandVector &Operands); |
1754 | | ParseStatus parseSOPPBrTarget(OperandVector &Operands); |
1755 | | ParseStatus parseBoolReg(OperandVector &Operands); |
1756 | | |
1757 | | bool parseSwizzleOperand(int64_t &Op, |
1758 | | const unsigned MinVal, |
1759 | | const unsigned MaxVal, |
1760 | | const StringRef ErrMsg, |
1761 | | SMLoc &Loc); |
1762 | | bool parseSwizzleOperands(const unsigned OpNum, int64_t* Op, |
1763 | | const unsigned MinVal, |
1764 | | const unsigned MaxVal, |
1765 | | const StringRef ErrMsg); |
1766 | | ParseStatus parseSwizzle(OperandVector &Operands); |
1767 | | bool parseSwizzleOffset(int64_t &Imm); |
1768 | | bool parseSwizzleMacro(int64_t &Imm); |
1769 | | bool parseSwizzleQuadPerm(int64_t &Imm); |
1770 | | bool parseSwizzleBitmaskPerm(int64_t &Imm); |
1771 | | bool parseSwizzleBroadcast(int64_t &Imm); |
1772 | | bool parseSwizzleSwap(int64_t &Imm); |
1773 | | bool parseSwizzleReverse(int64_t &Imm); |
1774 | | |
1775 | | ParseStatus parseGPRIdxMode(OperandVector &Operands); |
1776 | | int64_t parseGPRIdxMacro(); |
1777 | | |
1778 | 0 | void cvtMubuf(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false); } |
1779 | 0 | void cvtMubufAtomic(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true); } |
1780 | | |
1781 | | ParseStatus parseOModSI(OperandVector &Operands); |
1782 | | |
1783 | | void cvtVOP3(MCInst &Inst, const OperandVector &Operands, |
1784 | | OptionalImmIndexMap &OptionalIdx); |
1785 | | void cvtVOP3OpSel(MCInst &Inst, const OperandVector &Operands); |
1786 | | void cvtVOP3(MCInst &Inst, const OperandVector &Operands); |
1787 | | void cvtVOP3P(MCInst &Inst, const OperandVector &Operands); |
1788 | | void cvtVOPD(MCInst &Inst, const OperandVector &Operands); |
1789 | | void cvtVOP3OpSel(MCInst &Inst, const OperandVector &Operands, |
1790 | | OptionalImmIndexMap &OptionalIdx); |
1791 | | void cvtVOP3P(MCInst &Inst, const OperandVector &Operands, |
1792 | | OptionalImmIndexMap &OptionalIdx); |
1793 | | |
1794 | | void cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands); |
1795 | | void cvtVINTERP(MCInst &Inst, const OperandVector &Operands); |
1796 | | |
1797 | | bool parseDimId(unsigned &Encoding); |
1798 | | ParseStatus parseDim(OperandVector &Operands); |
1799 | | bool convertDppBoundCtrl(int64_t &BoundCtrl); |
1800 | | ParseStatus parseDPP8(OperandVector &Operands); |
1801 | | ParseStatus parseDPPCtrl(OperandVector &Operands); |
1802 | | bool isSupportedDPPCtrl(StringRef Ctrl, const OperandVector &Operands); |
1803 | | int64_t parseDPPCtrlSel(StringRef Ctrl); |
1804 | | int64_t parseDPPCtrlPerm(); |
1805 | | void cvtDPP(MCInst &Inst, const OperandVector &Operands, bool IsDPP8 = false); |
1806 | 0 | void cvtDPP8(MCInst &Inst, const OperandVector &Operands) { |
1807 | 0 | cvtDPP(Inst, Operands, true); |
1808 | 0 | } |
1809 | | void cvtVOP3DPP(MCInst &Inst, const OperandVector &Operands, |
1810 | | bool IsDPP8 = false); |
1811 | 0 | void cvtVOP3DPP8(MCInst &Inst, const OperandVector &Operands) { |
1812 | 0 | cvtVOP3DPP(Inst, Operands, true); |
1813 | 0 | } |
1814 | | |
1815 | | ParseStatus parseSDWASel(OperandVector &Operands, StringRef Prefix, |
1816 | | AMDGPUOperand::ImmTy Type); |
1817 | | ParseStatus parseSDWADstUnused(OperandVector &Operands); |
1818 | | void cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands); |
1819 | | void cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands); |
1820 | | void cvtSdwaVOP2b(MCInst &Inst, const OperandVector &Operands); |
1821 | | void cvtSdwaVOP2e(MCInst &Inst, const OperandVector &Operands); |
1822 | | void cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands); |
1823 | | void cvtSDWA(MCInst &Inst, const OperandVector &Operands, |
1824 | | uint64_t BasicInstType, |
1825 | | bool SkipDstVcc = false, |
1826 | | bool SkipSrcVcc = false); |
1827 | | |
1828 | | ParseStatus parseEndpgm(OperandVector &Operands); |
1829 | | |
1830 | | ParseStatus parseVOPD(OperandVector &Operands); |
1831 | | }; |
1832 | | |
1833 | | } // end anonymous namespace |
1834 | | |
1835 | | // May be called with integer type with equivalent bitwidth. |
1836 | 0 | static const fltSemantics *getFltSemantics(unsigned Size) { |
1837 | 0 | switch (Size) { |
1838 | 0 | case 4: |
1839 | 0 | return &APFloat::IEEEsingle(); |
1840 | 0 | case 8: |
1841 | 0 | return &APFloat::IEEEdouble(); |
1842 | 0 | case 2: |
1843 | 0 | return &APFloat::IEEEhalf(); |
1844 | 0 | default: |
1845 | 0 | llvm_unreachable("unsupported fp type"); |
1846 | 0 | } |
1847 | 0 | } |
1848 | | |
1849 | 0 | static const fltSemantics *getFltSemantics(MVT VT) { |
1850 | 0 | return getFltSemantics(VT.getSizeInBits() / 8); |
1851 | 0 | } |
1852 | | |
1853 | 0 | static const fltSemantics *getOpFltSemantics(uint8_t OperandType) { |
1854 | 0 | switch (OperandType) { |
1855 | 0 | case AMDGPU::OPERAND_REG_IMM_INT32: |
1856 | 0 | case AMDGPU::OPERAND_REG_IMM_FP32: |
1857 | 0 | case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED: |
1858 | 0 | case AMDGPU::OPERAND_REG_INLINE_C_INT32: |
1859 | 0 | case AMDGPU::OPERAND_REG_INLINE_C_FP32: |
1860 | 0 | case AMDGPU::OPERAND_REG_INLINE_AC_INT32: |
1861 | 0 | case AMDGPU::OPERAND_REG_INLINE_AC_FP32: |
1862 | 0 | case AMDGPU::OPERAND_REG_INLINE_C_V2FP32: |
1863 | 0 | case AMDGPU::OPERAND_REG_IMM_V2FP32: |
1864 | 0 | case AMDGPU::OPERAND_REG_INLINE_C_V2INT32: |
1865 | 0 | case AMDGPU::OPERAND_REG_IMM_V2INT32: |
1866 | 0 | case AMDGPU::OPERAND_REG_IMM_V2INT16: |
1867 | 0 | case AMDGPU::OPERAND_REG_INLINE_C_V2INT16: |
1868 | 0 | case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16: |
1869 | 0 | case AMDGPU::OPERAND_KIMM32: |
1870 | 0 | case AMDGPU::OPERAND_INLINE_SPLIT_BARRIER_INT32: |
1871 | 0 | return &APFloat::IEEEsingle(); |
1872 | 0 | case AMDGPU::OPERAND_REG_IMM_INT64: |
1873 | 0 | case AMDGPU::OPERAND_REG_IMM_FP64: |
1874 | 0 | case AMDGPU::OPERAND_REG_INLINE_C_INT64: |
1875 | 0 | case AMDGPU::OPERAND_REG_INLINE_C_FP64: |
1876 | 0 | case AMDGPU::OPERAND_REG_INLINE_AC_FP64: |
1877 | 0 | return &APFloat::IEEEdouble(); |
1878 | 0 | case AMDGPU::OPERAND_REG_IMM_INT16: |
1879 | 0 | case AMDGPU::OPERAND_REG_IMM_FP16: |
1880 | 0 | case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED: |
1881 | 0 | case AMDGPU::OPERAND_REG_INLINE_C_INT16: |
1882 | 0 | case AMDGPU::OPERAND_REG_INLINE_C_FP16: |
1883 | 0 | case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: |
1884 | 0 | case AMDGPU::OPERAND_REG_INLINE_AC_INT16: |
1885 | 0 | case AMDGPU::OPERAND_REG_INLINE_AC_FP16: |
1886 | 0 | case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16: |
1887 | 0 | case AMDGPU::OPERAND_REG_IMM_V2FP16: |
1888 | 0 | case AMDGPU::OPERAND_KIMM16: |
1889 | 0 | return &APFloat::IEEEhalf(); |
1890 | 0 | default: |
1891 | 0 | llvm_unreachable("unsupported fp type"); |
1892 | 0 | } |
1893 | 0 | } |
1894 | | |
1895 | | //===----------------------------------------------------------------------===// |
1896 | | // Operand |
1897 | | //===----------------------------------------------------------------------===// |
1898 | | |
1899 | 0 | static bool canLosslesslyConvertToFPType(APFloat &FPLiteral, MVT VT) { |
1900 | 0 | bool Lost; |
1901 | | |
1902 | | // Convert the literal to the operand's floating-point type |
1903 | 0 | APFloat::opStatus Status = FPLiteral.convert(*getFltSemantics(VT), |
1904 | 0 | APFloat::rmNearestTiesToEven, |
1905 | 0 | &Lost); |
1906 | | // We allow precision loss but not overflow or underflow |
1907 | 0 | if (Status != APFloat::opOK && |
1908 | 0 | Lost && |
1909 | 0 | ((Status & APFloat::opOverflow) != 0 || |
1910 | 0 | (Status & APFloat::opUnderflow) != 0)) { |
1911 | 0 | return false; |
1912 | 0 | } |
1913 | | |
1914 | 0 | return true; |
1915 | 0 | } |
1916 | | |
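The check above tolerates rounding (an inexact result) but rejects conversions that overflow or underflow the target format while losing information. A minimal standalone sketch of the same policy, assuming an LLVM tree is available for llvm/ADT/APFloat.h; the helper name fitsWithoutRangeError is made up for illustration:

    #include "llvm/ADT/APFloat.h"
    #include <cstdio>

    // Accepts a value when converting it to the target semantics causes at most
    // rounding; overflow or underflow with information loss is rejected.
    static bool fitsWithoutRangeError(double V, const llvm::fltSemantics &Sem) {
      llvm::APFloat F(V);
      bool LosesInfo = false;
      llvm::APFloat::opStatus S =
          F.convert(Sem, llvm::APFloat::rmNearestTiesToEven, &LosesInfo);
      return !(S != llvm::APFloat::opOK && LosesInfo &&
               ((S & llvm::APFloat::opOverflow) != 0 ||
                (S & llvm::APFloat::opUnderflow) != 0));
    }

    int main() {
      // 1.0e5 exceeds the f16 range and is rejected; 0.1 merely rounds and passes.
      std::printf("%d %d\n",
                  fitsWithoutRangeError(1.0e5, llvm::APFloat::IEEEhalf()),
                  fitsWithoutRangeError(0.1, llvm::APFloat::IEEEhalf()));
    }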
1917 | 0 | static bool isSafeTruncation(int64_t Val, unsigned Size) { |
1918 | 0 | return isUIntN(Size, Val) || isIntN(Size, Val); |
1919 | 0 | } |
1920 | | |
1921 | 0 | static bool isInlineableLiteralOp16(int64_t Val, MVT VT, bool HasInv2Pi) { |
1922 | 0 | if (VT.getScalarType() == MVT::i16) { |
1923 | | // FP immediate values are broken. |
1924 | 0 | return isInlinableIntLiteral(Val); |
1925 | 0 | } |
1926 | | |
1927 | | // f16/v2f16 operands work correctly for all values. |
1928 | 0 | return AMDGPU::isInlinableLiteral16(Val, HasInv2Pi); |
1929 | 0 | } |
1930 | | |
1931 | 0 | bool AMDGPUOperand::isInlinableImm(MVT type) const { |
1932 | | |
1933 | | // This is a hack to enable named inline values like |
1934 | | // shared_base with both 32-bit and 64-bit operands. |
1935 | | // Note that these values are defined as |
1936 | | // 32-bit operands only. |
1937 | 0 | if (isInlineValue()) { |
1938 | 0 | return true; |
1939 | 0 | } |
1940 | | |
1941 | 0 | if (!isImmTy(ImmTyNone)) { |
1942 | | // Only plain immediates are inlinable (e.g. "clamp" attribute is not) |
1943 | 0 | return false; |
1944 | 0 | } |
1945 | | // TODO: We should avoid using host float here. It would be better to |
1946 | | // check the float bit values which is what a few other places do. |
1947 | | // We've had bot failures before due to weird NaN support on mips hosts. |
1948 | | |
1949 | 0 | APInt Literal(64, Imm.Val); |
1950 | |
1951 | 0 | if (Imm.IsFPImm) { // We got fp literal token |
1952 | 0 | if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand |
1953 | 0 | return AMDGPU::isInlinableLiteral64(Imm.Val, |
1954 | 0 | AsmParser->hasInv2PiInlineImm()); |
1955 | 0 | } |
1956 | | |
1957 | 0 | APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val)); |
1958 | 0 | if (!canLosslesslyConvertToFPType(FPLiteral, type)) |
1959 | 0 | return false; |
1960 | | |
1961 | 0 | if (type.getScalarSizeInBits() == 16) { |
1962 | 0 | return isInlineableLiteralOp16( |
1963 | 0 | static_cast<int16_t>(FPLiteral.bitcastToAPInt().getZExtValue()), |
1964 | 0 | type, AsmParser->hasInv2PiInlineImm()); |
1965 | 0 | } |
1966 | | |
1967 | | // Check if single precision literal is inlinable |
1968 | 0 | return AMDGPU::isInlinableLiteral32( |
1969 | 0 | static_cast<int32_t>(FPLiteral.bitcastToAPInt().getZExtValue()), |
1970 | 0 | AsmParser->hasInv2PiInlineImm()); |
1971 | 0 | } |
1972 | | |
1973 | | // We got int literal token. |
1974 | 0 | if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand |
1975 | 0 | return AMDGPU::isInlinableLiteral64(Imm.Val, |
1976 | 0 | AsmParser->hasInv2PiInlineImm()); |
1977 | 0 | } |
1978 | | |
1979 | 0 | if (!isSafeTruncation(Imm.Val, type.getScalarSizeInBits())) { |
1980 | 0 | return false; |
1981 | 0 | } |
1982 | | |
1983 | 0 | if (type.getScalarSizeInBits() == 16) { |
1984 | 0 | return isInlineableLiteralOp16( |
1985 | 0 | static_cast<int16_t>(Literal.getLoBits(16).getSExtValue()), |
1986 | 0 | type, AsmParser->hasInv2PiInlineImm()); |
1987 | 0 | } |
1988 | | |
1989 | 0 | return AMDGPU::isInlinableLiteral32( |
1990 | 0 | static_cast<int32_t>(Literal.getLoBits(32).getZExtValue()), |
1991 | 0 | AsmParser->hasInv2PiInlineImm()); |
1992 | 0 | } |
1993 | | |
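For orientation, the 32-bit inline-constant rule that isInlinableImm() ultimately defers to (AMDGPU::isInlinableLiteral32 from Utils/AMDGPUBaseInfo.h) accepts the small integers -16..64 plus a short list of float encodings. The sketch below paraphrases that rule in plain C++; the helper names are illustrative, not the parser's API:

    #include <cstdint>

    // Integer inline constants: -16 .. 64 inclusive.
    static bool isInlinableInt32(int32_t V) { return V >= -16 && V <= 64; }

    // Float inline constants, identified by their IEEE-754 single-precision bits.
    static bool isInlinableFP32Bits(uint32_t Bits, bool HasInv2Pi) {
      switch (Bits) {
      case 0x3f000000u: case 0xbf000000u: // +/-0.5
      case 0x3f800000u: case 0xbf800000u: // +/-1.0
      case 0x40000000u: case 0xc0000000u: // +/-2.0
      case 0x40800000u: case 0xc0800000u: // +/-4.0
        return true;
      case 0x3e22f983u: // 1/(2*pi), only on subtargets that provide it
        return HasInv2Pi;
      default:
        return false;
      }
    }

    int main() {
      // 64 still encodes inline, 65 would need a literal dword; 1.0f is inline.
      return (isInlinableInt32(64) && !isInlinableInt32(65) &&
              isInlinableFP32Bits(0x3f800000u, false)) ? 0 : 1;
    }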
1994 | 0 | bool AMDGPUOperand::isLiteralImm(MVT type) const { |
1995 | | // Check that this immediate can be added as literal |
1996 | 0 | if (!isImmTy(ImmTyNone)) { |
1997 | 0 | return false; |
1998 | 0 | } |
1999 | | |
2000 | 0 | if (!Imm.IsFPImm) { |
2001 | | // We got int literal token. |
2002 | |
2003 | 0 | if (type == MVT::f64 && hasFPModifiers()) { |
2004 | | // Applying fp modifiers to int literals cannot preserve the same semantics |
2005 | | // for VOP1/2/C and VOP3 because of integer truncation. To avoid ambiguity, |
2006 | | // disallow these cases. |
2007 | 0 | return false; |
2008 | 0 | } |
2009 | | |
2010 | 0 | unsigned Size = type.getSizeInBits(); |
2011 | 0 | if (Size == 64) |
2012 | 0 | Size = 32; |
2013 | | |
2014 | | // FIXME: 64-bit operands can zero extend, sign extend, or pad zeroes for FP |
2015 | | // types. |
2016 | 0 | return isSafeTruncation(Imm.Val, Size); |
2017 | 0 | } |
2018 | | |
2019 | | // We got fp literal token |
2020 | 0 | if (type == MVT::f64) { // Expected 64-bit fp operand |
2021 | | // We would set the low 32 bits of the literal to zero, but we accept such literals |
2022 | 0 | return true; |
2023 | 0 | } |
2024 | | |
2025 | 0 | if (type == MVT::i64) { // Expected 64-bit int operand |
2026 | | // We don't allow fp literals in 64-bit integer instructions. It is |
2027 | | // unclear how we should encode them. |
2028 | 0 | return false; |
2029 | 0 | } |
2030 | | |
2031 | | // We allow fp literals with f16x2 operands assuming that the specified |
2032 | | // literal goes into the lower half and the upper half is zero. We also |
2033 | | // require that the literal may be losslessly converted to f16. |
2034 | | // |
2035 | | // For i16x2 operands, we assume that the specified literal is encoded as a |
2036 | | // single-precision float. This is pretty odd, but it matches SP3 and what |
2037 | | // happens in hardware. |
2038 | 0 | MVT ExpectedType = (type == MVT::v2f16) ? MVT::f16 |
2039 | 0 | : (type == MVT::v2i16) ? MVT::f32 |
2040 | 0 | : (type == MVT::v2f32) ? MVT::f32 |
2041 | 0 | : type; |
2042 | |
2043 | 0 | APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val)); |
2044 | 0 | return canLosslesslyConvertToFPType(FPLiteral, ExpectedType); |
2045 | 0 | } |
2046 | | |
2047 | 0 | bool AMDGPUOperand::isRegClass(unsigned RCID) const { |
2048 | 0 | return isRegKind() && AsmParser->getMRI()->getRegClass(RCID).contains(getReg()); |
2049 | 0 | } |
2050 | | |
2051 | 0 | bool AMDGPUOperand::isVRegWithInputMods() const { |
2052 | 0 | return isRegClass(AMDGPU::VGPR_32RegClassID) || |
2053 | | // GFX90A allows DPP on 64-bit operands. |
2054 | 0 | (isRegClass(AMDGPU::VReg_64RegClassID) && |
2055 | 0 | AsmParser->getFeatureBits()[AMDGPU::FeatureDPALU_DPP]); |
2056 | 0 | } |
2057 | | |
2058 | 0 | bool AMDGPUOperand::isT16VRegWithInputMods() const { |
2059 | 0 | return isRegClass(AMDGPU::VGPR_32_Lo128RegClassID); |
2060 | 0 | } |
2061 | | |
2062 | 0 | bool AMDGPUOperand::isSDWAOperand(MVT type) const { |
2063 | 0 | if (AsmParser->isVI()) |
2064 | 0 | return isVReg32(); |
2065 | 0 | else if (AsmParser->isGFX9Plus()) |
2066 | 0 | return isRegClass(AMDGPU::VS_32RegClassID) || isInlinableImm(type); |
2067 | 0 | else |
2068 | 0 | return false; |
2069 | 0 | } |
2070 | | |
2071 | 0 | bool AMDGPUOperand::isSDWAFP16Operand() const { |
2072 | 0 | return isSDWAOperand(MVT::f16); |
2073 | 0 | } |
2074 | | |
2075 | 0 | bool AMDGPUOperand::isSDWAFP32Operand() const { |
2076 | 0 | return isSDWAOperand(MVT::f32); |
2077 | 0 | } |
2078 | | |
2079 | 0 | bool AMDGPUOperand::isSDWAInt16Operand() const { |
2080 | 0 | return isSDWAOperand(MVT::i16); |
2081 | 0 | } |
2082 | | |
2083 | 0 | bool AMDGPUOperand::isSDWAInt32Operand() const { |
2084 | 0 | return isSDWAOperand(MVT::i32); |
2085 | 0 | } |
2086 | | |
2087 | 0 | bool AMDGPUOperand::isBoolReg() const { |
2088 | 0 | auto FB = AsmParser->getFeatureBits(); |
2089 | 0 | return isReg() && ((FB[AMDGPU::FeatureWavefrontSize64] && isSCSrcB64()) || |
2090 | 0 | (FB[AMDGPU::FeatureWavefrontSize32] && isSCSrcB32())); |
2091 | 0 | } |
2092 | | |
2093 | | uint64_t AMDGPUOperand::applyInputFPModifiers(uint64_t Val, unsigned Size) const |
2094 | 0 | { |
2095 | 0 | assert(isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers()); |
2096 | 0 | assert(Size == 2 || Size == 4 || Size == 8); |
2097 | | |
2098 | 0 | const uint64_t FpSignMask = (1ULL << (Size * 8 - 1)); |
2099 | |
2100 | 0 | if (Imm.Mods.Abs) { |
2101 | 0 | Val &= ~FpSignMask; |
2102 | 0 | } |
2103 | 0 | if (Imm.Mods.Neg) { |
2104 | 0 | Val ^= FpSignMask; |
2105 | 0 | } |
2106 | |
2107 | 0 | return Val; |
2108 | 0 | } |
2109 | | |
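applyInputFPModifiers() folds the abs/neg source modifiers straight into the raw bit pattern of the literal: abs clears the sign bit, then neg flips it, for a 2-, 4- or 8-byte operand. A standalone illustration in plain C++ (not the parser's own helper):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    static uint64_t applyFPMods(uint64_t Val, unsigned Size, bool Abs, bool Neg) {
      assert(Size == 2 || Size == 4 || Size == 8);
      const uint64_t SignMask = 1ULL << (Size * 8 - 1);
      if (Abs)
        Val &= ~SignMask;   // |x|: force the sign bit to 0
      if (Neg)
        Val ^= SignMask;    // -x: flip the sign bit
      return Val;
    }

    int main() {
      // -1.0f is 0xBF800000; abs yields 1.0f (0x3F800000), abs+neg yields -1.0f.
      std::printf("0x%08llx 0x%08llx\n",
                  (unsigned long long)applyFPMods(0xBF800000u, 4, true, false),
                  (unsigned long long)applyFPMods(0xBF800000u, 4, true, true));
    }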
2110 | 0 | void AMDGPUOperand::addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers) const { |
2111 | 0 | if (isExpr()) { |
2112 | 0 | Inst.addOperand(MCOperand::createExpr(Expr)); |
2113 | 0 | return; |
2114 | 0 | } |
2115 | | |
2116 | 0 | if (AMDGPU::isSISrcOperand(AsmParser->getMII()->get(Inst.getOpcode()), |
2117 | 0 | Inst.getNumOperands())) { |
2118 | 0 | addLiteralImmOperand(Inst, Imm.Val, |
2119 | 0 | ApplyModifiers & |
2120 | 0 | isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers()); |
2121 | 0 | } else { |
2122 | 0 | assert(!isImmTy(ImmTyNone) || !hasModifiers()); |
2123 | 0 | Inst.addOperand(MCOperand::createImm(Imm.Val)); |
2124 | 0 | setImmKindNone(); |
2125 | 0 | } |
2126 | 0 | } |
2127 | | |
2128 | 0 | void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyModifiers) const { |
2129 | 0 | const auto& InstDesc = AsmParser->getMII()->get(Inst.getOpcode()); |
2130 | 0 | auto OpNum = Inst.getNumOperands(); |
2131 | | // Check that this operand accepts literals |
2132 | 0 | assert(AMDGPU::isSISrcOperand(InstDesc, OpNum)); |
2133 | | |
2134 | 0 | if (ApplyModifiers) { |
2135 | 0 | assert(AMDGPU::isSISrcFPOperand(InstDesc, OpNum)); |
2136 | 0 | const unsigned Size = Imm.IsFPImm ? sizeof(double) : getOperandSize(InstDesc, OpNum); |
2137 | 0 | Val = applyInputFPModifiers(Val, Size); |
2138 | 0 | } |
2139 | | |
2140 | 0 | APInt Literal(64, Val); |
2141 | 0 | uint8_t OpTy = InstDesc.operands()[OpNum].OperandType; |
2142 | |
2143 | 0 | if (Imm.IsFPImm) { // We got fp literal token |
2144 | 0 | switch (OpTy) { |
2145 | 0 | case AMDGPU::OPERAND_REG_IMM_INT64: |
2146 | 0 | case AMDGPU::OPERAND_REG_IMM_FP64: |
2147 | 0 | case AMDGPU::OPERAND_REG_INLINE_C_INT64: |
2148 | 0 | case AMDGPU::OPERAND_REG_INLINE_C_FP64: |
2149 | 0 | case AMDGPU::OPERAND_REG_INLINE_AC_FP64: |
2150 | 0 | if (AMDGPU::isInlinableLiteral64(Literal.getZExtValue(), |
2151 | 0 | AsmParser->hasInv2PiInlineImm())) { |
2152 | 0 | Inst.addOperand(MCOperand::createImm(Literal.getZExtValue())); |
2153 | 0 | setImmKindConst(); |
2154 | 0 | return; |
2155 | 0 | } |
2156 | | |
2157 | | // Non-inlineable |
2158 | 0 | if (AMDGPU::isSISrcFPOperand(InstDesc, OpNum)) { // Expected 64-bit fp operand |
2159 | | // For fp operands we check whether the low 32 bits are zero |
2160 | 0 | if (Literal.getLoBits(32) != 0) { |
2161 | 0 | const_cast<AMDGPUAsmParser *>(AsmParser)->Warning(Inst.getLoc(), |
2162 | 0 | "Can't encode literal as exact 64-bit floating-point operand. " |
2163 | 0 | "Low 32-bits will be set to zero"); |
2164 | 0 | Val &= 0xffffffff00000000u; |
2165 | 0 | } |
2166 | |
2167 | 0 | Inst.addOperand(MCOperand::createImm(Val)); |
2168 | 0 | setImmKindLiteral(); |
2169 | 0 | return; |
2170 | 0 | } |
2171 | | |
2172 | | // We don't allow fp literals in 64-bit integer instructions. It is |
2173 | | // unclear how we should encode them. This case should be checked earlier |
2174 | | // in predicate methods (isLiteralImm()) |
2175 | 0 | llvm_unreachable("fp literal in 64-bit integer instruction."); |
2176 | |
2177 | 0 | case AMDGPU::OPERAND_REG_IMM_INT32: |
2178 | 0 | case AMDGPU::OPERAND_REG_IMM_FP32: |
2179 | 0 | case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED: |
2180 | 0 | case AMDGPU::OPERAND_REG_INLINE_C_INT32: |
2181 | 0 | case AMDGPU::OPERAND_REG_INLINE_C_FP32: |
2182 | 0 | case AMDGPU::OPERAND_REG_INLINE_AC_INT32: |
2183 | 0 | case AMDGPU::OPERAND_REG_INLINE_AC_FP32: |
2184 | 0 | case AMDGPU::OPERAND_REG_IMM_INT16: |
2185 | 0 | case AMDGPU::OPERAND_REG_IMM_FP16: |
2186 | 0 | case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED: |
2187 | 0 | case AMDGPU::OPERAND_REG_INLINE_C_INT16: |
2188 | 0 | case AMDGPU::OPERAND_REG_INLINE_C_FP16: |
2189 | 0 | case AMDGPU::OPERAND_REG_INLINE_C_V2INT16: |
2190 | 0 | case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: |
2191 | 0 | case AMDGPU::OPERAND_REG_INLINE_AC_INT16: |
2192 | 0 | case AMDGPU::OPERAND_REG_INLINE_AC_FP16: |
2193 | 0 | case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16: |
2194 | 0 | case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16: |
2195 | 0 | case AMDGPU::OPERAND_REG_IMM_V2INT16: |
2196 | 0 | case AMDGPU::OPERAND_REG_IMM_V2FP16: |
2197 | 0 | case AMDGPU::OPERAND_REG_INLINE_C_V2FP32: |
2198 | 0 | case AMDGPU::OPERAND_REG_IMM_V2FP32: |
2199 | 0 | case AMDGPU::OPERAND_REG_INLINE_C_V2INT32: |
2200 | 0 | case AMDGPU::OPERAND_REG_IMM_V2INT32: |
2201 | 0 | case AMDGPU::OPERAND_KIMM32: |
2202 | 0 | case AMDGPU::OPERAND_KIMM16: |
2203 | 0 | case AMDGPU::OPERAND_INLINE_SPLIT_BARRIER_INT32: { |
2204 | 0 | bool lost; |
2205 | 0 | APFloat FPLiteral(APFloat::IEEEdouble(), Literal); |
2206 | | // Convert the literal to the operand's floating-point type |
2207 | 0 | FPLiteral.convert(*getOpFltSemantics(OpTy), |
2208 | 0 | APFloat::rmNearestTiesToEven, &lost); |
2209 | | // We allow precision lost but not overflow or underflow. This should be |
2210 | | // checked earlier in isLiteralImm() |
2211 | |
2212 | 0 | uint64_t ImmVal = FPLiteral.bitcastToAPInt().getZExtValue(); |
2213 | 0 | Inst.addOperand(MCOperand::createImm(ImmVal)); |
2214 | 0 | if (OpTy == AMDGPU::OPERAND_KIMM32 || OpTy == AMDGPU::OPERAND_KIMM16) { |
2215 | 0 | setImmKindMandatoryLiteral(); |
2216 | 0 | } else { |
2217 | 0 | setImmKindLiteral(); |
2218 | 0 | } |
2219 | 0 | return; |
2220 | 0 | } |
2221 | 0 | default: |
2222 | 0 | llvm_unreachable("invalid operand size"); |
2223 | 0 | } |
2224 | | |
2225 | 0 | return; |
2226 | 0 | } |
2227 | | |
2228 | | // We got int literal token. |
2229 | | // Only sign extend inline immediates. |
2230 | 0 | switch (OpTy) { |
2231 | 0 | case AMDGPU::OPERAND_REG_IMM_INT32: |
2232 | 0 | case AMDGPU::OPERAND_REG_IMM_FP32: |
2233 | 0 | case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED: |
2234 | 0 | case AMDGPU::OPERAND_REG_INLINE_C_INT32: |
2235 | 0 | case AMDGPU::OPERAND_REG_INLINE_C_FP32: |
2236 | 0 | case AMDGPU::OPERAND_REG_INLINE_AC_INT32: |
2237 | 0 | case AMDGPU::OPERAND_REG_INLINE_AC_FP32: |
2238 | 0 | case AMDGPU::OPERAND_REG_IMM_V2INT16: |
2239 | 0 | case AMDGPU::OPERAND_REG_IMM_V2FP16: |
2240 | 0 | case AMDGPU::OPERAND_REG_IMM_V2FP32: |
2241 | 0 | case AMDGPU::OPERAND_REG_INLINE_C_V2FP32: |
2242 | 0 | case AMDGPU::OPERAND_REG_IMM_V2INT32: |
2243 | 0 | case AMDGPU::OPERAND_REG_INLINE_C_V2INT32: |
2244 | 0 | case AMDGPU::OPERAND_INLINE_SPLIT_BARRIER_INT32: |
2245 | 0 | if (isSafeTruncation(Val, 32) && |
2246 | 0 | AMDGPU::isInlinableLiteral32(static_cast<int32_t>(Val), |
2247 | 0 | AsmParser->hasInv2PiInlineImm())) { |
2248 | 0 | Inst.addOperand(MCOperand::createImm(Val)); |
2249 | 0 | setImmKindConst(); |
2250 | 0 | return; |
2251 | 0 | } |
2252 | | |
2253 | 0 | Inst.addOperand(MCOperand::createImm(Val & 0xffffffff)); |
2254 | 0 | setImmKindLiteral(); |
2255 | 0 | return; |
2256 | | |
2257 | 0 | case AMDGPU::OPERAND_REG_IMM_INT64: |
2258 | 0 | case AMDGPU::OPERAND_REG_IMM_FP64: |
2259 | 0 | case AMDGPU::OPERAND_REG_INLINE_C_INT64: |
2260 | 0 | case AMDGPU::OPERAND_REG_INLINE_C_FP64: |
2261 | 0 | case AMDGPU::OPERAND_REG_INLINE_AC_FP64: |
2262 | 0 | if (AMDGPU::isInlinableLiteral64(Val, AsmParser->hasInv2PiInlineImm())) { |
2263 | 0 | Inst.addOperand(MCOperand::createImm(Val)); |
2264 | 0 | setImmKindConst(); |
2265 | 0 | return; |
2266 | 0 | } |
2267 | | |
2268 | 0 | Val = AMDGPU::isSISrcFPOperand(InstDesc, OpNum) ? (uint64_t)Val << 32 |
2269 | 0 | : Lo_32(Val); |
2270 | |
2271 | 0 | Inst.addOperand(MCOperand::createImm(Val)); |
2272 | 0 | setImmKindLiteral(); |
2273 | 0 | return; |
2274 | | |
2275 | 0 | case AMDGPU::OPERAND_REG_IMM_INT16: |
2276 | 0 | case AMDGPU::OPERAND_REG_IMM_FP16: |
2277 | 0 | case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED: |
2278 | 0 | case AMDGPU::OPERAND_REG_INLINE_C_INT16: |
2279 | 0 | case AMDGPU::OPERAND_REG_INLINE_C_FP16: |
2280 | 0 | case AMDGPU::OPERAND_REG_INLINE_AC_INT16: |
2281 | 0 | case AMDGPU::OPERAND_REG_INLINE_AC_FP16: |
2282 | 0 | if (isSafeTruncation(Val, 16) && |
2283 | 0 | AMDGPU::isInlinableLiteral16(static_cast<int16_t>(Val), |
2284 | 0 | AsmParser->hasInv2PiInlineImm())) { |
2285 | 0 | Inst.addOperand(MCOperand::createImm(Val)); |
2286 | 0 | setImmKindConst(); |
2287 | 0 | return; |
2288 | 0 | } |
2289 | | |
2290 | 0 | Inst.addOperand(MCOperand::createImm(Val & 0xffff)); |
2291 | 0 | setImmKindLiteral(); |
2292 | 0 | return; |
2293 | | |
2294 | 0 | case AMDGPU::OPERAND_REG_INLINE_C_V2INT16: |
2295 | 0 | case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: |
2296 | 0 | case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16: |
2297 | 0 | case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16: { |
2298 | 0 | assert(isSafeTruncation(Val, 16)); |
2299 | 0 | assert(AMDGPU::isInlinableLiteral16(static_cast<int16_t>(Val), |
2300 | 0 | AsmParser->hasInv2PiInlineImm())); |
2301 | | |
2302 | 0 | Inst.addOperand(MCOperand::createImm(Val)); |
2303 | 0 | return; |
2304 | 0 | } |
2305 | 0 | case AMDGPU::OPERAND_KIMM32: |
2306 | 0 | Inst.addOperand(MCOperand::createImm(Literal.getLoBits(32).getZExtValue())); |
2307 | 0 | setImmKindMandatoryLiteral(); |
2308 | 0 | return; |
2309 | 0 | case AMDGPU::OPERAND_KIMM16: |
2310 | 0 | Inst.addOperand(MCOperand::createImm(Literal.getLoBits(16).getZExtValue())); |
2311 | 0 | setImmKindMandatoryLiteral(); |
2312 | 0 | return; |
2313 | 0 | default: |
2314 | 0 | llvm_unreachable("invalid operand size"); |
2315 | 0 | } |
2316 | 0 | } |
2317 | | |
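The warning in the 64-bit FP branch above reflects the encoding: the hardware literal is a single 32-bit dword that supplies the high half of the 64-bit operand (note the shift by 32 on the integer path), so only doubles whose low 32 bits are zero can be encoded exactly. A small demonstration of what survives the truncation, assuming that layout:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      double D = 3.141592653589793;
      uint64_t Bits;
      std::memcpy(&Bits, &D, sizeof(Bits));
      uint64_t Encodable = Bits & 0xffffffff00000000ULL; // low 32 bits forced to zero
      double Approx;
      std::memcpy(&Approx, &Encodable, sizeof(Approx));
      std::printf("original %.17g, encodable approximation %.17g\n", D, Approx);
    }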
2318 | 0 | void AMDGPUOperand::addRegOperands(MCInst &Inst, unsigned N) const { |
2319 | 0 | Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), AsmParser->getSTI()))); |
2320 | 0 | } |
2321 | | |
2322 | 0 | bool AMDGPUOperand::isInlineValue() const { |
2323 | 0 | return isRegKind() && ::isInlineValue(getReg()); |
2324 | 0 | } |
2325 | | |
2326 | | //===----------------------------------------------------------------------===// |
2327 | | // AsmParser |
2328 | | //===----------------------------------------------------------------------===// |
2329 | | |
2330 | 0 | static int getRegClass(RegisterKind Is, unsigned RegWidth) { |
2331 | 0 | if (Is == IS_VGPR) { |
2332 | 0 | switch (RegWidth) { |
2333 | 0 | default: return -1; |
2334 | 0 | case 32: |
2335 | 0 | return AMDGPU::VGPR_32RegClassID; |
2336 | 0 | case 64: |
2337 | 0 | return AMDGPU::VReg_64RegClassID; |
2338 | 0 | case 96: |
2339 | 0 | return AMDGPU::VReg_96RegClassID; |
2340 | 0 | case 128: |
2341 | 0 | return AMDGPU::VReg_128RegClassID; |
2342 | 0 | case 160: |
2343 | 0 | return AMDGPU::VReg_160RegClassID; |
2344 | 0 | case 192: |
2345 | 0 | return AMDGPU::VReg_192RegClassID; |
2346 | 0 | case 224: |
2347 | 0 | return AMDGPU::VReg_224RegClassID; |
2348 | 0 | case 256: |
2349 | 0 | return AMDGPU::VReg_256RegClassID; |
2350 | 0 | case 288: |
2351 | 0 | return AMDGPU::VReg_288RegClassID; |
2352 | 0 | case 320: |
2353 | 0 | return AMDGPU::VReg_320RegClassID; |
2354 | 0 | case 352: |
2355 | 0 | return AMDGPU::VReg_352RegClassID; |
2356 | 0 | case 384: |
2357 | 0 | return AMDGPU::VReg_384RegClassID; |
2358 | 0 | case 512: |
2359 | 0 | return AMDGPU::VReg_512RegClassID; |
2360 | 0 | case 1024: |
2361 | 0 | return AMDGPU::VReg_1024RegClassID; |
2362 | 0 | } |
2363 | 0 | } else if (Is == IS_TTMP) { |
2364 | 0 | switch (RegWidth) { |
2365 | 0 | default: return -1; |
2366 | 0 | case 32: |
2367 | 0 | return AMDGPU::TTMP_32RegClassID; |
2368 | 0 | case 64: |
2369 | 0 | return AMDGPU::TTMP_64RegClassID; |
2370 | 0 | case 128: |
2371 | 0 | return AMDGPU::TTMP_128RegClassID; |
2372 | 0 | case 256: |
2373 | 0 | return AMDGPU::TTMP_256RegClassID; |
2374 | 0 | case 512: |
2375 | 0 | return AMDGPU::TTMP_512RegClassID; |
2376 | 0 | } |
2377 | 0 | } else if (Is == IS_SGPR) { |
2378 | 0 | switch (RegWidth) { |
2379 | 0 | default: return -1; |
2380 | 0 | case 32: |
2381 | 0 | return AMDGPU::SGPR_32RegClassID; |
2382 | 0 | case 64: |
2383 | 0 | return AMDGPU::SGPR_64RegClassID; |
2384 | 0 | case 96: |
2385 | 0 | return AMDGPU::SGPR_96RegClassID; |
2386 | 0 | case 128: |
2387 | 0 | return AMDGPU::SGPR_128RegClassID; |
2388 | 0 | case 160: |
2389 | 0 | return AMDGPU::SGPR_160RegClassID; |
2390 | 0 | case 192: |
2391 | 0 | return AMDGPU::SGPR_192RegClassID; |
2392 | 0 | case 224: |
2393 | 0 | return AMDGPU::SGPR_224RegClassID; |
2394 | 0 | case 256: |
2395 | 0 | return AMDGPU::SGPR_256RegClassID; |
2396 | 0 | case 288: |
2397 | 0 | return AMDGPU::SGPR_288RegClassID; |
2398 | 0 | case 320: |
2399 | 0 | return AMDGPU::SGPR_320RegClassID; |
2400 | 0 | case 352: |
2401 | 0 | return AMDGPU::SGPR_352RegClassID; |
2402 | 0 | case 384: |
2403 | 0 | return AMDGPU::SGPR_384RegClassID; |
2404 | 0 | case 512: |
2405 | 0 | return AMDGPU::SGPR_512RegClassID; |
2406 | 0 | } |
2407 | 0 | } else if (Is == IS_AGPR) { |
2408 | 0 | switch (RegWidth) { |
2409 | 0 | default: return -1; |
2410 | 0 | case 32: |
2411 | 0 | return AMDGPU::AGPR_32RegClassID; |
2412 | 0 | case 64: |
2413 | 0 | return AMDGPU::AReg_64RegClassID; |
2414 | 0 | case 96: |
2415 | 0 | return AMDGPU::AReg_96RegClassID; |
2416 | 0 | case 128: |
2417 | 0 | return AMDGPU::AReg_128RegClassID; |
2418 | 0 | case 160: |
2419 | 0 | return AMDGPU::AReg_160RegClassID; |
2420 | 0 | case 192: |
2421 | 0 | return AMDGPU::AReg_192RegClassID; |
2422 | 0 | case 224: |
2423 | 0 | return AMDGPU::AReg_224RegClassID; |
2424 | 0 | case 256: |
2425 | 0 | return AMDGPU::AReg_256RegClassID; |
2426 | 0 | case 288: |
2427 | 0 | return AMDGPU::AReg_288RegClassID; |
2428 | 0 | case 320: |
2429 | 0 | return AMDGPU::AReg_320RegClassID; |
2430 | 0 | case 352: |
2431 | 0 | return AMDGPU::AReg_352RegClassID; |
2432 | 0 | case 384: |
2433 | 0 | return AMDGPU::AReg_384RegClassID; |
2434 | 0 | case 512: |
2435 | 0 | return AMDGPU::AReg_512RegClassID; |
2436 | 0 | case 1024: |
2437 | 0 | return AMDGPU::AReg_1024RegClassID; |
2438 | 0 | } |
2439 | 0 | } |
2440 | 0 | return -1; |
2441 | 0 | } |
2442 | | |
2443 | 0 | static unsigned getSpecialRegForName(StringRef RegName) { |
2444 | 0 | return StringSwitch<unsigned>(RegName) |
2445 | 0 | .Case("exec", AMDGPU::EXEC) |
2446 | 0 | .Case("vcc", AMDGPU::VCC) |
2447 | 0 | .Case("flat_scratch", AMDGPU::FLAT_SCR) |
2448 | 0 | .Case("xnack_mask", AMDGPU::XNACK_MASK) |
2449 | 0 | .Case("shared_base", AMDGPU::SRC_SHARED_BASE) |
2450 | 0 | .Case("src_shared_base", AMDGPU::SRC_SHARED_BASE) |
2451 | 0 | .Case("shared_limit", AMDGPU::SRC_SHARED_LIMIT) |
2452 | 0 | .Case("src_shared_limit", AMDGPU::SRC_SHARED_LIMIT) |
2453 | 0 | .Case("private_base", AMDGPU::SRC_PRIVATE_BASE) |
2454 | 0 | .Case("src_private_base", AMDGPU::SRC_PRIVATE_BASE) |
2455 | 0 | .Case("private_limit", AMDGPU::SRC_PRIVATE_LIMIT) |
2456 | 0 | .Case("src_private_limit", AMDGPU::SRC_PRIVATE_LIMIT) |
2457 | 0 | .Case("pops_exiting_wave_id", AMDGPU::SRC_POPS_EXITING_WAVE_ID) |
2458 | 0 | .Case("src_pops_exiting_wave_id", AMDGPU::SRC_POPS_EXITING_WAVE_ID) |
2459 | 0 | .Case("lds_direct", AMDGPU::LDS_DIRECT) |
2460 | 0 | .Case("src_lds_direct", AMDGPU::LDS_DIRECT) |
2461 | 0 | .Case("m0", AMDGPU::M0) |
2462 | 0 | .Case("vccz", AMDGPU::SRC_VCCZ) |
2463 | 0 | .Case("src_vccz", AMDGPU::SRC_VCCZ) |
2464 | 0 | .Case("execz", AMDGPU::SRC_EXECZ) |
2465 | 0 | .Case("src_execz", AMDGPU::SRC_EXECZ) |
2466 | 0 | .Case("scc", AMDGPU::SRC_SCC) |
2467 | 0 | .Case("src_scc", AMDGPU::SRC_SCC) |
2468 | 0 | .Case("tba", AMDGPU::TBA) |
2469 | 0 | .Case("tma", AMDGPU::TMA) |
2470 | 0 | .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO) |
2471 | 0 | .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI) |
2472 | 0 | .Case("xnack_mask_lo", AMDGPU::XNACK_MASK_LO) |
2473 | 0 | .Case("xnack_mask_hi", AMDGPU::XNACK_MASK_HI) |
2474 | 0 | .Case("vcc_lo", AMDGPU::VCC_LO) |
2475 | 0 | .Case("vcc_hi", AMDGPU::VCC_HI) |
2476 | 0 | .Case("exec_lo", AMDGPU::EXEC_LO) |
2477 | 0 | .Case("exec_hi", AMDGPU::EXEC_HI) |
2478 | 0 | .Case("tma_lo", AMDGPU::TMA_LO) |
2479 | 0 | .Case("tma_hi", AMDGPU::TMA_HI) |
2480 | 0 | .Case("tba_lo", AMDGPU::TBA_LO) |
2481 | 0 | .Case("tba_hi", AMDGPU::TBA_HI) |
2482 | 0 | .Case("pc", AMDGPU::PC_REG) |
2483 | 0 | .Case("null", AMDGPU::SGPR_NULL) |
2484 | 0 | .Default(AMDGPU::NoRegister); |
2485 | 0 | } |
2486 | | |
2487 | | bool AMDGPUAsmParser::ParseRegister(MCRegister &RegNo, SMLoc &StartLoc, |
2488 | 0 | SMLoc &EndLoc, bool RestoreOnFailure) { |
2489 | 0 | auto R = parseRegister(); |
2490 | 0 | if (!R) return true; |
2491 | 0 | assert(R->isReg()); |
2492 | 0 | RegNo = R->getReg(); |
2493 | 0 | StartLoc = R->getStartLoc(); |
2494 | 0 | EndLoc = R->getEndLoc(); |
2495 | 0 | return false; |
2496 | 0 | } |
2497 | | |
2498 | | bool AMDGPUAsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc, |
2499 | 0 | SMLoc &EndLoc) { |
2500 | 0 | return ParseRegister(Reg, StartLoc, EndLoc, /*RestoreOnFailure=*/false); |
2501 | 0 | } |
2502 | | |
2503 | | ParseStatus AMDGPUAsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc, |
2504 | 0 | SMLoc &EndLoc) { |
2505 | 0 | bool Result = ParseRegister(Reg, StartLoc, EndLoc, /*RestoreOnFailure=*/true); |
2506 | 0 | bool PendingErrors = getParser().hasPendingError(); |
2507 | 0 | getParser().clearPendingErrors(); |
2508 | 0 | if (PendingErrors) |
2509 | 0 | return ParseStatus::Failure; |
2510 | 0 | if (Result) |
2511 | 0 | return ParseStatus::NoMatch; |
2512 | 0 | return ParseStatus::Success; |
2513 | 0 | } |
2514 | | |
2515 | | bool AMDGPUAsmParser::AddNextRegisterToList(unsigned &Reg, unsigned &RegWidth, |
2516 | | RegisterKind RegKind, unsigned Reg1, |
2517 | 0 | SMLoc Loc) { |
2518 | 0 | switch (RegKind) { |
2519 | 0 | case IS_SPECIAL: |
2520 | 0 | if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) { |
2521 | 0 | Reg = AMDGPU::EXEC; |
2522 | 0 | RegWidth = 64; |
2523 | 0 | return true; |
2524 | 0 | } |
2525 | 0 | if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) { |
2526 | 0 | Reg = AMDGPU::FLAT_SCR; |
2527 | 0 | RegWidth = 64; |
2528 | 0 | return true; |
2529 | 0 | } |
2530 | 0 | if (Reg == AMDGPU::XNACK_MASK_LO && Reg1 == AMDGPU::XNACK_MASK_HI) { |
2531 | 0 | Reg = AMDGPU::XNACK_MASK; |
2532 | 0 | RegWidth = 64; |
2533 | 0 | return true; |
2534 | 0 | } |
2535 | 0 | if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) { |
2536 | 0 | Reg = AMDGPU::VCC; |
2537 | 0 | RegWidth = 64; |
2538 | 0 | return true; |
2539 | 0 | } |
2540 | 0 | if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) { |
2541 | 0 | Reg = AMDGPU::TBA; |
2542 | 0 | RegWidth = 64; |
2543 | 0 | return true; |
2544 | 0 | } |
2545 | 0 | if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) { |
2546 | 0 | Reg = AMDGPU::TMA; |
2547 | 0 | RegWidth = 64; |
2548 | 0 | return true; |
2549 | 0 | } |
2550 | 0 | Error(Loc, "register does not fit in the list"); |
2551 | 0 | return false; |
2552 | 0 | case IS_VGPR: |
2553 | 0 | case IS_SGPR: |
2554 | 0 | case IS_AGPR: |
2555 | 0 | case IS_TTMP: |
2556 | 0 | if (Reg1 != Reg + RegWidth / 32) { |
2557 | 0 | Error(Loc, "registers in a list must have consecutive indices"); |
2558 | 0 | return false; |
2559 | 0 | } |
2560 | 0 | RegWidth += 32; |
2561 | 0 | return true; |
2562 | 0 | default: |
2563 | 0 | llvm_unreachable("unexpected register kind"); |
2564 | 0 | } |
2565 | 0 | } |
2566 | | |
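For regular registers, AddNextRegisterToList() requires element i of a list such as [v0,v1,v2,v3] to carry index first+i and widens the result by 32 bits per element; the special-register pairs above instead fold into their 64-bit super-registers. A reduced model of the regular-register rule (indices only, hypothetical helper):

    #include <cstdio>
    #include <vector>

    // Returns true when the indices form a consecutive run; Width receives the
    // accumulated width in bits (32 per accepted element).
    static bool foldRegList(const std::vector<unsigned> &Indices, unsigned &Width) {
      Width = 32;
      for (size_t I = 1; I < Indices.size(); ++I) {
        if (Indices[I] != Indices[0] + I)
          return false;  // "registers in a list must have consecutive indices"
        Width += 32;
      }
      return true;
    }

    int main() {
      unsigned W = 0;
      std::printf("%d %u\n", foldRegList({0, 1, 2, 3}, W), W); // [v0,v1,v2,v3] -> 128-bit
      std::printf("%d\n", foldRegList({0, 2}, W));             // [v0,v2] is rejected
    }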
2567 | | struct RegInfo { |
2568 | | StringLiteral Name; |
2569 | | RegisterKind Kind; |
2570 | | }; |
2571 | | |
2572 | | static constexpr RegInfo RegularRegisters[] = { |
2573 | | {{"v"}, IS_VGPR}, |
2574 | | {{"s"}, IS_SGPR}, |
2575 | | {{"ttmp"}, IS_TTMP}, |
2576 | | {{"acc"}, IS_AGPR}, |
2577 | | {{"a"}, IS_AGPR}, |
2578 | | }; |
2579 | | |
2580 | 0 | static bool isRegularReg(RegisterKind Kind) { |
2581 | 0 | return Kind == IS_VGPR || |
2582 | 0 | Kind == IS_SGPR || |
2583 | 0 | Kind == IS_TTMP || |
2584 | 0 | Kind == IS_AGPR; |
2585 | 0 | } |
2586 | | |
2587 | 0 | static const RegInfo* getRegularRegInfo(StringRef Str) { |
2588 | 0 | for (const RegInfo &Reg : RegularRegisters) |
2589 | 0 | if (Str.starts_with(Reg.Name)) |
2590 | 0 | return &Reg; |
2591 | 0 | return nullptr; |
2592 | 0 | } |
2593 | | |
2594 | 0 | static bool getRegNum(StringRef Str, unsigned& Num) { |
2595 | 0 | return !Str.getAsInteger(10, Num); |
2596 | 0 | } |
2597 | | |
2598 | | bool |
2599 | | AMDGPUAsmParser::isRegister(const AsmToken &Token, |
2600 | 0 | const AsmToken &NextToken) const { |
2601 | | |
2602 | | // A list of consecutive registers: [s0,s1,s2,s3] |
2603 | 0 | if (Token.is(AsmToken::LBrac)) |
2604 | 0 | return true; |
2605 | | |
2606 | 0 | if (!Token.is(AsmToken::Identifier)) |
2607 | 0 | return false; |
2608 | | |
2609 | | // A single register like s0 or a range of registers like s[0:1] |
2610 | | |
2611 | 0 | StringRef Str = Token.getString(); |
2612 | 0 | const RegInfo *Reg = getRegularRegInfo(Str); |
2613 | 0 | if (Reg) { |
2614 | 0 | StringRef RegName = Reg->Name; |
2615 | 0 | StringRef RegSuffix = Str.substr(RegName.size()); |
2616 | 0 | if (!RegSuffix.empty()) { |
2617 | 0 | RegSuffix.consume_back(".l"); |
2618 | 0 | RegSuffix.consume_back(".h"); |
2619 | 0 | unsigned Num; |
2620 | | // A single register with an index: rXX |
2621 | 0 | if (getRegNum(RegSuffix, Num)) |
2622 | 0 | return true; |
2623 | 0 | } else { |
2624 | | // A range of registers: r[XX:YY]. |
2625 | 0 | if (NextToken.is(AsmToken::LBrac)) |
2626 | 0 | return true; |
2627 | 0 | } |
2628 | 0 | } |
2629 | | |
2630 | 0 | return getSpecialRegForName(Str) != AMDGPU::NoRegister; |
2631 | 0 | } |
2632 | | |
2633 | | bool |
2634 | | AMDGPUAsmParser::isRegister() |
2635 | 0 | { |
2636 | 0 | return isRegister(getToken(), peekToken()); |
2637 | 0 | } |
2638 | | |
2639 | | unsigned AMDGPUAsmParser::getRegularReg(RegisterKind RegKind, unsigned RegNum, |
2640 | | unsigned SubReg, unsigned RegWidth, |
2641 | 0 | SMLoc Loc) { |
2642 | 0 | assert(isRegularReg(RegKind)); |
2643 | | |
2644 | 0 | unsigned AlignSize = 1; |
2645 | 0 | if (RegKind == IS_SGPR || RegKind == IS_TTMP) { |
2646 | | // SGPR and TTMP registers must be aligned. |
2647 | | // Max required alignment is 4 dwords. |
2648 | 0 | AlignSize = std::min(llvm::bit_ceil(RegWidth / 32), 4u); |
2649 | 0 | } |
2650 | |
2651 | 0 | if (RegNum % AlignSize != 0) { |
2652 | 0 | Error(Loc, "invalid register alignment"); |
2653 | 0 | return AMDGPU::NoRegister; |
2654 | 0 | } |
2655 | | |
2656 | 0 | unsigned RegIdx = RegNum / AlignSize; |
2657 | 0 | int RCID = getRegClass(RegKind, RegWidth); |
2658 | 0 | if (RCID == -1) { |
2659 | 0 | Error(Loc, "invalid or unsupported register size"); |
2660 | 0 | return AMDGPU::NoRegister; |
2661 | 0 | } |
2662 | | |
2663 | 0 | const MCRegisterInfo *TRI = getContext().getRegisterInfo(); |
2664 | 0 | const MCRegisterClass RC = TRI->getRegClass(RCID); |
2665 | 0 | if (RegIdx >= RC.getNumRegs()) { |
2666 | 0 | Error(Loc, "register index is out of range"); |
2667 | 0 | return AMDGPU::NoRegister; |
2668 | 0 | } |
2669 | | |
2670 | 0 | unsigned Reg = RC.getRegister(RegIdx); |
2671 | |
2672 | 0 | if (SubReg) { |
2673 | 0 | Reg = TRI->getSubReg(Reg, SubReg); |
2674 | | |
2675 | | // Currently all regular registers have their .l and .h subregisters, so |
2676 | | // we should never need to generate an error here. |
2677 | 0 | assert(Reg && "Invalid subregister!"); |
2678 | 0 | } |
2679 | | |
2680 | 0 | return Reg; |
2681 | 0 | } |
2682 | | |
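The alignment rule in getRegularReg() means the first register of an SGPR or TTMP tuple must be a multiple of min(next power of two of the dword count, 4). A standalone sketch (C++20 for std::bit_ceil; the function name is illustrative):

    #include <algorithm>
    #include <bit>
    #include <cstdio>

    static bool isAlignedSgprTuple(unsigned FirstIndex, unsigned RegWidthBits) {
      unsigned Align = std::min(std::bit_ceil(RegWidthBits / 32), 4u);
      return FirstIndex % Align == 0;
    }

    int main() {
      std::printf("%d\n", isAlignedSgprTuple(2, 64));  // s[2:3]  ok, needs 2-alignment
      std::printf("%d\n", isAlignedSgprTuple(2, 128)); // s[2:5]  rejected, needs 4-alignment
      std::printf("%d\n", isAlignedSgprTuple(8, 256)); // s[8:15] ok, alignment capped at 4
    }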
2683 | 0 | bool AMDGPUAsmParser::ParseRegRange(unsigned &Num, unsigned &RegWidth) { |
2684 | 0 | int64_t RegLo, RegHi; |
2685 | 0 | if (!skipToken(AsmToken::LBrac, "missing register index")) |
2686 | 0 | return false; |
2687 | | |
2688 | 0 | SMLoc FirstIdxLoc = getLoc(); |
2689 | 0 | SMLoc SecondIdxLoc; |
2690 | |
2691 | 0 | if (!parseExpr(RegLo)) |
2692 | 0 | return false; |
2693 | | |
2694 | 0 | if (trySkipToken(AsmToken::Colon)) { |
2695 | 0 | SecondIdxLoc = getLoc(); |
2696 | 0 | if (!parseExpr(RegHi)) |
2697 | 0 | return false; |
2698 | 0 | } else { |
2699 | 0 | RegHi = RegLo; |
2700 | 0 | } |
2701 | | |
2702 | 0 | if (!skipToken(AsmToken::RBrac, "expected a closing square bracket")) |
2703 | 0 | return false; |
2704 | | |
2705 | 0 | if (!isUInt<32>(RegLo)) { |
2706 | 0 | Error(FirstIdxLoc, "invalid register index"); |
2707 | 0 | return false; |
2708 | 0 | } |
2709 | | |
2710 | 0 | if (!isUInt<32>(RegHi)) { |
2711 | 0 | Error(SecondIdxLoc, "invalid register index"); |
2712 | 0 | return false; |
2713 | 0 | } |
2714 | | |
2715 | 0 | if (RegLo > RegHi) { |
2716 | 0 | Error(FirstIdxLoc, "first register index should not exceed second index"); |
2717 | 0 | return false; |
2718 | 0 | } |
2719 | | |
2720 | 0 | Num = static_cast<unsigned>(RegLo); |
2721 | 0 | RegWidth = 32 * ((RegHi - RegLo) + 1); |
2722 | 0 | return true; |
2723 | 0 | } |
2724 | | |
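ParseRegRange() reduces a bracketed range to a starting index plus a width of 32 bits per register, so s[0:3] is a 128-bit tuple and v[4:4] degenerates to a single 32-bit register. The arithmetic, as a trivial sketch rather than the parser's API:

    #include <cstdio>

    static unsigned regRangeWidthBits(unsigned Lo, unsigned Hi) {
      return 32 * (Hi - Lo + 1);
    }

    int main() {
      std::printf("%u %u\n", regRangeWidthBits(0, 3),  // s[0:3] -> 128
                             regRangeWidthBits(4, 4)); // v[4:4] -> 32
    }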
2725 | | unsigned AMDGPUAsmParser::ParseSpecialReg(RegisterKind &RegKind, |
2726 | | unsigned &RegNum, unsigned &RegWidth, |
2727 | 0 | SmallVectorImpl<AsmToken> &Tokens) { |
2728 | 0 | assert(isToken(AsmToken::Identifier)); |
2729 | 0 | unsigned Reg = getSpecialRegForName(getTokenStr()); |
2730 | 0 | if (Reg) { |
2731 | 0 | RegNum = 0; |
2732 | 0 | RegWidth = 32; |
2733 | 0 | RegKind = IS_SPECIAL; |
2734 | 0 | Tokens.push_back(getToken()); |
2735 | 0 | lex(); // skip register name |
2736 | 0 | } |
2737 | 0 | return Reg; |
2738 | 0 | } |
2739 | | |
2740 | | unsigned AMDGPUAsmParser::ParseRegularReg(RegisterKind &RegKind, |
2741 | | unsigned &RegNum, unsigned &RegWidth, |
2742 | 0 | SmallVectorImpl<AsmToken> &Tokens) { |
2743 | 0 | assert(isToken(AsmToken::Identifier)); |
2744 | 0 | StringRef RegName = getTokenStr(); |
2745 | 0 | auto Loc = getLoc(); |
2746 | |
2747 | 0 | const RegInfo *RI = getRegularRegInfo(RegName); |
2748 | 0 | if (!RI) { |
2749 | 0 | Error(Loc, "invalid register name"); |
2750 | 0 | return AMDGPU::NoRegister; |
2751 | 0 | } |
2752 | | |
2753 | 0 | Tokens.push_back(getToken()); |
2754 | 0 | lex(); // skip register name |
2755 | |
2756 | 0 | RegKind = RI->Kind; |
2757 | 0 | StringRef RegSuffix = RegName.substr(RI->Name.size()); |
2758 | 0 | unsigned SubReg = NoSubRegister; |
2759 | 0 | if (!RegSuffix.empty()) { |
2760 | | // We don't know the opcode until we are done parsing, so we don't know if |
2761 | | // registers should be 16- or 32-bit. It is therefore mandatory to use .l or |
2762 | | // .h to correctly specify 16-bit registers. We also can't determine the class, |
2763 | | // VGPR_16_Lo128 or VGPR_16, so always parse them as VGPR_16. |
2764 | 0 | if (RegSuffix.consume_back(".l")) |
2765 | 0 | SubReg = AMDGPU::lo16; |
2766 | 0 | else if (RegSuffix.consume_back(".h")) |
2767 | 0 | SubReg = AMDGPU::hi16; |
2768 | | |
2769 | | // Single 32-bit register: vXX. |
2770 | 0 | if (!getRegNum(RegSuffix, RegNum)) { |
2771 | 0 | Error(Loc, "invalid register index"); |
2772 | 0 | return AMDGPU::NoRegister; |
2773 | 0 | } |
2774 | 0 | RegWidth = 32; |
2775 | 0 | } else { |
2776 | | // Range of registers: v[XX:YY]. ":YY" is optional. |
2777 | 0 | if (!ParseRegRange(RegNum, RegWidth)) |
2778 | 0 | return AMDGPU::NoRegister; |
2779 | 0 | } |
2780 | | |
2781 | 0 | return getRegularReg(RegKind, RegNum, SubReg, RegWidth, Loc); |
2782 | 0 | } |
2783 | | |
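ParseRegularReg() splits a token such as v127.h into a kind prefix, an optional 16-bit half selected by .l/.h, and a decimal index before resolving it to an MC register; an empty remainder instead routes to the range parser (v[XX:YY]). A reduced sketch of the suffix handling, with a hypothetical helper that is not part of the parser:

    #include <cstdio>
    #include <cstdlib>
    #include <string>

    // Splits the part of a register token that follows the "v"/"s"/... prefix.
    static bool splitRegSuffix(std::string Suffix, unsigned &Index, std::string &Half) {
      Half.clear();
      if (Suffix.size() > 2 && Suffix.compare(Suffix.size() - 2, 2, ".l") == 0) {
        Half = "lo16";
        Suffix.resize(Suffix.size() - 2);
      } else if (Suffix.size() > 2 && Suffix.compare(Suffix.size() - 2, 2, ".h") == 0) {
        Half = "hi16";
        Suffix.resize(Suffix.size() - 2);
      }
      char *End = nullptr;
      unsigned long V = std::strtoul(Suffix.c_str(), &End, 10);
      if (End == Suffix.c_str() || *End != '\0')
        return false;  // the whole remainder must be a decimal index
      Index = static_cast<unsigned>(V);
      return true;
    }

    int main() {
      unsigned Idx = 0;
      std::string Half;
      if (splitRegSuffix("127.h", Idx, Half)) // from a token like "v127.h"
        std::printf("index %u, half %s\n", Idx, Half.empty() ? "none" : Half.c_str());
    }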
2784 | | unsigned AMDGPUAsmParser::ParseRegList(RegisterKind &RegKind, unsigned &RegNum, |
2785 | | unsigned &RegWidth, |
2786 | 0 | SmallVectorImpl<AsmToken> &Tokens) { |
2787 | 0 | unsigned Reg = AMDGPU::NoRegister; |
2788 | 0 | auto ListLoc = getLoc(); |
2789 | |
2790 | 0 | if (!skipToken(AsmToken::LBrac, |
2791 | 0 | "expected a register or a list of registers")) { |
2792 | 0 | return AMDGPU::NoRegister; |
2793 | 0 | } |
2794 | | |
2795 | | // List of consecutive registers, e.g.: [s0,s1,s2,s3] |
2796 | | |
2797 | 0 | auto Loc = getLoc(); |
2798 | 0 | if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth)) |
2799 | 0 | return AMDGPU::NoRegister; |
2800 | 0 | if (RegWidth != 32) { |
2801 | 0 | Error(Loc, "expected a single 32-bit register"); |
2802 | 0 | return AMDGPU::NoRegister; |
2803 | 0 | } |
2804 | | |
2805 | 0 | for (; trySkipToken(AsmToken::Comma); ) { |
2806 | 0 | RegisterKind NextRegKind; |
2807 | 0 | unsigned NextReg, NextRegNum, NextRegWidth; |
2808 | 0 | Loc = getLoc(); |
2809 | |
|
2810 | 0 | if (!ParseAMDGPURegister(NextRegKind, NextReg, |
2811 | 0 | NextRegNum, NextRegWidth, |
2812 | 0 | Tokens)) { |
2813 | 0 | return AMDGPU::NoRegister; |
2814 | 0 | } |
2815 | 0 | if (NextRegWidth != 32) { |
2816 | 0 | Error(Loc, "expected a single 32-bit register"); |
2817 | 0 | return AMDGPU::NoRegister; |
2818 | 0 | } |
2819 | 0 | if (NextRegKind != RegKind) { |
2820 | 0 | Error(Loc, "registers in a list must be of the same kind"); |
2821 | 0 | return AMDGPU::NoRegister; |
2822 | 0 | } |
2823 | 0 | if (!AddNextRegisterToList(Reg, RegWidth, RegKind, NextReg, Loc)) |
2824 | 0 | return AMDGPU::NoRegister; |
2825 | 0 | } |
2826 | | |
2827 | 0 | if (!skipToken(AsmToken::RBrac, |
2828 | 0 | "expected a comma or a closing square bracket")) { |
2829 | 0 | return AMDGPU::NoRegister; |
2830 | 0 | } |
2831 | | |
2832 | 0 | if (isRegularReg(RegKind)) |
2833 | 0 | Reg = getRegularReg(RegKind, RegNum, NoSubRegister, RegWidth, ListLoc); |
2834 | |
|
2835 | 0 | return Reg; |
2836 | 0 | } |
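 | | // For illustration: a list such as [s0,s1,s2,s3] must name consecutive
 | | // 32-bit registers of the same kind and is folded into the equivalent
 | | // range (here s[0:3]) by getRegularReg; mixed-kind or non-32-bit entries
 | | // are rejected with the errors above.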
2837 | | |
2838 | | bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind &RegKind, unsigned &Reg, |
2839 | | unsigned &RegNum, unsigned &RegWidth, |
2840 | 0 | SmallVectorImpl<AsmToken> &Tokens) { |
2841 | 0 | auto Loc = getLoc(); |
2842 | 0 | Reg = AMDGPU::NoRegister; |
2843 | |
|
2844 | 0 | if (isToken(AsmToken::Identifier)) { |
2845 | 0 | Reg = ParseSpecialReg(RegKind, RegNum, RegWidth, Tokens); |
2846 | 0 | if (Reg == AMDGPU::NoRegister) |
2847 | 0 | Reg = ParseRegularReg(RegKind, RegNum, RegWidth, Tokens); |
2848 | 0 | } else { |
2849 | 0 | Reg = ParseRegList(RegKind, RegNum, RegWidth, Tokens); |
2850 | 0 | } |
2851 | |
|
2852 | 0 | const MCRegisterInfo *TRI = getContext().getRegisterInfo(); |
2853 | 0 | if (Reg == AMDGPU::NoRegister) { |
2854 | 0 | assert(Parser.hasPendingError()); |
2855 | 0 | return false; |
2856 | 0 | } |
2857 | | |
2858 | 0 | if (!subtargetHasRegister(*TRI, Reg)) { |
2859 | 0 | if (Reg == AMDGPU::SGPR_NULL) { |
2860 | 0 | Error(Loc, "'null' operand is not supported on this GPU"); |
2861 | 0 | } else { |
2862 | 0 | Error(Loc, "register not available on this GPU"); |
2863 | 0 | } |
2864 | 0 | return false; |
2865 | 0 | } |
2866 | | |
2867 | 0 | return true; |
2868 | 0 | } |
2869 | | |
2870 | | bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind &RegKind, unsigned &Reg, |
2871 | | unsigned &RegNum, unsigned &RegWidth, |
2872 | 0 | bool RestoreOnFailure /*=false*/) { |
2873 | 0 | Reg = AMDGPU::NoRegister; |
2874 | |
|
2875 | 0 | SmallVector<AsmToken, 1> Tokens; |
2876 | 0 | if (ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth, Tokens)) { |
2877 | 0 | if (RestoreOnFailure) { |
2878 | 0 | while (!Tokens.empty()) { |
2879 | 0 | getLexer().UnLex(Tokens.pop_back_val()); |
2880 | 0 | } |
2881 | 0 | } |
2882 | 0 | return true; |
2883 | 0 | } |
2884 | 0 | return false; |
2885 | 0 | } |
2886 | | |
2887 | | std::optional<StringRef> |
2888 | 0 | AMDGPUAsmParser::getGprCountSymbolName(RegisterKind RegKind) { |
2889 | 0 | switch (RegKind) { |
2890 | 0 | case IS_VGPR: |
2891 | 0 | return StringRef(".amdgcn.next_free_vgpr"); |
2892 | 0 | case IS_SGPR: |
2893 | 0 | return StringRef(".amdgcn.next_free_sgpr"); |
2894 | 0 | default: |
2895 | 0 | return std::nullopt; |
2896 | 0 | } |
2897 | 0 | } |
2898 | | |
2899 | 0 | void AMDGPUAsmParser::initializeGprCountSymbol(RegisterKind RegKind) { |
2900 | 0 | auto SymbolName = getGprCountSymbolName(RegKind); |
2901 | 0 | assert(SymbolName && "initializing invalid register kind"); |
2902 | 0 | MCSymbol *Sym = getContext().getOrCreateSymbol(*SymbolName); |
2903 | 0 | Sym->setVariableValue(MCConstantExpr::create(0, getContext())); |
2904 | 0 | } |
2905 | | |
2906 | | bool AMDGPUAsmParser::updateGprCountSymbols(RegisterKind RegKind, |
2907 | | unsigned DwordRegIndex, |
2908 | 0 | unsigned RegWidth) { |
2909 | | // Symbols are only defined for GCN targets |
2910 | 0 | if (AMDGPU::getIsaVersion(getSTI().getCPU()).Major < 6) |
2911 | 0 | return true; |
2912 | | |
2913 | 0 | auto SymbolName = getGprCountSymbolName(RegKind); |
2914 | 0 | if (!SymbolName) |
2915 | 0 | return true; |
2916 | 0 | MCSymbol *Sym = getContext().getOrCreateSymbol(*SymbolName); |
2917 | |
|
2918 | 0 | int64_t NewMax = DwordRegIndex + divideCeil(RegWidth, 32) - 1; |
2919 | 0 | int64_t OldCount; |
2920 | |
|
2921 | 0 | if (!Sym->isVariable()) |
2922 | 0 | return !Error(getLoc(), |
2923 | 0 | ".amdgcn.next_free_{v,s}gpr symbols must be variable"); |
2924 | 0 | if (!Sym->getVariableValue(false)->evaluateAsAbsolute(OldCount)) |
2925 | 0 | return !Error( |
2926 | 0 | getLoc(), |
2927 | 0 | ".amdgcn.next_free_{v,s}gpr symbols must be absolute expressions"); |
2928 | | |
2929 | 0 | if (OldCount <= NewMax) |
2930 | 0 | Sym->setVariableValue(MCConstantExpr::create(NewMax + 1, getContext())); |
2931 | |
|
2932 | 0 | return true; |
2933 | 0 | } |
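 | | // For illustration of the bookkeeping above: parsing v[4:5] gives
 | | // DwordRegIndex = 4 and RegWidth = 64, so NewMax = 4 + ceil(64/32) - 1 = 5
 | | // and .amdgcn.next_free_vgpr is raised to at least 6, i.e. one past the
 | | // highest VGPR index used so far.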
2934 | | |
2935 | | std::unique_ptr<AMDGPUOperand> |
2936 | 0 | AMDGPUAsmParser::parseRegister(bool RestoreOnFailure) { |
2937 | 0 | const auto &Tok = getToken(); |
2938 | 0 | SMLoc StartLoc = Tok.getLoc(); |
2939 | 0 | SMLoc EndLoc = Tok.getEndLoc(); |
2940 | 0 | RegisterKind RegKind; |
2941 | 0 | unsigned Reg, RegNum, RegWidth; |
2942 | |
|
2943 | 0 | if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth)) { |
2944 | 0 | return nullptr; |
2945 | 0 | } |
2946 | 0 | if (isHsaAbi(getSTI())) { |
2947 | 0 | if (!updateGprCountSymbols(RegKind, RegNum, RegWidth)) |
2948 | 0 | return nullptr; |
2949 | 0 | } else |
2950 | 0 | KernelScope.usesRegister(RegKind, RegNum, RegWidth); |
2951 | 0 | return AMDGPUOperand::CreateReg(this, Reg, StartLoc, EndLoc); |
2952 | 0 | } |
2953 | | |
2954 | | ParseStatus AMDGPUAsmParser::parseImm(OperandVector &Operands, |
2955 | 0 | bool HasSP3AbsModifier, bool HasLit) { |
2956 | | // TODO: add syntactic sugar for 1/(2*PI) |
2957 | |
|
2958 | 0 | if (isRegister()) |
2959 | 0 | return ParseStatus::NoMatch; |
2960 | 0 | assert(!isModifier()); |
2961 | | |
2962 | 0 | if (!HasLit) { |
2963 | 0 | HasLit = trySkipId("lit"); |
2964 | 0 | if (HasLit) { |
2965 | 0 | if (!skipToken(AsmToken::LParen, "expected left paren after lit")) |
2966 | 0 | return ParseStatus::Failure; |
2967 | 0 | ParseStatus S = parseImm(Operands, HasSP3AbsModifier, HasLit); |
2968 | 0 | if (S.isSuccess() && |
2969 | 0 | !skipToken(AsmToken::RParen, "expected closing parentheses")) |
2970 | 0 | return ParseStatus::Failure; |
2971 | 0 | return S; |
2972 | 0 | } |
2973 | 0 | } |
2974 | | |
2975 | 0 | const auto& Tok = getToken(); |
2976 | 0 | const auto& NextTok = peekToken(); |
2977 | 0 | bool IsReal = Tok.is(AsmToken::Real); |
2978 | 0 | SMLoc S = getLoc(); |
2979 | 0 | bool Negate = false; |
2980 | |
|
2981 | 0 | if (!IsReal && Tok.is(AsmToken::Minus) && NextTok.is(AsmToken::Real)) { |
2982 | 0 | lex(); |
2983 | 0 | IsReal = true; |
2984 | 0 | Negate = true; |
2985 | 0 | } |
2986 | |
|
2987 | 0 | AMDGPUOperand::Modifiers Mods; |
2988 | 0 | Mods.Lit = HasLit; |
2989 | |
|
2990 | 0 | if (IsReal) { |
2991 | | // Floating-point expressions are not supported. |
2992 | | // We can only allow floating-point literals with an
2993 | | // optional sign. |
2994 | |
|
2995 | 0 | StringRef Num = getTokenStr(); |
2996 | 0 | lex(); |
2997 | |
|
2998 | 0 | APFloat RealVal(APFloat::IEEEdouble()); |
2999 | 0 | auto roundMode = APFloat::rmNearestTiesToEven; |
3000 | 0 | if (errorToBool(RealVal.convertFromString(Num, roundMode).takeError())) |
3001 | 0 | return ParseStatus::Failure; |
3002 | 0 | if (Negate) |
3003 | 0 | RealVal.changeSign(); |
3004 | |
|
3005 | 0 | Operands.push_back( |
3006 | 0 | AMDGPUOperand::CreateImm(this, RealVal.bitcastToAPInt().getZExtValue(), S, |
3007 | 0 | AMDGPUOperand::ImmTyNone, true)); |
3008 | 0 | AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back()); |
3009 | 0 | Op.setModifiers(Mods); |
3010 | |
|
3011 | 0 | return ParseStatus::Success; |
3012 | |
|
3013 | 0 | } else { |
3014 | 0 | int64_t IntVal; |
3015 | 0 | const MCExpr *Expr; |
3016 | 0 | SMLoc S = getLoc(); |
3017 | |
|
3018 | 0 | if (HasSP3AbsModifier) { |
3019 | | // This is a workaround for handling expressions |
3020 | | // as arguments of SP3 'abs' modifier, for example: |
3021 | | // |1.0| |
3022 | | // |-1| |
3023 | | // |1+x| |
3024 | | // This syntax is not compatible with the syntax of standard
3025 | | // MC expressions (due to the trailing '|'). |
3026 | 0 | SMLoc EndLoc; |
3027 | 0 | if (getParser().parsePrimaryExpr(Expr, EndLoc, nullptr)) |
3028 | 0 | return ParseStatus::Failure; |
3029 | 0 | } else { |
3030 | 0 | if (Parser.parseExpression(Expr)) |
3031 | 0 | return ParseStatus::Failure; |
3032 | 0 | } |
3033 | | |
3034 | 0 | if (Expr->evaluateAsAbsolute(IntVal)) { |
3035 | 0 | Operands.push_back(AMDGPUOperand::CreateImm(this, IntVal, S)); |
3036 | 0 | AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back()); |
3037 | 0 | Op.setModifiers(Mods); |
3038 | 0 | } else { |
3039 | 0 | if (HasLit) |
3040 | 0 | return ParseStatus::NoMatch; |
3041 | 0 | Operands.push_back(AMDGPUOperand::CreateExpr(this, Expr, S)); |
3042 | 0 | } |
3043 | | |
3044 | 0 | return ParseStatus::Success; |
3045 | 0 | } |
3046 | | |
3047 | 0 | return ParseStatus::NoMatch; |
3048 | 0 | } |
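 | | // For illustration: this accepts integer literals and expressions as well
 | | // as floating-point literals with an optional leading '-', e.g. "-1.5",
 | | // and the lit(...) wrapper, e.g. "lit(1.0)", which sets the Lit modifier
 | | // on the resulting immediate operand.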
3049 | | |
3050 | 0 | ParseStatus AMDGPUAsmParser::parseReg(OperandVector &Operands) { |
3051 | 0 | if (!isRegister()) |
3052 | 0 | return ParseStatus::NoMatch; |
3053 | | |
3054 | 0 | if (auto R = parseRegister()) { |
3055 | 0 | assert(R->isReg()); |
3056 | 0 | Operands.push_back(std::move(R)); |
3057 | 0 | return ParseStatus::Success; |
3058 | 0 | } |
3059 | 0 | return ParseStatus::Failure; |
3060 | 0 | } |
3061 | | |
3062 | | ParseStatus AMDGPUAsmParser::parseRegOrImm(OperandVector &Operands, |
3063 | 0 | bool HasSP3AbsMod, bool HasLit) { |
3064 | 0 | ParseStatus Res = parseReg(Operands); |
3065 | 0 | if (!Res.isNoMatch()) |
3066 | 0 | return Res; |
3067 | 0 | if (isModifier()) |
3068 | 0 | return ParseStatus::NoMatch; |
3069 | 0 | return parseImm(Operands, HasSP3AbsMod, HasLit); |
3070 | 0 | } |
3071 | | |
3072 | | bool |
3073 | 0 | AMDGPUAsmParser::isNamedOperandModifier(const AsmToken &Token, const AsmToken &NextToken) const { |
3074 | 0 | if (Token.is(AsmToken::Identifier) && NextToken.is(AsmToken::LParen)) { |
3075 | 0 | const auto &str = Token.getString(); |
3076 | 0 | return str == "abs" || str == "neg" || str == "sext"; |
3077 | 0 | } |
3078 | 0 | return false; |
3079 | 0 | } |
3080 | | |
3081 | | bool |
3082 | 0 | AMDGPUAsmParser::isOpcodeModifierWithVal(const AsmToken &Token, const AsmToken &NextToken) const { |
3083 | 0 | return Token.is(AsmToken::Identifier) && NextToken.is(AsmToken::Colon); |
3084 | 0 | } |
3085 | | |
3086 | | bool |
3087 | 0 | AMDGPUAsmParser::isOperandModifier(const AsmToken &Token, const AsmToken &NextToken) const { |
3088 | 0 | return isNamedOperandModifier(Token, NextToken) || Token.is(AsmToken::Pipe); |
3089 | 0 | } |
3090 | | |
3091 | | bool |
3092 | 0 | AMDGPUAsmParser::isRegOrOperandModifier(const AsmToken &Token, const AsmToken &NextToken) const { |
3093 | 0 | return isRegister(Token, NextToken) || isOperandModifier(Token, NextToken); |
3094 | 0 | } |
3095 | | |
3096 | | // Check if this is an operand modifier or an opcode modifier |
3097 | | // which may look like an expression but is not. We should
3098 | | // avoid parsing these modifiers as expressions. Currently |
3099 | | // recognized sequences are: |
3100 | | // |...| |
3101 | | // abs(...) |
3102 | | // neg(...) |
3103 | | // sext(...) |
3104 | | // -reg |
3105 | | // -|...| |
3106 | | // -abs(...) |
3107 | | // name:... |
3108 | | // |
3109 | | bool |
3110 | 0 | AMDGPUAsmParser::isModifier() { |
3111 | |
|
3112 | 0 | AsmToken Tok = getToken(); |
3113 | 0 | AsmToken NextToken[2]; |
3114 | 0 | peekTokens(NextToken); |
3115 | |
|
3116 | 0 | return isOperandModifier(Tok, NextToken[0]) || |
3117 | 0 | (Tok.is(AsmToken::Minus) && isRegOrOperandModifier(NextToken[0], NextToken[1])) || |
3118 | 0 | isOpcodeModifierWithVal(Tok, NextToken[0]); |
3119 | 0 | } |
3120 | | |
3121 | | // Check if the current token is an SP3 'neg' modifier. |
3122 | | // Currently this modifier is allowed in the following contexts:
3123 | | // |
3124 | | // 1. Before a register, e.g. "-v0", "-v[...]" or "-[v0,v1]". |
3125 | | // 2. Before an 'abs' modifier: -abs(...) |
3126 | | // 3. Before an SP3 'abs' modifier: -|...| |
3127 | | // |
3128 | | // In all other cases "-" is handled as part
3129 | | // of an expression that follows the sign. |
3130 | | // |
3131 | | // Note: When "-" is followed by an integer literal N,
3132 | | // it is interpreted as integer negation rather
3133 | | // than a floating-point NEG modifier applied to N.
3134 | | // Besides being counter-intuitive, such use of the floating-point
3135 | | // NEG modifier would have resulted in different meanings
3136 | | // of integer literals used with VOP1/2/C and VOP3,
3137 | | // for example:
3138 | | // v_exp_f32_e32 v5, -1 // VOP1: src0 = 0xFFFFFFFF
3139 | | // v_exp_f32_e64 v5, -1 // VOP3: src0 = 0x80000001
3140 | | // Negative fp literals with a preceding "-" are
3141 | | // handled likewise, for uniformity.
3142 | | // |
3143 | | bool |
3144 | 0 | AMDGPUAsmParser::parseSP3NegModifier() { |
3145 | |
|
3146 | 0 | AsmToken NextToken[2]; |
3147 | 0 | peekTokens(NextToken); |
3148 | |
|
3149 | 0 | if (isToken(AsmToken::Minus) && |
3150 | 0 | (isRegister(NextToken[0], NextToken[1]) || |
3151 | 0 | NextToken[0].is(AsmToken::Pipe) || |
3152 | 0 | isId(NextToken[0], "abs"))) { |
3153 | 0 | lex(); |
3154 | 0 | return true; |
3155 | 0 | } |
3156 | | |
3157 | 0 | return false; |
3158 | 0 | } |
3159 | | |
3160 | | ParseStatus |
3161 | | AMDGPUAsmParser::parseRegOrImmWithFPInputMods(OperandVector &Operands, |
3162 | 0 | bool AllowImm) { |
3163 | 0 | bool Neg, SP3Neg; |
3164 | 0 | bool Abs, SP3Abs; |
3165 | 0 | bool Lit; |
3166 | 0 | SMLoc Loc; |
3167 | | |
3168 | | // Disable ambiguous constructs like '--1' etc. Should use neg(-1) instead. |
3169 | 0 | if (isToken(AsmToken::Minus) && peekToken().is(AsmToken::Minus)) |
3170 | 0 | return Error(getLoc(), "invalid syntax, expected 'neg' modifier"); |
3171 | | |
3172 | 0 | SP3Neg = parseSP3NegModifier(); |
3173 | |
|
3174 | 0 | Loc = getLoc(); |
3175 | 0 | Neg = trySkipId("neg"); |
3176 | 0 | if (Neg && SP3Neg) |
3177 | 0 | return Error(Loc, "expected register or immediate"); |
3178 | 0 | if (Neg && !skipToken(AsmToken::LParen, "expected left paren after neg")) |
3179 | 0 | return ParseStatus::Failure; |
3180 | | |
3181 | 0 | Abs = trySkipId("abs"); |
3182 | 0 | if (Abs && !skipToken(AsmToken::LParen, "expected left paren after abs")) |
3183 | 0 | return ParseStatus::Failure; |
3184 | | |
3185 | 0 | Lit = trySkipId("lit"); |
3186 | 0 | if (Lit && !skipToken(AsmToken::LParen, "expected left paren after lit")) |
3187 | 0 | return ParseStatus::Failure; |
3188 | | |
3189 | 0 | Loc = getLoc(); |
3190 | 0 | SP3Abs = trySkipToken(AsmToken::Pipe); |
3191 | 0 | if (Abs && SP3Abs) |
3192 | 0 | return Error(Loc, "expected register or immediate"); |
3193 | | |
3194 | 0 | ParseStatus Res; |
3195 | 0 | if (AllowImm) { |
3196 | 0 | Res = parseRegOrImm(Operands, SP3Abs, Lit); |
3197 | 0 | } else { |
3198 | 0 | Res = parseReg(Operands); |
3199 | 0 | } |
3200 | 0 | if (!Res.isSuccess()) |
3201 | 0 | return (SP3Neg || Neg || SP3Abs || Abs || Lit) ? ParseStatus::Failure : Res; |
3202 | | |
3203 | 0 | if (Lit && !Operands.back()->isImm()) |
3204 | 0 | Error(Loc, "expected immediate with lit modifier"); |
3205 | |
|
3206 | 0 | if (SP3Abs && !skipToken(AsmToken::Pipe, "expected vertical bar")) |
3207 | 0 | return ParseStatus::Failure; |
3208 | 0 | if (Abs && !skipToken(AsmToken::RParen, "expected closing parentheses")) |
3209 | 0 | return ParseStatus::Failure; |
3210 | 0 | if (Neg && !skipToken(AsmToken::RParen, "expected closing parentheses")) |
3211 | 0 | return ParseStatus::Failure; |
3212 | 0 | if (Lit && !skipToken(AsmToken::RParen, "expected closing parentheses")) |
3213 | 0 | return ParseStatus::Failure; |
3214 | | |
3215 | 0 | AMDGPUOperand::Modifiers Mods; |
3216 | 0 | Mods.Abs = Abs || SP3Abs; |
3217 | 0 | Mods.Neg = Neg || SP3Neg; |
3218 | 0 | Mods.Lit = Lit; |
3219 | |
|
3220 | 0 | if (Mods.hasFPModifiers() || Lit) { |
3221 | 0 | AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back()); |
3222 | 0 | if (Op.isExpr()) |
3223 | 0 | return Error(Op.getStartLoc(), "expected an absolute expression"); |
3224 | 0 | Op.setModifiers(Mods); |
3225 | 0 | } |
3226 | 0 | return ParseStatus::Success; |
3227 | 0 | } |
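 | | // For illustration, the modifier spellings handled above:
 | | //   neg(v0)   abs(v1)   -v2   |v3|   -|v4|   lit(1.0)
 | | // The named and SP3 forms of the same modifier cannot be combined, e.g.
 | | // "abs(|v0|)" is rejected with "expected register or immediate".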
3228 | | |
3229 | | ParseStatus |
3230 | | AMDGPUAsmParser::parseRegOrImmWithIntInputMods(OperandVector &Operands, |
3231 | 0 | bool AllowImm) { |
3232 | 0 | bool Sext = trySkipId("sext"); |
3233 | 0 | if (Sext && !skipToken(AsmToken::LParen, "expected left paren after sext")) |
3234 | 0 | return ParseStatus::Failure; |
3235 | | |
3236 | 0 | ParseStatus Res; |
3237 | 0 | if (AllowImm) { |
3238 | 0 | Res = parseRegOrImm(Operands); |
3239 | 0 | } else { |
3240 | 0 | Res = parseReg(Operands); |
3241 | 0 | } |
3242 | 0 | if (!Res.isSuccess()) |
3243 | 0 | return Sext ? ParseStatus::Failure : Res; |
3244 | | |
3245 | 0 | if (Sext && !skipToken(AsmToken::RParen, "expected closing parentheses")) |
3246 | 0 | return ParseStatus::Failure; |
3247 | | |
3248 | 0 | AMDGPUOperand::Modifiers Mods; |
3249 | 0 | Mods.Sext = Sext; |
3250 | |
|
3251 | 0 | if (Mods.hasIntModifiers()) { |
3252 | 0 | AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back()); |
3253 | 0 | if (Op.isExpr()) |
3254 | 0 | return Error(Op.getStartLoc(), "expected an absolute expression"); |
3255 | 0 | Op.setModifiers(Mods); |
3256 | 0 | } |
3257 | | |
3258 | 0 | return ParseStatus::Success; |
3259 | 0 | } |
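 | | // For illustration: the only named integer input modifier is sext(...),
 | | // e.g. "sext(v1)" as a source operand. The wrapped operand must be a
 | | // register or an absolute expression; a relocatable expression is
 | | // rejected above.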
3260 | | |
3261 | 0 | ParseStatus AMDGPUAsmParser::parseRegWithFPInputMods(OperandVector &Operands) { |
3262 | 0 | return parseRegOrImmWithFPInputMods(Operands, false); |
3263 | 0 | } |
3264 | | |
3265 | 0 | ParseStatus AMDGPUAsmParser::parseRegWithIntInputMods(OperandVector &Operands) { |
3266 | 0 | return parseRegOrImmWithIntInputMods(Operands, false); |
3267 | 0 | } |
3268 | | |
3269 | 0 | ParseStatus AMDGPUAsmParser::parseVReg32OrOff(OperandVector &Operands) { |
3270 | 0 | auto Loc = getLoc(); |
3271 | 0 | if (trySkipId("off")) { |
3272 | 0 | Operands.push_back(AMDGPUOperand::CreateImm(this, 0, Loc, |
3273 | 0 | AMDGPUOperand::ImmTyOff, false)); |
3274 | 0 | return ParseStatus::Success; |
3275 | 0 | } |
3276 | | |
3277 | 0 | if (!isRegister()) |
3278 | 0 | return ParseStatus::NoMatch; |
3279 | | |
3280 | 0 | std::unique_ptr<AMDGPUOperand> Reg = parseRegister(); |
3281 | 0 | if (Reg) { |
3282 | 0 | Operands.push_back(std::move(Reg)); |
3283 | 0 | return ParseStatus::Success; |
3284 | 0 | } |
3285 | | |
3286 | 0 | return ParseStatus::Failure; |
3287 | 0 | } |
3288 | | |
3289 | 0 | unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) { |
3290 | 0 | uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags; |
3291 | |
|
3292 | 0 | if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) || |
3293 | 0 | (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)) || |
3294 | 0 | (isForcedDPP() && !(TSFlags & SIInstrFlags::DPP)) || |
3295 | 0 | (isForcedSDWA() && !(TSFlags & SIInstrFlags::SDWA)) ) |
3296 | 0 | return Match_InvalidOperand; |
3297 | | |
3298 | 0 | if ((TSFlags & SIInstrFlags::VOP3) && |
3299 | 0 | (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) && |
3300 | 0 | getForcedEncodingSize() != 64) |
3301 | 0 | return Match_PreferE32; |
3302 | | |
3303 | 0 | if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi || |
3304 | 0 | Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) { |
3305 | | // v_mac_f32/16 allow only dst_sel == DWORD; |
3306 | 0 | auto OpNum = |
3307 | 0 | AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::dst_sel); |
3308 | 0 | const auto &Op = Inst.getOperand(OpNum); |
3309 | 0 | if (!Op.isImm() || Op.getImm() != AMDGPU::SDWA::SdwaSel::DWORD) { |
3310 | 0 | return Match_InvalidOperand; |
3311 | 0 | } |
3312 | 0 | } |
3313 | | |
3314 | 0 | return Match_Success; |
3315 | 0 | } |
3316 | | |
3317 | 0 | static ArrayRef<unsigned> getAllVariants() { |
3318 | 0 | static const unsigned Variants[] = { |
3319 | 0 | AMDGPUAsmVariants::DEFAULT, AMDGPUAsmVariants::VOP3, |
3320 | 0 | AMDGPUAsmVariants::SDWA, AMDGPUAsmVariants::SDWA9, |
3321 | 0 | AMDGPUAsmVariants::DPP, AMDGPUAsmVariants::VOP3_DPP |
3322 | 0 | }; |
3323 | |
|
3324 | 0 | return ArrayRef(Variants); |
3325 | 0 | } |
3326 | | |
3327 | | // What asm variants we should check |
3328 | 0 | ArrayRef<unsigned> AMDGPUAsmParser::getMatchedVariants() const { |
3329 | 0 | if (isForcedDPP() && isForcedVOP3()) { |
3330 | 0 | static const unsigned Variants[] = {AMDGPUAsmVariants::VOP3_DPP}; |
3331 | 0 | return ArrayRef(Variants); |
3332 | 0 | } |
3333 | 0 | if (getForcedEncodingSize() == 32) { |
3334 | 0 | static const unsigned Variants[] = {AMDGPUAsmVariants::DEFAULT}; |
3335 | 0 | return ArrayRef(Variants); |
3336 | 0 | } |
3337 | | |
3338 | 0 | if (isForcedVOP3()) { |
3339 | 0 | static const unsigned Variants[] = {AMDGPUAsmVariants::VOP3}; |
3340 | 0 | return ArrayRef(Variants); |
3341 | 0 | } |
3342 | | |
3343 | 0 | if (isForcedSDWA()) { |
3344 | 0 | static const unsigned Variants[] = {AMDGPUAsmVariants::SDWA, |
3345 | 0 | AMDGPUAsmVariants::SDWA9}; |
3346 | 0 | return ArrayRef(Variants); |
3347 | 0 | } |
3348 | | |
3349 | 0 | if (isForcedDPP()) { |
3350 | 0 | static const unsigned Variants[] = {AMDGPUAsmVariants::DPP}; |
3351 | 0 | return ArrayRef(Variants); |
3352 | 0 | } |
3353 | | |
3354 | 0 | return getAllVariants(); |
3355 | 0 | } |
3356 | | |
3357 | 0 | StringRef AMDGPUAsmParser::getMatchedVariantName() const { |
3358 | 0 | if (isForcedDPP() && isForcedVOP3()) |
3359 | 0 | return "e64_dpp"; |
3360 | | |
3361 | 0 | if (getForcedEncodingSize() == 32) |
3362 | 0 | return "e32"; |
3363 | | |
3364 | 0 | if (isForcedVOP3()) |
3365 | 0 | return "e64"; |
3366 | | |
3367 | 0 | if (isForcedSDWA()) |
3368 | 0 | return "sdwa"; |
3369 | | |
3370 | 0 | if (isForcedDPP()) |
3371 | 0 | return "dpp"; |
3372 | | |
3373 | 0 | return ""; |
3374 | 0 | } |
3375 | | |
3376 | 0 | unsigned AMDGPUAsmParser::findImplicitSGPRReadInVOP(const MCInst &Inst) const { |
3377 | 0 | const MCInstrDesc &Desc = MII.get(Inst.getOpcode()); |
3378 | 0 | for (MCPhysReg Reg : Desc.implicit_uses()) { |
3379 | 0 | switch (Reg) { |
3380 | 0 | case AMDGPU::FLAT_SCR: |
3381 | 0 | case AMDGPU::VCC: |
3382 | 0 | case AMDGPU::VCC_LO: |
3383 | 0 | case AMDGPU::VCC_HI: |
3384 | 0 | case AMDGPU::M0: |
3385 | 0 | return Reg; |
3386 | 0 | default: |
3387 | 0 | break; |
3388 | 0 | } |
3389 | 0 | } |
3390 | 0 | return AMDGPU::NoRegister; |
3391 | 0 | } |
3392 | | |
3393 | | // NB: This code is correct only when used to check constant |
3394 | | // bus limitations because GFX7 supports no f16 inline constants.
3395 | | // Note that there are no cases in which a GFX7 opcode violates
3396 | | // constant bus limitations due to the use of an f16 constant. |
3397 | | bool AMDGPUAsmParser::isInlineConstant(const MCInst &Inst, |
3398 | 0 | unsigned OpIdx) const { |
3399 | 0 | const MCInstrDesc &Desc = MII.get(Inst.getOpcode()); |
3400 | |
|
3401 | 0 | if (!AMDGPU::isSISrcOperand(Desc, OpIdx) || |
3402 | 0 | AMDGPU::isKImmOperand(Desc, OpIdx)) { |
3403 | 0 | return false; |
3404 | 0 | } |
3405 | | |
3406 | 0 | const MCOperand &MO = Inst.getOperand(OpIdx); |
3407 | |
|
3408 | 0 | int64_t Val = MO.getImm(); |
3409 | 0 | auto OpSize = AMDGPU::getOperandSize(Desc, OpIdx); |
3410 | |
|
3411 | 0 | switch (OpSize) { // expected operand size |
3412 | 0 | case 8: |
3413 | 0 | return AMDGPU::isInlinableLiteral64(Val, hasInv2PiInlineImm()); |
3414 | 0 | case 4: |
3415 | 0 | return AMDGPU::isInlinableLiteral32(Val, hasInv2PiInlineImm()); |
3416 | 0 | case 2: { |
3417 | 0 | const unsigned OperandType = Desc.operands()[OpIdx].OperandType; |
3418 | 0 | if (OperandType == AMDGPU::OPERAND_REG_IMM_INT16 || |
3419 | 0 | OperandType == AMDGPU::OPERAND_REG_INLINE_C_INT16 || |
3420 | 0 | OperandType == AMDGPU::OPERAND_REG_INLINE_AC_INT16) |
3421 | 0 | return AMDGPU::isInlinableIntLiteral(Val); |
3422 | | |
3423 | 0 | if (OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2INT16 || |
3424 | 0 | OperandType == AMDGPU::OPERAND_REG_INLINE_AC_V2INT16 || |
3425 | 0 | OperandType == AMDGPU::OPERAND_REG_IMM_V2INT16) |
3426 | 0 | return AMDGPU::isInlinableLiteralV2I16(Val); |
3427 | | |
3428 | 0 | if (OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2FP16 || |
3429 | 0 | OperandType == AMDGPU::OPERAND_REG_INLINE_AC_V2FP16 || |
3430 | 0 | OperandType == AMDGPU::OPERAND_REG_IMM_V2FP16) |
3431 | 0 | return AMDGPU::isInlinableLiteralV2F16(Val); |
3432 | | |
3433 | 0 | return AMDGPU::isInlinableLiteral16(Val, hasInv2PiInlineImm()); |
3434 | 0 | } |
3435 | 0 | default: |
3436 | 0 | llvm_unreachable("invalid operand size"); |
3437 | 0 | } |
3438 | 0 | } |
3439 | | |
3440 | 0 | unsigned AMDGPUAsmParser::getConstantBusLimit(unsigned Opcode) const { |
3441 | 0 | if (!isGFX10Plus()) |
3442 | 0 | return 1; |
3443 | | |
3444 | 0 | switch (Opcode) { |
3445 | | // 64-bit shift instructions can use only one scalar value input |
3446 | 0 | case AMDGPU::V_LSHLREV_B64_e64: |
3447 | 0 | case AMDGPU::V_LSHLREV_B64_gfx10: |
3448 | 0 | case AMDGPU::V_LSHLREV_B64_e64_gfx11: |
3449 | 0 | case AMDGPU::V_LSHLREV_B64_e32_gfx12: |
3450 | 0 | case AMDGPU::V_LSHLREV_B64_e64_gfx12: |
3451 | 0 | case AMDGPU::V_LSHRREV_B64_e64: |
3452 | 0 | case AMDGPU::V_LSHRREV_B64_gfx10: |
3453 | 0 | case AMDGPU::V_LSHRREV_B64_e64_gfx11: |
3454 | 0 | case AMDGPU::V_LSHRREV_B64_e64_gfx12: |
3455 | 0 | case AMDGPU::V_ASHRREV_I64_e64: |
3456 | 0 | case AMDGPU::V_ASHRREV_I64_gfx10: |
3457 | 0 | case AMDGPU::V_ASHRREV_I64_e64_gfx11: |
3458 | 0 | case AMDGPU::V_ASHRREV_I64_e64_gfx12: |
3459 | 0 | case AMDGPU::V_LSHL_B64_e64: |
3460 | 0 | case AMDGPU::V_LSHR_B64_e64: |
3461 | 0 | case AMDGPU::V_ASHR_I64_e64: |
3462 | 0 | return 1; |
3463 | 0 | default: |
3464 | 0 | return 2; |
3465 | 0 | } |
3466 | 0 | } |
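 | | // For illustration: pre-GFX10 targets allow a single constant bus read per
 | | // instruction; on GFX10+ the limit is two, except for the 64-bit shifts
 | | // listed above, which remain limited to one scalar value input.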
3467 | | |
3468 | | constexpr unsigned MAX_SRC_OPERANDS_NUM = 6; |
3469 | | using OperandIndices = SmallVector<int16_t, MAX_SRC_OPERANDS_NUM>; |
3470 | | |
3471 | | // Get regular operand indices in the same order as specified |
3472 | | // in the instruction (but append mandatory literals to the end). |
3473 | | static OperandIndices getSrcOperandIndices(unsigned Opcode, |
3474 | 0 | bool AddMandatoryLiterals = false) { |
3475 | |
|
3476 | 0 | int16_t ImmIdx = |
3477 | 0 | AddMandatoryLiterals ? getNamedOperandIdx(Opcode, OpName::imm) : -1; |
3478 | |
|
3479 | 0 | if (isVOPD(Opcode)) { |
3480 | 0 | int16_t ImmDeferredIdx = |
3481 | 0 | AddMandatoryLiterals ? getNamedOperandIdx(Opcode, OpName::immDeferred) |
3482 | 0 | : -1; |
3483 | |
|
3484 | 0 | return {getNamedOperandIdx(Opcode, OpName::src0X), |
3485 | 0 | getNamedOperandIdx(Opcode, OpName::vsrc1X), |
3486 | 0 | getNamedOperandIdx(Opcode, OpName::src0Y), |
3487 | 0 | getNamedOperandIdx(Opcode, OpName::vsrc1Y), |
3488 | 0 | ImmDeferredIdx, |
3489 | 0 | ImmIdx}; |
3490 | 0 | } |
3491 | | |
3492 | 0 | return {getNamedOperandIdx(Opcode, OpName::src0), |
3493 | 0 | getNamedOperandIdx(Opcode, OpName::src1), |
3494 | 0 | getNamedOperandIdx(Opcode, OpName::src2), ImmIdx}; |
3495 | 0 | } |
3496 | | |
3497 | 0 | bool AMDGPUAsmParser::usesConstantBus(const MCInst &Inst, unsigned OpIdx) { |
3498 | 0 | const MCOperand &MO = Inst.getOperand(OpIdx); |
3499 | 0 | if (MO.isImm()) { |
3500 | 0 | return !isInlineConstant(Inst, OpIdx); |
3501 | 0 | } else if (MO.isReg()) { |
3502 | 0 | auto Reg = MO.getReg(); |
3503 | 0 | const MCRegisterInfo *TRI = getContext().getRegisterInfo(); |
3504 | 0 | auto PReg = mc2PseudoReg(Reg); |
3505 | 0 | return isSGPR(PReg, TRI) && PReg != SGPR_NULL; |
3506 | 0 | } else { |
3507 | 0 | return true; |
3508 | 0 | } |
3509 | 0 | } |
3510 | | |
3511 | | bool AMDGPUAsmParser::validateConstantBusLimitations( |
3512 | 0 | const MCInst &Inst, const OperandVector &Operands) { |
3513 | 0 | const unsigned Opcode = Inst.getOpcode(); |
3514 | 0 | const MCInstrDesc &Desc = MII.get(Opcode); |
3515 | 0 | unsigned LastSGPR = AMDGPU::NoRegister; |
3516 | 0 | unsigned ConstantBusUseCount = 0; |
3517 | 0 | unsigned NumLiterals = 0; |
3518 | 0 | unsigned LiteralSize; |
3519 | |
|
3520 | 0 | if (!(Desc.TSFlags & |
3521 | 0 | (SIInstrFlags::VOPC | SIInstrFlags::VOP1 | SIInstrFlags::VOP2 | |
3522 | 0 | SIInstrFlags::VOP3 | SIInstrFlags::VOP3P | SIInstrFlags::SDWA)) && |
3523 | 0 | !isVOPD(Opcode)) |
3524 | 0 | return true; |
3525 | | |
3526 | | // Check special imm operands (used by madmk, etc) |
3527 | 0 | if (AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::imm)) { |
3528 | 0 | ++NumLiterals; |
3529 | 0 | LiteralSize = 4; |
3530 | 0 | } |
3531 | |
|
3532 | 0 | SmallDenseSet<unsigned> SGPRsUsed; |
3533 | 0 | unsigned SGPRUsed = findImplicitSGPRReadInVOP(Inst); |
3534 | 0 | if (SGPRUsed != AMDGPU::NoRegister) { |
3535 | 0 | SGPRsUsed.insert(SGPRUsed); |
3536 | 0 | ++ConstantBusUseCount; |
3537 | 0 | } |
3538 | |
|
3539 | 0 | OperandIndices OpIndices = getSrcOperandIndices(Opcode); |
3540 | |
|
3541 | 0 | for (int OpIdx : OpIndices) { |
3542 | 0 | if (OpIdx == -1) |
3543 | 0 | continue; |
3544 | | |
3545 | 0 | const MCOperand &MO = Inst.getOperand(OpIdx); |
3546 | 0 | if (usesConstantBus(Inst, OpIdx)) { |
3547 | 0 | if (MO.isReg()) { |
3548 | 0 | LastSGPR = mc2PseudoReg(MO.getReg()); |
3549 | | // Pairs of registers with a partial intersection like these
3550 | | // s0, s[0:1] |
3551 | | // flat_scratch_lo, flat_scratch |
3552 | | // flat_scratch_lo, flat_scratch_hi |
3553 | | // are theoretically valid but are disabled anyway.
3554 | | // Note that this code mimics SIInstrInfo::verifyInstruction.
3555 | 0 | if (SGPRsUsed.insert(LastSGPR).second) { |
3556 | 0 | ++ConstantBusUseCount; |
3557 | 0 | } |
3558 | 0 | } else { // Expression or a literal |
3559 | |
|
3560 | 0 | if (Desc.operands()[OpIdx].OperandType == MCOI::OPERAND_IMMEDIATE) |
3561 | 0 | continue; // special operand like VINTERP attr_chan |
3562 | | |
3563 | | // An instruction may use only one literal. |
3564 | | // This has been validated on the previous step. |
3565 | | // See validateVOPLiteral. |
3566 | | // This literal may be used as more than one operand. |
3567 | | // If all these operands are of the same size, |
3568 | | // this literal counts as one scalar value. |
3569 | | // Otherwise it counts as 2 scalar values. |
3570 | | // See "GFX10 Shader Programming", section 3.6.2.3. |
3571 | | |
3572 | 0 | unsigned Size = AMDGPU::getOperandSize(Desc, OpIdx); |
3573 | 0 | if (Size < 4) |
3574 | 0 | Size = 4; |
3575 | |
|
3576 | 0 | if (NumLiterals == 0) { |
3577 | 0 | NumLiterals = 1; |
3578 | 0 | LiteralSize = Size; |
3579 | 0 | } else if (LiteralSize != Size) { |
3580 | 0 | NumLiterals = 2; |
3581 | 0 | } |
3582 | 0 | } |
3583 | 0 | } |
3584 | 0 | } |
3585 | 0 | ConstantBusUseCount += NumLiterals; |
3586 | |
|
3587 | 0 | if (ConstantBusUseCount <= getConstantBusLimit(Opcode)) |
3588 | 0 | return true; |
3589 | | |
3590 | 0 | SMLoc LitLoc = getLitLoc(Operands); |
3591 | 0 | SMLoc RegLoc = getRegLoc(LastSGPR, Operands); |
3592 | 0 | SMLoc Loc = (LitLoc.getPointer() < RegLoc.getPointer()) ? RegLoc : LitLoc; |
3593 | 0 | Error(Loc, "invalid operand (violates constant bus restrictions)"); |
3594 | 0 | return false; |
3595 | 0 | } |
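 | | // For illustration (assuming a pre-GFX10 target, where the limit is 1):
 | | //   v_add_f32_e64 v0, s0, s1   // two distinct SGPRs -> 2 constant bus
 | | //                              // reads -> "invalid operand (violates
 | | //                              // constant bus restrictions)"
 | | //   v_add_f32_e64 v0, s0, s0   // the same SGPR counts once -> accepted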
3596 | | |
3597 | | bool AMDGPUAsmParser::validateVOPDRegBankConstraints( |
3598 | 0 | const MCInst &Inst, const OperandVector &Operands) { |
3599 | |
|
3600 | 0 | const unsigned Opcode = Inst.getOpcode(); |
3601 | 0 | if (!isVOPD(Opcode)) |
3602 | 0 | return true; |
3603 | | |
3604 | 0 | const MCRegisterInfo *TRI = getContext().getRegisterInfo(); |
3605 | |
|
3606 | 0 | auto getVRegIdx = [&](unsigned, unsigned OperandIdx) { |
3607 | 0 | const MCOperand &Opr = Inst.getOperand(OperandIdx); |
3608 | 0 | return (Opr.isReg() && !isSGPR(mc2PseudoReg(Opr.getReg()), TRI)) |
3609 | 0 | ? Opr.getReg() |
3610 | 0 | : MCRegister::NoRegister; |
3611 | 0 | }; |
3612 | | |
3613 | | // On GFX12, if both OpX and OpY are V_MOV_B32, then OpY uses the SRC2 source-cache.
3614 | 0 | bool SkipSrc = Opcode == AMDGPU::V_DUAL_MOV_B32_e32_X_MOV_B32_e32_gfx12; |
3615 | |
|
3616 | 0 | const auto &InstInfo = getVOPDInstInfo(Opcode, &MII); |
3617 | 0 | auto InvalidCompOprIdx = |
3618 | 0 | InstInfo.getInvalidCompOperandIndex(getVRegIdx, SkipSrc); |
3619 | 0 | if (!InvalidCompOprIdx) |
3620 | 0 | return true; |
3621 | | |
3622 | 0 | auto CompOprIdx = *InvalidCompOprIdx; |
3623 | 0 | auto ParsedIdx = |
3624 | 0 | std::max(InstInfo[VOPD::X].getIndexInParsedOperands(CompOprIdx), |
3625 | 0 | InstInfo[VOPD::Y].getIndexInParsedOperands(CompOprIdx)); |
3626 | 0 | assert(ParsedIdx > 0 && ParsedIdx < Operands.size()); |
3627 | | |
3628 | 0 | auto Loc = ((AMDGPUOperand &)*Operands[ParsedIdx]).getStartLoc(); |
3629 | 0 | if (CompOprIdx == VOPD::Component::DST) { |
3630 | 0 | Error(Loc, "one dst register must be even and the other odd"); |
3631 | 0 | } else { |
3632 | 0 | auto CompSrcIdx = CompOprIdx - VOPD::Component::DST_NUM; |
3633 | 0 | Error(Loc, Twine("src") + Twine(CompSrcIdx) + |
3634 | 0 | " operands must use different VGPR banks"); |
3635 | 0 | } |
3636 | |
|
3637 | 0 | return false; |
3638 | 0 | } |
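 | | // For illustration: a VOPD instruction pairs two VALU operations, and the
 | | // check above requires the two vdst registers to form an even/odd pair and
 | | // the corresponding VGPR sources of OpX and OpY to come from different
 | | // register banks; violations are reported at the offending parsed operand
 | | // with the errors above.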
3639 | | |
3640 | 0 | bool AMDGPUAsmParser::validateIntClampSupported(const MCInst &Inst) { |
3641 | |
|
3642 | 0 | const unsigned Opc = Inst.getOpcode(); |
3643 | 0 | const MCInstrDesc &Desc = MII.get(Opc); |
3644 | |
|
3645 | 0 | if ((Desc.TSFlags & SIInstrFlags::IntClamp) != 0 && !hasIntClamp()) { |
3646 | 0 | int ClampIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp); |
3647 | 0 | assert(ClampIdx != -1); |
3648 | 0 | return Inst.getOperand(ClampIdx).getImm() == 0; |
3649 | 0 | } |
3650 | | |
3651 | 0 | return true; |
3652 | 0 | } |
3653 | | |
3654 | | constexpr uint64_t MIMGFlags = |
3655 | | SIInstrFlags::MIMG | SIInstrFlags::VIMAGE | SIInstrFlags::VSAMPLE; |
3656 | | |
3657 | | bool AMDGPUAsmParser::validateMIMGDataSize(const MCInst &Inst, |
3658 | 0 | const SMLoc &IDLoc) { |
3659 | |
|
3660 | 0 | const unsigned Opc = Inst.getOpcode(); |
3661 | 0 | const MCInstrDesc &Desc = MII.get(Opc); |
3662 | |
|
3663 | 0 | if ((Desc.TSFlags & MIMGFlags) == 0) |
3664 | 0 | return true; |
3665 | | |
3666 | 0 | int VDataIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata); |
3667 | 0 | int DMaskIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dmask); |
3668 | 0 | int TFEIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::tfe); |
3669 | |
|
3670 | 0 | assert(VDataIdx != -1); |
3671 | | |
3672 | 0 | if ((DMaskIdx == -1 || TFEIdx == -1) && isGFX10_AEncoding()) // intersect_ray |
3673 | 0 | return true; |
3674 | | |
3675 | 0 | unsigned VDataSize = AMDGPU::getRegOperandSize(getMRI(), Desc, VDataIdx); |
3676 | 0 | unsigned TFESize = (TFEIdx != -1 && Inst.getOperand(TFEIdx).getImm()) ? 1 : 0; |
3677 | 0 | unsigned DMask = Inst.getOperand(DMaskIdx).getImm() & 0xf; |
3678 | 0 | if (DMask == 0) |
3679 | 0 | DMask = 1; |
3680 | |
|
3681 | 0 | bool IsPackedD16 = false; |
3682 | 0 | unsigned DataSize = |
3683 | 0 | (Desc.TSFlags & SIInstrFlags::Gather4) ? 4 : llvm::popcount(DMask); |
3684 | 0 | if (hasPackedD16()) { |
3685 | 0 | int D16Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::d16); |
3686 | 0 | IsPackedD16 = D16Idx >= 0; |
3687 | 0 | if (IsPackedD16 && Inst.getOperand(D16Idx).getImm()) |
3688 | 0 | DataSize = (DataSize + 1) / 2; |
3689 | 0 | } |
3690 | |
|
3691 | 0 | if ((VDataSize / 4) == DataSize + TFESize) |
3692 | 0 | return true; |
3693 | | |
3694 | 0 | StringRef Modifiers; |
3695 | 0 | if (isGFX90A()) |
3696 | 0 | Modifiers = IsPackedD16 ? "dmask and d16" : "dmask"; |
3697 | 0 | else |
3698 | 0 | Modifiers = IsPackedD16 ? "dmask, d16 and tfe" : "dmask and tfe"; |
3699 | |
|
3700 | 0 | Error(IDLoc, Twine("image data size does not match ") + Modifiers); |
3701 | 0 | return false; |
3702 | 0 | } |
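 | | // For illustration: with dmask:0x7 the vdata operand must cover
 | | // popcount(0x7) = 3 dwords (always 4 for gather4), plus one extra dword
 | | // when tfe is set; with packed d16 the count is halved and rounded up, so
 | | // dmask:0x7 with d16 needs 2 dwords of vdata.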
3703 | | |
3704 | | bool AMDGPUAsmParser::validateMIMGAddrSize(const MCInst &Inst, |
3705 | 0 | const SMLoc &IDLoc) { |
3706 | 0 | const unsigned Opc = Inst.getOpcode(); |
3707 | 0 | const MCInstrDesc &Desc = MII.get(Opc); |
3708 | |
|
3709 | 0 | if ((Desc.TSFlags & MIMGFlags) == 0 || !isGFX10Plus()) |
3710 | 0 | return true; |
3711 | | |
3712 | 0 | const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opc); |
3713 | |
|
3714 | 0 | const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode = |
3715 | 0 | AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode); |
3716 | 0 | int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0); |
3717 | 0 | int RSrcOpName = Desc.TSFlags & SIInstrFlags::MIMG ? AMDGPU::OpName::srsrc |
3718 | 0 | : AMDGPU::OpName::rsrc; |
3719 | 0 | int SrsrcIdx = AMDGPU::getNamedOperandIdx(Opc, RSrcOpName); |
3720 | 0 | int DimIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dim); |
3721 | 0 | int A16Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::a16); |
3722 | |
|
3723 | 0 | assert(VAddr0Idx != -1); |
3724 | 0 | assert(SrsrcIdx != -1); |
3725 | 0 | assert(SrsrcIdx > VAddr0Idx); |
3726 | | |
3727 | 0 | bool IsA16 = (A16Idx != -1 && Inst.getOperand(A16Idx).getImm()); |
3728 | 0 | if (BaseOpcode->BVH) { |
3729 | 0 | if (IsA16 == BaseOpcode->A16) |
3730 | 0 | return true; |
3731 | 0 | Error(IDLoc, "image address size does not match a16"); |
3732 | 0 | return false; |
3733 | 0 | } |
3734 | | |
3735 | 0 | unsigned Dim = Inst.getOperand(DimIdx).getImm(); |
3736 | 0 | const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfoByEncoding(Dim); |
3737 | 0 | bool IsNSA = SrsrcIdx - VAddr0Idx > 1; |
3738 | 0 | unsigned ActualAddrSize = |
3739 | 0 | IsNSA ? SrsrcIdx - VAddr0Idx |
3740 | 0 | : AMDGPU::getRegOperandSize(getMRI(), Desc, VAddr0Idx) / 4; |
3741 | |
|
3742 | 0 | unsigned ExpectedAddrSize = |
3743 | 0 | AMDGPU::getAddrSizeMIMGOp(BaseOpcode, DimInfo, IsA16, hasG16()); |
3744 | |
|
3745 | 0 | if (IsNSA) { |
3746 | 0 | if (hasPartialNSAEncoding() && |
3747 | 0 | ExpectedAddrSize > |
3748 | 0 | getNSAMaxSize(Desc.TSFlags & SIInstrFlags::VSAMPLE)) { |
3749 | 0 | int VAddrLastIdx = SrsrcIdx - 1; |
3750 | 0 | unsigned VAddrLastSize = |
3751 | 0 | AMDGPU::getRegOperandSize(getMRI(), Desc, VAddrLastIdx) / 4; |
3752 | |
|
3753 | 0 | ActualAddrSize = VAddrLastIdx - VAddr0Idx + VAddrLastSize; |
3754 | 0 | } |
3755 | 0 | } else { |
3756 | 0 | if (ExpectedAddrSize > 12) |
3757 | 0 | ExpectedAddrSize = 16; |
3758 | | |
3759 | | // Allow oversized 8 VGPR vaddr when only 5/6/7 VGPRs are required. |
3760 | | // This provides backward compatibility for assembly created |
3761 | | // before 160b/192b/224b types were directly supported. |
3762 | 0 | if (ActualAddrSize == 8 && (ExpectedAddrSize >= 5 && ExpectedAddrSize <= 7)) |
3763 | 0 | return true; |
3764 | 0 | } |
3765 | | |
3766 | 0 | if (ActualAddrSize == ExpectedAddrSize) |
3767 | 0 | return true; |
3768 | | |
3769 | 0 | Error(IDLoc, "image address size does not match dim and a16"); |
3770 | 0 | return false; |
3771 | 0 | } |
3772 | | |
3773 | 0 | bool AMDGPUAsmParser::validateMIMGAtomicDMask(const MCInst &Inst) { |
3774 | |
|
3775 | 0 | const unsigned Opc = Inst.getOpcode(); |
3776 | 0 | const MCInstrDesc &Desc = MII.get(Opc); |
3777 | |
|
3778 | 0 | if ((Desc.TSFlags & MIMGFlags) == 0) |
3779 | 0 | return true; |
3780 | 0 | if (!Desc.mayLoad() || !Desc.mayStore()) |
3781 | 0 | return true; // Not atomic |
3782 | | |
3783 | 0 | int DMaskIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dmask); |
3784 | 0 | unsigned DMask = Inst.getOperand(DMaskIdx).getImm() & 0xf; |
3785 | | |
3786 | | // This is an incomplete check because image_atomic_cmpswap |
3787 | | // may only use 0x3 and 0xf while other atomic operations |
3788 | | // may use 0x1 and 0x3. However these limitations are |
3789 | | // verified when we check that dmask matches dst size. |
3790 | 0 | return DMask == 0x1 || DMask == 0x3 || DMask == 0xf; |
3791 | 0 | } |
3792 | | |
3793 | 0 | bool AMDGPUAsmParser::validateMIMGGatherDMask(const MCInst &Inst) { |
3794 | |
|
3795 | 0 | const unsigned Opc = Inst.getOpcode(); |
3796 | 0 | const MCInstrDesc &Desc = MII.get(Opc); |
3797 | |
|
3798 | 0 | if ((Desc.TSFlags & SIInstrFlags::Gather4) == 0) |
3799 | 0 | return true; |
3800 | | |
3801 | 0 | int DMaskIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dmask); |
3802 | 0 | unsigned DMask = Inst.getOperand(DMaskIdx).getImm() & 0xf; |
3803 | | |
3804 | | // GATHER4 instructions use dmask in a different fashion compared to |
3805 | | // other MIMG instructions. The only useful DMASK values are |
3806 | | // 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns |
3807 | | // (red,red,red,red) etc.) The ISA document doesn't mention |
3808 | | // this. |
3809 | 0 | return DMask == 0x1 || DMask == 0x2 || DMask == 0x4 || DMask == 0x8; |
3810 | 0 | } |
3811 | | |
3812 | 0 | bool AMDGPUAsmParser::validateMIMGMSAA(const MCInst &Inst) { |
3813 | 0 | const unsigned Opc = Inst.getOpcode(); |
3814 | 0 | const MCInstrDesc &Desc = MII.get(Opc); |
3815 | |
|
3816 | 0 | if ((Desc.TSFlags & MIMGFlags) == 0) |
3817 | 0 | return true; |
3818 | | |
3819 | 0 | const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opc); |
3820 | 0 | const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode = |
3821 | 0 | AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode); |
3822 | |
|
3823 | 0 | if (!BaseOpcode->MSAA) |
3824 | 0 | return true; |
3825 | | |
3826 | 0 | int DimIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dim); |
3827 | 0 | assert(DimIdx != -1); |
3828 | | |
3829 | 0 | unsigned Dim = Inst.getOperand(DimIdx).getImm(); |
3830 | 0 | const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfoByEncoding(Dim); |
3831 | |
|
3832 | 0 | return DimInfo->MSAA; |
3833 | 0 | } |
3834 | | |
3835 | | static bool IsMovrelsSDWAOpcode(const unsigned Opcode) |
3836 | 0 | { |
3837 | 0 | switch (Opcode) { |
3838 | 0 | case AMDGPU::V_MOVRELS_B32_sdwa_gfx10: |
3839 | 0 | case AMDGPU::V_MOVRELSD_B32_sdwa_gfx10: |
3840 | 0 | case AMDGPU::V_MOVRELSD_2_B32_sdwa_gfx10: |
3841 | 0 | return true; |
3842 | 0 | default: |
3843 | 0 | return false; |
3844 | 0 | } |
3845 | 0 | } |
3846 | | |
3847 | | // movrels* opcodes should only allow VGPRs as src0.
3848 | | // This is specified in the .td description for vop1/vop3,
3849 | | // but sdwa is handled differently. See isSDWAOperand. |
3850 | | bool AMDGPUAsmParser::validateMovrels(const MCInst &Inst, |
3851 | 0 | const OperandVector &Operands) { |
3852 | |
|
3853 | 0 | const unsigned Opc = Inst.getOpcode(); |
3854 | 0 | const MCInstrDesc &Desc = MII.get(Opc); |
3855 | |
|
3856 | 0 | if ((Desc.TSFlags & SIInstrFlags::SDWA) == 0 || !IsMovrelsSDWAOpcode(Opc)) |
3857 | 0 | return true; |
3858 | | |
3859 | 0 | const int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); |
3860 | 0 | assert(Src0Idx != -1); |
3861 | | |
3862 | 0 | SMLoc ErrLoc; |
3863 | 0 | const MCOperand &Src0 = Inst.getOperand(Src0Idx); |
3864 | 0 | if (Src0.isReg()) { |
3865 | 0 | auto Reg = mc2PseudoReg(Src0.getReg()); |
3866 | 0 | const MCRegisterInfo *TRI = getContext().getRegisterInfo(); |
3867 | 0 | if (!isSGPR(Reg, TRI)) |
3868 | 0 | return true; |
3869 | 0 | ErrLoc = getRegLoc(Reg, Operands); |
3870 | 0 | } else { |
3871 | 0 | ErrLoc = getConstLoc(Operands); |
3872 | 0 | } |
3873 | | |
3874 | 0 | Error(ErrLoc, "source operand must be a VGPR"); |
3875 | 0 | return false; |
3876 | 0 | } |
3877 | | |
3878 | | bool AMDGPUAsmParser::validateMAIAccWrite(const MCInst &Inst, |
3879 | 0 | const OperandVector &Operands) { |
3880 | |
|
3881 | 0 | const unsigned Opc = Inst.getOpcode(); |
3882 | |
|
3883 | 0 | if (Opc != AMDGPU::V_ACCVGPR_WRITE_B32_vi) |
3884 | 0 | return true; |
3885 | | |
3886 | 0 | const int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); |
3887 | 0 | assert(Src0Idx != -1); |
3888 | | |
3889 | 0 | const MCOperand &Src0 = Inst.getOperand(Src0Idx); |
3890 | 0 | if (!Src0.isReg()) |
3891 | 0 | return true; |
3892 | | |
3893 | 0 | auto Reg = mc2PseudoReg(Src0.getReg()); |
3894 | 0 | const MCRegisterInfo *TRI = getContext().getRegisterInfo(); |
3895 | 0 | if (!isGFX90A() && isSGPR(Reg, TRI)) { |
3896 | 0 | Error(getRegLoc(Reg, Operands), |
3897 | 0 | "source operand must be either a VGPR or an inline constant"); |
3898 | 0 | return false; |
3899 | 0 | } |
3900 | | |
3901 | 0 | return true; |
3902 | 0 | } |
3903 | | |
3904 | | bool AMDGPUAsmParser::validateMAISrc2(const MCInst &Inst, |
3905 | 0 | const OperandVector &Operands) { |
3906 | 0 | unsigned Opcode = Inst.getOpcode(); |
3907 | 0 | const MCInstrDesc &Desc = MII.get(Opcode); |
3908 | |
|
3909 | 0 | if (!(Desc.TSFlags & SIInstrFlags::IsMAI) || |
3910 | 0 | !getFeatureBits()[FeatureMFMAInlineLiteralBug]) |
3911 | 0 | return true; |
3912 | | |
3913 | 0 | const int Src2Idx = getNamedOperandIdx(Opcode, OpName::src2); |
3914 | 0 | if (Src2Idx == -1) |
3915 | 0 | return true; |
3916 | | |
3917 | 0 | if (Inst.getOperand(Src2Idx).isImm() && isInlineConstant(Inst, Src2Idx)) { |
3918 | 0 | Error(getConstLoc(Operands), |
3919 | 0 | "inline constants are not allowed for this operand"); |
3920 | 0 | return false; |
3921 | 0 | } |
3922 | | |
3923 | 0 | return true; |
3924 | 0 | } |
3925 | | |
3926 | | bool AMDGPUAsmParser::validateMFMA(const MCInst &Inst, |
3927 | 0 | const OperandVector &Operands) { |
3928 | 0 | const unsigned Opc = Inst.getOpcode(); |
3929 | 0 | const MCInstrDesc &Desc = MII.get(Opc); |
3930 | |
|
3931 | 0 | if ((Desc.TSFlags & SIInstrFlags::IsMAI) == 0) |
3932 | 0 | return true; |
3933 | | |
3934 | 0 | const int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2); |
3935 | 0 | if (Src2Idx == -1) |
3936 | 0 | return true; |
3937 | | |
3938 | 0 | const MCOperand &Src2 = Inst.getOperand(Src2Idx); |
3939 | 0 | if (!Src2.isReg()) |
3940 | 0 | return true; |
3941 | | |
3942 | 0 | MCRegister Src2Reg = Src2.getReg(); |
3943 | 0 | MCRegister DstReg = Inst.getOperand(0).getReg(); |
3944 | 0 | if (Src2Reg == DstReg) |
3945 | 0 | return true; |
3946 | | |
3947 | 0 | const MCRegisterInfo *TRI = getContext().getRegisterInfo(); |
3948 | 0 | if (TRI->getRegClass(Desc.operands()[0].RegClass).getSizeInBits() <= 128) |
3949 | 0 | return true; |
3950 | | |
3951 | 0 | if (TRI->regsOverlap(Src2Reg, DstReg)) { |
3952 | 0 | Error(getRegLoc(mc2PseudoReg(Src2Reg), Operands), |
3953 | 0 | "source 2 operand must not partially overlap with dst"); |
3954 | 0 | return false; |
3955 | 0 | } |
3956 | | |
3957 | 0 | return true; |
3958 | 0 | } |
3959 | | |
3960 | 0 | bool AMDGPUAsmParser::validateDivScale(const MCInst &Inst) { |
3961 | 0 | switch (Inst.getOpcode()) { |
3962 | 0 | default: |
3963 | 0 | return true; |
3964 | 0 | case V_DIV_SCALE_F32_gfx6_gfx7: |
3965 | 0 | case V_DIV_SCALE_F32_vi: |
3966 | 0 | case V_DIV_SCALE_F32_gfx10: |
3967 | 0 | case V_DIV_SCALE_F64_gfx6_gfx7: |
3968 | 0 | case V_DIV_SCALE_F64_vi: |
3969 | 0 | case V_DIV_SCALE_F64_gfx10: |
3970 | 0 | break; |
3971 | 0 | } |
3972 | | |
3973 | | // TODO: Check that src0 = src1 or src2. |
3974 | | |
3975 | 0 | for (auto Name : {AMDGPU::OpName::src0_modifiers, |
3976 | 0 | AMDGPU::OpName::src1_modifiers,
3977 | 0 | AMDGPU::OpName::src2_modifiers}) { |
3978 | 0 | if (Inst.getOperand(AMDGPU::getNamedOperandIdx(Inst.getOpcode(), Name)) |
3979 | 0 | .getImm() & |
3980 | 0 | SISrcMods::ABS) { |
3981 | 0 | return false; |
3982 | 0 | } |
3983 | 0 | } |
3984 | | |
3985 | 0 | return true; |
3986 | 0 | } |
3987 | | |
3988 | 0 | bool AMDGPUAsmParser::validateMIMGD16(const MCInst &Inst) { |
3989 | |
|
3990 | 0 | const unsigned Opc = Inst.getOpcode(); |
3991 | 0 | const MCInstrDesc &Desc = MII.get(Opc); |
3992 | |
|
3993 | 0 | if ((Desc.TSFlags & MIMGFlags) == 0) |
3994 | 0 | return true; |
3995 | | |
3996 | 0 | int D16Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::d16); |
3997 | 0 | if (D16Idx >= 0 && Inst.getOperand(D16Idx).getImm()) { |
3998 | 0 | if (isCI() || isSI()) |
3999 | 0 | return false; |
4000 | 0 | } |
4001 | | |
4002 | 0 | return true; |
4003 | 0 | } |
4004 | | |
4005 | | static bool IsRevOpcode(const unsigned Opcode) |
4006 | 0 | { |
4007 | 0 | switch (Opcode) { |
4008 | 0 | case AMDGPU::V_SUBREV_F32_e32: |
4009 | 0 | case AMDGPU::V_SUBREV_F32_e64: |
4010 | 0 | case AMDGPU::V_SUBREV_F32_e32_gfx10: |
4011 | 0 | case AMDGPU::V_SUBREV_F32_e32_gfx6_gfx7: |
4012 | 0 | case AMDGPU::V_SUBREV_F32_e32_vi: |
4013 | 0 | case AMDGPU::V_SUBREV_F32_e64_gfx10: |
4014 | 0 | case AMDGPU::V_SUBREV_F32_e64_gfx6_gfx7: |
4015 | 0 | case AMDGPU::V_SUBREV_F32_e64_vi: |
4016 | |
|
4017 | 0 | case AMDGPU::V_SUBREV_CO_U32_e32: |
4018 | 0 | case AMDGPU::V_SUBREV_CO_U32_e64: |
4019 | 0 | case AMDGPU::V_SUBREV_I32_e32_gfx6_gfx7: |
4020 | 0 | case AMDGPU::V_SUBREV_I32_e64_gfx6_gfx7: |
4021 | |
|
4022 | 0 | case AMDGPU::V_SUBBREV_U32_e32: |
4023 | 0 | case AMDGPU::V_SUBBREV_U32_e64: |
4024 | 0 | case AMDGPU::V_SUBBREV_U32_e32_gfx6_gfx7: |
4025 | 0 | case AMDGPU::V_SUBBREV_U32_e32_vi: |
4026 | 0 | case AMDGPU::V_SUBBREV_U32_e64_gfx6_gfx7: |
4027 | 0 | case AMDGPU::V_SUBBREV_U32_e64_vi: |
4028 | |
|
4029 | 0 | case AMDGPU::V_SUBREV_U32_e32: |
4030 | 0 | case AMDGPU::V_SUBREV_U32_e64: |
4031 | 0 | case AMDGPU::V_SUBREV_U32_e32_gfx9: |
4032 | 0 | case AMDGPU::V_SUBREV_U32_e32_vi: |
4033 | 0 | case AMDGPU::V_SUBREV_U32_e64_gfx9: |
4034 | 0 | case AMDGPU::V_SUBREV_U32_e64_vi: |
4035 | |
|
4036 | 0 | case AMDGPU::V_SUBREV_F16_e32: |
4037 | 0 | case AMDGPU::V_SUBREV_F16_e64: |
4038 | 0 | case AMDGPU::V_SUBREV_F16_e32_gfx10: |
4039 | 0 | case AMDGPU::V_SUBREV_F16_e32_vi: |
4040 | 0 | case AMDGPU::V_SUBREV_F16_e64_gfx10: |
4041 | 0 | case AMDGPU::V_SUBREV_F16_e64_vi: |
4042 | |
|
4043 | 0 | case AMDGPU::V_SUBREV_U16_e32: |
4044 | 0 | case AMDGPU::V_SUBREV_U16_e64: |
4045 | 0 | case AMDGPU::V_SUBREV_U16_e32_vi: |
4046 | 0 | case AMDGPU::V_SUBREV_U16_e64_vi: |
4047 | |
|
4048 | 0 | case AMDGPU::V_SUBREV_CO_U32_e32_gfx9: |
4049 | 0 | case AMDGPU::V_SUBREV_CO_U32_e64_gfx10: |
4050 | 0 | case AMDGPU::V_SUBREV_CO_U32_e64_gfx9: |
4051 | |
|
4052 | 0 | case AMDGPU::V_SUBBREV_CO_U32_e32_gfx9: |
4053 | 0 | case AMDGPU::V_SUBBREV_CO_U32_e64_gfx9: |
4054 | |
|
4055 | 0 | case AMDGPU::V_SUBREV_NC_U32_e32_gfx10: |
4056 | 0 | case AMDGPU::V_SUBREV_NC_U32_e64_gfx10: |
4057 | |
|
4058 | 0 | case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx10: |
4059 | 0 | case AMDGPU::V_SUBREV_CO_CI_U32_e64_gfx10: |
4060 | |
|
4061 | 0 | case AMDGPU::V_LSHRREV_B32_e32: |
4062 | 0 | case AMDGPU::V_LSHRREV_B32_e64: |
4063 | 0 | case AMDGPU::V_LSHRREV_B32_e32_gfx6_gfx7: |
4064 | 0 | case AMDGPU::V_LSHRREV_B32_e64_gfx6_gfx7: |
4065 | 0 | case AMDGPU::V_LSHRREV_B32_e32_vi: |
4066 | 0 | case AMDGPU::V_LSHRREV_B32_e64_vi: |
4067 | 0 | case AMDGPU::V_LSHRREV_B32_e32_gfx10: |
4068 | 0 | case AMDGPU::V_LSHRREV_B32_e64_gfx10: |
4069 | |
|
4070 | 0 | case AMDGPU::V_ASHRREV_I32_e32: |
4071 | 0 | case AMDGPU::V_ASHRREV_I32_e64: |
4072 | 0 | case AMDGPU::V_ASHRREV_I32_e32_gfx10: |
4073 | 0 | case AMDGPU::V_ASHRREV_I32_e32_gfx6_gfx7: |
4074 | 0 | case AMDGPU::V_ASHRREV_I32_e32_vi: |
4075 | 0 | case AMDGPU::V_ASHRREV_I32_e64_gfx10: |
4076 | 0 | case AMDGPU::V_ASHRREV_I32_e64_gfx6_gfx7: |
4077 | 0 | case AMDGPU::V_ASHRREV_I32_e64_vi: |
4078 | |
|
4079 | 0 | case AMDGPU::V_LSHLREV_B32_e32: |
4080 | 0 | case AMDGPU::V_LSHLREV_B32_e64: |
4081 | 0 | case AMDGPU::V_LSHLREV_B32_e32_gfx10: |
4082 | 0 | case AMDGPU::V_LSHLREV_B32_e32_gfx6_gfx7: |
4083 | 0 | case AMDGPU::V_LSHLREV_B32_e32_vi: |
4084 | 0 | case AMDGPU::V_LSHLREV_B32_e64_gfx10: |
4085 | 0 | case AMDGPU::V_LSHLREV_B32_e64_gfx6_gfx7: |
4086 | 0 | case AMDGPU::V_LSHLREV_B32_e64_vi: |
4087 | |
|
4088 | 0 | case AMDGPU::V_LSHLREV_B16_e32: |
4089 | 0 | case AMDGPU::V_LSHLREV_B16_e64: |
4090 | 0 | case AMDGPU::V_LSHLREV_B16_e32_vi: |
4091 | 0 | case AMDGPU::V_LSHLREV_B16_e64_vi: |
4092 | 0 | case AMDGPU::V_LSHLREV_B16_gfx10: |
4093 | |
|
4094 | 0 | case AMDGPU::V_LSHRREV_B16_e32: |
4095 | 0 | case AMDGPU::V_LSHRREV_B16_e64: |
4096 | 0 | case AMDGPU::V_LSHRREV_B16_e32_vi: |
4097 | 0 | case AMDGPU::V_LSHRREV_B16_e64_vi: |
4098 | 0 | case AMDGPU::V_LSHRREV_B16_gfx10: |
4099 | |
|
4100 | 0 | case AMDGPU::V_ASHRREV_I16_e32: |
4101 | 0 | case AMDGPU::V_ASHRREV_I16_e64: |
4102 | 0 | case AMDGPU::V_ASHRREV_I16_e32_vi: |
4103 | 0 | case AMDGPU::V_ASHRREV_I16_e64_vi: |
4104 | 0 | case AMDGPU::V_ASHRREV_I16_gfx10: |
4105 | |
|
4106 | 0 | case AMDGPU::V_LSHLREV_B64_e64: |
4107 | 0 | case AMDGPU::V_LSHLREV_B64_gfx10: |
4108 | 0 | case AMDGPU::V_LSHLREV_B64_vi: |
4109 | |
|
4110 | 0 | case AMDGPU::V_LSHRREV_B64_e64: |
4111 | 0 | case AMDGPU::V_LSHRREV_B64_gfx10: |
4112 | 0 | case AMDGPU::V_LSHRREV_B64_vi: |
4113 | |
|
4114 | 0 | case AMDGPU::V_ASHRREV_I64_e64: |
4115 | 0 | case AMDGPU::V_ASHRREV_I64_gfx10: |
4116 | 0 | case AMDGPU::V_ASHRREV_I64_vi: |
4117 | |
|
4118 | 0 | case AMDGPU::V_PK_LSHLREV_B16: |
4119 | 0 | case AMDGPU::V_PK_LSHLREV_B16_gfx10: |
4120 | 0 | case AMDGPU::V_PK_LSHLREV_B16_vi: |
4121 | |
|
4122 | 0 | case AMDGPU::V_PK_LSHRREV_B16: |
4123 | 0 | case AMDGPU::V_PK_LSHRREV_B16_gfx10: |
4124 | 0 | case AMDGPU::V_PK_LSHRREV_B16_vi: |
4125 | 0 | case AMDGPU::V_PK_ASHRREV_I16: |
4126 | 0 | case AMDGPU::V_PK_ASHRREV_I16_gfx10: |
4127 | 0 | case AMDGPU::V_PK_ASHRREV_I16_vi: |
4128 | 0 | return true; |
4129 | 0 | default: |
4130 | 0 | return false; |
4131 | 0 | } |
4132 | 0 | } |
4133 | | |
4134 | | std::optional<StringRef> |
4135 | 0 | AMDGPUAsmParser::validateLdsDirect(const MCInst &Inst) { |
4136 | |
|
4137 | 0 | using namespace SIInstrFlags; |
4138 | 0 | const unsigned Opcode = Inst.getOpcode(); |
4139 | 0 | const MCInstrDesc &Desc = MII.get(Opcode); |
4140 | | |
4141 | | // The lds_direct register is defined so that it can be used
4142 | | // with 9-bit operands only. Ignore encodings that do not accept these operands.
4143 | 0 | const auto Enc = VOP1 | VOP2 | VOP3 | VOPC | VOP3P | SIInstrFlags::SDWA; |
4144 | 0 | if ((Desc.TSFlags & Enc) == 0) |
4145 | 0 | return std::nullopt; |
4146 | | |
4147 | 0 | for (auto SrcName : {OpName::src0, OpName::src1, OpName::src2}) { |
4148 | 0 | auto SrcIdx = getNamedOperandIdx(Opcode, SrcName); |
4149 | 0 | if (SrcIdx == -1) |
4150 | 0 | break; |
4151 | 0 | const auto &Src = Inst.getOperand(SrcIdx); |
4152 | 0 | if (Src.isReg() && Src.getReg() == LDS_DIRECT) { |
4153 | |
|
4154 | 0 | if (isGFX90A() || isGFX11Plus()) |
4155 | 0 | return StringRef("lds_direct is not supported on this GPU"); |
4156 | | |
4157 | 0 | if (IsRevOpcode(Opcode) || (Desc.TSFlags & SIInstrFlags::SDWA)) |
4158 | 0 | return StringRef("lds_direct cannot be used with this instruction"); |
4159 | | |
4160 | 0 | if (SrcName != OpName::src0) |
4161 | 0 | return StringRef("lds_direct may be used as src0 only"); |
4162 | 0 | } |
4163 | 0 | } |
4164 | | |
4165 | 0 | return std::nullopt; |
4166 | 0 | } |
4167 | | |
4168 | 0 | SMLoc AMDGPUAsmParser::getFlatOffsetLoc(const OperandVector &Operands) const { |
4169 | 0 | for (unsigned i = 1, e = Operands.size(); i != e; ++i) { |
4170 | 0 | AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]); |
4171 | 0 | if (Op.isFlatOffset()) |
4172 | 0 | return Op.getStartLoc(); |
4173 | 0 | } |
4174 | 0 | return getLoc(); |
4175 | 0 | } |
4176 | | |
4177 | | bool AMDGPUAsmParser::validateOffset(const MCInst &Inst, |
4178 | 0 | const OperandVector &Operands) { |
4179 | 0 | auto Opcode = Inst.getOpcode(); |
4180 | 0 | auto OpNum = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::offset); |
4181 | 0 | if (OpNum == -1) |
4182 | 0 | return true; |
4183 | | |
4184 | 0 | uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags; |
4185 | 0 | if ((TSFlags & SIInstrFlags::FLAT)) |
4186 | 0 | return validateFlatOffset(Inst, Operands); |
4187 | | |
4188 | 0 | if ((TSFlags & SIInstrFlags::SMRD)) |
4189 | 0 | return validateSMEMOffset(Inst, Operands); |
4190 | | |
4191 | 0 | const auto &Op = Inst.getOperand(OpNum); |
4192 | 0 | if (isGFX12Plus() && |
4193 | 0 | (TSFlags & (SIInstrFlags::MUBUF | SIInstrFlags::MTBUF))) { |
4194 | 0 | const unsigned OffsetSize = 24; |
4195 | 0 | if (!isIntN(OffsetSize, Op.getImm())) { |
4196 | 0 | Error(getFlatOffsetLoc(Operands), |
4197 | 0 | Twine("expected a ") + Twine(OffsetSize) + "-bit signed offset"); |
4198 | 0 | return false; |
4199 | 0 | } |
4200 | 0 | } else { |
4201 | 0 | const unsigned OffsetSize = 16; |
4202 | 0 | if (!isUIntN(OffsetSize, Op.getImm())) { |
4203 | 0 | Error(getFlatOffsetLoc(Operands), |
4204 | 0 | Twine("expected a ") + Twine(OffsetSize) + "-bit unsigned offset"); |
4205 | 0 | return false; |
4206 | 0 | } |
4207 | 0 | } |
4208 | 0 | return true; |
4209 | 0 | } |
4210 | | |
4211 | | bool AMDGPUAsmParser::validateFlatOffset(const MCInst &Inst, |
4212 | 0 | const OperandVector &Operands) { |
4213 | 0 | uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags; |
4214 | 0 | if ((TSFlags & SIInstrFlags::FLAT) == 0) |
4215 | 0 | return true; |
4216 | | |
4217 | 0 | auto Opcode = Inst.getOpcode(); |
4218 | 0 | auto OpNum = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::offset); |
4219 | 0 | assert(OpNum != -1); |
4220 | | |
4221 | 0 | const auto &Op = Inst.getOperand(OpNum); |
4222 | 0 | if (!hasFlatOffsets() && Op.getImm() != 0) { |
4223 | 0 | Error(getFlatOffsetLoc(Operands), |
4224 | 0 | "flat offset modifier is not supported on this GPU"); |
4225 | 0 | return false; |
4226 | 0 | } |
4227 | | |
4228 | | // For pre-GFX12 FLAT instructions the offset must be positive; |
4229 | | // MSB is ignored and forced to zero. |
4230 | 0 | unsigned OffsetSize = AMDGPU::getNumFlatOffsetBits(getSTI()); |
4231 | 0 | bool AllowNegative = |
4232 | 0 | (TSFlags & (SIInstrFlags::FlatGlobal | SIInstrFlags::FlatScratch)) || |
4233 | 0 | isGFX12Plus(); |
4234 | 0 | if (!isIntN(OffsetSize, Op.getImm()) || (!AllowNegative && Op.getImm() < 0)) { |
4235 | 0 | Error(getFlatOffsetLoc(Operands), |
4236 | 0 | Twine("expected a ") + |
4237 | 0 | (AllowNegative ? Twine(OffsetSize) + "-bit signed offset" |
4238 | 0 | : Twine(OffsetSize - 1) + "-bit unsigned offset")); |
4239 | 0 | return false; |
4240 | 0 | } |
4241 | | |
4242 | 0 | return true; |
4243 | 0 | } |
4244 | | |
4245 | 0 | SMLoc AMDGPUAsmParser::getSMEMOffsetLoc(const OperandVector &Operands) const { |
4246 | | // Start with second operand because SMEM Offset cannot be dst or src0. |
4247 | 0 | for (unsigned i = 2, e = Operands.size(); i != e; ++i) { |
4248 | 0 | AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]); |
4249 | 0 | if (Op.isSMEMOffset() || Op.isSMEMOffsetMod()) |
4250 | 0 | return Op.getStartLoc(); |
4251 | 0 | } |
4252 | 0 | return getLoc(); |
4253 | 0 | } |
4254 | | |
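| | // SMEM offset legality depends on the subtarget and on whether the instruction |
| | // is a buffer load; the accepted ranges match the diagnostics emitted below. |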
4255 | | bool AMDGPUAsmParser::validateSMEMOffset(const MCInst &Inst, |
4256 | 0 | const OperandVector &Operands) { |
4257 | 0 | if (isCI() || isSI()) |
4258 | 0 | return true; |
4259 | | |
4260 | 0 | uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags; |
4261 | 0 | if ((TSFlags & SIInstrFlags::SMRD) == 0) |
4262 | 0 | return true; |
4263 | | |
4264 | 0 | auto Opcode = Inst.getOpcode(); |
4265 | 0 | auto OpNum = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::offset); |
4266 | 0 | if (OpNum == -1) |
4267 | 0 | return true; |
4268 | | |
4269 | 0 | const auto &Op = Inst.getOperand(OpNum); |
4270 | 0 | if (!Op.isImm()) |
4271 | 0 | return true; |
4272 | | |
4273 | 0 | uint64_t Offset = Op.getImm(); |
4274 | 0 | bool IsBuffer = AMDGPU::getSMEMIsBuffer(Opcode); |
4275 | 0 | if (AMDGPU::isLegalSMRDEncodedUnsignedOffset(getSTI(), Offset) || |
4276 | 0 | AMDGPU::isLegalSMRDEncodedSignedOffset(getSTI(), Offset, IsBuffer)) |
4277 | 0 | return true; |
4278 | | |
4279 | 0 | Error(getSMEMOffsetLoc(Operands), |
4280 | 0 | isGFX12Plus() ? "expected a 24-bit signed offset" |
4281 | 0 | : (isVI() || IsBuffer) ? "expected a 20-bit unsigned offset" |
4282 | 0 | : "expected a 21-bit signed offset"); |
4283 | |
4284 | 0 | return false; |
4285 | 0 | } |
4286 | | |
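| | // SOP2/SOPC instructions may encode at most one unique 32-bit literal |
| | // across their source operands. |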
4287 | 0 | bool AMDGPUAsmParser::validateSOPLiteral(const MCInst &Inst) const { |
4288 | 0 | unsigned Opcode = Inst.getOpcode(); |
4289 | 0 | const MCInstrDesc &Desc = MII.get(Opcode); |
4290 | 0 | if (!(Desc.TSFlags & (SIInstrFlags::SOP2 | SIInstrFlags::SOPC))) |
4291 | 0 | return true; |
4292 | | |
4293 | 0 | const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0); |
4294 | 0 | const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1); |
4295 | |
4296 | 0 | const int OpIndices[] = { Src0Idx, Src1Idx }; |
4297 | |
4298 | 0 | unsigned NumExprs = 0; |
4299 | 0 | unsigned NumLiterals = 0; |
4300 | 0 | uint32_t LiteralValue; |
4301 | |
4302 | 0 | for (int OpIdx : OpIndices) { |
4303 | 0 | if (OpIdx == -1) break; |
4304 | | |
4305 | 0 | const MCOperand &MO = Inst.getOperand(OpIdx); |
4306 | | // Exclude special imm operands (like that used by s_set_gpr_idx_on) |
4307 | 0 | if (AMDGPU::isSISrcOperand(Desc, OpIdx)) { |
4308 | 0 | if (MO.isImm() && !isInlineConstant(Inst, OpIdx)) { |
4309 | 0 | uint32_t Value = static_cast<uint32_t>(MO.getImm()); |
4310 | 0 | if (NumLiterals == 0 || LiteralValue != Value) { |
4311 | 0 | LiteralValue = Value; |
4312 | 0 | ++NumLiterals; |
4313 | 0 | } |
4314 | 0 | } else if (MO.isExpr()) { |
4315 | 0 | ++NumExprs; |
4316 | 0 | } |
4317 | 0 | } |
4318 | 0 | } |
4319 | |
4320 | 0 | return NumLiterals + NumExprs <= 1; |
4321 | 0 | } |
4322 | | |
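| | // Check opcode-specific restrictions on op_sel / op_sel_hi |
| | // (permlane16, GFX940 DOT, and GFX11+ VOP3 dot2 instructions). |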
4323 | 0 | bool AMDGPUAsmParser::validateOpSel(const MCInst &Inst) { |
4324 | 0 | const unsigned Opc = Inst.getOpcode(); |
4325 | 0 | if (isPermlane16(Opc)) { |
4326 | 0 | int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel); |
4327 | 0 | unsigned OpSel = Inst.getOperand(OpSelIdx).getImm(); |
4328 | |
4329 | 0 | if (OpSel & ~3) |
4330 | 0 | return false; |
4331 | 0 | } |
4332 | | |
4333 | 0 | uint64_t TSFlags = MII.get(Opc).TSFlags; |
4334 | |
4335 | 0 | if (isGFX940() && (TSFlags & SIInstrFlags::IsDOT)) { |
4336 | 0 | int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel); |
4337 | 0 | if (OpSelIdx != -1) { |
4338 | 0 | if (Inst.getOperand(OpSelIdx).getImm() != 0) |
4339 | 0 | return false; |
4340 | 0 | } |
4341 | 0 | int OpSelHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel_hi); |
4342 | 0 | if (OpSelHiIdx != -1) { |
4343 | 0 | if (Inst.getOperand(OpSelHiIdx).getImm() != -1) |
4344 | 0 | return false; |
4345 | 0 | } |
4346 | 0 | } |
4347 | | |
4348 | | // op_sel[0:1] must be 0 for v_dot2_bf16_bf16 and v_dot2_f16_f16 (VOP3 Dot). |
4349 | 0 | if (isGFX11Plus() && (TSFlags & SIInstrFlags::IsDOT) && |
4350 | 0 | (TSFlags & SIInstrFlags::VOP3) && !(TSFlags & SIInstrFlags::VOP3P)) { |
4351 | 0 | int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel); |
4352 | 0 | unsigned OpSel = Inst.getOperand(OpSelIdx).getImm(); |
4353 | 0 | if (OpSel & 3) |
4354 | 0 | return false; |
4355 | 0 | } |
4356 | | |
4357 | 0 | return true; |
4358 | 0 | } |
4359 | | |
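| | // Check DPP restrictions: DP ALU opcodes accept only a limited set of dpp |
| | // controls, and src1 may not be an SGPR or an immediate on targets that do |
| | // not support an SGPR src1 with DPP. |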
4360 | | bool AMDGPUAsmParser::validateDPP(const MCInst &Inst, |
4361 | 0 | const OperandVector &Operands) { |
4362 | 0 | const unsigned Opc = Inst.getOpcode(); |
4363 | 0 | int DppCtrlIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dpp_ctrl); |
4364 | 0 | if (DppCtrlIdx >= 0) { |
4365 | 0 | unsigned DppCtrl = Inst.getOperand(DppCtrlIdx).getImm(); |
4366 | |
4367 | 0 | if (!AMDGPU::isLegalDPALU_DPPControl(DppCtrl) && |
4368 | 0 | AMDGPU::isDPALU_DPP(MII.get(Opc))) { |
4369 | | // DP ALU DPP is supported for row_newbcast only on GFX9* |
4370 | 0 | SMLoc S = getImmLoc(AMDGPUOperand::ImmTyDppCtrl, Operands); |
4371 | 0 | Error(S, "DP ALU dpp only supports row_newbcast"); |
4372 | 0 | return false; |
4373 | 0 | } |
4374 | 0 | } |
4375 | | |
4376 | 0 | int Dpp8Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dpp8); |
4377 | 0 | bool IsDPP = DppCtrlIdx >= 0 || Dpp8Idx >= 0; |
4378 | |
4379 | 0 | if (IsDPP && !hasDPPSrc1SGPR(getSTI())) { |
4380 | 0 | int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); |
4381 | 0 | if (Src1Idx >= 0) { |
4382 | 0 | const MCOperand &Src1 = Inst.getOperand(Src1Idx); |
4383 | 0 | const MCRegisterInfo *TRI = getContext().getRegisterInfo(); |
4384 | 0 | if (Src1.isImm() || |
4385 | 0 | (Src1.isReg() && isSGPR(mc2PseudoReg(Src1.getReg()), TRI))) { |
4386 | 0 | AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[Src1Idx]); |
4387 | 0 | Error(Op.getStartLoc(), "invalid operand for instruction"); |
4388 | 0 | return false; |
4389 | 0 | } |
4390 | 0 | } |
4391 | 0 | } |
4392 | | |
4393 | 0 | return true; |
4394 | 0 | } |
4395 | | |
4396 | | // Check if VCC register matches wavefront size |
4397 | 0 | bool AMDGPUAsmParser::validateVccOperand(unsigned Reg) const { |
4398 | 0 | auto FB = getFeatureBits(); |
4399 | 0 | return (FB[AMDGPU::FeatureWavefrontSize64] && Reg == AMDGPU::VCC) || |
4400 | 0 | (FB[AMDGPU::FeatureWavefrontSize32] && Reg == AMDGPU::VCC_LO); |
4401 | 0 | } |
4402 | | |
4403 | | // At most one unique literal can be used. VOP3 literals are only allowed on GFX10+. |
4404 | | bool AMDGPUAsmParser::validateVOPLiteral(const MCInst &Inst, |
4405 | 0 | const OperandVector &Operands) { |
4406 | 0 | unsigned Opcode = Inst.getOpcode(); |
4407 | 0 | const MCInstrDesc &Desc = MII.get(Opcode); |
4408 | 0 | bool HasMandatoryLiteral = getNamedOperandIdx(Opcode, OpName::imm) != -1; |
4409 | 0 | if (!(Desc.TSFlags & (SIInstrFlags::VOP3 | SIInstrFlags::VOP3P)) && |
4410 | 0 | !HasMandatoryLiteral && !isVOPD(Opcode)) |
4411 | 0 | return true; |
4412 | | |
4413 | 0 | OperandIndices OpIndices = getSrcOperandIndices(Opcode, HasMandatoryLiteral); |
4414 | |
4415 | 0 | unsigned NumExprs = 0; |
4416 | 0 | unsigned NumLiterals = 0; |
4417 | 0 | uint32_t LiteralValue; |
4418 | |
4419 | 0 | for (int OpIdx : OpIndices) { |
4420 | 0 | if (OpIdx == -1) |
4421 | 0 | continue; |
4422 | | |
4423 | 0 | const MCOperand &MO = Inst.getOperand(OpIdx); |
4424 | 0 | if (!MO.isImm() && !MO.isExpr()) |
4425 | 0 | continue; |
4426 | 0 | if (!isSISrcOperand(Desc, OpIdx)) |
4427 | 0 | continue; |
4428 | | |
4429 | 0 | if (MO.isImm() && !isInlineConstant(Inst, OpIdx)) { |
4430 | 0 | uint64_t Value = static_cast<uint64_t>(MO.getImm()); |
4431 | 0 | bool IsFP64 = AMDGPU::isSISrcFPOperand(Desc, OpIdx) && |
4432 | 0 | AMDGPU::getOperandSize(Desc.operands()[OpIdx]) == 8; |
4433 | 0 | bool IsValid32Op = AMDGPU::isValid32BitLiteral(Value, IsFP64); |
4434 | |
4435 | 0 | if (!IsValid32Op && !isInt<32>(Value) && !isUInt<32>(Value)) { |
4436 | 0 | Error(getLitLoc(Operands), "invalid operand for instruction"); |
4437 | 0 | return false; |
4438 | 0 | } |
4439 | | |
4440 | 0 | if (IsFP64 && IsValid32Op) |
4441 | 0 | Value = Hi_32(Value); |
4442 | |
4443 | 0 | if (NumLiterals == 0 || LiteralValue != Value) { |
4444 | 0 | LiteralValue = Value; |
4445 | 0 | ++NumLiterals; |
4446 | 0 | } |
4447 | 0 | } else if (MO.isExpr()) { |
4448 | 0 | ++NumExprs; |
4449 | 0 | } |
4450 | 0 | } |
4451 | 0 | NumLiterals += NumExprs; |
4452 | |
4453 | 0 | if (!NumLiterals) |
4454 | 0 | return true; |
4455 | | |
4456 | 0 | if (!HasMandatoryLiteral && !getFeatureBits()[FeatureVOP3Literal]) { |
4457 | 0 | Error(getLitLoc(Operands), "literal operands are not supported"); |
4458 | 0 | return false; |
4459 | 0 | } |
4460 | | |
4461 | 0 | if (NumLiterals > 1) { |
4462 | 0 | Error(getLitLoc(Operands, true), "only one unique literal operand is allowed"); |
4463 | 0 | return false; |
4464 | 0 | } |
4465 | | |
4466 | 0 | return true; |
4467 | 0 | } |
4468 | | |
4469 | | // Returns -1 if not a register, 0 if VGPR and 1 if AGPR. |
4470 | | static int IsAGPROperand(const MCInst &Inst, uint16_t NameIdx, |
4471 | 0 | const MCRegisterInfo *MRI) { |
4472 | 0 | int OpIdx = AMDGPU::getNamedOperandIdx(Inst.getOpcode(), NameIdx); |
4473 | 0 | if (OpIdx < 0) |
4474 | 0 | return -1; |
4475 | | |
4476 | 0 | const MCOperand &Op = Inst.getOperand(OpIdx); |
4477 | 0 | if (!Op.isReg()) |
4478 | 0 | return -1; |
4479 | | |
4480 | 0 | unsigned Sub = MRI->getSubReg(Op.getReg(), AMDGPU::sub0); |
4481 | 0 | auto Reg = Sub ? Sub : Op.getReg(); |
4482 | 0 | const MCRegisterClass &AGPR32 = MRI->getRegClass(AMDGPU::AGPR_32RegClassID); |
4483 | 0 | return AGPR32.contains(Reg) ? 1 : 0; |
4484 | 0 | } |
4485 | | |
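| | // For memory instructions, the data and destination operands must agree on |
| | // VGPR vs. AGPR usage; AGPR loads and stores require gfx90a. |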
4486 | 0 | bool AMDGPUAsmParser::validateAGPRLdSt(const MCInst &Inst) const { |
4487 | 0 | uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags; |
4488 | 0 | if ((TSFlags & (SIInstrFlags::FLAT | SIInstrFlags::MUBUF | |
4489 | 0 | SIInstrFlags::MTBUF | SIInstrFlags::MIMG | |
4490 | 0 | SIInstrFlags::DS)) == 0) |
4491 | 0 | return true; |
4492 | | |
4493 | 0 | uint16_t DataNameIdx = (TSFlags & SIInstrFlags::DS) ? AMDGPU::OpName::data0 |
4494 | 0 | : AMDGPU::OpName::vdata; |
4495 | |
4496 | 0 | const MCRegisterInfo *MRI = getMRI(); |
4497 | 0 | int DstAreg = IsAGPROperand(Inst, AMDGPU::OpName::vdst, MRI); |
4498 | 0 | int DataAreg = IsAGPROperand(Inst, DataNameIdx, MRI); |
4499 | |
4500 | 0 | if ((TSFlags & SIInstrFlags::DS) && DataAreg >= 0) { |
4501 | 0 | int Data2Areg = IsAGPROperand(Inst, AMDGPU::OpName::data1, MRI); |
4502 | 0 | if (Data2Areg >= 0 && Data2Areg != DataAreg) |
4503 | 0 | return false; |
4504 | 0 | } |
4505 | | |
4506 | 0 | auto FB = getFeatureBits(); |
4507 | 0 | if (FB[AMDGPU::FeatureGFX90AInsts]) { |
4508 | 0 | if (DataAreg < 0 || DstAreg < 0) |
4509 | 0 | return true; |
4510 | 0 | return DstAreg == DataAreg; |
4511 | 0 | } |
4512 | | |
4513 | 0 | return DstAreg < 1 && DataAreg < 1; |
4514 | 0 | } |
4515 | | |
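| | // On gfx90a, VGPR and AGPR register tuples must start at an even-numbered register. |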
4516 | 0 | bool AMDGPUAsmParser::validateVGPRAlign(const MCInst &Inst) const { |
4517 | 0 | auto FB = getFeatureBits(); |
4518 | 0 | if (!FB[AMDGPU::FeatureGFX90AInsts]) |
4519 | 0 | return true; |
4520 | | |
4521 | 0 | const MCRegisterInfo *MRI = getMRI(); |
4522 | 0 | const MCRegisterClass &VGPR32 = MRI->getRegClass(AMDGPU::VGPR_32RegClassID); |
4523 | 0 | const MCRegisterClass &AGPR32 = MRI->getRegClass(AMDGPU::AGPR_32RegClassID); |
4524 | 0 | for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) { |
4525 | 0 | const MCOperand &Op = Inst.getOperand(I); |
4526 | 0 | if (!Op.isReg()) |
4527 | 0 | continue; |
4528 | | |
4529 | 0 | unsigned Sub = MRI->getSubReg(Op.getReg(), AMDGPU::sub0); |
4530 | 0 | if (!Sub) |
4531 | 0 | continue; |
4532 | | |
4533 | 0 | if (VGPR32.contains(Sub) && ((Sub - AMDGPU::VGPR0) & 1)) |
4534 | 0 | return false; |
4535 | 0 | if (AGPR32.contains(Sub) && ((Sub - AMDGPU::AGPR0) & 1)) |
4536 | 0 | return false; |
4537 | 0 | } |
4538 | | |
4539 | 0 | return true; |
4540 | 0 | } |
4541 | | |
4542 | 0 | SMLoc AMDGPUAsmParser::getBLGPLoc(const OperandVector &Operands) const { |
4543 | 0 | for (unsigned i = 1, e = Operands.size(); i != e; ++i) { |
4544 | 0 | AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]); |
4545 | 0 | if (Op.isBLGP()) |
4546 | 0 | return Op.getStartLoc(); |
4547 | 0 | } |
4548 | 0 | return SMLoc(); |
4549 | 0 | } |
4550 | | |
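| | // On gfx940, the F64 MFMA opcodes spell the blgp field as 'neg:'; check that |
| | // the spelling used in the source matches the opcode. |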
4551 | | bool AMDGPUAsmParser::validateBLGP(const MCInst &Inst, |
4552 | 0 | const OperandVector &Operands) { |
4553 | 0 | unsigned Opc = Inst.getOpcode(); |
4554 | 0 | int BlgpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::blgp); |
4555 | 0 | if (BlgpIdx == -1) |
4556 | 0 | return true; |
4557 | 0 | SMLoc BLGPLoc = getBLGPLoc(Operands); |
4558 | 0 | if (!BLGPLoc.isValid()) |
4559 | 0 | return true; |
4560 | 0 | bool IsNeg = StringRef(BLGPLoc.getPointer()).starts_with("neg:"); |
4561 | 0 | auto FB = getFeatureBits(); |
4562 | 0 | bool UsesNeg = false; |
4563 | 0 | if (FB[AMDGPU::FeatureGFX940Insts]) { |
4564 | 0 | switch (Opc) { |
4565 | 0 | case AMDGPU::V_MFMA_F64_16X16X4F64_gfx940_acd: |
4566 | 0 | case AMDGPU::V_MFMA_F64_16X16X4F64_gfx940_vcd: |
4567 | 0 | case AMDGPU::V_MFMA_F64_4X4X4F64_gfx940_acd: |
4568 | 0 | case AMDGPU::V_MFMA_F64_4X4X4F64_gfx940_vcd: |
4569 | 0 | UsesNeg = true; |
4570 | 0 | } |
4571 | 0 | } |
4572 | |
4573 | 0 | if (IsNeg == UsesNeg) |
4574 | 0 | return true; |
4575 | | |
4576 | 0 | Error(BLGPLoc, |
4577 | 0 | UsesNeg ? "invalid modifier: blgp is not supported" |
4578 | 0 | : "invalid modifier: neg is not supported"); |
4579 | |
4580 | 0 | return false; |
4581 | 0 | } |
4582 | | |
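| | // On GFX11+, the split s_waitcnt_*cnt instructions only accept null as their |
| | // register operand. |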
4583 | | bool AMDGPUAsmParser::validateWaitCnt(const MCInst &Inst, |
4584 | 0 | const OperandVector &Operands) { |
4585 | 0 | if (!isGFX11Plus()) |
4586 | 0 | return true; |
4587 | | |
4588 | 0 | unsigned Opc = Inst.getOpcode(); |
4589 | 0 | if (Opc != AMDGPU::S_WAITCNT_EXPCNT_gfx11 && |
4590 | 0 | Opc != AMDGPU::S_WAITCNT_LGKMCNT_gfx11 && |
4591 | 0 | Opc != AMDGPU::S_WAITCNT_VMCNT_gfx11 && |
4592 | 0 | Opc != AMDGPU::S_WAITCNT_VSCNT_gfx11) |
4593 | 0 | return true; |
4594 | | |
4595 | 0 | int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::sdst); |
4596 | 0 | assert(Src0Idx >= 0 && Inst.getOperand(Src0Idx).isReg()); |
4597 | 0 | auto Reg = mc2PseudoReg(Inst.getOperand(Src0Idx).getReg()); |
4598 | 0 | if (Reg == AMDGPU::SGPR_NULL) |
4599 | 0 | return true; |
4600 | | |
4601 | 0 | SMLoc RegLoc = getRegLoc(Reg, Operands); |
4602 | 0 | Error(RegLoc, "src0 must be null"); |
4603 | 0 | return false; |
4604 | 0 | } |
4605 | | |
4606 | | bool AMDGPUAsmParser::validateDS(const MCInst &Inst, |
4607 | 0 | const OperandVector &Operands) { |
4608 | 0 | uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags; |
4609 | 0 | if ((TSFlags & SIInstrFlags::DS) == 0) |
4610 | 0 | return true; |
4611 | 0 | if (TSFlags & SIInstrFlags::GWS) |
4612 | 0 | return validateGWS(Inst, Operands); |
4613 | | // Only validate GDS for non-GWS instructions. |
4614 | 0 | if (hasGDS()) |
4615 | 0 | return true; |
4616 | 0 | int GDSIdx = |
4617 | 0 | AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::gds); |
4618 | 0 | if (GDSIdx < 0) |
4619 | 0 | return true; |
4620 | 0 | unsigned GDS = Inst.getOperand(GDSIdx).getImm(); |
4621 | 0 | if (GDS) { |
4622 | 0 | SMLoc S = getImmLoc(AMDGPUOperand::ImmTyGDS, Operands); |
4623 | 0 | Error(S, "gds modifier is not supported on this GPU"); |
4624 | 0 | return false; |
4625 | 0 | } |
4626 | 0 | return true; |
4627 | 0 | } |
4628 | | |
4629 | | // gfx90a has an undocumented limitation: |
4630 | | // DS_GWS opcodes must use even aligned registers. |
4631 | | bool AMDGPUAsmParser::validateGWS(const MCInst &Inst, |
4632 | 0 | const OperandVector &Operands) { |
4633 | 0 | if (!getFeatureBits()[AMDGPU::FeatureGFX90AInsts]) |
4634 | 0 | return true; |
4635 | | |
4636 | 0 | int Opc = Inst.getOpcode(); |
4637 | 0 | if (Opc != AMDGPU::DS_GWS_INIT_vi && Opc != AMDGPU::DS_GWS_BARRIER_vi && |
4638 | 0 | Opc != AMDGPU::DS_GWS_SEMA_BR_vi) |
4639 | 0 | return true; |
4640 | | |
4641 | 0 | const MCRegisterInfo *MRI = getMRI(); |
4642 | 0 | const MCRegisterClass &VGPR32 = MRI->getRegClass(AMDGPU::VGPR_32RegClassID); |
4643 | 0 | int Data0Pos = |
4644 | 0 | AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data0); |
4645 | 0 | assert(Data0Pos != -1); |
4646 | 0 | auto Reg = Inst.getOperand(Data0Pos).getReg(); |
4647 | 0 | auto RegIdx = Reg - (VGPR32.contains(Reg) ? AMDGPU::VGPR0 : AMDGPU::AGPR0); |
4648 | 0 | if (RegIdx & 1) { |
4649 | 0 | SMLoc RegLoc = getRegLoc(Reg, Operands); |
4650 | 0 | Error(RegLoc, "vgpr must be even aligned"); |
4651 | 0 | return false; |
4652 | 0 | } |
4653 | | |
4654 | 0 | return true; |
4655 | 0 | } |
4656 | | |
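| | // Validate cache policy (cpol) bits: SMRD restrictions, the gfx90a scc modifier, |
| | // and the glc/sc0 requirements for atomics. GFX12+ instead uses the th/scope |
| | // rules checked in validateTHAndScopeBits. |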
4657 | | bool AMDGPUAsmParser::validateCoherencyBits(const MCInst &Inst, |
4658 | | const OperandVector &Operands, |
4659 | 0 | const SMLoc &IDLoc) { |
4660 | 0 | int CPolPos = AMDGPU::getNamedOperandIdx(Inst.getOpcode(), |
4661 | 0 | AMDGPU::OpName::cpol); |
4662 | 0 | if (CPolPos == -1) |
4663 | 0 | return true; |
4664 | | |
4665 | 0 | unsigned CPol = Inst.getOperand(CPolPos).getImm(); |
4666 | |
|
4667 | 0 | if (isGFX12Plus()) |
4668 | 0 | return validateTHAndScopeBits(Inst, Operands, CPol); |
4669 | | |
4670 | 0 | uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags; |
4671 | 0 | if (TSFlags & SIInstrFlags::SMRD) { |
4672 | 0 | if (CPol && (isSI() || isCI())) { |
4673 | 0 | SMLoc S = getImmLoc(AMDGPUOperand::ImmTyCPol, Operands); |
4674 | 0 | Error(S, "cache policy is not supported for SMRD instructions"); |
4675 | 0 | return false; |
4676 | 0 | } |
4677 | 0 | if (CPol & ~(AMDGPU::CPol::GLC | AMDGPU::CPol::DLC)) { |
4678 | 0 | Error(IDLoc, "invalid cache policy for SMEM instruction"); |
4679 | 0 | return false; |
4680 | 0 | } |
4681 | 0 | } |
4682 | | |
4683 | 0 | if (isGFX90A() && !isGFX940() && (CPol & CPol::SCC)) { |
4684 | 0 | const uint64_t AllowSCCModifier = SIInstrFlags::MUBUF | |
4685 | 0 | SIInstrFlags::MTBUF | SIInstrFlags::MIMG | |
4686 | 0 | SIInstrFlags::FLAT; |
4687 | 0 | if (!(TSFlags & AllowSCCModifier)) { |
4688 | 0 | SMLoc S = getImmLoc(AMDGPUOperand::ImmTyCPol, Operands); |
4689 | 0 | StringRef CStr(S.getPointer()); |
4690 | 0 | S = SMLoc::getFromPointer(&CStr.data()[CStr.find("scc")]); |
4691 | 0 | Error(S, |
4692 | 0 | "scc modifier is not supported for this instruction on this GPU"); |
4693 | 0 | return false; |
4694 | 0 | } |
4695 | 0 | } |
4696 | | |
4697 | 0 | if (!(TSFlags & (SIInstrFlags::IsAtomicNoRet | SIInstrFlags::IsAtomicRet))) |
4698 | 0 | return true; |
4699 | | |
4700 | 0 | if (TSFlags & SIInstrFlags::IsAtomicRet) { |
4701 | 0 | if (!(TSFlags & SIInstrFlags::MIMG) && !(CPol & CPol::GLC)) { |
4702 | 0 | Error(IDLoc, isGFX940() ? "instruction must use sc0" |
4703 | 0 | : "instruction must use glc"); |
4704 | 0 | return false; |
4705 | 0 | } |
4706 | 0 | } else { |
4707 | 0 | if (CPol & CPol::GLC) { |
4708 | 0 | SMLoc S = getImmLoc(AMDGPUOperand::ImmTyCPol, Operands); |
4709 | 0 | StringRef CStr(S.getPointer()); |
4710 | 0 | S = SMLoc::getFromPointer( |
4711 | 0 | &CStr.data()[CStr.find(isGFX940() ? "sc0" : "glc")]); |
4712 | 0 | Error(S, isGFX940() ? "instruction must not use sc0" |
4713 | 0 | : "instruction must not use glc"); |
4714 | 0 | return false; |
4715 | 0 | } |
4716 | 0 | } |
4717 | | |
4718 | 0 | return true; |
4719 | 0 | } |
4720 | | |
4721 | | bool AMDGPUAsmParser::validateTHAndScopeBits(const MCInst &Inst, |
4722 | | const OperandVector &Operands, |
4723 | 0 | const unsigned CPol) { |
4724 | 0 | const unsigned TH = CPol & AMDGPU::CPol::TH; |
4725 | 0 | const unsigned Scope = CPol & AMDGPU::CPol::SCOPE; |
4726 | |
4727 | 0 | const unsigned Opcode = Inst.getOpcode(); |
4728 | 0 | const MCInstrDesc &TID = MII.get(Opcode); |
4729 | |
4730 | 0 | auto PrintError = [&](StringRef Msg) { |
4731 | 0 | SMLoc S = getImmLoc(AMDGPUOperand::ImmTyCPol, Operands); |
4732 | 0 | Error(S, Msg); |
4733 | 0 | return false; |
4734 | 0 | }; |
4735 | |
4736 | 0 | if ((TID.TSFlags & SIInstrFlags::IsAtomicRet) && |
4737 | 0 | (TID.TSFlags & (SIInstrFlags::FLAT | SIInstrFlags::MUBUF)) && |
4738 | 0 | (!(TH & AMDGPU::CPol::TH_ATOMIC_RETURN))) |
4739 | 0 | return PrintError("instruction must use th:TH_ATOMIC_RETURN"); |
4740 | | |
4741 | 0 | if (TH == 0) |
4742 | 0 | return true; |
4743 | | |
4744 | 0 | if ((TID.TSFlags & SIInstrFlags::SMRD) && |
4745 | 0 | ((TH == AMDGPU::CPol::TH_NT_RT) || (TH == AMDGPU::CPol::TH_RT_NT) || |
4746 | 0 | (TH == AMDGPU::CPol::TH_NT_HT))) |
4747 | 0 | return PrintError("invalid th value for SMEM instruction"); |
4748 | | |
4749 | 0 | if (TH == AMDGPU::CPol::TH_BYPASS) { |
4750 | 0 | if ((Scope != AMDGPU::CPol::SCOPE_SYS && |
4751 | 0 | CPol & AMDGPU::CPol::TH_REAL_BYPASS) || |
4752 | 0 | (Scope == AMDGPU::CPol::SCOPE_SYS && |
4753 | 0 | !(CPol & AMDGPU::CPol::TH_REAL_BYPASS))) |
4754 | 0 | return PrintError("scope and th combination is not valid"); |
4755 | 0 | } |
4756 | | |
4757 | 0 | bool IsStore = TID.mayStore(); |
4758 | 0 | bool IsAtomic = |
4759 | 0 | TID.TSFlags & (SIInstrFlags::IsAtomicNoRet | SIInstrFlags::IsAtomicRet); |
4760 | |
4761 | 0 | if (IsAtomic) { |
4762 | 0 | if (!(CPol & AMDGPU::CPol::TH_TYPE_ATOMIC)) |
4763 | 0 | return PrintError("invalid th value for atomic instructions"); |
4764 | 0 | } else if (IsStore) { |
4765 | 0 | if (!(CPol & AMDGPU::CPol::TH_TYPE_STORE)) |
4766 | 0 | return PrintError("invalid th value for store instructions"); |
4767 | 0 | } else { |
4768 | 0 | if (!(CPol & AMDGPU::CPol::TH_TYPE_LOAD)) |
4769 | 0 | return PrintError("invalid th value for load instructions"); |
4770 | 0 | } |
4771 | | |
4772 | 0 | return true; |
4773 | 0 | } |
4774 | | |
4775 | 0 | bool AMDGPUAsmParser::validateExeczVcczOperands(const OperandVector &Operands) { |
4776 | 0 | if (!isGFX11Plus()) |
4777 | 0 | return true; |
4778 | 0 | for (auto &Operand : Operands) { |
4779 | 0 | if (!Operand->isReg()) |
4780 | 0 | continue; |
4781 | 0 | unsigned Reg = Operand->getReg(); |
4782 | 0 | if (Reg == SRC_EXECZ || Reg == SRC_VCCZ) { |
4783 | 0 | Error(getRegLoc(Reg, Operands), |
4784 | 0 | "execz and vccz are not supported on this GPU"); |
4785 | 0 | return false; |
4786 | 0 | } |
4787 | 0 | } |
4788 | 0 | return true; |
4789 | 0 | } |
4790 | | |
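| | // The tfe modifier has no meaning for buffer stores; reject it on |
| | // MUBUF/MTBUF store instructions. |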
4791 | | bool AMDGPUAsmParser::validateTFE(const MCInst &Inst, |
4792 | 0 | const OperandVector &Operands) { |
4793 | 0 | const MCInstrDesc &Desc = MII.get(Inst.getOpcode()); |
4794 | 0 | if (Desc.mayStore() && |
4795 | 0 | (Desc.TSFlags & (SIInstrFlags::MUBUF | SIInstrFlags::MTBUF))) { |
4796 | 0 | SMLoc Loc = getImmLoc(AMDGPUOperand::ImmTyTFE, Operands); |
4797 | 0 | if (Loc != getInstLoc(Operands)) { |
4798 | 0 | Error(Loc, "TFE modifier has no meaning for store instructions"); |
4799 | 0 | return false; |
4800 | 0 | } |
4801 | 0 | } |
4802 | | |
4803 | 0 | return true; |
4804 | 0 | } |
4805 | | |
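| | // Top-level semantic validation run on every successfully matched instruction. |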
4806 | | bool AMDGPUAsmParser::validateInstruction(const MCInst &Inst, |
4807 | | const SMLoc &IDLoc, |
4808 | 0 | const OperandVector &Operands) { |
4809 | 0 | if (auto ErrMsg = validateLdsDirect(Inst)) { |
4810 | 0 | Error(getRegLoc(LDS_DIRECT, Operands), *ErrMsg); |
4811 | 0 | return false; |
4812 | 0 | } |
4813 | 0 | if (!validateSOPLiteral(Inst)) { |
4814 | 0 | Error(getLitLoc(Operands), |
4815 | 0 | "only one unique literal operand is allowed"); |
4816 | 0 | return false; |
4817 | 0 | } |
4818 | 0 | if (!validateVOPLiteral(Inst, Operands)) { |
4819 | 0 | return false; |
4820 | 0 | } |
4821 | 0 | if (!validateConstantBusLimitations(Inst, Operands)) { |
4822 | 0 | return false; |
4823 | 0 | } |
4824 | 0 | if (!validateVOPDRegBankConstraints(Inst, Operands)) { |
4825 | 0 | return false; |
4826 | 0 | } |
4827 | 0 | if (!validateIntClampSupported(Inst)) { |
4828 | 0 | Error(getImmLoc(AMDGPUOperand::ImmTyClampSI, Operands), |
4829 | 0 | "integer clamping is not supported on this GPU"); |
4830 | 0 | return false; |
4831 | 0 | } |
4832 | 0 | if (!validateOpSel(Inst)) { |
4833 | 0 | Error(getImmLoc(AMDGPUOperand::ImmTyOpSel, Operands), |
4834 | 0 | "invalid op_sel operand"); |
4835 | 0 | return false; |
4836 | 0 | } |
4837 | 0 | if (!validateDPP(Inst, Operands)) { |
4838 | 0 | return false; |
4839 | 0 | } |
4840 | | // For MUBUF/MTBUF, d16 is part of the opcode, so there is nothing to validate. |
4841 | 0 | if (!validateMIMGD16(Inst)) { |
4842 | 0 | Error(getImmLoc(AMDGPUOperand::ImmTyD16, Operands), |
4843 | 0 | "d16 modifier is not supported on this GPU"); |
4844 | 0 | return false; |
4845 | 0 | } |
4846 | 0 | if (!validateMIMGMSAA(Inst)) { |
4847 | 0 | Error(getImmLoc(AMDGPUOperand::ImmTyDim, Operands), |
4848 | 0 | "invalid dim; must be MSAA type"); |
4849 | 0 | return false; |
4850 | 0 | } |
4851 | 0 | if (!validateMIMGDataSize(Inst, IDLoc)) { |
4852 | 0 | return false; |
4853 | 0 | } |
4854 | 0 | if (!validateMIMGAddrSize(Inst, IDLoc)) |
4855 | 0 | return false; |
4856 | 0 | if (!validateMIMGAtomicDMask(Inst)) { |
4857 | 0 | Error(getImmLoc(AMDGPUOperand::ImmTyDMask, Operands), |
4858 | 0 | "invalid atomic image dmask"); |
4859 | 0 | return false; |
4860 | 0 | } |
4861 | 0 | if (!validateMIMGGatherDMask(Inst)) { |
4862 | 0 | Error(getImmLoc(AMDGPUOperand::ImmTyDMask, Operands), |
4863 | 0 | "invalid image_gather dmask: only one bit must be set"); |
4864 | 0 | return false; |
4865 | 0 | } |
4866 | 0 | if (!validateMovrels(Inst, Operands)) { |
4867 | 0 | return false; |
4868 | 0 | } |
4869 | 0 | if (!validateOffset(Inst, Operands)) { |
4870 | 0 | return false; |
4871 | 0 | } |
4872 | 0 | if (!validateMAIAccWrite(Inst, Operands)) { |
4873 | 0 | return false; |
4874 | 0 | } |
4875 | 0 | if (!validateMAISrc2(Inst, Operands)) { |
4876 | 0 | return false; |
4877 | 0 | } |
4878 | 0 | if (!validateMFMA(Inst, Operands)) { |
4879 | 0 | return false; |
4880 | 0 | } |
4881 | 0 | if (!validateCoherencyBits(Inst, Operands, IDLoc)) { |
4882 | 0 | return false; |
4883 | 0 | } |
4884 | | |
4885 | 0 | if (!validateAGPRLdSt(Inst)) { |
4886 | 0 | Error(IDLoc, getFeatureBits()[AMDGPU::FeatureGFX90AInsts] |
4887 | 0 | ? "invalid register class: data and dst should be all VGPR or AGPR" |
4888 | 0 | : "invalid register class: agpr loads and stores not supported on this GPU" |
4889 | 0 | ); |
4890 | 0 | return false; |
4891 | 0 | } |
4892 | 0 | if (!validateVGPRAlign(Inst)) { |
4893 | 0 | Error(IDLoc, |
4894 | 0 | "invalid register class: vgpr tuples must be 64 bit aligned"); |
4895 | 0 | return false; |
4896 | 0 | } |
4897 | 0 | if (!validateDS(Inst, Operands)) { |
4898 | 0 | return false; |
4899 | 0 | } |
4900 | | |
4901 | 0 | if (!validateBLGP(Inst, Operands)) { |
4902 | 0 | return false; |
4903 | 0 | } |
4904 | | |
4905 | 0 | if (!validateDivScale(Inst)) { |
4906 | 0 | Error(IDLoc, "ABS not allowed in VOP3B instructions"); |
4907 | 0 | return false; |
4908 | 0 | } |
4909 | 0 | if (!validateWaitCnt(Inst, Operands)) { |
4910 | 0 | return false; |
4911 | 0 | } |
4912 | 0 | if (!validateExeczVcczOperands(Operands)) { |
4913 | 0 | return false; |
4914 | 0 | } |
4915 | 0 | if (!validateTFE(Inst, Operands)) { |
4916 | 0 | return false; |
4917 | 0 | } |
4918 | | |
4919 | 0 | return true; |
4920 | 0 | } |
4921 | | |
4922 | | static std::string AMDGPUMnemonicSpellCheck(StringRef S, |
4923 | | const FeatureBitset &FBS, |
4924 | | unsigned VariantID = 0); |
4925 | | |
4926 | | static bool AMDGPUCheckMnemonic(StringRef Mnemonic, |
4927 | | const FeatureBitset &AvailableFeatures, |
4928 | | unsigned VariantID); |
4929 | | |
4930 | | bool AMDGPUAsmParser::isSupportedMnemo(StringRef Mnemo, |
4931 | 0 | const FeatureBitset &FBS) { |
4932 | 0 | return isSupportedMnemo(Mnemo, FBS, getAllVariants()); |
4933 | 0 | } |
4934 | | |
4935 | | bool AMDGPUAsmParser::isSupportedMnemo(StringRef Mnemo, |
4936 | | const FeatureBitset &FBS, |
4937 | 0 | ArrayRef<unsigned> Variants) { |
4938 | 0 | for (auto Variant : Variants) { |
4939 | 0 | if (AMDGPUCheckMnemonic(Mnemo, FBS, Variant)) |
4940 | 0 | return true; |
4941 | 0 | } |
4942 | | |
4943 | 0 | return false; |
4944 | 0 | } |
4945 | | |
4946 | | bool AMDGPUAsmParser::checkUnsupportedInstruction(StringRef Mnemo, |
4947 | 0 | const SMLoc &IDLoc) { |
4948 | 0 | FeatureBitset FBS = ComputeAvailableFeatures(getFeatureBits()); |
4949 | | |
4950 | | // Check if requested instruction variant is supported. |
4951 | 0 | if (isSupportedMnemo(Mnemo, FBS, getMatchedVariants())) |
4952 | 0 | return false; |
4953 | | |
4954 | | // This instruction is not supported. |
4955 | | // Clear any other pending errors because they are no longer relevant. |
4956 | 0 | getParser().clearPendingErrors(); |
4957 | | |
4958 | | // Requested instruction variant is not supported. |
4959 | | // Check if any other variants are supported. |
4960 | 0 | StringRef VariantName = getMatchedVariantName(); |
4961 | 0 | if (!VariantName.empty() && isSupportedMnemo(Mnemo, FBS)) { |
4962 | 0 | return Error(IDLoc, |
4963 | 0 | Twine(VariantName, |
4964 | 0 | " variant of this instruction is not supported")); |
4965 | 0 | } |
4966 | | |
4967 | | // Check if this instruction may be used with a different wavesize. |
4968 | 0 | if (isGFX10Plus() && getFeatureBits()[AMDGPU::FeatureWavefrontSize64] && |
4969 | 0 | !getFeatureBits()[AMDGPU::FeatureWavefrontSize32]) { |
4970 | |
4971 | 0 | FeatureBitset FeaturesWS32 = getFeatureBits(); |
4972 | 0 | FeaturesWS32.flip(AMDGPU::FeatureWavefrontSize64) |
4973 | 0 | .flip(AMDGPU::FeatureWavefrontSize32); |
4974 | 0 | FeatureBitset AvailableFeaturesWS32 = |
4975 | 0 | ComputeAvailableFeatures(FeaturesWS32); |
4976 | |
4977 | 0 | if (isSupportedMnemo(Mnemo, AvailableFeaturesWS32, getMatchedVariants())) |
4978 | 0 | return Error(IDLoc, "instruction requires wavesize=32"); |
4979 | 0 | } |
4980 | | |
4981 | | // Finally check if this instruction is supported on any other GPU. |
4982 | 0 | if (isSupportedMnemo(Mnemo, FeatureBitset().set())) { |
4983 | 0 | return Error(IDLoc, "instruction not supported on this GPU"); |
4984 | 0 | } |
4985 | | |
4986 | | // Instruction not supported on any GPU. Probably a typo. |
4987 | 0 | std::string Suggestion = AMDGPUMnemonicSpellCheck(Mnemo, FBS); |
4988 | 0 | return Error(IDLoc, "invalid instruction" + Suggestion); |
4989 | 0 | } |
4990 | | |
4991 | | static bool isInvalidVOPDY(const OperandVector &Operands, |
4992 | 0 | uint64_t InvalidOprIdx) { |
4993 | 0 | assert(InvalidOprIdx < Operands.size()); |
4994 | 0 | const auto &Op = ((AMDGPUOperand &)*Operands[InvalidOprIdx]); |
4995 | 0 | if (Op.isToken() && InvalidOprIdx > 1) { |
4996 | 0 | const auto &PrevOp = ((AMDGPUOperand &)*Operands[InvalidOprIdx - 1]); |
4997 | 0 | return PrevOp.isToken() && PrevOp.getToken() == "::"; |
4998 | 0 | } |
4999 | 0 | return false; |
5000 | 0 | } |
5001 | | |
5002 | | bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, |
5003 | | OperandVector &Operands, |
5004 | | MCStreamer &Out, |
5005 | | uint64_t &ErrorInfo, |
5006 | 0 | bool MatchingInlineAsm) { |
5007 | 0 | MCInst Inst; |
5008 | 0 | unsigned Result = Match_Success; |
5009 | 0 | for (auto Variant : getMatchedVariants()) { |
5010 | 0 | uint64_t EI; |
5011 | 0 | auto R = MatchInstructionImpl(Operands, Inst, EI, MatchingInlineAsm, |
5012 | 0 | Variant); |
5013 | | // Match statuses are ordered from least to most specific, and the most |
5014 | | // specific status seen across all variants is kept as the result: |
5015 | | // Match_MnemonicFail < Match_InvalidOperand < Match_MissingFeature < Match_PreferE32 |
5016 | 0 | if ((R == Match_Success) || |
5017 | 0 | (R == Match_PreferE32) || |
5018 | 0 | (R == Match_MissingFeature && Result != Match_PreferE32) || |
5019 | 0 | (R == Match_InvalidOperand && Result != Match_MissingFeature |
5020 | 0 | && Result != Match_PreferE32) || |
5021 | 0 | (R == Match_MnemonicFail && Result != Match_InvalidOperand |
5022 | 0 | && Result != Match_MissingFeature |
5023 | 0 | && Result != Match_PreferE32)) { |
5024 | 0 | Result = R; |
5025 | 0 | ErrorInfo = EI; |
5026 | 0 | } |
5027 | 0 | if (R == Match_Success) |
5028 | 0 | break; |
5029 | 0 | } |
5030 | |
5031 | 0 | if (Result == Match_Success) { |
5032 | 0 | if (!validateInstruction(Inst, IDLoc, Operands)) { |
5033 | 0 | return true; |
5034 | 0 | } |
5035 | 0 | Inst.setLoc(IDLoc); |
5036 | 0 | Out.emitInstruction(Inst, getSTI()); |
5037 | 0 | return false; |
5038 | 0 | } |
5039 | | |
5040 | 0 | StringRef Mnemo = ((AMDGPUOperand &)*Operands[0]).getToken(); |
5041 | 0 | if (checkUnsupportedInstruction(Mnemo, IDLoc)) { |
5042 | 0 | return true; |
5043 | 0 | } |
5044 | | |
5045 | 0 | switch (Result) { |
5046 | 0 | default: break; |
5047 | 0 | case Match_MissingFeature: |
5048 | | // It has been verified that the specified instruction |
5049 | | // mnemonic is valid. A match was found but it requires |
5050 | | // features which are not supported on this GPU. |
5051 | 0 | return Error(IDLoc, "operands are not valid for this GPU or mode"); |
5052 | | |
5053 | 0 | case Match_InvalidOperand: { |
5054 | 0 | SMLoc ErrorLoc = IDLoc; |
5055 | 0 | if (ErrorInfo != ~0ULL) { |
5056 | 0 | if (ErrorInfo >= Operands.size()) { |
5057 | 0 | return Error(IDLoc, "too few operands for instruction"); |
5058 | 0 | } |
5059 | 0 | ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc(); |
5060 | 0 | if (ErrorLoc == SMLoc()) |
5061 | 0 | ErrorLoc = IDLoc; |
5062 | |
5063 | 0 | if (isInvalidVOPDY(Operands, ErrorInfo)) |
5064 | 0 | return Error(ErrorLoc, "invalid VOPDY instruction"); |
5065 | 0 | } |
5066 | 0 | return Error(ErrorLoc, "invalid operand for instruction"); |
5067 | 0 | } |
5068 | | |
5069 | 0 | case Match_PreferE32: |
5070 | 0 | return Error(IDLoc, "internal error: instruction without _e64 suffix " |
5071 | 0 | "should be encoded as e32"); |
5072 | 0 | case Match_MnemonicFail: |
5073 | 0 | llvm_unreachable("Invalid instructions should have been handled already"); |
5074 | 0 | } |
5075 | 0 | llvm_unreachable("Implement any new match types added!"); |
5076 | 0 | } |
5077 | | |
5078 | 0 | bool AMDGPUAsmParser::ParseAsAbsoluteExpression(uint32_t &Ret) { |
5079 | 0 | int64_t Tmp = -1; |
5080 | 0 | if (!isToken(AsmToken::Integer) && !isToken(AsmToken::Identifier)) { |
5081 | 0 | return true; |
5082 | 0 | } |
5083 | 0 | if (getParser().parseAbsoluteExpression(Tmp)) { |
5084 | 0 | return true; |
5085 | 0 | } |
5086 | 0 | Ret = static_cast<uint32_t>(Tmp); |
5087 | 0 | return false; |
5088 | 0 | } |
5089 | | |
5090 | | bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major, |
5091 | 0 | uint32_t &Minor) { |
5092 | 0 | if (ParseAsAbsoluteExpression(Major)) |
5093 | 0 | return TokError("invalid major version"); |
5094 | | |
5095 | 0 | if (!trySkipToken(AsmToken::Comma)) |
5096 | 0 | return TokError("minor version number required, comma expected"); |
5097 | | |
5098 | 0 | if (ParseAsAbsoluteExpression(Minor)) |
5099 | 0 | return TokError("invalid minor version"); |
5100 | | |
5101 | 0 | return false; |
5102 | 0 | } |
5103 | | |
5104 | 0 | bool AMDGPUAsmParser::ParseDirectiveAMDGCNTarget() { |
5105 | 0 | if (getSTI().getTargetTriple().getArch() != Triple::amdgcn) |
5106 | 0 | return TokError("directive only supported for amdgcn architecture"); |
5107 | | |
5108 | 0 | std::string TargetIDDirective; |
5109 | 0 | SMLoc TargetStart = getTok().getLoc(); |
5110 | 0 | if (getParser().parseEscapedString(TargetIDDirective)) |
5111 | 0 | return true; |
5112 | | |
5113 | 0 | SMRange TargetRange = SMRange(TargetStart, getTok().getLoc()); |
5114 | 0 | if (getTargetStreamer().getTargetID()->toString() != TargetIDDirective) |
5115 | 0 | return getParser().Error(TargetRange.Start, |
5116 | 0 | (Twine(".amdgcn_target directive's target id ") + |
5117 | 0 | Twine(TargetIDDirective) + |
5118 | 0 | Twine(" does not match the specified target id ") + |
5119 | 0 | Twine(getTargetStreamer().getTargetID()->toString())).str()); |
5120 | | |
5121 | 0 | return false; |
5122 | 0 | } |
5123 | | |
5124 | 0 | bool AMDGPUAsmParser::OutOfRangeError(SMRange Range) { |
5125 | 0 | return Error(Range.Start, "value out of range", Range); |
5126 | 0 | } |
5127 | | |
5128 | | bool AMDGPUAsmParser::calculateGPRBlocks( |
5129 | | const FeatureBitset &Features, bool VCCUsed, bool FlatScrUsed, |
5130 | | bool XNACKUsed, std::optional<bool> EnableWavefrontSize32, |
5131 | | unsigned NextFreeVGPR, SMRange VGPRRange, unsigned NextFreeSGPR, |
5132 | 0 | SMRange SGPRRange, unsigned &VGPRBlocks, unsigned &SGPRBlocks) { |
5133 | | // TODO(scott.linder): These calculations are duplicated from |
5134 | | // AMDGPUAsmPrinter::getSIProgramInfo and could be unified. |
5135 | 0 | IsaVersion Version = getIsaVersion(getSTI().getCPU()); |
5136 | |
5137 | 0 | unsigned NumVGPRs = NextFreeVGPR; |
5138 | 0 | unsigned NumSGPRs = NextFreeSGPR; |
5139 | |
5140 | 0 | if (Version.Major >= 10) |
5141 | 0 | NumSGPRs = 0; |
5142 | 0 | else { |
5143 | 0 | unsigned MaxAddressableNumSGPRs = |
5144 | 0 | IsaInfo::getAddressableNumSGPRs(&getSTI()); |
5145 | |
5146 | 0 | if (Version.Major >= 8 && !Features.test(FeatureSGPRInitBug) && |
5147 | 0 | NumSGPRs > MaxAddressableNumSGPRs) |
5148 | 0 | return OutOfRangeError(SGPRRange); |
5149 | | |
5150 | 0 | NumSGPRs += |
5151 | 0 | IsaInfo::getNumExtraSGPRs(&getSTI(), VCCUsed, FlatScrUsed, XNACKUsed); |
5152 | |
5153 | 0 | if ((Version.Major <= 7 || Features.test(FeatureSGPRInitBug)) && |
5154 | 0 | NumSGPRs > MaxAddressableNumSGPRs) |
5155 | 0 | return OutOfRangeError(SGPRRange); |
5156 | | |
5157 | 0 | if (Features.test(FeatureSGPRInitBug)) |
5158 | 0 | NumSGPRs = IsaInfo::FIXED_NUM_SGPRS_FOR_INIT_BUG; |
5159 | 0 | } |
5160 | | |
5161 | 0 | VGPRBlocks = |
5162 | 0 | IsaInfo::getNumVGPRBlocks(&getSTI(), NumVGPRs, EnableWavefrontSize32); |
5163 | 0 | SGPRBlocks = IsaInfo::getNumSGPRBlocks(&getSTI(), NumSGPRs); |
5164 | |
5165 | 0 | return false; |
5166 | 0 | } |
5167 | | |
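| | // Parse the .amdhsa_kernel ... .end_amdhsa_kernel directive block and emit the |
| | // resulting kernel descriptor. |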
5168 | 0 | bool AMDGPUAsmParser::ParseDirectiveAMDHSAKernel() { |
5169 | 0 | if (getSTI().getTargetTriple().getArch() != Triple::amdgcn) |
5170 | 0 | return TokError("directive only supported for amdgcn architecture"); |
5171 | | |
5172 | 0 | if (!isHsaAbi(getSTI())) |
5173 | 0 | return TokError("directive only supported for amdhsa OS"); |
5174 | | |
5175 | 0 | StringRef KernelName; |
5176 | 0 | if (getParser().parseIdentifier(KernelName)) |
5177 | 0 | return true; |
5178 | | |
5179 | 0 | kernel_descriptor_t KD = getDefaultAmdhsaKernelDescriptor(&getSTI()); |
5180 | |
5181 | 0 | StringSet<> Seen; |
5182 | |
5183 | 0 | IsaVersion IVersion = getIsaVersion(getSTI().getCPU()); |
5184 | |
5185 | 0 | SMRange VGPRRange; |
5186 | 0 | uint64_t NextFreeVGPR = 0; |
5187 | 0 | uint64_t AccumOffset = 0; |
5188 | 0 | uint64_t SharedVGPRCount = 0; |
5189 | 0 | uint64_t PreloadLength = 0; |
5190 | 0 | uint64_t PreloadOffset = 0; |
5191 | 0 | SMRange SGPRRange; |
5192 | 0 | uint64_t NextFreeSGPR = 0; |
5193 | | |
5194 | | // Count the number of user SGPRs implied from the enabled feature bits. |
5195 | 0 | unsigned ImpliedUserSGPRCount = 0; |
5196 | | |
5197 | | // Track if the asm explicitly contains the directive for the user SGPR |
5198 | | // count. |
5199 | 0 | std::optional<unsigned> ExplicitUserSGPRCount; |
5200 | 0 | bool ReserveVCC = true; |
5201 | 0 | bool ReserveFlatScr = true; |
5202 | 0 | std::optional<bool> EnableWavefrontSize32; |
5203 | |
5204 | 0 | while (true) { |
5205 | 0 | while (trySkipToken(AsmToken::EndOfStatement)); |
5206 | |
5207 | 0 | StringRef ID; |
5208 | 0 | SMRange IDRange = getTok().getLocRange(); |
5209 | 0 | if (!parseId(ID, "expected .amdhsa_ directive or .end_amdhsa_kernel")) |
5210 | 0 | return true; |
5211 | | |
5212 | 0 | if (ID == ".end_amdhsa_kernel") |
5213 | 0 | break; |
5214 | | |
5215 | 0 | if (!Seen.insert(ID).second) |
5216 | 0 | return TokError(".amdhsa_ directives cannot be repeated"); |
5217 | | |
5218 | 0 | SMLoc ValStart = getLoc(); |
5219 | 0 | int64_t IVal; |
5220 | 0 | if (getParser().parseAbsoluteExpression(IVal)) |
5221 | 0 | return true; |
5222 | 0 | SMLoc ValEnd = getLoc(); |
5223 | 0 | SMRange ValRange = SMRange(ValStart, ValEnd); |
5224 | |
5225 | 0 | if (IVal < 0) |
5226 | 0 | return OutOfRangeError(ValRange); |
5227 | | |
5228 | 0 | uint64_t Val = IVal; |
5229 | |
5230 | 0 | #define PARSE_BITS_ENTRY(FIELD, ENTRY, VALUE, RANGE) \ |
5231 | 0 | if (!isUInt<ENTRY##_WIDTH>(VALUE)) \ |
5232 | 0 | return OutOfRangeError(RANGE); \ |
5233 | 0 | AMDHSA_BITS_SET(FIELD, ENTRY, VALUE); |
5234 | |
5235 | 0 | if (ID == ".amdhsa_group_segment_fixed_size") { |
5236 | 0 | if (!isUInt<sizeof(KD.group_segment_fixed_size) * CHAR_BIT>(Val)) |
5237 | 0 | return OutOfRangeError(ValRange); |
5238 | 0 | KD.group_segment_fixed_size = Val; |
5239 | 0 | } else if (ID == ".amdhsa_private_segment_fixed_size") { |
5240 | 0 | if (!isUInt<sizeof(KD.private_segment_fixed_size) * CHAR_BIT>(Val)) |
5241 | 0 | return OutOfRangeError(ValRange); |
5242 | 0 | KD.private_segment_fixed_size = Val; |
5243 | 0 | } else if (ID == ".amdhsa_kernarg_size") { |
5244 | 0 | if (!isUInt<sizeof(KD.kernarg_size) * CHAR_BIT>(Val)) |
5245 | 0 | return OutOfRangeError(ValRange); |
5246 | 0 | KD.kernarg_size = Val; |
5247 | 0 | } else if (ID == ".amdhsa_user_sgpr_count") { |
5248 | 0 | ExplicitUserSGPRCount = Val; |
5249 | 0 | } else if (ID == ".amdhsa_user_sgpr_private_segment_buffer") { |
5250 | 0 | if (hasArchitectedFlatScratch()) |
5251 | 0 | return Error(IDRange.Start, |
5252 | 0 | "directive is not supported with architected flat scratch", |
5253 | 0 | IDRange); |
5254 | 0 | PARSE_BITS_ENTRY(KD.kernel_code_properties, |
5255 | 0 | KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER, |
5256 | 0 | Val, ValRange); |
5257 | 0 | if (Val) |
5258 | 0 | ImpliedUserSGPRCount += 4; |
5259 | 0 | } else if (ID == ".amdhsa_user_sgpr_kernarg_preload_length") { |
5260 | 0 | if (!hasKernargPreload()) |
5261 | 0 | return Error(IDRange.Start, "directive requires gfx90a+", IDRange); |
5262 | | |
5263 | 0 | if (Val > getMaxNumUserSGPRs()) |
5264 | 0 | return OutOfRangeError(ValRange); |
5265 | 0 | PARSE_BITS_ENTRY(KD.kernarg_preload, KERNARG_PRELOAD_SPEC_LENGTH, Val, |
5266 | 0 | ValRange); |
5267 | 0 | if (Val) { |
5268 | 0 | ImpliedUserSGPRCount += Val; |
5269 | 0 | PreloadLength = Val; |
5270 | 0 | } |
5271 | 0 | } else if (ID == ".amdhsa_user_sgpr_kernarg_preload_offset") { |
5272 | 0 | if (!hasKernargPreload()) |
5273 | 0 | return Error(IDRange.Start, "directive requires gfx90a+", IDRange); |
5274 | | |
5275 | 0 | if (Val >= 1024) |
5276 | 0 | return OutOfRangeError(ValRange); |
5277 | 0 | PARSE_BITS_ENTRY(KD.kernarg_preload, KERNARG_PRELOAD_SPEC_OFFSET, Val, |
5278 | 0 | ValRange); |
5279 | 0 | if (Val) |
5280 | 0 | PreloadOffset = Val; |
5281 | 0 | } else if (ID == ".amdhsa_user_sgpr_dispatch_ptr") { |
5282 | 0 | PARSE_BITS_ENTRY(KD.kernel_code_properties, |
5283 | 0 | KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR, Val, |
5284 | 0 | ValRange); |
5285 | 0 | if (Val) |
5286 | 0 | ImpliedUserSGPRCount += 2; |
5287 | 0 | } else if (ID == ".amdhsa_user_sgpr_queue_ptr") { |
5288 | 0 | PARSE_BITS_ENTRY(KD.kernel_code_properties, |
5289 | 0 | KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR, Val, |
5290 | 0 | ValRange); |
5291 | 0 | if (Val) |
5292 | 0 | ImpliedUserSGPRCount += 2; |
5293 | 0 | } else if (ID == ".amdhsa_user_sgpr_kernarg_segment_ptr") { |
5294 | 0 | PARSE_BITS_ENTRY(KD.kernel_code_properties, |
5295 | 0 | KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR, |
5296 | 0 | Val, ValRange); |
5297 | 0 | if (Val) |
5298 | 0 | ImpliedUserSGPRCount += 2; |
5299 | 0 | } else if (ID == ".amdhsa_user_sgpr_dispatch_id") { |
5300 | 0 | PARSE_BITS_ENTRY(KD.kernel_code_properties, |
5301 | 0 | KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID, Val, |
5302 | 0 | ValRange); |
5303 | 0 | if (Val) |
5304 | 0 | ImpliedUserSGPRCount += 2; |
5305 | 0 | } else if (ID == ".amdhsa_user_sgpr_flat_scratch_init") { |
5306 | 0 | if (hasArchitectedFlatScratch()) |
5307 | 0 | return Error(IDRange.Start, |
5308 | 0 | "directive is not supported with architected flat scratch", |
5309 | 0 | IDRange); |
5310 | 0 | PARSE_BITS_ENTRY(KD.kernel_code_properties, |
5311 | 0 | KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT, Val, |
5312 | 0 | ValRange); |
5313 | 0 | if (Val) |
5314 | 0 | ImpliedUserSGPRCount += 2; |
5315 | 0 | } else if (ID == ".amdhsa_user_sgpr_private_segment_size") { |
5316 | 0 | PARSE_BITS_ENTRY(KD.kernel_code_properties, |
5317 | 0 | KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE, |
5318 | 0 | Val, ValRange); |
5319 | 0 | if (Val) |
5320 | 0 | ImpliedUserSGPRCount += 1; |
5321 | 0 | } else if (ID == ".amdhsa_wavefront_size32") { |
5322 | 0 | if (IVersion.Major < 10) |
5323 | 0 | return Error(IDRange.Start, "directive requires gfx10+", IDRange); |
5324 | 0 | EnableWavefrontSize32 = Val; |
5325 | 0 | PARSE_BITS_ENTRY(KD.kernel_code_properties, |
5326 | 0 | KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32, |
5327 | 0 | Val, ValRange); |
5328 | 0 | } else if (ID == ".amdhsa_uses_dynamic_stack") { |
5329 | 0 | PARSE_BITS_ENTRY(KD.kernel_code_properties, |
5330 | 0 | KERNEL_CODE_PROPERTY_USES_DYNAMIC_STACK, Val, ValRange); |
5331 | 0 | } else if (ID == ".amdhsa_system_sgpr_private_segment_wavefront_offset") { |
5332 | 0 | if (hasArchitectedFlatScratch()) |
5333 | 0 | return Error(IDRange.Start, |
5334 | 0 | "directive is not supported with architected flat scratch", |
5335 | 0 | IDRange); |
5336 | 0 | PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2, |
5337 | 0 | COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT, Val, ValRange); |
5338 | 0 | } else if (ID == ".amdhsa_enable_private_segment") { |
5339 | 0 | if (!hasArchitectedFlatScratch()) |
5340 | 0 | return Error( |
5341 | 0 | IDRange.Start, |
5342 | 0 | "directive is not supported without architected flat scratch", |
5343 | 0 | IDRange); |
5344 | 0 | PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2, |
5345 | 0 | COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT, Val, ValRange); |
5346 | 0 | } else if (ID == ".amdhsa_system_sgpr_workgroup_id_x") { |
5347 | 0 | PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2, |
5348 | 0 | COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X, Val, |
5349 | 0 | ValRange); |
5350 | 0 | } else if (ID == ".amdhsa_system_sgpr_workgroup_id_y") { |
5351 | 0 | PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2, |
5352 | 0 | COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y, Val, |
5353 | 0 | ValRange); |
5354 | 0 | } else if (ID == ".amdhsa_system_sgpr_workgroup_id_z") { |
5355 | 0 | PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2, |
5356 | 0 | COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z, Val, |
5357 | 0 | ValRange); |
5358 | 0 | } else if (ID == ".amdhsa_system_sgpr_workgroup_info") { |
5359 | 0 | PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2, |
5360 | 0 | COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO, Val, |
5361 | 0 | ValRange); |
5362 | 0 | } else if (ID == ".amdhsa_system_vgpr_workitem_id") { |
5363 | 0 | PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2, |
5364 | 0 | COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID, Val, |
5365 | 0 | ValRange); |
5366 | 0 | } else if (ID == ".amdhsa_next_free_vgpr") { |
5367 | 0 | VGPRRange = ValRange; |
5368 | 0 | NextFreeVGPR = Val; |
5369 | 0 | } else if (ID == ".amdhsa_next_free_sgpr") { |
5370 | 0 | SGPRRange = ValRange; |
5371 | 0 | NextFreeSGPR = Val; |
5372 | 0 | } else if (ID == ".amdhsa_accum_offset") { |
5373 | 0 | if (!isGFX90A()) |
5374 | 0 | return Error(IDRange.Start, "directive requires gfx90a+", IDRange); |
5375 | 0 | AccumOffset = Val; |
5376 | 0 | } else if (ID == ".amdhsa_reserve_vcc") { |
5377 | 0 | if (!isUInt<1>(Val)) |
5378 | 0 | return OutOfRangeError(ValRange); |
5379 | 0 | ReserveVCC = Val; |
5380 | 0 | } else if (ID == ".amdhsa_reserve_flat_scratch") { |
5381 | 0 | if (IVersion.Major < 7) |
5382 | 0 | return Error(IDRange.Start, "directive requires gfx7+", IDRange); |
5383 | 0 | if (hasArchitectedFlatScratch()) |
5384 | 0 | return Error(IDRange.Start, |
5385 | 0 | "directive is not supported with architected flat scratch", |
5386 | 0 | IDRange); |
5387 | 0 | if (!isUInt<1>(Val)) |
5388 | 0 | return OutOfRangeError(ValRange); |
5389 | 0 | ReserveFlatScr = Val; |
5390 | 0 | } else if (ID == ".amdhsa_reserve_xnack_mask") { |
5391 | 0 | if (IVersion.Major < 8) |
5392 | 0 | return Error(IDRange.Start, "directive requires gfx8+", IDRange); |
5393 | 0 | if (!isUInt<1>(Val)) |
5394 | 0 | return OutOfRangeError(ValRange); |
5395 | 0 | if (Val != getTargetStreamer().getTargetID()->isXnackOnOrAny()) |
5396 | 0 | return getParser().Error(IDRange.Start, ".amdhsa_reserve_xnack_mask does not match target id", |
5397 | 0 | IDRange); |
5398 | 0 | } else if (ID == ".amdhsa_float_round_mode_32") { |
5399 | 0 | PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, |
5400 | 0 | COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32, Val, ValRange); |
5401 | 0 | } else if (ID == ".amdhsa_float_round_mode_16_64") { |
5402 | 0 | PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, |
5403 | 0 | COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64, Val, ValRange); |
5404 | 0 | } else if (ID == ".amdhsa_float_denorm_mode_32") { |
5405 | 0 | PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, |
5406 | 0 | COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32, Val, ValRange); |
5407 | 0 | } else if (ID == ".amdhsa_float_denorm_mode_16_64") { |
5408 | 0 | PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, |
5409 | 0 | COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64, Val, |
5410 | 0 | ValRange); |
5411 | 0 | } else if (ID == ".amdhsa_dx10_clamp") { |
5412 | 0 | if (IVersion.Major >= 12) |
5413 | 0 | return Error(IDRange.Start, "directive unsupported on gfx12+", IDRange); |
5414 | 0 | PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, |
5415 | 0 | COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_DX10_CLAMP, Val, |
5416 | 0 | ValRange); |
5417 | 0 | } else if (ID == ".amdhsa_ieee_mode") { |
5418 | 0 | if (IVersion.Major >= 12) |
5419 | 0 | return Error(IDRange.Start, "directive unsupported on gfx12+", IDRange); |
5420 | 0 | PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, |
5421 | 0 | COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_IEEE_MODE, Val, |
5422 | 0 | ValRange); |
5423 | 0 | } else if (ID == ".amdhsa_fp16_overflow") { |
5424 | 0 | if (IVersion.Major < 9) |
5425 | 0 | return Error(IDRange.Start, "directive requires gfx9+", IDRange); |
5426 | 0 | PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, COMPUTE_PGM_RSRC1_GFX9_PLUS_FP16_OVFL, Val, |
5427 | 0 | ValRange); |
5428 | 0 | } else if (ID == ".amdhsa_tg_split") { |
5429 | 0 | if (!isGFX90A()) |
5430 | 0 | return Error(IDRange.Start, "directive requires gfx90a+", IDRange); |
5431 | 0 | PARSE_BITS_ENTRY(KD.compute_pgm_rsrc3, COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT, Val, |
5432 | 0 | ValRange); |
5433 | 0 | } else if (ID == ".amdhsa_workgroup_processor_mode") { |
5434 | 0 | if (IVersion.Major < 10) |
5435 | 0 | return Error(IDRange.Start, "directive requires gfx10+", IDRange); |
5436 | 0 | PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, COMPUTE_PGM_RSRC1_GFX10_PLUS_WGP_MODE, Val, |
5437 | 0 | ValRange); |
5438 | 0 | } else if (ID == ".amdhsa_memory_ordered") { |
5439 | 0 | if (IVersion.Major < 10) |
5440 | 0 | return Error(IDRange.Start, "directive requires gfx10+", IDRange); |
5441 | 0 | PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, COMPUTE_PGM_RSRC1_GFX10_PLUS_MEM_ORDERED, Val, |
5442 | 0 | ValRange); |
5443 | 0 | } else if (ID == ".amdhsa_forward_progress") { |
5444 | 0 | if (IVersion.Major < 10) |
5445 | 0 | return Error(IDRange.Start, "directive requires gfx10+", IDRange); |
5446 | 0 | PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, COMPUTE_PGM_RSRC1_GFX10_PLUS_FWD_PROGRESS, Val, |
5447 | 0 | ValRange); |
5448 | 0 | } else if (ID == ".amdhsa_shared_vgpr_count") { |
5449 | 0 | if (IVersion.Major < 10 || IVersion.Major >= 12) |
5450 | 0 | return Error(IDRange.Start, "directive requires gfx10 or gfx11", |
5451 | 0 | IDRange); |
5452 | 0 | SharedVGPRCount = Val; |
5453 | 0 | PARSE_BITS_ENTRY(KD.compute_pgm_rsrc3, |
5454 | 0 | COMPUTE_PGM_RSRC3_GFX10_GFX11_SHARED_VGPR_COUNT, Val, |
5455 | 0 | ValRange); |
5456 | 0 | } else if (ID == ".amdhsa_exception_fp_ieee_invalid_op") { |
5457 | 0 | PARSE_BITS_ENTRY( |
5458 | 0 | KD.compute_pgm_rsrc2, |
5459 | 0 | COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION, Val, |
5460 | 0 | ValRange); |
5461 | 0 | } else if (ID == ".amdhsa_exception_fp_denorm_src") { |
5462 | 0 | PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2, |
5463 | 0 | COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE, |
5464 | 0 | Val, ValRange); |
5465 | 0 | } else if (ID == ".amdhsa_exception_fp_ieee_div_zero") { |
5466 | 0 | PARSE_BITS_ENTRY( |
5467 | 0 | KD.compute_pgm_rsrc2, |
5468 | 0 | COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO, Val, |
5469 | 0 | ValRange); |
5470 | 0 | } else if (ID == ".amdhsa_exception_fp_ieee_overflow") { |
5471 | 0 | PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2, |
5472 | 0 | COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW, |
5473 | 0 | Val, ValRange); |
5474 | 0 | } else if (ID == ".amdhsa_exception_fp_ieee_underflow") { |
5475 | 0 | PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2, |
5476 | 0 | COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW, |
5477 | 0 | Val, ValRange); |
5478 | 0 | } else if (ID == ".amdhsa_exception_fp_ieee_inexact") { |
5479 | 0 | PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2, |
5480 | 0 | COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT, |
5481 | 0 | Val, ValRange); |
5482 | 0 | } else if (ID == ".amdhsa_exception_int_div_zero") { |
5483 | 0 | PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2, |
5484 | 0 | COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO, |
5485 | 0 | Val, ValRange); |
5486 | 0 | } else if (ID == ".amdhsa_round_robin_scheduling") { |
5487 | 0 | if (IVersion.Major < 12) |
5488 | 0 | return Error(IDRange.Start, "directive requires gfx12+", IDRange); |
5489 | 0 | PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, |
5490 | 0 | COMPUTE_PGM_RSRC1_GFX12_PLUS_ENABLE_WG_RR_EN, Val, |
5491 | 0 | ValRange); |
5492 | 0 | } else { |
5493 | 0 | return Error(IDRange.Start, "unknown .amdhsa_kernel directive", IDRange); |
5494 | 0 | } |
5495 | |
5496 | 0 | #undef PARSE_BITS_ENTRY |
5497 | 0 | } |
5498 | | |
5499 | 0 | if (!Seen.contains(".amdhsa_next_free_vgpr")) |
5500 | 0 | return TokError(".amdhsa_next_free_vgpr directive is required"); |
5501 | | |
5502 | 0 | if (!Seen.contains(".amdhsa_next_free_sgpr")) |
5503 | 0 | return TokError(".amdhsa_next_free_sgpr directive is required"); |
5504 | | |
5505 | 0 | unsigned VGPRBlocks; |
5506 | 0 | unsigned SGPRBlocks; |
5507 | 0 | if (calculateGPRBlocks(getFeatureBits(), ReserveVCC, ReserveFlatScr, |
5508 | 0 | getTargetStreamer().getTargetID()->isXnackOnOrAny(), |
5509 | 0 | EnableWavefrontSize32, NextFreeVGPR, |
5510 | 0 | VGPRRange, NextFreeSGPR, SGPRRange, VGPRBlocks, |
5511 | 0 | SGPRBlocks)) |
5512 | 0 | return true; |
5513 | | |
5514 | 0 | if (!isUInt<COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT_WIDTH>( |
5515 | 0 | VGPRBlocks)) |
5516 | 0 | return OutOfRangeError(VGPRRange); |
5517 | 0 | AMDHSA_BITS_SET(KD.compute_pgm_rsrc1, |
5518 | 0 | COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT, VGPRBlocks); |
5519 | |
5520 | 0 | if (!isUInt<COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT_WIDTH>( |
5521 | 0 | SGPRBlocks)) |
5522 | 0 | return OutOfRangeError(SGPRRange); |
5523 | 0 | AMDHSA_BITS_SET(KD.compute_pgm_rsrc1, |
5524 | 0 | COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT, |
5525 | 0 | SGPRBlocks); |
5526 | |
5527 | 0 | if (ExplicitUserSGPRCount && ImpliedUserSGPRCount > *ExplicitUserSGPRCount) |
5528 | 0 | return TokError("amdgpu_user_sgpr_count smaller than implied by " |
5529 | 0 | "enabled user SGPRs"); |
5530 | | |
5531 | 0 | unsigned UserSGPRCount = |
5532 | 0 | ExplicitUserSGPRCount ? *ExplicitUserSGPRCount : ImpliedUserSGPRCount; |
5533 | |
5534 | 0 | if (!isUInt<COMPUTE_PGM_RSRC2_USER_SGPR_COUNT_WIDTH>(UserSGPRCount)) |
5535 | 0 | return TokError("too many user SGPRs enabled"); |
5536 | 0 | AMDHSA_BITS_SET(KD.compute_pgm_rsrc2, COMPUTE_PGM_RSRC2_USER_SGPR_COUNT, |
5537 | 0 | UserSGPRCount); |
5538 | |
|
5539 | 0 | if (PreloadLength && KD.kernarg_size && |
5540 | 0 | (PreloadLength * 4 + PreloadOffset * 4 > KD.kernarg_size)) |
5541 | 0 | return TokError("Kernarg preload length + offset is larger than the " |
5542 | 0 | "kernarg segment size"); |
5543 | | |
5544 | 0 | if (isGFX90A()) { |
5545 | 0 | if (!Seen.contains(".amdhsa_accum_offset")) |
5546 | 0 | return TokError(".amdhsa_accum_offset directive is required"); |
5547 | 0 | if (AccumOffset < 4 || AccumOffset > 256 || (AccumOffset & 3)) |
5548 | 0 | return TokError("accum_offset should be in range [4..256] in " |
5549 | 0 | "increments of 4"); |
5550 | 0 | if (AccumOffset > alignTo(std::max((uint64_t)1, NextFreeVGPR), 4)) |
5551 | 0 | return TokError("accum_offset exceeds total VGPR allocation"); |
5552 | 0 | AMDHSA_BITS_SET(KD.compute_pgm_rsrc3, COMPUTE_PGM_RSRC3_GFX90A_ACCUM_OFFSET, |
5553 | 0 | (AccumOffset / 4 - 1)); |
5554 | 0 | } |
5555 | | |
5556 | 0 | if (IVersion.Major >= 10 && IVersion.Major < 12) { |
5557 | | // SharedVGPRCount < 16 checked by PARSE_BITS_ENTRY
5558 | 0 | if (SharedVGPRCount && EnableWavefrontSize32 && *EnableWavefrontSize32) { |
5559 | 0 | return TokError("shared_vgpr_count directive not valid on " |
5560 | 0 | "wavefront size 32"); |
5561 | 0 | } |
5562 | 0 | if (SharedVGPRCount * 2 + VGPRBlocks > 63) { |
5563 | 0 | return TokError("shared_vgpr_count*2 + " |
5564 | 0 | "compute_pgm_rsrc1.GRANULATED_WORKITEM_VGPR_COUNT cannot " |
5565 | 0 | "exceed 63");
5566 | 0 | } |
5567 | 0 | } |
5568 | | |
5569 | 0 | getTargetStreamer().EmitAmdhsaKernelDescriptor( |
5570 | 0 | getSTI(), KernelName, KD, NextFreeVGPR, NextFreeSGPR, ReserveVCC, |
5571 | 0 | ReserveFlatScr, AMDGPU::getAmdhsaCodeObjectVersion()); |
5572 | 0 | return false; |
5573 | 0 | } |
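| | // A minimal sketch of a directive block this routine accepts; the kernel name and
| | // values are illustrative, and the usual .end_amdhsa_kernel terminator is assumed:
| | //   .amdhsa_kernel my_kernel
| | //     .amdhsa_next_free_vgpr 32      // required (checked above)
| | //     .amdhsa_next_free_sgpr 16      // required (checked above)
| | //     .amdhsa_accum_offset 32        // required on GFX90A; multiple of 4 in [4..256]
| | //   .end_amdhsa_kernel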
5574 | | |
5575 | 0 | bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() { |
5576 | 0 | uint32_t Major; |
5577 | 0 | uint32_t Minor; |
5578 | |
|
5579 | 0 | if (ParseDirectiveMajorMinor(Major, Minor)) |
5580 | 0 | return true; |
5581 | | |
5582 | 0 | getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor); |
5583 | 0 | return false; |
5584 | 0 | } |
5585 | | |
5586 | 0 | bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() { |
5587 | 0 | uint32_t Major; |
5588 | 0 | uint32_t Minor; |
5589 | 0 | uint32_t Stepping; |
5590 | 0 | StringRef VendorName; |
5591 | 0 | StringRef ArchName; |
5592 | | |
5593 | | // If this directive has no arguments, then use the ISA version for the |
5594 | | // targeted GPU. |
5595 | 0 | if (isToken(AsmToken::EndOfStatement)) { |
5596 | 0 | AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(getSTI().getCPU()); |
5597 | 0 | getTargetStreamer().EmitDirectiveHSACodeObjectISAV2(ISA.Major, ISA.Minor, |
5598 | 0 | ISA.Stepping, |
5599 | 0 | "AMD", "AMDGPU"); |
5600 | 0 | return false; |
5601 | 0 | } |
5602 | | |
5603 | 0 | if (ParseDirectiveMajorMinor(Major, Minor)) |
5604 | 0 | return true; |
5605 | | |
5606 | 0 | if (!trySkipToken(AsmToken::Comma)) |
5607 | 0 | return TokError("stepping version number required, comma expected"); |
5608 | | |
5609 | 0 | if (ParseAsAbsoluteExpression(Stepping)) |
5610 | 0 | return TokError("invalid stepping version"); |
5611 | | |
5612 | 0 | if (!trySkipToken(AsmToken::Comma)) |
5613 | 0 | return TokError("vendor name required, comma expected"); |
5614 | | |
5615 | 0 | if (!parseString(VendorName, "invalid vendor name")) |
5616 | 0 | return true; |
5617 | | |
5618 | 0 | if (!trySkipToken(AsmToken::Comma)) |
5619 | 0 | return TokError("arch name required, comma expected"); |
5620 | | |
5621 | 0 | if (!parseString(ArchName, "invalid arch name")) |
5622 | 0 | return true; |
5623 | | |
5624 | 0 | getTargetStreamer().EmitDirectiveHSACodeObjectISAV2(Major, Minor, Stepping, |
5625 | 0 | VendorName, ArchName); |
5626 | 0 | return false; |
5627 | 0 | } |
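| | // Illustrative uses of the directive parsed above (version numbers are hypothetical):
| | //   .hsa_code_object_isa                        // no arguments: target ISA, "AMD", "AMDGPU"
| | //   .hsa_code_object_isa 8,0,3,"AMD","AMDGPU"   // major, minor, stepping, vendor, arch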
5628 | | |
5629 | | bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID, |
5630 | 0 | amd_kernel_code_t &Header) { |
5631 | | // max_scratch_backing_memory_byte_size is deprecated. Ignore it while parsing |
5632 | | // assembly for backwards compatibility. |
5633 | 0 | if (ID == "max_scratch_backing_memory_byte_size") { |
5634 | 0 | Parser.eatToEndOfStatement(); |
5635 | 0 | return false; |
5636 | 0 | } |
5637 | | |
5638 | 0 | SmallString<40> ErrStr; |
5639 | 0 | raw_svector_ostream Err(ErrStr); |
5640 | 0 | if (!parseAmdKernelCodeField(ID, getParser(), Header, Err)) { |
5641 | 0 | return TokError(Err.str()); |
5642 | 0 | } |
5643 | 0 | Lex(); |
5644 | |
|
5645 | 0 | if (ID == "enable_dx10_clamp") { |
5646 | 0 | if (G_00B848_DX10_CLAMP(Header.compute_pgm_resource_registers) && |
5647 | 0 | isGFX12Plus()) |
5648 | 0 | return TokError("enable_dx10_clamp=1 is not allowed on GFX12+"); |
5649 | 0 | } |
5650 | | |
5651 | 0 | if (ID == "enable_ieee_mode") { |
5652 | 0 | if (G_00B848_IEEE_MODE(Header.compute_pgm_resource_registers) && |
5653 | 0 | isGFX12Plus()) |
5654 | 0 | return TokError("enable_ieee_mode=1 is not allowed on GFX12+"); |
5655 | 0 | } |
5656 | | |
5657 | 0 | if (ID == "enable_wavefront_size32") { |
5658 | 0 | if (Header.code_properties & AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32) { |
5659 | 0 | if (!isGFX10Plus()) |
5660 | 0 | return TokError("enable_wavefront_size32=1 is only allowed on GFX10+"); |
5661 | 0 | if (!getFeatureBits()[AMDGPU::FeatureWavefrontSize32]) |
5662 | 0 | return TokError("enable_wavefront_size32=1 requires +WavefrontSize32"); |
5663 | 0 | } else { |
5664 | 0 | if (!getFeatureBits()[AMDGPU::FeatureWavefrontSize64]) |
5665 | 0 | return TokError("enable_wavefront_size32=0 requires +WavefrontSize64"); |
5666 | 0 | } |
5667 | 0 | } |
5668 | | |
5669 | 0 | if (ID == "wavefront_size") { |
5670 | 0 | if (Header.wavefront_size == 5) { |
5671 | 0 | if (!isGFX10Plus()) |
5672 | 0 | return TokError("wavefront_size=5 is only allowed on GFX10+"); |
5673 | 0 | if (!getFeatureBits()[AMDGPU::FeatureWavefrontSize32]) |
5674 | 0 | return TokError("wavefront_size=5 requires +WavefrontSize32"); |
5675 | 0 | } else if (Header.wavefront_size == 6) { |
5676 | 0 | if (!getFeatureBits()[AMDGPU::FeatureWavefrontSize64]) |
5677 | 0 | return TokError("wavefront_size=6 requires +WavefrontSize64"); |
5678 | 0 | } |
5679 | 0 | } |
5680 | | |
5681 | 0 | if (ID == "enable_wgp_mode") { |
5682 | 0 | if (G_00B848_WGP_MODE(Header.compute_pgm_resource_registers) && |
5683 | 0 | !isGFX10Plus()) |
5684 | 0 | return TokError("enable_wgp_mode=1 is only allowed on GFX10+"); |
5685 | 0 | } |
5686 | | |
5687 | 0 | if (ID == "enable_mem_ordered") { |
5688 | 0 | if (G_00B848_MEM_ORDERED(Header.compute_pgm_resource_registers) && |
5689 | 0 | !isGFX10Plus()) |
5690 | 0 | return TokError("enable_mem_ordered=1 is only allowed on GFX10+"); |
5691 | 0 | } |
5692 | | |
5693 | 0 | if (ID == "enable_fwd_progress") { |
5694 | 0 | if (G_00B848_FWD_PROGRESS(Header.compute_pgm_resource_registers) && |
5695 | 0 | !isGFX10Plus()) |
5696 | 0 | return TokError("enable_fwd_progress=1 is only allowed on GFX10+"); |
5697 | 0 | } |
5698 | | |
5699 | 0 | return false; |
5700 | 0 | } |
5701 | | |
5702 | 0 | bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() { |
5703 | 0 | amd_kernel_code_t Header; |
5704 | 0 | AMDGPU::initDefaultAMDKernelCodeT(Header, &getSTI()); |
5705 | |
|
5706 | 0 | while (true) { |
5707 | | // Lex EndOfStatement. This is in a while loop, because lexing a comment |
5708 | | // will set the current token to EndOfStatement. |
5709 | 0 | while(trySkipToken(AsmToken::EndOfStatement)); |
5710 | |
|
5711 | 0 | StringRef ID; |
5712 | 0 | if (!parseId(ID, "expected value identifier or .end_amd_kernel_code_t")) |
5713 | 0 | return true; |
5714 | | |
5715 | 0 | if (ID == ".end_amd_kernel_code_t") |
5716 | 0 | break; |
5717 | | |
5718 | 0 | if (ParseAMDKernelCodeTValue(ID, Header)) |
5719 | 0 | return true; |
5720 | 0 | } |
5721 | | |
5722 | 0 | getTargetStreamer().EmitAMDKernelCodeT(Header); |
5723 | |
|
5724 | 0 | return false; |
5725 | 0 | } |
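| | // Sketch of the block consumed by the loop above; field names must be known to
| | // parseAmdKernelCodeField, and the values shown are illustrative:
| | //   .amd_kernel_code_t
| | //     wavefront_size = 6
| | //     enable_wavefront_size32 = 0
| | //   .end_amd_kernel_code_t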
5726 | | |
5727 | 0 | bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() { |
5728 | 0 | StringRef KernelName; |
5729 | 0 | if (!parseId(KernelName, "expected symbol name")) |
5730 | 0 | return true; |
5731 | | |
5732 | 0 | getTargetStreamer().EmitAMDGPUSymbolType(KernelName, |
5733 | 0 | ELF::STT_AMDGPU_HSA_KERNEL); |
5734 | |
|
5735 | 0 | KernelScope.initialize(getContext()); |
5736 | 0 | return false; |
5737 | 0 | } |
5738 | | |
5739 | 0 | bool AMDGPUAsmParser::ParseDirectiveISAVersion() { |
5740 | 0 | if (getSTI().getTargetTriple().getArch() != Triple::amdgcn) { |
5741 | 0 | return Error(getLoc(), |
5742 | 0 | ".amd_amdgpu_isa directive is not available on non-amdgcn " |
5743 | 0 | "architectures"); |
5744 | 0 | } |
5745 | | |
5746 | 0 | auto TargetIDDirective = getLexer().getTok().getStringContents(); |
5747 | 0 | if (getTargetStreamer().getTargetID()->toString() != TargetIDDirective) |
5748 | 0 | return Error(getParser().getTok().getLoc(), "target id must match options"); |
5749 | | |
5750 | 0 | getTargetStreamer().EmitISAVersion(); |
5751 | 0 | Lex(); |
5752 | |
|
5753 | 0 | return false; |
5754 | 0 | } |
5755 | | |
5756 | 0 | bool AMDGPUAsmParser::ParseDirectiveHSAMetadata() { |
5757 | 0 | assert(isHsaAbi(getSTI())); |
5758 | | |
5759 | 0 | std::string HSAMetadataString; |
5760 | 0 | if (ParseToEndDirective(HSAMD::V3::AssemblerDirectiveBegin, |
5761 | 0 | HSAMD::V3::AssemblerDirectiveEnd, HSAMetadataString)) |
5762 | 0 | return true; |
5763 | | |
5764 | 0 | if (!getTargetStreamer().EmitHSAMetadataV3(HSAMetadataString)) |
5765 | 0 | return Error(getLoc(), "invalid HSA metadata"); |
5766 | | |
5767 | 0 | return false; |
5768 | 0 | } |
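| | // The metadata text is collected verbatim between the begin/end directives and passed
| | // to the streamer as a YAML string. A hedged sketch, assuming the HSAMD::V3 directive
| | // spellings and with an illustrative payload:
| | //   .amdgpu_metadata
| | //     amdhsa.version: [ 1, 1 ]
| | //   .end_amdgpu_metadata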
5769 | | |
5770 | | /// Common code to parse out a block of text (typically YAML) between start and |
5771 | | /// end directives. |
5772 | | bool AMDGPUAsmParser::ParseToEndDirective(const char *AssemblerDirectiveBegin, |
5773 | | const char *AssemblerDirectiveEnd, |
5774 | 0 | std::string &CollectString) { |
5775 | |
|
5776 | 0 | raw_string_ostream CollectStream(CollectString); |
5777 | |
|
5778 | 0 | getLexer().setSkipSpace(false); |
5779 | |
|
5780 | 0 | bool FoundEnd = false; |
5781 | 0 | while (!isToken(AsmToken::Eof)) { |
5782 | 0 | while (isToken(AsmToken::Space)) { |
5783 | 0 | CollectStream << getTokenStr(); |
5784 | 0 | Lex(); |
5785 | 0 | } |
5786 | |
|
5787 | 0 | if (trySkipId(AssemblerDirectiveEnd)) { |
5788 | 0 | FoundEnd = true; |
5789 | 0 | break; |
5790 | 0 | } |
5791 | | |
5792 | 0 | CollectStream << Parser.parseStringToEndOfStatement() |
5793 | 0 | << getContext().getAsmInfo()->getSeparatorString(); |
5794 | |
|
5795 | 0 | Parser.eatToEndOfStatement(); |
5796 | 0 | } |
5797 | |
|
5798 | 0 | getLexer().setSkipSpace(true); |
5799 | |
|
5800 | 0 | if (isToken(AsmToken::Eof) && !FoundEnd) { |
5801 | 0 | return TokError(Twine("expected directive ") + |
5802 | 0 | Twine(AssemblerDirectiveEnd) + Twine(" not found")); |
5803 | 0 | } |
5804 | | |
5805 | 0 | CollectStream.flush(); |
5806 | 0 | return false; |
5807 | 0 | } |
5808 | | |
5809 | | /// Parse the assembler directive for new MsgPack-format PAL metadata. |
5810 | 0 | bool AMDGPUAsmParser::ParseDirectivePALMetadataBegin() { |
5811 | 0 | std::string String; |
5812 | 0 | if (ParseToEndDirective(AMDGPU::PALMD::AssemblerDirectiveBegin, |
5813 | 0 | AMDGPU::PALMD::AssemblerDirectiveEnd, String)) |
5814 | 0 | return true; |
5815 | | |
5816 | 0 | auto PALMetadata = getTargetStreamer().getPALMetadata(); |
5817 | 0 | if (!PALMetadata->setFromString(String)) |
5818 | 0 | return Error(getLoc(), "invalid PAL metadata"); |
5819 | 0 | return false; |
5820 | 0 | } |
5821 | | |
5822 | | /// Parse the assembler directive for old linear-format PAL metadata. |
5823 | 0 | bool AMDGPUAsmParser::ParseDirectivePALMetadata() { |
5824 | 0 | if (getSTI().getTargetTriple().getOS() != Triple::AMDPAL) { |
5825 | 0 | return Error(getLoc(), |
5826 | 0 | (Twine(PALMD::AssemblerDirective) + Twine(" directive is " |
5827 | 0 | "not available on non-amdpal OSes")).str()); |
5828 | 0 | } |
5829 | | |
5830 | 0 | auto PALMetadata = getTargetStreamer().getPALMetadata(); |
5831 | 0 | PALMetadata->setLegacy(); |
5832 | 0 | for (;;) { |
5833 | 0 | uint32_t Key, Value; |
5834 | 0 | if (ParseAsAbsoluteExpression(Key)) { |
5835 | 0 | return TokError(Twine("invalid value in ") + |
5836 | 0 | Twine(PALMD::AssemblerDirective)); |
5837 | 0 | } |
5838 | 0 | if (!trySkipToken(AsmToken::Comma)) { |
5839 | 0 | return TokError(Twine("expected an even number of values in ") + |
5840 | 0 | Twine(PALMD::AssemblerDirective)); |
5841 | 0 | } |
5842 | 0 | if (ParseAsAbsoluteExpression(Value)) { |
5843 | 0 | return TokError(Twine("invalid value in ") + |
5844 | 0 | Twine(PALMD::AssemblerDirective)); |
5845 | 0 | } |
5846 | 0 | PALMetadata->setRegister(Key, Value); |
5847 | 0 | if (!trySkipToken(AsmToken::Comma)) |
5848 | 0 | break; |
5849 | 0 | } |
5850 | 0 | return false; |
5851 | 0 | } |
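| | // The legacy form takes an even number of comma-separated integers, alternating a
| | // register key with its value; the key/value numbers below are hypothetical:
| | //   <PALMD::AssemblerDirective> 0x1234, 0x1, 0x5678, 0x2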
5852 | | |
5853 | | /// ParseDirectiveAMDGPULDS |
5854 | | /// ::= .amdgpu_lds identifier ',' size_expression [',' align_expression] |
5855 | 0 | bool AMDGPUAsmParser::ParseDirectiveAMDGPULDS() { |
5856 | 0 | if (getParser().checkForValidSection()) |
5857 | 0 | return true; |
5858 | | |
5859 | 0 | StringRef Name; |
5860 | 0 | SMLoc NameLoc = getLoc(); |
5861 | 0 | if (getParser().parseIdentifier(Name)) |
5862 | 0 | return TokError("expected identifier in directive"); |
5863 | | |
5864 | 0 | MCSymbol *Symbol = getContext().getOrCreateSymbol(Name); |
5865 | 0 | if (getParser().parseComma()) |
5866 | 0 | return true; |
5867 | | |
5868 | 0 | unsigned LocalMemorySize = AMDGPU::IsaInfo::getLocalMemorySize(&getSTI()); |
5869 | |
|
5870 | 0 | int64_t Size; |
5871 | 0 | SMLoc SizeLoc = getLoc(); |
5872 | 0 | if (getParser().parseAbsoluteExpression(Size)) |
5873 | 0 | return true; |
5874 | 0 | if (Size < 0) |
5875 | 0 | return Error(SizeLoc, "size must be non-negative"); |
5876 | 0 | if (Size > LocalMemorySize) |
5877 | 0 | return Error(SizeLoc, "size is too large"); |
5878 | | |
5879 | 0 | int64_t Alignment = 4; |
5880 | 0 | if (trySkipToken(AsmToken::Comma)) { |
5881 | 0 | SMLoc AlignLoc = getLoc(); |
5882 | 0 | if (getParser().parseAbsoluteExpression(Alignment)) |
5883 | 0 | return true; |
5884 | 0 | if (Alignment < 0 || !isPowerOf2_64(Alignment)) |
5885 | 0 | return Error(AlignLoc, "alignment must be a power of two"); |
5886 | | |
5887 | | // Alignment larger than the size of LDS is possible in theory, as long |
5888 | | // as the linker manages to place the symbol at address 0, but we do want
5889 | | // to make sure the alignment fits nicely into a 32-bit integer. |
5890 | 0 | if (Alignment >= 1u << 31) |
5891 | 0 | return Error(AlignLoc, "alignment is too large"); |
5892 | 0 | } |
5893 | | |
5894 | 0 | if (parseEOL()) |
5895 | 0 | return true; |
5896 | | |
5897 | 0 | Symbol->redefineIfPossible(); |
5898 | 0 | if (!Symbol->isUndefined()) |
5899 | 0 | return Error(NameLoc, "invalid symbol redefinition"); |
5900 | | |
5901 | 0 | getTargetStreamer().emitAMDGPULDS(Symbol, Size, Align(Alignment)); |
5902 | 0 | return false; |
5903 | 0 | } |
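| | // Examples matching the grammar documented above (symbol names and sizes illustrative):
| | //   .amdgpu_lds lds_var, 4096        // 4 KiB, default alignment of 4
| | //   .amdgpu_lds lds_var2, 256, 16    // explicit power-of-two alignment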
5904 | | |
5905 | 0 | bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) { |
5906 | 0 | StringRef IDVal = DirectiveID.getString(); |
5907 | |
|
5908 | 0 | if (isHsaAbi(getSTI())) { |
5909 | 0 | if (IDVal == ".amdhsa_kernel") |
5910 | 0 | return ParseDirectiveAMDHSAKernel(); |
5911 | | |
5912 | | // TODO: Restructure/combine with PAL metadata directive. |
5913 | 0 | if (IDVal == AMDGPU::HSAMD::V3::AssemblerDirectiveBegin) |
5914 | 0 | return ParseDirectiveHSAMetadata(); |
5915 | 0 | } else { |
5916 | 0 | if (IDVal == ".hsa_code_object_version") |
5917 | 0 | return ParseDirectiveHSACodeObjectVersion(); |
5918 | | |
5919 | 0 | if (IDVal == ".hsa_code_object_isa") |
5920 | 0 | return ParseDirectiveHSACodeObjectISA(); |
5921 | | |
5922 | 0 | if (IDVal == ".amd_kernel_code_t") |
5923 | 0 | return ParseDirectiveAMDKernelCodeT(); |
5924 | | |
5925 | 0 | if (IDVal == ".amdgpu_hsa_kernel") |
5926 | 0 | return ParseDirectiveAMDGPUHsaKernel(); |
5927 | | |
5928 | 0 | if (IDVal == ".amd_amdgpu_isa") |
5929 | 0 | return ParseDirectiveISAVersion(); |
5930 | | |
5931 | 0 | if (IDVal == AMDGPU::HSAMD::AssemblerDirectiveBegin) { |
5932 | 0 | return Error(getLoc(), (Twine(HSAMD::AssemblerDirectiveBegin) + |
5933 | 0 | Twine(" directive is " |
5934 | 0 | "not available on non-amdhsa OSes")) |
5935 | 0 | .str()); |
5936 | 0 | } |
5937 | 0 | } |
5938 | | |
5939 | 0 | if (IDVal == ".amdgcn_target") |
5940 | 0 | return ParseDirectiveAMDGCNTarget(); |
5941 | | |
5942 | 0 | if (IDVal == ".amdgpu_lds") |
5943 | 0 | return ParseDirectiveAMDGPULDS(); |
5944 | | |
5945 | 0 | if (IDVal == PALMD::AssemblerDirectiveBegin) |
5946 | 0 | return ParseDirectivePALMetadataBegin(); |
5947 | | |
5948 | 0 | if (IDVal == PALMD::AssemblerDirective) |
5949 | 0 | return ParseDirectivePALMetadata(); |
5950 | | |
5951 | 0 | return true; |
5952 | 0 | } |
5953 | | |
5954 | | bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI, |
5955 | 0 | unsigned RegNo) { |
5956 | |
|
5957 | 0 | if (MRI.regsOverlap(AMDGPU::TTMP12_TTMP13_TTMP14_TTMP15, RegNo)) |
5958 | 0 | return isGFX9Plus(); |
5959 | | |
5960 | | // GFX10+ has 2 more SGPRs 104 and 105. |
5961 | 0 | if (MRI.regsOverlap(AMDGPU::SGPR104_SGPR105, RegNo)) |
5962 | 0 | return hasSGPR104_SGPR105(); |
5963 | | |
5964 | 0 | switch (RegNo) { |
5965 | 0 | case AMDGPU::SRC_SHARED_BASE_LO: |
5966 | 0 | case AMDGPU::SRC_SHARED_BASE: |
5967 | 0 | case AMDGPU::SRC_SHARED_LIMIT_LO: |
5968 | 0 | case AMDGPU::SRC_SHARED_LIMIT: |
5969 | 0 | case AMDGPU::SRC_PRIVATE_BASE_LO: |
5970 | 0 | case AMDGPU::SRC_PRIVATE_BASE: |
5971 | 0 | case AMDGPU::SRC_PRIVATE_LIMIT_LO: |
5972 | 0 | case AMDGPU::SRC_PRIVATE_LIMIT: |
5973 | 0 | return isGFX9Plus(); |
5974 | 0 | case AMDGPU::SRC_POPS_EXITING_WAVE_ID: |
5975 | 0 | return isGFX9Plus() && !isGFX11Plus(); |
5976 | 0 | case AMDGPU::TBA: |
5977 | 0 | case AMDGPU::TBA_LO: |
5978 | 0 | case AMDGPU::TBA_HI: |
5979 | 0 | case AMDGPU::TMA: |
5980 | 0 | case AMDGPU::TMA_LO: |
5981 | 0 | case AMDGPU::TMA_HI: |
5982 | 0 | return !isGFX9Plus(); |
5983 | 0 | case AMDGPU::XNACK_MASK: |
5984 | 0 | case AMDGPU::XNACK_MASK_LO: |
5985 | 0 | case AMDGPU::XNACK_MASK_HI: |
5986 | 0 | return (isVI() || isGFX9()) && getTargetStreamer().getTargetID()->isXnackSupported(); |
5987 | 0 | case AMDGPU::SGPR_NULL: |
5988 | 0 | return isGFX10Plus(); |
5989 | 0 | default: |
5990 | 0 | break; |
5991 | 0 | } |
5992 | | |
5993 | 0 | if (isCI()) |
5994 | 0 | return true; |
5995 | | |
5996 | 0 | if (isSI() || isGFX10Plus()) { |
5997 | | // No flat_scr on SI. |
5998 | | // On GFX10Plus flat scratch is not a valid register operand and can only be |
5999 | | // accessed with s_setreg/s_getreg. |
6000 | 0 | switch (RegNo) { |
6001 | 0 | case AMDGPU::FLAT_SCR: |
6002 | 0 | case AMDGPU::FLAT_SCR_LO: |
6003 | 0 | case AMDGPU::FLAT_SCR_HI: |
6004 | 0 | return false; |
6005 | 0 | default: |
6006 | 0 | return true; |
6007 | 0 | } |
6008 | 0 | } |
6009 | | |
6010 | | // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that |
6011 | | // SI/CI have. |
6012 | 0 | if (MRI.regsOverlap(AMDGPU::SGPR102_SGPR103, RegNo)) |
6013 | 0 | return hasSGPR102_SGPR103(); |
6014 | | |
6015 | 0 | return true; |
6016 | 0 | } |
6017 | | |
6018 | | ParseStatus AMDGPUAsmParser::parseOperand(OperandVector &Operands, |
6019 | | StringRef Mnemonic, |
6020 | 0 | OperandMode Mode) { |
6021 | 0 | ParseStatus Res = parseVOPD(Operands); |
6022 | 0 | if (Res.isSuccess() || Res.isFailure() || isToken(AsmToken::EndOfStatement)) |
6023 | 0 | return Res; |
6024 | | |
6025 | | // Try to parse with a custom parser |
6026 | 0 | Res = MatchOperandParserImpl(Operands, Mnemonic); |
6027 | | |
6028 | | // If we successfully parsed the operand or if there was an error parsing,
6029 | | // we are done. |
6030 | | // |
6031 | | // If we are parsing after we reach EndOfStatement then this means we |
6032 | | // are appending default values to the Operands list. This is only done |
6033 | | // by custom parser, so we shouldn't continue on to the generic parsing. |
6034 | 0 | if (Res.isSuccess() || Res.isFailure() || isToken(AsmToken::EndOfStatement)) |
6035 | 0 | return Res; |
6036 | | |
6037 | 0 | SMLoc RBraceLoc; |
6038 | 0 | SMLoc LBraceLoc = getLoc(); |
6039 | 0 | if (Mode == OperandMode_NSA && trySkipToken(AsmToken::LBrac)) { |
6040 | 0 | unsigned Prefix = Operands.size(); |
6041 | |
|
6042 | 0 | for (;;) { |
6043 | 0 | auto Loc = getLoc(); |
6044 | 0 | Res = parseReg(Operands); |
6045 | 0 | if (Res.isNoMatch()) |
6046 | 0 | Error(Loc, "expected a register"); |
6047 | 0 | if (!Res.isSuccess()) |
6048 | 0 | return ParseStatus::Failure; |
6049 | | |
6050 | 0 | RBraceLoc = getLoc(); |
6051 | 0 | if (trySkipToken(AsmToken::RBrac)) |
6052 | 0 | break; |
6053 | | |
6054 | 0 | if (!skipToken(AsmToken::Comma, |
6055 | 0 | "expected a comma or a closing square bracket")) |
6056 | 0 | return ParseStatus::Failure; |
6057 | 0 | } |
6058 | | |
6059 | 0 | if (Operands.size() - Prefix > 1) { |
6060 | 0 | Operands.insert(Operands.begin() + Prefix, |
6061 | 0 | AMDGPUOperand::CreateToken(this, "[", LBraceLoc)); |
6062 | 0 | Operands.push_back(AMDGPUOperand::CreateToken(this, "]", RBraceLoc)); |
6063 | 0 | } |
6064 | |
|
6065 | 0 | return ParseStatus::Success; |
6066 | 0 | } |
6067 | | |
6068 | 0 | return parseRegOrImm(Operands); |
6069 | 0 | } |
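| | // For GFX10+ MIMG instructions the NSA form lists address VGPRs in brackets, which the
| | // loop above re-wraps as '[' ... ']' tokens. A hedged sketch with illustrative registers:
| | //   image_sample v[0:3], [v4, v6, v7], s[0:7], s[8:11]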
6070 | | |
6071 | 0 | StringRef AMDGPUAsmParser::parseMnemonicSuffix(StringRef Name) { |
6072 | | // Clear any forced encodings from the previous instruction. |
6073 | 0 | setForcedEncodingSize(0); |
6074 | 0 | setForcedDPP(false); |
6075 | 0 | setForcedSDWA(false); |
6076 | |
|
6077 | 0 | if (Name.ends_with("_e64_dpp")) { |
6078 | 0 | setForcedDPP(true); |
6079 | 0 | setForcedEncodingSize(64); |
6080 | 0 | return Name.substr(0, Name.size() - 8); |
6081 | 0 | } else if (Name.ends_with("_e64")) { |
6082 | 0 | setForcedEncodingSize(64); |
6083 | 0 | return Name.substr(0, Name.size() - 4); |
6084 | 0 | } else if (Name.ends_with("_e32")) { |
6085 | 0 | setForcedEncodingSize(32); |
6086 | 0 | return Name.substr(0, Name.size() - 4); |
6087 | 0 | } else if (Name.ends_with("_dpp")) { |
6088 | 0 | setForcedDPP(true); |
6089 | 0 | return Name.substr(0, Name.size() - 4); |
6090 | 0 | } else if (Name.ends_with("_sdwa")) { |
6091 | 0 | setForcedSDWA(true); |
6092 | 0 | return Name.substr(0, Name.size() - 5); |
6093 | 0 | } |
6094 | 0 | return Name; |
6095 | 0 | } |
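| | // Illustrative mnemonics and the forced encoding each suffix selects:
| | //   v_add_f32_e32     -> "v_add_f32", forced 32-bit encoding
| | //   v_add_f32_e64     -> "v_add_f32", forced 64-bit encoding
| | //   v_add_f32_e64_dpp -> "v_add_f32", forced 64-bit encoding + DPP
| | //   v_mov_b32_sdwa    -> "v_mov_b32", forced SDWA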
6096 | | |
6097 | | static void applyMnemonicAliases(StringRef &Mnemonic, |
6098 | | const FeatureBitset &Features, |
6099 | | unsigned VariantID); |
6100 | | |
6101 | | bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info, |
6102 | | StringRef Name, |
6103 | 0 | SMLoc NameLoc, OperandVector &Operands) { |
6104 | | // Add the instruction mnemonic |
6105 | 0 | Name = parseMnemonicSuffix(Name); |
6106 | | |
6107 | | // If the target architecture uses MnemonicAlias, call it here to parse |
6108 | | // operands correctly. |
6109 | 0 | applyMnemonicAliases(Name, getAvailableFeatures(), 0); |
6110 | |
|
6111 | 0 | Operands.push_back(AMDGPUOperand::CreateToken(this, Name, NameLoc)); |
6112 | |
|
6113 | 0 | bool IsMIMG = Name.starts_with("image_"); |
6114 | |
|
6115 | 0 | while (!trySkipToken(AsmToken::EndOfStatement)) { |
6116 | 0 | OperandMode Mode = OperandMode_Default; |
6117 | 0 | if (IsMIMG && isGFX10Plus() && Operands.size() == 2) |
6118 | 0 | Mode = OperandMode_NSA; |
6119 | 0 | ParseStatus Res = parseOperand(Operands, Name, Mode); |
6120 | |
|
6121 | 0 | if (!Res.isSuccess()) { |
6122 | 0 | checkUnsupportedInstruction(Name, NameLoc); |
6123 | 0 | if (!Parser.hasPendingError()) { |
6124 | | // FIXME: use real operand location rather than the current location. |
6125 | 0 | StringRef Msg = Res.isFailure() ? "failed parsing operand." |
6126 | 0 | : "not a valid operand."; |
6127 | 0 | Error(getLoc(), Msg); |
6128 | 0 | } |
6129 | 0 | while (!trySkipToken(AsmToken::EndOfStatement)) { |
6130 | 0 | lex(); |
6131 | 0 | } |
6132 | 0 | return true; |
6133 | 0 | } |
6134 | | |
6135 | | // Eat the comma or space if there is one. |
6136 | 0 | trySkipToken(AsmToken::Comma); |
6137 | 0 | } |
6138 | | |
6139 | 0 | return false; |
6140 | 0 | } |
6141 | | |
6142 | | //===----------------------------------------------------------------------===// |
6143 | | // Utility functions |
6144 | | //===----------------------------------------------------------------------===// |
6145 | | |
6146 | | ParseStatus AMDGPUAsmParser::parseTokenOp(StringRef Name, |
6147 | 0 | OperandVector &Operands) { |
6148 | 0 | SMLoc S = getLoc(); |
6149 | 0 | if (!trySkipId(Name)) |
6150 | 0 | return ParseStatus::NoMatch; |
6151 | | |
6152 | 0 | Operands.push_back(AMDGPUOperand::CreateToken(this, Name, S)); |
6153 | 0 | return ParseStatus::Success; |
6154 | 0 | } |
6155 | | |
6156 | | ParseStatus AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, |
6157 | 0 | int64_t &IntVal) { |
6158 | |
|
6159 | 0 | if (!trySkipId(Prefix, AsmToken::Colon)) |
6160 | 0 | return ParseStatus::NoMatch; |
6161 | | |
6162 | 0 | return parseExpr(IntVal) ? ParseStatus::Success : ParseStatus::Failure; |
6163 | 0 | } |
6164 | | |
6165 | | ParseStatus AMDGPUAsmParser::parseIntWithPrefix( |
6166 | | const char *Prefix, OperandVector &Operands, AMDGPUOperand::ImmTy ImmTy, |
6167 | 0 | std::function<bool(int64_t &)> ConvertResult) { |
6168 | 0 | SMLoc S = getLoc(); |
6169 | 0 | int64_t Value = 0; |
6170 | |
|
6171 | 0 | ParseStatus Res = parseIntWithPrefix(Prefix, Value); |
6172 | 0 | if (!Res.isSuccess()) |
6173 | 0 | return Res; |
6174 | | |
6175 | 0 | if (ConvertResult && !ConvertResult(Value)) { |
6176 | 0 | Error(S, "invalid " + StringRef(Prefix) + " value."); |
6177 | 0 | } |
6178 | |
|
6179 | 0 | Operands.push_back(AMDGPUOperand::CreateImm(this, Value, S, ImmTy)); |
6180 | 0 | return ParseStatus::Success; |
6181 | 0 | } |
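| | // Prefixed integer operands are written "name:expr" with the prefix supplied by the
| | // caller; for example (illustrative): offset:16.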
6182 | | |
6183 | | ParseStatus AMDGPUAsmParser::parseOperandArrayWithPrefix( |
6184 | | const char *Prefix, OperandVector &Operands, AMDGPUOperand::ImmTy ImmTy, |
6185 | 0 | bool (*ConvertResult)(int64_t &)) { |
6186 | 0 | SMLoc S = getLoc(); |
6187 | 0 | if (!trySkipId(Prefix, AsmToken::Colon)) |
6188 | 0 | return ParseStatus::NoMatch; |
6189 | | |
6190 | 0 | if (!skipToken(AsmToken::LBrac, "expected a left square bracket")) |
6191 | 0 | return ParseStatus::Failure; |
6192 | | |
6193 | 0 | unsigned Val = 0; |
6194 | 0 | const unsigned MaxSize = 4; |
6195 | | |
6196 | | // FIXME: How to verify the number of elements matches the number of src |
6197 | | // operands? |
6198 | 0 | for (int I = 0; ; ++I) { |
6199 | 0 | int64_t Op; |
6200 | 0 | SMLoc Loc = getLoc(); |
6201 | 0 | if (!parseExpr(Op)) |
6202 | 0 | return ParseStatus::Failure; |
6203 | | |
6204 | 0 | if (Op != 0 && Op != 1) |
6205 | 0 | return Error(Loc, "invalid " + StringRef(Prefix) + " value."); |
6206 | | |
6207 | 0 | Val |= (Op << I); |
6208 | |
|
6209 | 0 | if (trySkipToken(AsmToken::RBrac)) |
6210 | 0 | break; |
6211 | | |
6212 | 0 | if (I + 1 == MaxSize) |
6213 | 0 | return Error(getLoc(), "expected a closing square bracket"); |
6214 | | |
6215 | 0 | if (!skipToken(AsmToken::Comma, "expected a comma")) |
6216 | 0 | return ParseStatus::Failure; |
6217 | 0 | } |
6218 | | |
6219 | 0 | Operands.push_back(AMDGPUOperand::CreateImm(this, Val, S, ImmTy)); |
6220 | 0 | return ParseStatus::Success; |
6221 | 0 | } |
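| | // Accepts up to four 0/1 elements packed LSB-first into a bitmask; for example, with
| | // the "neg" prefix used by parseBLGP below: neg:[0,1,1,0] yields Val = 0b0110.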
6222 | | |
6223 | | ParseStatus AMDGPUAsmParser::parseNamedBit(StringRef Name, |
6224 | | OperandVector &Operands, |
6225 | 0 | AMDGPUOperand::ImmTy ImmTy) { |
6226 | 0 | int64_t Bit; |
6227 | 0 | SMLoc S = getLoc(); |
6228 | |
|
6229 | 0 | if (trySkipId(Name)) { |
6230 | 0 | Bit = 1; |
6231 | 0 | } else if (trySkipId("no", Name)) { |
6232 | 0 | Bit = 0; |
6233 | 0 | } else { |
6234 | 0 | return ParseStatus::NoMatch; |
6235 | 0 | } |
6236 | | |
6237 | 0 | if (Name == "r128" && !hasMIMG_R128()) |
6238 | 0 | return Error(S, "r128 modifier is not supported on this GPU"); |
6239 | 0 | if (Name == "a16" && !hasA16()) |
6240 | 0 | return Error(S, "a16 modifier is not supported on this GPU"); |
6241 | | |
6242 | 0 | if (isGFX9() && ImmTy == AMDGPUOperand::ImmTyA16) |
6243 | 0 | ImmTy = AMDGPUOperand::ImmTyR128A16; |
6244 | |
|
6245 | 0 | Operands.push_back(AMDGPUOperand::CreateImm(this, Bit, S, ImmTy)); |
6246 | 0 | return ParseStatus::Success; |
6247 | 0 | } |
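| | // Named bits are written either bare or with a "no" prefix, e.g. "r128" / "nor128" and
| | // "a16" / "noa16", subject to the GPU checks above.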
6248 | | |
6249 | | unsigned AMDGPUAsmParser::getCPolKind(StringRef Id, StringRef Mnemo, |
6250 | 0 | bool &Disabling) const { |
6251 | 0 | Disabling = Id.consume_front("no"); |
6252 | |
|
6253 | 0 | if (isGFX940() && !Mnemo.starts_with("s_")) { |
6254 | 0 | return StringSwitch<unsigned>(Id) |
6255 | 0 | .Case("nt", AMDGPU::CPol::NT) |
6256 | 0 | .Case("sc0", AMDGPU::CPol::SC0) |
6257 | 0 | .Case("sc1", AMDGPU::CPol::SC1) |
6258 | 0 | .Default(0); |
6259 | 0 | } |
6260 | | |
6261 | 0 | return StringSwitch<unsigned>(Id) |
6262 | 0 | .Case("dlc", AMDGPU::CPol::DLC) |
6263 | 0 | .Case("glc", AMDGPU::CPol::GLC) |
6264 | 0 | .Case("scc", AMDGPU::CPol::SCC) |
6265 | 0 | .Case("slc", AMDGPU::CPol::SLC) |
6266 | 0 | .Default(0); |
6267 | 0 | } |
6268 | | |
6269 | 0 | ParseStatus AMDGPUAsmParser::parseCPol(OperandVector &Operands) { |
6270 | 0 | if (isGFX12Plus()) { |
6271 | 0 | SMLoc StringLoc = getLoc(); |
6272 | |
|
6273 | 0 | int64_t CPolVal = 0; |
6274 | 0 | ParseStatus ResTH = ParseStatus::NoMatch; |
6275 | 0 | ParseStatus ResScope = ParseStatus::NoMatch; |
6276 | |
|
6277 | 0 | for (;;) { |
6278 | 0 | if (ResTH.isNoMatch()) { |
6279 | 0 | int64_t TH; |
6280 | 0 | ResTH = parseTH(Operands, TH); |
6281 | 0 | if (ResTH.isFailure()) |
6282 | 0 | return ResTH; |
6283 | 0 | if (ResTH.isSuccess()) { |
6284 | 0 | CPolVal |= TH; |
6285 | 0 | continue; |
6286 | 0 | } |
6287 | 0 | } |
6288 | | |
6289 | 0 | if (ResScope.isNoMatch()) { |
6290 | 0 | int64_t Scope; |
6291 | 0 | ResScope = parseScope(Operands, Scope); |
6292 | 0 | if (ResScope.isFailure()) |
6293 | 0 | return ResScope; |
6294 | 0 | if (ResScope.isSuccess()) { |
6295 | 0 | CPolVal |= Scope; |
6296 | 0 | continue; |
6297 | 0 | } |
6298 | 0 | } |
6299 | | |
6300 | 0 | break; |
6301 | 0 | } |
6302 | | |
6303 | 0 | if (ResTH.isNoMatch() && ResScope.isNoMatch()) |
6304 | 0 | return ParseStatus::NoMatch; |
6305 | | |
6306 | 0 | Operands.push_back(AMDGPUOperand::CreateImm(this, CPolVal, StringLoc, |
6307 | 0 | AMDGPUOperand::ImmTyCPol)); |
6308 | 0 | return ParseStatus::Success; |
6309 | 0 | } |
6310 | | |
6311 | 0 | StringRef Mnemo = ((AMDGPUOperand &)*Operands[0]).getToken(); |
6312 | 0 | SMLoc OpLoc = getLoc(); |
6313 | 0 | unsigned Enabled = 0, Seen = 0; |
6314 | 0 | for (;;) { |
6315 | 0 | SMLoc S = getLoc(); |
6316 | 0 | bool Disabling; |
6317 | 0 | unsigned CPol = getCPolKind(getId(), Mnemo, Disabling); |
6318 | 0 | if (!CPol) |
6319 | 0 | break; |
6320 | | |
6321 | 0 | lex(); |
6322 | |
|
6323 | 0 | if (!isGFX10Plus() && CPol == AMDGPU::CPol::DLC) |
6324 | 0 | return Error(S, "dlc modifier is not supported on this GPU"); |
6325 | | |
6326 | 0 | if (!isGFX90A() && CPol == AMDGPU::CPol::SCC) |
6327 | 0 | return Error(S, "scc modifier is not supported on this GPU"); |
6328 | | |
6329 | 0 | if (Seen & CPol) |
6330 | 0 | return Error(S, "duplicate cache policy modifier"); |
6331 | | |
6332 | 0 | if (!Disabling) |
6333 | 0 | Enabled |= CPol; |
6334 | |
|
6335 | 0 | Seen |= CPol; |
6336 | 0 | } |
6337 | | |
6338 | 0 | if (!Seen) |
6339 | 0 | return ParseStatus::NoMatch; |
6340 | | |
6341 | 0 | Operands.push_back( |
6342 | 0 | AMDGPUOperand::CreateImm(this, Enabled, OpLoc, AMDGPUOperand::ImmTyCPol)); |
6343 | 0 | return ParseStatus::Success; |
6344 | 0 | } |
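| | // Illustrative cache-policy operands (the carrier instruction is hypothetical):
| | //   pre-GFX12:      buffer_load_dword v0, off, s[0:3], 0 glc slc   // or noglc, nodlc, ...
| | //   GFX940 (non-"s_" mnemonics): nt, sc0, sc1
| | //   GFX12+:         ... th:TH_LOAD_NT scope:SCOPE_SYS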
6345 | | |
6346 | | ParseStatus AMDGPUAsmParser::parseScope(OperandVector &Operands, |
6347 | 0 | int64_t &Scope) { |
6348 | 0 | Scope = AMDGPU::CPol::SCOPE_CU; // default; |
6349 | |
|
6350 | 0 | StringRef Value; |
6351 | 0 | SMLoc StringLoc; |
6352 | 0 | ParseStatus Res; |
6353 | |
|
6354 | 0 | Res = parseStringWithPrefix("scope", Value, StringLoc); |
6355 | 0 | if (!Res.isSuccess()) |
6356 | 0 | return Res; |
6357 | | |
6358 | 0 | Scope = StringSwitch<int64_t>(Value) |
6359 | 0 | .Case("SCOPE_CU", AMDGPU::CPol::SCOPE_CU) |
6360 | 0 | .Case("SCOPE_SE", AMDGPU::CPol::SCOPE_SE) |
6361 | 0 | .Case("SCOPE_DEV", AMDGPU::CPol::SCOPE_DEV) |
6362 | 0 | .Case("SCOPE_SYS", AMDGPU::CPol::SCOPE_SYS) |
6363 | 0 | .Default(0xffffffff); |
6364 | |
|
6365 | 0 | if (Scope == 0xffffffff) |
6366 | 0 | return Error(StringLoc, "invalid scope value"); |
6367 | | |
6368 | 0 | return ParseStatus::Success; |
6369 | 0 | } |
6370 | | |
6371 | 0 | ParseStatus AMDGPUAsmParser::parseTH(OperandVector &Operands, int64_t &TH) { |
6372 | 0 | TH = AMDGPU::CPol::TH_RT; // default |
6373 | |
|
6374 | 0 | StringRef Value; |
6375 | 0 | SMLoc StringLoc; |
6376 | 0 | ParseStatus Res = parseStringWithPrefix("th", Value, StringLoc); |
6377 | 0 | if (!Res.isSuccess()) |
6378 | 0 | return Res; |
6379 | | |
6380 | 0 | if (Value == "TH_DEFAULT") |
6381 | 0 | TH = AMDGPU::CPol::TH_RT; |
6382 | 0 | else if (Value == "TH_STORE_LU" || Value == "TH_LOAD_RT_WB" || |
6383 | 0 | Value == "TH_LOAD_NT_WB") { |
6384 | 0 | return Error(StringLoc, "invalid th value"); |
6385 | 0 | } else if (Value.starts_with("TH_ATOMIC_")) { |
6386 | 0 | Value = Value.drop_front(10); |
6387 | 0 | TH = AMDGPU::CPol::TH_TYPE_ATOMIC; |
6388 | 0 | } else if (Value.starts_with("TH_LOAD_")) { |
6389 | 0 | Value = Value.drop_front(8); |
6390 | 0 | TH = AMDGPU::CPol::TH_TYPE_LOAD; |
6391 | 0 | } else if (Value.starts_with("TH_STORE_")) { |
6392 | 0 | Value = Value.drop_front(9); |
6393 | 0 | TH = AMDGPU::CPol::TH_TYPE_STORE; |
6394 | 0 | } else { |
6395 | 0 | return Error(StringLoc, "invalid th value"); |
6396 | 0 | } |
6397 | | |
6398 | 0 | if (Value == "BYPASS") |
6399 | 0 | TH |= AMDGPU::CPol::TH_REAL_BYPASS; |
6400 | |
|
6401 | 0 | if (TH != 0) { |
6402 | 0 | if (TH & AMDGPU::CPol::TH_TYPE_ATOMIC) |
6403 | 0 | TH |= StringSwitch<int64_t>(Value) |
6404 | 0 | .Case("RETURN", AMDGPU::CPol::TH_ATOMIC_RETURN) |
6405 | 0 | .Case("RT", AMDGPU::CPol::TH_RT) |
6406 | 0 | .Case("RT_RETURN", AMDGPU::CPol::TH_ATOMIC_RETURN) |
6407 | 0 | .Case("NT", AMDGPU::CPol::TH_ATOMIC_NT) |
6408 | 0 | .Case("NT_RETURN", AMDGPU::CPol::TH_ATOMIC_NT | |
6409 | 0 | AMDGPU::CPol::TH_ATOMIC_RETURN) |
6410 | 0 | .Case("CASCADE_RT", AMDGPU::CPol::TH_ATOMIC_CASCADE) |
6411 | 0 | .Case("CASCADE_NT", AMDGPU::CPol::TH_ATOMIC_CASCADE | |
6412 | 0 | AMDGPU::CPol::TH_ATOMIC_NT) |
6413 | 0 | .Default(0xffffffff); |
6414 | 0 | else |
6415 | 0 | TH |= StringSwitch<int64_t>(Value) |
6416 | 0 | .Case("RT", AMDGPU::CPol::TH_RT) |
6417 | 0 | .Case("NT", AMDGPU::CPol::TH_NT) |
6418 | 0 | .Case("HT", AMDGPU::CPol::TH_HT) |
6419 | 0 | .Case("LU", AMDGPU::CPol::TH_LU) |
6420 | 0 | .Case("RT_WB", AMDGPU::CPol::TH_RT_WB) |
6421 | 0 | .Case("NT_RT", AMDGPU::CPol::TH_NT_RT) |
6422 | 0 | .Case("RT_NT", AMDGPU::CPol::TH_RT_NT) |
6423 | 0 | .Case("NT_HT", AMDGPU::CPol::TH_NT_HT) |
6424 | 0 | .Case("NT_WB", AMDGPU::CPol::TH_NT_WB) |
6425 | 0 | .Case("BYPASS", AMDGPU::CPol::TH_BYPASS) |
6426 | 0 | .Default(0xffffffff); |
6427 | 0 | } |
6428 | |
|
6429 | 0 | if (TH == 0xffffffff) |
6430 | 0 | return Error(StringLoc, "invalid th value"); |
6431 | | |
6432 | 0 | return ParseStatus::Success; |
6433 | 0 | } |
6434 | | |
6435 | | static void addOptionalImmOperand( |
6436 | | MCInst& Inst, const OperandVector& Operands, |
6437 | | AMDGPUAsmParser::OptionalImmIndexMap& OptionalIdx, |
6438 | | AMDGPUOperand::ImmTy ImmT, |
6439 | 0 | int64_t Default = 0) { |
6440 | 0 | auto i = OptionalIdx.find(ImmT); |
6441 | 0 | if (i != OptionalIdx.end()) { |
6442 | 0 | unsigned Idx = i->second; |
6443 | 0 | ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1); |
6444 | 0 | } else { |
6445 | 0 | Inst.addOperand(MCOperand::createImm(Default)); |
6446 | 0 | } |
6447 | 0 | } |
6448 | | |
6449 | | ParseStatus AMDGPUAsmParser::parseStringWithPrefix(StringRef Prefix, |
6450 | | StringRef &Value, |
6451 | 0 | SMLoc &StringLoc) { |
6452 | 0 | if (!trySkipId(Prefix, AsmToken::Colon)) |
6453 | 0 | return ParseStatus::NoMatch; |
6454 | | |
6455 | 0 | StringLoc = getLoc(); |
6456 | 0 | return parseId(Value, "expected an identifier") ? ParseStatus::Success |
6457 | 0 | : ParseStatus::Failure; |
6458 | 0 | } |
6459 | | |
6460 | | //===----------------------------------------------------------------------===// |
6461 | | // MTBUF format |
6462 | | //===----------------------------------------------------------------------===// |
6463 | | |
6464 | | bool AMDGPUAsmParser::tryParseFmt(const char *Pref, |
6465 | | int64_t MaxVal, |
6466 | 0 | int64_t &Fmt) { |
6467 | 0 | int64_t Val; |
6468 | 0 | SMLoc Loc = getLoc(); |
6469 | |
|
6470 | 0 | auto Res = parseIntWithPrefix(Pref, Val); |
6471 | 0 | if (Res.isFailure()) |
6472 | 0 | return false; |
6473 | 0 | if (Res.isNoMatch()) |
6474 | 0 | return true; |
6475 | | |
6476 | 0 | if (Val < 0 || Val > MaxVal) { |
6477 | 0 | Error(Loc, Twine("out of range ", StringRef(Pref))); |
6478 | 0 | return false; |
6479 | 0 | } |
6480 | | |
6481 | 0 | Fmt = Val; |
6482 | 0 | return true; |
6483 | 0 | } |
6484 | | |
6485 | | // dfmt and nfmt (in a tbuffer instruction) are parsed as one to allow their |
6486 | | // values to live in a joint format operand in the MCInst encoding. |
6487 | 0 | ParseStatus AMDGPUAsmParser::parseDfmtNfmt(int64_t &Format) { |
6488 | 0 | using namespace llvm::AMDGPU::MTBUFFormat; |
6489 | |
|
6490 | 0 | int64_t Dfmt = DFMT_UNDEF; |
6491 | 0 | int64_t Nfmt = NFMT_UNDEF; |
6492 | | |
6493 | | // dfmt and nfmt can appear in either order, and each is optional. |
6494 | 0 | for (int I = 0; I < 2; ++I) { |
6495 | 0 | if (Dfmt == DFMT_UNDEF && !tryParseFmt("dfmt", DFMT_MAX, Dfmt)) |
6496 | 0 | return ParseStatus::Failure; |
6497 | | |
6498 | 0 | if (Nfmt == NFMT_UNDEF && !tryParseFmt("nfmt", NFMT_MAX, Nfmt)) |
6499 | 0 | return ParseStatus::Failure; |
6500 | | |
6501 | | // Skip optional comma between dfmt/nfmt |
6502 | | // but guard against 2 commas following each other. |
6503 | 0 | if ((Dfmt == DFMT_UNDEF) != (Nfmt == NFMT_UNDEF) && |
6504 | 0 | !peekToken().is(AsmToken::Comma)) { |
6505 | 0 | trySkipToken(AsmToken::Comma); |
6506 | 0 | } |
6507 | 0 | } |
6508 | | |
6509 | 0 | if (Dfmt == DFMT_UNDEF && Nfmt == NFMT_UNDEF) |
6510 | 0 | return ParseStatus::NoMatch; |
6511 | | |
6512 | 0 | Dfmt = (Dfmt == DFMT_UNDEF) ? DFMT_DEFAULT : Dfmt; |
6513 | 0 | Nfmt = (Nfmt == NFMT_UNDEF) ? NFMT_DEFAULT : Nfmt; |
6514 | |
|
6515 | 0 | Format = encodeDfmtNfmt(Dfmt, Nfmt); |
6516 | 0 | return ParseStatus::Success; |
6517 | 0 | } |
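| | // Both orderings are accepted and a missing half takes its default, e.g. (values
| | // illustrative): "dfmt:4, nfmt:2", "nfmt:2, dfmt:4", or just "dfmt:4".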
6518 | | |
6519 | 0 | ParseStatus AMDGPUAsmParser::parseUfmt(int64_t &Format) { |
6520 | 0 | using namespace llvm::AMDGPU::MTBUFFormat; |
6521 | |
|
6522 | 0 | int64_t Fmt = UFMT_UNDEF; |
6523 | |
|
6524 | 0 | if (!tryParseFmt("format", UFMT_MAX, Fmt)) |
6525 | 0 | return ParseStatus::Failure; |
6526 | | |
6527 | 0 | if (Fmt == UFMT_UNDEF) |
6528 | 0 | return ParseStatus::NoMatch; |
6529 | | |
6530 | 0 | Format = Fmt; |
6531 | 0 | return ParseStatus::Success; |
6532 | 0 | } |
6533 | | |
6534 | | bool AMDGPUAsmParser::matchDfmtNfmt(int64_t &Dfmt, |
6535 | | int64_t &Nfmt, |
6536 | | StringRef FormatStr, |
6537 | 0 | SMLoc Loc) { |
6538 | 0 | using namespace llvm::AMDGPU::MTBUFFormat; |
6539 | 0 | int64_t Format; |
6540 | |
|
6541 | 0 | Format = getDfmt(FormatStr); |
6542 | 0 | if (Format != DFMT_UNDEF) { |
6543 | 0 | Dfmt = Format; |
6544 | 0 | return true; |
6545 | 0 | } |
6546 | | |
6547 | 0 | Format = getNfmt(FormatStr, getSTI()); |
6548 | 0 | if (Format != NFMT_UNDEF) { |
6549 | 0 | Nfmt = Format; |
6550 | 0 | return true; |
6551 | 0 | } |
6552 | | |
6553 | 0 | Error(Loc, "unsupported format"); |
6554 | 0 | return false; |
6555 | 0 | } |
6556 | | |
6557 | | ParseStatus AMDGPUAsmParser::parseSymbolicSplitFormat(StringRef FormatStr, |
6558 | | SMLoc FormatLoc, |
6559 | 0 | int64_t &Format) { |
6560 | 0 | using namespace llvm::AMDGPU::MTBUFFormat; |
6561 | |
|
6562 | 0 | int64_t Dfmt = DFMT_UNDEF; |
6563 | 0 | int64_t Nfmt = NFMT_UNDEF; |
6564 | 0 | if (!matchDfmtNfmt(Dfmt, Nfmt, FormatStr, FormatLoc)) |
6565 | 0 | return ParseStatus::Failure; |
6566 | | |
6567 | 0 | if (trySkipToken(AsmToken::Comma)) { |
6568 | 0 | StringRef Str; |
6569 | 0 | SMLoc Loc = getLoc(); |
6570 | 0 | if (!parseId(Str, "expected a format string") || |
6571 | 0 | !matchDfmtNfmt(Dfmt, Nfmt, Str, Loc)) |
6572 | 0 | return ParseStatus::Failure; |
6573 | 0 | if (Dfmt == DFMT_UNDEF) |
6574 | 0 | return Error(Loc, "duplicate numeric format"); |
6575 | 0 | if (Nfmt == NFMT_UNDEF) |
6576 | 0 | return Error(Loc, "duplicate data format"); |
6577 | 0 | } |
6578 | | |
6579 | 0 | Dfmt = (Dfmt == DFMT_UNDEF) ? DFMT_DEFAULT : Dfmt; |
6580 | 0 | Nfmt = (Nfmt == NFMT_UNDEF) ? NFMT_DEFAULT : Nfmt; |
6581 | |
|
6582 | 0 | if (isGFX10Plus()) { |
6583 | 0 | auto Ufmt = convertDfmtNfmt2Ufmt(Dfmt, Nfmt, getSTI()); |
6584 | 0 | if (Ufmt == UFMT_UNDEF) |
6585 | 0 | return Error(FormatLoc, "unsupported format"); |
6586 | 0 | Format = Ufmt; |
6587 | 0 | } else { |
6588 | 0 | Format = encodeDfmtNfmt(Dfmt, Nfmt); |
6589 | 0 | } |
6590 | | |
6591 | 0 | return ParseStatus::Success; |
6592 | 0 | } |
6593 | | |
6594 | | ParseStatus AMDGPUAsmParser::parseSymbolicUnifiedFormat(StringRef FormatStr, |
6595 | | SMLoc Loc, |
6596 | 0 | int64_t &Format) { |
6597 | 0 | using namespace llvm::AMDGPU::MTBUFFormat; |
6598 | |
|
6599 | 0 | auto Id = getUnifiedFormat(FormatStr, getSTI()); |
6600 | 0 | if (Id == UFMT_UNDEF) |
6601 | 0 | return ParseStatus::NoMatch; |
6602 | | |
6603 | 0 | if (!isGFX10Plus()) |
6604 | 0 | return Error(Loc, "unified format is not supported on this GPU"); |
6605 | | |
6606 | 0 | Format = Id; |
6607 | 0 | return ParseStatus::Success; |
6608 | 0 | } |
6609 | | |
6610 | 0 | ParseStatus AMDGPUAsmParser::parseNumericFormat(int64_t &Format) { |
6611 | 0 | using namespace llvm::AMDGPU::MTBUFFormat; |
6612 | 0 | SMLoc Loc = getLoc(); |
6613 | |
|
6614 | 0 | if (!parseExpr(Format)) |
6615 | 0 | return ParseStatus::Failure; |
6616 | 0 | if (!isValidFormatEncoding(Format, getSTI())) |
6617 | 0 | return Error(Loc, "out of range format"); |
6618 | | |
6619 | 0 | return ParseStatus::Success; |
6620 | 0 | } |
6621 | | |
6622 | 0 | ParseStatus AMDGPUAsmParser::parseSymbolicOrNumericFormat(int64_t &Format) { |
6623 | 0 | using namespace llvm::AMDGPU::MTBUFFormat; |
6624 | |
|
6625 | 0 | if (!trySkipId("format", AsmToken::Colon)) |
6626 | 0 | return ParseStatus::NoMatch; |
6627 | | |
6628 | 0 | if (trySkipToken(AsmToken::LBrac)) { |
6629 | 0 | StringRef FormatStr; |
6630 | 0 | SMLoc Loc = getLoc(); |
6631 | 0 | if (!parseId(FormatStr, "expected a format string")) |
6632 | 0 | return ParseStatus::Failure; |
6633 | | |
6634 | 0 | auto Res = parseSymbolicUnifiedFormat(FormatStr, Loc, Format); |
6635 | 0 | if (Res.isNoMatch()) |
6636 | 0 | Res = parseSymbolicSplitFormat(FormatStr, Loc, Format); |
6637 | 0 | if (!Res.isSuccess()) |
6638 | 0 | return Res; |
6639 | | |
6640 | 0 | if (!skipToken(AsmToken::RBrac, "expected a closing square bracket")) |
6641 | 0 | return ParseStatus::Failure; |
6642 | | |
6643 | 0 | return ParseStatus::Success; |
6644 | 0 | } |
6645 | | |
6646 | 0 | return parseNumericFormat(Format); |
6647 | 0 | } |
6648 | | |
6649 | 0 | ParseStatus AMDGPUAsmParser::parseFORMAT(OperandVector &Operands) { |
6650 | 0 | using namespace llvm::AMDGPU::MTBUFFormat; |
6651 | |
|
6652 | 0 | int64_t Format = getDefaultFormatEncoding(getSTI()); |
6653 | 0 | ParseStatus Res; |
6654 | 0 | SMLoc Loc = getLoc(); |
6655 | | |
6656 | | // Parse legacy format syntax. |
6657 | 0 | Res = isGFX10Plus() ? parseUfmt(Format) : parseDfmtNfmt(Format); |
6658 | 0 | if (Res.isFailure()) |
6659 | 0 | return Res; |
6660 | | |
6661 | 0 | bool FormatFound = Res.isSuccess(); |
6662 | |
|
6663 | 0 | Operands.push_back( |
6664 | 0 | AMDGPUOperand::CreateImm(this, Format, Loc, AMDGPUOperand::ImmTyFORMAT)); |
6665 | |
|
6666 | 0 | if (FormatFound) |
6667 | 0 | trySkipToken(AsmToken::Comma); |
6668 | |
|
6669 | 0 | if (isToken(AsmToken::EndOfStatement)) { |
6670 | | // We are expecting an soffset operand, |
6671 | | // but let the matcher handle the error.
6672 | 0 | return ParseStatus::Success; |
6673 | 0 | } |
6674 | | |
6675 | | // Parse soffset. |
6676 | 0 | Res = parseRegOrImm(Operands); |
6677 | 0 | if (!Res.isSuccess()) |
6678 | 0 | return Res; |
6679 | | |
6680 | 0 | trySkipToken(AsmToken::Comma); |
6681 | |
|
6682 | 0 | if (!FormatFound) { |
6683 | 0 | Res = parseSymbolicOrNumericFormat(Format); |
6684 | 0 | if (Res.isFailure()) |
6685 | 0 | return Res; |
6686 | 0 | if (Res.isSuccess()) { |
6687 | 0 | auto Size = Operands.size(); |
6688 | 0 | AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands[Size - 2]); |
6689 | 0 | assert(Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyFORMAT); |
6690 | 0 | Op.setImm(Format); |
6691 | 0 | } |
6692 | 0 | return ParseStatus::Success; |
6693 | 0 | } |
6694 | | |
6695 | 0 | if (isId("format") && peekToken().is(AsmToken::Colon)) |
6696 | 0 | return Error(getLoc(), "duplicate format"); |
6697 | 0 | return ParseStatus::Success; |
6698 | 0 | } |
6699 | | |
6700 | 0 | ParseStatus AMDGPUAsmParser::parseFlatOffset(OperandVector &Operands) { |
6701 | 0 | ParseStatus Res = |
6702 | 0 | parseIntWithPrefix("offset", Operands, AMDGPUOperand::ImmTyOffset); |
6703 | 0 | if (Res.isNoMatch()) { |
6704 | 0 | Res = parseIntWithPrefix("inst_offset", Operands, |
6705 | 0 | AMDGPUOperand::ImmTyInstOffset); |
6706 | 0 | } |
6707 | 0 | return Res; |
6708 | 0 | } |
6709 | | |
6710 | 0 | ParseStatus AMDGPUAsmParser::parseR128A16(OperandVector &Operands) { |
6711 | 0 | ParseStatus Res = |
6712 | 0 | parseNamedBit("r128", Operands, AMDGPUOperand::ImmTyR128A16); |
6713 | 0 | if (Res.isNoMatch()) |
6714 | 0 | Res = parseNamedBit("a16", Operands, AMDGPUOperand::ImmTyA16); |
6715 | 0 | return Res; |
6716 | 0 | } |
6717 | | |
6718 | 0 | ParseStatus AMDGPUAsmParser::parseBLGP(OperandVector &Operands) { |
6719 | 0 | ParseStatus Res = |
6720 | 0 | parseIntWithPrefix("blgp", Operands, AMDGPUOperand::ImmTyBLGP); |
6721 | 0 | if (Res.isNoMatch()) { |
6722 | 0 | Res = |
6723 | 0 | parseOperandArrayWithPrefix("neg", Operands, AMDGPUOperand::ImmTyBLGP); |
6724 | 0 | } |
6725 | 0 | return Res; |
6726 | 0 | } |
6727 | | |
6728 | | //===----------------------------------------------------------------------===// |
6729 | | // Exp |
6730 | | //===----------------------------------------------------------------------===// |
6731 | | |
6732 | 0 | void AMDGPUAsmParser::cvtExp(MCInst &Inst, const OperandVector &Operands) { |
6733 | 0 | OptionalImmIndexMap OptionalIdx; |
6734 | |
|
6735 | 0 | unsigned OperandIdx[4]; |
6736 | 0 | unsigned EnMask = 0; |
6737 | 0 | int SrcIdx = 0; |
6738 | |
|
6739 | 0 | for (unsigned i = 1, e = Operands.size(); i != e; ++i) { |
6740 | 0 | AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]); |
6741 | | |
6742 | | // Add the register arguments |
6743 | 0 | if (Op.isReg()) { |
6744 | 0 | assert(SrcIdx < 4); |
6745 | 0 | OperandIdx[SrcIdx] = Inst.size(); |
6746 | 0 | Op.addRegOperands(Inst, 1); |
6747 | 0 | ++SrcIdx; |
6748 | 0 | continue; |
6749 | 0 | } |
6750 | | |
6751 | 0 | if (Op.isOff()) { |
6752 | 0 | assert(SrcIdx < 4); |
6753 | 0 | OperandIdx[SrcIdx] = Inst.size(); |
6754 | 0 | Inst.addOperand(MCOperand::createReg(AMDGPU::NoRegister)); |
6755 | 0 | ++SrcIdx; |
6756 | 0 | continue; |
6757 | 0 | } |
6758 | | |
6759 | 0 | if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyExpTgt) { |
6760 | 0 | Op.addImmOperands(Inst, 1); |
6761 | 0 | continue; |
6762 | 0 | } |
6763 | | |
6764 | 0 | if (Op.isToken() && (Op.getToken() == "done" || Op.getToken() == "row_en")) |
6765 | 0 | continue; |
6766 | | |
6767 | | // Handle optional arguments |
6768 | 0 | OptionalIdx[Op.getImmTy()] = i; |
6769 | 0 | } |
6770 | |
|
6771 | 0 | assert(SrcIdx == 4); |
6772 | | |
6773 | 0 | bool Compr = false; |
6774 | 0 | if (OptionalIdx.find(AMDGPUOperand::ImmTyExpCompr) != OptionalIdx.end()) { |
6775 | 0 | Compr = true; |
6776 | 0 | Inst.getOperand(OperandIdx[1]) = Inst.getOperand(OperandIdx[2]); |
6777 | 0 | Inst.getOperand(OperandIdx[2]).setReg(AMDGPU::NoRegister); |
6778 | 0 | Inst.getOperand(OperandIdx[3]).setReg(AMDGPU::NoRegister); |
6779 | 0 | } |
6780 | |
|
6781 | 0 | for (auto i = 0; i < SrcIdx; ++i) { |
6782 | 0 | if (Inst.getOperand(OperandIdx[i]).getReg() != AMDGPU::NoRegister) { |
6783 | 0 | EnMask |= Compr? (0x3 << i * 2) : (0x1 << i); |
6784 | 0 | } |
6785 | 0 | } |
6786 | |
|
6787 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpVM); |
6788 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpCompr); |
6789 | |
|
6790 | 0 | Inst.addOperand(MCOperand::createImm(EnMask)); |
6791 | 0 | } |
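| | // Illustrative export forms handled by this conversion (target and registers are
| | // hypothetical); the en mask gets one bit per register source that is not 'off':
| | //   exp mrt0 v0, v1, v2, v3 done    // en = 0xf
| | //   exp mrt0 v0, off, off, off      // en = 0x1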
6792 | | |
6793 | | //===----------------------------------------------------------------------===// |
6794 | | // s_waitcnt |
6795 | | //===----------------------------------------------------------------------===// |
6796 | | |
6797 | | static bool |
6798 | | encodeCnt( |
6799 | | const AMDGPU::IsaVersion ISA, |
6800 | | int64_t &IntVal, |
6801 | | int64_t CntVal, |
6802 | | bool Saturate, |
6803 | | unsigned (*encode)(const IsaVersion &Version, unsigned, unsigned), |
6804 | | unsigned (*decode)(const IsaVersion &Version, unsigned)) |
6805 | 0 | { |
6806 | 0 | bool Failed = false; |
6807 | |
|
6808 | 0 | IntVal = encode(ISA, IntVal, CntVal); |
6809 | 0 | if (CntVal != decode(ISA, IntVal)) { |
6810 | 0 | if (Saturate) { |
6811 | 0 | IntVal = encode(ISA, IntVal, -1); |
6812 | 0 | } else { |
6813 | 0 | Failed = true; |
6814 | 0 | } |
6815 | 0 | } |
6816 | 0 | return Failed; |
6817 | 0 | } |
6818 | | |
6819 | 0 | bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) { |
6820 | |
|
6821 | 0 | SMLoc CntLoc = getLoc(); |
6822 | 0 | StringRef CntName = getTokenStr(); |
6823 | |
|
6824 | 0 | if (!skipToken(AsmToken::Identifier, "expected a counter name") || |
6825 | 0 | !skipToken(AsmToken::LParen, "expected a left parenthesis")) |
6826 | 0 | return false; |
6827 | | |
6828 | 0 | int64_t CntVal; |
6829 | 0 | SMLoc ValLoc = getLoc(); |
6830 | 0 | if (!parseExpr(CntVal)) |
6831 | 0 | return false; |
6832 | | |
6833 | 0 | AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(getSTI().getCPU()); |
6834 | |
|
6835 | 0 | bool Failed = true; |
6836 | 0 | bool Sat = CntName.ends_with("_sat"); |
6837 | |
|
6838 | 0 | if (CntName == "vmcnt" || CntName == "vmcnt_sat") { |
6839 | 0 | Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeVmcnt, decodeVmcnt); |
6840 | 0 | } else if (CntName == "expcnt" || CntName == "expcnt_sat") { |
6841 | 0 | Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeExpcnt, decodeExpcnt); |
6842 | 0 | } else if (CntName == "lgkmcnt" || CntName == "lgkmcnt_sat") { |
6843 | 0 | Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeLgkmcnt, decodeLgkmcnt); |
6844 | 0 | } else { |
6845 | 0 | Error(CntLoc, "invalid counter name " + CntName); |
6846 | 0 | return false; |
6847 | 0 | } |
6848 | | |
6849 | 0 | if (Failed) { |
6850 | 0 | Error(ValLoc, "too large value for " + CntName); |
6851 | 0 | return false; |
6852 | 0 | } |
6853 | | |
6854 | 0 | if (!skipToken(AsmToken::RParen, "expected a closing parenthesis")) |
6855 | 0 | return false; |
6856 | | |
6857 | 0 | if (trySkipToken(AsmToken::Amp) || trySkipToken(AsmToken::Comma)) { |
6858 | 0 | if (isToken(AsmToken::EndOfStatement)) { |
6859 | 0 | Error(getLoc(), "expected a counter name"); |
6860 | 0 | return false; |
6861 | 0 | } |
6862 | 0 | } |
6863 | | |
6864 | 0 | return true; |
6865 | 0 | } |
6866 | | |
6867 | 0 | ParseStatus AMDGPUAsmParser::parseSWaitCnt(OperandVector &Operands) { |
6868 | 0 | AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(getSTI().getCPU()); |
6869 | 0 | int64_t Waitcnt = getWaitcntBitMask(ISA); |
6870 | 0 | SMLoc S = getLoc(); |
6871 | |
|
6872 | 0 | if (isToken(AsmToken::Identifier) && peekToken().is(AsmToken::LParen)) { |
6873 | 0 | while (!isToken(AsmToken::EndOfStatement)) { |
6874 | 0 | if (!parseCnt(Waitcnt)) |
6875 | 0 | return ParseStatus::Failure; |
6876 | 0 | } |
6877 | 0 | } else { |
6878 | 0 | if (!parseExpr(Waitcnt)) |
6879 | 0 | return ParseStatus::Failure; |
6880 | 0 | } |
6881 | | |
6882 | 0 | Operands.push_back(AMDGPUOperand::CreateImm(this, Waitcnt, S)); |
6883 | 0 | return ParseStatus::Success; |
6884 | 0 | } |
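| | // Either a raw expression or named counters are accepted, and the "_sat" spelling
| | // clamps an over-large value instead of erroring. Illustrative forms:
| | //   s_waitcnt 0
| | //   s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
| | //   s_waitcnt vmcnt_sat(1000)       // saturates to the field maximum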
6885 | | |
6886 | 0 | bool AMDGPUAsmParser::parseDelay(int64_t &Delay) { |
6887 | 0 | SMLoc FieldLoc = getLoc(); |
6888 | 0 | StringRef FieldName = getTokenStr(); |
6889 | 0 | if (!skipToken(AsmToken::Identifier, "expected a field name") || |
6890 | 0 | !skipToken(AsmToken::LParen, "expected a left parenthesis")) |
6891 | 0 | return false; |
6892 | | |
6893 | 0 | SMLoc ValueLoc = getLoc(); |
6894 | 0 | StringRef ValueName = getTokenStr(); |
6895 | 0 | if (!skipToken(AsmToken::Identifier, "expected a value name") || |
6896 | 0 | !skipToken(AsmToken::RParen, "expected a right parenthesis")) |
6897 | 0 | return false; |
6898 | | |
6899 | 0 | unsigned Shift; |
6900 | 0 | if (FieldName == "instid0") { |
6901 | 0 | Shift = 0; |
6902 | 0 | } else if (FieldName == "instskip") { |
6903 | 0 | Shift = 4; |
6904 | 0 | } else if (FieldName == "instid1") { |
6905 | 0 | Shift = 7; |
6906 | 0 | } else { |
6907 | 0 | Error(FieldLoc, "invalid field name " + FieldName); |
6908 | 0 | return false; |
6909 | 0 | } |
6910 | | |
6911 | 0 | int Value; |
6912 | 0 | if (Shift == 4) { |
6913 | | // Parse values for instskip. |
6914 | 0 | Value = StringSwitch<int>(ValueName) |
6915 | 0 | .Case("SAME", 0) |
6916 | 0 | .Case("NEXT", 1) |
6917 | 0 | .Case("SKIP_1", 2) |
6918 | 0 | .Case("SKIP_2", 3) |
6919 | 0 | .Case("SKIP_3", 4) |
6920 | 0 | .Case("SKIP_4", 5) |
6921 | 0 | .Default(-1); |
6922 | 0 | } else { |
6923 | | // Parse values for instid0 and instid1. |
6924 | 0 | Value = StringSwitch<int>(ValueName) |
6925 | 0 | .Case("NO_DEP", 0) |
6926 | 0 | .Case("VALU_DEP_1", 1) |
6927 | 0 | .Case("VALU_DEP_2", 2) |
6928 | 0 | .Case("VALU_DEP_3", 3) |
6929 | 0 | .Case("VALU_DEP_4", 4) |
6930 | 0 | .Case("TRANS32_DEP_1", 5) |
6931 | 0 | .Case("TRANS32_DEP_2", 6) |
6932 | 0 | .Case("TRANS32_DEP_3", 7) |
6933 | 0 | .Case("FMA_ACCUM_CYCLE_1", 8) |
6934 | 0 | .Case("SALU_CYCLE_1", 9) |
6935 | 0 | .Case("SALU_CYCLE_2", 10) |
6936 | 0 | .Case("SALU_CYCLE_3", 11) |
6937 | 0 | .Default(-1); |
6938 | 0 | } |
6939 | 0 | if (Value < 0) { |
6940 | 0 | Error(ValueLoc, "invalid value name " + ValueName); |
6941 | 0 | return false; |
6942 | 0 | } |
6943 | | |
6944 | 0 | Delay |= Value << Shift; |
6945 | 0 | return true; |
6946 | 0 | } |
6947 | | |
6948 | 0 | ParseStatus AMDGPUAsmParser::parseSDelayALU(OperandVector &Operands) { |
6949 | 0 | int64_t Delay = 0; |
6950 | 0 | SMLoc S = getLoc(); |
6951 | |
|
6952 | 0 | if (isToken(AsmToken::Identifier) && peekToken().is(AsmToken::LParen)) { |
6953 | 0 | do { |
6954 | 0 | if (!parseDelay(Delay)) |
6955 | 0 | return ParseStatus::Failure; |
6956 | 0 | } while (trySkipToken(AsmToken::Pipe)); |
6957 | 0 | } else { |
6958 | 0 | if (!parseExpr(Delay)) |
6959 | 0 | return ParseStatus::Failure; |
6960 | 0 | } |
6961 | | |
6962 | 0 | Operands.push_back(AMDGPUOperand::CreateImm(this, Delay, S)); |
6963 | 0 | return ParseStatus::Success; |
6964 | 0 | } |
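| | // Fields are joined with '|' and use the value names from the tables above; for example
| | // (illustrative): s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)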
6965 | | |
6966 | | bool |
6967 | 0 | AMDGPUOperand::isSWaitCnt() const { |
6968 | 0 | return isImm(); |
6969 | 0 | } |
6970 | | |
6971 | 0 | bool AMDGPUOperand::isSDelayALU() const { return isImm(); } |
6972 | | |
6973 | | //===----------------------------------------------------------------------===// |
6974 | | // DepCtr |
6975 | | //===----------------------------------------------------------------------===// |
6976 | | |
6977 | | void AMDGPUAsmParser::depCtrError(SMLoc Loc, int ErrorId, |
6978 | 0 | StringRef DepCtrName) { |
6979 | 0 | switch (ErrorId) { |
6980 | 0 | case OPR_ID_UNKNOWN: |
6981 | 0 | Error(Loc, Twine("invalid counter name ", DepCtrName)); |
6982 | 0 | return; |
6983 | 0 | case OPR_ID_UNSUPPORTED: |
6984 | 0 | Error(Loc, Twine(DepCtrName, " is not supported on this GPU")); |
6985 | 0 | return; |
6986 | 0 | case OPR_ID_DUPLICATE: |
6987 | 0 | Error(Loc, Twine("duplicate counter name ", DepCtrName)); |
6988 | 0 | return; |
6989 | 0 | case OPR_VAL_INVALID: |
6990 | 0 | Error(Loc, Twine("invalid value for ", DepCtrName)); |
6991 | 0 | return; |
6992 | 0 | default: |
6993 | 0 | assert(false); |
6994 | 0 | } |
6995 | 0 | } |
6996 | | |
6997 | 0 | bool AMDGPUAsmParser::parseDepCtr(int64_t &DepCtr, unsigned &UsedOprMask) { |
6998 | |
|
6999 | 0 | using namespace llvm::AMDGPU::DepCtr; |
7000 | |
|
7001 | 0 | SMLoc DepCtrLoc = getLoc(); |
7002 | 0 | StringRef DepCtrName = getTokenStr(); |
7003 | |
|
7004 | 0 | if (!skipToken(AsmToken::Identifier, "expected a counter name") || |
7005 | 0 | !skipToken(AsmToken::LParen, "expected a left parenthesis")) |
7006 | 0 | return false; |
7007 | | |
7008 | 0 | int64_t ExprVal; |
7009 | 0 | if (!parseExpr(ExprVal)) |
7010 | 0 | return false; |
7011 | | |
7012 | 0 | unsigned PrevOprMask = UsedOprMask; |
7013 | 0 | int CntVal = encodeDepCtr(DepCtrName, ExprVal, UsedOprMask, getSTI()); |
7014 | |
|
7015 | 0 | if (CntVal < 0) { |
7016 | 0 | depCtrError(DepCtrLoc, CntVal, DepCtrName); |
7017 | 0 | return false; |
7018 | 0 | } |
7019 | | |
7020 | 0 | if (!skipToken(AsmToken::RParen, "expected a closing parenthesis")) |
7021 | 0 | return false; |
7022 | | |
7023 | 0 | if (trySkipToken(AsmToken::Amp) || trySkipToken(AsmToken::Comma)) { |
7024 | 0 | if (isToken(AsmToken::EndOfStatement)) { |
7025 | 0 | Error(getLoc(), "expected a counter name"); |
7026 | 0 | return false; |
7027 | 0 | } |
7028 | 0 | } |
7029 | | |
7030 | 0 | unsigned CntValMask = PrevOprMask ^ UsedOprMask; |
7031 | 0 | DepCtr = (DepCtr & ~CntValMask) | CntVal; |
7032 | 0 | return true; |
7033 | 0 | } |
7034 | | |
7035 | 0 | ParseStatus AMDGPUAsmParser::parseDepCtr(OperandVector &Operands) { |
7036 | 0 | using namespace llvm::AMDGPU::DepCtr; |
7037 | |
|
7038 | 0 | int64_t DepCtr = getDefaultDepCtrEncoding(getSTI()); |
7039 | 0 | SMLoc Loc = getLoc(); |
7040 | |
|
7041 | 0 | if (isToken(AsmToken::Identifier) && peekToken().is(AsmToken::LParen)) { |
7042 | 0 | unsigned UsedOprMask = 0; |
7043 | 0 | while (!isToken(AsmToken::EndOfStatement)) { |
7044 | 0 | if (!parseDepCtr(DepCtr, UsedOprMask)) |
7045 | 0 | return ParseStatus::Failure; |
7046 | 0 | } |
7047 | 0 | } else { |
7048 | 0 | if (!parseExpr(DepCtr)) |
7049 | 0 | return ParseStatus::Failure; |
7050 | 0 | } |
7051 | | |
7052 | 0 | Operands.push_back(AMDGPUOperand::CreateImm(this, DepCtr, Loc)); |
7053 | 0 | return ParseStatus::Success; |
7054 | 0 | } |
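 | | // Illustrative only (not from this file): the depctr operand parsed above is
 | | // expected to look something like
 | | //   s_waitcnt_depctr depctr_va_vdst(0) & depctr_sa_sdst(0)
 | | // with '&' or ',' as optional separators, or a plain absolute expression.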
7055 | | |
7056 | 0 | bool AMDGPUOperand::isDepCtr() const { return isS16Imm(); } |
7057 | | |
7058 | | //===----------------------------------------------------------------------===// |
7059 | | // hwreg |
7060 | | //===----------------------------------------------------------------------===// |
7061 | | |
7062 | | bool |
7063 | | AMDGPUAsmParser::parseHwregBody(OperandInfoTy &HwReg, |
7064 | | OperandInfoTy &Offset, |
7065 | 0 | OperandInfoTy &Width) { |
7066 | 0 | using namespace llvm::AMDGPU::Hwreg; |
7067 | | |
7068 | | // The register may be specified by name or using a numeric code |
7069 | 0 | HwReg.Loc = getLoc(); |
7070 | 0 | if (isToken(AsmToken::Identifier) && |
7071 | 0 | (HwReg.Id = getHwregId(getTokenStr(), getSTI())) != OPR_ID_UNKNOWN) { |
7072 | 0 | HwReg.IsSymbolic = true; |
7073 | 0 | lex(); // skip register name |
7074 | 0 | } else if (!parseExpr(HwReg.Id, "a register name")) { |
7075 | 0 | return false; |
7076 | 0 | } |
7077 | | |
7078 | 0 | if (trySkipToken(AsmToken::RParen)) |
7079 | 0 | return true; |
7080 | | |
7081 | | // parse optional params |
7082 | 0 | if (!skipToken(AsmToken::Comma, "expected a comma or a closing parenthesis")) |
7083 | 0 | return false; |
7084 | | |
7085 | 0 | Offset.Loc = getLoc(); |
7086 | 0 | if (!parseExpr(Offset.Id)) |
7087 | 0 | return false; |
7088 | | |
7089 | 0 | if (!skipToken(AsmToken::Comma, "expected a comma")) |
7090 | 0 | return false; |
7091 | | |
7092 | 0 | Width.Loc = getLoc(); |
7093 | 0 | return parseExpr(Width.Id) && |
7094 | 0 | skipToken(AsmToken::RParen, "expected a closing parenthesis"); |
7095 | 0 | } |
7096 | | |
7097 | | bool |
7098 | | AMDGPUAsmParser::validateHwreg(const OperandInfoTy &HwReg, |
7099 | | const OperandInfoTy &Offset, |
7100 | 0 | const OperandInfoTy &Width) { |
7101 | |
|
7102 | 0 | using namespace llvm::AMDGPU::Hwreg; |
7103 | |
|
7104 | 0 | if (HwReg.IsSymbolic) { |
7105 | 0 | if (HwReg.Id == OPR_ID_UNSUPPORTED) { |
7106 | 0 | Error(HwReg.Loc, |
7107 | 0 | "specified hardware register is not supported on this GPU"); |
7108 | 0 | return false; |
7109 | 0 | } |
7110 | 0 | } else { |
7111 | 0 | if (!isValidHwreg(HwReg.Id)) { |
7112 | 0 | Error(HwReg.Loc, |
7113 | 0 | "invalid code of hardware register: only 6-bit values are legal"); |
7114 | 0 | return false; |
7115 | 0 | } |
7116 | 0 | } |
7117 | 0 | if (!isValidHwregOffset(Offset.Id)) { |
7118 | 0 | Error(Offset.Loc, "invalid bit offset: only 5-bit values are legal"); |
7119 | 0 | return false; |
7120 | 0 | } |
7121 | 0 | if (!isValidHwregWidth(Width.Id)) { |
7122 | 0 | Error(Width.Loc, |
7123 | 0 | "invalid bitfield width: only values from 1 to 32 are legal"); |
7124 | 0 | return false; |
7125 | 0 | } |
7126 | 0 | return true; |
7127 | 0 | } |
7128 | | |
7129 | 0 | ParseStatus AMDGPUAsmParser::parseHwreg(OperandVector &Operands) { |
7130 | 0 | using namespace llvm::AMDGPU::Hwreg; |
7131 | |
|
7132 | 0 | int64_t ImmVal = 0; |
7133 | 0 | SMLoc Loc = getLoc(); |
7134 | |
|
7135 | 0 | if (trySkipId("hwreg", AsmToken::LParen)) { |
7136 | 0 | OperandInfoTy HwReg(OPR_ID_UNKNOWN); |
7137 | 0 | OperandInfoTy Offset(OFFSET_DEFAULT_); |
7138 | 0 | OperandInfoTy Width(WIDTH_DEFAULT_); |
7139 | 0 | if (parseHwregBody(HwReg, Offset, Width) && |
7140 | 0 | validateHwreg(HwReg, Offset, Width)) { |
7141 | 0 | ImmVal = encodeHwreg(HwReg.Id, Offset.Id, Width.Id); |
7142 | 0 | } else { |
7143 | 0 | return ParseStatus::Failure; |
7144 | 0 | } |
7145 | 0 | } else if (parseExpr(ImmVal, "a hwreg macro")) { |
7146 | 0 | if (ImmVal < 0 || !isUInt<16>(ImmVal)) |
7147 | 0 | return Error(Loc, "invalid immediate: only 16-bit values are legal"); |
7148 | 0 | } else { |
7149 | 0 | return ParseStatus::Failure; |
7150 | 0 | } |
7151 | | |
7152 | 0 | Operands.push_back(AMDGPUOperand::CreateImm(this, ImmVal, Loc, AMDGPUOperand::ImmTyHwreg)); |
7153 | 0 | return ParseStatus::Success; |
7154 | 0 | } |
7155 | | |
7156 | 0 | bool AMDGPUOperand::isHwreg() const { |
7157 | 0 | return isImmTy(ImmTyHwreg); |
7158 | 0 | } |
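 | | // Illustrative only (not from this file): typical hwreg operands handled above
 | | //   s_getreg_b32 s2, hwreg(HW_REG_MODE)         // register name only
 | | //   s_setreg_b32 hwreg(HW_REG_MODE, 0, 4), s2   // name, bit offset, bit width
 | | // or a bare 16-bit immediate in place of the hwreg(...) macro.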
7159 | | |
7160 | | //===----------------------------------------------------------------------===// |
7161 | | // sendmsg |
7162 | | //===----------------------------------------------------------------------===// |
7163 | | |
7164 | | bool |
7165 | | AMDGPUAsmParser::parseSendMsgBody(OperandInfoTy &Msg, |
7166 | | OperandInfoTy &Op, |
7167 | 0 | OperandInfoTy &Stream) { |
7168 | 0 | using namespace llvm::AMDGPU::SendMsg; |
7169 | |
|
7170 | 0 | Msg.Loc = getLoc(); |
7171 | 0 | if (isToken(AsmToken::Identifier) && |
7172 | 0 | (Msg.Id = getMsgId(getTokenStr(), getSTI())) != OPR_ID_UNKNOWN) { |
7173 | 0 | Msg.IsSymbolic = true; |
7174 | 0 | lex(); // skip message name |
7175 | 0 | } else if (!parseExpr(Msg.Id, "a message name")) { |
7176 | 0 | return false; |
7177 | 0 | } |
7178 | | |
7179 | 0 | if (trySkipToken(AsmToken::Comma)) { |
7180 | 0 | Op.IsDefined = true; |
7181 | 0 | Op.Loc = getLoc(); |
7182 | 0 | if (isToken(AsmToken::Identifier) && |
7183 | 0 | (Op.Id = getMsgOpId(Msg.Id, getTokenStr())) >= 0) { |
7184 | 0 | lex(); // skip operation name |
7185 | 0 | } else if (!parseExpr(Op.Id, "an operation name")) { |
7186 | 0 | return false; |
7187 | 0 | } |
7188 | | |
7189 | 0 | if (trySkipToken(AsmToken::Comma)) { |
7190 | 0 | Stream.IsDefined = true; |
7191 | 0 | Stream.Loc = getLoc(); |
7192 | 0 | if (!parseExpr(Stream.Id)) |
7193 | 0 | return false; |
7194 | 0 | } |
7195 | 0 | } |
7196 | | |
7197 | 0 | return skipToken(AsmToken::RParen, "expected a closing parenthesis"); |
7198 | 0 | } |
7199 | | |
7200 | | bool |
7201 | | AMDGPUAsmParser::validateSendMsg(const OperandInfoTy &Msg, |
7202 | | const OperandInfoTy &Op, |
7203 | 0 | const OperandInfoTy &Stream) { |
7204 | 0 | using namespace llvm::AMDGPU::SendMsg; |
7205 | | |
7206 | | // Validation strictness depends on whether the message is specified
7207 | | // in symbolic or in numeric form. In the latter case only the
7208 | | // possibility of encoding is checked.
7209 | 0 | bool Strict = Msg.IsSymbolic; |
7210 | |
|
7211 | 0 | if (Strict) { |
7212 | 0 | if (Msg.Id == OPR_ID_UNSUPPORTED) { |
7213 | 0 | Error(Msg.Loc, "specified message id is not supported on this GPU"); |
7214 | 0 | return false; |
7215 | 0 | } |
7216 | 0 | } else { |
7217 | 0 | if (!isValidMsgId(Msg.Id, getSTI())) { |
7218 | 0 | Error(Msg.Loc, "invalid message id"); |
7219 | 0 | return false; |
7220 | 0 | } |
7221 | 0 | } |
7222 | 0 | if (Strict && (msgRequiresOp(Msg.Id, getSTI()) != Op.IsDefined)) { |
7223 | 0 | if (Op.IsDefined) { |
7224 | 0 | Error(Op.Loc, "message does not support operations"); |
7225 | 0 | } else { |
7226 | 0 | Error(Msg.Loc, "missing message operation"); |
7227 | 0 | } |
7228 | 0 | return false; |
7229 | 0 | } |
7230 | 0 | if (!isValidMsgOp(Msg.Id, Op.Id, getSTI(), Strict)) { |
7231 | 0 | Error(Op.Loc, "invalid operation id"); |
7232 | 0 | return false; |
7233 | 0 | } |
7234 | 0 | if (Strict && !msgSupportsStream(Msg.Id, Op.Id, getSTI()) && |
7235 | 0 | Stream.IsDefined) { |
7236 | 0 | Error(Stream.Loc, "message operation does not support streams"); |
7237 | 0 | return false; |
7238 | 0 | } |
7239 | 0 | if (!isValidMsgStream(Msg.Id, Op.Id, Stream.Id, getSTI(), Strict)) { |
7240 | 0 | Error(Stream.Loc, "invalid message stream id"); |
7241 | 0 | return false; |
7242 | 0 | } |
7243 | 0 | return true; |
7244 | 0 | } |
7245 | | |
7246 | 0 | ParseStatus AMDGPUAsmParser::parseSendMsg(OperandVector &Operands) { |
7247 | 0 | using namespace llvm::AMDGPU::SendMsg; |
7248 | |
|
7249 | 0 | int64_t ImmVal = 0; |
7250 | 0 | SMLoc Loc = getLoc(); |
7251 | |
|
7252 | 0 | if (trySkipId("sendmsg", AsmToken::LParen)) { |
7253 | 0 | OperandInfoTy Msg(OPR_ID_UNKNOWN); |
7254 | 0 | OperandInfoTy Op(OP_NONE_); |
7255 | 0 | OperandInfoTy Stream(STREAM_ID_NONE_); |
7256 | 0 | if (parseSendMsgBody(Msg, Op, Stream) && |
7257 | 0 | validateSendMsg(Msg, Op, Stream)) { |
7258 | 0 | ImmVal = encodeMsg(Msg.Id, Op.Id, Stream.Id); |
7259 | 0 | } else { |
7260 | 0 | return ParseStatus::Failure; |
7261 | 0 | } |
7262 | 0 | } else if (parseExpr(ImmVal, "a sendmsg macro")) { |
7263 | 0 | if (ImmVal < 0 || !isUInt<16>(ImmVal)) |
7264 | 0 | return Error(Loc, "invalid immediate: only 16-bit values are legal"); |
7265 | 0 | } else { |
7266 | 0 | return ParseStatus::Failure; |
7267 | 0 | } |
7268 | | |
7269 | 0 | Operands.push_back(AMDGPUOperand::CreateImm(this, ImmVal, Loc, AMDGPUOperand::ImmTySendMsg)); |
7270 | 0 | return ParseStatus::Success; |
7271 | 0 | } |
7272 | | |
7273 | 0 | bool AMDGPUOperand::isSendMsg() const { |
7274 | 0 | return isImmTy(ImmTySendMsg); |
7275 | 0 | } |
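 | | // Illustrative only (not from this file): sendmsg operands parsed above include
 | | //   s_sendmsg sendmsg(MSG_INTERRUPT)
 | | //   s_sendmsg sendmsg(MSG_GS, GS_OP_EMIT, 0)    // message, operation, stream id
 | | // or a bare 16-bit immediate in place of the sendmsg(...) macro.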
7276 | | |
7277 | | //===----------------------------------------------------------------------===// |
7278 | | // v_interp |
7279 | | //===----------------------------------------------------------------------===// |
7280 | | |
7281 | 0 | ParseStatus AMDGPUAsmParser::parseInterpSlot(OperandVector &Operands) { |
7282 | 0 | StringRef Str; |
7283 | 0 | SMLoc S = getLoc(); |
7284 | |
|
7285 | 0 | if (!parseId(Str)) |
7286 | 0 | return ParseStatus::NoMatch; |
7287 | | |
7288 | 0 | int Slot = StringSwitch<int>(Str) |
7289 | 0 | .Case("p10", 0) |
7290 | 0 | .Case("p20", 1) |
7291 | 0 | .Case("p0", 2) |
7292 | 0 | .Default(-1); |
7293 | |
|
7294 | 0 | if (Slot == -1) |
7295 | 0 | return Error(S, "invalid interpolation slot"); |
7296 | | |
7297 | 0 | Operands.push_back(AMDGPUOperand::CreateImm(this, Slot, S, |
7298 | 0 | AMDGPUOperand::ImmTyInterpSlot)); |
7299 | 0 | return ParseStatus::Success; |
7300 | 0 | } |
7301 | | |
7302 | 0 | ParseStatus AMDGPUAsmParser::parseInterpAttr(OperandVector &Operands) { |
7303 | 0 | StringRef Str; |
7304 | 0 | SMLoc S = getLoc(); |
7305 | |
|
7306 | 0 | if (!parseId(Str)) |
7307 | 0 | return ParseStatus::NoMatch; |
7308 | | |
7309 | 0 | if (!Str.starts_with("attr")) |
7310 | 0 | return Error(S, "invalid interpolation attribute"); |
7311 | | |
7312 | 0 | StringRef Chan = Str.take_back(2); |
7313 | 0 | int AttrChan = StringSwitch<int>(Chan) |
7314 | 0 | .Case(".x", 0) |
7315 | 0 | .Case(".y", 1) |
7316 | 0 | .Case(".z", 2) |
7317 | 0 | .Case(".w", 3) |
7318 | 0 | .Default(-1); |
7319 | 0 | if (AttrChan == -1) |
7320 | 0 | return Error(S, "invalid or missing interpolation attribute channel"); |
7321 | | |
7322 | 0 | Str = Str.drop_back(2).drop_front(4); |
7323 | |
|
7324 | 0 | uint8_t Attr; |
7325 | 0 | if (Str.getAsInteger(10, Attr)) |
7326 | 0 | return Error(S, "invalid or missing interpolation attribute number"); |
7327 | | |
7328 | 0 | if (Attr > 32) |
7329 | 0 | return Error(S, "out of bounds interpolation attribute number"); |
7330 | | |
7331 | 0 | SMLoc SChan = SMLoc::getFromPointer(Chan.data()); |
7332 | |
|
7333 | 0 | Operands.push_back(AMDGPUOperand::CreateImm(this, Attr, S, |
7334 | 0 | AMDGPUOperand::ImmTyInterpAttr)); |
7335 | 0 | Operands.push_back(AMDGPUOperand::CreateImm( |
7336 | 0 | this, AttrChan, SChan, AMDGPUOperand::ImmTyInterpAttrChan)); |
7337 | 0 | return ParseStatus::Success; |
7338 | 0 | } |
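 | | // Illustrative only (not from this file): typical uses of the slot and
 | | // attribute operands parsed above would be
 | | //   v_interp_p1_f32 v0, v1, attr0.y
 | | //   v_interp_mov_f32 v0, p10, attr0.x   // p10/p20/p0 is the interpolation slot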
7339 | | |
7340 | | //===----------------------------------------------------------------------===// |
7341 | | // exp |
7342 | | //===----------------------------------------------------------------------===// |
7343 | | |
7344 | 0 | ParseStatus AMDGPUAsmParser::parseExpTgt(OperandVector &Operands) { |
7345 | 0 | using namespace llvm::AMDGPU::Exp; |
7346 | |
|
7347 | 0 | StringRef Str; |
7348 | 0 | SMLoc S = getLoc(); |
7349 | |
|
7350 | 0 | if (!parseId(Str)) |
7351 | 0 | return ParseStatus::NoMatch; |
7352 | | |
7353 | 0 | unsigned Id = getTgtId(Str); |
7354 | 0 | if (Id == ET_INVALID || !isSupportedTgtId(Id, getSTI())) |
7355 | 0 | return Error(S, (Id == ET_INVALID) |
7356 | 0 | ? "invalid exp target" |
7357 | 0 | : "exp target is not supported on this GPU"); |
7358 | | |
7359 | 0 | Operands.push_back(AMDGPUOperand::CreateImm(this, Id, S, |
7360 | 0 | AMDGPUOperand::ImmTyExpTgt)); |
7361 | 0 | return ParseStatus::Success; |
7362 | 0 | } |
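 | | // Illustrative only (not from this file): an exp target operand as parsed above
 | | //   exp mrt0 v0, v1, v2, v3 done vm
 | | // where the target may be mrt0..mrt7, mrtz, null, pos0..., or param0... as
 | | // permitted by getTgtId()/isSupportedTgtId() for the current GPU.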
7363 | | |
7364 | | //===----------------------------------------------------------------------===// |
7365 | | // parser helpers |
7366 | | //===----------------------------------------------------------------------===// |
7367 | | |
7368 | | bool |
7369 | 0 | AMDGPUAsmParser::isId(const AsmToken &Token, const StringRef Id) const { |
7370 | 0 | return Token.is(AsmToken::Identifier) && Token.getString() == Id; |
7371 | 0 | } |
7372 | | |
7373 | | bool |
7374 | 0 | AMDGPUAsmParser::isId(const StringRef Id) const { |
7375 | 0 | return isId(getToken(), Id); |
7376 | 0 | } |
7377 | | |
7378 | | bool |
7379 | 0 | AMDGPUAsmParser::isToken(const AsmToken::TokenKind Kind) const { |
7380 | 0 | return getTokenKind() == Kind; |
7381 | 0 | } |
7382 | | |
7383 | 0 | StringRef AMDGPUAsmParser::getId() const { |
7384 | 0 | return isToken(AsmToken::Identifier) ? getTokenStr() : StringRef(); |
7385 | 0 | } |
7386 | | |
7387 | | bool |
7388 | 0 | AMDGPUAsmParser::trySkipId(const StringRef Id) { |
7389 | 0 | if (isId(Id)) { |
7390 | 0 | lex(); |
7391 | 0 | return true; |
7392 | 0 | } |
7393 | 0 | return false; |
7394 | 0 | } |
7395 | | |
7396 | | bool |
7397 | 0 | AMDGPUAsmParser::trySkipId(const StringRef Pref, const StringRef Id) { |
7398 | 0 | if (isToken(AsmToken::Identifier)) { |
7399 | 0 | StringRef Tok = getTokenStr(); |
7400 | 0 | if (Tok.starts_with(Pref) && Tok.drop_front(Pref.size()) == Id) { |
7401 | 0 | lex(); |
7402 | 0 | return true; |
7403 | 0 | } |
7404 | 0 | } |
7405 | 0 | return false; |
7406 | 0 | } |
7407 | | |
7408 | | bool |
7409 | 0 | AMDGPUAsmParser::trySkipId(const StringRef Id, const AsmToken::TokenKind Kind) { |
7410 | 0 | if (isId(Id) && peekToken().is(Kind)) { |
7411 | 0 | lex(); |
7412 | 0 | lex(); |
7413 | 0 | return true; |
7414 | 0 | } |
7415 | 0 | return false; |
7416 | 0 | } |
7417 | | |
7418 | | bool |
7419 | 0 | AMDGPUAsmParser::trySkipToken(const AsmToken::TokenKind Kind) { |
7420 | 0 | if (isToken(Kind)) { |
7421 | 0 | lex(); |
7422 | 0 | return true; |
7423 | 0 | } |
7424 | 0 | return false; |
7425 | 0 | } |
7426 | | |
7427 | | bool |
7428 | | AMDGPUAsmParser::skipToken(const AsmToken::TokenKind Kind, |
7429 | 0 | const StringRef ErrMsg) { |
7430 | 0 | if (!trySkipToken(Kind)) { |
7431 | 0 | Error(getLoc(), ErrMsg); |
7432 | 0 | return false; |
7433 | 0 | } |
7434 | 0 | return true; |
7435 | 0 | } |
7436 | | |
7437 | | bool |
7438 | 0 | AMDGPUAsmParser::parseExpr(int64_t &Imm, StringRef Expected) { |
7439 | 0 | SMLoc S = getLoc(); |
7440 | |
|
7441 | 0 | const MCExpr *Expr; |
7442 | 0 | if (Parser.parseExpression(Expr)) |
7443 | 0 | return false; |
7444 | | |
7445 | 0 | if (Expr->evaluateAsAbsolute(Imm)) |
7446 | 0 | return true; |
7447 | | |
7448 | 0 | if (Expected.empty()) { |
7449 | 0 | Error(S, "expected absolute expression"); |
7450 | 0 | } else { |
7451 | 0 | Error(S, Twine("expected ", Expected) + |
7452 | 0 | Twine(" or an absolute expression")); |
7453 | 0 | } |
7454 | 0 | return false; |
7455 | 0 | } |
7456 | | |
7457 | | bool |
7458 | 0 | AMDGPUAsmParser::parseExpr(OperandVector &Operands) { |
7459 | 0 | SMLoc S = getLoc(); |
7460 | |
|
7461 | 0 | const MCExpr *Expr; |
7462 | 0 | if (Parser.parseExpression(Expr)) |
7463 | 0 | return false; |
7464 | | |
7465 | 0 | int64_t IntVal; |
7466 | 0 | if (Expr->evaluateAsAbsolute(IntVal)) { |
7467 | 0 | Operands.push_back(AMDGPUOperand::CreateImm(this, IntVal, S)); |
7468 | 0 | } else { |
7469 | 0 | Operands.push_back(AMDGPUOperand::CreateExpr(this, Expr, S)); |
7470 | 0 | } |
7471 | 0 | return true; |
7472 | 0 | } |
7473 | | |
7474 | | bool |
7475 | 0 | AMDGPUAsmParser::parseString(StringRef &Val, const StringRef ErrMsg) { |
7476 | 0 | if (isToken(AsmToken::String)) { |
7477 | 0 | Val = getToken().getStringContents(); |
7478 | 0 | lex(); |
7479 | 0 | return true; |
7480 | 0 | } else { |
7481 | 0 | Error(getLoc(), ErrMsg); |
7482 | 0 | return false; |
7483 | 0 | } |
7484 | 0 | } |
7485 | | |
7486 | | bool |
7487 | 0 | AMDGPUAsmParser::parseId(StringRef &Val, const StringRef ErrMsg) { |
7488 | 0 | if (isToken(AsmToken::Identifier)) { |
7489 | 0 | Val = getTokenStr(); |
7490 | 0 | lex(); |
7491 | 0 | return true; |
7492 | 0 | } else { |
7493 | 0 | if (!ErrMsg.empty()) |
7494 | 0 | Error(getLoc(), ErrMsg); |
7495 | 0 | return false; |
7496 | 0 | } |
7497 | 0 | } |
7498 | | |
7499 | | AsmToken |
7500 | 0 | AMDGPUAsmParser::getToken() const { |
7501 | 0 | return Parser.getTok(); |
7502 | 0 | } |
7503 | | |
7504 | 0 | AsmToken AMDGPUAsmParser::peekToken(bool ShouldSkipSpace) { |
7505 | 0 | return isToken(AsmToken::EndOfStatement) |
7506 | 0 | ? getToken() |
7507 | 0 | : getLexer().peekTok(ShouldSkipSpace); |
7508 | 0 | } |
7509 | | |
7510 | | void |
7511 | 0 | AMDGPUAsmParser::peekTokens(MutableArrayRef<AsmToken> Tokens) { |
7512 | 0 | auto TokCount = getLexer().peekTokens(Tokens); |
7513 | |
|
7514 | 0 | for (auto Idx = TokCount; Idx < Tokens.size(); ++Idx) |
7515 | 0 | Tokens[Idx] = AsmToken(AsmToken::Error, ""); |
7516 | 0 | } |
7517 | | |
7518 | | AsmToken::TokenKind |
7519 | 0 | AMDGPUAsmParser::getTokenKind() const { |
7520 | 0 | return getLexer().getKind(); |
7521 | 0 | } |
7522 | | |
7523 | | SMLoc |
7524 | 0 | AMDGPUAsmParser::getLoc() const { |
7525 | 0 | return getToken().getLoc(); |
7526 | 0 | } |
7527 | | |
7528 | | StringRef |
7529 | 0 | AMDGPUAsmParser::getTokenStr() const { |
7530 | 0 | return getToken().getString(); |
7531 | 0 | } |
7532 | | |
7533 | | void |
7534 | 0 | AMDGPUAsmParser::lex() { |
7535 | 0 | Parser.Lex(); |
7536 | 0 | } |
7537 | | |
7538 | 0 | SMLoc AMDGPUAsmParser::getInstLoc(const OperandVector &Operands) const { |
7539 | 0 | return ((AMDGPUOperand &)*Operands[0]).getStartLoc(); |
7540 | 0 | } |
7541 | | |
7542 | | SMLoc |
7543 | | AMDGPUAsmParser::getOperandLoc(std::function<bool(const AMDGPUOperand&)> Test, |
7544 | 0 | const OperandVector &Operands) const { |
7545 | 0 | for (unsigned i = Operands.size() - 1; i > 0; --i) { |
7546 | 0 | AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]); |
7547 | 0 | if (Test(Op)) |
7548 | 0 | return Op.getStartLoc(); |
7549 | 0 | } |
7550 | 0 | return getInstLoc(Operands); |
7551 | 0 | } |
7552 | | |
7553 | | SMLoc |
7554 | | AMDGPUAsmParser::getImmLoc(AMDGPUOperand::ImmTy Type, |
7555 | 0 | const OperandVector &Operands) const { |
7556 | 0 | auto Test = [=](const AMDGPUOperand& Op) { return Op.isImmTy(Type); }; |
7557 | 0 | return getOperandLoc(Test, Operands); |
7558 | 0 | } |
7559 | | |
7560 | | SMLoc |
7561 | | AMDGPUAsmParser::getRegLoc(unsigned Reg, |
7562 | 0 | const OperandVector &Operands) const { |
7563 | 0 | auto Test = [=](const AMDGPUOperand& Op) { |
7564 | 0 | return Op.isRegKind() && Op.getReg() == Reg; |
7565 | 0 | }; |
7566 | 0 | return getOperandLoc(Test, Operands); |
7567 | 0 | } |
7568 | | |
7569 | | SMLoc AMDGPUAsmParser::getLitLoc(const OperandVector &Operands, |
7570 | 0 | bool SearchMandatoryLiterals) const { |
7571 | 0 | auto Test = [](const AMDGPUOperand& Op) { |
7572 | 0 | return Op.IsImmKindLiteral() || Op.isExpr(); |
7573 | 0 | }; |
7574 | 0 | SMLoc Loc = getOperandLoc(Test, Operands); |
7575 | 0 | if (SearchMandatoryLiterals && Loc == getInstLoc(Operands)) |
7576 | 0 | Loc = getMandatoryLitLoc(Operands); |
7577 | 0 | return Loc; |
7578 | 0 | } |
7579 | | |
7580 | 0 | SMLoc AMDGPUAsmParser::getMandatoryLitLoc(const OperandVector &Operands) const { |
7581 | 0 | auto Test = [](const AMDGPUOperand &Op) { |
7582 | 0 | return Op.IsImmKindMandatoryLiteral(); |
7583 | 0 | }; |
7584 | 0 | return getOperandLoc(Test, Operands); |
7585 | 0 | } |
7586 | | |
7587 | | SMLoc |
7588 | 0 | AMDGPUAsmParser::getConstLoc(const OperandVector &Operands) const { |
7589 | 0 | auto Test = [](const AMDGPUOperand& Op) { |
7590 | 0 | return Op.isImmKindConst(); |
7591 | 0 | }; |
7592 | 0 | return getOperandLoc(Test, Operands); |
7593 | 0 | } |
7594 | | |
7595 | | //===----------------------------------------------------------------------===// |
7596 | | // swizzle |
7597 | | //===----------------------------------------------------------------------===// |
7598 | | |
7599 | | LLVM_READNONE |
7600 | | static unsigned |
7601 | | encodeBitmaskPerm(const unsigned AndMask, |
7602 | | const unsigned OrMask, |
7603 | 0 | const unsigned XorMask) { |
7604 | 0 | using namespace llvm::AMDGPU::Swizzle; |
7605 | |
|
7606 | 0 | return BITMASK_PERM_ENC | |
7607 | 0 | (AndMask << BITMASK_AND_SHIFT) | |
7608 | 0 | (OrMask << BITMASK_OR_SHIFT) | |
7609 | 0 | (XorMask << BITMASK_XOR_SHIFT); |
7610 | 0 | } |
7611 | | |
7612 | | bool |
7613 | | AMDGPUAsmParser::parseSwizzleOperand(int64_t &Op, |
7614 | | const unsigned MinVal, |
7615 | | const unsigned MaxVal, |
7616 | | const StringRef ErrMsg, |
7617 | 0 | SMLoc &Loc) { |
7618 | 0 | if (!skipToken(AsmToken::Comma, "expected a comma")) { |
7619 | 0 | return false; |
7620 | 0 | } |
7621 | 0 | Loc = getLoc(); |
7622 | 0 | if (!parseExpr(Op)) { |
7623 | 0 | return false; |
7624 | 0 | } |
7625 | 0 | if (Op < MinVal || Op > MaxVal) { |
7626 | 0 | Error(Loc, ErrMsg); |
7627 | 0 | return false; |
7628 | 0 | } |
7629 | | |
7630 | 0 | return true; |
7631 | 0 | } |
7632 | | |
7633 | | bool |
7634 | | AMDGPUAsmParser::parseSwizzleOperands(const unsigned OpNum, int64_t* Op, |
7635 | | const unsigned MinVal, |
7636 | | const unsigned MaxVal, |
7637 | 0 | const StringRef ErrMsg) { |
7638 | 0 | SMLoc Loc; |
7639 | 0 | for (unsigned i = 0; i < OpNum; ++i) { |
7640 | 0 | if (!parseSwizzleOperand(Op[i], MinVal, MaxVal, ErrMsg, Loc)) |
7641 | 0 | return false; |
7642 | 0 | } |
7643 | | |
7644 | 0 | return true; |
7645 | 0 | } |
7646 | | |
7647 | | bool |
7648 | 0 | AMDGPUAsmParser::parseSwizzleQuadPerm(int64_t &Imm) { |
7649 | 0 | using namespace llvm::AMDGPU::Swizzle; |
7650 | |
|
7651 | 0 | int64_t Lane[LANE_NUM]; |
7652 | 0 | if (parseSwizzleOperands(LANE_NUM, Lane, 0, LANE_MAX, |
7653 | 0 | "expected a 2-bit lane id")) { |
7654 | 0 | Imm = QUAD_PERM_ENC; |
7655 | 0 | for (unsigned I = 0; I < LANE_NUM; ++I) { |
7656 | 0 | Imm |= Lane[I] << (LANE_SHIFT * I); |
7657 | 0 | } |
7658 | 0 | return true; |
7659 | 0 | } |
7660 | 0 | return false; |
7661 | 0 | } |
7662 | | |
7663 | | bool |
7664 | 0 | AMDGPUAsmParser::parseSwizzleBroadcast(int64_t &Imm) { |
7665 | 0 | using namespace llvm::AMDGPU::Swizzle; |
7666 | |
|
7667 | 0 | SMLoc Loc; |
7668 | 0 | int64_t GroupSize; |
7669 | 0 | int64_t LaneIdx; |
7670 | |
|
7671 | 0 | if (!parseSwizzleOperand(GroupSize, |
7672 | 0 | 2, 32, |
7673 | 0 | "group size must be in the interval [2,32]", |
7674 | 0 | Loc)) { |
7675 | 0 | return false; |
7676 | 0 | } |
7677 | 0 | if (!isPowerOf2_64(GroupSize)) { |
7678 | 0 | Error(Loc, "group size must be a power of two"); |
7679 | 0 | return false; |
7680 | 0 | } |
7681 | 0 | if (parseSwizzleOperand(LaneIdx, |
7682 | 0 | 0, GroupSize - 1, |
7683 | 0 | "lane id must be in the interval [0,group size - 1]", |
7684 | 0 | Loc)) { |
7685 | 0 | Imm = encodeBitmaskPerm(BITMASK_MAX - GroupSize + 1, LaneIdx, 0); |
7686 | 0 | return true; |
7687 | 0 | } |
7688 | 0 | return false; |
7689 | 0 | } |
7690 | | |
7691 | | bool |
7692 | 0 | AMDGPUAsmParser::parseSwizzleReverse(int64_t &Imm) { |
7693 | 0 | using namespace llvm::AMDGPU::Swizzle; |
7694 | |
|
7695 | 0 | SMLoc Loc; |
7696 | 0 | int64_t GroupSize; |
7697 | |
|
7698 | 0 | if (!parseSwizzleOperand(GroupSize, |
7699 | 0 | 2, 32, |
7700 | 0 | "group size must be in the interval [2,32]", |
7701 | 0 | Loc)) { |
7702 | 0 | return false; |
7703 | 0 | } |
7704 | 0 | if (!isPowerOf2_64(GroupSize)) { |
7705 | 0 | Error(Loc, "group size must be a power of two"); |
7706 | 0 | return false; |
7707 | 0 | } |
7708 | | |
7709 | 0 | Imm = encodeBitmaskPerm(BITMASK_MAX, 0, GroupSize - 1); |
7710 | 0 | return true; |
7711 | 0 | } |
7712 | | |
7713 | | bool |
7714 | 0 | AMDGPUAsmParser::parseSwizzleSwap(int64_t &Imm) { |
7715 | 0 | using namespace llvm::AMDGPU::Swizzle; |
7716 | |
|
7717 | 0 | SMLoc Loc; |
7718 | 0 | int64_t GroupSize; |
7719 | |
|
7720 | 0 | if (!parseSwizzleOperand(GroupSize, |
7721 | 0 | 1, 16, |
7722 | 0 | "group size must be in the interval [1,16]", |
7723 | 0 | Loc)) { |
7724 | 0 | return false; |
7725 | 0 | } |
7726 | 0 | if (!isPowerOf2_64(GroupSize)) { |
7727 | 0 | Error(Loc, "group size must be a power of two"); |
7728 | 0 | return false; |
7729 | 0 | } |
7730 | | |
7731 | 0 | Imm = encodeBitmaskPerm(BITMASK_MAX, 0, GroupSize); |
7732 | 0 | return true; |
7733 | 0 | } |
7734 | | |
7735 | | bool |
7736 | 0 | AMDGPUAsmParser::parseSwizzleBitmaskPerm(int64_t &Imm) { |
7737 | 0 | using namespace llvm::AMDGPU::Swizzle; |
7738 | |
|
7739 | 0 | if (!skipToken(AsmToken::Comma, "expected a comma")) { |
7740 | 0 | return false; |
7741 | 0 | } |
7742 | | |
7743 | 0 | StringRef Ctl; |
7744 | 0 | SMLoc StrLoc = getLoc(); |
7745 | 0 | if (!parseString(Ctl)) { |
7746 | 0 | return false; |
7747 | 0 | } |
7748 | 0 | if (Ctl.size() != BITMASK_WIDTH) { |
7749 | 0 | Error(StrLoc, "expected a 5-character mask"); |
7750 | 0 | return false; |
7751 | 0 | } |
7752 | | |
7753 | 0 | unsigned AndMask = 0; |
7754 | 0 | unsigned OrMask = 0; |
7755 | 0 | unsigned XorMask = 0; |
7756 | |
|
7757 | 0 | for (size_t i = 0; i < Ctl.size(); ++i) { |
7758 | 0 | unsigned Mask = 1 << (BITMASK_WIDTH - 1 - i); |
7759 | 0 | switch(Ctl[i]) { |
7760 | 0 | default: |
7761 | 0 | Error(StrLoc, "invalid mask"); |
7762 | 0 | return false; |
7763 | 0 | case '0': |
7764 | 0 | break; |
7765 | 0 | case '1': |
7766 | 0 | OrMask |= Mask; |
7767 | 0 | break; |
7768 | 0 | case 'p': |
7769 | 0 | AndMask |= Mask; |
7770 | 0 | break; |
7771 | 0 | case 'i': |
7772 | 0 | AndMask |= Mask; |
7773 | 0 | XorMask |= Mask; |
7774 | 0 | break; |
7775 | 0 | } |
7776 | 0 | } |
7777 | | |
7778 | 0 | Imm = encodeBitmaskPerm(AndMask, OrMask, XorMask); |
7779 | 0 | return true; |
7780 | 0 | } |
7781 | | |
7782 | | bool |
7783 | 0 | AMDGPUAsmParser::parseSwizzleOffset(int64_t &Imm) { |
7784 | |
|
7785 | 0 | SMLoc OffsetLoc = getLoc(); |
7786 | |
|
7787 | 0 | if (!parseExpr(Imm, "a swizzle macro")) { |
7788 | 0 | return false; |
7789 | 0 | } |
7790 | 0 | if (!isUInt<16>(Imm)) { |
7791 | 0 | Error(OffsetLoc, "expected a 16-bit offset"); |
7792 | 0 | return false; |
7793 | 0 | } |
7794 | 0 | return true; |
7795 | 0 | } |
7796 | | |
7797 | | bool |
7798 | 0 | AMDGPUAsmParser::parseSwizzleMacro(int64_t &Imm) { |
7799 | 0 | using namespace llvm::AMDGPU::Swizzle; |
7800 | |
|
7801 | 0 | if (skipToken(AsmToken::LParen, "expected a left parenthesis")) {
7802 | |
|
7803 | 0 | SMLoc ModeLoc = getLoc(); |
7804 | 0 | bool Ok = false; |
7805 | |
|
7806 | 0 | if (trySkipId(IdSymbolic[ID_QUAD_PERM])) { |
7807 | 0 | Ok = parseSwizzleQuadPerm(Imm); |
7808 | 0 | } else if (trySkipId(IdSymbolic[ID_BITMASK_PERM])) { |
7809 | 0 | Ok = parseSwizzleBitmaskPerm(Imm); |
7810 | 0 | } else if (trySkipId(IdSymbolic[ID_BROADCAST])) { |
7811 | 0 | Ok = parseSwizzleBroadcast(Imm); |
7812 | 0 | } else if (trySkipId(IdSymbolic[ID_SWAP])) { |
7813 | 0 | Ok = parseSwizzleSwap(Imm); |
7814 | 0 | } else if (trySkipId(IdSymbolic[ID_REVERSE])) { |
7815 | 0 | Ok = parseSwizzleReverse(Imm); |
7816 | 0 | } else { |
7817 | 0 | Error(ModeLoc, "expected a swizzle mode"); |
7818 | 0 | } |
7819 | |
|
7820 | 0 | return Ok && skipToken(AsmToken::RParen, "expected a closing parenthesis");
7821 | 0 | } |
7822 | | |
7823 | 0 | return false; |
7824 | 0 | } |
7825 | | |
7826 | 0 | ParseStatus AMDGPUAsmParser::parseSwizzle(OperandVector &Operands) { |
7827 | 0 | SMLoc S = getLoc(); |
7828 | 0 | int64_t Imm = 0; |
7829 | |
|
7830 | 0 | if (trySkipId("offset")) { |
7831 | |
|
7832 | 0 | bool Ok = false; |
7833 | 0 | if (skipToken(AsmToken::Colon, "expected a colon")) { |
7834 | 0 | if (trySkipId("swizzle")) { |
7835 | 0 | Ok = parseSwizzleMacro(Imm); |
7836 | 0 | } else { |
7837 | 0 | Ok = parseSwizzleOffset(Imm); |
7838 | 0 | } |
7839 | 0 | } |
7840 | |
|
7841 | 0 | Operands.push_back(AMDGPUOperand::CreateImm(this, Imm, S, AMDGPUOperand::ImmTySwizzle)); |
7842 | |
|
7843 | 0 | return Ok ? ParseStatus::Success : ParseStatus::Failure; |
7844 | 0 | } |
7845 | 0 | return ParseStatus::NoMatch; |
7846 | 0 | } |
7847 | | |
7848 | | bool |
7849 | 0 | AMDGPUOperand::isSwizzle() const { |
7850 | 0 | return isImmTy(ImmTySwizzle); |
7851 | 0 | } |
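 | | // Illustrative only (not from this file): swizzle offsets as parsed above
 | | //   ds_swizzle_b32 v8, v2 offset:swizzle(QUAD_PERM, 0, 1, 2, 3)
 | | //   ds_swizzle_b32 v8, v2 offset:swizzle(BITMASK_PERM, "01pi0")
 | | //   ds_swizzle_b32 v8, v2 offset:swizzle(BROADCAST, 8, 2)
 | | // or a raw 16-bit offset, e.g. offset:0x8000.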
7852 | | |
7853 | | //===----------------------------------------------------------------------===// |
7854 | | // VGPR Index Mode |
7855 | | //===----------------------------------------------------------------------===// |
7856 | | |
7857 | 0 | int64_t AMDGPUAsmParser::parseGPRIdxMacro() { |
7858 | |
|
7859 | 0 | using namespace llvm::AMDGPU::VGPRIndexMode; |
7860 | |
|
7861 | 0 | if (trySkipToken(AsmToken::RParen)) { |
7862 | 0 | return OFF; |
7863 | 0 | } |
7864 | | |
7865 | 0 | int64_t Imm = 0; |
7866 | |
|
7867 | 0 | while (true) { |
7868 | 0 | unsigned Mode = 0; |
7869 | 0 | SMLoc S = getLoc(); |
7870 | |
|
7871 | 0 | for (unsigned ModeId = ID_MIN; ModeId <= ID_MAX; ++ModeId) { |
7872 | 0 | if (trySkipId(IdSymbolic[ModeId])) { |
7873 | 0 | Mode = 1 << ModeId; |
7874 | 0 | break; |
7875 | 0 | } |
7876 | 0 | } |
7877 | |
|
7878 | 0 | if (Mode == 0) { |
7879 | 0 | Error(S, (Imm == 0)? |
7880 | 0 | "expected a VGPR index mode or a closing parenthesis" : |
7881 | 0 | "expected a VGPR index mode"); |
7882 | 0 | return UNDEF; |
7883 | 0 | } |
7884 | | |
7885 | 0 | if (Imm & Mode) { |
7886 | 0 | Error(S, "duplicate VGPR index mode"); |
7887 | 0 | return UNDEF; |
7888 | 0 | } |
7889 | 0 | Imm |= Mode; |
7890 | |
|
7891 | 0 | if (trySkipToken(AsmToken::RParen)) |
7892 | 0 | break; |
7893 | 0 | if (!skipToken(AsmToken::Comma, |
7894 | 0 | "expected a comma or a closing parenthesis")) |
7895 | 0 | return UNDEF; |
7896 | 0 | } |
7897 | | |
7898 | 0 | return Imm; |
7899 | 0 | } |
7900 | | |
7901 | 0 | ParseStatus AMDGPUAsmParser::parseGPRIdxMode(OperandVector &Operands) { |
7902 | |
|
7903 | 0 | using namespace llvm::AMDGPU::VGPRIndexMode; |
7904 | |
|
7905 | 0 | int64_t Imm = 0; |
7906 | 0 | SMLoc S = getLoc(); |
7907 | |
|
7908 | 0 | if (trySkipId("gpr_idx", AsmToken::LParen)) { |
7909 | 0 | Imm = parseGPRIdxMacro(); |
7910 | 0 | if (Imm == UNDEF) |
7911 | 0 | return ParseStatus::Failure; |
7912 | 0 | } else { |
7913 | 0 | if (getParser().parseAbsoluteExpression(Imm)) |
7914 | 0 | return ParseStatus::Failure; |
7915 | 0 | if (Imm < 0 || !isUInt<4>(Imm)) |
7916 | 0 | return Error(S, "invalid immediate: only 4-bit values are legal"); |
7917 | 0 | } |
7918 | | |
7919 | 0 | Operands.push_back( |
7920 | 0 | AMDGPUOperand::CreateImm(this, Imm, S, AMDGPUOperand::ImmTyGprIdxMode)); |
7921 | 0 | return ParseStatus::Success; |
7922 | 0 | } |
7923 | | |
7924 | 0 | bool AMDGPUOperand::isGPRIdxMode() const { |
7925 | 0 | return isImmTy(ImmTyGprIdxMode); |
7926 | 0 | } |
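 | | // Illustrative only (not from this file): a gpr_idx mode operand as parsed above
 | | //   s_set_gpr_idx_on s0, gpr_idx(SRC0, DST)
 | | // or a bare 4-bit immediate mask.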
7927 | | |
7928 | | //===----------------------------------------------------------------------===// |
7929 | | // sopp branch targets |
7930 | | //===----------------------------------------------------------------------===// |
7931 | | |
7932 | 0 | ParseStatus AMDGPUAsmParser::parseSOPPBrTarget(OperandVector &Operands) { |
7933 | | |
7934 | | // Make sure we are not parsing something |
7935 | | // that looks like a label or an expression but is not. |
7936 | | // This will improve error messages. |
7937 | 0 | if (isRegister() || isModifier()) |
7938 | 0 | return ParseStatus::NoMatch; |
7939 | | |
7940 | 0 | if (!parseExpr(Operands)) |
7941 | 0 | return ParseStatus::Failure; |
7942 | | |
7943 | 0 | AMDGPUOperand &Opr = ((AMDGPUOperand &)*Operands[Operands.size() - 1]); |
7944 | 0 | assert(Opr.isImm() || Opr.isExpr()); |
7945 | 0 | SMLoc Loc = Opr.getStartLoc(); |
7946 | | |
7947 | | // Currently we do not support arbitrary expressions as branch targets. |
7948 | | // Only labels and absolute expressions are accepted. |
7949 | 0 | if (Opr.isExpr() && !Opr.isSymbolRefExpr()) { |
7950 | 0 | Error(Loc, "expected an absolute expression or a label"); |
7951 | 0 | } else if (Opr.isImm() && !Opr.isS16Imm()) { |
7952 | 0 | Error(Loc, "expected a 16-bit signed jump offset"); |
7953 | 0 | } |
7954 | |
|
7955 | 0 | return ParseStatus::Success; |
7956 | 0 | } |
7957 | | |
7958 | | //===----------------------------------------------------------------------===// |
7959 | | // Boolean holding registers |
7960 | | //===----------------------------------------------------------------------===// |
7961 | | |
7962 | 0 | ParseStatus AMDGPUAsmParser::parseBoolReg(OperandVector &Operands) { |
7963 | 0 | return parseReg(Operands); |
7964 | 0 | } |
7965 | | |
7966 | | //===----------------------------------------------------------------------===// |
7967 | | // mubuf |
7968 | | //===----------------------------------------------------------------------===// |
7969 | | |
7970 | | void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst, |
7971 | | const OperandVector &Operands, |
7972 | 0 | bool IsAtomic) { |
7973 | 0 | OptionalImmIndexMap OptionalIdx; |
7974 | 0 | unsigned FirstOperandIdx = 1; |
7975 | 0 | bool IsAtomicReturn = false; |
7976 | |
|
7977 | 0 | if (IsAtomic) { |
7978 | 0 | for (unsigned i = FirstOperandIdx, e = Operands.size(); i != e; ++i) { |
7979 | 0 | AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]); |
7980 | 0 | if (!Op.isCPol()) |
7981 | 0 | continue; |
7982 | 0 | IsAtomicReturn = Op.getImm() & AMDGPU::CPol::GLC; |
7983 | 0 | break; |
7984 | 0 | } |
7985 | |
|
7986 | 0 | if (!IsAtomicReturn) { |
7987 | 0 | int NewOpc = AMDGPU::getAtomicNoRetOp(Inst.getOpcode()); |
7988 | 0 | if (NewOpc != -1) |
7989 | 0 | Inst.setOpcode(NewOpc); |
7990 | 0 | } |
7991 | |
|
7992 | 0 | IsAtomicReturn = MII.get(Inst.getOpcode()).TSFlags & |
7993 | 0 | SIInstrFlags::IsAtomicRet; |
7994 | 0 | } |
7995 | |
|
7996 | 0 | for (unsigned i = FirstOperandIdx, e = Operands.size(); i != e; ++i) { |
7997 | 0 | AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]); |
7998 | | |
7999 | | // Add the register arguments |
8000 | 0 | if (Op.isReg()) { |
8001 | 0 | Op.addRegOperands(Inst, 1); |
8002 | | // Insert a tied src for atomic return dst. |
8003 | | // This cannot be postponed as subsequent calls to |
8004 | | // addImmOperands rely on correct number of MC operands. |
8005 | 0 | if (IsAtomicReturn && i == FirstOperandIdx) |
8006 | 0 | Op.addRegOperands(Inst, 1); |
8007 | 0 | continue; |
8008 | 0 | } |
8009 | | |
8010 | | // Handle the case where soffset is an immediate |
8011 | 0 | if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) { |
8012 | 0 | Op.addImmOperands(Inst, 1); |
8013 | 0 | continue; |
8014 | 0 | } |
8015 | | |
8016 | | // Handle tokens like 'offen' which are sometimes hard-coded into the |
8017 | | // asm string. There are no MCInst operands for these. |
8018 | 0 | if (Op.isToken()) { |
8019 | 0 | continue; |
8020 | 0 | } |
8021 | 0 | assert(Op.isImm()); |
8022 | | |
8023 | | // Handle optional arguments |
8024 | 0 | OptionalIdx[Op.getImmTy()] = i; |
8025 | 0 | } |
8026 | |
|
8027 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset); |
8028 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyCPol, 0); |
8029 | 0 | } |
8030 | | |
8031 | | //===----------------------------------------------------------------------===// |
8032 | | // smrd |
8033 | | //===----------------------------------------------------------------------===// |
8034 | | |
8035 | 0 | bool AMDGPUOperand::isSMRDOffset8() const { |
8036 | 0 | return isImmLiteral() && isUInt<8>(getImm()); |
8037 | 0 | } |
8038 | | |
8039 | 0 | bool AMDGPUOperand::isSMEMOffset() const { |
8040 | | // Offset range is checked later by validator. |
8041 | 0 | return isImmLiteral(); |
8042 | 0 | } |
8043 | | |
8044 | 0 | bool AMDGPUOperand::isSMRDLiteralOffset() const { |
8045 | | // 32-bit literals are only supported on CI and we only want to use them |
8046 | | // when the offset does not fit in 8 bits.
8047 | 0 | return isImmLiteral() && !isUInt<8>(getImm()) && isUInt<32>(getImm()); |
8048 | 0 | } |
8049 | | |
8050 | | //===----------------------------------------------------------------------===// |
8051 | | // vop3 |
8052 | | //===----------------------------------------------------------------------===// |
8053 | | |
8054 | 0 | static bool ConvertOmodMul(int64_t &Mul) { |
8055 | 0 | if (Mul != 1 && Mul != 2 && Mul != 4) |
8056 | 0 | return false; |
8057 | | |
8058 | 0 | Mul >>= 1; |
8059 | 0 | return true; |
8060 | 0 | } |
8061 | | |
8062 | 0 | static bool ConvertOmodDiv(int64_t &Div) { |
8063 | 0 | if (Div == 1) { |
8064 | 0 | Div = 0; |
8065 | 0 | return true; |
8066 | 0 | } |
8067 | | |
8068 | 0 | if (Div == 2) { |
8069 | 0 | Div = 3; |
8070 | 0 | return true; |
8071 | 0 | } |
8072 | | |
8073 | 0 | return false; |
8074 | 0 | } |
8075 | | |
8076 | | // For pre-gfx11 targets, both bound_ctrl:0 and bound_ctrl:1 are encoded as 1. |
8077 | | // This is intentional and ensures compatibility with sp3. |
8078 | | // See bug 35397 for details. |
8079 | 0 | bool AMDGPUAsmParser::convertDppBoundCtrl(int64_t &BoundCtrl) { |
8080 | 0 | if (BoundCtrl == 0 || BoundCtrl == 1) { |
8081 | 0 | if (!isGFX11Plus()) |
8082 | 0 | BoundCtrl = 1; |
8083 | 0 | return true; |
8084 | 0 | } |
8085 | 0 | return false; |
8086 | 0 | } |
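 | | // Illustrative only (not from this file): bound_ctrl appears in DPP syntax like
 | | //   v_mov_b32_dpp v0, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf bound_ctrl:0
 | | // and, per the note above, bound_ctrl:0 and bound_ctrl:1 encode identically
 | | // on pre-gfx11 targets.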
8087 | | |
8088 | 0 | void AMDGPUAsmParser::onBeginOfFile() { |
8089 | 0 | if (!getParser().getStreamer().getTargetStreamer() || |
8090 | 0 | getSTI().getTargetTriple().getArch() == Triple::r600) |
8091 | 0 | return; |
8092 | | |
8093 | 0 | if (!getTargetStreamer().getTargetID()) |
8094 | 0 | getTargetStreamer().initializeTargetID(getSTI(), getSTI().getFeatureString(), |
8095 | | // TODO: Should try to check code object version from directive??? |
8096 | 0 | AMDGPU::getAmdhsaCodeObjectVersion()); |
8097 | |
|
8098 | 0 | if (isHsaAbi(getSTI())) |
8099 | 0 | getTargetStreamer().EmitDirectiveAMDGCNTarget(); |
8100 | 0 | } |
8101 | | |
8102 | 0 | ParseStatus AMDGPUAsmParser::parseOModSI(OperandVector &Operands) { |
8103 | 0 | StringRef Name = getTokenStr(); |
8104 | 0 | if (Name == "mul") { |
8105 | 0 | return parseIntWithPrefix("mul", Operands, |
8106 | 0 | AMDGPUOperand::ImmTyOModSI, ConvertOmodMul); |
8107 | 0 | } |
8108 | | |
8109 | 0 | if (Name == "div") { |
8110 | 0 | return parseIntWithPrefix("div", Operands, |
8111 | 0 | AMDGPUOperand::ImmTyOModSI, ConvertOmodDiv); |
8112 | 0 | } |
8113 | | |
8114 | 0 | return ParseStatus::NoMatch; |
8115 | 0 | } |
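 | | // Illustrative only (not from this file): omod modifiers as parsed above
 | | //   v_mul_f32_e64 v0, v1, v2 mul:2   // omod = 1 (ConvertOmodMul)
 | | //   v_mul_f32_e64 v0, v1, v2 div:2   // omod = 3 (ConvertOmodDiv)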
8116 | | |
8117 | | // Determines which bit DST_OP_SEL occupies in the op_sel operand according to |
8118 | | // the number of src operands present, then copies that bit into src0_modifiers. |
8119 | 0 | void cvtVOP3DstOpSelOnly(MCInst &Inst) { |
8120 | 0 | int Opc = Inst.getOpcode(); |
8121 | 0 | int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel); |
8122 | 0 | if (OpSelIdx == -1) |
8123 | 0 | return; |
8124 | | |
8125 | 0 | int SrcNum; |
8126 | 0 | const int Ops[] = { AMDGPU::OpName::src0, |
8127 | 0 | AMDGPU::OpName::src1, |
8128 | 0 | AMDGPU::OpName::src2 }; |
8129 | 0 | for (SrcNum = 0; SrcNum < 3 && AMDGPU::hasNamedOperand(Opc, Ops[SrcNum]); |
8130 | 0 | ++SrcNum) |
8131 | 0 | ; |
8132 | 0 | assert(SrcNum > 0); |
8133 | | |
8134 | 0 | unsigned OpSel = Inst.getOperand(OpSelIdx).getImm(); |
8135 | |
|
8136 | 0 | if ((OpSel & (1 << SrcNum)) != 0) { |
8137 | 0 | int ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers); |
8138 | 0 | uint32_t ModVal = Inst.getOperand(ModIdx).getImm(); |
8139 | 0 | Inst.getOperand(ModIdx).setImm(ModVal | SISrcMods::DST_OP_SEL); |
8140 | 0 | } |
8141 | 0 | } |
8142 | | |
8143 | | void AMDGPUAsmParser::cvtVOP3OpSel(MCInst &Inst, |
8144 | 0 | const OperandVector &Operands) { |
8145 | 0 | cvtVOP3P(Inst, Operands); |
8146 | 0 | cvtVOP3DstOpSelOnly(Inst); |
8147 | 0 | } |
8148 | | |
8149 | | void AMDGPUAsmParser::cvtVOP3OpSel(MCInst &Inst, const OperandVector &Operands, |
8150 | 0 | OptionalImmIndexMap &OptionalIdx) { |
8151 | 0 | cvtVOP3P(Inst, Operands, OptionalIdx); |
8152 | 0 | cvtVOP3DstOpSelOnly(Inst); |
8153 | 0 | } |
8154 | | |
8155 | 0 | static bool isRegOrImmWithInputMods(const MCInstrDesc &Desc, unsigned OpNum) { |
8156 | 0 | return |
8157 | | // 1. This operand is an input modifiers operand
8158 | 0 | Desc.operands()[OpNum].OperandType == AMDGPU::OPERAND_INPUT_MODS
8159 | | // 2. This is not the last operand
8160 | 0 | && Desc.NumOperands > (OpNum + 1)
8161 | | // 3. The next operand has a register class
8162 | 0 | && Desc.operands()[OpNum + 1].RegClass != -1
8163 | | // 4. The next register is not tied to any other operand
8164 | 0 | && Desc.getOperandConstraint(OpNum + 1, |
8165 | 0 | MCOI::OperandConstraint::TIED_TO) == -1; |
8166 | 0 | } |
8167 | | |
8168 | | void AMDGPUAsmParser::cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands) |
8169 | 0 | { |
8170 | 0 | OptionalImmIndexMap OptionalIdx; |
8171 | 0 | unsigned Opc = Inst.getOpcode(); |
8172 | |
|
8173 | 0 | unsigned I = 1; |
8174 | 0 | const MCInstrDesc &Desc = MII.get(Inst.getOpcode()); |
8175 | 0 | for (unsigned J = 0; J < Desc.getNumDefs(); ++J) { |
8176 | 0 | ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1); |
8177 | 0 | } |
8178 | |
|
8179 | 0 | for (unsigned E = Operands.size(); I != E; ++I) { |
8180 | 0 | AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]); |
8181 | 0 | if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) { |
8182 | 0 | Op.addRegOrImmWithFPInputModsOperands(Inst, 2); |
8183 | 0 | } else if (Op.isInterpSlot() || Op.isInterpAttr() || |
8184 | 0 | Op.isInterpAttrChan()) { |
8185 | 0 | Inst.addOperand(MCOperand::createImm(Op.getImm())); |
8186 | 0 | } else if (Op.isImmModifier()) { |
8187 | 0 | OptionalIdx[Op.getImmTy()] = I; |
8188 | 0 | } else { |
8189 | 0 | llvm_unreachable("unhandled operand type"); |
8190 | 0 | } |
8191 | 0 | } |
8192 | |
|
8193 | 0 | if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::high)) |
8194 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, |
8195 | 0 | AMDGPUOperand::ImmTyHigh); |
8196 | |
|
8197 | 0 | if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::clamp)) |
8198 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, |
8199 | 0 | AMDGPUOperand::ImmTyClampSI); |
8200 | |
|
8201 | 0 | if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::omod)) |
8202 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, |
8203 | 0 | AMDGPUOperand::ImmTyOModSI); |
8204 | 0 | } |
8205 | | |
8206 | | void AMDGPUAsmParser::cvtVINTERP(MCInst &Inst, const OperandVector &Operands) |
8207 | 0 | { |
8208 | 0 | OptionalImmIndexMap OptionalIdx; |
8209 | 0 | unsigned Opc = Inst.getOpcode(); |
8210 | |
|
8211 | 0 | unsigned I = 1; |
8212 | 0 | const MCInstrDesc &Desc = MII.get(Inst.getOpcode()); |
8213 | 0 | for (unsigned J = 0; J < Desc.getNumDefs(); ++J) { |
8214 | 0 | ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1); |
8215 | 0 | } |
8216 | |
|
8217 | 0 | for (unsigned E = Operands.size(); I != E; ++I) { |
8218 | 0 | AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]); |
8219 | 0 | if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) { |
8220 | 0 | Op.addRegOrImmWithFPInputModsOperands(Inst, 2); |
8221 | 0 | } else if (Op.isImmModifier()) { |
8222 | 0 | OptionalIdx[Op.getImmTy()] = I; |
8223 | 0 | } else { |
8224 | 0 | llvm_unreachable("unhandled operand type"); |
8225 | 0 | } |
8226 | 0 | } |
8227 | |
|
8228 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI); |
8229 | |
|
8230 | 0 | int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel); |
8231 | 0 | if (OpSelIdx != -1) |
8232 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOpSel); |
8233 | |
|
8234 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyWaitEXP); |
8235 | |
|
8236 | 0 | if (OpSelIdx == -1) |
8237 | 0 | return; |
8238 | | |
8239 | 0 | const int Ops[] = { AMDGPU::OpName::src0, |
8240 | 0 | AMDGPU::OpName::src1, |
8241 | 0 | AMDGPU::OpName::src2 }; |
8242 | 0 | const int ModOps[] = { AMDGPU::OpName::src0_modifiers, |
8243 | 0 | AMDGPU::OpName::src1_modifiers, |
8244 | 0 | AMDGPU::OpName::src2_modifiers }; |
8245 | |
|
8246 | 0 | unsigned OpSel = Inst.getOperand(OpSelIdx).getImm(); |
8247 | |
|
8248 | 0 | for (int J = 0; J < 3; ++J) { |
8249 | 0 | int OpIdx = AMDGPU::getNamedOperandIdx(Opc, Ops[J]); |
8250 | 0 | if (OpIdx == -1) |
8251 | 0 | break; |
8252 | | |
8253 | 0 | int ModIdx = AMDGPU::getNamedOperandIdx(Opc, ModOps[J]); |
8254 | 0 | uint32_t ModVal = Inst.getOperand(ModIdx).getImm(); |
8255 | |
|
8256 | 0 | if ((OpSel & (1 << J)) != 0) |
8257 | 0 | ModVal |= SISrcMods::OP_SEL_0; |
8258 | 0 | if (ModOps[J] == AMDGPU::OpName::src0_modifiers && |
8259 | 0 | (OpSel & (1 << 3)) != 0) |
8260 | 0 | ModVal |= SISrcMods::DST_OP_SEL; |
8261 | |
|
8262 | 0 | Inst.getOperand(ModIdx).setImm(ModVal); |
8263 | 0 | } |
8264 | 0 | } |
8265 | | |
8266 | | void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands, |
8267 | 0 | OptionalImmIndexMap &OptionalIdx) { |
8268 | 0 | unsigned Opc = Inst.getOpcode(); |
8269 | |
|
8270 | 0 | unsigned I = 1; |
8271 | 0 | const MCInstrDesc &Desc = MII.get(Inst.getOpcode()); |
8272 | 0 | for (unsigned J = 0; J < Desc.getNumDefs(); ++J) { |
8273 | 0 | ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1); |
8274 | 0 | } |
8275 | |
|
8276 | 0 | for (unsigned E = Operands.size(); I != E; ++I) { |
8277 | 0 | AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]); |
8278 | 0 | if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) { |
8279 | 0 | Op.addRegOrImmWithFPInputModsOperands(Inst, 2); |
8280 | 0 | } else if (Op.isImmModifier()) { |
8281 | 0 | OptionalIdx[Op.getImmTy()] = I; |
8282 | 0 | } else if (Op.isRegOrImm()) { |
8283 | 0 | Op.addRegOrImmOperands(Inst, 1); |
8284 | 0 | } else { |
8285 | 0 | llvm_unreachable("unhandled operand type"); |
8286 | 0 | } |
8287 | 0 | } |
8288 | |
|
8289 | 0 | if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::clamp)) |
8290 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, |
8291 | 0 | AMDGPUOperand::ImmTyClampSI); |
8292 | |
|
8293 | 0 | if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::omod)) |
8294 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, |
8295 | 0 | AMDGPUOperand::ImmTyOModSI); |
8296 | | |
8297 | | // Special case v_mac_{f16, f32} and v_fmac_{f16, f32} (gfx906/gfx10+):
8298 | | // they have a src2 register operand that is tied to the dst operand.
8299 | | // We don't allow modifiers for this operand in the assembler, so
8300 | | // src2_modifiers should be 0.
8301 | 0 | if (isMAC(Opc)) { |
8302 | 0 | auto it = Inst.begin(); |
8303 | 0 | std::advance(it, AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2_modifiers)); |
8304 | 0 | it = Inst.insert(it, MCOperand::createImm(0)); // no modifiers for src2 |
8305 | 0 | ++it; |
8306 | | // Copy the operand to ensure it's not invalidated when Inst grows. |
8307 | 0 | Inst.insert(it, MCOperand(Inst.getOperand(0))); // src2 = dst |
8308 | 0 | } |
8309 | 0 | } |
8310 | | |
8311 | 0 | void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) { |
8312 | 0 | OptionalImmIndexMap OptionalIdx; |
8313 | 0 | cvtVOP3(Inst, Operands, OptionalIdx); |
8314 | 0 | } |
8315 | | |
8316 | | void AMDGPUAsmParser::cvtVOP3P(MCInst &Inst, const OperandVector &Operands, |
8317 | 0 | OptionalImmIndexMap &OptIdx) { |
8318 | 0 | const int Opc = Inst.getOpcode(); |
8319 | 0 | const MCInstrDesc &Desc = MII.get(Opc); |
8320 | |
|
8321 | 0 | const bool IsPacked = (Desc.TSFlags & SIInstrFlags::IsPacked) != 0; |
8322 | |
|
8323 | 0 | if (Opc == AMDGPU::V_CVT_SR_BF8_F32_vi || |
8324 | 0 | Opc == AMDGPU::V_CVT_SR_FP8_F32_vi) { |
8325 | 0 | Inst.addOperand(MCOperand::createImm(0)); // Placeholder for src2_mods |
8326 | 0 | Inst.addOperand(Inst.getOperand(0)); |
8327 | 0 | } |
8328 | |
|
8329 | 0 | if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::vdst_in)) { |
8330 | 0 | assert(!IsPacked); |
8331 | 0 | Inst.addOperand(Inst.getOperand(0)); |
8332 | 0 | } |
8333 | | |
8334 | | // FIXME: This is messy. Parse the modifiers as if it were a normal VOP3
8335 | | // instruction, and then figure out where to actually put the modifiers.
8336 | | |
8337 | 0 | int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel); |
8338 | 0 | if (OpSelIdx != -1) { |
8339 | 0 | addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSel); |
8340 | 0 | } |
8341 | |
|
8342 | 0 | int OpSelHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel_hi); |
8343 | 0 | if (OpSelHiIdx != -1) { |
8344 | 0 | int DefaultVal = IsPacked ? -1 : 0; |
8345 | 0 | addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSelHi, |
8346 | 0 | DefaultVal); |
8347 | 0 | } |
8348 | |
|
8349 | 0 | int NegLoIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::neg_lo); |
8350 | 0 | if (NegLoIdx != -1) { |
8351 | 0 | addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegLo); |
8352 | 0 | addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegHi); |
8353 | 0 | } |
8354 | |
|
8355 | 0 | const int Ops[] = { AMDGPU::OpName::src0, |
8356 | 0 | AMDGPU::OpName::src1, |
8357 | 0 | AMDGPU::OpName::src2 }; |
8358 | 0 | const int ModOps[] = { AMDGPU::OpName::src0_modifiers, |
8359 | 0 | AMDGPU::OpName::src1_modifiers, |
8360 | 0 | AMDGPU::OpName::src2_modifiers }; |
8361 | |
|
8362 | 0 | unsigned OpSel = 0; |
8363 | 0 | unsigned OpSelHi = 0; |
8364 | 0 | unsigned NegLo = 0; |
8365 | 0 | unsigned NegHi = 0; |
8366 | |
|
8367 | 0 | if (OpSelIdx != -1) |
8368 | 0 | OpSel = Inst.getOperand(OpSelIdx).getImm(); |
8369 | |
|
8370 | 0 | if (OpSelHiIdx != -1) |
8371 | 0 | OpSelHi = Inst.getOperand(OpSelHiIdx).getImm(); |
8372 | |
|
8373 | 0 | if (NegLoIdx != -1) { |
8374 | 0 | int NegHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::neg_hi); |
8375 | 0 | NegLo = Inst.getOperand(NegLoIdx).getImm(); |
8376 | 0 | NegHi = Inst.getOperand(NegHiIdx).getImm(); |
8377 | 0 | } |
8378 | |
|
8379 | 0 | for (int J = 0; J < 3; ++J) { |
8380 | 0 | int OpIdx = AMDGPU::getNamedOperandIdx(Opc, Ops[J]); |
8381 | 0 | if (OpIdx == -1) |
8382 | 0 | break; |
8383 | | |
8384 | 0 | int ModIdx = AMDGPU::getNamedOperandIdx(Opc, ModOps[J]); |
8385 | |
|
8386 | 0 | if (ModIdx == -1) |
8387 | 0 | continue; |
8388 | | |
8389 | 0 | uint32_t ModVal = 0; |
8390 | |
|
8391 | 0 | if ((OpSel & (1 << J)) != 0) |
8392 | 0 | ModVal |= SISrcMods::OP_SEL_0; |
8393 | |
|
8394 | 0 | if ((OpSelHi & (1 << J)) != 0) |
8395 | 0 | ModVal |= SISrcMods::OP_SEL_1; |
8396 | |
|
8397 | 0 | if ((NegLo & (1 << J)) != 0) |
8398 | 0 | ModVal |= SISrcMods::NEG; |
8399 | |
|
8400 | 0 | if ((NegHi & (1 << J)) != 0) |
8401 | 0 | ModVal |= SISrcMods::NEG_HI; |
8402 | |
|
8403 | 0 | Inst.getOperand(ModIdx).setImm(Inst.getOperand(ModIdx).getImm() | ModVal); |
8404 | 0 | } |
8405 | 0 | } |
8406 | | |
8407 | 0 | void AMDGPUAsmParser::cvtVOP3P(MCInst &Inst, const OperandVector &Operands) { |
8408 | 0 | OptionalImmIndexMap OptIdx; |
8409 | 0 | cvtVOP3(Inst, Operands, OptIdx); |
8410 | 0 | cvtVOP3P(Inst, Operands, OptIdx); |
8411 | 0 | } |
8412 | | |
8413 | | //===----------------------------------------------------------------------===// |
8414 | | // VOPD |
8415 | | //===----------------------------------------------------------------------===// |
8416 | | |
8417 | 0 | ParseStatus AMDGPUAsmParser::parseVOPD(OperandVector &Operands) { |
8418 | 0 | if (!hasVOPD(getSTI())) |
8419 | 0 | return ParseStatus::NoMatch; |
8420 | | |
8421 | 0 | if (isToken(AsmToken::Colon) && peekToken(false).is(AsmToken::Colon)) { |
8422 | 0 | SMLoc S = getLoc(); |
8423 | 0 | lex(); |
8424 | 0 | lex(); |
8425 | 0 | Operands.push_back(AMDGPUOperand::CreateToken(this, "::", S)); |
8426 | 0 | SMLoc OpYLoc = getLoc(); |
8427 | 0 | StringRef OpYName; |
8428 | 0 | if (isToken(AsmToken::Identifier) && !Parser.parseIdentifier(OpYName)) { |
8429 | 0 | Operands.push_back(AMDGPUOperand::CreateToken(this, OpYName, OpYLoc)); |
8430 | 0 | return ParseStatus::Success; |
8431 | 0 | } |
8432 | 0 | return Error(OpYLoc, "expected a VOPDY instruction after ::"); |
8433 | 0 | } |
8434 | 0 | return ParseStatus::NoMatch; |
8435 | 0 | } |
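 | | // Illustrative only (not from this file): the '::' separator parsed above joins
 | | // the two halves of a dual-issue instruction, e.g.
 | | //   v_dual_mov_b32 v0, v2 :: v_dual_add_f32 v1, v3, v4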
8436 | | |
8437 | | // Create VOPD MCInst operands using parsed assembler operands. |
8438 | 0 | void AMDGPUAsmParser::cvtVOPD(MCInst &Inst, const OperandVector &Operands) { |
8439 | 0 | auto addOp = [&](uint16_t ParsedOprIdx) {
8440 | 0 | AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[ParsedOprIdx]); |
8441 | 0 | if (Op.isReg()) { |
8442 | 0 | Op.addRegOperands(Inst, 1); |
8443 | 0 | return; |
8444 | 0 | } |
8445 | 0 | if (Op.isImm()) { |
8446 | 0 | Op.addImmOperands(Inst, 1); |
8447 | 0 | return; |
8448 | 0 | } |
8449 | 0 | llvm_unreachable("Unhandled operand type in cvtVOPD"); |
8450 | 0 | }; |
8451 | |
|
8452 | 0 | const auto &InstInfo = getVOPDInstInfo(Inst.getOpcode(), &MII); |
8453 | | |
8454 | | // MCInst operands are ordered as follows: |
8455 | | // dstX, dstY, src0X [, other OpX operands], src0Y [, other OpY operands] |
8456 | |
|
8457 | 0 | for (auto CompIdx : VOPD::COMPONENTS) { |
8458 | 0 | addOp(InstInfo[CompIdx].getIndexOfDstInParsedOperands()); |
8459 | 0 | } |
8460 | |
|
8461 | 0 | for (auto CompIdx : VOPD::COMPONENTS) { |
8462 | 0 | const auto &CInfo = InstInfo[CompIdx]; |
8463 | 0 | auto CompSrcOperandsNum = InstInfo[CompIdx].getCompParsedSrcOperandsNum(); |
8464 | 0 | for (unsigned CompSrcIdx = 0; CompSrcIdx < CompSrcOperandsNum; ++CompSrcIdx) |
8465 | 0 | addOp(CInfo.getIndexOfSrcInParsedOperands(CompSrcIdx)); |
8466 | 0 | if (CInfo.hasSrc2Acc()) |
8467 | 0 | addOp(CInfo.getIndexOfDstInParsedOperands()); |
8468 | 0 | } |
8469 | 0 | } |
8470 | | |
8471 | | //===----------------------------------------------------------------------===// |
8472 | | // dpp |
8473 | | //===----------------------------------------------------------------------===// |
8474 | | |
8475 | 0 | bool AMDGPUOperand::isDPP8() const { |
8476 | 0 | return isImmTy(ImmTyDPP8); |
8477 | 0 | } |
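 | | // Illustrative only (not from this file): a dpp8 lane-select operand looks like
 | | //   v_mov_b32_dpp v0, v1 dpp8:[7,6,5,4,3,2,1,0]
 | | // (eight 3-bit selects, parsed by parseDPP8 below).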
8478 | | |
8479 | 0 | bool AMDGPUOperand::isDPPCtrl() const { |
8480 | 0 | using namespace AMDGPU::DPP; |
8481 | |
|
8482 | 0 | bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm()); |
8483 | 0 | if (result) { |
8484 | 0 | int64_t Imm = getImm(); |
8485 | 0 | return (Imm >= DppCtrl::QUAD_PERM_FIRST && Imm <= DppCtrl::QUAD_PERM_LAST) || |
8486 | 0 | (Imm >= DppCtrl::ROW_SHL_FIRST && Imm <= DppCtrl::ROW_SHL_LAST) || |
8487 | 0 | (Imm >= DppCtrl::ROW_SHR_FIRST && Imm <= DppCtrl::ROW_SHR_LAST) || |
8488 | 0 | (Imm >= DppCtrl::ROW_ROR_FIRST && Imm <= DppCtrl::ROW_ROR_LAST) || |
8489 | 0 | (Imm == DppCtrl::WAVE_SHL1) || |
8490 | 0 | (Imm == DppCtrl::WAVE_ROL1) || |
8491 | 0 | (Imm == DppCtrl::WAVE_SHR1) || |
8492 | 0 | (Imm == DppCtrl::WAVE_ROR1) || |
8493 | 0 | (Imm == DppCtrl::ROW_MIRROR) || |
8494 | 0 | (Imm == DppCtrl::ROW_HALF_MIRROR) || |
8495 | 0 | (Imm == DppCtrl::BCAST15) || |
8496 | 0 | (Imm == DppCtrl::BCAST31) || |
8497 | 0 | (Imm >= DppCtrl::ROW_SHARE_FIRST && Imm <= DppCtrl::ROW_SHARE_LAST) || |
8498 | 0 | (Imm >= DppCtrl::ROW_XMASK_FIRST && Imm <= DppCtrl::ROW_XMASK_LAST); |
8499 | 0 | } |
8500 | 0 | return false; |
8501 | 0 | } |
8502 | | |
8503 | | //===----------------------------------------------------------------------===// |
8504 | | // mAI |
8505 | | //===----------------------------------------------------------------------===// |
8506 | | |
8507 | 0 | bool AMDGPUOperand::isBLGP() const { |
8508 | 0 | return isImm() && getImmTy() == ImmTyBLGP && isUInt<3>(getImm()); |
8509 | 0 | } |
8510 | | |
8511 | 0 | bool AMDGPUOperand::isCBSZ() const { |
8512 | 0 | return isImm() && getImmTy() == ImmTyCBSZ && isUInt<3>(getImm()); |
8513 | 0 | } |
8514 | | |
8515 | 0 | bool AMDGPUOperand::isABID() const { |
8516 | 0 | return isImm() && getImmTy() == ImmTyABID && isUInt<4>(getImm()); |
8517 | 0 | } |
8518 | | |
8519 | 0 | bool AMDGPUOperand::isS16Imm() const { |
8520 | 0 | return isImmLiteral() && (isInt<16>(getImm()) || isUInt<16>(getImm())); |
8521 | 0 | } |
8522 | | |
8523 | 0 | bool AMDGPUOperand::isU16Imm() const { |
8524 | 0 | return isImmLiteral() && isUInt<16>(getImm()); |
8525 | 0 | } |
8526 | | |
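A minimal sketch of the 16-bit literal check used by isS16Imm() above: a literal is accepted if it fits either a signed or an unsigned 16-bit integer, i.e. anything in -32768..65535. This is a standalone illustration, not the operand class itself.

#include <cstdint>
#include <cstdio>

static bool fitsS16OrU16(int64_t V) {
  return (V >= INT16_MIN && V <= INT16_MAX) || (V >= 0 && V <= UINT16_MAX);
}

int main() {
  std::printf("%d %d %d\n", fitsS16OrU16(-32768), fitsS16OrU16(65535),
              fitsS16OrU16(65536));  // prints: 1 1 0
  return 0;
}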
8527 | | //===----------------------------------------------------------------------===// |
8528 | | // dim |
8529 | | //===----------------------------------------------------------------------===// |
8530 | | |
8531 | 0 | bool AMDGPUAsmParser::parseDimId(unsigned &Encoding) { |
8532 | | // We want to allow "dim:1D" etc., |
8533 | | // but the initial 1 is tokenized as an integer. |
8534 | 0 | std::string Token; |
8535 | 0 | if (isToken(AsmToken::Integer)) { |
8536 | 0 | SMLoc Loc = getToken().getEndLoc(); |
8537 | 0 | Token = std::string(getTokenStr()); |
8538 | 0 | lex(); |
8539 | 0 | if (getLoc() != Loc) |
8540 | 0 | return false; |
8541 | 0 | } |
8542 | | |
8543 | 0 | StringRef Suffix; |
8544 | 0 | if (!parseId(Suffix)) |
8545 | 0 | return false; |
8546 | 0 | Token += Suffix; |
8547 | |
8548 | 0 | StringRef DimId = Token; |
8549 | 0 | if (DimId.starts_with("SQ_RSRC_IMG_")) |
8550 | 0 | DimId = DimId.drop_front(12); |
8551 | |
8552 | 0 | const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfoByAsmSuffix(DimId); |
8553 | 0 | if (!DimInfo) |
8554 | 0 | return false; |
8555 | | |
8556 | 0 | Encoding = DimInfo->Encoding; |
8557 | 0 | return true; |
8558 | 0 | } |
8559 | | |
8560 | 0 | ParseStatus AMDGPUAsmParser::parseDim(OperandVector &Operands) { |
8561 | 0 | if (!isGFX10Plus()) |
8562 | 0 | return ParseStatus::NoMatch; |
8563 | | |
8564 | 0 | SMLoc S = getLoc(); |
8565 | |
8566 | 0 | if (!trySkipId("dim", AsmToken::Colon)) |
8567 | 0 | return ParseStatus::NoMatch; |
8568 | | |
8569 | 0 | unsigned Encoding; |
8570 | 0 | SMLoc Loc = getLoc(); |
8571 | 0 | if (!parseDimId(Encoding)) |
8572 | 0 | return Error(Loc, "invalid dim value"); |
8573 | | |
8574 | 0 | Operands.push_back(AMDGPUOperand::CreateImm(this, Encoding, S, |
8575 | 0 | AMDGPUOperand::ImmTyDim)); |
8576 | 0 | return ParseStatus::Success; |
8577 | 0 | } |
8578 | | |
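As the comments in parseDimId() note, the dim value may be written either as a bare suffix (dim:2D) or with the SQ_RSRC_IMG_ prefix (dim:SQ_RSRC_IMG_2D); the prefix is simply stripped before the lookup. Below is a simplified analogue, with an illustrative lookup table standing in for getMIMGDimInfoByAsmSuffix() and made-up encodings.

#include <cstdio>
#include <map>
#include <string>

static bool lookupDim(std::string DimId, unsigned &Encoding) {
  // Illustrative table only; the real encodings come from the MIMG dim info.
  static const std::map<std::string, unsigned> Table = {
      {"1D", 0}, {"2D", 1}, {"3D", 2}, {"CUBE", 3}};
  const std::string Prefix = "SQ_RSRC_IMG_";
  if (DimId.rfind(Prefix, 0) == 0)  // starts_with
    DimId = DimId.substr(Prefix.size());
  auto It = Table.find(DimId);
  if (It == Table.end())
    return false;
  Encoding = It->second;
  return true;
}

int main() {
  unsigned Enc = 0;
  if (lookupDim("SQ_RSRC_IMG_2D", Enc))
    std::printf("encoding %u\n", Enc);  // same result as lookupDim("2D", Enc)
  return 0;
}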
8579 | | //===----------------------------------------------------------------------===// |
8580 | | // dpp |
8581 | | //===----------------------------------------------------------------------===// |
8582 | | |
8583 | 0 | ParseStatus AMDGPUAsmParser::parseDPP8(OperandVector &Operands) { |
8584 | 0 | SMLoc S = getLoc(); |
8585 | |
8586 | 0 | if (!isGFX10Plus() || !trySkipId("dpp8", AsmToken::Colon)) |
8587 | 0 | return ParseStatus::NoMatch; |
8588 | | |
8589 | | // dpp8:[%d,%d,%d,%d,%d,%d,%d,%d] |
8590 | | |
8591 | 0 | int64_t Sels[8]; |
8592 | |
8593 | 0 | if (!skipToken(AsmToken::LBrac, "expected an opening square bracket")) |
8594 | 0 | return ParseStatus::Failure; |
8595 | | |
8596 | 0 | for (size_t i = 0; i < 8; ++i) { |
8597 | 0 | if (i > 0 && !skipToken(AsmToken::Comma, "expected a comma")) |
8598 | 0 | return ParseStatus::Failure; |
8599 | | |
8600 | 0 | SMLoc Loc = getLoc(); |
8601 | 0 | if (getParser().parseAbsoluteExpression(Sels[i])) |
8602 | 0 | return ParseStatus::Failure; |
8603 | 0 | if (0 > Sels[i] || 7 < Sels[i]) |
8604 | 0 | return Error(Loc, "expected a 3-bit value"); |
8605 | 0 | } |
8606 | | |
8607 | 0 | if (!skipToken(AsmToken::RBrac, "expected a closing square bracket")) |
8608 | 0 | return ParseStatus::Failure; |
8609 | | |
8610 | 0 | unsigned DPP8 = 0; |
8611 | 0 | for (size_t i = 0; i < 8; ++i) |
8612 | 0 | DPP8 |= (Sels[i] << (i * 3)); |
8613 | |
8614 | 0 | Operands.push_back(AMDGPUOperand::CreateImm(this, DPP8, S, AMDGPUOperand::ImmTyDPP8)); |
8615 | 0 | return ParseStatus::Success; |
8616 | 0 | } |
8617 | | |
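parseDPP8() above reads eight lane selects, each restricted to 0..7, and ORs them into a single immediate three bits apiece. A self-contained sketch of just that packing step:

#include <cassert>
#include <cstdint>
#include <cstdio>

static uint32_t packDpp8(const int64_t Sels[8]) {
  uint32_t DPP8 = 0;
  for (size_t i = 0; i < 8; ++i) {
    assert(Sels[i] >= 0 && Sels[i] <= 7 && "each select is a 3-bit value");
    DPP8 |= static_cast<uint32_t>(Sels[i]) << (i * 3);
  }
  return DPP8;
}

int main() {
  // dpp8:[7,6,5,4,3,2,1,0] reverses the lanes within each group of eight.
  const int64_t Sels[8] = {7, 6, 5, 4, 3, 2, 1, 0};
  std::printf("0x%06x\n", packDpp8(Sels));  // prints: 0x053977
  return 0;
}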
8618 | | bool |
8619 | | AMDGPUAsmParser::isSupportedDPPCtrl(StringRef Ctrl, |
8620 | 0 | const OperandVector &Operands) { |
8621 | 0 | if (Ctrl == "row_newbcast") |
8622 | 0 | return isGFX90A(); |
8623 | | |
8624 | 0 | if (Ctrl == "row_share" || |
8625 | 0 | Ctrl == "row_xmask") |
8626 | 0 | return isGFX10Plus(); |
8627 | | |
8628 | 0 | if (Ctrl == "wave_shl" || |
8629 | 0 | Ctrl == "wave_shr" || |
8630 | 0 | Ctrl == "wave_rol" || |
8631 | 0 | Ctrl == "wave_ror" || |
8632 | 0 | Ctrl == "row_bcast") |
8633 | 0 | return isVI() || isGFX9(); |
8634 | | |
8635 | 0 | return Ctrl == "row_mirror" || |
8636 | 0 | Ctrl == "row_half_mirror" || |
8637 | 0 | Ctrl == "quad_perm" || |
8638 | 0 | Ctrl == "row_shl" || |
8639 | 0 | Ctrl == "row_shr" || |
8640 | 0 | Ctrl == "row_ror"; |
8641 | 0 | } |
8642 | | |
8643 | | int64_t |
8644 | 0 | AMDGPUAsmParser::parseDPPCtrlPerm() { |
8645 | | // quad_perm:[%d,%d,%d,%d] |
8646 | |
8647 | 0 | if (!skipToken(AsmToken::LBrac, "expected an opening square bracket")) |
8648 | 0 | return -1; |
8649 | | |
8650 | 0 | int64_t Val = 0; |
8651 | 0 | for (int i = 0; i < 4; ++i) { |
8652 | 0 | if (i > 0 && !skipToken(AsmToken::Comma, "expected a comma")) |
8653 | 0 | return -1; |
8654 | | |
8655 | 0 | int64_t Temp; |
8656 | 0 | SMLoc Loc = getLoc(); |
8657 | 0 | if (getParser().parseAbsoluteExpression(Temp)) |
8658 | 0 | return -1; |
8659 | 0 | if (Temp < 0 || Temp > 3) { |
8660 | 0 | Error(Loc, "expected a 2-bit value"); |
8661 | 0 | return -1; |
8662 | 0 | } |
8663 | | |
8664 | 0 | Val += (Temp << i * 2); |
8665 | 0 | } |
8666 | | |
8667 | 0 | if (!skipToken(AsmToken::RBrac, "expected a closing square bracket")) |
8668 | 0 | return -1; |
8669 | | |
8670 | 0 | return Val; |
8671 | 0 | } |
8672 | | |
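parseDPPCtrlPerm() above accepts four 2-bit lane selects and packs them two bits apiece, so quad_perm:[0,1,2,3] (the identity permutation) encodes as 0xe4, the same value cvtVOP3DPP() later uses as the dpp_ctrl default. A standalone sketch of the packing:

#include <cassert>
#include <cstdint>
#include <cstdio>

static int64_t packQuadPerm(const int64_t Lanes[4]) {
  int64_t Val = 0;
  for (int i = 0; i < 4; ++i) {
    assert(Lanes[i] >= 0 && Lanes[i] <= 3 && "each select is a 2-bit value");
    Val += Lanes[i] << (i * 2);
  }
  return Val;
}

int main() {
  const int64_t Identity[4] = {0, 1, 2, 3};
  std::printf("0x%02x\n", (unsigned)packQuadPerm(Identity));  // prints: 0xe4
  return 0;
}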
8673 | | int64_t |
8674 | 0 | AMDGPUAsmParser::parseDPPCtrlSel(StringRef Ctrl) { |
8675 | 0 | using namespace AMDGPU::DPP; |
8676 | | |
8677 | | // sel:%d |
8678 | |
8679 | 0 | int64_t Val; |
8680 | 0 | SMLoc Loc = getLoc(); |
8681 | |
8682 | 0 | if (getParser().parseAbsoluteExpression(Val)) |
8683 | 0 | return -1; |
8684 | | |
8685 | 0 | struct DppCtrlCheck { |
8686 | 0 | int64_t Ctrl; |
8687 | 0 | int Lo; |
8688 | 0 | int Hi; |
8689 | 0 | }; |
8690 | |
8691 | 0 | DppCtrlCheck Check = StringSwitch<DppCtrlCheck>(Ctrl) |
8692 | 0 | .Case("wave_shl", {DppCtrl::WAVE_SHL1, 1, 1}) |
8693 | 0 | .Case("wave_rol", {DppCtrl::WAVE_ROL1, 1, 1}) |
8694 | 0 | .Case("wave_shr", {DppCtrl::WAVE_SHR1, 1, 1}) |
8695 | 0 | .Case("wave_ror", {DppCtrl::WAVE_ROR1, 1, 1}) |
8696 | 0 | .Case("row_shl", {DppCtrl::ROW_SHL0, 1, 15}) |
8697 | 0 | .Case("row_shr", {DppCtrl::ROW_SHR0, 1, 15}) |
8698 | 0 | .Case("row_ror", {DppCtrl::ROW_ROR0, 1, 15}) |
8699 | 0 | .Case("row_share", {DppCtrl::ROW_SHARE_FIRST, 0, 15}) |
8700 | 0 | .Case("row_xmask", {DppCtrl::ROW_XMASK_FIRST, 0, 15}) |
8701 | 0 | .Case("row_newbcast", {DppCtrl::ROW_NEWBCAST_FIRST, 0, 15}) |
8702 | 0 | .Default({-1, 0, 0}); |
8703 | |
8704 | 0 | bool Valid; |
8705 | 0 | if (Check.Ctrl == -1) { |
8706 | 0 | Valid = (Ctrl == "row_bcast" && (Val == 15 || Val == 31)); |
8707 | 0 | Val = (Val == 15)? DppCtrl::BCAST15 : DppCtrl::BCAST31; |
8708 | 0 | } else { |
8709 | 0 | Valid = Check.Lo <= Val && Val <= Check.Hi; |
8710 | 0 | Val = (Check.Lo == Check.Hi) ? Check.Ctrl : (Check.Ctrl | Val); |
8711 | 0 | } |
8712 | |
8713 | 0 | if (!Valid) { |
8714 | 0 | Error(Loc, Twine("invalid ", Ctrl) + Twine(" value")); |
8715 | 0 | return -1; |
8716 | 0 | } |
8717 | | |
8718 | 0 | return Val; |
8719 | 0 | } |
8720 | | |
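parseDPPCtrlSel() above folds the numeric selector into a base control value: controls with a single legal selector (wave_shl and friends) keep the base encoding, while ranged controls (row_shl, row_share, ...) OR the selector into it. The sketch below mirrors only that rule; the base values are placeholders, not the real DppCtrl encodings.

#include <cstdint>
#include <cstdio>

struct CtrlCheck {
  int64_t Base;  // encoding with a zero selector (placeholder value)
  int Lo, Hi;    // accepted selector range
};

static int64_t foldSel(const CtrlCheck &Check, int64_t Val) {
  if (Val < Check.Lo || Val > Check.Hi)
    return -1;  // out of range; the parser reports "invalid ... value"
  return (Check.Lo == Check.Hi) ? Check.Base : (Check.Base | Val);
}

int main() {
  const CtrlCheck RowShl = {0x100, 1, 15};  // hypothetical row_shl base
  const CtrlCheck WaveShl = {0x130, 1, 1};  // hypothetical wave_shl base
  std::printf("row_shl:3  -> 0x%llx\n", (long long)foldSel(RowShl, 3));   // 0x103
  std::printf("wave_shl:1 -> 0x%llx\n", (long long)foldSel(WaveShl, 1));  // 0x130
  return 0;
}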
8721 | 0 | ParseStatus AMDGPUAsmParser::parseDPPCtrl(OperandVector &Operands) { |
8722 | 0 | using namespace AMDGPU::DPP; |
8723 | |
8724 | 0 | if (!isToken(AsmToken::Identifier) || |
8725 | 0 | !isSupportedDPPCtrl(getTokenStr(), Operands)) |
8726 | 0 | return ParseStatus::NoMatch; |
8727 | | |
8728 | 0 | SMLoc S = getLoc(); |
8729 | 0 | int64_t Val = -1; |
8730 | 0 | StringRef Ctrl; |
8731 | |
8732 | 0 | parseId(Ctrl); |
8733 | |
8734 | 0 | if (Ctrl == "row_mirror") { |
8735 | 0 | Val = DppCtrl::ROW_MIRROR; |
8736 | 0 | } else if (Ctrl == "row_half_mirror") { |
8737 | 0 | Val = DppCtrl::ROW_HALF_MIRROR; |
8738 | 0 | } else { |
8739 | 0 | if (skipToken(AsmToken::Colon, "expected a colon")) { |
8740 | 0 | if (Ctrl == "quad_perm") { |
8741 | 0 | Val = parseDPPCtrlPerm(); |
8742 | 0 | } else { |
8743 | 0 | Val = parseDPPCtrlSel(Ctrl); |
8744 | 0 | } |
8745 | 0 | } |
8746 | 0 | } |
8747 | |
8748 | 0 | if (Val == -1) |
8749 | 0 | return ParseStatus::Failure; |
8750 | | |
8751 | 0 | Operands.push_back( |
8752 | 0 | AMDGPUOperand::CreateImm(this, Val, S, AMDGPUOperand::ImmTyDppCtrl)); |
8753 | 0 | return ParseStatus::Success; |
8754 | 0 | } |
8755 | | |
8756 | | void AMDGPUAsmParser::cvtVOP3DPP(MCInst &Inst, const OperandVector &Operands, |
8757 | 0 | bool IsDPP8) { |
8758 | 0 | OptionalImmIndexMap OptionalIdx; |
8759 | 0 | unsigned Opc = Inst.getOpcode(); |
8760 | 0 | const MCInstrDesc &Desc = MII.get(Inst.getOpcode()); |
8761 | | |
8762 | | // MAC instructions are special because they have an 'old' |
8763 | | // operand which is not tied to dst (but is assumed to be). |
8764 | | // They also have a dummy, unused src2_modifiers operand. |
8765 | 0 | int OldIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::old); |
8766 | 0 | int Src2ModIdx = |
8767 | 0 | AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2_modifiers); |
8768 | 0 | bool IsMAC = OldIdx != -1 && Src2ModIdx != -1 && |
8769 | 0 | Desc.getOperandConstraint(OldIdx, MCOI::TIED_TO) == -1; |
8770 | |
8771 | 0 | unsigned I = 1; |
8772 | 0 | for (unsigned J = 0; J < Desc.getNumDefs(); ++J) { |
8773 | 0 | ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1); |
8774 | 0 | } |
8775 | |
8776 | 0 | int Fi = 0; |
8777 | 0 | for (unsigned E = Operands.size(); I != E; ++I) { |
8778 | |
8779 | 0 | if (IsMAC) { |
8780 | 0 | int NumOperands = Inst.getNumOperands(); |
8781 | 0 | if (OldIdx == NumOperands) { |
8782 | | // Handle old operand |
8783 | 0 | constexpr int DST_IDX = 0; |
8784 | 0 | Inst.addOperand(Inst.getOperand(DST_IDX)); |
8785 | 0 | } else if (Src2ModIdx == NumOperands) { |
8786 | | // Add unused dummy src2_modifiers |
8787 | 0 | Inst.addOperand(MCOperand::createImm(0)); |
8788 | 0 | } |
8789 | 0 | } |
8790 | |
8791 | 0 | auto TiedTo = Desc.getOperandConstraint(Inst.getNumOperands(), |
8792 | 0 | MCOI::TIED_TO); |
8793 | 0 | if (TiedTo != -1) { |
8794 | 0 | assert((unsigned)TiedTo < Inst.getNumOperands()); |
8795 | | // handle tied old or src2 for MAC instructions |
8796 | 0 | Inst.addOperand(Inst.getOperand(TiedTo)); |
8797 | 0 | } |
8798 | 0 | AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]); |
8799 | | // Add the register arguments |
8800 | 0 | if (IsDPP8 && Op.isDppFI()) { |
8801 | 0 | Fi = Op.getImm(); |
8802 | 0 | } else if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) { |
8803 | 0 | Op.addRegOrImmWithFPInputModsOperands(Inst, 2); |
8804 | 0 | } else if (Op.isReg()) { |
8805 | 0 | Op.addRegOperands(Inst, 1); |
8806 | 0 | } else if (Op.isImm() && |
8807 | 0 | Desc.operands()[Inst.getNumOperands()].RegClass != -1) { |
8808 | 0 | assert(!Op.IsImmKindLiteral() && "Cannot use literal with DPP"); |
8809 | 0 | Op.addImmOperands(Inst, 1); |
8810 | 0 | } else if (Op.isImm()) { |
8811 | 0 | OptionalIdx[Op.getImmTy()] = I; |
8812 | 0 | } else { |
8813 | 0 | llvm_unreachable("unhandled operand type"); |
8814 | 0 | } |
8815 | 0 | } |
8816 | 0 | if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::clamp)) |
8817 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI); |
8818 | |
8819 | 0 | if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::omod)) |
8820 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI); |
8821 | |
8822 | 0 | if (Desc.TSFlags & SIInstrFlags::VOP3P) |
8823 | 0 | cvtVOP3P(Inst, Operands, OptionalIdx); |
8824 | 0 | else if (Desc.TSFlags & SIInstrFlags::VOP3) |
8825 | 0 | cvtVOP3OpSel(Inst, Operands, OptionalIdx); |
8826 | 0 | else if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel)) { |
8827 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOpSel); |
8828 | 0 | } |
8829 | |
8830 | 0 | if (IsDPP8) { |
8831 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDPP8); |
8832 | 0 | using namespace llvm::AMDGPU::DPP; |
8833 | 0 | Inst.addOperand(MCOperand::createImm(Fi? DPP8_FI_1 : DPP8_FI_0)); |
8834 | 0 | } else { |
8835 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppCtrl, 0xe4); |
8836 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf); |
8837 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf); |
8838 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl); |
8839 | |
8840 | 0 | if (AMDGPU::hasNamedOperand(Inst.getOpcode(), AMDGPU::OpName::fi)) |
8841 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, |
8842 | 0 | AMDGPUOperand::ImmTyDppFI); |
8843 | 0 | } |
8844 | 0 | } |
8845 | | |
8846 | 0 | void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands, bool IsDPP8) { |
8847 | 0 | OptionalImmIndexMap OptionalIdx; |
8848 | |
8849 | 0 | unsigned I = 1; |
8850 | 0 | const MCInstrDesc &Desc = MII.get(Inst.getOpcode()); |
8851 | 0 | for (unsigned J = 0; J < Desc.getNumDefs(); ++J) { |
8852 | 0 | ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1); |
8853 | 0 | } |
8854 | |
8855 | 0 | int Fi = 0; |
8856 | 0 | for (unsigned E = Operands.size(); I != E; ++I) { |
8857 | 0 | auto TiedTo = Desc.getOperandConstraint(Inst.getNumOperands(), |
8858 | 0 | MCOI::TIED_TO); |
8859 | 0 | if (TiedTo != -1) { |
8860 | 0 | assert((unsigned)TiedTo < Inst.getNumOperands()); |
8861 | | // handle tied old or src2 for MAC instructions |
8862 | 0 | Inst.addOperand(Inst.getOperand(TiedTo)); |
8863 | 0 | } |
8864 | 0 | AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]); |
8865 | | // Add the register arguments |
8866 | 0 | if (Op.isReg() && validateVccOperand(Op.getReg())) { |
8867 | | // VOP2b (v_add_u32, v_sub_u32 ...) dpp use "vcc" token. |
8868 | | // Skip it. |
8869 | 0 | continue; |
8870 | 0 | } |
8871 | | |
8872 | 0 | if (IsDPP8) { |
8873 | 0 | if (Op.isDPP8()) { |
8874 | 0 | Op.addImmOperands(Inst, 1); |
8875 | 0 | } else if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) { |
8876 | 0 | Op.addRegWithFPInputModsOperands(Inst, 2); |
8877 | 0 | } else if (Op.isDppFI()) { |
8878 | 0 | Fi = Op.getImm(); |
8879 | 0 | } else if (Op.isReg()) { |
8880 | 0 | Op.addRegOperands(Inst, 1); |
8881 | 0 | } else { |
8882 | 0 | llvm_unreachable("Invalid operand type"); |
8883 | 0 | } |
8884 | 0 | } else { |
8885 | 0 | if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) { |
8886 | 0 | Op.addRegWithFPInputModsOperands(Inst, 2); |
8887 | 0 | } else if (Op.isReg()) { |
8888 | 0 | Op.addRegOperands(Inst, 1); |
8889 | 0 | } else if (Op.isDPPCtrl()) { |
8890 | 0 | Op.addImmOperands(Inst, 1); |
8891 | 0 | } else if (Op.isImm()) { |
8892 | | // Handle optional arguments |
8893 | 0 | OptionalIdx[Op.getImmTy()] = I; |
8894 | 0 | } else { |
8895 | 0 | llvm_unreachable("Invalid operand type"); |
8896 | 0 | } |
8897 | 0 | } |
8898 | 0 | } |
8899 | |
8900 | 0 | if (IsDPP8) { |
8901 | 0 | using namespace llvm::AMDGPU::DPP; |
8902 | 0 | Inst.addOperand(MCOperand::createImm(Fi? DPP8_FI_1 : DPP8_FI_0)); |
8903 | 0 | } else { |
8904 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf); |
8905 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf); |
8906 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl); |
8907 | 0 | if (AMDGPU::hasNamedOperand(Inst.getOpcode(), AMDGPU::OpName::fi)) { |
8908 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, |
8909 | 0 | AMDGPUOperand::ImmTyDppFI); |
8910 | 0 | } |
8911 | 0 | } |
8912 | 0 | } |
8913 | | |
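Both cvtDPP() and cvtVOP3DPP() end by filling in optional immediates that were not written in the source, using documented defaults (row_mask and bank_mask default to 0xf, bound_ctrl to 0). The real code records operand indices in an OptionalImmIndexMap; the sketch below shows the same "use the parsed value if present, otherwise the default" idea with plain values, and its enum names are illustrative.

#include <cstdio>
#include <map>

enum class OptImm { DppRowMask, DppBankMask, DppBoundCtrl };

static int64_t getOrDefault(const std::map<OptImm, int64_t> &Parsed, OptImm K,
                            int64_t Default) {
  auto It = Parsed.find(K);
  return It != Parsed.end() ? It->second : Default;
}

int main() {
  // Only row_mask was written in the source; the others take their defaults.
  std::map<OptImm, int64_t> Parsed = {{OptImm::DppRowMask, 0x5}};
  std::printf("row_mask:0x%llx bank_mask:0x%llx bound_ctrl:%lld\n",
              (long long)getOrDefault(Parsed, OptImm::DppRowMask, 0xf),
              (long long)getOrDefault(Parsed, OptImm::DppBankMask, 0xf),
              (long long)getOrDefault(Parsed, OptImm::DppBoundCtrl, 0));
  return 0;
}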
8914 | | //===----------------------------------------------------------------------===// |
8915 | | // sdwa |
8916 | | //===----------------------------------------------------------------------===// |
8917 | | |
8918 | | ParseStatus AMDGPUAsmParser::parseSDWASel(OperandVector &Operands, |
8919 | | StringRef Prefix, |
8920 | 0 | AMDGPUOperand::ImmTy Type) { |
8921 | 0 | using namespace llvm::AMDGPU::SDWA; |
8922 | |
8923 | 0 | SMLoc S = getLoc(); |
8924 | 0 | StringRef Value; |
8925 | |
8926 | 0 | SMLoc StringLoc; |
8927 | 0 | ParseStatus Res = parseStringWithPrefix(Prefix, Value, StringLoc); |
8928 | 0 | if (!Res.isSuccess()) |
8929 | 0 | return Res; |
8930 | | |
8931 | 0 | int64_t Int; |
8932 | 0 | Int = StringSwitch<int64_t>(Value) |
8933 | 0 | .Case("BYTE_0", SdwaSel::BYTE_0) |
8934 | 0 | .Case("BYTE_1", SdwaSel::BYTE_1) |
8935 | 0 | .Case("BYTE_2", SdwaSel::BYTE_2) |
8936 | 0 | .Case("BYTE_3", SdwaSel::BYTE_3) |
8937 | 0 | .Case("WORD_0", SdwaSel::WORD_0) |
8938 | 0 | .Case("WORD_1", SdwaSel::WORD_1) |
8939 | 0 | .Case("DWORD", SdwaSel::DWORD) |
8940 | 0 | .Default(0xffffffff); |
8941 | |
8942 | 0 | if (Int == 0xffffffff) |
8943 | 0 | return Error(StringLoc, "invalid " + Twine(Prefix) + " value"); |
8944 | | |
8945 | 0 | Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, Type)); |
8946 | 0 | return ParseStatus::Success; |
8947 | 0 | } |
8948 | | |
8949 | 0 | ParseStatus AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) { |
8950 | 0 | using namespace llvm::AMDGPU::SDWA; |
8951 | |
8952 | 0 | SMLoc S = getLoc(); |
8953 | 0 | StringRef Value; |
8954 | |
8955 | 0 | SMLoc StringLoc; |
8956 | 0 | ParseStatus Res = parseStringWithPrefix("dst_unused", Value, StringLoc); |
8957 | 0 | if (!Res.isSuccess()) |
8958 | 0 | return Res; |
8959 | | |
8960 | 0 | int64_t Int; |
8961 | 0 | Int = StringSwitch<int64_t>(Value) |
8962 | 0 | .Case("UNUSED_PAD", DstUnused::UNUSED_PAD) |
8963 | 0 | .Case("UNUSED_SEXT", DstUnused::UNUSED_SEXT) |
8964 | 0 | .Case("UNUSED_PRESERVE", DstUnused::UNUSED_PRESERVE) |
8965 | 0 | .Default(0xffffffff); |
8966 | |
8967 | 0 | if (Int == 0xffffffff) |
8968 | 0 | return Error(StringLoc, "invalid dst_unused value"); |
8969 | | |
8970 | 0 | Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTySDWADstUnused)); |
8971 | 0 | return ParseStatus::Success; |
8972 | 0 | } |
8973 | | |
8974 | 0 | void AMDGPUAsmParser::cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands) { |
8975 | 0 | cvtSDWA(Inst, Operands, SIInstrFlags::VOP1); |
8976 | 0 | } |
8977 | | |
8978 | 0 | void AMDGPUAsmParser::cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands) { |
8979 | 0 | cvtSDWA(Inst, Operands, SIInstrFlags::VOP2); |
8980 | 0 | } |
8981 | | |
8982 | 0 | void AMDGPUAsmParser::cvtSdwaVOP2b(MCInst &Inst, const OperandVector &Operands) { |
8983 | 0 | cvtSDWA(Inst, Operands, SIInstrFlags::VOP2, true, true); |
8984 | 0 | } |
8985 | | |
8986 | 0 | void AMDGPUAsmParser::cvtSdwaVOP2e(MCInst &Inst, const OperandVector &Operands) { |
8987 | 0 | cvtSDWA(Inst, Operands, SIInstrFlags::VOP2, false, true); |
8988 | 0 | } |
8989 | | |
8990 | 0 | void AMDGPUAsmParser::cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands) { |
8991 | 0 | cvtSDWA(Inst, Operands, SIInstrFlags::VOPC, isVI()); |
8992 | 0 | } |
8993 | | |
8994 | | void AMDGPUAsmParser::cvtSDWA(MCInst &Inst, const OperandVector &Operands, |
8995 | | uint64_t BasicInstType, |
8996 | | bool SkipDstVcc, |
8997 | 0 | bool SkipSrcVcc) { |
8998 | 0 | using namespace llvm::AMDGPU::SDWA; |
8999 | |
9000 | 0 | OptionalImmIndexMap OptionalIdx; |
9001 | 0 | bool SkipVcc = SkipDstVcc || SkipSrcVcc; |
9002 | 0 | bool SkippedVcc = false; |
9003 | |
9004 | 0 | unsigned I = 1; |
9005 | 0 | const MCInstrDesc &Desc = MII.get(Inst.getOpcode()); |
9006 | 0 | for (unsigned J = 0; J < Desc.getNumDefs(); ++J) { |
9007 | 0 | ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1); |
9008 | 0 | } |
9009 | |
9010 | 0 | for (unsigned E = Operands.size(); I != E; ++I) { |
9011 | 0 | AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]); |
9012 | 0 | if (SkipVcc && !SkippedVcc && Op.isReg() && |
9013 | 0 | (Op.getReg() == AMDGPU::VCC || Op.getReg() == AMDGPU::VCC_LO)) { |
9014 | | // VOP2b (v_add_u32, v_sub_u32 ...) sdwa use "vcc" token as dst. |
9015 | | // Skip it if it's 2nd (e.g. v_add_i32_sdwa v1, vcc, v2, v3) |
9016 | | // or 4th (v_addc_u32_sdwa v1, vcc, v2, v3, vcc) operand. |
9017 | | // Skip VCC only if we didn't skip it on previous iteration. |
9018 | | // Note that src0 and src1 occupy 2 slots each because of modifiers. |
9019 | 0 | if (BasicInstType == SIInstrFlags::VOP2 && |
9020 | 0 | ((SkipDstVcc && Inst.getNumOperands() == 1) || |
9021 | 0 | (SkipSrcVcc && Inst.getNumOperands() == 5))) { |
9022 | 0 | SkippedVcc = true; |
9023 | 0 | continue; |
9024 | 0 | } else if (BasicInstType == SIInstrFlags::VOPC && |
9025 | 0 | Inst.getNumOperands() == 0) { |
9026 | 0 | SkippedVcc = true; |
9027 | 0 | continue; |
9028 | 0 | } |
9029 | 0 | } |
9030 | 0 | if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) { |
9031 | 0 | Op.addRegOrImmWithInputModsOperands(Inst, 2); |
9032 | 0 | } else if (Op.isImm()) { |
9033 | | // Handle optional arguments |
9034 | 0 | OptionalIdx[Op.getImmTy()] = I; |
9035 | 0 | } else { |
9036 | 0 | llvm_unreachable("Invalid operand type"); |
9037 | 0 | } |
9038 | 0 | SkippedVcc = false; |
9039 | 0 | } |
9040 | |
9041 | 0 | const unsigned Opc = Inst.getOpcode(); |
9042 | 0 | if (Opc != AMDGPU::V_NOP_sdwa_gfx10 && Opc != AMDGPU::V_NOP_sdwa_gfx9 && |
9043 | 0 | Opc != AMDGPU::V_NOP_sdwa_vi) { |
9044 | | // v_nop_sdwa_vi/gfx9/gfx10 have no optional sdwa arguments |
9045 | 0 | switch (BasicInstType) { |
9046 | 0 | case SIInstrFlags::VOP1: |
9047 | 0 | if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::clamp)) |
9048 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, |
9049 | 0 | AMDGPUOperand::ImmTyClampSI, 0); |
9050 | |
9051 | 0 | if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::omod)) |
9052 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, |
9053 | 0 | AMDGPUOperand::ImmTyOModSI, 0); |
9054 | |
9055 | 0 | if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::dst_sel)) |
9056 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, |
9057 | 0 | AMDGPUOperand::ImmTySDWADstSel, SdwaSel::DWORD); |
9058 | |
9059 | 0 | if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::dst_unused)) |
9060 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, |
9061 | 0 | AMDGPUOperand::ImmTySDWADstUnused, |
9062 | 0 | DstUnused::UNUSED_PRESERVE); |
9063 | |
9064 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySDWASrc0Sel, SdwaSel::DWORD); |
9065 | 0 | break; |
9066 | | |
9067 | 0 | case SIInstrFlags::VOP2: |
9068 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0); |
9069 | |
9070 | 0 | if (AMDGPU::hasNamedOperand(Inst.getOpcode(), AMDGPU::OpName::omod)) |
9071 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI, 0); |
9072 | |
9073 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySDWADstSel, SdwaSel::DWORD); |
9074 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySDWADstUnused, DstUnused::UNUSED_PRESERVE); |
9075 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySDWASrc0Sel, SdwaSel::DWORD); |
9076 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySDWASrc1Sel, SdwaSel::DWORD); |
9077 | 0 | break; |
9078 | | |
9079 | 0 | case SIInstrFlags::VOPC: |
9080 | 0 | if (AMDGPU::hasNamedOperand(Inst.getOpcode(), AMDGPU::OpName::clamp)) |
9081 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0); |
9082 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySDWASrc0Sel, SdwaSel::DWORD); |
9083 | 0 | addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySDWASrc1Sel, SdwaSel::DWORD); |
9084 | 0 | break; |
9085 | | |
9086 | 0 | default: |
9087 | 0 | llvm_unreachable("Invalid instruction type. Only VOP1, VOP2 and VOPC allowed"); |
9088 | 0 | } |
9089 | 0 | } |
9090 | | |
9091 | | // special case v_mac_{f16, f32}: |
9092 | | // it has src2 register operand that is tied to dst operand |
9093 | 0 | if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi || |
9094 | 0 | Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) { |
9095 | 0 | auto it = Inst.begin(); |
9096 | 0 | std::advance( |
9097 | 0 | it, AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::src2)); |
9098 | 0 | Inst.insert(it, Inst.getOperand(0)); // src2 = dst |
9099 | 0 | } |
9100 | 0 | } |
9101 | | |
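The v_mac special case at the end of cvtSDWA() duplicates the destination operand into the src2 slot, since src2 is tied to dst. A tiny standalone illustration of that insert follows; the operand values and the src2 position are made up.

#include <cstdio>
#include <vector>

int main() {
  // Pretend operand list: [dst, src0_mods, src0, src1_mods, src1]
  std::vector<int> Ops = {10, 0, 11, 0, 12};
  const int Dst = Ops[0];
  Ops.insert(Ops.begin() + 5, Dst);  // hypothetical src2 position: src2 = dst
  for (int V : Ops)
    std::printf("%d ", V);  // prints: 10 0 11 0 12 10
  std::printf("\n");
  return 0;
}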
9102 | | /// Force static initialization. |
9103 | 24 | extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUAsmParser() { |
9104 | 24 | RegisterMCAsmParser<AMDGPUAsmParser> A(getTheR600Target()); |
9105 | 24 | RegisterMCAsmParser<AMDGPUAsmParser> B(getTheGCNTarget()); |
9106 | 24 | } |
9107 | | |
9108 | | #define GET_REGISTER_MATCHER |
9109 | | #define GET_MATCHER_IMPLEMENTATION |
9110 | | #define GET_MNEMONIC_SPELL_CHECKER |
9111 | | #define GET_MNEMONIC_CHECKER |
9112 | | #include "AMDGPUGenAsmMatcher.inc" |
9113 | | |
9114 | | ParseStatus AMDGPUAsmParser::parseCustomOperand(OperandVector &Operands, |
9115 | 0 | unsigned MCK) { |
9116 | 0 | switch (MCK) { |
9117 | 0 | case MCK_addr64: |
9118 | 0 | return parseTokenOp("addr64", Operands); |
9119 | 0 | case MCK_done: |
9120 | 0 | return parseTokenOp("done", Operands); |
9121 | 0 | case MCK_idxen: |
9122 | 0 | return parseTokenOp("idxen", Operands); |
9123 | 0 | case MCK_lds: |
9124 | 0 | return parseTokenOp("lds", Operands); |
9125 | 0 | case MCK_offen: |
9126 | 0 | return parseTokenOp("offen", Operands); |
9127 | 0 | case MCK_off: |
9128 | 0 | return parseTokenOp("off", Operands); |
9129 | 0 | case MCK_row_95_en: |
9130 | 0 | return parseTokenOp("row_en", Operands); |
9131 | 0 | case MCK_gds: |
9132 | 0 | return parseNamedBit("gds", Operands, AMDGPUOperand::ImmTyGDS); |
9133 | 0 | case MCK_tfe: |
9134 | 0 | return parseNamedBit("tfe", Operands, AMDGPUOperand::ImmTyTFE); |
9135 | 0 | } |
9136 | 0 | return tryCustomParseOperand(Operands, MCK); |
9137 | 0 | } |
9138 | | |
9139 | | // This function should be defined after auto-generated include so that we have |
9140 | | // MatchClassKind enum defined |
9141 | | unsigned AMDGPUAsmParser::validateTargetOperandClass(MCParsedAsmOperand &Op, |
9142 | 0 | unsigned Kind) { |
9143 | | // Tokens like "glc" would be parsed as immediate operands in ParseOperand(). |
9144 | | // But MatchInstructionImpl() expects to see a token and fails to validate the |
9145 | | // operand. This method checks whether we were given an immediate operand where |
9146 | | // the corresponding token was expected. |
9147 | 0 | AMDGPUOperand &Operand = (AMDGPUOperand&)Op; |
9148 | 0 | switch (Kind) { |
9149 | 0 | case MCK_addr64: |
9150 | 0 | return Operand.isAddr64() ? Match_Success : Match_InvalidOperand; |
9151 | 0 | case MCK_gds: |
9152 | 0 | return Operand.isGDS() ? Match_Success : Match_InvalidOperand; |
9153 | 0 | case MCK_lds: |
9154 | 0 | return Operand.isLDS() ? Match_Success : Match_InvalidOperand; |
9155 | 0 | case MCK_idxen: |
9156 | 0 | return Operand.isIdxen() ? Match_Success : Match_InvalidOperand; |
9157 | 0 | case MCK_offen: |
9158 | 0 | return Operand.isOffen() ? Match_Success : Match_InvalidOperand; |
9159 | 0 | case MCK_tfe: |
9160 | 0 | return Operand.isTFE() ? Match_Success : Match_InvalidOperand; |
9161 | 0 | case MCK_SSrcB32: |
9162 | | // When operands have expression values, they will return true for isToken, |
9163 | | // because it is not possible to distinguish between a token and an |
9164 | | // expression at parse time. MatchInstructionImpl() will always try to |
9165 | | // match an operand as a token, when isToken returns true, and when the |
9166 | | // name of the expression is not a valid token, the match will fail, |
9167 | | // so we need to handle it here. |
9168 | 0 | return Operand.isSSrcB32() ? Match_Success : Match_InvalidOperand; |
9169 | 0 | case MCK_SSrcF32: |
9170 | 0 | return Operand.isSSrcF32() ? Match_Success : Match_InvalidOperand; |
9171 | 0 | case MCK_SOPPBrTarget: |
9172 | 0 | return Operand.isSOPPBrTarget() ? Match_Success : Match_InvalidOperand; |
9173 | 0 | case MCK_VReg32OrOff: |
9174 | 0 | return Operand.isVReg32OrOff() ? Match_Success : Match_InvalidOperand; |
9175 | 0 | case MCK_InterpSlot: |
9176 | 0 | return Operand.isInterpSlot() ? Match_Success : Match_InvalidOperand; |
9177 | 0 | case MCK_InterpAttr: |
9178 | 0 | return Operand.isInterpAttr() ? Match_Success : Match_InvalidOperand; |
9179 | 0 | case MCK_InterpAttrChan: |
9180 | 0 | return Operand.isInterpAttrChan() ? Match_Success : Match_InvalidOperand; |
9181 | 0 | case MCK_SReg_64: |
9182 | 0 | case MCK_SReg_64_XEXEC: |
9183 | | // Null is defined as a 32-bit register but |
9184 | | // it should also be enabled with 64-bit operands. |
9185 | | // The following code enables it for SReg_64 operands |
9186 | | // used as source and destination. Remaining source |
9187 | | // operands are handled in isInlinableImm. |
9188 | 0 | return Operand.isNull() ? Match_Success : Match_InvalidOperand; |
9189 | 0 | default: |
9190 | 0 | return Match_InvalidOperand; |
9191 | 0 | } |
9192 | 0 | } |
9193 | | |
9194 | | //===----------------------------------------------------------------------===// |
9195 | | // endpgm |
9196 | | //===----------------------------------------------------------------------===// |
9197 | | |
9198 | 0 | ParseStatus AMDGPUAsmParser::parseEndpgm(OperandVector &Operands) { |
9199 | 0 | SMLoc S = getLoc(); |
9200 | 0 | int64_t Imm = 0; |
9201 | |
9202 | 0 | if (!parseExpr(Imm)) { |
9203 | | // The operand is optional, if not present default to 0 |
9204 | 0 | Imm = 0; |
9205 | 0 | } |
9206 | |
9207 | 0 | if (!isUInt<16>(Imm)) |
9208 | 0 | return Error(S, "expected a 16-bit value"); |
9209 | | |
9210 | 0 | Operands.push_back( |
9211 | 0 | AMDGPUOperand::CreateImm(this, Imm, S, AMDGPUOperand::ImmTyEndpgm)); |
9212 | 0 | return ParseStatus::Success; |
9213 | 0 | } |
9214 | | |
9215 | 0 | bool AMDGPUOperand::isEndpgm() const { return isImmTy(ImmTyEndpgm); } |
9216 | | |
9217 | | //===----------------------------------------------------------------------===// |
9218 | | // LDSDIR |
9219 | | //===----------------------------------------------------------------------===// |
9220 | | |
9221 | 0 | bool AMDGPUOperand::isWaitVDST() const { |
9222 | 0 | return isImmTy(ImmTyWaitVDST) && isUInt<4>(getImm()); |
9223 | 0 | } |
9224 | | |
9225 | 0 | bool AMDGPUOperand::isWaitVAVDst() const { |
9226 | 0 | return isImmTy(ImmTyWaitVAVDst) && isUInt<4>(getImm()); |
9227 | 0 | } |
9228 | | |
9229 | 0 | bool AMDGPUOperand::isWaitVMVSrc() const { |
9230 | 0 | return isImmTy(ImmTyWaitVMVSrc) && isUInt<1>(getImm()); |
9231 | 0 | } |
9232 | | |
9233 | | //===----------------------------------------------------------------------===// |
9234 | | // VINTERP |
9235 | | //===----------------------------------------------------------------------===// |
9236 | | |
9237 | 0 | bool AMDGPUOperand::isWaitEXP() const { |
9238 | 0 | return isImmTy(ImmTyWaitEXP) && isUInt<3>(getImm()); |
9239 | 0 | } |
9240 | | |
9241 | | //===----------------------------------------------------------------------===// |
9242 | | // Split Barrier |
9243 | | //===----------------------------------------------------------------------===// |
9244 | | |
9245 | 0 | bool AMDGPUOperand::isSplitBarrier() const { return isInlinableImm(MVT::i32); } |