/src/keystone/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==// |
2 | | // |
3 | | // The LLVM Compiler Infrastructure |
4 | | // |
5 | | // This file is distributed under the University of Illinois Open Source |
6 | | // License. See LICENSE.TXT for details. |
7 | | // |
8 | | //===----------------------------------------------------------------------===// |
9 | | |
10 | | #include "MCTargetDesc/AArch64AddressingModes.h" |
11 | | #include "MCTargetDesc/AArch64MCExpr.h" |
12 | | #include "MCTargetDesc/AArch64TargetStreamer.h" |
13 | | #include "Utils/AArch64BaseInfo.h" |
14 | | #include "llvm/ADT/APInt.h" |
15 | | #include "llvm/ADT/STLExtras.h" |
16 | | #include "llvm/ADT/SmallString.h" |
17 | | #include "llvm/ADT/SmallVector.h" |
18 | | #include "llvm/ADT/StringSwitch.h" |
19 | | #include "llvm/ADT/Twine.h" |
20 | | #include "llvm/MC/MCContext.h" |
21 | | #include "llvm/MC/MCExpr.h" |
22 | | #include "llvm/MC/MCInst.h" |
23 | | #include "llvm/MC/MCObjectFileInfo.h" |
24 | | #include "llvm/MC/MCParser/MCAsmLexer.h" |
25 | | #include "llvm/MC/MCParser/MCAsmParser.h" |
26 | | #include "llvm/MC/MCParser/MCParsedAsmOperand.h" |
27 | | #include "llvm/MC/MCParser/MCTargetAsmParser.h" |
28 | | #include "llvm/MC/MCRegisterInfo.h" |
29 | | #include "llvm/MC/MCStreamer.h" |
30 | | #include "llvm/MC/MCSubtargetInfo.h" |
31 | | #include "llvm/MC/MCSymbol.h" |
32 | | #include "llvm/Support/ErrorHandling.h" |
33 | | #include "llvm/Support/SourceMgr.h" |
34 | | #include "llvm/Support/TargetRegistry.h" |
35 | | #include "llvm/Support/raw_ostream.h" |
36 | | |
37 | | #include "keystone/arm64.h" |
38 | | |
39 | | #include <cstdio> |
40 | | using namespace llvm_ks; |
41 | | |
42 | | namespace { |
43 | | |
44 | | class AArch64Operand; |
45 | | |
46 | | class AArch64AsmParser : public MCTargetAsmParser { |
47 | | private: |
48 | | StringRef Mnemonic; ///< Instruction mnemonic. |
49 | | |
50 | | // Map of register aliases registers via the .req directive. |
51 | | StringMap<std::pair<bool, unsigned> > RegisterReqs; |
52 | | |
53 | 196 | AArch64TargetStreamer &getTargetStreamer() { |
54 | 196 | MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer(); |
55 | 196 | return static_cast<AArch64TargetStreamer &>(TS); |
56 | 196 | } |
57 | | |
58 | 96.1k | SMLoc getLoc() const { return getParser().getTok().getLoc(); } |
59 | | |
60 | | bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands); |
61 | | AArch64CC::CondCode parseCondCodeString(StringRef Cond); |
62 | | bool parseCondCode(OperandVector &Operands, bool invertCondCode); |
63 | | unsigned matchRegisterNameAlias(StringRef Name, bool isVector); |
64 | | int tryParseRegister(); |
65 | | int tryMatchVectorRegister(StringRef &Kind, bool expected); |
66 | | bool parseRegister(OperandVector &Operands); |
67 | | bool parseSymbolicImmVal(const MCExpr *&ImmVal); |
68 | | bool parseVectorList(OperandVector &Operands); |
69 | | bool parseOperand(OperandVector &Operands, bool isCondCode, |
70 | | bool invertCondCode); |
71 | | |
72 | 0 | void Warning(SMLoc L, const Twine &Msg) { getParser().Warning(L, Msg); } |
73 | | //bool Error(SMLoc L, const Twine &Msg) { return getParser().Error(L, Msg); } |
74 | | //bool Error(SMLoc L, const Twine &Msg) { return true; } |
75 | | bool showMatchError(SMLoc Loc, unsigned ErrCode); |
76 | | |
77 | | bool parseDirectiveWord(unsigned Size, SMLoc L); |
78 | | bool parseDirectiveInst(SMLoc L); |
79 | | |
80 | | bool parseDirectiveTLSDescCall(SMLoc L); |
81 | | |
82 | | bool parseDirectiveLOH(StringRef LOH, SMLoc L); |
83 | | bool parseDirectiveLtorg(SMLoc L); |
84 | | |
85 | | bool parseDirectiveReq(StringRef Name, SMLoc L); |
86 | | bool parseDirectiveUnreq(SMLoc L); |
87 | | |
88 | | bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc); |
89 | | bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, |
90 | | OperandVector &Operands, MCStreamer &Out, |
91 | | uint64_t &ErrorInfo, |
92 | | bool MatchingInlineAsm, unsigned int &ErrorCode, uint64_t &Address) override; |
93 | | /// @name Auto-generated Match Functions |
94 | | /// { |
95 | | |
96 | | #define GET_ASSEMBLER_HEADER |
97 | | #include "AArch64GenAsmMatcher.inc" |
98 | | |
99 | | /// } |
100 | | |
101 | | OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands); |
102 | | OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands); |
103 | | OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands); |
104 | | OperandMatchResultTy tryParseSysReg(OperandVector &Operands); |
105 | | OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands); |
106 | | OperandMatchResultTy tryParsePrefetch(OperandVector &Operands); |
107 | | OperandMatchResultTy tryParsePSBHint(OperandVector &Operands); |
108 | | OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands); |
109 | | OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands); |
110 | | OperandMatchResultTy tryParseFPImm(OperandVector &Operands); |
111 | | OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands); |
112 | | OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands); |
113 | | bool tryParseVectorRegister(OperandVector &Operands); |
114 | | OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands); |
115 | | |
116 | | public: |
117 | | enum AArch64MatchResultTy { |
118 | | Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY, |
119 | | #define GET_OPERAND_DIAGNOSTIC_TYPES |
120 | | #include "AArch64GenAsmMatcher.inc" |
121 | | }; |
122 | | AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser, |
123 | | const MCInstrInfo &MII, const MCTargetOptions &Options) |
124 | 1.41k | : MCTargetAsmParser(Options, STI) { |
125 | 1.41k | MCAsmParserExtension::Initialize(Parser); |
126 | 1.41k | MCStreamer &S = getParser().getStreamer(); |
127 | 1.41k | if (S.getTargetStreamer() == nullptr) |
128 | 1.41k | new AArch64TargetStreamer(S); |
129 | | |
130 | | // Initialize the set of available features. |
131 | 1.41k | setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits())); |
132 | 1.41k | } |
133 | | |
134 | | bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name, |
135 | | SMLoc NameLoc, OperandVector &Operands, unsigned int &ErrorCode) override; |
136 | | bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc, unsigned int &ErrorCode) override; |
137 | | bool ParseDirective(AsmToken DirectiveID) override; |
138 | | unsigned validateTargetOperandClass(MCParsedAsmOperand &Op, |
139 | | unsigned Kind) override; |
140 | | |
141 | | static bool classifySymbolRef(const MCExpr *Expr, |
142 | | AArch64MCExpr::VariantKind &ELFRefKind, |
143 | | MCSymbolRefExpr::VariantKind &DarwinRefKind, |
144 | | int64_t &Addend); |
145 | | }; |
146 | | } // end anonymous namespace |
147 | | |
148 | | namespace { |
149 | | |
150 | | /// AArch64Operand - Instances of this class represent a parsed AArch64 machine |
151 | | /// instruction. |
152 | | class AArch64Operand : public MCParsedAsmOperand { |
153 | | private: |
154 | | enum KindTy { |
155 | | k_Immediate, |
156 | | k_ShiftedImm, |
157 | | k_CondCode, |
158 | | k_Register, |
159 | | k_VectorList, |
160 | | k_VectorIndex, |
161 | | k_Token, |
162 | | k_SysReg, |
163 | | k_SysCR, |
164 | | k_Prefetch, |
165 | | k_ShiftExtend, |
166 | | k_FPImm, |
167 | | k_Barrier, |
168 | | k_PSBHint, |
169 | | } Kind; |
170 | | |
171 | | SMLoc StartLoc, EndLoc; |
172 | | |
173 | | struct TokOp { |
174 | | const char *Data; |
175 | | unsigned Length; |
176 | | bool IsSuffix; // Is the operand actually a suffix on the mnemonic. |
177 | | }; |
178 | | |
179 | | struct RegOp { |
180 | | unsigned RegNum; |
181 | | bool isVector; |
182 | | }; |
183 | | |
184 | | struct VectorListOp { |
185 | | unsigned RegNum; |
186 | | unsigned Count; |
187 | | unsigned NumElements; |
188 | | unsigned ElementKind; |
189 | | }; |
190 | | |
191 | | struct VectorIndexOp { |
192 | | unsigned Val; |
193 | | }; |
194 | | |
195 | | struct ImmOp { |
196 | | const MCExpr *Val; |
197 | | }; |
198 | | |
199 | | struct ShiftedImmOp { |
200 | | const MCExpr *Val; |
201 | | unsigned ShiftAmount; |
202 | | }; |
203 | | |
204 | | struct CondCodeOp { |
205 | | AArch64CC::CondCode Code; |
206 | | }; |
207 | | |
208 | | struct FPImmOp { |
209 | | unsigned Val; // Encoded 8-bit representation. |
210 | | }; |
211 | | |
212 | | struct BarrierOp { |
213 | | unsigned Val; // Not the enum since not all values have names. |
214 | | const char *Data; |
215 | | unsigned Length; |
216 | | }; |
217 | | |
218 | | struct SysRegOp { |
219 | | const char *Data; |
220 | | unsigned Length; |
221 | | uint32_t MRSReg; |
222 | | uint32_t MSRReg; |
223 | | uint32_t PStateField; |
224 | | }; |
225 | | |
226 | | struct SysCRImmOp { |
227 | | unsigned Val; |
228 | | }; |
229 | | |
230 | | struct PrefetchOp { |
231 | | unsigned Val; |
232 | | const char *Data; |
233 | | unsigned Length; |
234 | | }; |
235 | | |
236 | | struct PSBHintOp { |
237 | | unsigned Val; |
238 | | const char *Data; |
239 | | unsigned Length; |
240 | | }; |
241 | | |
242 | | struct ShiftExtendOp { |
243 | | AArch64_AM::ShiftExtendType Type; |
244 | | unsigned Amount; |
245 | | bool HasExplicitAmount; |
246 | | }; |
247 | | |
248 | | struct ExtendOp { |
249 | | unsigned Val; |
250 | | }; |
251 | | |
252 | | union { |
253 | | struct TokOp Tok; |
254 | | struct RegOp Reg; |
255 | | struct VectorListOp VectorList; |
256 | | struct VectorIndexOp VectorIndex; |
257 | | struct ImmOp Imm; |
258 | | struct ShiftedImmOp ShiftedImm; |
259 | | struct CondCodeOp CondCode; |
260 | | struct FPImmOp FPImm; |
261 | | struct BarrierOp Barrier; |
262 | | struct SysRegOp SysReg; |
263 | | struct SysCRImmOp SysCRImm; |
264 | | struct PrefetchOp Prefetch; |
265 | | struct PSBHintOp PSBHint; |
266 | | struct ShiftExtendOp ShiftExtend; |
267 | | }; |
268 | | |
269 | | // Keep the MCContext around as the MCExprs may need manipulated during |
270 | | // the add<>Operands() calls. |
271 | | MCContext &Ctx; |
272 | | |
273 | | public: |
274 | 96.4k | AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {} |
275 | | |
276 | 0 | AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) { |
277 | 0 | Kind = o.Kind; |
278 | 0 | StartLoc = o.StartLoc; |
279 | 0 | EndLoc = o.EndLoc; |
280 | 0 | switch (Kind) { |
281 | 0 | case k_Token: |
282 | 0 | Tok = o.Tok; |
283 | 0 | break; |
284 | 0 | case k_Immediate: |
285 | 0 | Imm = o.Imm; |
286 | 0 | break; |
287 | 0 | case k_ShiftedImm: |
288 | 0 | ShiftedImm = o.ShiftedImm; |
289 | 0 | break; |
290 | 0 | case k_CondCode: |
291 | 0 | CondCode = o.CondCode; |
292 | 0 | break; |
293 | 0 | case k_FPImm: |
294 | 0 | FPImm = o.FPImm; |
295 | 0 | break; |
296 | 0 | case k_Barrier: |
297 | 0 | Barrier = o.Barrier; |
298 | 0 | break; |
299 | 0 | case k_Register: |
300 | 0 | Reg = o.Reg; |
301 | 0 | break; |
302 | 0 | case k_VectorList: |
303 | 0 | VectorList = o.VectorList; |
304 | 0 | break; |
305 | 0 | case k_VectorIndex: |
306 | 0 | VectorIndex = o.VectorIndex; |
307 | 0 | break; |
308 | 0 | case k_SysReg: |
309 | 0 | SysReg = o.SysReg; |
310 | 0 | break; |
311 | 0 | case k_SysCR: |
312 | 0 | SysCRImm = o.SysCRImm; |
313 | 0 | break; |
314 | 0 | case k_Prefetch: |
315 | 0 | Prefetch = o.Prefetch; |
316 | 0 | break; |
317 | 0 | case k_PSBHint: |
318 | 0 | PSBHint = o.PSBHint; |
319 | 0 | break; |
320 | 0 | case k_ShiftExtend: |
321 | 0 | ShiftExtend = o.ShiftExtend; |
322 | 0 | break; |
323 | 0 | } |
324 | 0 | } |
325 | | |
326 | | /// getStartLoc - Get the location of the first token of this operand. |
327 | 5.94k | SMLoc getStartLoc() const override { return StartLoc; } |
328 | | /// getEndLoc - Get the location of the last token of this operand. |
329 | 96 | SMLoc getEndLoc() const override { return EndLoc; } |
330 | | |
331 | 9.50k | StringRef getToken() const { |
332 | 9.50k | assert(Kind == k_Token && "Invalid access!"); |
333 | 9.50k | return StringRef(Tok.Data, Tok.Length); |
334 | 9.50k | } |
335 | | |
336 | 17 | bool isTokenSuffix() const { |
337 | 17 | assert(Kind == k_Token && "Invalid access!"); |
338 | 17 | return Tok.IsSuffix; |
339 | 17 | } |
340 | | |
341 | 6.56k | const MCExpr *getImm() const { |
342 | 6.56k | assert(Kind == k_Immediate && "Invalid access!"); |
343 | 6.56k | return Imm.Val; |
344 | 6.56k | } |
345 | | |
346 | 0 | const MCExpr *getShiftedImmVal() const { |
347 | 0 | assert(Kind == k_ShiftedImm && "Invalid access!"); |
348 | 0 | return ShiftedImm.Val; |
349 | 0 | } |
350 | | |
351 | 0 | unsigned getShiftedImmShift() const { |
352 | 0 | assert(Kind == k_ShiftedImm && "Invalid access!"); |
353 | 0 | return ShiftedImm.ShiftAmount; |
354 | 0 | } |
355 | | |
356 | 1.69k | AArch64CC::CondCode getCondCode() const { |
357 | 1.69k | assert(Kind == k_CondCode && "Invalid access!"); |
358 | 1.69k | return CondCode.Code; |
359 | 1.69k | } |
360 | | |
361 | 96 | unsigned getFPImm() const { |
362 | 96 | assert(Kind == k_FPImm && "Invalid access!"); |
363 | 96 | return FPImm.Val; |
364 | 96 | } |
365 | | |
366 | 0 | unsigned getBarrier() const { |
367 | 0 | assert(Kind == k_Barrier && "Invalid access!"); |
368 | 0 | return Barrier.Val; |
369 | 0 | } |
370 | | |
371 | 0 | StringRef getBarrierName() const { |
372 | 0 | assert(Kind == k_Barrier && "Invalid access!"); |
373 | 0 | return StringRef(Barrier.Data, Barrier.Length); |
374 | 0 | } |
375 | | |
376 | 1.62k | unsigned getReg() const override { |
377 | 1.62k | assert(Kind == k_Register && "Invalid access!"); |
378 | 1.62k | return Reg.RegNum; |
379 | 1.62k | } |
380 | | |
381 | 0 | unsigned getVectorListStart() const { |
382 | 0 | assert(Kind == k_VectorList && "Invalid access!"); |
383 | 0 | return VectorList.RegNum; |
384 | 0 | } |
385 | | |
386 | 0 | unsigned getVectorListCount() const { |
387 | 0 | assert(Kind == k_VectorList && "Invalid access!"); |
388 | 0 | return VectorList.Count; |
389 | 0 | } |
390 | | |
391 | 0 | unsigned getVectorIndex() const { |
392 | 0 | assert(Kind == k_VectorIndex && "Invalid access!"); |
393 | 0 | return VectorIndex.Val; |
394 | 0 | } |
395 | | |
396 | 0 | StringRef getSysReg() const { |
397 | 0 | assert(Kind == k_SysReg && "Invalid access!"); |
398 | 0 | return StringRef(SysReg.Data, SysReg.Length); |
399 | 0 | } |
400 | | |
401 | 0 | unsigned getSysCR() const { |
402 | 0 | assert(Kind == k_SysCR && "Invalid access!"); |
403 | 0 | return SysCRImm.Val; |
404 | 0 | } |
405 | | |
406 | 0 | unsigned getPrefetch() const { |
407 | 0 | assert(Kind == k_Prefetch && "Invalid access!"); |
408 | 0 | return Prefetch.Val; |
409 | 0 | } |
410 | | |
411 | 0 | unsigned getPSBHint() const { |
412 | 0 | assert(Kind == k_PSBHint && "Invalid access!"); |
413 | 0 | return PSBHint.Val; |
414 | 0 | } |
415 | | |
416 | 0 | StringRef getPSBHintName() const { |
417 | 0 | assert(Kind == k_PSBHint && "Invalid access!"); |
418 | 0 | return StringRef(PSBHint.Data, PSBHint.Length); |
419 | 0 | } |
420 | | |
421 | 0 | StringRef getPrefetchName() const { |
422 | 0 | assert(Kind == k_Prefetch && "Invalid access!"); |
423 | 0 | return StringRef(Prefetch.Data, Prefetch.Length); |
424 | 0 | } |
425 | | |
426 | 0 | AArch64_AM::ShiftExtendType getShiftExtendType() const { |
427 | 0 | assert(Kind == k_ShiftExtend && "Invalid access!"); |
428 | 0 | return ShiftExtend.Type; |
429 | 0 | } |
430 | | |
431 | 0 | unsigned getShiftExtendAmount() const { |
432 | 0 | assert(Kind == k_ShiftExtend && "Invalid access!"); |
433 | 0 | return ShiftExtend.Amount; |
434 | 0 | } |
435 | | |
436 | 0 | bool hasShiftExtendAmount() const { |
437 | 0 | assert(Kind == k_ShiftExtend && "Invalid access!"); |
438 | 0 | return ShiftExtend.HasExplicitAmount; |
439 | 0 | } |
440 | | |
441 | 2.19k | bool isImm() const override { return Kind == k_Immediate; } |
442 | 0 | bool isMem() const override { return false; } |
443 | 0 | bool isSImm9() const { |
444 | 0 | if (!isImm()) |
445 | 0 | return false; |
446 | 0 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
447 | 0 | if (!MCE) |
448 | 0 | return false; |
449 | 0 | int64_t Val = MCE->getValue(); |
450 | 0 | return (Val >= -256 && Val < 256); |
451 | 0 | } |
452 | 0 | bool isSImm7s4() const { |
453 | 0 | if (!isImm()) |
454 | 0 | return false; |
455 | 0 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
456 | 0 | if (!MCE) |
457 | 0 | return false; |
458 | 0 | int64_t Val = MCE->getValue(); |
459 | 0 | return (Val >= -256 && Val <= 252 && (Val & 3) == 0); |
460 | 0 | } |
461 | 0 | bool isSImm7s8() const { |
462 | 0 | if (!isImm()) |
463 | 0 | return false; |
464 | 0 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
465 | 0 | if (!MCE) |
466 | 0 | return false; |
467 | 0 | int64_t Val = MCE->getValue(); |
468 | 0 | return (Val >= -512 && Val <= 504 && (Val & 7) == 0); |
469 | 0 | } |
470 | 0 | bool isSImm7s16() const { |
471 | 0 | if (!isImm()) |
472 | 0 | return false; |
473 | 0 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
474 | 0 | if (!MCE) |
475 | 0 | return false; |
476 | 0 | int64_t Val = MCE->getValue(); |
477 | 0 | return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0); |
478 | 0 | } |
479 | | |
480 | 0 | bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const { |
481 | 0 | AArch64MCExpr::VariantKind ELFRefKind; |
482 | 0 | MCSymbolRefExpr::VariantKind DarwinRefKind; |
483 | 0 | int64_t Addend; |
484 | 0 | if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, |
485 | 0 | Addend)) { |
486 | | // If we don't understand the expression, assume the best and |
487 | | // let the fixup and relocation code deal with it. |
488 | 0 | return true; |
489 | 0 | } |
490 | | |
491 | 0 | if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF || |
492 | 0 | ELFRefKind == AArch64MCExpr::VK_LO12 || |
493 | 0 | ELFRefKind == AArch64MCExpr::VK_GOT_LO12 || |
494 | 0 | ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 || |
495 | 0 | ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC || |
496 | 0 | ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 || |
497 | 0 | ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC || |
498 | 0 | ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC || |
499 | 0 | ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) { |
500 | | // Note that we don't range-check the addend. It's adjusted modulo page |
501 | | // size when converted, so there is no "out of range" condition when using |
502 | | // @pageoff. |
503 | 0 | return Addend >= 0 && (Addend % Scale) == 0; |
504 | 0 | } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF || |
505 | 0 | DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) { |
506 | | // @gotpageoff/@tlvppageoff can only be used directly, not with an addend. |
507 | 0 | return Addend == 0; |
508 | 0 | } |
509 | | |
510 | 0 | return false; |
511 | 0 | } |
512 | | |
513 | 0 | template <int Scale> bool isUImm12Offset() const { |
514 | 0 | if (!isImm()) |
515 | 0 | return false; |
516 | | |
517 | 0 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
518 | 0 | if (!MCE) |
519 | 0 | return isSymbolicUImm12Offset(getImm(), Scale); |
520 | | |
521 | 0 | int64_t Val = MCE->getValue(); |
522 | 0 | return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000; |
523 | 0 | } Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isUImm12Offset<16>() const Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isUImm12Offset<1>() const Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isUImm12Offset<2>() const Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isUImm12Offset<4>() const Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isUImm12Offset<8>() const |
524 | | |
525 | 0 | bool isImm0_1() const { |
526 | 0 | if (!isImm()) |
527 | 0 | return false; |
528 | 0 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
529 | 0 | if (!MCE) |
530 | 0 | return false; |
531 | 0 | int64_t Val = MCE->getValue(); |
532 | 0 | return (Val >= 0 && Val < 2); |
533 | 0 | } |
534 | 4 | bool isImm0_7() const { |
535 | 4 | if (!isImm()) |
536 | 0 | return false; |
537 | 4 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
538 | 4 | if (!MCE) |
539 | 0 | return false; |
540 | 4 | int64_t Val = MCE->getValue(); |
541 | 4 | return (Val >= 0 && Val < 8); |
542 | 4 | } |
543 | 0 | bool isImm1_8() const { |
544 | 0 | if (!isImm()) |
545 | 0 | return false; |
546 | 0 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
547 | 0 | if (!MCE) |
548 | 0 | return false; |
549 | 0 | int64_t Val = MCE->getValue(); |
550 | 0 | return (Val > 0 && Val < 9); |
551 | 0 | } |
552 | 0 | bool isImm0_15() const { |
553 | 0 | if (!isImm()) |
554 | 0 | return false; |
555 | 0 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
556 | 0 | if (!MCE) |
557 | 0 | return false; |
558 | 0 | int64_t Val = MCE->getValue(); |
559 | 0 | return (Val >= 0 && Val < 16); |
560 | 0 | } |
561 | 0 | bool isImm1_16() const { |
562 | 0 | if (!isImm()) |
563 | 0 | return false; |
564 | 0 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
565 | 0 | if (!MCE) |
566 | 0 | return false; |
567 | 0 | int64_t Val = MCE->getValue(); |
568 | 0 | return (Val > 0 && Val < 17); |
569 | 0 | } |
570 | 0 | bool isImm0_31() const { |
571 | 0 | if (!isImm()) |
572 | 0 | return false; |
573 | 0 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
574 | 0 | if (!MCE) |
575 | 0 | return false; |
576 | 0 | int64_t Val = MCE->getValue(); |
577 | 0 | return (Val >= 0 && Val < 32); |
578 | 0 | } |
579 | 0 | bool isImm1_31() const { |
580 | 0 | if (!isImm()) |
581 | 0 | return false; |
582 | 0 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
583 | 0 | if (!MCE) |
584 | 0 | return false; |
585 | 0 | int64_t Val = MCE->getValue(); |
586 | 0 | return (Val >= 1 && Val < 32); |
587 | 0 | } |
588 | 0 | bool isImm1_32() const { |
589 | 0 | if (!isImm()) |
590 | 0 | return false; |
591 | 0 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
592 | 0 | if (!MCE) |
593 | 0 | return false; |
594 | 0 | int64_t Val = MCE->getValue(); |
595 | 0 | return (Val >= 1 && Val < 33); |
596 | 0 | } |
597 | 0 | bool isImm0_63() const { |
598 | 0 | if (!isImm()) |
599 | 0 | return false; |
600 | 0 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
601 | 0 | if (!MCE) |
602 | 0 | return false; |
603 | 0 | int64_t Val = MCE->getValue(); |
604 | 0 | return (Val >= 0 && Val < 64); |
605 | 0 | } |
606 | 0 | bool isImm1_63() const { |
607 | 0 | if (!isImm()) |
608 | 0 | return false; |
609 | 0 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
610 | 0 | if (!MCE) |
611 | 0 | return false; |
612 | 0 | int64_t Val = MCE->getValue(); |
613 | 0 | return (Val >= 1 && Val < 64); |
614 | 0 | } |
615 | 0 | bool isImm1_64() const { |
616 | 0 | if (!isImm()) |
617 | 0 | return false; |
618 | 0 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
619 | 0 | if (!MCE) |
620 | 0 | return false; |
621 | 0 | int64_t Val = MCE->getValue(); |
622 | 0 | return (Val >= 1 && Val < 65); |
623 | 0 | } |
624 | 0 | bool isImm0_127() const { |
625 | 0 | if (!isImm()) |
626 | 0 | return false; |
627 | 0 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
628 | 0 | if (!MCE) |
629 | 0 | return false; |
630 | 0 | int64_t Val = MCE->getValue(); |
631 | 0 | return (Val >= 0 && Val < 128); |
632 | 0 | } |
633 | 0 | bool isImm0_255() const { |
634 | 0 | if (!isImm()) |
635 | 0 | return false; |
636 | 0 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
637 | 0 | if (!MCE) |
638 | 0 | return false; |
639 | 0 | int64_t Val = MCE->getValue(); |
640 | 0 | return (Val >= 0 && Val < 256); |
641 | 0 | } |
642 | 0 | bool isImm0_65535() const { |
643 | 0 | if (!isImm()) |
644 | 0 | return false; |
645 | 0 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
646 | 0 | if (!MCE) |
647 | 0 | return false; |
648 | 0 | int64_t Val = MCE->getValue(); |
649 | 0 | return (Val >= 0 && Val < 65536); |
650 | 0 | } |
651 | 0 | bool isImm32_63() const { |
652 | 0 | if (!isImm()) |
653 | 0 | return false; |
654 | 0 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
655 | 0 | if (!MCE) |
656 | 0 | return false; |
657 | 0 | int64_t Val = MCE->getValue(); |
658 | 0 | return (Val >= 32 && Val < 64); |
659 | 0 | } |
660 | 0 | bool isLogicalImm32() const { |
661 | 0 | if (!isImm()) |
662 | 0 | return false; |
663 | 0 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
664 | 0 | if (!MCE) |
665 | 0 | return false; |
666 | 0 | int64_t Val = MCE->getValue(); |
667 | 0 | if (Val >> 32 != 0 && Val >> 32 != ~0LL) |
668 | 0 | return false; |
669 | 0 | Val &= 0xFFFFFFFF; |
670 | 0 | return AArch64_AM::isLogicalImmediate(Val, 32); |
671 | 0 | } |
672 | 0 | bool isLogicalImm64() const { |
673 | 0 | if (!isImm()) |
674 | 0 | return false; |
675 | 0 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
676 | 0 | if (!MCE) |
677 | 0 | return false; |
678 | 0 | return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64); |
679 | 0 | } |
680 | 0 | bool isLogicalImm32Not() const { |
681 | 0 | if (!isImm()) |
682 | 0 | return false; |
683 | 0 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
684 | 0 | if (!MCE) |
685 | 0 | return false; |
686 | 0 | int64_t Val = ~MCE->getValue() & 0xFFFFFFFF; |
687 | 0 | return AArch64_AM::isLogicalImmediate(Val, 32); |
688 | 0 | } |
689 | 0 | bool isLogicalImm64Not() const { |
690 | 0 | if (!isImm()) |
691 | 0 | return false; |
692 | 0 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
693 | 0 | if (!MCE) |
694 | 0 | return false; |
695 | 0 | return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64); |
696 | 0 | } |
697 | 0 | bool isShiftedImm() const { return Kind == k_ShiftedImm; } |
698 | 0 | bool isAddSubImm() const { |
699 | 0 | if (!isShiftedImm() && !isImm()) |
700 | 0 | return false; |
701 | | |
702 | 0 | const MCExpr *Expr; |
703 | | |
704 | | // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'. |
705 | 0 | if (isShiftedImm()) { |
706 | 0 | unsigned Shift = ShiftedImm.ShiftAmount; |
707 | 0 | Expr = ShiftedImm.Val; |
708 | 0 | if (Shift != 0 && Shift != 12) |
709 | 0 | return false; |
710 | 0 | } else { |
711 | 0 | Expr = getImm(); |
712 | 0 | } |
713 | | |
714 | 0 | AArch64MCExpr::VariantKind ELFRefKind; |
715 | 0 | MCSymbolRefExpr::VariantKind DarwinRefKind; |
716 | 0 | int64_t Addend; |
717 | 0 | if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, |
718 | 0 | DarwinRefKind, Addend)) { |
719 | 0 | return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF |
720 | 0 | || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF |
721 | 0 | || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0) |
722 | 0 | || ELFRefKind == AArch64MCExpr::VK_LO12 |
723 | 0 | || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 |
724 | 0 | || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 |
725 | 0 | || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC |
726 | 0 | || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 |
727 | 0 | || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 |
728 | 0 | || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC |
729 | 0 | || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12; |
730 | 0 | } |
731 | | |
732 | | // Otherwise it should be a real immediate in range: |
733 | 0 | const MCConstantExpr *CE = cast<MCConstantExpr>(Expr); |
734 | 0 | return CE->getValue() >= 0 && CE->getValue() <= 0xfff; |
735 | 0 | } |
736 | 0 | bool isAddSubImmNeg() const { |
737 | 0 | if (!isShiftedImm() && !isImm()) |
738 | 0 | return false; |
739 | | |
740 | 0 | const MCExpr *Expr; |
741 | | |
742 | | // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'. |
743 | 0 | if (isShiftedImm()) { |
744 | 0 | unsigned Shift = ShiftedImm.ShiftAmount; |
745 | 0 | Expr = ShiftedImm.Val; |
746 | 0 | if (Shift != 0 && Shift != 12) |
747 | 0 | return false; |
748 | 0 | } else |
749 | 0 | Expr = getImm(); |
750 | | |
751 | | // Otherwise it should be a real negative immediate in range: |
752 | 0 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); |
753 | 0 | return CE != nullptr && CE->getValue() < 0 && -CE->getValue() <= 0xfff; |
754 | 0 | } |
755 | 1.70k | bool isCondCode() const { return Kind == k_CondCode; } |
756 | 0 | bool isSIMDImmType10() const { |
757 | 0 | if (!isImm()) |
758 | 0 | return false; |
759 | 0 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
760 | 0 | if (!MCE) |
761 | 0 | return false; |
762 | 0 | return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue()); |
763 | 0 | } |
764 | 493 | bool isBranchTarget26() const { |
765 | 493 | if (!isImm()) |
766 | 0 | return false; |
767 | 493 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
768 | 493 | if (!MCE) |
769 | 493 | return true; |
770 | 0 | int64_t Val = MCE->getValue(); |
771 | 0 | return ((Val & 0x3) == 0); |
772 | 493 | } |
773 | 1.69k | bool isPCRelLabel19() const { |
774 | 1.69k | if (!isImm()) |
775 | 0 | return false; |
776 | 1.69k | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
777 | 1.69k | if (!MCE) |
778 | 1.68k | return true; |
779 | 14 | int64_t Val = MCE->getValue(); |
780 | 14 | return ((Val & 0x3) == 0); |
781 | 1.69k | } |
782 | 0 | bool isBranchTarget14() const { |
783 | 0 | if (!isImm()) |
784 | 0 | return false; |
785 | 0 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
786 | 0 | if (!MCE) |
787 | 0 | return true; |
788 | 0 | int64_t Val = MCE->getValue(); |
789 | 0 | return ((Val & 0x3) == 0); |
790 | 0 | } |
791 | | |
792 | | bool |
793 | 0 | isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const { |
794 | 0 | if (!isImm()) |
795 | 0 | return false; |
796 | | |
797 | 0 | AArch64MCExpr::VariantKind ELFRefKind; |
798 | 0 | MCSymbolRefExpr::VariantKind DarwinRefKind; |
799 | 0 | int64_t Addend; |
800 | 0 | if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind, |
801 | 0 | DarwinRefKind, Addend)) { |
802 | 0 | return false; |
803 | 0 | } |
804 | 0 | if (DarwinRefKind != MCSymbolRefExpr::VK_None) |
805 | 0 | return false; |
806 | | |
807 | 0 | for (unsigned i = 0; i != AllowedModifiers.size(); ++i) { |
808 | 0 | if (ELFRefKind == AllowedModifiers[i]) |
809 | 0 | return Addend == 0; |
810 | 0 | } |
811 | | |
812 | 0 | return false; |
813 | 0 | } |
814 | | |
815 | 0 | bool isMovZSymbolG3() const { |
816 | 0 | return isMovWSymbol(AArch64MCExpr::VK_ABS_G3); |
817 | 0 | } |
818 | | |
819 | 0 | bool isMovZSymbolG2() const { |
820 | 0 | return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S, |
821 | 0 | AArch64MCExpr::VK_TPREL_G2, |
822 | 0 | AArch64MCExpr::VK_DTPREL_G2}); |
823 | 0 | } |
824 | | |
825 | 0 | bool isMovZSymbolG1() const { |
826 | 0 | return isMovWSymbol({ |
827 | 0 | AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S, |
828 | 0 | AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1, |
829 | 0 | AArch64MCExpr::VK_DTPREL_G1, |
830 | 0 | }); |
831 | 0 | } |
832 | | |
833 | 0 | bool isMovZSymbolG0() const { |
834 | 0 | return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S, |
835 | 0 | AArch64MCExpr::VK_TPREL_G0, |
836 | 0 | AArch64MCExpr::VK_DTPREL_G0}); |
837 | 0 | } |
838 | | |
  // The isMovKSymbolG<N> predicates are the MOVK counterparts of the
  // isMovZSymbolG<N> checks above.  For chunks 0-2 only the _NC ("no check")
  // modifier variants are accepted; G3 has no _NC form.
  bool isMovKSymbolG3() const {
    return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
  }

  bool isMovKSymbolG2() const {
    return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
  }

  bool isMovKSymbolG1() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
                         AArch64MCExpr::VK_TPREL_G1_NC,
                         AArch64MCExpr::VK_DTPREL_G1_NC});
  }

  bool isMovKSymbolG0() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
         AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
  }
858 | | |
859 | | template<int RegWidth, int Shift> |
860 | 0 | bool isMOVZMovAlias() const { |
861 | 0 | if (!isImm()) return false; |
862 | | |
863 | 0 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
864 | 0 | if (!CE) return false; |
865 | 0 | uint64_t Value = CE->getValue(); |
866 | |
|
867 | 0 | if (RegWidth == 32) |
868 | 0 | Value &= 0xffffffffULL; |
869 | | |
870 | | // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0". |
871 | 0 | if (Value == 0 && Shift != 0) |
872 | 0 | return false; |
873 | | |
874 | 0 | return (Value & ~(0xffffULL << Shift)) == 0; |
875 | 0 | } Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isMOVZMovAlias<32, 0>() const Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isMOVZMovAlias<32, 16>() const Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isMOVZMovAlias<64, 0>() const Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isMOVZMovAlias<64, 16>() const Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isMOVZMovAlias<64, 32>() const Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isMOVZMovAlias<64, 48>() const |
876 | | |
  /// Can this constant immediate be encoded as "mov Rd, #imm" via a single
  /// MOVN with the given shift?  The complemented value must fit in one
  /// shifted 16-bit field, and no MOVZ encoding may exist for the original
  /// value (MOVZ is the preferred alias).
  template<int RegWidth, int Shift>
  bool isMOVNMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    // MOVZ takes precedence over MOVN.
    for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
      if ((Value & ~(0xffffULL << MOVZShift)) == 0)
        return false;

    // Check the bitwise complement against the selected 16-bit field,
    // truncating to 32 bits first for W registers.
    Value = ~Value;
    if (RegWidth == 32)
      Value &= 0xffffffffULL;

    return (Value & ~(0xffffULL << Shift)) == 0;
  }
896 | | |
  bool isFPImm() const { return Kind == k_FPImm; }
  bool isBarrier() const { return Kind == k_Barrier; }
  bool isSysReg() const { return Kind == k_SysReg; }
  // A system register readable via MRS; -1U in SysReg.MRSReg marks "no valid
  // MRS encoding".
  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }
  // A system register writable via MSR; -1U in SysReg.MSRReg marks "no valid
  // MSR encoding".
  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }
  // PSTATE fields whose MSR-immediate form takes a 1-bit immediate (PAN/UAO).
  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return (SysReg.PStateField == AArch64PState::PAN ||
            SysReg.PStateField == AArch64PState::UAO);
  }
  // Any other valid PSTATE field: these take a 4-bit immediate.  -1U marks
  // "not a PSTATE field".
  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
    return SysReg.PStateField != -1U;
  }
918 | 1.43k | bool isReg() const override { return Kind == k_Register && !Reg.isVector; } |
919 | 0 | bool isVectorReg() const { return Kind == k_Register && Reg.isVector; } |
920 | 0 | bool isVectorRegLo() const { |
921 | 0 | return Kind == k_Register && Reg.isVector && |
922 | 0 | AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains( |
923 | 0 | Reg.RegNum); |
924 | 0 | } |
925 | 0 | bool isGPR32as64() const { |
926 | 0 | return Kind == k_Register && !Reg.isVector && |
927 | 0 | AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum); |
928 | 0 | } |
929 | 0 | bool isWSeqPair() const { |
930 | 0 | return Kind == k_Register && !Reg.isVector && |
931 | 0 | AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains( |
932 | 0 | Reg.RegNum); |
933 | 0 | } |
934 | 0 | bool isXSeqPair() const { |
935 | 0 | return Kind == k_Register && !Reg.isVector && |
936 | 0 | AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains( |
937 | 0 | Reg.RegNum); |
938 | 0 | } |
939 | | |
940 | 0 | bool isGPR64sp0() const { |
941 | 0 | return Kind == k_Register && !Reg.isVector && |
942 | 0 | AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum); |
943 | 0 | } |
944 | | |
  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?  Matches when the list has exactly NumRegs
  /// registers and no element kind was written by the user.
  template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           !VectorList.ElementKind;
  }
951 | | |
  /// Is this a vector list of exactly NumRegs registers, each with
  /// NumElements elements of the given ElementKind (the type-suffix letter,
  /// e.g. 'b', 'h', 's', 'd')?
  template <unsigned NumRegs, unsigned NumElements, char ElementKind>
  bool isTypedVectorList() const {
    if (Kind != k_VectorList)
      return false;
    if (VectorList.Count != NumRegs)
      return false;
    if (VectorList.ElementKind != ElementKind)
      return false;
    return VectorList.NumElements == NumElements;
  }
962 | | |
  // Vector lane-index predicates: the upper bound follows from the element
  // size (16 byte lanes, 8 halfword, 4 word, 2 doubleword in a 128-bit
  // register).
  bool isVectorIndex1() const {
    return Kind == k_VectorIndex && VectorIndex.Val == 1;
  }
  bool isVectorIndexB() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 16;
  }
  bool isVectorIndexH() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 8;
  }
  bool isVectorIndexS() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 4;
  }
  bool isVectorIndexD() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 2;
  }
  bool isToken() const override { return Kind == k_Token; }
  // Token operand whose spelling equals Str exactly.
  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
986 | 0 | bool isShifter() const { |
987 | 0 | if (!isShiftExtend()) |
988 | 0 | return false; |
989 | | |
990 | 0 | AArch64_AM::ShiftExtendType ST = getShiftExtendType(); |
991 | 0 | return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR || |
992 | 0 | ST == AArch64_AM::ASR || ST == AArch64_AM::ROR || |
993 | 0 | ST == AArch64_AM::MSL); |
994 | 0 | } |
995 | 0 | bool isExtend() const { |
996 | 0 | if (!isShiftExtend()) |
997 | 0 | return false; |
998 | | |
999 | 0 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); |
1000 | 0 | return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB || |
1001 | 0 | ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH || |
1002 | 0 | ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW || |
1003 | 0 | ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX || |
1004 | 0 | ET == AArch64_AM::LSL) && |
1005 | 0 | getShiftExtendAmount() <= 4; |
1006 | 0 | } |
1007 | | |
  // An extend usable with a 32-bit source register: any extend except
  // UXTX/SXTX.
  bool isExtend64() const {
    if (!isExtend())
      return false;
    // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
  }
  // An extend usable with a 64-bit source register: UXTX, SXTX or LSL, with
  // an amount of at most 4.
  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }
1023 | | |
  // Memory-operand extend for a 64-bit index register: LSL or SXTX, with a
  // shift of either 0 or log2 of the access size in bytes (Width is in
  // bits).
  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  // Memory-operand extend for a 32-bit index register: UXTW or SXTW, same
  // shift-amount rule as isMemXExtend.
  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }
1041 | | |
  // Shifter acceptable on arithmetic (add/sub) instructions: LSL/LSR/ASR
  // with an amount strictly below the register width.
  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  // Shifter acceptable on logical (and/orr/eor) instructions: additionally
  // allows ROR.
  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }
1064 | | |
  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    // (NOTE(review): the original comment here listed 0/16/32/48 — that
    // describes the 64-bit form below; the code only accepts 0 and 16.)
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }
1076 | | |
  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    // (NOTE(review): the original comment here said "0 or 16" — that
    // describes the 32-bit form above; the code accepts all four amounts.)
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }
1088 | | |
  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  // Halfword variant: only shifts of 0 or 8 are valid (16-bit elements).
  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector halfword shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }
1108 | | |
  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL (shift-with-ones) of 8 or 16.
    // (NOTE(review): the original comment called this a "left shift" — the
    // code requires the MSL shift type, not LSL.)
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
1118 | | |
  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  // Width is the access size in bits; the scaled check divides by 8 to get
  // the byte scale expected by isUImm12Offset.
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm9() && !isUImm12Offset<Width / 8>();
  }
1128 | | |
  // Immediate usable as an ADRP label.  For a constant, the value must be
  // 4KiB-aligned and its offset from the assembler's base address (a
  // keystone extension: Ctx.getBaseAddress()) must fit in the signed 21-bit
  // page-count field, i.e. +/- 4GiB of pages.
  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Offset = Val - Ctx.getBaseAddress();
      int64_t Min = - (4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Offset >= Min && Offset <= Max;
    }

    // Non-constant expressions are resolved later by the fixup machinery.
    return true;
  }
1145 | | |
  // Immediate usable as an ADR label: a constant must fit in the signed
  // 21-bit byte-offset field (+/- 1MiB); any other expression is accepted
  // and left for relocation.
  bool isAdrLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (1LL << (21 - 1));
      int64_t Max = ((1LL << (21 - 1)) - 1);
      return Val >= Min && Val <= Max;
    }

    return true;
  }
1161 | | |
1162 | 2.17k | void addExpr(MCInst &Inst, const MCExpr *Expr) const { |
1163 | | // Add as immediates when possible. Null MCExpr = 0. |
1164 | 2.17k | if (!Expr) |
1165 | 0 | Inst.addOperand(MCOperand::createImm(0)); |
1166 | 2.17k | else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) |
1167 | 0 | Inst.addOperand(MCOperand::createImm(CE->getValue())); |
1168 | 2.17k | else |
1169 | 2.17k | Inst.addOperand(MCOperand::createExpr(Expr)); |
1170 | 2.17k | } |
1171 | | |
  // Append this operand's register to Inst unchanged.
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }
1176 | | |
  // The operand holds a 64-bit GPR but the instruction wants its 32-bit
  // counterpart: translate via the shared hardware encoding (Xn -> Wn).
  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }
1188 | | |
  // The parser always records vector registers as Q registers; remap to the
  // corresponding D register by index arithmetic when a 64-bit vector
  // operand is required.
  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }

  // 128-bit vector operand: the stored Q register is used directly.
  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(getReg()));
  }
1202 | | |
  // Low-half vector register operand (FPR128_lo): the register is emitted
  // as-is; class membership was checked by isVectorRegLo during matching.
  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }
1207 | | |
  // Emit a list of NumRegs consecutive 64-bit vector registers as the single
  // tuple register (D0, D0_D1, ...) whose index matches the list's first
  // register.  The parser stores the first register as a Q register, hence
  // the "- AArch64::Q0" rebasing.
  template <unsigned NumRegs>
  void addVectorList64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    static const unsigned FirstRegs[] = { AArch64::D0,
                                          AArch64::D0_D1,
                                          AArch64::D0_D1_D2,
                                          AArch64::D0_D1_D2_D3 };
    unsigned FirstReg = FirstRegs[NumRegs - 1];

    Inst.addOperand(
        MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
  }

  // Same as above for 128-bit vector lists (Q0, Q0_Q1, ... tuples).
  template <unsigned NumRegs>
  void addVectorList128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    static const unsigned FirstRegs[] = { AArch64::Q0,
                                          AArch64::Q0_Q1,
                                          AArch64::Q0_Q1_Q2,
                                          AArch64::Q0_Q1_Q2_Q3 };
    unsigned FirstReg = FirstRegs[NumRegs - 1];

    Inst.addOperand(
        MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
  }
1233 | | |
  // The five vector-index adders are identical: the lane index is emitted as
  // a plain immediate.  Range validation happened in the isVectorIndex*
  // predicates during matching.
  void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }
1258 | | |
  // Generic immediate: delegate to addExpr, which folds constants.
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
    // (NOTE(review): no pageoff adjustment is visible here — it presumably
    // happens elsewhere; the comment predates this body. Confirm upstream.)
    addExpr(Inst, getImm());
  }
1266 | | |
  // Add/sub immediate: emits two operands — the immediate value and the
  // shift amount (0 when no explicit shift was written).
  void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (isShiftedImm()) {
      addExpr(Inst, getShiftedImmVal());
      Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
    } else {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::createImm(0));
    }
  }
1277 | | |
  // Negated add/sub immediate (used when an alias flips add<->sub): emits
  // the negated constant plus the shift amount.  The cast requires the
  // immediate to be a constant expression; the matcher guarantees this.
  void addAddSubImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");

    const MCExpr *MCE = isShiftedImm() ? getShiftedImmVal() : getImm();
    const MCConstantExpr *CE = cast<MCConstantExpr>(MCE);
    int64_t Val = -CE->getValue();
    unsigned ShiftAmt = isShiftedImm() ? ShiftedImm.ShiftAmount : 0;

    Inst.addOperand(MCOperand::createImm(Val));
    Inst.addOperand(MCOperand::createImm(ShiftAmt));
  }
1289 | | |
1290 | 1.69k | void addCondCodeOperands(MCInst &Inst, unsigned N) const { |
1291 | 1.69k | assert(N == 1 && "Invalid number of operands!"); |
1292 | 1.69k | Inst.addOperand(MCOperand::createImm(getCondCode())); |
1293 | 1.69k | } |
1294 | | |
  // ADRP label: a constant is converted to a page index (>> 12); symbolic
  // expressions are passed through for fixup resolution.
  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      addExpr(Inst, getImm());
    else
      Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
  }

  // ADR label: byte-granular, so the immediate needs no scaling.
  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }
1307 | | |
  // Scaled unsigned 12-bit load/store offset: a constant is divided by the
  // access size (Scale, in bytes); a symbolic offset is emitted unscaled as
  // an expression for the fixup to handle.
  template<int Scale>
  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());

    if (!MCE) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      return;
    }
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }
1319 | | |
1320 | 0 | void addSImm9Operands(MCInst &Inst, unsigned N) const { |
1321 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1322 | 0 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); |
1323 | 0 | Inst.addOperand(MCOperand::createImm(MCE->getValue())); |
1324 | 0 | } |
1325 | | |
1326 | 0 | void addSImm7s4Operands(MCInst &Inst, unsigned N) const { |
1327 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1328 | 0 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); |
1329 | 0 | Inst.addOperand(MCOperand::createImm(MCE->getValue() / 4)); |
1330 | 0 | } |
1331 | | |
1332 | 0 | void addSImm7s8Operands(MCInst &Inst, unsigned N) const { |
1333 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1334 | 0 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); |
1335 | 0 | Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8)); |
1336 | 0 | } |
1337 | | |
1338 | 0 | void addSImm7s16Operands(MCInst &Inst, unsigned N) const { |
1339 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1340 | 0 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); |
1341 | 0 | Inst.addOperand(MCOperand::createImm(MCE->getValue() / 16)); |
1342 | 0 | } |
1343 | | |
1344 | 0 | void addImm0_1Operands(MCInst &Inst, unsigned N) const { |
1345 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1346 | 0 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); |
1347 | 0 | Inst.addOperand(MCOperand::createImm(MCE->getValue())); |
1348 | 0 | } |
1349 | | |
1350 | 0 | void addImm0_7Operands(MCInst &Inst, unsigned N) const { |
1351 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1352 | 0 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); |
1353 | 0 | Inst.addOperand(MCOperand::createImm(MCE->getValue())); |
1354 | 0 | } |
1355 | | |
1356 | 0 | void addImm1_8Operands(MCInst &Inst, unsigned N) const { |
1357 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1358 | 0 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); |
1359 | 0 | Inst.addOperand(MCOperand::createImm(MCE->getValue())); |
1360 | 0 | } |
1361 | | |
1362 | 0 | void addImm0_15Operands(MCInst &Inst, unsigned N) const { |
1363 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1364 | 0 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); |
1365 | 0 | Inst.addOperand(MCOperand::createImm(MCE->getValue())); |
1366 | 0 | } |
1367 | | |
1368 | 0 | void addImm1_16Operands(MCInst &Inst, unsigned N) const { |
1369 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1370 | 0 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); |
1371 | 0 | assert(MCE && "Invalid constant immediate operand!"); |
1372 | 0 | Inst.addOperand(MCOperand::createImm(MCE->getValue())); |
1373 | 0 | } |
1374 | | |
1375 | 0 | void addImm0_31Operands(MCInst &Inst, unsigned N) const { |
1376 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1377 | 0 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); |
1378 | 0 | Inst.addOperand(MCOperand::createImm(MCE->getValue())); |
1379 | 0 | } |
1380 | | |
1381 | 0 | void addImm1_31Operands(MCInst &Inst, unsigned N) const { |
1382 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1383 | 0 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); |
1384 | 0 | Inst.addOperand(MCOperand::createImm(MCE->getValue())); |
1385 | 0 | } |
1386 | | |
1387 | 0 | void addImm1_32Operands(MCInst &Inst, unsigned N) const { |
1388 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1389 | 0 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); |
1390 | 0 | Inst.addOperand(MCOperand::createImm(MCE->getValue())); |
1391 | 0 | } |
1392 | | |
1393 | 0 | void addImm0_63Operands(MCInst &Inst, unsigned N) const { |
1394 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1395 | 0 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); |
1396 | 0 | Inst.addOperand(MCOperand::createImm(MCE->getValue())); |
1397 | 0 | } |
1398 | | |
1399 | 0 | void addImm1_63Operands(MCInst &Inst, unsigned N) const { |
1400 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1401 | 0 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); |
1402 | 0 | Inst.addOperand(MCOperand::createImm(MCE->getValue())); |
1403 | 0 | } |
1404 | | |
1405 | 0 | void addImm1_64Operands(MCInst &Inst, unsigned N) const { |
1406 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1407 | 0 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); |
1408 | 0 | Inst.addOperand(MCOperand::createImm(MCE->getValue())); |
1409 | 0 | } |
1410 | | |
1411 | 0 | void addImm0_127Operands(MCInst &Inst, unsigned N) const { |
1412 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1413 | 0 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); |
1414 | 0 | Inst.addOperand(MCOperand::createImm(MCE->getValue())); |
1415 | 0 | } |
1416 | | |
1417 | 0 | void addImm0_255Operands(MCInst &Inst, unsigned N) const { |
1418 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1419 | 0 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); |
1420 | 0 | Inst.addOperand(MCOperand::createImm(MCE->getValue())); |
1421 | 0 | } |
1422 | | |
1423 | 0 | void addImm0_65535Operands(MCInst &Inst, unsigned N) const { |
1424 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1425 | 0 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); |
1426 | 0 | Inst.addOperand(MCOperand::createImm(MCE->getValue())); |
1427 | 0 | } |
1428 | | |
1429 | 0 | void addImm32_63Operands(MCInst &Inst, unsigned N) const { |
1430 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1431 | 0 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); |
1432 | 0 | Inst.addOperand(MCOperand::createImm(MCE->getValue())); |
1433 | 0 | } |
1434 | | |
1435 | 0 | void addLogicalImm32Operands(MCInst &Inst, unsigned N) const { |
1436 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1437 | 0 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); |
1438 | 0 | uint64_t encoding = |
1439 | 0 | AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32); |
1440 | 0 | Inst.addOperand(MCOperand::createImm(encoding)); |
1441 | 0 | } |
1442 | | |
1443 | 0 | void addLogicalImm64Operands(MCInst &Inst, unsigned N) const { |
1444 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1445 | 0 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); |
1446 | 0 | uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64); |
1447 | 0 | Inst.addOperand(MCOperand::createImm(encoding)); |
1448 | 0 | } |
1449 | | |
1450 | 0 | void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const { |
1451 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1452 | 0 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); |
1453 | 0 | int64_t Val = ~MCE->getValue() & 0xFFFFFFFF; |
1454 | 0 | uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32); |
1455 | 0 | Inst.addOperand(MCOperand::createImm(encoding)); |
1456 | 0 | } |
1457 | | |
1458 | 0 | void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const { |
1459 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1460 | 0 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); |
1461 | 0 | uint64_t encoding = |
1462 | 0 | AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64); |
1463 | 0 | Inst.addOperand(MCOperand::createImm(encoding)); |
1464 | 0 | } |
1465 | | |
1466 | 0 | void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const { |
1467 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1468 | 0 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); |
1469 | 0 | uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue()); |
1470 | 0 | Inst.addOperand(MCOperand::createImm(encoding)); |
1471 | 0 | } |
1472 | | |
1473 | 493 | void addBranchTarget26Operands(MCInst &Inst, unsigned N) const { |
1474 | | // Branch operands don't encode the low bits, so shift them off |
1475 | | // here. If it's a label, however, just put it on directly as there's |
1476 | | // not enough information now to do anything. |
1477 | 493 | assert(N == 1 && "Invalid number of operands!"); |
1478 | 493 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
1479 | 493 | if (!MCE) { |
1480 | 493 | addExpr(Inst, getImm()); |
1481 | 493 | return; |
1482 | 493 | } |
1483 | 0 | assert(MCE && "Invalid constant immediate operand!"); |
1484 | 0 | Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2)); |
1485 | 0 | } |
1486 | | |
1487 | 1.69k | void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const { |
1488 | | // Branch operands don't encode the low bits, so shift them off |
1489 | | // here. If it's a label, however, just put it on directly as there's |
1490 | | // not enough information now to do anything. |
1491 | 1.69k | assert(N == 1 && "Invalid number of operands!"); |
1492 | 1.69k | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
1493 | 1.69k | if (!MCE) { |
1494 | 1.68k | addExpr(Inst, getImm()); |
1495 | 1.68k | return; |
1496 | 1.68k | } |
1497 | 14 | assert(MCE && "Invalid constant immediate operand!"); |
1498 | 14 | Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2)); |
1499 | 14 | } |
1500 | | |
1501 | 0 | void addBranchTarget14Operands(MCInst &Inst, unsigned N) const { |
1502 | | // Branch operands don't encode the low bits, so shift them off |
1503 | | // here. If it's a label, however, just put it on directly as there's |
1504 | | // not enough information now to do anything. |
1505 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1506 | 0 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
1507 | 0 | if (!MCE) { |
1508 | 0 | addExpr(Inst, getImm()); |
1509 | 0 | return; |
1510 | 0 | } |
1511 | 0 | assert(MCE && "Invalid constant immediate operand!"); |
1512 | 0 | Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2)); |
1513 | 0 | } |
1514 | | |
  // Append the 8-bit encoded floating-point immediate.
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getFPImm()));
  }
1519 | | |
  // Append the barrier option value (DSB/DMB/ISB operand).
  void addBarrierOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBarrier()));
  }
1524 | | |
  // Append the system-register encoding used for MRS (read) instructions.
  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
  }
1530 | | |
  // Append the system-register encoding used for MSR (write) instructions.
  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
  }
1536 | | |
  // Append a PSTATE field taking a 1-bit immediate (e.g. PAN, UAO).
  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }
1542 | | |
  // Append a PSTATE field taking a 4-bit immediate (e.g. DAIFSet/DAIFClr).
  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }
1548 | | |
  // Append a system-instruction CR operand (cN, 0 <= N <= 15).
  void addSysCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getSysCR()));
  }
1553 | | |
  // Append a PRFM prefetch-operation operand.
  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPrefetch()));
  }
1558 | | |
  // Append a PSB hint operand (statistical profiling).
  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPSBHint()));
  }
1563 | | |
1564 | 0 | void addShifterOperands(MCInst &Inst, unsigned N) const { |
1565 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1566 | 0 | unsigned Imm = |
1567 | 0 | AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount()); |
1568 | 0 | Inst.addOperand(MCOperand::createImm(Imm)); |
1569 | 0 | } |
1570 | | |
1571 | 0 | void addExtendOperands(MCInst &Inst, unsigned N) const { |
1572 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1573 | 0 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); |
1574 | 0 | if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW; |
1575 | 0 | unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount()); |
1576 | 0 | Inst.addOperand(MCOperand::createImm(Imm)); |
1577 | 0 | } |
1578 | | |
1579 | 0 | void addExtend64Operands(MCInst &Inst, unsigned N) const { |
1580 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1581 | 0 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); |
1582 | 0 | if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX; |
1583 | 0 | unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount()); |
1584 | 0 | Inst.addOperand(MCOperand::createImm(Imm)); |
1585 | 0 | } |
1586 | | |
1587 | 0 | void addMemExtendOperands(MCInst &Inst, unsigned N) const { |
1588 | 0 | assert(N == 2 && "Invalid number of operands!"); |
1589 | 0 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); |
1590 | 0 | bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX; |
1591 | 0 | Inst.addOperand(MCOperand::createImm(IsSigned)); |
1592 | 0 | Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0)); |
1593 | 0 | } |
1594 | | |
1595 | | // For 8-bit load/store instructions with a register offset, both the |
1596 | | // "DoShift" and "NoShift" variants have a shift of 0. Because of this, |
1597 | | // they're disambiguated by whether the shift was explicit or implicit rather |
1598 | | // than its size. |
1599 | 0 | void addMemExtend8Operands(MCInst &Inst, unsigned N) const { |
1600 | 0 | assert(N == 2 && "Invalid number of operands!"); |
1601 | 0 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); |
1602 | 0 | bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX; |
1603 | 0 | Inst.addOperand(MCOperand::createImm(IsSigned)); |
1604 | 0 | Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount())); |
1605 | 0 | } |
1606 | | |
1607 | | template<int Shift> |
1608 | 0 | void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const { |
1609 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1610 | | |
1611 | 0 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); |
1612 | 0 | uint64_t Value = CE->getValue(); |
1613 | 0 | Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff)); |
1614 | 0 | } Unexecuted instantiation: AArch64AsmParser.cpp:void (anonymous namespace)::AArch64Operand::addMOVZMovAliasOperands<0>(llvm_ks::MCInst&, unsigned int) const Unexecuted instantiation: AArch64AsmParser.cpp:void (anonymous namespace)::AArch64Operand::addMOVZMovAliasOperands<16>(llvm_ks::MCInst&, unsigned int) const Unexecuted instantiation: AArch64AsmParser.cpp:void (anonymous namespace)::AArch64Operand::addMOVZMovAliasOperands<32>(llvm_ks::MCInst&, unsigned int) const Unexecuted instantiation: AArch64AsmParser.cpp:void (anonymous namespace)::AArch64Operand::addMOVZMovAliasOperands<48>(llvm_ks::MCInst&, unsigned int) const |
1615 | | |
1616 | | template<int Shift> |
1617 | 0 | void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const { |
1618 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1619 | | |
1620 | 0 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); |
1621 | 0 | uint64_t Value = CE->getValue(); |
1622 | 0 | Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff)); |
1623 | 0 | } Unexecuted instantiation: AArch64AsmParser.cpp:void (anonymous namespace)::AArch64Operand::addMOVNMovAliasOperands<0>(llvm_ks::MCInst&, unsigned int) const Unexecuted instantiation: AArch64AsmParser.cpp:void (anonymous namespace)::AArch64Operand::addMOVNMovAliasOperands<16>(llvm_ks::MCInst&, unsigned int) const Unexecuted instantiation: AArch64AsmParser.cpp:void (anonymous namespace)::AArch64Operand::addMOVNMovAliasOperands<32>(llvm_ks::MCInst&, unsigned int) const Unexecuted instantiation: AArch64AsmParser.cpp:void (anonymous namespace)::AArch64Operand::addMOVNMovAliasOperands<48>(llvm_ks::MCInst&, unsigned int) const |
1624 | | |
1625 | | void print(raw_ostream &OS) const override; |
1626 | | |
1627 | | static std::unique_ptr<AArch64Operand> |
1628 | 68.1k | CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) { |
1629 | 68.1k | auto Op = make_unique<AArch64Operand>(k_Token, Ctx); |
1630 | 68.1k | Op->Tok.Data = Str.data(); |
1631 | 68.1k | Op->Tok.Length = Str.size(); |
1632 | 68.1k | Op->Tok.IsSuffix = IsSuffix; |
1633 | 68.1k | Op->StartLoc = S; |
1634 | 68.1k | Op->EndLoc = S; |
1635 | 68.1k | return Op; |
1636 | 68.1k | } |
1637 | | |
1638 | | static std::unique_ptr<AArch64Operand> |
1639 | 4.48k | CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) { |
1640 | 4.48k | auto Op = make_unique<AArch64Operand>(k_Register, Ctx); |
1641 | 4.48k | Op->Reg.RegNum = RegNum; |
1642 | 4.48k | Op->Reg.isVector = isVector; |
1643 | 4.48k | Op->StartLoc = S; |
1644 | 4.48k | Op->EndLoc = E; |
1645 | 4.48k | return Op; |
1646 | 4.48k | } |
1647 | | |
1648 | | static std::unique_ptr<AArch64Operand> |
1649 | | CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements, |
1650 | 7 | char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) { |
1651 | 7 | auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx); |
1652 | 7 | Op->VectorList.RegNum = RegNum; |
1653 | 7 | Op->VectorList.Count = Count; |
1654 | 7 | Op->VectorList.NumElements = NumElements; |
1655 | 7 | Op->VectorList.ElementKind = ElementKind; |
1656 | 7 | Op->StartLoc = S; |
1657 | 7 | Op->EndLoc = E; |
1658 | 7 | return Op; |
1659 | 7 | } |
1660 | | |
1661 | | static std::unique_ptr<AArch64Operand> |
1662 | 0 | CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) { |
1663 | 0 | auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx); |
1664 | 0 | Op->VectorIndex.Val = Idx; |
1665 | 0 | Op->StartLoc = S; |
1666 | 0 | Op->EndLoc = E; |
1667 | 0 | return Op; |
1668 | 0 | } |
1669 | | |
1670 | | static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S, |
1671 | 20.0k | SMLoc E, MCContext &Ctx) { |
1672 | 20.0k | auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx); |
1673 | 20.0k | Op->Imm.Val = Val; |
1674 | 20.0k | Op->StartLoc = S; |
1675 | 20.0k | Op->EndLoc = E; |
1676 | 20.0k | return Op; |
1677 | 20.0k | } |
1678 | | |
1679 | | static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val, |
1680 | | unsigned ShiftAmount, |
1681 | | SMLoc S, SMLoc E, |
1682 | 0 | MCContext &Ctx) { |
1683 | 0 | auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx); |
1684 | 0 | Op->ShiftedImm .Val = Val; |
1685 | 0 | Op->ShiftedImm.ShiftAmount = ShiftAmount; |
1686 | 0 | Op->StartLoc = S; |
1687 | 0 | Op->EndLoc = E; |
1688 | 0 | return Op; |
1689 | 0 | } |
1690 | | |
1691 | | static std::unique_ptr<AArch64Operand> |
1692 | 3.43k | CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) { |
1693 | 3.43k | auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx); |
1694 | 3.43k | Op->CondCode.Code = Code; |
1695 | 3.43k | Op->StartLoc = S; |
1696 | 3.43k | Op->EndLoc = E; |
1697 | 3.43k | return Op; |
1698 | 3.43k | } |
1699 | | |
1700 | | static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S, |
1701 | 96 | MCContext &Ctx) { |
1702 | 96 | auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx); |
1703 | 96 | Op->FPImm.Val = Val; |
1704 | 96 | Op->StartLoc = S; |
1705 | 96 | Op->EndLoc = S; |
1706 | 96 | return Op; |
1707 | 96 | } |
1708 | | |
1709 | | static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val, |
1710 | | StringRef Str, |
1711 | | SMLoc S, |
1712 | 0 | MCContext &Ctx) { |
1713 | 0 | auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx); |
1714 | 0 | Op->Barrier.Val = Val; |
1715 | 0 | Op->Barrier.Data = Str.data(); |
1716 | 0 | Op->Barrier.Length = Str.size(); |
1717 | 0 | Op->StartLoc = S; |
1718 | 0 | Op->EndLoc = S; |
1719 | 0 | return Op; |
1720 | 0 | } |
1721 | | |
1722 | | static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S, |
1723 | | uint32_t MRSReg, |
1724 | | uint32_t MSRReg, |
1725 | | uint32_t PStateField, |
1726 | 238 | MCContext &Ctx) { |
1727 | 238 | auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx); |
1728 | 238 | Op->SysReg.Data = Str.data(); |
1729 | 238 | Op->SysReg.Length = Str.size(); |
1730 | 238 | Op->SysReg.MRSReg = MRSReg; |
1731 | 238 | Op->SysReg.MSRReg = MSRReg; |
1732 | 238 | Op->SysReg.PStateField = PStateField; |
1733 | 238 | Op->StartLoc = S; |
1734 | 238 | Op->EndLoc = S; |
1735 | 238 | return Op; |
1736 | 238 | } |
1737 | | |
1738 | | static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S, |
1739 | 0 | SMLoc E, MCContext &Ctx) { |
1740 | 0 | auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx); |
1741 | 0 | Op->SysCRImm.Val = Val; |
1742 | 0 | Op->StartLoc = S; |
1743 | 0 | Op->EndLoc = E; |
1744 | 0 | return Op; |
1745 | 0 | } |
1746 | | |
1747 | | static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val, |
1748 | | StringRef Str, |
1749 | | SMLoc S, |
1750 | 0 | MCContext &Ctx) { |
1751 | 0 | auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx); |
1752 | 0 | Op->Prefetch.Val = Val; |
1753 | 0 | Op->Barrier.Data = Str.data(); |
1754 | 0 | Op->Barrier.Length = Str.size(); |
1755 | 0 | Op->StartLoc = S; |
1756 | 0 | Op->EndLoc = S; |
1757 | 0 | return Op; |
1758 | 0 | } |
1759 | | |
1760 | | static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val, |
1761 | | StringRef Str, |
1762 | | SMLoc S, |
1763 | 0 | MCContext &Ctx) { |
1764 | 0 | auto Op = make_unique<AArch64Operand>(k_PSBHint, Ctx); |
1765 | 0 | Op->PSBHint.Val = Val; |
1766 | 0 | Op->PSBHint.Data = Str.data(); |
1767 | 0 | Op->PSBHint.Length = Str.size(); |
1768 | 0 | Op->StartLoc = S; |
1769 | 0 | Op->EndLoc = S; |
1770 | 0 | return Op; |
1771 | 0 | } |
1772 | | |
1773 | | static std::unique_ptr<AArch64Operand> |
1774 | | CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val, |
1775 | 70 | bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) { |
1776 | 70 | auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx); |
1777 | 70 | Op->ShiftExtend.Type = ShOp; |
1778 | 70 | Op->ShiftExtend.Amount = Val; |
1779 | 70 | Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount; |
1780 | 70 | Op->StartLoc = S; |
1781 | 70 | Op->EndLoc = E; |
1782 | 70 | return Op; |
1783 | 70 | } |
1784 | | }; |
1785 | | |
1786 | | } // end anonymous namespace. |
1787 | | |
/// Debug dump of this parsed operand to OS.  One case per operand kind;
/// output is diagnostic-only and never feeds the encoder.
void AArch64Operand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImm:
    // Show both the 8-bit encoding and the float it denotes.
    OS << "<fpimm " << getFPImm() << "("
       << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
    break;
  case k_Barrier: {
    StringRef Name = getBarrierName();
    if (!Name.empty())
      OS << "<barrier " << Name << ">";
    else
      OS << "<barrier invalid #" << getBarrier() << ">";
    break;
  }
  case k_Immediate:
    OS << *getImm();
    break;
  case k_ShiftedImm: {
    unsigned Shift = getShiftedImmShift();
    OS << "<shiftedimm ";
    OS << *getShiftedImmVal();
    OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
    break;
  }
  case k_CondCode:
    OS << "<condcode " << getCondCode() << ">";
    break;
  case k_Register:
    OS << "<register " << getReg() << ">";
    break;
  case k_VectorList: {
    OS << "<vectorlist ";
    unsigned Reg = getVectorListStart();
    // Registers in a list are architecturally consecutive.
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
      OS << Reg + i << " ";
    OS << ">";
    break;
  }
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  case k_SysReg:
    OS << "<sysreg: " << getSysReg() << '>';
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_SysCR:
    OS << "c" << getSysCR();
    break;
  case k_Prefetch: {
    StringRef Name = getPrefetchName();
    if (!Name.empty())
      OS << "<prfop " << Name << ">";
    else
      OS << "<prfop invalid #" << getPrefetch() << ">";
    break;
  }
  case k_PSBHint: {
    OS << getPSBHintName();
    break;
  }
  case k_ShiftExtend: {
    OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
       << getShiftExtendAmount();
    if (!hasShiftExtendAmount())
      OS << "<imp>";  // amount was implicit, not written in the source
    OS << '>';
    break;
  }
  }
}
1860 | | |
1861 | | /// @name Auto-generated Match Functions |
1862 | | /// { |
1863 | | |
1864 | | static unsigned MatchRegisterName(StringRef Name); |
1865 | | |
1866 | | /// } |
1867 | | |
// Map the exact spellings "v0".."v31" (case-insensitive) onto the
// corresponding 128-bit Q register; any other spelling yields 0.
// Deliberately exact-match: "v01" or "v32" are rejected.
static unsigned matchVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("v0", AArch64::Q0)
      .Case("v1", AArch64::Q1)
      .Case("v2", AArch64::Q2)
      .Case("v3", AArch64::Q3)
      .Case("v4", AArch64::Q4)
      .Case("v5", AArch64::Q5)
      .Case("v6", AArch64::Q6)
      .Case("v7", AArch64::Q7)
      .Case("v8", AArch64::Q8)
      .Case("v9", AArch64::Q9)
      .Case("v10", AArch64::Q10)
      .Case("v11", AArch64::Q11)
      .Case("v12", AArch64::Q12)
      .Case("v13", AArch64::Q13)
      .Case("v14", AArch64::Q14)
      .Case("v15", AArch64::Q15)
      .Case("v16", AArch64::Q16)
      .Case("v17", AArch64::Q17)
      .Case("v18", AArch64::Q18)
      .Case("v19", AArch64::Q19)
      .Case("v20", AArch64::Q20)
      .Case("v21", AArch64::Q21)
      .Case("v22", AArch64::Q22)
      .Case("v23", AArch64::Q23)
      .Case("v24", AArch64::Q24)
      .Case("v25", AArch64::Q25)
      .Case("v26", AArch64::Q26)
      .Case("v27", AArch64::Q27)
      .Case("v28", AArch64::Q28)
      .Case("v29", AArch64::Q29)
      .Case("v30", AArch64::Q30)
      .Case("v31", AArch64::Q31)
      .Default(0);
}
1904 | | |
1905 | 3.36k | static bool isValidVectorKind(StringRef Name) { |
1906 | 3.36k | return StringSwitch<bool>(Name.lower()) |
1907 | 3.36k | .Case(".8b", true) |
1908 | 3.36k | .Case(".16b", true) |
1909 | 3.36k | .Case(".4h", true) |
1910 | 3.36k | .Case(".8h", true) |
1911 | 3.36k | .Case(".2s", true) |
1912 | 3.36k | .Case(".4s", true) |
1913 | 3.36k | .Case(".1d", true) |
1914 | 3.36k | .Case(".2d", true) |
1915 | 3.36k | .Case(".1q", true) |
1916 | | // Accept the width neutral ones, too, for verbose syntax. If those |
1917 | | // aren't used in the right places, the token operand won't match so |
1918 | | // all will work out. |
1919 | 3.36k | .Case(".b", true) |
1920 | 3.36k | .Case(".h", true) |
1921 | 3.36k | .Case(".s", true) |
1922 | 3.36k | .Case(".d", true) |
1923 | | // Needed for fp16 scalar pairwise reductions |
1924 | 3.36k | .Case(".2h", true) |
1925 | 3.36k | .Default(false); |
1926 | 3.36k | } |
1927 | | |
// Decompose a validated vector-kind suffix (e.g. ".4s") into its lane count
// and element-kind letter.  NumElements is 0 for width-neutral kinds like
// ".s" (no count written).
static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
                                 char &ElementKind) {
  assert(isValidVectorKind(Name));

  // The element kind is always the final character.
  ElementKind = Name.lower()[Name.size() - 1];
  NumElements = 0;

  // Size 2 means just "." plus the kind letter — no lane count.
  if (Name.size() == 2)
    return;

  // Parse the lane count
  Name = Name.drop_front();  // skip the leading '.'
  // Safe: the suffix always ends with a non-digit kind letter, so the loop
  // terminates before Name empties.
  while (isdigit(Name.front())) {
    NumElements = 10 * NumElements + (Name.front() - '0');
    Name = Name.drop_front();
  }
}
1945 | | |
// MCTargetAsmParser hook: parse a register at the current token.
// Returns true on failure (tryParseRegister yields -1).
// NOTE(review): ErrorCode is never written here — confirm callers treat it
// as optional on this path.
bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                     SMLoc &EndLoc, unsigned int &ErrorCode) {
  StartLoc = getLoc();
  RegNo = tryParseRegister();
  // End location is the last character consumed, hence the -1.
  EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  return (RegNo == (unsigned)-1);
}
1953 | | |
1954 | | // Matches a register name or register alias previously defined by '.req' |
1955 | | unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name, |
1956 | 41.3k | bool isVector) { |
1957 | 41.3k | unsigned RegNum = isVector ? matchVectorRegName(Name) |
1958 | 41.3k | : MatchRegisterName(Name); |
1959 | | |
1960 | 41.3k | if (RegNum == 0) { |
1961 | | // Check for aliases registered via .req. Canonicalize to lower case. |
1962 | | // That's more consistent since register names are case insensitive, and |
1963 | | // it's how the original entry was passed in from MC/MCParser/AsmParser. |
1964 | 35.7k | auto Entry = RegisterReqs.find(Name.lower()); |
1965 | 35.7k | if (Entry == RegisterReqs.end()) |
1966 | 35.7k | return 0; |
1967 | | // set RegNum if the match is the right kind of register |
1968 | 0 | if (isVector == Entry->getValue().first) |
1969 | 0 | RegNum = Entry->getValue().second; |
1970 | 0 | } |
1971 | 5.61k | return RegNum; |
1972 | 41.3k | } |
1973 | | |
/// tryParseRegister - Try to parse a register name. The token must be an
/// Identifier when called, and if it is a register name the token is eaten and
/// the register is added to the operand list.  Returns -1 (not a register)
/// without consuming the token otherwise.
int AArch64AsmParser::tryParseRegister() {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  std::string lowerCase = Tok.getString().lower();
  unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
  // Also handle a few aliases of registers.
  if (RegNum == 0)
    RegNum = StringSwitch<unsigned>(lowerCase)
                 .Case("fp",  AArch64::FP)
                 .Case("lr",  AArch64::LR)
                 .Case("x31", AArch64::XZR)
                 .Case("w31", AArch64::WZR)
                 .Default(0);

  if (RegNum == 0)
    return -1;

  // Only consume the token once we know it really is a register.
  Parser.Lex(); // Eat identifier token.
  return RegNum;
}
1999 | | |
/// tryMatchVectorRegister - Try to parse a vector register name with optional
/// kind specifier. If it is a register specifier, eat the token and return it;
/// returns -1 on failure (token left unconsumed).  On success with a kind
/// suffix, Kind is set to the suffix including the leading '.'.
int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected)
{
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Identifier)) {
    // Keystone: diagnostics suppressed; caller reacts to the -1.
    //TokError("vector register expected");
    return -1;
  }

  StringRef Name = Parser.getTok().getString();
  // If there is a kind specifier, it's separated from the register name by
  // a '.'.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);
  unsigned RegNum = matchRegisterNameAlias(Head, true);

  if (RegNum) {
    if (Next != StringRef::npos) {
      Kind = Name.slice(Next, StringRef::npos);
      if (!isValidVectorKind(Kind)) {
        // Invalid suffix: fail WITHOUT eating the token.
        //TokError("invalid vector kind qualifier");
        return -1;
      }
    }
    Parser.Lex(); // Eat the register token.
    return RegNum;
  }

  //if (expected)
  //  TokError("vector register expected");
  return -1;
}
2033 | | |
2034 | | /// tryParseSysCROperand - Try to parse a system instruction CR operand name. |
2035 | | AArch64AsmParser::OperandMatchResultTy |
2036 | | AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) |
2037 | 2 | { |
2038 | 2 | MCAsmParser &Parser = getParser(); |
2039 | 2 | SMLoc S = getLoc(); |
2040 | | |
2041 | 2 | if (Parser.getTok().isNot(AsmToken::Identifier)) { |
2042 | | //Error(S, "Expected cN operand where 0 <= N <= 15"); |
2043 | 0 | return MatchOperand_ParseFail; |
2044 | 0 | } |
2045 | | |
2046 | 2 | StringRef Tok = Parser.getTok().getIdentifier(); |
2047 | 2 | if (Tok[0] != 'c' && Tok[0] != 'C') { |
2048 | | //Error(S, "Expected cN operand where 0 <= N <= 15"); |
2049 | 0 | return MatchOperand_ParseFail; |
2050 | 0 | } |
2051 | | |
2052 | 2 | uint32_t CRNum; |
2053 | 2 | bool BadNum = Tok.drop_front().getAsInteger(10, CRNum); |
2054 | 2 | if (BadNum || CRNum > 15) { |
2055 | | //Error(S, "Expected cN operand where 0 <= N <= 15"); |
2056 | 2 | return MatchOperand_ParseFail; |
2057 | 2 | } |
2058 | | |
2059 | 0 | Parser.Lex(); // Eat identifier token. |
2060 | 0 | Operands.push_back( |
2061 | 0 | AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext())); |
2062 | 0 | return MatchOperand_Success; |
2063 | 2 | } |
2064 | | |
2065 | | /// tryParsePrefetch - Try to parse a prefetch operand. |
2066 | | AArch64AsmParser::OperandMatchResultTy |
2067 | 0 | AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) { |
2068 | 0 | MCAsmParser &Parser = getParser(); |
2069 | 0 | SMLoc S = getLoc(); |
2070 | 0 | const AsmToken &Tok = Parser.getTok(); |
2071 | | // Either an identifier for named values or a 5-bit immediate. |
2072 | 0 | bool Hash = Tok.is(AsmToken::Hash); |
2073 | 0 | if (Hash || Tok.is(AsmToken::Integer)) { |
2074 | 0 | if (Hash) |
2075 | 0 | Parser.Lex(); // Eat hash token. |
2076 | 0 | const MCExpr *ImmVal; |
2077 | 0 | if (getParser().parseExpression(ImmVal)) |
2078 | 0 | return MatchOperand_ParseFail; |
2079 | | |
2080 | 0 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); |
2081 | 0 | if (!MCE) { |
2082 | | //TokError("immediate value expected for prefetch operand"); |
2083 | 0 | return MatchOperand_ParseFail; |
2084 | 0 | } |
2085 | 0 | unsigned prfop = MCE->getValue(); |
2086 | 0 | if (prfop > 31) { |
2087 | | //TokError("prefetch operand out of range, [0,31] expected"); |
2088 | 0 | return MatchOperand_ParseFail; |
2089 | 0 | } |
2090 | | |
2091 | 0 | bool Valid; |
2092 | 0 | auto Mapper = AArch64PRFM::PRFMMapper(); |
2093 | 0 | StringRef Name = |
2094 | 0 | Mapper.toString(MCE->getValue(), getSTI().getFeatureBits(), Valid); |
2095 | 0 | Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Name, |
2096 | 0 | S, getContext())); |
2097 | 0 | return MatchOperand_Success; |
2098 | 0 | } |
2099 | | |
2100 | 0 | if (Tok.isNot(AsmToken::Identifier)) { |
2101 | | //TokError("pre-fetch hint expected"); |
2102 | 0 | return MatchOperand_ParseFail; |
2103 | 0 | } |
2104 | | |
2105 | 0 | bool Valid; |
2106 | 0 | auto Mapper = AArch64PRFM::PRFMMapper(); |
2107 | 0 | unsigned prfop = |
2108 | 0 | Mapper.fromString(Tok.getString(), getSTI().getFeatureBits(), Valid); |
2109 | 0 | if (!Valid) { |
2110 | | //TokError("pre-fetch hint expected"); |
2111 | 0 | return MatchOperand_ParseFail; |
2112 | 0 | } |
2113 | | |
2114 | 0 | Parser.Lex(); // Eat identifier token. |
2115 | 0 | Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Tok.getString(), |
2116 | 0 | S, getContext())); |
2117 | 0 | return MatchOperand_Success; |
2118 | 0 | } |
2119 | | |
2120 | | /// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command |
2121 | | AArch64AsmParser::OperandMatchResultTy |
2122 | 0 | AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) { |
2123 | 0 | MCAsmParser &Parser = getParser(); |
2124 | 0 | SMLoc S = getLoc(); |
2125 | 0 | const AsmToken &Tok = Parser.getTok(); |
2126 | 0 | if (Tok.isNot(AsmToken::Identifier)) { |
2127 | | //TokError("invalid operand for instruction"); |
2128 | 0 | return MatchOperand_ParseFail; |
2129 | 0 | } |
2130 | | |
2131 | 0 | bool Valid; |
2132 | 0 | auto Mapper = AArch64PSBHint::PSBHintMapper(); |
2133 | 0 | unsigned psbhint = |
2134 | 0 | Mapper.fromString(Tok.getString(), getSTI().getFeatureBits(), Valid); |
2135 | 0 | if (!Valid) { |
2136 | | //TokError("invalid operand for instruction"); |
2137 | 0 | return MatchOperand_ParseFail; |
2138 | 0 | } |
2139 | | |
2140 | 0 | Parser.Lex(); // Eat identifier token. |
2141 | 0 | Operands.push_back(AArch64Operand::CreatePSBHint(psbhint, Tok.getString(), |
2142 | 0 | S, getContext())); |
2143 | 0 | return MatchOperand_Success; |
2144 | 0 | } |
2145 | | |
/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
/// instruction.
///
/// Accepts an optional leading '#', then a symbolic immediate. When no
/// relocation modifier is written at all, the expression is wrapped with
/// VK_ABS_PAGE (the ELF basic ADRP relocation); otherwise only page-like
/// modifiers are accepted.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const MCExpr *Expr;

  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat hash token.
  }

  if (parseSymbolicImmVal(Expr))
    return MatchOperand_ParseFail;

  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  // If classifySymbolRef fails, the expression is passed through as-is.
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADRP relocation (unfortunately).
      Expr =
          AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
    } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
               Addend != 0) {
      //Error(S, "gotpage label reference not allowed an addend");
      return MatchOperand_ParseFail;
    } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
      // The operand must be an @page or @gotpage qualified symbolref.
      //Error(S, "page or gotpage label reference expected");
      return MatchOperand_ParseFail;
    }
  }

  // We have either a label reference possibly with addend or an immediate. The
  // addend is a raw value here. The linker will adjust it to only reference the
  // page.
  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));

  return MatchOperand_Success;
}
2196 | | |
2197 | | /// tryParseAdrLabel - Parse and validate a source label for the ADR |
2198 | | /// instruction. |
2199 | | AArch64AsmParser::OperandMatchResultTy |
2200 | 363 | AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) { |
2201 | 363 | MCAsmParser &Parser = getParser(); |
2202 | 363 | SMLoc S = getLoc(); |
2203 | 363 | const MCExpr *Expr; |
2204 | | |
2205 | 363 | if (Parser.getTok().is(AsmToken::Hash)) { |
2206 | 363 | Parser.Lex(); // Eat hash token. |
2207 | 363 | } |
2208 | | |
2209 | 363 | if (getParser().parseExpression(Expr)) |
2210 | 363 | return MatchOperand_ParseFail; |
2211 | | |
2212 | 0 | SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1); |
2213 | 0 | Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext())); |
2214 | |
|
2215 | 0 | return MatchOperand_Success; |
2216 | 363 | } |
2217 | | |
/// tryParseFPImm - A floating point immediate expression operand.
///
/// Accepts an optional '#' and '-' prefix, then either a real literal, an
/// integer literal interpreted as a floating-point value, or a "0x.." literal
/// giving the 8-bit FP immediate encoding directly. The encoded value (via
/// AArch64_AM::getFP64Imm) is pushed as an FPImm operand.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  bool Hash = false;
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat '#'
    Hash = true;
  }

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex();
  }
  const AsmToken &Tok = Parser.getTok();
  if (Tok.is(AsmToken::Real)) {
    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
    if (isNegative)
      RealVal.changeSign();

    if (RealVal.bitcastToAPInt().getActiveBits() > 64)
      return MatchOperand_ParseFail;
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    // getFP64Imm returns -1 when the value is not representable as an FP8
    // immediate.
    int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
    Parser.Lex(); // Eat the token.
    // Check for out of range values. As an exception, we let Zero through,
    // as we handle that special case in post-processing before matching in
    // order to use the zero register for it.
    if (Val == -1 && !RealVal.isPosZero()) {
      //TokError("expected compatible register or floating-point constant");
      return MatchOperand_ParseFail;
    }
    Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }
  if (Tok.is(AsmToken::Integer)) {
    int64_t Val;
    if (!isNegative && Tok.getString().startswith("0x")) {
      // A hex literal is taken as the raw 8-bit encoding, so it must fit in
      // [0, 255].
      bool valid;
      Val = Tok.getIntVal(valid);
      if (!valid)
        return MatchOperand_ParseFail;
      if (Val > 255 || Val < 0) {
        //TokError("encoded floating point value out of range");
        return MatchOperand_ParseFail;
      }
    } else {
      // Re-parse the integer text as a double and encode that value.
      APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
      if (RealVal.bitcastToAPInt().getActiveBits() > 64)
        return MatchOperand_ParseFail;
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      // If we had a '-' in front, toggle the sign bit.
      IntVal ^= (uint64_t)isNegative << 63;
      Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
    }
    Parser.Lex(); // Eat the token.
    Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }

  // Without a leading '#' this simply wasn't an FP immediate; with one it is
  // an error.
  if (!Hash)
    return MatchOperand_NoMatch;

  //TokError("invalid floating point immediate");
  return MatchOperand_ParseFail;
}
2288 | | |
/// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand.
///
/// Parses "#imm" (or a bare integer), optionally followed by ", lsl #N".
/// When no shift is written and the constant is a multiple of 0x1000 greater
/// than 0xfff, the value is canonicalized to (imm >> 12) with an implicit
/// 'lsl #12'.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  if (Parser.getTok().is(AsmToken::Hash))
    Parser.Lex(); // Eat '#'
  else if (Parser.getTok().isNot(AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return MatchOperand_NoMatch;

  const MCExpr *Imm;
  if (parseSymbolicImmVal(Imm))
    return MatchOperand_ParseFail;
  else if (Parser.getTok().isNot(AsmToken::Comma)) {
    // No explicit shift follows; fold the implicit 'lsl #12' where possible.
    uint64_t ShiftAmount = 0;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
    if (MCE) {
      int64_t Val = MCE->getValue();
      if (Val > 0xfff && (Val & 0xfff) == 0) {
        Imm = MCConstantExpr::create(Val >> 12, getContext());
        ShiftAmount = 12;
      }
    }
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
                                                        getContext()));
    return MatchOperand_Success;
  }

  // Eat ','
  Parser.Lex();

  // The optional operand must be "lsl #N" where N is non-negative.
  if (!Parser.getTok().is(AsmToken::Identifier) ||
      !Parser.getTok().getIdentifier().equals_lower("lsl")) {
    //Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  // Eat 'lsl'
  Parser.Lex();

  // The '#' before the shift amount is optional.
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex();
  }

  if (Parser.getTok().isNot(AsmToken::Integer)) {
    //Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  bool valid;
  int64_t ShiftAmount = Parser.getTok().getIntVal(valid);
  if (!valid)
    return MatchOperand_ParseFail;

  if (ShiftAmount < 0) {
    //Error(Parser.getTok().getLoc(), "positive shift amount required");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the number

  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
                                                      S, E, getContext()));
  return MatchOperand_Success;
}
2358 | | |
2359 | | /// parseCondCodeString - Parse a Condition Code string. |
2360 | 3.44k | AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) { |
2361 | 3.44k | AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower()) |
2362 | 3.44k | .Case("eq", AArch64CC::EQ) |
2363 | 3.44k | .Case("ne", AArch64CC::NE) |
2364 | 3.44k | .Case("cs", AArch64CC::HS) |
2365 | 3.44k | .Case("hs", AArch64CC::HS) |
2366 | 3.44k | .Case("cc", AArch64CC::LO) |
2367 | 3.44k | .Case("lo", AArch64CC::LO) |
2368 | 3.44k | .Case("mi", AArch64CC::MI) |
2369 | 3.44k | .Case("pl", AArch64CC::PL) |
2370 | 3.44k | .Case("vs", AArch64CC::VS) |
2371 | 3.44k | .Case("vc", AArch64CC::VC) |
2372 | 3.44k | .Case("hi", AArch64CC::HI) |
2373 | 3.44k | .Case("ls", AArch64CC::LS) |
2374 | 3.44k | .Case("ge", AArch64CC::GE) |
2375 | 3.44k | .Case("lt", AArch64CC::LT) |
2376 | 3.44k | .Case("gt", AArch64CC::GT) |
2377 | 3.44k | .Case("le", AArch64CC::LE) |
2378 | 3.44k | .Case("al", AArch64CC::AL) |
2379 | 3.44k | .Case("nv", AArch64CC::NV) |
2380 | 3.44k | .Default(AArch64CC::Invalid); |
2381 | 3.44k | return CC; |
2382 | 3.44k | } |
2383 | | |
2384 | | /// parseCondCode - Parse a Condition Code operand. |
2385 | | bool AArch64AsmParser::parseCondCode(OperandVector &Operands, |
2386 | 0 | bool invertCondCode) { |
2387 | 0 | MCAsmParser &Parser = getParser(); |
2388 | 0 | SMLoc S = getLoc(); |
2389 | 0 | const AsmToken &Tok = Parser.getTok(); |
2390 | 0 | assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); |
2391 | | |
2392 | 0 | StringRef Cond = Tok.getString(); |
2393 | 0 | AArch64CC::CondCode CC = parseCondCodeString(Cond); |
2394 | 0 | if (CC == AArch64CC::Invalid) |
2395 | | //return TokError("invalid condition code"); |
2396 | 0 | return true; |
2397 | 0 | Parser.Lex(); // Eat identifier token. |
2398 | |
|
2399 | 0 | if (invertCondCode) { |
2400 | 0 | if (CC == AArch64CC::AL || CC == AArch64CC::NV) |
2401 | | //return TokError("condition codes AL and NV are invalid for this instruction"); |
2402 | 0 | return true; |
2403 | 0 | CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC)); |
2404 | 0 | } |
2405 | | |
2406 | 0 | Operands.push_back( |
2407 | 0 | AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext())); |
2408 | 0 | return false; |
2409 | 0 | } |
2410 | | |
/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
/// them if present.
///
/// Shift forms (lsl/lsr/asr/ror/msl) require an immediate; extend forms
/// (uxt*/sxt*) default to an implicit #0 when none is written.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  std::string LowerID = Tok.getString().lower();
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case("lsl", AArch64_AM::LSL)
          .Case("lsr", AArch64_AM::LSR)
          .Case("asr", AArch64_AM::ASR)
          .Case("ror", AArch64_AM::ROR)
          .Case("msl", AArch64_AM::MSL)
          .Case("uxtb", AArch64_AM::UXTB)
          .Case("uxth", AArch64_AM::UXTH)
          .Case("uxtw", AArch64_AM::UXTW)
          .Case("uxtx", AArch64_AM::UXTX)
          .Case("sxtb", AArch64_AM::SXTB)
          .Case("sxth", AArch64_AM::SXTH)
          .Case("sxtw", AArch64_AM::SXTW)
          .Case("sxtx", AArch64_AM::SXTX)
          .Default(AArch64_AM::InvalidShiftExtend);

  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return MatchOperand_NoMatch;

  SMLoc S = Tok.getLoc();
  Parser.Lex();

  bool Hash = getLexer().is(AsmToken::Hash);
  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      //TokError("expected #imm after shift specifier");
      return MatchOperand_ParseFail;
    }

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(
        AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
    return MatchOperand_Success;
  }

  if (Hash)
    Parser.Lex(); // Eat the '#'.

  // Make sure we do actually have a number or a parenthesized expression.
  SMLoc E = Parser.getTok().getLoc();
  if (!Parser.getTok().is(AsmToken::Integer) &&
      !Parser.getTok().is(AsmToken::LParen)) {
    //Error(E, "expected integer shift amount");
    return MatchOperand_ParseFail;
  }

  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))
    return MatchOperand_ParseFail;

  // The amount must fold to a constant.
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
  if (!MCE) {
    //Error(E, "expected constant '#imm' after shift specifier");
    return MatchOperand_ParseFail;
  }

  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateShiftExtend(
      ShOp, MCE->getValue(), true, S, E, getContext()));
  return MatchOperand_Success;
}
2484 | | |
/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
///
/// Returns true on failure. Ops whose name contains "all" take no register
/// operand; every other op requires one.
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                     OperandVector &Operands)
{
  // A '.'-qualified mnemonic can never be one of these aliases.
  if (Name.find('.') != StringRef::npos)
    //return TokError("invalid operand");
    return true;

  Mnemonic = Name;
  Operands.push_back(
      AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));

  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  const MCExpr *Expr = nullptr;

  // Push the four SYS operands the alias expands to: #op1, Cn, Cm, #op2.
#define SYS_ALIAS(op1, Cn, Cm, op2)                                            \
  do {                                                                         \
    Expr = MCConstantExpr::create(op1, getContext());                          \
    Operands.push_back(                                                        \
        AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));           \
    Operands.push_back(                                                        \
        AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));           \
    Operands.push_back(                                                        \
        AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));           \
    Expr = MCConstantExpr::create(op2, getContext());                          \
    Operands.push_back(                                                        \
        AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));           \
  } while (0)

  if (Mnemonic == "ic") {
    if (!Op.compare_lower("ialluis")) {
      // SYS #0, C7, C1, #0
      SYS_ALIAS(0, 7, 1, 0);
    } else if (!Op.compare_lower("iallu")) {
      // SYS #0, C7, C5, #0
      SYS_ALIAS(0, 7, 5, 0);
    } else if (!Op.compare_lower("ivau")) {
      // SYS #3, C7, C5, #1
      SYS_ALIAS(3, 7, 5, 1);
    } else {
      //return TokError("invalid operand for IC instruction");
      return true;
    }
  } else if (Mnemonic == "dc") {
    if (!Op.compare_lower("zva")) {
      // SYS #3, C7, C4, #1
      SYS_ALIAS(3, 7, 4, 1);
    } else if (!Op.compare_lower("ivac")) {
      // SYS #0, C7, C6, #1
      SYS_ALIAS(0, 7, 6, 1);
    } else if (!Op.compare_lower("isw")) {
      // SYS #0, C7, C6, #2
      SYS_ALIAS(0, 7, 6, 2);
    } else if (!Op.compare_lower("cvac")) {
      // SYS #3, C7, C10, #1
      SYS_ALIAS(3, 7, 10, 1);
    } else if (!Op.compare_lower("csw")) {
      // SYS #0, C7, C10, #2
      SYS_ALIAS(0, 7, 10, 2);
    } else if (!Op.compare_lower("cvau")) {
      // SYS #3, C7, C11, #1
      SYS_ALIAS(3, 7, 11, 1);
    } else if (!Op.compare_lower("civac")) {
      // SYS #3, C7, C14, #1
      SYS_ALIAS(3, 7, 14, 1);
    } else if (!Op.compare_lower("cisw")) {
      // SYS #0, C7, C14, #2
      SYS_ALIAS(0, 7, 14, 2);
    } else if (!Op.compare_lower("cvap")) {
      if (getSTI().getFeatureBits()[AArch64::HasV8_2aOps]) {
        // SYS #3, C7, C12, #1
        SYS_ALIAS(3, 7, 12, 1);
      } else {
        //return TokError("DC CVAP requires ARMv8.2a");
        return true;
      }
    } else {
      //return TokError("invalid operand for DC instruction");
      return true;
    }
  } else if (Mnemonic == "at") {
    if (!Op.compare_lower("s1e1r")) {
      // SYS #0, C7, C8, #0
      SYS_ALIAS(0, 7, 8, 0);
    } else if (!Op.compare_lower("s1e2r")) {
      // SYS #4, C7, C8, #0
      SYS_ALIAS(4, 7, 8, 0);
    } else if (!Op.compare_lower("s1e3r")) {
      // SYS #6, C7, C8, #0
      SYS_ALIAS(6, 7, 8, 0);
    } else if (!Op.compare_lower("s1e1w")) {
      // SYS #0, C7, C8, #1
      SYS_ALIAS(0, 7, 8, 1);
    } else if (!Op.compare_lower("s1e2w")) {
      // SYS #4, C7, C8, #1
      SYS_ALIAS(4, 7, 8, 1);
    } else if (!Op.compare_lower("s1e3w")) {
      // SYS #6, C7, C8, #1
      SYS_ALIAS(6, 7, 8, 1);
    } else if (!Op.compare_lower("s1e0r")) {
      // SYS #0, C7, C8, #2
      SYS_ALIAS(0, 7, 8, 2);
    } else if (!Op.compare_lower("s1e0w")) {
      // SYS #0, C7, C8, #3
      SYS_ALIAS(0, 7, 8, 3);
    } else if (!Op.compare_lower("s12e1r")) {
      // SYS #4, C7, C8, #4
      SYS_ALIAS(4, 7, 8, 4);
    } else if (!Op.compare_lower("s12e1w")) {
      // SYS #4, C7, C8, #5
      SYS_ALIAS(4, 7, 8, 5);
    } else if (!Op.compare_lower("s12e0r")) {
      // SYS #4, C7, C8, #6
      SYS_ALIAS(4, 7, 8, 6);
    } else if (!Op.compare_lower("s12e0w")) {
      // SYS #4, C7, C8, #7
      SYS_ALIAS(4, 7, 8, 7);
    } else if (!Op.compare_lower("s1e1rp")) {
      if (getSTI().getFeatureBits()[AArch64::HasV8_2aOps]) {
        // SYS #0, C7, C9, #0
        SYS_ALIAS(0, 7, 9, 0);
      } else {
        //return TokError("AT S1E1RP requires ARMv8.2a");
        return true;
      }
    } else if (!Op.compare_lower("s1e1wp")) {
      if (getSTI().getFeatureBits()[AArch64::HasV8_2aOps]) {
        // SYS #0, C7, C9, #1
        SYS_ALIAS(0, 7, 9, 1);
      } else {
        //return TokError("AT S1E1WP requires ARMv8.2a");
        return true;
      }
    } else {
      //return TokError("invalid operand for AT instruction");
      return true;
    }
  } else if (Mnemonic == "tlbi") {
    if (!Op.compare_lower("vmalle1is")) {
      // SYS #0, C8, C3, #0
      SYS_ALIAS(0, 8, 3, 0);
    } else if (!Op.compare_lower("alle2is")) {
      // SYS #4, C8, C3, #0
      SYS_ALIAS(4, 8, 3, 0);
    } else if (!Op.compare_lower("alle3is")) {
      // SYS #6, C8, C3, #0
      SYS_ALIAS(6, 8, 3, 0);
    } else if (!Op.compare_lower("vae1is")) {
      // SYS #0, C8, C3, #1
      SYS_ALIAS(0, 8, 3, 1);
    } else if (!Op.compare_lower("vae2is")) {
      // SYS #4, C8, C3, #1
      SYS_ALIAS(4, 8, 3, 1);
    } else if (!Op.compare_lower("vae3is")) {
      // SYS #6, C8, C3, #1
      SYS_ALIAS(6, 8, 3, 1);
    } else if (!Op.compare_lower("aside1is")) {
      // SYS #0, C8, C3, #2
      SYS_ALIAS(0, 8, 3, 2);
    } else if (!Op.compare_lower("vaae1is")) {
      // SYS #0, C8, C3, #3
      SYS_ALIAS(0, 8, 3, 3);
    } else if (!Op.compare_lower("alle1is")) {
      // SYS #4, C8, C3, #4
      SYS_ALIAS(4, 8, 3, 4);
    } else if (!Op.compare_lower("vale1is")) {
      // SYS #0, C8, C3, #5
      SYS_ALIAS(0, 8, 3, 5);
    } else if (!Op.compare_lower("vaale1is")) {
      // SYS #0, C8, C3, #7
      SYS_ALIAS(0, 8, 3, 7);
    } else if (!Op.compare_lower("vmalle1")) {
      // SYS #0, C8, C7, #0
      SYS_ALIAS(0, 8, 7, 0);
    } else if (!Op.compare_lower("alle2")) {
      // SYS #4, C8, C7, #0
      SYS_ALIAS(4, 8, 7, 0);
    } else if (!Op.compare_lower("vale2is")) {
      // SYS #4, C8, C3, #5
      SYS_ALIAS(4, 8, 3, 5);
    } else if (!Op.compare_lower("vale3is")) {
      // SYS #6, C8, C3, #5
      SYS_ALIAS(6, 8, 3, 5);
    } else if (!Op.compare_lower("alle3")) {
      // SYS #6, C8, C7, #0
      SYS_ALIAS(6, 8, 7, 0);
    } else if (!Op.compare_lower("vae1")) {
      // SYS #0, C8, C7, #1
      SYS_ALIAS(0, 8, 7, 1);
    } else if (!Op.compare_lower("vae2")) {
      // SYS #4, C8, C7, #1
      SYS_ALIAS(4, 8, 7, 1);
    } else if (!Op.compare_lower("vae3")) {
      // SYS #6, C8, C7, #1
      SYS_ALIAS(6, 8, 7, 1);
    } else if (!Op.compare_lower("aside1")) {
      // SYS #0, C8, C7, #2
      SYS_ALIAS(0, 8, 7, 2);
    } else if (!Op.compare_lower("vaae1")) {
      // SYS #0, C8, C7, #3
      SYS_ALIAS(0, 8, 7, 3);
    } else if (!Op.compare_lower("alle1")) {
      // SYS #4, C8, C7, #4
      SYS_ALIAS(4, 8, 7, 4);
    } else if (!Op.compare_lower("vale1")) {
      // SYS #0, C8, C7, #5
      SYS_ALIAS(0, 8, 7, 5);
    } else if (!Op.compare_lower("vale2")) {
      // SYS #4, C8, C7, #5
      SYS_ALIAS(4, 8, 7, 5);
    } else if (!Op.compare_lower("vale3")) {
      // SYS #6, C8, C7, #5
      SYS_ALIAS(6, 8, 7, 5);
    } else if (!Op.compare_lower("vaale1")) {
      // SYS #0, C8, C7, #7
      SYS_ALIAS(0, 8, 7, 7);
    } else if (!Op.compare_lower("ipas2e1")) {
      // SYS #4, C8, C4, #1
      SYS_ALIAS(4, 8, 4, 1);
    } else if (!Op.compare_lower("ipas2le1")) {
      // SYS #4, C8, C4, #5
      SYS_ALIAS(4, 8, 4, 5);
    } else if (!Op.compare_lower("ipas2e1is")) {
      // SYS #4, C8, C0, #1
      SYS_ALIAS(4, 8, 0, 1);
    } else if (!Op.compare_lower("ipas2le1is")) {
      // SYS #4, C8, C0, #5
      SYS_ALIAS(4, 8, 0, 5);
    } else if (!Op.compare_lower("vmalls12e1")) {
      // SYS #4, C8, C7, #6
      SYS_ALIAS(4, 8, 7, 6);
    } else if (!Op.compare_lower("vmalls12e1is")) {
      // SYS #4, C8, C3, #6
      SYS_ALIAS(4, 8, 3, 6);
    } else {
      //return TokError("invalid operand for TLBI instruction");
      return true;
    }
  }

#undef SYS_ALIAS

  Parser.Lex(); // Eat operand.

  // Ops whose name contains "all" operate on everything and take no register.
  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
  bool HasRegister = false;

  // Check for the optional register operand.
  if (getLexer().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat comma.

    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      //return TokError("expected register operand");
      return true;

    HasRegister = true;
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    Parser.eatToEndOfStatement();
    //return TokError("unexpected token in argument list");
    return true;
  }

  if (ExpectRegister && !HasRegister) {
    //return TokError("specified " + Mnemonic + " op requires a register");
    return true;
  }
  else if (!ExpectRegister && HasRegister) {
    //return TokError("specified " + Mnemonic + " op does not use a register");
    return true;
  }

  Parser.Lex(); // Consume the EndOfStatement
  return false;
}
2766 | | |
 | | /// tryParseBarrierOperand - Parse the operand of a barrier instruction
 | | /// (DMB/DSB/ISB). Accepts either "#imm" with imm in [0, 15] or a named
 | | /// option from the DBarrier table (e.g. "sy"); for "isb" the only named
 | | /// option accepted is "sy". On success a Barrier operand is pushed.
 | | /// Diagnostics are commented out (Keystone style): failure is reported
 | | /// purely through the MatchOperand_* result.
2767 | | AArch64AsmParser::OperandMatchResultTy
2768 | | AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands)
2769 | 1 | {
2770 | 1 | MCAsmParser &Parser = getParser();
2771 | 1 | const AsmToken &Tok = Parser.getTok();
2772 | |
2773 | | // Can be either a #imm style literal or an option name
2774 | 1 | bool Hash = Tok.is(AsmToken::Hash);
2775 | 1 | if (Hash || Tok.is(AsmToken::Integer)) {
2776 | | // Immediate operand.
2777 | 0 | if (Hash)
2778 | 0 | Parser.Lex(); // Eat the '#'
2779 | 0 | const MCExpr *ImmVal;
2780 | 0 | SMLoc ExprLoc = getLoc();
2781 | 0 | if (getParser().parseExpression(ImmVal))
2782 | 0 | return MatchOperand_ParseFail;
 | | // The immediate must fold to a constant; symbolic barriers are invalid.
2783 | 0 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2784 | 0 | if (!MCE) {
2785 | | //Error(ExprLoc, "immediate value expected for barrier operand");
2786 | 0 | return MatchOperand_ParseFail;
2787 | 0 | }
 | | // Barrier CRm field is 4 bits wide: only 0..15 are encodable.
2788 | 0 | if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2789 | | //Error(ExprLoc, "barrier operand out of range");
2790 | 0 | return MatchOperand_ParseFail;
2791 | 0 | }
 | | // Recover the canonical name (if any) for this encoding so the
 | | // operand prints symbolically.
2792 | 0 | bool Valid;
2793 | 0 | auto Mapper = AArch64DB::DBarrierMapper();
2794 | 0 | StringRef Name =
2795 | 0 | Mapper.toString(MCE->getValue(), getSTI().getFeatureBits(), Valid);
2796 | 0 | Operands.push_back( AArch64Operand::CreateBarrier(MCE->getValue(), Name,
2797 | 0 | ExprLoc, getContext()));
2798 | 0 | return MatchOperand_Success;
2799 | 0 | }
2800 | |
2801 | 1 | if (Tok.isNot(AsmToken::Identifier)) {
2802 | | //TokError("invalid operand for instruction");
2803 | 0 | return MatchOperand_ParseFail;
2804 | 0 | }
2805 | |
 | | // Named option: map the identifier back to its 4-bit encoding.
2806 | 1 | bool Valid;
2807 | 1 | auto Mapper = AArch64DB::DBarrierMapper();
2808 | 1 | unsigned Opt =
2809 | 1 | Mapper.fromString(Tok.getString(), getSTI().getFeatureBits(), Valid);
2810 | 1 | if (!Valid) {
2811 | | //TokError("invalid barrier option name");
2812 | 1 | return MatchOperand_ParseFail;
2813 | 1 | }
2814 | |
2815 | | // The only valid named option for ISB is 'sy'
2816 | 0 | if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
2817 | | //TokError("'sy' or #imm operand expected");
2818 | 0 | return MatchOperand_ParseFail;
2819 | 0 | }
2820 | |
2821 | 0 | Operands.push_back( AArch64Operand::CreateBarrier(Opt, Tok.getString(),
2822 | 0 | getLoc(), getContext()));
2823 | 0 | Parser.Lex(); // Consume the option
2824 |
 |
2825 | 0 | return MatchOperand_Success;
2826 | 0 | }
2827 | | |
 | | /// tryParseSysReg - Parse a system-register operand (for MRS/MSR and the
 | | /// MSR-immediate pstate forms). The identifier is looked up in all three
 | | /// name tables (MRS-readable, MSR-writable, pstate fields); each lookup
 | | /// yields the encoding or -1U when unknown. All three results are stored
 | | /// on a single SysReg operand, and which one applies is decided later by
 | | /// the instruction matcher.
2828 | | AArch64AsmParser::OperandMatchResultTy
2829 | | AArch64AsmParser::tryParseSysReg(OperandVector &Operands)
2830 | 248 | {
2831 | 248 | MCAsmParser &Parser = getParser();
2832 | 248 | const AsmToken &Tok = Parser.getTok();
2833 | |
2834 | 248 | if (Tok.isNot(AsmToken::Identifier))
2835 | 10 | return MatchOperand_NoMatch;
2836 | |
2837 | 238 | bool IsKnown;
2838 | 238 | auto MRSMapper = AArch64SysReg::MRSMapper();
2839 | 238 | uint32_t MRSReg = MRSMapper.fromString(Tok.getString(),
2840 | 238 | getSTI().getFeatureBits(), IsKnown);
2841 | 238 | assert(IsKnown == (MRSReg != -1U) &&
2842 | 238 | "register should be -1 if and only if it's unknown");
2843 | |
2844 | 238 | auto MSRMapper = AArch64SysReg::MSRMapper();
2845 | 238 | uint32_t MSRReg = MSRMapper.fromString(Tok.getString(),
2846 | 238 | getSTI().getFeatureBits(), IsKnown);
2847 | 238 | assert(IsKnown == (MSRReg != -1U) &&
2848 | 238 | "register should be -1 if and only if it's unknown");
2849 | |
2850 | 238 | auto PStateMapper = AArch64PState::PStateMapper();
2851 | 238 | uint32_t PStateField =
2852 | 238 | PStateMapper.fromString(Tok.getString(),
2853 | 238 | getSTI().getFeatureBits(), IsKnown);
2854 | 238 | assert(IsKnown == (PStateField != -1U) &&
2855 | 238 | "register should be -1 if and only if it's unknown");
2856 | |
 | | // Even a name unknown to every table produces an operand; validity is
 | | // checked during matching, not here.
2857 | 238 | Operands.push_back(AArch64Operand::CreateSysReg(
2858 | 238 | Tok.getString(), getLoc(), MRSReg, MSRReg, PStateField, getContext()));
2859 | 238 | Parser.Lex(); // Eat identifier
2860 | |
2861 | 238 | return MatchOperand_Success;
2862 | 238 | }
2863 | | |
 | | /// Parses "Vn" with an optional layout qualifier (pushed as a literal
 | | /// token, e.g. ".4s") and an optional "[imm]" lane index (pushed as a
 | | /// VectorIndex operand).
2864 | | /// tryParseVectorRegister - Parse a vector register operand.
2865 | | // return true on error
2866 | | bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands)
2867 | 21.4k | {
2868 | 21.4k | MCAsmParser &Parser = getParser();
2869 | 21.4k | if (Parser.getTok().isNot(AsmToken::Identifier))
2870 | 0 | return true;
2871 | |
2872 | 21.4k | SMLoc S = getLoc();
2873 | | // Check for a vector register specifier first.
2874 | 21.4k | StringRef Kind;
2875 | 21.4k | int64_t Reg = tryMatchVectorRegister(Kind, false);
2876 | 21.4k | if (Reg == -1)
2877 | 19.7k | return true;
2878 | 1.66k | Operands.push_back(
2879 | 1.66k | AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2880 | | // If there was an explicit qualifier, that goes on as a literal text
2881 | | // operand.
2882 | 1.66k | if (!Kind.empty())
2883 | 1.20k | Operands.push_back(
2884 | 1.20k | AArch64Operand::CreateToken(Kind, false, S, getContext()));
2885 | |
2886 | | // If there is an index specifier following the register, parse that too.
2887 | 1.66k | if (Parser.getTok().is(AsmToken::LBrac)) {
2888 | 0 | SMLoc SIdx = getLoc();
2889 | 0 | Parser.Lex(); // Eat left bracket token.
2890 |
 |
2891 | 0 | const MCExpr *ImmVal;
 | | // NOTE(review): returns false ("no error") when the index expression
 | | // fails to parse, so the caller sees success with no index operand
 | | // pushed. This mirrors upstream LLVM, but confirm it is intended.
2892 | 0 | if (getParser().parseExpression(ImmVal))
2893 | 0 | return false;
2894 | 0 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2895 | 0 | if (!MCE) {
2896 | | //TokError("immediate value expected for vector index");
2897 | 0 | return true;
2898 | 0 | }
2899 | |
2900 | 0 | SMLoc E = getLoc();
2901 | 0 | if (Parser.getTok().isNot(AsmToken::RBrac)) {
2902 | | //Error(E, "']' expected");
2903 | 0 | return true;
2904 | 0 | }
2905 | |
2906 | 0 | Parser.Lex(); // Eat right bracket token.
2907 |
 |
2908 | 0 | Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2909 | 0 | E, getContext()));
2910 | 0 | }
2911 | |
2912 | 1.66k | return false;
2913 | 1.66k | }
2914 | | |
 | | /// Tries vector form first (tryParseVectorRegister), then falls back to a
 | | /// scalar register. Also recognizes a literal "[1]" suffix that a few
 | | /// instructions carry as raw tokens.
2915 | | /// parseRegister - Parse a non-vector register operand.
2916 | | // return true on error
2917 | | bool AArch64AsmParser::parseRegister(OperandVector &Operands)
2918 | 21.4k | {
2919 | 21.4k | MCAsmParser &Parser = getParser();
2920 | 21.4k | SMLoc S = getLoc();
2921 | | // Try for a vector register.
2922 | 21.4k | if (!tryParseVectorRegister(Operands))
2923 | 1.66k | return false;
2924 | |
2925 | | // Try for a scalar register.
2926 | 19.7k | int64_t Reg = tryParseRegister();
2927 | 19.7k | if (Reg == -1)
2928 | 17.0k | return true;
2929 | 2.72k | Operands.push_back(
2930 | 2.72k | AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2931 | |
2932 | | // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2933 | | // as a string token in the instruction itself.
 | | // NOTE(review): when the lookahead below sees "[" but what follows is
 | | // not exactly "1]", the consumed tokens are not pushed back and no "["
 | | // token operand is created — the function still returns success.
 | | // This matches upstream LLVM; confirm it is the intended behaviour.
2934 | 2.72k | if (getLexer().getKind() == AsmToken::LBrac) {
2935 | 27 | SMLoc LBracS = getLoc();
2936 | 27 | Parser.Lex();
2937 | 27 | const AsmToken &Tok = Parser.getTok();
2938 | 27 | if (Tok.is(AsmToken::Integer)) {
2939 | 0 | SMLoc IntS = getLoc();
2940 | 0 | bool valid;
2941 | 0 | int64_t Val = Tok.getIntVal(valid);
2942 | 0 | if (!valid)
2943 | 0 | return true;
2944 | 0 | if (Val == 1) {
2945 | 0 | Parser.Lex();
2946 | 0 | if (getLexer().getKind() == AsmToken::RBrac) {
2947 | 0 | SMLoc RBracS = getLoc();
2948 | 0 | Parser.Lex();
 | | // Emit "[", "1", "]" as three literal token operands.
2949 | 0 | Operands.push_back(
2950 | 0 | AArch64Operand::CreateToken("[", false, LBracS, getContext()));
2951 | 0 | Operands.push_back(
2952 | 0 | AArch64Operand::CreateToken("1", false, IntS, getContext()));
2953 | 0 | Operands.push_back(
2954 | 0 | AArch64Operand::CreateToken("]", false, RBracS, getContext()));
2955 | 0 | return false;
2956 | 0 | }
2957 | 0 | }
2958 | 0 | }
2959 | 27 | }
2960 | |
2961 | 2.72k | return false;
2962 | 2.72k | }
2963 | | |
 | | /// parseSymbolicImmVal - Parse an immediate expression with an optional
 | | /// leading ":specifier:" ELF relocation modifier (e.g. ":lo12:sym").
 | | /// When a modifier is present the parsed expression is wrapped in an
 | | /// AArch64MCExpr carrying the VariantKind. Returns true on error.
2964 | | bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal)
2965 | 10.8k | {
2966 | 10.8k | MCAsmParser &Parser = getParser();
2967 | 10.8k | bool HasELFModifier = false;
 | | // RefKind is only read when HasELFModifier is set, in which case the
 | | // block below has assigned it.
2968 | 10.8k | AArch64MCExpr::VariantKind RefKind;
2969 | |
2970 | 10.8k | if (Parser.getTok().is(AsmToken::Colon)) {
2971 | 8 | Parser.Lex(); // Eat ':"
2972 | 8 | HasELFModifier = true;
2973 | |
2974 | 8 | if (Parser.getTok().isNot(AsmToken::Identifier)) {
2975 | | //Error(Parser.getTok().getLoc(),
2976 | | // "expect relocation specifier in operand after ':'");
2977 | 0 | return true;
2978 | 0 | }
2979 | |
 | | // Modifier names are matched case-insensitively.
2980 | 8 | std::string LowerCase = Parser.getTok().getIdentifier().lower();
2981 | 8 | RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2982 | 8 | .Case("lo12", AArch64MCExpr::VK_LO12)
2983 | 8 | .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2984 | 8 | .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2985 | 8 | .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2986 | 8 | .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2987 | 8 | .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2988 | 8 | .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2989 | 8 | .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2990 | 8 | .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2991 | 8 | .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2992 | 8 | .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2993 | 8 | .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2994 | 8 | .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2995 | 8 | .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2996 | 8 | .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2997 | 8 | .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2998 | 8 | .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2999 | 8 | .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
3000 | 8 | .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
3001 | 8 | .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
3002 | 8 | .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
3003 | 8 | .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
3004 | 8 | .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
3005 | 8 | .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
3006 | 8 | .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
3007 | 8 | .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
3008 | 8 | .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
3009 | 8 | .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
3010 | 8 | .Case("got", AArch64MCExpr::VK_GOT_PAGE)
3011 | 8 | .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
3012 | 8 | .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
3013 | 8 | .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
3014 | 8 | .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
3015 | 8 | .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
3016 | 8 | .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
3017 | 8 | .Default(AArch64MCExpr::VK_INVALID);
3018 | |
3019 | 8 | if (RefKind == AArch64MCExpr::VK_INVALID) {
3020 | | //Error(Parser.getTok().getLoc(),
3021 | | // "expect relocation specifier in operand after ':'");
3022 | 3 | return true;
3023 | 3 | }
3024 | |
3025 | 5 | Parser.Lex(); // Eat identifier
3026 | |
 | | // The modifier must be closed by a second ':' before the expression.
3027 | 5 | if (Parser.getTok().isNot(AsmToken::Colon)) {
3028 | | //Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
3029 | 0 | return true;
3030 | 0 | }
3031 | 5 | Parser.Lex(); // Eat ':'
3032 | 5 | }
3033 | |
3034 | 10.8k | if (getParser().parseExpression(ImmVal))
3035 | 7.05k | return true;
3036 | |
3037 | 3.83k | if (HasELFModifier)
3038 | 5 | ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
3039 | |
3040 | 3.83k | return false;
3041 | 10.8k | }
3042 | | |
 | | /// Accepts "{Vn.T - Vm.T}" (range, 1-4 registers with wraparound at 32)
 | | /// or "{Vn.T, Vn+1.T, ...}" (explicit sequential list), with an optional
 | | /// trailing "[imm]" lane index. All listed registers must share the same
 | | /// layout suffix.
3043 | | /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
3044 | | // return true on error
3045 | | bool AArch64AsmParser::parseVectorList(OperandVector &Operands)
3046 | 12 | {
3047 | 12 | MCAsmParser &Parser = getParser();
3048 | 12 | assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
3049 | 12 | SMLoc S = getLoc();
3050 | 12 | Parser.Lex(); // Eat left bracket token.
3051 | 12 | StringRef Kind;
3052 | 12 | int64_t FirstReg = tryMatchVectorRegister(Kind, true);
3053 | 12 | if (FirstReg == -1)
3054 | 5 | return true;
3055 | 7 | int64_t PrevReg = FirstReg;
3056 | 7 | unsigned Count = 1;
3057 | |
3058 | 7 | if (Parser.getTok().is(AsmToken::Minus)) {
3059 | 0 | Parser.Lex(); // Eat the minus.
3060 | |
3061 | | //SMLoc Loc = getLoc();
3062 | 0 | StringRef NextKind;
3063 | 0 | int64_t Reg = tryMatchVectorRegister(NextKind, true);
3064 | 0 | if (Reg == -1)
3065 | 0 | return true;
3066 | | // Any Kind suffices must match on all regs in the list.
3067 | 0 | if (Kind != NextKind)
3068 | | //return Error(Loc, "mismatched register size suffix");
3069 | 0 | return true;
3070 | |
 | | // Distance from first to last register, wrapping modulo 32.
3071 | 0 | unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
3072 |
 |
3073 | 0 | if (Space == 0 || Space > 3) {
3074 | | //return Error(Loc, "invalid number of vectors");
3075 | 0 | return true;
3076 | 0 | }
3077 | |
3078 | 0 | Count += Space;
3079 | 0 | }
3080 | 7 | else {
3081 | 7 | while (Parser.getTok().is(AsmToken::Comma)) {
3082 | 0 | Parser.Lex(); // Eat the comma token.
3083 | |
3084 | | //SMLoc Loc = getLoc();
3085 | 0 | StringRef NextKind;
3086 | 0 | int64_t Reg = tryMatchVectorRegister(NextKind, true);
3087 | 0 | if (Reg == -1)
3088 | 0 | return true;
3089 | | // Any Kind suffices must match on all regs in the list.
3090 | 0 | if (Kind != NextKind)
3091 | | //return Error(Loc, "mismatched register size suffix");
3092 | 0 | return true;
3093 | |
3094 | | // Registers must be incremental (with wraparound at 31)
3095 | 0 | if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
3096 | 0 | (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
3097 | | //return Error(Loc, "registers must be sequential");
3098 | 0 | return true;
3099 | |
3100 | 0 | PrevReg = Reg;
3101 | 0 | ++Count;
3102 | 0 | }
3103 | 7 | }
3104 | |
3105 | 7 | if (Parser.getTok().isNot(AsmToken::RCurly))
3106 | | //return Error(getLoc(), "'}' expected");
3107 | 0 | return true;
3108 | 7 | Parser.Lex(); // Eat the '}' token.
3109 | |
3110 | 7 | if (Count > 4)
3111 | | //return Error(S, "invalid number of vectors");
3112 | 0 | return true;
3113 | |
3114 | 7 | unsigned NumElements = 0;
3115 | 7 | char ElementKind = 0;
3116 | 7 | if (!Kind.empty())
3117 | 0 | parseValidVectorKind(Kind, NumElements, ElementKind);
3118 | |
3119 | 7 | Operands.push_back(AArch64Operand::CreateVectorList(
3120 | 7 | FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
3121 | |
3122 | | // If there is an index specifier following the list, parse that too.
3123 | 7 | if (Parser.getTok().is(AsmToken::LBrac)) {
3124 | 3 | SMLoc SIdx = getLoc();
3125 | 3 | Parser.Lex(); // Eat left bracket token.
3126 | |
3127 | 3 | const MCExpr *ImmVal;
 | | // NOTE(review): the failure paths below return false ("no error"),
 | | // leaving the list operand pushed but no index; this mirrors upstream
 | | // LLVM diagnostics-as-return-false style — confirm intended here.
3128 | 3 | if (getParser().parseExpression(ImmVal))
3129 | 3 | return false;
3130 | 0 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3131 | 0 | if (!MCE) {
3132 | | //TokError("immediate value expected for vector index");
3133 | 0 | return false;
3134 | 0 | }
3135 | |
3136 | 0 | SMLoc E = getLoc();
3137 | 0 | if (Parser.getTok().isNot(AsmToken::RBrac)) {
3138 | | //Error(E, "']' expected");
3139 | 0 | return false;
3140 | 0 | }
3141 | |
3142 | 0 | Parser.Lex(); // Eat right bracket token.
3143 |
 |
3144 | 0 | Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3145 | 0 | E, getContext()));
3146 | 0 | }
3147 | 4 | return false;
3148 | 7 | }
3149 | | |
 | | /// tryParseGPR64sp0Operand - Parse a GPR64sp-class register optionally
 | | /// followed by ", #0" (the immediate, when present, must be exactly 0).
 | | /// In either form only the register operand is pushed; the "#0" is
 | | /// consumed and discarded.
3150 | | AArch64AsmParser::OperandMatchResultTy
3151 | | AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands)
3152 | 232 | {
3153 | 232 | MCAsmParser &Parser = getParser();
3154 | 232 | const AsmToken &Tok = Parser.getTok();
3155 | 232 | if (!Tok.is(AsmToken::Identifier))
3156 | 128 | return MatchOperand_NoMatch;
3157 | |
3158 | 104 | unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
3159 | |
 | | // Only registers in the GPR64sp class (Xn or SP) qualify here.
3160 | 104 | MCContext &Ctx = getContext();
3161 | 104 | const MCRegisterInfo *RI = Ctx.getRegisterInfo();
3162 | 104 | if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
3163 | 104 | return MatchOperand_NoMatch;
3164 | |
3165 | 0 | SMLoc S = getLoc();
3166 | 0 | Parser.Lex(); // Eat register
3167 |
 |
 | | // No comma: plain register form.
3168 | 0 | if (Parser.getTok().isNot(AsmToken::Comma)) {
3169 | 0 | Operands.push_back(
3170 | 0 | AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
3171 | 0 | return MatchOperand_Success;
3172 | 0 | }
3173 | 0 | Parser.Lex(); // Eat comma.
3174 |
 |
3175 | 0 | if (Parser.getTok().is(AsmToken::Hash))
3176 | 0 | Parser.Lex(); // Eat hash
3177 |
 |
3178 | 0 | if (Parser.getTok().isNot(AsmToken::Integer)) {
3179 | | //Error(getLoc(), "index must be absent or #0");
3180 | 0 | return MatchOperand_ParseFail;
3181 | 0 | }
3182 | |
 | | // The trailing immediate must fold to the constant 0.
3183 | 0 | const MCExpr *ImmVal;
3184 | 0 | if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3185 | 0 | cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3186 | | //Error(getLoc(), "index must be absent or #0");
3187 | 0 | return MatchOperand_ParseFail;
3188 | 0 | }
3189 | |
3190 | 0 | Operands.push_back(
3191 | 0 | AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
3192 | 0 | return MatchOperand_Success;
3193 | 0 | }
3194 | | |
 | | /// Dispatches on the first token: custom (tablegen) parsers first, then
 | | /// '[' memory bracket, '{' vector list, identifier (register / condition
 | | /// code / label), integer/real/hash immediates, and the "ldr Rn, =val"
 | | /// pseudo ('=' case), which is rewritten to movz or a constant-pool load.
3195 | | /// parseOperand - Parse a arm instruction operand. For now this parses the
3196 | | /// operand regardless of the mnemonic.
3197 | | // return true on failure
3198 | | bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3199 | | bool invertCondCode)
3200 | 66.2k | {
3201 | 66.2k | MCAsmParser &Parser = getParser();
3202 | | // Check if the current operand has a custom associated parser, if so, try to
3203 | | // custom parse the operand, or fallback to the general approach.
3204 | 66.2k | OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
3205 | 66.2k | if (ResTy == MatchOperand_Success)
3206 | 334 | return false;
3207 | | // If there wasn't a custom match, try the generic matcher below. Otherwise,
3208 | | // there was a match, but an error occurred, in which case, just return that
3209 | | // the operand parsing failed.
3210 | 65.8k | if (ResTy == MatchOperand_ParseFail)
3211 | 366 | return true;
3212 | |
3213 | | // Nothing custom, so do general case parsing.
3214 | 65.5k | SMLoc S, E;
3215 | 65.5k | switch (getLexer().getKind()) {
 | | // Default: treat the operand as a (possibly relocated) expression.
3216 | 7.04k | default: {
3217 | 7.04k | SMLoc S = getLoc();
3218 | 7.04k | const MCExpr *Expr;
3219 | 7.04k | if (parseSymbolicImmVal(Expr))
3220 | | //return Error(S, "invalid operand");
3221 | 5.30k | return true;
3222 | |
3223 | 1.74k | SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3224 | 1.74k | Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3225 | 1.74k | return false;
3226 | 7.04k | }
3227 | 32.8k | case AsmToken::LBrac: {
3228 | 32.8k | SMLoc Loc = Parser.getTok().getLoc();
3229 | 32.8k | Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3230 | 32.8k | getContext()));
3231 | 32.8k | Parser.Lex(); // Eat '['
3232 | |
3233 | | // There's no comma after a '[', so we can parse the next operand
3234 | | // immediately.
3235 | 32.8k | return parseOperand(Operands, false, false);
3236 | 7.04k | }
3237 | 12 | case AsmToken::LCurly:
3238 | 12 | return parseVectorList(Operands);
3239 | 21.4k | case AsmToken::Identifier: {
3240 | | // If we're expecting a Condition Code operand, then just parse that.
3241 | 21.4k | if (isCondCode)
3242 | 0 | return parseCondCode(Operands, invertCondCode);
3243 | |
3244 | | // If it's a register name, parse it.
3245 | 21.4k | if (!parseRegister(Operands))
3246 | 4.39k | return false;
3247 | |
3248 | | // This could be an optional "shift" or "extend" operand.
3249 | 17.0k | OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3250 | | // We can only continue if no tokens were eaten.
 | | // (MatchOperand_Success is 0, so returning the enum reads as
 | | // false/success; ParseFail is non-zero and reads as failure.)
3251 | 17.0k | if (GotShift != MatchOperand_NoMatch)
3252 | 43 | return GotShift;
3253 | |
3254 | | // This was not a register so parse other operands that start with an
3255 | | // identifier (like labels) as expressions and create them as immediates.
3256 | 17.0k | const MCExpr *IdVal;
3257 | 17.0k | S = getLoc();
3258 | 17.0k | if (getParser().parseExpression(IdVal))
3259 | 1.11k | return true;
3260 | |
3261 | 15.9k | E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3262 | 15.9k | Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3263 | 15.9k | return false;
3264 | 17.0k | }
3265 | 610 | case AsmToken::Integer:
3266 | 616 | case AsmToken::Real:
3267 | 3.85k | case AsmToken::Hash: {
3268 | | // #42 -> immediate.
3269 | 3.85k | S = getLoc();
3270 | 3.85k | if (getLexer().is(AsmToken::Hash))
3271 | 3.23k | Parser.Lex();
3272 | |
3273 | | // Parse a negative sign
3274 | 3.85k | bool isNegative = false;
3275 | 3.85k | if (Parser.getTok().is(AsmToken::Minus)) {
3276 | 4 | isNegative = true;
3277 | | // We need to consume this token only when we have a Real, otherwise
3278 | | // we let parseSymbolicImmVal take care of it
3279 | 4 | if (Parser.getLexer().peekTok().is(AsmToken::Real))
3280 | 0 | Parser.Lex();
3281 | 4 | }
3282 | |
3283 | | // The only Real that should come through here is a literal #0.0 for
3284 | | // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3285 | | // so convert the value.
3286 | 3.85k | const AsmToken &Tok = Parser.getTok();
3287 | 3.85k | if (Tok.is(AsmToken::Real)) {
3288 | 7 | APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3289 | 7 | if (RealVal.bitcastToAPInt().getActiveBits() > 64)
3290 | 0 | return true;
3291 | 7 | uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3292 | 7 | if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3293 | 7 | Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3294 | 7 | Mnemonic != "fcmlt")
3295 | | //return TokError("unexpected floating point literal");
3296 | 7 | return true;
3297 | 0 | else if (IntVal != 0 || isNegative)
3298 | | //return TokError("expected floating-point constant #0.0");
3299 | 0 | return true;
3300 | 0 | Parser.Lex(); // Eat the token.
3301 |
 |
 | | // Emit "#0" and ".0" as the two literal tokens the matcher expects.
3302 | 0 | Operands.push_back(
3303 | 0 | AArch64Operand::CreateToken("#0", false, S, getContext()));
3304 | 0 | Operands.push_back(
3305 | 0 | AArch64Operand::CreateToken(".0", false, S, getContext()));
3306 | 0 | return false;
3307 | 7 | }
3308 | |
3309 | 3.84k | const MCExpr *ImmVal;
3310 | 3.84k | if (parseSymbolicImmVal(ImmVal))
3311 | 1.75k | return true;
3312 | |
3313 | 2.09k | E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3314 | 2.09k | Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3315 | 2.09k | return false;
3316 | 3.84k | }
3317 | 291 | case AsmToken::Equal: {
3318 | 291 | SMLoc Loc = Parser.getTok().getLoc();
3319 | 291 | if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3320 | | //return Error(Loc, "unexpected token in operand");
3321 | 0 | return true;
3322 | 291 | Parser.Lex(); // Eat '='
3323 | 291 | const MCExpr *SubExprVal;
3324 | 291 | if (getParser().parseExpression(SubExprVal))
3325 | 15 | return true;
3326 | |
3327 | 276 | if (Operands.size() < 2 ||
3328 | 276 | !static_cast<AArch64Operand &>(*Operands[1]).isReg())
3329 | | //return Error(Loc, "Only valid when first operand is register");
3330 | 0 | return true;
3331 | |
 | | // X-register destination allows a 64-bit value; W allows 32 bits.
3332 | 276 | bool IsXReg =
3333 | 276 | AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3334 | 276 | Operands[1]->getReg());
3335 | |
3336 | 276 | MCContext& Ctx = getContext();
3337 | 276 | E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3338 | | // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3339 | 276 | if (isa<MCConstantExpr>(SubExprVal)) {
3340 | 240 | uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
 | | // Strip 16-bit chunks of trailing zeros; the residue plus LSL #n
 | | // shift may then fit a single movz.
3341 | 240 | uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3342 | 321 | while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3343 | 81 | ShiftAmt += 16;
3344 | 81 | Imm >>= 16;
3345 | 81 | }
3346 | 240 | if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3347 | 116 | Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3348 | 116 | Operands.push_back(AArch64Operand::CreateImm(
3349 | 116 | MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3350 | 116 | if (ShiftAmt)
3351 | 27 | Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3352 | 27 | ShiftAmt, true, S, E, Ctx));
3353 | 116 | return false;
3354 | 116 | }
3355 | 124 | APInt Simm = APInt(64, Imm << ShiftAmt);
3356 | | // check if the immediate is an unsigned or signed 32-bit int for W regs
3357 | 124 | if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3358 | | //return Error(Loc, "Immediate too large for register");
3359 | 1 | return true;
3360 | 124 | }
3361 | | // If it is a label or an imm that cannot fit in a movz, put it into CP.
3362 | 159 | const MCExpr *CPLoc =
3363 | 159 | getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
3364 | 159 | Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3365 | 159 | return false;
3366 | 276 | }
3367 | 65.5k | }
3368 | 65.5k | }
3369 | | |
 | | /// Normalizes legacy "bcc"-style mnemonics to "b.cc", handles the ".req"
 | | /// directive and the IC/DC/AT/TLBI SYS aliases, splits the mnemonic on
 | | /// '.' into token operands, then reads comma-separated operands until end
 | | /// of statement. On operand failure, ErrorCode is set to the Keystone
 | | /// code KS_ERR_ASM_INVALIDOPERAND. Returns true on error.
3370 | | /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3371 | | /// operands.
3372 | | // return true on error
3373 | | bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3374 | | StringRef Name, SMLoc NameLoc,
3375 | | OperandVector &Operands, unsigned int &ErrorCode)
3376 | 11.6k | {
3377 | 11.6k | MCAsmParser &Parser = getParser();
 | | // Canonicalize pre-UAL style conditional branches ("beq") to the
 | | // architectural "b.eq" spelling before any further splitting.
3378 | 11.6k | Name = StringSwitch<StringRef>(Name.lower())
3379 | 11.6k | .Case("beq", "b.eq")
3380 | 11.6k | .Case("bne", "b.ne")
3381 | 11.6k | .Case("bhs", "b.hs")
3382 | 11.6k | .Case("bcs", "b.cs")
3383 | 11.6k | .Case("blo", "b.lo")
3384 | 11.6k | .Case("bcc", "b.cc")
3385 | 11.6k | .Case("bmi", "b.mi")
3386 | 11.6k | .Case("bpl", "b.pl")
3387 | 11.6k | .Case("bvs", "b.vs")
3388 | 11.6k | .Case("bvc", "b.vc")
3389 | 11.6k | .Case("bhi", "b.hi")
3390 | 11.6k | .Case("bls", "b.ls")
3391 | 11.6k | .Case("bge", "b.ge")
3392 | 11.6k | .Case("blt", "b.lt")
3393 | 11.6k | .Case("bgt", "b.gt")
3394 | 11.6k | .Case("ble", "b.le")
3395 | 11.6k | .Case("bal", "b.al")
3396 | 11.6k | .Case("bnv", "b.nv")
3397 | 11.6k | .Default(Name);
3398 | |
3399 | | // First check for the AArch64-specific .req directive.
3400 | 11.6k | if (Parser.getTok().is(AsmToken::Identifier) &&
3401 | 11.6k | Parser.getTok().getIdentifier() == ".req") {
3402 | 0 | parseDirectiveReq(Name, NameLoc);
3403 | | // We always return 'error' for this, as we're done with this
3404 | | // statement and don't need to match the 'instruction."
3405 | 0 | return true;
3406 | 0 | }
3407 | |
3408 | | // Create the leading tokens for the mnemonic, split by '.' characters.
3409 | 11.6k | size_t Start = 0, Next = Name.find('.');
3410 | 11.6k | StringRef Head = Name.slice(Start, Next);
3411 | |
3412 | | // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3413 | 11.6k | if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
3414 | 0 | bool IsError = parseSysAlias(Head, NameLoc, Operands);
3415 | 0 | if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
3416 | 0 | Parser.eatToEndOfStatement();
3417 | 0 | return IsError;
3418 | 0 | }
3419 | |
3420 | 11.6k | Operands.push_back(
3421 | 11.6k | AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3422 | 11.6k | Mnemonic = Head;
3423 | |
3424 | | // Handle condition codes for a branch mnemonic
3425 | 11.6k | if (Head == "b" && Next != StringRef::npos) {
3426 | 3.44k | Start = Next;
3427 | 3.44k | Next = Name.find('.', Start + 1);
3428 | 3.44k | Head = Name.slice(Start + 1, Next);
3429 | |
3430 | 3.44k | SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3431 | 3.44k | (Head.data() - Name.data()));
3432 | 3.44k | AArch64CC::CondCode CC = parseCondCodeString(Head);
3433 | 3.44k | if (CC == AArch64CC::Invalid)
3434 | | //return Error(SuffixLoc, "invalid condition code");
3435 | 6 | return true;
3436 | 3.43k | Operands.push_back(
3437 | 3.43k | AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3438 | 3.43k | Operands.push_back(
3439 | 3.43k | AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3440 | 3.43k | }
3441 | |
3442 | | // Add the remaining tokens in the mnemonic.
3443 | 30.4k | while (Next != StringRef::npos) {
3444 | 18.7k | Start = Next;
3445 | 18.7k | Next = Name.find('.', Start + 1);
3446 | 18.7k | Head = Name.slice(Start, Next);
3447 | 18.7k | SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3448 | 18.7k | (Head.data() - Name.data()) + 1);
3449 | 18.7k | Operands.push_back(
3450 | 18.7k | AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3451 | 18.7k | }
3452 | |
3453 | | // Conditional compare instructions have a Condition Code operand, which needs
3454 | | // to be parsed and an immediate operand created.
3455 | 11.6k | bool condCodeFourthOperand =
3456 | 11.6k | (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3457 | 11.6k | Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3458 | 11.6k | Head == "csinc" || Head == "csinv" || Head == "csneg");
3459 | |
3460 | | // These instructions are aliases to some of the conditional select
3461 | | // instructions. However, the condition code is inverted in the aliased
3462 | | // instruction.
3463 | | //
3464 | | // FIXME: Is this the correct way to handle these? Or should the parser
3465 | | // generate the aliased instructions directly?
3466 | 11.6k | bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3467 | 11.6k | bool condCodeThirdOperand =
3468 | 11.6k | (Head == "cinc" || Head == "cinv" || Head == "cneg");
3469 | |
3470 | | // Read the remaining operands.
3471 | 11.6k | if (getLexer().isNot(AsmToken::EndOfStatement)) {
3472 | | // Read the first operand.
3473 | 11.1k | if (parseOperand(Operands, false, false)) {
3474 | 7.59k | Parser.eatToEndOfStatement();
3475 | 7.59k | ErrorCode = KS_ERR_ASM_INVALIDOPERAND;
3476 | 7.59k | return true;
3477 | 7.59k | }
3478 | |
 | | // N counts the 1-based operand position, used to decide when a
 | | // condition-code operand is expected for the aliases above.
3479 | 3.58k | unsigned N = 2;
3480 | 24.7k | while (getLexer().is(AsmToken::Comma)) {
3481 | 22.1k | Parser.Lex(); // Eat the comma.
3482 | |
3483 | | // Parse and remember the operand.
3484 | 22.1k | if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3485 | 22.1k | (N == 3 && condCodeThirdOperand) ||
3486 | 22.1k | (N == 2 && condCodeSecondOperand),
3487 | 22.1k | condCodeSecondOperand || condCodeThirdOperand)) {
3488 | 973 | Parser.eatToEndOfStatement();
3489 | 973 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND;
3490 | 973 | return true;
3491 | 973 | }
3492 | |
3493 | | // After successfully parsing some operands there are two special cases to
3494 | | // consider (i.e. notional operands not separated by commas). Both are due
3495 | | // to memory specifiers:
3496 | | // + An RBrac will end an address for load/store/prefetch
3497 | | // + An '!' will indicate a pre-indexed operation.
3498 | | //
3499 | | // It's someone else's responsibility to make sure these tokens are sane
3500 | | // in the given context!
3501 | 21.2k | if (Parser.getTok().is(AsmToken::RBrac)) {
3502 | 19 | SMLoc Loc = Parser.getTok().getLoc();
3503 | 19 | Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
3504 | 19 | getContext()));
3505 | 19 | Parser.Lex();
3506 | 19 | }
3507 | |
3508 | 21.2k | if (Parser.getTok().is(AsmToken::Exclaim)) {
3509 | 5 | SMLoc Loc = Parser.getTok().getLoc();
3510 | 5 | Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
3511 | 5 | getContext()));
3512 | 5 | Parser.Lex();
3513 | 5 | }
3514 | |
3515 | 21.2k | ++N;
3516 | 21.2k | }
3517 | 3.58k | }
3518 | |
3519 | 3.12k | if (getLexer().isNot(AsmToken::EndOfStatement)) {
3520 | | //SMLoc Loc = Parser.getTok().getLoc();
3521 | 180 | Parser.eatToEndOfStatement();
3522 | | //return Error(Loc, "unexpected token in argument list");
3523 | 180 | return true;
3524 | 180 | }
3525 | |
3526 | 2.94k | Parser.Lex(); // Consume the EndOfStatement
3527 | 2.94k | return false;
3528 | 3.12k | }
3529 | | |
// FIXME: This entire function is a giant hack to provide us with decent
// operand range validation/diagnostics until TableGen/MC can be extended
// to support autogeneration of this kind of validation.
//
// Post-match semantic validation of a fully-encoded MCInst.
// Returns true on a validation failure, false if the instruction is OK.
// Keystone note: the original LLVM Error(...) diagnostics are commented out
// throughout; each check silently reports failure via `return true`, so the
// Loc array is currently unused (kept for interface compatibility).
bool AArch64AsmParser::validateInstruction(MCInst &Inst,
                                           SmallVectorImpl<SMLoc> &Loc)
{
  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  // Check for indexed addressing modes w/ the base register being the
  // same as a destination/source register or pair load where
  // the Rt == Rt2. All of those are undefined behaviour.
  switch (Inst.getOpcode()) {
  // Pre/post-indexed LDP (writeback forms): operands are
  // (writeback-def, Rt, Rt2, Rn, imm); reject when the writeback base Rn
  // overlaps either destination — CONSTRAINED UNPREDICTABLE per the ARM ARM.
  case AArch64::LDPSWpre:
  case AArch64::LDPWpost:
  case AArch64::LDPWpre:
  case AArch64::LDPXpost:
  case AArch64::LDPXpre: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      //return Error(Loc[0], "unpredictable LDP instruction, writeback base "
      //             "is also a destination");
      return true;
    if (RI->isSubRegisterEq(Rn, Rt2))
      //return Error(Loc[1], "unpredictable LDP instruction, writeback base "
      //             "is also a destination");
      return true;
    // FALLTHROUGH: the writeback forms above also need the Rt != Rt2 check
    // performed for the plain (immediate-offset) LDP forms below.
  }
  // Immediate-offset LDP: operands are (Rt, Rt2, Rn, imm); loading the same
  // register twice (Rt == Rt2) is unpredictable.
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPWi:
  case AArch64::LDPXi: {
    unsigned Rt = Inst.getOperand(0).getReg();
    unsigned Rt2 = Inst.getOperand(1).getReg();
    if (Rt == Rt2)
      //return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
      return true;
    break;
  }
  // Remaining writeback LDP forms (FP/SIMD registers, plus LDPSWpost).
  // Rn can never alias an FP/SIMD Rt, so only the Rt == Rt2 check applies;
  // operand 0 is the writeback def, hence the indices (1, 2) here.
  case AArch64::LDPDpost:
  case AArch64::LDPDpre:
  case AArch64::LDPQpost:
  case AArch64::LDPQpre:
  case AArch64::LDPSpost:
  case AArch64::LDPSpre:
  case AArch64::LDPSWpost: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    if (Rt == Rt2)
      //return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
      return true;
    break;
  }
  // Pre/post-indexed STP: reject when the writeback base Rn overlaps either
  // source register (unpredictable).
  case AArch64::STPDpost:
  case AArch64::STPDpre:
  case AArch64::STPQpost:
  case AArch64::STPQpre:
  case AArch64::STPSpost:
  case AArch64::STPSpre:
  case AArch64::STPWpost:
  case AArch64::STPWpre:
  case AArch64::STPXpost:
  case AArch64::STPXpre: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      //return Error(Loc[0], "unpredictable STP instruction, writeback base "
      //             "is also a source");
      return true;
    if (RI->isSubRegisterEq(Rn, Rt2))
      //return Error(Loc[1], "unpredictable STP instruction, writeback base "
      //             "is also a source");
      return true;
    break;
  }
  // Single-register pre/post-indexed loads: writeback base Rn must not
  // overlap the destination Rt (operands are writeback-def, Rt, Rn, imm).
  case AArch64::LDRBBpre:
  case AArch64::LDRBpre:
  case AArch64::LDRHHpre:
  case AArch64::LDRHpre:
  case AArch64::LDRSBWpre:
  case AArch64::LDRSBXpre:
  case AArch64::LDRSHWpre:
  case AArch64::LDRSHXpre:
  case AArch64::LDRSWpre:
  case AArch64::LDRWpre:
  case AArch64::LDRXpre:
  case AArch64::LDRBBpost:
  case AArch64::LDRBpost:
  case AArch64::LDRHHpost:
  case AArch64::LDRHpost:
  case AArch64::LDRSBWpost:
  case AArch64::LDRSBXpost:
  case AArch64::LDRSHWpost:
  case AArch64::LDRSHXpost:
  case AArch64::LDRSWpost:
  case AArch64::LDRWpost:
  case AArch64::LDRXpost: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      //return Error(Loc[0], "unpredictable LDR instruction, writeback base "
      //             "is also a source");
      return true;
    break;
  }
  // Single-register pre/post-indexed stores: writeback base Rn must not
  // overlap the source Rt.
  case AArch64::STRBBpost:
  case AArch64::STRBpost:
  case AArch64::STRHHpost:
  case AArch64::STRHpost:
  case AArch64::STRWpost:
  case AArch64::STRXpost:
  case AArch64::STRBBpre:
  case AArch64::STRBpre:
  case AArch64::STRHHpre:
  case AArch64::STRHpre:
  case AArch64::STRWpre:
  case AArch64::STRXpre: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      //return Error(Loc[0], "unpredictable STR instruction, writeback base "
      //             "is also a source");
      return true;
    break;
  }
  }

  // Now check immediate ranges. Separate from the above as there is overlap
  // in the instructions being checked and this keeps the nested conditionals
  // to a minimum.
  switch (Inst.getOpcode()) {
  case AArch64::ADDSWri:
  case AArch64::ADDSXri:
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBSWri:
  case AArch64::SUBSXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri: {
    // Annoyingly we can't do this in the isAddSubImm predicate, so there is
    // some slight duplication here.
    // Symbolic immediates (e.g. :lo12:sym) are only legal on a restricted
    // set of relocation kinds, and only for the non-flag-setting ADD forms.
    if (Inst.getOperand(2).isExpr()) {
      const MCExpr *Expr = Inst.getOperand(2).getExpr();
      AArch64MCExpr::VariantKind ELFRefKind;
      MCSymbolRefExpr::VariantKind DarwinRefKind;
      int64_t Addend;
      if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
        //return Error(Loc[2], "invalid immediate expression");
        return true;
      }

      // Only allow these with ADDXri.
      if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
           DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
          Inst.getOpcode() == AArch64::ADDXri)
        return false;

      // Only allow these with ADDXri/ADDWri
      if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
           ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
           ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
           ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
           ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
           ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
           ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
           ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
          (Inst.getOpcode() == AArch64::ADDXri ||
           Inst.getOpcode() == AArch64::ADDWri))
        return false;

      // Don't allow expressions in the immediate field otherwise
      //return Error(Loc[2], "invalid immediate expression");
      return true;
    }
    return false;
  }
  default:
    return false;
  }
}
3714 | | |
3715 | | bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) |
3716 | 229 | { |
3717 | 229 | switch (ErrCode) { |
3718 | 0 | case Match_MissingFeature: |
3719 | | //return Error(Loc, |
3720 | | // "instruction requires a CPU feature not currently enabled"); |
3721 | 0 | return true; |
3722 | 58 | case Match_InvalidOperand: |
3723 | | //return Error(Loc, "invalid operand for instruction"); |
3724 | 58 | return true; |
3725 | 13 | case Match_InvalidSuffix: |
3726 | | //return Error(Loc, "invalid type suffix for instruction"); |
3727 | 13 | return true; |
3728 | 0 | case Match_InvalidCondCode: |
3729 | | //return Error(Loc, "expected AArch64 condition code"); |
3730 | 0 | return true; |
3731 | 0 | case Match_AddSubRegExtendSmall: |
3732 | | //return Error(Loc, |
3733 | | // "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]"); |
3734 | 0 | return true; |
3735 | 0 | case Match_AddSubRegExtendLarge: |
3736 | | //return Error(Loc, |
3737 | | // "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]"); |
3738 | 0 | return true; |
3739 | 0 | case Match_AddSubSecondSource: |
3740 | | //return Error(Loc, |
3741 | | // "expected compatible register, symbol or integer in range [0, 4095]"); |
3742 | 0 | return true; |
3743 | 0 | case Match_LogicalSecondSource: |
3744 | | //return Error(Loc, "expected compatible register or logical immediate"); |
3745 | 0 | return true; |
3746 | 0 | case Match_InvalidMovImm32Shift: |
3747 | | //return Error(Loc, "expected 'lsl' with optional integer 0 or 16"); |
3748 | 0 | return true; |
3749 | 0 | case Match_InvalidMovImm64Shift: |
3750 | | //return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48"); |
3751 | 0 | return true; |
3752 | 0 | case Match_AddSubRegShift32: |
3753 | | //return Error(Loc, |
3754 | | // "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]"); |
3755 | 0 | return true; |
3756 | 0 | case Match_AddSubRegShift64: |
3757 | | //return Error(Loc, |
3758 | | // "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]"); |
3759 | 0 | return true; |
3760 | 0 | case Match_InvalidFPImm: |
3761 | | //return Error(Loc, |
3762 | | // "expected compatible register or floating-point constant"); |
3763 | 0 | return true; |
3764 | 0 | case Match_InvalidMemoryIndexedSImm9: |
3765 | | //return Error(Loc, "index must be an integer in range [-256, 255]."); |
3766 | 0 | return true; |
3767 | 0 | case Match_InvalidMemoryIndexed4SImm7: |
3768 | | //return Error(Loc, "index must be a multiple of 4 in range [-256, 252]."); |
3769 | 0 | return true; |
3770 | 0 | case Match_InvalidMemoryIndexed8SImm7: |
3771 | | //return Error(Loc, "index must be a multiple of 8 in range [-512, 504]."); |
3772 | 0 | return true; |
3773 | 0 | case Match_InvalidMemoryIndexed16SImm7: |
3774 | | //return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008]."); |
3775 | 0 | return true; |
3776 | 0 | case Match_InvalidMemoryWExtend8: |
3777 | | //return Error(Loc, |
3778 | | // "expected 'uxtw' or 'sxtw' with optional shift of #0"); |
3779 | 0 | return true; |
3780 | 0 | case Match_InvalidMemoryWExtend16: |
3781 | | //return Error(Loc, |
3782 | | // "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1"); |
3783 | 0 | return true; |
3784 | 0 | case Match_InvalidMemoryWExtend32: |
3785 | | //return Error(Loc, |
3786 | | // "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2"); |
3787 | 0 | return true; |
3788 | 0 | case Match_InvalidMemoryWExtend64: |
3789 | | //return Error(Loc, |
3790 | | // "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3"); |
3791 | 0 | return true; |
3792 | 0 | case Match_InvalidMemoryWExtend128: |
3793 | | //return Error(Loc, |
3794 | | // "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4"); |
3795 | 0 | return true; |
3796 | 0 | case Match_InvalidMemoryXExtend8: |
3797 | | //return Error(Loc, |
3798 | | // "expected 'lsl' or 'sxtx' with optional shift of #0"); |
3799 | 0 | return true; |
3800 | 0 | case Match_InvalidMemoryXExtend16: |
3801 | | //return Error(Loc, |
3802 | | // "expected 'lsl' or 'sxtx' with optional shift of #0 or #1"); |
3803 | 0 | return true; |
3804 | 0 | case Match_InvalidMemoryXExtend32: |
3805 | | //return Error(Loc, |
3806 | | // "expected 'lsl' or 'sxtx' with optional shift of #0 or #2"); |
3807 | 0 | return true; |
3808 | 0 | case Match_InvalidMemoryXExtend64: |
3809 | | //return Error(Loc, |
3810 | | // "expected 'lsl' or 'sxtx' with optional shift of #0 or #3"); |
3811 | 0 | return true; |
3812 | 0 | case Match_InvalidMemoryXExtend128: |
3813 | | //return Error(Loc, |
3814 | | // "expected 'lsl' or 'sxtx' with optional shift of #0 or #4"); |
3815 | 0 | return true; |
3816 | 0 | case Match_InvalidMemoryIndexed1: |
3817 | | //return Error(Loc, "index must be an integer in range [0, 4095]."); |
3818 | 0 | return true; |
3819 | 0 | case Match_InvalidMemoryIndexed2: |
3820 | | //return Error(Loc, "index must be a multiple of 2 in range [0, 8190]."); |
3821 | 0 | return true; |
3822 | 0 | case Match_InvalidMemoryIndexed4: |
3823 | | //return Error(Loc, "index must be a multiple of 4 in range [0, 16380]."); |
3824 | 0 | return true; |
3825 | 0 | case Match_InvalidMemoryIndexed8: |
3826 | | //return Error(Loc, "index must be a multiple of 8 in range [0, 32760]."); |
3827 | 0 | return true; |
3828 | 0 | case Match_InvalidMemoryIndexed16: |
3829 | | //return Error(Loc, "index must be a multiple of 16 in range [0, 65520]."); |
3830 | 0 | return true; |
3831 | 0 | case Match_InvalidImm0_1: |
3832 | | //return Error(Loc, "immediate must be an integer in range [0, 1]."); |
3833 | 0 | return true; |
3834 | 1 | case Match_InvalidImm0_7: |
3835 | | //return Error(Loc, "immediate must be an integer in range [0, 7]."); |
3836 | 1 | return true; |
3837 | 0 | case Match_InvalidImm0_15: |
3838 | | //return Error(Loc, "immediate must be an integer in range [0, 15]."); |
3839 | 0 | return true; |
3840 | 0 | case Match_InvalidImm0_31: |
3841 | | //return Error(Loc, "immediate must be an integer in range [0, 31]."); |
3842 | 0 | return true; |
3843 | 0 | case Match_InvalidImm0_63: |
3844 | | //return Error(Loc, "immediate must be an integer in range [0, 63]."); |
3845 | 0 | return true; |
3846 | 0 | case Match_InvalidImm0_127: |
3847 | | //return Error(Loc, "immediate must be an integer in range [0, 127]."); |
3848 | 0 | return true; |
3849 | 0 | case Match_InvalidImm0_65535: |
3850 | | //return Error(Loc, "immediate must be an integer in range [0, 65535]."); |
3851 | 0 | return true; |
3852 | 0 | case Match_InvalidImm1_8: |
3853 | | //return Error(Loc, "immediate must be an integer in range [1, 8]."); |
3854 | 0 | return true; |
3855 | 0 | case Match_InvalidImm1_16: |
3856 | | //return Error(Loc, "immediate must be an integer in range [1, 16]."); |
3857 | 0 | return true; |
3858 | 0 | case Match_InvalidImm1_32: |
3859 | | //return Error(Loc, "immediate must be an integer in range [1, 32]."); |
3860 | 0 | return true; |
3861 | 0 | case Match_InvalidImm1_64: |
3862 | | //return Error(Loc, "immediate must be an integer in range [1, 64]."); |
3863 | 0 | return true; |
3864 | 0 | case Match_InvalidIndex1: |
3865 | | //return Error(Loc, "expected lane specifier '[1]'"); |
3866 | 0 | return true; |
3867 | 0 | case Match_InvalidIndexB: |
3868 | | //return Error(Loc, "vector lane must be an integer in range [0, 15]."); |
3869 | 0 | return true; |
3870 | 0 | case Match_InvalidIndexH: |
3871 | | //return Error(Loc, "vector lane must be an integer in range [0, 7]."); |
3872 | 0 | return true; |
3873 | 0 | case Match_InvalidIndexS: |
3874 | | //return Error(Loc, "vector lane must be an integer in range [0, 3]."); |
3875 | 0 | return true; |
3876 | 0 | case Match_InvalidIndexD: |
3877 | | //return Error(Loc, "vector lane must be an integer in range [0, 1]."); |
3878 | 0 | return true; |
3879 | 0 | case Match_InvalidLabel: |
3880 | | //return Error(Loc, "expected label or encodable integer pc offset"); |
3881 | 0 | return true; |
3882 | 0 | case Match_MRS: |
3883 | | //return Error(Loc, "expected readable system register"); |
3884 | 0 | return true; |
3885 | 1 | case Match_MSR: |
3886 | | //return Error(Loc, "expected writable system register or pstate"); |
3887 | 1 | return true; |
3888 | 156 | case Match_MnemonicFail: |
3889 | | //return Error(Loc, "unrecognized instruction mnemonic"); |
3890 | 156 | return true; |
3891 | 0 | default: |
3892 | 0 | llvm_unreachable("unexpected error code!"); |
3893 | 229 | } |
3894 | 229 | } |
3895 | | |
3896 | | static const char *getSubtargetFeatureName(uint64_t Val); |
3897 | | |
3898 | | // return True on error |
3899 | | bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, |
3900 | | OperandVector &Operands, |
3901 | | MCStreamer &Out, |
3902 | | uint64_t &ErrorInfo, |
3903 | | bool MatchingInlineAsm, unsigned int &ErrorCode, uint64_t &Address) |
3904 | 2.94k | { |
3905 | 2.94k | assert(!Operands.empty() && "Unexpect empty operand list!"); |
3906 | 2.94k | AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]); |
3907 | 2.94k | assert(Op.isToken() && "Leading operand should always be a mnemonic!"); |
3908 | | |
3909 | 2.94k | StringRef Tok = Op.getToken(); |
3910 | 2.94k | unsigned NumOperands = Operands.size(); |
3911 | | |
3912 | 2.94k | if (NumOperands == 4 && Tok == "lsl") { |
3913 | 0 | AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]); |
3914 | 0 | AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]); |
3915 | 0 | if (Op2.isReg() && Op3.isImm()) { |
3916 | 0 | const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm()); |
3917 | 0 | if (Op3CE) { |
3918 | 0 | uint64_t Op3Val = Op3CE->getValue(); |
3919 | 0 | uint64_t NewOp3Val = 0; |
3920 | 0 | uint64_t NewOp4Val = 0; |
3921 | 0 | if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains( |
3922 | 0 | Op2.getReg())) { |
3923 | 0 | NewOp3Val = (32 - Op3Val) & 0x1f; |
3924 | 0 | NewOp4Val = 31 - Op3Val; |
3925 | 0 | } else { |
3926 | 0 | NewOp3Val = (64 - Op3Val) & 0x3f; |
3927 | 0 | NewOp4Val = 63 - Op3Val; |
3928 | 0 | } |
3929 | |
|
3930 | 0 | const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext()); |
3931 | 0 | const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext()); |
3932 | |
|
3933 | 0 | Operands[0] = AArch64Operand::CreateToken( |
3934 | 0 | "ubfm", false, Op.getStartLoc(), getContext()); |
3935 | 0 | Operands.push_back(AArch64Operand::CreateImm( |
3936 | 0 | NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext())); |
3937 | 0 | Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(), |
3938 | 0 | Op3.getEndLoc(), getContext()); |
3939 | 0 | } |
3940 | 0 | } |
3941 | 2.94k | } else if (NumOperands == 4 && Tok == "bfc") { |
3942 | | // FIXME: Horrible hack to handle BFC->BFM alias. |
3943 | 0 | AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]); |
3944 | 0 | AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]); |
3945 | 0 | AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]); |
3946 | |
|
3947 | 0 | if (Op1.isReg() && LSBOp.isImm() && WidthOp.isImm()) { |
3948 | 0 | const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm()); |
3949 | 0 | const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm()); |
3950 | |
|
3951 | 0 | if (LSBCE && WidthCE) { |
3952 | 0 | uint64_t LSB = LSBCE->getValue(); |
3953 | 0 | uint64_t Width = WidthCE->getValue(); |
3954 | |
|
3955 | 0 | uint64_t RegWidth = 0; |
3956 | 0 | if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains( |
3957 | 0 | Op1.getReg())) |
3958 | 0 | RegWidth = 64; |
3959 | 0 | else |
3960 | 0 | RegWidth = 32; |
3961 | |
|
3962 | 0 | if (LSB >= RegWidth) { |
3963 | | //return Error(LSBOp.getStartLoc(), |
3964 | | // "expected integer in range [0, 31]"); |
3965 | 0 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
3966 | 0 | return true; |
3967 | 0 | } |
3968 | 0 | if (Width < 1 || Width > RegWidth) { |
3969 | | //return Error(WidthOp.getStartLoc(), |
3970 | | // "expected integer in range [1, 32]"); |
3971 | 0 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
3972 | 0 | return true; |
3973 | 0 | } |
3974 | | |
3975 | 0 | uint64_t ImmR = 0; |
3976 | 0 | if (RegWidth == 32) |
3977 | 0 | ImmR = (32 - LSB) & 0x1f; |
3978 | 0 | else |
3979 | 0 | ImmR = (64 - LSB) & 0x3f; |
3980 | |
|
3981 | 0 | uint64_t ImmS = Width - 1; |
3982 | |
|
3983 | 0 | if (ImmR != 0 && ImmS >= ImmR) { |
3984 | | //return Error(WidthOp.getStartLoc(), |
3985 | | // "requested insert overflows register"); |
3986 | 0 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
3987 | 0 | return true; |
3988 | 0 | } |
3989 | | |
3990 | 0 | const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext()); |
3991 | 0 | const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext()); |
3992 | 0 | Operands[0] = AArch64Operand::CreateToken( |
3993 | 0 | "bfm", false, Op.getStartLoc(), getContext()); |
3994 | 0 | Operands[2] = AArch64Operand::CreateReg( |
3995 | 0 | RegWidth == 32 ? AArch64::WZR : AArch64::XZR, false, SMLoc(), |
3996 | 0 | SMLoc(), getContext()); |
3997 | 0 | Operands[3] = AArch64Operand::CreateImm( |
3998 | 0 | ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext()); |
3999 | 0 | Operands.emplace_back( |
4000 | 0 | AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(), |
4001 | 0 | WidthOp.getEndLoc(), getContext())); |
4002 | 0 | } |
4003 | 0 | } |
4004 | 2.94k | } else if (NumOperands == 5) { |
4005 | | // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and |
4006 | | // UBFIZ -> UBFM aliases. |
4007 | 1 | if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") { |
4008 | 0 | AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]); |
4009 | 0 | AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]); |
4010 | 0 | AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]); |
4011 | |
|
4012 | 0 | if (Op1.isReg() && Op3.isImm() && Op4.isImm()) { |
4013 | 0 | const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm()); |
4014 | 0 | const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm()); |
4015 | |
|
4016 | 0 | if (Op3CE && Op4CE) { |
4017 | 0 | uint64_t Op3Val = Op3CE->getValue(); |
4018 | 0 | uint64_t Op4Val = Op4CE->getValue(); |
4019 | |
|
4020 | 0 | uint64_t RegWidth = 0; |
4021 | 0 | if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains( |
4022 | 0 | Op1.getReg())) |
4023 | 0 | RegWidth = 64; |
4024 | 0 | else |
4025 | 0 | RegWidth = 32; |
4026 | |
|
4027 | 0 | if (Op3Val >= RegWidth) { |
4028 | | //return Error(Op3.getStartLoc(), |
4029 | | // "expected integer in range [0, 31]"); |
4030 | 0 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
4031 | 0 | return true; |
4032 | 0 | } |
4033 | 0 | if (Op4Val < 1 || Op4Val > RegWidth) { |
4034 | | //return Error(Op4.getStartLoc(), |
4035 | | // "expected integer in range [1, 32]"); |
4036 | 0 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
4037 | 0 | return true; |
4038 | 0 | } |
4039 | | |
4040 | 0 | uint64_t NewOp3Val = 0; |
4041 | 0 | if (RegWidth == 32) |
4042 | 0 | NewOp3Val = (32 - Op3Val) & 0x1f; |
4043 | 0 | else |
4044 | 0 | NewOp3Val = (64 - Op3Val) & 0x3f; |
4045 | |
|
4046 | 0 | uint64_t NewOp4Val = Op4Val - 1; |
4047 | |
|
4048 | 0 | if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val) { |
4049 | | //return Error(Op4.getStartLoc(), |
4050 | | // "requested insert overflows register"); |
4051 | 0 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
4052 | 0 | return true; |
4053 | 0 | } |
4054 | | |
4055 | 0 | const MCExpr *NewOp3 = |
4056 | 0 | MCConstantExpr::create(NewOp3Val, getContext()); |
4057 | 0 | const MCExpr *NewOp4 = |
4058 | 0 | MCConstantExpr::create(NewOp4Val, getContext()); |
4059 | 0 | Operands[3] = AArch64Operand::CreateImm( |
4060 | 0 | NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext()); |
4061 | 0 | Operands[4] = AArch64Operand::CreateImm( |
4062 | 0 | NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext()); |
4063 | 0 | if (Tok == "bfi") |
4064 | 0 | Operands[0] = AArch64Operand::CreateToken( |
4065 | 0 | "bfm", false, Op.getStartLoc(), getContext()); |
4066 | 0 | else if (Tok == "sbfiz") |
4067 | 0 | Operands[0] = AArch64Operand::CreateToken( |
4068 | 0 | "sbfm", false, Op.getStartLoc(), getContext()); |
4069 | 0 | else if (Tok == "ubfiz") |
4070 | 0 | Operands[0] = AArch64Operand::CreateToken( |
4071 | 0 | "ubfm", false, Op.getStartLoc(), getContext()); |
4072 | 0 | else |
4073 | 0 | llvm_unreachable("No valid mnemonic for alias?"); |
4074 | 0 | } |
4075 | 0 | } |
4076 | | |
4077 | | // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and |
4078 | | // UBFX -> UBFM aliases. |
4079 | 1 | } else if (NumOperands == 5 && |
4080 | 1 | (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) { |
4081 | 0 | AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]); |
4082 | 0 | AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]); |
4083 | 0 | AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]); |
4084 | |
|
4085 | 0 | if (Op1.isReg() && Op3.isImm() && Op4.isImm()) { |
4086 | 0 | const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm()); |
4087 | 0 | const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm()); |
4088 | |
|
4089 | 0 | if (Op3CE && Op4CE) { |
4090 | 0 | uint64_t Op3Val = Op3CE->getValue(); |
4091 | 0 | uint64_t Op4Val = Op4CE->getValue(); |
4092 | |
|
4093 | 0 | uint64_t RegWidth = 0; |
4094 | 0 | if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains( |
4095 | 0 | Op1.getReg())) |
4096 | 0 | RegWidth = 64; |
4097 | 0 | else |
4098 | 0 | RegWidth = 32; |
4099 | |
|
4100 | 0 | if (Op3Val >= RegWidth) { |
4101 | | // TODO: save ErrorCode |
4102 | | //return Error(Op3.getStartLoc(), |
4103 | | // "expected integer in range [0, 31]"); |
4104 | 0 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
4105 | 0 | return true; |
4106 | 0 | } |
4107 | 0 | if (Op4Val < 1 || Op4Val > RegWidth) { |
4108 | | // TODO: save ErrorCode |
4109 | | //return Error(Op4.getStartLoc(), |
4110 | | // "expected integer in range [1, 32]"); |
4111 | 0 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
4112 | 0 | return true; |
4113 | 0 | } |
4114 | | |
4115 | 0 | uint64_t NewOp4Val = Op3Val + Op4Val - 1; |
4116 | |
|
4117 | 0 | if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val) { |
4118 | | // TODO: save ErrorCode |
4119 | | //return Error(Op4.getStartLoc(), |
4120 | | // "requested extract overflows register"); |
4121 | 0 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
4122 | 0 | return true; |
4123 | 0 | } |
4124 | | |
4125 | 0 | const MCExpr *NewOp4 = |
4126 | 0 | MCConstantExpr::create(NewOp4Val, getContext()); |
4127 | 0 | Operands[4] = AArch64Operand::CreateImm( |
4128 | 0 | NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext()); |
4129 | 0 | if (Tok == "bfxil") |
4130 | 0 | Operands[0] = AArch64Operand::CreateToken( |
4131 | 0 | "bfm", false, Op.getStartLoc(), getContext()); |
4132 | 0 | else if (Tok == "sbfx") |
4133 | 0 | Operands[0] = AArch64Operand::CreateToken( |
4134 | 0 | "sbfm", false, Op.getStartLoc(), getContext()); |
4135 | 0 | else if (Tok == "ubfx") |
4136 | 0 | Operands[0] = AArch64Operand::CreateToken( |
4137 | 0 | "ubfm", false, Op.getStartLoc(), getContext()); |
4138 | 0 | else |
4139 | 0 | llvm_unreachable("No valid mnemonic for alias?"); |
4140 | 0 | } |
4141 | 0 | } |
4142 | 0 | } |
4143 | 1 | } |
4144 | | // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands. |
4145 | | // InstAlias can't quite handle this since the reg classes aren't |
4146 | | // subclasses. |
4147 | 2.94k | if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) { |
4148 | | // The source register can be Wn here, but the matcher expects a |
4149 | | // GPR64. Twiddle it here if necessary. |
4150 | 0 | AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]); |
4151 | 0 | if (Op.isReg()) { |
4152 | 0 | unsigned Reg = getXRegFromWReg(Op.getReg()); |
4153 | 0 | Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(), |
4154 | 0 | Op.getEndLoc(), getContext()); |
4155 | 0 | } |
4156 | 0 | } |
4157 | | // FIXME: Likewise for sxt[bh] with a Xd dst operand |
4158 | 2.94k | else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) { |
4159 | 0 | AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]); |
4160 | 0 | if (Op.isReg() && |
4161 | 0 | AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains( |
4162 | 0 | Op.getReg())) { |
4163 | | // The source register can be Wn here, but the matcher expects a |
4164 | | // GPR64. Twiddle it here if necessary. |
4165 | 0 | AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]); |
4166 | 0 | if (Op.isReg()) { |
4167 | 0 | unsigned Reg = getXRegFromWReg(Op.getReg()); |
4168 | 0 | Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(), |
4169 | 0 | Op.getEndLoc(), getContext()); |
4170 | 0 | } |
4171 | 0 | } |
4172 | 0 | } |
4173 | | // FIXME: Likewise for uxt[bh] with a Xd dst operand |
4174 | 2.94k | else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) { |
4175 | 0 | AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]); |
4176 | 0 | if (Op.isReg() && |
4177 | 0 | AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains( |
4178 | 0 | Op.getReg())) { |
4179 | | // The source register can be Wn here, but the matcher expects a |
4180 | | // GPR32. Twiddle it here if necessary. |
4181 | 0 | AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]); |
4182 | 0 | if (Op.isReg()) { |
4183 | 0 | unsigned Reg = getWRegFromXReg(Op.getReg()); |
4184 | 0 | Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(), |
4185 | 0 | Op.getEndLoc(), getContext()); |
4186 | 0 | } |
4187 | 0 | } |
4188 | 0 | } |
4189 | | |
4190 | | // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR. |
4191 | 2.94k | if (NumOperands == 3 && Tok == "fmov") { |
4192 | 96 | AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]); |
4193 | 96 | AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]); |
4194 | 96 | if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) { |
4195 | 96 | unsigned zreg = |
4196 | 96 | !AArch64MCRegisterClasses[AArch64::FPR64RegClassID].contains( |
4197 | 96 | RegOp.getReg()) |
4198 | 96 | ? AArch64::WZR |
4199 | 96 | : AArch64::XZR; |
4200 | 96 | Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(), |
4201 | 96 | Op.getEndLoc(), getContext()); |
4202 | 96 | } |
4203 | 96 | } |
4204 | | |
4205 | 2.94k | MCInst Inst(Address); |
4206 | | // First try to match against the secondary set of tables containing the |
4207 | | // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2"). |
4208 | 2.94k | unsigned MatchResult = |
4209 | 2.94k | MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1); |
4210 | | |
4211 | | // If that fails, try against the alternate table containing long-form NEON: |
4212 | | // "fadd v0.2s, v1.2s, v2.2s" |
4213 | 2.94k | if (MatchResult != Match_Success) { |
4214 | | // But first, save the short-form match result: we can use it in case the |
4215 | | // long-form match also fails. |
4216 | 231 | auto ShortFormNEONErrorInfo = ErrorInfo; |
4217 | 231 | auto ShortFormNEONMatchResult = MatchResult; |
4218 | | |
4219 | 231 | MatchResult = |
4220 | 231 | MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0); |
4221 | | |
4222 | | // Now, both matches failed, and the long-form match failed on the mnemonic |
4223 | | // suffix token operand. The short-form match failure is probably more |
4224 | | // relevant: use it instead. |
4225 | 231 | if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 && |
4226 | 231 | Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() && |
4227 | 231 | ((AArch64Operand &)*Operands[1]).isTokenSuffix()) { |
4228 | 1 | MatchResult = ShortFormNEONMatchResult; |
4229 | 1 | ErrorInfo = ShortFormNEONErrorInfo; |
4230 | 1 | } |
4231 | 231 | } |
4232 | | |
4233 | | // save the error code |
4234 | 2.94k | ErrorCode = MatchResult; |
4235 | | |
4236 | 2.94k | switch (MatchResult) { |
4237 | 2.71k | case Match_Success: { |
4238 | | // Perform range checking and other semantic validations |
4239 | 2.71k | SmallVector<SMLoc, 8> OperandLocs; |
4240 | 2.71k | NumOperands = Operands.size(); |
4241 | 8.49k | for (unsigned i = 1; i < NumOperands; ++i) |
4242 | 5.77k | OperandLocs.push_back(Operands[i]->getStartLoc()); |
4243 | 2.71k | if (validateInstruction(Inst, OperandLocs)) |
4244 | 0 | return true; |
4245 | | |
4246 | 2.71k | Inst.setLoc(IDLoc); |
4247 | 2.71k | Out.EmitInstruction(Inst, getSTI(), ErrorCode); |
4248 | 2.71k | if (ErrorCode == 0) { |
4249 | 2.71k | Address = Inst.getAddress(); // Keystone update address |
4250 | 2.71k | return false; |
4251 | 2.71k | } else |
4252 | 0 | return true; |
4253 | 2.71k | } |
4254 | 0 | case Match_MissingFeature: { |
4255 | 0 | assert(ErrorInfo && "Unknown missing feature!"); |
4256 | | // Special case the error message for the very common case where only |
4257 | | // a single subtarget feature is missing (neon, e.g.). |
4258 | 0 | std::string Msg = "instruction requires:"; |
4259 | 0 | uint64_t Mask = 1; |
4260 | 0 | for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) { |
4261 | 0 | if (ErrorInfo & Mask) { |
4262 | 0 | Msg += " "; |
4263 | 0 | Msg += getSubtargetFeatureName(ErrorInfo & Mask); |
4264 | 0 | } |
4265 | 0 | Mask <<= 1; |
4266 | 0 | } |
4267 | | //return Error(IDLoc, Msg); |
4268 | 0 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
4269 | 0 | return true; |
4270 | 0 | } |
4271 | 156 | case Match_MnemonicFail: |
4272 | 156 | return showMatchError(IDLoc, MatchResult); |
4273 | 73 | case Match_InvalidOperand: { |
4274 | 73 | SMLoc ErrorLoc = IDLoc; |
4275 | | |
4276 | 73 | if (ErrorInfo != ~0ULL) { |
4277 | 73 | if (ErrorInfo >= Operands.size()) { |
4278 | | //return Error(IDLoc, "too few operands for instruction"); |
4279 | 2 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
4280 | 2 | return true; |
4281 | 2 | } |
4282 | | |
4283 | 71 | ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc(); |
4284 | 71 | if (ErrorLoc == SMLoc()) |
4285 | 0 | ErrorLoc = IDLoc; |
4286 | 71 | } |
4287 | | // If the match failed on a suffix token operand, tweak the diagnostic |
4288 | | // accordingly. |
4289 | 71 | if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() && |
4290 | 71 | ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix()) |
4291 | 13 | MatchResult = Match_InvalidSuffix; |
4292 | | |
4293 | 71 | return showMatchError(ErrorLoc, MatchResult); |
4294 | 73 | } |
4295 | 0 | case Match_InvalidMemoryIndexed1: |
4296 | 0 | case Match_InvalidMemoryIndexed2: |
4297 | 0 | case Match_InvalidMemoryIndexed4: |
4298 | 0 | case Match_InvalidMemoryIndexed8: |
4299 | 0 | case Match_InvalidMemoryIndexed16: |
4300 | 0 | case Match_InvalidCondCode: |
4301 | 0 | case Match_AddSubRegExtendSmall: |
4302 | 0 | case Match_AddSubRegExtendLarge: |
4303 | 0 | case Match_AddSubSecondSource: |
4304 | 0 | case Match_LogicalSecondSource: |
4305 | 0 | case Match_AddSubRegShift32: |
4306 | 0 | case Match_AddSubRegShift64: |
4307 | 0 | case Match_InvalidMovImm32Shift: |
4308 | 0 | case Match_InvalidMovImm64Shift: |
4309 | 0 | case Match_InvalidFPImm: |
4310 | 0 | case Match_InvalidMemoryWExtend8: |
4311 | 0 | case Match_InvalidMemoryWExtend16: |
4312 | 0 | case Match_InvalidMemoryWExtend32: |
4313 | 0 | case Match_InvalidMemoryWExtend64: |
4314 | 0 | case Match_InvalidMemoryWExtend128: |
4315 | 0 | case Match_InvalidMemoryXExtend8: |
4316 | 0 | case Match_InvalidMemoryXExtend16: |
4317 | 0 | case Match_InvalidMemoryXExtend32: |
4318 | 0 | case Match_InvalidMemoryXExtend64: |
4319 | 0 | case Match_InvalidMemoryXExtend128: |
4320 | 0 | case Match_InvalidMemoryIndexed4SImm7: |
4321 | 0 | case Match_InvalidMemoryIndexed8SImm7: |
4322 | 0 | case Match_InvalidMemoryIndexed16SImm7: |
4323 | 0 | case Match_InvalidMemoryIndexedSImm9: |
4324 | 0 | case Match_InvalidImm0_1: |
4325 | 1 | case Match_InvalidImm0_7: |
4326 | 1 | case Match_InvalidImm0_15: |
4327 | 1 | case Match_InvalidImm0_31: |
4328 | 1 | case Match_InvalidImm0_63: |
4329 | 1 | case Match_InvalidImm0_127: |
4330 | 1 | case Match_InvalidImm0_65535: |
4331 | 1 | case Match_InvalidImm1_8: |
4332 | 1 | case Match_InvalidImm1_16: |
4333 | 1 | case Match_InvalidImm1_32: |
4334 | 1 | case Match_InvalidImm1_64: |
4335 | 1 | case Match_InvalidIndex1: |
4336 | 1 | case Match_InvalidIndexB: |
4337 | 1 | case Match_InvalidIndexH: |
4338 | 1 | case Match_InvalidIndexS: |
4339 | 1 | case Match_InvalidIndexD: |
4340 | 1 | case Match_InvalidLabel: |
4341 | 2 | case Match_MSR: |
4342 | 2 | case Match_MRS: { |
4343 | 2 | if (ErrorInfo >= Operands.size()) { |
4344 | | //return Error(IDLoc, "too few operands for instruction"); |
4345 | 0 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
4346 | 0 | return true; |
4347 | 0 | } |
4348 | | // Any time we get here, there's nothing fancy to do. Just get the |
4349 | | // operand SMLoc and display the diagnostic. |
4350 | 2 | SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc(); |
4351 | 2 | if (ErrorLoc == SMLoc()) |
4352 | 0 | ErrorLoc = IDLoc; |
4353 | 2 | return showMatchError(ErrorLoc, MatchResult); |
4354 | 2 | } |
4355 | 2.94k | } |
4356 | | |
4357 | 2.94k | llvm_unreachable("Implement any new match types added!"); |
4358 | 2.94k | } |
4359 | | |
4360 | | /// ParseDirective parses the arm specific directives |
4361 | 95.0k | bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) { |
4362 | 95.0k | const MCObjectFileInfo::Environment Format = |
4363 | 95.0k | getContext().getObjectFileInfo()->getObjectFileType(); |
4364 | 95.0k | bool IsMachO = Format == MCObjectFileInfo::IsMachO; |
4365 | 95.0k | bool IsCOFF = Format == MCObjectFileInfo::IsCOFF; |
4366 | | |
4367 | 95.0k | StringRef IDVal = DirectiveID.getIdentifier(); |
4368 | 95.0k | SMLoc Loc = DirectiveID.getLoc(); |
4369 | 95.0k | if (IDVal == ".hword") |
4370 | 0 | return parseDirectiveWord(2, Loc); |
4371 | 95.0k | if (IDVal == ".word") |
4372 | 12.7k | return parseDirectiveWord(4, Loc); |
4373 | 82.2k | if (IDVal == ".xword") |
4374 | 0 | return parseDirectiveWord(8, Loc); |
4375 | 82.2k | if (IDVal == ".tlsdesccall") |
4376 | 0 | return parseDirectiveTLSDescCall(Loc); |
4377 | 82.2k | if (IDVal == ".ltorg" || IDVal == ".pool") |
4378 | 1 | return parseDirectiveLtorg(Loc); |
4379 | 82.2k | if (IDVal == ".unreq") |
4380 | 64 | return parseDirectiveUnreq(Loc); |
4381 | | |
4382 | 82.1k | if (!IsMachO && !IsCOFF) { |
4383 | 82.1k | if (IDVal == ".inst") |
4384 | 237 | return parseDirectiveInst(Loc); |
4385 | 82.1k | } |
4386 | | |
4387 | 81.9k | return parseDirectiveLOH(IDVal, Loc); |
4388 | 82.1k | } |
4389 | | |
4390 | | /// parseDirectiveWord |
4391 | | /// ::= .word [ expression (, expression)* ] |
4392 | | bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) |
4393 | 12.7k | { |
4394 | 12.7k | MCAsmParser &Parser = getParser(); |
4395 | 12.7k | if (getLexer().isNot(AsmToken::EndOfStatement)) { |
4396 | 48.9k | for (;;) { |
4397 | 48.9k | const MCExpr *Value; |
4398 | 48.9k | if (getParser().parseExpression(Value)) |
4399 | 487 | return true; |
4400 | | |
4401 | 48.4k | getParser().getStreamer().EmitValue(Value, Size, L); |
4402 | | |
4403 | 48.4k | if (getLexer().is(AsmToken::EndOfStatement)) |
4404 | 7.52k | break; |
4405 | | |
4406 | | // FIXME: Improve diagnostic. |
4407 | 40.9k | if (getLexer().isNot(AsmToken::Comma)) |
4408 | | //return Error(L, "unexpected token in directive"); |
4409 | 4.29k | return true; |
4410 | 36.6k | Parser.Lex(); |
4411 | 36.6k | } |
4412 | 12.3k | } |
4413 | | |
4414 | 8.01k | Parser.Lex(); |
4415 | 8.01k | return false; |
4416 | 12.7k | } |
4417 | | |
4418 | | /// parseDirectiveInst |
4419 | | /// ::= .inst opcode [, ...] |
4420 | | bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) |
4421 | 237 | { |
4422 | 237 | MCAsmParser &Parser = getParser(); |
4423 | 237 | if (getLexer().is(AsmToken::EndOfStatement)) { |
4424 | 1 | Parser.eatToEndOfStatement(); |
4425 | | //Error(Loc, "expected expression following directive"); |
4426 | 1 | return false; |
4427 | 1 | } |
4428 | | |
4429 | 247 | for (;;) { |
4430 | 247 | const MCExpr *Expr; |
4431 | | |
4432 | 247 | if (getParser().parseExpression(Expr)) { |
4433 | | //Error(Loc, "expected expression"); |
4434 | 1 | return false; |
4435 | 1 | } |
4436 | | |
4437 | 246 | const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr); |
4438 | 246 | if (!Value) { |
4439 | | //Error(Loc, "expected constant expression"); |
4440 | 210 | return false; |
4441 | 210 | } |
4442 | | |
4443 | 36 | getTargetStreamer().emitInst(Value->getValue()); |
4444 | | |
4445 | 36 | if (getLexer().is(AsmToken::EndOfStatement)) |
4446 | 10 | break; |
4447 | | |
4448 | 26 | if (getLexer().isNot(AsmToken::Comma)) { |
4449 | | //Error(Loc, "unexpected token in directive"); |
4450 | 15 | return false; |
4451 | 15 | } |
4452 | | |
4453 | 11 | Parser.Lex(); // Eat comma. |
4454 | 11 | } |
4455 | | |
4456 | 10 | Parser.Lex(); |
4457 | 10 | return false; |
4458 | 236 | } |
4459 | | |
4460 | | // parseDirectiveTLSDescCall: |
4461 | | // ::= .tlsdesccall symbol |
4462 | 0 | bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) { |
4463 | 0 | StringRef Name; |
4464 | 0 | if (getParser().parseIdentifier(Name)) |
4465 | | //return Error(L, "expected symbol after directive"); |
4466 | 0 | return true; |
4467 | | |
4468 | 0 | MCSymbol *Sym = getContext().getOrCreateSymbol(Name); |
4469 | 0 | const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext()); |
4470 | 0 | Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext()); |
4471 | |
|
4472 | 0 | MCInst Inst; |
4473 | 0 | Inst.setOpcode(AArch64::TLSDESCCALL); |
4474 | 0 | Inst.addOperand(MCOperand::createExpr(Expr)); |
4475 | |
|
4476 | 0 | unsigned int KsError; |
4477 | 0 | getParser().getStreamer().EmitInstruction(Inst, getSTI(), KsError); |
4478 | 0 | return false; |
4479 | 0 | } |
4480 | | |
/// ::= .loh <lohName | lohId> label1, ..., labelN
/// The number of arguments depends on the loh identifier.
///
/// Also serves as the fall-through for every unrecognized directive name
/// (see ParseDirective): for those, the first check below returns true,
/// meaning "directive not handled" to the generic parser.
bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc)
{
  if (IDVal != MCLOHDirectiveName())
    return true;
  MCLOHType Kind;
  // The LOH kind may be given either numerically or by name.
  if (getParser().getTok().isNot(AsmToken::Identifier)) {
    if (getParser().getTok().isNot(AsmToken::Integer))
      //return TokError("expected an identifier or a number in directive");
      return true;
    // We successfully get a numeric value for the identifier.
    // Check if it is valid.
    bool valid;
    int64_t Id = getParser().getTok().getIntVal(valid);
    if (!valid)
      return true;
    // NOTE(review): "Id <= -1U" compares the signed 64-bit Id against the
    // unsigned literal -1U (0xFFFFFFFF, widened to int64_t), so this only
    // rejects values above 4294967295 before consulting isValidMCLOHType.
    // This matches the inherited upstream code but looks suspicious -
    // confirm the intended range check.
    if (Id <= -1U && !isValidMCLOHType(Id))
      //return TokError("invalid numeric identifier in directive");
      return true;
    Kind = (MCLOHType)Id;
  } else {
    StringRef Name = getTok().getIdentifier();
    // We successfully parse an identifier.
    // Check if it is a recognized one.
    int Id = MCLOHNameToId(Name);

    if (Id == -1)
      //return TokError("invalid identifier in directive");
      return true;
    Kind = (MCLOHType)Id;
  }
  // Consume the identifier.
  Lex();
  // Get the number of arguments of this LOH.
  int NbArgs = MCLOHIdToNbArgs(Kind);

  assert(NbArgs != -1 && "Invalid number of arguments");

  // Parse exactly NbArgs comma-separated label identifiers.
  SmallVector<MCSymbol *, 3> Args;
  for (int Idx = 0; Idx < NbArgs; ++Idx) {
    StringRef Name;
    if (getParser().parseIdentifier(Name))
      //return TokError("expected identifier in directive");
      return true;
    Args.push_back(getContext().getOrCreateSymbol(Name));

    if (Idx + 1 == NbArgs)
      break;
    if (getLexer().isNot(AsmToken::Comma))
      //return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
      return true;
    Lex();
  }
  if (getLexer().isNot(AsmToken::EndOfStatement))
    //return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
    return true;

  getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
  return false;
}
4542 | | |
4543 | | /// parseDirectiveLtorg |
4544 | | /// ::= .ltorg | .pool |
4545 | 1 | bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) { |
4546 | 1 | getTargetStreamer().emitCurrentConstantPool(); |
4547 | 1 | return false; |
4548 | 1 | } |
4549 | | |
4550 | | /// parseDirectiveReq |
4551 | | /// ::= name .req registername |
4552 | | bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) |
4553 | 0 | { |
4554 | 0 | MCAsmParser &Parser = getParser(); |
4555 | 0 | Parser.Lex(); // Eat the '.req' token. |
4556 | | //SMLoc SRegLoc = getLoc(); |
4557 | 0 | unsigned RegNum = tryParseRegister(); |
4558 | 0 | bool IsVector = false; |
4559 | |
|
4560 | 0 | if (RegNum == static_cast<unsigned>(-1)) { |
4561 | 0 | StringRef Kind; |
4562 | 0 | RegNum = tryMatchVectorRegister(Kind, false); |
4563 | 0 | if (!Kind.empty()) { |
4564 | | //Error(SRegLoc, "vector register without type specifier expected"); |
4565 | 0 | return false; |
4566 | 0 | } |
4567 | 0 | IsVector = true; |
4568 | 0 | } |
4569 | | |
4570 | 0 | if (RegNum == static_cast<unsigned>(-1)) { |
4571 | 0 | Parser.eatToEndOfStatement(); |
4572 | | //Error(SRegLoc, "register name or alias expected"); |
4573 | 0 | return false; |
4574 | 0 | } |
4575 | | |
4576 | | // Shouldn't be anything else. |
4577 | 0 | if (Parser.getTok().isNot(AsmToken::EndOfStatement)) { |
4578 | | //Error(Parser.getTok().getLoc(), "unexpected input in .req directive"); |
4579 | 0 | Parser.eatToEndOfStatement(); |
4580 | 0 | return false; |
4581 | 0 | } |
4582 | | |
4583 | 0 | Parser.Lex(); // Consume the EndOfStatement |
4584 | |
|
4585 | 0 | auto pair = std::make_pair(IsVector, RegNum); |
4586 | 0 | if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair) |
4587 | 0 | Warning(L, "ignoring redefinition of register alias '" + Name + "'"); |
4588 | |
|
4589 | 0 | return true; |
4590 | 0 | } |
4591 | | |
4592 | | /// parseDirectiveUneq |
4593 | | /// ::= .unreq registername |
4594 | | bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) |
4595 | 64 | { |
4596 | 64 | MCAsmParser &Parser = getParser(); |
4597 | 64 | if (Parser.getTok().isNot(AsmToken::Identifier)) { |
4598 | | //Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive."); |
4599 | 1 | Parser.eatToEndOfStatement(); |
4600 | 1 | return false; |
4601 | 1 | } |
4602 | 63 | RegisterReqs.erase(Parser.getTok().getIdentifier().lower()); |
4603 | 63 | Parser.Lex(); // Eat the identifier. |
4604 | 63 | return false; |
4605 | 64 | } |
4606 | | |
4607 | | bool |
4608 | | AArch64AsmParser::classifySymbolRef(const MCExpr *Expr, |
4609 | | AArch64MCExpr::VariantKind &ELFRefKind, |
4610 | | MCSymbolRefExpr::VariantKind &DarwinRefKind, |
4611 | 0 | int64_t &Addend) { |
4612 | 0 | ELFRefKind = AArch64MCExpr::VK_INVALID; |
4613 | 0 | DarwinRefKind = MCSymbolRefExpr::VK_None; |
4614 | 0 | Addend = 0; |
4615 | |
|
4616 | 0 | if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) { |
4617 | 0 | ELFRefKind = AE->getKind(); |
4618 | 0 | Expr = AE->getSubExpr(); |
4619 | 0 | } |
4620 | |
|
4621 | 0 | const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr); |
4622 | 0 | if (SE) { |
4623 | | // It's a simple symbol reference with no addend. |
4624 | 0 | DarwinRefKind = SE->getKind(); |
4625 | 0 | return true; |
4626 | 0 | } |
4627 | | |
4628 | 0 | const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr); |
4629 | 0 | if (!BE) |
4630 | 0 | return false; |
4631 | | |
4632 | 0 | SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS()); |
4633 | 0 | if (!SE) |
4634 | 0 | return false; |
4635 | 0 | DarwinRefKind = SE->getKind(); |
4636 | |
|
4637 | 0 | if (BE->getOpcode() != MCBinaryExpr::Add && |
4638 | 0 | BE->getOpcode() != MCBinaryExpr::Sub) |
4639 | 0 | return false; |
4640 | | |
4641 | | // See if the addend is is a constant, otherwise there's more going |
4642 | | // on here than we can deal with. |
4643 | 0 | auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS()); |
4644 | 0 | if (!AddendExpr) |
4645 | 0 | return false; |
4646 | | |
4647 | 0 | Addend = AddendExpr->getValue(); |
4648 | 0 | if (BE->getOpcode() == MCBinaryExpr::Sub) |
4649 | 0 | Addend = -Addend; |
4650 | | |
4651 | | // It's some symbol reference + a constant addend, but really |
4652 | | // shouldn't use both Darwin and ELF syntax. |
4653 | 0 | return ELFRefKind == AArch64MCExpr::VK_INVALID || |
4654 | 0 | DarwinRefKind == MCSymbolRefExpr::VK_None; |
4655 | 0 | } |
4656 | | |
4657 | | /// Force static initialization. |
4658 | 25 | extern "C" void LLVMInitializeAArch64AsmParser() { |
4659 | 25 | RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget); |
4660 | 25 | RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget); |
4661 | 25 | RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64Target); |
4662 | 25 | } |
4663 | | |
4664 | | #define GET_REGISTER_MATCHER |
4665 | | #define GET_SUBTARGET_FEATURE_NAME |
4666 | | #define GET_MATCHER_IMPLEMENTATION |
4667 | | #include "AArch64GenAsmMatcher.inc" |
4668 | | |
4669 | | // Define this matcher function after the auto-generated include so we |
4670 | | // have the match class enum definitions. |
4671 | | unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp, |
4672 | 2.60k | unsigned Kind) { |
4673 | 2.60k | AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp); |
4674 | | // If the kind is a token for a literal immediate, check if our asm |
4675 | | // operand matches. This is for InstAliases which have a fixed-value |
4676 | | // immediate in the syntax. |
4677 | 2.60k | int64_t ExpectedVal; |
4678 | 2.60k | switch (Kind) { |
4679 | 2.60k | default: |
4680 | 2.60k | return Match_InvalidOperand; |
4681 | 0 | case MCK__35_0: |
4682 | 0 | ExpectedVal = 0; |
4683 | 0 | break; |
4684 | 0 | case MCK__35_1: |
4685 | 0 | ExpectedVal = 1; |
4686 | 0 | break; |
4687 | 0 | case MCK__35_12: |
4688 | 0 | ExpectedVal = 12; |
4689 | 0 | break; |
4690 | 0 | case MCK__35_16: |
4691 | 0 | ExpectedVal = 16; |
4692 | 0 | break; |
4693 | 0 | case MCK__35_2: |
4694 | 0 | ExpectedVal = 2; |
4695 | 0 | break; |
4696 | 0 | case MCK__35_24: |
4697 | 0 | ExpectedVal = 24; |
4698 | 0 | break; |
4699 | 0 | case MCK__35_3: |
4700 | 0 | ExpectedVal = 3; |
4701 | 0 | break; |
4702 | 0 | case MCK__35_32: |
4703 | 0 | ExpectedVal = 32; |
4704 | 0 | break; |
4705 | 0 | case MCK__35_4: |
4706 | 0 | ExpectedVal = 4; |
4707 | 0 | break; |
4708 | 0 | case MCK__35_48: |
4709 | 0 | ExpectedVal = 48; |
4710 | 0 | break; |
4711 | 0 | case MCK__35_6: |
4712 | 0 | ExpectedVal = 6; |
4713 | 0 | break; |
4714 | 0 | case MCK__35_64: |
4715 | 0 | ExpectedVal = 64; |
4716 | 0 | break; |
4717 | 0 | case MCK__35_8: |
4718 | 0 | ExpectedVal = 8; |
4719 | 0 | break; |
4720 | 2.60k | } |
4721 | 0 | if (!Op.isImm()) |
4722 | 0 | return Match_InvalidOperand; |
4723 | 0 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()); |
4724 | 0 | if (!CE) |
4725 | 0 | return Match_InvalidOperand; |
4726 | 0 | if (CE->getValue() == ExpectedVal) |
4727 | 0 | return Match_Success; |
4728 | 0 | return Match_InvalidOperand; |
4729 | 0 | } |
4730 | | |
4731 | | |
/// tryParseGPRSeqPair - Parse a consecutive same-size even/odd GPR pair
/// operand (e.g. "x0, x1" or "w2, w3") and push it as a single
/// super-register operand (presumably for pair-consuming instructions -
/// TODO confirm against the matcher tables). The first register must have
/// an even encoding and the second must be its immediate odd successor of
/// the same size. Keystone suppresses the diagnostics (left commented out)
/// and reports MatchOperand_ParseFail instead.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands)
{
  // Remember where the operand started for the eventual operand location.
  SMLoc S = getLoc();

  if (getParser().getTok().isNot(AsmToken::Identifier)) {
    //Error(S, "expected register");
    return MatchOperand_ParseFail;
  }

  int FirstReg = tryParseRegister();
  if (FirstReg == -1) {
    return MatchOperand_ParseFail;
  }
  const MCRegisterClass &WRegClass =
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
  const MCRegisterClass &XRegClass =
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID];

  // The first register fixes the size (W vs X) for the whole pair.
  bool isXReg = XRegClass.contains(FirstReg),
       isWReg = WRegClass.contains(FirstReg);
  if (!isXReg && !isWReg) {
    //Error(S, "expected first even register of a "
    //         "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  unsigned FirstEncoding = RI->getEncodingValue(FirstReg);

  // The first register of the pair must have an even encoding.
  if (FirstEncoding & 0x1) {
    //Error(S, "expected first even register of a "
    //         "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  //SMLoc M = getLoc();
  if (getParser().getTok().isNot(AsmToken::Comma)) {
    //Error(M, "expected comma");
    return MatchOperand_ParseFail;
  }
  // Eat the comma
  getParser().Lex();

  //SMLoc E = getLoc();
  int SecondReg = tryParseRegister();
  if (SecondReg ==-1) {
    return MatchOperand_ParseFail;
  }

  // The second register must be the odd successor of the first and in the
  // same register class (same size).
  if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
      (isXReg && !XRegClass.contains(SecondReg)) ||
      (isWReg && !WRegClass.contains(SecondReg))) {
    //Error(E,"expected second odd register of a "
    //         "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  // Fold the two registers into the sequential-pair super-register.
  unsigned Pair = 0;
  if(isXReg) {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
           &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
  } else {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
           &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
  }

  Operands.push_back(AArch64Operand::CreateReg(Pair, false, S, getLoc(),
      getContext()));

  return MatchOperand_Success;
}