Coverage Report

Created: 2023-09-25 06:27

/src/keystone/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
Line | Count | Source
1
//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2
//
3
//                     The LLVM Compiler Infrastructure
4
//
5
// This file is distributed under the University of Illinois Open Source
6
// License. See LICENSE.TXT for details.
7
//
8
//===----------------------------------------------------------------------===//
9
10
#include "MCTargetDesc/AArch64AddressingModes.h"
11
#include "MCTargetDesc/AArch64MCExpr.h"
12
#include "MCTargetDesc/AArch64TargetStreamer.h"
13
#include "Utils/AArch64BaseInfo.h"
14
#include "llvm/ADT/APInt.h"
15
#include "llvm/ADT/STLExtras.h"
16
#include "llvm/ADT/SmallString.h"
17
#include "llvm/ADT/SmallVector.h"
18
#include "llvm/ADT/StringSwitch.h"
19
#include "llvm/ADT/Twine.h"
20
#include "llvm/MC/MCContext.h"
21
#include "llvm/MC/MCExpr.h"
22
#include "llvm/MC/MCInst.h"
23
#include "llvm/MC/MCObjectFileInfo.h"
24
#include "llvm/MC/MCParser/MCAsmLexer.h"
25
#include "llvm/MC/MCParser/MCAsmParser.h"
26
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
27
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
28
#include "llvm/MC/MCRegisterInfo.h"
29
#include "llvm/MC/MCStreamer.h"
30
#include "llvm/MC/MCSubtargetInfo.h"
31
#include "llvm/MC/MCSymbol.h"
32
#include "llvm/Support/ErrorHandling.h"
33
#include "llvm/Support/SourceMgr.h"
34
#include "llvm/Support/TargetRegistry.h"
35
#include "llvm/Support/raw_ostream.h"
36
37
#include "keystone/arm64.h"
38
39
#include <cstdio>
40
using namespace llvm_ks;
41
42
namespace {
43
44
class AArch64Operand;
45
46
class AArch64AsmParser : public MCTargetAsmParser {
47
private:
48
  StringRef Mnemonic; ///< Instruction mnemonic.
49
50
  // Map of register aliases created via the .req directive.
51
  StringMap<std::pair<bool, unsigned> > RegisterReqs;
52
53
733
  AArch64TargetStreamer &getTargetStreamer() {
54
733
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
55
733
    return static_cast<AArch64TargetStreamer &>(TS);
56
733
  }
57
58
84.7k
  SMLoc getLoc() const { return getParser().getTok().getLoc(); }
59
60
  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
61
  AArch64CC::CondCode parseCondCodeString(StringRef Cond);
62
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
63
  unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
64
  int tryParseRegister();
65
  int tryMatchVectorRegister(StringRef &Kind, bool expected);
66
  bool parseRegister(OperandVector &Operands);
67
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
68
  bool parseVectorList(OperandVector &Operands);
69
  bool parseOperand(OperandVector &Operands, bool isCondCode,
70
                    bool invertCondCode);
71
72
0
  void Warning(SMLoc L, const Twine &Msg) { getParser().Warning(L, Msg); }
73
  //bool Error(SMLoc L, const Twine &Msg) { return getParser().Error(L, Msg); }
74
  //bool Error(SMLoc L, const Twine &Msg) { return true; }
75
  bool showMatchError(SMLoc Loc, unsigned ErrCode);
76
77
  bool parseDirectiveWord(unsigned Size, SMLoc L);
78
  bool parseDirectiveInst(SMLoc L);
79
80
  bool parseDirectiveTLSDescCall(SMLoc L);
81
82
  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
83
  bool parseDirectiveLtorg(SMLoc L);
84
85
  bool parseDirectiveReq(StringRef Name, SMLoc L);
86
  bool parseDirectiveUnreq(SMLoc L);
87
88
  bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
89
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
90
                               OperandVector &Operands, MCStreamer &Out,
91
                               uint64_t &ErrorInfo,
92
                               bool MatchingInlineAsm, unsigned int &ErrorCode, uint64_t &Address) override;
93
/// @name Auto-generated Match Functions
94
/// {
95
96
#define GET_ASSEMBLER_HEADER
97
#include "AArch64GenAsmMatcher.inc"
98
99
  /// }
100
101
  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
102
  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
103
  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
104
  OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
105
  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
106
  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
107
  OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
108
  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
109
  OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
110
  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
111
  OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
112
  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
113
  bool tryParseVectorRegister(OperandVector &Operands);
114
  OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
115
116
public:
117
  enum AArch64MatchResultTy {
118
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
119
#define GET_OPERAND_DIAGNOSTIC_TYPES
120
#include "AArch64GenAsmMatcher.inc"
121
  };
122
  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
123
                   const MCInstrInfo &MII, const MCTargetOptions &Options)
124
2.03k
    : MCTargetAsmParser(Options, STI) {
125
2.03k
    MCAsmParserExtension::Initialize(Parser);
126
2.03k
    MCStreamer &S = getParser().getStreamer();
127
2.03k
    if (S.getTargetStreamer() == nullptr)
128
2.03k
      new AArch64TargetStreamer(S);
129
130
    // Initialize the set of available features.
131
2.03k
    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
132
2.03k
  }
133
134
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
135
                        SMLoc NameLoc, OperandVector &Operands, unsigned int &ErrorCode) override;
136
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc, unsigned int &ErrorCode) override;
137
  bool ParseDirective(AsmToken DirectiveID) override;
138
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
139
                                      unsigned Kind) override;
140
141
  static bool classifySymbolRef(const MCExpr *Expr,
142
                                AArch64MCExpr::VariantKind &ELFRefKind,
143
                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
144
                                int64_t &Addend);
145
};
146
} // end anonymous namespace
147
148
namespace {
149
150
/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
151
/// instruction.
152
class AArch64Operand : public MCParsedAsmOperand {
153
private:
154
  enum KindTy {
155
    k_Immediate,
156
    k_ShiftedImm,
157
    k_CondCode,
158
    k_Register,
159
    k_VectorList,
160
    k_VectorIndex,
161
    k_Token,
162
    k_SysReg,
163
    k_SysCR,
164
    k_Prefetch,
165
    k_ShiftExtend,
166
    k_FPImm,
167
    k_Barrier,
168
    k_PSBHint,
169
  } Kind;
170
171
  SMLoc StartLoc, EndLoc;
172
173
  struct TokOp {
174
    const char *Data;
175
    unsigned Length;
176
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
177
  };
178
179
  struct RegOp {
180
    unsigned RegNum;
181
    bool isVector;
182
  };
183
184
  struct VectorListOp {
185
    unsigned RegNum;
186
    unsigned Count;
187
    unsigned NumElements;
188
    unsigned ElementKind;
189
  };
190
191
  struct VectorIndexOp {
192
    unsigned Val;
193
  };
194
195
  struct ImmOp {
196
    const MCExpr *Val;
197
  };
198
199
  struct ShiftedImmOp {
200
    const MCExpr *Val;
201
    unsigned ShiftAmount;
202
  };
203
204
  struct CondCodeOp {
205
    AArch64CC::CondCode Code;
206
  };
207
208
  struct FPImmOp {
209
    unsigned Val; // Encoded 8-bit representation.
210
  };
211
212
  struct BarrierOp {
213
    unsigned Val; // Not the enum since not all values have names.
214
    const char *Data;
215
    unsigned Length;
216
  };
217
218
  struct SysRegOp {
219
    const char *Data;
220
    unsigned Length;
221
    uint32_t MRSReg;
222
    uint32_t MSRReg;
223
    uint32_t PStateField;
224
  };
225
226
  struct SysCRImmOp {
227
    unsigned Val;
228
  };
229
230
  struct PrefetchOp {
231
    unsigned Val;
232
    const char *Data;
233
    unsigned Length;
234
  };
235
236
  struct PSBHintOp {
237
    unsigned Val;
238
    const char *Data;
239
    unsigned Length;
240
  };
241
242
  struct ShiftExtendOp {
243
    AArch64_AM::ShiftExtendType Type;
244
    unsigned Amount;
245
    bool HasExplicitAmount;
246
  };
247
248
  struct ExtendOp {
249
    unsigned Val;
250
  };
251
252
  union {
253
    struct TokOp Tok;
254
    struct RegOp Reg;
255
    struct VectorListOp VectorList;
256
    struct VectorIndexOp VectorIndex;
257
    struct ImmOp Imm;
258
    struct ShiftedImmOp ShiftedImm;
259
    struct CondCodeOp CondCode;
260
    struct FPImmOp FPImm;
261
    struct BarrierOp Barrier;
262
    struct SysRegOp SysReg;
263
    struct SysCRImmOp SysCRImm;
264
    struct PrefetchOp Prefetch;
265
    struct PSBHintOp PSBHint;
266
    struct ShiftExtendOp ShiftExtend;
267
  };
268
269
  // Keep the MCContext around as the MCExprs may need to be manipulated during
270
  // the add<>Operands() calls.
271
  MCContext &Ctx;
272
273
public:
274
93.9k
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
275
276
2
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
277
2
    Kind = o.Kind;
278
2
    StartLoc = o.StartLoc;
279
2
    EndLoc = o.EndLoc;
280
2
    switch (Kind) {
281
1
    case k_Token:
282
1
      Tok = o.Tok;
283
1
      break;
284
1
    case k_Immediate:
285
1
      Imm = o.Imm;
286
1
      break;
287
0
    case k_ShiftedImm:
288
0
      ShiftedImm = o.ShiftedImm;
289
0
      break;
290
0
    case k_CondCode:
291
0
      CondCode = o.CondCode;
292
0
      break;
293
0
    case k_FPImm:
294
0
      FPImm = o.FPImm;
295
0
      break;
296
0
    case k_Barrier:
297
0
      Barrier = o.Barrier;
298
0
      break;
299
0
    case k_Register:
300
0
      Reg = o.Reg;
301
0
      break;
302
0
    case k_VectorList:
303
0
      VectorList = o.VectorList;
304
0
      break;
305
0
    case k_VectorIndex:
306
0
      VectorIndex = o.VectorIndex;
307
0
      break;
308
0
    case k_SysReg:
309
0
      SysReg = o.SysReg;
310
0
      break;
311
0
    case k_SysCR:
312
0
      SysCRImm = o.SysCRImm;
313
0
      break;
314
0
    case k_Prefetch:
315
0
      Prefetch = o.Prefetch;
316
0
      break;
317
0
    case k_PSBHint:
318
0
      PSBHint = o.PSBHint;
319
0
      break;
320
0
    case k_ShiftExtend:
321
0
      ShiftExtend = o.ShiftExtend;
322
0
      break;
323
2
    }
324
2
  }
325
326
  /// getStartLoc - Get the location of the first token of this operand.
327
2.69k
  SMLoc getStartLoc() const override { return StartLoc; }
328
  /// getEndLoc - Get the location of the last token of this operand.
329
10
  SMLoc getEndLoc() const override { return EndLoc; }
330
331
5.55k
  StringRef getToken() const {
332
5.55k
    assert(Kind == k_Token && "Invalid access!");
333
0
    return StringRef(Tok.Data, Tok.Length);
334
5.55k
  }
335
336
35
  bool isTokenSuffix() const {
337
35
    assert(Kind == k_Token && "Invalid access!");
338
0
    return Tok.IsSuffix;
339
35
  }
340
341
4.01k
  const MCExpr *getImm() const {
342
4.01k
    assert(Kind == k_Immediate && "Invalid access!");
343
0
    return Imm.Val;
344
4.01k
  }
345
346
134
  const MCExpr *getShiftedImmVal() const {
347
134
    assert(Kind == k_ShiftedImm && "Invalid access!");
348
0
    return ShiftedImm.Val;
349
134
  }
350
351
129
  unsigned getShiftedImmShift() const {
352
129
    assert(Kind == k_ShiftedImm && "Invalid access!");
353
0
    return ShiftedImm.ShiftAmount;
354
129
  }
355
356
364
  AArch64CC::CondCode getCondCode() const {
357
364
    assert(Kind == k_CondCode && "Invalid access!");
358
0
    return CondCode.Code;
359
364
  }
360
361
34
  unsigned getFPImm() const {
362
34
    assert(Kind == k_FPImm && "Invalid access!");
363
0
    return FPImm.Val;
364
34
  }
365
366
48
  unsigned getBarrier() const {
367
48
    assert(Kind == k_Barrier && "Invalid access!");
368
0
    return Barrier.Val;
369
48
  }
370
371
0
  StringRef getBarrierName() const {
372
0
    assert(Kind == k_Barrier && "Invalid access!");
373
0
    return StringRef(Barrier.Data, Barrier.Length);
374
0
  }
375
376
3.44k
  unsigned getReg() const override {
377
3.44k
    assert(Kind == k_Register && "Invalid access!");
378
0
    return Reg.RegNum;
379
3.44k
  }
380
381
0
  unsigned getVectorListStart() const {
382
0
    assert(Kind == k_VectorList && "Invalid access!");
383
0
    return VectorList.RegNum;
384
0
  }
385
386
0
  unsigned getVectorListCount() const {
387
0
    assert(Kind == k_VectorList && "Invalid access!");
388
0
    return VectorList.Count;
389
0
  }
390
391
0
  unsigned getVectorIndex() const {
392
0
    assert(Kind == k_VectorIndex && "Invalid access!");
393
0
    return VectorIndex.Val;
394
0
  }
395
396
0
  StringRef getSysReg() const {
397
0
    assert(Kind == k_SysReg && "Invalid access!");
398
0
    return StringRef(SysReg.Data, SysReg.Length);
399
0
  }
400
401
0
  unsigned getSysCR() const {
402
0
    assert(Kind == k_SysCR && "Invalid access!");
403
0
    return SysCRImm.Val;
404
0
  }
405
406
0
  unsigned getPrefetch() const {
407
0
    assert(Kind == k_Prefetch && "Invalid access!");
408
0
    return Prefetch.Val;
409
0
  }
410
411
0
  unsigned getPSBHint() const {
412
0
    assert(Kind == k_PSBHint && "Invalid access!");
413
0
    return PSBHint.Val;
414
0
  }
415
416
0
  StringRef getPSBHintName() const {
417
0
    assert(Kind == k_PSBHint && "Invalid access!");
418
0
    return StringRef(PSBHint.Data, PSBHint.Length);
419
0
  }
420
421
0
  StringRef getPrefetchName() const {
422
0
    assert(Kind == k_Prefetch && "Invalid access!");
423
0
    return StringRef(Prefetch.Data, Prefetch.Length);
424
0
  }
425
426
0
  AArch64_AM::ShiftExtendType getShiftExtendType() const {
427
0
    assert(Kind == k_ShiftExtend && "Invalid access!");
428
0
    return ShiftExtend.Type;
429
0
  }
430
431
0
  unsigned getShiftExtendAmount() const {
432
0
    assert(Kind == k_ShiftExtend && "Invalid access!");
433
0
    return ShiftExtend.Amount;
434
0
  }
435
436
0
  bool hasShiftExtendAmount() const {
437
0
    assert(Kind == k_ShiftExtend && "Invalid access!");
438
0
    return ShiftExtend.HasExplicitAmount;
439
0
  }
440
441
1.37k
  bool isImm() const override { return Kind == k_Immediate; }
442
0
  bool isMem() const override { return false; }
443
0
  bool isSImm9() const {
444
0
    if (!isImm())
445
0
      return false;
446
0
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
447
0
    if (!MCE)
448
0
      return false;
449
0
    int64_t Val = MCE->getValue();
450
0
    return (Val >= -256 && Val < 256);
451
0
  }
452
0
  bool isSImm7s4() const {
453
0
    if (!isImm())
454
0
      return false;
455
0
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
456
0
    if (!MCE)
457
0
      return false;
458
0
    int64_t Val = MCE->getValue();
459
0
    return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
460
0
  }
461
0
  bool isSImm7s8() const {
462
0
    if (!isImm())
463
0
      return false;
464
0
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
465
0
    if (!MCE)
466
0
      return false;
467
0
    int64_t Val = MCE->getValue();
468
0
    return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
469
0
  }
470
0
  bool isSImm7s16() const {
471
0
    if (!isImm())
472
0
      return false;
473
0
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
474
0
    if (!MCE)
475
0
      return false;
476
0
    int64_t Val = MCE->getValue();
477
0
    return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
478
0
  }
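
The three isSImm7s* predicates above correspond to the scaled signed 7-bit offsets used by paired loads and stores (LDP/STP). A minimal sketch of the arithmetic, assuming only standard C++ (simm7Scaled is an illustrative name, not a helper from this file):

#include <cstdint>

// Illustrative sketch: a signed 7-bit count of 4-, 8- or 16-byte units, so for
// X-register pairs (Scale = 8) the byte offset runs from -512 to 504 in steps of 8.
constexpr bool simm7Scaled(int64_t Val, int64_t Scale) {
  return (Val % Scale) == 0 && Val / Scale >= -64 && Val / Scale <= 63;
}
static_assert(simm7Scaled(504, 8) && simm7Scaled(-512, 8), "limits of the s8 form");
static_assert(!simm7Scaled(505, 8) && !simm7Scaled(512, 8), "unaligned or out of range");
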
479
480
0
  bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
481
0
    AArch64MCExpr::VariantKind ELFRefKind;
482
0
    MCSymbolRefExpr::VariantKind DarwinRefKind;
483
0
    int64_t Addend;
484
0
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
485
0
                                           Addend)) {
486
      // If we don't understand the expression, assume the best and
487
      // let the fixup and relocation code deal with it.
488
0
      return true;
489
0
    }
490
491
0
    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
492
0
        ELFRefKind == AArch64MCExpr::VK_LO12 ||
493
0
        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
494
0
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
495
0
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
496
0
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
497
0
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
498
0
        ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
499
0
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
500
      // Note that we don't range-check the addend. It's adjusted modulo page
501
      // size when converted, so there is no "out of range" condition when using
502
      // @pageoff.
503
0
      return Addend >= 0 && (Addend % Scale) == 0;
504
0
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
505
0
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
506
      // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
507
0
      return Addend == 0;
508
0
    }
509
510
0
    return false;
511
0
  }
512
513
0
  template <int Scale> bool isUImm12Offset() const {
514
0
    if (!isImm())
515
0
      return false;
516
517
0
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
518
0
    if (!MCE)
519
0
      return isSymbolicUImm12Offset(getImm(), Scale);
520
521
0
    int64_t Val = MCE->getValue();
522
0
    return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
523
0
  }
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isUImm12Offset<16>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isUImm12Offset<1>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isUImm12Offset<2>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isUImm12Offset<4>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isUImm12Offset<8>() const
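
For constant immediates, isUImm12Offset<Scale> reduces to the check sketched below; Scale is the access size in bytes, so an 8-byte LDR accepts byte offsets 0 through 32760 (4095 * 8) in steps of 8. The helper name is illustrative, not from this file:

#include <cstdint>

constexpr bool scaledUImm12(int64_t Val, int64_t Scale) {
  return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
}
static_assert(scaledUImm12(32760, 8), "largest 8-byte-scaled offset");
static_assert(!scaledUImm12(32761, 8) && !scaledUImm12(-8, 8), "unaligned/negative rejected");
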
524
525
0
  bool isImm0_1() const {
526
0
    if (!isImm())
527
0
      return false;
528
0
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
529
0
    if (!MCE)
530
0
      return false;
531
0
    int64_t Val = MCE->getValue();
532
0
    return (Val >= 0 && Val < 2);
533
0
  }
534
16
  bool isImm0_7() const {
535
16
    if (!isImm())
536
0
      return false;
537
16
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
538
16
    if (!MCE)
539
0
      return false;
540
16
    int64_t Val = MCE->getValue();
541
16
    return (Val >= 0 && Val < 8);
542
16
  }
543
0
  bool isImm1_8() const {
544
0
    if (!isImm())
545
0
      return false;
546
0
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
547
0
    if (!MCE)
548
0
      return false;
549
0
    int64_t Val = MCE->getValue();
550
0
    return (Val > 0 && Val < 9);
551
0
  }
552
7
  bool isImm0_15() const {
553
7
    if (!isImm())
554
0
      return false;
555
7
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
556
7
    if (!MCE)
557
0
      return false;
558
7
    int64_t Val = MCE->getValue();
559
7
    return (Val >= 0 && Val < 16);
560
7
  }
561
0
  bool isImm1_16() const {
562
0
    if (!isImm())
563
0
      return false;
564
0
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
565
0
    if (!MCE)
566
0
      return false;
567
0
    int64_t Val = MCE->getValue();
568
0
    return (Val > 0 && Val < 17);
569
0
  }
570
7
  bool isImm0_31() const {
571
7
    if (!isImm())
572
0
      return false;
573
7
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
574
7
    if (!MCE)
575
0
      return false;
576
7
    int64_t Val = MCE->getValue();
577
7
    return (Val >= 0 && Val < 32);
578
7
  }
579
0
  bool isImm1_31() const {
580
0
    if (!isImm())
581
0
      return false;
582
0
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
583
0
    if (!MCE)
584
0
      return false;
585
0
    int64_t Val = MCE->getValue();
586
0
    return (Val >= 1 && Val < 32);
587
0
  }
588
0
  bool isImm1_32() const {
589
0
    if (!isImm())
590
0
      return false;
591
0
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
592
0
    if (!MCE)
593
0
      return false;
594
0
    int64_t Val = MCE->getValue();
595
0
    return (Val >= 1 && Val < 33);
596
0
  }
597
0
  bool isImm0_63() const {
598
0
    if (!isImm())
599
0
      return false;
600
0
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
601
0
    if (!MCE)
602
0
      return false;
603
0
    int64_t Val = MCE->getValue();
604
0
    return (Val >= 0 && Val < 64);
605
0
  }
606
0
  bool isImm1_63() const {
607
0
    if (!isImm())
608
0
      return false;
609
0
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
610
0
    if (!MCE)
611
0
      return false;
612
0
    int64_t Val = MCE->getValue();
613
0
    return (Val >= 1 && Val < 64);
614
0
  }
615
0
  bool isImm1_64() const {
616
0
    if (!isImm())
617
0
      return false;
618
0
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
619
0
    if (!MCE)
620
0
      return false;
621
0
    int64_t Val = MCE->getValue();
622
0
    return (Val >= 1 && Val < 65);
623
0
  }
624
0
  bool isImm0_127() const {
625
0
    if (!isImm())
626
0
      return false;
627
0
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
628
0
    if (!MCE)
629
0
      return false;
630
0
    int64_t Val = MCE->getValue();
631
0
    return (Val >= 0 && Val < 128);
632
0
  }
633
0
  bool isImm0_255() const {
634
0
    if (!isImm())
635
0
      return false;
636
0
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
637
0
    if (!MCE)
638
0
      return false;
639
0
    int64_t Val = MCE->getValue();
640
0
    return (Val >= 0 && Val < 256);
641
0
  }
642
2
  bool isImm0_65535() const {
643
2
    if (!isImm())
644
0
      return false;
645
2
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
646
2
    if (!MCE)
647
0
      return false;
648
2
    int64_t Val = MCE->getValue();
649
2
    return (Val >= 0 && Val < 65536);
650
2
  }
651
0
  bool isImm32_63() const {
652
0
    if (!isImm())
653
0
      return false;
654
0
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
655
0
    if (!MCE)
656
0
      return false;
657
0
    int64_t Val = MCE->getValue();
658
0
    return (Val >= 32 && Val < 64);
659
0
  }
660
0
  bool isLogicalImm32() const {
661
0
    if (!isImm())
662
0
      return false;
663
0
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
664
0
    if (!MCE)
665
0
      return false;
666
0
    int64_t Val = MCE->getValue();
667
0
    if (Val >> 32 != 0 && Val >> 32 != ~0LL)
668
0
      return false;
669
0
    Val &= 0xFFFFFFFF;
670
0
    return AArch64_AM::isLogicalImmediate(Val, 32);
671
0
  }
672
2
  bool isLogicalImm64() const {
673
2
    if (!isImm())
674
0
      return false;
675
2
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
676
2
    if (!MCE)
677
0
      return false;
678
2
    return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
679
2
  }
680
0
  bool isLogicalImm32Not() const {
681
0
    if (!isImm())
682
0
      return false;
683
0
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
684
0
    if (!MCE)
685
0
      return false;
686
0
    int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
687
0
    return AArch64_AM::isLogicalImmediate(Val, 32);
688
0
  }
689
0
  bool isLogicalImm64Not() const {
690
0
    if (!isImm())
691
0
      return false;
692
0
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
693
0
    if (!MCE)
694
0
      return false;
695
0
    return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
696
0
  }
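
The four isLogicalImm* predicates defer to AArch64_AM::isLogicalImmediate. The sketch below re-derives the architectural rule for the 64-bit case as a brute-force check, under the assumption that a logical (bitmask) immediate is a rotated contiguous run of ones inside a power-of-two sized element, replicated across the register; it is illustrative only, not the library routine:

#include <cstdint>

static bool looksLikeLogicalImm64(uint64_t V) {
  if (V == 0 || V == ~0ULL)           // all-zeros and all-ones are not encodable
    return false;
  for (unsigned Size = 2; Size <= 64; Size *= 2) {
    const uint64_t Mask = (Size == 64) ? ~0ULL : ((1ULL << Size) - 1);
    const uint64_t Elt = V & Mask;
    bool Replicates = true;           // the element must repeat across the register
    for (unsigned I = Size; I < 64 && Replicates; I += Size)
      Replicates = ((V >> I) & Mask) == Elt;
    if (!Replicates)
      continue;
    for (unsigned R = 0; R < Size; ++R) {   // some rotation must be a low run of ones
      const uint64_t Rot =
          (R == 0) ? Elt : (((Elt >> R) | (Elt << (Size - R))) & Mask);
      if (Rot != 0 && ((Rot + 1) & Rot) == 0)
        return true;
    }
  }
  return false;
}
// e.g. 0x00ff00ff00ff00ff and 0x0003fffc pass, 0x123 does not.
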
697
665
  bool isShiftedImm() const { return Kind == k_ShiftedImm; }
698
129
  bool isAddSubImm() const {
699
129
    if (!isShiftedImm() && !isImm())
700
0
      return false;
701
702
129
    const MCExpr *Expr;
703
704
    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
705
129
    if (isShiftedImm()) {
706
129
      unsigned Shift = ShiftedImm.ShiftAmount;
707
129
      Expr = ShiftedImm.Val;
708
129
      if (Shift != 0 && Shift != 12)
709
0
        return false;
710
129
    } else {
711
0
      Expr = getImm();
712
0
    }
713
714
129
    AArch64MCExpr::VariantKind ELFRefKind;
715
129
    MCSymbolRefExpr::VariantKind DarwinRefKind;
716
129
    int64_t Addend;
717
129
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
718
129
                                          DarwinRefKind, Addend)) {
719
0
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
720
0
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
721
0
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
722
0
          || ELFRefKind == AArch64MCExpr::VK_LO12
723
0
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
724
0
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
725
0
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
726
0
          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
727
0
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
728
0
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
729
0
          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
730
0
    }
731
732
    // Otherwise it should be a real immediate in range:
733
129
    const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
734
129
    return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
735
129
  }
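
At the instruction level the two forms accepted above combine into "a 12-bit value, optionally shifted left by 12", so #4095 and #4096 (i.e. #1, lsl #12) both encode directly while #4097 does not. A minimal sketch of that combined test (illustrative name, not from this file):

#include <cstdint>

constexpr bool addSubEncodable(uint64_t V) {
  return (V & ~0xfffULL) == 0 || (V & ~(0xfffULL << 12)) == 0;
}
static_assert(addSubEncodable(4095) && addSubEncodable(4096), "");
static_assert(!addSubEncodable(4097), "needs to be materialised in a register first");
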
736
134
  bool isAddSubImmNeg() const {
737
134
    if (!isShiftedImm() && !isImm())
738
0
      return false;
739
740
134
    const MCExpr *Expr;
741
742
    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
743
134
    if (isShiftedImm()) {
744
134
      unsigned Shift = ShiftedImm.ShiftAmount;
745
134
      Expr = ShiftedImm.Val;
746
134
      if (Shift != 0 && Shift != 12)
747
0
        return false;
748
134
    } else
749
0
      Expr = getImm();
750
751
    // Otherwise it should be a real negative immediate in range:
752
134
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
753
134
    return CE != nullptr && CE->getValue() < 0 && -CE->getValue() <= 0xfff;
754
134
  }
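
isAddSubImmNeg only accepts negative constants whose magnitude still fits the 12-bit field; the matcher (presumably via the auto-generated tables included above) can then flip ADD to SUB and vice versa, so "add x0, x1, #-8" is accepted and emitted as "sub x0, x1, #8". A sketch of the range test, with an illustrative name:

#include <cstdint>

constexpr bool addSubNegEncodable(int64_t V) {
  return V < 0 && -V <= 0xfff;
}
static_assert(addSubNegEncodable(-1) && addSubNegEncodable(-4095), "");
static_assert(!addSubNegEncodable(-4096) && !addSubNegEncodable(1), "");
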
755
368
  bool isCondCode() const { return Kind == k_CondCode; }
756
0
  bool isSIMDImmType10() const {
757
0
    if (!isImm())
758
0
      return false;
759
0
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
760
0
    if (!MCE)
761
0
      return false;
762
0
    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
763
0
  }
764
947
  bool isBranchTarget26() const {
765
947
    if (!isImm())
766
0
      return false;
767
947
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
768
947
    if (!MCE)
769
947
      return true;
770
0
    int64_t Val = MCE->getValue();
771
0
    return ((Val & 0x3) == 0);
772
947
  }
773
362
  bool isPCRelLabel19() const {
774
362
    if (!isImm())
775
0
      return false;
776
362
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
777
362
    if (!MCE)
778
359
      return true;
779
3
    int64_t Val = MCE->getValue();
780
3
    return ((Val & 0x3) == 0);
781
362
  }
782
0
  bool isBranchTarget14() const {
783
0
    if (!isImm())
784
0
      return false;
785
0
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
786
0
    if (!MCE)
787
0
      return true;
788
0
    int64_t Val = MCE->getValue();
789
0
    return ((Val & 0x3) == 0);
790
0
  }
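
The three branch-target predicates above only enforce 4-byte alignment on constant targets; the reachable range follows from the field width applied at encoding time (26 bits for B/BL, 19 for conditional branches, CBZ and literal loads, 14 for TBZ/TBNZ), each counting words. A sketch of that arithmetic, assuming the standard encodings (fitsBranchField is an illustrative name):

#include <cstdint>

constexpr bool fitsBranchField(int64_t ByteOffset, unsigned Bits) {
  return (ByteOffset & 3) == 0 &&
         ByteOffset / 4 >= -(1LL << (Bits - 1)) &&
         ByteOffset / 4 <  (1LL << (Bits - 1));
}
static_assert(fitsBranchField(128 * 1024 * 1024 - 4, 26), "B/BL reach +128 MiB - 4");
static_assert(!fitsBranchField(128 * 1024 * 1024, 26), "");
static_assert(fitsBranchField(-(1 * 1024 * 1024), 19), "B.cond reaches -1 MiB");
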
791
792
  bool
793
0
  isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
794
0
    if (!isImm())
795
0
      return false;
796
797
0
    AArch64MCExpr::VariantKind ELFRefKind;
798
0
    MCSymbolRefExpr::VariantKind DarwinRefKind;
799
0
    int64_t Addend;
800
0
    if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
801
0
                                             DarwinRefKind, Addend)) {
802
0
      return false;
803
0
    }
804
0
    if (DarwinRefKind != MCSymbolRefExpr::VK_None)
805
0
      return false;
806
807
0
    for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
808
0
      if (ELFRefKind == AllowedModifiers[i])
809
0
        return Addend == 0;
810
0
    }
811
812
0
    return false;
813
0
  }
814
815
0
  bool isMovZSymbolG3() const {
816
0
    return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
817
0
  }
818
819
0
  bool isMovZSymbolG2() const {
820
0
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
821
0
                         AArch64MCExpr::VK_TPREL_G2,
822
0
                         AArch64MCExpr::VK_DTPREL_G2});
823
0
  }
824
825
0
  bool isMovZSymbolG1() const {
826
0
    return isMovWSymbol({
827
0
        AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
828
0
        AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
829
0
        AArch64MCExpr::VK_DTPREL_G1,
830
0
    });
831
0
  }
832
833
0
  bool isMovZSymbolG0() const {
834
0
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
835
0
                         AArch64MCExpr::VK_TPREL_G0,
836
0
                         AArch64MCExpr::VK_DTPREL_G0});
837
0
  }
838
839
0
  bool isMovKSymbolG3() const {
840
0
    return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
841
0
  }
842
843
0
  bool isMovKSymbolG2() const {
844
0
    return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
845
0
  }
846
847
0
  bool isMovKSymbolG1() const {
848
0
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
849
0
                         AArch64MCExpr::VK_TPREL_G1_NC,
850
0
                         AArch64MCExpr::VK_DTPREL_G1_NC});
851
0
  }
852
853
0
  bool isMovKSymbolG0() const {
854
0
    return isMovWSymbol(
855
0
        {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
856
0
         AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
857
0
  }
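
The isMovZSymbolG*/isMovKSymbolG* predicates classify relocation modifiers such as #:abs_g1:sym or #:tprel_g0_nc:sym, which select one 16-bit group of the final value for a MOVZ/MOVK sequence. A sketch of what "group N" means, assuming nothing beyond the shift arithmetic (group is an illustrative name):

#include <cstdint>

constexpr uint16_t group(uint64_t Value, unsigned N) {
  return static_cast<uint16_t>(Value >> (16 * N));   // G0 = bits 0-15, ..., G3 = bits 48-63
}
static_assert(group(0x1122334455667788ULL, 0) == 0x7788, "");
static_assert(group(0x1122334455667788ULL, 3) == 0x1122, "");
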
858
859
  template<int RegWidth, int Shift>
860
24
  bool isMOVZMovAlias() const {
861
24
    if (!isImm()) return false;
862
863
24
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
864
24
    if (!CE) return false;
865
24
    uint64_t Value = CE->getValue();
866
867
24
    if (RegWidth == 32)
868
0
      Value &= 0xffffffffULL;
869
870
    // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
871
24
    if (Value == 0 && Shift != 0)
872
0
      return false;
873
874
24
    return (Value & ~(0xffffULL << Shift)) == 0;
875
24
  }
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isMOVZMovAlias<32, 0>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isMOVZMovAlias<32, 16>() const
AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isMOVZMovAlias<64, 0>() const
Line | Count | Source
860
18
  bool isMOVZMovAlias() const {
861
18
    if (!isImm()) return false;
862
863
18
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
864
18
    if (!CE) return false;
865
18
    uint64_t Value = CE->getValue();
866
867
18
    if (RegWidth == 32)
868
0
      Value &= 0xffffffffULL;
869
870
    // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
871
18
    if (Value == 0 && Shift != 0)
872
0
      return false;
873
874
18
    return (Value & ~(0xffffULL << Shift)) == 0;
875
18
  }
AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isMOVZMovAlias<64, 16>() const
Line | Count | Source
860
2
  bool isMOVZMovAlias() const {
861
2
    if (!isImm()) return false;
862
863
2
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
864
2
    if (!CE) return false;
865
2
    uint64_t Value = CE->getValue();
866
867
2
    if (RegWidth == 32)
868
0
      Value &= 0xffffffffULL;
869
870
    // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
871
2
    if (Value == 0 && Shift != 0)
872
0
      return false;
873
874
2
    return (Value & ~(0xffffULL << Shift)) == 0;
875
2
  }
AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isMOVZMovAlias<64, 32>() const
Line | Count | Source
860
2
  bool isMOVZMovAlias() const {
861
2
    if (!isImm()) return false;
862
863
2
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
864
2
    if (!CE) return false;
865
2
    uint64_t Value = CE->getValue();
866
867
2
    if (RegWidth == 32)
868
0
      Value &= 0xffffffffULL;
869
870
    // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
871
2
    if (Value == 0 && Shift != 0)
872
0
      return false;
873
874
2
    return (Value & ~(0xffffULL << Shift)) == 0;
875
2
  }
AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isMOVZMovAlias<64, 48>() const
Line | Count | Source
860
2
  bool isMOVZMovAlias() const {
861
2
    if (!isImm()) return false;
862
863
2
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
864
2
    if (!CE) return false;
865
2
    uint64_t Value = CE->getValue();
866
867
2
    if (RegWidth == 32)
868
0
      Value &= 0xffffffffULL;
869
870
    // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
871
2
    if (Value == 0 && Shift != 0)
872
0
      return false;
873
874
2
    return (Value & ~(0xffffULL << Shift)) == 0;
875
2
  }
876
877
  template<int RegWidth, int Shift>
878
8
  bool isMOVNMovAlias() const {
879
8
    if (!isImm()) return false;
880
881
8
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
882
8
    if (!CE) return false;
883
8
    uint64_t Value = CE->getValue();
884
885
    // MOVZ takes precedence over MOVN.
886
40
    for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
887
32
      if ((Value & ~(0xffffULL << MOVZShift)) == 0)
888
0
        return false;
889
890
8
    Value = ~Value;
891
8
    if (RegWidth == 32)
892
0
      Value &= 0xffffffffULL;
893
894
8
    return (Value & ~(0xffffULL << Shift)) == 0;
895
8
  }
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isMOVNMovAlias<32, 0>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isMOVNMovAlias<32, 16>() const
AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isMOVNMovAlias<64, 0>() const
Line | Count | Source
878
2
  bool isMOVNMovAlias() const {
879
2
    if (!isImm()) return false;
880
881
2
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
882
2
    if (!CE) return false;
883
2
    uint64_t Value = CE->getValue();
884
885
    // MOVZ takes precedence over MOVN.
886
10
    for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
887
8
      if ((Value & ~(0xffffULL << MOVZShift)) == 0)
888
0
        return false;
889
890
2
    Value = ~Value;
891
2
    if (RegWidth == 32)
892
0
      Value &= 0xffffffffULL;
893
894
2
    return (Value & ~(0xffffULL << Shift)) == 0;
895
2
  }
AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isMOVNMovAlias<64, 16>() const
Line | Count | Source
878
2
  bool isMOVNMovAlias() const {
879
2
    if (!isImm()) return false;
880
881
2
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
882
2
    if (!CE) return false;
883
2
    uint64_t Value = CE->getValue();
884
885
    // MOVZ takes precedence over MOVN.
886
10
    for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
887
8
      if ((Value & ~(0xffffULL << MOVZShift)) == 0)
888
0
        return false;
889
890
2
    Value = ~Value;
891
2
    if (RegWidth == 32)
892
0
      Value &= 0xffffffffULL;
893
894
2
    return (Value & ~(0xffffULL << Shift)) == 0;
895
2
  }
AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isMOVNMovAlias<64, 32>() const
Line | Count | Source
878
2
  bool isMOVNMovAlias() const {
879
2
    if (!isImm()) return false;
880
881
2
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
882
2
    if (!CE) return false;
883
2
    uint64_t Value = CE->getValue();
884
885
    // MOVZ takes precedence over MOVN.
886
10
    for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
887
8
      if ((Value & ~(0xffffULL << MOVZShift)) == 0)
888
0
        return false;
889
890
2
    Value = ~Value;
891
2
    if (RegWidth == 32)
892
0
      Value &= 0xffffffffULL;
893
894
2
    return (Value & ~(0xffffULL << Shift)) == 0;
895
2
  }
AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isMOVNMovAlias<64, 48>() const
Line | Count | Source
878
2
  bool isMOVNMovAlias() const {
879
2
    if (!isImm()) return false;
880
881
2
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
882
2
    if (!CE) return false;
883
2
    uint64_t Value = CE->getValue();
884
885
    // MOVZ takes precedence over MOVN.
886
10
    for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
887
8
      if ((Value & ~(0xffffULL << MOVZShift)) == 0)
888
0
        return false;
889
890
2
    Value = ~Value;
891
2
    if (RegWidth == 32)
892
0
      Value &= 0xffffffffULL;
893
894
2
    return (Value & ~(0xffffULL << Shift)) == 0;
895
2
  }
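
Together, the two alias templates above decide how a plain "mov Xd, #imm" is encoded: MOVZ wins whenever one 16-bit group, shifted into place, reproduces the value, and MOVN is tried on the bitwise complement only when MOVZ cannot. A sketch of the two tests with illustrative names:

#include <cstdint>

constexpr bool movzEncodable(uint64_t V, unsigned Shift) {
  return (V & ~(0xffffULL << Shift)) == 0;
}
constexpr bool movnEncodable(uint64_t V, unsigned Shift) {
  return movzEncodable(~V, Shift);                 // complement fits a single group
}
static_assert(movzEncodable(0x12340000ULL, 16), "mov x0, #0x12340000 -> movz, lsl #16");
static_assert(!movzEncodable(0x123456789ULL, 16), "needs a movz/movk sequence instead");
static_assert(movnEncodable(0xffffffffffff1234ULL, 0), "complemented value fits -> movn");
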
896
897
36
  bool isFPImm() const { return Kind == k_FPImm; }
898
48
  bool isBarrier() const { return Kind == k_Barrier; }
899
0
  bool isSysReg() const { return Kind == k_SysReg; }
900
0
  bool isMRSSystemRegister() const {
901
0
    if (!isSysReg()) return false;
902
903
0
    return SysReg.MRSReg != -1U;
904
0
  }
905
0
  bool isMSRSystemRegister() const {
906
0
    if (!isSysReg()) return false;
907
0
    return SysReg.MSRReg != -1U;
908
0
  }
909
0
  bool isSystemPStateFieldWithImm0_1() const {
910
0
    if (!isSysReg()) return false;
911
0
    return (SysReg.PStateField == AArch64PState::PAN ||
912
0
            SysReg.PStateField == AArch64PState::UAO);
913
0
  }
914
0
  bool isSystemPStateFieldWithImm0_15() const {
915
0
    if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
916
0
    return SysReg.PStateField != -1U;
917
0
  }
918
3.74k
  bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
919
42
  bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
920
0
  bool isVectorRegLo() const {
921
0
    return Kind == k_Register && Reg.isVector &&
922
0
           AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
923
0
               Reg.RegNum);
924
0
  }
925
0
  bool isGPR32as64() const {
926
0
    return Kind == k_Register && !Reg.isVector &&
927
0
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
928
0
  }
929
0
  bool isWSeqPair() const {
930
0
    return Kind == k_Register && !Reg.isVector &&
931
0
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
932
0
               Reg.RegNum);
933
0
  }
934
0
  bool isXSeqPair() const {
935
0
    return Kind == k_Register && !Reg.isVector &&
936
0
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
937
0
               Reg.RegNum);
938
0
  }
939
940
0
  bool isGPR64sp0() const {
941
0
    return Kind == k_Register && !Reg.isVector &&
942
0
      AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
943
0
  }
944
945
  /// Is this a vector list with the type implicit (presumably attached to the
946
  /// instruction itself)?
947
0
  template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
948
0
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
949
0
           !VectorList.ElementKind;
950
0
  }
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isImplicitlyTypedVectorList<4u>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isImplicitlyTypedVectorList<1u>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isImplicitlyTypedVectorList<3u>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isImplicitlyTypedVectorList<2u>() const
951
952
  template <unsigned NumRegs, unsigned NumElements, char ElementKind>
953
0
  bool isTypedVectorList() const {
954
0
    if (Kind != k_VectorList)
955
0
      return false;
956
0
    if (VectorList.Count != NumRegs)
957
0
      return false;
958
0
    if (VectorList.ElementKind != ElementKind)
959
0
      return false;
960
0
    return VectorList.NumElements == NumElements;
961
0
  }
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<4u, 16u, (char)98>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<4u, 1u, (char)100>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<4u, 2u, (char)100>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<4u, 2u, (char)115>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<4u, 4u, (char)104>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<4u, 4u, (char)115>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<4u, 8u, (char)98>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<4u, 8u, (char)104>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<4u, 0u, (char)98>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<4u, 0u, (char)100>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<4u, 0u, (char)104>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<4u, 0u, (char)115>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<1u, 16u, (char)98>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<1u, 1u, (char)100>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<1u, 2u, (char)100>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<1u, 2u, (char)115>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<1u, 4u, (char)104>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<1u, 4u, (char)115>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<1u, 8u, (char)98>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<1u, 8u, (char)104>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<1u, 0u, (char)98>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<1u, 0u, (char)100>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<1u, 0u, (char)104>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<1u, 0u, (char)115>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<3u, 16u, (char)98>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<3u, 1u, (char)100>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<3u, 2u, (char)100>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<3u, 2u, (char)115>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<3u, 4u, (char)104>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<3u, 4u, (char)115>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<3u, 8u, (char)98>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<3u, 8u, (char)104>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<3u, 0u, (char)98>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<3u, 0u, (char)100>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<3u, 0u, (char)104>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<3u, 0u, (char)115>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<2u, 16u, (char)98>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<2u, 1u, (char)100>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<2u, 2u, (char)100>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<2u, 2u, (char)115>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<2u, 4u, (char)104>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<2u, 4u, (char)115>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<2u, 8u, (char)98>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<2u, 8u, (char)104>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<2u, 0u, (char)98>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<2u, 0u, (char)100>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<2u, 0u, (char)104>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isTypedVectorList<2u, 0u, (char)115>() const
962
963
0
  bool isVectorIndex1() const {
964
0
    return Kind == k_VectorIndex && VectorIndex.Val == 1;
965
0
  }
966
0
  bool isVectorIndexB() const {
967
0
    return Kind == k_VectorIndex && VectorIndex.Val < 16;
968
0
  }
969
0
  bool isVectorIndexH() const {
970
0
    return Kind == k_VectorIndex && VectorIndex.Val < 8;
971
0
  }
972
0
  bool isVectorIndexS() const {
973
0
    return Kind == k_VectorIndex && VectorIndex.Val < 4;
974
0
  }
975
0
  bool isVectorIndexD() const {
976
0
    return Kind == k_VectorIndex && VectorIndex.Val < 2;
977
0
  }
978
8.43k
  bool isToken() const override { return Kind == k_Token; }
979
0
  bool isTokenEqual(StringRef Str) const {
980
0
    return Kind == k_Token && getToken() == Str;
981
0
  }
982
0
  bool isSysCR() const { return Kind == k_SysCR; }
983
0
  bool isPrefetch() const { return Kind == k_Prefetch; }
984
0
  bool isPSBHint() const { return Kind == k_PSBHint; }
985
0
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
986
0
  bool isShifter() const {
987
0
    if (!isShiftExtend())
988
0
      return false;
989
990
0
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
991
0
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
992
0
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
993
0
            ST == AArch64_AM::MSL);
994
0
  }
995
0
  bool isExtend() const {
996
0
    if (!isShiftExtend())
997
0
      return false;
998
999
0
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1000
0
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1001
0
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1002
0
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1003
0
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1004
0
            ET == AArch64_AM::LSL) &&
1005
0
           getShiftExtendAmount() <= 4;
1006
0
  }
1007
1008
0
  bool isExtend64() const {
1009
0
    if (!isExtend())
1010
0
      return false;
1011
    // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
1012
0
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1013
0
    return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
1014
0
  }
1015
0
  bool isExtendLSL64() const {
1016
0
    if (!isExtend())
1017
0
      return false;
1018
0
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1019
0
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1020
0
            ET == AArch64_AM::LSL) &&
1021
0
           getShiftExtendAmount() <= 4;
1022
0
  }
1023
1024
0
  template<int Width> bool isMemXExtend() const {
1025
0
    if (!isExtend())
1026
0
      return false;
1027
0
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1028
0
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1029
0
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
1030
0
            getShiftExtendAmount() == 0);
1031
0
  }
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isMemXExtend<128>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isMemXExtend<16>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isMemXExtend<32>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isMemXExtend<64>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isMemXExtend<8>() const
1032
1033
0
  template<int Width> bool isMemWExtend() const {
1034
0
    if (!isExtend())
1035
0
      return false;
1036
0
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1037
0
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1038
0
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
1039
0
            getShiftExtendAmount() == 0);
1040
0
  }
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isMemWExtend<128>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isMemWExtend<16>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isMemWExtend<32>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isMemWExtend<64>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isMemWExtend<8>() const
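
For register-offset addressing, isMemXExtend/isMemWExtend above require the optional shift amount to be either 0 or log2 of the access size in bytes, e.g. "ldr x0, [x1, w2, uxtw #3]" is accepted for an 8-byte load but "uxtw #2" is not. A sketch of that rule (illustrative names only):

#include <cstdint>

constexpr unsigned log2Bytes(unsigned WidthBits) {
  return WidthBits == 8 ? 0 : 1 + log2Bytes(WidthBits / 2);
}
constexpr bool legalMemExtendAmount(unsigned WidthBits, unsigned Amount) {
  return Amount == 0 || Amount == log2Bytes(WidthBits);
}
static_assert(legalMemExtendAmount(64, 3) && legalMemExtendAmount(64, 0), "");
static_assert(!legalMemExtendAmount(64, 2), "");
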
1041
1042
  template <unsigned width>
1043
0
  bool isArithmeticShifter() const {
1044
0
    if (!isShifter())
1045
0
      return false;
1046
1047
    // An arithmetic shifter is LSL, LSR, or ASR.
1048
0
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1049
0
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1050
0
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1051
0
  }
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isArithmeticShifter<32u>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isArithmeticShifter<64u>() const
1052
1053
  template <unsigned width>
1054
0
  bool isLogicalShifter() const {
1055
0
    if (!isShifter())
1056
0
      return false;
1057
1058
    // A logical shifter is LSL, LSR, ASR or ROR.
1059
0
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1060
0
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1061
0
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1062
0
           getShiftExtendAmount() < width;
1063
0
  }
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isLogicalShifter<32u>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isLogicalShifter<64u>() const
1064
1065
0
  bool isMovImm32Shifter() const {
1066
0
    if (!isShifter())
1067
0
      return false;
1068
1069
    // A 32-bit MOVi shifter is LSL of 0 or 16.
1070
0
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1071
0
    if (ST != AArch64_AM::LSL)
1072
0
      return false;
1073
0
    uint64_t Val = getShiftExtendAmount();
1074
0
    return (Val == 0 || Val == 16);
1075
0
  }
1076
1077
0
  bool isMovImm64Shifter() const {
1078
0
    if (!isShifter())
1079
0
      return false;
1080
1081
    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
1082
0
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1083
0
    if (ST != AArch64_AM::LSL)
1084
0
      return false;
1085
0
    uint64_t Val = getShiftExtendAmount();
1086
0
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1087
0
  }
1088
1089
0
  bool isLogicalVecShifter() const {
1090
0
    if (!isShifter())
1091
0
      return false;
1092
1093
    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1094
0
    unsigned Shift = getShiftExtendAmount();
1095
0
    return getShiftExtendType() == AArch64_AM::LSL &&
1096
0
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1097
0
  }
1098
1099
0
  bool isLogicalVecHalfWordShifter() const {
1100
0
    if (!isLogicalVecShifter())
1101
0
      return false;
1102
1103
    // A logical vector half-word shifter is a left shift by 0 or 8.
1104
0
    unsigned Shift = getShiftExtendAmount();
1105
0
    return getShiftExtendType() == AArch64_AM::LSL &&
1106
0
           (Shift == 0 || Shift == 8);
1107
0
  }
1108
1109
0
  bool isMoveVecShifter() const {
1110
0
    if (!isShiftExtend())
1111
0
      return false;
1112
1113
    // A move vector shifter is an MSL shift by 8 or 16.
1114
0
    unsigned Shift = getShiftExtendAmount();
1115
0
    return getShiftExtendType() == AArch64_AM::MSL &&
1116
0
           (Shift == 8 || Shift == 16);
1117
0
  }
1118
1119
  // Fallback unscaled operands are for aliases of LDR/STR that fall back
1120
  // to LDUR/STUR when the offset is not legal for the former but is for
1121
  // the latter. As such, in addition to checking for being a legal unscaled
1122
  // address, also check that it is not a legal scaled address. This avoids
1123
  // ambiguity in the matcher.
1124
  template<int Width>
1125
0
  bool isSImm9OffsetFB() const {
1126
0
    return isSImm9() && !isUImm12Offset<Width / 8>();
1127
0
  }
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isSImm9OffsetFB<128>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isSImm9OffsetFB<16>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isSImm9OffsetFB<32>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isSImm9OffsetFB<64>() const
Unexecuted instantiation: AArch64AsmParser.cpp:bool (anonymous namespace)::AArch64Operand::isSImm9OffsetFB<8>() const
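
The fallback rule described in the comment above can be stated directly: an offset is matched as the unscaled (LDUR/STUR) form only when it fits the signed 9-bit range and is not already a legal scaled 12-bit offset, so "ldr x0, [x1, #8]" stays scaled while "#-8" or "#9" fall back. A sketch with illustrative names:

#include <cstdint>

constexpr bool simm9(int64_t V) { return V >= -256 && V < 256; }
constexpr bool scaledUImm12(int64_t V, int64_t Scale) {
  return V >= 0 && (V % Scale) == 0 && (V / Scale) < 0x1000;
}
constexpr bool usesUnscaledFallback(int64_t V, int64_t Scale) {
  return simm9(V) && !scaledUImm12(V, Scale);
}
static_assert(!usesUnscaledFallback(8, 8), "#8 is a legal scaled offset");
static_assert(usesUnscaledFallback(-8, 8) && usesUnscaledFallback(9, 8), "LDUR cases");
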
1128
1129
0
  bool isAdrpLabel() const {
1130
    // Validation was handled during parsing, so we just sanity check that
1131
    // something didn't go haywire.
1132
0
    if (!isImm())
1133
0
        return false;
1134
1135
0
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1136
0
      int64_t Val = CE->getValue();
1137
0
      int64_t Offset = Val - Ctx.getBaseAddress();
1138
0
      int64_t Min = - (4096 * (1LL << (21 - 1)));
1139
0
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1140
0
      return (Val % 4096) == 0 && Offset >= Min && Offset <= Max;
1141
0
    }
1142
1143
0
    return true;
1144
0
  }
1145
1146
2
  bool isAdrLabel() const {
1147
    // Validation was handled during parsing, so we just sanity check that
1148
    // something didn't go haywire.
1149
2
    if (!isImm())
1150
0
        return false;
1151
1152
2
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1153
2
      int64_t Val = CE->getValue();
1154
2
      int64_t Min = - (1LL << (21 - 1));
1155
2
      int64_t Max = ((1LL << (21 - 1)) - 1);
1156
2
      return Val >= Min && Val <= Max;
1157
2
    }
1158
1159
0
    return true;
1160
2
  }
1161
1162
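For reference, a hedged sketch that restates the two range checks above with the constants spelled out (helper names are illustrative):

#include <cstdint>

// isAdrLabel: a constant target must fit a signed 21-bit byte offset (+/- 1 MiB).
static bool adrInRange(int64_t Val) {
  const int64_t Min = -(1LL << 20), Max = (1LL << 20) - 1;
  return Val >= Min && Val <= Max;
}

// isAdrpLabel: a constant target must be 4 KiB aligned, and its distance from
// the base address must fit a signed 21-bit page count (roughly +/- 4 GiB).
static bool adrpInRange(int64_t Val, int64_t BaseAddress) {
  const int64_t Min = -(4096 * (1LL << 20));
  const int64_t Max = 4096 * ((1LL << 20) - 1);
  int64_t Offset = Val - BaseAddress;
  return (Val % 4096) == 0 && Offset >= Min && Offset <= Max;
}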
1.43k
  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1163
    // Add as immediates when possible.  Null MCExpr = 0.
1164
1.43k
    if (!Expr)
1165
0
      Inst.addOperand(MCOperand::createImm(0));
1166
1.43k
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1167
129
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
1168
1.30k
    else
1169
1.30k
      Inst.addOperand(MCOperand::createExpr(Expr));
1170
1.43k
  }
1171
1172
320
  void addRegOperands(MCInst &Inst, unsigned N) const {
1173
320
    assert(N == 1 && "Invalid number of operands!");
1174
0
    Inst.addOperand(MCOperand::createReg(getReg()));
1175
320
  }
1176
1177
0
  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1178
0
    assert(N == 1 && "Invalid number of operands!");
1179
0
    assert(
1180
0
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1181
1182
0
    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1183
0
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1184
0
        RI->getEncodingValue(getReg()));
1185
1186
0
    Inst.addOperand(MCOperand::createReg(Reg));
1187
0
  }
1188
1189
0
  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1190
0
    assert(N == 1 && "Invalid number of operands!");
1191
0
    assert(
1192
0
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1193
0
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1194
0
  }
1195
1196
0
  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1197
0
    assert(N == 1 && "Invalid number of operands!");
1198
0
    assert(
1199
0
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1200
0
    Inst.addOperand(MCOperand::createReg(getReg()));
1201
0
  }
1202
1203
0
  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1204
0
    assert(N == 1 && "Invalid number of operands!");
1205
0
    Inst.addOperand(MCOperand::createReg(getReg()));
1206
0
  }
1207
1208
  template <unsigned NumRegs>
1209
0
  void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1210
0
    assert(N == 1 && "Invalid number of operands!");
1211
0
    static const unsigned FirstRegs[] = { AArch64::D0,
1212
0
                                          AArch64::D0_D1,
1213
0
                                          AArch64::D0_D1_D2,
1214
0
                                          AArch64::D0_D1_D2_D3 };
1215
0
    unsigned FirstReg = FirstRegs[NumRegs - 1];
1216
1217
0
    Inst.addOperand(
1218
0
        MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1219
0
  }
Unexecuted instantiation: AArch64AsmParser.cpp:void (anonymous namespace)::AArch64Operand::addVectorList64Operands<4u>(llvm_ks::MCInst&, unsigned int) const
Unexecuted instantiation: AArch64AsmParser.cpp:void (anonymous namespace)::AArch64Operand::addVectorList64Operands<1u>(llvm_ks::MCInst&, unsigned int) const
Unexecuted instantiation: AArch64AsmParser.cpp:void (anonymous namespace)::AArch64Operand::addVectorList64Operands<3u>(llvm_ks::MCInst&, unsigned int) const
Unexecuted instantiation: AArch64AsmParser.cpp:void (anonymous namespace)::AArch64Operand::addVectorList64Operands<2u>(llvm_ks::MCInst&, unsigned int) const
1220
1221
  template <unsigned NumRegs>
1222
0
  void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1223
0
    assert(N == 1 && "Invalid number of operands!");
1224
0
    static const unsigned FirstRegs[] = { AArch64::Q0,
1225
0
                                          AArch64::Q0_Q1,
1226
0
                                          AArch64::Q0_Q1_Q2,
1227
0
                                          AArch64::Q0_Q1_Q2_Q3 };
1228
0
    unsigned FirstReg = FirstRegs[NumRegs - 1];
1229
1230
0
    Inst.addOperand(
1231
0
        MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1232
0
  }
Unexecuted instantiation: AArch64AsmParser.cpp:void (anonymous namespace)::AArch64Operand::addVectorList128Operands<4u>(llvm_ks::MCInst&, unsigned int) const
Unexecuted instantiation: AArch64AsmParser.cpp:void (anonymous namespace)::AArch64Operand::addVectorList128Operands<1u>(llvm_ks::MCInst&, unsigned int) const
Unexecuted instantiation: AArch64AsmParser.cpp:void (anonymous namespace)::AArch64Operand::addVectorList128Operands<3u>(llvm_ks::MCInst&, unsigned int) const
Unexecuted instantiation: AArch64AsmParser.cpp:void (anonymous namespace)::AArch64Operand::addVectorList128Operands<2u>(llvm_ks::MCInst&, unsigned int) const
1233
1234
0
  void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1235
0
    assert(N == 1 && "Invalid number of operands!");
1236
0
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1237
0
  }
1238
1239
0
  void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1240
0
    assert(N == 1 && "Invalid number of operands!");
1241
0
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1242
0
  }
1243
1244
0
  void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1245
0
    assert(N == 1 && "Invalid number of operands!");
1246
0
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1247
0
  }
1248
1249
0
  void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1250
0
    assert(N == 1 && "Invalid number of operands!");
1251
0
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1252
0
  }
1253
1254
0
  void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1255
0
    assert(N == 1 && "Invalid number of operands!");
1256
0
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1257
0
  }
1258
1259
0
  void addImmOperands(MCInst &Inst, unsigned N) const {
1260
0
    assert(N == 1 && "Invalid number of operands!");
1261
    // If this is a pageoff symrefexpr with an addend, adjust the addend
1262
    // to be only the page-offset portion. Otherwise, just add the expr
1263
    // as-is.
1264
0
    addExpr(Inst, getImm());
1265
0
  }
1266
1267
129
  void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1268
129
    assert(N == 2 && "Invalid number of operands!");
1269
129
    if (isShiftedImm()) {
1270
129
      addExpr(Inst, getShiftedImmVal());
1271
129
      Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1272
129
    } else {
1273
0
      addExpr(Inst, getImm());
1274
0
      Inst.addOperand(MCOperand::createImm(0));
1275
0
    }
1276
129
  }
1277
1278
5
  void addAddSubImmNegOperands(MCInst &Inst, unsigned N) const {
1279
5
    assert(N == 2 && "Invalid number of operands!");
1280
1281
5
    const MCExpr *MCE = isShiftedImm() ? getShiftedImmVal() : getImm();
1282
5
    const MCConstantExpr *CE = cast<MCConstantExpr>(MCE);
1283
5
    int64_t Val = -CE->getValue();
1284
5
    unsigned ShiftAmt = isShiftedImm() ? ShiftedImm.ShiftAmount : 0;
1285
1286
5
    Inst.addOperand(MCOperand::createImm(Val));
1287
5
    Inst.addOperand(MCOperand::createImm(ShiftAmt));
1288
5
  }
1289
1290
364
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1291
364
    assert(N == 1 && "Invalid number of operands!");
1292
0
    Inst.addOperand(MCOperand::createImm(getCondCode()));
1293
364
  }
1294
1295
0
  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1296
0
    assert(N == 1 && "Invalid number of operands!");
1297
0
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1298
0
    if (!MCE)
1299
0
      addExpr(Inst, getImm());
1300
0
    else
1301
0
      Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1302
0
  }
1303
1304
0
  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1305
0
    addImmOperands(Inst, N);
1306
0
  }
1307
1308
  template<int Scale>
1309
0
  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1310
0
    assert(N == 1 && "Invalid number of operands!");
1311
0
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1312
1313
0
    if (!MCE) {
1314
0
      Inst.addOperand(MCOperand::createExpr(getImm()));
1315
0
      return;
1316
0
    }
1317
0
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1318
0
  }
Unexecuted instantiation: AArch64AsmParser.cpp:void (anonymous namespace)::AArch64Operand::addUImm12OffsetOperands<16>(llvm_ks::MCInst&, unsigned int) const
Unexecuted instantiation: AArch64AsmParser.cpp:void (anonymous namespace)::AArch64Operand::addUImm12OffsetOperands<2>(llvm_ks::MCInst&, unsigned int) const
Unexecuted instantiation: AArch64AsmParser.cpp:void (anonymous namespace)::AArch64Operand::addUImm12OffsetOperands<4>(llvm_ks::MCInst&, unsigned int) const
Unexecuted instantiation: AArch64AsmParser.cpp:void (anonymous namespace)::AArch64Operand::addUImm12OffsetOperands<8>(llvm_ks::MCInst&, unsigned int) const
Unexecuted instantiation: AArch64AsmParser.cpp:void (anonymous namespace)::AArch64Operand::addUImm12OffsetOperands<1>(llvm_ks::MCInst&, unsigned int) const
1319
1320
0
  void addSImm9Operands(MCInst &Inst, unsigned N) const {
1321
0
    assert(N == 1 && "Invalid number of operands!");
1322
0
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1323
0
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1324
0
  }
1325
1326
0
  void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1327
0
    assert(N == 1 && "Invalid number of operands!");
1328
0
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1329
0
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / 4));
1330
0
  }
1331
1332
0
  void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1333
0
    assert(N == 1 && "Invalid number of operands!");
1334
0
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1335
0
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
1336
0
  }
1337
1338
0
  void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1339
0
    assert(N == 1 && "Invalid number of operands!");
1340
0
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1341
0
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / 16));
1342
0
  }
1343
1344
0
  void addImm0_1Operands(MCInst &Inst, unsigned N) const {
1345
0
    assert(N == 1 && "Invalid number of operands!");
1346
0
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1347
0
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1348
0
  }
1349
1350
0
  void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1351
0
    assert(N == 1 && "Invalid number of operands!");
1352
0
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1353
0
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1354
0
  }
1355
1356
0
  void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1357
0
    assert(N == 1 && "Invalid number of operands!");
1358
0
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1359
0
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1360
0
  }
1361
1362
5
  void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1363
5
    assert(N == 1 && "Invalid number of operands!");
1364
0
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1365
5
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1366
5
  }
1367
1368
0
  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1369
0
    assert(N == 1 && "Invalid number of operands!");
1370
0
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1371
0
    assert(MCE && "Invalid constant immediate operand!");
1372
0
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1373
0
  }
1374
1375
5
  void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1376
5
    assert(N == 1 && "Invalid number of operands!");
1377
0
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1378
5
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1379
5
  }
1380
1381
0
  void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1382
0
    assert(N == 1 && "Invalid number of operands!");
1383
0
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1384
0
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1385
0
  }
1386
1387
0
  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1388
0
    assert(N == 1 && "Invalid number of operands!");
1389
0
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1390
0
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1391
0
  }
1392
1393
0
  void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1394
0
    assert(N == 1 && "Invalid number of operands!");
1395
0
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1396
0
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1397
0
  }
1398
1399
0
  void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1400
0
    assert(N == 1 && "Invalid number of operands!");
1401
0
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1402
0
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1403
0
  }
1404
1405
0
  void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1406
0
    assert(N == 1 && "Invalid number of operands!");
1407
0
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1408
0
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1409
0
  }
1410
1411
0
  void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1412
0
    assert(N == 1 && "Invalid number of operands!");
1413
0
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1414
0
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1415
0
  }
1416
1417
0
  void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1418
0
    assert(N == 1 && "Invalid number of operands!");
1419
0
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1420
0
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1421
0
  }
1422
1423
0
  void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1424
0
    assert(N == 1 && "Invalid number of operands!");
1425
0
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1426
0
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1427
0
  }
1428
1429
0
  void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1430
0
    assert(N == 1 && "Invalid number of operands!");
1431
0
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1432
0
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1433
0
  }
1434
1435
0
  void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1436
0
    assert(N == 1 && "Invalid number of operands!");
1437
0
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1438
0
    uint64_t encoding =
1439
0
        AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
1440
0
    Inst.addOperand(MCOperand::createImm(encoding));
1441
0
  }
1442
1443
0
  void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1444
0
    assert(N == 1 && "Invalid number of operands!");
1445
0
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1446
0
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1447
0
    Inst.addOperand(MCOperand::createImm(encoding));
1448
0
  }
1449
1450
0
  void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
1451
0
    assert(N == 1 && "Invalid number of operands!");
1452
0
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1453
0
    int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
1454
0
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
1455
0
    Inst.addOperand(MCOperand::createImm(encoding));
1456
0
  }
1457
1458
0
  void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
1459
0
    assert(N == 1 && "Invalid number of operands!");
1460
0
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1461
0
    uint64_t encoding =
1462
0
        AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
1463
0
    Inst.addOperand(MCOperand::createImm(encoding));
1464
0
  }
1465
1466
0
  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1467
0
    assert(N == 1 && "Invalid number of operands!");
1468
0
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1469
0
    uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1470
0
    Inst.addOperand(MCOperand::createImm(encoding));
1471
0
  }
1472
1473
947
  void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1474
    // Branch operands don't encode the low bits, so shift them off
1475
    // here. If it's a label, however, just put it on directly as there's
1476
    // not enough information now to do anything.
1477
947
    assert(N == 1 && "Invalid number of operands!");
1478
0
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1479
947
    if (!MCE) {
1480
947
      addExpr(Inst, getImm());
1481
947
      return;
1482
947
    }
1483
0
    assert(MCE && "Invalid constant immediate operand!");
1484
0
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1485
0
  }
1486
1487
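A small illustrative sketch of the constant-target case handled above (and in the two similar methods that follow); the helper is hypothetical:

#include <cstdint>

// Constant branch targets drop the two always-zero low bits before being
// placed in the immediate field; symbolic targets stay as expressions so a
// later fixup can do the same adjustment.
static int64_t encodeBranchWordOffset(int64_t ByteOffset) {
  return ByteOffset >> 2;   // e.g. a byte offset of 8 becomes a word offset of 2
}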
362
  void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1488
    // Branch operands don't encode the low bits, so shift them off
1489
    // here. If it's a label, however, just put it on directly as there's
1490
    // not enough information now to do anything.
1491
362
    assert(N == 1 && "Invalid number of operands!");
1492
0
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1493
362
    if (!MCE) {
1494
359
      addExpr(Inst, getImm());
1495
359
      return;
1496
359
    }
1497
3
    assert(MCE && "Invalid constant immediate operand!");
1498
0
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1499
3
  }
1500
1501
0
  void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1502
    // Branch operands don't encode the low bits, so shift them off
1503
    // here. If it's a label, however, just put it on directly as there's
1504
    // not enough information now to do anything.
1505
0
    assert(N == 1 && "Invalid number of operands!");
1506
0
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1507
0
    if (!MCE) {
1508
0
      addExpr(Inst, getImm());
1509
0
      return;
1510
0
    }
1511
0
    assert(MCE && "Invalid constant immediate operand!");
1512
0
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1513
0
  }
1514
1515
12
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1516
12
    assert(N == 1 && "Invalid number of operands!");
1517
0
    Inst.addOperand(MCOperand::createImm(getFPImm()));
1518
12
  }
1519
1520
48
  void addBarrierOperands(MCInst &Inst, unsigned N) const {
1521
48
    assert(N == 1 && "Invalid number of operands!");
1522
0
    Inst.addOperand(MCOperand::createImm(getBarrier()));
1523
48
  }
1524
1525
0
  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1526
0
    assert(N == 1 && "Invalid number of operands!");
1527
1528
0
    Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1529
0
  }
1530
1531
0
  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1532
0
    assert(N == 1 && "Invalid number of operands!");
1533
1534
0
    Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1535
0
  }
1536
1537
0
  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1538
0
    assert(N == 1 && "Invalid number of operands!");
1539
1540
0
    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1541
0
  }
1542
1543
0
  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1544
0
    assert(N == 1 && "Invalid number of operands!");
1545
1546
0
    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1547
0
  }
1548
1549
0
  void addSysCROperands(MCInst &Inst, unsigned N) const {
1550
0
    assert(N == 1 && "Invalid number of operands!");
1551
0
    Inst.addOperand(MCOperand::createImm(getSysCR()));
1552
0
  }
1553
1554
0
  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1555
0
    assert(N == 1 && "Invalid number of operands!");
1556
0
    Inst.addOperand(MCOperand::createImm(getPrefetch()));
1557
0
  }
1558
1559
0
  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
1560
0
    assert(N == 1 && "Invalid number of operands!");
1561
0
    Inst.addOperand(MCOperand::createImm(getPSBHint()));
1562
0
  }
1563
1564
0
  void addShifterOperands(MCInst &Inst, unsigned N) const {
1565
0
    assert(N == 1 && "Invalid number of operands!");
1566
0
    unsigned Imm =
1567
0
        AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1568
0
    Inst.addOperand(MCOperand::createImm(Imm));
1569
0
  }
1570
1571
0
  void addExtendOperands(MCInst &Inst, unsigned N) const {
1572
0
    assert(N == 1 && "Invalid number of operands!");
1573
0
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1574
0
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1575
0
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1576
0
    Inst.addOperand(MCOperand::createImm(Imm));
1577
0
  }
1578
1579
0
  void addExtend64Operands(MCInst &Inst, unsigned N) const {
1580
0
    assert(N == 1 && "Invalid number of operands!");
1581
0
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1582
0
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1583
0
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1584
0
    Inst.addOperand(MCOperand::createImm(Imm));
1585
0
  }
1586
1587
0
  void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1588
0
    assert(N == 2 && "Invalid number of operands!");
1589
0
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1590
0
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1591
0
    Inst.addOperand(MCOperand::createImm(IsSigned));
1592
0
    Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1593
0
  }
1594
1595
  // For 8-bit load/store instructions with a register offset, both the
1596
  // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1597
  // they're disambiguated by whether the shift was explicit or implicit rather
1598
  // than its size.
1599
0
  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1600
0
    assert(N == 2 && "Invalid number of operands!");
1601
0
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1602
0
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1603
0
    Inst.addOperand(MCOperand::createImm(IsSigned));
1604
0
    Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1605
0
  }
1606
1607
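To make the disambiguation above concrete, a hedged illustration (the struct is a stand-in, not the parser's own representation):

// For byte-sized loads/stores both register-offset variants shift by 0, so
// the two immediates emitted record sign-extension and whether the amount
// was written explicitly.
//   "ldrb w0, [x1, x2]"         -> { IsSigned=false, ExplicitShift=false }
//   "ldrb w0, [x1, x2, lsl #0]" -> { IsSigned=false, ExplicitShift=true  }
//   "ldrb w0, [x1, w2, sxtw]"   -> { IsSigned=true,  ExplicitShift=false }
struct MemExtend8Operands {
  bool IsSigned;
  bool ExplicitShift;
};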
  template<int Shift>
1608
16
  void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1609
16
    assert(N == 1 && "Invalid number of operands!");
1610
1611
0
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1612
16
    uint64_t Value = CE->getValue();
1613
16
    Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1614
16
  }
AArch64AsmParser.cpp:void (anonymous namespace)::AArch64Operand::addMOVZMovAliasOperands<0>(llvm_ks::MCInst&, unsigned int) const
Line
Count
Source
1608
16
  void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1609
16
    assert(N == 1 && "Invalid number of operands!");
1610
1611
0
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1612
16
    uint64_t Value = CE->getValue();
1613
16
    Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1614
16
  }
Unexecuted instantiation: AArch64AsmParser.cpp:void (anonymous namespace)::AArch64Operand::addMOVZMovAliasOperands<16>(llvm_ks::MCInst&, unsigned int) const
Unexecuted instantiation: AArch64AsmParser.cpp:void (anonymous namespace)::AArch64Operand::addMOVZMovAliasOperands<32>(llvm_ks::MCInst&, unsigned int) const
Unexecuted instantiation: AArch64AsmParser.cpp:void (anonymous namespace)::AArch64Operand::addMOVZMovAliasOperands<48>(llvm_ks::MCInst&, unsigned int) const
1615
1616
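For orientation, a tiny standalone restatement of the chunk selection performed by the MOVZ alias above (hypothetical helper):

#include <cstdint>

// The MOV alias hands MOVZ the 16-bit chunk selected by the template shift,
// e.g. for "mov x0, #0x12340000" the Shift=16 instantiation yields 0x1234.
static uint64_t movzChunk(uint64_t Value, int Shift) {
  return (Value >> Shift) & 0xffff;
}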
  template<int Shift>
1617
0
  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1618
0
    assert(N == 1 && "Invalid number of operands!");
1619
1620
0
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1621
0
    uint64_t Value = CE->getValue();
1622
0
    Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1623
0
  }
Unexecuted instantiation: AArch64AsmParser.cpp:void (anonymous namespace)::AArch64Operand::addMOVNMovAliasOperands<0>(llvm_ks::MCInst&, unsigned int) const
Unexecuted instantiation: AArch64AsmParser.cpp:void (anonymous namespace)::AArch64Operand::addMOVNMovAliasOperands<16>(llvm_ks::MCInst&, unsigned int) const
Unexecuted instantiation: AArch64AsmParser.cpp:void (anonymous namespace)::AArch64Operand::addMOVNMovAliasOperands<32>(llvm_ks::MCInst&, unsigned int) const
Unexecuted instantiation: AArch64AsmParser.cpp:void (anonymous namespace)::AArch64Operand::addMOVNMovAliasOperands<48>(llvm_ks::MCInst&, unsigned int) const
1624
1625
  void print(raw_ostream &OS) const override;
1626
1627
  static std::unique_ptr<AArch64Operand>
1628
72.6k
  CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1629
72.6k
    auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1630
72.6k
    Op->Tok.Data = Str.data();
1631
72.6k
    Op->Tok.Length = Str.size();
1632
72.6k
    Op->Tok.IsSuffix = IsSuffix;
1633
72.6k
    Op->StartLoc = S;
1634
72.6k
    Op->EndLoc = S;
1635
72.6k
    return Op;
1636
72.6k
  }
1637
1638
  static std::unique_ptr<AArch64Operand>
1639
4.91k
  CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
1640
4.91k
    auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1641
4.91k
    Op->Reg.RegNum = RegNum;
1642
4.91k
    Op->Reg.isVector = isVector;
1643
4.91k
    Op->StartLoc = S;
1644
4.91k
    Op->EndLoc = E;
1645
4.91k
    return Op;
1646
4.91k
  }
1647
1648
  static std::unique_ptr<AArch64Operand>
1649
  CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1650
6
                   char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1651
6
    auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1652
6
    Op->VectorList.RegNum = RegNum;
1653
6
    Op->VectorList.Count = Count;
1654
6
    Op->VectorList.NumElements = NumElements;
1655
6
    Op->VectorList.ElementKind = ElementKind;
1656
6
    Op->StartLoc = S;
1657
6
    Op->EndLoc = E;
1658
6
    return Op;
1659
6
  }
1660
1661
  static std::unique_ptr<AArch64Operand>
1662
0
  CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1663
0
    auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1664
0
    Op->VectorIndex.Val = Idx;
1665
0
    Op->StartLoc = S;
1666
0
    Op->EndLoc = E;
1667
0
    return Op;
1668
0
  }
1669
1670
  static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1671
15.1k
                                                   SMLoc E, MCContext &Ctx) {
1672
15.1k
    auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1673
15.1k
    Op->Imm.Val = Val;
1674
15.1k
    Op->StartLoc = S;
1675
15.1k
    Op->EndLoc = E;
1676
15.1k
    return Op;
1677
15.1k
  }
1678
1679
  static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1680
                                                          unsigned ShiftAmount,
1681
                                                          SMLoc S, SMLoc E,
1682
134
                                                          MCContext &Ctx) {
1683
134
    auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1684
134
    Op->ShiftedImm.Val = Val;
1685
134
    Op->ShiftedImm.ShiftAmount = ShiftAmount;
1686
134
    Op->StartLoc = S;
1687
134
    Op->EndLoc = E;
1688
134
    return Op;
1689
134
  }
1690
1691
  static std::unique_ptr<AArch64Operand>
1692
544
  CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1693
544
    auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1694
544
    Op->CondCode.Code = Code;
1695
544
    Op->StartLoc = S;
1696
544
    Op->EndLoc = E;
1697
544
    return Op;
1698
544
  }
1699
1700
  static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1701
32
                                                     MCContext &Ctx) {
1702
32
    auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1703
32
    Op->FPImm.Val = Val;
1704
32
    Op->StartLoc = S;
1705
32
    Op->EndLoc = S;
1706
32
    return Op;
1707
32
  }
1708
1709
  static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1710
                                                       StringRef Str,
1711
                                                       SMLoc S,
1712
48
                                                       MCContext &Ctx) {
1713
48
    auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1714
48
    Op->Barrier.Val = Val;
1715
48
    Op->Barrier.Data = Str.data();
1716
48
    Op->Barrier.Length = Str.size();
1717
48
    Op->StartLoc = S;
1718
48
    Op->EndLoc = S;
1719
48
    return Op;
1720
48
  }
1721
1722
  static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1723
                                                      uint32_t MRSReg,
1724
                                                      uint32_t MSRReg,
1725
                                                      uint32_t PStateField,
1726
330
                                                      MCContext &Ctx) {
1727
330
    auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1728
330
    Op->SysReg.Data = Str.data();
1729
330
    Op->SysReg.Length = Str.size();
1730
330
    Op->SysReg.MRSReg = MRSReg;
1731
330
    Op->SysReg.MSRReg = MSRReg;
1732
330
    Op->SysReg.PStateField = PStateField;
1733
330
    Op->StartLoc = S;
1734
330
    Op->EndLoc = S;
1735
330
    return Op;
1736
330
  }
1737
1738
  static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1739
2
                                                     SMLoc E, MCContext &Ctx) {
1740
2
    auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1741
2
    Op->SysCRImm.Val = Val;
1742
2
    Op->StartLoc = S;
1743
2
    Op->EndLoc = E;
1744
2
    return Op;
1745
2
  }
1746
1747
  static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1748
                                                        StringRef Str,
1749
                                                        SMLoc S,
1750
3
                                                        MCContext &Ctx) {
1751
3
    auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1752
3
    Op->Prefetch.Val = Val;
1753
3
    Op->Barrier.Data = Str.data();
1754
3
    Op->Barrier.Length = Str.size();
1755
3
    Op->StartLoc = S;
1756
3
    Op->EndLoc = S;
1757
3
    return Op;
1758
3
  }
1759
1760
  static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
1761
                                                       StringRef Str,
1762
                                                       SMLoc S,
1763
0
                                                       MCContext &Ctx) {
1764
0
    auto Op = make_unique<AArch64Operand>(k_PSBHint, Ctx);
1765
0
    Op->PSBHint.Val = Val;
1766
0
    Op->PSBHint.Data = Str.data();
1767
0
    Op->PSBHint.Length = Str.size();
1768
0
    Op->StartLoc = S;
1769
0
    Op->EndLoc = S;
1770
0
    return Op;
1771
0
  }
1772
1773
  static std::unique_ptr<AArch64Operand>
1774
  CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1775
129
                    bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1776
129
    auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1777
129
    Op->ShiftExtend.Type = ShOp;
1778
129
    Op->ShiftExtend.Amount = Val;
1779
129
    Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1780
129
    Op->StartLoc = S;
1781
129
    Op->EndLoc = E;
1782
129
    return Op;
1783
129
  }
1784
};
1785
1786
} // end anonymous namespace.
1787
1788
0
void AArch64Operand::print(raw_ostream &OS) const {
1789
0
  switch (Kind) {
1790
0
  case k_FPImm:
1791
0
    OS << "<fpimm " << getFPImm() << "("
1792
0
       << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
1793
0
    break;
1794
0
  case k_Barrier: {
1795
0
    StringRef Name = getBarrierName();
1796
0
    if (!Name.empty())
1797
0
      OS << "<barrier " << Name << ">";
1798
0
    else
1799
0
      OS << "<barrier invalid #" << getBarrier() << ">";
1800
0
    break;
1801
0
  }
1802
0
  case k_Immediate:
1803
0
    OS << *getImm();
1804
0
    break;
1805
0
  case k_ShiftedImm: {
1806
0
    unsigned Shift = getShiftedImmShift();
1807
0
    OS << "<shiftedimm ";
1808
0
    OS << *getShiftedImmVal();
1809
0
    OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1810
0
    break;
1811
0
  }
1812
0
  case k_CondCode:
1813
0
    OS << "<condcode " << getCondCode() << ">";
1814
0
    break;
1815
0
  case k_Register:
1816
0
    OS << "<register " << getReg() << ">";
1817
0
    break;
1818
0
  case k_VectorList: {
1819
0
    OS << "<vectorlist ";
1820
0
    unsigned Reg = getVectorListStart();
1821
0
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1822
0
      OS << Reg + i << " ";
1823
0
    OS << ">";
1824
0
    break;
1825
0
  }
1826
0
  case k_VectorIndex:
1827
0
    OS << "<vectorindex " << getVectorIndex() << ">";
1828
0
    break;
1829
0
  case k_SysReg:
1830
0
    OS << "<sysreg: " << getSysReg() << '>';
1831
0
    break;
1832
0
  case k_Token:
1833
0
    OS << "'" << getToken() << "'";
1834
0
    break;
1835
0
  case k_SysCR:
1836
0
    OS << "c" << getSysCR();
1837
0
    break;
1838
0
  case k_Prefetch: {
1839
0
    StringRef Name = getPrefetchName();
1840
0
    if (!Name.empty())
1841
0
      OS << "<prfop " << Name << ">";
1842
0
    else
1843
0
      OS << "<prfop invalid #" << getPrefetch() << ">";
1844
0
    break;
1845
0
  }
1846
0
  case k_PSBHint: {
1847
0
    OS << getPSBHintName();
1848
0
    break;
1849
0
  }
1850
0
  case k_ShiftExtend: {
1851
0
    OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1852
0
       << getShiftExtendAmount();
1853
0
    if (!hasShiftExtendAmount())
1854
0
      OS << "<imp>";
1855
0
    OS << '>';
1856
0
    break;
1857
0
  }
1858
0
  }
1859
0
}
1860
1861
/// @name Auto-generated Match Functions
1862
/// {
1863
1864
static unsigned MatchRegisterName(StringRef Name);
1865
1866
/// }
1867
1868
17.7k
static unsigned matchVectorRegName(StringRef Name) {
1869
17.7k
  return StringSwitch<unsigned>(Name.lower())
1870
17.7k
      .Case("v0", AArch64::Q0)
1871
17.7k
      .Case("v1", AArch64::Q1)
1872
17.7k
      .Case("v2", AArch64::Q2)
1873
17.7k
      .Case("v3", AArch64::Q3)
1874
17.7k
      .Case("v4", AArch64::Q4)
1875
17.7k
      .Case("v5", AArch64::Q5)
1876
17.7k
      .Case("v6", AArch64::Q6)
1877
17.7k
      .Case("v7", AArch64::Q7)
1878
17.7k
      .Case("v8", AArch64::Q8)
1879
17.7k
      .Case("v9", AArch64::Q9)
1880
17.7k
      .Case("v10", AArch64::Q10)
1881
17.7k
      .Case("v11", AArch64::Q11)
1882
17.7k
      .Case("v12", AArch64::Q12)
1883
17.7k
      .Case("v13", AArch64::Q13)
1884
17.7k
      .Case("v14", AArch64::Q14)
1885
17.7k
      .Case("v15", AArch64::Q15)
1886
17.7k
      .Case("v16", AArch64::Q16)
1887
17.7k
      .Case("v17", AArch64::Q17)
1888
17.7k
      .Case("v18", AArch64::Q18)
1889
17.7k
      .Case("v19", AArch64::Q19)
1890
17.7k
      .Case("v20", AArch64::Q20)
1891
17.7k
      .Case("v21", AArch64::Q21)
1892
17.7k
      .Case("v22", AArch64::Q22)
1893
17.7k
      .Case("v23", AArch64::Q23)
1894
17.7k
      .Case("v24", AArch64::Q24)
1895
17.7k
      .Case("v25", AArch64::Q25)
1896
17.7k
      .Case("v26", AArch64::Q26)
1897
17.7k
      .Case("v27", AArch64::Q27)
1898
17.7k
      .Case("v28", AArch64::Q28)
1899
17.7k
      .Case("v29", AArch64::Q29)
1900
17.7k
      .Case("v30", AArch64::Q30)
1901
17.7k
      .Case("v31", AArch64::Q31)
1902
17.7k
      .Default(0);
1903
17.7k
}
1904
1905
2.52k
static bool isValidVectorKind(StringRef Name) {
1906
2.52k
  return StringSwitch<bool>(Name.lower())
1907
2.52k
      .Case(".8b", true)
1908
2.52k
      .Case(".16b", true)
1909
2.52k
      .Case(".4h", true)
1910
2.52k
      .Case(".8h", true)
1911
2.52k
      .Case(".2s", true)
1912
2.52k
      .Case(".4s", true)
1913
2.52k
      .Case(".1d", true)
1914
2.52k
      .Case(".2d", true)
1915
2.52k
      .Case(".1q", true)
1916
      // Accept the width neutral ones, too, for verbose syntax. If those
1917
      // aren't used in the right places, the token operand won't match so
1918
      // all will work out.
1919
2.52k
      .Case(".b", true)
1920
2.52k
      .Case(".h", true)
1921
2.52k
      .Case(".s", true)
1922
2.52k
      .Case(".d", true)
1923
      // Needed for fp16 scalar pairwise reductions
1924
2.52k
      .Case(".2h", true)
1925
2.52k
      .Default(false);
1926
2.52k
}
1927
1928
static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1929
0
                                 char &ElementKind) {
1930
0
  assert(isValidVectorKind(Name));
1931
1932
0
  ElementKind = Name.lower()[Name.size() - 1];
1933
0
  NumElements = 0;
1934
1935
0
  if (Name.size() == 2)
1936
0
    return;
1937
1938
  // Parse the lane count
1939
0
  Name = Name.drop_front();
1940
0
  while (isdigit(Name.front())) {
1941
0
    NumElements = 10 * NumElements + (Name.front() - '0');
1942
0
    Name = Name.drop_front();
1943
0
  }
1944
0
}
1945
1946
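A self-contained sketch of the suffix split performed above, assuming the same ".<lanes><kind>" shape (helper name is illustrative):

#include <cctype>
#include <string>

// ".4s" -> ('s', 4), ".16b" -> ('b', 16), width-neutral ".d" -> ('d', 0).
static void splitVectorKind(const std::string &Name, unsigned &NumElements,
                            char &ElementKind) {
  ElementKind = (char)std::tolower((unsigned char)Name.back());
  NumElements = 0;
  if (Name.size() == 2)
    return;                                   // no lane count, e.g. ".s"
  for (size_t I = 1; I < Name.size() &&
                     std::isdigit((unsigned char)Name[I]); ++I)
    NumElements = 10 * NumElements + (Name[I] - '0');
}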
bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1947
0
                                     SMLoc &EndLoc, unsigned int &ErrorCode) {
1948
0
  StartLoc = getLoc();
1949
0
  RegNo = tryParseRegister();
1950
0
  EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1951
0
  return (RegNo == (unsigned)-1);
1952
0
}
1953
1954
// Matches a register name or register alias previously defined by '.req'
1955
unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
1956
34.1k
                                                  bool isVector) {
1957
34.1k
  unsigned RegNum = isVector ? matchVectorRegName(Name)
1958
34.1k
                             : MatchRegisterName(Name);
1959
1960
34.1k
  if (RegNum == 0) {
1961
    // Check for aliases registered via .req. Canonicalize to lower case.
1962
    // That's more consistent since register names are case insensitive, and
1963
    // it's how the original entry was passed in from MC/MCParser/AsmParser.
1964
28.5k
    auto Entry = RegisterReqs.find(Name.lower());
1965
28.5k
    if (Entry == RegisterReqs.end())
1966
28.5k
      return 0;
1967
    // set RegNum if the match is the right kind of register
1968
0
    if (isVector == Entry->getValue().first)
1969
0
      RegNum = Entry->getValue().second;
1970
0
  }
1971
5.67k
  return RegNum;
1972
34.1k
}
1973
1974
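A minimal sketch of the .req alias lookup described above, using a plain map as a stand-in for the parser's StringMap (names are illustrative):

#include <cctype>
#include <string>
#include <unordered_map>
#include <utility>

// Key: lower-cased alias from ".req"; value: (isVector, register number).
using ReqTable = std::unordered_map<std::string, std::pair<bool, unsigned>>;

static unsigned lookupAlias(const ReqTable &Table, std::string Name,
                            bool IsVector) {
  for (char &C : Name)
    C = (char)std::tolower((unsigned char)C);  // names are case-insensitive
  auto It = Table.find(Name);
  if (It == Table.end() || It->second.first != IsVector)
    return 0;                                  // 0 means "not a register"
  return It->second.second;
}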
/// tryParseRegister - Try to parse a register name. The token must be an
1975
/// Identifier when called, and if it is a register name the token is eaten and
1976
/// the register is added to the operand list.
1977
16.3k
int AArch64AsmParser::tryParseRegister() {
1978
16.3k
  MCAsmParser &Parser = getParser();
1979
16.3k
  const AsmToken &Tok = Parser.getTok();
1980
16.3k
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
1981
1982
0
  std::string lowerCase = Tok.getString().lower();
1983
16.3k
  unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
1984
  // Also handle a few aliases of registers.
1985
16.3k
  if (RegNum == 0)
1986
13.7k
    RegNum = StringSwitch<unsigned>(lowerCase)
1987
13.7k
                 .Case("fp",  AArch64::FP)
1988
13.7k
                 .Case("lr",  AArch64::LR)
1989
13.7k
                 .Case("x31", AArch64::XZR)
1990
13.7k
                 .Case("w31", AArch64::WZR)
1991
13.7k
                 .Default(0);
1992
1993
16.3k
  if (RegNum == 0)
1994
12.8k
    return -1;
1995
1996
3.49k
  Parser.Lex(); // Eat identifier token.
1997
3.49k
  return RegNum;
1998
16.3k
}
1999
2000
/// tryMatchVectorRegister - Try to parse a vector register name with optional
2001
/// kind specifier. If it is a register specifier, eat the token and return it.
2002
int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected)
2003
17.7k
{
2004
17.7k
  MCAsmParser &Parser = getParser();
2005
17.7k
  if (Parser.getTok().isNot(AsmToken::Identifier)) {
2006
    //TokError("vector register expected");
2007
11
    return -1;
2008
11
  }
2009
2010
17.7k
  StringRef Name = Parser.getTok().getString();
2011
  // If there is a kind specifier, it's separated from the register name by
2012
  // a '.'.
2013
17.7k
  size_t Start = 0, Next = Name.find('.');
2014
17.7k
  StringRef Head = Name.slice(Start, Next);
2015
17.7k
  unsigned RegNum = matchRegisterNameAlias(Head, true);
2016
2017
17.7k
  if (RegNum) {
2018
3.03k
    if (Next != StringRef::npos) {
2019
2.52k
      Kind = Name.slice(Next, StringRef::npos);
2020
2.52k
      if (!isValidVectorKind(Kind)) {
2021
        //TokError("invalid vector kind qualifier");
2022
1.61k
        return -1;
2023
1.61k
      }
2024
2.52k
    }
2025
1.41k
    Parser.Lex(); // Eat the register token.
2026
1.41k
    return RegNum;
2027
3.03k
  }
2028
2029
  //if (expected)
2030
  //  TokError("vector register expected");
2031
14.7k
  return -1;
2032
17.7k
}
2033
2034
/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2035
AArch64AsmParser::OperandMatchResultTy
2036
AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands)
2037
1
{
2038
1
  MCAsmParser &Parser = getParser();
2039
1
  SMLoc S = getLoc();
2040
2041
1
  if (Parser.getTok().isNot(AsmToken::Identifier)) {
2042
    //Error(S, "Expected cN operand where 0 <= N <= 15");
2043
0
    return MatchOperand_ParseFail;
2044
0
  }
2045
2046
1
  StringRef Tok = Parser.getTok().getIdentifier();
2047
1
  if (Tok[0] != 'c' && Tok[0] != 'C') {
2048
    //Error(S, "Expected cN operand where 0 <= N <= 15");
2049
0
    return MatchOperand_ParseFail;
2050
0
  }
2051
2052
1
  uint32_t CRNum;
2053
1
  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2054
1
  if (BadNum || CRNum > 15) {
2055
    //Error(S, "Expected cN operand where 0 <= N <= 15");
2056
1
    return MatchOperand_ParseFail;
2057
1
  }
2058
2059
0
  Parser.Lex(); // Eat identifier token.
2060
0
  Operands.push_back(
2061
0
      AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2062
0
  return MatchOperand_Success;
2063
1
}
2064
2065
/// tryParsePrefetch - Try to parse a prefetch operand.
2066
AArch64AsmParser::OperandMatchResultTy
2067
5
AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2068
5
  MCAsmParser &Parser = getParser();
2069
5
  SMLoc S = getLoc();
2070
5
  const AsmToken &Tok = Parser.getTok();
2071
  // Either an identifier for named values or a 5-bit immediate.
2072
5
  bool Hash = Tok.is(AsmToken::Hash);
2073
5
  if (Hash || Tok.is(AsmToken::Integer)) {
2074
3
    if (Hash)
2075
0
      Parser.Lex(); // Eat hash token.
2076
3
    const MCExpr *ImmVal;
2077
3
    if (getParser().parseExpression(ImmVal))
2078
0
      return MatchOperand_ParseFail;
2079
2080
3
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2081
3
    if (!MCE) {
2082
      //TokError("immediate value expected for prefetch operand");
2083
0
      return MatchOperand_ParseFail;
2084
0
    }
2085
3
    unsigned prfop = MCE->getValue();
2086
3
    if (prfop > 31) {
2087
      //TokError("prefetch operand out of range, [0,31] expected");
2088
0
      return MatchOperand_ParseFail;
2089
0
    }
2090
2091
3
    bool Valid;
2092
3
    auto Mapper = AArch64PRFM::PRFMMapper();
2093
3
    StringRef Name = 
2094
3
        Mapper.toString(MCE->getValue(), getSTI().getFeatureBits(), Valid);
2095
3
    Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Name,
2096
3
                                                      S, getContext()));
2097
3
    return MatchOperand_Success;
2098
3
  }
2099
2100
2
  if (Tok.isNot(AsmToken::Identifier)) {
2101
    //TokError("pre-fetch hint expected");
2102
0
    return MatchOperand_ParseFail;
2103
0
  }
2104
2105
2
  bool Valid;
2106
2
  auto Mapper = AArch64PRFM::PRFMMapper();
2107
2
  unsigned prfop = 
2108
2
      Mapper.fromString(Tok.getString(), getSTI().getFeatureBits(), Valid);
2109
2
  if (!Valid) {
2110
    //TokError("pre-fetch hint expected");
2111
2
    return MatchOperand_ParseFail;
2112
2
  }
2113
2114
0
  Parser.Lex(); // Eat identifier token.
2115
0
  Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Tok.getString(),
2116
0
                                                    S, getContext()));
2117
0
  return MatchOperand_Success;
2118
2
}
2119
2120
/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2121
AArch64AsmParser::OperandMatchResultTy
2122
0
AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2123
0
  MCAsmParser &Parser = getParser();
2124
0
  SMLoc S = getLoc();
2125
0
  const AsmToken &Tok = Parser.getTok();
2126
0
  if (Tok.isNot(AsmToken::Identifier)) {
2127
    //TokError("invalid operand for instruction");
2128
0
    return MatchOperand_ParseFail;
2129
0
  }
2130
2131
0
  bool Valid;
2132
0
  auto Mapper = AArch64PSBHint::PSBHintMapper();
2133
0
  unsigned psbhint =
2134
0
      Mapper.fromString(Tok.getString(), getSTI().getFeatureBits(), Valid);
2135
0
  if (!Valid) {
2136
    //TokError("invalid operand for instruction");
2137
0
    return MatchOperand_ParseFail;
2138
0
  }
2139
2140
0
  Parser.Lex(); // Eat identifier token.
2141
0
  Operands.push_back(AArch64Operand::CreatePSBHint(psbhint, Tok.getString(),
2142
0
                                                   S, getContext()));
2143
0
  return MatchOperand_Success;
2144
0
}
2145
2146
/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2147
/// instruction.
2148
AArch64AsmParser::OperandMatchResultTy
2149
35
AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2150
35
  MCAsmParser &Parser = getParser();
2151
35
  SMLoc S = getLoc();
2152
35
  const MCExpr *Expr;
2153
2154
35
  if (Parser.getTok().is(AsmToken::Hash)) {
2155
0
    Parser.Lex(); // Eat hash token.
2156
0
  }
2157
2158
35
  if (parseSymbolicImmVal(Expr))
2159
9
    return MatchOperand_ParseFail;
2160
2161
26
  AArch64MCExpr::VariantKind ELFRefKind;
2162
26
  MCSymbolRefExpr::VariantKind DarwinRefKind;
2163
26
  int64_t Addend;
2164
26
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2165
9
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2166
9
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
2167
      // No modifier was specified at all; this is the syntax for an ELF basic
2168
      // ADRP relocation (unfortunately).
2169
9
      Expr =
2170
9
          AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2171
9
    } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2172
0
                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2173
0
               Addend != 0) {
2174
      //Error(S, "gotpage label reference not allowed an addend");
2175
0
      return MatchOperand_ParseFail;
2176
0
    } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2177
0
               DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2178
0
               DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2179
0
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2180
0
               ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2181
0
               ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2182
      // The operand must be an @page or @gotpage qualified symbolref.
2183
      //Error(S, "page or gotpage label reference expected");
2184
0
      return MatchOperand_ParseFail;
2185
0
    }
2186
9
  }
2187
2188
  // We have either a label reference possibly with addend or an immediate. The
2189
  // addend is a raw value here. The linker will adjust it to only reference the
2190
  // page.
2191
26
  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2192
26
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2193
2194
26
  return MatchOperand_Success;
2195
26
}
2196
2197
/// tryParseAdrLabel - Parse and validate a source label for the ADR
2198
/// instruction.
2199
AArch64AsmParser::OperandMatchResultTy
2200
35
AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2201
35
  MCAsmParser &Parser = getParser();
2202
35
  SMLoc S = getLoc();
2203
35
  const MCExpr *Expr;
2204
2205
35
  if (Parser.getTok().is(AsmToken::Hash)) {
2206
26
    Parser.Lex(); // Eat hash token.
2207
26
  }
2208
2209
35
  if (getParser().parseExpression(Expr))
2210
34
    return MatchOperand_ParseFail;
2211
2212
1
  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2213
1
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2214
2215
1
  return MatchOperand_Success;
2216
35
}
2217
2218
/// tryParseFPImm - A floating point immediate expression operand.
2219
AArch64AsmParser::OperandMatchResultTy
2220
89
AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2221
89
  MCAsmParser &Parser = getParser();
2222
89
  SMLoc S = getLoc();
2223
2224
89
  bool Hash = false;
2225
89
  if (Parser.getTok().is(AsmToken::Hash)) {
2226
0
    Parser.Lex(); // Eat '#'
2227
0
    Hash = true;
2228
0
  }
2229
2230
  // Handle negation, as that still comes through as a separate token.
2231
89
  bool isNegative = false;
2232
89
  if (Parser.getTok().is(AsmToken::Minus)) {
2233
13
    isNegative = true;
2234
13
    Parser.Lex();
2235
13
  }
2236
89
  const AsmToken &Tok = Parser.getTok();
2237
89
  if (Tok.is(AsmToken::Real)) {
2238
0
    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2239
0
    if (isNegative)
2240
0
      RealVal.changeSign();
2241
2242
0
    if (RealVal.bitcastToAPInt().getActiveBits() > 64)
2243
0
        return MatchOperand_ParseFail;
2244
0
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2245
0
    int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2246
0
    Parser.Lex(); // Eat the token.
2247
    // Check for out of range values. As an exception, we let Zero through,
2248
    // as we handle that special case in post-processing before matching in
2249
    // order to use the zero register for it.
2250
0
    if (Val == -1 && !RealVal.isPosZero()) {
2251
      //TokError("expected compatible register or floating-point constant");
2252
0
      return MatchOperand_ParseFail;
2253
0
    }
2254
0
    Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2255
0
    return MatchOperand_Success;
2256
0
  }
2257
89
  if (Tok.is(AsmToken::Integer)) {
2258
32
    int64_t Val;
2259
32
    if (!isNegative && Tok.getString().startswith("0x")) {
2260
0
      bool valid;
2261
0
      Val = Tok.getIntVal(valid);
2262
0
      if (!valid)
2263
0
        return MatchOperand_ParseFail;
2264
0
      if (Val > 255 || Val < 0) {
2265
        //TokError("encoded floating point value out of range");
2266
0
        return MatchOperand_ParseFail;
2267
0
      }
2268
32
    } else {
2269
32
      APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2270
32
      if (RealVal.bitcastToAPInt().getActiveBits() > 64)
2271
0
          return MatchOperand_ParseFail;
2272
32
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2273
      // If we had a '-' in front, toggle the sign bit.
2274
32
      IntVal ^= (uint64_t)isNegative << 63;
2275
32
      Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2276
32
    }
2277
32
    Parser.Lex(); // Eat the token.
2278
32
    Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2279
32
    return MatchOperand_Success;
2280
32
  }
2281
2282
57
  if (!Hash)
2283
57
    return MatchOperand_NoMatch;
2284
2285
  //TokError("invalid floating point immediate");
2286
0
  return MatchOperand_ParseFail;
2287
57
}
2288
2289
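A brief note on the hexadecimal branch below: a "0x..." literal is treated as an already-encoded 8-bit FP immediate, so only its range is checked (hypothetical helper):

#include <cstdint>

// Values outside [0, 255] are rejected before getFP64Imm is ever consulted.
static bool isEncodedFPImm(int64_t Val) {
  return Val >= 0 && Val <= 255;
}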
/// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
2290
AArch64AsmParser::OperandMatchResultTy
2291
1.42k
AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2292
1.42k
  MCAsmParser &Parser = getParser();
2293
1.42k
  SMLoc S = getLoc();
2294
2295
1.42k
  if (Parser.getTok().is(AsmToken::Hash))
2296
0
    Parser.Lex(); // Eat '#'
2297
1.42k
  else if (Parser.getTok().isNot(AsmToken::Integer))
2298
    // Operand should start with '#' or be an integer; otherwise there is no match.
2299
1.28k
    return MatchOperand_NoMatch;
2300
2301
135
  const MCExpr *Imm;
2302
135
  if (parseSymbolicImmVal(Imm))
2303
0
    return MatchOperand_ParseFail;
2304
135
  else if (Parser.getTok().isNot(AsmToken::Comma)) {
2305
134
    uint64_t ShiftAmount = 0;
2306
134
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2307
134
    if (MCE) {
2308
134
      int64_t Val = MCE->getValue();
2309
134
      if (Val > 0xfff && (Val & 0xfff) == 0) {
2310
0
        Imm = MCConstantExpr::create(Val >> 12, getContext());
2311
0
        ShiftAmount = 12;
2312
0
      }
2313
134
    }
2314
134
    SMLoc E = Parser.getTok().getLoc();
2315
134
    Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2316
134
                                                        getContext()));
2317
134
    return MatchOperand_Success;
2318
134
  }
2319
2320
  // Eat ','
2321
1
  Parser.Lex();
2322
2323
  // The optional operand must be "lsl #N" where N is non-negative.
2324
1
  if (!Parser.getTok().is(AsmToken::Identifier) ||
2325
1
      !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2326
    //Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2327
1
    return MatchOperand_ParseFail;
2328
1
  }
2329
2330
  // Eat 'lsl'
2331
0
  Parser.Lex();
2332
2333
0
  if (Parser.getTok().is(AsmToken::Hash)) {
2334
0
    Parser.Lex();
2335
0
  }
2336
2337
0
  if (Parser.getTok().isNot(AsmToken::Integer)) {
2338
    //Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2339
0
    return MatchOperand_ParseFail;
2340
0
  }
2341
2342
0
  bool valid;
2343
0
  int64_t ShiftAmount = Parser.getTok().getIntVal(valid);
2344
0
  if (!valid)
2345
0
    return MatchOperand_ParseFail;
2346
2347
0
  if (ShiftAmount < 0) {
2348
    //Error(Parser.getTok().getLoc(), "positive shift amount required");
2349
0
    return MatchOperand_ParseFail;
2350
0
  }
2351
0
  Parser.Lex(); // Eat the number
2352
2353
0
  SMLoc E = Parser.getTok().getLoc();
2354
0
  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2355
0
                                                      S, E, getContext()));
2356
0
  return MatchOperand_Success;
2357
0
}
2358
2359
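The constant-folding step above can be summarized with a short sketch (hypothetical helper; returns the immediate and its LSL amount):

#include <cstdint>
#include <utility>

// A page-aligned constant too large for the 12-bit field is re-expressed as
// (imm >> 12, LSL #12); everything else keeps a shift of 0.
//   "add x0, x1, #0x5000" -> {0x5, 12},   "add x0, x1, #0x5" -> {0x5, 0}
static std::pair<int64_t, unsigned> foldAddSubImm(int64_t Val) {
  if (Val > 0xfff && (Val & 0xfff) == 0)
    return {Val >> 12, 12};
  return {Val, 0};
}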
/// parseCondCodeString - Parse a Condition Code string.
2360
560
AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2361
560
  AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2362
560
                    .Case("eq", AArch64CC::EQ)
2363
560
                    .Case("ne", AArch64CC::NE)
2364
560
                    .Case("cs", AArch64CC::HS)
2365
560
                    .Case("hs", AArch64CC::HS)
2366
560
                    .Case("cc", AArch64CC::LO)
2367
560
                    .Case("lo", AArch64CC::LO)
2368
560
                    .Case("mi", AArch64CC::MI)
2369
560
                    .Case("pl", AArch64CC::PL)
2370
560
                    .Case("vs", AArch64CC::VS)
2371
560
                    .Case("vc", AArch64CC::VC)
2372
560
                    .Case("hi", AArch64CC::HI)
2373
560
                    .Case("ls", AArch64CC::LS)
2374
560
                    .Case("ge", AArch64CC::GE)
2375
560
                    .Case("lt", AArch64CC::LT)
2376
560
                    .Case("gt", AArch64CC::GT)
2377
560
                    .Case("le", AArch64CC::LE)
2378
560
                    .Case("al", AArch64CC::AL)
2379
560
                    .Case("nv", AArch64CC::NV)
2380
560
                    .Default(AArch64CC::Invalid);
2381
560
  return CC;
2382
560
}
2383
2384
/// parseCondCode - Parse a Condition Code operand.
2385
bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2386
6
                                     bool invertCondCode) {
2387
6
  MCAsmParser &Parser = getParser();
2388
6
  SMLoc S = getLoc();
2389
6
  const AsmToken &Tok = Parser.getTok();
2390
6
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2391
2392
0
  StringRef Cond = Tok.getString();
2393
6
  AArch64CC::CondCode CC = parseCondCodeString(Cond);
2394
6
  if (CC == AArch64CC::Invalid)
2395
    //return TokError("invalid condition code");
2396
1
    return true;
2397
5
  Parser.Lex(); // Eat identifier token.
2398
2399
5
  if (invertCondCode) {
2400
0
    if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2401
      //return TokError("condition codes AL and NV are invalid for this instruction");
2402
0
      return true;
2403
0
    CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2404
0
  }
2405
2406
5
  Operands.push_back(
2407
5
      AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2408
5
  return false;
2409
5
}
2410
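For the invertCondCode path used by the cset/cinc/cneg-style aliases, the inversion amounts to flipping the low bit of the encoded condition, which is why AL and NV are rejected above. A hedged sketch, again assuming the architectural encodings (getInvertedCondCode itself is defined elsewhere):

#include <cassert>

// Illustrative only: assumes the architectural rule "inverse = code ^ 1".
static unsigned invertCond(unsigned cc) { return cc ^ 1u; }

int main() {
  assert(invertCond(0) == 1);   // EQ -> NE
  assert(invertCond(2) == 3);   // HS -> LO
  assert(invertCond(10) == 11); // GE -> LT
  return 0;
}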
2411
/// tryParseOptionalShiftExtend - Some operands take an optional shift or extend argument. Parse
2412
/// them if present.
2413
AArch64AsmParser::OperandMatchResultTy
2414
12.8k
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2415
12.8k
  MCAsmParser &Parser = getParser();
2416
12.8k
  const AsmToken &Tok = Parser.getTok();
2417
12.8k
  std::string LowerID = Tok.getString().lower();
2418
12.8k
  AArch64_AM::ShiftExtendType ShOp =
2419
12.8k
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2420
12.8k
          .Case("lsl", AArch64_AM::LSL)
2421
12.8k
          .Case("lsr", AArch64_AM::LSR)
2422
12.8k
          .Case("asr", AArch64_AM::ASR)
2423
12.8k
          .Case("ror", AArch64_AM::ROR)
2424
12.8k
          .Case("msl", AArch64_AM::MSL)
2425
12.8k
          .Case("uxtb", AArch64_AM::UXTB)
2426
12.8k
          .Case("uxth", AArch64_AM::UXTH)
2427
12.8k
          .Case("uxtw", AArch64_AM::UXTW)
2428
12.8k
          .Case("uxtx", AArch64_AM::UXTX)
2429
12.8k
          .Case("sxtb", AArch64_AM::SXTB)
2430
12.8k
          .Case("sxth", AArch64_AM::SXTH)
2431
12.8k
          .Case("sxtw", AArch64_AM::SXTW)
2432
12.8k
          .Case("sxtx", AArch64_AM::SXTX)
2433
12.8k
          .Default(AArch64_AM::InvalidShiftExtend);
2434
2435
12.8k
  if (ShOp == AArch64_AM::InvalidShiftExtend)
2436
12.7k
    return MatchOperand_NoMatch;
2437
2438
41
  SMLoc S = Tok.getLoc();
2439
41
  Parser.Lex();
2440
2441
41
  bool Hash = getLexer().is(AsmToken::Hash);
2442
41
  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2443
37
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2444
37
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2445
37
        ShOp == AArch64_AM::MSL) {
2446
      // We expect a number here.
2447
      //TokError("expected #imm after shift specifier");
2448
0
      return MatchOperand_ParseFail;
2449
0
    }
2450
2451
    // "extend" type operatoins don't need an immediate, #0 is implicit.
2452
37
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2453
37
    Operands.push_back(
2454
37
        AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2455
37
    return MatchOperand_Success;
2456
37
  }
2457
2458
4
  if (Hash)
2459
4
    Parser.Lex(); // Eat the '#'.
2460
2461
  // Make sure we do actually have a number or a parenthesized expression.
2462
4
  SMLoc E = Parser.getTok().getLoc();
2463
4
  if (!Parser.getTok().is(AsmToken::Integer) &&
2464
4
      !Parser.getTok().is(AsmToken::LParen)) {
2465
    //Error(E, "expected integer shift amount");
2466
0
    return MatchOperand_ParseFail;
2467
0
  }
2468
2469
4
  const MCExpr *ImmVal;
2470
4
  if (getParser().parseExpression(ImmVal))
2471
0
    return MatchOperand_ParseFail;
2472
2473
4
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2474
4
  if (!MCE) {
2475
    //Error(E, "expected constant '#imm' after shift specifier");
2476
0
    return MatchOperand_ParseFail;
2477
0
  }
2478
2479
4
  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2480
4
  Operands.push_back(AArch64Operand::CreateShiftExtend(
2481
4
      ShOp, MCE->getValue(), true, S, E, getContext()));
2482
4
  return MatchOperand_Success;
2483
4
}
2484
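A small usage sketch of the syntax this routine accepts, driven through keystone's public C API (an assumed typical way of consuming the library, not code from this file): a register operand may be followed by an optional shift ("lsl #n") or an extend ("uxtb", "sxtw", ...), and the extend forms take an implicit #0 when no amount is given.

#include <keystone/keystone.h>
#include <cstdio>

int main() {
  ks_engine *ks = nullptr;
  if (ks_open(KS_ARCH_ARM64, KS_MODE_LITTLE_ENDIAN, &ks) != KS_ERR_OK)
    return 1;

  // Exercises tryParseOptionalShiftExtend: explicit shift amount,
  // extend with an amount, and extend with the implicit #0.
  const char *src = "add x0, x1, x2, lsl #2;"
                    "add x0, x1, w2, uxtw #1;"
                    "add x0, x1, w2, sxtb";
  unsigned char *enc = nullptr;
  size_t size = 0, count = 0;
  int rc = ks_asm(ks, src, 0, &enc, &size, &count);
  if (rc == 0) {
    printf("assembled %zu statements into %zu bytes\n", count, size);
    ks_free(enc);
  }
  ks_close(ks);
  return rc;
}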
2485
/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2486
/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2487
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2488
                                   OperandVector &Operands)
2489
2
{
2490
2
  if (Name.find('.') != StringRef::npos)
2491
    //return TokError("invalid operand");
2492
0
    return true;
2493
2494
2
  Mnemonic = Name;
2495
2
  Operands.push_back(
2496
2
      AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2497
2498
2
  MCAsmParser &Parser = getParser();
2499
2
  const AsmToken &Tok = Parser.getTok();
2500
2
  StringRef Op = Tok.getString();
2501
2
  SMLoc S = Tok.getLoc();
2502
2503
2
  const MCExpr *Expr = nullptr;
2504
2505
2
#define SYS_ALIAS(op1, Cn, Cm, op2)                                            \
2506
2
  do {                                                                         \
2507
1
    Expr = MCConstantExpr::create(op1, getContext());                          \
2508
1
    Operands.push_back(                                                        \
2509
1
        AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));           \
2510
1
    Operands.push_back(                                                        \
2511
1
        AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));           \
2512
1
    Operands.push_back(                                                        \
2513
1
        AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));           \
2514
1
    Expr = MCConstantExpr::create(op2, getContext());                          \
2515
1
    Operands.push_back(                                                        \
2516
1
        AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));           \
2517
1
  } while (0)
2518
2519
2
  if (Mnemonic == "ic") {
2520
0
    if (!Op.compare_lower("ialluis")) {
2521
      // SYS #0, C7, C1, #0
2522
0
      SYS_ALIAS(0, 7, 1, 0);
2523
0
    } else if (!Op.compare_lower("iallu")) {
2524
      // SYS #0, C7, C5, #0
2525
0
      SYS_ALIAS(0, 7, 5, 0);
2526
0
    } else if (!Op.compare_lower("ivau")) {
2527
      // SYS #3, C7, C5, #1
2528
0
      SYS_ALIAS(3, 7, 5, 1);
2529
0
    } else {
2530
      //return TokError("invalid operand for IC instruction");
2531
0
      return true;
2532
0
    }
2533
2
  } else if (Mnemonic == "dc") {
2534
2
    if (!Op.compare_lower("zva")) {
2535
      // SYS #3, C7, C4, #1
2536
0
      SYS_ALIAS(3, 7, 4, 1);
2537
2
    } else if (!Op.compare_lower("ivac")) {
2538
      // SYS #3, C7, C6, #1
2539
1
      SYS_ALIAS(0, 7, 6, 1);
2540
1
    } else if (!Op.compare_lower("isw")) {
2541
      // SYS #0, C7, C6, #2
2542
0
      SYS_ALIAS(0, 7, 6, 2);
2543
1
    } else if (!Op.compare_lower("cvac")) {
2544
      // SYS #3, C7, C10, #1
2545
0
      SYS_ALIAS(3, 7, 10, 1);
2546
1
    } else if (!Op.compare_lower("csw")) {
2547
      // SYS #0, C7, C10, #2
2548
0
      SYS_ALIAS(0, 7, 10, 2);
2549
1
    } else if (!Op.compare_lower("cvau")) {
2550
      // SYS #3, C7, C11, #1
2551
0
      SYS_ALIAS(3, 7, 11, 1);
2552
1
    } else if (!Op.compare_lower("civac")) {
2553
      // SYS #3, C7, C14, #1
2554
0
      SYS_ALIAS(3, 7, 14, 1);
2555
1
    } else if (!Op.compare_lower("cisw")) {
2556
      // SYS #0, C7, C14, #2
2557
0
      SYS_ALIAS(0, 7, 14, 2);
2558
1
    } else if (!Op.compare_lower("cvap")) {
2559
0
      if (getSTI().getFeatureBits()[AArch64::HasV8_2aOps]) {
2560
        // SYS #3, C7, C12, #1
2561
0
        SYS_ALIAS(3, 7, 12, 1);
2562
0
      } else {
2563
        //return TokError("DC CVAP requires ARMv8.2a");
2564
0
        return true;
2565
0
      }
2566
1
    } else {
2567
      //return TokError("invalid operand for DC instruction");
2568
1
      return true;
2569
1
    }
2570
2
  } else if (Mnemonic == "at") {
2571
0
    if (!Op.compare_lower("s1e1r")) {
2572
      // SYS #0, C7, C8, #0
2573
0
      SYS_ALIAS(0, 7, 8, 0);
2574
0
    } else if (!Op.compare_lower("s1e2r")) {
2575
      // SYS #4, C7, C8, #0
2576
0
      SYS_ALIAS(4, 7, 8, 0);
2577
0
    } else if (!Op.compare_lower("s1e3r")) {
2578
      // SYS #6, C7, C8, #0
2579
0
      SYS_ALIAS(6, 7, 8, 0);
2580
0
    } else if (!Op.compare_lower("s1e1w")) {
2581
      // SYS #0, C7, C8, #1
2582
0
      SYS_ALIAS(0, 7, 8, 1);
2583
0
    } else if (!Op.compare_lower("s1e2w")) {
2584
      // SYS #4, C7, C8, #1
2585
0
      SYS_ALIAS(4, 7, 8, 1);
2586
0
    } else if (!Op.compare_lower("s1e3w")) {
2587
      // SYS #6, C7, C8, #1
2588
0
      SYS_ALIAS(6, 7, 8, 1);
2589
0
    } else if (!Op.compare_lower("s1e0r")) {
2590
      // SYS #0, C7, C8, #2
2591
0
      SYS_ALIAS(0, 7, 8, 2);
2592
0
    } else if (!Op.compare_lower("s1e0w")) {
2593
      // SYS #0, C7, C8, #3
2594
0
      SYS_ALIAS(0, 7, 8, 3);
2595
0
    } else if (!Op.compare_lower("s12e1r")) {
2596
      // SYS #4, C7, C8, #4
2597
0
      SYS_ALIAS(4, 7, 8, 4);
2598
0
    } else if (!Op.compare_lower("s12e1w")) {
2599
      // SYS #4, C7, C8, #5
2600
0
      SYS_ALIAS(4, 7, 8, 5);
2601
0
    } else if (!Op.compare_lower("s12e0r")) {
2602
      // SYS #4, C7, C8, #6
2603
0
      SYS_ALIAS(4, 7, 8, 6);
2604
0
    } else if (!Op.compare_lower("s12e0w")) {
2605
      // SYS #4, C7, C8, #7
2606
0
      SYS_ALIAS(4, 7, 8, 7);
2607
0
    } else if (!Op.compare_lower("s1e1rp")) {
2608
0
      if (getSTI().getFeatureBits()[AArch64::HasV8_2aOps]) {
2609
        // SYS #0, C7, C9, #0
2610
0
        SYS_ALIAS(0, 7, 9, 0);
2611
0
      } else {
2612
        //return TokError("AT S1E1RP requires ARMv8.2a");
2613
0
        return true;
2614
0
      }
2615
0
    } else if (!Op.compare_lower("s1e1wp")) {
2616
0
      if (getSTI().getFeatureBits()[AArch64::HasV8_2aOps]) {
2617
        // SYS #0, C7, C9, #1
2618
0
        SYS_ALIAS(0, 7, 9, 1);
2619
0
      } else {
2620
        //return TokError("AT S1E1WP requires ARMv8.2a");
2621
0
        return true;
2622
0
      }
2623
0
    } else {
2624
      //return TokError("invalid operand for AT instruction");
2625
0
      return true;
2626
0
    }
2627
0
  } else if (Mnemonic == "tlbi") {
2628
0
    if (!Op.compare_lower("vmalle1is")) {
2629
      // SYS #0, C8, C3, #0
2630
0
      SYS_ALIAS(0, 8, 3, 0);
2631
0
    } else if (!Op.compare_lower("alle2is")) {
2632
      // SYS #4, C8, C3, #0
2633
0
      SYS_ALIAS(4, 8, 3, 0);
2634
0
    } else if (!Op.compare_lower("alle3is")) {
2635
      // SYS #6, C8, C3, #0
2636
0
      SYS_ALIAS(6, 8, 3, 0);
2637
0
    } else if (!Op.compare_lower("vae1is")) {
2638
      // SYS #0, C8, C3, #1
2639
0
      SYS_ALIAS(0, 8, 3, 1);
2640
0
    } else if (!Op.compare_lower("vae2is")) {
2641
      // SYS #4, C8, C3, #1
2642
0
      SYS_ALIAS(4, 8, 3, 1);
2643
0
    } else if (!Op.compare_lower("vae3is")) {
2644
      // SYS #6, C8, C3, #1
2645
0
      SYS_ALIAS(6, 8, 3, 1);
2646
0
    } else if (!Op.compare_lower("aside1is")) {
2647
      // SYS #0, C8, C3, #2
2648
0
      SYS_ALIAS(0, 8, 3, 2);
2649
0
    } else if (!Op.compare_lower("vaae1is")) {
2650
      // SYS #0, C8, C3, #3
2651
0
      SYS_ALIAS(0, 8, 3, 3);
2652
0
    } else if (!Op.compare_lower("alle1is")) {
2653
      // SYS #4, C8, C3, #4
2654
0
      SYS_ALIAS(4, 8, 3, 4);
2655
0
    } else if (!Op.compare_lower("vale1is")) {
2656
      // SYS #0, C8, C3, #5
2657
0
      SYS_ALIAS(0, 8, 3, 5);
2658
0
    } else if (!Op.compare_lower("vaale1is")) {
2659
      // SYS #0, C8, C3, #7
2660
0
      SYS_ALIAS(0, 8, 3, 7);
2661
0
    } else if (!Op.compare_lower("vmalle1")) {
2662
      // SYS #0, C8, C7, #0
2663
0
      SYS_ALIAS(0, 8, 7, 0);
2664
0
    } else if (!Op.compare_lower("alle2")) {
2665
      // SYS #4, C8, C7, #0
2666
0
      SYS_ALIAS(4, 8, 7, 0);
2667
0
    } else if (!Op.compare_lower("vale2is")) {
2668
      // SYS #4, C8, C3, #5
2669
0
      SYS_ALIAS(4, 8, 3, 5);
2670
0
    } else if (!Op.compare_lower("vale3is")) {
2671
      // SYS #6, C8, C3, #5
2672
0
      SYS_ALIAS(6, 8, 3, 5);
2673
0
    } else if (!Op.compare_lower("alle3")) {
2674
      // SYS #6, C8, C7, #0
2675
0
      SYS_ALIAS(6, 8, 7, 0);
2676
0
    } else if (!Op.compare_lower("vae1")) {
2677
      // SYS #0, C8, C7, #1
2678
0
      SYS_ALIAS(0, 8, 7, 1);
2679
0
    } else if (!Op.compare_lower("vae2")) {
2680
      // SYS #4, C8, C7, #1
2681
0
      SYS_ALIAS(4, 8, 7, 1);
2682
0
    } else if (!Op.compare_lower("vae3")) {
2683
      // SYS #6, C8, C7, #1
2684
0
      SYS_ALIAS(6, 8, 7, 1);
2685
0
    } else if (!Op.compare_lower("aside1")) {
2686
      // SYS #0, C8, C7, #2
2687
0
      SYS_ALIAS(0, 8, 7, 2);
2688
0
    } else if (!Op.compare_lower("vaae1")) {
2689
      // SYS #0, C8, C7, #3
2690
0
      SYS_ALIAS(0, 8, 7, 3);
2691
0
    } else if (!Op.compare_lower("alle1")) {
2692
      // SYS #4, C8, C7, #4
2693
0
      SYS_ALIAS(4, 8, 7, 4);
2694
0
    } else if (!Op.compare_lower("vale1")) {
2695
      // SYS #0, C8, C7, #5
2696
0
      SYS_ALIAS(0, 8, 7, 5);
2697
0
    } else if (!Op.compare_lower("vale2")) {
2698
      // SYS #4, C8, C7, #5
2699
0
      SYS_ALIAS(4, 8, 7, 5);
2700
0
    } else if (!Op.compare_lower("vale3")) {
2701
      // SYS #6, C8, C7, #5
2702
0
      SYS_ALIAS(6, 8, 7, 5);
2703
0
    } else if (!Op.compare_lower("vaale1")) {
2704
      // SYS #0, C8, C7, #7
2705
0
      SYS_ALIAS(0, 8, 7, 7);
2706
0
    } else if (!Op.compare_lower("ipas2e1")) {
2707
      // SYS #4, C8, C4, #1
2708
0
      SYS_ALIAS(4, 8, 4, 1);
2709
0
    } else if (!Op.compare_lower("ipas2le1")) {
2710
      // SYS #4, C8, C4, #5
2711
0
      SYS_ALIAS(4, 8, 4, 5);
2712
0
    } else if (!Op.compare_lower("ipas2e1is")) {
2713
      // SYS #4, C8, C0, #1
2714
0
      SYS_ALIAS(4, 8, 0, 1);
2715
0
    } else if (!Op.compare_lower("ipas2le1is")) {
2716
      // SYS #4, C8, C0, #5
2717
0
      SYS_ALIAS(4, 8, 0, 5);
2718
0
    } else if (!Op.compare_lower("vmalls12e1")) {
2719
      // SYS #4, C8, C7, #6
2720
0
      SYS_ALIAS(4, 8, 7, 6);
2721
0
    } else if (!Op.compare_lower("vmalls12e1is")) {
2722
      // SYS #4, C8, C3, #6
2723
0
      SYS_ALIAS(4, 8, 3, 6);
2724
0
    } else {
2725
      //return TokError("invalid operand for TLBI instruction");
2726
0
      return true;
2727
0
    }
2728
0
  }
2729
2730
1
#undef SYS_ALIAS
2731
2732
1
  Parser.Lex(); // Eat operand.
2733
2734
1
  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2735
1
  bool HasRegister = false;
2736
2737
  // Check for the optional register operand.
2738
1
  if (getLexer().is(AsmToken::Comma)) {
2739
0
    Parser.Lex(); // Eat comma.
2740
2741
0
    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2742
      //return TokError("expected register operand");
2743
0
      return true;
2744
2745
0
    HasRegister = true;
2746
0
  }
2747
2748
1
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
2749
1
    Parser.eatToEndOfStatement();
2750
    //return TokError("unexpected token in argument list");
2751
1
    return true;
2752
1
  }
2753
2754
0
  if (ExpectRegister && !HasRegister) {
2755
    //return TokError("specified " + Mnemonic + " op requires a register");
2756
0
    return true;
2757
0
  }
2758
0
  else if (!ExpectRegister && HasRegister) {
2759
    //return TokError("specified " + Mnemonic + " op does not use a register");
2760
0
    return true;
2761
0
  }
2762
2763
0
  Parser.Lex(); // Consume the EndOfStatement
2764
0
  return false;
2765
0
}
2766
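A hedged end-to-end sketch of the aliasing performed above, again through keystone's C API: per the table, "dc ivac, x0" is rewritten into "sys #0, c7, c6, #1, x0", so assembling both spellings is expected to yield identical bytes.

#include <keystone/keystone.h>
#include <cstring>
#include <cstdio>

static bool assemble(ks_engine *ks, const char *src, unsigned char out[4]) {
  unsigned char *enc = nullptr;
  size_t size = 0, count = 0;
  if (ks_asm(ks, src, 0, &enc, &size, &count) != 0)
    return false;
  bool ok = (size == 4); // expect exactly one 4-byte instruction
  if (ok)
    memcpy(out, enc, 4);
  ks_free(enc);
  return ok;
}

int main() {
  ks_engine *ks = nullptr;
  if (ks_open(KS_ARCH_ARM64, KS_MODE_LITTLE_ENDIAN, &ks) != KS_ERR_OK)
    return 1;
  unsigned char a[4], b[4];
  bool same = assemble(ks, "dc ivac, x0", a) &&
              assemble(ks, "sys #0, c7, c6, #1, x0", b) &&
              memcmp(a, b, 4) == 0;
  printf("dc ivac matches its sys form: %s\n", same ? "yes" : "no");
  ks_close(ks);
  return same ? 0 : 1;
}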
2767
AArch64AsmParser::OperandMatchResultTy
2768
AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands)
2769
50
{
2770
50
  MCAsmParser &Parser = getParser();
2771
50
  const AsmToken &Tok = Parser.getTok();
2772
2773
  // Can be either a #imm style literal or an option name
2774
50
  bool Hash = Tok.is(AsmToken::Hash);
2775
50
  if (Hash || Tok.is(AsmToken::Integer)) {
2776
    // Immediate operand.
2777
32
    if (Hash)
2778
32
      Parser.Lex(); // Eat the '#'
2779
32
    const MCExpr *ImmVal;
2780
32
    SMLoc ExprLoc = getLoc();
2781
32
    if (getParser().parseExpression(ImmVal))
2782
0
      return MatchOperand_ParseFail;
2783
32
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2784
32
    if (!MCE) {
2785
      //Error(ExprLoc, "immediate value expected for barrier operand");
2786
0
      return MatchOperand_ParseFail;
2787
0
    }
2788
32
    if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2789
      //Error(ExprLoc, "barrier operand out of range");
2790
0
      return MatchOperand_ParseFail;
2791
0
    }
2792
32
    bool Valid;
2793
32
    auto Mapper = AArch64DB::DBarrierMapper();
2794
32
    StringRef Name = 
2795
32
        Mapper.toString(MCE->getValue(), getSTI().getFeatureBits(), Valid);
2796
32
    Operands.push_back( AArch64Operand::CreateBarrier(MCE->getValue(), Name,
2797
32
                                                      ExprLoc, getContext()));
2798
32
    return MatchOperand_Success;
2799
32
  }
2800
2801
18
  if (Tok.isNot(AsmToken::Identifier)) {
2802
    //TokError("invalid operand for instruction");
2803
2
    return MatchOperand_ParseFail;
2804
2
  }
2805
2806
16
  bool Valid;
2807
16
  auto Mapper = AArch64DB::DBarrierMapper();
2808
16
  unsigned Opt = 
2809
16
      Mapper.fromString(Tok.getString(), getSTI().getFeatureBits(), Valid);
2810
16
  if (!Valid) {
2811
    //TokError("invalid barrier option name");
2812
0
    return MatchOperand_ParseFail;
2813
0
  }
2814
2815
  // The only valid named option for ISB is 'sy'
2816
16
  if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
2817
    //TokError("'sy' or #imm operand expected");
2818
0
    return MatchOperand_ParseFail;
2819
0
  }
2820
2821
16
  Operands.push_back( AArch64Operand::CreateBarrier(Opt, Tok.getString(),
2822
16
                                                    getLoc(), getContext()));
2823
16
  Parser.Lex(); // Consume the option
2824
2825
16
  return MatchOperand_Success;
2826
16
}
2827
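The barrier operand can be written either as a named option or as a raw "#imm" in [0, 15]; since the named option "sy" is the all-ones value 15, the two spellings below should encode identically. A hedged usage sketch via keystone's C API:

#include <keystone/keystone.h>
#include <cstring>
#include <cstdio>

int main() {
  ks_engine *ks = nullptr;
  if (ks_open(KS_ARCH_ARM64, KS_MODE_LITTLE_ENDIAN, &ks) != KS_ERR_OK)
    return 1;
  // "sy" is barrier option 0b1111, so both statements should encode the same.
  unsigned char *enc = nullptr;
  size_t size = 0, count = 0;
  int rc = ks_asm(ks, "dmb sy; dmb #0xf", 0, &enc, &size, &count);
  if (rc == 0 && size == 8)
    printf("same encoding: %s\n", memcmp(enc, enc + 4, 4) == 0 ? "yes" : "no");
  if (rc == 0)
    ks_free(enc);
  ks_close(ks);
  return 0;
}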
2828
AArch64AsmParser::OperandMatchResultTy
2829
AArch64AsmParser::tryParseSysReg(OperandVector &Operands)
2830
792
{
2831
792
  MCAsmParser &Parser = getParser();
2832
792
  const AsmToken &Tok = Parser.getTok();
2833
2834
792
  if (Tok.isNot(AsmToken::Identifier))
2835
462
    return MatchOperand_NoMatch;
2836
2837
330
  bool IsKnown;
2838
330
  auto MRSMapper = AArch64SysReg::MRSMapper();
2839
330
  uint32_t MRSReg = MRSMapper.fromString(Tok.getString(),
2840
330
                                         getSTI().getFeatureBits(), IsKnown);
2841
330
  assert(IsKnown == (MRSReg != -1U) &&
2842
330
         "register should be -1 if and only if it's unknown");
2843
2844
0
  auto MSRMapper = AArch64SysReg::MSRMapper();
2845
330
  uint32_t MSRReg = MSRMapper.fromString(Tok.getString(),
2846
330
                                         getSTI().getFeatureBits(), IsKnown);
2847
330
  assert(IsKnown == (MSRReg != -1U) &&
2848
330
         "register should be -1 if and only if it's unknown");
2849
2850
0
  auto PStateMapper = AArch64PState::PStateMapper();
2851
330
  uint32_t PStateField = 
2852
330
      PStateMapper.fromString(Tok.getString(),
2853
330
                              getSTI().getFeatureBits(), IsKnown);
2854
330
  assert(IsKnown == (PStateField != -1U) &&
2855
330
         "register should be -1 if and only if it's unknown");
2856
2857
0
  Operands.push_back(AArch64Operand::CreateSysReg(
2858
330
      Tok.getString(), getLoc(), MRSReg, MSRReg, PStateField, getContext()));
2859
330
  Parser.Lex(); // Eat identifier
2860
2861
330
  return MatchOperand_Success;
2862
792
}
2863
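A usage sketch (an assumption about typical keystone consumption, not code from this file) of the operands this routine produces: a system-register name is looked up in the MRS, MSR and PState tables and attached to a single SysReg operand, so both directions of NZCV access below should assemble.

#include <keystone/keystone.h>
#include <cstdio>

int main() {
  ks_engine *ks = nullptr;
  if (ks_open(KS_ARCH_ARM64, KS_MODE_LITTLE_ENDIAN, &ks) != KS_ERR_OK)
    return 1;
  unsigned char *enc = nullptr;
  size_t size = 0, count = 0;
  // NZCV is both readable and writable, so it exercises the MRS and MSR maps.
  int rc = ks_asm(ks, "mrs x0, nzcv; msr nzcv, x0", 0, &enc, &size, &count);
  printf("rc=%d, %zu bytes\n", rc, size);
  if (rc == 0)
    ks_free(enc);
  ks_close(ks);
  return 0;
}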
2864
/// tryParseVectorRegister - Parse a vector register operand.
2865
// return true on error
2866
bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands)
2867
17.7k
{
2868
17.7k
  MCAsmParser &Parser = getParser();
2869
17.7k
  if (Parser.getTok().isNot(AsmToken::Identifier))
2870
0
    return true;
2871
2872
17.7k
  SMLoc S = getLoc();
2873
  // Check for a vector register specifier first.
2874
17.7k
  StringRef Kind;
2875
17.7k
  int64_t Reg = tryMatchVectorRegister(Kind, false);
2876
17.7k
  if (Reg == -1)
2877
16.3k
    return true;
2878
1.40k
  Operands.push_back(
2879
1.40k
      AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2880
  // If there was an explicit qualifier, that goes on as a literal text
2881
  // operand.
2882
1.40k
  if (!Kind.empty())
2883
907
    Operands.push_back(
2884
907
        AArch64Operand::CreateToken(Kind, false, S, getContext()));
2885
2886
  // If there is an index specifier following the register, parse that too.
2887
1.40k
  if (Parser.getTok().is(AsmToken::LBrac)) {
2888
2
    SMLoc SIdx = getLoc();
2889
2
    Parser.Lex(); // Eat left bracket token.
2890
2891
2
    const MCExpr *ImmVal;
2892
2
    if (getParser().parseExpression(ImmVal))
2893
0
      return false;
2894
2
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2895
2
    if (!MCE) {
2896
      //TokError("immediate value expected for vector index");
2897
0
      return true;
2898
0
    }
2899
2900
2
    SMLoc E = getLoc();
2901
2
    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2902
      //Error(E, "']' expected");
2903
2
      return true;
2904
2
    }
2905
2906
0
    Parser.Lex(); // Eat right bracket token.
2907
2908
0
    Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2909
0
                                                         E, getContext()));
2910
0
  }
2911
2912
1.40k
  return false;
2913
1.40k
}
2914
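A hedged usage sketch of the shapes handled above: a vector register with an arrangement suffix, and one with a lane index following it.

#include <keystone/keystone.h>
#include <cstdio>

int main() {
  ks_engine *ks = nullptr;
  if (ks_open(KS_ARCH_ARM64, KS_MODE_LITTLE_ENDIAN, &ks) != KS_ERR_OK)
    return 1;
  // The ".16b" suffix and the "[1]" lane index both flow through
  // tryParseVectorRegister.
  const char *src = "mov v0.16b, v1.16b; umov w0, v1.s[1]";
  unsigned char *enc = nullptr;
  size_t size = 0, count = 0;
  int rc = ks_asm(ks, src, 0, &enc, &size, &count);
  printf("rc=%d, %zu statements\n", rc, count);
  if (rc == 0)
    ks_free(enc);
  ks_close(ks);
  return 0;
}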
2915
/// parseRegister - Parse a non-vector register operand.
2916
// return true on error
2917
bool AArch64AsmParser::parseRegister(OperandVector &Operands)
2918
17.7k
{
2919
17.7k
  MCAsmParser &Parser = getParser();
2920
17.7k
  SMLoc S = getLoc();
2921
  // Try for a vector register.
2922
17.7k
  if (!tryParseVectorRegister(Operands))
2923
1.40k
    return false;
2924
2925
  // Try for a scalar register.
2926
16.3k
  int64_t Reg = tryParseRegister();
2927
16.3k
  if (Reg == -1)
2928
12.8k
    return true;
2929
3.49k
  Operands.push_back(
2930
3.49k
      AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2931
2932
  // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2933
  // as a string token in the instruction itself.
2934
3.49k
  if (getLexer().getKind() == AsmToken::LBrac) {
2935
36
    SMLoc LBracS = getLoc();
2936
36
    Parser.Lex();
2937
36
    const AsmToken &Tok = Parser.getTok();
2938
36
    if (Tok.is(AsmToken::Integer)) {
2939
3
      SMLoc IntS = getLoc();
2940
3
      bool valid;
2941
3
      int64_t Val = Tok.getIntVal(valid);
2942
3
      if (!valid)
2943
0
        return true;
2944
3
      if (Val == 1) {
2945
3
        Parser.Lex();
2946
3
        if (getLexer().getKind() == AsmToken::RBrac) {
2947
0
          SMLoc RBracS = getLoc();
2948
0
          Parser.Lex();
2949
0
          Operands.push_back(
2950
0
              AArch64Operand::CreateToken("[", false, LBracS, getContext()));
2951
0
          Operands.push_back(
2952
0
              AArch64Operand::CreateToken("1", false, IntS, getContext()));
2953
0
          Operands.push_back(
2954
0
              AArch64Operand::CreateToken("]", false, RBracS, getContext()));
2955
0
          return false;
2956
0
        }
2957
3
      }
2958
3
    }
2959
36
  }
2960
2961
3.49k
  return false;
2962
3.49k
}
2963
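The "[1]" special case mentioned above belongs to the FMOV variants that move to or from the upper 64 bits of a 128-bit register. A hedged usage sketch of that syntax (which internal path each spelling takes is not asserted here):

#include <keystone/keystone.h>
#include <cstdio>

int main() {
  ks_engine *ks = nullptr;
  if (ks_open(KS_ARCH_ARM64, KS_MODE_LITTLE_ENDIAN, &ks) != KS_ERR_OK)
    return 1;
  // FMOV to/from the high half of a Q register (FMOVXDhighr and friends).
  unsigned char *enc = nullptr;
  size_t size = 0, count = 0;
  int rc = ks_asm(ks, "fmov x0, v1.d[1]; fmov v1.d[1], x0", 0,
                  &enc, &size, &count);
  printf("rc=%d, %zu bytes\n", rc, size);
  if (rc == 0)
    ks_free(enc);
  ks_close(ks);
  return 0;
}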
2964
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal)
2965
14.9k
{
2966
14.9k
  MCAsmParser &Parser = getParser();
2967
14.9k
  bool HasELFModifier = false;
2968
14.9k
  AArch64MCExpr::VariantKind RefKind;
2969
2970
14.9k
  if (Parser.getTok().is(AsmToken::Colon)) {
2971
11
    Parser.Lex(); // Eat ':"
2972
11
    HasELFModifier = true;
2973
2974
11
    if (Parser.getTok().isNot(AsmToken::Identifier)) {
2975
      //Error(Parser.getTok().getLoc(),
2976
      //      "expect relocation specifier in operand after ':'");
2977
0
      return true;
2978
0
    }
2979
2980
11
    std::string LowerCase = Parser.getTok().getIdentifier().lower();
2981
11
    RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2982
11
                  .Case("lo12", AArch64MCExpr::VK_LO12)
2983
11
                  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2984
11
                  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2985
11
                  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2986
11
                  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2987
11
                  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2988
11
                  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2989
11
                  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2990
11
                  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2991
11
                  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2992
11
                  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2993
11
                  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2994
11
                  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2995
11
                  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2996
11
                  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2997
11
                  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2998
11
                  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2999
11
                  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
3000
11
                  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
3001
11
                  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
3002
11
                  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
3003
11
                  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
3004
11
                  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
3005
11
                  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
3006
11
                  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
3007
11
                  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
3008
11
                  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
3009
11
                  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
3010
11
                  .Case("got", AArch64MCExpr::VK_GOT_PAGE)
3011
11
                  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
3012
11
                  .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
3013
11
                  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
3014
11
                  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
3015
11
                  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
3016
11
                  .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
3017
11
                  .Default(AArch64MCExpr::VK_INVALID);
3018
3019
11
    if (RefKind == AArch64MCExpr::VK_INVALID) {
3020
      //Error(Parser.getTok().getLoc(),
3021
      //      "expect relocation specifier in operand after ':'");
3022
10
      return true;
3023
10
    }
3024
3025
1
    Parser.Lex(); // Eat identifier
3026
3027
1
    if (Parser.getTok().isNot(AsmToken::Colon)) {
3028
      //Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
3029
0
      return true;
3030
0
    }
3031
1
    Parser.Lex(); // Eat ':'
3032
1
  }
3033
3034
14.9k
  if (getParser().parseExpression(ImmVal))
3035
11.5k
    return true;
3036
3037
3.45k
  if (HasELFModifier)
3038
1
    ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
3039
3040
3.45k
  return false;
3041
14.9k
}
3042
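A minimal, purely illustrative sketch of the operand grammar handled above: an optional ":modifier:" prefix followed by an ordinary expression, as in ":lo12:sym" or ":abs_g1:value". The helper below is hypothetical and only mirrors the shape of the input, not the parser's implementation.

#include <cassert>
#include <string>
#include <utility>

// Hypothetical splitter: ":lo12:sym+8" -> {"lo12", "sym+8"}; input without a
// leading modifier yields an empty first element.
static std::pair<std::string, std::string>
splitModifier(const std::string &op) {
  if (op.size() < 2 || op[0] != ':')
    return {"", op};
  auto end = op.find(':', 1);
  if (end == std::string::npos)
    return {"", op};
  return {op.substr(1, end - 1), op.substr(end + 1)};
}

int main() {
  assert(splitModifier(":lo12:sym+8") ==
         std::make_pair(std::string("lo12"), std::string("sym+8")));
  assert(splitModifier("1234").first.empty());
  return 0;
}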
3043
/// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
3044
// return true on error
3045
bool AArch64AsmParser::parseVectorList(OperandVector &Operands)
3046
20
{
3047
20
  MCAsmParser &Parser = getParser();
3048
20
  assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
3049
0
  SMLoc S = getLoc();
3050
20
  Parser.Lex(); // Eat left bracket token.
3051
20
  StringRef Kind;
3052
20
  int64_t FirstReg = tryMatchVectorRegister(Kind, true);
3053
20
  if (FirstReg == -1)
3054
12
    return true;
3055
8
  int64_t PrevReg = FirstReg;
3056
8
  unsigned Count = 1;
3057
3058
8
  if (Parser.getTok().is(AsmToken::Minus)) {
3059
0
    Parser.Lex(); // Eat the minus.
3060
3061
    //SMLoc Loc = getLoc();
3062
0
    StringRef NextKind;
3063
0
    int64_t Reg = tryMatchVectorRegister(NextKind, true);
3064
0
    if (Reg == -1)
3065
0
      return true;
3066
    // Any kind suffixes must match on all regs in the list.
3067
0
    if (Kind != NextKind)
3068
      //return Error(Loc, "mismatched register size suffix");
3069
0
      return true;
3070
3071
0
    unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
3072
3073
0
    if (Space == 0 || Space > 3) {
3074
      //return Error(Loc, "invalid number of vectors");
3075
0
      return true;
3076
0
    }
3077
3078
0
    Count += Space;
3079
0
  }
3080
8
  else {
3081
12
    while (Parser.getTok().is(AsmToken::Comma)) {
3082
4
      Parser.Lex(); // Eat the comma token.
3083
3084
      //SMLoc Loc = getLoc();
3085
4
      StringRef NextKind;
3086
4
      int64_t Reg = tryMatchVectorRegister(NextKind, true);
3087
4
      if (Reg == -1)
3088
0
        return true;
3089
      // Any kind suffixes must match on all regs in the list.
3090
4
      if (Kind != NextKind)
3091
        //return Error(Loc, "mismatched register size suffix");
3092
0
        return true;
3093
3094
      // Registers must be sequential (with wraparound at 31)
3095
4
      if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
3096
4
          (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
3097
       //return Error(Loc, "registers must be sequential");
3098
0
       return true;
3099
3100
4
      PrevReg = Reg;
3101
4
      ++Count;
3102
4
    }
3103
8
  }
3104
3105
8
  if (Parser.getTok().isNot(AsmToken::RCurly))
3106
    //return Error(getLoc(), "'}' expected");
3107
2
    return true;
3108
6
  Parser.Lex(); // Eat the '}' token.
3109
3110
6
  if (Count > 4)
3111
    //return Error(S, "invalid number of vectors");
3112
0
    return true;
3113
3114
6
  unsigned NumElements = 0;
3115
6
  char ElementKind = 0;
3116
6
  if (!Kind.empty())
3117
0
    parseValidVectorKind(Kind, NumElements, ElementKind);
3118
3119
6
  Operands.push_back(AArch64Operand::CreateVectorList(
3120
6
      FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
3121
3122
  // If there is an index specifier following the list, parse that too.
3123
6
  if (Parser.getTok().is(AsmToken::LBrac)) {
3124
6
    SMLoc SIdx = getLoc();
3125
6
    Parser.Lex(); // Eat left bracket token.
3126
3127
6
    const MCExpr *ImmVal;
3128
6
    if (getParser().parseExpression(ImmVal))
3129
4
      return false;
3130
2
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3131
2
    if (!MCE) {
3132
      //TokError("immediate value expected for vector index");
3133
2
      return false;
3134
2
    }
3135
3136
0
    SMLoc E = getLoc();
3137
0
    if (Parser.getTok().isNot(AsmToken::RBrac)) {
3138
      //Error(E, "']' expected");
3139
0
      return false;
3140
0
    }
3141
3142
0
    Parser.Lex(); // Eat right bracket token.
3143
3144
0
    Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3145
0
                                                         E, getContext()));
3146
0
  }
3147
0
  return false;
3148
6
}
3149
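The "registers must be sequential" rule above compares encodings modulo 32, so a list is allowed to wrap from V31 back to V0. A hedged sketch of just that check, using hypothetical encodings 0..31 in place of MCRegisterInfo::getEncodingValue results:

#include <cassert>

// Illustrative only: V0 -> 0 ... V31 -> 31.
static bool isSequential(unsigned prevEnc, unsigned nextEnc) {
  return nextEnc == (prevEnc + 1) % 32;
}

int main() {
  assert(isSequential(1, 2));   // { v1.16b, v2.16b }
  assert(isSequential(31, 0));  // wraparound: { v31.16b, v0.16b }
  assert(!isSequential(3, 5));  // gap -> rejected
  return 0;
}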
3150
AArch64AsmParser::OperandMatchResultTy
3151
AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands)
3152
1.16k
{
3153
1.16k
  MCAsmParser &Parser = getParser();
3154
1.16k
  const AsmToken &Tok = Parser.getTok();
3155
1.16k
  if (!Tok.is(AsmToken::Identifier))
3156
1.05k
    return MatchOperand_NoMatch;
3157
3158
112
  unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
3159
3160
112
  MCContext &Ctx = getContext();
3161
112
  const MCRegisterInfo *RI = Ctx.getRegisterInfo();
3162
112
  if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
3163
112
    return MatchOperand_NoMatch;
3164
3165
0
  SMLoc S = getLoc();
3166
0
  Parser.Lex(); // Eat register
3167
3168
0
  if (Parser.getTok().isNot(AsmToken::Comma)) {
3169
0
    Operands.push_back(
3170
0
        AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
3171
0
    return MatchOperand_Success;
3172
0
  }
3173
0
  Parser.Lex(); // Eat comma.
3174
3175
0
  if (Parser.getTok().is(AsmToken::Hash))
3176
0
    Parser.Lex(); // Eat hash
3177
3178
0
  if (Parser.getTok().isNot(AsmToken::Integer)) {
3179
    //Error(getLoc(), "index must be absent or #0");
3180
0
    return MatchOperand_ParseFail;
3181
0
  }
3182
3183
0
  const MCExpr *ImmVal;
3184
0
  if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3185
0
      cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3186
    //Error(getLoc(), "index must be absent or #0");
3187
0
    return MatchOperand_ParseFail;
3188
0
  }
3189
3190
0
  Operands.push_back(
3191
0
      AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
3192
0
  return MatchOperand_Success;
3193
0
}
3194
3195
/// parseOperand - Parse an AArch64 instruction operand.  For now this parses the
3196
/// operand regardless of the mnemonic.
3197
// return true on failure
3198
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3199
                                  bool invertCondCode)
3200
73.8k
{
3201
73.8k
  MCAsmParser &Parser = getParser();
3202
  // Check if the current operand has a custom associated parser, if so, try to
3203
  // custom parse the operand, or fallback to the general approach.
3204
73.8k
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
3205
73.8k
  if (ResTy == MatchOperand_Success)
3206
574
    return false;
3207
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
3208
  // there was a match, but an error occurred, in which case, just return that
3209
  // the operand parsing failed.
3210
73.2k
  if (ResTy == MatchOperand_ParseFail)
3211
49
    return true;
3212
3213
  // Nothing custom, so do general case parsing.
3214
73.2k
  SMLoc S, E;
3215
73.2k
  switch (getLexer().getKind()) {
3216
13.0k
  default: {
3217
13.0k
    SMLoc S = getLoc();
3218
13.0k
    const MCExpr *Expr;
3219
13.0k
    if (parseSymbolicImmVal(Expr))
3220
      //return Error(S, "invalid operand");
3221
11.4k
      return true;
3222
3223
1.63k
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3224
1.63k
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3225
1.63k
    return false;
3226
13.0k
  }
3227
40.0k
  case AsmToken::LBrac: {
3228
40.0k
    SMLoc Loc = Parser.getTok().getLoc();
3229
40.0k
    Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3230
40.0k
                                                   getContext()));
3231
40.0k
    Parser.Lex(); // Eat '['
3232
3233
    // There's no comma after a '[', so we can parse the next operand
3234
    // immediately.
3235
40.0k
    return parseOperand(Operands, false, false);
3236
13.0k
  }
3237
20
  case AsmToken::LCurly:
3238
20
    return parseVectorList(Operands);
3239
17.7k
  case AsmToken::Identifier: {
3240
    // If we're expecting a Condition Code operand, then just parse that.
3241
17.7k
    if (isCondCode)
3242
6
      return parseCondCode(Operands, invertCondCode);
3243
3244
    // If it's a register name, parse it.
3245
17.7k
    if (!parseRegister(Operands))
3246
4.89k
      return false;
3247
3248
    // This could be an optional "shift" or "extend" operand.
3249
12.8k
    OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3250
    // We can only continue if no tokens were eaten.
3251
12.8k
    if (GotShift != MatchOperand_NoMatch)
3252
41
      return GotShift;
3253
3254
    // This was not a register so parse other operands that start with an
3255
    // identifier (like labels) as expressions and create them as immediates.
3256
12.7k
    const MCExpr *IdVal;
3257
12.7k
    S = getLoc();
3258
12.7k
    if (getParser().parseExpression(IdVal))
3259
1.50k
      return true;
3260
3261
11.2k
    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3262
11.2k
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3263
11.2k
    return false;
3264
12.7k
  }
3265
1.58k
  case AsmToken::Integer:
3266
1.63k
  case AsmToken::Real:
3267
1.83k
  case AsmToken::Hash: {
3268
    // #42 -> immediate.
3269
1.83k
    S = getLoc();
3270
1.83k
    if (getLexer().is(AsmToken::Hash))
3271
202
      Parser.Lex();
3272
3273
    // Parse a negative sign
3274
1.83k
    bool isNegative = false;
3275
1.83k
    if (Parser.getTok().is(AsmToken::Minus)) {
3276
51
      isNegative = true;
3277
      // We need to consume this token only when we have a Real, otherwise
3278
      // we let parseSymbolicImmVal take care of it
3279
51
      if (Parser.getLexer().peekTok().is(AsmToken::Real))
3280
0
        Parser.Lex();
3281
51
    }
3282
3283
    // The only Real that should come through here is a literal #0.0 for
3284
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3285
    // so convert the value.
3286
1.83k
    const AsmToken &Tok = Parser.getTok();
3287
1.83k
    if (Tok.is(AsmToken::Real)) {
3288
51
      APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3289
51
      if (RealVal.bitcastToAPInt().getActiveBits() > 64)
3290
0
          return true;
3291
51
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3292
51
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3293
51
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3294
51
          Mnemonic != "fcmlt")
3295
        //return TokError("unexpected floating point literal");
3296
39
        return true;
3297
12
      else if (IntVal != 0 || isNegative)
3298
        //return TokError("expected floating-point constant #0.0");
3299
0
        return true;
3300
12
      Parser.Lex(); // Eat the token.
3301
3302
12
      Operands.push_back(
3303
12
          AArch64Operand::CreateToken("#0", false, S, getContext()));
3304
12
      Operands.push_back(
3305
12
          AArch64Operand::CreateToken(".0", false, S, getContext()));
3306
12
      return false;
3307
51
    }
3308
3309
1.78k
    const MCExpr *ImmVal;
3310
1.78k
    if (parseSymbolicImmVal(ImmVal))
3311
116
      return true;
3312
3313
1.66k
    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3314
1.66k
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3315
1.66k
    return false;
3316
1.78k
  }
3317
536
  case AsmToken::Equal: {
3318
536
    SMLoc Loc = Parser.getTok().getLoc();
3319
536
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3320
      //return Error(Loc, "unexpected token in operand");
3321
5
      return true;
3322
531
    Parser.Lex(); // Eat '='
3323
531
    const MCExpr *SubExprVal;
3324
531
    if (getParser().parseExpression(SubExprVal))
3325
6
      return true;
3326
3327
525
    if (Operands.size() < 2 ||
3328
525
        !static_cast<AArch64Operand &>(*Operands[1]).isReg())
3329
      //return Error(Loc, "Only valid when first operand is register");
3330
0
      return true;
3331
3332
525
    bool IsXReg =
3333
525
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3334
525
            Operands[1]->getReg());
3335
3336
525
    MCContext& Ctx = getContext();
3337
525
    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3338
    // If the op is an imm and can fit into a mov, then replace ldr with mov.
3339
525
    if (isa<MCConstantExpr>(SubExprVal)) {
3340
441
      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3341
441
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3342
710
      while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3343
269
        ShiftAmt += 16;
3344
269
        Imm >>= 16;
3345
269
      }
3346
441
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3347
278
          Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3348
278
          Operands.push_back(AArch64Operand::CreateImm(
3349
278
                     MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3350
278
        if (ShiftAmt)
3351
88
          Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3352
88
                     ShiftAmt, true, S, E, Ctx));
3353
278
        return false;
3354
278
      }
3355
163
      APInt Simm = APInt(64, Imm << ShiftAmt);
3356
      // check if the immediate is an unsigned or signed 32-bit int for W regs
3357
163
      if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3358
        //return Error(Loc, "Immediate too large for register");
3359
4
        return true;
3360
163
    }
3361
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
3362
243
    const MCExpr *CPLoc =
3363
243
        getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
3364
243
    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3365
243
    return false;
3366
525
  }
3367
73.2k
  }
3368
73.2k
}
3369
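The '=' case above folds "ldr <reg>, =<imm>" into a MOVZ whenever the value fits a single 16-bit chunk at a 16-bit-aligned shift; otherwise the value is placed in the constant pool. A hedged usage sketch via keystone's C API, comparing the pseudo against the MOVZ it should become:

#include <keystone/keystone.h>
#include <cstring>
#include <cstdio>

int main() {
  ks_engine *ks = nullptr;
  if (ks_open(KS_ARCH_ARM64, KS_MODE_LITTLE_ENDIAN, &ks) != KS_ERR_OK)
    return 1;
  // 0x10000 has 16 trailing zero bits, so per the folding above it should
  // become "movz x0, #1, lsl #16".
  unsigned char *enc = nullptr;
  size_t size = 0, count = 0;
  int rc = ks_asm(ks, "ldr x0, =0x10000; movz x0, #1, lsl #16", 0,
                  &enc, &size, &count);
  if (rc == 0 && size == 8)
    printf("ldr pseudo folded to movz: %s\n",
           memcmp(enc, enc + 4, 4) == 0 ? "yes" : "no");
  if (rc == 0)
    ks_free(enc);
  ks_close(ks);
  return 0;
}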
3370
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3371
/// operands.
3372
// return true on error
3373
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3374
                                        StringRef Name, SMLoc NameLoc,
3375
                                        OperandVector &Operands, unsigned int &ErrorCode)
3376
15.7k
{
3377
15.7k
  MCAsmParser &Parser = getParser();
3378
15.7k
  Name = StringSwitch<StringRef>(Name.lower())
3379
15.7k
             .Case("beq", "b.eq")
3380
15.7k
             .Case("bne", "b.ne")
3381
15.7k
             .Case("bhs", "b.hs")
3382
15.7k
             .Case("bcs", "b.cs")
3383
15.7k
             .Case("blo", "b.lo")
3384
15.7k
             .Case("bcc", "b.cc")
3385
15.7k
             .Case("bmi", "b.mi")
3386
15.7k
             .Case("bpl", "b.pl")
3387
15.7k
             .Case("bvs", "b.vs")
3388
15.7k
             .Case("bvc", "b.vc")
3389
15.7k
             .Case("bhi", "b.hi")
3390
15.7k
             .Case("bls", "b.ls")
3391
15.7k
             .Case("bge", "b.ge")
3392
15.7k
             .Case("blt", "b.lt")
3393
15.7k
             .Case("bgt", "b.gt")
3394
15.7k
             .Case("ble", "b.le")
3395
15.7k
             .Case("bal", "b.al")
3396
15.7k
             .Case("bnv", "b.nv")
3397
15.7k
             .Default(Name);
3398
3399
  // First check for the AArch64-specific .req directive.
3400
15.7k
  if (Parser.getTok().is(AsmToken::Identifier) &&
3401
15.7k
      Parser.getTok().getIdentifier() == ".req") {
3402
0
    parseDirectiveReq(Name, NameLoc);
3403
    // We always return 'error' for this, as we're done with this
3404
    // statement and don't need to match the instruction.
3405
0
    return true;
3406
0
  }
3407
3408
  // Create the leading tokens for the mnemonic, split by '.' characters.
3409
15.7k
  size_t Start = 0, Next = Name.find('.');
3410
15.7k
  StringRef Head = Name.slice(Start, Next);
3411
3412
  // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3413
15.7k
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
3414
2
    bool IsError = parseSysAlias(Head, NameLoc, Operands);
3415
2
    if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
3416
2
      Parser.eatToEndOfStatement();
3417
2
    return IsError;
3418
2
  }
3419
3420
15.7k
  Operands.push_back(
3421
15.7k
      AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3422
15.7k
  Mnemonic = Head;
3423
3424
  // Handle condition codes for a branch mnemonic
3425
15.7k
  if (Head == "b" && Next != StringRef::npos) {
3426
554
    Start = Next;
3427
554
    Next = Name.find('.', Start + 1);
3428
554
    Head = Name.slice(Start + 1, Next);
3429
3430
554
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3431
554
                                            (Head.data() - Name.data()));
3432
554
    AArch64CC::CondCode CC = parseCondCodeString(Head);
3433
554
    if (CC == AArch64CC::Invalid)
3434
      //return Error(SuffixLoc, "invalid condition code");
3435
15
      return true;
3436
539
    Operands.push_back(
3437
539
        AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3438
539
    Operands.push_back(
3439
539
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3440
539
  }
3441
3442
  // Add the remaining tokens in the mnemonic.
3443
30.6k
  while (Next != StringRef::npos) {
3444
14.8k
    Start = Next;
3445
14.8k
    Next = Name.find('.', Start + 1);
3446
14.8k
    Head = Name.slice(Start, Next);
3447
14.8k
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3448
14.8k
                                            (Head.data() - Name.data()) + 1);
3449
14.8k
    Operands.push_back(
3450
14.8k
        AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3451
14.8k
  }
3452
3453
  // Conditional compare instructions have a Condition Code operand, which needs
3454
  // to be parsed and an immediate operand created.
3455
15.7k
  bool condCodeFourthOperand =
3456
15.7k
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3457
15.7k
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3458
15.7k
       Head == "csinc" || Head == "csinv" || Head == "csneg");
3459
3460
  // These instructions are aliases to some of the conditional select
3461
  // instructions. However, the condition code is inverted in the aliased
3462
  // instruction.
3463
  //
3464
  // FIXME: Is this the correct way to handle these? Or should the parser
3465
  //        generate the aliased instructions directly?
3466
15.7k
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3467
15.7k
  bool condCodeThirdOperand =
3468
15.7k
      (Head == "cinc" || Head == "cinv" || Head == "cneg");
3469
3470
  // Read the remaining operands.
3471
15.7k
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
3472
    // Read the first operand.
3473
15.1k
    if (parseOperand(Operands, false, false)) {
3474
12.7k
      Parser.eatToEndOfStatement();
3475
12.7k
      ErrorCode = KS_ERR_ASM_INVALIDOPERAND;
3476
12.7k
      return true;
3477
12.7k
    }
3478
3479
2.41k
    unsigned N = 2;
3480
20.6k
    while (getLexer().is(AsmToken::Comma)) {
3481
18.5k
      Parser.Lex(); // Eat the comma.
3482
3483
      // Parse and remember the operand.
3484
18.5k
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3485
18.5k
                                     (N == 3 && condCodeThirdOperand) ||
3486
18.5k
                                     (N == 2 && condCodeSecondOperand),
3487
18.5k
                       condCodeSecondOperand || condCodeThirdOperand)) {
3488
358
        Parser.eatToEndOfStatement();
3489
358
        ErrorCode = KS_ERR_ASM_INVALIDOPERAND;
3490
358
        return true;
3491
358
      }
3492
3493
      // After successfully parsing some operands there are two special cases to
3494
      // consider (i.e. notional operands not separated by commas). Both are due
3495
      // to memory specifiers:
3496
      //  + An RBrac will end an address for load/store/prefetch
3497
      //  + An '!' will indicate a pre-indexed operation.
3498
      //
3499
      // It's someone else's responsibility to make sure these tokens are sane
3500
      // in the given context!
3501
18.2k
      if (Parser.getTok().is(AsmToken::RBrac)) {
3502
50
        SMLoc Loc = Parser.getTok().getLoc();
3503
50
        Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
3504
50
                                                       getContext()));
3505
50
        Parser.Lex();
3506
50
      }
3507
3508
18.2k
      if (Parser.getTok().is(AsmToken::Exclaim)) {
3509
172
        SMLoc Loc = Parser.getTok().getLoc();
3510
172
        Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
3511
172
                                                       getContext()));
3512
172
        Parser.Lex();
3513
172
      }
3514
3515
18.2k
      ++N;
3516
18.2k
    }
3517
2.41k
  }
3518
3519
2.60k
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
3520
    //SMLoc Loc = Parser.getTok().getLoc();
3521
298
    Parser.eatToEndOfStatement();
3522
    //return Error(Loc, "unexpected token in argument list");
3523
298
    return true;
3524
298
  }
3525
3526
2.31k
  Parser.Lex(); // Consume the EndOfStatement
3527
2.31k
  return false;
3528
2.60k
}
3529
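The mnemonic rewriting at the top of ParseInstruction means the condensed "beq"-style spellings and the canonical "b.eq" forms should produce identical code. A hedged usage sketch with a backward local label:

#include <keystone/keystone.h>
#include <cstring>
#include <cstdio>

int main() {
  ks_engine *ks = nullptr;
  if (ks_open(KS_ARCH_ARM64, KS_MODE_LITTLE_ENDIAN, &ks) != KS_ERR_OK)
    return 1;
  unsigned char *a = nullptr, *b = nullptr;
  size_t sa = 0, sb = 0, count = 0;
  int ra = ks_asm(ks, "top: beq top", 0, &a, &sa, &count);
  int rb = ks_asm(ks, "top: b.eq top", 0, &b, &sb, &count);
  if (ra == 0 && rb == 0)
    printf("beq == b.eq: %s\n",
           (sa == sb && memcmp(a, b, sa) == 0) ? "yes" : "no");
  if (ra == 0) ks_free(a);
  if (rb == 0) ks_free(b);
  ks_close(ks);
  return 0;
}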
3530
// FIXME: This entire function is a giant hack to provide us with decent
3531
// operand range validation/diagnostics until TableGen/MC can be extended
3532
// to support autogeneration of this kind of validation.
3533
bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3534
                                         SmallVectorImpl<SMLoc> &Loc)
3535
1.96k
{
3536
1.96k
  const MCRegisterInfo *RI = getContext().getRegisterInfo();
3537
  // Check for indexed addressing modes w/ the base register being the
3538
  // same as a destination/source register or pair load where
3539
  // the Rt == Rt2. All of those are undefined behaviour.
3540
1.96k
  switch (Inst.getOpcode()) {
3541
0
  case AArch64::LDPSWpre:
3542
0
  case AArch64::LDPWpost:
3543
0
  case AArch64::LDPWpre:
3544
0
  case AArch64::LDPXpost:
3545
0
  case AArch64::LDPXpre: {
3546
0
    unsigned Rt = Inst.getOperand(1).getReg();
3547
0
    unsigned Rt2 = Inst.getOperand(2).getReg();
3548
0
    unsigned Rn = Inst.getOperand(3).getReg();
3549
0
    if (RI->isSubRegisterEq(Rn, Rt))
3550
      //return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3551
      //                     "is also a destination");
3552
0
      return true;
3553
0
    if (RI->isSubRegisterEq(Rn, Rt2))
3554
      //return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3555
      //                     "is also a destination");
3556
0
      return true;
3557
    // FALLTHROUGH
3558
0
  }
3559
0
  case AArch64::LDPDi:
3560
0
  case AArch64::LDPQi:
3561
0
  case AArch64::LDPSi:
3562
0
  case AArch64::LDPSWi:
3563
0
  case AArch64::LDPWi:
3564
0
  case AArch64::LDPXi: {
3565
0
    unsigned Rt = Inst.getOperand(0).getReg();
3566
0
    unsigned Rt2 = Inst.getOperand(1).getReg();
3567
0
    if (Rt == Rt2)
3568
      //return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3569
0
      return true;
3570
0
    break;
3571
0
  }
3572
0
  case AArch64::LDPDpost:
3573
0
  case AArch64::LDPDpre:
3574
0
  case AArch64::LDPQpost:
3575
0
  case AArch64::LDPQpre:
3576
0
  case AArch64::LDPSpost:
3577
0
  case AArch64::LDPSpre:
3578
0
  case AArch64::LDPSWpost: {
3579
0
    unsigned Rt = Inst.getOperand(1).getReg();
3580
0
    unsigned Rt2 = Inst.getOperand(2).getReg();
3581
0
    if (Rt == Rt2)
3582
      //return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3583
0
      return true;
3584
0
    break;
3585
0
  }
3586
0
  case AArch64::STPDpost:
3587
0
  case AArch64::STPDpre:
3588
0
  case AArch64::STPQpost:
3589
0
  case AArch64::STPQpre:
3590
0
  case AArch64::STPSpost:
3591
0
  case AArch64::STPSpre:
3592
0
  case AArch64::STPWpost:
3593
0
  case AArch64::STPWpre:
3594
0
  case AArch64::STPXpost:
3595
0
  case AArch64::STPXpre: {
3596
0
    unsigned Rt = Inst.getOperand(1).getReg();
3597
0
    unsigned Rt2 = Inst.getOperand(2).getReg();
3598
0
    unsigned Rn = Inst.getOperand(3).getReg();
3599
0
    if (RI->isSubRegisterEq(Rn, Rt))
3600
      //return Error(Loc[0], "unpredictable STP instruction, writeback base "
3601
      //                     "is also a source");
3602
0
      return true;
3603
0
    if (RI->isSubRegisterEq(Rn, Rt2))
3604
      //return Error(Loc[1], "unpredictable STP instruction, writeback base "
3605
      //                     "is also a source");
3606
0
      return true;
3607
0
    break;
3608
0
  }
3609
0
  case AArch64::LDRBBpre:
3610
0
  case AArch64::LDRBpre:
3611
0
  case AArch64::LDRHHpre:
3612
0
  case AArch64::LDRHpre:
3613
0
  case AArch64::LDRSBWpre:
3614
0
  case AArch64::LDRSBXpre:
3615
0
  case AArch64::LDRSHWpre:
3616
0
  case AArch64::LDRSHXpre:
3617
0
  case AArch64::LDRSWpre:
3618
0
  case AArch64::LDRWpre:
3619
0
  case AArch64::LDRXpre:
3620
0
  case AArch64::LDRBBpost:
3621
0
  case AArch64::LDRBpost:
3622
0
  case AArch64::LDRHHpost:
3623
0
  case AArch64::LDRHpost:
3624
0
  case AArch64::LDRSBWpost:
3625
0
  case AArch64::LDRSBXpost:
3626
0
  case AArch64::LDRSHWpost:
3627
0
  case AArch64::LDRSHXpost:
3628
0
  case AArch64::LDRSWpost:
3629
0
  case AArch64::LDRWpost:
3630
0
  case AArch64::LDRXpost: {
3631
0
    unsigned Rt = Inst.getOperand(1).getReg();
3632
0
    unsigned Rn = Inst.getOperand(2).getReg();
3633
0
    if (RI->isSubRegisterEq(Rn, Rt))
3634
      //return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3635
      //                     "is also a source");
3636
0
      return true;
3637
0
    break;
3638
0
  }
3639
0
  case AArch64::STRBBpost:
3640
0
  case AArch64::STRBpost:
3641
0
  case AArch64::STRHHpost:
3642
0
  case AArch64::STRHpost:
3643
0
  case AArch64::STRWpost:
3644
0
  case AArch64::STRXpost:
3645
0
  case AArch64::STRBBpre:
3646
0
  case AArch64::STRBpre:
3647
0
  case AArch64::STRHHpre:
3648
0
  case AArch64::STRHpre:
3649
0
  case AArch64::STRWpre:
3650
0
  case AArch64::STRXpre: {
3651
0
    unsigned Rt = Inst.getOperand(1).getReg();
3652
0
    unsigned Rn = Inst.getOperand(2).getReg();
3653
0
    if (RI->isSubRegisterEq(Rn, Rt))
3654
      //return Error(Loc[0], "unpredictable STR instruction, writeback base "
3655
      //                     "is also a source");
3656
0
      return true;
3657
0
    break;
3658
0
  }
3659
1.96k
  }
3660
3661
  // Now check immediate ranges. Separate from the above as there is overlap
3662
  // in the instructions being checked and this keeps the nested conditionals
3663
  // to a minimum.
3664
1.96k
  switch (Inst.getOpcode()) {
3665
0
  case AArch64::ADDSWri:
3666
2
  case AArch64::ADDSXri:
3667
2
  case AArch64::ADDWri:
3668
129
  case AArch64::ADDXri:
3669
129
  case AArch64::SUBSWri:
3670
129
  case AArch64::SUBSXri:
3671
129
  case AArch64::SUBWri:
3672
134
  case AArch64::SUBXri: {
3673
    // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3674
    // some slight duplication here.
3675
134
    if (Inst.getOperand(2).isExpr()) {
3676
0
      const MCExpr *Expr = Inst.getOperand(2).getExpr();
3677
0
      AArch64MCExpr::VariantKind ELFRefKind;
3678
0
      MCSymbolRefExpr::VariantKind DarwinRefKind;
3679
0
      int64_t Addend;
3680
0
      if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3681
        //return Error(Loc[2], "invalid immediate expression");
3682
0
        return true;
3683
0
      }
3684
3685
      // Only allow these with ADDXri.
3686
0
      if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3687
0
          DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3688
0
          Inst.getOpcode() == AArch64::ADDXri)
3689
0
        return false;
3690
3691
      // Only allow these with ADDXri/ADDWri
3692
0
      if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3693
0
          ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3694
0
          ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3695
0
          ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3696
0
          ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3697
0
          ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3698
0
          ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3699
0
          ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3700
0
          (Inst.getOpcode() == AArch64::ADDXri ||
3701
0
          Inst.getOpcode() == AArch64::ADDWri))
3702
0
        return false;
3703
3704
      // Don't allow expressions in the immediate field otherwise
3705
      //return Error(Loc[2], "invalid immediate expression");
3706
0
      return true;
3707
0
    }
3708
134
    return false;
3709
134
  }
3710
1.83k
  default:
3711
1.83k
    return false;
3712
1.96k
  }
3713
1.96k
}
3714
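A hedged usage sketch of the Rt==Rt2 check above when driven through keystone's C API: the unpredictable pair load is expected to be rejected, while the well-formed one assembles.

#include <keystone/keystone.h>
#include <cstdio>

int main() {
  ks_engine *ks = nullptr;
  if (ks_open(KS_ARCH_ARM64, KS_MODE_LITTLE_ENDIAN, &ks) != KS_ERR_OK)
    return 1;
  unsigned char *enc = nullptr;
  size_t size = 0, count = 0;
  int ok = ks_asm(ks, "ldp x0, x1, [x2]", 0, &enc, &size, &count);
  if (ok == 0)
    ks_free(enc);
  int bad = ks_asm(ks, "ldp x0, x0, [x2]", 0, &enc, &size, &count);
  if (bad == 0)
    ks_free(enc);
  // Expect ok == 0 (accepted) and bad != 0 (rejected as unpredictable).
  printf("well-formed rc=%d, Rt==Rt2 rc=%d\n", ok, bad);
  ks_close(ks);
  return 0;
}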
3715
bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode)
3716
337
{
3717
337
  switch (ErrCode) {
3718
0
  case Match_MissingFeature:
3719
    //return Error(Loc,
3720
    //             "instruction requires a CPU feature not currently enabled");
3721
0
    return true;
3722
92
  case Match_InvalidOperand:
3723
    //return Error(Loc, "invalid operand for instruction");
3724
92
    return true;
3725
11
  case Match_InvalidSuffix:
3726
    //return Error(Loc, "invalid type suffix for instruction");
3727
11
    return true;
3728
1
  case Match_InvalidCondCode:
3729
    //return Error(Loc, "expected AArch64 condition code");
3730
1
    return true;
3731
0
  case Match_AddSubRegExtendSmall:
3732
    //return Error(Loc,
3733
    //  "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3734
0
    return true;
3735
0
  case Match_AddSubRegExtendLarge:
3736
    //return Error(Loc,
3737
    //  "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3738
0
    return true;
3739
0
  case Match_AddSubSecondSource:
3740
    //return Error(Loc,
3741
    //  "expected compatible register, symbol or integer in range [0, 4095]");
3742
0
    return true;
3743
1
  case Match_LogicalSecondSource:
3744
    //return Error(Loc, "expected compatible register or logical immediate");
3745
1
    return true;
3746
0
  case Match_InvalidMovImm32Shift:
3747
    //return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3748
0
    return true;
3749
0
  case Match_InvalidMovImm64Shift:
3750
    //return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3751
0
    return true;
3752
0
  case Match_AddSubRegShift32:
3753
    //return Error(Loc,
3754
    //   "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3755
0
    return true;
3756
0
  case Match_AddSubRegShift64:
3757
    //return Error(Loc,
3758
    //   "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3759
0
    return true;
3760
0
  case Match_InvalidFPImm:
3761
    //return Error(Loc,
3762
    //             "expected compatible register or floating-point constant");
3763
0
    return true;
3764
0
  case Match_InvalidMemoryIndexedSImm9:
3765
    //return Error(Loc, "index must be an integer in range [-256, 255].");
3766
0
    return true;
3767
0
  case Match_InvalidMemoryIndexed4SImm7:
3768
    //return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3769
0
    return true;
3770
0
  case Match_InvalidMemoryIndexed8SImm7:
3771
    //return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3772
0
    return true;
3773
0
  case Match_InvalidMemoryIndexed16SImm7:
3774
    //return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3775
0
    return true;
3776
0
  case Match_InvalidMemoryWExtend8:
3777
    //return Error(Loc,
3778
    //             "expected 'uxtw' or 'sxtw' with optional shift of #0");
3779
0
    return true;
3780
0
  case Match_InvalidMemoryWExtend16:
3781
    //return Error(Loc,
3782
    //             "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3783
0
    return true;
3784
0
  case Match_InvalidMemoryWExtend32:
3785
    //return Error(Loc,
3786
    //             "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3787
0
    return true;
3788
0
  case Match_InvalidMemoryWExtend64:
3789
    //return Error(Loc,
3790
    //             "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3791
0
    return true;
3792
0
  case Match_InvalidMemoryWExtend128:
3793
    //return Error(Loc,
3794
    //             "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3795
0
    return true;
3796
0
  case Match_InvalidMemoryXExtend8:
3797
    //return Error(Loc,
3798
    //             "expected 'lsl' or 'sxtx' with optional shift of #0");
3799
0
    return true;
3800
0
  case Match_InvalidMemoryXExtend16:
3801
    //return Error(Loc,
3802
    //             "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3803
0
    return true;
3804
0
  case Match_InvalidMemoryXExtend32:
3805
    //return Error(Loc,
3806
    //             "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3807
0
    return true;
3808
0
  case Match_InvalidMemoryXExtend64:
3809
    //return Error(Loc,
3810
    //             "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3811
0
    return true;
3812
0
  case Match_InvalidMemoryXExtend128:
3813
    //return Error(Loc,
3814
    //             "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
3815
0
    return true;
3816
0
  case Match_InvalidMemoryIndexed1:
3817
    //return Error(Loc, "index must be an integer in range [0, 4095].");
3818
0
    return true;
3819
0
  case Match_InvalidMemoryIndexed2:
3820
    //return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3821
0
    return true;
3822
0
  case Match_InvalidMemoryIndexed4:
3823
    //return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3824
0
    return true;
3825
0
  case Match_InvalidMemoryIndexed8:
3826
    //return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3827
0
    return true;
3828
0
  case Match_InvalidMemoryIndexed16:
3829
    //return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
3830
0
    return true;
3831
0
  case Match_InvalidImm0_1:
3832
    //return Error(Loc, "immediate must be an integer in range [0, 1].");
3833
0
    return true;
3834
4
  case Match_InvalidImm0_7:
3835
    //return Error(Loc, "immediate must be an integer in range [0, 7].");
3836
4
    return true;
3837
0
  case Match_InvalidImm0_15:
3838
    //return Error(Loc, "immediate must be an integer in range [0, 15].");
3839
0
    return true;
3840
0
  case Match_InvalidImm0_31:
3841
    //return Error(Loc, "immediate must be an integer in range [0, 31].");
3842
0
    return true;
3843
0
  case Match_InvalidImm0_63:
3844
    //return Error(Loc, "immediate must be an integer in range [0, 63].");
3845
0
    return true;
3846
0
  case Match_InvalidImm0_127:
3847
    //return Error(Loc, "immediate must be an integer in range [0, 127].");
3848
0
    return true;
3849
1
  case Match_InvalidImm0_65535:
3850
    //return Error(Loc, "immediate must be an integer in range [0, 65535].");
3851
1
    return true;
3852
0
  case Match_InvalidImm1_8:
3853
    //return Error(Loc, "immediate must be an integer in range [1, 8].");
3854
0
    return true;
3855
0
  case Match_InvalidImm1_16:
3856
    //return Error(Loc, "immediate must be an integer in range [1, 16].");
3857
0
    return true;
3858
0
  case Match_InvalidImm1_32:
3859
    //return Error(Loc, "immediate must be an integer in range [1, 32].");
3860
0
    return true;
3861
0
  case Match_InvalidImm1_64:
3862
    //return Error(Loc, "immediate must be an integer in range [1, 64].");
3863
0
    return true;
3864
0
  case Match_InvalidIndex1:
3865
    //return Error(Loc, "expected lane specifier '[1]'");
3866
0
    return true;
3867
0
  case Match_InvalidIndexB:
3868
    //return Error(Loc, "vector lane must be an integer in range [0, 15].");
3869
0
    return true;
3870
0
  case Match_InvalidIndexH:
3871
    //return Error(Loc, "vector lane must be an integer in range [0, 7].");
3872
0
    return true;
3873
0
  case Match_InvalidIndexS:
3874
    //return Error(Loc, "vector lane must be an integer in range [0, 3].");
3875
0
    return true;
3876
0
  case Match_InvalidIndexD:
3877
    //return Error(Loc, "vector lane must be an integer in range [0, 1].");
3878
0
    return true;
3879
1
  case Match_InvalidLabel:
3880
    //return Error(Loc, "expected label or encodable integer pc offset");
3881
1
    return true;
3882
0
  case Match_MRS:
3883
    //return Error(Loc, "expected readable system register");
3884
0
    return true;
3885
0
  case Match_MSR:
3886
    //return Error(Loc, "expected writable system register or pstate");
3887
0
    return true;
3888
226
  case Match_MnemonicFail:
3889
    //return Error(Loc, "unrecognized instruction mnemonic");
3890
226
    return true;
3891
0
  default:
3892
0
    llvm_unreachable("unexpected error code!");
3893
337
  }
3894
337
}
3895
3896
static const char *getSubtargetFeatureName(uint64_t Val);
3897
3898
// Returns true on error.
3899
bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3900
                                               OperandVector &Operands,
3901
                                               MCStreamer &Out,
3902
                                               uint64_t &ErrorInfo,
3903
                                               bool MatchingInlineAsm, unsigned int &ErrorCode, uint64_t &Address)
3904
2.31k
{
3905
2.31k
  assert(!Operands.empty() && "Unexpect empty operand list!");
3906
0
  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3907
2.31k
  assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3908
3909
0
  StringRef Tok = Op.getToken();
3910
2.31k
  unsigned NumOperands = Operands.size();
3911
3912
2.31k
  if (NumOperands == 4 && Tok == "lsl") {
3913
0
    AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3914
0
    AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3915
0
    if (Op2.isReg() && Op3.isImm()) {
3916
0
      const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3917
0
      if (Op3CE) {
3918
0
        uint64_t Op3Val = Op3CE->getValue();
3919
0
        uint64_t NewOp3Val = 0;
3920
0
        uint64_t NewOp4Val = 0;
3921
0
        if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3922
0
                Op2.getReg())) {
3923
0
          NewOp3Val = (32 - Op3Val) & 0x1f;
3924
0
          NewOp4Val = 31 - Op3Val;
3925
0
        } else {
3926
0
          NewOp3Val = (64 - Op3Val) & 0x3f;
3927
0
          NewOp4Val = 63 - Op3Val;
3928
0
        }
3929
3930
0
        const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
3931
0
        const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
3932
3933
0
        Operands[0] = AArch64Operand::CreateToken(
3934
0
            "ubfm", false, Op.getStartLoc(), getContext());
3935
0
        Operands.push_back(AArch64Operand::CreateImm(
3936
0
            NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3937
0
        Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3938
0
                                                Op3.getEndLoc(), getContext());
3939
0
      }
3940
0
    }
3941
2.31k
  } else if (NumOperands == 4 && Tok == "bfc") {
3942
    // FIXME: Horrible hack to handle BFC->BFM alias.
3943
1
    AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3944
1
    AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
3945
1
    AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
3946
3947
1
    if (Op1.isReg() && LSBOp.isImm() && WidthOp.isImm()) {
3948
0
      const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
3949
0
      const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
3950
3951
0
      if (LSBCE && WidthCE) {
3952
0
        uint64_t LSB = LSBCE->getValue();
3953
0
        uint64_t Width = WidthCE->getValue();
3954
3955
0
        uint64_t RegWidth = 0;
3956
0
        if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3957
0
                Op1.getReg()))
3958
0
          RegWidth = 64;
3959
0
        else
3960
0
          RegWidth = 32;
3961
3962
0
        if (LSB >= RegWidth) {
3963
          //return Error(LSBOp.getStartLoc(),
3964
          //             "expected integer in range [0, 31]");
3965
0
          ErrorCode = KS_ERR_ASM_INVALIDOPERAND;
3966
0
          return true;
3967
0
        }
3968
0
        if (Width < 1 || Width > RegWidth) {
3969
          //return Error(WidthOp.getStartLoc(),
3970
          //             "expected integer in range [1, 32]");
3971
0
          ErrorCode = KS_ERR_ASM_INVALIDOPERAND;
3972
0
          return true;
3973
0
        }
3974
3975
0
        uint64_t ImmR = 0;
3976
0
        if (RegWidth == 32)
3977
0
          ImmR = (32 - LSB) & 0x1f;
3978
0
        else
3979
0
          ImmR = (64 - LSB) & 0x3f;
3980
3981
0
        uint64_t ImmS = Width - 1;
3982
3983
0
        if (ImmR != 0 && ImmS >= ImmR) {
3984
          //return Error(WidthOp.getStartLoc(),
3985
          //             "requested insert overflows register");
3986
0
          ErrorCode = KS_ERR_ASM_INVALIDOPERAND;
3987
0
          return true;
3988
0
        }
3989
3990
0
        const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
3991
0
        const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
3992
0
        Operands[0] = AArch64Operand::CreateToken(
3993
0
              "bfm", false, Op.getStartLoc(), getContext());
3994
0
        Operands[2] = AArch64Operand::CreateReg(
3995
0
            RegWidth == 32 ? AArch64::WZR : AArch64::XZR, false, SMLoc(),
3996
0
            SMLoc(), getContext());
3997
0
        Operands[3] = AArch64Operand::CreateImm(
3998
0
            ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
3999
0
        Operands.emplace_back(
4000
0
            AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
4001
0
                                      WidthOp.getEndLoc(), getContext()));
4002
0
      }
4003
0
    }
4004
2.30k
  } else if (NumOperands == 5) {
4005
    // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
4006
    // UBFIZ -> UBFM aliases.
4007
10
    if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
4008
1
      AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4009
1
      AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4010
1
      AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4011
4012
1
      if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
4013
0
        const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4014
0
        const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4015
4016
0
        if (Op3CE && Op4CE) {
4017
0
          uint64_t Op3Val = Op3CE->getValue();
4018
0
          uint64_t Op4Val = Op4CE->getValue();
4019
4020
0
          uint64_t RegWidth = 0;
4021
0
          if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4022
0
                  Op1.getReg()))
4023
0
            RegWidth = 64;
4024
0
          else
4025
0
            RegWidth = 32;
4026
4027
0
          if (Op3Val >= RegWidth) {
4028
            //return Error(Op3.getStartLoc(),
4029
            //             "expected integer in range [0, 31]");
4030
0
            ErrorCode = KS_ERR_ASM_INVALIDOPERAND;
4031
0
            return true;
4032
0
          }
4033
0
          if (Op4Val < 1 || Op4Val > RegWidth) {
4034
            //return Error(Op4.getStartLoc(),
4035
            //             "expected integer in range [1, 32]");
4036
0
            ErrorCode = KS_ERR_ASM_INVALIDOPERAND;
4037
0
            return true;
4038
0
          }
4039
4040
0
          uint64_t NewOp3Val = 0;
4041
0
          if (RegWidth == 32)
4042
0
            NewOp3Val = (32 - Op3Val) & 0x1f;
4043
0
          else
4044
0
            NewOp3Val = (64 - Op3Val) & 0x3f;
4045
4046
0
          uint64_t NewOp4Val = Op4Val - 1;
4047
4048
0
          if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val) {
4049
            //return Error(Op4.getStartLoc(),
4050
            //             "requested insert overflows register");
4051
0
            ErrorCode = KS_ERR_ASM_INVALIDOPERAND;
4052
0
            return true;
4053
0
          }
4054
4055
0
          const MCExpr *NewOp3 =
4056
0
              MCConstantExpr::create(NewOp3Val, getContext());
4057
0
          const MCExpr *NewOp4 =
4058
0
              MCConstantExpr::create(NewOp4Val, getContext());
4059
0
          Operands[3] = AArch64Operand::CreateImm(
4060
0
              NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
4061
0
          Operands[4] = AArch64Operand::CreateImm(
4062
0
              NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4063
0
          if (Tok == "bfi")
4064
0
            Operands[0] = AArch64Operand::CreateToken(
4065
0
                "bfm", false, Op.getStartLoc(), getContext());
4066
0
          else if (Tok == "sbfiz")
4067
0
            Operands[0] = AArch64Operand::CreateToken(
4068
0
                "sbfm", false, Op.getStartLoc(), getContext());
4069
0
          else if (Tok == "ubfiz")
4070
0
            Operands[0] = AArch64Operand::CreateToken(
4071
0
                "ubfm", false, Op.getStartLoc(), getContext());
4072
0
          else
4073
0
            llvm_unreachable("No valid mnemonic for alias?");
4074
0
        }
4075
0
      }
4076
4077
      // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
4078
      // UBFX -> UBFM aliases.
4079
9
    } else if (NumOperands == 5 &&
4080
9
               (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
4081
0
      AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4082
0
      AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4083
0
      AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4084
4085
0
      if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
4086
0
        const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4087
0
        const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4088
4089
0
        if (Op3CE && Op4CE) {
4090
0
          uint64_t Op3Val = Op3CE->getValue();
4091
0
          uint64_t Op4Val = Op4CE->getValue();
4092
4093
0
          uint64_t RegWidth = 0;
4094
0
          if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4095
0
                  Op1.getReg()))
4096
0
            RegWidth = 64;
4097
0
          else
4098
0
            RegWidth = 32;
4099
4100
0
          if (Op3Val >= RegWidth) {
4101
          // TODO: save ErrorCode
4102
            //return Error(Op3.getStartLoc(),
4103
            //             "expected integer in range [0, 31]");
4104
0
            ErrorCode = KS_ERR_ASM_INVALIDOPERAND;
4105
0
            return true;
4106
0
          }
4107
0
          if (Op4Val < 1 || Op4Val > RegWidth) {
4108
          // TODO: save ErrorCode
4109
            //return Error(Op4.getStartLoc(),
4110
            //             "expected integer in range [1, 32]");
4111
0
            ErrorCode = KS_ERR_ASM_INVALIDOPERAND;
4112
0
            return true;
4113
0
          }
4114
4115
0
          uint64_t NewOp4Val = Op3Val + Op4Val - 1;
4116
4117
0
          if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val) {
4118
          // TODO: save ErrorCode
4119
            //return Error(Op4.getStartLoc(),
4120
            //             "requested extract overflows register");
4121
0
            ErrorCode = KS_ERR_ASM_INVALIDOPERAND;
4122
0
            return true;
4123
0
          }
4124
4125
0
          const MCExpr *NewOp4 =
4126
0
              MCConstantExpr::create(NewOp4Val, getContext());
4127
0
          Operands[4] = AArch64Operand::CreateImm(
4128
0
              NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4129
0
          if (Tok == "bfxil")
4130
0
            Operands[0] = AArch64Operand::CreateToken(
4131
0
                "bfm", false, Op.getStartLoc(), getContext());
4132
0
          else if (Tok == "sbfx")
4133
0
            Operands[0] = AArch64Operand::CreateToken(
4134
0
                "sbfm", false, Op.getStartLoc(), getContext());
4135
0
          else if (Tok == "ubfx")
4136
0
            Operands[0] = AArch64Operand::CreateToken(
4137
0
                "ubfm", false, Op.getStartLoc(), getContext());
4138
0
          else
4139
0
            llvm_unreachable("No valid mnemonic for alias?");
4140
0
        }
4141
0
      }
4142
0
    }
4143
10
  }
4144
  // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
4145
  //        InstAlias can't quite handle this since the reg classes aren't
4146
  //        subclasses.
4147
2.31k
  if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
4148
    // The source register can be Wn here, but the matcher expects a
4149
    // GPR64. Twiddle it here if necessary.
4150
0
    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4151
0
    if (Op.isReg()) {
4152
0
      unsigned Reg = getXRegFromWReg(Op.getReg());
4153
0
      Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
4154
0
                                              Op.getEndLoc(), getContext());
4155
0
    }
4156
0
  }
4157
  // FIXME: Likewise for sxt[bh] with a Xd dst operand
4158
2.31k
  else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
4159
0
    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4160
0
    if (Op.isReg() &&
4161
0
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4162
0
            Op.getReg())) {
4163
      // The source register can be Wn here, but the matcher expects a
4164
      // GPR64. Twiddle it here if necessary.
4165
0
      AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4166
0
      if (Op.isReg()) {
4167
0
        unsigned Reg = getXRegFromWReg(Op.getReg());
4168
0
        Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
4169
0
                                                Op.getEndLoc(), getContext());
4170
0
      }
4171
0
    }
4172
0
  }
4173
  // FIXME: Likewise for uxt[bh] with a Xd dst operand
4174
2.31k
  else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
4175
0
    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4176
0
    if (Op.isReg() &&
4177
0
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4178
0
            Op.getReg())) {
4179
      // The source register can be Wn here, but the matcher expects a
4180
      // GPR32. Twiddle it here if necessary.
4181
0
      AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4182
0
      if (Op.isReg()) {
4183
0
        unsigned Reg = getWRegFromXReg(Op.getReg());
4184
0
        Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
4185
0
                                                Op.getEndLoc(), getContext());
4186
0
      }
4187
0
    }
4188
0
  }
4189
4190
  // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
4191
2.31k
  if (NumOperands == 3 && Tok == "fmov") {
4192
23
    AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
4193
23
    AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
4194
23
    if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
4195
10
      unsigned zreg =
4196
10
          !AArch64MCRegisterClasses[AArch64::FPR64RegClassID].contains(
4197
10
              RegOp.getReg())
4198
10
              ? AArch64::WZR
4199
10
              : AArch64::XZR;
4200
10
      Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
4201
10
                                              Op.getEndLoc(), getContext());
4202
10
    }
4203
23
  }
4204
4205
2.31k
  MCInst Inst(Address);
4206
  // First try to match against the secondary set of tables containing the
4207
  // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
4208
2.31k
  unsigned MatchResult =
4209
2.31k
      MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
4210
4211
  // If that fails, try against the alternate table containing long-form NEON:
4212
  // "fadd v0.2s, v1.2s, v2.2s"
4213
2.31k
  if (MatchResult != Match_Success) {
4214
    // But first, save the short-form match result: we can use it in case the
4215
    // long-form match also fails.
4216
343
    auto ShortFormNEONErrorInfo = ErrorInfo;
4217
343
    auto ShortFormNEONMatchResult = MatchResult;
4218
4219
343
    MatchResult =
4220
343
        MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
4221
4222
    // Now, both matches failed, and the long-form match failed on the mnemonic
4223
    // suffix token operand.  The short-form match failure is probably more
4224
    // relevant: use it instead.
4225
343
    if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
4226
343
        Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
4227
343
        ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
4228
0
      MatchResult = ShortFormNEONMatchResult;
4229
0
      ErrorInfo = ShortFormNEONErrorInfo;
4230
0
    }
4231
343
  }
4232
4233
  // save the error code
4234
2.31k
  ErrorCode = MatchResult;
4235
4236
2.31k
  switch (MatchResult) {
4237
1.96k
  case Match_Success: {
4238
    // Perform range checking and other semantic validations
4239
1.96k
    SmallVector<SMLoc, 8> OperandLocs;
4240
1.96k
    NumOperands = Operands.size();
4241
4.53k
    for (unsigned i = 1; i < NumOperands; ++i)
4242
2.57k
      OperandLocs.push_back(Operands[i]->getStartLoc());
4243
1.96k
    if (validateInstruction(Inst, OperandLocs))
4244
0
      return true;
4245
4246
1.96k
    Inst.setLoc(IDLoc);
4247
1.96k
    Out.EmitInstruction(Inst, getSTI(), ErrorCode);
4248
1.96k
    if (ErrorCode == 0) {
4249
1.96k
        Address = Inst.getAddress(); // Keystone update address
4250
1.96k
        return false;
4251
1.96k
    } else
4252
0
        return true;
4253
1.96k
  }
4254
1
  case Match_MissingFeature: {
4255
1
    assert(ErrorInfo && "Unknown missing feature!");
4256
    // Special case the error message for the very common case where only
4257
    // a single subtarget feature is missing (neon, e.g.).
4258
0
    std::string Msg = "instruction requires:";
4259
1
    uint64_t Mask = 1;
4260
64
    for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
4261
63
      if (ErrorInfo & Mask) {
4262
1
        Msg += " ";
4263
1
        Msg += getSubtargetFeatureName(ErrorInfo & Mask);
4264
1
      }
4265
63
      Mask <<= 1;
4266
63
    }
4267
    //return Error(IDLoc, Msg);
4268
1
    ErrorCode = KS_ERR_ASM_INVALIDOPERAND;
4269
1
    return true;
4270
1.96k
  }
4271
226
  case Match_MnemonicFail:
4272
226
    return showMatchError(IDLoc, MatchResult);
4273
108
  case Match_InvalidOperand: {
4274
108
    SMLoc ErrorLoc = IDLoc;
4275
4276
108
    if (ErrorInfo != ~0ULL) {
4277
108
      if (ErrorInfo >= Operands.size()) {
4278
        //return Error(IDLoc, "too few operands for instruction");
4279
5
        ErrorCode = KS_ERR_ASM_INVALIDOPERAND;
4280
5
        return true;
4281
5
      }
4282
4283
103
      ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4284
103
      if (ErrorLoc == SMLoc())
4285
0
        ErrorLoc = IDLoc;
4286
103
    }
4287
    // If the match failed on a suffix token operand, tweak the diagnostic
4288
    // accordingly.
4289
103
    if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
4290
103
        ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
4291
11
      MatchResult = Match_InvalidSuffix;
4292
4293
103
    return showMatchError(ErrorLoc, MatchResult);
4294
108
  }
4295
0
  case Match_InvalidMemoryIndexed1:
4296
0
  case Match_InvalidMemoryIndexed2:
4297
0
  case Match_InvalidMemoryIndexed4:
4298
0
  case Match_InvalidMemoryIndexed8:
4299
0
  case Match_InvalidMemoryIndexed16:
4300
1
  case Match_InvalidCondCode:
4301
1
  case Match_AddSubRegExtendSmall:
4302
1
  case Match_AddSubRegExtendLarge:
4303
1
  case Match_AddSubSecondSource:
4304
2
  case Match_LogicalSecondSource:
4305
2
  case Match_AddSubRegShift32:
4306
2
  case Match_AddSubRegShift64:
4307
2
  case Match_InvalidMovImm32Shift:
4308
2
  case Match_InvalidMovImm64Shift:
4309
2
  case Match_InvalidFPImm:
4310
2
  case Match_InvalidMemoryWExtend8:
4311
2
  case Match_InvalidMemoryWExtend16:
4312
2
  case Match_InvalidMemoryWExtend32:
4313
2
  case Match_InvalidMemoryWExtend64:
4314
2
  case Match_InvalidMemoryWExtend128:
4315
2
  case Match_InvalidMemoryXExtend8:
4316
2
  case Match_InvalidMemoryXExtend16:
4317
2
  case Match_InvalidMemoryXExtend32:
4318
2
  case Match_InvalidMemoryXExtend64:
4319
2
  case Match_InvalidMemoryXExtend128:
4320
2
  case Match_InvalidMemoryIndexed4SImm7:
4321
2
  case Match_InvalidMemoryIndexed8SImm7:
4322
2
  case Match_InvalidMemoryIndexed16SImm7:
4323
2
  case Match_InvalidMemoryIndexedSImm9:
4324
2
  case Match_InvalidImm0_1:
4325
6
  case Match_InvalidImm0_7:
4326
6
  case Match_InvalidImm0_15:
4327
6
  case Match_InvalidImm0_31:
4328
6
  case Match_InvalidImm0_63:
4329
6
  case Match_InvalidImm0_127:
4330
7
  case Match_InvalidImm0_65535:
4331
7
  case Match_InvalidImm1_8:
4332
7
  case Match_InvalidImm1_16:
4333
7
  case Match_InvalidImm1_32:
4334
7
  case Match_InvalidImm1_64:
4335
7
  case Match_InvalidIndex1:
4336
7
  case Match_InvalidIndexB:
4337
7
  case Match_InvalidIndexH:
4338
7
  case Match_InvalidIndexS:
4339
7
  case Match_InvalidIndexD:
4340
8
  case Match_InvalidLabel:
4341
8
  case Match_MSR:
4342
8
  case Match_MRS: {
4343
8
    if (ErrorInfo >= Operands.size()) {
4344
      //return Error(IDLoc, "too few operands for instruction");
4345
0
      ErrorCode = KS_ERR_ASM_INVALIDOPERAND;
4346
0
      return true;
4347
0
    }
4348
    // Any time we get here, there's nothing fancy to do. Just get the
4349
    // operand SMLoc and display the diagnostic.
4350
8
    SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4351
8
    if (ErrorLoc == SMLoc())
4352
0
      ErrorLoc = IDLoc;
4353
8
    return showMatchError(ErrorLoc, MatchResult);
4354
8
  }
4355
2.31k
  }
4356
4357
2.31k
  llvm_unreachable("Implement any new match types added!");
4358
2.31k
}
4359
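The alias rewrites in MatchAndEmitInstruction above (LSL, BFC/BFI/SBFIZ/UBFIZ and BFXIL/SBFX/UBFX) all reduce to the same immr/imms arithmetic. A standalone sketch of that arithmetic, with two worked cases (the range and overflow checks performed above are omitted here):

#include <cstdint>
#include <cstdio>

struct BFMImms { uint64_t ImmR, ImmS; };

// BFI/BFC/SBFIZ/UBFIZ Rd, Rn, #lsb, #width -> *BFM Rd, Rn, #ImmR, #ImmS.
// The same (RegWidth - LSB) & (RegWidth - 1) rotation drives the LSL->UBFM case.
static BFMImms insertAlias(uint64_t LSB, uint64_t Width, unsigned RegWidth) {
  return { (RegWidth - LSB) & (RegWidth - 1), Width - 1 };
}

// BFXIL/SBFX/UBFX Rd, Rn, #lsb, #width -> *BFM Rd, Rn, #lsb, #(lsb + width - 1).
static BFMImms extractAlias(uint64_t LSB, uint64_t Width) {
  return { LSB, LSB + Width - 1 };
}

int main() {
  BFMImms bfi  = insertAlias(3, 4, 32);  // bfi  w0, w1, #3, #4 -> bfm  w0, w1, #29, #3
  BFMImms ubfx = extractAlias(3, 4);     // ubfx w0, w1, #3, #4 -> ubfm w0, w1, #3, #6
  std::printf("bfm immr=%llu imms=%llu, ubfm immr=%llu imms=%llu\n",
              (unsigned long long)bfi.ImmR, (unsigned long long)bfi.ImmS,
              (unsigned long long)ubfx.ImmR, (unsigned long long)ubfx.ImmS);
  return 0;
}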
4360
/// ParseDirective parses the arm specific directives
4361
46.2k
bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
4362
46.2k
  const MCObjectFileInfo::Environment Format =
4363
46.2k
    getContext().getObjectFileInfo()->getObjectFileType();
4364
46.2k
  bool IsMachO = Format == MCObjectFileInfo::IsMachO;
4365
46.2k
  bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
4366
4367
46.2k
  StringRef IDVal = DirectiveID.getIdentifier();
4368
46.2k
  SMLoc Loc = DirectiveID.getLoc();
4369
46.2k
  if (IDVal == ".hword")
4370
0
    return parseDirectiveWord(2, Loc);
4371
46.2k
  if (IDVal == ".word")
4372
4.56k
    return parseDirectiveWord(4, Loc);
4373
41.6k
  if (IDVal == ".xword")
4374
1
    return parseDirectiveWord(8, Loc);
4375
41.6k
  if (IDVal == ".tlsdesccall")
4376
7
    return parseDirectiveTLSDescCall(Loc);
4377
41.6k
  if (IDVal == ".ltorg" || IDVal == ".pool")
4378
431
    return parseDirectiveLtorg(Loc);
4379
41.2k
  if (IDVal == ".unreq")
4380
17
    return parseDirectiveUnreq(Loc);
4381
4382
41.2k
  if (!IsMachO && !IsCOFF) {
4383
41.2k
    if (IDVal == ".inst")
4384
195
      return parseDirectiveInst(Loc);
4385
41.2k
  }
4386
4387
41.0k
  return parseDirectiveLOH(IDVal, Loc);
4388
41.2k
}
4389
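The directive parsers below (.hword/.word/.xword, .inst, .ltorg/.pool, .loh, .tlsdesccall, .unreq) are normally reached through Keystone's public C API rather than called directly. A minimal, hypothetical driver, assuming the standard <keystone/keystone.h> interface and the default ELF object format:

#include <keystone/keystone.h>
#include <cstdio>

int main() {
  ks_engine *ks = nullptr;
  if (ks_open(KS_ARCH_ARM64, KS_MODE_LITTLE_ENDIAN, &ks) != KS_ERR_OK)
    return 1;

  // Each directive is dispatched by ParseDirective above.
  const char *src = ".word 0x11223344\n"  // parseDirectiveWord(4, ...)
                    ".inst 0xd503201f\n"  // parseDirectiveInst: raw NOP encoding
                    ".ltorg\n";           // parseDirectiveLtorg: flush the literal pool

  unsigned char *enc = nullptr;
  size_t size = 0, count = 0;
  if (ks_asm(ks, src, 0, &enc, &size, &count) != 0) {
    std::printf("ks_asm failed: %s\n", ks_strerror(ks_errno(ks)));
  } else {
    std::printf("assembled %zu statements into %zu bytes\n", count, size);
    ks_free(enc);
  }
  ks_close(ks);
  return 0;
}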
4390
/// parseDirectiveWord
4391
///  ::= .word [ expression (, expression)* ]
4392
bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L)
4393
4.56k
{
4394
4.56k
  MCAsmParser &Parser = getParser();
4395
4.56k
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4396
25.4k
    for (;;) {
4397
25.4k
      const MCExpr *Value;
4398
25.4k
      if (getParser().parseExpression(Value))
4399
92
        return true;
4400
4401
25.3k
      getParser().getStreamer().EmitValue(Value, Size, L);
4402
4403
25.3k
      if (getLexer().is(AsmToken::EndOfStatement))
4404
3.05k
        break;
4405
4406
      // FIXME: Improve diagnostic.
4407
22.3k
      if (getLexer().isNot(AsmToken::Comma))
4408
        //return Error(L, "unexpected token in directive");
4409
492
        return true;
4410
21.8k
      Parser.Lex();
4411
21.8k
    }
4412
3.63k
  }
4413
4414
3.98k
  Parser.Lex();
4415
3.98k
  return false;
4416
4.56k
}
4417
4418
/// parseDirectiveInst
4419
///  ::= .inst opcode [, ...]
4420
bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc)
4421
195
{
4422
195
  MCAsmParser &Parser = getParser();
4423
195
  if (getLexer().is(AsmToken::EndOfStatement)) {
4424
0
    Parser.eatToEndOfStatement();
4425
    //Error(Loc, "expected expression following directive");
4426
0
    return false;
4427
0
  }
4428
4429
214
  for (;;) {
4430
214
    const MCExpr *Expr;
4431
4432
214
    if (getParser().parseExpression(Expr)) {
4433
      //Error(Loc, "expected expression");
4434
1
      return false;
4435
1
    }
4436
4437
213
    const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4438
213
    if (!Value) {
4439
      //Error(Loc, "expected constant expression");
4440
154
      return false;
4441
154
    }
4442
4443
59
    getTargetStreamer().emitInst(Value->getValue());
4444
4445
59
    if (getLexer().is(AsmToken::EndOfStatement))
4446
17
      break;
4447
4448
42
    if (getLexer().isNot(AsmToken::Comma)) {
4449
      //Error(Loc, "unexpected token in directive");
4450
23
      return false;
4451
23
    }
4452
4453
19
    Parser.Lex(); // Eat comma.
4454
19
  }
4455
4456
17
  Parser.Lex();
4457
17
  return false;
4458
195
}
4459
4460
// parseDirectiveTLSDescCall:
4461
//   ::= .tlsdesccall symbol
4462
7
bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4463
7
  StringRef Name;
4464
7
  if (getParser().parseIdentifier(Name))
4465
    //return Error(L, "expected symbol after directive");
4466
4
    return true;
4467
4468
3
  MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
4469
3
  const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
4470
3
  Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
4471
4472
3
  MCInst Inst;
4473
3
  Inst.setOpcode(AArch64::TLSDESCCALL);
4474
3
  Inst.addOperand(MCOperand::createExpr(Expr));
4475
4476
3
  unsigned int KsError;
4477
3
  getParser().getStreamer().EmitInstruction(Inst, getSTI(), KsError);
4478
3
  return false;
4479
7
}
4480
4481
/// ::= .loh <lohName | lohId> label1, ..., labelN
4482
/// The number of arguments depends on the loh identifier.
4483
bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc)
4484
41.0k
{
4485
41.0k
  if (IDVal != MCLOHDirectiveName())
4486
40.9k
    return true;
4487
87
  MCLOHType Kind;
4488
87
  if (getParser().getTok().isNot(AsmToken::Identifier)) {
4489
85
    if (getParser().getTok().isNot(AsmToken::Integer))
4490
      //return TokError("expected an identifier or a number in directive");
4491
6
      return true;
4492
    // We successfully get a numeric value for the identifier.
4493
    // Check if it is valid.
4494
79
    bool valid;
4495
79
    int64_t Id = getParser().getTok().getIntVal(valid);
4496
79
    if (!valid)
4497
0
      return true;
4498
79
    if (Id <= -1U && !isValidMCLOHType(Id))
4499
      //return TokError("invalid numeric identifier in directive");
4500
1
      return true;
4501
78
    Kind = (MCLOHType)Id;
4502
78
  } else {
4503
2
    StringRef Name = getTok().getIdentifier();
4504
    // We successfully parse an identifier.
4505
    // Check if it is a recognized one.
4506
2
    int Id = MCLOHNameToId(Name);
4507
4508
2
    if (Id == -1)
4509
      //return TokError("invalid identifier in directive");
4510
2
      return true;
4511
0
    Kind = (MCLOHType)Id;
4512
0
  }
4513
  // Consume the identifier.
4514
78
  Lex();
4515
  // Get the number of arguments of this LOH.
4516
78
  int NbArgs = MCLOHIdToNbArgs(Kind);
4517
4518
78
  assert(NbArgs != -1 && "Invalid number of arguments");
4519
4520
0
  SmallVector<MCSymbol *, 3> Args;
4521
131
  for (int Idx = 0; Idx < NbArgs; ++Idx) {
4522
131
    StringRef Name;
4523
131
    if (getParser().parseIdentifier(Name))
4524
      //return TokError("expected identifier in directive");
4525
15
      return true;
4526
116
    Args.push_back(getContext().getOrCreateSymbol(Name));
4527
4528
116
    if (Idx + 1 == NbArgs)
4529
13
      break;
4530
103
    if (getLexer().isNot(AsmToken::Comma))
4531
      //return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4532
50
      return true;
4533
53
    Lex();
4534
53
  }
4535
13
  if (getLexer().isNot(AsmToken::EndOfStatement))
4536
    //return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4537
7
    return true;
4538
4539
6
  getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4540
6
  return false;
4541
13
}
4542
4543
/// parseDirectiveLtorg
4544
///  ::= .ltorg | .pool
4545
431
bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
4546
431
  getTargetStreamer().emitCurrentConstantPool();
4547
431
  return false;
4548
431
}
4549
4550
/// parseDirectiveReq
4551
///  ::= name .req registername
4552
bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L)
4553
0
{
4554
0
  MCAsmParser &Parser = getParser();
4555
0
  Parser.Lex(); // Eat the '.req' token.
4556
  //SMLoc SRegLoc = getLoc();
4557
0
  unsigned RegNum = tryParseRegister();
4558
0
  bool IsVector = false;
4559
4560
0
  if (RegNum == static_cast<unsigned>(-1)) {
4561
0
    StringRef Kind;
4562
0
    RegNum = tryMatchVectorRegister(Kind, false);
4563
0
    if (!Kind.empty()) {
4564
      //Error(SRegLoc, "vector register without type specifier expected");
4565
0
      return false;
4566
0
    }
4567
0
    IsVector = true;
4568
0
  }
4569
4570
0
  if (RegNum == static_cast<unsigned>(-1)) {
4571
0
    Parser.eatToEndOfStatement();
4572
    //Error(SRegLoc, "register name or alias expected");
4573
0
    return false;
4574
0
  }
4575
4576
  // Shouldn't be anything else.
4577
0
  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
4578
    //Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
4579
0
    Parser.eatToEndOfStatement();
4580
0
    return false;
4581
0
  }
4582
4583
0
  Parser.Lex(); // Consume the EndOfStatement
4584
4585
0
  auto pair = std::make_pair(IsVector, RegNum);
4586
0
  if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
4587
0
    Warning(L, "ignoring redefinition of register alias '" + Name + "'");
4588
4589
0
  return true;
4590
0
}
4591
4592
/// parseDirectiveUnreq
4593
///  ::= .unreq registername
4594
bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L)
4595
17
{
4596
17
  MCAsmParser &Parser = getParser();
4597
17
  if (Parser.getTok().isNot(AsmToken::Identifier)) {
4598
    //Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
4599
9
    Parser.eatToEndOfStatement();
4600
9
    return false;
4601
9
  }
4602
8
  RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4603
8
  Parser.Lex(); // Eat the identifier.
4604
8
  return false;
4605
17
}
4606
4607
bool
4608
AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4609
                                    AArch64MCExpr::VariantKind &ELFRefKind,
4610
                                    MCSymbolRefExpr::VariantKind &DarwinRefKind,
4611
155
                                    int64_t &Addend) {
4612
155
  ELFRefKind = AArch64MCExpr::VK_INVALID;
4613
155
  DarwinRefKind = MCSymbolRefExpr::VK_None;
4614
155
  Addend = 0;
4615
4616
155
  if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4617
0
    ELFRefKind = AE->getKind();
4618
0
    Expr = AE->getSubExpr();
4619
0
  }
4620
4621
155
  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4622
155
  if (SE) {
4623
    // It's a simple symbol reference with no addend.
4624
8
    DarwinRefKind = SE->getKind();
4625
8
    return true;
4626
8
  }
4627
4628
147
  const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4629
147
  if (!BE)
4630
139
    return false;
4631
4632
8
  SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4633
8
  if (!SE)
4634
4
    return false;
4635
4
  DarwinRefKind = SE->getKind();
4636
4637
4
  if (BE->getOpcode() != MCBinaryExpr::Add &&
4638
4
      BE->getOpcode() != MCBinaryExpr::Sub)
4639
3
    return false;
4640
4641
  // See if the addend is is a constant, otherwise there's more going
4642
  // on here than we can deal with.
4643
1
  auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4644
1
  if (!AddendExpr)
4645
0
    return false;
4646
4647
1
  Addend = AddendExpr->getValue();
4648
1
  if (BE->getOpcode() == MCBinaryExpr::Sub)
4649
1
    Addend = -Addend;
4650
4651
  // It's some symbol reference + a constant addend, but really
4652
  // shouldn't use both Darwin and ELF syntax.
4653
1
  return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4654
1
         DarwinRefKind == MCSymbolRefExpr::VK_None;
4655
1
}
4656
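classifySymbolRef above distinguishes three expression shapes: an AArch64MCExpr wrapper (which supplies ELFRefKind), a bare MCSymbolRefExpr (addend stays zero), and a symbol plus or minus a constant. A small illustrative sketch, assuming the same MC factory functions already used in this file (the helper name is hypothetical), that builds the third shape:

#include <cstdint>
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
using namespace llvm_ks;

// "sym + Addend": the MCBinaryExpr(Add, MCSymbolRefExpr, MCConstantExpr) form
// unpacked by the MCBinaryExpr branch above; "sym - Addend" uses Sub and the
// routine negates the extracted addend.
static const MCExpr *buildSymPlusAddend(MCSymbol *Sym, int64_t Addend,
                                        MCContext &Ctx) {
  const MCExpr *Ref = MCSymbolRefExpr::create(Sym, Ctx);
  const MCExpr *Cst = MCConstantExpr::create(Addend, Ctx);
  return MCBinaryExpr::createAdd(Ref, Cst, Ctx);
}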
4657
/// Force static initialization.
4658
26
extern "C" void LLVMInitializeAArch64AsmParser() {
4659
26
  RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
4660
26
  RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
4661
26
  RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64Target);
4662
26
}
4663
4664
#define GET_REGISTER_MATCHER
4665
#define GET_SUBTARGET_FEATURE_NAME
4666
#define GET_MATCHER_IMPLEMENTATION
4667
#include "AArch64GenAsmMatcher.inc"
4668
4669
// Define this matcher function after the auto-generated include so we
4670
// have the match class enum definitions.
4671
unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4672
2.41k
                                                      unsigned Kind) {
4673
2.41k
  AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4674
  // If the kind is a token for a literal immediate, check if our asm
4675
  // operand matches. This is for InstAliases which have a fixed-value
4676
  // immediate in the syntax.
4677
2.41k
  int64_t ExpectedVal;
4678
2.41k
  switch (Kind) {
4679
2.41k
  default:
4680
2.41k
    return Match_InvalidOperand;
4681
0
  case MCK__35_0:
4682
0
    ExpectedVal = 0;
4683
0
    break;
4684
0
  case MCK__35_1:
4685
0
    ExpectedVal = 1;
4686
0
    break;
4687
0
  case MCK__35_12:
4688
0
    ExpectedVal = 12;
4689
0
    break;
4690
0
  case MCK__35_16:
4691
0
    ExpectedVal = 16;
4692
0
    break;
4693
0
  case MCK__35_2:
4694
0
    ExpectedVal = 2;
4695
0
    break;
4696
0
  case MCK__35_24:
4697
0
    ExpectedVal = 24;
4698
0
    break;
4699
0
  case MCK__35_3:
4700
0
    ExpectedVal = 3;
4701
0
    break;
4702
0
  case MCK__35_32:
4703
0
    ExpectedVal = 32;
4704
0
    break;
4705
0
  case MCK__35_4:
4706
0
    ExpectedVal = 4;
4707
0
    break;
4708
0
  case MCK__35_48:
4709
0
    ExpectedVal = 48;
4710
0
    break;
4711
0
  case MCK__35_6:
4712
0
    ExpectedVal = 6;
4713
0
    break;
4714
0
  case MCK__35_64:
4715
0
    ExpectedVal = 64;
4716
0
    break;
4717
0
  case MCK__35_8:
4718
0
    ExpectedVal = 8;
4719
0
    break;
4720
2.41k
  }
4721
0
  if (!Op.isImm())
4722
0
    return Match_InvalidOperand;
4723
0
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
4724
0
  if (!CE)
4725
0
    return Match_InvalidOperand;
4726
0
  if (CE->getValue() == ExpectedVal)
4727
0
    return Match_Success;
4728
0
  return Match_InvalidOperand;
4729
0
}
4730
4731
4732
AArch64AsmParser::OperandMatchResultTy
4733
AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands)
4734
0
{
4735
0
  SMLoc S = getLoc();
4736
4737
0
  if (getParser().getTok().isNot(AsmToken::Identifier)) {
4738
    //Error(S, "expected register");
4739
0
    return MatchOperand_ParseFail;
4740
0
  }
4741
4742
0
  int FirstReg = tryParseRegister();
4743
0
  if (FirstReg == -1) {
4744
0
    return MatchOperand_ParseFail;
4745
0
  }
4746
0
  const MCRegisterClass &WRegClass =
4747
0
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
4748
0
  const MCRegisterClass &XRegClass =
4749
0
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
4750
4751
0
  bool isXReg = XRegClass.contains(FirstReg),
4752
0
       isWReg = WRegClass.contains(FirstReg);
4753
0
  if (!isXReg && !isWReg) {
4754
    //Error(S, "expected first even register of a "
4755
    //         "consecutive same-size even/odd register pair");
4756
0
    return MatchOperand_ParseFail;
4757
0
  }
4758
4759
0
  const MCRegisterInfo *RI = getContext().getRegisterInfo();
4760
0
  unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
4761
4762
0
  if (FirstEncoding & 0x1) {
4763
    //Error(S, "expected first even register of a "
4764
    //         "consecutive same-size even/odd register pair");
4765
0
    return MatchOperand_ParseFail;
4766
0
  }
4767
4768
  //SMLoc M = getLoc();
4769
0
  if (getParser().getTok().isNot(AsmToken::Comma)) {
4770
    //Error(M, "expected comma");
4771
0
    return MatchOperand_ParseFail;
4772
0
  }
4773
  // Eat the comma
4774
0
  getParser().Lex();
4775
4776
  //SMLoc E = getLoc();
4777
0
  int SecondReg = tryParseRegister();
4778
0
  if (SecondReg ==-1) {
4779
0
    return MatchOperand_ParseFail;
4780
0
  }
4781
4782
0
  if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
4783
0
      (isXReg && !XRegClass.contains(SecondReg)) ||
4784
0
      (isWReg && !WRegClass.contains(SecondReg))) {
4785
    //Error(E,"expected second odd register of a "
4786
    //         "consecutive same-size even/odd register pair");
4787
0
    return MatchOperand_ParseFail;
4788
0
  }
4789
  
4790
0
  unsigned Pair = 0;
4791
0
  if(isXReg) {
4792
0
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
4793
0
           &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
4794
0
  } else {
4795
0
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
4796
0
           &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
4797
0
  }
4798
4799
0
  Operands.push_back(AArch64Operand::CreateReg(Pair, false, S, getLoc(),
4800
0
      getContext()));
4801
4802
0
  return MatchOperand_Success;
4803
0
}
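tryParseGPRSeqPair requires the first register to have an even encoding and the second to be the next consecutive register of the same width, which is what sequential-pair instructions such as CASP expect. A few hypothetical operand strings for illustration:

// Hypothetical operand lists, assuming the even/odd pairing rule above.
static const char *ValidSeqPairs[] = {
    "x0, x1",  // even X register followed by the next X register
    "w4, w5",  // even W register followed by the next W register
};
static const char *InvalidSeqPairs[] = {
    "x1, x2",  // first register has an odd encoding -> MatchOperand_ParseFail
    "x0, x2",  // second register is not first + 1   -> MatchOperand_ParseFail
    "x0, w1",  // mismatched register widths         -> MatchOperand_ParseFail
};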