/src/keystone/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
Line | Count | Source |
1 | | //===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===// |
2 | | // |
3 | | // The LLVM Compiler Infrastructure |
4 | | // |
5 | | // This file is distributed under the University of Illinois Open Source |
6 | | // License. See LICENSE.TXT for details. |
7 | | // |
8 | | //===----------------------------------------------------------------------===// |
9 | | |
10 | | #include "ARMFeatures.h" |
11 | | #include "MCTargetDesc/ARMAddressingModes.h" |
12 | | #include "MCTargetDesc/ARMBaseInfo.h" |
13 | | #include "MCTargetDesc/ARMMCExpr.h" |
14 | | #include "llvm/ADT/STLExtras.h" |
15 | | #include "llvm/ADT/SmallVector.h" |
16 | | #include "llvm/ADT/StringExtras.h" |
17 | | #include "llvm/ADT/StringSwitch.h" |
18 | | #include "llvm/ADT/Triple.h" |
19 | | #include "llvm/ADT/Twine.h" |
20 | | #include "llvm/MC/MCAsmInfo.h" |
21 | | #include "llvm/MC/MCAssembler.h" |
22 | | #include "llvm/MC/MCContext.h" |
23 | | #include "llvm/MC/MCELFStreamer.h" |
24 | | #include "llvm/MC/MCExpr.h" |
25 | | #include "llvm/MC/MCInst.h" |
26 | | #include "llvm/MC/MCInstrDesc.h" |
27 | | #include "llvm/MC/MCInstrInfo.h" |
28 | | #include "llvm/MC/MCObjectFileInfo.h" |
29 | | #include "llvm/MC/MCParser/MCAsmLexer.h" |
30 | | #include "llvm/MC/MCParser/MCAsmParser.h" |
31 | | #include "llvm/MC/MCParser/MCAsmParserUtils.h" |
32 | | #include "llvm/MC/MCParser/MCParsedAsmOperand.h" |
33 | | #include "llvm/MC/MCParser/MCTargetAsmParser.h" |
34 | | #include "llvm/MC/MCRegisterInfo.h" |
35 | | #include "llvm/MC/MCSection.h" |
36 | | #include "llvm/MC/MCStreamer.h" |
37 | | #include "llvm/MC/MCSubtargetInfo.h" |
38 | | #include "llvm/MC/MCSymbol.h" |
39 | | #include "llvm/Support/ARMBuildAttributes.h" |
40 | | #include "llvm/Support/ARMEHABI.h" |
41 | | #include "llvm/Support/COFF.h" |
42 | | #include "llvm/Support/Debug.h" |
43 | | #include "llvm/Support/ELF.h" |
44 | | #include "llvm/Support/MathExtras.h" |
45 | | #include "llvm/Support/SourceMgr.h" |
46 | | #include "llvm/Support/TargetParser.h" |
47 | | #include "llvm/Support/TargetRegistry.h" |
48 | | #include "llvm/Support/raw_ostream.h" |
49 | | |
50 | | #include "keystone/arm.h" |
51 | | |
52 | | using namespace llvm_ks; |
53 | | |
54 | | namespace { |
55 | | |
56 | | class ARMOperand; |
57 | | |
58 | | enum VectorLaneTy { NoLanes, AllLanes, IndexedLane }; |
59 | | |
60 | | class UnwindContext { |
61 | | MCAsmParser &Parser; |
62 | | |
63 | | typedef SmallVector<SMLoc, 4> Locs; |
64 | | |
65 | | Locs FnStartLocs; |
66 | | Locs CantUnwindLocs; |
67 | | Locs PersonalityLocs; |
68 | | Locs PersonalityIndexLocs; |
69 | | Locs HandlerDataLocs; |
70 | | int FPReg; |
71 | | |
72 | | public: |
73 | 61.7k | UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {} |
74 | | |
75 | 13.7k | bool hasFnStart() const { return !FnStartLocs.empty(); } |
76 | 2.53k | bool cantUnwind() const { return !CantUnwindLocs.empty(); } |
77 | 3.61k | bool hasHandlerData() const { return !HandlerDataLocs.empty(); } |
78 | 3.30k | bool hasPersonality() const { |
79 | 3.30k | return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty()); |
80 | 3.30k | } |
81 | | |
82 | 1.81k | void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); } |
83 | 1.19k | void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); } |
84 | 1.56k | void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); } |
85 | 1.71k | void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); } |
86 | 1.04k | void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); } |
87 | | |
88 | 9 | void saveFPReg(int Reg) { FPReg = Reg; } |
89 | 154 | int getFPReg() const { return FPReg; } |
90 | | |
91 | 1.54k | void emitFnStartLocNotes() const { |
92 | 1.54k | for (Locs::const_iterator FI = FnStartLocs.begin(), FE = FnStartLocs.end(); |
93 | 3.09k | FI != FE; ++FI) |
94 | 1.54k | Parser.Note(*FI, ".fnstart was specified here"); |
95 | 1.54k | } |
96 | 487 | void emitCantUnwindLocNotes() const { |
97 | 487 | for (Locs::const_iterator UI = CantUnwindLocs.begin(), |
98 | 3.32k | UE = CantUnwindLocs.end(); UI != UE; ++UI) |
99 | 2.83k | Parser.Note(*UI, ".cantunwind was specified here"); |
100 | 487 | } |
101 | 286 | void emitHandlerDataLocNotes() const { |
102 | 286 | for (Locs::const_iterator HI = HandlerDataLocs.begin(), |
103 | 899 | HE = HandlerDataLocs.end(); HI != HE; ++HI) |
104 | 613 | Parser.Note(*HI, ".handlerdata was specified here"); |
105 | 286 | } |
106 | 1.59k | void emitPersonalityLocNotes() const { |
107 | 1.59k | for (Locs::const_iterator PI = PersonalityLocs.begin(), |
108 | 1.59k | PE = PersonalityLocs.end(), |
109 | 1.59k | PII = PersonalityIndexLocs.begin(), |
110 | 1.59k | PIE = PersonalityIndexLocs.end(); |
111 | 14.7k | PI != PE || PII != PIE;) { |
112 | 13.1k | if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer())) |
113 | 5.58k | Parser.Note(*PI++, ".personality was specified here"); |
114 | 7.56k | else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer())) |
115 | 7.56k | Parser.Note(*PII++, ".personalityindex was specified here"); |
116 | 0 | else |
117 | 0 | llvm_unreachable(".personality and .personalityindex cannot be " |
118 | 13.1k | "at the same location"); |
119 | 13.1k | } |
120 | 1.59k | } |
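emitPersonalityLocNotes above interleaves two already-ordered location lists by comparing the raw SMLoc pointers, so the .personality and .personalityindex notes come out in overall source order. The sketch below shows the same two-way merge over plain ordered values; the names and the Loc stand-in are illustrative, not parser code.

    #include <cstdio>
    #include <vector>

    // Stand-in for SMLoc: any strictly ordered position value.
    using Loc = long;

    // Two-way merge of two individually sorted lists, reporting entries in
    // overall order -- the same shape as the PI/PII loop above.
    static void emitNotesInOrder(const std::vector<Loc> &Personality,
                                 const std::vector<Loc> &PersonalityIndex) {
      auto PI = Personality.begin(), PE = Personality.end();
      auto PII = PersonalityIndex.begin(), PIE = PersonalityIndex.end();
      while (PI != PE || PII != PIE) {
        if (PI != PE && (PII == PIE || *PI < *PII))
          std::printf(".personality was specified here: %ld\n", *PI++);
        else
          std::printf(".personalityindex was specified here: %ld\n", *PII++);
      }
    }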
121 | | |
122 | 2.78k | void reset() { |
123 | 2.78k | FnStartLocs = Locs(); |
124 | 2.78k | CantUnwindLocs = Locs(); |
125 | 2.78k | PersonalityLocs = Locs(); |
126 | 2.78k | HandlerDataLocs = Locs(); |
127 | 2.78k | PersonalityIndexLocs = Locs(); |
128 | 2.78k | FPReg = ARM::SP; |
129 | 2.78k | } |
130 | | }; |
131 | | |
132 | | class ARMAsmParser : public MCTargetAsmParser { |
133 | | const MCInstrInfo &MII; |
134 | | const MCRegisterInfo *MRI; |
135 | | UnwindContext UC; |
136 | | |
137 | 132k | ARMTargetStreamer &getTargetStreamer() { |
138 | 132k | assert(getParser().getStreamer().getTargetStreamer() && |
139 | 132k | "do not have a target streamer"); |
140 | 132k | MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer(); |
141 | 132k | return static_cast<ARMTargetStreamer &>(TS); |
142 | 132k | } |
143 | | |
 144 | | // Map of register aliases defined via the .req directive. |
145 | | StringMap<unsigned> RegisterReqs; |
146 | | |
147 | | bool NextSymbolIsThumb; |
148 | | |
149 | | struct { |
150 | | ARMCC::CondCodes Cond; // Condition for IT block. |
151 | | unsigned Mask:4; // Condition mask for instructions. |
152 | | // Starting at first 1 (from lsb). |
153 | | // '1' condition as indicated in IT. |
154 | | // '0' inverse of condition (else). |
155 | | // Count of instructions in IT block is |
156 | | // 4 - trailingzeroes(mask) |
157 | | |
158 | | bool FirstCond; // Explicit flag for when we're parsing the |
 159 | | // first instruction in the IT block. It's |
160 | | // implied in the mask, so needs special |
161 | | // handling. |
162 | | |
163 | | unsigned CurPosition; // Current position in parsing of IT |
164 | | // block. In range [0,3]. Initialized |
165 | | // according to count of instructions in block. |
166 | | // ~0U if no active IT block. |
167 | | } ITState; |
168 | 332k | bool inITBlock() { return ITState.CurPosition != ~0U; } |
169 | 0 | bool lastInITBlock() { |
170 | 0 | return ITState.CurPosition == 4 - countTrailingZeros(ITState.Mask); |
171 | 0 | } |
172 | 99.8k | void forwardITPosition() { |
173 | 99.8k | if (!inITBlock()) return; |
174 | | // Move to the next instruction in the IT block, if there is one. If not, |
175 | | // mark the block as done. |
176 | 7.15k | unsigned TZ = countTrailingZeros(ITState.Mask); |
177 | 7.15k | if (++ITState.CurPosition == 5 - TZ) |
178 | 3.32k | ITState.CurPosition = ~0U; // Done with the IT block after this. |
179 | 7.15k | } |
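The mask arithmetic referenced in the ITState comment and in forwardITPosition can be made concrete with a standalone sketch; the helpers below are illustrative (assuming a non-zero 4-bit mask terminated by its lowest set bit) and are not part of the parser.

    #include <cassert>

    // Number of trailing zero bits in a non-zero 4-bit IT mask.
    static unsigned trailingZeros4(unsigned Mask) {
      assert(Mask != 0 && Mask <= 0xF && "IT mask must be a non-zero 4-bit value");
      unsigned TZ = 0;
      while ((Mask & 1u) == 0) {
        Mask >>= 1;
        ++TZ;
      }
      return TZ;
    }

    // "Count of instructions in IT block is 4 - trailingzeroes(mask)":
    // 0b1000 -> 1 instruction, 0bx100 -> 2, 0bxx10 -> 3, 0bxxx1 -> 4.
    static unsigned itBlockLength(unsigned Mask) {
      return 4u - trailingZeros4(Mask);
    }

forwardITPosition's comparison against 5 - TZ is this length plus one, matching the pre-incremented CurPosition it tests.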
180 | | |
181 | 0 | void Note(SMLoc L, const Twine &Msg, ArrayRef<SMRange> Ranges = None) { |
182 | 0 | return getParser().Note(L, Msg, Ranges); |
183 | 0 | } |
184 | | bool Warning(SMLoc L, const Twine &Msg, |
185 | 2.82k | ArrayRef<SMRange> Ranges = None) { |
186 | 2.82k | return getParser().Warning(L, Msg, Ranges); |
187 | 2.82k | } |
188 | | //bool Error(SMLoc L, const Twine &Msg, |
189 | | // ArrayRef<SMRange> Ranges = None) { |
190 | | // return getParser().Error(L, Msg, Ranges); |
191 | | //} |
192 | | |
193 | | bool validatetLDMRegList(const MCInst &Inst, const OperandVector &Operands, |
194 | | unsigned ListNo, bool IsARPop = false); |
195 | | bool validatetSTMRegList(const MCInst &Inst, const OperandVector &Operands, |
196 | | unsigned ListNo); |
197 | | |
198 | | int tryParseRegister(); |
199 | | bool tryParseRegisterWithWriteBack(OperandVector &); |
200 | | int tryParseShiftRegister(OperandVector &); |
201 | | bool parseRegisterList(OperandVector &); |
202 | | bool parseMemory(OperandVector &); |
203 | | bool parseOperand(OperandVector &, StringRef Mnemonic, unsigned int &ErrorCode); |
204 | | bool parsePrefix(ARMMCExpr::VariantKind &RefKind); |
205 | | bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType, |
206 | | unsigned &ShiftAmount); |
207 | | bool parseLiteralValues(unsigned Size, SMLoc L); |
208 | | bool parseDirectiveThumb(SMLoc L); |
209 | | bool parseDirectiveARM(SMLoc L); |
210 | | bool parseDirectiveThumbFunc(SMLoc L); |
211 | | bool parseDirectiveCode(SMLoc L); |
212 | | bool parseDirectiveSyntax(SMLoc L); |
213 | | bool parseDirectiveReq(StringRef Name, SMLoc L); |
214 | | bool parseDirectiveUnreq(SMLoc L); |
215 | | bool parseDirectiveArch(SMLoc L); |
216 | | bool parseDirectiveEabiAttr(SMLoc L); |
217 | | bool parseDirectiveCPU(SMLoc L); |
218 | | bool parseDirectiveFPU(SMLoc L); |
219 | | bool parseDirectiveFnStart(SMLoc L); |
220 | | bool parseDirectiveFnEnd(SMLoc L); |
221 | | bool parseDirectiveCantUnwind(SMLoc L); |
222 | | bool parseDirectivePersonality(SMLoc L); |
223 | | bool parseDirectiveHandlerData(SMLoc L); |
224 | | bool parseDirectiveSetFP(SMLoc L); |
225 | | bool parseDirectivePad(SMLoc L); |
226 | | bool parseDirectiveRegSave(SMLoc L, bool IsVector); |
227 | | bool parseDirectiveInst(SMLoc L, char Suffix = '\0'); |
228 | | bool parseDirectiveLtorg(SMLoc L); |
229 | | bool parseDirectiveEven(SMLoc L); |
230 | | bool parseDirectivePersonalityIndex(SMLoc L); |
231 | | bool parseDirectiveUnwindRaw(SMLoc L); |
232 | | bool parseDirectiveTLSDescSeq(SMLoc L); |
233 | | bool parseDirectiveMovSP(SMLoc L); |
234 | | bool parseDirectiveObjectArch(SMLoc L); |
235 | | bool parseDirectiveArchExtension(SMLoc L); |
236 | | bool parseDirectiveAlign(SMLoc L); |
237 | | bool parseDirectiveThumbSet(SMLoc L); |
238 | | |
239 | | StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode, |
240 | | bool &CarrySetting, unsigned &ProcessorIMod, |
241 | | StringRef &ITMask); |
242 | | void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst, |
243 | | bool &CanAcceptCarrySet, |
244 | | bool &CanAcceptPredicationCode); |
245 | | |
246 | | void tryConvertingToTwoOperandForm(StringRef Mnemonic, bool CarrySetting, |
247 | | OperandVector &Operands); |
248 | 1.95M | bool isThumb() const { |
249 | | // FIXME: Can tablegen auto-generate this? |
250 | 1.95M | return getSTI().getFeatureBits()[ARM::ModeThumb]; |
251 | 1.95M | } |
252 | 344k | bool isThumbOne() const { |
253 | 344k | return isThumb() && !getSTI().getFeatureBits()[ARM::FeatureThumb2]; |
254 | 344k | } |
255 | 653k | bool isThumbTwo() const { |
256 | 653k | return isThumb() && getSTI().getFeatureBits()[ARM::FeatureThumb2]; |
257 | 653k | } |
258 | 4.22k | bool hasThumb() const { |
259 | 4.22k | return getSTI().getFeatureBits()[ARM::HasV4TOps]; |
260 | 4.22k | } |
261 | 65 | bool hasV6Ops() const { |
262 | 65 | return getSTI().getFeatureBits()[ARM::HasV6Ops]; |
263 | 65 | } |
264 | 3.99k | bool hasV6MOps() const { |
265 | 3.99k | return getSTI().getFeatureBits()[ARM::HasV6MOps]; |
266 | 3.99k | } |
267 | 834 | bool hasV7Ops() const { |
268 | 834 | return getSTI().getFeatureBits()[ARM::HasV7Ops]; |
269 | 834 | } |
270 | 26.1k | bool hasV8Ops() const { |
271 | 26.1k | return getSTI().getFeatureBits()[ARM::HasV8Ops]; |
272 | 26.1k | } |
273 | 2.52k | bool hasV8MBaseline() const { |
274 | 2.52k | return getSTI().getFeatureBits()[ARM::HasV8MBaselineOps]; |
275 | 2.52k | } |
276 | 78 | bool hasV8MMainline() const { |
277 | 78 | return getSTI().getFeatureBits()[ARM::HasV8MMainlineOps]; |
278 | 78 | } |
279 | 87 | bool has8MSecExt() const { |
280 | 87 | return getSTI().getFeatureBits()[ARM::Feature8MSecExt]; |
281 | 87 | } |
282 | 3.40k | bool hasARM() const { |
283 | 3.40k | return !getSTI().getFeatureBits()[ARM::FeatureNoARM]; |
284 | 3.40k | } |
285 | 105 | bool hasDSP() const { |
286 | 105 | return getSTI().getFeatureBits()[ARM::FeatureDSP]; |
287 | 105 | } |
288 | 101k | bool hasD16() const { |
289 | 101k | return getSTI().getFeatureBits()[ARM::FeatureD16]; |
290 | 101k | } |
291 | 0 | bool hasV8_1aOps() const { |
292 | 0 | return getSTI().getFeatureBits()[ARM::HasV8_1aOps]; |
293 | 0 | } |
294 | | |
295 | 3.96k | void SwitchMode() { |
296 | 3.96k | MCSubtargetInfo &STI = copySTI(); |
297 | 3.96k | uint64_t FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb)); |
298 | 3.96k | setAvailableFeatures(FB); |
299 | 3.96k | } |
300 | 4.11k | bool isMClass() const { |
301 | 4.11k | return getSTI().getFeatureBits()[ARM::FeatureMClass]; |
302 | 4.11k | } |
303 | | |
304 | | /// @name Auto-generated Match Functions |
305 | | /// { |
306 | | |
307 | | #define GET_ASSEMBLER_HEADER |
308 | | #include "ARMGenAsmMatcher.inc" |
309 | | |
310 | | /// } |
311 | | |
312 | | OperandMatchResultTy parseITCondCode(OperandVector &, unsigned int &ErrorCode); |
313 | | OperandMatchResultTy parseCoprocNumOperand(OperandVector &, unsigned int &ErrorCode); |
314 | | OperandMatchResultTy parseCoprocRegOperand(OperandVector &, unsigned int &ErrorCode); |
315 | | OperandMatchResultTy parseCoprocOptionOperand(OperandVector &, unsigned int &ErrorCode); |
316 | | OperandMatchResultTy parseMemBarrierOptOperand(OperandVector &, unsigned int &ErrorCode); |
317 | | OperandMatchResultTy parseInstSyncBarrierOptOperand(OperandVector &, unsigned int &ErrorCode); |
318 | | OperandMatchResultTy parseProcIFlagsOperand(OperandVector &, unsigned int &ErrorCode); |
319 | | OperandMatchResultTy parseMSRMaskOperand(OperandVector &, unsigned int &ErrorCode); |
320 | | OperandMatchResultTy parseBankedRegOperand(OperandVector &, unsigned int &ErrorCode); |
321 | | OperandMatchResultTy parsePKHImm(OperandVector &O, StringRef Op, int Low, |
322 | | int High, unsigned int &ErrorCode); |
323 | 0 | OperandMatchResultTy parsePKHLSLImm(OperandVector &O, unsigned int &ErrorCode) { |
324 | 0 | return parsePKHImm(O, "lsl", 0, 31, ErrorCode); |
325 | 0 | } |
326 | 2 | OperandMatchResultTy parsePKHASRImm(OperandVector &O, unsigned int &ErrorCode) { |
327 | 2 | return parsePKHImm(O, "asr", 1, 32, ErrorCode); |
328 | 2 | } |
329 | | OperandMatchResultTy parseSetEndImm(OperandVector &, unsigned int &ErrorCode); |
330 | | OperandMatchResultTy parseShifterImm(OperandVector &, unsigned int &ErrorCode); |
331 | | OperandMatchResultTy parseRotImm(OperandVector &, unsigned int &ErrorCode); |
332 | | OperandMatchResultTy parseModImm(OperandVector &, unsigned int &ErrorCode); |
333 | | OperandMatchResultTy parseBitfield(OperandVector &, unsigned int &ErrorCode); |
334 | | OperandMatchResultTy parsePostIdxReg(OperandVector &, unsigned int &ErrorCode); |
335 | | OperandMatchResultTy parseAM3Offset(OperandVector &, unsigned int &ErrorCode); |
336 | | OperandMatchResultTy parseFPImm(OperandVector &, unsigned int &ErrorCode); |
337 | | OperandMatchResultTy parseVectorList(OperandVector &, unsigned int &ErrorCode); |
338 | | OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, |
339 | | SMLoc &EndLoc, unsigned int &ErrorCode); |
340 | | |
341 | | // Asm Match Converter Methods |
342 | | void cvtThumbMultiply(MCInst &Inst, const OperandVector &); |
343 | | void cvtThumbBranches(MCInst &Inst, const OperandVector &); |
344 | | |
345 | | bool validateInstruction(MCInst &Inst, const OperandVector &Ops); |
346 | | bool processInstruction(MCInst &Inst, const OperandVector &Ops, MCStreamer &Out); |
347 | | bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands); |
348 | | bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands); |
349 | | |
350 | | public: |
351 | | enum ARMMatchResultTy { |
352 | | Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY, |
353 | | Match_RequiresNotITBlock, |
354 | | Match_RequiresV6, |
355 | | Match_RequiresThumb2, |
356 | | Match_RequiresV8, |
357 | | #define GET_OPERAND_DIAGNOSTIC_TYPES |
358 | | #include "ARMGenAsmMatcher.inc" |
359 | | |
360 | | }; |
361 | | |
362 | | ARMAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser, |
363 | | const MCInstrInfo &MII, const MCTargetOptions &Options) |
364 | 61.7k | : MCTargetAsmParser(Options, STI), MII(MII), UC(Parser) { |
365 | 61.7k | MCAsmParserExtension::Initialize(Parser); |
366 | | |
367 | 61.7k | MCStreamer &S = getParser().getStreamer(); |
368 | 61.7k | if (S.getTargetStreamer() == nullptr) |
369 | 61.7k | new ARMTargetStreamer(S); |
370 | | |
371 | | // Cache the MCRegisterInfo. |
372 | 61.7k | MRI = getContext().getRegisterInfo(); |
373 | | |
374 | | // Initialize the set of available features. |
375 | 61.7k | setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits())); |
376 | | |
377 | | // Not in an ITBlock to start with. |
378 | 61.7k | ITState.CurPosition = ~0U; |
379 | | |
380 | 61.7k | NextSymbolIsThumb = false; |
381 | 61.7k | } |
382 | | |
383 | | // Implementation of the MCTargetAsmParser interface: |
384 | | bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc, unsigned int &ErrorCode) override; |
385 | | bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name, |
386 | | SMLoc NameLoc, OperandVector &Operands, unsigned int &ErrorCode) override; |
387 | | bool ParseDirective(AsmToken DirectiveID) override; |
388 | | |
389 | | unsigned validateTargetOperandClass(MCParsedAsmOperand &Op, |
390 | | unsigned Kind) override; |
391 | | unsigned checkTargetMatchPredicate(MCInst &Inst) override; |
392 | | |
393 | | bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, |
394 | | OperandVector &Operands, MCStreamer &Out, |
395 | | uint64_t &ErrorInfo, |
396 | | bool MatchingInlineAsm, unsigned int &ErrorCode, uint64_t &Address) override; |
397 | | void onLabelParsed(MCSymbol *Symbol) override; |
398 | | }; |
399 | | } // end anonymous namespace |
400 | | |
401 | | namespace { |
402 | | |
403 | | /// ARMOperand - Instances of this class represent a parsed ARM machine |
404 | | /// operand. |
405 | | class ARMOperand : public MCParsedAsmOperand { |
406 | | enum KindTy { |
407 | | k_CondCode, |
408 | | k_CCOut, |
409 | | k_ITCondMask, |
410 | | k_CoprocNum, |
411 | | k_CoprocReg, |
412 | | k_CoprocOption, |
413 | | k_Immediate, |
414 | | k_MemBarrierOpt, |
415 | | k_InstSyncBarrierOpt, |
416 | | k_Memory, |
417 | | k_PostIndexRegister, |
418 | | k_MSRMask, |
419 | | k_BankedReg, |
420 | | k_ProcIFlags, |
421 | | k_VectorIndex, |
422 | | k_Register, |
423 | | k_RegisterList, |
424 | | k_DPRRegisterList, |
425 | | k_SPRRegisterList, |
426 | | k_VectorList, |
427 | | k_VectorListAllLanes, |
428 | | k_VectorListIndexed, |
429 | | k_ShiftedRegister, |
430 | | k_ShiftedImmediate, |
431 | | k_ShifterImmediate, |
432 | | k_RotateImmediate, |
433 | | k_ModifiedImmediate, |
434 | | k_BitfieldDescriptor, |
435 | | k_Token |
436 | | } Kind; |
437 | | |
438 | | SMLoc StartLoc, EndLoc, AlignmentLoc; |
439 | | SmallVector<unsigned, 8> Registers; |
440 | | |
441 | | struct CCOp { |
442 | | ARMCC::CondCodes Val; |
443 | | }; |
444 | | |
445 | | struct CopOp { |
446 | | unsigned Val; |
447 | | }; |
448 | | |
449 | | struct CoprocOptionOp { |
450 | | unsigned Val; |
451 | | }; |
452 | | |
453 | | struct ITMaskOp { |
454 | | unsigned Mask:4; |
455 | | }; |
456 | | |
457 | | struct MBOptOp { |
458 | | ARM_MB::MemBOpt Val; |
459 | | }; |
460 | | |
461 | | struct ISBOptOp { |
462 | | ARM_ISB::InstSyncBOpt Val; |
463 | | }; |
464 | | |
465 | | struct IFlagsOp { |
466 | | ARM_PROC::IFlags Val; |
467 | | }; |
468 | | |
469 | | struct MMaskOp { |
470 | | unsigned Val; |
471 | | }; |
472 | | |
473 | | struct BankedRegOp { |
474 | | unsigned Val; |
475 | | }; |
476 | | |
477 | | struct TokOp { |
478 | | const char *Data; |
479 | | unsigned Length; |
480 | | }; |
481 | | |
482 | | struct RegOp { |
483 | | unsigned RegNum; |
484 | | }; |
485 | | |
486 | | // A vector register list is a sequential list of 1 to 4 registers. |
487 | | struct VectorListOp { |
488 | | unsigned RegNum; |
489 | | unsigned Count; |
490 | | unsigned LaneIndex; |
491 | | bool isDoubleSpaced; |
492 | | }; |
493 | | |
494 | | struct VectorIndexOp { |
495 | | unsigned Val; |
496 | | }; |
497 | | |
498 | | struct ImmOp { |
499 | | const MCExpr *Val; |
500 | | }; |
501 | | |
502 | | /// Combined record for all forms of ARM address expressions. |
503 | | struct MemoryOp { |
504 | | unsigned BaseRegNum; |
505 | | // Offset is in OffsetReg or OffsetImm. If both are zero, no offset |
506 | | // was specified. |
507 | | const MCConstantExpr *OffsetImm; // Offset immediate value |
508 | | unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL |
509 | | ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg |
510 | | unsigned ShiftImm; // shift for OffsetReg. |
511 | | unsigned Alignment; // 0 = no alignment specified |
512 | | // n = alignment in bytes (2, 4, 8, 16, or 32) |
513 | | unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit) |
514 | | }; |
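As an illustration of how the MemoryOp fields combine (an assumed mapping based only on the field comments above, not taken from the parser), a register-offset operand such as "[r1, -r2, lsl #2]" would populate the record roughly like this:

    // Hypothetical field values for the operand "[r1, -r2, lsl #2]":
    //   BaseRegNum   = R1
    //   OffsetImm    = nullptr        // offset is in OffsetRegNum instead
    //   OffsetRegNum = R2
    //   ShiftType    = ARM_AM::lsl, ShiftImm = 2
    //   isNegative   = 1              // the '-' on the offset register (~'U' bit)
    //   Alignment    = 0              // no alignment suffix was written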
515 | | |
516 | | struct PostIdxRegOp { |
517 | | unsigned RegNum; |
518 | | bool isAdd; |
519 | | ARM_AM::ShiftOpc ShiftTy; |
520 | | unsigned ShiftImm; |
521 | | }; |
522 | | |
523 | | struct ShifterImmOp { |
524 | | bool isASR; |
525 | | unsigned Imm; |
526 | | }; |
527 | | |
528 | | struct RegShiftedRegOp { |
529 | | ARM_AM::ShiftOpc ShiftTy; |
530 | | unsigned SrcReg; |
531 | | unsigned ShiftReg; |
532 | | unsigned ShiftImm; |
533 | | }; |
534 | | |
535 | | struct RegShiftedImmOp { |
536 | | ARM_AM::ShiftOpc ShiftTy; |
537 | | unsigned SrcReg; |
538 | | unsigned ShiftImm; |
539 | | }; |
540 | | |
541 | | struct RotImmOp { |
542 | | unsigned Imm; |
543 | | }; |
544 | | |
545 | | struct ModImmOp { |
546 | | unsigned Bits; |
547 | | unsigned Rot; |
548 | | }; |
549 | | |
550 | | struct BitfieldOp { |
551 | | unsigned LSB; |
552 | | unsigned Width; |
553 | | }; |
554 | | |
555 | | union { |
556 | | struct CCOp CC; |
557 | | struct CopOp Cop; |
558 | | struct CoprocOptionOp CoprocOption; |
559 | | struct MBOptOp MBOpt; |
560 | | struct ISBOptOp ISBOpt; |
561 | | struct ITMaskOp ITMask; |
562 | | struct IFlagsOp IFlags; |
563 | | struct MMaskOp MMask; |
564 | | struct BankedRegOp BankedReg; |
565 | | struct TokOp Tok; |
566 | | struct RegOp Reg; |
567 | | struct VectorListOp VectorList; |
568 | | struct VectorIndexOp VectorIndex; |
569 | | struct ImmOp Imm; |
570 | | struct MemoryOp Memory; |
571 | | struct PostIdxRegOp PostIdxReg; |
572 | | struct ShifterImmOp ShifterImm; |
573 | | struct RegShiftedRegOp RegShiftedReg; |
574 | | struct RegShiftedImmOp RegShiftedImm; |
575 | | struct RotImmOp RotImm; |
576 | | struct ModImmOp ModImm; |
577 | | struct BitfieldOp Bitfield; |
578 | | }; |
579 | | |
580 | | public: |
581 | 760k | ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {} |
582 | | |
583 | | /// getStartLoc - Get the location of the first token of this operand. |
584 | 728 | SMLoc getStartLoc() const override { return StartLoc; } |
585 | | /// getEndLoc - Get the location of the last token of this operand. |
586 | 36 | SMLoc getEndLoc() const override { return EndLoc; } |
587 | | /// getLocRange - Get the range between the first and last token of this |
588 | | /// operand. |
589 | 0 | SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); } |
590 | | |
591 | | /// getAlignmentLoc - Get the location of the Alignment token of this operand. |
592 | 0 | SMLoc getAlignmentLoc() const { |
593 | 0 | assert(Kind == k_Memory && "Invalid access!"); |
594 | 0 | return AlignmentLoc; |
595 | 0 | } |
596 | | |
597 | 204k | ARMCC::CondCodes getCondCode() const { |
598 | 204k | assert(Kind == k_CondCode && "Invalid access!"); |
599 | 204k | return CC.Val; |
600 | 204k | } |
601 | | |
602 | 1.11k | unsigned getCoproc() const { |
603 | 1.11k | assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!"); |
604 | 1.11k | return Cop.Val; |
605 | 1.11k | } |
606 | | |
607 | 206k | StringRef getToken() const { |
608 | 206k | assert(Kind == k_Token && "Invalid access!"); |
609 | 206k | return StringRef(Tok.Data, Tok.Length); |
610 | 206k | } |
611 | | |
612 | 614k | unsigned getReg() const override { |
613 | 614k | assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!"); |
614 | 614k | return Reg.RegNum; |
615 | 614k | } |
616 | | |
617 | 0 | const SmallVectorImpl<unsigned> &getRegList() const { |
618 | 0 | assert((Kind == k_RegisterList || Kind == k_DPRRegisterList || |
619 | 0 | Kind == k_SPRRegisterList) && "Invalid access!"); |
620 | 0 | return Registers; |
621 | 0 | } |
622 | | |
623 | 129k | const MCExpr *getImm() const { |
624 | 129k | assert(isImm() && "Invalid access!"); |
625 | 129k | return Imm.Val; |
626 | 129k | } |
627 | | |
628 | 0 | unsigned getVectorIndex() const { |
629 | 0 | assert(Kind == k_VectorIndex && "Invalid access!"); |
630 | 0 | return VectorIndex.Val; |
631 | 0 | } |
632 | | |
633 | 3.02k | ARM_MB::MemBOpt getMemBarrierOpt() const { |
634 | 3.02k | assert(Kind == k_MemBarrierOpt && "Invalid access!"); |
635 | 3.02k | return MBOpt.Val; |
636 | 3.02k | } |
637 | | |
638 | 860 | ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const { |
639 | 860 | assert(Kind == k_InstSyncBarrierOpt && "Invalid access!"); |
640 | 860 | return ISBOpt.Val; |
641 | 860 | } |
642 | | |
643 | 145 | ARM_PROC::IFlags getProcIFlags() const { |
644 | 145 | assert(Kind == k_ProcIFlags && "Invalid access!"); |
645 | 145 | return IFlags.Val; |
646 | 145 | } |
647 | | |
648 | 654 | unsigned getMSRMask() const { |
649 | 654 | assert(Kind == k_MSRMask && "Invalid access!"); |
650 | 654 | return MMask.Val; |
651 | 654 | } |
652 | | |
653 | 0 | unsigned getBankedReg() const { |
654 | 0 | assert(Kind == k_BankedReg && "Invalid access!"); |
655 | 0 | return BankedReg.Val; |
656 | 0 | } |
657 | | |
658 | 2.42k | bool isCoprocNum() const { return Kind == k_CoprocNum; } |
659 | 1.15k | bool isCoprocReg() const { return Kind == k_CoprocReg; } |
660 | 0 | bool isCoprocOption() const { return Kind == k_CoprocOption; } |
661 | 587k | bool isCondCode() const { return Kind == k_CondCode; } |
662 | 263k | bool isCCOut() const { return Kind == k_CCOut; } |
663 | 6.21k | bool isITMask() const { return Kind == k_ITCondMask; } |
664 | 5.98k | bool isITCondCode() const { return Kind == k_CondCode; } |
665 | 389k | bool isImm() const override { return Kind == k_Immediate; } |
 666 | | // checks whether this operand is an unsigned offset which fits in a field |
667 | | // of specified width and scaled by a specific number of bits |
668 | | template<unsigned width, unsigned scale> |
669 | 2.78k | bool isUnsignedOffset() const { |
670 | 2.78k | if (!isImm()) return false; |
671 | 2.77k | if (isa<MCSymbolRefExpr>(Imm.Val)) return true; |
672 | 2.03k | if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) { |
673 | 781 | int64_t Val = CE->getValue(); |
674 | 781 | int64_t Align = 1LL << scale; |
675 | 781 | int64_t Max = Align * ((1LL << width) - 1); |
676 | 781 | return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max); |
677 | 781 | } |
678 | 1.25k | return false; |
679 | 2.03k | } |
 680 | | // checks whether this operand is a signed offset which fits in a field |
681 | | // of specified width and scaled by a specific number of bits |
682 | | template<unsigned width, unsigned scale> |
683 | | bool isSignedOffset() const { |
684 | | if (!isImm()) return false; |
685 | | if (isa<MCSymbolRefExpr>(Imm.Val)) return true; |
686 | | if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) { |
687 | | int64_t Val = CE->getValue(); |
688 | | int64_t Align = 1LL << scale; |
689 | | int64_t Max = Align * ((1LL << (width-1)) - 1); |
690 | | int64_t Min = -Align * (1LL << (width-1)); |
691 | | return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max); |
692 | | } |
693 | | return false; |
694 | | } |
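To make the <width, scale> template parameters concrete: Align = 1 << scale is the required multiple, and the signed form accepts multiples of Align in [-Align * 2^(width-1), Align * (2^(width-1) - 1)]. The sketch below restates that check outside the class; it is illustrative only, not parser code.

    #include <cstdint>

    // Standalone restatement of the signed range check used by isSignedOffset.
    template <unsigned width, unsigned scale>
    static bool fitsSignedScaled(int64_t Val) {
      const int64_t Align = 1LL << scale;                    // required multiple
      const int64_t Max = Align * ((1LL << (width - 1)) - 1);
      const int64_t Min = -Align * (1LL << (width - 1));
      return (Val % Align) == 0 && Val >= Min && Val <= Max;
    }

    // Example: fitsSignedScaled<8, 2> accepts multiples of 4 in [-512, 508],
    // so 508 passes and 510 (not a multiple of 4) fails.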
 695 | | // checks whether this operand is a signed offset relative to an address |
 696 | | // which fits in a field of specified width and scaled by a specific number |
697 | | // of bits |
698 | | template<unsigned width, unsigned scale> |
699 | 34.4k | bool isSignedOffsetRel(int64_t Addr) const { |
700 | 34.4k | if (!isImm()) return false; |
701 | 34.4k | if (isa<MCSymbolRefExpr>(Imm.Val)) return true; |
702 | 12.3k | if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) { |
703 | 12.1k | int64_t Val = CE->getValue() - Addr; |
704 | 12.1k | int64_t Align = 1LL << scale; |
705 | 12.1k | int64_t Max = Align * ((1LL << (width-1)) - 1); |
706 | 12.1k | int64_t Min = -Align * (1LL << (width-1)); |
707 | 12.1k | return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max); |
708 | 12.1k | } |
709 | 144 | return false; |
 710 | 12.3k | } |
Template instantiations of isSignedOffsetRel (per-instantiation coverage for lines 699-710):
  isSignedOffsetRel<11u, 1u>(long) const: 24.9k calls; 8.22k reached the MCConstantExpr check, 69 returned false
  isSignedOffsetRel<8u, 1u>(long) const: 6.90k calls; 1.61k reached the MCConstantExpr check, 10 returned false
  isSignedOffsetRel<24u, 1u>(long) const: 1.71k calls; 1.68k reached the MCConstantExpr check, 61 returned false
  isSignedOffsetRel<20u, 1u>(long) const: 866 calls; 800 reached the MCConstantExpr check, 4 returned false
711 | | |
712 | | // checks whether this operand is a memory operand computed as an offset |
 713 | | // applied to PC. The offset may have 8 bits of magnitude and is represented |
 714 | | // with two bits of shift. Textually it may be either [pc, #imm], #imm, or |
 715 | | // a relocatable expression. |
716 | 3.45k | bool isThumbMemPC() const { |
717 | 3.45k | int64_t Val = 0; |
718 | 3.45k | if (isImm()) { |
719 | 3.12k | if (isa<MCSymbolRefExpr>(Imm.Val)) return true; |
720 | 255 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val); |
721 | 255 | if (!CE) return false; |
722 | 201 | Val = CE->getValue(); |
723 | 201 | } |
724 | 325 | else if (isMem()) { |
725 | 316 | if(!Memory.OffsetImm || Memory.OffsetRegNum) return false; |
726 | 34 | if(Memory.BaseRegNum != ARM::PC) return false; |
727 | 0 | Val = Memory.OffsetImm->getValue(); |
728 | 0 | } |
729 | 9 | else return false; |
730 | 201 | return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020); |
731 | 3.45k | } |
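The [0, 1020] bound checked in isThumbMemPC is just the comment's "8 bits of magnitude ... two bits of shift" worked out:

    max offset = (2^8 - 1) * 4 = 255 * 4 = 1020 bytes, in steps of 4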
732 | 536 | bool isFPImm() const { |
733 | 536 | if (!isImm()) return false; |
734 | 508 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
735 | 508 | if (!CE) return false; |
736 | 503 | int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue())); |
737 | 503 | return Val != -1; |
738 | 508 | } |
739 | 0 | bool isFBits16() const { |
740 | 0 | if (!isImm()) return false; |
741 | 0 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
742 | 0 | if (!CE) return false; |
743 | 0 | int64_t Value = CE->getValue(); |
744 | 0 | return Value >= 0 && Value <= 16; |
745 | 0 | } |
746 | 0 | bool isFBits32() const { |
747 | 0 | if (!isImm()) return false; |
748 | 0 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
749 | 0 | if (!CE) return false; |
750 | 0 | int64_t Value = CE->getValue(); |
751 | 0 | return Value >= 1 && Value <= 32; |
752 | 0 | } |
753 | 0 | bool isImm8s4() const { |
754 | 0 | if (!isImm()) return false; |
755 | 0 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
756 | 0 | if (!CE) return false; |
757 | 0 | int64_t Value = CE->getValue(); |
758 | 0 | return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020; |
759 | 0 | } |
760 | 889 | bool isImm0_1020s4() const { |
761 | 889 | if (!isImm()) return false; |
762 | 850 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
763 | 850 | if (!CE) return false; |
764 | 833 | int64_t Value = CE->getValue(); |
765 | 833 | return ((Value & 3) == 0) && Value >= 0 && Value <= 1020; |
766 | 850 | } |
767 | 4.43k | bool isImm0_508s4() const { |
768 | 4.43k | if (!isImm()) return false; |
769 | 3.92k | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
770 | 3.92k | if (!CE) return false; |
771 | 3.89k | int64_t Value = CE->getValue(); |
772 | 3.89k | return ((Value & 3) == 0) && Value >= 0 && Value <= 508; |
773 | 3.92k | } |
774 | 1.36k | bool isImm0_508s4Neg() const { |
775 | 1.36k | if (!isImm()) return false; |
776 | 890 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
777 | 890 | if (!CE) return false; |
778 | 863 | int64_t Value = -CE->getValue(); |
779 | | // explicitly exclude zero. we want that to use the normal 0_508 version. |
780 | 863 | return ((Value & 3) == 0) && Value > 0 && Value <= 508; |
781 | 890 | } |
782 | 498 | bool isImm0_239() const { |
783 | 498 | if (!isImm()) return false; |
784 | 490 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
785 | 490 | if (!CE) return false; |
786 | 482 | int64_t Value = CE->getValue(); |
787 | 482 | return Value >= 0 && Value < 240; |
788 | 490 | } |
789 | 8.96k | bool isImm0_255() const { |
790 | 8.96k | if (!isImm()) return false; |
791 | 3.76k | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
792 | 3.76k | if (!CE) return false; |
793 | 3.14k | int64_t Value = CE->getValue(); |
794 | 3.14k | return Value >= 0 && Value < 256; |
795 | 3.76k | } |
796 | 5.47k | bool isImm0_4095() const { |
797 | 5.47k | if (!isImm()) return false; |
798 | 2.55k | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
799 | 2.55k | if (!CE) return false; |
800 | 2.50k | int64_t Value = CE->getValue(); |
801 | 2.50k | return Value >= 0 && Value < 4096; |
802 | 2.55k | } |
803 | 3.39k | bool isImm0_4095Neg() const { |
804 | 3.39k | if (!isImm()) return false; |
805 | 517 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
806 | 517 | if (!CE) return false; |
807 | 476 | int64_t Value = -CE->getValue(); |
808 | 476 | return Value > 0 && Value < 4096; |
809 | 517 | } |
810 | 160 | bool isImm0_1() const { |
811 | 160 | if (!isImm()) return false; |
812 | 156 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
813 | 156 | if (!CE) return false; |
814 | 154 | int64_t Value = CE->getValue(); |
815 | 154 | return Value >= 0 && Value < 2; |
816 | 156 | } |
817 | 0 | bool isImm0_3() const { |
818 | 0 | if (!isImm()) return false; |
819 | 0 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
820 | 0 | if (!CE) return false; |
821 | 0 | int64_t Value = CE->getValue(); |
822 | 0 | return Value >= 0 && Value < 4; |
823 | 0 | } |
824 | 2.83k | bool isImm0_7() const { |
825 | 2.83k | if (!isImm()) return false; |
826 | 1.79k | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
827 | 1.79k | if (!CE) return false; |
828 | 1.67k | int64_t Value = CE->getValue(); |
829 | 1.67k | return Value >= 0 && Value < 8; |
830 | 1.79k | } |
831 | 659 | bool isImm0_15() const { |
832 | 659 | if (!isImm()) return false; |
833 | 646 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
834 | 646 | if (!CE) return false; |
835 | 626 | int64_t Value = CE->getValue(); |
836 | 626 | return Value >= 0 && Value < 16; |
837 | 646 | } |
838 | 9.41k | bool isImm0_31() const { |
839 | 9.41k | if (!isImm()) return false; |
840 | 5.86k | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
841 | 5.86k | if (!CE) return false; |
842 | 5.55k | int64_t Value = CE->getValue(); |
843 | 5.55k | return Value >= 0 && Value < 32; |
844 | 5.86k | } |
845 | 274 | bool isImm0_63() const { |
846 | 274 | if (!isImm()) return false; |
847 | 272 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
848 | 272 | if (!CE) return false; |
849 | 267 | int64_t Value = CE->getValue(); |
850 | 267 | return Value >= 0 && Value < 64; |
851 | 272 | } |
852 | 0 | bool isImm8() const { |
853 | 0 | if (!isImm()) return false; |
854 | 0 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
855 | 0 | if (!CE) return false; |
856 | 0 | int64_t Value = CE->getValue(); |
857 | 0 | return Value == 8; |
858 | 0 | } |
859 | 0 | bool isImm16() const { |
860 | 0 | if (!isImm()) return false; |
861 | 0 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
862 | 0 | if (!CE) return false; |
863 | 0 | int64_t Value = CE->getValue(); |
864 | 0 | return Value == 16; |
865 | 0 | } |
866 | 0 | bool isImm32() const { |
867 | 0 | if (!isImm()) return false; |
868 | 0 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
869 | 0 | if (!CE) return false; |
870 | 0 | int64_t Value = CE->getValue(); |
871 | 0 | return Value == 32; |
872 | 0 | } |
873 | 13 | bool isShrImm8() const { |
874 | 13 | if (!isImm()) return false; |
875 | 9 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
876 | 9 | if (!CE) return false; |
877 | 6 | int64_t Value = CE->getValue(); |
878 | 6 | return Value > 0 && Value <= 8; |
879 | 9 | } |
880 | 11 | bool isShrImm16() const { |
881 | 11 | if (!isImm()) return false; |
882 | 6 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
883 | 6 | if (!CE) return false; |
884 | 3 | int64_t Value = CE->getValue(); |
885 | 3 | return Value > 0 && Value <= 16; |
886 | 6 | } |
887 | 19 | bool isShrImm32() const { |
888 | 19 | if (!isImm()) return false; |
889 | 9 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
890 | 9 | if (!CE) return false; |
891 | 6 | int64_t Value = CE->getValue(); |
892 | 6 | return Value > 0 && Value <= 32; |
893 | 9 | } |
894 | 12 | bool isShrImm64() const { |
895 | 12 | if (!isImm()) return false; |
896 | 10 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
897 | 10 | if (!CE) return false; |
898 | 8 | int64_t Value = CE->getValue(); |
899 | 8 | return Value > 0 && Value <= 64; |
900 | 10 | } |
901 | 0 | bool isImm1_7() const { |
902 | 0 | if (!isImm()) return false; |
903 | 0 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
904 | 0 | if (!CE) return false; |
905 | 0 | int64_t Value = CE->getValue(); |
906 | 0 | return Value > 0 && Value < 8; |
907 | 0 | } |
908 | 0 | bool isImm1_15() const { |
909 | 0 | if (!isImm()) return false; |
910 | 0 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
911 | 0 | if (!CE) return false; |
912 | 0 | int64_t Value = CE->getValue(); |
913 | 0 | return Value > 0 && Value < 16; |
914 | 0 | } |
915 | 0 | bool isImm1_31() const { |
916 | 0 | if (!isImm()) return false; |
917 | 0 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
918 | 0 | if (!CE) return false; |
919 | 0 | int64_t Value = CE->getValue(); |
920 | 0 | return Value > 0 && Value < 32; |
921 | 0 | } |
922 | 0 | bool isImm1_16() const { |
923 | 0 | if (!isImm()) return false; |
924 | 0 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
925 | 0 | if (!CE) return false; |
926 | 0 | int64_t Value = CE->getValue(); |
927 | 0 | return Value > 0 && Value < 17; |
928 | 0 | } |
929 | 32 | bool isImm1_32() const { |
930 | 32 | if (!isImm()) return false; |
931 | 16 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
932 | 16 | if (!CE) return false; |
933 | 8 | int64_t Value = CE->getValue(); |
934 | 8 | return Value > 0 && Value < 33; |
935 | 16 | } |
936 | 1.24k | bool isImm0_32() const { |
937 | 1.24k | if (!isImm()) return false; |
938 | 392 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
939 | 392 | if (!CE) return false; |
940 | 366 | int64_t Value = CE->getValue(); |
941 | 366 | return Value >= 0 && Value < 33; |
942 | 392 | } |
943 | 494 | bool isImm0_65535() const { |
944 | 494 | if (!isImm()) return false; |
945 | 481 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
946 | 481 | if (!CE) return false; |
947 | 461 | int64_t Value = CE->getValue(); |
948 | 461 | return Value >= 0 && Value < 65536; |
949 | 481 | } |
950 | 419 | bool isImm256_65535Expr() const { |
951 | 419 | if (!isImm()) return false; |
952 | 291 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
953 | | // If it's not a constant expression, it'll generate a fixup and be |
954 | | // handled later. |
955 | 291 | if (!CE) return true; |
956 | 280 | int64_t Value = CE->getValue(); |
957 | 280 | return Value >= 256 && Value < 65536; |
958 | 291 | } |
959 | 630 | bool isImm0_65535Expr() const { |
960 | 630 | if (!isImm()) return false; |
961 | 574 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
962 | | // If it's not a constant expression, it'll generate a fixup and be |
963 | | // handled later. |
964 | 574 | if (!CE) return true; |
965 | 545 | int64_t Value = CE->getValue(); |
966 | 545 | return Value >= 0 && Value < 65536; |
967 | 574 | } |
968 | 315 | bool isImm24bit() const { |
969 | 315 | if (!isImm()) return false; |
970 | 310 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
971 | 310 | if (!CE) return false; |
972 | 297 | int64_t Value = CE->getValue(); |
973 | 297 | return Value >= 0 && Value <= 0xffffff; |
974 | 310 | } |
975 | 6.26k | bool isImmThumbSR() const { |
976 | 6.26k | if (!isImm()) return false; |
977 | 3.06k | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
978 | 3.06k | if (!CE) return false; |
979 | 3.02k | int64_t Value = CE->getValue(); |
980 | 3.02k | return Value > 0 && Value < 33; |
981 | 3.06k | } |
982 | 0 | bool isPKHLSLImm() const { |
983 | 0 | if (!isImm()) return false; |
984 | 0 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
985 | 0 | if (!CE) return false; |
986 | 0 | int64_t Value = CE->getValue(); |
987 | 0 | return Value >= 0 && Value < 32; |
988 | 0 | } |
989 | 0 | bool isPKHASRImm() const { |
990 | 0 | if (!isImm()) return false; |
991 | 0 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
992 | 0 | if (!CE) return false; |
993 | 0 | int64_t Value = CE->getValue(); |
994 | 0 | return Value > 0 && Value <= 32; |
995 | 0 | } |
996 | 1.78k | bool isAdrLabel() const { |
997 | | // If we have an immediate that's not a constant, treat it as a label |
998 | | // reference needing a fixup. |
999 | 1.78k | if (isImm() && !isa<MCConstantExpr>(getImm())) |
1000 | 1.17k | return true; |
1001 | | |
1002 | | // If it is a constant, it must fit into a modified immediate encoding. |
1003 | 606 | if (!isImm()) return false; |
1004 | 603 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1005 | 603 | if (!CE) return false; |
1006 | 603 | int64_t Value = CE->getValue(); |
1007 | 603 | return (ARM_AM::getSOImmVal(Value) != -1 || |
1008 | 603 | ARM_AM::getSOImmVal(-Value) != -1); |
1009 | 603 | } |
1010 | 18.7k | bool isT2SOImm() const { |
1011 | 18.7k | if (!isImm()) return false; |
1012 | 7.17k | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1013 | 7.17k | if (!CE) return false; |
1014 | 5.43k | int64_t Value = CE->getValue(); |
1015 | 5.43k | return ARM_AM::getT2SOImmVal(Value) != -1; |
1016 | 7.17k | } |
1017 | 2.89k | bool isT2SOImmNot() const { |
1018 | 2.89k | if (!isImm()) return false; |
1019 | 836 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1020 | 836 | if (!CE) return false; |
1021 | 490 | int64_t Value = CE->getValue(); |
1022 | 490 | return ARM_AM::getT2SOImmVal(Value) == -1 && |
1023 | 490 | ARM_AM::getT2SOImmVal(~Value) != -1; |
1024 | 836 | } |
1025 | 15.3k | bool isT2SOImmNeg() const { |
1026 | 15.3k | if (!isImm()) return false; |
1027 | 3.86k | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1028 | 3.86k | if (!CE) return false; |
1029 | 2.16k | int64_t Value = CE->getValue(); |
1030 | | // Only use this when not representable as a plain so_imm. |
1031 | 2.16k | return ARM_AM::getT2SOImmVal(Value) == -1 && |
1032 | 2.16k | ARM_AM::getT2SOImmVal(-Value) != -1; |
1033 | 3.86k | } |
1034 | 794 | bool isSetEndImm() const { |
1035 | 794 | if (!isImm()) return false; |
1036 | 794 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1037 | 794 | if (!CE) return false; |
1038 | 792 | int64_t Value = CE->getValue(); |
1039 | 792 | return Value == 1 || Value == 0; |
1040 | 794 | } |
1041 | 804k | bool isReg() const override { return Kind == k_Register; } |
1042 | 1.32k | bool isRegList() const { return Kind == k_RegisterList; } |
1043 | 32 | bool isDPRRegList() const { return Kind == k_DPRRegisterList; } |
1044 | 16 | bool isSPRRegList() const { return Kind == k_SPRRegisterList; } |
1045 | 1.67M | bool isToken() const override { return Kind == k_Token; } |
1046 | 6.05k | bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; } |
1047 | 1.32k | bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; } |
1048 | 23.6k | bool isMem() const override { return Kind == k_Memory; } |
1049 | 0 | bool isShifterImm() const { return Kind == k_ShifterImmediate; } |
1050 | 9.77k | bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; } |
1051 | 25.1k | bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; } |
1052 | 9 | bool isRotImm() const { return Kind == k_RotateImmediate; } |
1053 | 14.7k | bool isModImm() const { return Kind == k_ModifiedImmediate; } |
1054 | 3.78k | bool isModImmNot() const { |
1055 | 3.78k | if (!isImm()) return false; |
1056 | 890 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1057 | 890 | if (!CE) return false; |
1058 | 807 | int64_t Value = CE->getValue(); |
1059 | 807 | return ARM_AM::getSOImmVal(~Value) != -1; |
1060 | 890 | } |
1061 | 4.43k | bool isModImmNeg() const { |
1062 | 4.43k | if (!isImm()) return false; |
1063 | 1.23k | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1064 | 1.23k | if (!CE) return false; |
1065 | 1.01k | int64_t Value = CE->getValue(); |
1066 | 1.01k | return ARM_AM::getSOImmVal(Value) == -1 && |
1067 | 1.01k | ARM_AM::getSOImmVal(-Value) != -1; |
1068 | 1.23k | } |
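isModImmNot and isModImmNeg accept a constant only when it does not encode as a plain ARM modified immediate but its bitwise complement (respectively its negation) does, which is what lets the matcher fall back to a complementary instruction form (for example MVN where MOV cannot encode the value). The sketch below restates the encodability test that ARM_AM::getSOImmVal performs, namely an 8-bit value rotated right by an even amount; it is illustrative, not the library routine.

    #include <cstdint>

    // True if V is expressible as an 8-bit immediate rotated right by an even
    // amount within a 32-bit word (the A32 "modified immediate" form).
    static bool isARMModifiedImm(uint32_t V) {
      for (unsigned R = 0; R < 32; R += 2) {
        // Rotate V left by R bits; R == 0 is handled separately to avoid a
        // 32-bit shift, which would be undefined behaviour.
        uint32_t Rotated = R == 0 ? V : ((V << R) | (V >> (32 - R)));
        if (Rotated <= 0xFF)
          return true;        // V == ROR(Rotated, R)
      }
      return false;
    }

    // Example: isARMModifiedImm(0xFFFFFF0F) is false, but its complement 0xF0
    // is encodable -- exactly the case isModImmNot is looking for.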
1069 | 12 | bool isBitfield() const { return Kind == k_BitfieldDescriptor; } |
1070 | 52 | bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; } |
1071 | 1 | bool isPostIdxReg() const { |
1072 | 1 | return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift; |
1073 | 1 | } |
1074 | 947 | bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const { |
1075 | 947 | if (!isMem()) |
1076 | 353 | return false; |
1077 | | // No offset of any kind. |
1078 | 594 | return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr && |
1079 | 594 | (alignOK || Memory.Alignment == Alignment); |
1080 | 947 | } |
1081 | 1.48k | bool isMemPCRelImm12() const { |
1082 | 1.48k | if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) |
1083 | 1.12k | return false; |
1084 | | // Base register must be PC. |
1085 | 362 | if (Memory.BaseRegNum != ARM::PC) |
1086 | 353 | return false; |
1087 | | // Immediate offset in range [-4095, 4095]. |
1088 | 9 | if (!Memory.OffsetImm) return true; |
1089 | 0 | int64_t Val = Memory.OffsetImm->getValue(); |
1090 | 0 | return (Val > -4096 && Val < 4096) || (Val == INT32_MIN); |
1091 | 9 | } |
1092 | 0 | bool isAlignedMemory() const { |
1093 | 0 | return isMemNoOffset(true); |
1094 | 0 | } |
1095 | 0 | bool isAlignedMemoryNone() const { |
1096 | 0 | return isMemNoOffset(false, 0); |
1097 | 0 | } |
1098 | 0 | bool isDupAlignedMemoryNone() const { |
1099 | 0 | return isMemNoOffset(false, 0); |
1100 | 0 | } |
1101 | 0 | bool isAlignedMemory16() const { |
1102 | 0 | if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2. |
1103 | 0 | return true; |
1104 | 0 | return isMemNoOffset(false, 0); |
1105 | 0 | } |
1106 | 0 | bool isDupAlignedMemory16() const { |
1107 | 0 | if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2. |
1108 | 0 | return true; |
1109 | 0 | return isMemNoOffset(false, 0); |
1110 | 0 | } |
1111 | 0 | bool isAlignedMemory32() const { |
1112 | 0 | if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4. |
1113 | 0 | return true; |
1114 | 0 | return isMemNoOffset(false, 0); |
1115 | 0 | } |
1116 | 0 | bool isDupAlignedMemory32() const { |
1117 | 0 | if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4. |
1118 | 0 | return true; |
1119 | 0 | return isMemNoOffset(false, 0); |
1120 | 0 | } |
1121 | 0 | bool isAlignedMemory64() const { |
1122 | 0 | if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8. |
1123 | 0 | return true; |
1124 | 0 | return isMemNoOffset(false, 0); |
1125 | 0 | } |
1126 | 0 | bool isDupAlignedMemory64() const { |
1127 | 0 | if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8. |
1128 | 0 | return true; |
1129 | 0 | return isMemNoOffset(false, 0); |
1130 | 0 | } |
1131 | 0 | bool isAlignedMemory64or128() const { |
1132 | 0 | if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8. |
1133 | 0 | return true; |
1134 | 0 | if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16. |
1135 | 0 | return true; |
1136 | 0 | return isMemNoOffset(false, 0); |
1137 | 0 | } |
1138 | 0 | bool isDupAlignedMemory64or128() const { |
1139 | 0 | if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8. |
1140 | 0 | return true; |
1141 | 0 | if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16. |
1142 | 0 | return true; |
1143 | 0 | return isMemNoOffset(false, 0); |
1144 | 0 | } |
1145 | 0 | bool isAlignedMemory64or128or256() const { |
1146 | 0 | if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8. |
1147 | 0 | return true; |
1148 | 0 | if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16. |
1149 | 0 | return true; |
1150 | 0 | if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32. |
1151 | 0 | return true; |
1152 | 0 | return isMemNoOffset(false, 0); |
1153 | 0 | } |
1154 | 0 | bool isAddrMode2() const { |
1155 | 0 | if (!isMem() || Memory.Alignment != 0) return false; |
1156 | | // Check for register offset. |
1157 | 0 | if (Memory.OffsetRegNum) return true; |
1158 | | // Immediate offset in range [-4095, 4095]. |
1159 | 0 | if (!Memory.OffsetImm) return true; |
1160 | 0 | int64_t Val = Memory.OffsetImm->getValue(); |
1161 | 0 | return Val > -4096 && Val < 4096; |
1162 | 0 | } |
1163 | 190 | bool isAM2OffsetImm() const { |
1164 | 190 | if (!isImm()) return false; |
1165 | | // Immediate offset in range [-4095, 4095]. |
1166 | 145 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1167 | 145 | if (!CE) return false; |
1168 | 138 | int64_t Val = CE->getValue(); |
1169 | 138 | return (Val == INT32_MIN) || (Val > -4096 && Val < 4096); |
1170 | 145 | } |
1171 | 1.60k | bool isAddrMode3() const { |
1172 | | // If we have an immediate that's not a constant, treat it as a label |
1173 | | // reference needing a fixup. If it is a constant, it's something else |
1174 | | // and we reject it. |
1175 | 1.60k | if (isImm() && !isa<MCConstantExpr>(getImm())) |
1176 | 1.22k | return true; |
1177 | 386 | if (!isMem() || Memory.Alignment != 0) return false; |
1178 | | // No shifts are legal for AM3. |
1179 | 352 | if (Memory.ShiftType != ARM_AM::no_shift) return false; |
1180 | | // Check for register offset. |
1181 | 352 | if (Memory.OffsetRegNum) return true; |
1182 | | // Immediate offset in range [-255, 255]. |
1183 | 348 | if (!Memory.OffsetImm) return true; |
1184 | 5 | int64_t Val = Memory.OffsetImm->getValue(); |
1185 | | // The #-0 offset is encoded as INT32_MIN, and we have to check |
1186 | | // for this too. |
1187 | 5 | return (Val > -256 && Val < 256) || Val == INT32_MIN; |
1188 | 348 | } |
1189 | 156 | bool isAM3Offset() const { |
1190 | 156 | if (Kind != k_Immediate && Kind != k_PostIndexRegister) |
1191 | 3 | return false; |
1192 | 153 | if (Kind == k_PostIndexRegister) |
1193 | 16 | return PostIdxReg.ShiftTy == ARM_AM::no_shift; |
1194 | | // Immediate offset in range [-255, 255]. |
1195 | 137 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1196 | 137 | if (!CE) return false; |
1197 | 133 | int64_t Val = CE->getValue(); |
1198 | | // Special case, #-0 is INT32_MIN. |
1199 | 133 | return (Val > -256 && Val < 256) || Val == INT32_MIN; |
1200 | 137 | } |
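The "Val == INT32_MIN" escape hatch in isAddrMode3 and isAM3Offset implements the #-0 convention mentioned in the comments: negative zero has the same magnitude as zero but selects the subtract form of the offset (the 'U' bit noted in MemoryOp), so it is kept distinguishable by storing it as INT32_MIN. A minimal sketch of that convention, with assumed helper names rather than parser code:

    #include <cstdint>

    // Store a parsed "#-0" as INT32_MIN so later checks can tell it apart
    // from a plain zero offset.
    static int64_t encodeParsedOffset(int64_t Magnitude, bool Negative) {
      if (Negative && Magnitude == 0)
        return INT32_MIN;      // "#-0": zero magnitude, subtract form
      return Negative ? -Magnitude : Magnitude;
    }

    // Mirrors the AM3 range test above: [-255, 255], or the #-0 sentinel.
    static bool isValidAM3Offset(int64_t Val) {
      return (Val > -256 && Val < 256) || Val == INT32_MIN;
    }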
1201 | 1.58k | bool isAddrMode5() const { |
1202 | | // If we have an immediate that's not a constant, treat it as a label |
1203 | | // reference needing a fixup. If it is a constant, it's something else |
1204 | | // and we reject it. |
1205 | 1.58k | if (isImm() && !isa<MCConstantExpr>(getImm())) |
1206 | 1.54k | return true; |
1207 | 40 | if (!isMem() || Memory.Alignment != 0) return false; |
1208 | | // Check for register offset. |
1209 | 1 | if (Memory.OffsetRegNum) return false; |
1210 | | // Immediate offset in range [-1020, 1020] and a multiple of 4. |
1211 | 1 | if (!Memory.OffsetImm) return true; |
1212 | 1 | int64_t Val = Memory.OffsetImm->getValue(); |
1213 | 1 | return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) || |
1214 | 1 | Val == INT32_MIN; |
1215 | 1 | } |
1216 | 0 | bool isAddrMode5FP16() const { |
1217 | | // If we have an immediate that's not a constant, treat it as a label |
1218 | | // reference needing a fixup. If it is a constant, it's something else |
1219 | | // and we reject it. |
1220 | 0 | if (isImm() && !isa<MCConstantExpr>(getImm())) |
1221 | 0 | return true; |
1222 | 0 | if (!isMem() || Memory.Alignment != 0) return false; |
1223 | | // Check for register offset. |
1224 | 0 | if (Memory.OffsetRegNum) return false; |
1225 | | // Immediate offset in range [-510, 510] and a multiple of 2. |
1226 | 0 | if (!Memory.OffsetImm) return true; |
1227 | 0 | int64_t Val = Memory.OffsetImm->getValue(); |
1228 | 0 | return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) || Val == INT32_MIN; |
1229 | 0 | } |
1230 | 4 | bool isMemTBB() const { |
1231 | 4 | if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative || |
1232 | 4 | Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0) |
1233 | 4 | return false; |
1234 | 0 | return true; |
1235 | 4 | } |
1236 | 4 | bool isMemTBH() const { |
1237 | 4 | if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative || |
1238 | 4 | Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 || |
1239 | 4 | Memory.Alignment != 0 ) |
1240 | 4 | return false; |
1241 | 0 | return true; |
1242 | 4 | } |
1243 | 934 | bool isMemRegOffset() const { |
1244 | 934 | if (!isMem() || !Memory.OffsetRegNum || Memory.Alignment != 0) |
1245 | 903 | return false; |
1246 | 31 | return true; |
1247 | 934 | } |
1248 | 1.58k | bool isT2MemRegOffset() const { |
1249 | 1.58k | if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative || |
1250 | 1.58k | Memory.Alignment != 0) |
1251 | 1.56k | return false; |
1252 | | // Only lsl #{0, 1, 2, 3} allowed. |
1253 | 22 | if (Memory.ShiftType == ARM_AM::no_shift) |
1254 | 22 | return true; |
1255 | 0 | if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3) |
1256 | 0 | return false; |
1257 | 0 | return true; |
1258 | 0 | } |
1259 | 5.37k | bool isMemThumbRR() const { |
1260 | | // Thumb reg+reg addressing is simple. Just two registers, a base and |
1261 | | // an offset. No shifts, negations or any other complicating factors. |
1262 | 5.37k | if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative || |
1263 | 5.37k | Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0) |
1264 | 5.34k | return false; |
1265 | 37 | return isARMLowRegister(Memory.BaseRegNum) && |
1266 | 37 | (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum)); |
1267 | 5.37k | } |
1268 | 1.93k | bool isMemThumbRIs4() const { |
1269 | 1.93k | if (!isMem() || Memory.OffsetRegNum != 0 || |
1270 | 1.93k | !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) |
1271 | 1.69k | return false; |
1272 | | // Immediate offset, multiple of 4 in range [0, 124]. |
1273 | 238 | if (!Memory.OffsetImm) return true; |
1274 | 17 | int64_t Val = Memory.OffsetImm->getValue(); |
1275 | 17 | return Val >= 0 && Val <= 124 && (Val % 4) == 0; |
1276 | 238 | } |
1277 | 3.18k | bool isMemThumbRIs2() const { |
1278 | 3.18k | if (!isMem() || Memory.OffsetRegNum != 0 || |
1279 | 3.18k | !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) |
1280 | 3.16k | return false; |
1281 |    | // Immediate offset, multiple of 2 in range [0, 62]. |
1282 | 21 | if (!Memory.OffsetImm) return true; |
1283 | 0 | int64_t Val = Memory.OffsetImm->getValue(); |
1284 | 0 | return Val >= 0 && Val <= 62 && (Val % 2) == 0; |
1285 | 21 | } |
1286 | 174 | bool isMemThumbRIs1() const { |
1287 | 174 | if (!isMem() || Memory.OffsetRegNum != 0 || |
1288 | 174 | !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) |
1289 | 81 | return false; |
1290 | | // Immediate offset in range [0, 31]. |
1291 | 93 | if (!Memory.OffsetImm) return true; |
1292 | 1 | int64_t Val = Memory.OffsetImm->getValue(); |
1293 | 1 | return Val >= 0 && Val <= 31; |
1294 | 93 | } |
1295 | 1.90k | bool isMemThumbSPI() const { |
1296 | 1.90k | if (!isMem() || Memory.OffsetRegNum != 0 || |
1297 | 1.90k | Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0) |
1298 | 1.90k | return false; |
1299 | | // Immediate offset, multiple of 4 in range [0, 1020]. |
1300 | 0 | if (!Memory.OffsetImm) return true; |
1301 | 0 | int64_t Val = Memory.OffsetImm->getValue(); |
1302 | 0 | return Val >= 0 && Val <= 1020 && (Val % 4) == 0; |
1303 | 0 | } |
1304 | 340 | bool isMemImm8s4Offset() const { |
1305 | | // If we have an immediate that's not a constant, treat it as a label |
1306 | | // reference needing a fixup. If it is a constant, it's something else |
1307 | | // and we reject it. |
1308 | 340 | if (isImm() && !isa<MCConstantExpr>(getImm())) |
1309 | 283 | return true; |
1310 | 57 | if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) |
1311 | 18 | return false; |
1312 | | // Immediate offset a multiple of 4 in range [-1020, 1020]. |
1313 | 39 | if (!Memory.OffsetImm) return true; |
1314 | 5 | int64_t Val = Memory.OffsetImm->getValue(); |
1315 | | // Special case, #-0 is INT32_MIN. |
1316 | 5 | return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) || Val == INT32_MIN; |
1317 | 39 | } |
1318 | 1 | bool isMemImm0_1020s4Offset() const { |
1319 | 1 | if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) |
1320 | 1 | return false; |
1321 | | // Immediate offset a multiple of 4 in range [0, 1020]. |
1322 | 0 | if (!Memory.OffsetImm) return true; |
1323 | 0 | int64_t Val = Memory.OffsetImm->getValue(); |
1324 | 0 | return Val >= 0 && Val <= 1020 && (Val & 3) == 0; |
1325 | 0 | } |
1326 | 432 | bool isMemImm8Offset() const { |
1327 | 432 | if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) |
1328 | 92 | return false; |
1329 | | // Base reg of PC isn't allowed for these encodings. |
1330 | 340 | if (Memory.BaseRegNum == ARM::PC) return false; |
1331 | | // Immediate offset in range [-255, 255]. |
1332 | 331 | if (!Memory.OffsetImm) return true; |
1333 | 5 | int64_t Val = Memory.OffsetImm->getValue(); |
1334 | 5 | return (Val == INT32_MIN) || (Val > -256 && Val < 256); |
1335 | 331 | } |
1336 | 43 | bool isMemPosImm8Offset() const { |
1337 | 43 | if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) |
1338 | 5 | return false; |
1339 | | // Immediate offset in range [0, 255]. |
1340 | 38 | if (!Memory.OffsetImm) return true; |
1341 | 0 | int64_t Val = Memory.OffsetImm->getValue(); |
1342 | 0 | return Val >= 0 && Val < 256; |
1343 | 38 | } |
1344 | 1.58k | bool isMemNegImm8Offset() const { |
1345 | 1.58k | if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) |
1346 | 1.18k | return false; |
1347 | | // Base reg of PC isn't allowed for these encodings. |
1348 | 399 | if (Memory.BaseRegNum == ARM::PC) return false; |
1349 | | // Immediate offset in range [-255, -1]. |
1350 | 390 | if (!Memory.OffsetImm) return false; |
1351 | 17 | int64_t Val = Memory.OffsetImm->getValue(); |
1352 | 17 | return (Val == INT32_MIN) || (Val > -256 && Val < 0); |
1353 | 390 | } |
1354 | 1.65k | bool isMemUImm12Offset() const { |
1355 | 1.65k | if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) |
1356 | 1.18k | return false; |
1357 | | // Immediate offset in range [0, 4095]. |
1358 | 463 | if (!Memory.OffsetImm) return true; |
1359 | 23 | int64_t Val = Memory.OffsetImm->getValue(); |
1360 | 23 | return (Val >= 0 && Val < 4096); |
1361 | 463 | } |
1362 | 3.14k | bool isMemImm12Offset() const { |
1363 | | // If we have an immediate that's not a constant, treat it as a label |
1364 | | // reference needing a fixup. If it is a constant, it's something else |
1365 | | // and we reject it. |
1366 | 3.14k | if (isImm() && !isa<MCConstantExpr>(getImm())) |
1367 | 1.88k | return true; |
1368 | | |
1369 | 1.26k | if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) |
1370 | 674 | return false; |
1371 | | // Immediate offset in range [-4095, 4095]. |
1372 | 588 | if (!Memory.OffsetImm) return true; |
1373 | 37 | int64_t Val = Memory.OffsetImm->getValue(); |
1374 | 37 | return (Val > -4096 && Val < 4096) || (Val == INT32_MIN); |
1375 | 588 | } |
1376 | 1 | bool isPostIdxImm8() const { |
1377 | 1 | if (!isImm()) return false; |
1378 | 1 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1379 | 1 | if (!CE) return false; |
1380 | 1 | int64_t Val = CE->getValue(); |
1381 | 1 | return (Val > -256 && Val < 256) || (Val == INT32_MIN); |
1382 | 1 | } |
1383 | 0 | bool isPostIdxImm8s4() const { |
1384 | 0 | if (!isImm()) return false; |
1385 | 0 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1386 | 0 | if (!CE) return false; |
1387 | 0 | int64_t Val = CE->getValue(); |
1388 | 0 | return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) || |
1389 | 0 | (Val == INT32_MIN); |
1390 | 0 | } |
1391 | | |
1392 | 2.33k | bool isMSRMask() const { return Kind == k_MSRMask; } |
1393 | 1.82k | bool isBankedReg() const { return Kind == k_BankedReg; } |
1394 | 489 | bool isProcIFlags() const { return Kind == k_ProcIFlags; } |
1395 | | |
1396 | | // NEON operands. |
1397 | 619 | bool isSingleSpacedVectorList() const { |
1398 | 619 | return Kind == k_VectorList && !VectorList.isDoubleSpaced; |
1399 | 619 | } |
1400 | 105 | bool isDoubleSpacedVectorList() const { |
1401 | 105 | return Kind == k_VectorList && VectorList.isDoubleSpaced; |
1402 | 105 | } |
1403 | 88 | bool isVecListOneD() const { |
1404 | 88 | if (!isSingleSpacedVectorList()) return false; |
1405 | 23 | return VectorList.Count == 1; |
1406 | 88 | } |
1407 | | |
1408 | 160 | bool isVecListDPair() const { |
1409 | 160 | if (!isSingleSpacedVectorList()) return false; |
1410 | 41 | return (ARMMCRegisterClasses[ARM::DPairRegClassID] |
1411 | 41 | .contains(VectorList.RegNum)); |
1412 | 160 | } |
1413 | | |
1414 | 139 | bool isVecListThreeD() const { |
1415 | 139 | if (!isSingleSpacedVectorList()) return false; |
1416 | 32 | return VectorList.Count == 3; |
1417 | 139 | } |
1418 | | |
1419 | 214 | bool isVecListFourD() const { |
1420 | 214 | if (!isSingleSpacedVectorList()) return false; |
1421 | 53 | return VectorList.Count == 4; |
1422 | 214 | } |
1423 | | |
1424 | 72 | bool isVecListDPairSpaced() const { |
1425 | 72 | if (Kind != k_VectorList) return false; |
1426 | 18 | if (isSingleSpacedVectorList()) return false; |
1427 | 0 | return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID] |
1428 | 0 | .contains(VectorList.RegNum)); |
1429 | 18 | } |
1430 | | |
1431 | 51 | bool isVecListThreeQ() const { |
1432 | 51 | if (!isDoubleSpacedVectorList()) return false; |
1433 | 0 | return VectorList.Count == 3; |
1434 | 51 | } |
1435 | | |
1436 | 54 | bool isVecListFourQ() const { |
1437 | 54 | if (!isDoubleSpacedVectorList()) return false; |
1438 | 0 | return VectorList.Count == 4; |
1439 | 54 | } |
1440 | | |
1441 | 282 | bool isSingleSpacedVectorAllLanes() const { |
1442 | 282 | return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced; |
1443 | 282 | } |
1444 | 144 | bool isDoubleSpacedVectorAllLanes() const { |
1445 | 144 | return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced; |
1446 | 144 | } |
1447 | 69 | bool isVecListOneDAllLanes() const { |
1448 | 69 | if (!isSingleSpacedVectorAllLanes()) return false; |
1449 | 9 | return VectorList.Count == 1; |
1450 | 69 | } |
1451 | | |
1452 | 126 | bool isVecListDPairAllLanes() const { |
1453 | 126 | if (!isSingleSpacedVectorAllLanes()) return false; |
1454 | 12 | return (ARMMCRegisterClasses[ARM::DPairRegClassID] |
1455 | 12 | .contains(VectorList.RegNum)); |
1456 | 126 | } |
1457 | | |
1458 | 57 | bool isVecListDPairSpacedAllLanes() const { |
1459 | 57 | if (!isDoubleSpacedVectorAllLanes()) return false; |
1460 | 0 | return VectorList.Count == 2; |
1461 | 57 | } |
1462 | | |
1463 | 39 | bool isVecListThreeDAllLanes() const { |
1464 | 39 | if (!isSingleSpacedVectorAllLanes()) return false; |
1465 | 0 | return VectorList.Count == 3; |
1466 | 39 | } |
1467 | | |
1468 | 39 | bool isVecListThreeQAllLanes() const { |
1469 | 39 | if (!isDoubleSpacedVectorAllLanes()) return false; |
1470 | 0 | return VectorList.Count == 3; |
1471 | 39 | } |
1472 | | |
1473 | 48 | bool isVecListFourDAllLanes() const { |
1474 | 48 | if (!isSingleSpacedVectorAllLanes()) return false; |
1475 | 3 | return VectorList.Count == 4; |
1476 | 48 | } |
1477 | | |
1478 | 48 | bool isVecListFourQAllLanes() const { |
1479 | 48 | if (!isDoubleSpacedVectorAllLanes()) return false; |
1480 | 0 | return VectorList.Count == 4; |
1481 | 48 | } |
1482 | | |
1483 | 258 | bool isSingleSpacedVectorIndexed() const { |
1484 | 258 | return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced; |
1485 | 258 | } |
1486 | 126 | bool isDoubleSpacedVectorIndexed() const { |
1487 | 126 | return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced; |
1488 | 126 | } |
1489 | 15 | bool isVecListOneDByteIndexed() const { |
1490 | 15 | if (!isSingleSpacedVectorIndexed()) return false; |
1491 | 0 | return VectorList.Count == 1 && VectorList.LaneIndex <= 7; |
1492 | 15 | } |
1493 | | |
1494 | 15 | bool isVecListOneDHWordIndexed() const { |
1495 | 15 | if (!isSingleSpacedVectorIndexed()) return false; |
1496 | 0 | return VectorList.Count == 1 && VectorList.LaneIndex <= 3; |
1497 | 15 | } |
1498 | | |
1499 | 51 | bool isVecListOneDWordIndexed() const { |
1500 | 51 | if (!isSingleSpacedVectorIndexed()) return false; |
1501 | 9 | return VectorList.Count == 1 && VectorList.LaneIndex <= 1; |
1502 | 51 | } |
1503 | | |
1504 | 21 | bool isVecListTwoDByteIndexed() const { |
1505 | 21 | if (!isSingleSpacedVectorIndexed()) return false; |
1506 | 0 | return VectorList.Count == 2 && VectorList.LaneIndex <= 7; |
1507 | 21 | } |
1508 | | |
1509 | 15 | bool isVecListTwoDHWordIndexed() const { |
1510 | 15 | if (!isSingleSpacedVectorIndexed()) return false; |
1511 | 0 | return VectorList.Count == 2 && VectorList.LaneIndex <= 3; |
1512 | 15 | } |
1513 | | |
1514 | 36 | bool isVecListTwoQWordIndexed() const { |
1515 | 36 | if (!isDoubleSpacedVectorIndexed()) return false; |
1516 | 0 | return VectorList.Count == 2 && VectorList.LaneIndex <= 1; |
1517 | 36 | } |
1518 | | |
1519 | 15 | bool isVecListTwoQHWordIndexed() const { |
1520 | 15 | if (!isDoubleSpacedVectorIndexed()) return false; |
1521 | 0 | return VectorList.Count == 2 && VectorList.LaneIndex <= 3; |
1522 | 15 | } |
1523 | | |
1524 | 36 | bool isVecListTwoDWordIndexed() const { |
1525 | 36 | if (!isSingleSpacedVectorIndexed()) return false; |
1526 | 6 | return VectorList.Count == 2 && VectorList.LaneIndex <= 1; |
1527 | 36 | } |
1528 | | |
1529 | 18 | bool isVecListThreeDByteIndexed() const { |
1530 | 18 | if (!isSingleSpacedVectorIndexed()) return false; |
1531 | 0 | return VectorList.Count == 3 && VectorList.LaneIndex <= 7; |
1532 | 18 | } |
1533 | | |
1534 | 9 | bool isVecListThreeDHWordIndexed() const { |
1535 | 9 | if (!isSingleSpacedVectorIndexed()) return false; |
1536 | 0 | return VectorList.Count == 3 && VectorList.LaneIndex <= 3; |
1537 | 9 | } |
1538 | | |
1539 | 24 | bool isVecListThreeQWordIndexed() const { |
1540 | 24 | if (!isDoubleSpacedVectorIndexed()) return false; |
1541 | 0 | return VectorList.Count == 3 && VectorList.LaneIndex <= 1; |
1542 | 24 | } |
1543 | | |
1544 | 9 | bool isVecListThreeQHWordIndexed() const { |
1545 | 9 | if (!isDoubleSpacedVectorIndexed()) return false; |
1546 | 0 | return VectorList.Count == 3 && VectorList.LaneIndex <= 3; |
1547 | 9 | } |
1548 | | |
1549 | 24 | bool isVecListThreeDWordIndexed() const { |
1550 | 24 | if (!isSingleSpacedVectorIndexed()) return false; |
1551 | 3 | return VectorList.Count == 3 && VectorList.LaneIndex <= 1; |
1552 | 24 | } |
1553 | | |
1554 | 12 | bool isVecListFourDByteIndexed() const { |
1555 | 12 | if (!isSingleSpacedVectorIndexed()) return false; |
1556 | 0 | return VectorList.Count == 4 && VectorList.LaneIndex <= 7; |
1557 | 12 | } |
1558 | | |
1559 | 6 | bool isVecListFourDHWordIndexed() const { |
1560 | 6 | if (!isSingleSpacedVectorIndexed()) return false; |
1561 | 0 | return VectorList.Count == 4 && VectorList.LaneIndex <= 3; |
1562 | 6 | } |
1563 | | |
1564 | 36 | bool isVecListFourQWordIndexed() const { |
1565 | 36 | if (!isDoubleSpacedVectorIndexed()) return false; |
1566 | 0 | return VectorList.Count == 4 && VectorList.LaneIndex <= 1; |
1567 | 36 | } |
1568 | | |
1569 | 6 | bool isVecListFourQHWordIndexed() const { |
1570 | 6 | if (!isDoubleSpacedVectorIndexed()) return false; |
1571 | 0 | return VectorList.Count == 4 && VectorList.LaneIndex <= 3; |
1572 | 6 | } |
1573 | | |
1574 | 36 | bool isVecListFourDWordIndexed() const { |
1575 | 36 | if (!isSingleSpacedVectorIndexed()) return false; |
1576 | 6 | return VectorList.Count == 4 && VectorList.LaneIndex <= 1; |
1577 | 36 | } |
1578 | | |
1579 | 11 | bool isVectorIndex8() const { |
1580 | 11 | if (Kind != k_VectorIndex) return false; |
1581 | 0 | return VectorIndex.Val < 8; |
1582 | 11 | } |
1583 | 121 | bool isVectorIndex16() const { |
1584 | 121 | if (Kind != k_VectorIndex) return false; |
1585 | 0 | return VectorIndex.Val < 4; |
1586 | 121 | } |
1587 | 377 | bool isVectorIndex32() const { |
1588 | 377 | if (Kind != k_VectorIndex) return false; |
1589 | 0 | return VectorIndex.Val < 2; |
1590 | 377 | } |
1591 | | |
1592 | 14 | bool isNEONi8splat() const { |
1593 | 14 | if (!isImm()) return false; |
1594 | 12 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1595 | | // Must be a constant. |
1596 | 12 | if (!CE) return false; |
1597 | 9 | int64_t Value = CE->getValue(); |
1598 |    | // i8 value splatted across 8 bytes. The immediate is just the 8-bit |
1599 | | // value. |
1600 | 9 | return Value >= 0 && Value < 256; |
1601 | 12 | } |
1602 | | |
1603 | 155 | bool isNEONi16splat() const { |
1604 | 155 | if (isNEONByteReplicate(2)) |
1605 | 7 | return false; // Leave that to byte replication and forbid it by default. |
1606 | 148 | if (!isImm()) |
1607 | 1 | return false; |
1608 | 147 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1609 | | // Must be a constant. |
1610 | 147 | if (!CE) return false; |
1611 | 143 | unsigned Value = CE->getValue(); |
1612 | 143 | return ARM_AM::isNEONi16splat(Value); |
1613 | 147 | } |
1614 | | |
1615 | 4 | bool isNEONi16splatNot() const { |
1616 | 4 | if (!isImm()) |
1617 | 0 | return false; |
1618 | 4 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1619 | | // Must be a constant. |
1620 | 4 | if (!CE) return false; |
1621 | 2 | unsigned Value = CE->getValue(); |
1622 | 2 | return ARM_AM::isNEONi16splat(~Value & 0xffff); |
1623 | 4 | } |
1624 | | |
1625 | 17 | bool isNEONi32splat() const { |
1626 | 17 | if (isNEONByteReplicate(4)) |
1627 | 4 | return false; // Leave that to byte replication and forbid it by default. |
1628 | 13 | if (!isImm()) |
1629 | 2 | return false; |
1630 | 11 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1631 | | // Must be a constant. |
1632 | 11 | if (!CE) return false; |
1633 | 8 | unsigned Value = CE->getValue(); |
1634 | 8 | return ARM_AM::isNEONi32splat(Value); |
1635 | 11 | } |
1636 | | |
1637 | 5 | bool isNEONi32splatNot() const { |
1638 | 5 | if (!isImm()) |
1639 | 1 | return false; |
1640 | 4 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1641 | | // Must be a constant. |
1642 | 4 | if (!CE) return false; |
1643 | 4 | unsigned Value = CE->getValue(); |
1644 | 4 | return ARM_AM::isNEONi32splat(~Value); |
1645 | 4 | } |
1646 | | |
1647 | 1.70k | bool isNEONByteReplicate(unsigned NumBytes) const { |
1648 | 1.70k | if (!isImm()) |
1649 | 33 | return false; |
1650 | 1.67k | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1651 | | // Must be a constant. |
1652 | 1.67k | if (!CE) |
1653 | 24 | return false; |
1654 | 1.65k | int64_t Value = CE->getValue(); |
1655 | 1.65k | if (!Value) |
1656 | 30 | return false; // Don't bother with zero. |
1657 | | |
1658 | 1.62k | unsigned char B = Value & 0xff; |
1659 | 1.74k | for (unsigned i = 1; i < NumBytes; ++i) { |
1660 | 1.72k | Value >>= 8; |
1661 | 1.72k | if ((Value & 0xff) != B) |
1662 | 1.59k | return false; |
1663 | 1.72k | } |
1664 | 27 | return true; |
1665 | 1.62k | } |
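Byte replication, as tested above, means the low byte of the constant repeats across all NumBytes bytes. A standalone mirror of that loop with a few concrete values (illustrative only, not part of the parser):

#include <cstdint>

static bool bytesReplicate(uint64_t Value, unsigned NumBytes) {
  if (Value == 0)
    return false;                        // zero is handled by other patterns
  unsigned char B = Value & 0xff;
  for (unsigned i = 1; i < NumBytes; ++i)
    if (((Value >> (8 * i)) & 0xff) != B)
      return false;
  return true;
}
// bytesReplicate(0x4242, 2)     -> true  (0x42 repeated)
// bytesReplicate(0xabababab, 4) -> true
// bytesReplicate(0xab00ab00, 4) -> false (bytes differ)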
1666 | 131 | bool isNEONi16ByteReplicate() const { return isNEONByteReplicate(2); } |
1667 | 541 | bool isNEONi32ByteReplicate() const { return isNEONByteReplicate(4); } |
1668 | 863 | bool isNEONi32vmov() const { |
1669 | 863 | if (isNEONByteReplicate(4)) |
1670 | 5 | return false; // Let it be classified as the byte-replicate case. |
1671 | 858 | if (!isImm()) |
1672 | 27 | return false; |
1673 | 831 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1674 | | // Must be a constant. |
1675 | 831 | if (!CE) |
1676 | 9 | return false; |
1677 | 822 | int64_t Value = CE->getValue(); |
1678 | | // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X, |
1679 | | // for VMOV/VMVN only, 00Xf or 0Xff are also accepted. |
1680 |    | // FIXME: This is probably wrong and a copy-and-paste from the previous example |
1681 | 822 | return (Value >= 0 && Value < 256) || |
1682 | 822 | (Value >= 0x0100 && Value <= 0xff00) || |
1683 | 822 | (Value >= 0x010000 && Value <= 0xff0000) || |
1684 | 822 | (Value >= 0x01000000 && Value <= 0xff000000) || |
1685 | 822 | (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) || |
1686 | 822 | (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff); |
1687 | 831 | } |
1688 | 540 | bool isNEONi32vmovNeg() const { |
1689 | 540 | if (!isImm()) return false; |
1690 | 538 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1691 | | // Must be a constant. |
1692 | 538 | if (!CE) return false; |
1693 | 533 | int64_t Value = ~CE->getValue(); |
1694 | | // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X, |
1695 | | // for VMOV/VMVN only, 00Xf or 0Xff are also accepted. |
1696 |    | // FIXME: This is probably wrong and a copy-and-paste from the previous example |
1697 | 533 | return (Value >= 0 && Value < 256) || |
1698 | 533 | (Value >= 0x0100 && Value <= 0xff00) || |
1699 | 533 | (Value >= 0x010000 && Value <= 0xff0000) || |
1700 | 533 | (Value >= 0x01000000 && Value <= 0xff000000) || |
1701 | 533 | (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) || |
1702 | 533 | (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff); |
1703 | 538 | } |
1704 | | |
1705 | 15 | bool isNEONi64splat() const { |
1706 | 15 | if (!isImm()) return false; |
1707 | 13 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1708 | | // Must be a constant. |
1709 | 13 | if (!CE) return false; |
1710 | 9 | uint64_t Value = CE->getValue(); |
1711 | | // i64 value with each byte being either 0 or 0xff. |
1712 | 41 | for (unsigned i = 0; i < 8; ++i, Value >>= 8) |
1713 | 37 | if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false; |
1714 | 4 | return true; |
1715 | 9 | } |
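The i64 splat form accepts any 64-bit constant whose eight bytes are each 0x00 or 0xff. A standalone mirror of that test, for illustration only:

#include <cstdint>

static bool isAllZeroOrFFBytes(uint64_t Value) {
  for (unsigned i = 0; i < 8; ++i, Value >>= 8)
    if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff)
      return false;
  return true;
}
// isAllZeroOrFFBytes(0x00ff00ff00ff00ffULL) -> true
// isAllZeroOrFFBytes(0xff00000000000012ULL) -> false (0x12 is neither 0x00 nor 0xff)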
1716 | | |
1717 | 58.9k | void addExpr(MCInst &Inst, const MCExpr *Expr) const { |
1718 | | // Add as immediates when possible. Null MCExpr = 0. |
1719 | 58.9k | if (!Expr) |
1720 | 0 | Inst.addOperand(MCOperand::createImm(0)); |
1721 | 58.9k | else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) |
1722 | 20.5k | Inst.addOperand(MCOperand::createImm(CE->getValue())); |
1723 | 38.3k | else |
1724 | 38.3k | Inst.addOperand(MCOperand::createExpr(Expr)); |
1725 | 58.9k | } |
1726 | | |
1727 | 92.8k | void addCondCodeOperands(MCInst &Inst, unsigned N) const { |
1728 | 92.8k | assert(N == 2 && "Invalid number of operands!"); |
1729 | 92.8k | Inst.addOperand(MCOperand::createImm(unsigned(getCondCode()))); |
1730 | 92.8k | unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR; |
1731 | 92.8k | Inst.addOperand(MCOperand::createReg(RegNum)); |
1732 | 92.8k | } |
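A predicated instruction therefore carries its condition as an operand pair: the condition code itself, plus CPSR when the instruction is actually conditional or register 0 when it is not. A minimal sketch of what this produces for a conditional move (opcode and data operands are assumed, not taken from this file):

// Hypothetical illustration of the operand pair appended above for "moveq".
MCInst Inst;
// ... opcode and data operands elided ...
Inst.addOperand(MCOperand::createImm(ARMCC::EQ));  // condition code
Inst.addOperand(MCOperand::createReg(ARM::CPSR));  // cc != AL, so CPSR is read
// For an unconditional ("al") instruction the register operand is 0 instead.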
1733 | | |
1734 | 559 | void addCoprocNumOperands(MCInst &Inst, unsigned N) const { |
1735 | 559 | assert(N == 1 && "Invalid number of operands!"); |
1736 | 559 | Inst.addOperand(MCOperand::createImm(getCoproc())); |
1737 | 559 | } |
1738 | | |
1739 | 559 | void addCoprocRegOperands(MCInst &Inst, unsigned N) const { |
1740 | 559 | assert(N == 1 && "Invalid number of operands!"); |
1741 | 559 | Inst.addOperand(MCOperand::createImm(getCoproc())); |
1742 | 559 | } |
1743 | | |
1744 | 0 | void addCoprocOptionOperands(MCInst &Inst, unsigned N) const { |
1745 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1746 | 0 | Inst.addOperand(MCOperand::createImm(CoprocOption.Val)); |
1747 | 0 | } |
1748 | | |
1749 | 3.57k | void addITMaskOperands(MCInst &Inst, unsigned N) const { |
1750 | 3.57k | assert(N == 1 && "Invalid number of operands!"); |
1751 | 3.57k | Inst.addOperand(MCOperand::createImm(ITMask.Mask)); |
1752 | 3.57k | } |
1753 | | |
1754 | 3.57k | void addITCondCodeOperands(MCInst &Inst, unsigned N) const { |
1755 | 3.57k | assert(N == 1 && "Invalid number of operands!"); |
1756 | 3.57k | Inst.addOperand(MCOperand::createImm(unsigned(getCondCode()))); |
1757 | 3.57k | } |
1758 | | |
1759 | 25.1k | void addCCOutOperands(MCInst &Inst, unsigned N) const { |
1760 | 25.1k | assert(N == 1 && "Invalid number of operands!"); |
1761 | 25.1k | Inst.addOperand(MCOperand::createReg(getReg())); |
1762 | 25.1k | } |
1763 | | |
1764 | 80.8k | void addRegOperands(MCInst &Inst, unsigned N) const { |
1765 | 80.8k | assert(N == 1 && "Invalid number of operands!"); |
1766 | 80.8k | Inst.addOperand(MCOperand::createReg(getReg())); |
1767 | 80.8k | } |
1768 | | |
1769 | 16 | void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const { |
1770 | 16 | assert(N == 3 && "Invalid number of operands!"); |
1771 | 16 | assert(isRegShiftedReg() && |
1772 | 16 | "addRegShiftedRegOperands() on non-RegShiftedReg!"); |
1773 | 16 | Inst.addOperand(MCOperand::createReg(RegShiftedReg.SrcReg)); |
1774 | 16 | Inst.addOperand(MCOperand::createReg(RegShiftedReg.ShiftReg)); |
1775 | 16 | Inst.addOperand(MCOperand::createImm( |
1776 | 16 | ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm))); |
1777 | 16 | } |
1778 | | |
1779 | 678 | void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const { |
1780 | 678 | assert(N == 2 && "Invalid number of operands!"); |
1781 | 678 | assert(isRegShiftedImm() && |
1782 | 678 | "addRegShiftedImmOperands() on non-RegShiftedImm!"); |
1783 | 678 | Inst.addOperand(MCOperand::createReg(RegShiftedImm.SrcReg)); |
1784 | | // Shift of #32 is encoded as 0 where permitted |
1785 | 678 | unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm); |
1786 | 678 | Inst.addOperand(MCOperand::createImm( |
1787 | 678 | ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm))); |
1788 | 678 | } |
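The #32 special case above only affects the immediate that gets packed; a couple of worked values (illustrative only):

// "r0, asr #32" -> RegShiftedImm.ShiftImm == 32 -> Imm passed to getSORegOpc is 0
// "r0, lsl #5"  -> Imm == 5 (passed through unchanged)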
1789 | | |
1790 | 0 | void addShifterImmOperands(MCInst &Inst, unsigned N) const { |
1791 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1792 | 0 | Inst.addOperand(MCOperand::createImm((ShifterImm.isASR << 5) | |
1793 | 0 | ShifterImm.Imm)); |
1794 | 0 | } |
1795 | | |
1796 | 0 | void addRegListOperands(MCInst &Inst, unsigned N) const { |
1797 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1798 | 0 | const SmallVectorImpl<unsigned> &RegList = getRegList(); |
1799 | 0 | for (SmallVectorImpl<unsigned>::const_iterator |
1800 | 0 | I = RegList.begin(), E = RegList.end(); I != E; ++I) |
1801 | 0 | Inst.addOperand(MCOperand::createReg(*I)); |
1802 | 0 | } |
1803 | | |
1804 | 0 | void addDPRRegListOperands(MCInst &Inst, unsigned N) const { |
1805 | 0 | addRegListOperands(Inst, N); |
1806 | 0 | } |
1807 | | |
1808 | 0 | void addSPRRegListOperands(MCInst &Inst, unsigned N) const { |
1809 | 0 | addRegListOperands(Inst, N); |
1810 | 0 | } |
1811 | | |
1812 | 0 | void addRotImmOperands(MCInst &Inst, unsigned N) const { |
1813 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1814 | | // Encoded as val>>3. The printer handles display as 8, 16, 24. |
1815 | 0 | Inst.addOperand(MCOperand::createImm(RotImm.Imm >> 3)); |
1816 | 0 | } |
1817 | | |
1818 | 6.51k | void addModImmOperands(MCInst &Inst, unsigned N) const { |
1819 | 6.51k | assert(N == 1 && "Invalid number of operands!"); |
1820 | | |
1821 | | // Support for fixups (MCFixup) |
1822 | 6.51k | if (isImm()) |
1823 | 1.99k | return addImmOperands(Inst, N); |
1824 | | |
1825 | 4.51k | Inst.addOperand(MCOperand::createImm(ModImm.Bits | (ModImm.Rot << 7))); |
1826 | 4.51k | } |
1827 | | |
1828 | 205 | void addModImmNotOperands(MCInst &Inst, unsigned N) const { |
1829 | 205 | assert(N == 1 && "Invalid number of operands!"); |
1830 | 205 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1831 | 205 | uint32_t Enc = ARM_AM::getSOImmVal(~CE->getValue()); |
1832 | 205 | Inst.addOperand(MCOperand::createImm(Enc)); |
1833 | 205 | } |
1834 | | |
1835 | 630 | void addModImmNegOperands(MCInst &Inst, unsigned N) const { |
1836 | 630 | assert(N == 1 && "Invalid number of operands!"); |
1837 | 630 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1838 | 630 | uint32_t Enc = ARM_AM::getSOImmVal(-CE->getValue()); |
1839 | 630 | Inst.addOperand(MCOperand::createImm(Enc)); |
1840 | 630 | } |
1841 | | |
1842 | 2 | void addBitfieldOperands(MCInst &Inst, unsigned N) const { |
1843 | 2 | assert(N == 1 && "Invalid number of operands!"); |
1844 | | // Munge the lsb/width into a bitfield mask. |
1845 | 2 | unsigned lsb = Bitfield.LSB; |
1846 | 2 | unsigned width = Bitfield.Width; |
1847 | | // Make a 32-bit mask w/ the referenced bits clear and all other bits set. |
1848 | 2 | uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >> |
1849 | 2 | (32 - (lsb + width))); |
1850 | 2 | Inst.addOperand(MCOperand::createImm(Mask)); |
1851 | 2 | } |
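One worked instance of the mask expression above, with lsb = 8 and width = 4 (illustrative arithmetic only):

//   (0xffffffffu >> 8)   == 0x00ffffff
//   ...  << (32 - 4)     == 0xf0000000
//   ...  >> (32 - (8+4)) == 0x00000f00   // bits [11:8] set
//   ~0x00000f00u         == 0xfffff0ff   // bits [11:8] clear, all others set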
1852 | | |
1853 | 57.0k | void addImmOperands(MCInst &Inst, unsigned N) const { |
1854 | 57.0k | assert(N == 1 && "Invalid number of operands!"); |
1855 | 57.0k | addExpr(Inst, getImm()); |
1856 | 57.0k | } |
1857 | | |
1858 | 0 | void addFBits16Operands(MCInst &Inst, unsigned N) const { |
1859 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1860 | 0 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1861 | 0 | Inst.addOperand(MCOperand::createImm(16 - CE->getValue())); |
1862 | 0 | } |
1863 | | |
1864 | 0 | void addFBits32Operands(MCInst &Inst, unsigned N) const { |
1865 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1866 | 0 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1867 | 0 | Inst.addOperand(MCOperand::createImm(32 - CE->getValue())); |
1868 | 0 | } |
1869 | | |
1870 | 289 | void addFPImmOperands(MCInst &Inst, unsigned N) const { |
1871 | 289 | assert(N == 1 && "Invalid number of operands!"); |
1872 | 289 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1873 | 289 | int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue())); |
1874 | 289 | Inst.addOperand(MCOperand::createImm(Val)); |
1875 | 289 | } |
1876 | | |
1877 | 0 | void addImm8s4Operands(MCInst &Inst, unsigned N) const { |
1878 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1879 | 0 | // FIXME: We really want to scale the value here, but the LDRD/STRD |
1880 | 0 | // instructions don't encode operands that way yet. |
1881 | 0 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1882 | 0 | Inst.addOperand(MCOperand::createImm(CE->getValue())); |
1883 | 0 | } |
1884 | | |
1885 | 6 | void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const { |
1886 | 6 | assert(N == 1 && "Invalid number of operands!"); |
1887 | | // The immediate is scaled by four in the encoding and is stored |
1888 | | // in the MCInst as such. Lop off the low two bits here. |
1889 | 6 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1890 | 6 | Inst.addOperand(MCOperand::createImm(CE->getValue() / 4)); |
1891 | 6 | } |
1892 | | |
1893 | 71 | void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const { |
1894 | 71 | assert(N == 1 && "Invalid number of operands!"); |
1895 | | // The immediate is scaled by four in the encoding and is stored |
1896 | | // in the MCInst as such. Lop off the low two bits here. |
1897 | 71 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1898 | 71 | Inst.addOperand(MCOperand::createImm(-(CE->getValue() / 4))); |
1899 | 71 | } |
1900 | | |
1901 | 1.24k | void addImm0_508s4Operands(MCInst &Inst, unsigned N) const { |
1902 | 1.24k | assert(N == 1 && "Invalid number of operands!"); |
1903 | | // The immediate is scaled by four in the encoding and is stored |
1904 | | // in the MCInst as such. Lop off the low two bits here. |
1905 | 1.24k | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1906 | 1.24k | Inst.addOperand(MCOperand::createImm(CE->getValue() / 4)); |
1907 | 1.24k | } |
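These *_s4 immediates are attached to the MCInst pre-scaled: the assembly value is divided by four, and additionally negated for the Neg variant. Worked values (illustrative only):

//   addImm0_508s4Operands:    #508 ->  508 / 4 ==  127
//   addImm0_508s4NegOperands: #8   -> -(8 / 4) ==   -2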
1908 | | |
1909 | 0 | void addImm1_16Operands(MCInst &Inst, unsigned N) const { |
1910 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1911 | | // The constant encodes as the immediate-1, and we store in the instruction |
1912 | | // the bits as encoded, so subtract off one here. |
1913 | 0 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1914 | 0 | Inst.addOperand(MCOperand::createImm(CE->getValue() - 1)); |
1915 | 0 | } |
1916 | | |
1917 | 0 | void addImm1_32Operands(MCInst &Inst, unsigned N) const { |
1918 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1919 | | // The constant encodes as the immediate-1, and we store in the instruction |
1920 | | // the bits as encoded, so subtract off one here. |
1921 | 0 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1922 | 0 | Inst.addOperand(MCOperand::createImm(CE->getValue() - 1)); |
1923 | 0 | } |
1924 | | |
1925 | 2.41k | void addImmThumbSROperands(MCInst &Inst, unsigned N) const { |
1926 | 2.41k | assert(N == 1 && "Invalid number of operands!"); |
1927 | | // The constant encodes as the immediate, except for 32, which encodes as |
1928 | | // zero. |
1929 | 2.41k | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1930 | 2.41k | unsigned Imm = CE->getValue(); |
1931 | 2.41k | Inst.addOperand(MCOperand::createImm((Imm == 32 ? 0 : Imm))); |
1932 | 2.41k | } |
1933 | | |
1934 | 0 | void addPKHASRImmOperands(MCInst &Inst, unsigned N) const { |
1935 | 0 | assert(N == 1 && "Invalid number of operands!"); |
1936 | | // An ASR value of 32 encodes as 0, so that's how we want to add it to |
1937 | | // the instruction as well. |
1938 | 0 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1939 | 0 | int Val = CE->getValue(); |
1940 | 0 | Inst.addOperand(MCOperand::createImm(Val == 32 ? 0 : Val)); |
1941 | 0 | } |
1942 | | |
1943 | 61 | void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const { |
1944 | 61 | assert(N == 1 && "Invalid number of operands!"); |
1945 | | // The operand is actually a t2_so_imm, but we have its bitwise |
1946 | | // negation in the assembly source, so twiddle it here. |
1947 | 61 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1948 | 61 | Inst.addOperand(MCOperand::createImm(~CE->getValue())); |
1949 | 61 | } |
1950 | | |
1951 | 156 | void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const { |
1952 | 156 | assert(N == 1 && "Invalid number of operands!"); |
1953 | | // The operand is actually a t2_so_imm, but we have its |
1954 | | // negation in the assembly source, so twiddle it here. |
1955 | 156 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1956 | 156 | Inst.addOperand(MCOperand::createImm(-CE->getValue())); |
1957 | 156 | } |
1958 | | |
1959 | 213 | void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const { |
1960 | 213 | assert(N == 1 && "Invalid number of operands!"); |
1961 | | // The operand is actually an imm0_4095, but we have its |
1962 | | // negation in the assembly source, so twiddle it here. |
1963 | 213 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1964 | 213 | Inst.addOperand(MCOperand::createImm(-CE->getValue())); |
1965 | 213 | } |
1966 | | |
1967 | 558 | void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const { |
1968 | 558 | if(const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) { |
1969 | 47 | Inst.addOperand(MCOperand::createImm(CE->getValue() >> 2)); |
1970 | 47 | return; |
1971 | 47 | } |
1972 | | |
1973 | 511 | const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val); |
1974 | 511 | assert(SR && "Unknown value type!"); |
1975 | 511 | Inst.addOperand(MCOperand::createExpr(SR)); |
1976 | 511 | } |
1977 | | |
1978 | 1.53k | void addThumbMemPCOperands(MCInst &Inst, unsigned N) const { |
1979 | 1.53k | assert(N == 1 && "Invalid number of operands!"); |
1980 | 1.53k | if (isImm()) { |
1981 | 1.53k | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1982 | 1.53k | if (CE) { |
1983 | 26 | Inst.addOperand(MCOperand::createImm(CE->getValue())); |
1984 | 26 | return; |
1985 | 26 | } |
1986 | | |
1987 | 1.50k | const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val); |
1988 | 1.50k | assert(SR && "Unknown value type!"); |
1989 | 1.50k | Inst.addOperand(MCOperand::createExpr(SR)); |
1990 | 1.50k | return; |
1991 | 1.50k | } |
1992 | | |
1993 | 0 | assert(isMem() && "Unknown value type!"); |
1994 | 0 | assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!"); |
1995 | 0 | Inst.addOperand(MCOperand::createImm(Memory.OffsetImm->getValue())); |
1996 | 0 | } |
1997 | | |
1998 | 3.02k | void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const { |
1999 | 3.02k | assert(N == 1 && "Invalid number of operands!"); |
2000 | 3.02k | Inst.addOperand(MCOperand::createImm(unsigned(getMemBarrierOpt()))); |
2001 | 3.02k | } |
2002 | | |
2003 | 860 | void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const { |
2004 | 860 | assert(N == 1 && "Invalid number of operands!"); |
2005 | 860 | Inst.addOperand(MCOperand::createImm(unsigned(getInstSyncBarrierOpt()))); |
2006 | 860 | } |
2007 | | |
2008 | 334 | void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const { |
2009 | 334 | assert(N == 1 && "Invalid number of operands!"); |
2010 | 334 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); |
2011 | 334 | } |
2012 | | |
2013 | 0 | void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const { |
2014 | 0 | assert(N == 1 && "Invalid number of operands!"); |
2015 | 0 | int32_t Imm = Memory.OffsetImm->getValue(); |
2016 | 0 | Inst.addOperand(MCOperand::createImm(Imm)); |
2017 | 0 | } |
2018 | | |
2019 | 1.57k | void addAdrLabelOperands(MCInst &Inst, unsigned N) const { |
2020 | 1.57k | assert(N == 1 && "Invalid number of operands!"); |
2021 | 1.57k | assert(isImm() && "Not an immediate!"); |
2022 | | |
2023 | | // If we have an immediate that's not a constant, treat it as a label |
2024 | | // reference needing a fixup. |
2025 | 1.57k | if (!isa<MCConstantExpr>(getImm())) { |
2026 | 1.17k | Inst.addOperand(MCOperand::createExpr(getImm())); |
2027 | 1.17k | return; |
2028 | 1.17k | } |
2029 | | |
2030 | 398 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
2031 | 398 | int Val = CE->getValue(); |
2032 | 398 | Inst.addOperand(MCOperand::createImm(Val)); |
2033 | 398 | } |
2034 | | |
2035 | 0 | void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const { |
2036 | 0 | assert(N == 2 && "Invalid number of operands!"); |
2037 | 0 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); |
2038 | 0 | Inst.addOperand(MCOperand::createImm(Memory.Alignment)); |
2039 | 0 | } |
2040 | | |
2041 | 0 | void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const { |
2042 | 0 | addAlignedMemoryOperands(Inst, N); |
2043 | 0 | } |
2044 | | |
2045 | 0 | void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const { |
2046 | 0 | addAlignedMemoryOperands(Inst, N); |
2047 | 0 | } |
2048 | | |
2049 | 0 | void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const { |
2050 | 0 | addAlignedMemoryOperands(Inst, N); |
2051 | 0 | } |
2052 | | |
2053 | 0 | void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const { |
2054 | 0 | addAlignedMemoryOperands(Inst, N); |
2055 | 0 | } |
2056 | | |
2057 | 0 | void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const { |
2058 | 0 | addAlignedMemoryOperands(Inst, N); |
2059 | 0 | } |
2060 | | |
2061 | 0 | void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const { |
2062 | 0 | addAlignedMemoryOperands(Inst, N); |
2063 | 0 | } |
2064 | | |
2065 | 0 | void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const { |
2066 | 0 | addAlignedMemoryOperands(Inst, N); |
2067 | 0 | } |
2068 | | |
2069 | 0 | void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const { |
2070 | 0 | addAlignedMemoryOperands(Inst, N); |
2071 | 0 | } |
2072 | | |
2073 | 0 | void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const { |
2074 | 0 | addAlignedMemoryOperands(Inst, N); |
2075 | 0 | } |
2076 | | |
2077 | 0 | void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const { |
2078 | 0 | addAlignedMemoryOperands(Inst, N); |
2079 | 0 | } |
2080 | | |
2081 | 0 | void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const { |
2082 | 0 | addAlignedMemoryOperands(Inst, N); |
2083 | 0 | } |
2084 | | |
2085 | 0 | void addAddrMode2Operands(MCInst &Inst, unsigned N) const { |
2086 | 0 | assert(N == 3 && "Invalid number of operands!"); |
2087 | 0 | int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; |
2088 | 0 | if (!Memory.OffsetRegNum) { |
2089 | 0 | ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; |
2090 | 0 | // Special case for #-0 |
2091 | 0 | if (Val == INT32_MIN) Val = 0; |
2092 | 0 | if (Val < 0) Val = -Val; |
2093 | 0 | Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift); |
2094 | 0 | } else { |
2095 | 0 | // For register offset, we encode the shift type and negation flag |
2096 | 0 | // here. |
2097 | 0 | Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, |
2098 | 0 | Memory.ShiftImm, Memory.ShiftType); |
2099 | 0 | } |
2100 | 0 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); |
2101 | 0 | Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum)); |
2102 | 0 | Inst.addOperand(MCOperand::createImm(Val)); |
2103 | 0 | } |
2104 | | |
2105 | 95 | void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const { |
2106 | 95 | assert(N == 2 && "Invalid number of operands!"); |
2107 | 95 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
2108 | 95 | assert(CE && "non-constant AM2OffsetImm operand!"); |
2109 | 95 | int32_t Val = CE->getValue(); |
2110 | 95 | ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; |
2111 | | // Special case for #-0 |
2112 | 95 | if (Val == INT32_MIN) Val = 0; |
2113 | 95 | if (Val < 0) Val = -Val; |
2114 | 95 | Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift); |
2115 | 95 | Inst.addOperand(MCOperand::createReg(0)); |
2116 | 95 | Inst.addOperand(MCOperand::createImm(Val)); |
2117 | 95 | } |
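The post-indexed AM2 immediate path above splits the sign from the magnitude before packing: negative offsets become (sub, |Val|), and the #-0 marker INT32_MIN is folded back to zero. Worked values (illustrative only; the exact bit layout is left to getAM2Opc):

//   #12 -> AddSub = add, Val = 12, then getAM2Opc(add, 12, no_shift)
//   #-4 -> AddSub = sub, Val = 4,  then getAM2Opc(sub, 4,  no_shift)
//   #-0 -> Val == INT32_MIN -> AddSub = sub, Val = 0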
2118 | | |
2119 | 1.23k | void addAddrMode3Operands(MCInst &Inst, unsigned N) const { |
2120 | 1.23k | assert(N == 3 && "Invalid number of operands!"); |
2121 | | // If we have an immediate that's not a constant, treat it as a label |
2122 | | // reference needing a fixup. If it is a constant, it's something else |
2123 | | // and we reject it. |
2124 | 1.23k | if (isImm()) { |
2125 | 1.19k | Inst.addOperand(MCOperand::createExpr(getImm())); |
2126 | 1.19k | Inst.addOperand(MCOperand::createReg(0)); |
2127 | 1.19k | Inst.addOperand(MCOperand::createImm(0)); |
2128 | 1.19k | return; |
2129 | 1.19k | } |
2130 | | |
2131 | 40 | int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; |
2132 | 40 | if (!Memory.OffsetRegNum) { |
2133 | 36 | ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; |
2134 | | // Special case for #-0 |
2135 | 36 | if (Val == INT32_MIN) Val = 0; |
2136 | 36 | if (Val < 0) Val = -Val; |
2137 | 36 | Val = ARM_AM::getAM3Opc(AddSub, Val); |
2138 | 36 | } else { |
2139 | | // For register offset, we encode the shift type and negation flag |
2140 | | // here. |
2141 | 4 | Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0); |
2142 | 4 | } |
2143 | 40 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); |
2144 | 40 | Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum)); |
2145 | 40 | Inst.addOperand(MCOperand::createImm(Val)); |
2146 | 40 | } |
2147 | | |
2148 | 92 | void addAM3OffsetOperands(MCInst &Inst, unsigned N) const { |
2149 | 92 | assert(N == 2 && "Invalid number of operands!"); |
2150 | 92 | if (Kind == k_PostIndexRegister) { |
2151 | 16 | int32_t Val = |
2152 | 16 | ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0); |
2153 | 16 | Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum)); |
2154 | 16 | Inst.addOperand(MCOperand::createImm(Val)); |
2155 | 16 | return; |
2156 | 16 | } |
2157 | | |
2158 | | // Constant offset. |
2159 | 76 | const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm()); |
2160 | 76 | int32_t Val = CE->getValue(); |
2161 | 76 | ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; |
2162 | | // Special case for #-0 |
2163 | 76 | if (Val == INT32_MIN) Val = 0; |
2164 | 76 | if (Val < 0) Val = -Val; |
2165 | 76 | Val = ARM_AM::getAM3Opc(AddSub, Val); |
2166 | 76 | Inst.addOperand(MCOperand::createReg(0)); |
2167 | 76 | Inst.addOperand(MCOperand::createImm(Val)); |
2168 | 76 | } |
2169 | | |
2170 | 1.16k | void addAddrMode5Operands(MCInst &Inst, unsigned N) const { |
2171 | 1.16k | assert(N == 2 && "Invalid number of operands!"); |
2172 | | // If we have an immediate that's not a constant, treat it as a label |
2173 | | // reference needing a fixup. If it is a constant, it's something else |
2174 | | // and we reject it. |
2175 | 1.16k | if (isImm()) { |
2176 | 1.16k | Inst.addOperand(MCOperand::createExpr(getImm())); |
2177 | 1.16k | Inst.addOperand(MCOperand::createImm(0)); |
2178 | 1.16k | return; |
2179 | 1.16k | } |
2180 | | |
2181 | | // The lower two bits are always zero and as such are not encoded. |
2182 | 1 | int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0; |
2183 | 1 | ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; |
2184 | | // Special case for #-0 |
2185 | 1 | if (Val == INT32_MIN) Val = 0; |
2186 | 1 | if (Val < 0) Val = -Val; |
2187 | 1 | Val = ARM_AM::getAM5Opc(AddSub, Val); |
2188 | 1 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); |
2189 | 1 | Inst.addOperand(MCOperand::createImm(Val)); |
2190 | 1 | } |
2191 | | |
2192 | 0 | void addAddrMode5FP16Operands(MCInst &Inst, unsigned N) const { |
2193 | 0 | assert(N == 2 && "Invalid number of operands!"); |
2194 | | // If we have an immediate that's not a constant, treat it as a label |
2195 | | // reference needing a fixup. If it is a constant, it's something else |
2196 | | // and we reject it. |
2197 | 0 | if (isImm()) { |
2198 | 0 | Inst.addOperand(MCOperand::createExpr(getImm())); |
2199 | 0 | Inst.addOperand(MCOperand::createImm(0)); |
2200 | 0 | return; |
2201 | 0 | } |
2202 | | |
2203 | | // The lower bit is always zero and as such is not encoded. |
2204 | 0 | int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 2 : 0; |
2205 | 0 | ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; |
2206 | | // Special case for #-0 |
2207 | 0 | if (Val == INT32_MIN) Val = 0; |
2208 | 0 | if (Val < 0) Val = -Val; |
2209 | 0 | Val = ARM_AM::getAM5FP16Opc(AddSub, Val); |
2210 | 0 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); |
2211 | 0 | Inst.addOperand(MCOperand::createImm(Val)); |
2212 | 0 | } |
2213 | | |
2214 | 143 | void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const { |
2215 | 143 | assert(N == 2 && "Invalid number of operands!"); |
2216 | | // If we have an immediate that's not a constant, treat it as a label |
2217 | | // reference needing a fixup. If it is a constant, it's something else |
2218 | | // and we reject it. |
2219 | 143 | if (isImm()) { |
2220 | 126 | Inst.addOperand(MCOperand::createExpr(getImm())); |
2221 | 126 | Inst.addOperand(MCOperand::createImm(0)); |
2222 | 126 | return; |
2223 | 126 | } |
2224 | | |
2225 | 17 | int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; |
2226 | 17 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); |
2227 | 17 | Inst.addOperand(MCOperand::createImm(Val)); |
2228 | 17 | } |
2229 | | |
2230 | 0 | void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const { |
2231 | 0 | assert(N == 2 && "Invalid number of operands!"); |
2232 | | // The lower two bits are always zero and as such are not encoded. |
2233 | 0 | int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0; |
2234 | 0 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); |
2235 | 0 | Inst.addOperand(MCOperand::createImm(Val)); |
2236 | 0 | } |
2237 | | |
2238 | 20 | void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const { |
2239 | 20 | assert(N == 2 && "Invalid number of operands!"); |
2240 | 20 | int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; |
2241 | 20 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); |
2242 | 20 | Inst.addOperand(MCOperand::createImm(Val)); |
2243 | 20 | } |
2244 | | |
2245 | 15 | void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const { |
2246 | 15 | addMemImm8OffsetOperands(Inst, N); |
2247 | 15 | } |
2248 | | |
2249 | 2 | void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const { |
2250 | 2 | addMemImm8OffsetOperands(Inst, N); |
2251 | 2 | } |
2252 | | |
2253 | 64 | void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const { |
2254 | 64 | assert(N == 2 && "Invalid number of operands!"); |
2255 | | // If this is an immediate, it's a label reference. |
2256 | 64 | if (isImm()) { |
2257 | 0 | addExpr(Inst, getImm()); |
2258 | 0 | Inst.addOperand(MCOperand::createImm(0)); |
2259 | 0 | return; |
2260 | 0 | } |
2261 | | |
2262 | | // Otherwise, it's a normal memory reg+offset. |
2263 | 64 | int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; |
2264 | 64 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); |
2265 | 64 | Inst.addOperand(MCOperand::createImm(Val)); |
2266 | 64 | } |
2267 | | |
2268 | 2.03k | void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const { |
2269 | 2.03k | assert(N == 2 && "Invalid number of operands!"); |
2270 | | // If this is an immediate, it's a label reference. |
2271 | 2.03k | if (isImm()) { |
2272 | 1.85k | addExpr(Inst, getImm()); |
2273 | 1.85k | Inst.addOperand(MCOperand::createImm(0)); |
2274 | 1.85k | return; |
2275 | 1.85k | } |
2276 | | |
2277 | | // Otherwise, it's a normal memory reg+offset. |
2278 | 175 | int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; |
2279 | 175 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); |
2280 | 175 | Inst.addOperand(MCOperand::createImm(Val)); |
2281 | 175 | } |
2282 | | |
2283 | 0 | void addMemTBBOperands(MCInst &Inst, unsigned N) const { |
2284 | 0 | assert(N == 2 && "Invalid number of operands!"); |
2285 | 0 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); |
2286 | 0 | Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum)); |
2287 | 0 | } |
2288 | | |
2289 | 0 | void addMemTBHOperands(MCInst &Inst, unsigned N) const { |
2290 | 0 | assert(N == 2 && "Invalid number of operands!"); |
2291 | 0 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); |
2292 | 0 | Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum)); |
2293 | 0 | } |
2294 | | |
2295 | 14 | void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const { |
2296 | 14 | assert(N == 3 && "Invalid number of operands!"); |
2297 | 14 | unsigned Val = |
2298 | 14 | ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, |
2299 | 14 | Memory.ShiftImm, Memory.ShiftType); |
2300 | 14 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); |
2301 | 14 | Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum)); |
2302 | 14 | Inst.addOperand(MCOperand::createImm(Val)); |
2303 | 14 | } |
2304 | | |
2305 | 16 | void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const { |
2306 | 16 | assert(N == 3 && "Invalid number of operands!"); |
2307 | 16 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); |
2308 | 16 | Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum)); |
2309 | 16 | Inst.addOperand(MCOperand::createImm(Memory.ShiftImm)); |
2310 | 16 | } |
2311 | | |
2312 | 12 | void addMemThumbRROperands(MCInst &Inst, unsigned N) const { |
2313 | 12 | assert(N == 2 && "Invalid number of operands!"); |
2314 | 12 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); |
2315 | 12 | Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum)); |
2316 | 12 | } |
2317 | | |
2318 | 21 | void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const { |
2319 | 21 | assert(N == 2 && "Invalid number of operands!"); |
2320 | 21 | int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0; |
2321 | 21 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); |
2322 | 21 | Inst.addOperand(MCOperand::createImm(Val)); |
2323 | 21 | } |
2324 | | |
2325 | 3 | void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const { |
2326 | 3 | assert(N == 2 && "Invalid number of operands!"); |
2327 | 3 | int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0; |
2328 | 3 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); |
2329 | 3 | Inst.addOperand(MCOperand::createImm(Val)); |
2330 | 3 | } |
2331 | | |
2332 | 21 | void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const { |
2333 | 21 | assert(N == 2 && "Invalid number of operands!"); |
2334 | 21 | int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0; |
2335 | 21 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); |
2336 | 21 | Inst.addOperand(MCOperand::createImm(Val)); |
2337 | 21 | } |
2338 | | |
2339 | 0 | void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const { |
2340 | 0 | assert(N == 2 && "Invalid number of operands!"); |
2341 | 0 | int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0; |
2342 | 0 | Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); |
2343 | 0 | Inst.addOperand(MCOperand::createImm(Val)); |
2344 | 0 | } |
2345 | | |
2346 | 0 | void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const { |
2347 | 0 | assert(N == 1 && "Invalid number of operands!"); |
2348 | 0 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
2349 | 0 | assert(CE && "non-constant post-idx-imm8 operand!"); |
2350 | 0 | int Imm = CE->getValue(); |
2351 | 0 | bool isAdd = Imm >= 0; |
2352 | 0 | if (Imm == INT32_MIN) Imm = 0; |
2353 | 0 | Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8; |
2354 | 0 | Inst.addOperand(MCOperand::createImm(Imm)); |
2355 | 0 | } |
2356 | | |
2357 | 0 | void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const { |
2358 | 0 | assert(N == 1 && "Invalid number of operands!"); |
2359 | 0 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
2360 | 0 | assert(CE && "non-constant post-idx-imm8s4 operand!"); |
2361 | 0 | int Imm = CE->getValue(); |
2362 | 0 | bool isAdd = Imm >= 0; |
2363 | 0 | if (Imm == INT32_MIN) Imm = 0; |
2364 | | // Immediate is scaled by 4. |
2365 | 0 | Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8; |
2366 | 0 | Inst.addOperand(MCOperand::createImm(Imm)); |
2367 | 0 | } |
2368 | | |
2369 | 0 | void addPostIdxRegOperands(MCInst &Inst, unsigned N) const { |
2370 | 0 | assert(N == 2 && "Invalid number of operands!"); |
2371 | 0 | Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum)); |
2372 | 0 | Inst.addOperand(MCOperand::createImm(PostIdxReg.isAdd)); |
2373 | 0 | } |
2374 | | |
2375 | 42 | void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const { |
2376 | 42 | assert(N == 2 && "Invalid number of operands!"); |
2377 | 42 | Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum)); |
2378 | | // The sign, shift type, and shift amount are encoded in a single operand |
2379 | | // using the AM2 encoding helpers. |
2380 | 42 | ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub; |
2381 | 42 | unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm, |
2382 | 42 | PostIdxReg.ShiftTy); |
2383 | 42 | Inst.addOperand(MCOperand::createImm(Imm)); |
2384 | 42 | } |
2385 | | |
2386 | 654 | void addMSRMaskOperands(MCInst &Inst, unsigned N) const { |
2387 | 654 | assert(N == 1 && "Invalid number of operands!"); |
2388 | 654 | Inst.addOperand(MCOperand::createImm(unsigned(getMSRMask()))); |
2389 | 654 | } |
2390 | | |
2391 | 0 | void addBankedRegOperands(MCInst &Inst, unsigned N) const { |
2392 | 0 | assert(N == 1 && "Invalid number of operands!"); |
2393 | 0 | Inst.addOperand(MCOperand::createImm(unsigned(getBankedReg()))); |
2394 | 0 | } |
2395 | | |
2396 | 145 | void addProcIFlagsOperands(MCInst &Inst, unsigned N) const { |
2397 | 145 | assert(N == 1 && "Invalid number of operands!"); |
2398 | 145 | Inst.addOperand(MCOperand::createImm(unsigned(getProcIFlags()))); |
2399 | 145 | } |
2400 | | |
2401 | 0 | void addVecListOperands(MCInst &Inst, unsigned N) const { |
2402 | 0 | assert(N == 1 && "Invalid number of operands!"); |
2403 | 0 | Inst.addOperand(MCOperand::createReg(VectorList.RegNum)); |
2404 | 0 | } |
2405 | | |
2406 | 0 | void addVecListIndexedOperands(MCInst &Inst, unsigned N) const { |
2407 | 0 | assert(N == 2 && "Invalid number of operands!"); |
2408 | 0 | Inst.addOperand(MCOperand::createReg(VectorList.RegNum)); |
2409 | 0 | Inst.addOperand(MCOperand::createImm(VectorList.LaneIndex)); |
2410 | 0 | } |
2411 | | |
2412 | 0 | void addVectorIndex8Operands(MCInst &Inst, unsigned N) const { |
2413 | 0 | assert(N == 1 && "Invalid number of operands!"); |
2414 | 0 | Inst.addOperand(MCOperand::createImm(getVectorIndex())); |
2415 | 0 | } |
2416 | | |
2417 | 0 | void addVectorIndex16Operands(MCInst &Inst, unsigned N) const { |
2418 | 0 | assert(N == 1 && "Invalid number of operands!"); |
2419 | 0 | Inst.addOperand(MCOperand::createImm(getVectorIndex())); |
2420 | 0 | } |
2421 | | |
2422 | 0 | void addVectorIndex32Operands(MCInst &Inst, unsigned N) const { |
2423 | 0 | assert(N == 1 && "Invalid number of operands!"); |
2424 | 0 | Inst.addOperand(MCOperand::createImm(getVectorIndex())); |
2425 | 0 | } |
2426 | | |
2427 | 3 | void addNEONi8splatOperands(MCInst &Inst, unsigned N) const { |
2428 | 3 | assert(N == 1 && "Invalid number of operands!"); |
2429 | | // The immediate encodes the type of constant as well as the value. |
2430 | | // Mask in that this is an i8 splat. |
2431 | 3 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
2432 | 3 | Inst.addOperand(MCOperand::createImm(CE->getValue() | 0xe00)); |
2433 | 3 | } |
2434 | | |
2435 | 22 | void addNEONi16splatOperands(MCInst &Inst, unsigned N) const { |
2436 | 22 | assert(N == 1 && "Invalid number of operands!"); |
2437 | | // The immediate encodes the type of constant as well as the value. |
2438 | 22 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
2439 | 22 | unsigned Value = CE->getValue(); |
2440 | 22 | Value = ARM_AM::encodeNEONi16splat(Value); |
2441 | 22 | Inst.addOperand(MCOperand::createImm(Value)); |
2442 | 22 | } |
2443 | | |
2444 | 0 | void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const { |
2445 | 0 | assert(N == 1 && "Invalid number of operands!"); |
2446 | | // The immediate encodes the type of constant as well as the value. |
2447 | 0 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
2448 | 0 | unsigned Value = CE->getValue(); |
2449 | 0 | Value = ARM_AM::encodeNEONi16splat(~Value & 0xffff); |
2450 | 0 | Inst.addOperand(MCOperand::createImm(Value)); |
2451 | 0 | } |
2452 | | |
2453 | 2 | void addNEONi32splatOperands(MCInst &Inst, unsigned N) const { |
2454 | 2 | assert(N == 1 && "Invalid number of operands!"); |
2455 | | // The immediate encodes the type of constant as well as the value. |
2456 | 2 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
2457 | 2 | unsigned Value = CE->getValue(); |
2458 | 2 | Value = ARM_AM::encodeNEONi32splat(Value); |
2459 | 2 | Inst.addOperand(MCOperand::createImm(Value)); |
2460 | 2 | } |
2461 | | |
2462 | 2 | void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const { |
2463 | 2 | assert(N == 1 && "Invalid number of operands!"); |
2464 | | // The immediate encodes the type of constant as well as the value. |
2465 | 2 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
2466 | 2 | unsigned Value = CE->getValue(); |
2467 | 2 | Value = ARM_AM::encodeNEONi32splat(~Value); |
2468 | 2 | Inst.addOperand(MCOperand::createImm(Value)); |
2469 | 2 | } |
2470 | | |
2471 | 1 | void addNEONinvByteReplicateOperands(MCInst &Inst, unsigned N) const { |
2472 | 1 | assert(N == 1 && "Invalid number of operands!"); |
2473 | | // The immediate encodes the type of constant as well as the value. |
2474 | 1 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
2475 | 1 | unsigned Value = CE->getValue(); |
2476 | 1 | assert((Inst.getOpcode() == ARM::VMOVv8i8 || |
2477 | 1 | Inst.getOpcode() == ARM::VMOVv16i8) && |
2478 | 1 | "All vmvn instructions that want to replicate a non-zero byte " |
2479 | 1 | "must always be replaced with VMOVv8i8 or VMOVv16i8."); |
2480 | 1 | unsigned B = ((~Value) & 0xff); |
2481 | 1 | B |= 0xe00; // cmode = 0b1110 |
2482 | 1 | Inst.addOperand(MCOperand::createImm(B)); |
2483 | 1 | } |
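
A worked check of the inverted byte-replicate encoding above: the byte is complemented and tagged with cmode = 0b1110 (0xe00). The helper below simply lifts that logic out of the method so a value can be checked in isolation.

```cpp
// Worked check of the inverted byte-replicate encoding above: for a VMVN-style
// operand the byte is complemented and tagged with cmode = 0b1110 (0xe00).
#include <cstdio>

unsigned encodeInvByteReplicate(unsigned Value) {
  unsigned B = (~Value) & 0xff;
  return B | 0xe00; // cmode = 0b1110
}

int main() {
  std::printf("0x12 -> 0x%x\n", encodeInvByteReplicate(0x12)); // 0xed | 0xe00 = 0xeed
}
```
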
2484 | 235 | void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const { |
2485 | 235 | assert(N == 1 && "Invalid number of operands!"); |
2486 | | // The immediate encodes the type of constant as well as the value. |
2487 | 235 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
2488 | 235 | unsigned Value = CE->getValue(); |
2489 | 235 | if (Value >= 256 && Value <= 0xffff) |
2490 | 70 | Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200); |
2491 | 165 | else if (Value > 0xffff && Value <= 0xffffff) |
2492 | 36 | Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400); |
2493 | 129 | else if (Value > 0xffffff) |
2494 | 60 | Value = (Value >> 24) | 0x600; |
2495 | 235 | Inst.addOperand(MCOperand::createImm(Value)); |
2496 | 235 | } |
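
The if/else ladder above maps the position of the constant's non-zero byte onto distinct tag bits. The sketch below lifts that ladder out verbatim so a few inputs can be checked in isolation; it is a copy of the logic above, not a separate LLVM API.

```cpp
// Worked sketch of the cmode folding performed by addNEONi32vmovOperands
// above: the single non-zero byte of the 32-bit constant is shifted down and
// tagged with bits recording which byte lane it occupied.
#include <cstdio>

unsigned encodeI32Vmov(unsigned Value) {
  if (Value >= 256 && Value <= 0xffff)
    Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
  else if (Value > 0xffff && Value <= 0xffffff)
    Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
  else if (Value > 0xffffff)
    Value = (Value >> 24) | 0x600;
  return Value;
}

int main() {
  std::printf("0x000000AB -> 0x%x\n", encodeI32Vmov(0x000000ABu)); // byte 0: unchanged
  std::printf("0x0000AB00 -> 0x%x\n", encodeI32Vmov(0x0000AB00u)); // byte 1: 0xAB | 0x200
  std::printf("0x00AB0000 -> 0x%x\n", encodeI32Vmov(0x00AB0000u)); // byte 2: 0xAB | 0x400
  std::printf("0xAB000000 -> 0x%x\n", encodeI32Vmov(0xAB000000u)); // byte 3: 0xAB | 0x600
}
```
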
2497 | | |
2498 | 10 | void addNEONvmovByteReplicateOperands(MCInst &Inst, unsigned N) const { |
2499 | 10 | assert(N == 1 && "Invalid number of operands!"); |
2500 | | // The immediate encodes the type of constant as well as the value. |
2501 | 10 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
2502 | 10 | unsigned Value = CE->getValue(); |
2503 | 10 | assert((Inst.getOpcode() == ARM::VMOVv8i8 || |
2504 | 10 | Inst.getOpcode() == ARM::VMOVv16i8) && |
2505 | 10 | "All instructions that want to replicate a non-zero byte " |
2506 | 10 | "must always be replaced with VMOVv8i8 or VMOVv16i8."); |
2507 | 10 | unsigned B = Value & 0xff; |
2508 | 10 | B |= 0xe00; // cmode = 0b1110 |
2509 | 10 | Inst.addOperand(MCOperand::createImm(B)); |
2510 | 10 | } |
2511 | 204 | void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const { |
2512 | 204 | assert(N == 1 && "Invalid number of operands!"); |
2513 | | // The immediate encodes the type of constant as well as the value. |
2514 | 204 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
2515 | 204 | unsigned Value = ~CE->getValue(); |
2516 | 204 | if (Value >= 256 && Value <= 0xffff) |
2517 | 87 | Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200); |
2518 | 117 | else if (Value > 0xffff && Value <= 0xffffff) |
2519 | 41 | Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400); |
2520 | 76 | else if (Value > 0xffffff) |
2521 | 43 | Value = (Value >> 24) | 0x600; |
2522 | 204 | Inst.addOperand(MCOperand::createImm(Value)); |
2523 | 204 | } |
2524 | | |
2525 | 3 | void addNEONi64splatOperands(MCInst &Inst, unsigned N) const { |
2526 | 3 | assert(N == 1 && "Invalid number of operands!"); |
2527 | | // The immediate encodes the type of constant as well as the value. |
2528 | 3 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
2529 | 3 | uint64_t Value = CE->getValue(); |
2530 | 3 | unsigned Imm = 0; |
2531 | 27 | for (unsigned i = 0; i < 8; ++i, Value >>= 8) { |
2532 | 24 | Imm |= (Value & 1) << i; |
2533 | 24 | } |
2534 | 3 | Inst.addOperand(MCOperand::createImm(Imm | 0x1e00)); |
2535 | 3 | } |
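
The loop above reduces a 64-bit all-zero/all-ones byte pattern to one bit per byte (each byte's LSB, which is sufficient when every byte is 0x00 or 0xFF) and then ORs in the 0x1e00 tag. A standalone worked example of the same reduction:

```cpp
// Worked example of the per-byte reduction used by addNEONi64splatOperands
// above: each byte of the constant contributes one bit of the 8-bit immediate.
#include <cstdint>
#include <cstdio>

unsigned encodeI64Splat(uint64_t Value) {
  unsigned Imm = 0;
  for (unsigned i = 0; i < 8; ++i, Value >>= 8)
    Imm |= (Value & 1) << i;
  return Imm | 0x1e00;
}

int main() {
  // Bytes, LSB first: FF 00 FF 00 FF 00 FF 00 -> bits 01010101 -> 0x55 | 0x1e00.
  std::printf("0x00FF00FF00FF00FF -> 0x%x\n",
              encodeI64Splat(0x00FF00FF00FF00FFull)); // prints 0x1e55
}
```
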
2536 | | |
2537 | | void print(raw_ostream &OS) const override; |
2538 | | |
2539 | 4.19k | static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S) { |
2540 | 4.19k | auto Op = make_unique<ARMOperand>(k_ITCondMask); |
2541 | 4.19k | Op->ITMask.Mask = Mask; |
2542 | 4.19k | Op->StartLoc = S; |
2543 | 4.19k | Op->EndLoc = S; |
2544 | 4.19k | return Op; |
2545 | 4.19k | } |
2546 | | |
2547 | | static std::unique_ptr<ARMOperand> CreateCondCode(ARMCC::CondCodes CC, |
2548 | 153k | SMLoc S) { |
2549 | 153k | auto Op = make_unique<ARMOperand>(k_CondCode); |
2550 | 153k | Op->CC.Val = CC; |
2551 | 153k | Op->StartLoc = S; |
2552 | 153k | Op->EndLoc = S; |
2553 | 153k | return Op; |
2554 | 153k | } |
2555 | | |
2556 | 689 | static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) { |
2557 | 689 | auto Op = make_unique<ARMOperand>(k_CoprocNum); |
2558 | 689 | Op->Cop.Val = CopVal; |
2559 | 689 | Op->StartLoc = S; |
2560 | 689 | Op->EndLoc = S; |
2561 | 689 | return Op; |
2562 | 689 | } |
2563 | | |
2564 | 625 | static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S) { |
2565 | 625 | auto Op = make_unique<ARMOperand>(k_CoprocReg); |
2566 | 625 | Op->Cop.Val = CopVal; |
2567 | 625 | Op->StartLoc = S; |
2568 | 625 | Op->EndLoc = S; |
2569 | 625 | return Op; |
2570 | 625 | } |
2571 | | |
2572 | | static std::unique_ptr<ARMOperand> CreateCoprocOption(unsigned Val, SMLoc S, |
2573 | 2 | SMLoc E) { |
2574 | 2 | auto Op = make_unique<ARMOperand>(k_CoprocOption); |
2575 | 2 | Op->Cop.Val = Val; |
2576 | 2 | Op->StartLoc = S; |
2577 | 2 | Op->EndLoc = E; |
2578 | 2 | return Op; |
2579 | 2 | } |
2580 | | |
2581 | 34.3k | static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S) { |
2582 | 34.3k | auto Op = make_unique<ARMOperand>(k_CCOut); |
2583 | 34.3k | Op->Reg.RegNum = RegNum; |
2584 | 34.3k | Op->StartLoc = S; |
2585 | 34.3k | Op->EndLoc = S; |
2586 | 34.3k | return Op; |
2587 | 34.3k | } |
2588 | | |
2589 | 262k | static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S) { |
2590 | 262k | auto Op = make_unique<ARMOperand>(k_Token); |
2591 | 262k | Op->Tok.Data = Str.data(); |
2592 | 262k | Op->Tok.Length = Str.size(); |
2593 | 262k | Op->StartLoc = S; |
2594 | 262k | Op->EndLoc = S; |
2595 | 262k | return Op; |
2596 | 262k | } |
2597 | | |
2598 | | static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S, |
2599 | 93.7k | SMLoc E) { |
2600 | 93.7k | auto Op = make_unique<ARMOperand>(k_Register); |
2601 | 93.7k | Op->Reg.RegNum = RegNum; |
2602 | 93.7k | Op->StartLoc = S; |
2603 | 93.7k | Op->EndLoc = E; |
2604 | 93.7k | return Op; |
2605 | 93.7k | } |
2606 | | |
2607 | | static std::unique_ptr<ARMOperand> |
2608 | | CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg, |
2609 | | unsigned ShiftReg, unsigned ShiftImm, SMLoc S, |
2610 | 151 | SMLoc E) { |
2611 | 151 | auto Op = make_unique<ARMOperand>(k_ShiftedRegister); |
2612 | 151 | Op->RegShiftedReg.ShiftTy = ShTy; |
2613 | 151 | Op->RegShiftedReg.SrcReg = SrcReg; |
2614 | 151 | Op->RegShiftedReg.ShiftReg = ShiftReg; |
2615 | 151 | Op->RegShiftedReg.ShiftImm = ShiftImm; |
2616 | 151 | Op->StartLoc = S; |
2617 | 151 | Op->EndLoc = E; |
2618 | 151 | return Op; |
2619 | 151 | } |
2620 | | |
2621 | | static std::unique_ptr<ARMOperand> |
2622 | | CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg, |
2623 | 955 | unsigned ShiftImm, SMLoc S, SMLoc E) { |
2624 | 955 | auto Op = make_unique<ARMOperand>(k_ShiftedImmediate); |
2625 | 955 | Op->RegShiftedImm.ShiftTy = ShTy; |
2626 | 955 | Op->RegShiftedImm.SrcReg = SrcReg; |
2627 | 955 | Op->RegShiftedImm.ShiftImm = ShiftImm; |
2628 | 955 | Op->StartLoc = S; |
2629 | 955 | Op->EndLoc = E; |
2630 | 955 | return Op; |
2631 | 955 | } |
2632 | | |
2633 | | static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm, |
2634 | 6 | SMLoc S, SMLoc E) { |
2635 | 6 | auto Op = make_unique<ARMOperand>(k_ShifterImmediate); |
2636 | 6 | Op->ShifterImm.isASR = isASR; |
2637 | 6 | Op->ShifterImm.Imm = Imm; |
2638 | 6 | Op->StartLoc = S; |
2639 | 6 | Op->EndLoc = E; |
2640 | 6 | return Op; |
2641 | 6 | } |
2642 | | |
2643 | | static std::unique_ptr<ARMOperand> CreateRotImm(unsigned Imm, SMLoc S, |
2644 | 4 | SMLoc E) { |
2645 | 4 | auto Op = make_unique<ARMOperand>(k_RotateImmediate); |
2646 | 4 | Op->RotImm.Imm = Imm; |
2647 | 4 | Op->StartLoc = S; |
2648 | 4 | Op->EndLoc = E; |
2649 | 4 | return Op; |
2650 | 4 | } |
2651 | | |
2652 | | static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot, |
2653 | 4.54k | SMLoc S, SMLoc E) { |
2654 | 4.54k | auto Op = make_unique<ARMOperand>(k_ModifiedImmediate); |
2655 | 4.54k | Op->ModImm.Bits = Bits; |
2656 | 4.54k | Op->ModImm.Rot = Rot; |
2657 | 4.54k | Op->StartLoc = S; |
2658 | 4.54k | Op->EndLoc = E; |
2659 | 4.54k | return Op; |
2660 | 4.54k | } |
2661 | | |
2662 | | static std::unique_ptr<ARMOperand> |
2663 | 28 | CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) { |
2664 | 28 | auto Op = make_unique<ARMOperand>(k_BitfieldDescriptor); |
2665 | 28 | Op->Bitfield.LSB = LSB; |
2666 | 28 | Op->Bitfield.Width = Width; |
2667 | 28 | Op->StartLoc = S; |
2668 | 28 | Op->EndLoc = E; |
2669 | 28 | return Op; |
2670 | 28 | } |
2671 | | |
2672 | | static std::unique_ptr<ARMOperand> |
2673 | | CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs, |
2674 | 228 | SMLoc StartLoc, SMLoc EndLoc) { |
2675 | 228 | assert (Regs.size() > 0 && "RegList contains no registers?"); |
2676 | 228 | KindTy Kind = k_RegisterList; |
2677 | | |
2678 | 228 | if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().second)) |
2679 | 87 | Kind = k_DPRRegisterList; |
2680 | 141 | else if (ARMMCRegisterClasses[ARM::SPRRegClassID]. |
2681 | 141 | contains(Regs.front().second)) |
2682 | 20 | Kind = k_SPRRegisterList; |
2683 | | |
2684 | | // Sort based on the register encoding values. |
2685 | 228 | array_pod_sort(Regs.begin(), Regs.end()); |
2686 | | |
2687 | 228 | auto Op = make_unique<ARMOperand>(Kind); |
2688 | 228 | for (SmallVectorImpl<std::pair<unsigned, unsigned> >::const_iterator |
2689 | 2.24k | I = Regs.begin(), E = Regs.end(); I != E; ++I) |
2690 | 2.01k | Op->Registers.push_back(I->second); |
2691 | 228 | Op->StartLoc = StartLoc; |
2692 | 228 | Op->EndLoc = EndLoc; |
2693 | 228 | return Op; |
2694 | 228 | } |
2695 | | |
2696 | | static std::unique_ptr<ARMOperand> CreateVectorList(unsigned RegNum, |
2697 | | unsigned Count, |
2698 | | bool isDoubleSpaced, |
2699 | 118 | SMLoc S, SMLoc E) { |
2700 | 118 | auto Op = make_unique<ARMOperand>(k_VectorList); |
2701 | 118 | Op->VectorList.RegNum = RegNum; |
2702 | 118 | Op->VectorList.Count = Count; |
2703 | 118 | Op->VectorList.isDoubleSpaced = isDoubleSpaced; |
2704 | 118 | Op->StartLoc = S; |
2705 | 118 | Op->EndLoc = E; |
2706 | 118 | return Op; |
2707 | 118 | } |
2708 | | |
2709 | | static std::unique_ptr<ARMOperand> |
2710 | | CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced, |
2711 | 6 | SMLoc S, SMLoc E) { |
2712 | 6 | auto Op = make_unique<ARMOperand>(k_VectorListAllLanes); |
2713 | 6 | Op->VectorList.RegNum = RegNum; |
2714 | 6 | Op->VectorList.Count = Count; |
2715 | 6 | Op->VectorList.isDoubleSpaced = isDoubleSpaced; |
2716 | 6 | Op->StartLoc = S; |
2717 | 6 | Op->EndLoc = E; |
2718 | 6 | return Op; |
2719 | 6 | } |
2720 | | |
2721 | | static std::unique_ptr<ARMOperand> |
2722 | | CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index, |
2723 | 11 | bool isDoubleSpaced, SMLoc S, SMLoc E) { |
2724 | 11 | auto Op = make_unique<ARMOperand>(k_VectorListIndexed); |
2725 | 11 | Op->VectorList.RegNum = RegNum; |
2726 | 11 | Op->VectorList.Count = Count; |
2727 | 11 | Op->VectorList.LaneIndex = Index; |
2728 | 11 | Op->VectorList.isDoubleSpaced = isDoubleSpaced; |
2729 | 11 | Op->StartLoc = S; |
2730 | 11 | Op->EndLoc = E; |
2731 | 11 | return Op; |
2732 | 11 | } |
2733 | | |
2734 | | static std::unique_ptr<ARMOperand> |
2735 | 17 | CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) { |
2736 | 17 | auto Op = make_unique<ARMOperand>(k_VectorIndex); |
2737 | 17 | Op->VectorIndex.Val = Idx; |
2738 | 17 | Op->StartLoc = S; |
2739 | 17 | Op->EndLoc = E; |
2740 | 17 | return Op; |
2741 | 17 | } |
2742 | | |
2743 | | static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S, |
2744 | 199k | SMLoc E) { |
2745 | 199k | auto Op = make_unique<ARMOperand>(k_Immediate); |
2746 | 199k | Op->Imm.Val = Val; |
2747 | 199k | Op->StartLoc = S; |
2748 | 199k | Op->EndLoc = E; |
2749 | 199k | return Op; |
2750 | 199k | } |
2751 | | |
2752 | | static std::unique_ptr<ARMOperand> |
2753 | | CreateMem(unsigned BaseRegNum, const MCConstantExpr *OffsetImm, |
2754 | | unsigned OffsetRegNum, ARM_AM::ShiftOpc ShiftType, |
2755 | | unsigned ShiftImm, unsigned Alignment, bool isNegative, SMLoc S, |
2756 | 895 | SMLoc E, SMLoc AlignmentLoc = SMLoc()) { |
2757 | 895 | auto Op = make_unique<ARMOperand>(k_Memory); |
2758 | 895 | Op->Memory.BaseRegNum = BaseRegNum; |
2759 | 895 | Op->Memory.OffsetImm = OffsetImm; |
2760 | 895 | Op->Memory.OffsetRegNum = OffsetRegNum; |
2761 | 895 | Op->Memory.ShiftType = ShiftType; |
2762 | 895 | Op->Memory.ShiftImm = ShiftImm; |
2763 | 895 | Op->Memory.Alignment = Alignment; |
2764 | 895 | Op->Memory.isNegative = isNegative; |
2765 | 895 | Op->StartLoc = S; |
2766 | 895 | Op->EndLoc = E; |
2767 | 895 | Op->AlignmentLoc = AlignmentLoc; |
2768 | 895 | return Op; |
2769 | 895 | } |
2770 | | |
2771 | | static std::unique_ptr<ARMOperand> |
2772 | | CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy, |
2773 | 118 | unsigned ShiftImm, SMLoc S, SMLoc E) { |
2774 | 118 | auto Op = make_unique<ARMOperand>(k_PostIndexRegister); |
2775 | 118 | Op->PostIdxReg.RegNum = RegNum; |
2776 | 118 | Op->PostIdxReg.isAdd = isAdd; |
2777 | 118 | Op->PostIdxReg.ShiftTy = ShiftTy; |
2778 | 118 | Op->PostIdxReg.ShiftImm = ShiftImm; |
2779 | 118 | Op->StartLoc = S; |
2780 | 118 | Op->EndLoc = E; |
2781 | 118 | return Op; |
2782 | 118 | } |
2783 | | |
2784 | | static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, |
2785 | 3.04k | SMLoc S) { |
2786 | 3.04k | auto Op = make_unique<ARMOperand>(k_MemBarrierOpt); |
2787 | 3.04k | Op->MBOpt.Val = Opt; |
2788 | 3.04k | Op->StartLoc = S; |
2789 | 3.04k | Op->EndLoc = S; |
2790 | 3.04k | return Op; |
2791 | 3.04k | } |
2792 | | |
2793 | | static std::unique_ptr<ARMOperand> |
2794 | 871 | CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S) { |
2795 | 871 | auto Op = make_unique<ARMOperand>(k_InstSyncBarrierOpt); |
2796 | 871 | Op->ISBOpt.Val = Opt; |
2797 | 871 | Op->StartLoc = S; |
2798 | 871 | Op->EndLoc = S; |
2799 | 871 | return Op; |
2800 | 871 | } |
2801 | | |
2802 | | static std::unique_ptr<ARMOperand> CreateProcIFlags(ARM_PROC::IFlags IFlags, |
2803 | 168 | SMLoc S) { |
2804 | 168 | auto Op = make_unique<ARMOperand>(k_ProcIFlags); |
2805 | 168 | Op->IFlags.Val = IFlags; |
2806 | 168 | Op->StartLoc = S; |
2807 | 168 | Op->EndLoc = S; |
2808 | 168 | return Op; |
2809 | 168 | } |
2810 | | |
2811 | 745 | static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S) { |
2812 | 745 | auto Op = make_unique<ARMOperand>(k_MSRMask); |
2813 | 745 | Op->MMask.Val = MMask; |
2814 | 745 | Op->StartLoc = S; |
2815 | 745 | Op->EndLoc = S; |
2816 | 745 | return Op; |
2817 | 745 | } |
2818 | | |
2819 | 0 | static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S) { |
2820 | 0 | auto Op = make_unique<ARMOperand>(k_BankedReg); |
2821 | 0 | Op->BankedReg.Val = Reg; |
2822 | 0 | Op->StartLoc = S; |
2823 | 0 | Op->EndLoc = S; |
2824 | 0 | return Op; |
2825 | 0 | } |
2826 | | }; |
2827 | | |
2828 | | } // end anonymous namespace. |
2829 | | |
2830 | 0 | void ARMOperand::print(raw_ostream &OS) const { |
2831 | 0 | switch (Kind) { |
2832 | 0 | case k_CondCode: |
2833 | 0 | OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">"; |
2834 | 0 | break; |
2835 | 0 | case k_CCOut: |
2836 | 0 | OS << "<ccout " << getReg() << ">"; |
2837 | 0 | break; |
2838 | 0 | case k_ITCondMask: { |
2839 | 0 | static const char *const MaskStr[] = { |
2840 | 0 | "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)", |
2841 | 0 | "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)" |
2842 | 0 | }; |
2843 | 0 | assert((ITMask.Mask & 0xf) == ITMask.Mask); |
2844 | 0 | OS << "<it-mask " << MaskStr[ITMask.Mask] << ">"; |
2845 | 0 | break; |
2846 | 0 | } |
2847 | 0 | case k_CoprocNum: |
2848 | 0 | OS << "<coprocessor number: " << getCoproc() << ">"; |
2849 | 0 | break; |
2850 | 0 | case k_CoprocReg: |
2851 | 0 | OS << "<coprocessor register: " << getCoproc() << ">"; |
2852 | 0 | break; |
2853 | 0 | case k_CoprocOption: |
2854 | 0 | OS << "<coprocessor option: " << CoprocOption.Val << ">"; |
2855 | 0 | break; |
2856 | 0 | case k_MSRMask: |
2857 | 0 | OS << "<mask: " << getMSRMask() << ">"; |
2858 | 0 | break; |
2859 | 0 | case k_BankedReg: |
2860 | 0 | OS << "<banked reg: " << getBankedReg() << ">"; |
2861 | 0 | break; |
2862 | 0 | case k_Immediate: |
2863 | 0 | OS << *getImm(); |
2864 | 0 | break; |
2865 | 0 | case k_MemBarrierOpt: |
2866 | 0 | OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">"; |
2867 | 0 | break; |
2868 | 0 | case k_InstSyncBarrierOpt: |
2869 | 0 | OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">"; |
2870 | 0 | break; |
2871 | 0 | case k_Memory: |
2872 | 0 | OS << "<memory " |
2873 | 0 | << " base:" << Memory.BaseRegNum; |
2874 | 0 | OS << ">"; |
2875 | 0 | break; |
2876 | 0 | case k_PostIndexRegister: |
2877 | 0 | OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-") |
2878 | 0 | << PostIdxReg.RegNum; |
2879 | 0 | if (PostIdxReg.ShiftTy != ARM_AM::no_shift) |
2880 | 0 | OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " " |
2881 | 0 | << PostIdxReg.ShiftImm; |
2882 | 0 | OS << ">"; |
2883 | 0 | break; |
2884 | 0 | case k_ProcIFlags: { |
2885 | 0 | OS << "<ARM_PROC::"; |
2886 | 0 | unsigned IFlags = getProcIFlags(); |
2887 | 0 | for (int i=2; i >= 0; --i) |
2888 | 0 | if (IFlags & (1 << i)) |
2889 | 0 | OS << ARM_PROC::IFlagsToString(1 << i); |
2890 | 0 | OS << ">"; |
2891 | 0 | break; |
2892 | 0 | } |
2893 | 0 | case k_Register: |
2894 | 0 | OS << "<register " << getReg() << ">"; |
2895 | 0 | break; |
2896 | 0 | case k_ShifterImmediate: |
2897 | 0 | OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl") |
2898 | 0 | << " #" << ShifterImm.Imm << ">"; |
2899 | 0 | break; |
2900 | 0 | case k_ShiftedRegister: |
2901 | 0 | OS << "<so_reg_reg " |
2902 | 0 | << RegShiftedReg.SrcReg << " " |
2903 | 0 | << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy) |
2904 | 0 | << " " << RegShiftedReg.ShiftReg << ">"; |
2905 | 0 | break; |
2906 | 0 | case k_ShiftedImmediate: |
2907 | 0 | OS << "<so_reg_imm " |
2908 | 0 | << RegShiftedImm.SrcReg << " " |
2909 | 0 | << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy) |
2910 | 0 | << " #" << RegShiftedImm.ShiftImm << ">"; |
2911 | 0 | break; |
2912 | 0 | case k_RotateImmediate: |
2913 | 0 | OS << "<ror " << " #" << (RotImm.Imm * 8) << ">"; |
2914 | 0 | break; |
2915 | 0 | case k_ModifiedImmediate: |
2916 | 0 | OS << "<mod_imm #" << ModImm.Bits << ", #" |
2917 | 0 | << ModImm.Rot << ")>"; |
2918 | 0 | break; |
2919 | 0 | case k_BitfieldDescriptor: |
2920 | 0 | OS << "<bitfield " << "lsb: " << Bitfield.LSB |
2921 | 0 | << ", width: " << Bitfield.Width << ">"; |
2922 | 0 | break; |
2923 | 0 | case k_RegisterList: |
2924 | 0 | case k_DPRRegisterList: |
2925 | 0 | case k_SPRRegisterList: { |
2926 | 0 | OS << "<register_list "; |
2927 | |
2928 | 0 | const SmallVectorImpl<unsigned> &RegList = getRegList(); |
2929 | 0 | for (SmallVectorImpl<unsigned>::const_iterator |
2930 | 0 | I = RegList.begin(), E = RegList.end(); I != E; ) { |
2931 | 0 | OS << *I; |
2932 | 0 | if (++I < E) OS << ", "; |
2933 | 0 | } |
2934 | |
2935 | 0 | OS << ">"; |
2936 | 0 | break; |
2937 | 0 | } |
2938 | 0 | case k_VectorList: |
2939 | 0 | OS << "<vector_list " << VectorList.Count << " * " |
2940 | 0 | << VectorList.RegNum << ">"; |
2941 | 0 | break; |
2942 | 0 | case k_VectorListAllLanes: |
2943 | 0 | OS << "<vector_list(all lanes) " << VectorList.Count << " * " |
2944 | 0 | << VectorList.RegNum << ">"; |
2945 | 0 | break; |
2946 | 0 | case k_VectorListIndexed: |
2947 | 0 | OS << "<vector_list(lane " << VectorList.LaneIndex << ") " |
2948 | 0 | << VectorList.Count << " * " << VectorList.RegNum << ">"; |
2949 | 0 | break; |
2950 | 0 | case k_Token: |
2951 | 0 | OS << "'" << getToken() << "'"; |
2952 | 0 | break; |
2953 | 0 | case k_VectorIndex: |
2954 | 0 | OS << "<vectorindex " << getVectorIndex() << ">"; |
2955 | 0 | break; |
2956 | 0 | } |
2957 | 0 | } |
2958 | | |
2959 | | /// @name Auto-generated Match Functions |
2960 | | /// { |
2961 | | |
2962 | | static unsigned MatchRegisterName(StringRef Name); |
2963 | | |
2964 | | /// } |
2965 | | |
2966 | | bool ARMAsmParser::ParseRegister(unsigned &RegNo, |
2967 | 5 | SMLoc &StartLoc, SMLoc &EndLoc, unsigned int &ErrorCode) { |
2968 | 5 | const AsmToken &Tok = getParser().getTok(); |
2969 | 5 | StartLoc = Tok.getLoc(); |
2970 | 5 | EndLoc = Tok.getEndLoc(); |
2971 | 5 | RegNo = tryParseRegister(); |
2972 | | |
2973 | 5 | return (RegNo == (unsigned)-1); |
2974 | 5 | } |
2975 | | |
2976 | | /// Try to parse a register name. The token must be an Identifier when called, |
2977 | | /// and if it is a register name the token is eaten and the register number is |
2978 | | /// returned. Otherwise return -1. |
2979 | | /// |
2980 | 247k | int ARMAsmParser::tryParseRegister() { |
2981 | 247k | MCAsmParser &Parser = getParser(); |
2982 | 247k | const AsmToken &Tok = Parser.getTok(); |
2983 | 247k | if (Tok.isNot(AsmToken::Identifier)) return -1; |
2984 | | |
2985 | 246k | std::string lowerCase = Tok.getString().lower(); |
2986 | 246k | unsigned RegNum = MatchRegisterName(lowerCase); |
2987 | 246k | if (!RegNum) { |
2988 | 201k | RegNum = StringSwitch<unsigned>(lowerCase) |
2989 | 201k | .Case("r13", ARM::SP) |
2990 | 201k | .Case("r14", ARM::LR) |
2991 | 201k | .Case("r15", ARM::PC) |
2992 | 201k | .Case("ip", ARM::R12) |
2993 | | // Additional register name aliases for 'gas' compatibility. |
2994 | 201k | .Case("a1", ARM::R0) |
2995 | 201k | .Case("a2", ARM::R1) |
2996 | 201k | .Case("a3", ARM::R2) |
2997 | 201k | .Case("a4", ARM::R3) |
2998 | 201k | .Case("v1", ARM::R4) |
2999 | 201k | .Case("v2", ARM::R5) |
3000 | 201k | .Case("v3", ARM::R6) |
3001 | 201k | .Case("v4", ARM::R7) |
3002 | 201k | .Case("v5", ARM::R8) |
3003 | 201k | .Case("v6", ARM::R9) |
3004 | 201k | .Case("v7", ARM::R10) |
3005 | 201k | .Case("v8", ARM::R11) |
3006 | 201k | .Case("sb", ARM::R9) |
3007 | 201k | .Case("sl", ARM::R10) |
3008 | 201k | .Case("fp", ARM::R11) |
3009 | 201k | .Default(0); |
3010 | 201k | } |
3011 | 246k | if (!RegNum) { |
3012 | | // Check for aliases registered via .req. Canonicalize to lower case. |
3013 | | // That's more consistent since register names are case insensitive, and |
3014 | | // it's how the original entry was passed in from MC/MCParser/AsmParser. |
3015 | 145k | StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase); |
3016 | | // If no match, return failure. |
3017 | 145k | if (Entry == RegisterReqs.end()) |
3018 | 145k | return -1; |
3019 | 0 | Parser.Lex(); // Eat identifier token. |
3020 | 0 | return Entry->getValue(); |
3021 | 145k | } |
3022 | | |
3023 | | // Some FPUs only have 16 D registers, so D16-D31 are invalid |
3024 | 101k | if (hasD16() && RegNum >= ARM::D16 && RegNum <= ARM::D31) |
3025 | 4 | return -1; |
3026 | | |
3027 | 101k | Parser.Lex(); // Eat identifier token. |
3028 | | |
3029 | 101k | return RegNum; |
3030 | 101k | } |
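
tryParseRegister lowers the name before matching, accepts the gas-style aliases listed above, and finally consults the .req alias map. The sketch below mirrors that lookup order with plain standard-library containers; the alias values shown (fp = r11, ip = r12, sb = r9, sl = r10) are taken from the table above, while the helper itself is only an illustration, not the LLVM API.

```cpp
// Illustrative stand-in for the lookup order above: lowercase the name, try
// the fixed gas-compatibility aliases, then fall back to user ".req" aliases.
#include <algorithm>
#include <cctype>
#include <cstdio>
#include <map>
#include <string>

int lookupReg(std::string Name, const std::map<std::string, int> &RegisterReqs) {
  std::transform(Name.begin(), Name.end(), Name.begin(),
                 [](unsigned char C) { return std::tolower(C); });
  static const std::map<std::string, int> Aliases = {
      {"fp", 11}, {"ip", 12}, {"sb", 9}, {"sl", 10},
      {"r13", 13}, {"r14", 14}, {"r15", 15}};
  auto A = Aliases.find(Name);
  if (A != Aliases.end())
    return A->second;
  auto R = RegisterReqs.find(Name);
  if (R != RegisterReqs.end())
    return R->second;
  return -1; // no match, mirroring the -1 convention above
}

int main() {
  std::map<std::string, int> Reqs = {{"counter", 4}}; // e.g. "counter .req r4"
  std::printf("FP -> r%d, counter -> r%d\n",
              lookupReg("FP", Reqs), lookupReg("counter", Reqs));
}
```
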
3031 | | |
3032 | | // Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0. |
3033 | | // If a recoverable error occurs, return 1. If an irrecoverable error |
3034 | | // occurs, return -1. An irrecoverable error is one where tokens have been |
3035 | | // consumed in the process of trying to parse the shifter (i.e., when it is |
3036 | | // indeed a shifter operand, but malformed). |
3037 | | int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) |
3038 | 102k | { |
3039 | 102k | MCAsmParser &Parser = getParser(); |
3040 | 102k | SMLoc S = Parser.getTok().getLoc(); |
3041 | 102k | const AsmToken &Tok = Parser.getTok(); |
3042 | 102k | if (Tok.isNot(AsmToken::Identifier)) |
3043 | 126 | return -1; |
3044 | | |
3045 | 102k | std::string lowerCase = Tok.getString().lower(); |
3046 | 102k | ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase) |
3047 | 102k | .Case("asl", ARM_AM::lsl) |
3048 | 102k | .Case("lsl", ARM_AM::lsl) |
3049 | 102k | .Case("lsr", ARM_AM::lsr) |
3050 | 102k | .Case("asr", ARM_AM::asr) |
3051 | 102k | .Case("ror", ARM_AM::ror) |
3052 | 102k | .Case("rrx", ARM_AM::rrx) |
3053 | 102k | .Default(ARM_AM::no_shift); |
3054 | | |
3055 | 102k | if (ShiftTy == ARM_AM::no_shift) |
3056 | 100k | return 1; |
3057 | | |
3058 | 1.31k | Parser.Lex(); // Eat the operator. |
3059 | | |
3060 | | // The source register for the shift has already been added to the |
3061 | | // operand list, so we need to pop it off and combine it into the shifted |
3062 | | // register operand instead. |
3063 | 1.31k | std::unique_ptr<ARMOperand> PrevOp( |
3064 | 1.31k | (ARMOperand *)Operands.pop_back_val().release()); |
3065 | 1.31k | if (!PrevOp->isReg()) |
3066 | | //return Error(PrevOp->getStartLoc(), "shift must be of a register"); |
3067 | 36 | return -1; |
3068 | 1.27k | int SrcReg = PrevOp->getReg(); |
3069 | | |
3070 | 1.27k | SMLoc EndLoc; |
3071 | 1.27k | int64_t Imm = 0; |
3072 | 1.27k | int ShiftReg = 0; |
3073 | 1.27k | if (ShiftTy == ARM_AM::rrx) { |
3074 | | // RRX doesn't have an explicit shift amount. The encoder expects |
3075 | | // the shift register to be the same as the source register. Seems odd, |
3076 | | // but OK. |
3077 | 336 | ShiftReg = SrcReg; |
3078 | 942 | } else { |
3079 | | // Figure out if this is shifted by a constant or a register (for non-RRX). |
3080 | 942 | if (Parser.getTok().is(AsmToken::Hash) || |
3081 | 942 | Parser.getTok().is(AsmToken::Dollar)) { |
3082 | 756 | Parser.Lex(); // Eat hash. |
3083 | | //SMLoc ImmLoc = Parser.getTok().getLoc(); |
3084 | 756 | const MCExpr *ShiftExpr = nullptr; |
3085 | 756 | if (getParser().parseExpression(ShiftExpr, EndLoc)) { |
3086 | | //Error(ImmLoc, "invalid immediate shift value"); |
3087 | 48 | return -1; |
3088 | 48 | } |
3089 | | // The expression must be evaluatable as an immediate. |
3090 | 708 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr); |
3091 | 708 | if (!CE) { |
3092 | | //Error(ImmLoc, "invalid immediate shift value"); |
3093 | 7 | return -1; |
3094 | 7 | } |
3095 | | // Range check the immediate. |
3096 | | // lsl, ror: 0 <= imm <= 31 |
3097 | | // lsr, asr: 0 <= imm <= 32 |
3098 | 701 | Imm = CE->getValue(); |
3099 | 701 | if (Imm < 0 || |
3100 | 701 | ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) || |
3101 | 701 | ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) { |
3102 | | //Error(ImmLoc, "immediate shift value out of range"); |
3103 | 82 | return -1; |
3104 | 82 | } |
3105 | | // shift by zero is a nop. Always send it through as lsl. |
3106 | | // ('as' compatibility) |
3107 | 619 | if (Imm == 0) |
3108 | 139 | ShiftTy = ARM_AM::lsl; |
3109 | 619 | } else if (Parser.getTok().is(AsmToken::Identifier)) { |
3110 | | //SMLoc L = Parser.getTok().getLoc(); |
3111 | 161 | EndLoc = Parser.getTok().getEndLoc(); |
3112 | 161 | ShiftReg = tryParseRegister(); |
3113 | 161 | if (ShiftReg == -1) { |
3114 | | //Error(L, "expected immediate or register in shift operand"); |
3115 | 10 | return -1; |
3116 | 10 | } |
3117 | 161 | } else { |
3118 | | //Error(Parser.getTok().getLoc(), |
3119 | | // "expected immediate or register in shift operand"); |
3120 | 25 | return -1; |
3121 | 25 | } |
3122 | 942 | } |
3123 | | |
3124 | 1.10k | if (ShiftReg && ShiftTy != ARM_AM::rrx) |
3125 | 151 | Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg, |
3126 | 151 | ShiftReg, Imm, |
3127 | 151 | S, EndLoc)); |
3128 | 955 | else |
3129 | 955 | Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm, |
3130 | 955 | S, EndLoc)); |
3131 | | |
3132 | 1.10k | return 0; |
3133 | 1.27k | } |
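
The range checks above (0..31 for lsl/ror, 0..32 for lsr/asr) and the shift-by-zero normalisation are easy to get wrong, so here is a minimal standalone sketch of just that validation step:

```cpp
// Minimal sketch of the immediate-shift validation done above: lsl/ror accept
// 0..31, lsr/asr accept 0..32, and a zero amount is canonicalized to lsl for
// 'as' compatibility.
#include <cstdio>

enum ShiftOpc { lsl, lsr, asr, ror, rrx };

bool validateShiftImm(ShiftOpc &Ty, long long Imm) {
  if (Imm < 0) return false;
  if ((Ty == lsl || Ty == ror) && Imm > 31) return false;
  if ((Ty == lsr || Ty == asr) && Imm > 32) return false;
  if (Imm == 0) Ty = lsl; // shift by zero is a nop; always send it through as lsl
  return true;
}

int main() {
  ShiftOpc T = lsr;
  std::printf("lsr #32 valid: %d\n", validateShiftImm(T, 32)); // 1
  T = lsl;
  std::printf("lsl #32 valid: %d\n", validateShiftImm(T, 32)); // 0
}
```
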
3134 | | |
3135 | | |
3136 | | /// Try to parse a register name. The token must be an Identifier when called. |
3137 | | /// If it's a register, an AsmOperand is created. Another AsmOperand is created |
3138 | | /// if there is a "writeback". Returns 'true' if it's not a register. |
3139 | | /// |
3140 | | /// TODO this is likely to change to allow different register types and or to |
3141 | | /// parse for a specific register type. |
3142 | | bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) |
3143 | 195k | { |
3144 | 195k | MCAsmParser &Parser = getParser(); |
3145 | 195k | const AsmToken &RegTok = Parser.getTok(); |
3146 | 195k | int RegNo = tryParseRegister(); |
3147 | 195k | if (RegNo == -1) |
3148 | 102k | return true; |
3149 | | |
3150 | 93.7k | Operands.push_back(ARMOperand::CreateReg(RegNo, RegTok.getLoc(), |
3151 | 93.7k | RegTok.getEndLoc())); |
3152 | | |
3153 | 93.7k | const AsmToken &ExclaimTok = Parser.getTok(); |
3154 | 93.7k | if (ExclaimTok.is(AsmToken::Exclaim)) { |
3155 | 184 | Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(), |
3156 | 184 | ExclaimTok.getLoc())); |
3157 | 184 | Parser.Lex(); // Eat exclaim token |
3158 | 184 | return false; |
3159 | 184 | } |
3160 | | |
3161 | | // Also check for an index operand. This is only legal for vector registers, |
3162 | | // but that'll get caught OK in operand matching, so we don't need to |
3163 | | // explicitly filter everything else out here. |
3164 | 93.5k | if (Parser.getTok().is(AsmToken::LBrac)) { |
3165 | 255 | SMLoc SIdx = Parser.getTok().getLoc(); |
3166 | 255 | Parser.Lex(); // Eat left bracket token. |
3167 | | |
3168 | 255 | const MCExpr *ImmVal; |
3169 | 255 | if (getParser().parseExpression(ImmVal)) |
3170 | 145 | return true; |
3171 | 110 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); |
3172 | 110 | if (!MCE) |
3173 | | //return TokError("immediate value expected for vector index"); |
3174 | 53 | return true; |
3175 | | |
3176 | 57 | if (Parser.getTok().isNot(AsmToken::RBrac)) |
3177 | | //return Error(Parser.getTok().getLoc(), "']' expected"); |
3178 | 40 | return true; |
3179 | | |
3180 | 17 | SMLoc E = Parser.getTok().getEndLoc(); |
3181 | 17 | Parser.Lex(); // Eat right bracket token. |
3182 | | |
3183 | 17 | Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(), |
3184 | 17 | SIdx, E, |
3185 | 17 | getContext())); |
3186 | 17 | } |
3187 | | |
3188 | 93.2k | return false; |
3189 | 93.5k | } |
3190 | | |
3191 | | /// MatchCoprocessorOperandName - Try to parse a coprocessor-related |
3192 | | /// instruction with a symbolic operand name. |
3193 | | /// We accept "crN" syntax for GAS compatibility. |
3194 | | /// <operand-name> ::= <prefix><number> |
3195 | | /// If CoprocOp is 'c', then: |
3196 | | /// <prefix> ::= c | cr |
3197 | | /// If CoprocOp is 'p', then : |
3198 | | /// <prefix> ::= p |
3199 | | /// <number> ::= integer in range [0, 15] |
3200 | 6.53k | static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) { |
3201 | | // Use the same layout as the tablegen'erated register name matcher. Ugly, |
3202 | | // but efficient. |
3203 | 6.53k | if (Name.size() < 2 || Name[0] != CoprocOp) |
3204 | 4.76k | return -1; |
3205 | 1.77k | Name = (Name[1] == 'r') ? Name.drop_front(2) : Name.drop_front(); |
3206 | | |
3207 | 1.77k | switch (Name.size()) { |
3208 | 99 | default: return -1; |
3209 | 1.42k | case 1: |
3210 | 1.42k | switch (Name[0]) { |
3211 | 177 | default: return -1; |
3212 | 542 | case '0': return 0; |
3213 | 532 | case '1': return 1; |
3214 | 9 | case '2': return 2; |
3215 | 8 | case '3': return 3; |
3216 | 16 | case '4': return 4; |
3217 | 5 | case '5': return 5; |
3218 | 98 | case '6': return 6; |
3219 | 13 | case '7': return 7; |
3220 | 6 | case '8': return 8; |
3221 | 16 | case '9': return 9; |
3222 | 1.42k | } |
3223 | 256 | case 2: |
3224 | 256 | if (Name[0] != '1') |
3225 | 113 | return -1; |
3226 | 143 | switch (Name[1]) { |
3227 | 23 | default: return -1; |
3228 | | // CP10 and CP11 are VFP/NEON and so vector instructions should be used. |
3229 | | // However, old cores (v5/v6) did use them in that way. |
3230 | 28 | case '0': return 10; |
3231 | 75 | case '1': return 11; |
3232 | 1 | case '2': return 12; |
3233 | 7 | case '3': return 13; |
3234 | 2 | case '4': return 14; |
3235 | 7 | case '5': return 15; |
3236 | 143 | } |
3237 | 1.77k | } |
3238 | 1.77k | } |
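
The grammar in the doc comment ("c"/"cr" for coprocessor registers, "p" for coprocessor numbers, value in [0, 15]) can be exercised with a compact stand-in. The helper below re-implements the same matching rules for illustration only; it is not the static function above.

```cpp
// Illustrative re-implementation of the coprocessor-name grammar documented
// above: <prefix> is "c" or "cr" (for registers) or "p" (for numbers), and the
// trailing digits must name a value in [0, 15].
#include <cstdio>
#include <string>

int matchCoproc(std::string Name, char CoprocOp) {
  if (Name.size() < 2 || Name[0] != CoprocOp)
    return -1;
  Name = (Name[1] == 'r') ? Name.substr(2) : Name.substr(1);
  if (Name.empty() || Name.size() > 2)
    return -1;
  if (Name.size() == 2 && Name[0] != '1')
    return -1; // two-digit values must be 10..15
  for (char C : Name)
    if (C < '0' || C > '9')
      return -1;
  int N = std::stoi(Name);
  return N <= 15 ? N : -1;
}

int main() {
  std::printf("cr15=%d c7=%d p10=%d p16=%d\n",
              matchCoproc("cr15", 'c'), matchCoproc("c7", 'c'),
              matchCoproc("p10", 'p'), matchCoproc("p16", 'p'));
}
```
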
3239 | | |
3240 | | /// parseITCondCode - Try to parse a condition code for an IT instruction. |
3241 | | ARMAsmParser::OperandMatchResultTy |
3242 | 4.35k | ARMAsmParser::parseITCondCode(OperandVector &Operands, unsigned int &ErrorCode) { |
3243 | 4.35k | MCAsmParser &Parser = getParser(); |
3244 | 4.35k | SMLoc S = Parser.getTok().getLoc(); |
3245 | 4.35k | const AsmToken &Tok = Parser.getTok(); |
3246 | 4.35k | if (!Tok.is(AsmToken::Identifier)) |
3247 | 147 | return MatchOperand_NoMatch; |
3248 | 4.21k | unsigned CC = StringSwitch<unsigned>(Tok.getString().lower()) |
3249 | 4.21k | .Case("eq", ARMCC::EQ) |
3250 | 4.21k | .Case("ne", ARMCC::NE) |
3251 | 4.21k | .Case("hs", ARMCC::HS) |
3252 | 4.21k | .Case("cs", ARMCC::HS) |
3253 | 4.21k | .Case("lo", ARMCC::LO) |
3254 | 4.21k | .Case("cc", ARMCC::LO) |
3255 | 4.21k | .Case("mi", ARMCC::MI) |
3256 | 4.21k | .Case("pl", ARMCC::PL) |
3257 | 4.21k | .Case("vs", ARMCC::VS) |
3258 | 4.21k | .Case("vc", ARMCC::VC) |
3259 | 4.21k | .Case("hi", ARMCC::HI) |
3260 | 4.21k | .Case("ls", ARMCC::LS) |
3261 | 4.21k | .Case("ge", ARMCC::GE) |
3262 | 4.21k | .Case("lt", ARMCC::LT) |
3263 | 4.21k | .Case("gt", ARMCC::GT) |
3264 | 4.21k | .Case("le", ARMCC::LE) |
3265 | 4.21k | .Case("al", ARMCC::AL) |
3266 | 4.21k | .Default(~0U); |
3267 | 4.21k | if (CC == ~0U) |
3268 | 623 | return MatchOperand_NoMatch; |
3269 | 3.58k | Parser.Lex(); // Eat the token. |
3270 | | |
3271 | 3.58k | Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S)); |
3272 | | |
3273 | 3.58k | return MatchOperand_Success; |
3274 | 4.21k | } |
3275 | | |
3276 | | /// parseCoprocNumOperand - Try to parse a coprocessor number operand. The |
3277 | | /// token must be an Identifier when called, and if it is a coprocessor |
3278 | | /// number, the token is eaten and the operand is added to the operand list. |
3279 | | ARMAsmParser::OperandMatchResultTy |
3280 | 4.44k | ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands, unsigned int &ErrorCode) { |
3281 | 4.44k | MCAsmParser &Parser = getParser(); |
3282 | 4.44k | SMLoc S = Parser.getTok().getLoc(); |
3283 | 4.44k | const AsmToken &Tok = Parser.getTok(); |
3284 | 4.44k | if (Tok.isNot(AsmToken::Identifier)) |
3285 | 1.32k | return MatchOperand_NoMatch; |
3286 | | |
3287 | 3.11k | int Num = MatchCoprocessorOperandName(Tok.getString(), 'p'); |
3288 | 3.11k | if (Num == -1) |
3289 | 2.37k | return MatchOperand_NoMatch; |
3290 | | // ARMv7 and v8 don't allow cp10/cp11 due to VFP/NEON specific instructions |
3291 | 740 | if ((hasV7Ops() || hasV8Ops()) && (Num == 10 || Num == 11)) |
3292 | 51 | return MatchOperand_NoMatch; |
3293 | | |
3294 | 689 | Parser.Lex(); // Eat identifier token. |
3295 | 689 | Operands.push_back(ARMOperand::CreateCoprocNum(Num, S)); |
3296 | 689 | return MatchOperand_Success; |
3297 | 740 | } |
3298 | | |
3299 | | /// parseCoprocRegOperand - Try to parse a coprocessor register operand. The |
3300 | | /// token must be an Identifier when called, and if it is a coprocessor |
3301 | | /// number, the token is eaten and the operand is added to the operand list. |
3302 | | ARMAsmParser::OperandMatchResultTy |
3303 | 4.11k | ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands, unsigned int &ErrorCode) { |
3304 | 4.11k | MCAsmParser &Parser = getParser(); |
3305 | 4.11k | SMLoc S = Parser.getTok().getLoc(); |
3306 | 4.11k | const AsmToken &Tok = Parser.getTok(); |
3307 | 4.11k | if (Tok.isNot(AsmToken::Identifier)) |
3308 | 692 | return MatchOperand_NoMatch; |
3309 | | |
3310 | 3.42k | int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c'); |
3311 | 3.42k | if (Reg == -1) |
3312 | 2.80k | return MatchOperand_NoMatch; |
3313 | | |
3314 | 625 | Parser.Lex(); // Eat identifier token. |
3315 | 625 | Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S)); |
3316 | 625 | return MatchOperand_Success; |
3317 | 3.42k | } |
3318 | | |
3319 | | /// parseCoprocOptionOperand - Try to parse a coprocessor option operand. |
3320 | | /// coproc_option : '{' imm0_255 '}' |
3321 | | ARMAsmParser::OperandMatchResultTy |
3322 | | ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands, unsigned int &ErrorCode) |
3323 | 825 | { |
3324 | 825 | MCAsmParser &Parser = getParser(); |
3325 | 825 | SMLoc S = Parser.getTok().getLoc(); |
3326 | | |
3327 | | // If this isn't a '{', this isn't a coprocessor immediate operand. |
3328 | 825 | if (Parser.getTok().isNot(AsmToken::LCurly)) |
3329 | 728 | return MatchOperand_NoMatch; |
3330 | 97 | Parser.Lex(); // Eat the '{' |
3331 | | |
3332 | 97 | const MCExpr *Expr; |
3333 | | //SMLoc Loc = Parser.getTok().getLoc(); |
3334 | 97 | if (getParser().parseExpression(Expr)) { |
3335 | | //Error(Loc, "illegal expression"); |
3336 | 9 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
3337 | 9 | return MatchOperand_ParseFail; |
3338 | 9 | } |
3339 | 88 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); |
3340 | 88 | if (!CE || CE->getValue() < 0 || CE->getValue() > 255) { |
3341 | | //Error(Loc, "coprocessor option must be an immediate in range [0, 255]"); |
3342 | 81 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
3343 | 81 | return MatchOperand_ParseFail; |
3344 | 81 | } |
3345 | 7 | int Val = CE->getValue(); |
3346 | | |
3347 | | // Check for and consume the closing '}' |
3348 | 7 | if (Parser.getTok().isNot(AsmToken::RCurly)) |
3349 | 5 | return MatchOperand_ParseFail; |
3350 | 2 | SMLoc E = Parser.getTok().getEndLoc(); |
3351 | 2 | Parser.Lex(); // Eat the '}' |
3352 | | |
3353 | 2 | Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E)); |
3354 | 2 | return MatchOperand_Success; |
3355 | 7 | } |
3356 | | |
3357 | | // For register list parsing, we need to map from raw GPR register numbering |
3358 | | // to the enumeration values. The enumeration values aren't sorted by |
3359 | | // register number due to our using "sp", "lr" and "pc" as canonical names. |
3360 | 6.84k | static unsigned getNextRegister(unsigned Reg) { |
3361 | | // If this is a GPR, we need to do it manually, otherwise we can rely |
3362 | | // on the sort ordering of the enumeration since the other reg-classes |
3363 | | // are sane. |
3364 | 6.84k | if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg)) |
3365 | 654 | return Reg + 1; |
3366 | 6.18k | switch(Reg) { |
3367 | 0 | default: llvm_unreachable("Invalid GPR number!"); |
3368 | 606 | case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2; |
3369 | 622 | case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4; |
3370 | 660 | case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6; |
3371 | 674 | case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8; |
3372 | 650 | case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10; |
3373 | 130 | case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12; |
3374 | 98 | case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR; |
3375 | 51 | case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0; |
3376 | 6.18k | } |
3377 | 6.18k | } |
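
As the comment above explains, the GPR enum is not laid out in the order assembly programmers expect, so the ring r0..r12, sp, lr, pc is walked with an explicit switch. The sketch below expresses the same successor relation over an index array, purely to make the intended order (including the pc -> r0 wrap-around) visible; real code must use the ARM::* enum values.

```cpp
// The GPR successor order walked by getNextRegister above, written out over
// plain names. This only illustrates the ordering; it is not the LLVM enum.
#include <cstdio>

static const char *const GPROrder[] = {
    "r0", "r1", "r2",  "r3",  "r4",  "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "sp", "lr", "pc"};

const char *nextGPR(unsigned Index) { return GPROrder[(Index + 1) % 16]; }

int main() {
  std::printf("after r12 comes %s, after pc comes %s\n", nextGPR(12), nextGPR(15));
}
```
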
3378 | | |
3379 | | // Return the low-subreg of a given Q register. |
3380 | 763 | static unsigned getDRegFromQReg(unsigned QReg) { |
3381 | 763 | switch (QReg) { |
3382 | 0 | default: llvm_unreachable("expected a Q register!"); |
3383 | 129 | case ARM::Q0: return ARM::D0; |
3384 | 85 | case ARM::Q1: return ARM::D2; |
3385 | 79 | case ARM::Q2: return ARM::D4; |
3386 | 61 | case ARM::Q3: return ARM::D6; |
3387 | 49 | case ARM::Q4: return ARM::D8; |
3388 | 28 | case ARM::Q5: return ARM::D10; |
3389 | 28 | case ARM::Q6: return ARM::D12; |
3390 | 85 | case ARM::Q7: return ARM::D14; |
3391 | 30 | case ARM::Q8: return ARM::D16; |
3392 | 21 | case ARM::Q9: return ARM::D18; |
3393 | 6 | case ARM::Q10: return ARM::D20; |
3394 | 20 | case ARM::Q11: return ARM::D22; |
3395 | 44 | case ARM::Q12: return ARM::D24; |
3396 | 20 | case ARM::Q13: return ARM::D26; |
3397 | 63 | case ARM::Q14: return ARM::D28; |
3398 | 15 | case ARM::Q15: return ARM::D30; |
3399 | 763 | } |
3400 | 763 | } |
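
Numerically the table above is just Qn -> D(2n), with D(2n+1) as the high half; the switch is presumably needed because the ARM::* enum values do not follow that arithmetic directly. A throwaway sketch of the index-level mapping:

```cpp
// Index-level view of the Q-to-D mapping implemented by the switch above:
// Qn covers D(2n) and D(2n+1). Plain indices are used here because the ARM::*
// register enum cannot be assumed to support this arithmetic directly.
#include <cstdio>

unsigned lowDIndexOfQ(unsigned QIndex) { return 2 * QIndex; }

int main() {
  for (unsigned Q = 0; Q < 16; ++Q)
    std::printf("q%u -> d%u, d%u\n", Q, lowDIndexOfQ(Q), lowDIndexOfQ(Q) + 1);
}
```
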
3401 | | |
3402 | | /// Parse a register list. |
3403 | | bool ARMAsmParser::parseRegisterList(OperandVector &Operands) |
3404 | 1.01k | { |
3405 | 1.01k | MCAsmParser &Parser = getParser(); |
3406 | 1.01k | assert(Parser.getTok().is(AsmToken::LCurly) && |
3407 | 1.01k | "Token is not a Left Curly Brace"); |
3408 | 1.01k | SMLoc S = Parser.getTok().getLoc(); |
3409 | 1.01k | Parser.Lex(); // Eat '{' token. |
3410 | 1.01k | SMLoc RegLoc = Parser.getTok().getLoc(); |
3411 | | |
3412 | | // Check the first register in the list to see what register class |
3413 | | // this is a list of. |
3414 | 1.01k | int Reg = tryParseRegister(); |
3415 | 1.01k | if (Reg == -1) |
3416 | | //return Error(RegLoc, "register expected"); |
3417 | 218 | return true; |
3418 | | |
3419 | | // The reglist instructions have at most 16 registers, so reserve |
3420 | | // space for that many. |
3421 | 801 | int EReg = 0; |
3422 | 801 | SmallVector<std::pair<unsigned, unsigned>, 16> Registers; |
3423 | | |
3424 | | // Allow Q regs and just interpret them as the two D sub-registers. |
3425 | 801 | if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { |
3426 | 186 | Reg = getDRegFromQReg(Reg); |
3427 | 186 | EReg = MRI->getEncodingValue(Reg); |
3428 | 186 | Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg)); |
3429 | 186 | ++Reg; |
3430 | 186 | } |
3431 | 801 | const MCRegisterClass *RC; |
3432 | 801 | if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg)) |
3433 | 377 | RC = &ARMMCRegisterClasses[ARM::GPRRegClassID]; |
3434 | 424 | else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) |
3435 | 226 | RC = &ARMMCRegisterClasses[ARM::DPRRegClassID]; |
3436 | 198 | else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg)) |
3437 | 192 | RC = &ARMMCRegisterClasses[ARM::SPRRegClassID]; |
3438 | 6 | else |
3439 | | //return Error(RegLoc, "invalid register in register list"); |
3440 | 6 | return true; |
3441 | | |
3442 | | // Store the register. |
3443 | 795 | EReg = MRI->getEncodingValue(Reg); |
3444 | 795 | Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg)); |
3445 | | |
3446 | | // This starts immediately after the first register token in the list, |
3447 | | // so we can see either a comma or a minus (range separator) as a legal |
3448 | | // next token. |
3449 | 5.07k | while (Parser.getTok().is(AsmToken::Comma) || |
3450 | 5.07k | Parser.getTok().is(AsmToken::Minus)) { |
3451 | 4.65k | if (Parser.getTok().is(AsmToken::Minus)) { |
3452 | 1.61k | Parser.Lex(); // Eat the minus. |
3453 | | //SMLoc AfterMinusLoc = Parser.getTok().getLoc(); |
3454 | 1.61k | int EndReg = tryParseRegister(); |
3455 | 1.61k | if (EndReg == -1) |
3456 | | //return Error(AfterMinusLoc, "register expected"); |
3457 | 83 | return true; |
3458 | | // Allow Q regs and just interpret them as the two D sub-registers. |
3459 | 1.52k | if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg)) |
3460 | 137 | EndReg = getDRegFromQReg(EndReg) + 1; |
3461 | | // If the register is the same as the start reg, there's nothing |
3462 | | // more to do. |
3463 | 1.52k | if (Reg == EndReg) |
3464 | 581 | continue; |
3465 | | // The register must be in the same register class as the first. |
3466 | 948 | if (!RC->contains(EndReg)) |
3467 | | //return Error(AfterMinusLoc, "invalid register in register list"); |
3468 | 13 | return true; |
3469 | | // Ranges must go from low to high. |
3470 | 935 | if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg)) |
3471 | | //return Error(AfterMinusLoc, "bad range in register list"); |
3472 | 24 | return true; |
3473 | | |
3474 | | // Add all the registers in the range to the register list. |
3475 | 7.75k | while (Reg != EndReg) { |
3476 | 6.84k | Reg = getNextRegister(Reg); |
3477 | 6.84k | EReg = MRI->getEncodingValue(Reg); |
3478 | 6.84k | Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg)); |
3479 | 6.84k | } |
3480 | 911 | continue; |
3481 | 935 | } |
3482 | 3.04k | Parser.Lex(); // Eat the comma. |
3483 | 3.04k | RegLoc = Parser.getTok().getLoc(); |
3484 | 3.04k | int OldReg = Reg; |
3485 | 3.04k | const AsmToken RegTok = Parser.getTok(); |
3486 | 3.04k | Reg = tryParseRegister(); |
3487 | 3.04k | if (Reg == -1) |
3488 | | //return Error(RegLoc, "register expected"); |
3489 | 229 | return true; |
3490 | | // Allow Q regs and just interpret them as the two D sub-registers. |
3491 | 2.81k | bool isQReg = false; |
3492 | 2.81k | if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { |
3493 | 107 | Reg = getDRegFromQReg(Reg); |
3494 | 107 | isQReg = true; |
3495 | 107 | } |
3496 | | // The register must be in the same register class as the first. |
3497 | 2.81k | if (!RC->contains(Reg)) |
3498 | | //return Error(RegLoc, "invalid register in register list"); |
3499 | 6 | return true; |
3500 | | // List must be monotonically increasing. |
3501 | 2.80k | if (MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) { |
3502 | 1.36k | if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg)) |
3503 | 1.35k | Warning(RegLoc, "register list not in ascending order"); |
3504 | 11 | else |
3505 | | //return Error(RegLoc, "register list not in ascending order"); |
3506 | 11 | return true; |
3507 | 1.36k | } |
3508 | 2.79k | if (MRI->getEncodingValue(Reg) == MRI->getEncodingValue(OldReg)) { |
3509 | 1.13k | Warning(RegLoc, "duplicated register (" + RegTok.getString() + |
3510 | 1.13k | ") in register list"); |
3511 | 1.13k | continue; |
3512 | 1.13k | } |
3513 | | // VFP register lists must also be contiguous. |
3514 | 1.66k | if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] && |
3515 | 1.66k | Reg != OldReg + 1) |
3516 | | //return Error(RegLoc, "non-contiguous register range"); |
3517 | 11 | return true; |
3518 | 1.64k | EReg = MRI->getEncodingValue(Reg); |
3519 | 1.64k | Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg)); |
3520 | 1.64k | if (isQReg) { |
3521 | 23 | EReg = MRI->getEncodingValue(++Reg); |
3522 | 23 | Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg)); |
3523 | 23 | } |
3524 | 1.64k | } |
3525 | | |
3526 | 418 | if (Parser.getTok().isNot(AsmToken::RCurly)) |
3527 | | //return Error(Parser.getTok().getLoc(), "'}' expected"); |
3528 | 190 | return true; |
3529 | 228 | SMLoc E = Parser.getTok().getEndLoc(); |
3530 | 228 | Parser.Lex(); // Eat '}' token. |
3531 | | |
3532 | | // Push the register list operand. |
3533 | 228 | Operands.push_back(ARMOperand::CreateRegList(Registers, S, E)); |
3534 | | |
3535 | | // The ARM system instruction variants for LDM/STM have a '^' token here. |
3536 | 228 | if (Parser.getTok().is(AsmToken::Caret)) { |
3537 | 35 | Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc())); |
3538 | 35 | Parser.Lex(); // Eat '^' token. |
3539 | 35 | } |
3540 | | |
3541 | 228 | return false; |
3542 | 418 | } |
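
parseRegisterList expands ranges, sorts by encoding, warns (rather than errors) on duplicates and on descending GPR pairs, and rejects mixed register classes and non-contiguous VFP lists. The sketch below models just the expansion, ordering, and duplicate handling on plain indices; it is a simplification, not the parser's real data flow, which works on MC register numbers and encoding values.

```cpp
// Simplified model of the list-building behaviour above, on plain GPR indices:
// ranges expand inclusively, duplicates are kept out (the parser only warns),
// and the final list is sorted by encoding, as CreateRegList does.
#include <algorithm>
#include <cstdio>
#include <set>
#include <utility>
#include <vector>

std::vector<unsigned>
buildRegList(const std::vector<std::pair<unsigned, unsigned>> &Ranges) {
  std::set<unsigned> Seen;
  std::vector<unsigned> Regs;
  for (const auto &R : Ranges)
    for (unsigned Reg = R.first; Reg <= R.second; ++Reg)
      if (Seen.insert(Reg).second) // duplicated register: warn and skip
        Regs.push_back(Reg);
  std::sort(Regs.begin(), Regs.end());
  return Regs;
}

int main() {
  // "{r0-r3, r5, r2}": r2 is a duplicate; the result is r0 r1 r2 r3 r5.
  for (unsigned R : buildRegList({{0, 3}, {5, 5}, {2, 2}}))
    std::printf("r%u ", R);
  std::printf("\n");
}
```
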
3543 | | |
3544 | | // Helper function to parse the lane index for vector lists. |
3545 | | ARMAsmParser::OperandMatchResultTy ARMAsmParser:: |
3546 | | parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc, unsigned int &ErrorCode) |
3547 | 415 | { |
3548 | 415 | MCAsmParser &Parser = getParser(); |
3549 | 415 | Index = 0; // Always return a defined index value. |
3550 | 415 | if (Parser.getTok().is(AsmToken::LBrac)) { |
3551 | 104 | Parser.Lex(); // Eat the '['. |
3552 | 104 | if (Parser.getTok().is(AsmToken::RBrac)) { |
3553 | | // "Dn[]" is the 'all lanes' syntax. |
3554 | 13 | LaneKind = AllLanes; |
3555 | 13 | EndLoc = Parser.getTok().getEndLoc(); |
3556 | 13 | Parser.Lex(); // Eat the ']'. |
3557 | 13 | return MatchOperand_Success; |
3558 | 13 | } |
3559 | | |
3560 | | // There's an optional '#' token here. Normally there wouldn't be, but |
3561 | | // inline assemble puts one in, and it's friendly to accept that. |
3562 | 91 | if (Parser.getTok().is(AsmToken::Hash)) |
3563 | 4 | Parser.Lex(); // Eat '#' or '$'. |
3564 | | |
3565 | 91 | const MCExpr *LaneIndex; |
3566 | | //SMLoc Loc = Parser.getTok().getLoc(); |
3567 | 91 | if (getParser().parseExpression(LaneIndex)) { |
3568 | | //Error(Loc, "illegal expression"); |
3569 | 42 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
3570 | 42 | return MatchOperand_ParseFail; |
3571 | 42 | } |
3572 | 49 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex); |
3573 | 49 | if (!CE) { |
3574 | | //Error(Loc, "lane index must be empty or an integer"); |
3575 | 9 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
3576 | 9 | return MatchOperand_ParseFail; |
3577 | 9 | } |
3578 | 40 | if (Parser.getTok().isNot(AsmToken::RBrac)) { |
3579 | | //Error(Parser.getTok().getLoc(), "']' expected"); |
3580 | 4 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
3581 | 4 | return MatchOperand_ParseFail; |
3582 | 4 | } |
3583 | 36 | EndLoc = Parser.getTok().getEndLoc(); |
3584 | 36 | Parser.Lex(); // Eat the ']'. |
3585 | 36 | int64_t Val = CE->getValue(); |
3586 | | |
3587 | | // FIXME: Make this range check context sensitive for .8, .16, .32. |
3588 | 36 | if (Val < 0 || Val > 7) { |
3589 | | //Error(Parser.getTok().getLoc(), "lane index out of range"); |
3590 | 25 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
3591 | 25 | return MatchOperand_ParseFail; |
3592 | 25 | } |
3593 | 11 | Index = Val; |
3594 | 11 | LaneKind = IndexedLane; |
3595 | 11 | return MatchOperand_Success; |
3596 | 36 | } |
3597 | 311 | LaneKind = NoLanes; |
3598 | 311 | return MatchOperand_Success; |
3599 | 415 | } |
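
parseVectorLane distinguishes three suffix shapes: no bracket (NoLanes), "[]" (AllLanes), and "[n]" with n in 0..7 (IndexedLane, with the range check flagged above as a FIXME). Below is a toy classifier over a ready-made suffix string, just to make the three outcomes concrete; the real code consumes '[', an expression, and ']' tokens from the lexer rather than strings.

```cpp
// Toy classifier for the three lane-suffix shapes handled above. It takes a
// pre-extracted suffix string purely for illustration.
#include <cstdio>
#include <string>

enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };

bool classifyLane(const std::string &Suffix, VectorLaneTy &Kind, unsigned &Index) {
  Index = 0;
  if (Suffix.empty()) { Kind = NoLanes; return true; }
  if (Suffix == "[]") { Kind = AllLanes; return true; }
  if (Suffix.size() == 3 && Suffix.front() == '[' && Suffix.back() == ']' &&
      Suffix[1] >= '0' && Suffix[1] <= '7') {
    Kind = IndexedLane;
    Index = Suffix[1] - '0';
    return true;
  }
  return false; // malformed suffix or lane index out of range
}

int main() {
  VectorLaneTy Kind;
  unsigned Index;
  bool OK = classifyLane("[3]", Kind, Index);
  std::printf("d0[3]: ok=%d kind=%d index=%u\n", OK, Kind, Index);
}
```
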
3600 | | |
3601 | | // parse a vector register list |
3602 | | ARMAsmParser::OperandMatchResultTy |
3603 | | ARMAsmParser::parseVectorList(OperandVector &Operands, unsigned int &ErrorCode) |
3604 | 61.9k | { |
3605 | 61.9k | MCAsmParser &Parser = getParser(); |
3606 | 61.9k | VectorLaneTy LaneKind; |
3607 | 61.9k | unsigned LaneIndex; |
3608 | 61.9k | SMLoc S = Parser.getTok().getLoc(); |
3609 | | // As an extension (to match gas), support a plain D register or Q register |
3610 | | // (without enclosing curly braces) as a single- or double-entry list, |
3611 | | // respectively. |
3612 | 61.9k | if (Parser.getTok().is(AsmToken::Identifier)) { |
3613 | 42.5k | SMLoc E = Parser.getTok().getEndLoc(); |
3614 | 42.5k | int Reg = tryParseRegister(); |
3615 | 42.5k | if (Reg == -1) |
3616 | 42.3k | return MatchOperand_NoMatch; |
3617 | 188 | if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) { |
3618 | 120 | OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E, ErrorCode); |
3619 | 120 | if (Res != MatchOperand_Success) |
3620 | 20 | return Res; |
3621 | 100 | switch (LaneKind) { |
3622 | 88 | case NoLanes: |
3623 | 88 | Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E)); |
3624 | 88 | break; |
3625 | 4 | case AllLanes: |
3626 | 4 | Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false, |
3627 | 4 | S, E)); |
3628 | 4 | break; |
3629 | 8 | case IndexedLane: |
3630 | 8 | Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1, |
3631 | 8 | LaneIndex, |
3632 | 8 | false, S, E)); |
3633 | 8 | break; |
3634 | 100 | } |
3635 | 100 | return MatchOperand_Success; |
3636 | 100 | } |
3637 | 68 | if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { |
3638 | 58 | Reg = getDRegFromQReg(Reg); |
3639 | 58 | OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E, ErrorCode); |
3640 | 58 | if (Res != MatchOperand_Success) |
3641 | 31 | return Res; |
3642 | 27 | switch (LaneKind) { |
3643 | 24 | case NoLanes: |
3644 | 24 | Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0, |
3645 | 24 | &ARMMCRegisterClasses[ARM::DPairRegClassID]); |
3646 | 24 | Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E)); |
3647 | 24 | break; |
3648 | 2 | case AllLanes: |
3649 | 2 | Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0, |
3650 | 2 | &ARMMCRegisterClasses[ARM::DPairRegClassID]); |
3651 | 2 | Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false, |
3652 | 2 | S, E)); |
3653 | 2 | break; |
3654 | 1 | case IndexedLane: |
3655 | 1 | Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2, |
3656 | 1 | LaneIndex, |
3657 | 1 | false, S, E)); |
3658 | 1 | break; |
3659 | 27 | } |
3660 | 27 | return MatchOperand_Success; |
3661 | 27 | } |
3662 | | //Error(S, "vector register expected"); |
3663 | 10 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
3664 | 10 | return MatchOperand_ParseFail; |
3665 | 68 | } |
3666 | | |
3667 | 19.4k | if (Parser.getTok().isNot(AsmToken::LCurly)) |
3668 | 19.2k | return MatchOperand_NoMatch; |
3669 | | |
3670 | 160 | Parser.Lex(); // Eat '{' token. |
3671 | 160 | SMLoc RegLoc = Parser.getTok().getLoc(); |
3672 | | |
3673 | 160 | int Reg = tryParseRegister(); |
3674 | 160 | if (Reg == -1) { |
3675 | | //Error(RegLoc, "register expected"); |
3676 | 11 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
3677 | 11 | return MatchOperand_ParseFail; |
3678 | 11 | } |
3679 | 149 | unsigned Count = 1; |
3680 | 149 | int Spacing = 0; |
3681 | 149 | unsigned FirstReg = Reg; |
3682 | | // The list is of D registers, but we also allow Q regs and just interpret |
3683 | | // them as the two D sub-registers. |
3684 | 149 | if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { |
3685 | 101 | FirstReg = Reg = getDRegFromQReg(Reg); |
3686 | 101 | Spacing = 1; // double-spacing requires explicit D registers, otherwise |
3687 | | // it's ambiguous with four-register single spaced. |
3688 | 101 | ++Reg; |
3689 | 101 | ++Count; |
3690 | 101 | } |
3691 | | |
3692 | 149 | SMLoc E; |
3693 | 149 | if (parseVectorLane(LaneKind, LaneIndex, E, ErrorCode) != MatchOperand_Success) |
3694 | 6 | return MatchOperand_ParseFail; |
3695 | | |
3696 | 429 | while (Parser.getTok().is(AsmToken::Comma) || |
3697 | 429 | Parser.getTok().is(AsmToken::Minus)) { |
3698 | 397 | if (Parser.getTok().is(AsmToken::Minus)) { |
3699 | 323 | if (!Spacing) |
3700 | 25 | Spacing = 1; // Register range implies a single spaced list. |
3701 | 298 | else if (Spacing == 2) { |
3702 | | //Error(Parser.getTok().getLoc(), |
3703 | | // "sequential registers in double spaced list"); |
3704 | 1 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
3705 | 1 | return MatchOperand_ParseFail; |
3706 | 1 | } |
3707 | 322 | Parser.Lex(); // Eat the minus. |
3708 | | //SMLoc AfterMinusLoc = Parser.getTok().getLoc(); |
3709 | 322 | int EndReg = tryParseRegister(); |
3710 | 322 | if (EndReg == -1) { |
3711 | | //Error(AfterMinusLoc, "register expected"); |
3712 | 26 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
3713 | 26 | return MatchOperand_ParseFail; |
3714 | 26 | } |
3715 | | // Allow Q regs and just interpret them as the two D sub-registers. |
3716 | 296 | if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg)) |
3717 | 134 | EndReg = getDRegFromQReg(EndReg) + 1; |
3718 | | // If the register is the same as the start reg, there's nothing |
3719 | | // more to do. |
3720 | 296 | if (Reg == EndReg) |
3721 | 226 | continue; |
3722 | | // The register must be in the same register class as the first. |
3723 | 70 | if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) { |
3724 | | //Error(AfterMinusLoc, "invalid register in register list"); |
3725 | 14 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
3726 | 14 | return MatchOperand_ParseFail; |
3727 | 14 | } |
3728 | | // Ranges must go from low to high. |
3729 | 56 | if (Reg > EndReg) { |
3730 | | //Error(AfterMinusLoc, "bad range in register list"); |
3731 | 12 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
3732 | 12 | return MatchOperand_ParseFail; |
3733 | 12 | } |
3734 | | // Parse the lane specifier if present. |
3735 | 44 | VectorLaneTy NextLaneKind; |
3736 | 44 | unsigned NextLaneIndex; |
3737 | 44 | if (parseVectorLane(NextLaneKind, NextLaneIndex, E, ErrorCode) != |
3738 | 44 | MatchOperand_Success) |
3739 | 18 | return MatchOperand_ParseFail; |
3740 | 26 | if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) { |
3741 | | //Error(AfterMinusLoc, "mismatched lane index in register list"); |
3742 | 4 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
3743 | 4 | return MatchOperand_ParseFail; |
3744 | 4 | } |
3745 | | |
3746 | | // Add all the registers in the range to the register list. |
3747 | 22 | Count += EndReg - Reg; |
3748 | 22 | Reg = EndReg; |
3749 | 22 | continue; |
3750 | 26 | } |
3751 | 74 | Parser.Lex(); // Eat the comma. |
3752 | 74 | RegLoc = Parser.getTok().getLoc(); |
3753 | 74 | int OldReg = Reg; |
3754 | 74 | Reg = tryParseRegister(); |
3755 | 74 | if (Reg == -1) { |
3756 | | //Error(RegLoc, "register expected"); |
3757 | 8 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
3758 | 8 | return MatchOperand_ParseFail; |
3759 | 8 | } |
3760 | | // vector register lists must be contiguous. |
3761 | | // It's OK to use the enumeration values directly here, as the
3762 | | // VFP register classes have the enum sorted properly. |
3763 | | // |
3764 | | // The list is of D registers, but we also allow Q regs and just interpret |
3765 | | // them as the two D sub-registers. |
3766 | 66 | if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { |
3767 | 40 | if (!Spacing) |
3768 | 4 | Spacing = 1; // Register range implies a single spaced list. |
3769 | 36 | else if (Spacing == 2) { |
3770 | | //Error(RegLoc, |
3771 | | // "invalid register in double-spaced list (must be 'D' register')"); |
3772 | 0 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
3773 | 0 | return MatchOperand_ParseFail; |
3774 | 0 | } |
3775 | 40 | Reg = getDRegFromQReg(Reg); |
3776 | 40 | if (Reg != OldReg + 1) { |
3777 | | //Error(RegLoc, "non-contiguous register range"); |
3778 | 12 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
3779 | 12 | return MatchOperand_ParseFail; |
3780 | 12 | } |
3781 | 28 | ++Reg; |
3782 | 28 | Count += 2; |
3783 | | // Parse the lane specifier if present. |
3784 | 28 | VectorLaneTy NextLaneKind; |
3785 | 28 | unsigned NextLaneIndex; |
3786 | | //SMLoc LaneLoc = Parser.getTok().getLoc(); |
3787 | 28 | if (parseVectorLane(NextLaneKind, NextLaneIndex, E, ErrorCode) != |
3788 | 28 | MatchOperand_Success) |
3789 | 4 | return MatchOperand_ParseFail; |
3790 | 24 | if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) { |
3791 | | //Error(LaneLoc, "mismatched lane index in register list"); |
3792 | 1 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
3793 | 1 | return MatchOperand_ParseFail; |
3794 | 1 | } |
3795 | 23 | continue; |
3796 | 24 | } |
3797 | | // Normal D register. |
3798 | | // Figure out the register spacing (single or double) of the list if |
3799 | | // we don't know it already. |
3800 | 26 | if (!Spacing) |
3801 | 12 | Spacing = 1 + (Reg == OldReg + 2); |
3802 | | |
3803 | | // Just check that it's contiguous and keep going. |
3804 | 26 | if (Reg != OldReg + Spacing) { |
3805 | | //Error(RegLoc, "non-contiguous register range"); |
3806 | 10 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
3807 | 10 | return MatchOperand_ParseFail; |
3808 | 10 | } |
3809 | 16 | ++Count; |
3810 | | // Parse the lane specifier if present. |
3811 | 16 | VectorLaneTy NextLaneKind; |
3812 | 16 | unsigned NextLaneIndex; |
3813 | | //SMLoc EndLoc = Parser.getTok().getLoc(); |
3814 | 16 | if (parseVectorLane(NextLaneKind, NextLaneIndex, E, ErrorCode) != MatchOperand_Success) |
3815 | 1 | return MatchOperand_ParseFail; |
3816 | 15 | if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) { |
3817 | | //Error(EndLoc, "mismatched lane index in register list"); |
3818 | 0 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
3819 | 0 | return MatchOperand_ParseFail; |
3820 | 0 | } |
3821 | 15 | } |
3822 | | |
3823 | 32 | if (Parser.getTok().isNot(AsmToken::RCurly)) { |
3824 | | //Error(Parser.getTok().getLoc(), "'}' expected"); |
3825 | 24 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
3826 | 24 | return MatchOperand_ParseFail; |
3827 | 24 | } |
3828 | 8 | E = Parser.getTok().getEndLoc(); |
3829 | 8 | Parser.Lex(); // Eat '}' token. |
3830 | | |
3831 | 8 | switch (LaneKind) { |
3832 | 6 | case NoLanes: |
3833 | | // Two-register operands have been converted to the |
3834 | | // composite register classes. |
3835 | 6 | if (Count == 2) { |
3836 | 3 | const MCRegisterClass *RC = (Spacing == 1) ? |
3837 | 2 | &ARMMCRegisterClasses[ARM::DPairRegClassID] : |
3838 | 3 | &ARMMCRegisterClasses[ARM::DPairSpcRegClassID]; |
3839 | 3 | FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC); |
3840 | 3 | } |
3841 | | |
3842 | 6 | Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count, |
3843 | 6 | (Spacing == 2), S, E)); |
3844 | 6 | break; |
3845 | 0 | case AllLanes: |
3846 | | // Two-register operands have been converted to the |
3847 | | // composite register classes. |
3848 | 0 | if (Count == 2) { |
3849 | 0 | const MCRegisterClass *RC = (Spacing == 1) ? |
3850 | 0 | &ARMMCRegisterClasses[ARM::DPairRegClassID] : |
3851 | 0 | &ARMMCRegisterClasses[ARM::DPairSpcRegClassID]; |
3852 | 0 | FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC); |
3853 | 0 | } |
3854 | 0 | Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count, |
3855 | 0 | (Spacing == 2), |
3856 | 0 | S, E)); |
3857 | 0 | break; |
3858 | 2 | case IndexedLane: |
3859 | 2 | Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count, |
3860 | 2 | LaneIndex, |
3861 | 2 | (Spacing == 2), |
3862 | 2 | S, E)); |
3863 | 2 | break; |
3864 | 8 | } |
3865 | 8 | return MatchOperand_Success; |
3866 | 8 | } |
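For reference, the list syntaxes handled above (plain D-register lists, Q registers split into D pairs, all-lanes and indexed-lane forms) can be exercised through Keystone's public C API. The sketch below is illustrative only: the ks_* calls are the ordinary Keystone entry points, and whether each string assembles depends on the mode and CPU selected when the engine is opened.

#include <keystone/keystone.h>
#include <cstdio>

static void asm_one(ks_engine *ks, const char *ins) {
  unsigned char *enc; size_t size, count;
  if (ks_asm(ks, ins, 0, &enc, &size, &count) == 0) {  // ks_asm returns 0 on success
    printf("%-28s -> %zu bytes\n", ins, size);
    ks_free(enc);
  } else {
    printf("%-28s -> error %d\n", ins, (int)ks_errno(ks));
  }
}

int main() {
  ks_engine *ks;
  if (ks_open(KS_ARCH_ARM, KS_MODE_ARM, &ks) != KS_ERR_OK) return 1;
  asm_one(ks, "vld1.8 {d0, d1}, [r0]");      // plain D-register list
  asm_one(ks, "vld1.8 {q0}, [r0]");          // Q reg, split into two D regs by the parser
  asm_one(ks, "vld1.8 {d0[], d1[]}, [r0]");  // all-lanes list
  asm_one(ks, "vld1.8 {d0[1]}, [r0]");       // indexed-lane list
  ks_close(ks);
  return 0;
}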
3867 | | |
3868 | | /// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options. |
3869 | | ARMAsmParser::OperandMatchResultTy |
3870 | 4.18k | ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands, unsigned int &ErrorCode) { |
3871 | 4.18k | MCAsmParser &Parser = getParser(); |
3872 | 4.18k | SMLoc S = Parser.getTok().getLoc(); |
3873 | 4.18k | const AsmToken &Tok = Parser.getTok(); |
3874 | 4.18k | unsigned Opt; |
3875 | | |
3876 | 4.18k | if (Tok.is(AsmToken::Identifier)) { |
3877 | 3.75k | StringRef OptStr = Tok.getString(); |
3878 | | |
3879 | 3.75k | Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()).lower()) |
3880 | 3.75k | .Case("sy", ARM_MB::SY) |
3881 | 3.75k | .Case("st", ARM_MB::ST) |
3882 | 3.75k | .Case("ld", ARM_MB::LD) |
3883 | 3.75k | .Case("sh", ARM_MB::ISH) |
3884 | 3.75k | .Case("ish", ARM_MB::ISH) |
3885 | 3.75k | .Case("shst", ARM_MB::ISHST) |
3886 | 3.75k | .Case("ishst", ARM_MB::ISHST) |
3887 | 3.75k | .Case("ishld", ARM_MB::ISHLD) |
3888 | 3.75k | .Case("nsh", ARM_MB::NSH) |
3889 | 3.75k | .Case("un", ARM_MB::NSH) |
3890 | 3.75k | .Case("nshst", ARM_MB::NSHST) |
3891 | 3.75k | .Case("nshld", ARM_MB::NSHLD) |
3892 | 3.75k | .Case("unst", ARM_MB::NSHST) |
3893 | 3.75k | .Case("osh", ARM_MB::OSH) |
3894 | 3.75k | .Case("oshst", ARM_MB::OSHST) |
3895 | 3.75k | .Case("oshld", ARM_MB::OSHLD) |
3896 | 3.75k | .Default(~0U); |
3897 | | |
3898 | | // ishld, oshld, nshld and ld are only available from ARMv8. |
3899 | 3.75k | if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD || |
3900 | 3.28k | Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD)) |
3901 | 11 | Opt = ~0U; |
3902 | | |
3903 | 3.75k | if (Opt == ~0U) |
3904 | 925 | return MatchOperand_NoMatch; |
3905 | | |
3906 | 2.82k | Parser.Lex(); // Eat identifier token. |
3907 | 2.82k | } else if (Tok.is(AsmToken::Hash) || |
3908 | 429 | Tok.is(AsmToken::Dollar) || |
3909 | 429 | Tok.is(AsmToken::Integer)) { |
3910 | 402 | if (Parser.getTok().isNot(AsmToken::Integer)) |
3911 | 234 | Parser.Lex(); // Eat '#' or '$'. |
3912 | | |
3913 | | //SMLoc Loc = Parser.getTok().getLoc(); |
3914 | | |
3915 | 402 | const MCExpr *MemBarrierID; |
3916 | 402 | if (getParser().parseExpression(MemBarrierID)) { |
3917 | | //Error(Loc, "illegal expression"); |
3918 | 127 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
3919 | 127 | return MatchOperand_ParseFail; |
3920 | 127 | } |
3921 | | |
3922 | 275 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID); |
3923 | 275 | if (!CE) { |
3924 | | //Error(Loc, "constant expression expected"); |
3925 | 6 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
3926 | 6 | return MatchOperand_ParseFail; |
3927 | 6 | } |
3928 | | |
3929 | 269 | int Val = CE->getValue(); |
3930 | 269 | if (Val & ~0xf) { |
3931 | | //Error(Loc, "immediate value out of range"); |
3932 | 48 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
3933 | 48 | return MatchOperand_ParseFail; |
3934 | 48 | } |
3935 | | |
3936 | 221 | Opt = ARM_MB::RESERVED_0 + Val; |
3937 | 221 | } else |
3938 | 27 | return MatchOperand_ParseFail; |
3939 | | |
3940 | 3.04k | Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S)); |
3941 | 3.04k | return MatchOperand_Success; |
3942 | 4.18k | } |
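Besides the named options, the numeric form of a barrier option only has to pass the "Val & ~0xf" test above, i.e. fit the 4-bit option field. A minimal restatement of that rule (barrierOptFromImm is illustrative, not the parser itself):

#include <cstdint>
#include <optional>

// Accept a numeric DSB/DMB option only if it fits the 4-bit field, as checked above.
std::optional<unsigned> barrierOptFromImm(int64_t val) {
  if (val & ~0xf)
    return std::nullopt;               // "immediate value out of range"
  return static_cast<unsigned>(val);   // the parser then adds ARM_MB::RESERVED_0
}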
3943 | | |
3944 | | /// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options. |
3945 | | ARMAsmParser::OperandMatchResultTy |
3946 | 1.12k | ARMAsmParser::parseInstSyncBarrierOptOperand(OperandVector &Operands, unsigned int &ErrorCode) { |
3947 | 1.12k | MCAsmParser &Parser = getParser(); |
3948 | 1.12k | SMLoc S = Parser.getTok().getLoc(); |
3949 | 1.12k | const AsmToken &Tok = Parser.getTok(); |
3950 | 1.12k | unsigned Opt; |
3951 | | |
3952 | 1.12k | if (Tok.is(AsmToken::Identifier)) { |
3953 | 381 | StringRef OptStr = Tok.getString(); |
3954 | | |
3955 | 381 | if (OptStr.equals_lower("sy")) |
3956 | 324 | Opt = ARM_ISB::SY; |
3957 | 57 | else |
3958 | 57 | return MatchOperand_NoMatch; |
3959 | | |
3960 | 324 | Parser.Lex(); // Eat identifier token. |
3961 | 748 | } else if (Tok.is(AsmToken::Hash) || |
3962 | 748 | Tok.is(AsmToken::Dollar) || |
3963 | 748 | Tok.is(AsmToken::Integer)) { |
3964 | 732 | if (Parser.getTok().isNot(AsmToken::Integer)) |
3965 | 477 | Parser.Lex(); // Eat '#' or '$'. |
3966 | | |
3967 | | //SMLoc Loc = Parser.getTok().getLoc(); |
3968 | | |
3969 | 732 | const MCExpr *ISBarrierID; |
3970 | 732 | if (getParser().parseExpression(ISBarrierID)) { |
3971 | | //Error(Loc, "illegal expression"); |
3972 | 131 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
3973 | 131 | return MatchOperand_ParseFail; |
3974 | 131 | } |
3975 | | |
3976 | 601 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID); |
3977 | 601 | if (!CE) { |
3978 | | //Error(Loc, "constant expression expected"); |
3979 | 9 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
3980 | 9 | return MatchOperand_ParseFail; |
3981 | 9 | } |
3982 | | |
3983 | 592 | int Val = CE->getValue(); |
3984 | 592 | if (Val & ~0xf) { |
3985 | | //Error(Loc, "immediate value out of range"); |
3986 | 45 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
3987 | 45 | return MatchOperand_ParseFail; |
3988 | 45 | } |
3989 | | |
3990 | 547 | Opt = ARM_ISB::RESERVED_0 + Val; |
3991 | 547 | } else |
3992 | 16 | return MatchOperand_ParseFail; |
3993 | | |
3994 | 871 | Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt( |
3995 | 871 | (ARM_ISB::InstSyncBOpt)Opt, S)); |
3996 | 871 | return MatchOperand_Success; |
3997 | 1.12k | } |
3998 | | |
3999 | | |
4000 | | /// parseProcIFlagsOperand - Try to parse iflags from CPS instruction. |
4001 | | ARMAsmParser::OperandMatchResultTy |
4002 | 1.29k | ARMAsmParser::parseProcIFlagsOperand(OperandVector &Operands, unsigned int &ErrorCode) { |
4003 | 1.29k | MCAsmParser &Parser = getParser(); |
4004 | 1.29k | SMLoc S = Parser.getTok().getLoc(); |
4005 | 1.29k | const AsmToken &Tok = Parser.getTok(); |
4006 | 1.29k | if (!Tok.is(AsmToken::Identifier)) |
4007 | 294 | return MatchOperand_NoMatch; |
4008 | 1.00k | StringRef IFlagsStr = Tok.getString(); |
4009 | | |
4010 | | // An iflags string of "none" is interpreted to mean that none of the AIF |
4011 | | // bits are set. Not a terribly useful instruction, but a valid encoding. |
4012 | 1.00k | unsigned IFlags = 0; |
4013 | 1.00k | if (IFlagsStr != "none") { |
4014 | 1.47k | for (int i = 0, e = IFlagsStr.size(); i != e; ++i) { |
4015 | 1.31k | unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1)) |
4016 | 1.31k | .Case("a", ARM_PROC::A) |
4017 | 1.31k | .Case("i", ARM_PROC::I) |
4018 | 1.31k | .Case("f", ARM_PROC::F) |
4019 | 1.31k | .Default(~0U); |
4020 | | |
4021 | | // If some specific iflag is already set, it means that some letter is |
4022 | | // present more than once, which is not acceptable.
4023 | 1.31k | if (Flag == ~0U || (IFlags & Flag)) |
4024 | 836 | return MatchOperand_NoMatch; |
4025 | | |
4026 | 474 | IFlags |= Flag; |
4027 | 474 | } |
4028 | 1.00k | } |
4029 | | |
4030 | 168 | Parser.Lex(); // Eat identifier token. |
4031 | 168 | Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S)); |
4032 | 168 | return MatchOperand_Success; |
4033 | 1.00k | } |
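The iflags operand is just the letters 'a', 'i', 'f' ORed into a three-bit mask, with "none" meaning an empty mask and any repeated or unknown letter rejected. A standalone sketch of that rule, assuming the usual ARM_PROC bit values (A=4, I=2, F=1) and lower-case input; iflagsMask is illustrative only:

#include <optional>
#include <string>

std::optional<unsigned> iflagsMask(const std::string &s) {
  if (s == "none") return 0u;          // an iflags string of "none" sets no AIF bits
  unsigned mask = 0;
  for (char c : s) {
    unsigned bit = (c == 'a') ? 4u : (c == 'i') ? 2u : (c == 'f') ? 1u : ~0u;
    if (bit == ~0u || (mask & bit))    // unknown letter, or a letter given twice
      return std::nullopt;
    mask |= bit;
  }
  return mask;
}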
4034 | | |
4035 | | /// parseMSRMaskOperand - Try to parse mask flags from MSR instruction. |
4036 | | ARMAsmParser::OperandMatchResultTy |
4037 | 3.57k | ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands, unsigned int &ErrorCode) { |
4038 | 3.57k | MCAsmParser &Parser = getParser(); |
4039 | 3.57k | SMLoc S = Parser.getTok().getLoc(); |
4040 | 3.57k | const AsmToken &Tok = Parser.getTok(); |
4041 | 3.57k | if (!Tok.is(AsmToken::Identifier)) |
4042 | 148 | return MatchOperand_NoMatch; |
4043 | 3.42k | StringRef Mask = Tok.getString(); |
4044 | | |
4045 | 3.42k | if (isMClass()) { |
4046 | | // See ARMv6-M 10.1.1 |
4047 | 931 | std::string Name = Mask.lower(); |
4048 | 931 | unsigned FlagsVal = StringSwitch<unsigned>(Name) |
4049 | | // Note: in the documentation: |
4050 | | // ARM deprecates using MSR APSR without a _<bits> qualifier as an alias |
4051 | | // for MSR APSR_nzcvq. |
4052 | | // but we do make it an alias here. This is so to get the "mask encoding" |
4053 | | // bits correct on MSR APSR writes. |
4054 | | // |
4055 | | // FIXME: Note the 0xc00 "mask encoding" bits version of the registers |
4056 | | // should really only be allowed when writing a special register. Note |
4057 | | // they get dropped in the MRS instruction reading a special register as |
4058 | | // the SYSm field is only 8 bits. |
4059 | 931 | .Case("apsr", 0x800) |
4060 | 931 | .Case("apsr_nzcvq", 0x800) |
4061 | 931 | .Case("apsr_g", 0x400) |
4062 | 931 | .Case("apsr_nzcvqg", 0xc00) |
4063 | 931 | .Case("iapsr", 0x801) |
4064 | 931 | .Case("iapsr_nzcvq", 0x801) |
4065 | 931 | .Case("iapsr_g", 0x401) |
4066 | 931 | .Case("iapsr_nzcvqg", 0xc01) |
4067 | 931 | .Case("eapsr", 0x802) |
4068 | 931 | .Case("eapsr_nzcvq", 0x802) |
4069 | 931 | .Case("eapsr_g", 0x402) |
4070 | 931 | .Case("eapsr_nzcvqg", 0xc02) |
4071 | 931 | .Case("xpsr", 0x803) |
4072 | 931 | .Case("xpsr_nzcvq", 0x803) |
4073 | 931 | .Case("xpsr_g", 0x403) |
4074 | 931 | .Case("xpsr_nzcvqg", 0xc03) |
4075 | 931 | .Case("ipsr", 0x805) |
4076 | 931 | .Case("epsr", 0x806) |
4077 | 931 | .Case("iepsr", 0x807) |
4078 | 931 | .Case("msp", 0x808) |
4079 | 931 | .Case("psp", 0x809) |
4080 | 931 | .Case("primask", 0x810) |
4081 | 931 | .Case("basepri", 0x811) |
4082 | 931 | .Case("basepri_max", 0x812) |
4083 | 931 | .Case("faultmask", 0x813) |
4084 | 931 | .Case("control", 0x814) |
4085 | 931 | .Case("msplim", 0x80a) |
4086 | 931 | .Case("psplim", 0x80b) |
4087 | 931 | .Case("msp_ns", 0x888) |
4088 | 931 | .Case("psp_ns", 0x889) |
4089 | 931 | .Case("msplim_ns", 0x88a) |
4090 | 931 | .Case("psplim_ns", 0x88b) |
4091 | 931 | .Case("primask_ns", 0x890) |
4092 | 931 | .Case("basepri_ns", 0x891) |
4093 | 931 | .Case("basepri_max_ns", 0x892) |
4094 | 931 | .Case("faultmask_ns", 0x893) |
4095 | 931 | .Case("control_ns", 0x894) |
4096 | 931 | .Case("sp_ns", 0x898) |
4097 | 931 | .Default(~0U); |
4098 | | |
4099 | 931 | if (FlagsVal == ~0U) |
4100 | 826 | return MatchOperand_NoMatch; |
4101 | | |
4102 | 105 | if (!hasDSP() && (FlagsVal & 0x400)) |
4103 | | // The _g and _nzcvqg versions are only valid if the DSP extension is |
4104 | | // available. |
4105 | 11 | return MatchOperand_NoMatch; |
4106 | | |
4107 | 94 | if (!hasV7Ops() && FlagsVal >= 0x811 && FlagsVal <= 0x813) |
4108 | | // basepri, basepri_max and faultmask are only valid for V7m.
4109 | 7 | return MatchOperand_NoMatch; |
4110 | | |
4111 | 87 | if (!has8MSecExt() && (FlagsVal == 0x80a || FlagsVal == 0x80b || |
4112 | 42 | (FlagsVal > 0x814 && FlagsVal < 0xc00))) |
4113 | 9 | return MatchOperand_NoMatch; |
4114 | | |
4115 | 78 | if (!hasV8MMainline() && (FlagsVal == 0x88a || FlagsVal == 0x88b || |
4116 | 33 | (FlagsVal > 0x890 && FlagsVal <= 0x893))) |
4117 | 0 | return MatchOperand_NoMatch; |
4118 | | |
4119 | 78 | Parser.Lex(); // Eat identifier token. |
4120 | 78 | Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); |
4121 | 78 | return MatchOperand_Success; |
4122 | 78 | } |
4123 | | |
4124 | | // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf" |
4125 | 2.49k | size_t Start = 0, Next = Mask.find('_'); |
4126 | 2.49k | StringRef Flags = ""; |
4127 | 2.49k | std::string SpecReg = Mask.slice(Start, Next).lower(); |
4128 | 2.49k | if (Next != StringRef::npos) |
4129 | 442 | Flags = Mask.slice(Next+1, Mask.size()); |
4130 | | |
4131 | | // FlagsVal contains the complete mask: |
4132 | | // 3-0: Mask |
4133 | | // 4: Special Reg (cpsr, apsr => 0; spsr => 1) |
4134 | 2.49k | unsigned FlagsVal = 0; |
4135 | | |
4136 | 2.49k | if (SpecReg == "apsr") { |
4137 | 192 | FlagsVal = StringSwitch<unsigned>(Flags) |
4138 | 192 | .Case("nzcvq", 0x8) // same as CPSR_f |
4139 | 192 | .Case("g", 0x4) // same as CPSR_s |
4140 | 192 | .Case("nzcvqg", 0xc) // same as CPSR_fs |
4141 | 192 | .Default(~0U); |
4142 | | |
4143 | 192 | if (FlagsVal == ~0U) { |
4144 | 176 | if (!Flags.empty()) |
4145 | 126 | return MatchOperand_NoMatch; |
4146 | 50 | else |
4147 | 50 | FlagsVal = 8; // No flag |
4148 | 176 | } |
4149 | 2.30k | } else if (SpecReg == "cpsr" || SpecReg == "spsr") { |
4150 | | // cpsr_all is an alias for cpsr_fc, as is plain cpsr. |
4151 | 685 | if (Flags == "all" || Flags == "") |
4152 | 590 | Flags = "fc"; |
4153 | 1.99k | for (int i = 0, e = Flags.size(); i != e; ++i) { |
4154 | 1.39k | unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1)) |
4155 | 1.39k | .Case("c", 1) |
4156 | 1.39k | .Case("x", 2) |
4157 | 1.39k | .Case("s", 4) |
4158 | 1.39k | .Case("f", 8) |
4159 | 1.39k | .Default(~0U); |
4160 | | |
4161 | | // If some specific flag is already set, it means that some letter is |
4162 | | // present more than once, which is not acceptable.
4163 | 1.39k | if (FlagsVal == ~0U || (FlagsVal & Flag)) |
4164 | 84 | return MatchOperand_NoMatch; |
4165 | 1.30k | FlagsVal |= Flag; |
4166 | 1.30k | } |
4167 | 685 | } else // No match for special register. |
4168 | 1.61k | return MatchOperand_NoMatch; |
4169 | | |
4170 | | // Special register without flags is NOT equivalent to "fc" flags. |
4171 | | // NOTE: This is a divergence from gas' behavior. Uncommenting the following |
4172 | | // two lines would enable gas compatibility at the expense of breaking |
4173 | | // round-tripping. |
4174 | | // |
4175 | | // if (!FlagsVal) |
4176 | | // FlagsVal = 0x9; |
4177 | | |
4178 | | // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1) |
4179 | 667 | if (SpecReg == "spsr") |
4180 | 532 | FlagsVal |= 16; |
4181 | | |
4182 | 667 | Parser.Lex(); // Eat identifier token. |
4183 | 667 | Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); |
4184 | 667 | return MatchOperand_Success; |
4185 | 2.49k | } |
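For the A/R-profile path above, the spec_reg/flags split boils down to a per-letter bit mask (c=1, x=2, s=4, f=8) with bit 4 selecting SPSR. A hedged, standalone restatement assuming lower-case input (msrMask is illustrative only; APSR, M-class and banked names follow the other paths above):

#include <optional>
#include <string>

std::optional<unsigned> msrMask(std::string spec) {
  size_t us = spec.find('_');
  std::string reg = spec.substr(0, us);
  std::string flags = (us == std::string::npos) ? "" : spec.substr(us + 1);
  if (reg != "cpsr" && reg != "spsr")
    return std::nullopt;
  if (flags.empty() || flags == "all")
    flags = "fc";                        // plain cpsr and cpsr_all mean cpsr_fc
  unsigned mask = 0;
  for (char c : flags) {
    unsigned bit = (c == 'c') ? 1u : (c == 'x') ? 2u : (c == 's') ? 4u : (c == 'f') ? 8u : ~0u;
    if (bit == ~0u || (mask & bit))      // unknown or repeated flag letter
      return std::nullopt;
    mask |= bit;
  }
  if (reg == "spsr")
    mask |= 16;                          // bit 4: 0 = CPSR/APSR, 1 = SPSR
  return mask;
}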
4186 | | |
4187 | | /// parseBankedRegOperand - Try to parse a banked register (e.g. "lr_irq") for |
4188 | | /// use in the MRS/MSR instructions added to support virtualization. |
4189 | | ARMAsmParser::OperandMatchResultTy |
4190 | 687 | ARMAsmParser::parseBankedRegOperand(OperandVector &Operands, unsigned int &ErrorCode) { |
4191 | 687 | MCAsmParser &Parser = getParser(); |
4192 | 687 | SMLoc S = Parser.getTok().getLoc(); |
4193 | 687 | const AsmToken &Tok = Parser.getTok(); |
4194 | 687 | if (!Tok.is(AsmToken::Identifier)) |
4195 | 45 | return MatchOperand_NoMatch; |
4196 | 642 | StringRef RegName = Tok.getString(); |
4197 | | |
4198 | | // The values here come from B9.2.3 of the ARM ARM, where bits 4-0 are SysM |
4199 | | // and bit 5 is R. |
4200 | 642 | unsigned Encoding = StringSwitch<unsigned>(RegName.lower()) |
4201 | 642 | .Case("r8_usr", 0x00) |
4202 | 642 | .Case("r9_usr", 0x01) |
4203 | 642 | .Case("r10_usr", 0x02) |
4204 | 642 | .Case("r11_usr", 0x03) |
4205 | 642 | .Case("r12_usr", 0x04) |
4206 | 642 | .Case("sp_usr", 0x05) |
4207 | 642 | .Case("lr_usr", 0x06) |
4208 | 642 | .Case("r8_fiq", 0x08) |
4209 | 642 | .Case("r9_fiq", 0x09) |
4210 | 642 | .Case("r10_fiq", 0x0a) |
4211 | 642 | .Case("r11_fiq", 0x0b) |
4212 | 642 | .Case("r12_fiq", 0x0c) |
4213 | 642 | .Case("sp_fiq", 0x0d) |
4214 | 642 | .Case("lr_fiq", 0x0e) |
4215 | 642 | .Case("lr_irq", 0x10) |
4216 | 642 | .Case("sp_irq", 0x11) |
4217 | 642 | .Case("lr_svc", 0x12) |
4218 | 642 | .Case("sp_svc", 0x13) |
4219 | 642 | .Case("lr_abt", 0x14) |
4220 | 642 | .Case("sp_abt", 0x15) |
4221 | 642 | .Case("lr_und", 0x16) |
4222 | 642 | .Case("sp_und", 0x17) |
4223 | 642 | .Case("lr_mon", 0x1c) |
4224 | 642 | .Case("sp_mon", 0x1d) |
4225 | 642 | .Case("elr_hyp", 0x1e) |
4226 | 642 | .Case("sp_hyp", 0x1f) |
4227 | 642 | .Case("spsr_fiq", 0x2e) |
4228 | 642 | .Case("spsr_irq", 0x30) |
4229 | 642 | .Case("spsr_svc", 0x32) |
4230 | 642 | .Case("spsr_abt", 0x34) |
4231 | 642 | .Case("spsr_und", 0x36) |
4232 | 642 | .Case("spsr_mon", 0x3c) |
4233 | 642 | .Case("spsr_hyp", 0x3e) |
4234 | 642 | .Default(~0U); |
4235 | | |
4236 | 642 | if (Encoding == ~0U) |
4237 | 642 | return MatchOperand_NoMatch; |
4238 | | |
4239 | 0 | Parser.Lex(); // Eat identifier token. |
4240 | 0 | Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S)); |
4241 | 0 | return MatchOperand_Success; |
4242 | 642 | } |
4243 | | |
4244 | | ARMAsmParser::OperandMatchResultTy |
4245 | | ARMAsmParser::parsePKHImm(OperandVector &Operands, StringRef Op, int Low, |
4246 | | int High, unsigned int &ErrorCode) |
4247 | 2 | { |
4248 | 2 | MCAsmParser &Parser = getParser(); |
4249 | 2 | const AsmToken &Tok = Parser.getTok(); |
4250 | 2 | if (Tok.isNot(AsmToken::Identifier)) { |
4251 | | //Error(Parser.getTok().getLoc(), Op + " operand expected."); |
4252 | 1 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
4253 | 1 | return MatchOperand_ParseFail; |
4254 | 1 | } |
4255 | 1 | StringRef ShiftName = Tok.getString(); |
4256 | 1 | std::string LowerOp = Op.lower(); |
4257 | 1 | std::string UpperOp = Op.upper(); |
4258 | 1 | if (ShiftName != LowerOp && ShiftName != UpperOp) { |
4259 | | //Error(Parser.getTok().getLoc(), Op + " operand expected."); |
4260 | 1 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
4261 | 1 | return MatchOperand_ParseFail; |
4262 | 1 | } |
4263 | 0 | Parser.Lex(); // Eat shift type token. |
4264 | | |
4265 | | // There must be a '#' and a shift amount. |
4266 | 0 | if (Parser.getTok().isNot(AsmToken::Hash) && |
4267 | 0 | Parser.getTok().isNot(AsmToken::Dollar)) { |
4268 | | //Error(Parser.getTok().getLoc(), "'#' expected"); |
4269 | 0 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
4270 | 0 | return MatchOperand_ParseFail; |
4271 | 0 | } |
4272 | 0 | Parser.Lex(); // Eat hash token. |
4273 | |
4274 | 0 | const MCExpr *ShiftAmount; |
4275 | 0 | SMLoc Loc = Parser.getTok().getLoc(); |
4276 | 0 | SMLoc EndLoc; |
4277 | 0 | if (getParser().parseExpression(ShiftAmount, EndLoc)) { |
4278 | | //Error(Loc, "illegal expression"); |
4279 | 0 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
4280 | 0 | return MatchOperand_ParseFail; |
4281 | 0 | } |
4282 | 0 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); |
4283 | 0 | if (!CE) { |
4284 | | //Error(Loc, "constant expression expected"); |
4285 | 0 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
4286 | 0 | return MatchOperand_ParseFail; |
4287 | 0 | } |
4288 | 0 | int Val = CE->getValue(); |
4289 | 0 | if (Val < Low || Val > High) { |
4290 | | //Error(Loc, "immediate value out of range"); |
4291 | 0 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
4292 | 0 | return MatchOperand_ParseFail; |
4293 | 0 | } |
4294 | | |
4295 | 0 | Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc)); |
4296 | |
4297 | 0 | return MatchOperand_Success; |
4298 | 0 | } |
4299 | | |
4300 | | ARMAsmParser::OperandMatchResultTy |
4301 | | ARMAsmParser::parseSetEndImm(OperandVector &Operands, unsigned int &ErrorCode) |
4302 | 623 | { |
4303 | 623 | MCAsmParser &Parser = getParser(); |
4304 | 623 | const AsmToken &Tok = Parser.getTok(); |
4305 | 623 | SMLoc S = Tok.getLoc(); |
4306 | 623 | if (Tok.isNot(AsmToken::Identifier)) { |
4307 | | //Error(S, "'be' or 'le' operand expected"); |
4308 | 4 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
4309 | 4 | return MatchOperand_ParseFail; |
4310 | 4 | } |
4311 | 619 | int Val = StringSwitch<int>(Tok.getString().lower()) |
4312 | 619 | .Case("be", 1) |
4313 | 619 | .Case("le", 0) |
4314 | 619 | .Default(-1); |
4315 | 619 | Parser.Lex(); // Eat the token. |
4316 | | |
4317 | 619 | if (Val == -1) { |
4318 | | //Error(S, "'be' or 'le' operand expected"); |
4319 | 35 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
4320 | 35 | return MatchOperand_ParseFail; |
4321 | 35 | } |
4322 | 584 | Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::create(Val, |
4323 | 584 | getContext()), |
4324 | 584 | S, Tok.getEndLoc())); |
4325 | 584 | return MatchOperand_Success; |
4326 | 619 | } |
4327 | | |
4328 | | /// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT |
4329 | | /// instructions. Legal values are: |
4330 | | /// lsl #n 'n' in [0,31] |
4331 | | /// asr #n 'n' in [1,32] |
4332 | | /// n == 32 encoded as n == 0. |
4333 | | ARMAsmParser::OperandMatchResultTy |
4334 | | ARMAsmParser::parseShifterImm(OperandVector &Operands, unsigned int &ErrorCode) |
4335 | 394 | { |
4336 | 394 | MCAsmParser &Parser = getParser(); |
4337 | 394 | const AsmToken &Tok = Parser.getTok(); |
4338 | 394 | SMLoc S = Tok.getLoc(); |
4339 | 394 | if (Tok.isNot(AsmToken::Identifier)) { |
4340 | | //Error(S, "shift operator 'asr' or 'lsl' expected"); |
4341 | 2 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
4342 | 2 | return MatchOperand_ParseFail; |
4343 | 2 | } |
4344 | 392 | StringRef ShiftName = Tok.getString(); |
4345 | 392 | bool isASR; |
4346 | 392 | if (ShiftName == "lsl" || ShiftName == "LSL") |
4347 | 271 | isASR = false; |
4348 | 121 | else if (ShiftName == "asr" || ShiftName == "ASR") |
4349 | 79 | isASR = true; |
4350 | 42 | else { |
4351 | | //Error(S, "shift operator 'asr' or 'lsl' expected"); |
4352 | 42 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
4353 | 42 | return MatchOperand_ParseFail; |
4354 | 42 | } |
4355 | 350 | Parser.Lex(); // Eat the operator. |
4356 | | |
4357 | | // A '#' and a shift amount. |
4358 | 350 | if (Parser.getTok().isNot(AsmToken::Hash) && |
4359 | 350 | Parser.getTok().isNot(AsmToken::Dollar)) { |
4360 | | //Error(Parser.getTok().getLoc(), "'#' expected"); |
4361 | 3 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
4362 | 3 | return MatchOperand_ParseFail; |
4363 | 3 | } |
4364 | 347 | Parser.Lex(); // Eat hash token. |
4365 | | //SMLoc ExLoc = Parser.getTok().getLoc(); |
4366 | | |
4367 | 347 | const MCExpr *ShiftAmount; |
4368 | 347 | SMLoc EndLoc; |
4369 | 347 | if (getParser().parseExpression(ShiftAmount, EndLoc)) { |
4370 | | //Error(ExLoc, "malformed shift expression"); |
4371 | 170 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
4372 | 170 | return MatchOperand_ParseFail; |
4373 | 170 | } |
4374 | 177 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); |
4375 | 177 | if (!CE) { |
4376 | | //Error(ExLoc, "shift amount must be an immediate"); |
4377 | 4 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
4378 | 4 | return MatchOperand_ParseFail; |
4379 | 4 | } |
4380 | | |
4381 | 173 | int64_t Val = CE->getValue(); |
4382 | 173 | if (isASR) { |
4383 | | // Shift amount must be in [1,32] |
4384 | 75 | if (Val < 1 || Val > 32) { |
4385 | | //Error(ExLoc, "'asr' shift amount must be in range [1,32]"); |
4386 | 72 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
4387 | 72 | return MatchOperand_ParseFail; |
4388 | 72 | } |
4389 | | // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode. |
4390 | 3 | if (isThumb() && Val == 32) { |
4391 | | //Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode"); |
4392 | 0 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
4393 | 0 | return MatchOperand_ParseFail; |
4394 | 0 | } |
4395 | 3 | if (Val == 32) Val = 0; |
4396 | 98 | } else { |
4397 | | // Shift amount must be in [0,31]
4398 | 98 | if (Val < 0 || Val > 31) {
4399 | | //Error(ExLoc, "'lsl' shift amount must be in range [0,31]");
4400 | 95 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
4401 | 95 | return MatchOperand_ParseFail; |
4402 | 95 | } |
4403 | 98 | } |
4404 | | |
4405 | 6 | Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc)); |
4406 | | |
4407 | 6 | return MatchOperand_Success; |
4408 | 173 | } |
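The shifter-immediate rules enforced above come down to: lsl #n with n in [0,31], asr #n with n in [1,32], and asr #32 encoded as 0 (with the Thumb-mode rejection handled separately by the parser). A small sketch of that encoding decision (encodeShifterImm is illustrative only):

#include <cstdint>
#include <optional>

std::optional<int64_t> encodeShifterImm(bool isASR, int64_t n) {
  if (isASR) {
    if (n < 1 || n > 32) return std::nullopt;  // 'asr' amount must be in [1,32]
    return n == 32 ? 0 : n;                    // asr #32 is encoded as 0
  }
  if (n < 0 || n > 31) return std::nullopt;    // 'lsl' amount must be in [0,31]
  return n;
}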
4409 | | |
4410 | | /// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family |
4411 | | /// of instructions. Legal values are: |
4412 | | /// ror #n 'n' in {0, 8, 16, 24} |
4413 | | ARMAsmParser::OperandMatchResultTy |
4414 | 481 | ARMAsmParser::parseRotImm(OperandVector &Operands, unsigned int &ErrorCode) { |
4415 | 481 | MCAsmParser &Parser = getParser(); |
4416 | 481 | const AsmToken &Tok = Parser.getTok(); |
4417 | 481 | SMLoc S = Tok.getLoc(); |
4418 | 481 | if (Tok.isNot(AsmToken::Identifier)) |
4419 | 121 | return MatchOperand_NoMatch; |
4420 | 360 | StringRef ShiftName = Tok.getString(); |
4421 | 360 | if (ShiftName != "ror" && ShiftName != "ROR") |
4422 | 103 | return MatchOperand_NoMatch; |
4423 | 257 | Parser.Lex(); // Eat the operator. |
4424 | | |
4425 | | // A '#' and a rotate amount. |
4426 | 257 | if (Parser.getTok().isNot(AsmToken::Hash) && |
4427 | 257 | Parser.getTok().isNot(AsmToken::Dollar)) { |
4428 | | //Error(Parser.getTok().getLoc(), "'#' expected"); |
4429 | 1 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
4430 | 1 | return MatchOperand_ParseFail; |
4431 | 1 | } |
4432 | 256 | Parser.Lex(); // Eat hash token. |
4433 | | //SMLoc ExLoc = Parser.getTok().getLoc(); |
4434 | | |
4435 | 256 | const MCExpr *ShiftAmount; |
4436 | 256 | SMLoc EndLoc; |
4437 | 256 | if (getParser().parseExpression(ShiftAmount, EndLoc)) { |
4438 | | //Error(ExLoc, "malformed rotate expression"); |
4439 | 38 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
4440 | 38 | return MatchOperand_ParseFail; |
4441 | 38 | } |
4442 | 218 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); |
4443 | 218 | if (!CE) { |
4444 | | //Error(ExLoc, "rotate amount must be an immediate"); |
4445 | 2 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
4446 | 2 | return MatchOperand_ParseFail; |
4447 | 2 | } |
4448 | | |
4449 | 216 | int64_t Val = CE->getValue(); |
4450 | | // Shift amount must be in {0, 8, 16, 24} (0 is an undocumented extension);
4451 | | // normally, zero is represented in asm by omitting the rotate operand |
4452 | | // entirely. |
4453 | 216 | if (Val != 8 && Val != 16 && Val != 24 && Val != 0) { |
4454 | | //Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24"); |
4455 | 212 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
4456 | 212 | return MatchOperand_ParseFail; |
4457 | 212 | } |
4458 | | |
4459 | 4 | Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc)); |
4460 | | |
4461 | 4 | return MatchOperand_Success; |
4462 | 216 | } |
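The rotate amount for the SXTB/UXTB family therefore has only four legal spellings; a trivial restatement of the check above (isValidRotImm is illustrative only):

#include <cstdint>

// ror #n for SXTB/UXTB-family instructions: n must be 0, 8, 16 or 24
// (0 is normally expressed by omitting the rotate operand entirely).
bool isValidRotImm(int64_t n) {
  return n == 0 || n == 8 || n == 16 || n == 24;
}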
4463 | | |
4464 | | ARMAsmParser::OperandMatchResultTy |
4465 | | ARMAsmParser::parseModImm(OperandVector &Operands, unsigned int &ErrorCode) |
4466 | 19.2k | { |
4467 | 19.2k | MCAsmParser &Parser = getParser(); |
4468 | 19.2k | MCAsmLexer &Lexer = getLexer(); |
4469 | 19.2k | int64_t Imm1, Imm2; |
4470 | | |
4471 | 19.2k | SMLoc S = Parser.getTok().getLoc(); |
4472 | | |
4473 | | // 1) A mod_imm operand can appear in the place of a register name: |
4474 | | // add r0, #mod_imm |
4475 | | // add r0, r0, #mod_imm |
4476 | | // to correctly handle the latter, we bail out as soon as we see an |
4477 | | // identifier. |
4478 | | // |
4479 | | // 2) Similarly, we do not want to parse into complex operands: |
4480 | | // mov r0, #mod_imm |
4481 | | // mov r0, :lower16:(_foo) |
4482 | 19.2k | if (Parser.getTok().is(AsmToken::Identifier) || |
4483 | 19.2k | Parser.getTok().is(AsmToken::Colon)) |
4484 | 10.6k | return MatchOperand_NoMatch; |
4485 | | |
4486 | | // Hash (dollar) is optional as per the ARMARM |
4487 | 8.69k | if (Parser.getTok().is(AsmToken::Hash) || |
4488 | 8.69k | Parser.getTok().is(AsmToken::Dollar)) { |
4489 | | // Avoid parsing into complex operands (#:) |
4490 | 1.13k | if (Lexer.peekTok().is(AsmToken::Colon)) |
4491 | 3 | return MatchOperand_NoMatch; |
4492 | | |
4493 | | // Eat the hash (dollar) |
4494 | 1.13k | Parser.Lex(); |
4495 | 1.13k | } |
4496 | | |
4497 | 8.69k | SMLoc Sx1, Ex1; |
4498 | 8.69k | Sx1 = Parser.getTok().getLoc(); |
4499 | 8.69k | const MCExpr *Imm1Exp; |
4500 | 8.69k | if (getParser().parseExpression(Imm1Exp, Ex1)) { |
4501 | | //Error(Sx1, "malformed expression"); |
4502 | 1.64k | ErrorCode = KS_ERR_ASM_ARM_INVALIDOPERAND; |
4503 | 1.64k | return MatchOperand_ParseFail; |
4504 | 1.64k | } |
4505 | | |
4506 | 7.04k | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm1Exp); |
4507 | | |
4508 | 7.04k | if (CE) { |
4509 | | // Immediate must fit within 32-bits |
4510 | 6.30k | Imm1 = CE->getValue(); |
4511 | 6.30k | int Enc = ARM_AM::getSOImmVal(Imm1); |
4512 | 6.30k | if (Enc != -1 && Parser.getTok().is(AsmToken::EndOfStatement)) { |
4513 | | // We have a match! |
4514 | 4.16k | Operands.push_back(ARMOperand::CreateModImm((Enc & 0xFF), |
4515 | 4.16k | (Enc & 0xF00) >> 7, |
4516 | 4.16k | Sx1, Ex1)); |
4517 | 4.16k | return MatchOperand_Success; |
4518 | 4.16k | } |
4519 | | |
4520 | | // We have parsed an immediate which is not for us, fallback to a plain |
4521 | | // immediate. This can happen for instruction aliases. For an example, |
4522 | | // ARMInstrInfo.td defines the alias [mov <-> mvn] which can transform |
4523 | | // a mov (mvn) with a mod_imm_neg/mod_imm_not operand into the opposite |
4524 | | // instruction with a mod_imm operand. The alias is defined such that the |
4525 | | // parser method is shared, that's why we have to do this here. |
4526 | 2.13k | if (Parser.getTok().is(AsmToken::EndOfStatement)) { |
4527 | 1.20k | Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1)); |
4528 | 1.20k | return MatchOperand_Success; |
4529 | 1.20k | } |
4530 | 2.13k | } else { |
4531 | | // Operands like #(l1 - l2) can only be evaluated at a later stage (via an |
4532 | | // MCFixup). Fallback to a plain immediate. |
4533 | 745 | Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1)); |
4534 | 745 | return MatchOperand_Success; |
4535 | 745 | } |
4536 | | |
4537 | | // From this point onward, we expect the input to be a (#bits, #rot) pair |
4538 | 928 | if (Parser.getTok().isNot(AsmToken::Comma)) { |
4539 | | //Error(Sx1, "expected modified immediate operand: #[0, 255], #even[0-30]"); |
4540 | 50 | ErrorCode = KS_ERR_ASM_ARM_INVALIDOPERAND; |
4541 | 50 | return MatchOperand_ParseFail; |
4542 | 50 | } |
4543 | | |
4544 | 878 | if (Imm1 & ~0xFF) { |
4545 | | // Error(Sx1, "immediate operand must be a number in the range [0, 255]");
4546 | 54 | ErrorCode = KS_ERR_ASM_ARM_INVALIDOPERAND; |
4547 | 54 | return MatchOperand_ParseFail; |
4548 | 54 | } |
4549 | | |
4550 | | // Eat the comma |
4551 | 824 | Parser.Lex(); |
4552 | | |
4553 | | // Repeat for #rot |
4554 | 824 | SMLoc Sx2, Ex2; |
4555 | 824 | Sx2 = Parser.getTok().getLoc(); |
4556 | | |
4557 | | // Eat the optional hash (dollar) |
4558 | 824 | if (Parser.getTok().is(AsmToken::Hash) || |
4559 | 824 | Parser.getTok().is(AsmToken::Dollar)) |
4560 | 494 | Parser.Lex(); |
4561 | | |
4562 | 824 | const MCExpr *Imm2Exp; |
4563 | 824 | if (getParser().parseExpression(Imm2Exp, Ex2)) { |
4564 | | // Error(Sx2, "malformed expression"); |
4565 | 337 | ErrorCode = KS_ERR_ASM_ARM_INVALIDOPERAND; |
4566 | 337 | return MatchOperand_ParseFail; |
4567 | 337 | } |
4568 | | |
4569 | 487 | CE = dyn_cast<MCConstantExpr>(Imm2Exp); |
4570 | | |
4571 | 487 | if (CE) { |
4572 | 471 | Imm2 = CE->getValue(); |
4573 | 471 | if (!(Imm2 & ~0x1E)) { |
4574 | | // We have a match! |
4575 | 385 | Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2)); |
4576 | 385 | return MatchOperand_Success; |
4577 | 385 | } |
4578 | | // Error(Sx2, "immediate operand must be an even number in the range [0, 30]");
4579 | 86 | ErrorCode = KS_ERR_ASM_ARM_INVALIDOPERAND; |
4580 | 86 | return MatchOperand_ParseFail; |
4581 | 471 | } else { |
4582 | | // Error(Sx2, "constant expression expected"); |
4583 | 16 | ErrorCode = KS_ERR_ASM_ARM_INVALIDOPERAND; |
4584 | 16 | return MatchOperand_ParseFail; |
4585 | 16 | } |
4586 | 487 | } |
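A mod_imm, as parsed above, is an 8-bit constant rotated right by an even amount in [0, 30]; the real encodability test is ARM_AM::getSOImmVal, but the idea can be sketched independently (isModImm below is illustrative, not the library routine):

#include <cstdint>

bool isModImm(uint32_t v) {
  for (unsigned rot = 0; rot < 32; rot += 2) {
    // Undo a rotate-right by 'rot': rotate left and see whether an 8-bit value remains.
    uint32_t undone = rot ? ((v << rot) | (v >> (32 - rot))) : v;
    if ((undone & ~0xFFu) == 0)
      return true;          // v == undone ROR rot, with undone in [0, 255]
  }
  return false;
}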
4587 | | |
4588 | | ARMAsmParser::OperandMatchResultTy |
4589 | | ARMAsmParser::parseBitfield(OperandVector &Operands, unsigned int &ErrorCode) |
4590 | 454 | { |
4591 | 454 | MCAsmParser &Parser = getParser(); |
4592 | 454 | SMLoc S = Parser.getTok().getLoc(); |
4593 | | // The bitfield descriptor is really two operands, the LSB and the width. |
4594 | 454 | if (Parser.getTok().isNot(AsmToken::Hash) && |
4595 | 454 | Parser.getTok().isNot(AsmToken::Dollar)) { |
4596 | | // Error(Parser.getTok().getLoc(), "'#' expected"); |
4597 | 20 | ErrorCode = KS_ERR_ASM_ARM_INVALIDOPERAND; |
4598 | 20 | return MatchOperand_ParseFail; |
4599 | 20 | } |
4600 | 434 | Parser.Lex(); // Eat hash token. |
4601 | | |
4602 | 434 | const MCExpr *LSBExpr; |
4603 | 434 | SMLoc E = Parser.getTok().getLoc(); |
4604 | 434 | if (getParser().parseExpression(LSBExpr)) { |
4605 | | // Error(E, "malformed immediate expression"); |
4606 | 41 | ErrorCode = KS_ERR_ASM_ARM_INVALIDOPERAND; |
4607 | 41 | return MatchOperand_ParseFail; |
4608 | 41 | } |
4609 | 393 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr); |
4610 | 393 | if (!CE) { |
4611 | | // Error(E, "'lsb' operand must be an immediate"); |
4612 | 4 | ErrorCode = KS_ERR_ASM_ARM_INVALIDOPERAND; |
4613 | 4 | return MatchOperand_ParseFail; |
4614 | 4 | } |
4615 | | |
4616 | 389 | int64_t LSB = CE->getValue(); |
4617 | | // The LSB must be in the range [0,31] |
4618 | 389 | if (LSB < 0 || LSB > 31) { |
4619 | | // Error(E, "'lsb' operand must be in the range [0,31]"); |
4620 | 129 | ErrorCode = KS_ERR_ASM_ARM_INVALIDOPERAND; |
4621 | 129 | return MatchOperand_ParseFail; |
4622 | 129 | } |
4623 | 260 | E = Parser.getTok().getLoc(); |
4624 | | |
4625 | | // Expect another immediate operand. |
4626 | 260 | if (Parser.getTok().isNot(AsmToken::Comma)) { |
4627 | | // Error(Parser.getTok().getLoc(), "too few operands"); |
4628 | 5 | ErrorCode = KS_ERR_ASM_ARM_INVALIDOPERAND; |
4629 | 5 | return MatchOperand_ParseFail; |
4630 | 5 | } |
4631 | 255 | Parser.Lex(); // Eat hash token. |
4632 | 255 | if (Parser.getTok().isNot(AsmToken::Hash) && |
4633 | 255 | Parser.getTok().isNot(AsmToken::Dollar)) { |
4634 | | // Error(Parser.getTok().getLoc(), "'#' expected"); |
4635 | 4 | ErrorCode = KS_ERR_ASM_ARM_INVALIDOPERAND; |
4636 | 4 | return MatchOperand_ParseFail; |
4637 | 4 | } |
4638 | 251 | Parser.Lex(); // Eat hash token. |
4639 | | |
4640 | 251 | const MCExpr *WidthExpr; |
4641 | 251 | SMLoc EndLoc; |
4642 | 251 | if (getParser().parseExpression(WidthExpr, EndLoc)) { |
4643 | | // Error(E, "malformed immediate expression"); |
4644 | 7 | ErrorCode = KS_ERR_ASM_ARM_INVALIDOPERAND; |
4645 | 7 | return MatchOperand_ParseFail; |
4646 | 7 | } |
4647 | 244 | CE = dyn_cast<MCConstantExpr>(WidthExpr); |
4648 | 244 | if (!CE) { |
4649 | | // Error(E, "'width' operand must be an immediate"); |
4650 | 3 | ErrorCode = KS_ERR_ASM_ARM_INVALIDOPERAND; |
4651 | 3 | return MatchOperand_ParseFail; |
4652 | 3 | } |
4653 | | |
4654 | 241 | int64_t Width = CE->getValue(); |
4655 | | // The width must be in the range [1,32-lsb]
4656 | 241 | if (Width < 1 || Width > 32 - LSB) { |
4657 | | // Error(E, "'width' operand must be in the range [1,32-lsb]"); |
4658 | 213 | ErrorCode = KS_ERR_ASM_ARM_INVALIDOPERAND; |
4659 | 213 | return MatchOperand_ParseFail; |
4660 | 213 | } |
4661 | | |
4662 | 28 | Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc)); |
4663 | | |
4664 | 28 | return MatchOperand_Success; |
4665 | 241 | } |
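The two bitfield immediates are tied together: lsb must lie in [0,31] and width in [1, 32-lsb], so for example "#8, #4" is accepted while "#28, #8" is not. A one-line restatement (isValidBitfield is illustrative only):

#include <cstdint>

bool isValidBitfield(int64_t lsb, int64_t width) {
  return lsb >= 0 && lsb <= 31 && width >= 1 && width <= 32 - lsb;
}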
4666 | | |
4667 | | ARMAsmParser::OperandMatchResultTy |
4668 | 386 | ARMAsmParser::parsePostIdxReg(OperandVector &Operands, unsigned int &ErrorCode) { |
4669 | | // Check for a post-index addressing register operand. Specifically: |
4670 | | // postidx_reg := '+' register {, shift} |
4671 | | // | '-' register {, shift} |
4672 | | // | register {, shift} |
4673 | | |
4674 | | // This method must return MatchOperand_NoMatch without consuming any tokens |
4675 | | // in the case where there is no match, as other alternatives take other |
4676 | | // parse methods. |
4677 | 386 | MCAsmParser &Parser = getParser(); |
4678 | 386 | AsmToken Tok = Parser.getTok(); |
4679 | 386 | SMLoc S = Tok.getLoc(); |
4680 | 386 | bool haveEaten = false; |
4681 | 386 | bool isAdd = true; |
4682 | 386 | if (Tok.is(AsmToken::Plus)) { |
4683 | 5 | Parser.Lex(); // Eat the '+' token. |
4684 | 5 | haveEaten = true; |
4685 | 381 | } else if (Tok.is(AsmToken::Minus)) { |
4686 | 3 | Parser.Lex(); // Eat the '-' token. |
4687 | 3 | isAdd = false; |
4688 | 3 | haveEaten = true; |
4689 | 3 | } |
4690 | | |
4691 | 386 | SMLoc E = Parser.getTok().getEndLoc(); |
4692 | 386 | int Reg = tryParseRegister(); |
4693 | 386 | if (Reg == -1) { |
4694 | 331 | if (!haveEaten) |
4695 | 323 | return MatchOperand_NoMatch; |
4696 | | //Error(Parser.getTok().getLoc(), "register expected"); |
4697 | 8 | return MatchOperand_ParseFail; |
4698 | 331 | } |
4699 | | |
4700 | 55 | ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift; |
4701 | 55 | unsigned ShiftImm = 0; |
4702 | 55 | if (Parser.getTok().is(AsmToken::Comma)) { |
4703 | 11 | Parser.Lex(); // Eat the ','. |
4704 | 11 | if (parseMemRegOffsetShift(ShiftTy, ShiftImm)) |
4705 | 10 | return MatchOperand_ParseFail; |
4706 | | |
4707 | | // FIXME: Only approximates end...may include intervening whitespace. |
4708 | 1 | E = Parser.getTok().getLoc(); |
4709 | 1 | } |
4710 | | |
4711 | 45 | Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy, |
4712 | 45 | ShiftImm, S, E)); |
4713 | | |
4714 | 45 | return MatchOperand_Success; |
4715 | 55 | } |
4716 | | |
4717 | | ARMAsmParser::OperandMatchResultTy |
4718 | 358 | ARMAsmParser::parseAM3Offset(OperandVector &Operands, unsigned int &ErrorCode) { |
4719 | | // Check for a post-index addressing register operand. Specifically: |
4720 | | // am3offset := '+' register |
4721 | | // | '-' register |
4722 | | // | register |
4723 | | // | # imm |
4724 | | // | # + imm |
4725 | | // | # - imm |
4726 | | |
4727 | | // This method must return MatchOperand_NoMatch without consuming any tokens |
4728 | | // in the case where there is no match, as other alternatives take other |
4729 | | // parse methods. |
4730 | 358 | MCAsmParser &Parser = getParser(); |
4731 | 358 | AsmToken Tok = Parser.getTok(); |
4732 | 358 | SMLoc S = Tok.getLoc(); |
4733 | | |
4734 | | // Do immediates first, as we always parse those if we have a '#'. |
4735 | 358 | if (Parser.getTok().is(AsmToken::Hash) || |
4736 | 358 | Parser.getTok().is(AsmToken::Dollar)) { |
4737 | 84 | Parser.Lex(); // Eat '#' or '$'. |
4738 | | // Explicitly look for a '-', as we need to encode negative zero |
4739 | | // differently. |
4740 | 84 | bool isNegative = Parser.getTok().is(AsmToken::Minus); |
4741 | 84 | const MCExpr *Offset; |
4742 | 84 | SMLoc E; |
4743 | 84 | if (getParser().parseExpression(Offset, E)) |
4744 | 34 | return MatchOperand_ParseFail; |
4745 | 50 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); |
4746 | 50 | if (!CE) { |
4747 | | //Error(S, "constant expression expected"); |
4748 | 5 | return MatchOperand_ParseFail; |
4749 | 5 | } |
4750 | | // Negative zero is encoded as the flag value INT32_MIN. |
4751 | 45 | int32_t Val = CE->getValue(); |
4752 | 45 | if (isNegative && Val == 0) |
4753 | 9 | Val = INT32_MIN; |
4754 | | |
4755 | 45 | Operands.push_back( |
4756 | 45 | ARMOperand::CreateImm(MCConstantExpr::create(Val, getContext()), S, E)); |
4757 | | |
4758 | 45 | return MatchOperand_Success; |
4759 | 50 | } |
4760 | | |
4761 | | |
4762 | 274 | bool haveEaten = false; |
4763 | 274 | bool isAdd = true; |
4764 | 274 | if (Tok.is(AsmToken::Plus)) { |
4765 | 2 | Parser.Lex(); // Eat the '+' token. |
4766 | 2 | haveEaten = true; |
4767 | 272 | } else if (Tok.is(AsmToken::Minus)) { |
4768 | 3 | Parser.Lex(); // Eat the '-' token. |
4769 | 3 | isAdd = false; |
4770 | 3 | haveEaten = true; |
4771 | 3 | } |
4772 | | |
4773 | 274 | Tok = Parser.getTok(); |
4774 | 274 | int Reg = tryParseRegister(); |
4775 | 274 | if (Reg == -1) { |
4776 | 201 | if (!haveEaten) |
4777 | 196 | return MatchOperand_NoMatch; |
4778 | | //Error(Tok.getLoc(), "register expected"); |
4779 | 5 | return MatchOperand_ParseFail; |
4780 | 201 | } |
4781 | | |
4782 | 73 | Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift, |
4783 | 73 | 0, S, Tok.getEndLoc())); |
4784 | | |
4785 | 73 | return MatchOperand_Success; |
4786 | 274 | } |
4787 | | |
4788 | | /// Convert parsed operands to MCInst. Needed here because this instruction |
4789 | | /// only has two register operands, but multiplication is commutative so |
4790 | | /// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN". |
4791 | | void ARMAsmParser::cvtThumbMultiply(MCInst &Inst, |
4792 | 55 | const OperandVector &Operands) { |
4793 | 55 | ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1); |
4794 | 55 | ((ARMOperand &)*Operands[1]).addCCOutOperands(Inst, 1); |
4795 | | // If we have a three-operand form, make sure to set Rn to be the operand |
4796 | | // that isn't the same as Rd. |
4797 | 55 | unsigned RegOp = 4; |
4798 | 55 | if (Operands.size() == 6 && |
4799 | 55 | ((ARMOperand &)*Operands[4]).getReg() == |
4800 | 43 | ((ARMOperand &)*Operands[3]).getReg()) |
4801 | 21 | RegOp = 5; |
4802 | 55 | ((ARMOperand &)*Operands[RegOp]).addRegOperands(Inst, 1); |
4803 | 55 | Inst.addOperand(Inst.getOperand(0)); |
4804 | 55 | ((ARMOperand &)*Operands[2]).addCondCodeOperands(Inst, 2); |
4805 | 55 | } |
4806 | | |
4807 | | void ARMAsmParser::cvtThumbBranches(MCInst &Inst, |
4808 | 17.2k | const OperandVector &Operands) { |
4809 | 17.2k | int CondOp = -1, ImmOp = -1; |
4810 | 17.2k | switch(Inst.getOpcode()) { |
4811 | 17.1k | case ARM::tB: |
4812 | 17.1k | case ARM::tBcc: CondOp = 1; ImmOp = 2; break; |
4813 | | |
4814 | 103 | case ARM::t2B: |
4815 | 103 | case ARM::t2Bcc: CondOp = 1; ImmOp = 3; break; |
4816 | | |
4817 | 0 | default: llvm_unreachable("Unexpected instruction in cvtThumbBranches"); |
4818 | 17.2k | } |
4819 | | // first decide whether or not the branch should be conditional |
4820 | | // by looking at its location relative to an IT block
4821 | 17.2k | if(inITBlock()) { |
4822 | | // inside an IT block we cannot have any conditional branches. Any
4823 | | // such instruction needs to be converted to unconditional form.
4824 | 1.87k | switch(Inst.getOpcode()) { |
4825 | 0 | case ARM::tBcc: Inst.setOpcode(ARM::tB); break; |
4826 | 0 | case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break; |
4827 | 1.87k | } |
4828 | 15.3k | } else { |
4829 | | // outside IT blocks we can only have unconditional branches with AL |
4830 | | // condition code or conditional branches with non-AL condition code |
4831 | 15.3k | unsigned Cond = static_cast<ARMOperand &>(*Operands[CondOp]).getCondCode(); |
4832 | 15.3k | switch(Inst.getOpcode()) { |
4833 | 15.2k | case ARM::tB: |
4834 | 15.2k | case ARM::tBcc: |
4835 | 15.2k | Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc); |
4836 | 15.2k | break; |
4837 | 103 | case ARM::t2B: |
4838 | 103 | case ARM::t2Bcc: |
4839 | 103 | Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc); |
4840 | 103 | break; |
4841 | 15.3k | } |
4842 | 15.3k | } |
4843 | | |
4844 | | // now decide on encoding size based on branch target range |
4845 | 17.2k | switch(Inst.getOpcode()) { |
4846 | | // classify tB as either t2B or t1B based on range of immediate operand |
4847 | 13.3k | case ARM::tB: { |
4848 | 13.3k | ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]); |
4849 | 13.3k | if (!op.isSignedOffsetRel<11, 1>(Inst.getAddress()) && isThumb() && hasV8MBaseline()) |
4850 | 1.67k | Inst.setOpcode(ARM::t2B); |
4851 | 13.3k | break; |
4852 | 0 | } |
4853 | | // classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand |
4854 | 3.85k | case ARM::tBcc: { |
4855 | 3.85k | ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]); |
4856 | 3.85k | if (!op.isSignedOffsetRel<8, 1>(Inst.getAddress()) && isThumb() && hasV8MBaseline()) |
4857 | 800 | Inst.setOpcode(ARM::t2Bcc); |
4858 | 3.85k | break; |
4859 | 0 | } |
4860 | 17.2k | } |
4861 | 17.2k | ((ARMOperand &)*Operands[ImmOp]).addImmOperands(Inst, 1); |
4862 | 17.2k | ((ARMOperand &)*Operands[CondOp]).addCondCodeOperands(Inst, 2); |
4863 | 17.2k | } |
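The final switch picks the branch encoding from the target distance; assuming isSignedOffsetRel<Bits, Scale> means "the relative offset fits a signed Bits-bit field of 2^Scale-byte units", tB covers roughly +/-2 KiB and tBcc roughly +/-256 bytes before the instruction is widened to t2B/t2Bcc. A hedged sketch of that fit test (fitsSignedScaled is illustrative only):

#include <cstdint>

// True if 'offset' is 2^scale-aligned and fits a signed field of 'bits' units of 2^scale bytes.
bool fitsSignedScaled(int64_t offset, unsigned bits, unsigned scale) {
  if (offset % (1LL << scale) != 0)
    return false;
  int64_t min = -(1LL << (bits + scale - 1));
  int64_t max =  (1LL << (bits + scale - 1)) - (1LL << scale);
  return offset >= min && offset <= max;
}
// e.g. fitsSignedScaled(off, 11, 1) ~ tB range, fitsSignedScaled(off, 8, 1) ~ tBcc range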
4864 | | |
4865 | | /// Parse an ARM memory expression; return false if successful, else return true
4866 | | /// or an error. The first token must be a '[' when called. |
4867 | | bool ARMAsmParser::parseMemory(OperandVector &Operands) |
4868 | 1.74k | { |
4869 | 1.74k | MCAsmParser &Parser = getParser(); |
4870 | 1.74k | SMLoc S, E; |
4871 | 1.74k | assert(Parser.getTok().is(AsmToken::LBrac) && |
4872 | 1.74k | "Token is not a Left Bracket"); |
4873 | 1.74k | S = Parser.getTok().getLoc(); |
4874 | 1.74k | Parser.Lex(); // Eat left bracket token. |
4875 | | |
4876 | | //const AsmToken &BaseRegTok = Parser.getTok(); |
4877 | 1.74k | int BaseRegNum = tryParseRegister(); |
4878 | 1.74k | if (BaseRegNum == -1) |
4879 | | //return Error(BaseRegTok.getLoc(), "register expected"); |
4880 | 127 | return true; |
4881 | | |
4882 | | // The next token must either be a comma, a colon or a closing bracket. |
4883 | 1.62k | const AsmToken &Tok = Parser.getTok(); |
4884 | 1.62k | if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) && |
4885 | 1.62k | !Tok.is(AsmToken::RBrac)) |
4886 | | //return Error(Tok.getLoc(), "malformed memory operand"); |
4887 | 24 | return true; |
4888 | | |
4889 | 1.59k | if (Tok.is(AsmToken::RBrac)) { |
4890 | 765 | E = Tok.getEndLoc(); |
4891 | 765 | Parser.Lex(); // Eat right bracket token. |
4892 | | |
4893 | 765 | Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0, |
4894 | 765 | ARM_AM::no_shift, 0, 0, false, |
4895 | 765 | S, E)); |
4896 | | |
4897 | | // If there's a pre-indexing writeback marker, '!', just add it as a token |
4898 | | // operand. It's rather odd, but syntactically valid. |
4899 | 765 | if (Parser.getTok().is(AsmToken::Exclaim)) { |
4900 | 9 | Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); |
4901 | 9 | Parser.Lex(); // Eat the '!'. |
4902 | 9 | } |
4903 | | |
4904 | 765 | return false; |
4905 | 765 | } |
4906 | | |
4907 | 832 | assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) && |
4908 | 832 | "Lost colon or comma in memory operand?!"); |
4909 | 832 | if (Tok.is(AsmToken::Comma)) { |
4910 | 654 | Parser.Lex(); // Eat the comma. |
4911 | 654 | } |
4912 | | |
4913 | | // If we have a ':', it's an alignment specifier. |
4914 | 832 | if (Parser.getTok().is(AsmToken::Colon)) { |
4915 | 204 | Parser.Lex(); // Eat the ':'. |
4916 | 204 | E = Parser.getTok().getLoc(); |
4917 | 204 | SMLoc AlignmentLoc = Tok.getLoc(); |
4918 | | |
4919 | 204 | const MCExpr *Expr; |
4920 | 204 | if (getParser().parseExpression(Expr)) |
4921 | 45 | return true; |
4922 | | |
4923 | | // The expression has to be a constant. Memory references with relocations |
4924 | | // don't come through here, as they use the <label> forms of the relevant |
4925 | | // instructions. |
4926 | 159 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); |
4927 | 159 | if (!CE) |
4928 | | //return Error (E, "constant expression expected"); |
4929 | 5 | return true; |
4930 | | |
4931 | 154 | unsigned Align = 0; |
4932 | 154 | switch (CE->getValue()) { |
4933 | 145 | default: |
4934 | | //return Error(E, |
4935 | | // "alignment specifier must be 16, 32, 64, 128, or 256 bits"); |
4936 | 145 | return true; |
4937 | 3 | case 16: Align = 2; break; |
4938 | 2 | case 32: Align = 4; break; |
4939 | 2 | case 64: Align = 8; break; |
4940 | 2 | case 128: Align = 16; break; |
4941 | 0 | case 256: Align = 32; break; |
4942 | 154 | } |
4943 | | |
4944 | | // Now we should have the closing ']' |
4945 | 9 | if (Parser.getTok().isNot(AsmToken::RBrac)) |
4946 | | //return Error(Parser.getTok().getLoc(), "']' expected"); |
4947 | 9 | return true; |
4948 | 0 | E = Parser.getTok().getEndLoc(); |
4949 | 0 | Parser.Lex(); // Eat right bracket token. |
4950 | | |
4951 | | // Don't worry about range checking the value here. That's handled by |
4952 | | // the is*() predicates. |
4953 | 0 | Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0, |
4954 | 0 | ARM_AM::no_shift, 0, Align, |
4955 | 0 | false, S, E, AlignmentLoc)); |
4956 | | |
4957 | | // If there's a pre-indexing writeback marker, '!', just add it as a token |
4958 | | // operand. |
4959 | 0 | if (Parser.getTok().is(AsmToken::Exclaim)) { |
4960 | 0 | Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); |
4961 | 0 | Parser.Lex(); // Eat the '!'. |
4962 | 0 | } |
4963 | |
4964 | 0 | return false; |
4965 | 9 | } |
4966 | | |
4967 | | // If we have a '#', it's an immediate offset, else assume it's a register |
4968 | | // offset. Be friendly and also accept a plain integer (without a leading |
4969 | | // hash) for gas compatibility. |
4970 | 628 | if (Parser.getTok().is(AsmToken::Hash) || |
4971 | 628 | Parser.getTok().is(AsmToken::Dollar) || |
4972 | 628 | Parser.getTok().is(AsmToken::Integer)) { |
4973 | 241 | if (Parser.getTok().isNot(AsmToken::Integer)) |
4974 | 139 | Parser.Lex(); // Eat '#' or '$'. |
4975 | 241 | E = Parser.getTok().getLoc(); |
4976 | | |
4977 | 241 | bool isNegative = getParser().getTok().is(AsmToken::Minus); |
4978 | 241 | const MCExpr *Offset; |
4979 | 241 | if (getParser().parseExpression(Offset)) |
4980 | 129 | return true; |
4981 | | |
4982 | | // The expression has to be a constant. Memory references with relocations |
4983 | | // don't come through here, as they use the <label> forms of the relevant |
4984 | | // instructions. |
4985 | 112 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); |
4986 | 112 | if (!CE) |
4987 | 29 | return Error (E, "constant expression expected"); |
4988 | | |
4989 | | // If the constant was #-0, represent it as INT32_MIN. |
4990 | 83 | int32_t Val = CE->getValue(); |
4991 | 83 | if (isNegative && Val == 0) |
4992 | 4 | CE = MCConstantExpr::create(INT32_MIN, getContext()); |
4993 | | |
4994 | | // Now we should have the closing ']' |
4995 | 83 | if (Parser.getTok().isNot(AsmToken::RBrac)) |
4996 | | //return Error(Parser.getTok().getLoc(), "']' expected"); |
4997 | 14 | return true; |
4998 | 69 | E = Parser.getTok().getEndLoc(); |
4999 | 69 | Parser.Lex(); // Eat right bracket token. |
5000 | | |
5001 | | // Don't worry about range checking the value here. That's handled by |
5002 | | // the is*() predicates. |
5003 | 69 | Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0, |
5004 | 69 | ARM_AM::no_shift, 0, 0, |
5005 | 69 | false, S, E)); |
5006 | | |
5007 | | // If there's a pre-indexing writeback marker, '!', just add it as a token |
5008 | | // operand. |
5009 | 69 | if (Parser.getTok().is(AsmToken::Exclaim)) { |
5010 | 10 | Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); |
5011 | 10 | Parser.Lex(); // Eat the '!'. |
5012 | 10 | } |
5013 | | |
5014 | 69 | return false; |
5015 | 83 | } |
5016 | | |
5017 | | // The register offset is optionally preceded by a '+' or '-' |
5018 | 387 | bool isNegative = false; |
5019 | 387 | if (Parser.getTok().is(AsmToken::Minus)) { |
5020 | 11 | isNegative = true; |
5021 | 11 | Parser.Lex(); // Eat the '-'. |
5022 | 376 | } else if (Parser.getTok().is(AsmToken::Plus)) { |
5023 | | // Nothing to do. |
5024 | 3 | Parser.Lex(); // Eat the '+'. |
5025 | 3 | } |
5026 | | |
5027 | 387 | E = Parser.getTok().getLoc(); |
5028 | 387 | int OffsetRegNum = tryParseRegister(); |
5029 | 387 | if (OffsetRegNum == -1) |
5030 | | //return Error(E, "register expected"); |
5031 | 17 | return true; |
5032 | | |
5033 | | // If there's a shift operator, handle it. |
5034 | 370 | ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift; |
5035 | 370 | unsigned ShiftImm = 0; |
5036 | 370 | if (Parser.getTok().is(AsmToken::Comma)) { |
5037 | 299 | Parser.Lex(); // Eat the ','. |
5038 | 299 | if (parseMemRegOffsetShift(ShiftType, ShiftImm)) |
5039 | 284 | return true; |
5040 | 299 | } |
5041 | | |
5042 | | // Now we should have the closing ']' |
5043 | 86 | if (Parser.getTok().isNot(AsmToken::RBrac)) |
5044 | | //return Error(Parser.getTok().getLoc(), "']' expected"); |
5045 | 25 | return true; |
5046 | 61 | E = Parser.getTok().getEndLoc(); |
5047 | 61 | Parser.Lex(); // Eat right bracket token. |
5048 | | |
5049 | 61 | Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, OffsetRegNum, |
5050 | 61 | ShiftType, ShiftImm, 0, isNegative, |
5051 | 61 | S, E)); |
5052 | | |
5053 | | // If there's a pre-indexing writeback marker, '!', just add it as a token |
5054 | | // operand. |
5055 | 61 | if (Parser.getTok().is(AsmToken::Exclaim)) { |
5056 | 3 | Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); |
5057 | 3 | Parser.Lex(); // Eat the '!'. |
5058 | 3 | } |
5059 | | |
5060 | 61 | return false; |
5061 | 86 | } |
5062 | | |
5063 | | /// parseMemRegOffsetShift - one of these two: |
5064 | | /// ( lsl | lsr | asr | ror ) , # shift_amount |
5065 | | /// rrx |
5066 | | /// Returns false if it successfully parses a shift, true otherwise.
5067 | | bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St, |
5068 | 310 | unsigned &Amount) { |
5069 | 310 | MCAsmParser &Parser = getParser(); |
5070 | 310 | SMLoc Loc = Parser.getTok().getLoc(); |
5071 | 310 | const AsmToken &Tok = Parser.getTok(); |
5072 | 310 | if (Tok.isNot(AsmToken::Identifier)) |
5073 | 4 | return true; |
5074 | 306 | StringRef ShiftName = Tok.getString(); |
5075 | 306 | if (ShiftName == "lsl" || ShiftName == "LSL" || |
5076 | 306 | ShiftName == "asl" || ShiftName == "ASL") |
5077 | 51 | St = ARM_AM::lsl; |
5078 | 255 | else if (ShiftName == "lsr" || ShiftName == "LSR") |
5079 | 34 | St = ARM_AM::lsr; |
5080 | 221 | else if (ShiftName == "asr" || ShiftName == "ASR") |
5081 | 36 | St = ARM_AM::asr; |
5082 | 185 | else if (ShiftName == "ror" || ShiftName == "ROR") |
5083 | 66 | St = ARM_AM::ror; |
5084 | 119 | else if (ShiftName == "rrx" || ShiftName == "RRX") |
5085 | 3 | St = ARM_AM::rrx; |
5086 | 116 | else |
5087 | | //return Error(Loc, "illegal shift operator"); |
5088 | 116 | return true; |
5089 | 190 | Parser.Lex(); // Eat shift type token. |
5090 | | |
5091 | | // rrx stands alone. |
5092 | 190 | Amount = 0; |
5093 | 190 | if (St != ARM_AM::rrx) { |
5094 | 187 | Loc = Parser.getTok().getLoc(); |
5095 | | // A '#' and a shift amount. |
5096 | 187 | const AsmToken &HashTok = Parser.getTok(); |
5097 | 187 | if (HashTok.isNot(AsmToken::Hash) && |
5098 | 187 | HashTok.isNot(AsmToken::Dollar)) |
5099 | | //return Error(HashTok.getLoc(), "'#' expected"); |
5100 | 6 | return true; |
5101 | 181 | Parser.Lex(); // Eat hash token. |
5102 | | |
5103 | 181 | const MCExpr *Expr; |
5104 | 181 | if (getParser().parseExpression(Expr)) |
5105 | 63 | return true; |
5106 | | // Range check the immediate. |
5107 | | // lsl, ror: 0 <= imm <= 31 |
5108 | | // lsr, asr: 0 <= imm <= 32 |
5109 | 118 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); |
5110 | 118 | if (!CE) |
5111 | | //return Error(Loc, "shift amount must be an immediate"); |
5112 | 3 | return true; |
5113 | 115 | int64_t Imm = CE->getValue(); |
5114 | 115 | if (Imm < 0 || |
5115 | 115 | ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) || |
5116 | 115 | ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32)) |
5117 | | //return Error(Loc, "immediate shift value out of range"); |
5118 | 102 | return true; |
5119 | | // If <ShiftTy> #0, turn it into a no_shift. |
5120 | 13 | if (Imm == 0) |
5121 | 3 | St = ARM_AM::lsl; |
5122 | | // For consistency, treat lsr #32 and asr #32 as having immediate value 0. |
5123 | 13 | if (Imm == 32) |
5124 | 3 | Imm = 0; |
5125 | 13 | Amount = Imm; |
5126 | 13 | } |
5127 | | |
5128 | 16 | return false; |
5129 | 190 | } |
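
For reference, a minimal standalone sketch (not taken from this file) of the shift-amount range rules the routine above enforces: lsl/ror accept 0-31, lsr/asr accept 0-32, and an amount of 32 is stored as an immediate of 0. The helper name normalizeShift and the Shift enum are illustrative only, and the parser's extra step of rewriting a #0 amount as lsl is omitted.

    #include <cassert>
    #include <cstdint>

    // Hypothetical helper mirroring the range checks above.
    enum class Shift { lsl, lsr, asr, ror };

    static bool normalizeShift(Shift st, int64_t imm, unsigned &amount) {
      if (imm < 0)
        return false;                                    // negative amounts rejected
      if ((st == Shift::lsl || st == Shift::ror) && imm > 31)
        return false;                                    // lsl/ror: 0..31
      if ((st == Shift::lsr || st == Shift::asr) && imm > 32)
        return false;                                    // lsr/asr: 0..32
      if (imm == 32)
        imm = 0;                                         // lsr/asr #32 encode as 0
      amount = static_cast<unsigned>(imm);
      return true;
    }

    int main() {
      unsigned a;
      assert(normalizeShift(Shift::lsr, 32, a) && a == 0);
      assert(!normalizeShift(Shift::lsl, 32, a));
      assert(normalizeShift(Shift::ror, 31, a) && a == 31);
      return 0;
    }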
5130 | | |
5131 | | /// parseFPImm - A floating point immediate expression operand. |
5132 | | ARMAsmParser::OperandMatchResultTy |
5133 | 4.46k | ARMAsmParser::parseFPImm(OperandVector &Operands, unsigned int &ErrorCode) { |
5134 | 4.46k | MCAsmParser &Parser = getParser(); |
5135 | | // Anything that can accept a floating point constant as an operand |
5136 | | // needs to go through here, as the regular parseExpression is |
5137 | | // integer only. |
5138 | | // |
5139 | | // This routine still creates a generic Immediate operand, containing |
5140 | | // a bitcast of the 64-bit floating point value. The various operands |
5141 | | // that accept floats can check whether the value is valid for them |
5142 | | // via the standard is*() predicates. |
5143 | | |
5144 | 4.46k | SMLoc S = Parser.getTok().getLoc(); |
5145 | | |
5146 | 4.46k | if (Parser.getTok().isNot(AsmToken::Hash) && |
5147 | 4.46k | Parser.getTok().isNot(AsmToken::Dollar)) |
5148 | 3.86k | return MatchOperand_NoMatch; |
5149 | | |
5150 | | // Disambiguate the VMOV forms that can accept an FP immediate. |
5151 | | // vmov.f32 <sreg>, #imm |
5152 | | // vmov.f64 <dreg>, #imm |
5153 | | // vmov.f32 <dreg>, #imm @ vector f32x2 |
5154 | | // vmov.f32 <qreg>, #imm @ vector f32x4 |
5155 | | // |
5156 | | // There are also the NEON VMOV instructions which expect an |
5157 | | // integer constant. Make sure we don't try to parse an FPImm |
5158 | | // for these: |
5159 | | // vmov.i{8|16|32|64} <dreg|qreg>, #imm |
5160 | 602 | ARMOperand &TyOp = static_cast<ARMOperand &>(*Operands[2]); |
5161 | 602 | bool isVmovf = TyOp.isToken() && |
5162 | 602 | (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64" || |
5163 | 304 | TyOp.getToken() == ".f16"); |
5164 | 602 | ARMOperand &Mnemonic = static_cast<ARMOperand &>(*Operands[0]); |
5165 | 602 | bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() == "fconstd" || |
5166 | 602 | Mnemonic.getToken() == "fconsts"); |
5167 | 602 | if (!(isVmovf || isFconst)) |
5168 | 240 | return MatchOperand_NoMatch; |
5169 | | |
5170 | 362 | Parser.Lex(); // Eat '#' or '$'. |
5171 | | |
5172 | | // Handle negation, as that still comes through as a separate token. |
5173 | 362 | bool isNegative = false; |
5174 | 362 | if (Parser.getTok().is(AsmToken::Minus)) { |
5175 | 273 | isNegative = true; |
5176 | 273 | Parser.Lex(); |
5177 | 273 | } |
5178 | 362 | const AsmToken &Tok = Parser.getTok(); |
5179 | | //SMLoc Loc = Tok.getLoc(); |
5180 | 362 | if (Tok.is(AsmToken::Real) && isVmovf) { |
5181 | 72 | APFloat RealVal(APFloat::IEEEsingle, Tok.getString()); |
5182 | 72 | if (RealVal.bitcastToAPInt().getActiveBits() > 64) |
5183 | 0 | return MatchOperand_ParseFail; |
5184 | 72 | uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue(); |
5185 | | // If we had a '-' in front, toggle the sign bit. |
5186 | 72 | IntVal ^= (uint64_t)isNegative << 31; |
5187 | 72 | Parser.Lex(); // Eat the token. |
5188 | 72 | Operands.push_back(ARMOperand::CreateImm( |
5189 | 72 | MCConstantExpr::create(IntVal, getContext()), |
5190 | 72 | S, Parser.getTok().getLoc())); |
5191 | 72 | return MatchOperand_Success; |
5192 | 72 | } |
5193 | | // Also handle plain integers. Instructions which allow floating point |
5194 | | // immediates also allow a raw encoded 8-bit value. |
5195 | 290 | if (Tok.is(AsmToken::Integer) && isFconst) { |
5196 | 278 | bool valid; |
5197 | 278 | int64_t Val = Tok.getIntVal(valid); |
5198 | 278 | if (!valid) |
5199 | 0 | return MatchOperand_ParseFail; |
5200 | 278 | Parser.Lex(); // Eat the token. |
5201 | 278 | if (Val > 255 || Val < 0) { |
5202 | | //Error(Loc, "encoded floating point value out of range"); |
5203 | 8 | return MatchOperand_ParseFail; |
5204 | 8 | } |
5205 | 270 | float RealVal = ARM_AM::getFPImmFloat(Val); |
5206 | 270 | if (APFloat(RealVal).bitcastToAPInt().getActiveBits() > 64) |
5207 | 0 | return MatchOperand_ParseFail; |
5208 | 270 | Val = APFloat(RealVal).bitcastToAPInt().getZExtValue(); |
5209 | | |
5210 | 270 | Operands.push_back(ARMOperand::CreateImm( |
5211 | 270 | MCConstantExpr::create(Val, getContext()), S, |
5212 | 270 | Parser.getTok().getLoc())); |
5213 | 270 | return MatchOperand_Success; |
5214 | 270 | } |
5215 | | |
5216 | | //Error(Loc, "invalid floating point immediate"); |
5217 | 12 | return MatchOperand_ParseFail; |
5218 | 290 | } |
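
As a side note on the sign handling above (IntVal is XOR-ed with isNegative shifted into bit 31): negating an IEEE-754 single precision value amounts to flipping bit 31 of its encoding. A small self-contained illustration follows; negateViaSignBit is a hypothetical helper, not part of the parser.

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Toggling bit 31 of a float's raw encoding negates it (IEEE-754 sign bit).
    static float negateViaSignBit(float f) {
      uint32_t bits;
      std::memcpy(&bits, &f, sizeof(bits));  // reinterpret the encoding safely
      bits ^= UINT32_C(1) << 31;             // flip only the sign bit
      float out;
      std::memcpy(&out, &bits, sizeof(out));
      return out;
    }

    int main() {
      assert(negateViaSignBit(1.5f) == -1.5f);
      assert(negateViaSignBit(-0.25f) == 0.25f);
      return 0;
    }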
5219 | | |
5220 | | /// Parse an arm instruction operand. For now this parses the operand regardless
5221 | | /// of the mnemonic. |
5222 | | bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic, unsigned int &ErrorCode) |
5223 | 346k | { |
5224 | 346k | MCAsmParser &Parser = getParser(); |
5225 | 346k | SMLoc S, E; |
5226 | | |
5227 | | // Check if the current operand has a custom associated parser, if so, try to |
5228 | | // custom parse the operand, or fallback to the general approach. |
5229 | 346k | OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic, ErrorCode); |
5230 | 346k | if (ResTy == MatchOperand_Success) |
5231 | 17.4k | return false; |
5232 | | // If there wasn't a custom match, try the generic matcher below. Otherwise, |
5233 | | // there was a match, but an error occurred, in which case, just return that |
5234 | | // the operand parsing failed. |
5235 | 328k | if (ResTy == MatchOperand_ParseFail) |
5236 | 4.09k | return true; |
5237 | | |
5238 | 324k | switch (getLexer().getKind()) { |
5239 | 4.33k | default: |
5240 | | //Error(Parser.getTok().getLoc(), "unexpected token in operand"); |
5241 | 4.33k | ErrorCode = KS_ERR_ASM_ARM_INVALIDOPERAND; |
5242 | 4.33k | return true; |
5243 | 206k | case AsmToken::Identifier: { |
5244 | | // If we've seen a branch mnemonic, the next operand must be a label. This |
5245 | | // is true even if the label is a register name. So "br r1" means branch to |
5246 | | // label "r1". |
5247 | 206k | bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl"; |
5248 | 206k | if (!ExpectLabel) { |
5249 | 195k | if (!tryParseRegisterWithWriteBack(Operands)) |
5250 | 93.4k | return false; |
5251 | 102k | int Res = tryParseShiftRegister(Operands); |
5252 | 102k | if (Res == 0) // success |
5253 | 1.10k | return false; |
5254 | 101k | else if (Res == -1) // irrecoverable error |
5255 | 334 | return true; |
5256 | | // If this is VMRS, check for the apsr_nzcv operand. |
5257 | 100k | if (Mnemonic == "vmrs" && |
5258 | 100k | Parser.getTok().getString().equals_lower("apsr_nzcv")) { |
5259 | 0 | S = Parser.getTok().getLoc(); |
5260 | 0 | Parser.Lex(); |
5261 | 0 | Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S)); |
5262 | 0 | return false; |
5263 | 0 | } |
5264 | 100k | } |
5265 | | |
5266 | | // Fall through for the Identifier case that is not a register or a
5267 | | // special name. |
5268 | 206k | } |
5269 | 120k | case AsmToken::LParen: // parenthesized expressions like (_strcmp-4) |
5270 | 155k | case AsmToken::Integer: // things like 1f and 2b as a branch targets |
5271 | 156k | case AsmToken::String: // quoted label names. |
5272 | 191k | case AsmToken::Dot: { // . as a branch target |
5273 | | // This was not a register so parse other operands that start with an |
5274 | | // identifier (like labels) as expressions and create them as immediates. |
5275 | 191k | const MCExpr *IdVal; |
5276 | 191k | S = Parser.getTok().getLoc(); |
5277 | 191k | if (getParser().parseExpression(IdVal)) |
5278 | 17.6k | return true; |
5279 | 173k | E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); |
5280 | 173k | Operands.push_back(ARMOperand::CreateImm(IdVal, S, E)); |
5281 | 173k | return false; |
5282 | 191k | } |
5283 | 1.74k | case AsmToken::LBrac: |
5284 | 1.74k | return parseMemory(Operands); |
5285 | 1.01k | case AsmToken::LCurly: |
5286 | 1.01k | return parseRegisterList(Operands); |
5287 | 1.91k | case AsmToken::Dollar: |
5288 | 24.4k | case AsmToken::Hash: { |
5289 | | // #42 -> immediate. |
5290 | 24.4k | S = Parser.getTok().getLoc(); |
5291 | 24.4k | Parser.Lex(); |
5292 | | |
5293 | 24.4k | if (Parser.getTok().isNot(AsmToken::Colon)) { |
5294 | 24.3k | bool isNegative = Parser.getTok().is(AsmToken::Minus); |
5295 | 24.3k | const MCExpr *ImmVal; |
5296 | 24.3k | if (getParser().parseExpression(ImmVal)) |
5297 | 8.41k | return true; |
5298 | 15.9k | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal); |
5299 | 15.9k | if (CE) { |
5300 | 3.85k | int32_t Val = CE->getValue(); |
5301 | 3.85k | if (isNegative && Val == 0) |
5302 | 288 | ImmVal = MCConstantExpr::create(INT32_MIN, getContext()); |
5303 | 3.85k | } |
5304 | 15.9k | E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); |
5305 | 15.9k | Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E)); |
5306 | | |
5307 | | // There can be a trailing '!' on operands that we want as a separate |
5308 | | // '!' Token operand. Handle that here. For example, the compatibility |
5309 | | // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'. |
5310 | 15.9k | if (Parser.getTok().is(AsmToken::Exclaim)) { |
5311 | 85 | Operands.push_back(ARMOperand::CreateToken(Parser.getTok().getString(), |
5312 | 85 | Parser.getTok().getLoc())); |
5313 | 85 | Parser.Lex(); // Eat exclaim token |
5314 | 85 | } |
5315 | 15.9k | return false; |
5316 | 24.3k | } |
5317 | | // w/ a ':' after the '#', it's just like a plain ':'. |
5318 | | // FALLTHROUGH |
5319 | 24.4k | } |
5320 | 82 | case AsmToken::Colon: { |
5321 | 82 | S = Parser.getTok().getLoc(); |
5322 | | // ":lower16:" and ":upper16:" expression prefixes |
5323 | | // FIXME: Check it's an expression prefix, |
5324 | | // e.g. (FOO - :lower16:BAR) isn't legal. |
5325 | 82 | ARMMCExpr::VariantKind RefKind; |
5326 | 82 | if (parsePrefix(RefKind)) |
5327 | 45 | return true; |
5328 | | |
5329 | 37 | const MCExpr *SubExprVal; |
5330 | 37 | if (getParser().parseExpression(SubExprVal)) |
5331 | 7 | return true; |
5332 | | |
5333 | 30 | const MCExpr *ExprVal = ARMMCExpr::create(RefKind, SubExprVal, |
5334 | 30 | getContext()); |
5335 | 30 | E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); |
5336 | 30 | Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E)); |
5337 | 30 | return false; |
5338 | 37 | } |
5339 | 7.00k | case AsmToken::Equal: { |
5340 | 7.00k | S = Parser.getTok().getLoc(); |
5341 | 7.00k | if (Mnemonic != "ldr") { // only parse for ldr pseudo (e.g. ldr r0, =val) |
5342 | | //return Error(S, "unexpected token in operand"); |
5343 | 60 | ErrorCode = KS_ERR_ASM_ARM_INVALIDOPERAND; |
5344 | 60 | return true; |
5345 | 60 | } |
5346 | | |
5347 | 6.94k | Parser.Lex(); // Eat '=' |
5348 | 6.94k | const MCExpr *SubExprVal; |
5349 | 6.94k | if (getParser().parseExpression(SubExprVal)) |
5350 | 526 | return true; |
5351 | 6.41k | E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); |
5352 | | |
5353 | 6.41k | const MCExpr *CPLoc = |
5354 | 6.41k | getTargetStreamer().addConstantPoolEntry(SubExprVal, S); |
5355 | 6.41k | Operands.push_back(ARMOperand::CreateImm(CPLoc, S, E)); |
5356 | 6.41k | return false; |
5357 | 6.94k | } |
5358 | 324k | } |
5359 | 324k | } |
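
Several of the immediate cases above share the convention that a literal "#-0" is stored as INT32_MIN so later checks can distinguish it from "#0". A toy sketch of that convention; encodeImm is an invented name, not a parser API.

    #include <cassert>
    #include <cstdint>

    // "#-0" is marked with INT32_MIN; any other value passes through unchanged
    // (the '-' of a nonzero immediate is already folded in by expression parsing).
    static int64_t encodeImm(bool isNegative, int32_t val) {
      if (isNegative && val == 0)
        return INT32_MIN;
      return val;
    }

    int main() {
      assert(encodeImm(true, 0) == INT32_MIN);  // "#-0"
      assert(encodeImm(false, 0) == 0);         // "#0"
      assert(encodeImm(true, -4) == -4);        // "#-4"
      return 0;
    }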
5360 | | |
5361 | | // parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e. |
5362 | | // :lower16: and :upper16:. |
5363 | | bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) |
5364 | 82 | { |
5365 | 82 | MCAsmParser &Parser = getParser(); |
5366 | 82 | RefKind = ARMMCExpr::VK_ARM_None; |
5367 | | |
5368 | | // consume an optional '#' (GNU compatibility) |
5369 | 82 | if (getLexer().is(AsmToken::Hash)) |
5370 | 0 | Parser.Lex(); |
5371 | | |
5372 | | // :lower16: and :upper16: modifiers |
5373 | 82 | assert(getLexer().is(AsmToken::Colon) && "expected a :"); |
5374 | 82 | Parser.Lex(); // Eat ':' |
5375 | | |
5376 | 82 | if (getLexer().isNot(AsmToken::Identifier)) { |
5377 | | //Error(Parser.getTok().getLoc(), "expected prefix identifier in operand"); |
5378 | 14 | return true; |
5379 | 14 | } |
5380 | | |
5381 | 68 | enum { |
5382 | 68 | COFF = (1 << MCObjectFileInfo::IsCOFF), |
5383 | 68 | ELF = (1 << MCObjectFileInfo::IsELF), |
5384 | 68 | MACHO = (1 << MCObjectFileInfo::IsMachO) |
5385 | 68 | }; |
5386 | 68 | static const struct PrefixEntry { |
5387 | 68 | const char *Spelling; |
5388 | 68 | ARMMCExpr::VariantKind VariantKind; |
5389 | 68 | uint8_t SupportedFormats; |
5390 | 68 | } PrefixEntries[] = { |
5391 | 68 | { "lower16", ARMMCExpr::VK_ARM_LO16, COFF | ELF | MACHO }, |
5392 | 68 | { "upper16", ARMMCExpr::VK_ARM_HI16, COFF | ELF | MACHO }, |
5393 | 68 | }; |
5394 | | |
5395 | 68 | StringRef IDVal = Parser.getTok().getIdentifier(); |
5396 | | |
5397 | 68 | const auto &Prefix = |
5398 | 68 | std::find_if(std::begin(PrefixEntries), std::end(PrefixEntries), |
5399 | 96 | [&IDVal](const PrefixEntry &PE) { |
5400 | 96 | return PE.Spelling == IDVal; |
5401 | 96 | }); |
5402 | 68 | if (Prefix == std::end(PrefixEntries)) { |
5403 | | //Error(Parser.getTok().getLoc(), "unexpected prefix in operand"); |
5404 | 28 | return true; |
5405 | 28 | } |
5406 | | |
5407 | 40 | uint8_t CurrentFormat; |
5408 | 40 | switch (getContext().getObjectFileInfo()->getObjectFileType()) { |
5409 | 0 | case MCObjectFileInfo::IsMachO: |
5410 | 0 | CurrentFormat = MACHO; |
5411 | 0 | break; |
5412 | 40 | case MCObjectFileInfo::IsELF: |
5413 | 40 | CurrentFormat = ELF; |
5414 | 40 | break; |
5415 | 0 | case MCObjectFileInfo::IsCOFF: |
5416 | 0 | CurrentFormat = COFF; |
5417 | 0 | break; |
5418 | 40 | } |
5419 | | |
5420 | 40 | if (~Prefix->SupportedFormats & CurrentFormat) { |
5421 | | //Error(Parser.getTok().getLoc(), |
5422 | | // "cannot represent relocation in the current file format"); |
5423 | 0 | return true; |
5424 | 0 | } |
5425 | | |
5426 | 40 | RefKind = Prefix->VariantKind; |
5427 | 40 | Parser.Lex(); |
5428 | | |
5429 | 40 | if (getLexer().isNot(AsmToken::Colon)) { |
5430 | | //Error(Parser.getTok().getLoc(), "unexpected token after prefix"); |
5431 | 3 | return true; |
5432 | 3 | } |
5433 | 37 | Parser.Lex(); // Eat the last ':' |
5434 | | |
5435 | 37 | return false; |
5436 | 40 | } |
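
What the :lower16:/:upper16: prefixes select is simply the low or high half of a 32-bit value, as used by a movw/movt pair to build a full constant. A short illustration with an arbitrary example value (not taken from the source):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t addr = 0x12345678;                   // arbitrary example value
      unsigned lo = addr & 0xFFFFu;                 // :lower16:addr
      unsigned hi = (addr >> 16) & 0xFFFFu;         // :upper16:addr
      std::printf("movw r0, #0x%04X\n", lo);        // movw r0, #0x5678
      std::printf("movt r0, #0x%04X\n", hi);        // movt r0, #0x1234
      return 0;
    }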
5437 | | |
5438 | | /// \brief Given a mnemonic, split out possible predication code and carry |
5439 | | /// setting letters to form a canonical mnemonic and flags. |
5440 | | // |
5441 | | // FIXME: Would be nice to autogen this. |
5442 | | // FIXME: This is a bit of a maze of special cases. |
5443 | | StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic, |
5444 | | unsigned &PredicationCode, |
5445 | | bool &CarrySetting, |
5446 | | unsigned &ProcessorIMod, |
5447 | 161k | StringRef &ITMask) { |
5448 | 161k | PredicationCode = ARMCC::AL; |
5449 | 161k | CarrySetting = false; |
5450 | 161k | ProcessorIMod = 0; |
5451 | | |
5452 | | // Ignore some mnemonics we know aren't predicated forms. |
5453 | | // |
5454 | | // FIXME: Would be nice to autogen this. |
5455 | 161k | if ((Mnemonic == "movs" && isThumb()) || |
5456 | 161k | Mnemonic == "teq" || Mnemonic == "vceq" || Mnemonic == "svc" || |
5457 | 161k | Mnemonic == "mls" || Mnemonic == "smmls" || Mnemonic == "vcls" || |
5458 | 161k | Mnemonic == "vmls" || Mnemonic == "vnmls" || Mnemonic == "vacge" || |
5459 | 161k | Mnemonic == "vcge" || Mnemonic == "vclt" || Mnemonic == "vacgt" || |
5460 | 161k | Mnemonic == "vaclt" || Mnemonic == "vacle" || Mnemonic == "hlt" || |
5461 | 161k | Mnemonic == "vcgt" || Mnemonic == "vcle" || Mnemonic == "smlal" || |
5462 | 161k | Mnemonic == "umaal" || Mnemonic == "umlal" || Mnemonic == "vabal" || |
5463 | 161k | Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" || |
5464 | 161k | Mnemonic == "fmuls" || Mnemonic == "vmaxnm" || Mnemonic == "vminnm" || |
5465 | 161k | Mnemonic == "vcvta" || Mnemonic == "vcvtn" || Mnemonic == "vcvtp" || |
5466 | 161k | Mnemonic == "vcvtm" || Mnemonic == "vrinta" || Mnemonic == "vrintn" || |
5467 | 161k | Mnemonic == "vrintp" || Mnemonic == "vrintm" || Mnemonic == "hvc" || |
5468 | 161k | Mnemonic.startswith("vsel") || Mnemonic == "vins" || Mnemonic == "vmovx" || |
5469 | 161k | Mnemonic == "bxns" || Mnemonic == "blxns") |
5470 | 1.49k | return Mnemonic; |
5471 | | |
5472 | | // First, split out any predication code. Ignore mnemonics we know aren't |
5473 | | // predicated but do have a carry-set and so weren't caught above. |
5474 | 159k | if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" && |
5475 | 159k | Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" && |
5476 | 159k | Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" && |
5477 | 159k | Mnemonic != "sbcs" && Mnemonic != "rscs") { |
5478 | 155k | unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2)) |
5479 | 155k | .Case("eq", ARMCC::EQ) |
5480 | 155k | .Case("ne", ARMCC::NE) |
5481 | 155k | .Case("hs", ARMCC::HS) |
5482 | 155k | .Case("cs", ARMCC::HS) |
5483 | 155k | .Case("lo", ARMCC::LO) |
5484 | 155k | .Case("cc", ARMCC::LO) |
5485 | 155k | .Case("mi", ARMCC::MI) |
5486 | 155k | .Case("pl", ARMCC::PL) |
5487 | 155k | .Case("vs", ARMCC::VS) |
5488 | 155k | .Case("vc", ARMCC::VC) |
5489 | 155k | .Case("hi", ARMCC::HI) |
5490 | 155k | .Case("ls", ARMCC::LS) |
5491 | 155k | .Case("ge", ARMCC::GE) |
5492 | 155k | .Case("lt", ARMCC::LT) |
5493 | 155k | .Case("gt", ARMCC::GT) |
5494 | 155k | .Case("le", ARMCC::LE) |
5495 | 155k | .Case("al", ARMCC::AL) |
5496 | 155k | .Default(~0U); |
5497 | 155k | if (CC != ~0U) { |
5498 | 13.3k | Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2); |
5499 | 13.3k | PredicationCode = CC; |
5500 | 13.3k | } |
5501 | 155k | } |
5502 | | |
5503 | | // Next, determine if we have a carry setting bit. We explicitly ignore all |
5504 | | // the instructions we know end in 's'. |
5505 | 159k | if (Mnemonic.endswith("s") && |
5506 | 159k | !(Mnemonic == "cps" || Mnemonic == "mls" || |
5507 | 14.3k | Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" || |
5508 | 14.3k | Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" || |
5509 | 14.3k | Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" || |
5510 | 14.3k | Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" || |
5511 | 14.3k | Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" || |
5512 | 14.3k | Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" || |
5513 | 14.3k | Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" || |
5514 | 14.3k | Mnemonic == "vfms" || Mnemonic == "vfnms" || Mnemonic == "fconsts" || |
5515 | 14.3k | (Mnemonic == "movs" && isThumb()))) { |
5516 | 12.9k | Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1); |
5517 | 12.9k | CarrySetting = true; |
5518 | 12.9k | } |
5519 | | |
5520 | | // The "cps" instruction can have an interrupt mode operand which is glued into
5521 | | // the mnemonic. Check if this is the case, split it and parse the imod op |
5522 | 159k | if (Mnemonic.startswith("cps")) { |
5523 | | // Split out any imod code. |
5524 | 1.13k | unsigned IMod = |
5525 | 1.13k | StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2)) |
5526 | 1.13k | .Case("ie", ARM_PROC::IE) |
5527 | 1.13k | .Case("id", ARM_PROC::ID) |
5528 | 1.13k | .Default(~0U); |
5529 | 1.13k | if (IMod != ~0U) { |
5530 | 321 | Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2); |
5531 | 321 | ProcessorIMod = IMod; |
5532 | 321 | } |
5533 | 1.13k | } |
5534 | | |
5535 | | // The "it" instruction has the condition mask on the end of the mnemonic. |
5536 | 159k | if (Mnemonic.startswith("it")) { |
5537 | 4.23k | ITMask = Mnemonic.slice(2, Mnemonic.size()); |
5538 | 4.23k | Mnemonic = Mnemonic.slice(0, 2); |
5539 | 4.23k | } |
5540 | | |
5541 | 159k | return Mnemonic; |
5542 | 161k | } |
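
To make the splitting order concrete, here is a toy sketch (splitDemo is an invented name, not this parser) that peels a two-letter condition code off the end first and then a trailing 's', as the routine above does for a UAL spelling such as "addseq". It deliberately ignores the exclusion lists for mnemonics that merely end in those letters ("teq", "movs", and so on).

    #include <iostream>
    #include <string>

    struct SplitResult { std::string base, cond; bool setsFlags; };

    static SplitResult splitDemo(std::string m) {
      SplitResult r{m, "al", false};
      static const char *ccs[] = {"eq","ne","hs","cs","lo","cc","mi","pl",
                                  "vs","vc","hi","ls","ge","lt","gt","le","al"};
      if (r.base.size() > 2) {
        std::string tail = r.base.substr(r.base.size() - 2);
        for (const char *cc : ccs) {
          if (tail == cc) {                           // condition code comes off first
            r.cond = cc;
            r.base.resize(r.base.size() - 2);
            break;
          }
        }
      }
      if (!r.base.empty() && r.base.back() == 's') {  // then the carry-setting 's'
        r.setsFlags = true;
        r.base.pop_back();
      }
      return r;
    }

    int main() {
      SplitResult r = splitDemo("addseq");
      std::cout << r.base << " cond=" << r.cond
                << " S=" << (r.setsFlags ? "yes" : "no") << "\n";  // add cond=eq S=yes
      return 0;
    }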
5543 | | |
5544 | | /// \brief Given a canonical mnemonic, determine if the instruction ever allows |
5545 | | /// inclusion of carry set or predication code operands. |
5546 | | // |
5547 | | // FIXME: It would be nice to autogen this. |
5548 | | void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst, |
5549 | | bool &CanAcceptCarrySet, |
5550 | 161k | bool &CanAcceptPredicationCode) { |
5551 | 161k | CanAcceptCarrySet = |
5552 | 161k | Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" || |
5553 | 161k | Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" || |
5554 | 161k | Mnemonic == "add" || Mnemonic == "adc" || Mnemonic == "mul" || |
5555 | 161k | Mnemonic == "bic" || Mnemonic == "asr" || Mnemonic == "orr" || |
5556 | 161k | Mnemonic == "mvn" || Mnemonic == "rsb" || Mnemonic == "rsc" || |
5557 | 161k | Mnemonic == "orn" || Mnemonic == "sbc" || Mnemonic == "eor" || |
5558 | 161k | Mnemonic == "neg" || Mnemonic == "vfm" || Mnemonic == "vfnm" || |
5559 | 161k | (!isThumb() && |
5560 | 129k | (Mnemonic == "smull" || Mnemonic == "mov" || Mnemonic == "mla" || |
5561 | 65.9k | Mnemonic == "smlal" || Mnemonic == "umlal" || Mnemonic == "umull")); |
5562 | | |
5563 | 161k | if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" || |
5564 | 161k | Mnemonic == "cps" || Mnemonic == "it" || Mnemonic == "cbz" || |
5565 | 161k | Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic == "udf" || |
5566 | 161k | Mnemonic.startswith("crc32") || Mnemonic.startswith("cps") || |
5567 | 161k | Mnemonic.startswith("vsel") || Mnemonic == "vmaxnm" || |
5568 | 161k | Mnemonic == "vminnm" || Mnemonic == "vcvta" || Mnemonic == "vcvtn" || |
5569 | 161k | Mnemonic == "vcvtp" || Mnemonic == "vcvtm" || Mnemonic == "vrinta" || |
5570 | 161k | Mnemonic == "vrintn" || Mnemonic == "vrintp" || Mnemonic == "vrintm" || |
5571 | 161k | Mnemonic.startswith("aes") || Mnemonic == "hvc" || Mnemonic == "setpan" || |
5572 | 161k | Mnemonic.startswith("sha1") || Mnemonic.startswith("sha256") || |
5573 | 161k | (FullInst.startswith("vmull") && FullInst.endswith(".p64")) || |
5574 | 161k | Mnemonic == "vmovx" || Mnemonic == "vins") { |
5575 | | // These mnemonics are never predicable |
5576 | 7.79k | CanAcceptPredicationCode = false; |
5577 | 153k | } else if (!isThumb()) { |
5578 | | // Some instructions are only predicable in Thumb mode |
5579 | 77.7k | CanAcceptPredicationCode = |
5580 | 77.7k | Mnemonic != "cdp2" && Mnemonic != "clrex" && Mnemonic != "mcr2" && |
5581 | 77.7k | Mnemonic != "mcrr2" && Mnemonic != "mrc2" && Mnemonic != "mrrc2" && |
5582 | 77.7k | Mnemonic != "dmb" && Mnemonic != "dsb" && Mnemonic != "isb" && |
5583 | 77.7k | Mnemonic != "pld" && Mnemonic != "pli" && Mnemonic != "pldw" && |
5584 | 77.7k | Mnemonic != "ldc2" && Mnemonic != "ldc2l" && Mnemonic != "stc2" && |
5585 | 77.7k | Mnemonic != "stc2l" && !Mnemonic.startswith("rfe") && |
5586 | 77.7k | !Mnemonic.startswith("srs"); |
5587 | 77.7k | } else if (isThumbOne()) { |
5588 | 3.91k | if (hasV6MOps()) |
5589 | 3.21k | CanAcceptPredicationCode = Mnemonic != "movs"; |
5590 | 701 | else |
5591 | 701 | CanAcceptPredicationCode = Mnemonic != "nop" && Mnemonic != "movs"; |
5592 | 3.91k | } else |
5593 | 71.9k | CanAcceptPredicationCode = true; |
5594 | 161k | } |
5595 | | |
5596 | | // \brief Some Thumb instructions have two operand forms that are not |
5597 | | // available as three operand forms; convert to the two operand form if possible.
5598 | | // |
5599 | | // FIXME: We would really like to be able to tablegen'erate this. |
5600 | | void ARMAsmParser::tryConvertingToTwoOperandForm(StringRef Mnemonic, |
5601 | | bool CarrySetting, |
5602 | 119k | OperandVector &Operands) { |
5603 | 119k | if (Operands.size() != 6) |
5604 | 107k | return; |
5605 | | |
5606 | 11.5k | const auto &Op3 = static_cast<ARMOperand &>(*Operands[3]); |
5607 | 11.5k | auto &Op4 = static_cast<ARMOperand &>(*Operands[4]); |
5608 | 11.5k | if (!Op3.isReg() || !Op4.isReg()) |
5609 | 368 | return; |
5610 | | |
5611 | 11.1k | auto Op3Reg = Op3.getReg(); |
5612 | 11.1k | auto Op4Reg = Op4.getReg(); |
5613 | | |
5614 | | // For most Thumb2 cases we just generate the 3 operand form and reduce |
5615 | | // it in processInstruction(), but the 3 operand form of ADD (t2ADDrr) |
5616 | | // won't accept SP or PC so we do the transformation here taking care |
5617 | | // with immediate range in the 'add sp, sp #imm' case. |
5618 | 11.1k | auto &Op5 = static_cast<ARMOperand &>(*Operands[5]); |
5619 | 11.1k | if (isThumbTwo()) { |
5620 | 6.41k | if (Mnemonic != "add") |
5621 | 2.27k | return; |
5622 | 4.14k | bool TryTransform = Op3Reg == ARM::PC || Op4Reg == ARM::PC || |
5623 | 4.14k | (Op5.isReg() && Op5.getReg() == ARM::PC); |
5624 | 4.14k | if (!TryTransform) { |
5625 | 2.93k | TryTransform = (Op3Reg == ARM::SP || Op4Reg == ARM::SP || |
5626 | 2.93k | (Op5.isReg() && Op5.getReg() == ARM::SP)) && |
5627 | 2.93k | !(Op3Reg == ARM::SP && Op4Reg == ARM::SP && |
5628 | 2.36k | Op5.isImm() && !Op5.isImm0_508s4()); |
5629 | 2.93k | } |
5630 | 4.14k | if (!TryTransform) |
5631 | 1.11k | return; |
5632 | 4.75k | } else if (!isThumbOne()) |
5633 | 3.69k | return; |
5634 | | |
5635 | 4.10k | if (!(Mnemonic == "add" || Mnemonic == "sub" || Mnemonic == "and" || |
5636 | 4.10k | Mnemonic == "eor" || Mnemonic == "lsl" || Mnemonic == "lsr" || |
5637 | 4.10k | Mnemonic == "asr" || Mnemonic == "adc" || Mnemonic == "sbc" || |
5638 | 4.10k | Mnemonic == "ror" || Mnemonic == "orr" || Mnemonic == "bic")) |
5639 | 20 | return; |
5640 | | |
5641 | | // If first 2 operands of a 3 operand instruction are the same |
5642 | | // then transform to 2 operand version of the same instruction |
5643 | | // e.g. 'adds r0, r0, #1' transforms to 'adds r0, #1' |
5644 | 4.08k | bool Transform = Op3Reg == Op4Reg; |
5645 | | |
5646 | | // For commutative operations, we might be able to transform if we swap
5647 | | // Op4 and Op5. The 'ADD Rdm, SP, Rdm' form is already handled specially |
5648 | | // as tADDrsp. |
5649 | 4.08k | const ARMOperand *LastOp = &Op5; |
5650 | 4.08k | bool Swap = false; |
5651 | 4.08k | if (!Transform && Op5.isReg() && Op3Reg == Op5.getReg() && |
5652 | 4.08k | ((Mnemonic == "add" && Op4Reg != ARM::SP) || |
5653 | 457 | Mnemonic == "and" || Mnemonic == "eor" || |
5654 | 457 | Mnemonic == "adc" || Mnemonic == "orr")) { |
5655 | 57 | Swap = true; |
5656 | 57 | LastOp = &Op4; |
5657 | 57 | Transform = true; |
5658 | 57 | } |
5659 | | |
5660 | | // If both registers are the same then remove one of them from |
5661 | | // the operand list, with certain exceptions. |
5662 | 4.08k | if (Transform) { |
5663 | | // Don't transform 'adds Rd, Rd, Rm' or 'sub{s} Rd, Rd, Rm' because the |
5664 | | // 2 operand forms don't exist. |
5665 | 1.93k | if (((Mnemonic == "add" && CarrySetting) || Mnemonic == "sub") && |
5666 | 1.93k | LastOp->isReg()) |
5667 | 479 | Transform = false; |
5668 | | |
5669 | | // Don't transform 'add/sub{s} Rd, Rd, #imm' if the immediate fits into |
5670 | | // 3-bits because the ARMARM says not to. |
5671 | 1.93k | if ((Mnemonic == "add" || Mnemonic == "sub") && LastOp->isImm0_7()) |
5672 | 37 | Transform = false; |
5673 | 1.93k | } |
5674 | | |
5675 | 4.08k | if (Transform) { |
5676 | 1.41k | if (Swap) |
5677 | 56 | std::swap(Op4, Op5); |
5678 | 1.41k | Operands.erase(Operands.begin() + 3); |
5679 | 1.41k | } |
5680 | 4.08k | } |
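
A toy illustration of the collapse described above, under the assumption that the destination register simply repeats the first source register ("adds r0, r0, #1" becomes "adds r0, #1"); the SP/PC, immediate-range and commutativity special cases handled by the real routine are left out.

    #include <cstddef>
    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> ops = {"adds", "r0", "r0", "#1"};
      if (ops.size() == 4 && ops[1] == ops[2])
        ops.erase(ops.begin() + 2);                // drop the redundant source reg
      for (std::size_t i = 0; i < ops.size(); ++i)
        std::cout << ops[i] << (i + 1 < ops.size() ? " " : "\n");  // adds r0 #1
      return 0;
    }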
5681 | | |
5682 | | bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic, |
5683 | 108k | OperandVector &Operands) { |
5684 | | // FIXME: This is all horribly hacky. We really need a better way to deal |
5685 | | // with optional operands like this in the matcher table. |
5686 | | |
5687 | | // The 'mov' mnemonic is special. One variant has a cc_out operand, while |
5688 | | // another does not. Specifically, the MOVW instruction does not. So we |
5689 | | // special case it here and remove the defaulted (non-setting) cc_out |
5690 | | // operand if that's the instruction we're trying to match. |
5691 | | // |
5692 | | // We do this as post-processing of the explicit operands rather than just |
5693 | | // conditionally adding the cc_out in the first place because we need |
5694 | | // to check the type of the parsed immediate operand. |
5695 | 108k | if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() && |
5696 | 108k | !static_cast<ARMOperand &>(*Operands[4]).isModImm() && |
5697 | 108k | static_cast<ARMOperand &>(*Operands[4]).isImm0_65535Expr() && |
5698 | 108k | static_cast<ARMOperand &>(*Operands[1]).getReg() == 0) |
5699 | 61 | return true; |
5700 | | |
5701 | | // Register-register 'add' for thumb does not have a cc_out operand |
5702 | | // when there are only two register operands. |
5703 | 107k | if (isThumb() && Mnemonic == "add" && Operands.size() == 5 && |
5704 | 107k | static_cast<ARMOperand &>(*Operands[3]).isReg() && |
5705 | 107k | static_cast<ARMOperand &>(*Operands[4]).isReg() && |
5706 | 107k | static_cast<ARMOperand &>(*Operands[1]).getReg() == 0) |
5707 | 268 | return true; |
5708 | | // Register-register 'add' for thumb does not have a cc_out operand |
5709 | | // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do |
5710 | | // have to check the immediate range here since Thumb2 has a variant |
5711 | | // that can handle a different range and has a cc_out operand. |
5712 | 107k | if (((isThumb() && Mnemonic == "add") || |
5713 | 107k | (isThumbTwo() && Mnemonic == "sub")) && |
5714 | 107k | Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() && |
5715 | 107k | static_cast<ARMOperand &>(*Operands[4]).isReg() && |
5716 | 107k | static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::SP && |
5717 | 107k | static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 && |
5718 | 107k | ((Mnemonic == "add" && static_cast<ARMOperand &>(*Operands[5]).isReg()) || |
5719 | 1.24k | static_cast<ARMOperand &>(*Operands[5]).isImm0_1020s4())) |
5720 | 695 | return true; |
5721 | | // For Thumb2, add/sub immediate does not have a cc_out operand for the |
5722 | | // imm0_4095 variant. That's the least-preferred variant when |
5723 | | // selecting via the generic "add" mnemonic, so to know that we |
5724 | | // should remove the cc_out operand, we have to explicitly check that |
5725 | | // it's not one of the other variants. Ugh. |
5726 | 106k | if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") && |
5727 | 106k | Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() && |
5728 | 106k | static_cast<ARMOperand &>(*Operands[4]).isReg() && |
5729 | 106k | static_cast<ARMOperand &>(*Operands[5]).isImm()) { |
5730 | | // Nest conditions rather than one big 'if' statement for readability. |
5731 | | // |
5732 | | // If both registers are low, we're in an IT block, and the immediate is |
5733 | | // in range, we should use encoding T1 instead, which has a cc_out. |
5734 | 2.31k | if (inITBlock() && |
5735 | 2.31k | isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) && |
5736 | 2.31k | isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) && |
5737 | 2.31k | static_cast<ARMOperand &>(*Operands[5]).isImm0_7()) |
5738 | 0 | return false; |
5739 | | // Check against T3. If the second register is the PC, this is an |
5740 | | // alternate form of ADR, which uses encoding T4, so check for that too. |
5741 | 2.31k | if (static_cast<ARMOperand &>(*Operands[4]).getReg() != ARM::PC && |
5742 | 2.31k | static_cast<ARMOperand &>(*Operands[5]).isT2SOImm()) |
5743 | 662 | return false; |
5744 | | |
5745 | | // Otherwise, we use encoding T4, which does not have a cc_out |
5746 | | // operand. |
5747 | 1.65k | return true; |
5748 | 2.31k | } |
5749 | | |
5750 | | // The thumb2 multiply instruction doesn't have a CCOut register, so |
5751 | | // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to |
5752 | | // use the 16-bit encoding or not. |
5753 | 104k | if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 && |
5754 | 104k | static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 && |
5755 | 104k | static_cast<ARMOperand &>(*Operands[3]).isReg() && |
5756 | 104k | static_cast<ARMOperand &>(*Operands[4]).isReg() && |
5757 | 104k | static_cast<ARMOperand &>(*Operands[5]).isReg() && |
5758 | | // If the registers aren't low regs, the destination reg isn't the |
5759 | | // same as one of the source regs, or the cc_out operand is zero |
5760 | | // outside of an IT block, we have to use the 32-bit encoding, so |
5761 | | // remove the cc_out operand. |
5762 | 104k | (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) || |
5763 | 146 | !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) || |
5764 | 146 | !isARMLowRegister(static_cast<ARMOperand &>(*Operands[5]).getReg()) || |
5765 | 146 | !inITBlock() || (static_cast<ARMOperand &>(*Operands[3]).getReg() != |
5766 | 13 | static_cast<ARMOperand &>(*Operands[5]).getReg() && |
5767 | 13 | static_cast<ARMOperand &>(*Operands[3]).getReg() != |
5768 | 5 | static_cast<ARMOperand &>(*Operands[4]).getReg()))) |
5769 | 133 | return true; |
5770 | | |
5771 | | // Also check the 'mul' syntax variant that doesn't specify an explicit |
5772 | | // destination register. |
5773 | 104k | if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 && |
5774 | 104k | static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 && |
5775 | 104k | static_cast<ARMOperand &>(*Operands[3]).isReg() && |
5776 | 104k | static_cast<ARMOperand &>(*Operands[4]).isReg() && |
5777 | | // If the registers aren't low regs or the cc_out operand is zero |
5778 | | // outside of an IT block, we have to use the 32-bit encoding, so |
5779 | | // remove the cc_out operand. |
5780 | 104k | (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) || |
5781 | 453 | !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) || |
5782 | 453 | !inITBlock())) |
5783 | 452 | return true; |
5784 | | |
5785 | | |
5786 | | |
5787 | | // Register-register 'add/sub' for thumb does not have a cc_out operand |
5788 | | // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also |
5789 | | // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't |
5790 | | // right, this will result in better diagnostics (which operand is off) |
5791 | | // anyway. |
5792 | 104k | if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") && |
5793 | 104k | (Operands.size() == 5 || Operands.size() == 6) && |
5794 | 104k | static_cast<ARMOperand &>(*Operands[3]).isReg() && |
5795 | 104k | static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::SP && |
5796 | 104k | static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 && |
5797 | 104k | (static_cast<ARMOperand &>(*Operands[4]).isImm() || |
5798 | 2.12k | (Operands.size() == 6 && |
5799 | 192 | static_cast<ARMOperand &>(*Operands[5]).isImm()))) |
5800 | 1.93k | return true; |
5801 | | |
5802 | 102k | return false; |
5803 | 104k | } |
5804 | | |
5805 | | bool ARMAsmParser::shouldOmitPredicateOperand(StringRef Mnemonic, |
5806 | 119k | OperandVector &Operands) { |
5807 | | // VRINT{Z, R, X} have a predicate operand in VFP, but not in NEON |
5808 | 119k | unsigned RegIdx = 3; |
5809 | 119k | if ((Mnemonic == "vrintz" || Mnemonic == "vrintx" || Mnemonic == "vrintr") && |
5810 | 119k | (static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f32" || |
5811 | 16 | static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f16")) { |
5812 | 5 | if (static_cast<ARMOperand &>(*Operands[3]).isToken() && |
5813 | 5 | (static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f32" || |
5814 | 5 | static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f16")) |
5815 | 0 | RegIdx = 4; |
5816 | | |
5817 | 5 | if (static_cast<ARMOperand &>(*Operands[RegIdx]).isReg() && |
5818 | 5 | (ARMMCRegisterClasses[ARM::DPRRegClassID].contains( |
5819 | 0 | static_cast<ARMOperand &>(*Operands[RegIdx]).getReg()) || |
5820 | 0 | ARMMCRegisterClasses[ARM::QPRRegClassID].contains( |
5821 | 0 | static_cast<ARMOperand &>(*Operands[RegIdx]).getReg()))) |
5822 | 0 | return true; |
5823 | 5 | } |
5824 | 119k | return false; |
5825 | 119k | } |
5826 | | |
5827 | 102k | static bool isDataTypeToken(StringRef Tok) { |
5828 | 102k | return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" || |
5829 | 102k | Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" || |
5830 | 102k | Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" || |
5831 | 102k | Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" || |
5832 | 102k | Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" || |
5833 | 102k | Tok == ".f" || Tok == ".d"; |
5834 | 102k | } |
5835 | | |
5836 | | // FIXME: This bit should probably be handled via an explicit match class |
5837 | | // in the .td files that matches the suffix instead of having it be |
5838 | | // a literal string token the way it is now. |
5839 | 13.9k | static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) { |
5840 | 13.9k | return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm"); |
5841 | 13.9k | } |
5842 | | static void applyMnemonicAliases(StringRef &Mnemonic, uint64_t Features, |
5843 | | unsigned VariantID); |
5844 | | |
5845 | | static bool RequiresVFPRegListValidation(StringRef Inst, |
5846 | | bool &AcceptSinglePrecisionOnly, |
5847 | 161k | bool &AcceptDoublePrecisionOnly) { |
5848 | 161k | if (Inst.size() < 7) |
5849 | 149k | return false; |
5850 | | |
5851 | 11.6k | if (Inst.startswith("fldm") || Inst.startswith("fstm")) { |
5852 | 1.09k | StringRef AddressingMode = Inst.substr(4, 2); |
5853 | 1.09k | if (AddressingMode == "ia" || AddressingMode == "db" || |
5854 | 1.09k | AddressingMode == "ea" || AddressingMode == "fd") { |
5855 | 604 | AcceptSinglePrecisionOnly = Inst[6] == 's'; |
5856 | 604 | AcceptDoublePrecisionOnly = Inst[6] == 'd' || Inst[6] == 'x'; |
5857 | 604 | return true; |
5858 | 604 | } |
5859 | 1.09k | } |
5860 | | |
5861 | 11.0k | return false; |
5862 | 11.6k | } |
5863 | | |
5864 | | /// Parse an arm instruction mnemonic followed by its operands. |
5865 | | bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name, |
5866 | | SMLoc NameLoc, OperandVector &Operands, unsigned int &ErrorCode) |
5867 | 161k | { |
5868 | 161k | MCAsmParser &Parser = getParser(); |
5869 | | // FIXME: Can this be done via tablegen in some fashion? |
5870 | 161k | bool RequireVFPRegisterListCheck; |
5871 | 161k | bool AcceptSinglePrecisionOnly; |
5872 | 161k | bool AcceptDoublePrecisionOnly; |
5873 | 161k | RequireVFPRegisterListCheck = |
5874 | 161k | RequiresVFPRegListValidation(Name, AcceptSinglePrecisionOnly, |
5875 | 161k | AcceptDoublePrecisionOnly); |
5876 | | |
5877 | | // Apply mnemonic aliases before doing anything else, as the destination |
5878 | | // mnemonic may include suffixes and we want to handle them normally.
5879 | | // The generic tblgen'erated code does this later, at the start of |
5880 | | // MatchInstructionImpl(), but that's too late for aliases that include |
5881 | | // any sort of suffix. |
5882 | 161k | uint64_t AvailableFeatures = getAvailableFeatures(); |
5883 | 161k | unsigned AssemblerDialect = getParser().getAssemblerDialect(); |
5884 | 161k | applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect); |
5885 | | |
5886 | | // First check for the ARM-specific .req directive. |
5887 | 161k | if (Parser.getTok().is(AsmToken::Identifier) && |
5888 | 161k | Parser.getTok().getIdentifier() == ".req") { |
5889 | 5 | parseDirectiveReq(Name, NameLoc); |
5890 | | // We always return 'error' for this, as we're done with this |
5891 | | // statement and don't need to match the 'instruction'.
5892 | 5 | ErrorCode = KS_ERR_ASM_DIRECTIVE_INVALID; |
5893 | 5 | return true; |
5894 | 5 | } |
5895 | | |
5896 | | // Create the leading tokens for the mnemonic, split by '.' characters. |
5897 | 161k | size_t Start = 0, Next = Name.find('.'); |
5898 | 161k | StringRef Mnemonic = Name.slice(Start, Next); |
5899 | | |
5900 | | // Split out the predication code and carry setting flag from the mnemonic. |
5901 | 161k | unsigned PredicationCode; |
5902 | 161k | unsigned ProcessorIMod; |
5903 | 161k | bool CarrySetting; |
5904 | 161k | StringRef ITMask; |
5905 | 161k | Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting, |
5906 | 161k | ProcessorIMod, ITMask); |
5907 | | |
5908 | | // In Thumb1, only the branch (B) instruction can be predicated. |
5909 | 161k | if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") { |
5910 | 12 | Parser.eatToEndOfStatement(); |
5911 | | //return Error(NameLoc, "conditional execution not supported in Thumb1"); |
5912 | 12 | ErrorCode = KS_ERR_ASM_MNEMONICFAIL; |
5913 | 12 | return true; |
5914 | 12 | } |
5915 | | |
5916 | 161k | Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc)); |
5917 | | |
5918 | | // Handle the IT instruction ITMask. Convert it to a bitmask. This |
5919 | | // is the mask as it will be for the IT encoding if the conditional |
5920 | | // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
5921 | | // where the conditional bit0 is zero, the instruction post-processing |
5922 | | // will adjust the mask accordingly. |
5923 | 161k | if (Mnemonic == "it") { |
5924 | 4.23k | SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2); |
5925 | 4.23k | if (ITMask.size() > 3) { |
5926 | 18 | Parser.eatToEndOfStatement(); |
5927 | | //return Error(Loc, "too many conditions on IT instruction"); |
5928 | 18 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
5929 | 18 | return true; |
5930 | 18 | } |
5931 | 4.21k | unsigned Mask = 8; |
5932 | 4.89k | for (unsigned i = ITMask.size(); i != 0; --i) { |
5933 | 697 | char pos = ITMask[i - 1]; |
5934 | 697 | if (pos != 't' && pos != 'e') { |
5935 | 23 | Parser.eatToEndOfStatement(); |
5936 | | //return Error(Loc, "illegal IT block condition mask '" + ITMask + "'"); |
5937 | 23 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
5938 | 23 | return true; |
5939 | 23 | } |
5940 | 674 | Mask >>= 1; |
5941 | 674 | if (ITMask[i - 1] == 't') |
5942 | 642 | Mask |= 8; |
5943 | 674 | } |
5944 | 4.19k | Operands.push_back(ARMOperand::CreateITMask(Mask, Loc)); |
5945 | 4.19k | } |
5946 | | |
5947 | | // FIXME: This is all a pretty gross hack. We should automatically handle |
5948 | | // optional operands like this via tblgen. |
5949 | | |
5950 | | // Next, add the CCOut and ConditionCode operands, if needed. |
5951 | | // |
5952 | | // For mnemonics which can ever incorporate a carry setting bit or predication |
5953 | | // code, our matching model involves us always generating CCOut and |
5954 | | // ConditionCode operands to match the mnemonic "as written" and then we let |
5955 | | // the matcher deal with finding the right instruction or generating an |
5956 | | // appropriate error. |
5957 | 161k | bool CanAcceptCarrySet, CanAcceptPredicationCode; |
5958 | 161k | getMnemonicAcceptInfo(Mnemonic, Name, CanAcceptCarrySet, CanAcceptPredicationCode); |
5959 | | |
5960 | | // If we had a carry-set on an instruction that can't do that, issue an |
5961 | | // error. |
5962 | 161k | if (!CanAcceptCarrySet && CarrySetting) { |
5963 | 579 | Parser.eatToEndOfStatement(); |
5964 | | //return Error(NameLoc, "instruction '" + Mnemonic + |
5965 | | // "' can not set flags, but 's' suffix specified"); |
5966 | 579 | ErrorCode = KS_ERR_ASM_MNEMONICFAIL; |
5967 | 579 | return true; |
5968 | 579 | } |
5969 | | // If we had a predication code on an instruction that can't do that, issue an |
5970 | | // error. |
5971 | 160k | if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) { |
5972 | 9 | Parser.eatToEndOfStatement(); |
5973 | | //return Error(NameLoc, "instruction '" + Mnemonic + |
5974 | | // "' is not predicable, but condition code specified"); |
5975 | 9 | ErrorCode = KS_ERR_ASM_MNEMONICFAIL; |
5976 | 9 | return true; |
5977 | 9 | } |
5978 | | |
5979 | | // Add the carry setting operand, if necessary. |
5980 | 160k | if (CanAcceptCarrySet) { |
5981 | 34.3k | SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size()); |
5982 | 34.3k | Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0, |
5983 | 34.3k | Loc)); |
5984 | 34.3k | } |
5985 | | |
5986 | | // Add the predication code operand, if necessary. |
5987 | 160k | if (CanAcceptPredicationCode) { |
5988 | 149k | SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() + |
5989 | 149k | CarrySetting); |
5990 | 149k | Operands.push_back(ARMOperand::CreateCondCode( |
5991 | 149k | ARMCC::CondCodes(PredicationCode), Loc)); |
5992 | 149k | } |
5993 | | |
5994 | | // Add the processor imod operand, if necessary. |
5995 | 160k | if (ProcessorIMod) { |
5996 | 321 | Operands.push_back(ARMOperand::CreateImm( |
5997 | 321 | MCConstantExpr::create(ProcessorIMod, getContext()), |
5998 | 321 | NameLoc, NameLoc)); |
5999 | 160k | } else if (Mnemonic == "cps" && isMClass()) { |
6000 | | //return Error(NameLoc, "instruction 'cps' requires effect for M-class"); |
6001 | 3 | ErrorCode = KS_ERR_ASM_MNEMONICFAIL; |
6002 | 3 | return true; |
6003 | 3 | } |
6004 | | |
6005 | | // Add the remaining tokens in the mnemonic. |
6006 | 263k | while (Next != StringRef::npos) { |
6007 | 102k | Start = Next; |
6008 | 102k | Next = Name.find('.', Start + 1); |
6009 | 102k | StringRef ExtraToken = Name.slice(Start, Next); |
6010 | | |
6011 | | // Some NEON instructions have an optional datatype suffix that is |
6012 | | // completely ignored. Check for that. |
6013 | 102k | if (isDataTypeToken(ExtraToken) && |
6014 | 102k | doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken)) |
6015 | 261 | continue; |
6016 | | |
6017 | | // For ARM mode, generate an error if the .n qualifier is used.
6018 | 102k | if (ExtraToken == ".n" && !isThumb()) { |
6019 | | //SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start); |
6020 | 13 | Parser.eatToEndOfStatement(); |
6021 | | //return Error(Loc, "instruction with .n (narrow) qualifier not allowed in " |
6022 | | // "arm mode"); |
6023 | 13 | ErrorCode = KS_ERR_ASM_MNEMONICFAIL; |
6024 | 13 | return true; |
6025 | 13 | } |
6026 | | |
6027 | | // The .n qualifier is always discarded as that is what the tables |
6028 | | // and matcher expect. In ARM mode the .w qualifier has no effect, |
6029 | | // so discard it to avoid errors that can be caused by the matcher. |
6030 | 102k | if (ExtraToken != ".n" && (isThumb() || ExtraToken != ".w")) { |
6031 | 100k | SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start); |
6032 | 100k | Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc)); |
6033 | 100k | } |
6034 | 102k | } |
6035 | | |
6036 | | // Read the remaining operands. |
6037 | 160k | if (getLexer().isNot(AsmToken::EndOfStatement)) { |
6038 | | // Read the first operand. |
6039 | 148k | if (parseOperand(Operands, Mnemonic, ErrorCode)) { |
6040 | 29.8k | Parser.eatToEndOfStatement(); |
6041 | 29.8k | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
6042 | 29.8k | return true; |
6043 | 29.8k | } |
6044 | | |
6045 | 309k | while (getLexer().is(AsmToken::Comma)) { |
6046 | 198k | Parser.Lex(); // Eat the comma. |
6047 | | |
6048 | | // Parse and remember the operand. |
6049 | 198k | if (parseOperand(Operands, Mnemonic, ErrorCode)) { |
6050 | 7.25k | Parser.eatToEndOfStatement(); |
6051 | 7.25k | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
6052 | 7.25k | return true; |
6053 | 7.25k | } |
6054 | 198k | } |
6055 | 118k | } |
6056 | | |
6057 | 123k | if (getLexer().isNot(AsmToken::EndOfStatement)) { |
6058 | | //SMLoc Loc = getLexer().getLoc(); |
6059 | 4.33k | Parser.eatToEndOfStatement(); |
6060 | | //return Error(Loc, "unexpected token in argument list"); |
6061 | 4.33k | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
6062 | 4.33k | return true; |
6063 | 4.33k | } |
6064 | | |
6065 | 119k | Parser.Lex(); // Consume the EndOfStatement |
6066 | | |
6067 | 119k | if (RequireVFPRegisterListCheck) { |
6068 | 79 | ARMOperand &Op = static_cast<ARMOperand &>(*Operands.back()); |
6069 | 79 | if (AcceptSinglePrecisionOnly && !Op.isSPRRegList()) { |
6070 | | //return Error(Op.getStartLoc(), |
6071 | | // "VFP/Neon single precision register expected"); |
6072 | 14 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
6073 | 14 | return true; |
6074 | 14 | } |
6075 | 65 | if (AcceptDoublePrecisionOnly && !Op.isDPRRegList()) { |
6076 | | //return Error(Op.getStartLoc(), |
6077 | | // "VFP/Neon double precision register expected"); |
6078 | 30 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
6079 | 30 | return true; |
6080 | 30 | } |
6081 | 65 | } |
6082 | | |
6083 | 119k | tryConvertingToTwoOperandForm(Mnemonic, CarrySetting, Operands); |
6084 | | |
6085 | | // Some instructions, mostly Thumb, have forms for the same mnemonic that |
6086 | | // do and don't have a cc_out optional-def operand. With some spot-checks |
6087 | | // of the operand list, we can figure out which variant we're trying to |
6088 | | // parse and adjust accordingly before actually matching. We shouldn't ever |
6089 | | // try to remove a cc_out operand that was explicitly set on the |
6090 | | // mnemonic, of course (CarrySetting == true). Reason number #317 the |
6091 | | // table driven matcher doesn't fit well with the ARM instruction set. |
6092 | 119k | if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) |
6093 | 5.19k | Operands.erase(Operands.begin() + 1); |
6094 | | |
6095 | | // Some instructions have the same mnemonic, but don't always |
6096 | | // have a predicate. Distinguish them here and delete the |
6097 | | // predicate if needed. |
6098 | 119k | if (shouldOmitPredicateOperand(Mnemonic, Operands)) |
6099 | 0 | Operands.erase(Operands.begin() + 1); |
6100 | | |
6101 | | // ARM mode 'blx' need special handling, as the register operand version |
6102 | | // is predicable, but the label operand version is not. So, we can't rely |
6103 | | // on the Mnemonic based checking to correctly figure out when to put |
6104 | | // a k_CondCode operand in the list. If we're trying to match the label |
6105 | | // version, remove the k_CondCode operand here. |
6106 | 119k | if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 && |
6107 | 119k | static_cast<ARMOperand &>(*Operands[2]).isImm()) |
6108 | 977 | Operands.erase(Operands.begin() + 1); |
6109 | | |
6110 | | // Adjust operands of ldrexd/strexd to MCK_GPRPair. |
6111 | | // ldrexd/strexd require even/odd GPR pair. To enforce this constraint, |
6112 | | // a single GPRPair reg operand is used in the .td file to replace the two |
6113 | | // GPRs. However, when parsing from asm, the two GPRs cannot be automatically
6114 | | // expressed as a GPRPair, so we have to manually merge them. |
6115 | | // FIXME: We would really like to be able to tablegen'erate this. |
6116 | 119k | if (!isThumb() && Operands.size() > 4 && |
6117 | 119k | (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" || |
6118 | 16.4k | Mnemonic == "stlexd")) { |
6119 | 0 | bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd"); |
6120 | 0 | unsigned Idx = isLoad ? 2 : 3; |
6121 | 0 | ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]); |
6122 | 0 | ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]); |
6123 | |
6124 | 0 | const MCRegisterClass& MRC = MRI->getRegClass(ARM::GPRRegClassID); |
6125 | | // Adjust only if Op1 and Op2 are GPRs. |
6126 | 0 | if (Op1.isReg() && Op2.isReg() && MRC.contains(Op1.getReg()) && |
6127 | 0 | MRC.contains(Op2.getReg())) { |
6128 | 0 | unsigned Reg1 = Op1.getReg(); |
6129 | 0 | unsigned Reg2 = Op2.getReg(); |
6130 | 0 | unsigned Rt = MRI->getEncodingValue(Reg1); |
6131 | 0 | unsigned Rt2 = MRI->getEncodingValue(Reg2); |
6132 | | |
6133 | | // Rt2 must be Rt + 1 and Rt must be even. |
6134 | 0 | if (Rt + 1 != Rt2 || (Rt & 1)) { |
6135 | | //Error(Op2.getStartLoc(), isLoad |
6136 | | // ? "destination operands must be sequential" |
6137 | | // : "source operands must be sequential"); |
6138 | 0 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
6139 | 0 | return true; |
6140 | 0 | } |
6141 | 0 | unsigned NewReg = MRI->getMatchingSuperReg(Reg1, ARM::gsub_0, |
6142 | 0 | &(MRI->getRegClass(ARM::GPRPairRegClassID))); |
6143 | 0 | Operands[Idx] = |
6144 | 0 | ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc()); |
6145 | 0 | Operands.erase(Operands.begin() + Idx + 1); |
6146 | 0 | } |
6147 | 0 | } |
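// A self-contained sketch of the pairing rule enforced above, written over
// plain encoding values instead of MCRegisterInfo (illustrative only, not part
// of the parser): ldrexd/strexd-style pairs need an even first register
// followed by its immediate successor, e.g. (r0, r1) or (r2, r3), but not
// (r1, r2) or (r0, r2).
static bool isValidGPRPairEncoding(unsigned Rt, unsigned Rt2) {
  return (Rt & 1) == 0 && Rt2 == Rt + 1; // Rt even, Rt2 == Rt + 1
}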
6148 | | |
6149 | | // GNU Assembler extension (compatibility) |
6150 | 119k | if ((Mnemonic == "ldrd" || Mnemonic == "strd")) { |
6151 | 375 | if (Operands.size() < 4) { |
6152 | 17 | ErrorCode = KS_ERR_ASM_INVALIDOPERAND; |
6153 | 17 | return true; |
6154 | 17 | } |
6155 | 358 | ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[2]); |
6156 | 358 | ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[3]); |
6157 | 358 | if (Op3.isMem()) { |
6158 | 36 | assert(Op2.isReg() && "expected register argument"); |
6159 | | |
6160 | 36 | unsigned SuperReg = MRI->getMatchingSuperReg( |
6161 | 36 | Op2.getReg(), ARM::gsub_0, &MRI->getRegClass(ARM::GPRPairRegClassID)); |
6162 | | |
6163 | 36 | assert(SuperReg && "expected register pair"); |
6164 | | |
6165 | 36 | unsigned PairedReg = MRI->getSubReg(SuperReg, ARM::gsub_1); |
6166 | | |
6167 | 36 | Operands.insert( |
6168 | 36 | Operands.begin() + 3, |
6169 | 36 | ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(), Op2.getEndLoc())); |
6170 | 36 | } |
6171 | 358 | } |
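// Standalone illustration of the GNU two-operand rewrite above (a sketch, not
// the parser's code path): when only the first register of the pair is
// written, e.g. "strd r0, [r4]", the implied partner is the gsub_1 half of the
// same GPRPair, i.e. the next register, giving "strd r0, r1, [r4]".
static unsigned impliedPairPartnerEncoding(unsigned RtEncoding) {
  return RtEncoding + 1; // r0 -> r1, r2 -> r3, ...
}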
6172 | | |
6173 | | // FIXME: As said above, this is all a pretty gross hack. This instruction |
6174 | | // does not fit with other "subs" and tblgen. |
6175 | | // Adjust operands of B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction |
6176 | | // so the Mnemonic is the original name "subs" and delete the predicate |
6177 | | // operand so it will match the table entry. |
6178 | 119k | if (isThumbTwo() && Mnemonic == "sub" && Operands.size() == 6 && |
6179 | 119k | static_cast<ARMOperand &>(*Operands[3]).isReg() && |
6180 | 119k | static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::PC && |
6181 | 119k | static_cast<ARMOperand &>(*Operands[4]).isReg() && |
6182 | 119k | static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::LR && |
6183 | 119k | static_cast<ARMOperand &>(*Operands[5]).isImm()) { |
6184 | 0 | Operands.front() = ARMOperand::CreateToken(Name, NameLoc); |
6185 | 0 | Operands.erase(Operands.begin() + 1); |
6186 | 0 | } |
6187 | 119k | return false; |
6188 | 119k | } |
6189 | | |
6190 | | // Validate context-sensitive operand constraints. |
6191 | | |
6192 | | // Return 'true' if the register list contains non-low GPR registers,
6193 | | // 'false' otherwise. If Reg is found in the register list, set
6194 | | // 'containsReg' to true.
6195 | | static bool checkLowRegisterList(const MCInst &Inst, unsigned OpNo, |
6196 | | unsigned Reg, unsigned HiReg, |
6197 | 0 | bool &containsReg) { |
6198 | 0 | containsReg = false; |
6199 | 0 | for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) { |
6200 | 0 | unsigned OpReg = Inst.getOperand(i).getReg(); |
6201 | 0 | if (OpReg == Reg) |
6202 | 0 | containsReg = true; |
6203 | | // Anything other than a low register isn't legal here. |
6204 | 0 | if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg)) |
6205 | 0 | return true; |
6206 | 0 | } |
6207 | 0 | return false; |
6208 | 0 | } |
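// For reference, a minimal stand-in for the isARMLowRegister() predicate used
// above (sketch only, working on encodings rather than register enums): the
// 16-bit Thumb register lists can generally only name r0-r7, which is what
// checkLowRegisterList() polices, with one optional high register (pc or lr)
// permitted via HiReg.
static bool encodingIsLowRegister(unsigned RegEncoding) {
  return RegEncoding <= 7; // r0-r7
}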
6209 | | |
6210 | | // Check if the specified register is in the register list of the inst,
6211 | | // starting at the indicated operand number. |
6212 | 0 | static bool listContainsReg(const MCInst &Inst, unsigned OpNo, unsigned Reg) { |
6213 | 0 | for (unsigned i = OpNo, e = Inst.getNumOperands(); i < e; ++i) { |
6214 | 0 | unsigned OpReg = Inst.getOperand(i).getReg(); |
6215 | 0 | if (OpReg == Reg) |
6216 | 0 | return true; |
6217 | 0 | } |
6218 | 0 | return false; |
6219 | 0 | } |
6220 | | |
6221 | | // Return true if the instruction has the interesting property of being
6222 | | // allowed in IT blocks, but not being predicable. |
6223 | 3.61k | static bool instIsBreakpoint(const MCInst &Inst) { |
6224 | 3.61k | return Inst.getOpcode() == ARM::tBKPT || |
6225 | 3.61k | Inst.getOpcode() == ARM::BKPT || |
6226 | 3.61k | Inst.getOpcode() == ARM::tHLT || |
6227 | 3.61k | Inst.getOpcode() == ARM::HLT; |
6228 | | |
6229 | 3.61k | } |
6230 | | |
6231 | | bool ARMAsmParser::validatetLDMRegList(const MCInst &Inst, |
6232 | | const OperandVector &Operands, |
6233 | | unsigned ListNo, bool IsARPop) |
6234 | 0 | { |
6235 | | //const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]); |
6236 | | //bool HasWritebackToken = Op.isToken() && Op.getToken() == "!"; |
6237 | |
6238 | 0 | bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP); |
6239 | 0 | bool ListContainsLR = listContainsReg(Inst, ListNo, ARM::LR); |
6240 | 0 | bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC); |
6241 | |
6242 | 0 | if (!IsARPop && ListContainsSP) |
6243 | | //return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(), |
6244 | | // "SP may not be in the register list"); |
6245 | 0 | return true; |
6246 | 0 | else if (ListContainsPC && ListContainsLR) |
6247 | | //return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(), |
6248 | | // "PC and LR may not be in the register list simultaneously"); |
6249 | 0 | return true; |
6250 | 0 | else if (inITBlock() && !lastInITBlock() && ListContainsPC) |
6251 | | //return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(), |
6252 | | // "instruction must be outside of IT block or the last " |
6253 | | // "instruction in an IT block"); |
6254 | 0 | return true; |
6255 | 0 | return false; |
6256 | 0 | } |
6257 | | |
6258 | | bool ARMAsmParser::validatetSTMRegList(const MCInst &Inst, |
6259 | | const OperandVector &Operands, |
6260 | | unsigned ListNo) |
6261 | 0 | { |
6262 | | //const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]); |
6263 | | //bool HasWritebackToken = Op.isToken() && Op.getToken() == "!"; |
6264 | |
6265 | 0 | bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP); |
6266 | 0 | bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC); |
6267 | |
6268 | 0 | if (ListContainsSP && ListContainsPC) |
6269 | | //return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(), |
6270 | | // "SP and PC may not be in the register list"); |
6271 | 0 | return true; |
6272 | 0 | else if (ListContainsSP) |
6273 | | //return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(), |
6274 | | // "SP may not be in the register list"); |
6275 | 0 | return true; |
6276 | 0 | else if (ListContainsPC) |
6277 | | //return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(), |
6278 | | // "PC may not be in the register list"); |
6279 | 0 | return true; |
6280 | 0 | return false; |
6281 | 0 | } |
6282 | | |
6283 | | // FIXME: We would really like to be able to tablegen'erate this. |
6284 | | bool ARMAsmParser::validateInstruction(MCInst &Inst, |
6285 | | const OperandVector &Operands) |
6286 | 99.8k | { |
6287 | 99.8k | const MCInstrDesc &MCID = MII.get(Inst.getOpcode()); |
6288 | | //SMLoc Loc = Operands[0]->getStartLoc(); |
6289 | | |
6290 | | // Check the IT block state first. |
6291 | | // NOTE: BKPT and HLT instructions have the interesting property of being |
6292 | | // allowed in IT blocks, but not being predicable. They just always execute. |
6293 | 99.8k | if (inITBlock() && !instIsBreakpoint(Inst)) { |
6294 | 3.57k | unsigned Bit = 1; |
6295 | 3.57k | if (ITState.FirstCond) |
6296 | 3.35k | ITState.FirstCond = false; |
6297 | 227 | else |
6298 | 227 | Bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1; |
6299 | | // The instruction must be predicable. |
6300 | 3.57k | if (!MCID.isPredicable()) |
6301 | | //return Error(Loc, "instructions in IT block must be predicable"); |
6302 | 38 | return true; |
6303 | 3.54k | unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm(); |
6304 | 3.54k | unsigned ITCond = Bit ? ITState.Cond : |
6305 | 3.54k | ARMCC::getOppositeCondition(ITState.Cond); |
6306 | 3.54k | if (Cond != ITCond) { |
6307 | | // Find the condition code Operand to get its SMLoc information. |
6308 | 25 | SMLoc CondLoc; |
6309 | 108 | for (unsigned I = 1; I < Operands.size(); ++I) |
6310 | 83 | if (static_cast<ARMOperand &>(*Operands[I]).isCondCode()) |
6311 | 25 | CondLoc = Operands[I]->getStartLoc(); |
6312 | | //return Error(CondLoc, "incorrect condition in IT block; got '" + |
6313 | | // StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) + |
6314 | | // "', but expected '" + |
6315 | | // ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'"); |
6316 | 25 | return true; |
6317 | 25 | } |
6318 | | // Check for non-'al' condition codes outside of the IT block. |
6319 | 96.2k | } else if (isThumbTwo() && MCID.isPredicable() && |
6320 | 96.2k | Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() != |
6321 | 45.1k | ARMCC::AL && Inst.getOpcode() != ARM::tBcc && |
6322 | 96.2k | Inst.getOpcode() != ARM::t2Bcc) |
6323 | | //return Error(Loc, "predicated instructions must be in IT block"); |
6324 | 8 | return true; |
6325 | | |
6326 | 99.7k | const unsigned Opcode = Inst.getOpcode(); |
6327 | 99.7k | switch (Opcode) { |
6328 | 101 | case ARM::LDRD: |
6329 | 101 | case ARM::LDRD_PRE: |
6330 | 102 | case ARM::LDRD_POST: { |
6331 | 102 | const unsigned RtReg = Inst.getOperand(0).getReg(); |
6332 | | |
6333 | | // Rt can't be R14. |
6334 | 102 | if (RtReg == ARM::LR) |
6335 | | //return Error(Operands[3]->getStartLoc(), |
6336 | | // "Rt can't be R14"); |
6337 | 1 | return true; |
6338 | | |
6339 | 101 | const unsigned Rt = MRI->getEncodingValue(RtReg); |
6340 | | // Rt must be even-numbered. |
6341 | 101 | if ((Rt & 1) == 1) |
6342 | | //return Error(Operands[3]->getStartLoc(), |
6343 | | // "Rt must be even-numbered"); |
6344 | 7 | return true; |
6345 | | |
6346 | | // Rt2 must be Rt + 1. |
6347 | 94 | const unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg()); |
6348 | 94 | if (Rt2 != Rt + 1) |
6349 | | //return Error(Operands[3]->getStartLoc(), |
6350 | | // "destination operands must be sequential"); |
6351 | 4 | return true; |
6352 | | |
6353 | 90 | if (Opcode == ARM::LDRD_PRE || Opcode == ARM::LDRD_POST) { |
6354 | 1 | const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(3).getReg()); |
6355 | | // For addressing modes with writeback, the base register needs to be |
6356 | | // different from the destination registers. |
6357 | 1 | if (Rn == Rt || Rn == Rt2) |
6358 | | //return Error(Operands[3]->getStartLoc(), |
6359 | | // "base register needs to be different from destination " |
6360 | | // "registers"); |
6361 | 0 | return true; |
6362 | 1 | } |
6363 | | |
6364 | 90 | return false; |
6365 | 90 | } |
6366 | 51 | case ARM::t2LDRDi8: |
6367 | 51 | case ARM::t2LDRD_PRE: |
6368 | 51 | case ARM::t2LDRD_POST: { |
6369 | | // Rt2 must be different from Rt. |
6370 | 51 | unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg()); |
6371 | 51 | unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg()); |
6372 | 51 | if (Rt2 == Rt) |
6373 | | //return Error(Operands[3]->getStartLoc(), |
6374 | | // "destination operands can't be identical"); |
6375 | 0 | return true; |
6376 | 51 | return false; |
6377 | 51 | } |
6378 | 52 | case ARM::t2BXJ: { |
6379 | 52 | const unsigned RmReg = Inst.getOperand(0).getReg(); |
6380 | | // Rm = SP is no longer unpredictable in v8-A |
6381 | 52 | if (RmReg == ARM::SP && !hasV8Ops()) |
6382 | | //return Error(Operands[2]->getStartLoc(), |
6383 | | // "r13 (SP) is an unpredictable operand to BXJ"); |
6384 | 1 | return true; |
6385 | 51 | return false; |
6386 | 52 | } |
6387 | 63 | case ARM::STRD: { |
6388 | | // Rt2 must be Rt + 1. |
6389 | 63 | unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg()); |
6390 | 63 | unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg()); |
6391 | 63 | if (Rt2 != Rt + 1) |
6392 | | //return Error(Operands[3]->getStartLoc(), |
6393 | | // "source operands must be sequential"); |
6394 | 0 | return true; |
6395 | 63 | return false; |
6396 | 63 | } |
6397 | 0 | case ARM::STRD_PRE: |
6398 | 0 | case ARM::STRD_POST: { |
6399 | | // Rt2 must be Rt + 1. |
6400 | 0 | unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg()); |
6401 | 0 | unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(2).getReg()); |
6402 | 0 | if (Rt2 != Rt + 1) |
6403 | | //return Error(Operands[3]->getStartLoc(), |
6404 | | // "source operands must be sequential"); |
6405 | 0 | return true; |
6406 | 0 | return false; |
6407 | 0 | } |
6408 | 0 | case ARM::STR_PRE_IMM: |
6409 | 0 | case ARM::STR_PRE_REG: |
6410 | 0 | case ARM::STR_POST_IMM: |
6411 | 0 | case ARM::STR_POST_REG: |
6412 | 0 | case ARM::STRH_PRE: |
6413 | 0 | case ARM::STRH_POST: |
6414 | 0 | case ARM::STRB_PRE_IMM: |
6415 | 0 | case ARM::STRB_PRE_REG: |
6416 | 0 | case ARM::STRB_POST_IMM: |
6417 | 0 | case ARM::STRB_POST_REG: { |
6418 | | // Rt must be different from Rn. |
6419 | 0 | const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg()); |
6420 | 0 | const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg()); |
6421 | |
6422 | 0 | if (Rt == Rn) |
6423 | | //return Error(Operands[3]->getStartLoc(), |
6424 | | // "source register and base register can't be identical"); |
6425 | 0 | return true; |
6426 | 0 | return false; |
6427 | 0 | } |
6428 | 2 | case ARM::LDR_PRE_IMM: |
6429 | 3 | case ARM::LDR_PRE_REG: |
6430 | 57 | case ARM::LDR_POST_IMM: |
6431 | 76 | case ARM::LDR_POST_REG: |
6432 | 76 | case ARM::LDRH_PRE: |
6433 | 103 | case ARM::LDRH_POST: |
6434 | 103 | case ARM::LDRSH_PRE: |
6435 | 138 | case ARM::LDRSH_POST: |
6436 | 138 | case ARM::LDRB_PRE_IMM: |
6437 | 138 | case ARM::LDRB_PRE_REG: |
6438 | 176 | case ARM::LDRB_POST_IMM: |
6439 | 195 | case ARM::LDRB_POST_REG: |
6440 | 195 | case ARM::LDRSB_PRE: |
6441 | 224 | case ARM::LDRSB_POST: { |
6442 | | // Rt must be different from Rn. |
6443 | 224 | const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg()); |
6444 | 224 | const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg()); |
6445 | | |
6446 | 224 | if (Rt == Rn) |
6447 | | //return Error(Operands[3]->getStartLoc(), |
6448 | | // "destination register and base register can't be identical"); |
6449 | 3 | return true; |
6450 | 221 | return false; |
6451 | 224 | } |
6452 | 0 | case ARM::SBFX: |
6453 | 0 | case ARM::UBFX: { |
6454 | | // Width must be in range [1, 32-lsb]. |
6455 | 0 | unsigned LSB = Inst.getOperand(2).getImm(); |
6456 | 0 | unsigned Widthm1 = Inst.getOperand(3).getImm(); |
6457 | 0 | if (Widthm1 >= 32 - LSB) |
6458 | | //return Error(Operands[5]->getStartLoc(), |
6459 | | // "bitfield width must be in range [1,32-lsb]"); |
6460 | 0 | return true; |
6461 | 0 | return false; |
6462 | 0 | } |
6463 | | // Notionally handles ARM::tLDMIA_UPD too. |
6464 | 0 | case ARM::tLDMIA: { |
6465 | | // If we're parsing Thumb2, the .w variant is available and handles |
6466 | | // most cases that are normally illegal for a Thumb1 LDM instruction. |
6467 | | // We'll make the transformation in processInstruction() if necessary. |
6468 | | // |
6469 | | // Thumb LDM instructions are writeback iff the base register is not |
6470 | | // in the register list. |
6471 | 0 | unsigned Rn = Inst.getOperand(0).getReg(); |
6472 | 0 | bool HasWritebackToken = |
6473 | 0 | (static_cast<ARMOperand &>(*Operands[3]).isToken() && |
6474 | 0 | static_cast<ARMOperand &>(*Operands[3]).getToken() == "!"); |
6475 | 0 | bool ListContainsBase; |
6476 | 0 | if (checkLowRegisterList(Inst, 3, Rn, 0, ListContainsBase) && !isThumbTwo()) |
6477 | | //return Error(Operands[3 + HasWritebackToken]->getStartLoc(), |
6478 | | // "registers must be in range r0-r7"); |
6479 | 0 | return true; |
6480 | | // If we should have writeback, then there should be a '!' token. |
6481 | 0 | if (!ListContainsBase && !HasWritebackToken && !isThumbTwo()) |
6482 | | //return Error(Operands[2]->getStartLoc(), |
6483 | | // "writeback operator '!' expected"); |
6484 | 0 | return true; |
6485 | | // If we should not have writeback, there must not be a '!'. This is |
6486 | | // true even for the 32-bit wide encodings. |
6487 | 0 | if (ListContainsBase && HasWritebackToken) |
6488 | | //return Error(Operands[3]->getStartLoc(), |
6489 | | // "writeback operator '!' not allowed when base register " |
6490 | | // "in register list"); |
6491 | 0 | return true; |
6492 | | |
6493 | 0 | if (validatetLDMRegList(Inst, Operands, 3)) |
6494 | 0 | return true; |
6495 | 0 | break; |
6496 | 0 | } |
6497 | 0 | case ARM::LDMIA_UPD: |
6498 | 0 | case ARM::LDMDB_UPD: |
6499 | 0 | case ARM::LDMIB_UPD: |
6500 | 0 | case ARM::LDMDA_UPD: |
6501 | | // ARM variants loading and updating the same register are only officially |
6502 | | // UNPREDICTABLE on v7 upwards. Goodness knows what they did before. |
6503 | 0 | if (!hasV7Ops()) |
6504 | 0 | break; |
6505 | 0 | if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg())) |
6506 | | //return Error(Operands.back()->getStartLoc(), |
6507 | | // "writeback register not allowed in register list"); |
6508 | 0 | return true; |
6509 | 0 | break; |
6510 | 0 | case ARM::t2LDMIA: |
6511 | 0 | case ARM::t2LDMDB: |
6512 | 0 | if (validatetLDMRegList(Inst, Operands, 3)) |
6513 | 0 | return true; |
6514 | 0 | break; |
6515 | 0 | case ARM::t2STMIA: |
6516 | 0 | case ARM::t2STMDB: |
6517 | 0 | if (validatetSTMRegList(Inst, Operands, 3)) |
6518 | 0 | return true; |
6519 | 0 | break; |
6520 | 0 | case ARM::t2LDMIA_UPD: |
6521 | 0 | case ARM::t2LDMDB_UPD: |
6522 | 0 | case ARM::t2STMIA_UPD: |
6523 | 0 | case ARM::t2STMDB_UPD: { |
6524 | 0 | if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg())) |
6525 | | //return Error(Operands.back()->getStartLoc(), |
6526 | | // "writeback register not allowed in register list"); |
6527 | 0 | return true; |
6528 | | |
6529 | 0 | if (Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) { |
6530 | 0 | if (validatetLDMRegList(Inst, Operands, 3)) |
6531 | 0 | return true; |
6532 | 0 | } else { |
6533 | 0 | if (validatetSTMRegList(Inst, Operands, 3)) |
6534 | 0 | return true; |
6535 | 0 | } |
6536 | 0 | break; |
6537 | 0 | } |
6538 | 0 | case ARM::sysLDMIA_UPD: |
6539 | 0 | case ARM::sysLDMDA_UPD: |
6540 | 0 | case ARM::sysLDMDB_UPD: |
6541 | 0 | case ARM::sysLDMIB_UPD: |
6542 | 0 | if (!listContainsReg(Inst, 3, ARM::PC)) |
6543 | | //return Error(Operands[4]->getStartLoc(), |
6544 | | // "writeback register only allowed on system LDM " |
6545 | | // "if PC in register-list"); |
6546 | 0 | return true; |
6547 | 0 | break; |
6548 | 0 | case ARM::sysSTMIA_UPD: |
6549 | 0 | case ARM::sysSTMDA_UPD: |
6550 | 0 | case ARM::sysSTMDB_UPD: |
6551 | 0 | case ARM::sysSTMIB_UPD: |
6552 | | //return Error(Operands[2]->getStartLoc(), |
6553 | | // "system STM cannot have writeback register"); |
6554 | 0 | return true; |
6555 | 53 | case ARM::tMUL: { |
6556 | | // The second source operand must be the same register as the destination |
6557 | | // operand. |
6558 | | // |
6559 | | // In this case, we must directly check the parsed operands because the |
6560 | | // cvtThumbMultiply() function is written in such a way that it guarantees |
6561 | | // this first statement is always true for the new Inst. Essentially, the |
6562 | | // destination is unconditionally copied into the second source operand |
6563 | | // without checking to see if it matches what we actually parsed. |
6564 | 53 | if (Operands.size() == 6 && (((ARMOperand &)*Operands[3]).getReg() != |
6565 | 41 | ((ARMOperand &)*Operands[5]).getReg()) && |
6566 | 53 | (((ARMOperand &)*Operands[3]).getReg() != |
6567 | 19 | ((ARMOperand &)*Operands[4]).getReg())) { |
6568 | | //return Error(Operands[3]->getStartLoc(), |
6569 | | // "destination register must match source register"); |
6570 | 7 | return true; |
6571 | 7 | } |
6572 | 46 | break; |
6573 | 53 | } |
6574 | | // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2, |
6575 | | // so only issue a diagnostic for thumb1. The instructions will be |
6576 | | // switched to the t2 encodings in processInstruction() if necessary. |
6577 | 46 | case ARM::tPOP: { |
6578 | 0 | bool ListContainsBase; |
6579 | 0 | if (checkLowRegisterList(Inst, 2, 0, ARM::PC, ListContainsBase) && |
6580 | 0 | !isThumbTwo()) |
6581 | | //return Error(Operands[2]->getStartLoc(), |
6582 | | // "registers must be in range r0-r7 or pc"); |
6583 | 0 | return true; |
6584 | 0 | if (validatetLDMRegList(Inst, Operands, 2, !isMClass())) |
6585 | 0 | return true; |
6586 | 0 | break; |
6587 | 0 | } |
6588 | 0 | case ARM::tPUSH: { |
6589 | 0 | bool ListContainsBase; |
6590 | 0 | if (checkLowRegisterList(Inst, 2, 0, ARM::LR, ListContainsBase) && |
6591 | 0 | !isThumbTwo()) |
6592 | | //return Error(Operands[2]->getStartLoc(), |
6593 | | // "registers must be in range r0-r7 or lr"); |
6594 | 0 | return true; |
6595 | 0 | if (validatetSTMRegList(Inst, Operands, 2)) |
6596 | 0 | return true; |
6597 | 0 | break; |
6598 | 0 | } |
6599 | 0 | case ARM::tSTMIA_UPD: { |
6600 | 0 | bool ListContainsBase, InvalidLowList; |
6601 | 0 | InvalidLowList = checkLowRegisterList(Inst, 4, Inst.getOperand(0).getReg(), |
6602 | 0 | 0, ListContainsBase); |
6603 | 0 | if (InvalidLowList && !isThumbTwo()) |
6604 | | //return Error(Operands[4]->getStartLoc(), |
6605 | | // "registers must be in range r0-r7"); |
6606 | 0 | return true; |
6607 | | |
6608 | | // This would be converted to a 32-bit stm, but that's not valid if the |
6609 | | // writeback register is in the list. |
6610 | 0 | if (InvalidLowList && ListContainsBase) |
6611 | | //return Error(Operands[4]->getStartLoc(), |
6612 | | // "writeback operator '!' not allowed when base register " |
6613 | | // "in register list"); |
6614 | 0 | return true; |
6615 | | |
6616 | 0 | if (validatetSTMRegList(Inst, Operands, 4)) |
6617 | 0 | return true; |
6618 | 0 | break; |
6619 | 0 | } |
6620 | 398 | case ARM::tADDrSP: { |
6621 | | // If the non-SP source operand and the destination operand are not the |
6622 | | // same, we need thumb2 (for the wide encoding), or we have an error. |
6623 | 398 | if (!isThumbTwo() && |
6624 | 398 | Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) { |
6625 | | //return Error(Operands[4]->getStartLoc(), |
6626 | | // "source register must be the same as destination"); |
6627 | 1 | return true; |
6628 | 1 | } |
6629 | 397 | break; |
6630 | 398 | } |
6631 | | // Final range checking for Thumb unconditional branch instructions. |
6632 | 11.6k | case ARM::tB: |
6633 | 11.6k | if (!(static_cast<ARMOperand &>(*Operands[2])).isSignedOffsetRel<11, 1>(Inst.getAddress())) |
6634 | | //return Error(Operands[2]->getStartLoc(), "branch target out of range"); |
6635 | 10 | return true; |
6636 | 11.6k | break; |
6637 | 11.6k | case ARM::t2B: { |
6638 | 1.71k | int op = (Operands[2]->isImm()) ? 2 : 3; |
6639 | 1.71k | if (!static_cast<ARMOperand &>(*Operands[op]).isSignedOffsetRel<24, 1>(Inst.getAddress())) |
6640 | | //return Error(Operands[op]->getStartLoc(), "branch target out of range"); |
6641 | 237 | return true; |
6642 | 1.47k | break; |
6643 | 1.71k | } |
6644 | | // Final range checking for Thumb conditional branch instructions. |
6645 | 3.05k | case ARM::tBcc: |
6646 | 3.05k | if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffsetRel<8, 1>(Inst.getAddress())) |
6647 | | //return Error(Operands[2]->getStartLoc(), "branch target out of range"); |
6648 | 37 | return true; |
6649 | 3.01k | break; |
6650 | 3.01k | case ARM::t2Bcc: { |
6651 | 866 | int Op = (Operands[2]->isImm()) ? 2 : 3; |
6652 | 866 | if (!static_cast<ARMOperand &>(*Operands[Op]).isSignedOffsetRel<20, 1>(Inst.getAddress())) |
6653 | | //return Error(Operands[Op]->getStartLoc(), "branch target out of range"); |
6654 | 176 | return true; |
6655 | 690 | break; |
6656 | 866 | } |
6657 | 690 | case ARM::MOVi16: |
6658 | 82 | case ARM::t2MOVi16: |
6659 | 235 | case ARM::t2MOVTi16: |
6660 | 235 | { |
6661 | | // We want to avoid misleadingly allowing something like "mov r0, <symbol>" |
6662 | | // especially when we turn it into a movw and the expression <symbol> does |
6663 | | // not have a :lower16: or :upper16: as part of the expression. We don't
6664 | | // want the behavior of silently truncating, which can be unexpected and |
6665 | | // lead to bugs that are difficult to find since this is an easy mistake |
6666 | | // to make. |
6667 | 235 | int i = (Operands[3]->isImm()) ? 3 : 4; |
6668 | 235 | ARMOperand &Op = static_cast<ARMOperand &>(*Operands[i]); |
6669 | 235 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()); |
6670 | 235 | if (CE) break; |
6671 | 9 | const MCExpr *E = dyn_cast<MCExpr>(Op.getImm()); |
6672 | 9 | if (!E) break; |
6673 | 9 | const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E); |
6674 | 9 | if (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 && |
6675 | 0 | ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16)) |
6676 | | //return Error( |
6677 | | // Op.getStartLoc(), |
6678 | | // "immediate expression for mov requires :lower16: or :upper16"); |
6679 | 9 | return true; |
6680 | 0 | break; |
6681 | 9 | } |
6682 | 99.7k | } |
6683 | | |
6684 | 98.8k | return false; |
6685 | 99.7k | } |
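// Rough sketch of the kind of range check the isSignedOffsetRel<Bits, Shift>
// calls above perform on branch targets (assumed semantics; the exact
// convention is defined by the Keystone operand class, not here): the
// PC-relative displacement must be a multiple of 1 << Shift and, once scaled
// down, fit in a signed Bits-bit field. With <11, 1> for tB this gives the
// familiar +/-2 KB Thumb branch range, and <24, 1> for t2B gives +/-16 MB.
static bool fitsScaledSignedField(long long Offset, unsigned Bits,
                                  unsigned Shift) {
  if (Offset & ((1LL << Shift) - 1))
    return false;                            // misaligned for this encoding
  long long Scaled = Offset >> Shift;
  long long Lo = -(1LL << (Bits - 1));
  long long Hi = (1LL << (Bits - 1)) - 1;
  return Scaled >= Lo && Scaled <= Hi;       // fits in a signed Bits-bit field
}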
6686 | | |
6687 | 0 | static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) { |
6688 | 0 | switch(Opc) { |
6689 | 0 | default: llvm_unreachable("unexpected opcode!"); |
6690 | | // VST1LN |
6691 | 0 | case ARM::VST1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD; |
6692 | 0 | case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD; |
6693 | 0 | case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD; |
6694 | 0 | case ARM::VST1LNdWB_register_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD; |
6695 | 0 | case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD; |
6696 | 0 | case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD; |
6697 | 0 | case ARM::VST1LNdAsm_8: Spacing = 1; return ARM::VST1LNd8; |
6698 | 0 | case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16; |
6699 | 0 | case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32; |
6700 | | |
6701 | | // VST2LN |
6702 | 0 | case ARM::VST2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD; |
6703 | 0 | case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD; |
6704 | 0 | case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD; |
6705 | 0 | case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD; |
6706 | 0 | case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD; |
6707 | | |
6708 | 0 | case ARM::VST2LNdWB_register_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD; |
6709 | 0 | case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD; |
6710 | 0 | case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD; |
6711 | 0 | case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD; |
6712 | 0 | case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD; |
6713 | | |
6714 | 0 | case ARM::VST2LNdAsm_8: Spacing = 1; return ARM::VST2LNd8; |
6715 | 0 | case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16; |
6716 | 0 | case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32; |
6717 | 0 | case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16; |
6718 | 0 | case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32; |
6719 | | |
6720 | | // VST3LN |
6721 | 0 | case ARM::VST3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD; |
6722 | 0 | case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD; |
6723 | 0 | case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD; |
6724 | 0 | case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD; |
6725 | 0 | case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD; |
6726 | 0 | case ARM::VST3LNdWB_register_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD; |
6727 | 0 | case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD; |
6728 | 0 | case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD; |
6729 | 0 | case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD; |
6730 | 0 | case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD; |
6731 | 0 | case ARM::VST3LNdAsm_8: Spacing = 1; return ARM::VST3LNd8; |
6732 | 0 | case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16; |
6733 | 0 | case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32; |
6734 | 0 | case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16; |
6735 | 0 | case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32; |
6736 | | |
6737 | | // VST3 |
6738 | 0 | case ARM::VST3dWB_fixed_Asm_8: Spacing = 1; return ARM::VST3d8_UPD; |
6739 | 0 | case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD; |
6740 | 0 | case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD; |
6741 | 0 | case ARM::VST3qWB_fixed_Asm_8: Spacing = 2; return ARM::VST3q8_UPD; |
6742 | 0 | case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD; |
6743 | 0 | case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD; |
6744 | 0 | case ARM::VST3dWB_register_Asm_8: Spacing = 1; return ARM::VST3d8_UPD; |
6745 | 0 | case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD; |
6746 | 0 | case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD; |
6747 | 0 | case ARM::VST3qWB_register_Asm_8: Spacing = 2; return ARM::VST3q8_UPD; |
6748 | 0 | case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD; |
6749 | 0 | case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD; |
6750 | 0 | case ARM::VST3dAsm_8: Spacing = 1; return ARM::VST3d8; |
6751 | 0 | case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16; |
6752 | 0 | case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32; |
6753 | 0 | case ARM::VST3qAsm_8: Spacing = 2; return ARM::VST3q8; |
6754 | 0 | case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16; |
6755 | 0 | case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32; |
6756 | | |
6757 | | // VST4LN |
6758 | 0 | case ARM::VST4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD; |
6759 | 0 | case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD; |
6760 | 0 | case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD; |
6761 | 0 | case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD; |
6762 | 0 | case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD; |
6763 | 0 | case ARM::VST4LNdWB_register_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD; |
6764 | 0 | case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD; |
6765 | 0 | case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD; |
6766 | 0 | case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD; |
6767 | 0 | case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD; |
6768 | 0 | case ARM::VST4LNdAsm_8: Spacing = 1; return ARM::VST4LNd8; |
6769 | 0 | case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16; |
6770 | 0 | case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32; |
6771 | 0 | case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16; |
6772 | 0 | case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32; |
6773 | | |
6774 | | // VST4 |
6775 | 0 | case ARM::VST4dWB_fixed_Asm_8: Spacing = 1; return ARM::VST4d8_UPD; |
6776 | 0 | case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD; |
6777 | 0 | case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD; |
6778 | 0 | case ARM::VST4qWB_fixed_Asm_8: Spacing = 2; return ARM::VST4q8_UPD; |
6779 | 0 | case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD; |
6780 | 0 | case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD; |
6781 | 0 | case ARM::VST4dWB_register_Asm_8: Spacing = 1; return ARM::VST4d8_UPD; |
6782 | 0 | case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD; |
6783 | 0 | case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD; |
6784 | 0 | case ARM::VST4qWB_register_Asm_8: Spacing = 2; return ARM::VST4q8_UPD; |
6785 | 0 | case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD; |
6786 | 0 | case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD; |
6787 | 0 | case ARM::VST4dAsm_8: Spacing = 1; return ARM::VST4d8; |
6788 | 0 | case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16; |
6789 | 0 | case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32; |
6790 | 0 | case ARM::VST4qAsm_8: Spacing = 2; return ARM::VST4q8; |
6791 | 0 | case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16; |
6792 | 0 | case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32; |
6793 | 0 | } |
6794 | 0 | } |
6795 | | |
6796 | 0 | static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) { |
6797 | 0 | switch(Opc) { |
6798 | 0 | default: llvm_unreachable("unexpected opcode!"); |
6799 | | // VLD1LN |
6800 | 0 | case ARM::VLD1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD; |
6801 | 0 | case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD; |
6802 | 0 | case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD; |
6803 | 0 | case ARM::VLD1LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD; |
6804 | 0 | case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD; |
6805 | 0 | case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD; |
6806 | 0 | case ARM::VLD1LNdAsm_8: Spacing = 1; return ARM::VLD1LNd8; |
6807 | 0 | case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16; |
6808 | 0 | case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32; |
6809 | | |
6810 | | // VLD2LN |
6811 | 0 | case ARM::VLD2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD; |
6812 | 0 | case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD; |
6813 | 0 | case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD; |
6814 | 0 | case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD; |
6815 | 0 | case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD; |
6816 | 0 | case ARM::VLD2LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD; |
6817 | 0 | case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD; |
6818 | 0 | case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD; |
6819 | 0 | case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD; |
6820 | 0 | case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD; |
6821 | 0 | case ARM::VLD2LNdAsm_8: Spacing = 1; return ARM::VLD2LNd8; |
6822 | 0 | case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16; |
6823 | 0 | case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32; |
6824 | 0 | case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16; |
6825 | 0 | case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32; |
6826 | | |
6827 | | // VLD3DUP |
6828 | 0 | case ARM::VLD3DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD; |
6829 | 0 | case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD; |
6830 | 0 | case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD; |
6831 | 0 | case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD; |
6832 | 0 | case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD; |
6833 | 0 | case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD; |
6834 | 0 | case ARM::VLD3DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD; |
6835 | 0 | case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD; |
6836 | 0 | case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD; |
6837 | 0 | case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD; |
6838 | 0 | case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD; |
6839 | 0 | case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD; |
6840 | 0 | case ARM::VLD3DUPdAsm_8: Spacing = 1; return ARM::VLD3DUPd8; |
6841 | 0 | case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16; |
6842 | 0 | case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32; |
6843 | 0 | case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8; |
6844 | 0 | case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16; |
6845 | 0 | case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32; |
6846 | | |
6847 | | // VLD3LN |
6848 | 0 | case ARM::VLD3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD; |
6849 | 0 | case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD; |
6850 | 0 | case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD; |
6851 | 0 | case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD; |
6852 | 0 | case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD; |
6853 | 0 | case ARM::VLD3LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD; |
6854 | 0 | case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD; |
6855 | 0 | case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD; |
6856 | 0 | case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD; |
6857 | 0 | case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD; |
6858 | 0 | case ARM::VLD3LNdAsm_8: Spacing = 1; return ARM::VLD3LNd8; |
6859 | 0 | case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16; |
6860 | 0 | case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32; |
6861 | 0 | case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16; |
6862 | 0 | case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32; |
6863 | | |
6864 | | // VLD3 |
6865 | 0 | case ARM::VLD3dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD; |
6866 | 0 | case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD; |
6867 | 0 | case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD; |
6868 | 0 | case ARM::VLD3qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD; |
6869 | 0 | case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD; |
6870 | 0 | case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD; |
6871 | 0 | case ARM::VLD3dWB_register_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD; |
6872 | 0 | case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD; |
6873 | 0 | case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD; |
6874 | 0 | case ARM::VLD3qWB_register_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD; |
6875 | 0 | case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD; |
6876 | 0 | case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD; |
6877 | 0 | case ARM::VLD3dAsm_8: Spacing = 1; return ARM::VLD3d8; |
6878 | 0 | case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16; |
6879 | 0 | case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32; |
6880 | 0 | case ARM::VLD3qAsm_8: Spacing = 2; return ARM::VLD3q8; |
6881 | 0 | case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16; |
6882 | 0 | case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32; |
6883 | | |
6884 | | // VLD4LN |
6885 | 0 | case ARM::VLD4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD; |
6886 | 0 | case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD; |
6887 | 0 | case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD; |
6888 | 0 | case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD; |
6889 | 0 | case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD; |
6890 | 0 | case ARM::VLD4LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD; |
6891 | 0 | case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD; |
6892 | 0 | case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD; |
6893 | 0 | case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD; |
6894 | 0 | case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD; |
6895 | 0 | case ARM::VLD4LNdAsm_8: Spacing = 1; return ARM::VLD4LNd8; |
6896 | 0 | case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16; |
6897 | 0 | case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32; |
6898 | 0 | case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16; |
6899 | 0 | case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32; |
6900 | | |
6901 | | // VLD4DUP |
6902 | 0 | case ARM::VLD4DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD; |
6903 | 0 | case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD; |
6904 | 0 | case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD; |
6905 | 0 | case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPq8_UPD; |
6906 | 0 | case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPq16_UPD; |
6907 | 0 | case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD; |
6908 | 0 | case ARM::VLD4DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD; |
6909 | 0 | case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD; |
6910 | 0 | case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD; |
6911 | 0 | case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD; |
6912 | 0 | case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD; |
6913 | 0 | case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD; |
6914 | 0 | case ARM::VLD4DUPdAsm_8: Spacing = 1; return ARM::VLD4DUPd8; |
6915 | 0 | case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16; |
6916 | 0 | case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32; |
6917 | 0 | case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8; |
6918 | 0 | case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16; |
6919 | 0 | case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32; |
6920 | | |
6921 | | // VLD4 |
6922 | 0 | case ARM::VLD4dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD; |
6923 | 0 | case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD; |
6924 | 0 | case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD; |
6925 | 0 | case ARM::VLD4qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD; |
6926 | 0 | case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD; |
6927 | 0 | case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD; |
6928 | 0 | case ARM::VLD4dWB_register_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD; |
6929 | 0 | case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD; |
6930 | 0 | case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD; |
6931 | 0 | case ARM::VLD4qWB_register_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD; |
6932 | 0 | case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD; |
6933 | 0 | case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD; |
6934 | 0 | case ARM::VLD4dAsm_8: Spacing = 1; return ARM::VLD4d8; |
6935 | 0 | case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16; |
6936 | 0 | case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32; |
6937 | 0 | case ARM::VLD4qAsm_8: Spacing = 2; return ARM::VLD4q8; |
6938 | 0 | case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16; |
6939 | 0 | case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32; |
6940 | 0 | } |
6941 | 0 | } |
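// The Spacing value produced by getRealVSTOpcode/getRealVLDOpcode is the
// register stride of the NEON list: 1 for consecutive D registers, 2 for the
// every-other-register (q / double-spaced) forms. A small illustrative helper
// (not in the source) showing how the expansions below walk the list with
// "getReg() + Spacing", "+ Spacing * 2", and so on:
static unsigned nthListRegister(unsigned FirstReg, unsigned Spacing,
                                unsigned N) {
  return FirstReg + Spacing * N; // N = 0, 1, 2, 3 for up to four registers
}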
6942 | | |
6943 | | bool ARMAsmParser::processInstruction(MCInst &Inst, |
6944 | | const OperandVector &Operands, |
6945 | 103k | MCStreamer &Out) { |
6946 | 103k | switch (Inst.getOpcode()) { |
6947 | | // Alias for alternate form of 'ldr{,b}t Rt, [Rn], #imm' instruction. |
6948 | 14 | case ARM::LDRT_POST: |
6949 | 14 | case ARM::LDRBT_POST: { |
6950 | 14 | const unsigned Opcode = |
6951 | 14 | (Inst.getOpcode() == ARM::LDRT_POST) ? ARM::LDRT_POST_IMM |
6952 | 14 | : ARM::LDRBT_POST_IMM; |
6953 | 14 | MCInst TmpInst(Inst.getAddress()); |
6954 | 14 | TmpInst.setOpcode(Opcode); |
6955 | 14 | TmpInst.addOperand(Inst.getOperand(0)); |
6956 | 14 | TmpInst.addOperand(Inst.getOperand(1)); |
6957 | 14 | TmpInst.addOperand(Inst.getOperand(1)); |
6958 | 14 | TmpInst.addOperand(MCOperand::createReg(0)); |
6959 | 14 | TmpInst.addOperand(MCOperand::createImm(0)); |
6960 | 14 | TmpInst.addOperand(Inst.getOperand(2)); |
6961 | 14 | TmpInst.addOperand(Inst.getOperand(3)); |
6962 | 14 | Inst = TmpInst; |
6963 | 14 | return true; |
6964 | 14 | } |
6965 | | // Alias for alternate form of 'str{,b}t Rt, [Rn], #imm' instruction. |
6966 | 0 | case ARM::STRT_POST: |
6967 | 0 | case ARM::STRBT_POST: { |
6968 | 0 | const unsigned Opcode = |
6969 | 0 | (Inst.getOpcode() == ARM::STRT_POST) ? ARM::STRT_POST_IMM |
6970 | 0 | : ARM::STRBT_POST_IMM; |
6971 | 0 | MCInst TmpInst(Inst.getAddress()); |
6972 | 0 | TmpInst.setOpcode(Opcode); |
6973 | 0 | TmpInst.addOperand(Inst.getOperand(1)); |
6974 | 0 | TmpInst.addOperand(Inst.getOperand(0)); |
6975 | 0 | TmpInst.addOperand(Inst.getOperand(1)); |
6976 | 0 | TmpInst.addOperand(MCOperand::createReg(0)); |
6977 | 0 | TmpInst.addOperand(MCOperand::createImm(0)); |
6978 | 0 | TmpInst.addOperand(Inst.getOperand(2)); |
6979 | 0 | TmpInst.addOperand(Inst.getOperand(3)); |
6980 | 0 | Inst = TmpInst; |
6981 | 0 | return true; |
6982 | 0 | } |
6983 | | // Alias for alternate form of 'ADR Rd, #imm' instruction. |
6984 | 3.89k | case ARM::ADDri: { |
6985 | 3.89k | if (Inst.getOperand(1).getReg() != ARM::PC || |
6986 | 3.89k | Inst.getOperand(5).getReg() != 0 || |
6987 | 3.89k | !(Inst.getOperand(2).isExpr() || Inst.getOperand(2).isImm())) |
6988 | 3.15k | return false; |
6989 | 741 | MCInst TmpInst(Inst.getAddress()); |
6990 | 741 | TmpInst.setOpcode(ARM::ADR); |
6991 | 741 | TmpInst.addOperand(Inst.getOperand(0)); |
6992 | 741 | if (Inst.getOperand(2).isImm()) { |
6993 | | // Immediate (mod_imm) will be in its encoded form, we must unencode it |
6994 | | // before passing it to the ADR instruction. |
6995 | 706 | unsigned Enc = Inst.getOperand(2).getImm(); |
6996 | 706 | TmpInst.addOperand(MCOperand::createImm( |
6997 | 706 | ARM_AM::rotr32(Enc & 0xFF, (Enc & 0xF00) >> 7))); |
6998 | 706 | } else { |
6999 | | // Turn PC-relative expression into absolute expression. |
7000 | | // Reading PC provides the start of the current instruction + 8 and |
7001 | | // the transform to adr is biased by that. |
7002 | 35 | MCSymbol *Dot = getContext().createTempSymbol(); |
7003 | 35 | Out.EmitLabel(Dot); |
7004 | 35 | const MCExpr *OpExpr = Inst.getOperand(2).getExpr(); |
7005 | 35 | const MCExpr *InstPC = MCSymbolRefExpr::create(Dot, |
7006 | 35 | MCSymbolRefExpr::VK_None, |
7007 | 35 | getContext()); |
7008 | 35 | const MCExpr *Const8 = MCConstantExpr::create(8, getContext()); |
7009 | 35 | const MCExpr *ReadPC = MCBinaryExpr::createAdd(InstPC, Const8, |
7010 | 35 | getContext()); |
7011 | 35 | const MCExpr *FixupAddr = MCBinaryExpr::createAdd(ReadPC, OpExpr, |
7012 | 35 | getContext()); |
7013 | 35 | TmpInst.addOperand(MCOperand::createExpr(FixupAddr)); |
7014 | 35 | } |
7015 | 741 | TmpInst.addOperand(Inst.getOperand(3)); |
7016 | 741 | TmpInst.addOperand(Inst.getOperand(4)); |
7017 | 741 | Inst = TmpInst; |
7018 | 741 | return true; |
7019 | 3.89k | } |
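// Worked sketch of the mod_imm decoding used above (the expression
// ARM_AM::rotr32(Enc & 0xFF, (Enc & 0xF00) >> 7)): an ARM modified immediate
// packs an 8-bit payload with a 4-bit rotation field, and the value is the
// payload rotated right by twice that field. For example 0x101 decodes to
// 0x40000000 (1 rotated right by 2). The expression-form branch instead biases
// the fixup by 8 because reading PC in ARM state yields the address of the
// current instruction + 8. Assumes 32-bit unsigned; sketch only.
static unsigned decodeARMModImm(unsigned Enc) {
  unsigned Imm8 = Enc & 0xFF;           // 8-bit payload
  unsigned Rot  = (Enc & 0xF00) >> 7;   // twice the 4-bit rotation field
  return Rot ? ((Imm8 >> Rot) | (Imm8 << (32 - Rot))) : Imm8;
}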
7020 | | // Aliases for alternate PC+imm syntax of LDR instructions. |
7021 | 0 | case ARM::t2LDRpcrel: |
7022 | | // Select the narrow version if the immediate will fit. |
7023 | 0 | if (Inst.getOperand(1).getImm() > 0 && |
7024 | 0 | Inst.getOperand(1).getImm() <= 0xff && |
7025 | 0 | !(static_cast<ARMOperand &>(*Operands[2]).isToken() && |
7026 | 0 | static_cast<ARMOperand &>(*Operands[2]).getToken() == ".w")) |
7027 | 0 | Inst.setOpcode(ARM::tLDRpci); |
7028 | 0 | else |
7029 | 0 | Inst.setOpcode(ARM::t2LDRpci); |
7030 | 0 | return true; |
7031 | 0 | case ARM::t2LDRBpcrel: |
7032 | 0 | Inst.setOpcode(ARM::t2LDRBpci); |
7033 | 0 | return true; |
7034 | 0 | case ARM::t2LDRHpcrel: |
7035 | 0 | Inst.setOpcode(ARM::t2LDRHpci); |
7036 | 0 | return true; |
7037 | 0 | case ARM::t2LDRSBpcrel: |
7038 | 0 | Inst.setOpcode(ARM::t2LDRSBpci); |
7039 | 0 | return true; |
7040 | 0 | case ARM::t2LDRSHpcrel: |
7041 | 0 | Inst.setOpcode(ARM::t2LDRSHpci); |
7042 | 0 | return true; |
7043 | | // Handle NEON VST complex aliases. |
7044 | 0 | case ARM::VST1LNdWB_register_Asm_8: |
7045 | 0 | case ARM::VST1LNdWB_register_Asm_16: |
7046 | 0 | case ARM::VST1LNdWB_register_Asm_32: { |
7047 | 0 | MCInst TmpInst(Inst.getAddress()); |
7048 | | // Shuffle the operands around so the lane index operand is in the |
7049 | | // right place. |
7050 | 0 | unsigned Spacing; |
7051 | 0 | TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); |
7052 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb |
7053 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn |
7054 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // alignment |
7055 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // Rm |
7056 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7057 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // lane |
7058 | 0 | TmpInst.addOperand(Inst.getOperand(5)); // CondCode |
7059 | 0 | TmpInst.addOperand(Inst.getOperand(6)); |
7060 | 0 | Inst = TmpInst; |
7061 | 0 | return true; |
7062 | 0 | } |
7063 | | |
7064 | 0 | case ARM::VST2LNdWB_register_Asm_8: |
7065 | 0 | case ARM::VST2LNdWB_register_Asm_16: |
7066 | 0 | case ARM::VST2LNdWB_register_Asm_32: |
7067 | 0 | case ARM::VST2LNqWB_register_Asm_16: |
7068 | 0 | case ARM::VST2LNqWB_register_Asm_32: { |
7069 | 0 | MCInst TmpInst(Inst.getAddress()); |
7070 | | // Shuffle the operands around so the lane index operand is in the |
7071 | | // right place. |
7072 | 0 | unsigned Spacing; |
7073 | 0 | TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); |
7074 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb |
7075 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn |
7076 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // alignment |
7077 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // Rm |
7078 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7079 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7080 | 0 | Spacing)); |
7081 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // lane |
7082 | 0 | TmpInst.addOperand(Inst.getOperand(5)); // CondCode |
7083 | 0 | TmpInst.addOperand(Inst.getOperand(6)); |
7084 | 0 | Inst = TmpInst; |
7085 | 0 | return true; |
7086 | 0 | } |
7087 | | |
7088 | 0 | case ARM::VST3LNdWB_register_Asm_8: |
7089 | 0 | case ARM::VST3LNdWB_register_Asm_16: |
7090 | 0 | case ARM::VST3LNdWB_register_Asm_32: |
7091 | 0 | case ARM::VST3LNqWB_register_Asm_16: |
7092 | 0 | case ARM::VST3LNqWB_register_Asm_32: { |
7093 | 0 | MCInst TmpInst(Inst.getAddress()); |
7094 | | // Shuffle the operands around so the lane index operand is in the |
7095 | | // right place. |
7096 | 0 | unsigned Spacing; |
7097 | 0 | TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); |
7098 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb |
7099 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn |
7100 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // alignment |
7101 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // Rm |
7102 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7103 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7104 | 0 | Spacing)); |
7105 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7106 | 0 | Spacing * 2)); |
7107 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // lane |
7108 | 0 | TmpInst.addOperand(Inst.getOperand(5)); // CondCode |
7109 | 0 | TmpInst.addOperand(Inst.getOperand(6)); |
7110 | 0 | Inst = TmpInst; |
7111 | 0 | return true; |
7112 | 0 | } |
7113 | | |
7114 | 0 | case ARM::VST4LNdWB_register_Asm_8: |
7115 | 0 | case ARM::VST4LNdWB_register_Asm_16: |
7116 | 0 | case ARM::VST4LNdWB_register_Asm_32: |
7117 | 0 | case ARM::VST4LNqWB_register_Asm_16: |
7118 | 0 | case ARM::VST4LNqWB_register_Asm_32: { |
7119 | 0 | MCInst TmpInst(Inst.getAddress()); |
7120 | | // Shuffle the operands around so the lane index operand is in the |
7121 | | // right place. |
7122 | 0 | unsigned Spacing; |
7123 | 0 | TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); |
7124 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb |
7125 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn |
7126 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // alignment |
7127 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // Rm |
7128 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7129 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7130 | 0 | Spacing)); |
7131 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7132 | 0 | Spacing * 2)); |
7133 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7134 | 0 | Spacing * 3)); |
7135 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // lane |
7136 | 0 | TmpInst.addOperand(Inst.getOperand(5)); // CondCode |
7137 | 0 | TmpInst.addOperand(Inst.getOperand(6)); |
7138 | 0 | Inst = TmpInst; |
7139 | 0 | return true; |
7140 | 0 | } |
7141 | | |
7142 | 0 | case ARM::VST1LNdWB_fixed_Asm_8: |
7143 | 0 | case ARM::VST1LNdWB_fixed_Asm_16: |
7144 | 0 | case ARM::VST1LNdWB_fixed_Asm_32: { |
7145 | 0 | MCInst TmpInst(Inst.getAddress()); |
7146 | | // Shuffle the operands around so the lane index operand is in the |
7147 | | // right place. |
7148 | 0 | unsigned Spacing; |
7149 | 0 | TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); |
7150 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb |
7151 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn |
7152 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // alignment |
7153 | 0 | TmpInst.addOperand(MCOperand::createReg(0)); // Rm |
7154 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7155 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // lane |
7156 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // CondCode |
7157 | 0 | TmpInst.addOperand(Inst.getOperand(5)); |
7158 | 0 | Inst = TmpInst; |
7159 | 0 | return true; |
7160 | 0 | } |
7161 | | |
7162 | 0 | case ARM::VST2LNdWB_fixed_Asm_8: |
7163 | 0 | case ARM::VST2LNdWB_fixed_Asm_16: |
7164 | 0 | case ARM::VST2LNdWB_fixed_Asm_32: |
7165 | 0 | case ARM::VST2LNqWB_fixed_Asm_16: |
7166 | 0 | case ARM::VST2LNqWB_fixed_Asm_32: { |
7167 | 0 | MCInst TmpInst(Inst.getAddress()); |
7168 | | // Shuffle the operands around so the lane index operand is in the |
7169 | | // right place. |
7170 | 0 | unsigned Spacing; |
7171 | 0 | TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); |
7172 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb |
7173 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn |
7174 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // alignment |
7175 | 0 | TmpInst.addOperand(MCOperand::createReg(0)); // Rm |
7176 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7177 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7178 | 0 | Spacing)); |
7179 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // lane |
7180 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // CondCode |
7181 | 0 | TmpInst.addOperand(Inst.getOperand(5)); |
7182 | 0 | Inst = TmpInst; |
7183 | 0 | return true; |
7184 | 0 | } |
7185 | | |
7186 | 0 | case ARM::VST3LNdWB_fixed_Asm_8: |
7187 | 0 | case ARM::VST3LNdWB_fixed_Asm_16: |
7188 | 0 | case ARM::VST3LNdWB_fixed_Asm_32: |
7189 | 0 | case ARM::VST3LNqWB_fixed_Asm_16: |
7190 | 0 | case ARM::VST3LNqWB_fixed_Asm_32: { |
7191 | 0 | MCInst TmpInst(Inst.getAddress()); |
7192 | | // Shuffle the operands around so the lane index operand is in the |
7193 | | // right place. |
7194 | 0 | unsigned Spacing; |
7195 | 0 | TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); |
7196 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb |
7197 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn |
7198 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // alignment |
7199 | 0 | TmpInst.addOperand(MCOperand::createReg(0)); // Rm |
7200 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7201 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7202 | 0 | Spacing)); |
7203 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7204 | 0 | Spacing * 2)); |
7205 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // lane |
7206 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // CondCode |
7207 | 0 | TmpInst.addOperand(Inst.getOperand(5)); |
7208 | 0 | Inst = TmpInst; |
7209 | 0 | return true; |
7210 | 0 | } |
7211 | | |
7212 | 0 | case ARM::VST4LNdWB_fixed_Asm_8: |
7213 | 0 | case ARM::VST4LNdWB_fixed_Asm_16: |
7214 | 0 | case ARM::VST4LNdWB_fixed_Asm_32: |
7215 | 0 | case ARM::VST4LNqWB_fixed_Asm_16: |
7216 | 0 | case ARM::VST4LNqWB_fixed_Asm_32: { |
7217 | 0 | MCInst TmpInst(Inst.getAddress()); |
7218 | | // Shuffle the operands around so the lane index operand is in the |
7219 | | // right place. |
7220 | 0 | unsigned Spacing; |
7221 | 0 | TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); |
7222 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb |
7223 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn |
7224 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // alignment |
7225 | 0 | TmpInst.addOperand(MCOperand::createReg(0)); // Rm |
7226 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7227 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7228 | 0 | Spacing)); |
7229 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7230 | 0 | Spacing * 2)); |
7231 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7232 | 0 | Spacing * 3)); |
7233 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // lane |
7234 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // CondCode |
7235 | 0 | TmpInst.addOperand(Inst.getOperand(5)); |
7236 | 0 | Inst = TmpInst; |
7237 | 0 | return true; |
7238 | 0 | } |
7239 | | |
7240 | 0 | case ARM::VST1LNdAsm_8: |
7241 | 0 | case ARM::VST1LNdAsm_16: |
7242 | 0 | case ARM::VST1LNdAsm_32: { |
7243 | 0 | MCInst TmpInst(Inst.getAddress()); |
7244 | | // Shuffle the operands around so the lane index operand is in the |
7245 | | // right place. |
7246 | 0 | unsigned Spacing; |
7247 | 0 | TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); |
7248 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn |
7249 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // alignment |
7250 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7251 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // lane |
7252 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // CondCode |
7253 | 0 | TmpInst.addOperand(Inst.getOperand(5)); |
7254 | 0 | Inst = TmpInst; |
7255 | 0 | return true; |
7256 | 0 | } |
7257 | | |
7258 | 0 | case ARM::VST2LNdAsm_8: |
7259 | 0 | case ARM::VST2LNdAsm_16: |
7260 | 0 | case ARM::VST2LNdAsm_32: |
7261 | 0 | case ARM::VST2LNqAsm_16: |
7262 | 0 | case ARM::VST2LNqAsm_32: { |
7263 | 0 | MCInst TmpInst(Inst.getAddress()); |
7264 | | // Shuffle the operands around so the lane index operand is in the |
7265 | | // right place. |
7266 | 0 | unsigned Spacing; |
7267 | 0 | TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); |
7268 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn |
7269 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // alignment |
7270 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7271 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7272 | 0 | Spacing)); |
7273 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // lane |
7274 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // CondCode |
7275 | 0 | TmpInst.addOperand(Inst.getOperand(5)); |
7276 | 0 | Inst = TmpInst; |
7277 | 0 | return true; |
7278 | 0 | } |
7279 | | |
7280 | 0 | case ARM::VST3LNdAsm_8: |
7281 | 0 | case ARM::VST3LNdAsm_16: |
7282 | 0 | case ARM::VST3LNdAsm_32: |
7283 | 0 | case ARM::VST3LNqAsm_16: |
7284 | 0 | case ARM::VST3LNqAsm_32: { |
7285 | 0 | MCInst TmpInst(Inst.getAddress()); |
7286 | | // Shuffle the operands around so the lane index operand is in the |
7287 | | // right place. |
7288 | 0 | unsigned Spacing; |
7289 | 0 | TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); |
7290 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn |
7291 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // alignment |
7292 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7293 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7294 | 0 | Spacing)); |
7295 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7296 | 0 | Spacing * 2)); |
7297 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // lane |
7298 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // CondCode |
7299 | 0 | TmpInst.addOperand(Inst.getOperand(5)); |
7300 | 0 | Inst = TmpInst; |
7301 | 0 | return true; |
7302 | 0 | } |
7303 | | |
7304 | 0 | case ARM::VST4LNdAsm_8: |
7305 | 0 | case ARM::VST4LNdAsm_16: |
7306 | 0 | case ARM::VST4LNdAsm_32: |
7307 | 0 | case ARM::VST4LNqAsm_16: |
7308 | 0 | case ARM::VST4LNqAsm_32: { |
7309 | 0 | MCInst TmpInst(Inst.getAddress()); |
7310 | | // Shuffle the operands around so the lane index operand is in the |
7311 | | // right place. |
7312 | 0 | unsigned Spacing; |
7313 | 0 | TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); |
7314 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn |
7315 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // alignment |
7316 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7317 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7318 | 0 | Spacing)); |
7319 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7320 | 0 | Spacing * 2)); |
7321 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7322 | 0 | Spacing * 3)); |
7323 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // lane |
7324 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // CondCode |
7325 | 0 | TmpInst.addOperand(Inst.getOperand(5)); |
7326 | 0 | Inst = TmpInst; |
7327 | 0 | return true; |
7328 | 0 | } |
7329 | | |
7330 | | // Handle NEON VLD complex aliases. |
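| | // These aliases accept the lane-indexed source syntax, e.g. (illustrative):
| | //   vld1.8 {d0[1]}, [r2], r3   (register writeback)
| | //   vld1.8 {d0[1]}, [r2]!      (fixed writeback)
| | //   vld1.8 {d0[1]}, [r2]       (no writeback)
| | // The expansion below supplies the tied source register and, for the
| | // fixed-writeback forms, a zero Rm operand.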
7331 | 0 | case ARM::VLD1LNdWB_register_Asm_8: |
7332 | 0 | case ARM::VLD1LNdWB_register_Asm_16: |
7333 | 0 | case ARM::VLD1LNdWB_register_Asm_32: { |
7334 | 0 | MCInst TmpInst(Inst.getAddress()); |
7335 | | // Shuffle the operands around so the lane index operand is in the |
7336 | | // right place. |
7337 | 0 | unsigned Spacing; |
7338 | 0 | TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); |
7339 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7340 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb |
7341 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn |
7342 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // alignment |
7343 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // Rm |
7344 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) |
7345 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // lane |
7346 | 0 | TmpInst.addOperand(Inst.getOperand(5)); // CondCode |
7347 | 0 | TmpInst.addOperand(Inst.getOperand(6)); |
7348 | 0 | Inst = TmpInst; |
7349 | 0 | return true; |
7350 | 0 | } |
7351 | | |
7352 | 0 | case ARM::VLD2LNdWB_register_Asm_8: |
7353 | 0 | case ARM::VLD2LNdWB_register_Asm_16: |
7354 | 0 | case ARM::VLD2LNdWB_register_Asm_32: |
7355 | 0 | case ARM::VLD2LNqWB_register_Asm_16: |
7356 | 0 | case ARM::VLD2LNqWB_register_Asm_32: { |
7357 | 0 | MCInst TmpInst(Inst.getAddress()); |
7358 | | // Shuffle the operands around so the lane index operand is in the |
7359 | | // right place. |
7360 | 0 | unsigned Spacing; |
7361 | 0 | TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); |
7362 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7363 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7364 | 0 | Spacing)); |
7365 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb |
7366 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn |
7367 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // alignment |
7368 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // Rm |
7369 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) |
7370 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7371 | 0 | Spacing)); |
7372 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // lane |
7373 | 0 | TmpInst.addOperand(Inst.getOperand(5)); // CondCode |
7374 | 0 | TmpInst.addOperand(Inst.getOperand(6)); |
7375 | 0 | Inst = TmpInst; |
7376 | 0 | return true; |
7377 | 0 | } |
7378 | | |
7379 | 0 | case ARM::VLD3LNdWB_register_Asm_8: |
7380 | 0 | case ARM::VLD3LNdWB_register_Asm_16: |
7381 | 0 | case ARM::VLD3LNdWB_register_Asm_32: |
7382 | 0 | case ARM::VLD3LNqWB_register_Asm_16: |
7383 | 0 | case ARM::VLD3LNqWB_register_Asm_32: { |
7384 | 0 | MCInst TmpInst(Inst.getAddress()); |
7385 | | // Shuffle the operands around so the lane index operand is in the |
7386 | | // right place. |
7387 | 0 | unsigned Spacing; |
7388 | 0 | TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); |
7389 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7390 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7391 | 0 | Spacing)); |
7392 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7393 | 0 | Spacing * 2)); |
7394 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb |
7395 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn |
7396 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // alignment |
7397 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // Rm |
7398 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) |
7399 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7400 | 0 | Spacing)); |
7401 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7402 | 0 | Spacing * 2)); |
7403 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // lane |
7404 | 0 | TmpInst.addOperand(Inst.getOperand(5)); // CondCode |
7405 | 0 | TmpInst.addOperand(Inst.getOperand(6)); |
7406 | 0 | Inst = TmpInst; |
7407 | 0 | return true; |
7408 | 0 | } |
7409 | | |
7410 | 0 | case ARM::VLD4LNdWB_register_Asm_8: |
7411 | 0 | case ARM::VLD4LNdWB_register_Asm_16: |
7412 | 0 | case ARM::VLD4LNdWB_register_Asm_32: |
7413 | 0 | case ARM::VLD4LNqWB_register_Asm_16: |
7414 | 0 | case ARM::VLD4LNqWB_register_Asm_32: { |
7415 | 0 | MCInst TmpInst(Inst.getAddress()); |
7416 | | // Shuffle the operands around so the lane index operand is in the |
7417 | | // right place. |
7418 | 0 | unsigned Spacing; |
7419 | 0 | TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); |
7420 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7421 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7422 | 0 | Spacing)); |
7423 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7424 | 0 | Spacing * 2)); |
7425 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7426 | 0 | Spacing * 3)); |
7427 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb |
7428 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn |
7429 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // alignment |
7430 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // Rm |
7431 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) |
7432 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7433 | 0 | Spacing)); |
7434 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7435 | 0 | Spacing * 2)); |
7436 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7437 | 0 | Spacing * 3)); |
7438 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // lane |
7439 | 0 | TmpInst.addOperand(Inst.getOperand(5)); // CondCode |
7440 | 0 | TmpInst.addOperand(Inst.getOperand(6)); |
7441 | 0 | Inst = TmpInst; |
7442 | 0 | return true; |
7443 | 0 | } |
7444 | | |
7445 | 0 | case ARM::VLD1LNdWB_fixed_Asm_8: |
7446 | 0 | case ARM::VLD1LNdWB_fixed_Asm_16: |
7447 | 0 | case ARM::VLD1LNdWB_fixed_Asm_32: { |
7448 | 0 | MCInst TmpInst(Inst.getAddress()); |
7449 | | // Shuffle the operands around so the lane index operand is in the |
7450 | | // right place. |
7451 | 0 | unsigned Spacing; |
7452 | 0 | TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); |
7453 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7454 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb |
7455 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn |
7456 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // alignment |
7457 | 0 | TmpInst.addOperand(MCOperand::createReg(0)); // Rm |
7458 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) |
7459 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // lane |
7460 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // CondCode |
7461 | 0 | TmpInst.addOperand(Inst.getOperand(5)); |
7462 | 0 | Inst = TmpInst; |
7463 | 0 | return true; |
7464 | 0 | } |
7465 | | |
7466 | 0 | case ARM::VLD2LNdWB_fixed_Asm_8: |
7467 | 0 | case ARM::VLD2LNdWB_fixed_Asm_16: |
7468 | 0 | case ARM::VLD2LNdWB_fixed_Asm_32: |
7469 | 0 | case ARM::VLD2LNqWB_fixed_Asm_16: |
7470 | 0 | case ARM::VLD2LNqWB_fixed_Asm_32: { |
7471 | 0 | MCInst TmpInst(Inst.getAddress()); |
7472 | | // Shuffle the operands around so the lane index operand is in the |
7473 | | // right place. |
7474 | 0 | unsigned Spacing; |
7475 | 0 | TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); |
7476 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7477 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7478 | 0 | Spacing)); |
7479 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb |
7480 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn |
7481 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // alignment |
7482 | 0 | TmpInst.addOperand(MCOperand::createReg(0)); // Rm |
7483 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) |
7484 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7485 | 0 | Spacing)); |
7486 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // lane |
7487 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // CondCode |
7488 | 0 | TmpInst.addOperand(Inst.getOperand(5)); |
7489 | 0 | Inst = TmpInst; |
7490 | 0 | return true; |
7491 | 0 | } |
7492 | | |
7493 | 0 | case ARM::VLD3LNdWB_fixed_Asm_8: |
7494 | 0 | case ARM::VLD3LNdWB_fixed_Asm_16: |
7495 | 0 | case ARM::VLD3LNdWB_fixed_Asm_32: |
7496 | 0 | case ARM::VLD3LNqWB_fixed_Asm_16: |
7497 | 0 | case ARM::VLD3LNqWB_fixed_Asm_32: { |
7498 | 0 | MCInst TmpInst(Inst.getAddress()); |
7499 | | // Shuffle the operands around so the lane index operand is in the |
7500 | | // right place. |
7501 | 0 | unsigned Spacing; |
7502 | 0 | TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); |
7503 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7504 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7505 | 0 | Spacing)); |
7506 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7507 | 0 | Spacing * 2)); |
7508 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb |
7509 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn |
7510 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // alignment |
7511 | 0 | TmpInst.addOperand(MCOperand::createReg(0)); // Rm |
7512 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) |
7513 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7514 | 0 | Spacing)); |
7515 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7516 | 0 | Spacing * 2)); |
7517 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // lane |
7518 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // CondCode |
7519 | 0 | TmpInst.addOperand(Inst.getOperand(5)); |
7520 | 0 | Inst = TmpInst; |
7521 | 0 | return true; |
7522 | 0 | } |
7523 | | |
7524 | 0 | case ARM::VLD4LNdWB_fixed_Asm_8: |
7525 | 0 | case ARM::VLD4LNdWB_fixed_Asm_16: |
7526 | 0 | case ARM::VLD4LNdWB_fixed_Asm_32: |
7527 | 0 | case ARM::VLD4LNqWB_fixed_Asm_16: |
7528 | 0 | case ARM::VLD4LNqWB_fixed_Asm_32: { |
7529 | 0 | MCInst TmpInst(Inst.getAddress()); |
7530 | | // Shuffle the operands around so the lane index operand is in the |
7531 | | // right place. |
7532 | 0 | unsigned Spacing; |
7533 | 0 | TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); |
7534 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7535 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7536 | 0 | Spacing)); |
7537 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7538 | 0 | Spacing * 2)); |
7539 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7540 | 0 | Spacing * 3)); |
7541 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb |
7542 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn |
7543 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // alignment |
7544 | 0 | TmpInst.addOperand(MCOperand::createReg(0)); // Rm |
7545 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) |
7546 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7547 | 0 | Spacing)); |
7548 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7549 | 0 | Spacing * 2)); |
7550 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7551 | 0 | Spacing * 3)); |
7552 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // lane |
7553 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // CondCode |
7554 | 0 | TmpInst.addOperand(Inst.getOperand(5)); |
7555 | 0 | Inst = TmpInst; |
7556 | 0 | return true; |
7557 | 0 | } |
7558 | | |
7559 | 0 | case ARM::VLD1LNdAsm_8: |
7560 | 0 | case ARM::VLD1LNdAsm_16: |
7561 | 0 | case ARM::VLD1LNdAsm_32: { |
7562 | 0 | MCInst TmpInst(Inst.getAddress()); |
7563 | | // Shuffle the operands around so the lane index operand is in the |
7564 | | // right place. |
7565 | 0 | unsigned Spacing; |
7566 | 0 | TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); |
7567 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7568 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn |
7569 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // alignment |
7570 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) |
7571 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // lane |
7572 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // CondCode |
7573 | 0 | TmpInst.addOperand(Inst.getOperand(5)); |
7574 | 0 | Inst = TmpInst; |
7575 | 0 | return true; |
7576 | 0 | } |
7577 | | |
7578 | 0 | case ARM::VLD2LNdAsm_8: |
7579 | 0 | case ARM::VLD2LNdAsm_16: |
7580 | 0 | case ARM::VLD2LNdAsm_32: |
7581 | 0 | case ARM::VLD2LNqAsm_16: |
7582 | 0 | case ARM::VLD2LNqAsm_32: { |
7583 | 0 | MCInst TmpInst(Inst.getAddress()); |
7584 | | // Shuffle the operands around so the lane index operand is in the |
7585 | | // right place. |
7586 | 0 | unsigned Spacing; |
7587 | 0 | TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); |
7588 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7589 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7590 | 0 | Spacing)); |
7591 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn |
7592 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // alignment |
7593 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) |
7594 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7595 | 0 | Spacing)); |
7596 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // lane |
7597 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // CondCode |
7598 | 0 | TmpInst.addOperand(Inst.getOperand(5)); |
7599 | 0 | Inst = TmpInst; |
7600 | 0 | return true; |
7601 | 0 | } |
7602 | | |
7603 | 0 | case ARM::VLD3LNdAsm_8: |
7604 | 0 | case ARM::VLD3LNdAsm_16: |
7605 | 0 | case ARM::VLD3LNdAsm_32: |
7606 | 0 | case ARM::VLD3LNqAsm_16: |
7607 | 0 | case ARM::VLD3LNqAsm_32: { |
7608 | 0 | MCInst TmpInst(Inst.getAddress()); |
7609 | | // Shuffle the operands around so the lane index operand is in the |
7610 | | // right place. |
7611 | 0 | unsigned Spacing; |
7612 | 0 | TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); |
7613 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7614 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7615 | 0 | Spacing)); |
7616 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7617 | 0 | Spacing * 2)); |
7618 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn |
7619 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // alignment |
7620 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) |
7621 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7622 | 0 | Spacing)); |
7623 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7624 | 0 | Spacing * 2)); |
7625 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // lane |
7626 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // CondCode |
7627 | 0 | TmpInst.addOperand(Inst.getOperand(5)); |
7628 | 0 | Inst = TmpInst; |
7629 | 0 | return true; |
7630 | 0 | } |
7631 | | |
7632 | 0 | case ARM::VLD4LNdAsm_8: |
7633 | 0 | case ARM::VLD4LNdAsm_16: |
7634 | 0 | case ARM::VLD4LNdAsm_32: |
7635 | 0 | case ARM::VLD4LNqAsm_16: |
7636 | 0 | case ARM::VLD4LNqAsm_32: { |
7637 | 0 | MCInst TmpInst(Inst.getAddress()); |
7638 | | // Shuffle the operands around so the lane index operand is in the |
7639 | | // right place. |
7640 | 0 | unsigned Spacing; |
7641 | 0 | TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); |
7642 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7643 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7644 | 0 | Spacing)); |
7645 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7646 | 0 | Spacing * 2)); |
7647 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7648 | 0 | Spacing * 3)); |
7649 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // Rn |
7650 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // alignment |
7651 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) |
7652 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7653 | 0 | Spacing)); |
7654 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7655 | 0 | Spacing * 2)); |
7656 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7657 | 0 | Spacing * 3)); |
7658 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // lane |
7659 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // CondCode |
7660 | 0 | TmpInst.addOperand(Inst.getOperand(5)); |
7661 | 0 | Inst = TmpInst; |
7662 | 0 | return true; |
7663 | 0 | } |
7664 | | |
7665 | | // VLD3DUP single 3-element structure to all lanes instructions. |
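| | // Assembly form, e.g.: vld3.8 {d0[], d1[], d2[]}, [r1]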
7666 | 0 | case ARM::VLD3DUPdAsm_8: |
7667 | 0 | case ARM::VLD3DUPdAsm_16: |
7668 | 0 | case ARM::VLD3DUPdAsm_32: |
7669 | 0 | case ARM::VLD3DUPqAsm_8: |
7670 | 0 | case ARM::VLD3DUPqAsm_16: |
7671 | 0 | case ARM::VLD3DUPqAsm_32: { |
7672 | 0 | MCInst TmpInst(Inst.getAddress()); |
7673 | 0 | unsigned Spacing; |
7674 | 0 | TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); |
7675 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7676 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7677 | 0 | Spacing)); |
7678 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7679 | 0 | Spacing * 2)); |
7680 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // Rn |
7681 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // alignment |
7682 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // CondCode |
7683 | 0 | TmpInst.addOperand(Inst.getOperand(4)); |
7684 | 0 | Inst = TmpInst; |
7685 | 0 | return true; |
7686 | 0 | } |
7687 | | |
7688 | 0 | case ARM::VLD3DUPdWB_fixed_Asm_8: |
7689 | 0 | case ARM::VLD3DUPdWB_fixed_Asm_16: |
7690 | 0 | case ARM::VLD3DUPdWB_fixed_Asm_32: |
7691 | 0 | case ARM::VLD3DUPqWB_fixed_Asm_8: |
7692 | 0 | case ARM::VLD3DUPqWB_fixed_Asm_16: |
7693 | 0 | case ARM::VLD3DUPqWB_fixed_Asm_32: { |
7694 | 0 | MCInst TmpInst(Inst.getAddress()); |
7695 | 0 | unsigned Spacing; |
7696 | 0 | TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); |
7697 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7698 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7699 | 0 | Spacing)); |
7700 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7701 | 0 | Spacing * 2)); |
7702 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // Rn |
7703 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn |
7704 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // alignment |
7705 | 0 | TmpInst.addOperand(MCOperand::createReg(0)); // Rm |
7706 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // CondCode |
7707 | 0 | TmpInst.addOperand(Inst.getOperand(4)); |
7708 | 0 | Inst = TmpInst; |
7709 | 0 | return true; |
7710 | 0 | } |
7711 | | |
7712 | 0 | case ARM::VLD3DUPdWB_register_Asm_8: |
7713 | 0 | case ARM::VLD3DUPdWB_register_Asm_16: |
7714 | 0 | case ARM::VLD3DUPdWB_register_Asm_32: |
7715 | 0 | case ARM::VLD3DUPqWB_register_Asm_8: |
7716 | 0 | case ARM::VLD3DUPqWB_register_Asm_16: |
7717 | 0 | case ARM::VLD3DUPqWB_register_Asm_32: { |
7718 | 0 | MCInst TmpInst(Inst.getAddress()); |
7719 | 0 | unsigned Spacing; |
7720 | 0 | TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); |
7721 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7722 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7723 | 0 | Spacing)); |
7724 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7725 | 0 | Spacing * 2)); |
7726 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // Rn |
7727 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn |
7728 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // alignment |
7729 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // Rm |
7730 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // CondCode |
7731 | 0 | TmpInst.addOperand(Inst.getOperand(5)); |
7732 | 0 | Inst = TmpInst; |
7733 | 0 | return true; |
7734 | 0 | } |
7735 | | |
7736 | | // VLD3 multiple 3-element structure instructions. |
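| | // Assembly form, e.g.: vld3.8 {d0, d1, d2}, [r1]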
7737 | 0 | case ARM::VLD3dAsm_8: |
7738 | 0 | case ARM::VLD3dAsm_16: |
7739 | 0 | case ARM::VLD3dAsm_32: |
7740 | 0 | case ARM::VLD3qAsm_8: |
7741 | 0 | case ARM::VLD3qAsm_16: |
7742 | 0 | case ARM::VLD3qAsm_32: { |
7743 | 0 | MCInst TmpInst(Inst.getAddress()); |
7744 | 0 | unsigned Spacing; |
7745 | 0 | TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); |
7746 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7747 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7748 | 0 | Spacing)); |
7749 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7750 | 0 | Spacing * 2)); |
7751 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // Rn |
7752 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // alignment |
7753 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // CondCode |
7754 | 0 | TmpInst.addOperand(Inst.getOperand(4)); |
7755 | 0 | Inst = TmpInst; |
7756 | 0 | return true; |
7757 | 0 | } |
7758 | | |
7759 | 0 | case ARM::VLD3dWB_fixed_Asm_8: |
7760 | 0 | case ARM::VLD3dWB_fixed_Asm_16: |
7761 | 0 | case ARM::VLD3dWB_fixed_Asm_32: |
7762 | 0 | case ARM::VLD3qWB_fixed_Asm_8: |
7763 | 0 | case ARM::VLD3qWB_fixed_Asm_16: |
7764 | 0 | case ARM::VLD3qWB_fixed_Asm_32: { |
7765 | 0 | MCInst TmpInst(Inst.getAddress()); |
7766 | 0 | unsigned Spacing; |
7767 | 0 | TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); |
7768 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7769 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7770 | 0 | Spacing)); |
7771 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7772 | 0 | Spacing * 2)); |
7773 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // Rn |
7774 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn |
7775 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // alignment |
7776 | 0 | TmpInst.addOperand(MCOperand::createReg(0)); // Rm |
7777 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // CondCode |
7778 | 0 | TmpInst.addOperand(Inst.getOperand(4)); |
7779 | 0 | Inst = TmpInst; |
7780 | 0 | return true; |
7781 | 0 | } |
7782 | | |
7783 | 0 | case ARM::VLD3dWB_register_Asm_8: |
7784 | 0 | case ARM::VLD3dWB_register_Asm_16: |
7785 | 0 | case ARM::VLD3dWB_register_Asm_32: |
7786 | 0 | case ARM::VLD3qWB_register_Asm_8: |
7787 | 0 | case ARM::VLD3qWB_register_Asm_16: |
7788 | 0 | case ARM::VLD3qWB_register_Asm_32: { |
7789 | 0 | MCInst TmpInst(Inst.getAddress()); |
7790 | 0 | unsigned Spacing; |
7791 | 0 | TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); |
7792 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7793 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7794 | 0 | Spacing)); |
7795 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7796 | 0 | Spacing * 2)); |
7797 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // Rn |
7798 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn |
7799 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // alignment |
7800 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // Rm |
7801 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // CondCode |
7802 | 0 | TmpInst.addOperand(Inst.getOperand(5)); |
7803 | 0 | Inst = TmpInst; |
7804 | 0 | return true; |
7805 | 0 | } |
7806 | | |
7807 | | // VLD4DUP single 4-element structure to all lanes instructions.
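| | // Assembly form, e.g.: vld4.8 {d0[], d1[], d2[], d3[]}, [r1]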
7808 | 0 | case ARM::VLD4DUPdAsm_8: |
7809 | 0 | case ARM::VLD4DUPdAsm_16: |
7810 | 0 | case ARM::VLD4DUPdAsm_32: |
7811 | 0 | case ARM::VLD4DUPqAsm_8: |
7812 | 0 | case ARM::VLD4DUPqAsm_16: |
7813 | 0 | case ARM::VLD4DUPqAsm_32: { |
7814 | 0 | MCInst TmpInst(Inst.getAddress()); |
7815 | 0 | unsigned Spacing; |
7816 | 0 | TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); |
7817 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7818 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7819 | 0 | Spacing)); |
7820 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7821 | 0 | Spacing * 2)); |
7822 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7823 | 0 | Spacing * 3)); |
7824 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // Rn |
7825 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // alignment |
7826 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // CondCode |
7827 | 0 | TmpInst.addOperand(Inst.getOperand(4)); |
7828 | 0 | Inst = TmpInst; |
7829 | 0 | return true; |
7830 | 0 | } |
7831 | | |
7832 | 0 | case ARM::VLD4DUPdWB_fixed_Asm_8: |
7833 | 0 | case ARM::VLD4DUPdWB_fixed_Asm_16: |
7834 | 0 | case ARM::VLD4DUPdWB_fixed_Asm_32: |
7835 | 0 | case ARM::VLD4DUPqWB_fixed_Asm_8: |
7836 | 0 | case ARM::VLD4DUPqWB_fixed_Asm_16: |
7837 | 0 | case ARM::VLD4DUPqWB_fixed_Asm_32: { |
7838 | 0 | MCInst TmpInst(Inst.getAddress()); |
7839 | 0 | unsigned Spacing; |
7840 | 0 | TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); |
7841 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7842 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7843 | 0 | Spacing)); |
7844 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7845 | 0 | Spacing * 2)); |
7846 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7847 | 0 | Spacing * 3)); |
7848 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // Rn |
7849 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn |
7850 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // alignment |
7851 | 0 | TmpInst.addOperand(MCOperand::createReg(0)); // Rm |
7852 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // CondCode |
7853 | 0 | TmpInst.addOperand(Inst.getOperand(4)); |
7854 | 0 | Inst = TmpInst; |
7855 | 0 | return true; |
7856 | 0 | } |
7857 | | |
7858 | 0 | case ARM::VLD4DUPdWB_register_Asm_8: |
7859 | 0 | case ARM::VLD4DUPdWB_register_Asm_16: |
7860 | 0 | case ARM::VLD4DUPdWB_register_Asm_32: |
7861 | 0 | case ARM::VLD4DUPqWB_register_Asm_8: |
7862 | 0 | case ARM::VLD4DUPqWB_register_Asm_16: |
7863 | 0 | case ARM::VLD4DUPqWB_register_Asm_32: { |
7864 | 0 | MCInst TmpInst(Inst.getAddress()); |
7865 | 0 | unsigned Spacing; |
7866 | 0 | TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); |
7867 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7868 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7869 | 0 | Spacing)); |
7870 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7871 | 0 | Spacing * 2)); |
7872 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7873 | 0 | Spacing * 3)); |
7874 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // Rn |
7875 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn |
7876 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // alignment |
7877 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // Rm |
7878 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // CondCode |
7879 | 0 | TmpInst.addOperand(Inst.getOperand(5)); |
7880 | 0 | Inst = TmpInst; |
7881 | 0 | return true; |
7882 | 0 | } |
7883 | | |
7884 | | // VLD4 multiple 4-element structure instructions. |
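| | // Assembly form, e.g.: vld4.8 {d0, d1, d2, d3}, [r1]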
7885 | 0 | case ARM::VLD4dAsm_8: |
7886 | 0 | case ARM::VLD4dAsm_16: |
7887 | 0 | case ARM::VLD4dAsm_32: |
7888 | 0 | case ARM::VLD4qAsm_8: |
7889 | 0 | case ARM::VLD4qAsm_16: |
7890 | 0 | case ARM::VLD4qAsm_32: { |
7891 | 0 | MCInst TmpInst(Inst.getAddress()); |
7892 | 0 | unsigned Spacing; |
7893 | 0 | TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); |
7894 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7895 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7896 | 0 | Spacing)); |
7897 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7898 | 0 | Spacing * 2)); |
7899 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7900 | 0 | Spacing * 3)); |
7901 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // Rn |
7902 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // alignment |
7903 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // CondCode |
7904 | 0 | TmpInst.addOperand(Inst.getOperand(4)); |
7905 | 0 | Inst = TmpInst; |
7906 | 0 | return true; |
7907 | 0 | } |
7908 | | |
7909 | 0 | case ARM::VLD4dWB_fixed_Asm_8: |
7910 | 0 | case ARM::VLD4dWB_fixed_Asm_16: |
7911 | 0 | case ARM::VLD4dWB_fixed_Asm_32: |
7912 | 0 | case ARM::VLD4qWB_fixed_Asm_8: |
7913 | 0 | case ARM::VLD4qWB_fixed_Asm_16: |
7914 | 0 | case ARM::VLD4qWB_fixed_Asm_32: { |
7915 | 0 | MCInst TmpInst(Inst.getAddress()); |
7916 | 0 | unsigned Spacing; |
7917 | 0 | TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); |
7918 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7919 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7920 | 0 | Spacing)); |
7921 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7922 | 0 | Spacing * 2)); |
7923 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7924 | 0 | Spacing * 3)); |
7925 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // Rn |
7926 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn |
7927 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // alignment |
7928 | 0 | TmpInst.addOperand(MCOperand::createReg(0)); // Rm |
7929 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // CondCode |
7930 | 0 | TmpInst.addOperand(Inst.getOperand(4)); |
7931 | 0 | Inst = TmpInst; |
7932 | 0 | return true; |
7933 | 0 | } |
7934 | | |
7935 | 0 | case ARM::VLD4dWB_register_Asm_8: |
7936 | 0 | case ARM::VLD4dWB_register_Asm_16: |
7937 | 0 | case ARM::VLD4dWB_register_Asm_32: |
7938 | 0 | case ARM::VLD4qWB_register_Asm_8: |
7939 | 0 | case ARM::VLD4qWB_register_Asm_16: |
7940 | 0 | case ARM::VLD4qWB_register_Asm_32: { |
7941 | 0 | MCInst TmpInst(Inst.getAddress()); |
7942 | 0 | unsigned Spacing; |
7943 | 0 | TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); |
7944 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7945 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7946 | 0 | Spacing)); |
7947 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7948 | 0 | Spacing * 2)); |
7949 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7950 | 0 | Spacing * 3)); |
7951 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // Rn |
7952 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn |
7953 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // alignment |
7954 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // Rm |
7955 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // CondCode |
7956 | 0 | TmpInst.addOperand(Inst.getOperand(5)); |
7957 | 0 | Inst = TmpInst; |
7958 | 0 | return true; |
7959 | 0 | } |
7960 | | |
7961 | | // VST3 multiple 3-element structure instructions. |
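| | // Assembly form, e.g.: vst3.8 {d0, d1, d2}, [r1]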
7962 | 0 | case ARM::VST3dAsm_8: |
7963 | 0 | case ARM::VST3dAsm_16: |
7964 | 0 | case ARM::VST3dAsm_32: |
7965 | 0 | case ARM::VST3qAsm_8: |
7966 | 0 | case ARM::VST3qAsm_16: |
7967 | 0 | case ARM::VST3qAsm_32: { |
7968 | 0 | MCInst TmpInst(Inst.getAddress()); |
7969 | 0 | unsigned Spacing; |
7970 | 0 | TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); |
7971 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // Rn |
7972 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // alignment |
7973 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7974 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7975 | 0 | Spacing)); |
7976 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7977 | 0 | Spacing * 2)); |
7978 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // CondCode |
7979 | 0 | TmpInst.addOperand(Inst.getOperand(4)); |
7980 | 0 | Inst = TmpInst; |
7981 | 0 | return true; |
7982 | 0 | } |
7983 | | |
7984 | 0 | case ARM::VST3dWB_fixed_Asm_8: |
7985 | 0 | case ARM::VST3dWB_fixed_Asm_16: |
7986 | 0 | case ARM::VST3dWB_fixed_Asm_32: |
7987 | 0 | case ARM::VST3qWB_fixed_Asm_8: |
7988 | 0 | case ARM::VST3qWB_fixed_Asm_16: |
7989 | 0 | case ARM::VST3qWB_fixed_Asm_32: { |
7990 | 0 | MCInst TmpInst(Inst.getAddress()); |
7991 | 0 | unsigned Spacing; |
7992 | 0 | TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); |
7993 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // Rn |
7994 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn |
7995 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // alignment |
7996 | 0 | TmpInst.addOperand(MCOperand::createReg(0)); // Rm |
7997 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
7998 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
7999 | 0 | Spacing)); |
8000 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
8001 | 0 | Spacing * 2)); |
8002 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // CondCode |
8003 | 0 | TmpInst.addOperand(Inst.getOperand(4)); |
8004 | 0 | Inst = TmpInst; |
8005 | 0 | return true; |
8006 | 0 | } |
8007 | | |
8008 | 0 | case ARM::VST3dWB_register_Asm_8: |
8009 | 0 | case ARM::VST3dWB_register_Asm_16: |
8010 | 0 | case ARM::VST3dWB_register_Asm_32: |
8011 | 0 | case ARM::VST3qWB_register_Asm_8: |
8012 | 0 | case ARM::VST3qWB_register_Asm_16: |
8013 | 0 | case ARM::VST3qWB_register_Asm_32: { |
8014 | 0 | MCInst TmpInst(Inst.getAddress()); |
8015 | 0 | unsigned Spacing; |
8016 | 0 | TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); |
8017 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // Rn |
8018 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn |
8019 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // alignment |
8020 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // Rm |
8021 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
8022 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
8023 | 0 | Spacing)); |
8024 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
8025 | 0 | Spacing * 2)); |
8026 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // CondCode |
8027 | 0 | TmpInst.addOperand(Inst.getOperand(5)); |
8028 | 0 | Inst = TmpInst; |
8029 | 0 | return true; |
8030 | 0 | } |
8031 | | |
8032 | | // VST4 multiple 4-element structure instructions.
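| | // Assembly form, e.g.: vst4.8 {d0, d1, d2, d3}, [r1]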
8033 | 0 | case ARM::VST4dAsm_8: |
8034 | 0 | case ARM::VST4dAsm_16: |
8035 | 0 | case ARM::VST4dAsm_32: |
8036 | 0 | case ARM::VST4qAsm_8: |
8037 | 0 | case ARM::VST4qAsm_16: |
8038 | 0 | case ARM::VST4qAsm_32: { |
8039 | 0 | MCInst TmpInst(Inst.getAddress()); |
8040 | 0 | unsigned Spacing; |
8041 | 0 | TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); |
8042 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // Rn |
8043 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // alignment |
8044 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
8045 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
8046 | 0 | Spacing)); |
8047 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
8048 | 0 | Spacing * 2)); |
8049 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
8050 | 0 | Spacing * 3)); |
8051 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // CondCode |
8052 | 0 | TmpInst.addOperand(Inst.getOperand(4)); |
8053 | 0 | Inst = TmpInst; |
8054 | 0 | return true; |
8055 | 0 | } |
8056 | | |
8057 | 0 | case ARM::VST4dWB_fixed_Asm_8: |
8058 | 0 | case ARM::VST4dWB_fixed_Asm_16: |
8059 | 0 | case ARM::VST4dWB_fixed_Asm_32: |
8060 | 0 | case ARM::VST4qWB_fixed_Asm_8: |
8061 | 0 | case ARM::VST4qWB_fixed_Asm_16: |
8062 | 0 | case ARM::VST4qWB_fixed_Asm_32: { |
8063 | 0 | MCInst TmpInst(Inst.getAddress()); |
8064 | 0 | unsigned Spacing; |
8065 | 0 | TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); |
8066 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // Rn |
8067 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn |
8068 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // alignment |
8069 | 0 | TmpInst.addOperand(MCOperand::createReg(0)); // Rm |
8070 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
8071 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
8072 | 0 | Spacing)); |
8073 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
8074 | 0 | Spacing * 2)); |
8075 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
8076 | 0 | Spacing * 3)); |
8077 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // CondCode |
8078 | 0 | TmpInst.addOperand(Inst.getOperand(4)); |
8079 | 0 | Inst = TmpInst; |
8080 | 0 | return true; |
8081 | 0 | } |
8082 | | |
8083 | 0 | case ARM::VST4dWB_register_Asm_8: |
8084 | 0 | case ARM::VST4dWB_register_Asm_16: |
8085 | 0 | case ARM::VST4dWB_register_Asm_32: |
8086 | 0 | case ARM::VST4qWB_register_Asm_8: |
8087 | 0 | case ARM::VST4qWB_register_Asm_16: |
8088 | 0 | case ARM::VST4qWB_register_Asm_32: { |
8089 | 0 | MCInst TmpInst(Inst.getAddress()); |
8090 | 0 | unsigned Spacing; |
8091 | 0 | TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); |
8092 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // Rn |
8093 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn |
8094 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // alignment |
8095 | 0 | TmpInst.addOperand(Inst.getOperand(3)); // Rm |
8096 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Vd |
8097 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
8098 | 0 | Spacing)); |
8099 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
8100 | 0 | Spacing * 2)); |
8101 | 0 | TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + |
8102 | 0 | Spacing * 3)); |
8103 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // CondCode |
8104 | 0 | TmpInst.addOperand(Inst.getOperand(5)); |
8105 | 0 | Inst = TmpInst; |
8106 | 0 | return true; |
8107 | 0 | } |
8108 | | |
8109 | | // Handle encoding choice for the shift-immediate instructions. |
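| | // e.g. "lsls r0, r0, #2" outside an IT block (or the non-flag-setting
| | // form inside one) can be narrowed to the 16-bit Thumb1 encoding,
| | // unless an explicit ".w" suffix forces the wide encoding.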
8110 | 92 | case ARM::t2LSLri: |
8111 | 1.25k | case ARM::t2LSRri: |
8112 | 1.30k | case ARM::t2ASRri: { |
8113 | 1.30k | if (isARMLowRegister(Inst.getOperand(0).getReg()) && |
8114 | 1.30k | Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() && |
8115 | 1.30k | Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) && |
8116 | 1.30k | !(static_cast<ARMOperand &>(*Operands[3]).isToken() && |
8117 | 0 | static_cast<ARMOperand &>(*Operands[3]).getToken() == ".w")) { |
8118 | 0 | unsigned NewOpc; |
8119 | 0 | switch (Inst.getOpcode()) { |
8120 | 0 | default: llvm_unreachable("unexpected opcode"); |
8121 | 0 | case ARM::t2LSLri: NewOpc = ARM::tLSLri; break; |
8122 | 0 | case ARM::t2LSRri: NewOpc = ARM::tLSRri; break; |
8123 | 0 | case ARM::t2ASRri: NewOpc = ARM::tASRri; break; |
8124 | 0 | } |
8125 | | // The Thumb1 operands aren't in the same order. Awesome, eh? |
8126 | 0 | MCInst TmpInst(Inst.getAddress()); |
8127 | 0 | TmpInst.setOpcode(NewOpc); |
8128 | 0 | TmpInst.addOperand(Inst.getOperand(0)); |
8129 | 0 | TmpInst.addOperand(Inst.getOperand(5)); |
8130 | 0 | TmpInst.addOperand(Inst.getOperand(1)); |
8131 | 0 | TmpInst.addOperand(Inst.getOperand(2)); |
8132 | 0 | TmpInst.addOperand(Inst.getOperand(3)); |
8133 | 0 | TmpInst.addOperand(Inst.getOperand(4)); |
8134 | 0 | Inst = TmpInst; |
8135 | 0 | return true; |
8136 | 0 | } |
8137 | 1.30k | return false; |
8138 | 1.30k | } |
8139 | | |
8140 | | // Handle the Thumb2 mode MOV complex aliases. |
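| | // e.g. a register-shifted "mov r0, r1, lsl r2" is emitted as the
| | // equivalent shift instruction "lsl(s) r0, r1, r2".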
8141 | 6 | case ARM::t2MOVsr: |
8142 | 6 | case ARM::t2MOVSsr: { |
8143 | | // Which instruction to expand to depends on the CCOut operand and,
8144 | | // if the register operands are low registers, on whether we're in
8145 | | // an IT block.
8146 | 6 | bool isNarrow = false; |
8147 | 6 | if (isARMLowRegister(Inst.getOperand(0).getReg()) && |
8148 | 6 | isARMLowRegister(Inst.getOperand(1).getReg()) && |
8149 | 6 | isARMLowRegister(Inst.getOperand(2).getReg()) && |
8150 | 6 | Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() && |
8151 | 6 | inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr)) |
8152 | 0 | isNarrow = true; |
8153 | 6 | MCInst TmpInst(Inst.getAddress()); |
8154 | 6 | unsigned newOpc; |
8155 | 6 | switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) { |
8156 | 0 | default: llvm_unreachable("unexpected opcode!"); |
8157 | 4 | case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break; |
8158 | 1 | case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break; |
8159 | 1 | case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break; |
8160 | 0 | case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR : ARM::t2RORrr; break; |
8161 | 6 | } |
8162 | 6 | TmpInst.setOpcode(newOpc); |
8163 | 6 | TmpInst.addOperand(Inst.getOperand(0)); // Rd |
8164 | 6 | if (isNarrow) |
8165 | 0 | TmpInst.addOperand(MCOperand::createReg( |
8166 | 0 | Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0)); |
8167 | 6 | TmpInst.addOperand(Inst.getOperand(1)); // Rn |
8168 | 6 | TmpInst.addOperand(Inst.getOperand(2)); // Rm |
8169 | 6 | TmpInst.addOperand(Inst.getOperand(4)); // CondCode |
8170 | 6 | TmpInst.addOperand(Inst.getOperand(5)); |
8171 | 6 | if (!isNarrow) |
8172 | 6 | TmpInst.addOperand(MCOperand::createReg( |
8173 | 6 | Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0)); |
8174 | 6 | Inst = TmpInst; |
8175 | 6 | return true; |
8176 | 6 | } |
8177 | 32 | case ARM::t2MOVsi: |
8178 | 34 | case ARM::t2MOVSsi: { |
8179 | | // Which instruction to expand to depends on the CCOut operand and,
8180 | | // if the register operands are low registers, on whether we're in
8181 | | // an IT block.
8182 | 34 | bool isNarrow = false; |
8183 | 34 | if (isARMLowRegister(Inst.getOperand(0).getReg()) && |
8184 | 34 | isARMLowRegister(Inst.getOperand(1).getReg()) && |
8185 | 34 | inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi)) |
8186 | 2 | isNarrow = true; |
8187 | 34 | MCInst TmpInst(Inst.getAddress()); |
8188 | 34 | unsigned newOpc; |
8189 | 34 | switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) { |
8190 | 0 | default: llvm_unreachable("unexpected opcode!"); |
8191 | 2 | case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break; |
8192 | 4 | case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break; |
8193 | 9 | case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break; |
8194 | 0 | case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break; |
8195 | 19 | case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break; |
8196 | 34 | } |
8197 | 34 | unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()); |
8198 | 34 | if (Amount == 32) Amount = 0; |
8199 | 34 | TmpInst.setOpcode(newOpc); |
8200 | 34 | TmpInst.addOperand(Inst.getOperand(0)); // Rd |
8201 | 34 | if (isNarrow) |
8202 | 1 | TmpInst.addOperand(MCOperand::createReg( |
8203 | 1 | Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0)); |
8204 | 34 | TmpInst.addOperand(Inst.getOperand(1)); // Rn |
8205 | 34 | if (newOpc != ARM::t2RRX) |
8206 | 15 | TmpInst.addOperand(MCOperand::createImm(Amount)); |
8207 | 34 | TmpInst.addOperand(Inst.getOperand(3)); // CondCode |
8208 | 34 | TmpInst.addOperand(Inst.getOperand(4)); |
8209 | 34 | if (!isNarrow) |
8210 | 33 | TmpInst.addOperand(MCOperand::createReg( |
8211 | 33 | Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0)); |
8212 | 34 | Inst = TmpInst; |
8213 | 34 | return true; |
8214 | 34 | } |
8215 | | // Handle the ARM mode MOV complex aliases. |
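| | // e.g. "asr r0, r1, r2" becomes "mov r0, r1, asr r2" (MOVsr), and
| | // "lsl r0, r1, #4" becomes "mov r0, r1, lsl #4" (MOVsi); a zero
| | // immediate shift is a plain MOVr.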
8216 | 235 | case ARM::ASRr: |
8217 | 409 | case ARM::LSRr: |
8218 | 557 | case ARM::LSLr: |
8219 | 672 | case ARM::RORr: { |
8220 | 672 | ARM_AM::ShiftOpc ShiftTy; |
8221 | 672 | switch(Inst.getOpcode()) { |
8222 | 0 | default: llvm_unreachable("unexpected opcode!"); |
8223 | 235 | case ARM::ASRr: ShiftTy = ARM_AM::asr; break; |
8224 | 174 | case ARM::LSRr: ShiftTy = ARM_AM::lsr; break; |
8225 | 148 | case ARM::LSLr: ShiftTy = ARM_AM::lsl; break; |
8226 | 115 | case ARM::RORr: ShiftTy = ARM_AM::ror; break; |
8227 | 672 | } |
8228 | 672 | unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0); |
8229 | 672 | MCInst TmpInst(Inst.getAddress()); |
8230 | 672 | TmpInst.setOpcode(ARM::MOVsr); |
8231 | 672 | TmpInst.addOperand(Inst.getOperand(0)); // Rd |
8232 | 672 | TmpInst.addOperand(Inst.getOperand(1)); // Rn |
8233 | 672 | TmpInst.addOperand(Inst.getOperand(2)); // Rm |
8234 | 672 | TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty |
8235 | 672 | TmpInst.addOperand(Inst.getOperand(3)); // CondCode |
8236 | 672 | TmpInst.addOperand(Inst.getOperand(4)); |
8237 | 672 | TmpInst.addOperand(Inst.getOperand(5)); // cc_out |
8238 | 672 | Inst = TmpInst; |
8239 | 672 | return true; |
8240 | 672 | } |
8241 | 45 | case ARM::ASRi: |
8242 | 123 | case ARM::LSRi: |
8243 | 1.30k | case ARM::LSLi: |
8244 | 1.40k | case ARM::RORi: { |
8245 | 1.40k | ARM_AM::ShiftOpc ShiftTy; |
8246 | 1.40k | switch(Inst.getOpcode()) { |
8247 | 0 | default: llvm_unreachable("unexpected opcode!"); |
8248 | 45 | case ARM::ASRi: ShiftTy = ARM_AM::asr; break; |
8249 | 78 | case ARM::LSRi: ShiftTy = ARM_AM::lsr; break; |
8250 | 1.17k | case ARM::LSLi: ShiftTy = ARM_AM::lsl; break; |
8251 | 103 | case ARM::RORi: ShiftTy = ARM_AM::ror; break; |
8252 | 1.40k | } |
8253 | | // A shift by zero is a plain MOVr, not a MOVsi. |
8254 | 1.40k | unsigned Amt = Inst.getOperand(2).getImm(); |
8255 | 1.40k | unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi; |
8256 | | // A shift by 32 should be encoded as 0 when permitted |
8257 | 1.40k | if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr)) |
8258 | 47 | Amt = 0; |
8259 | 1.40k | unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt); |
8260 | 1.40k | MCInst TmpInst(Inst.getAddress()); |
8261 | 1.40k | TmpInst.setOpcode(Opc); |
8262 | 1.40k | TmpInst.addOperand(Inst.getOperand(0)); // Rd |
8263 | 1.40k | TmpInst.addOperand(Inst.getOperand(1)); // Rn |
8264 | 1.40k | if (Opc == ARM::MOVsi) |
8265 | 401 | TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty |
8266 | 1.40k | TmpInst.addOperand(Inst.getOperand(3)); // CondCode |
8267 | 1.40k | TmpInst.addOperand(Inst.getOperand(4)); |
8268 | 1.40k | TmpInst.addOperand(Inst.getOperand(5)); // cc_out |
8269 | 1.40k | Inst = TmpInst; |
8270 | 1.40k | return true; |
8271 | 1.40k | } |
8272 | 21 | case ARM::RRXi: { |
8273 | 21 | unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0); |
8274 | 21 | MCInst TmpInst(Inst.getAddress()); |
8275 | 21 | TmpInst.setOpcode(ARM::MOVsi); |
8276 | 21 | TmpInst.addOperand(Inst.getOperand(0)); // Rd |
8277 | 21 | TmpInst.addOperand(Inst.getOperand(1)); // Rn |
8278 | 21 | TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty |
8279 | 21 | TmpInst.addOperand(Inst.getOperand(2)); // CondCode |
8280 | 21 | TmpInst.addOperand(Inst.getOperand(3)); |
8281 | 21 | TmpInst.addOperand(Inst.getOperand(4)); // cc_out |
8282 | 21 | Inst = TmpInst; |
8283 | 21 | return true; |
8284 | 1.40k | } |
8285 | 0 | case ARM::t2LDMIA_UPD: { |
8286 | | // If this is a load of a single register, then we should use |
8287 | | // a post-indexed LDR instruction instead, per the ARM ARM. |
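| | // e.g. "ldmia r0!, {r1}" is emitted as "ldr r1, [r0], #4".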
8288 | 0 | if (Inst.getNumOperands() != 5) |
8289 | 0 | return false; |
8290 | 0 | MCInst TmpInst(Inst.getAddress()); |
8291 | 0 | TmpInst.setOpcode(ARM::t2LDR_POST); |
8292 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // Rt |
8293 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb |
8294 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // Rn |
8295 | 0 | TmpInst.addOperand(MCOperand::createImm(4)); |
8296 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // CondCode |
8297 | 0 | TmpInst.addOperand(Inst.getOperand(3)); |
8298 | 0 | Inst = TmpInst; |
8299 | 0 | return true; |
8300 | 0 | } |
8301 | 0 | case ARM::t2STMDB_UPD: { |
8302 | | // If this is a store of a single register, then we should use |
8303 | | // a pre-indexed STR instruction instead, per the ARM ARM. |
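| | // e.g. "stmdb r0!, {r1}" is emitted as "str r1, [r0, #-4]!".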
8304 | 0 | if (Inst.getNumOperands() != 5) |
8305 | 0 | return false; |
8306 | 0 | MCInst TmpInst(Inst.getAddress()); |
8307 | 0 | TmpInst.setOpcode(ARM::t2STR_PRE); |
8308 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb |
8309 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // Rt |
8310 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // Rn |
8311 | 0 | TmpInst.addOperand(MCOperand::createImm(-4)); |
8312 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // CondCode |
8313 | 0 | TmpInst.addOperand(Inst.getOperand(3)); |
8314 | 0 | Inst = TmpInst; |
8315 | 0 | return true; |
8316 | 0 | } |
8317 | 0 | case ARM::LDMIA_UPD: |
8318 | | // If this is a load of a single register via a 'pop', then we should use |
8319 | | // a post-indexed LDR instruction instead, per the ARM ARM. |
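| | // e.g. "pop {r0}" is emitted as "ldr r0, [sp], #4".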
8320 | 0 | if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "pop" && |
8321 | 0 | Inst.getNumOperands() == 5) { |
8322 | 0 | MCInst TmpInst(Inst.getAddress()); |
8323 | 0 | TmpInst.setOpcode(ARM::LDR_POST_IMM); |
8324 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // Rt |
8325 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb |
8326 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // Rn |
8327 | 0 | TmpInst.addOperand(MCOperand::createReg(0)); // am2offset |
8328 | 0 | TmpInst.addOperand(MCOperand::createImm(4)); |
8329 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // CondCode |
8330 | 0 | TmpInst.addOperand(Inst.getOperand(3)); |
8331 | 0 | Inst = TmpInst; |
8332 | 0 | return true; |
8333 | 0 | } |
8334 | 0 | break; |
8335 | 0 | case ARM::STMDB_UPD: |
8336 | | // If this is a store of a single register via a 'push', then we should use |
8337 | | // a pre-indexed STR instruction instead, per the ARM ARM. |
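| | // e.g. "push {r0}" is emitted as "str r0, [sp, #-4]!".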
8338 | 0 | if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "push" && |
8339 | 0 | Inst.getNumOperands() == 5) { |
8340 | 0 | MCInst TmpInst(Inst.getAddress()); |
8341 | 0 | TmpInst.setOpcode(ARM::STR_PRE_IMM); |
8342 | 0 | TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb |
8343 | 0 | TmpInst.addOperand(Inst.getOperand(4)); // Rt |
8344 | 0 | TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12 |
8345 | 0 | TmpInst.addOperand(MCOperand::createImm(-4)); |
8346 | 0 | TmpInst.addOperand(Inst.getOperand(2)); // CondCode |
8347 | 0 | TmpInst.addOperand(Inst.getOperand(3)); |
8348 | 0 | Inst = TmpInst; |
8349 | 0 | } |
8350 | 0 | break; |
8351 | 824 | case ARM::t2ADDri12: |
8352 | | // If the immediate fits for encoding T3 (t2ADDri) and the generic "add" |
8353 | | // mnemonic was used (not "addw"), encoding T3 is preferred. |
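| | // i.e. plain "add rd, rn, #imm" is re-encoded as t2ADDri when the
| | // constant is also a valid Thumb2 modified immediate; "addw" always
| | // keeps this 12-bit encoding.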
8354 | 824 | if (static_cast<ARMOperand &>(*Operands[0]).getToken() != "add" || |
8355 | 824 | ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1) |
8356 | 188 | break; |
8357 | 636 | Inst.setOpcode(ARM::t2ADDri); |
8358 | 636 | Inst.addOperand(MCOperand::createReg(0)); // cc_out |
8359 | 636 | break; |
8360 | 358 | case ARM::t2SUBri12: |
8361 | | // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub" |
8362 | | // mnemonic was used (not "subw"), encoding T3 is preferred. |
8363 | 358 | if (static_cast<ARMOperand &>(*Operands[0]).getToken() != "sub" || |
8364 | 358 | ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1) |
8365 | 250 | break; |
8366 | 108 | Inst.setOpcode(ARM::t2SUBri); |
8367 | 108 | Inst.addOperand(MCOperand::createReg(0)); // cc_out |
8368 | 108 | break; |
8369 | 1.37k | case ARM::tADDi8: |
8370 | | // If the immediate is in the range 0-7, we want tADDi3 iff Rd was |
8371 | | // explicitly specified. From the ARM ARM: "Encoding T1 is preferred |
8372 | | // to encoding T2 if <Rd> is specified and encoding T2 is preferred |
8373 | | // to encoding T1 if <Rd> is omitted." |
8374 | 1.37k | if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) { |
8375 | 1 | Inst.setOpcode(ARM::tADDi3); |
8376 | 1 | return true; |
8377 | 1 | } |
8378 | 1.37k | break; |
8379 | 1.37k | case ARM::tSUBi8: |
8380 | | // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was |
8381 | | // explicitly specified. From the ARM ARM: "Encoding T1 is preferred |
8382 | | // to encoding T2 if <Rd> is specified and encoding T2 is preferred |
8383 | | // to encoding T1 if <Rd> is omitted." |
8384 | 77 | if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) { |
8385 | 1 | Inst.setOpcode(ARM::tSUBi3); |
8386 | 1 | return true; |
8387 | 1 | } |
8388 | 76 | break; |
8389 | 794 | case ARM::t2ADDri: |
8390 | 1.81k | case ARM::t2SUBri: { |
8391 | | // If the destination and first source operand are the same, and |
8392 | | // the flags are compatible with the current IT status, use encoding T2 |
8393 | | // instead of T3. For compatibility with the system 'as'. Make sure the |
8394 | | // wide encoding wasn't explicit. |
8395 | 1.81k | if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() || |
8396 | 1.81k | !isARMLowRegister(Inst.getOperand(0).getReg()) || |
8397 | 1.81k | (unsigned)Inst.getOperand(2).getImm() > 255 || |
8398 | 1.81k | ((!inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR) || |
8399 | 548 | (inITBlock() && Inst.getOperand(5).getReg() != 0)) || |
8400 | 1.81k | (static_cast<ARMOperand &>(*Operands[3]).isToken() && |
8401 | 110 | static_cast<ARMOperand &>(*Operands[3]).getToken() == ".w")) |
8402 | 1.74k | break; |
8403 | 70 | MCInst TmpInst(Inst.getAddress()); |
8404 | 70 | TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ? |
8405 | 46 | ARM::tADDi8 : ARM::tSUBi8); |
8406 | 70 | TmpInst.addOperand(Inst.getOperand(0)); |
8407 | 70 | TmpInst.addOperand(Inst.getOperand(5)); |
8408 | 70 | TmpInst.addOperand(Inst.getOperand(0)); |
8409 | 70 | TmpInst.addOperand(Inst.getOperand(2)); |
8410 | 70 | TmpInst.addOperand(Inst.getOperand(3)); |
8411 | 70 | TmpInst.addOperand(Inst.getOperand(4)); |
8412 | 70 | Inst = TmpInst; |
8413 | 70 | return true; |
8414 | 1.81k | } |
8415 | 277 | case ARM::t2ADDrr: { |
8416 | | // If the destination and first source operand are the same, and |
8417 | | // there's no setting of the flags, use encoding T2 instead of T3. |
8418 | | // Note that this is only for ADD, not SUB. This mirrors the system |
8419 | | // 'as' behaviour. Also take advantage of ADD being commutative. |
8420 | | // Make sure the wide encoding wasn't explicit. |
8421 | 277 | bool Swap = false; |
8422 | 277 | auto DestReg = Inst.getOperand(0).getReg(); |
8423 | 277 | bool Transform = DestReg == Inst.getOperand(1).getReg(); |
8424 | 277 | if (!Transform && DestReg == Inst.getOperand(2).getReg()) { |
8425 | 26 | Transform = true; |
8426 | 26 | Swap = true; |
8427 | 26 | } |
8428 | 277 | if (!Transform || |
8429 | 277 | Inst.getOperand(5).getReg() != 0 || |
8430 | 277 | (static_cast<ARMOperand &>(*Operands[3]).isToken() && |
8431 | 36 | static_cast<ARMOperand &>(*Operands[3]).getToken() == ".w")) |
8432 | 241 | break; |
8433 | 36 | MCInst TmpInst(Inst.getAddress()); |
8434 | 36 | TmpInst.setOpcode(ARM::tADDhirr); |
8435 | 36 | TmpInst.addOperand(Inst.getOperand(0)); |
8436 | 36 | TmpInst.addOperand(Inst.getOperand(0)); |
8437 | 36 | TmpInst.addOperand(Inst.getOperand(Swap ? 1 : 2)); |
8438 | 36 | TmpInst.addOperand(Inst.getOperand(3)); |
8439 | 36 | TmpInst.addOperand(Inst.getOperand(4)); |
8440 | 36 | Inst = TmpInst; |
8441 | 36 | return true; |
8442 | 277 | } |
8443 | 397 | case ARM::tADDrSP: { |
8444 | | // If the non-SP source operand and the destination operand are not the |
8445 | | // same, we need to use the 32-bit encoding if it's available. |
8446 | 397 | if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) { |
8447 | 51 | Inst.setOpcode(ARM::t2ADDrr); |
8448 | 51 | Inst.addOperand(MCOperand::createReg(0)); // cc_out |
8449 | 51 | return true; |
8450 | 51 | } |
8451 | 346 | break; |
8452 | 397 | } |
8453 | 11.6k | case ARM::tB: |
8454 | | // A Thumb conditional branch outside of an IT block is a tBcc. |
8455 | 11.6k | if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) { |
8456 | 0 | Inst.setOpcode(ARM::tBcc); |
8457 | 0 | return true; |
8458 | 0 | } |
8459 | 11.6k | break; |
8460 | 11.6k | case ARM::t2B: |
8461 | | // A Thumb2 conditional branch outside of an IT block is a t2Bcc. |
8462 | 1.47k | if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){ |
8463 | 0 | Inst.setOpcode(ARM::t2Bcc); |
8464 | 0 | return true; |
8465 | 0 | } |
8466 | 1.47k | break; |
8467 | 1.47k | case ARM::t2Bcc: |
8468 | | // If the conditional is AL or we're in an IT block, we really want t2B. |
8469 | 690 | if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) { |
8470 | 0 | Inst.setOpcode(ARM::t2B); |
8471 | 0 | return true; |
8472 | 0 | } |
8473 | 690 | break; |
8474 | 3.01k | case ARM::tBcc: |
8475 | | // If the conditional is AL, we really want tB. |
8476 | 3.01k | if (Inst.getOperand(1).getImm() == ARMCC::AL) { |
8477 | 0 | Inst.setOpcode(ARM::tB); |
8478 | 0 | return true; |
8479 | 0 | } |
8480 | 3.01k | break; |
8481 | 3.01k | case ARM::tLDMIA: { |
8482 | | // If the register list contains any high registers, or if the writeback |
8483 | | // doesn't match what tLDMIA can do, we need to use the 32-bit encoding |
8484 | | // instead if we're in Thumb2. Otherwise, this should have generated |
8485 | | // an error in validateInstruction(). |
8486 | 0 | unsigned Rn = Inst.getOperand(0).getReg(); |
8487 | 0 | bool hasWritebackToken = |
8488 | 0 | (static_cast<ARMOperand &>(*Operands[3]).isToken() && |
8489 | 0 | static_cast<ARMOperand &>(*Operands[3]).getToken() == "!"); |
8490 | 0 | bool listContainsBase; |
8491 | 0 | if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) || |
8492 | 0 | (!listContainsBase && !hasWritebackToken) || |
8493 | 0 | (listContainsBase && hasWritebackToken)) { |
8494 | | // 16-bit encoding isn't sufficient. Switch to the 32-bit version. |
8495 | 0 | assert (isThumbTwo()); |
8496 | 0 | Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA); |
8497 | | // If we're switching to the updating version, we need to insert |
8498 | | // the writeback tied operand. |
8499 | 0 | if (hasWritebackToken) |
8500 | 0 | Inst.insert(Inst.begin(), |
8501 | 0 | MCOperand::createReg(Inst.getOperand(0).getReg())); |
8502 | 0 | return true; |
8503 | 0 | } |
8504 | 0 | break; |
8505 | 0 | } |
8506 | 0 | case ARM::tSTMIA_UPD: { |
8507 | | // If the register list contains any high registers, we need to use |
8508 | | // the 32-bit encoding instead if we're in Thumb2. Otherwise, this |
8509 | | // should have generated an error in validateInstruction(). |
8510 | 0 | unsigned Rn = Inst.getOperand(0).getReg(); |
8511 | 0 | bool listContainsBase; |
8512 | 0 | if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) { |
8513 | | // 16-bit encoding isn't sufficient. Switch to the 32-bit version. |
8514 | 0 | assert (isThumbTwo()); |
8515 | 0 | Inst.setOpcode(ARM::t2STMIA_UPD); |
8516 | 0 | return true; |
8517 | 0 | } |
8518 | 0 | break; |
8519 | 0 | } |
8520 | 0 | case ARM::tPOP: { |
8521 | 0 | bool listContainsBase; |
8522 | | // If the register list contains any high registers, we need to use |
8523 | | // the 32-bit encoding instead if we're in Thumb2. Otherwise, this |
8524 | | // should have generated an error in validateInstruction(). |
8525 | 0 | if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase)) |
8526 | 0 | return false; |
8527 | 0 | assert (isThumbTwo()); |
8528 | 0 | Inst.setOpcode(ARM::t2LDMIA_UPD); |
8529 | | // Add the base register and writeback operands. |
8530 | 0 | Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP)); |
8531 | 0 | Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP)); |
8532 | 0 | return true; |
8533 | 0 | } |
8534 | 0 | case ARM::tPUSH: { |
8535 | 0 | bool listContainsBase; |
8536 | 0 | if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase)) |
8537 | 0 | return false; |
8538 | 0 | assert (isThumbTwo()); |
8539 | 0 | Inst.setOpcode(ARM::t2STMDB_UPD); |
8540 | | // Add the base register and writeback operands. |
8541 | 0 | Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP)); |
8542 | 0 | Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP)); |
8543 | 0 | return true; |
8544 | 0 | } |
8545 | 193 | case ARM::t2MOVi: { |
8546 | | // If we can use the 16-bit encoding and the user didn't explicitly |
8547 | | // request the 32-bit variant, transform it here. |
8548 | 193 | if (isARMLowRegister(Inst.getOperand(0).getReg()) && |
8549 | 193 | (unsigned)Inst.getOperand(1).getImm() <= 255 && |
8550 | 193 | ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL && |
8551 | 99 | Inst.getOperand(4).getReg() == ARM::CPSR) || |
8552 | 99 | (inITBlock() && Inst.getOperand(4).getReg() == 0)) && |
8553 | 193 | (!static_cast<ARMOperand &>(*Operands[2]).isToken() || |
8554 | 42 | static_cast<ARMOperand &>(*Operands[2]).getToken() != ".w")) { |
8555 | | // The operands aren't in the same order for tMOVi8... |
8556 | 40 | MCInst TmpInst(Inst.getAddress()); |
8557 | 40 | TmpInst.setOpcode(ARM::tMOVi8); |
8558 | 40 | TmpInst.addOperand(Inst.getOperand(0)); |
8559 | 40 | TmpInst.addOperand(Inst.getOperand(4)); |
8560 | 40 | TmpInst.addOperand(Inst.getOperand(1)); |
8561 | 40 | TmpInst.addOperand(Inst.getOperand(2)); |
8562 | 40 | TmpInst.addOperand(Inst.getOperand(3)); |
8563 | 40 | Inst = TmpInst; |
8564 | 40 | return true; |
8565 | 40 | } |
8566 | 153 | break; |
8567 | 193 | } |
8568 | 200 | case ARM::t2MOVr: { |
8569 | | // If we can use the 16-bit encoding and the user didn't explicitly |
8570 | | // request the 32-bit variant, transform it here. |
8571 | 200 | if (isARMLowRegister(Inst.getOperand(0).getReg()) && |
8572 | 200 | isARMLowRegister(Inst.getOperand(1).getReg()) && |
8573 | 200 | Inst.getOperand(2).getImm() == ARMCC::AL && |
8574 | 200 | Inst.getOperand(4).getReg() == ARM::CPSR && |
8575 | 200 | (!static_cast<ARMOperand &>(*Operands[2]).isToken() || |
8576 | 112 | static_cast<ARMOperand &>(*Operands[2]).getToken() != ".w")) { |
8577 | | // The operands aren't the same for tMOV[S]r... (no cc_out) |
8578 | 48 | MCInst TmpInst(Inst.getAddress()); |
8579 | 48 | TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr); |
8580 | 48 | TmpInst.addOperand(Inst.getOperand(0)); |
8581 | 48 | TmpInst.addOperand(Inst.getOperand(1)); |
8582 | 48 | TmpInst.addOperand(Inst.getOperand(2)); |
8583 | 48 | TmpInst.addOperand(Inst.getOperand(3)); |
8584 | 48 | Inst = TmpInst; |
8585 | 48 | return true; |
8586 | 48 | } |
8587 | 152 | break; |
8588 | 200 | } |
8589 | 152 | case ARM::t2SXTH: |
8590 | 226 | case ARM::t2SXTB: |
8591 | 274 | case ARM::t2UXTH: |
8592 | 290 | case ARM::t2UXTB: { |
8593 | | // If we can use the 16-bit encoding and the user didn't explicitly |
8594 | | // request the 32-bit variant, transform it here. |
8595 | 290 | if (isARMLowRegister(Inst.getOperand(0).getReg()) && |
8596 | 290 | isARMLowRegister(Inst.getOperand(1).getReg()) && |
8597 | 290 | Inst.getOperand(2).getImm() == 0 && |
8598 | 290 | (!static_cast<ARMOperand &>(*Operands[2]).isToken() || |
8599 | 0 | static_cast<ARMOperand &>(*Operands[2]).getToken() != ".w")) { |
8600 | 0 | unsigned NewOpc; |
8601 | 0 | switch (Inst.getOpcode()) { |
8602 | 0 | default: llvm_unreachable("Illegal opcode!"); |
8603 | 0 | case ARM::t2SXTH: NewOpc = ARM::tSXTH; break; |
8604 | 0 | case ARM::t2SXTB: NewOpc = ARM::tSXTB; break; |
8605 | 0 | case ARM::t2UXTH: NewOpc = ARM::tUXTH; break; |
8606 | 0 | case ARM::t2UXTB: NewOpc = ARM::tUXTB; break; |
8607 | 0 | } |
8608 | | // The operands aren't the same for thumb1 (no rotate operand). |
8609 | 0 | MCInst TmpInst(Inst.getAddress()); |
8610 | 0 | TmpInst.setOpcode(NewOpc); |
8611 | 0 | TmpInst.addOperand(Inst.getOperand(0)); |
8612 | 0 | TmpInst.addOperand(Inst.getOperand(1)); |
8613 | 0 | TmpInst.addOperand(Inst.getOperand(3)); |
8614 | 0 | TmpInst.addOperand(Inst.getOperand(4)); |
8615 | 0 | Inst = TmpInst; |
8616 | 0 | return true; |
8617 | 0 | } |
8618 | 290 | break; |
8619 | 290 | } |
8620 | 425 | case ARM::MOVsi: { |
8621 | 425 | ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm()); |
8622 | | // rrx shifts and asr/lsr of #32 are encoded as 0 |
8623 | 425 | if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr) |
8624 | 141 | return false; |
8625 | 284 | if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) { |
8626 | | // Shifting by zero is accepted as a vanilla 'MOVr' |
8627 | 1 | MCInst TmpInst(Inst.getAddress()); |
8628 | 1 | TmpInst.setOpcode(ARM::MOVr); |
8629 | 1 | TmpInst.addOperand(Inst.getOperand(0)); |
8630 | 1 | TmpInst.addOperand(Inst.getOperand(1)); |
8631 | 1 | TmpInst.addOperand(Inst.getOperand(3)); |
8632 | 1 | TmpInst.addOperand(Inst.getOperand(4)); |
8633 | 1 | TmpInst.addOperand(Inst.getOperand(5)); |
8634 | 1 | Inst = TmpInst; |
8635 | 1 | return true; |
8636 | 1 | } |
8637 | 283 | return false; |
8638 | 284 | } |
8639 | 36 | case ARM::ANDrsi: |
8640 | 39 | case ARM::ORRrsi: |
8641 | 42 | case ARM::EORrsi: |
8642 | 120 | case ARM::BICrsi: |
8643 | 195 | case ARM::SUBrsi: |
8644 | 284 | case ARM::ADDrsi: { |
8645 | 284 | unsigned newOpc; |
8646 | 284 | ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm()); |
8647 | 284 | if (SOpc == ARM_AM::rrx) return false; |
8648 | 111 | switch (Inst.getOpcode()) { |
8649 | 0 | default: llvm_unreachable("unexpected opcode!"); |
8650 | 32 | case ARM::ANDrsi: newOpc = ARM::ANDrr; break; |
8651 | 3 | case ARM::ORRrsi: newOpc = ARM::ORRrr; break; |
8652 | 0 | case ARM::EORrsi: newOpc = ARM::EORrr; break; |
8653 | 36 | case ARM::BICrsi: newOpc = ARM::BICrr; break; |
8654 | 31 | case ARM::SUBrsi: newOpc = ARM::SUBrr; break; |
8655 | 9 | case ARM::ADDrsi: newOpc = ARM::ADDrr; break; |
8656 | 111 | } |
8657 | | // If the shift is by zero, use the non-shifted instruction definition. |
8658 | | // The exception is for right shifts, where 0 == 32 |
8659 | 111 | if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 && |
8660 | 111 | !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) { |
8661 | 29 | MCInst TmpInst(Inst.getAddress()); |
8662 | 29 | TmpInst.setOpcode(newOpc); |
8663 | 29 | TmpInst.addOperand(Inst.getOperand(0)); |
8664 | 29 | TmpInst.addOperand(Inst.getOperand(1)); |
8665 | 29 | TmpInst.addOperand(Inst.getOperand(2)); |
8666 | 29 | TmpInst.addOperand(Inst.getOperand(4)); |
8667 | 29 | TmpInst.addOperand(Inst.getOperand(5)); |
8668 | 29 | TmpInst.addOperand(Inst.getOperand(6)); |
8669 | 29 | Inst = TmpInst; |
8670 | 29 | return true; |
8671 | 29 | } |
8672 | 82 | return false; |
8673 | 111 | } |
8674 | 1.53k | case ARM::ITasm: |
8675 | 3.53k | case ARM::t2IT: { |
8676 | | // For all conditions but the first, each mask bit encodes 't' or 'e' |
8677 | | // relative to the low bit of the condition code. The parser always |
8678 | | // treats 1 as meaning 't', so XOR-toggle the bits when the low bit |
8679 | | // of the condition code is zero. |
8680 | 3.53k | MCOperand &MO = Inst.getOperand(1); |
8681 | 3.53k | unsigned Mask = MO.getImm(); |
8682 | 3.53k | unsigned OrigMask = Mask; |
8683 | 3.53k | unsigned TZ = countTrailingZeros(Mask); |
8684 | 3.53k | if ((Inst.getOperand(0).getImm() & 1) == 0) { |
8685 | 325 | assert(Mask && TZ <= 3 && "illegal IT mask value!"); |
8686 | 325 | Mask ^= (0xE << TZ) & 0xF; |
8687 | 325 | } |
8688 | 3.53k | MO.setImm(Mask); |
8689 | | |
8690 | | // Set up the IT block state according to the IT instruction we just |
8691 | | // matched. |
8692 | 3.53k | assert(!inITBlock() && "nested IT blocks?!"); |
8693 | 3.53k | ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm()); |
8694 | 3.53k | ITState.Mask = OrigMask; // Use the original mask, not the updated one. |
8695 | 3.53k | ITState.CurPosition = 0; |
8696 | 3.53k | ITState.FirstCond = true; |
8697 | 3.53k | break; |
8698 | 3.53k | } |
8699 | 425 | case ARM::t2LSLrr: |
8700 | 591 | case ARM::t2LSRrr: |
8701 | 1.18k | case ARM::t2ASRrr: |
8702 | 1.19k | case ARM::t2SBCrr: |
8703 | 1.36k | case ARM::t2RORrr: |
8704 | 1.45k | case ARM::t2BICrr: |
8705 | 1.45k | { |
8706 | | // Assemblers should use the narrow encodings of these instructions when permissible. |
8707 | 1.45k | if ((isARMLowRegister(Inst.getOperand(1).getReg()) && |
8708 | 1.45k | isARMLowRegister(Inst.getOperand(2).getReg())) && |
8709 | 1.45k | Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() && |
8710 | 1.45k | ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) || |
8711 | 894 | (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) && |
8712 | 1.45k | (!static_cast<ARMOperand &>(*Operands[3]).isToken() || |
8713 | 620 | !static_cast<ARMOperand &>(*Operands[3]).getToken().equals_lower( |
8714 | 620 | ".w"))) { |
8715 | 620 | unsigned NewOpc; |
8716 | 620 | switch (Inst.getOpcode()) { |
8717 | 0 | default: llvm_unreachable("unexpected opcode"); |
8718 | 392 | case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break; |
8719 | 103 | case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break; |
8720 | 53 | case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break; |
8721 | 1 | case ARM::t2SBCrr: NewOpc = ARM::tSBC; break; |
8722 | 21 | case ARM::t2RORrr: NewOpc = ARM::tROR; break; |
8723 | 50 | case ARM::t2BICrr: NewOpc = ARM::tBIC; break; |
8724 | 620 | } |
8725 | 620 | MCInst TmpInst(Inst.getAddress()); |
8726 | 620 | TmpInst.setOpcode(NewOpc); |
8727 | 620 | TmpInst.addOperand(Inst.getOperand(0)); |
8728 | 620 | TmpInst.addOperand(Inst.getOperand(5)); |
8729 | 620 | TmpInst.addOperand(Inst.getOperand(1)); |
8730 | 620 | TmpInst.addOperand(Inst.getOperand(2)); |
8731 | 620 | TmpInst.addOperand(Inst.getOperand(3)); |
8732 | 620 | TmpInst.addOperand(Inst.getOperand(4)); |
8733 | 620 | Inst = TmpInst; |
8734 | 620 | return true; |
8735 | 620 | } |
8736 | 835 | return false; |
8737 | 1.45k | } |
8738 | 105 | case ARM::t2ANDrr: |
8739 | 148 | case ARM::t2EORrr: |
8740 | 258 | case ARM::t2ADCrr: |
8741 | 353 | case ARM::t2ORRrr: |
8742 | 353 | { |
8743 | | // Assemblers should use the narrow encodings of these instructions when permissible. |
8744 | | // These instructions are special in that they are commutable, so shorter encodings |
8745 | | // are available more often. |
8746 | 353 | if ((isARMLowRegister(Inst.getOperand(1).getReg()) && |
8747 | 353 | isARMLowRegister(Inst.getOperand(2).getReg())) && |
8748 | 353 | (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() || |
8749 | 207 | Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) && |
8750 | 353 | ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) || |
8751 | 160 | (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) && |
8752 | 353 | (!static_cast<ARMOperand &>(*Operands[3]).isToken() || |
8753 | 125 | !static_cast<ARMOperand &>(*Operands[3]).getToken().equals_lower( |
8754 | 125 | ".w"))) { |
8755 | 125 | unsigned NewOpc; |
8756 | 125 | switch (Inst.getOpcode()) { |
8757 | 0 | default: llvm_unreachable("unexpected opcode"); |
8758 | 58 | case ARM::t2ADCrr: NewOpc = ARM::tADC; break; |
8759 | 5 | case ARM::t2ANDrr: NewOpc = ARM::tAND; break; |
8760 | 20 | case ARM::t2EORrr: NewOpc = ARM::tEOR; break; |
8761 | 42 | case ARM::t2ORRrr: NewOpc = ARM::tORR; break; |
8762 | 125 | } |
8763 | 125 | MCInst TmpInst(Inst.getAddress()); |
8764 | 125 | TmpInst.setOpcode(NewOpc); |
8765 | 125 | TmpInst.addOperand(Inst.getOperand(0)); |
8766 | 125 | TmpInst.addOperand(Inst.getOperand(5)); |
8767 | 125 | if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) { |
8768 | 68 | TmpInst.addOperand(Inst.getOperand(1)); |
8769 | 68 | TmpInst.addOperand(Inst.getOperand(2)); |
8770 | 68 | } else { |
8771 | 57 | TmpInst.addOperand(Inst.getOperand(2)); |
8772 | 57 | TmpInst.addOperand(Inst.getOperand(1)); |
8773 | 57 | } |
8774 | 125 | TmpInst.addOperand(Inst.getOperand(3)); |
8775 | 125 | TmpInst.addOperand(Inst.getOperand(4)); |
8776 | 125 | Inst = TmpInst; |
8777 | 125 | return true; |
8778 | 125 | } |
8779 | 228 | return false; |
8780 | 353 | } |
8781 | 103k | } |
8782 | 93.0k | return false; |
8783 | 103k | } |
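The narrowing cases above (t2ADDri/t2SUBri to tADDi8/tSUBi8, t2MOVi to tMOVi8, and so on) are what make the assembler emit 16-bit Thumb encodings whenever the operands allow it and no explicit ".w" qualifier forbids it. A minimal sketch of observing that behaviour through Keystone's public C API; this program is illustrative only and is not part of ARMAsmParser.cpp, and the try_asm helper it defines is reused by the later sketches in this listing.

#include <keystone/keystone.h>
#include <stdio.h>

// Assemble one string on an existing engine and report the output size,
// or the Keystone error string if assembly fails.
static void try_asm(ks_engine *ks, const char *src) {
    unsigned char *enc;
    size_t size, count;
    if (ks_asm(ks, src, 0, &enc, &size, &count) == 0) {
        printf("%s -> %zu bytes (%zu statements)\n", src, size, count);
        ks_free(enc);
    } else {
        printf("%s -> error: %s\n", src, ks_strerror(ks_errno(ks)));
    }
}

int main(void) {
    ks_engine *ks;
    if (ks_open(KS_ARCH_ARM, KS_MODE_THUMB, &ks) != KS_ERR_OK)
        return 1;
    // "adds r0, #1" passes the narrowing checks, so a 2-byte 16-bit
    // encoding is expected; the explicit ".w" keeps the 4-byte Thumb2 form.
    try_asm(ks, "adds r0, #1");
    try_asm(ks, "adds.w r0, #1");
    ks_close(ks);
    return 0;
}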
8784 | | |
8785 | 102k | unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) { |
8786 | | // 16-bit thumb arithmetic instructions either require or preclude the 'S' |
8787 | | // suffix depending on whether they're in an IT block or not. |
8788 | 102k | unsigned Opc = Inst.getOpcode(); |
8789 | 102k | const MCInstrDesc &MCID = MII.get(Opc); |
8790 | 102k | if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) { |
8791 | 5.08k | assert(MCID.NumOperands == Inst.getNumOperands() && |
8792 | 5.08k | "operand count mismatch!"); |
8793 | | // Find the optional-def operand (cc_out). |
8794 | 5.08k | unsigned OpNo; |
8795 | 5.08k | for (OpNo = 0; |
8796 | 10.1k | !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands; |
8797 | 5.08k | ++OpNo) |
8798 | 5.08k | ; |
8799 | | // If we're parsing Thumb1, reject it completely. |
8800 | 5.08k | if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR) |
8801 | 5 | return Match_MnemonicFail; |
8802 | | // If we're parsing Thumb2, which form is legal depends on whether we're |
8803 | | // in an IT block. |
8804 | 5.07k | if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR && |
8805 | 5.07k | !inITBlock()) |
8806 | 2.24k | return Match_RequiresITBlock; |
8807 | 2.82k | if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR && |
8808 | 2.82k | inITBlock()) |
8809 | 2 | return Match_RequiresNotITBlock; |
8810 | 97.0k | } else if (isThumbOne()) { |
8811 | | // Some high-register supporting Thumb1 encodings only allow both registers |
8812 | | // to be from r0-r7 when in Thumb2. |
8813 | 1.27k | if (Opc == ARM::tADDhirr && !hasV6MOps() && |
8814 | 1.27k | isARMLowRegister(Inst.getOperand(1).getReg()) && |
8815 | 1.27k | isARMLowRegister(Inst.getOperand(2).getReg())) |
8816 | 5 | return Match_RequiresThumb2; |
8817 | | // Others only require ARMv6 or later. |
8818 | 1.26k | else if (Opc == ARM::tMOVr && !hasV6Ops() && |
8819 | 1.26k | isARMLowRegister(Inst.getOperand(0).getReg()) && |
8820 | 1.26k | isARMLowRegister(Inst.getOperand(1).getReg())) |
8821 | 0 | return Match_RequiresV6; |
8822 | 1.27k | } |
8823 | | |
8824 | 484k | for (unsigned I = 0; I < MCID.NumOperands; ++I) |
8825 | 385k | if (MCID.OpInfo[I].RegClass == ARM::rGPRRegClassID) { |
8826 | | // rGPRRegClass excludes PC, and also excludes SP before ARMv8 |
8827 | 16.5k | if ((Inst.getOperand(I).getReg() == ARM::SP) && !hasV8Ops()) |
8828 | 1 | return Match_RequiresV8; |
8829 | 16.5k | else if (Inst.getOperand(I).getReg() == ARM::PC) |
8830 | 0 | return Match_InvalidOperand; |
8831 | 16.5k | } |
8832 | | |
8833 | 99.8k | return Match_Success; |
8834 | 99.8k | } |
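checkTargetMatchPredicate is where the IT-block rules for the 16-bit flag-setting arithmetic forms are enforced: outside an IT block the narrow encoding must set flags ("adds"), while inside an IT block the conditional, non-flag-setting form is required. A hedged fragment, reusing the try_asm helper and engine from the sketch after processInstruction:

// Legal outside an IT block: the narrow form sets flags.
try_asm(ks, "adds r0, r1, r2");
// Legal inside an IT block: the conditional forms must not set flags.
try_asm(ks, "ite eq\n"
            "addeq r0, r0, #1\n"
            "subne r0, r0, #1\n");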
8835 | | |
8836 | | namespace llvm_ks { |
8837 | 12 | template <> inline bool IsCPSRDead<MCInst>(MCInst *Instr) { |
8838 | 12 | return true; // In an assembly source, no need to second-guess |
8839 | 12 | } |
8840 | | } |
8841 | | |
8842 | | bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, |
8843 | | OperandVector &Operands, |
8844 | | MCStreamer &Out, uint64_t &ErrorInfo, |
8845 | | bool MatchingInlineAsm, unsigned int &ErrorCode, uint64_t &Address) |
8846 | 119k | { |
8847 | 119k | MCInst Inst(Address); |
8848 | 119k | unsigned MatchResult; |
8849 | | |
8850 | 119k | MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo, |
8851 | 119k | MatchingInlineAsm); |
8852 | 119k | switch (MatchResult) { |
8853 | 99.8k | case Match_Success: |
8854 | | // Context sensitive operand constraints aren't handled by the matcher, |
8855 | | // so check them here. |
8856 | 99.8k | if (validateInstruction(Inst, Operands)) { |
8857 | | // Still progress the IT block, otherwise one wrong condition causes |
8858 | | // nasty cascading errors. |
8859 | 564 | forwardITPosition(); |
8860 | 564 | return true; |
8861 | 564 | } |
8862 | | |
8863 | 99.2k | { // processInstruction() updates inITBlock state, we need to save it away |
8864 | 99.2k | bool wasInITBlock = inITBlock(); |
8865 | | |
8866 | | // Some instructions need post-processing to, for example, tweak which |
8867 | | // encoding is selected. Loop on it while changes happen so the |
8868 | | // individual transformations can chain off each other. E.g., |
8869 | | // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2LDR_POST(sp,r8) |
8870 | 103k | while (processInstruction(Inst, Operands, Out)) |
8871 | 3.91k | ; |
8872 | | |
8873 | | // Only after the instruction is fully processed can we validate it. |
8874 | 99.2k | if (wasInITBlock && hasV8Ops() && isThumb() && |
8875 | 99.2k | !isV8EligibleForIT(&Inst)) { |
8876 | 335 | Warning(IDLoc, "deprecated instruction in IT block"); |
8877 | 335 | } |
8878 | 99.2k | } |
8879 | | |
8880 | | // Only move forward at the very end so that everything in validate |
8881 | | // and process gets a consistent answer about whether we're in an IT |
8882 | | // block. |
8883 | 99.2k | forwardITPosition(); |
8884 | | |
8885 | | // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and |
8886 | | // doesn't actually encode. |
8887 | 99.2k | if (Inst.getOpcode() == ARM::ITasm) |
8888 | 1.53k | return false; |
8889 | | |
8890 | 97.7k | Inst.setLoc(IDLoc); |
8891 | 97.7k | Out.EmitInstruction(Inst, getSTI(), ErrorCode); |
8892 | 97.7k | if (ErrorCode == 0) { |
8893 | 97.6k | Address = Inst.getAddress(); // Keystone update address |
8894 | 97.6k | return false; |
8895 | 97.6k | } else |
8896 | 62 | return true; |
8897 | 421 | case Match_MissingFeature: { |
8898 | 421 | ErrorCode = KS_ERR_ASM_ARM_MISSINGFEATURE; |
8899 | 421 | return true; |
8900 | 97.7k | } |
8901 | 8.49k | case Match_InvalidOperand: { |
8902 | | #if 0 |
8903 | | SMLoc ErrorLoc = IDLoc; |
8904 | | if (ErrorInfo != ~0ULL) { |
8905 | | if (ErrorInfo >= Operands.size()) |
8906 | | return Error(IDLoc, "too few operands for instruction"); |
8907 | | |
8908 | | ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getStartLoc(); |
8909 | | if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc; |
8910 | | } |
8911 | | #endif |
8912 | | |
8913 | | // return Error(ErrorLoc, "invalid operand for instruction"); |
8914 | 8.49k | ErrorCode = KS_ERR_ASM_ARM_INVALIDOPERAND; |
8915 | 8.49k | return true; |
8916 | 97.7k | } |
8917 | 10.1k | case Match_MnemonicFail: |
8918 | | //return Error(IDLoc, "invalid instruction", |
8919 | | // ((ARMOperand &)*Operands[0]).getLocRange()); |
8920 | 10.1k | ErrorCode = KS_ERR_ASM_ARM_MNEMONICFAIL; |
8921 | 10.1k | return true; |
8922 | 0 | case Match_RequiresNotITBlock: |
8923 | | //return Error(IDLoc, "flag setting instruction only valid outside IT block"); |
8924 | 0 | ErrorCode = KS_ERR_ASM_ARM_INVALIDOPERAND; |
8925 | 0 | return true; |
8926 | 4 | case Match_RequiresITBlock: |
8927 | | //return Error(IDLoc, "instruction only valid inside IT block"); |
8928 | 4 | ErrorCode = KS_ERR_ASM_ARM_INVALIDOPERAND; |
8929 | 4 | return true; |
8930 | 0 | case Match_RequiresV6: |
8931 | | //return Error(IDLoc, "instruction variant requires ARMv6 or later"); |
8932 | 0 | ErrorCode = KS_ERR_ASM_ARM_INVALIDOPERAND; |
8933 | 0 | return true; |
8934 | 5 | case Match_RequiresThumb2: |
8935 | | //return Error(IDLoc, "instruction variant requires Thumb2"); |
8936 | 5 | ErrorCode = KS_ERR_ASM_ARM_INVALIDOPERAND; |
8937 | 5 | return true; |
8938 | 1 | case Match_RequiresV8: |
8939 | | //return Error(IDLoc, "instruction variant requires ARMv8 or later"); |
8940 | 1 | ErrorCode = KS_ERR_ASM_ARM_INVALIDOPERAND; |
8941 | 1 | return true; |
8942 | 94 | case Match_ImmRange0_15: { |
8943 | 94 | SMLoc ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getStartLoc(); |
8944 | 94 | if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc; |
8945 | | // return Error(ErrorLoc, "immediate operand must be in the range [0,15]"); |
8946 | 94 | ErrorCode = KS_ERR_ASM_ARM_INVALIDOPERAND; |
8947 | 94 | return true; |
8948 | 97.7k | } |
8949 | 178 | case Match_ImmRange0_239: { |
8950 | 178 | SMLoc ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getStartLoc(); |
8951 | 178 | if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc; |
8952 | | // return Error(ErrorLoc, "immediate operand must be in the range [0,239]"); |
8953 | 178 | ErrorCode = KS_ERR_ASM_ARM_INVALIDOPERAND; |
8954 | 178 | return true; |
8955 | 97.7k | } |
8956 | 0 | case Match_AlignedMemoryRequiresNone: |
8957 | 0 | case Match_DupAlignedMemoryRequiresNone: |
8958 | 0 | case Match_AlignedMemoryRequires16: |
8959 | 0 | case Match_DupAlignedMemoryRequires16: |
8960 | 0 | case Match_AlignedMemoryRequires32: |
8961 | 0 | case Match_DupAlignedMemoryRequires32: |
8962 | 0 | case Match_AlignedMemoryRequires64: |
8963 | 0 | case Match_DupAlignedMemoryRequires64: |
8964 | 0 | case Match_AlignedMemoryRequires64or128: |
8965 | 0 | case Match_DupAlignedMemoryRequires64or128: |
8966 | 0 | case Match_AlignedMemoryRequires64or128or256: |
8967 | 0 | { |
8968 | 0 | SMLoc ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getAlignmentLoc(); |
8969 | 0 | if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc; |
8970 | 0 | switch (MatchResult) { |
8971 | 0 | default: |
8972 | 0 | llvm_unreachable("Missing Match_Aligned type"); |
8973 | 0 | case Match_AlignedMemoryRequiresNone: |
8974 | 0 | case Match_DupAlignedMemoryRequiresNone: |
8975 | | //return Error(ErrorLoc, "alignment must be omitted"); |
8976 | 0 | return true; |
8977 | 0 | case Match_AlignedMemoryRequires16: |
8978 | 0 | case Match_DupAlignedMemoryRequires16: |
8979 | | //return Error(ErrorLoc, "alignment must be 16 or omitted"); |
8980 | 0 | return true; |
8981 | 0 | case Match_AlignedMemoryRequires32: |
8982 | 0 | case Match_DupAlignedMemoryRequires32: |
8983 | | //return Error(ErrorLoc, "alignment must be 32 or omitted"); |
8984 | 0 | return true; |
8985 | 0 | case Match_AlignedMemoryRequires64: |
8986 | 0 | case Match_DupAlignedMemoryRequires64: |
8987 | | //return Error(ErrorLoc, "alignment must be 64 or omitted"); |
8988 | 0 | return true; |
8989 | 0 | case Match_AlignedMemoryRequires64or128: |
8990 | 0 | case Match_DupAlignedMemoryRequires64or128: |
8991 | | //return Error(ErrorLoc, "alignment must be 64, 128 or omitted"); |
8992 | 0 | return true; |
8993 | 0 | case Match_AlignedMemoryRequires64or128or256: |
8994 | | //return Error(ErrorLoc, "alignment must be 64, 128, 256 or omitted"); |
8995 | 0 | return true; |
8996 | 0 | } |
8997 | 0 | } |
8998 | 119k | } |
8999 | | |
9000 | 119k | llvm_unreachable("Implement any new match types added!"); |
9001 | 119k | } |
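When matching fails, MatchAndEmitInstruction reports the failure as one of the KS_ERR_ASM_ARM_* codes from keystone/arm.h rather than printing the commented-out Error() text, so library callers see a numeric error. A hedged fragment, on the same engine as the earlier sketch, showing how that surfaces through the public API:

// An immediate that no ARM/Thumb data-processing encoding can represent is
// rejected; ks_errno() then typically reports an operand error such as
// KS_ERR_ASM_ARM_INVALIDOPERAND.
unsigned char *enc;
size_t size, count;
if (ks_asm(ks, "add r0, r1, #0x12345", 0, &enc, &size, &count) != 0) {
    ks_err err = ks_errno(ks);
    printf("assembly failed: %u (%s)\n", (unsigned)err, ks_strerror(err));
}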
9002 | | |
9003 | | /// ParseDirective parses the ARM-specific directives |
9004 | | bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) |
9005 | 1.47M | { |
9006 | 1.47M | const MCObjectFileInfo::Environment Format = |
9007 | 1.47M | getContext().getObjectFileInfo()->getObjectFileType(); |
9008 | 1.47M | bool IsMachO = Format == MCObjectFileInfo::IsMachO; |
9009 | 1.47M | bool IsCOFF = Format == MCObjectFileInfo::IsCOFF; |
9010 | | |
9011 | 1.47M | StringRef IDVal = DirectiveID.getIdentifier(); |
9012 | 1.47M | if (IDVal == ".word") |
9013 | 87.5k | return parseLiteralValues(4, DirectiveID.getLoc()); |
9014 | 1.38M | else if (IDVal == ".short" || IDVal == ".hword") |
9015 | 414 | return parseLiteralValues(2, DirectiveID.getLoc()); |
9016 | 1.38M | else if (IDVal == ".thumb") |
9017 | 4.30k | return parseDirectiveThumb(DirectiveID.getLoc()); |
9018 | 1.38M | else if (IDVal == ".arm") |
9019 | 3.58k | return parseDirectiveARM(DirectiveID.getLoc()); |
9020 | 1.37M | else if (IDVal == ".thumb_func") |
9021 | 3.31k | return parseDirectiveThumbFunc(DirectiveID.getLoc()); |
9022 | 1.37M | else if (IDVal == ".code") |
9023 | 196 | return parseDirectiveCode(DirectiveID.getLoc()); |
9024 | 1.37M | else if (IDVal == ".syntax") |
9025 | 1.97k | return parseDirectiveSyntax(DirectiveID.getLoc()); |
9026 | 1.37M | else if (IDVal == ".unreq") |
9027 | 396 | return parseDirectiveUnreq(DirectiveID.getLoc()); |
9028 | 1.37M | else if (IDVal == ".fnend") |
9029 | 1.11k | return parseDirectiveFnEnd(DirectiveID.getLoc()); |
9030 | 1.36M | else if (IDVal == ".cantunwind") |
9031 | 1.19k | return parseDirectiveCantUnwind(DirectiveID.getLoc()); |
9032 | 1.36M | else if (IDVal == ".personality") |
9033 | 1.56k | return parseDirectivePersonality(DirectiveID.getLoc()); |
9034 | 1.36M | else if (IDVal == ".handlerdata") |
9035 | 1.71k | return parseDirectiveHandlerData(DirectiveID.getLoc()); |
9036 | 1.36M | else if (IDVal == ".setfp") |
9037 | 432 | return parseDirectiveSetFP(DirectiveID.getLoc()); |
9038 | 1.36M | else if (IDVal == ".pad") |
9039 | 1.00k | return parseDirectivePad(DirectiveID.getLoc()); |
9040 | 1.36M | else if (IDVal == ".save") |
9041 | 81 | return parseDirectiveRegSave(DirectiveID.getLoc(), false); |
9042 | 1.36M | else if (IDVal == ".vsave") |
9043 | 113 | return parseDirectiveRegSave(DirectiveID.getLoc(), true); |
9044 | 1.36M | else if (IDVal == ".ltorg" || IDVal == ".pool") |
9045 | 1.95k | return parseDirectiveLtorg(DirectiveID.getLoc()); |
9046 | 1.36M | else if (IDVal == ".even") |
9047 | 698 | return parseDirectiveEven(DirectiveID.getLoc()); |
9048 | 1.36M | else if (IDVal == ".personalityindex") |
9049 | 1.04k | return parseDirectivePersonalityIndex(DirectiveID.getLoc()); |
9050 | 1.35M | else if (IDVal == ".unwind_raw") |
9051 | 1.44k | return parseDirectiveUnwindRaw(DirectiveID.getLoc()); |
9052 | 1.35M | else if (IDVal == ".movsp") |
9053 | 643 | return parseDirectiveMovSP(DirectiveID.getLoc()); |
9054 | 1.35M | else if (IDVal == ".arch_extension") |
9055 | 3.60k | return parseDirectiveArchExtension(DirectiveID.getLoc()); |
9056 | 1.35M | else if (IDVal == ".align") |
9057 | 47.5k | return parseDirectiveAlign(DirectiveID.getLoc()); |
9058 | 1.30M | else if (IDVal == ".thumb_set") |
9059 | 264 | return parseDirectiveThumbSet(DirectiveID.getLoc()); |
9060 | | |
9061 | 1.30M | if (!IsMachO && !IsCOFF) { |
9062 | 1.30M | if (IDVal == ".arch") |
9063 | 170k | return parseDirectiveArch(DirectiveID.getLoc()); |
9064 | 1.13M | else if (IDVal == ".cpu") |
9065 | 7.62k | return parseDirectiveCPU(DirectiveID.getLoc()); |
9066 | 1.12M | else if (IDVal == ".eabi_attribute") |
9067 | 1.34k | return parseDirectiveEabiAttr(DirectiveID.getLoc()); |
9068 | 1.12M | else if (IDVal == ".fpu") |
9069 | 45.8k | return parseDirectiveFPU(DirectiveID.getLoc()); |
9070 | 1.08M | else if (IDVal == ".fnstart") |
9071 | 3.36k | return parseDirectiveFnStart(DirectiveID.getLoc()); |
9072 | 1.07M | else if (IDVal == ".inst") |
9073 | 1.89k | return parseDirectiveInst(DirectiveID.getLoc()); |
9074 | 1.07M | else if (IDVal == ".inst.n") |
9075 | 584 | return parseDirectiveInst(DirectiveID.getLoc(), 'n'); |
9076 | 1.07M | else if (IDVal == ".inst.w") |
9077 | 69 | return parseDirectiveInst(DirectiveID.getLoc(), 'w'); |
9078 | 1.07M | else if (IDVal == ".object_arch") |
9079 | 380 | return parseDirectiveObjectArch(DirectiveID.getLoc()); |
9080 | 1.07M | else if (IDVal == ".tlsdescseq") |
9081 | 714 | return parseDirectiveTLSDescSeq(DirectiveID.getLoc()); |
9082 | 1.30M | } |
9083 | | |
9084 | 1.07M | return true; |
9085 | 1.30M | } |
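ParseDirective is the dispatcher for all ARM-specific directives, including the .thumb/.arm (and the equivalent .code 16/.code 32) mode switches handled further below. A hedged fragment, again using the try_asm helper, showing a mode switch in the middle of one input:

// The same operation encodes as 4 bytes in ARM state and 2 bytes after
// switching to Thumb state with the .thumb directive.
try_asm(ks, ".arm\n"
            "adds r0, r0, #1\n"
            ".thumb\n"
            "adds r0, #1\n");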
9086 | | |
9087 | | /// parseLiteralValues |
9088 | | /// ::= .hword expression [, expression]* |
9089 | | /// ::= .short expression [, expression]* |
9090 | | /// ::= .word expression [, expression]* |
9091 | 87.9k | bool ARMAsmParser::parseLiteralValues(unsigned Size, SMLoc L) { |
9092 | 87.9k | MCAsmParser &Parser = getParser(); |
9093 | 87.9k | if (getLexer().isNot(AsmToken::EndOfStatement)) { |
9094 | 374k | for (;;) { |
9095 | 374k | const MCExpr *Value; |
9096 | 374k | if (getParser().parseExpression(Value)) { |
9097 | 39.1k | Parser.eatToEndOfStatement(); |
9098 | 39.1k | return false; |
9099 | 39.1k | } |
9100 | | |
9101 | 335k | getParser().getStreamer().EmitValue(Value, Size, L); |
9102 | | |
9103 | 335k | if (getLexer().is(AsmToken::EndOfStatement)) |
9104 | 39.9k | break; |
9105 | | |
9106 | | // FIXME: Improve diagnostic. |
9107 | 295k | if (getLexer().isNot(AsmToken::Comma)) { |
9108 | | //Error(L, "unexpected token in directive"); |
9109 | 4.21k | return false; |
9110 | 4.21k | } |
9111 | 290k | Parser.Lex(); |
9112 | 290k | } |
9113 | 83.3k | } |
9114 | | |
9115 | 44.6k | Parser.Lex(); |
9116 | 44.6k | return false; |
9117 | 87.9k | } |
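parseLiteralValues implements the .word, .short and .hword data directives dispatched above: each expression in the comma-separated list is emitted with the requested size (4 or 2 bytes). A small fragment with the same helper:

// Two 4-byte words plus two 2-byte halfwords: 12 bytes of data in total.
try_asm(ks, ".word 0x11223344, 0xAABBCCDD\n"
            ".short 0x5566, 0x7788\n");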
9118 | | |
9119 | | /// parseDirectiveThumb |
9120 | | /// ::= .thumb |
9121 | 4.30k | bool ARMAsmParser::parseDirectiveThumb(SMLoc L) { |
9122 | 4.30k | MCAsmParser &Parser = getParser(); |
9123 | 4.30k | if (getLexer().isNot(AsmToken::EndOfStatement)) { |
9124 | | //Error(L, "unexpected token in directive"); |
9125 | 85 | return false; |
9126 | 85 | } |
9127 | 4.21k | Parser.Lex(); |
9128 | | |
9129 | 4.21k | if (!hasThumb()) { |
9130 | | //Error(L, "target does not support Thumb mode"); |
9131 | 140 | return false; |
9132 | 140 | } |
9133 | | |
9134 | 4.07k | if (!isThumb()) |
9135 | 2.50k | SwitchMode(); |
9136 | | |
9137 | 4.07k | getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16); |
9138 | 4.07k | return false; |
9139 | 4.21k | } |
9140 | | |
9141 | | /// parseDirectiveARM |
9142 | | /// ::= .arm |
9143 | 3.58k | bool ARMAsmParser::parseDirectiveARM(SMLoc L) { |
9144 | 3.58k | MCAsmParser &Parser = getParser(); |
9145 | 3.58k | if (getLexer().isNot(AsmToken::EndOfStatement)) { |
9146 | | //Error(L, "unexpected token in directive"); |
9147 | 187 | return false; |
9148 | 187 | } |
9149 | 3.39k | Parser.Lex(); |
9150 | | |
9151 | 3.39k | if (!hasARM()) { |
9152 | | //Error(L, "target does not support ARM mode"); |
9153 | 157 | return false; |
9154 | 157 | } |
9155 | | |
9156 | 3.24k | if (isThumb()) |
9157 | 1.44k | SwitchMode(); |
9158 | | |
9159 | 3.24k | getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32); |
9160 | 3.24k | return false; |
9161 | 3.39k | } |
9162 | | |
9163 | 870 | void ARMAsmParser::onLabelParsed(MCSymbol *Symbol) { |
9164 | 870 | if (NextSymbolIsThumb) { |
9165 | 0 | getParser().getStreamer().EmitThumbFunc(Symbol); |
9166 | 0 | NextSymbolIsThumb = false; |
9167 | 0 | } |
9168 | 870 | } |
9169 | | |
9170 | | /// parseDirectiveThumbFunc |
9171 | | /// ::= .thumb_func symbol_name |
9172 | 3.31k | bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) { |
9173 | 3.31k | MCAsmParser &Parser = getParser(); |
9174 | 3.31k | const auto Format = getContext().getObjectFileInfo()->getObjectFileType(); |
9175 | 3.31k | bool IsMachO = Format == MCObjectFileInfo::IsMachO; |
9176 | | |
9177 | | // Darwin asm optionally has a function name after the .thumb_func directive; |
9178 | | // ELF doesn't. |
9179 | 3.31k | if (IsMachO) { |
9180 | 0 | const AsmToken &Tok = Parser.getTok(); |
9181 | 0 | if (Tok.isNot(AsmToken::EndOfStatement)) { |
9182 | 0 | if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String)) { |
9183 | | //Error(L, "unexpected token in .thumb_func directive"); |
9184 | 0 | return false; |
9185 | 0 | } |
9186 | | |
9187 | 0 | MCSymbol *Func = |
9188 | 0 | getParser().getContext().getOrCreateSymbol(Tok.getIdentifier()); |
9189 | 0 | getParser().getStreamer().EmitThumbFunc(Func); |
9190 | 0 | Parser.Lex(); // Consume the identifier token. |
9191 | 0 | return false; |
9192 | 0 | } |
9193 | 0 | } |
9194 | | |
9195 | 3.31k | if (getLexer().isNot(AsmToken::EndOfStatement)) { |
9196 | | //Error(Parser.getTok().getLoc(), "unexpected token in directive"); |
9197 | 382 | Parser.eatToEndOfStatement(); |
9198 | 382 | return false; |
9199 | 382 | } |
9200 | | |
9201 | 2.92k | NextSymbolIsThumb = true; |
9202 | 2.92k | return false; |
9203 | 3.31k | } |
9204 | | |
9205 | | /// parseDirectiveSyntax |
9206 | | /// ::= .syntax unified | divided |
9207 | 1.97k | bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) { |
9208 | 1.97k | MCAsmParser &Parser = getParser(); |
9209 | 1.97k | const AsmToken &Tok = Parser.getTok(); |
9210 | 1.97k | if (Tok.isNot(AsmToken::Identifier)) { |
9211 | | //Error(L, "unexpected token in .syntax directive"); |
9212 | 56 | return false; |
9213 | 56 | } |
9214 | | |
9215 | 1.91k | StringRef Mode = Tok.getString(); |
9216 | 1.91k | if (Mode == "unified" || Mode == "UNIFIED") { |
9217 | 174 | Parser.Lex(); |
9218 | 1.74k | } else if (Mode == "divided" || Mode == "DIVIDED") { |
9219 | | //Error(L, "'.syntax divided' arm assembly not supported"); |
9220 | 0 | return false; |
9221 | 1.74k | } else { |
9222 | | //Error(L, "unrecognized syntax mode in .syntax directive"); |
9223 | 1.74k | return false; |
9224 | 1.74k | } |
9225 | | |
9226 | 174 | if (getLexer().isNot(AsmToken::EndOfStatement)) { |
9227 | | //Error(Parser.getTok().getLoc(), "unexpected token in directive"); |
9228 | 164 | return false; |
9229 | 164 | } |
9230 | 10 | Parser.Lex(); |
9231 | | |
9232 | | // TODO tell the MC streamer the mode |
9233 | | // getParser().getStreamer().Emit???(); |
9234 | 10 | return false; |
9235 | 174 | } |
9236 | | |
9237 | | /// parseDirectiveCode |
9238 | | /// ::= .code 16 | 32 |
9239 | 196 | bool ARMAsmParser::parseDirectiveCode(SMLoc L) { |
9240 | 196 | MCAsmParser &Parser = getParser(); |
9241 | 196 | const AsmToken &Tok = Parser.getTok(); |
9242 | 196 | if (Tok.isNot(AsmToken::Integer)) { |
9243 | | //Error(L, "unexpected token in .code directive"); |
9244 | 127 | return false; |
9245 | 127 | } |
9246 | 69 | bool valid; |
9247 | 69 | int64_t Val = Parser.getTok().getIntVal(valid); |
9248 | 69 | if (!valid) |
9249 | 0 | return false; |
9250 | 69 | if (Val != 16 && Val != 32) { |
9251 | | //Error(L, "invalid operand to .code directive"); |
9252 | 22 | return false; |
9253 | 22 | } |
9254 | 47 | Parser.Lex(); |
9255 | | |
9256 | 47 | if (getLexer().isNot(AsmToken::EndOfStatement)) { |
9257 | | //Error(Parser.getTok().getLoc(), "unexpected token in directive"); |
9258 | 33 | return false; |
9259 | 33 | } |
9260 | 14 | Parser.Lex(); |
9261 | | |
9262 | 14 | if (Val == 16) { |
9263 | 10 | if (!hasThumb()) { |
9264 | | //Error(L, "target does not support Thumb mode"); |
9265 | 0 | return false; |
9266 | 0 | } |
9267 | | |
9268 | 10 | if (!isThumb()) |
9269 | 6 | SwitchMode(); |
9270 | 10 | getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16); |
9271 | 10 | } else { |
9272 | 4 | if (!hasARM()) { |
9273 | | //Error(L, "target does not support ARM mode"); |
9274 | 0 | return false; |
9275 | 0 | } |
9276 | | |
9277 | 4 | if (isThumb()) |
9278 | 3 | SwitchMode(); |
9279 | 4 | getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32); |
9280 | 4 | } |
9281 | | |
9282 | 14 | return false; |
9283 | 14 | } |
9284 | | |
9285 | | /// parseDirectiveReq |
9286 | | /// ::= name .req registername |
9287 | 5 | bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) { |
9288 | 5 | MCAsmParser &Parser = getParser(); |
9289 | 5 | Parser.Lex(); // Eat the '.req' token. |
9290 | 5 | unsigned Reg; |
9291 | 5 | SMLoc SRegLoc, ERegLoc; |
9292 | 5 | unsigned int ErrorCode; |
9293 | 5 | if (ParseRegister(Reg, SRegLoc, ERegLoc, ErrorCode)) { |
9294 | 2 | Parser.eatToEndOfStatement(); |
9295 | | //Error(SRegLoc, "register name expected"); |
9296 | 2 | return false; |
9297 | 2 | } |
9298 | | |
9299 | | // Shouldn't be anything else. |
9300 | 3 | if (Parser.getTok().isNot(AsmToken::EndOfStatement)) { |
9301 | 2 | Parser.eatToEndOfStatement(); |
9302 | | //Error(Parser.getTok().getLoc(), "unexpected input in .req directive."); |
9303 | 2 | return false; |
9304 | 2 | } |
9305 | | |
9306 | 1 | Parser.Lex(); // Consume the EndOfStatement |
9307 | | |
9308 | 1 | if (RegisterReqs.insert(std::make_pair(Name, Reg)).first->second != Reg) { |
9309 | | //Error(SRegLoc, "redefinition of '" + Name + "' does not match original."); |
9310 | 0 | return false; |
9311 | 0 | } |
9312 | | |
9313 | 1 | return false; |
9314 | 1 | } |
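parseDirectiveReq and parseDirectiveUnreq (below) maintain the RegisterReqs alias table, so a name introduced with .req can be used wherever a register is expected until .unreq drops it again. A hedged fragment with the same helper:

// "counter" becomes an alias for r4 until the .unreq.
try_asm(ks, "counter .req r4\n"
            "movs counter, #0\n"
            ".unreq counter\n");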
9315 | | |
9316 | | /// parseDirectiveUnreq |
9317 | | /// ::= .unreq registername |
9318 | 396 | bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) { |
9319 | 396 | MCAsmParser &Parser = getParser(); |
9320 | 396 | if (Parser.getTok().isNot(AsmToken::Identifier)) { |
9321 | 47 | Parser.eatToEndOfStatement(); |
9322 | | //Error(L, "unexpected input in .unreq directive."); |
9323 | 47 | return false; |
9324 | 47 | } |
9325 | 349 | RegisterReqs.erase(Parser.getTok().getIdentifier().lower()); |
9326 | 349 | Parser.Lex(); // Eat the identifier. |
9327 | 349 | return false; |
9328 | 396 | } |
9329 | | |
9330 | | /// parseDirectiveArch |
9331 | | /// ::= .arch token |
9332 | 170k | bool ARMAsmParser::parseDirectiveArch(SMLoc L) { |
9333 | 170k | StringRef Arch = getParser().parseStringToEndOfStatement().trim(); |
9334 | | |
9335 | 170k | unsigned ID = ARM::parseArch(Arch); |
9336 | | |
9337 | 170k | if (ID == ARM::AK_INVALID) { |
9338 | | //Error(L, "Unknown arch name"); |
9339 | 82.3k | return false; |
9340 | 82.3k | } |
9341 | | |
9342 | 87.9k | Triple T; |
9343 | 87.9k | MCSubtargetInfo &STI = copySTI(); |
9344 | 87.9k | STI.setDefaultFeatures("", ("+" + ARM::getArchName(ID)).str()); |
9345 | 87.9k | setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits())); |
9346 | | |
9347 | 87.9k | getTargetStreamer().emitArch(ID); |
9348 | 87.9k | return false; |
9349 | 170k | } |
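parseDirectiveArch replaces the subtarget's feature bits with those of the named architecture (the accepted names come from ARMTargetParser), so instructions that need a newer architecture can follow the directive. A hedged fragment:

// DMB is not available on older cores; .arch makes ARMv7-A features visible.
try_asm(ks, ".arch armv7-a\n"
            "dmb ish\n");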
9350 | | |
9351 | | /// parseDirectiveEabiAttr |
9352 | | /// ::= .eabi_attribute int, int [, "str"] |
9353 | | /// ::= .eabi_attribute Tag_name, int [, "str"] |
9354 | | bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) |
9355 | 1.34k | { |
9356 | 1.34k | MCAsmParser &Parser = getParser(); |
9357 | 1.34k | int64_t Tag; |
9358 | 1.34k | SMLoc TagLoc; |
9359 | 1.34k | TagLoc = Parser.getTok().getLoc(); |
9360 | 1.34k | if (Parser.getTok().is(AsmToken::Identifier)) { |
9361 | 825 | StringRef Name = Parser.getTok().getIdentifier(); |
9362 | 825 | Tag = ARMBuildAttrs::AttrTypeFromString(Name); |
9363 | 825 | if (Tag == -1) { |
9364 | | //Error(TagLoc, "attribute name not recognised: " + Name); |
9365 | 680 | Parser.eatToEndOfStatement(); |
9366 | 680 | return false; |
9367 | 680 | } |
9368 | 145 | Parser.Lex(); |
9369 | 516 | } else { |
9370 | 516 | const MCExpr *AttrExpr; |
9371 | | |
9372 | 516 | TagLoc = Parser.getTok().getLoc(); |
9373 | 516 | if (Parser.parseExpression(AttrExpr)) { |
9374 | 88 | Parser.eatToEndOfStatement(); |
9375 | 88 | return false; |
9376 | 88 | } |
9377 | | |
9378 | 428 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(AttrExpr); |
9379 | 428 | if (!CE) { |
9380 | | //Error(TagLoc, "expected numeric constant"); |
9381 | 33 | Parser.eatToEndOfStatement(); |
9382 | 33 | return false; |
9383 | 33 | } |
9384 | | |
9385 | 395 | Tag = CE->getValue(); |
9386 | 395 | } |
9387 | | |
9388 | 540 | if (Parser.getTok().isNot(AsmToken::Comma)) { |
9389 | | //Error(Parser.getTok().getLoc(), "comma expected"); |
9390 | 221 | Parser.eatToEndOfStatement(); |
9391 | 221 | return false; |
9392 | 221 | } |
9393 | 319 | Parser.Lex(); // skip comma |
9394 | | |
9395 | 319 | StringRef StringValue = ""; |
9396 | 319 | bool IsStringValue = false; |
9397 | | |
9398 | 319 | int64_t IntegerValue = 0; |
9399 | 319 | bool IsIntegerValue = false; |
9400 | | |
9401 | 319 | if (Tag == ARMBuildAttrs::CPU_raw_name || Tag == ARMBuildAttrs::CPU_name) |
9402 | 27 | IsStringValue = true; |
9403 | 292 | else if (Tag == ARMBuildAttrs::compatibility) { |
9404 | 52 | IsStringValue = true; |
9405 | 52 | IsIntegerValue = true; |
9406 | 240 | } else if (Tag < 32 || Tag % 2 == 0) |
9407 | 217 | IsIntegerValue = true; |
9408 | 23 | else if (Tag % 2 == 1) |
9409 | 23 | IsStringValue = true; |
9410 | 0 | else |
9411 | 0 | llvm_unreachable("invalid tag type"); |
9412 | | |
9413 | 319 | if (IsIntegerValue) { |
9414 | 269 | const MCExpr *ValueExpr; |
9415 | | //SMLoc ValueExprLoc = Parser.getTok().getLoc(); |
9416 | 269 | if (Parser.parseExpression(ValueExpr)) { |
9417 | 74 | Parser.eatToEndOfStatement(); |
9418 | 74 | return false; |
9419 | 74 | } |
9420 | | |
9421 | 195 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ValueExpr); |
9422 | 195 | if (!CE) { |
9423 | | //Error(ValueExprLoc, "expected numeric constant"); |
9424 | 36 | Parser.eatToEndOfStatement(); |
9425 | 36 | return false; |
9426 | 36 | } |
9427 | | |
9428 | 159 | IntegerValue = CE->getValue(); |
9429 | 159 | } |
9430 | | |
9431 | 209 | if (Tag == ARMBuildAttrs::compatibility) { |
9432 | 52 | if (Parser.getTok().isNot(AsmToken::Comma)) |
9433 | 30 | IsStringValue = false; |
9434 | 52 | if (Parser.getTok().isNot(AsmToken::Comma)) { |
9435 | | //Error(Parser.getTok().getLoc(), "comma expected"); |
9436 | 30 | Parser.eatToEndOfStatement(); |
9437 | 30 | return false; |
9438 | 30 | } else { |
9439 | 22 | Parser.Lex(); |
9440 | 22 | } |
9441 | 52 | } |
9442 | | |
9443 | 179 | if (IsStringValue) { |
9444 | 72 | if (Parser.getTok().isNot(AsmToken::String)) { |
9445 | | //Error(Parser.getTok().getLoc(), "bad string constant"); |
9446 | 72 | Parser.eatToEndOfStatement(); |
9447 | 72 | return false; |
9448 | 72 | } |
9449 | | |
9450 | 0 | bool valid; |
9451 | 0 | StringValue = Parser.getTok().getStringContents(valid); |
9452 | 0 | if (!valid) { |
9453 | | //KsError = KS_ERR_ASM_DIRECTIVE_STR; |
9454 | 0 | return true; |
9455 | 0 | } |
9456 | 0 | Parser.Lex(); |
9457 | 0 | } |
9458 | | |
9459 | 107 | if (IsIntegerValue && IsStringValue) { |
9460 | 0 | assert(Tag == ARMBuildAttrs::compatibility); |
9461 | 0 | getTargetStreamer().emitIntTextAttribute(Tag, IntegerValue, StringValue); |
9462 | 107 | } else if (IsIntegerValue) |
9463 | 107 | getTargetStreamer().emitAttribute(Tag, IntegerValue); |
9464 | 0 | else if (IsStringValue) |
9465 | 0 | getTargetStreamer().emitTextAttribute(Tag, StringValue); |
9466 | 107 | return false; |
9467 | 107 | } |
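parseDirectiveEabiAttr accepts either a numeric tag or a Tag_* name, followed by an integer and/or string value depending on the tag; the result only affects build-attribute metadata, not instruction encoding. A hedged fragment (the specific tag number and name used here are assumptions taken from ARMBuildAttrs):

// Integer-valued tag given numerically, and a string-valued tag by name.
try_asm(ks, ".eabi_attribute 24, 1\n"
            ".eabi_attribute Tag_CPU_name, \"cortex-a9\"\n");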
9468 | | |
9469 | | /// parseDirectiveCPU |
9470 | | /// ::= .cpu str |
9471 | 7.62k | bool ARMAsmParser::parseDirectiveCPU(SMLoc L) { |
9472 | 7.62k | StringRef CPU = getParser().parseStringToEndOfStatement().trim(); |
9473 | 7.62k | getTargetStreamer().emitTextAttribute(ARMBuildAttrs::CPU_name, CPU); |
9474 | | |
9475 | | // FIXME: This is using table-gen data, but should be moved to |
9476 | | // ARMTargetParser once that is table-gen'd. |
9477 | 7.62k | if (!getSTI().isCPUStringValid(CPU)) { |
9478 | | //Error(L, "Unknown CPU name"); |
9479 | 5.86k | return false; |
9480 | 5.86k | } |
9481 | | |
9482 | 1.76k | MCSubtargetInfo &STI = copySTI(); |
9483 | 1.76k | STI.setDefaultFeatures(CPU, ""); |
9484 | 1.76k | setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits())); |
9485 | | |
9486 | 1.76k | return false; |
9487 | 7.62k | } |
9488 | | /// parseDirectiveFPU |
9489 | | /// ::= .fpu str |
9490 | | bool ARMAsmParser::parseDirectiveFPU(SMLoc L) |
9491 | 45.8k | { |
9492 | | //SMLoc FPUNameLoc = getTok().getLoc(); |
9493 | 45.8k | StringRef FPU = getParser().parseStringToEndOfStatement().trim(); |
9494 | | |
9495 | 45.8k | unsigned ID = ARM::parseFPU(FPU); |
9496 | 45.8k | std::vector<const char *> Features; |
9497 | 45.8k | if (!ARM::getFPUFeatures(ID, Features)) { |
9498 | | //Error(FPUNameLoc, "Unknown FPU name"); |
9499 | 22.4k | return false; |
9500 | 22.4k | } |
9501 | | |
9502 | 23.3k | MCSubtargetInfo &STI = copySTI(); |
9503 | 23.3k | for (auto Feature : Features) |
9504 | 203k | STI.ApplyFeatureFlag(Feature); |
9505 | 23.3k | setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits())); |
9506 | | |
9507 | 23.3k | getTargetStreamer().emitFPU(ID); |
9508 | 23.3k | return false; |
9509 | 45.8k | } |
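parseDirectiveCPU and parseDirectiveFPU above both rebuild the subtarget feature bits, which is what allows floating-point/NEON instructions after an .fpu directive even if the initial target did not enable them. A hedged fragment (the accepted FPU names come from ARMTargetParser):

// NEON instructions become available once .fpu has enabled the features.
try_asm(ks, ".fpu neon\n"
            "vadd.f32 d0, d1, d2\n");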
9510 | | |
9511 | | /// parseDirectiveFnStart |
9512 | | /// ::= .fnstart |
9513 | 3.36k | bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) { |
9514 | 3.36k | if (UC.hasFnStart()) { |
9515 | | //Error(L, ".fnstart starts before the end of previous one"); |
9516 | 1.54k | UC.emitFnStartLocNotes(); |
9517 | 1.54k | return false; |
9518 | 1.54k | } |
9519 | | |
9520 | | // Reset the unwind directives parser state |
9521 | 1.81k | UC.reset(); |
9522 | | |
9523 | 1.81k | getTargetStreamer().emitFnStart(); |
9524 | | |
9525 | 1.81k | UC.recordFnStart(L); |
9526 | 1.81k | return false; |
9527 | 3.36k | } |
9528 | | |
9529 | | /// parseDirectiveFnEnd |
9530 | | /// ::= .fnend |
9531 | 1.11k | bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) { |
9532 | | // Check the ordering of unwind directives |
9533 | 1.11k | if (!UC.hasFnStart()) { |
9534 | | //Error(L, ".fnstart must precede .fnend directive"); |
9535 | 152 | return false; |
9536 | 152 | } |
9537 | | |
9538 | | // Reset the unwind directives parser state |
9539 | 965 | getTargetStreamer().emitFnEnd(); |
9540 | | |
9541 | 965 | UC.reset(); |
9542 | 965 | return false; |
9543 | 1.11k | } |
9544 | | |
9545 | | /// parseDirectiveCantUnwind |
9546 | | /// ::= .cantunwind |
9547 | 1.19k | bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) { |
9548 | 1.19k | UC.recordCantUnwind(L); |
9549 | | |
9550 | | // Check the ordering of unwind directives |
9551 | 1.19k | if (!UC.hasFnStart()) { |
9552 | | //Error(L, ".fnstart must precede .cantunwind directive"); |
9553 | 304 | return false; |
9554 | 304 | } |
9555 | 895 | if (UC.hasHandlerData()) { |
9556 | | //Error(L, ".cantunwind can't be used with .handlerdata directive"); |
9557 | 209 | UC.emitHandlerDataLocNotes(); |
9558 | 209 | return false; |
9559 | 209 | } |
9560 | 686 | if (UC.hasPersonality()) { |
9561 | | //Error(L, ".cantunwind can't be used with .personality directive"); |
9562 | 350 | UC.emitPersonalityLocNotes(); |
9563 | 350 | return false; |
9564 | 350 | } |
9565 | | |
9566 | 336 | getTargetStreamer().emitCantUnwind(); |
9567 | 336 | return false; |
9568 | 686 | } |
9569 | | |
9570 | | /// parseDirectivePersonality |
9571 | | /// ::= .personality name |
9572 | 1.56k | bool ARMAsmParser::parseDirectivePersonality(SMLoc L) { |
9573 | 1.56k | MCAsmParser &Parser = getParser(); |
9574 | 1.56k | bool HasExistingPersonality = UC.hasPersonality(); |
9575 | | |
9576 | 1.56k | UC.recordPersonality(L); |
9577 | | |
9578 | | // Check the ordering of unwind directives |
9579 | 1.56k | if (!UC.hasFnStart()) { |
9580 | | //Error(L, ".fnstart must precede .personality directive"); |
9581 | 667 | return false; |
9582 | 667 | } |
9583 | 900 | if (UC.cantUnwind()) { |
9584 | | //Error(L, ".personality can't be used with .cantunwind directive"); |
9585 | 283 | UC.emitCantUnwindLocNotes(); |
9586 | 283 | return false; |
9587 | 283 | } |
9588 | 617 | if (UC.hasHandlerData()) { |
9589 | | //Error(L, ".personality must precede .handlerdata directive"); |
9590 | 62 | UC.emitHandlerDataLocNotes(); |
9591 | 62 | return false; |
9592 | 62 | } |
9593 | 555 | if (HasExistingPersonality) { |
9594 | 452 | Parser.eatToEndOfStatement(); |
9595 | | //Error(L, "multiple personality directives"); |
9596 | 452 | UC.emitPersonalityLocNotes(); |
9597 | 452 | return false; |
9598 | 452 | } |
9599 | | |
9600 | | // Parse the name of the personality routine |
9601 | 103 | if (Parser.getTok().isNot(AsmToken::Identifier)) { |
9602 | 100 | Parser.eatToEndOfStatement(); |
9603 | | //Error(L, "unexpected input in .personality directive."); |
9604 | 100 | return false; |
9605 | 100 | } |
9606 | 3 | StringRef Name(Parser.getTok().getIdentifier()); |
9607 | 3 | Parser.Lex(); |
9608 | | |
9609 | 3 | MCSymbol *PR = getParser().getContext().getOrCreateSymbol(Name); |
9610 | 3 | getTargetStreamer().emitPersonality(PR); |
9611 | 3 | return false; |
9612 | 103 | } |
9613 | | |
9614 | | /// parseDirectiveHandlerData |
9615 | | /// ::= .handlerdata |
9616 | 1.71k | bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) { |
9617 | 1.71k | UC.recordHandlerData(L); |
9618 | | |
9619 | | // Check the ordering of unwind directives |
9620 | 1.71k | if (!UC.hasFnStart()) { |
9621 | | //Error(L, ".fnstart must precede .handlerdata directive"); |
9622 | 1.08k | return false; |
9623 | 1.08k | } |
9624 | 635 | if (UC.cantUnwind()) { |
9625 | | //Error(L, ".handlerdata can't be used with .cantunwind directive"); |
9626 | 162 | UC.emitCantUnwindLocNotes(); |
9627 | 162 | return false; |
9628 | 162 | } |
9629 | | |
9630 | 473 | getTargetStreamer().emitHandlerData(); |
9631 | 473 | return false; |
9632 | 635 | } |
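The unwind-table directives checked above and below enforce a strict ordering: .fnstart opens a region; .personality, .setfp, .pad and .save must come before .handlerdata; .cantunwind is mutually exclusive with .personality and .handlerdata; and .fnend closes the region. A hedged sketch of a sequence that satisfies those checks, shown as assembler input only; whether the raw-output path actually emits the unwind opcodes depends on the target streamer and is not claimed here.

// A directive ordering accepted by the checks in this parser.
const char *unwind_ok =
    ".fnstart\n"
    ".save {r4, lr}\n"   // registers saved by the prologue
    ".pad #8\n"          // stack adjustment performed by the prologue
    "push {r4, lr}\n"
    "sub sp, sp, #8\n"
    ".fnend\n";
// Adding .cantunwind to this region alongside .personality or .handlerdata
// would be rejected by the checks above.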
9633 | | |
9634 | | /// parseDirectiveSetFP |
9635 | | /// ::= .setfp fpreg, spreg [, offset] |
9636 | 432 | bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) { |
9637 | 432 | MCAsmParser &Parser = getParser(); |
9638 | | // Check the ordering of unwind directives |
9639 | 432 | if (!UC.hasFnStart()) { |
9640 | | //Error(L, ".fnstart must precede .setfp directive"); |
9641 | 134 | return false; |
9642 | 134 | } |
9643 | 298 | if (UC.hasHandlerData()) { |
9644 | | //Error(L, ".setfp must precede .handlerdata directive"); |
9645 | 6 | return false; |
9646 | 6 | } |
9647 | | |
9648 | | // Parse fpreg |
9649 | | //SMLoc FPRegLoc = Parser.getTok().getLoc(); |
9650 | 292 | int FPReg = tryParseRegister(); |
9651 | 292 | if (FPReg == -1) { |
9652 | | //Error(FPRegLoc, "frame pointer register expected"); |
9653 | 280 | return false; |
9654 | 280 | } |
9655 | | |
9656 | | // Consume comma |
9657 | 12 | if (Parser.getTok().isNot(AsmToken::Comma)) { |
9658 | | //Error(Parser.getTok().getLoc(), "comma expected"); |
9659 | 1 | return false; |
9660 | 1 | } |
9661 | 11 | Parser.Lex(); // skip comma |
9662 | | |
9663 | | // Parse spreg |
9664 | | //SMLoc SPRegLoc = Parser.getTok().getLoc(); |
9665 | 11 | int SPReg = tryParseRegister(); |
9666 | 11 | if (SPReg == -1) { |
9667 | | //Error(SPRegLoc, "stack pointer register expected"); |
9668 | 1 | return false; |
9669 | 1 | } |
9670 | | |
9671 | 10 | if (SPReg != ARM::SP && SPReg != UC.getFPReg()) { |
9672 | | //Error(SPRegLoc, "register should be either $sp or the latest fp register"); |
9673 | 10 | return false; |
9674 | 10 | } |
9675 | | |
9676 | | // Update the frame pointer register |
9677 | 0 | UC.saveFPReg(FPReg); |
9678 | | |
9679 | | // Parse offset |
9680 | 0 | int64_t Offset = 0; |
9681 | 0 | if (Parser.getTok().is(AsmToken::Comma)) { |
9682 | 0 | Parser.Lex(); // skip comma |
9683 | |
9684 | 0 | if (Parser.getTok().isNot(AsmToken::Hash) && |
9685 | 0 | Parser.getTok().isNot(AsmToken::Dollar)) { |
9686 | | //Error(Parser.getTok().getLoc(), "'#' expected"); |
9687 | 0 | return false; |
9688 | 0 | } |
9689 | 0 | Parser.Lex(); // skip hash token. |
9690 | |
9691 | 0 | const MCExpr *OffsetExpr; |
9692 | | //SMLoc ExLoc = Parser.getTok().getLoc(); |
9693 | 0 | SMLoc EndLoc; |
9694 | 0 | if (getParser().parseExpression(OffsetExpr, EndLoc)) { |
9695 | | //Error(ExLoc, "malformed setfp offset"); |
9696 | 0 | return false; |
9697 | 0 | } |
9698 | 0 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr); |
9699 | 0 | if (!CE) { |
9700 | | //Error(ExLoc, "setfp offset must be an immediate"); |
9701 | 0 | return false; |
9702 | 0 | } |
9703 | | |
9704 | 0 | Offset = CE->getValue(); |
9705 | 0 | } |
9706 | | |
9707 | 0 | getTargetStreamer().emitSetFP(static_cast<unsigned>(FPReg), |
9708 | 0 | static_cast<unsigned>(SPReg), Offset); |
9709 | 0 | return false; |
9710 | 0 | } |
9711 | | |
9712 | | /// parseDirectivePad
9713 | | /// ::= .pad offset |
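| | /// For illustration, the offset must be a '#'- or '$'-prefixed constant, e.g.
| | ///   .pad #16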
9714 | 1.00k | bool ARMAsmParser::parseDirectivePad(SMLoc L) { |
9715 | 1.00k | MCAsmParser &Parser = getParser(); |
9716 | | // Check the ordering of unwind directives |
9717 | 1.00k | if (!UC.hasFnStart()) { |
9718 | | //Error(L, ".fnstart must precede .pad directive"); |
9719 | 151 | return false; |
9720 | 151 | } |
9721 | 852 | if (UC.hasHandlerData()) { |
9722 | | //Error(L, ".pad must precede .handlerdata directive"); |
9723 | 12 | return false; |
9724 | 12 | } |
9725 | | |
9726 | | // Parse the offset |
9727 | 840 | if (Parser.getTok().isNot(AsmToken::Hash) && |
9728 | 840 | Parser.getTok().isNot(AsmToken::Dollar)) { |
9729 | | //Error(Parser.getTok().getLoc(), "'#' expected"); |
9730 | 435 | return false; |
9731 | 435 | } |
9732 | 405 | Parser.Lex(); // skip hash token. |
9733 | | |
9734 | 405 | const MCExpr *OffsetExpr; |
9735 | | //SMLoc ExLoc = Parser.getTok().getLoc(); |
9736 | 405 | SMLoc EndLoc; |
9737 | 405 | if (getParser().parseExpression(OffsetExpr, EndLoc)) { |
9738 | | //Error(ExLoc, "malformed pad offset"); |
9739 | 172 | return false; |
9740 | 172 | } |
9741 | 233 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr); |
9742 | 233 | if (!CE) { |
9743 | | //Error(ExLoc, "pad offset must be an immediate"); |
9744 | 224 | return false; |
9745 | 224 | } |
9746 | | |
9747 | 9 | getTargetStreamer().emitPad(CE->getValue()); |
9748 | 9 | return false; |
9749 | 233 | } |
9750 | | |
9751 | | /// parseDirectiveRegSave |
9752 | | /// ::= .save { registers } |
9753 | | /// ::= .vsave { registers } |
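| | /// For illustration (register choice is arbitrary): .save expects a GPR list
| | /// and .vsave a DPR list, e.g.
| | ///   .save  {r4-r7, lr}
| | ///   .vsave {d8-d11}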
9754 | 194 | bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) { |
9755 | | // Check the ordering of unwind directives |
9756 | 194 | if (!UC.hasFnStart()) { |
9757 | | //Error(L, ".fnstart must precede .save or .vsave directives"); |
9758 | 191 | return false; |
9759 | 191 | } |
9760 | 3 | if (UC.hasHandlerData()) { |
9761 | | //Error(L, ".save or .vsave must precede .handlerdata directive"); |
9762 | 3 | return false; |
9763 | 3 | } |
9764 | | |
9765 | | // RAII object to make sure parsed operands are deleted. |
9766 | 0 | SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands; |
9767 | | |
9768 | | // Parse the register list |
9769 | 0 | if (parseRegisterList(Operands)) |
9770 | 0 | return false; |
9771 | 0 | ARMOperand &Op = (ARMOperand &)*Operands[0]; |
9772 | 0 | if (!IsVector && !Op.isRegList()) { |
9773 | | //Error(L, ".save expects GPR registers"); |
9774 | 0 | return false; |
9775 | 0 | } |
9776 | 0 | if (IsVector && !Op.isDPRRegList()) { |
9777 | | //Error(L, ".vsave expects DPR registers"); |
9778 | 0 | return false; |
9779 | 0 | } |
9780 | | |
9781 | 0 | getTargetStreamer().emitRegSave(Op.getRegList(), IsVector); |
9782 | 0 | return false; |
9783 | 0 | } |
9784 | | |
9785 | | /// parseDirectiveInst |
9786 | | /// ::= .inst opcode [, ...] |
9787 | | /// ::= .inst.n opcode [, ...] |
9788 | | /// ::= .inst.w opcode [, ...] |
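| | /// For illustration (the encodings below are just example values): ARM mode
| | /// accepts only the bare form, while Thumb mode requires the .n/.w suffix to
| | /// pick a 16- or 32-bit encoding, e.g.
| | ///   .inst   0xe1a00000
| | ///   .inst.n 0xbf00
| | ///   .inst.w 0xf3af8000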
9789 | 2.54k | bool ARMAsmParser::parseDirectiveInst(SMLoc Loc, char Suffix) { |
9790 | 2.54k | MCAsmParser &Parser = getParser(); |
9791 | 2.54k | int Width; |
9792 | | |
9793 | 2.54k | if (isThumb()) { |
9794 | 899 | switch (Suffix) { |
9795 | 458 | case 'n': |
9796 | 458 | Width = 2; |
9797 | 458 | break; |
9798 | 58 | case 'w': |
9799 | 58 | Width = 4; |
9800 | 58 | break; |
9801 | 383 | default: |
9802 | 383 | Parser.eatToEndOfStatement(); |
9803 | | //Error(Loc, "cannot determine Thumb instruction size, " |
9804 | | // "use inst.n/inst.w instead"); |
9805 | 383 | return false; |
9806 | 899 | } |
9807 | 1.64k | } else { |
9808 | 1.64k | if (Suffix) { |
9809 | 137 | Parser.eatToEndOfStatement(); |
9810 | | //Error(Loc, "width suffixes are invalid in ARM mode"); |
9811 | 137 | return false; |
9812 | 137 | } |
9813 | 1.50k | Width = 4; |
9814 | 1.50k | } |
9815 | | |
9816 | 2.02k | if (getLexer().is(AsmToken::EndOfStatement)) { |
9817 | 123 | Parser.eatToEndOfStatement(); |
9818 | | //Error(Loc, "expected expression following directive"); |
9819 | 123 | return false; |
9820 | 123 | } |
9821 | | |
9822 | 2.06k | for (;;) { |
9823 | 2.06k | const MCExpr *Expr; |
9824 | | |
9825 | 2.06k | if (getParser().parseExpression(Expr)) { |
9826 | | //Error(Loc, "expected expression"); |
9827 | 311 | return false; |
9828 | 311 | } |
9829 | | |
9830 | 1.75k | const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr); |
9831 | 1.75k | if (!Value) { |
9832 | | //Error(Loc, "expected constant expression"); |
9833 | 1.22k | return false; |
9834 | 1.22k | } |
9835 | | |
9836 | 526 | switch (Width) { |
9837 | 231 | case 2: |
9838 | 231 | if (Value->getValue() > 0xffff) { |
9839 | | //Error(Loc, "inst.n operand is too big, use inst.w instead"); |
9840 | 26 | return false; |
9841 | 26 | } |
9842 | 205 | break; |
9843 | 295 | case 4: |
9844 | 295 | if (Value->getValue() > 0xffffffff) { |
9845 | | //Error(Loc, |
9846 | | // StringRef(Suffix ? "inst.w" : "inst") + " operand is too big"); |
9847 | 3 | return false; |
9848 | 3 | } |
9849 | 292 | break; |
9850 | 292 | default: |
9851 | 0 | llvm_unreachable("only supported widths are 2 and 4"); |
9852 | 526 | } |
9853 | | |
9854 | 497 | getTargetStreamer().emitInst(Value->getValue(), Suffix); |
9855 | | |
9856 | 497 | if (getLexer().is(AsmToken::EndOfStatement)) |
9857 | 154 | break; |
9858 | | |
9859 | 343 | if (getLexer().isNot(AsmToken::Comma)) { |
9860 | | //Error(Loc, "unexpected token in directive"); |
9861 | 181 | return false; |
9862 | 181 | } |
9863 | | |
9864 | 162 | Parser.Lex(); |
9865 | 162 | } |
9866 | | |
9867 | 154 | Parser.Lex(); |
9868 | 154 | return false; |
9869 | 1.90k | } |
9870 | | |
9871 | | /// parseDirectiveLtorg |
9872 | | /// ::= .ltorg | .pool |
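| | /// For illustration, either spelling flushes the pending literal pool, e.g.
| | ///   ldr r0, =0x12345678
| | ///   .ltorg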
9873 | 1.95k | bool ARMAsmParser::parseDirectiveLtorg(SMLoc L) { |
9874 | 1.95k | getTargetStreamer().emitCurrentConstantPool(); |
9875 | 1.95k | return false; |
9876 | 1.95k | } |
9877 | | |
9878 | | bool ARMAsmParser::parseDirectiveEven(SMLoc L) |
9879 | 698 | { |
9880 | 698 | const MCSection *Section = getStreamer().getCurrentSection().first; |
9881 | | |
9882 | 698 | if (getLexer().isNot(AsmToken::EndOfStatement)) { |
9883 | | //TokError("unexpected token in directive"); |
9884 | 424 | return false; |
9885 | 424 | } |
9886 | | |
9887 | 274 | if (!Section) { |
9888 | 0 | getStreamer().InitSections(false); |
9889 | 0 | Section = getStreamer().getCurrentSection().first; |
9890 | 0 | } |
9891 | | |
9892 | 274 | assert(Section && "must have section to emit alignment"); |
9893 | 274 | if (Section->UseCodeAlign()) |
9894 | 203 | getStreamer().EmitCodeAlignment(2); |
9895 | 71 | else |
9896 | 71 | getStreamer().EmitValueToAlignment(2); |
9897 | | |
9898 | 274 | return false; |
9899 | 274 | } |
9900 | | |
9901 | | /// parseDirectivePersonalityIndex |
9902 | | /// ::= .personalityindex index |
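| | /// For illustration, the index must be a constant in the range [0-3]
| | /// selecting a predefined EHABI personality routine, e.g.
| | ///   .personalityindex 0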
9903 | 1.04k | bool ARMAsmParser::parseDirectivePersonalityIndex(SMLoc L) { |
9904 | 1.04k | MCAsmParser &Parser = getParser(); |
9905 | 1.04k | bool HasExistingPersonality = UC.hasPersonality(); |
9906 | | |
9907 | 1.04k | UC.recordPersonalityIndex(L); |
9908 | | |
9909 | 1.04k | if (!UC.hasFnStart()) { |
9910 | 51 | Parser.eatToEndOfStatement(); |
9911 | | //Error(L, ".fnstart must precede .personalityindex directive"); |
9912 | 51 | return false; |
9913 | 51 | } |
9914 | 996 | if (UC.cantUnwind()) { |
9915 | 42 | Parser.eatToEndOfStatement(); |
9916 | | //Error(L, ".personalityindex cannot be used with .cantunwind"); |
9917 | 42 | UC.emitCantUnwindLocNotes(); |
9918 | 42 | return false; |
9919 | 42 | } |
9920 | 954 | if (UC.hasHandlerData()) { |
9921 | 15 | Parser.eatToEndOfStatement(); |
9922 | | //Error(L, ".personalityindex must precede .handlerdata directive"); |
9923 | 15 | UC.emitHandlerDataLocNotes(); |
9924 | 15 | return false; |
9925 | 15 | } |
9926 | 939 | if (HasExistingPersonality) { |
9927 | 789 | Parser.eatToEndOfStatement(); |
9928 | | //Error(L, "multiple personality directives"); |
9929 | 789 | UC.emitPersonalityLocNotes(); |
9930 | 789 | return false; |
9931 | 789 | } |
9932 | | |
9933 | 150 | const MCExpr *IndexExpression; |
9934 | | //SMLoc IndexLoc = Parser.getTok().getLoc(); |
9935 | 150 | if (Parser.parseExpression(IndexExpression)) { |
9936 | 98 | Parser.eatToEndOfStatement(); |
9937 | 98 | return false; |
9938 | 98 | } |
9939 | | |
9940 | 52 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IndexExpression); |
9941 | 52 | if (!CE) { |
9942 | 3 | Parser.eatToEndOfStatement(); |
9943 | | //Error(IndexLoc, "index must be a constant number"); |
9944 | 3 | return false; |
9945 | 3 | } |
9946 | 49 | if (CE->getValue() < 0 || |
9947 | 49 | CE->getValue() >= ARM::EHABI::NUM_PERSONALITY_INDEX) { |
9948 | 47 | Parser.eatToEndOfStatement(); |
9949 | | //Error(IndexLoc, "personality routine index should be in range [0-3]"); |
9950 | 47 | return false; |
9951 | 47 | } |
9952 | | |
9953 | 2 | getTargetStreamer().emitPersonalityIndex(CE->getValue()); |
9954 | 2 | return false; |
9955 | 49 | } |
9956 | | |
9957 | | /// parseDirectiveUnwindRaw |
9958 | | /// ::= .unwind_raw offset, opcode [, opcode...] |
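| | /// For illustration (the opcode value is just an example): the offset must be
| | /// a constant and every opcode must fit in one byte, e.g.
| | ///   .unwind_raw 4, 0xb0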
9959 | 1.44k | bool ARMAsmParser::parseDirectiveUnwindRaw(SMLoc L) { |
9960 | 1.44k | MCAsmParser &Parser = getParser(); |
9961 | 1.44k | if (!UC.hasFnStart()) { |
9962 | 572 | Parser.eatToEndOfStatement(); |
9963 | | //Error(L, ".fnstart must precede .unwind_raw directives"); |
9964 | 572 | return false; |
9965 | 572 | } |
9966 | | |
9967 | 873 | int64_t StackOffset; |
9968 | | |
9969 | 873 | const MCExpr *OffsetExpr; |
9970 | | //SMLoc OffsetLoc = getLexer().getLoc(); |
9971 | 873 | if (getLexer().is(AsmToken::EndOfStatement) || |
9972 | 873 | getParser().parseExpression(OffsetExpr)) { |
9973 | | //Error(OffsetLoc, "expected expression"); |
9974 | 321 | Parser.eatToEndOfStatement(); |
9975 | 321 | return false; |
9976 | 321 | } |
9977 | | |
9978 | 552 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr); |
9979 | 552 | if (!CE) { |
9980 | | //Error(OffsetLoc, "offset must be a constant"); |
9981 | 100 | Parser.eatToEndOfStatement(); |
9982 | 100 | return false; |
9983 | 100 | } |
9984 | | |
9985 | 452 | StackOffset = CE->getValue(); |
9986 | | |
9987 | 452 | if (getLexer().isNot(AsmToken::Comma)) { |
9988 | | //Error(getLexer().getLoc(), "expected comma"); |
9989 | 94 | Parser.eatToEndOfStatement(); |
9990 | 94 | return false; |
9991 | 94 | } |
9992 | 358 | Parser.Lex(); |
9993 | | |
9994 | 358 | SmallVector<uint8_t, 16> Opcodes; |
9995 | 400 | for (;;) { |
9996 | 400 | const MCExpr *OE; |
9997 | | |
9998 | | //SMLoc OpcodeLoc = getLexer().getLoc(); |
9999 | 400 | if (getLexer().is(AsmToken::EndOfStatement) || Parser.parseExpression(OE)) { |
10000 | | //Error(OpcodeLoc, "expected opcode expression"); |
10001 | 155 | Parser.eatToEndOfStatement(); |
10002 | 155 | return false; |
10003 | 155 | } |
10004 | | |
10005 | 245 | const MCConstantExpr *OC = dyn_cast<MCConstantExpr>(OE); |
10006 | 245 | if (!OC) { |
10007 | | //Error(OpcodeLoc, "opcode value must be a constant"); |
10008 | 87 | Parser.eatToEndOfStatement(); |
10009 | 87 | return false; |
10010 | 87 | } |
10011 | | |
10012 | 158 | const int64_t Opcode = OC->getValue(); |
10013 | 158 | if (Opcode & ~0xff) { |
10014 | | //Error(OpcodeLoc, "invalid opcode"); |
10015 | 14 | Parser.eatToEndOfStatement(); |
10016 | 14 | return false; |
10017 | 14 | } |
10018 | | |
10019 | 144 | Opcodes.push_back(uint8_t(Opcode)); |
10020 | | |
10021 | 144 | if (getLexer().is(AsmToken::EndOfStatement)) |
10022 | 42 | break; |
10023 | | |
10024 | 102 | if (getLexer().isNot(AsmToken::Comma)) { |
10025 | | //Error(getLexer().getLoc(), "unexpected token in directive"); |
10026 | 60 | Parser.eatToEndOfStatement(); |
10027 | 60 | return false; |
10028 | 60 | } |
10029 | | |
10030 | 42 | Parser.Lex(); |
10031 | 42 | } |
10032 | | |
10033 | 42 | getTargetStreamer().emitUnwindRaw(StackOffset, Opcodes); |
10034 | | |
10035 | 42 | Parser.Lex(); |
10036 | 42 | return false; |
10037 | 358 | } |
10038 | | |
10039 | | /// parseDirectiveTLSDescSeq |
10040 | | /// ::= .tlsdescseq tls-variable |
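| | /// For illustration (the variable name is hypothetical):
| | ///   .tlsdescseq tls_object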
10041 | | bool ARMAsmParser::parseDirectiveTLSDescSeq(SMLoc L) |
10042 | 714 | { |
10043 | 714 | MCAsmParser &Parser = getParser(); |
10044 | | |
10045 | 714 | if (getLexer().isNot(AsmToken::Identifier)) { |
10046 | | //TokError("expected variable after '.tlsdescseq' directive"); |
10047 | 39 | Parser.eatToEndOfStatement(); |
10048 | 39 | return false; |
10049 | 39 | } |
10050 | | |
10051 | 675 | const MCSymbolRefExpr *SRE = |
10052 | 675 | MCSymbolRefExpr::create(Parser.getTok().getIdentifier(), |
10053 | 675 | MCSymbolRefExpr::VK_ARM_TLSDESCSEQ, getContext()); |
10054 | 675 | Lex(); |
10055 | | |
10056 | 675 | if (getLexer().isNot(AsmToken::EndOfStatement)) { |
10057 | | //Error(Parser.getTok().getLoc(), "unexpected token"); |
10058 | 60 | Parser.eatToEndOfStatement(); |
10059 | 60 | return false; |
10060 | 60 | } |
10061 | | |
10062 | 615 | getTargetStreamer().AnnotateTLSDescriptorSequence(SRE); |
10063 | 615 | return false; |
10064 | 675 | } |
10065 | | |
10066 | | /// parseDirectiveMovSP |
10067 | | /// ::= .movsp reg [, #offset] |
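| | /// For illustration (register choice is arbitrary; sp and pc are rejected):
| | ///   .movsp r11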
10068 | 643 | bool ARMAsmParser::parseDirectiveMovSP(SMLoc L) { |
10069 | 643 | MCAsmParser &Parser = getParser(); |
10070 | 643 | if (!UC.hasFnStart()) { |
10071 | 499 | Parser.eatToEndOfStatement(); |
10072 | | //Error(L, ".fnstart must precede .movsp directives"); |
10073 | 499 | return false; |
10074 | 499 | } |
10075 | 144 | if (UC.getFPReg() != ARM::SP) { |
10076 | 15 | Parser.eatToEndOfStatement(); |
10077 | | //Error(L, "unexpected .movsp directive"); |
10078 | 15 | return false; |
10079 | 15 | } |
10080 | | |
10081 | | //SMLoc SPRegLoc = Parser.getTok().getLoc(); |
10082 | 129 | int SPReg = tryParseRegister(); |
10083 | 129 | if (SPReg == -1) { |
10084 | 79 | Parser.eatToEndOfStatement(); |
10085 | | //Error(SPRegLoc, "register expected"); |
10086 | 79 | return false; |
10087 | 79 | } |
10088 | | |
10089 | 50 | if (SPReg == ARM::SP || SPReg == ARM::PC) { |
10090 | 21 | Parser.eatToEndOfStatement(); |
10091 | | //Error(SPRegLoc, "sp and pc are not permitted in .movsp directive"); |
10092 | 21 | return false; |
10093 | 21 | } |
10094 | | |
10095 | 29 | int64_t Offset = 0; |
10096 | 29 | if (Parser.getTok().is(AsmToken::Comma)) { |
10097 | 24 | Parser.Lex(); |
10098 | | |
10099 | 24 | if (Parser.getTok().isNot(AsmToken::Hash)) { |
10100 | | //Error(Parser.getTok().getLoc(), "expected #constant"); |
10101 | 15 | Parser.eatToEndOfStatement(); |
10102 | 15 | return false; |
10103 | 15 | } |
10104 | 9 | Parser.Lex(); |
10105 | | |
10106 | 9 | const MCExpr *OffsetExpr; |
10107 | | //SMLoc OffsetLoc = Parser.getTok().getLoc(); |
10108 | 9 | if (Parser.parseExpression(OffsetExpr)) { |
10109 | 2 | Parser.eatToEndOfStatement(); |
10110 | | //Error(OffsetLoc, "malformed offset expression"); |
10111 | 2 | return false; |
10112 | 2 | } |
10113 | | |
10114 | 7 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr); |
10115 | 7 | if (!CE) { |
10116 | 3 | Parser.eatToEndOfStatement(); |
10117 | | //Error(OffsetLoc, "offset must be an immediate constant"); |
10118 | 3 | return false; |
10119 | 3 | } |
10120 | | |
10121 | 4 | Offset = CE->getValue(); |
10122 | 4 | } |
10123 | | |
10124 | 9 | getTargetStreamer().emitMovSP(SPReg, Offset); |
10125 | 9 | UC.saveFPReg(SPReg); |
10126 | | |
10127 | 9 | return false; |
10128 | 29 | } |
10129 | | |
10130 | | /// parseDirectiveObjectArch |
10131 | | /// ::= .object_arch name |
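| | /// For illustration (the architecture name is just an example):
| | ///   .object_arch armv7-a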
10132 | 380 | bool ARMAsmParser::parseDirectiveObjectArch(SMLoc L) { |
10133 | 380 | MCAsmParser &Parser = getParser(); |
10134 | 380 | if (getLexer().isNot(AsmToken::Identifier)) { |
10135 | | //Error(getLexer().getLoc(), "unexpected token"); |
10136 | 299 | Parser.eatToEndOfStatement(); |
10137 | 299 | return false; |
10138 | 299 | } |
10139 | | |
10140 | 81 | StringRef Arch = Parser.getTok().getString(); |
10141 | | //SMLoc ArchLoc = Parser.getTok().getLoc(); |
10142 | 81 | getLexer().Lex(); |
10143 | | |
10144 | 81 | unsigned ID = ARM::parseArch(Arch); |
10145 | | |
10146 | 81 | if (ID == ARM::AK_INVALID) { |
10147 | | //Error(ArchLoc, "unknown architecture '" + Arch + "'"); |
10148 | 27 | Parser.eatToEndOfStatement(); |
10149 | 27 | return false; |
10150 | 27 | } |
10151 | | |
10152 | 54 | getTargetStreamer().emitObjectArch(ID); |
10153 | | |
10154 | 54 | if (getLexer().isNot(AsmToken::EndOfStatement)) { |
10155 | | //Error(getLexer().getLoc(), "unexpected token"); |
10156 | 15 | Parser.eatToEndOfStatement(); |
10157 | 15 | } |
10158 | | |
10159 | 54 | return false; |
10160 | 81 | } |
10161 | | |
10162 | | /// parseDirectiveAlign |
10163 | | /// ::= .align |
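| | /// For illustration, only the bare form is handled here and requests 4-byte
| | /// alignment; ".align n" falls through to the generic directive handling:
| | ///   .align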
10164 | 47.5k | bool ARMAsmParser::parseDirectiveAlign(SMLoc L) { |
10165 | | // NOTE: if this is not the end of the statement, fall back to the target |
10166 | | // agnostic handling for this directive which will correctly handle this. |
10167 | 47.5k | if (getLexer().isNot(AsmToken::EndOfStatement)) |
10168 | 47.2k | return true; |
10169 | | |
10170 | | // '.align' is target specifically handled to mean 2**2 byte alignment. |
10171 | 364 | if (getStreamer().getCurrentSection().first->UseCodeAlign()) |
10172 | 309 | getStreamer().EmitCodeAlignment(4, 0); |
10173 | 55 | else |
10174 | 55 | getStreamer().EmitValueToAlignment(4, 0, 1, 0); |
10175 | | |
10176 | 364 | return false; |
10177 | 47.5k | } |
10178 | | |
10179 | | /// parseDirectiveThumbSet |
10180 | | /// ::= .thumb_set name, value |
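| | /// For illustration (both symbol names are hypothetical):
| | ///   .thumb_set alias_fn, impl_fn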
10181 | | bool ARMAsmParser::parseDirectiveThumbSet(SMLoc L) |
10182 | 264 | { |
10183 | 264 | MCAsmParser &Parser = getParser(); |
10184 | | |
10185 | 264 | StringRef Name; |
10186 | 264 | if (Parser.parseIdentifier(Name)) { |
10187 | | //TokError("expected identifier after '.thumb_set'"); |
10188 | 77 | Parser.eatToEndOfStatement(); |
10189 | 77 | return false; |
10190 | 77 | } |
10191 | | |
10192 | 187 | if (getLexer().isNot(AsmToken::Comma)) { |
10193 | | //TokError("expected comma after name '" + Name + "'"); |
10194 | 124 | Parser.eatToEndOfStatement(); |
10195 | 124 | return false; |
10196 | 124 | } |
10197 | 63 | Lex(); |
10198 | | |
10199 | 63 | MCSymbol *Sym; |
10200 | 63 | const MCExpr *Value; |
10201 | 63 | if (MCParserUtils::parseAssignmentExpression(Name, /* allow_redef */ true, |
10202 | 63 | Parser, Sym, Value)) |
10203 | 21 | return true; |
10204 | | |
10205 | 42 | getTargetStreamer().emitThumbSet(Sym, Value); |
10206 | 42 | return false; |
10207 | 63 | } |
10208 | | |
10209 | | /// Force static initialization. |
10210 | 26 | extern "C" void LLVMInitializeARMAsmParser() { |
10211 | 26 | RegisterMCAsmParser<ARMAsmParser> X(TheARMLETarget); |
10212 | 26 | RegisterMCAsmParser<ARMAsmParser> Y(TheARMBETarget); |
10213 | 26 | RegisterMCAsmParser<ARMAsmParser> A(TheThumbLETarget); |
10214 | 26 | RegisterMCAsmParser<ARMAsmParser> B(TheThumbBETarget); |
10215 | 26 | } |
10216 | | |
10217 | | #define GET_REGISTER_MATCHER |
10218 | | #define GET_SUBTARGET_FEATURE_NAME |
10219 | | #define GET_MATCHER_IMPLEMENTATION |
10220 | | #include "ARMGenAsmMatcher.inc" |
10221 | | |
10222 | | // FIXME: This structure should be moved inside ARMTargetParser |
10223 | | // when we start to table-generate them, and we can use the ARM |
10224 | | // flags below, that were generated by table-gen. |
10225 | | static const struct { |
10226 | | const unsigned Kind; |
10227 | | const uint64_t ArchCheck; |
10228 | | const FeatureBitset Features; |
10229 | | } Extensions[] = { |
10230 | | { ARM::AEK_CRC, Feature_HasV8, {ARM::FeatureCRC} }, |
10231 | | { ARM::AEK_CRYPTO, Feature_HasV8, |
10232 | | {ARM::FeatureCrypto, ARM::FeatureNEON, ARM::FeatureFPARMv8} }, |
10233 | | { ARM::AEK_FP, Feature_HasV8, {ARM::FeatureFPARMv8} }, |
10234 | | { (ARM::AEK_HWDIV | ARM::AEK_HWDIVARM), Feature_HasV7 | Feature_IsNotMClass, |
10235 | | {ARM::FeatureHWDiv, ARM::FeatureHWDivARM} }, |
10236 | | { ARM::AEK_MP, Feature_HasV7 | Feature_IsNotMClass, {ARM::FeatureMP} }, |
10237 | | { ARM::AEK_SIMD, Feature_HasV8, {ARM::FeatureNEON, ARM::FeatureFPARMv8} }, |
10238 | | { ARM::AEK_SEC, Feature_HasV6K, {ARM::FeatureTrustZone} }, |
10239 | | // FIXME: Only available in A-class, isel not predicated |
10240 | | { ARM::AEK_VIRT, Feature_HasV7, {ARM::FeatureVirtualization} }, |
10241 | | { ARM::AEK_FP16, Feature_HasV8_2a, {ARM::FeatureFPARMv8, ARM::FeatureFullFP16} }, |
10242 | | // FIXME: Unsupported extensions. |
10243 | | { ARM::AEK_OS, Feature_None, {} }, |
10244 | | { ARM::AEK_IWMMXT, Feature_None, {} }, |
10245 | | { ARM::AEK_IWMMXT2, Feature_None, {} }, |
10246 | | { ARM::AEK_MAVERICK, Feature_None, {} }, |
10247 | | { ARM::AEK_XSCALE, Feature_None, {} }, |
10248 | | }; |
10249 | | |
10250 | | /// parseDirectiveArchExtension |
10251 | | /// ::= .arch_extension [no]feature |
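| | /// For illustration ("crc" is one of the extension names listed in the table
| | /// above; a "no" prefix disables an extension):
| | ///   .arch_extension crc
| | ///   .arch_extension nocrc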
10252 | | bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) |
10253 | 3.60k | { |
10254 | 3.60k | MCAsmParser &Parser = getParser(); |
10255 | | |
10256 | 3.60k | if (getLexer().isNot(AsmToken::Identifier)) { |
10257 | | //Error(getLexer().getLoc(), "unexpected token"); |
10258 | 180 | Parser.eatToEndOfStatement(); |
10259 | 180 | return false; |
10260 | 180 | } |
10261 | | |
10262 | 3.42k | StringRef Name = Parser.getTok().getString(); |
10263 | | //SMLoc ExtLoc = Parser.getTok().getLoc(); |
10264 | 3.42k | getLexer().Lex(); |
10265 | | |
10266 | 3.42k | bool EnableFeature = true; |
10267 | 3.42k | if (Name.startswith_lower("no")) { |
10268 | 19 | EnableFeature = false; |
10269 | 19 | Name = Name.substr(2); |
10270 | 19 | } |
10271 | 3.42k | unsigned FeatureKind = ARM::parseArchExt(Name); |
10272 | | //if (FeatureKind == ARM::AEK_INVALID) |
10273 | | // Error(ExtLoc, "unknown architectural extension: " + Name); |
10274 | | |
10275 | 24.4k | for (const auto &Extension : Extensions) { |
10276 | 24.4k | if (Extension.Kind != FeatureKind) |
10277 | 21.8k | continue; |
10278 | | |
10279 | 2.61k | if (Extension.Features.none()) |
10280 | 0 | report_fatal_error("unsupported architectural extension: " + Name); |
10281 | | |
10282 | 2.61k | if ((getAvailableFeatures() & Extension.ArchCheck) != Extension.ArchCheck) { |
10283 | | //Error(ExtLoc, "architectural extension '" + Name + "' is not " |
10284 | | // "allowed for the current base architecture"); |
10285 | 328 | return false; |
10286 | 328 | } |
10287 | | |
10288 | 2.28k | MCSubtargetInfo &STI = copySTI(); |
10289 | 2.28k | FeatureBitset ToggleFeatures = EnableFeature |
10290 | 2.28k | ? (~STI.getFeatureBits() & Extension.Features) |
10291 | 2.28k | : ( STI.getFeatureBits() & Extension.Features); |
10292 | | |
10293 | 2.28k | uint64_t Features = |
10294 | 2.28k | ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures)); |
10295 | 2.28k | setAvailableFeatures(Features); |
10296 | 2.28k | return false; |
10297 | 2.61k | } |
10298 | | |
10299 | | //Error(ExtLoc, "unknown architectural extension: " + Name); |
10300 | 818 | Parser.eatToEndOfStatement(); |
10301 | 818 | return false; |
10302 | 3.42k | } |
10303 | | |
10304 | | // Define this matcher function after the auto-generated include so we |
10305 | | // have the match class enum definitions. |
10306 | | unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp, |
10307 | | unsigned Kind) |
10308 | 508k | { |
10309 | 508k | ARMOperand &Op = static_cast<ARMOperand &>(AsmOp); |
10310 | | // If the kind is a token for a literal immediate, check if our asm |
10311 | | // operand matches. This is for InstAliases which have a fixed-value |
10312 | | // immediate in the syntax. |
10313 | 508k | switch (Kind) { |
10314 | 480k | default: break; |
10315 | 480k | case MCK__35_0: |
10316 | 205 | if (Op.isImm()) |
10317 | 159 | if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm())) |
10318 | 107 | if (CE->getValue() == 0) |
10319 | 39 | return Match_Success; |
10320 | 166 | break; |
10321 | 9.50k | case MCK_ModImm: |
10322 | 9.50k | if (Op.isImm()) { |
10323 | 3.46k | const MCExpr *SOExpr = Op.getImm(); |
10324 | 3.46k | int64_t Value; |
10325 | 3.46k | if (!SOExpr->evaluateAsAbsolute(Value)) |
10326 | 2.08k | return Match_Success; |
10327 | | //assert((Value >= INT32_MIN && Value <= UINT32_MAX) && |
10328 | | // "expression value must be representable in 32 bits"); |
10329 | 1.37k | if (Value < INT32_MIN || Value > UINT32_MAX) |
10330 | 290 | return Match_InvalidOperand; |
10331 | 1.37k | } |
10332 | 7.12k | break; |
10333 | 18.6k | case MCK_rGPR: |
10334 | 18.6k | if (hasV8Ops() && Op.isReg() && Op.getReg() == ARM::SP) |
10335 | 1.17k | return Match_Success; |
10336 | 17.4k | break; |
10337 | 17.4k | case MCK_GPRPair: |
10338 | 6 | if (Op.isReg() && |
10339 | 6 | MRI->getRegClass(ARM::GPRRegClassID).contains(Op.getReg())) |
10340 | 1 | return Match_Success; |
10341 | 5 | break; |
10342 | 508k | } |
10343 | 504k | return Match_InvalidOperand; |
10344 | 508k | } |