/src/keystone/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
Line | Count | Source |
1 | | //=- AArch64/AArch64MCCodeEmitter.cpp - Convert AArch64 code to machine code-=// |
2 | | // |
3 | | // The LLVM Compiler Infrastructure |
4 | | // |
5 | | // This file is distributed under the University of Illinois Open Source |
6 | | // License. See LICENSE.TXT for details. |
7 | | // |
8 | | //===----------------------------------------------------------------------===// |
9 | | // |
10 | | // This file implements the AArch64MCCodeEmitter class. |
11 | | // |
12 | | //===----------------------------------------------------------------------===// |
13 | | |
14 | | #include "MCTargetDesc/AArch64AddressingModes.h" |
15 | | #include "MCTargetDesc/AArch64FixupKinds.h" |
16 | | #include "MCTargetDesc/AArch64MCExpr.h" |
17 | | #include "Utils/AArch64BaseInfo.h" |
18 | | #include "llvm/MC/MCCodeEmitter.h" |
19 | | #include "llvm/MC/MCContext.h" |
20 | | #include "llvm/MC/MCInst.h" |
21 | | #include "llvm/MC/MCInstrInfo.h" |
22 | | #include "llvm/MC/MCRegisterInfo.h" |
23 | | #include "llvm/MC/MCSubtargetInfo.h" |
24 | | #include "llvm/Support/EndianStream.h" |
25 | | #include "llvm/Support/raw_ostream.h" |
26 | | using namespace llvm_ks; |
27 | | |
28 | | #define DEBUG_TYPE "mccodeemitter" |
29 | | |
30 | | |
31 | | namespace { |
32 | | |
33 | | class AArch64MCCodeEmitter : public MCCodeEmitter { |
34 | | MCContext &Ctx; |
35 | | |
36 | | AArch64MCCodeEmitter(const AArch64MCCodeEmitter &); // DO NOT IMPLEMENT |
37 | | void operator=(const AArch64MCCodeEmitter &); // DO NOT IMPLEMENT |
38 | | public: |
39 | 1.41k | AArch64MCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx) : Ctx(ctx) {} |
40 | | |
41 | 0 | ~AArch64MCCodeEmitter() override {} |
42 | | |
43 | | // getBinaryCodeForInstr - TableGen'erated function for getting the |
44 | | // binary encoding for an instruction. |
45 | | uint64_t getBinaryCodeForInstr(const MCInst &MI, |
46 | | SmallVectorImpl<MCFixup> &Fixups, |
47 | | const MCSubtargetInfo &STI) const; |
48 | | |
49 | | /// getMachineOpValue - Return binary encoding of operand. If the machine |
50 | | /// operand requires relocation, record the relocation and return zero. |
51 | | unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO, |
52 | | SmallVectorImpl<MCFixup> &Fixups, |
53 | | const MCSubtargetInfo &STI) const; |
54 | | |
55 | | /// getLdStUImm12OpValue - Return encoding info for 12-bit unsigned immediate |
56 | | /// attached to a load, store or prfm instruction. If operand requires a |
57 | | /// relocation, record it and return zero in that part of the encoding. |
58 | | template <uint32_t FixupKind> |
59 | | uint32_t getLdStUImm12OpValue(const MCInst &MI, unsigned OpIdx, |
60 | | SmallVectorImpl<MCFixup> &Fixups, |
61 | | const MCSubtargetInfo &STI) const; |
62 | | |
63 | | /// getAdrLabelOpValue - Return encoding info for 21-bit immediate ADR label |
64 | | /// target. |
65 | | uint32_t getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx, |
66 | | SmallVectorImpl<MCFixup> &Fixups, |
67 | | const MCSubtargetInfo &STI) const; |
68 | | |
69 | | /// getAddSubImmOpValue - Return encoding for the 12-bit immediate value and |
70 | | /// the 2-bit shift field. |
71 | | uint32_t getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx, |
72 | | SmallVectorImpl<MCFixup> &Fixups, |
73 | | const MCSubtargetInfo &STI) const; |
74 | | |
75 | | /// getCondBranchTargetOpValue - Return the encoded value for a conditional |
76 | | /// branch target. |
77 | | uint32_t getCondBranchTargetOpValue(const MCInst &MI, unsigned OpIdx, |
78 | | SmallVectorImpl<MCFixup> &Fixups, |
79 | | const MCSubtargetInfo &STI) const; |
80 | | |
81 | | /// getLoadLiteralOpValue - Return the encoded value for a load-literal |
82 | | /// pc-relative address. |
83 | | uint32_t getLoadLiteralOpValue(const MCInst &MI, unsigned OpIdx, |
84 | | SmallVectorImpl<MCFixup> &Fixups, |
85 | | const MCSubtargetInfo &STI) const; |
86 | | |
87 | | /// getMemExtendOpValue - Return the encoded value for a reg-extend load/store |
88 | | /// instruction: bit 0 is whether a shift is present, bit 1 is whether the |
89 | | /// operation is a sign extend (as opposed to a zero extend). |
90 | | uint32_t getMemExtendOpValue(const MCInst &MI, unsigned OpIdx, |
91 | | SmallVectorImpl<MCFixup> &Fixups, |
92 | | const MCSubtargetInfo &STI) const; |
93 | | |
94 | | /// getTestBranchTargetOpValue - Return the encoded value for a test-bit-and- |
95 | | /// branch target. |
96 | | uint32_t getTestBranchTargetOpValue(const MCInst &MI, unsigned OpIdx, |
97 | | SmallVectorImpl<MCFixup> &Fixups, |
98 | | const MCSubtargetInfo &STI) const; |
99 | | |
100 | | /// getBranchTargetOpValue - Return the encoded value for an unconditional |
101 | | /// branch target. |
102 | | uint32_t getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx, |
103 | | SmallVectorImpl<MCFixup> &Fixups, |
104 | | const MCSubtargetInfo &STI) const; |
105 | | |
106 | | /// getMoveWideImmOpValue - Return the encoded value for the immediate operand |
107 | | /// of a MOVZ or MOVK instruction. |
108 | | uint32_t getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx, |
109 | | SmallVectorImpl<MCFixup> &Fixups, |
110 | | const MCSubtargetInfo &STI) const; |
111 | | |
112 | | /// getVecShifterOpValue - Return the encoded value for the vector shifter. |
113 | | uint32_t getVecShifterOpValue(const MCInst &MI, unsigned OpIdx, |
114 | | SmallVectorImpl<MCFixup> &Fixups, |
115 | | const MCSubtargetInfo &STI) const; |
116 | | |
117 | | /// getMoveVecShifterOpValue - Return the encoded value for the vector move |
118 | | /// shifter (MSL). |
119 | | uint32_t getMoveVecShifterOpValue(const MCInst &MI, unsigned OpIdx, |
120 | | SmallVectorImpl<MCFixup> &Fixups, |
121 | | const MCSubtargetInfo &STI) const; |
122 | | |
123 | | /// getFixedPointScaleOpValue - Return the encoded value for the |
124 | | // FP-to-fixed-point scale factor. |
125 | | uint32_t getFixedPointScaleOpValue(const MCInst &MI, unsigned OpIdx, |
126 | | SmallVectorImpl<MCFixup> &Fixups, |
127 | | const MCSubtargetInfo &STI) const; |
128 | | |
129 | | uint32_t getVecShiftR64OpValue(const MCInst &MI, unsigned OpIdx, |
130 | | SmallVectorImpl<MCFixup> &Fixups, |
131 | | const MCSubtargetInfo &STI) const; |
132 | | uint32_t getVecShiftR32OpValue(const MCInst &MI, unsigned OpIdx, |
133 | | SmallVectorImpl<MCFixup> &Fixups, |
134 | | const MCSubtargetInfo &STI) const; |
135 | | uint32_t getVecShiftR16OpValue(const MCInst &MI, unsigned OpIdx, |
136 | | SmallVectorImpl<MCFixup> &Fixups, |
137 | | const MCSubtargetInfo &STI) const; |
138 | | uint32_t getVecShiftR8OpValue(const MCInst &MI, unsigned OpIdx, |
139 | | SmallVectorImpl<MCFixup> &Fixups, |
140 | | const MCSubtargetInfo &STI) const; |
141 | | uint32_t getVecShiftL64OpValue(const MCInst &MI, unsigned OpIdx, |
142 | | SmallVectorImpl<MCFixup> &Fixups, |
143 | | const MCSubtargetInfo &STI) const; |
144 | | uint32_t getVecShiftL32OpValue(const MCInst &MI, unsigned OpIdx, |
145 | | SmallVectorImpl<MCFixup> &Fixups, |
146 | | const MCSubtargetInfo &STI) const; |
147 | | uint32_t getVecShiftL16OpValue(const MCInst &MI, unsigned OpIdx, |
148 | | SmallVectorImpl<MCFixup> &Fixups, |
149 | | const MCSubtargetInfo &STI) const; |
150 | | uint32_t getVecShiftL8OpValue(const MCInst &MI, unsigned OpIdx, |
151 | | SmallVectorImpl<MCFixup> &Fixups, |
152 | | const MCSubtargetInfo &STI) const; |
153 | | |
154 | | unsigned fixMOVZ(const MCInst &MI, unsigned EncodedValue, |
155 | | const MCSubtargetInfo &STI) const; |
156 | | |
157 | | void encodeInstruction(MCInst &MI, raw_ostream &OS, |
158 | | SmallVectorImpl<MCFixup> &Fixups, |
159 | | const MCSubtargetInfo &STI, |
160 | | unsigned int &KsError) const override; |
161 | | |
162 | | unsigned fixMulHigh(const MCInst &MI, unsigned EncodedValue, |
163 | | const MCSubtargetInfo &STI) const; |
164 | | |
165 | | template<int hasRs, int hasRt2> unsigned |
166 | | fixLoadStoreExclusive(const MCInst &MI, unsigned EncodedValue, |
167 | | const MCSubtargetInfo &STI) const; |
168 | | |
169 | | unsigned fixOneOperandFPComparison(const MCInst &MI, unsigned EncodedValue, |
170 | | const MCSubtargetInfo &STI) const; |
171 | | }; |
172 | | |
173 | | } // end anonymous namespace |
174 | | |
175 | | MCCodeEmitter *llvm_ks::createAArch64MCCodeEmitter(const MCInstrInfo &MCII, |
176 | | const MCRegisterInfo &MRI, |
177 | 1.41k | MCContext &Ctx) { |
178 | 1.41k | return new AArch64MCCodeEmitter(MCII, Ctx); |
179 | 1.41k | } |
180 | | |
181 | | /// getMachineOpValue - Return binary encoding of operand. If the machine |
182 | | /// operand requires relocation, record the relocation and return zero. |
183 | | unsigned |
184 | | AArch64MCCodeEmitter::getMachineOpValue(const MCInst &MI, const MCOperand &MO, |
185 | | SmallVectorImpl<MCFixup> &Fixups, |
186 | 2.31k | const MCSubtargetInfo &STI) const { |
187 | 2.31k | if (MO.isReg()) |
188 | 333 | return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg()); |
189 | | |
190 | 1.98k | assert(MO.isImm() && "did not expect relocated expression"); |
191 | 1.98k | return static_cast<unsigned>(MO.getImm()); |
192 | 1.98k | } |
193 | | |
194 | | template<unsigned FixupKind> uint32_t |
195 | | AArch64MCCodeEmitter::getLdStUImm12OpValue(const MCInst &MI, unsigned OpIdx, |
196 | | SmallVectorImpl<MCFixup> &Fixups, |
197 | 0 | const MCSubtargetInfo &STI) const { |
198 | 0 | const MCOperand &MO = MI.getOperand(OpIdx); |
199 | 0 | uint32_t ImmVal = 0; |
200 | |
|
201 | 0 | if (MO.isImm()) |
202 | 0 | ImmVal = static_cast<uint32_t>(MO.getImm()); |
203 | 0 | else { |
204 | 0 | assert(MO.isExpr() && "unable to encode load/store imm operand"); |
205 | 0 | MCFixupKind Kind = MCFixupKind(FixupKind); |
206 | 0 | Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc())); |
207 | 0 | } |
208 | | |
209 | 0 | return ImmVal; |
210 | 0 | }
Unexecuted instantiation: AArch64MCCodeEmitter.cpp:unsigned int (anonymous namespace)::AArch64MCCodeEmitter::getLdStUImm12OpValue<135u>(llvm_ks::MCInst const&, unsigned int, llvm_ks::SmallVectorImpl<llvm_ks::MCFixup>&, llvm_ks::MCSubtargetInfo const&) const
Unexecuted instantiation: AArch64MCCodeEmitter.cpp:unsigned int (anonymous namespace)::AArch64MCCodeEmitter::getLdStUImm12OpValue<131u>(llvm_ks::MCInst const&, unsigned int, llvm_ks::SmallVectorImpl<llvm_ks::MCFixup>&, llvm_ks::MCSubtargetInfo const&) const
Unexecuted instantiation: AArch64MCCodeEmitter.cpp:unsigned int (anonymous namespace)::AArch64MCCodeEmitter::getLdStUImm12OpValue<132u>(llvm_ks::MCInst const&, unsigned int, llvm_ks::SmallVectorImpl<llvm_ks::MCFixup>&, llvm_ks::MCSubtargetInfo const&) const
Unexecuted instantiation: AArch64MCCodeEmitter.cpp:unsigned int (anonymous namespace)::AArch64MCCodeEmitter::getLdStUImm12OpValue<133u>(llvm_ks::MCInst const&, unsigned int, llvm_ks::SmallVectorImpl<llvm_ks::MCFixup>&, llvm_ks::MCSubtargetInfo const&) const
Unexecuted instantiation: AArch64MCCodeEmitter.cpp:unsigned int (anonymous namespace)::AArch64MCCodeEmitter::getLdStUImm12OpValue<134u>(llvm_ks::MCInst const&, unsigned int, llvm_ks::SmallVectorImpl<llvm_ks::MCFixup>&, llvm_ks::MCSubtargetInfo const&) const
211 | | |
212 | | /// getAdrLabelOpValue - Return encoding info for 21-bit immediate ADR label |
213 | | /// target. |
214 | | uint32_t |
215 | | AArch64MCCodeEmitter::getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx, |
216 | | SmallVectorImpl<MCFixup> &Fixups, |
217 | 0 | const MCSubtargetInfo &STI) const { |
218 | 0 | const MCOperand &MO = MI.getOperand(OpIdx); |
219 | | |
220 | | // If the destination is an immediate, we have nothing to do. |
221 | 0 | if (MO.isImm()) |
222 | 0 | return MO.getImm() - (MI.getAddress() >> 12); |
223 | 0 | assert(MO.isExpr() && "Unexpected target type!"); |
224 | 0 | const MCExpr *Expr = MO.getExpr(); |
225 | |
|
226 | 0 | MCFixupKind Kind = MI.getOpcode() == AArch64::ADR |
227 | 0 | ? MCFixupKind(AArch64::fixup_aarch64_pcrel_adr_imm21) |
228 | 0 | : MCFixupKind(AArch64::fixup_aarch64_pcrel_adrp_imm21); |
229 | 0 | Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc())); |
230 | | |
231 | | // All of the information is in the fixup. |
232 | 0 | return 0; |
233 | 0 | } |
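
A minimal standalone sketch (not part of the Keystone source) restating the immediate path above; it assumes the operand value is already in the units the ADR/ADRP encoding expects, and only reproduces the subtraction of the instruction's page number.

    #include <cstdint>

    // Hypothetical helper mirroring MO.getImm() - (MI.getAddress() >> 12).
    static int64_t adrLabelDelta(int64_t TargetImm, uint64_t InstAddress) {
      return TargetImm - static_cast<int64_t>(InstAddress >> 12);
    }
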
234 | | |
235 | | /// getAddSubImmOpValue - Return encoding for the 12-bit immediate value and |
236 | | /// the 2-bit shift field. The shift field is stored in bits 13-14 of the |
237 | | /// return value. |
238 | | uint32_t |
239 | | AArch64MCCodeEmitter::getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx, |
240 | | SmallVectorImpl<MCFixup> &Fixups, |
241 | 0 | const MCSubtargetInfo &STI) const { |
242 | | // Suboperands are [imm, shifter]. |
243 | 0 | const MCOperand &MO = MI.getOperand(OpIdx); |
244 | 0 | const MCOperand &MO1 = MI.getOperand(OpIdx + 1); |
245 | 0 | assert(AArch64_AM::getShiftType(MO1.getImm()) == AArch64_AM::LSL && |
246 | 0 | "unexpected shift type for add/sub immediate"); |
247 | 0 | unsigned ShiftVal = AArch64_AM::getShiftValue(MO1.getImm()); |
248 | 0 | assert((ShiftVal == 0 || ShiftVal == 12) && |
249 | 0 | "unexpected shift value for add/sub immediate"); |
250 | 0 | if (MO.isImm()) |
251 | 0 | return MO.getImm() | (ShiftVal == 0 ? 0 : (1 << 12)); |
252 | 0 | assert(MO.isExpr() && "Unable to encode MCOperand!"); |
253 | 0 | const MCExpr *Expr = MO.getExpr(); |
254 | | |
255 | | // Encode the 12 bits of the fixup. |
256 | 0 | MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_add_imm12); |
257 | 0 | Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc())); |
258 | |
|
259 | 0 | return 0; |
260 | 0 | } |
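
A minimal standalone sketch (not Keystone/LLVM code) of the non-relocated case above: the 12-bit value is returned as-is, and the optional `lsl #12` is flagged one bit above it.

    #include <cassert>
    #include <cstdint>

    // Hypothetical helper mirroring the immediate path of getAddSubImmOpValue.
    static uint32_t packAddSubImm(uint32_t Imm12, unsigned ShiftVal) {
      assert(Imm12 < (1u << 12) && "immediate must fit in 12 bits");
      assert((ShiftVal == 0 || ShiftVal == 12) && "shift must be 0 or 12");
      return Imm12 | (ShiftVal == 0 ? 0 : (1u << 12)); // shift flag sits above imm12
    }
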
261 | | |
262 | | /// getCondBranchTargetOpValue - Return the encoded value for a conditional |
263 | | /// branch target. |
264 | | uint32_t AArch64MCCodeEmitter::getCondBranchTargetOpValue( |
265 | | const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups, |
266 | 1.69k | const MCSubtargetInfo &STI) const { |
267 | 1.69k | const MCOperand &MO = MI.getOperand(OpIdx); |
268 | | |
269 | | // If the destination is an immediate, we have nothing to do. |
270 | 1.69k | if (MO.isImm()) |
271 | 14 | return (MO.getImm() * 4 - MI.getAddress()) / 4; |
272 | 1.68k | assert(MO.isExpr() && "Unexpected target type!"); |
273 | | |
274 | 1.68k | MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch19); |
275 | 1.68k | Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc())); |
276 | | |
277 | | // All of the information is in the fixup. |
278 | 1.68k | return 0; |
279 | 1.68k | } |
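
For the immediate case Keystone resolves the branch at emit time. A sketch of that arithmetic, assuming (as the `* 4` implies) the operand holds the target scaled down to 32-bit words:

    #include <cstdint>

    // Hypothetical helper: signed word offset from the branch to its target,
    // matching (MO.getImm() * 4 - MI.getAddress()) / 4 above.
    static int64_t branchWordOffset(int64_t TargetWordImm, uint64_t InstAddress) {
      return (TargetWordImm * 4 - static_cast<int64_t>(InstAddress)) / 4;
    }
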
280 | | |
281 | | /// getLoadLiteralOpValue - Return the encoded value for a load-literal |
282 | | /// pc-relative address. |
283 | | uint32_t |
284 | | AArch64MCCodeEmitter::getLoadLiteralOpValue(const MCInst &MI, unsigned OpIdx, |
285 | | SmallVectorImpl<MCFixup> &Fixups, |
286 | 0 | const MCSubtargetInfo &STI) const { |
287 | 0 | const MCOperand &MO = MI.getOperand(OpIdx); |
288 | | |
289 | | // If the destination is an immediate, we have nothing to do. |
290 | 0 | if (MO.isImm()) |
291 | 0 | return (MO.getImm() * 4 - MI.getAddress()) / 4; |
292 | 0 | assert(MO.isExpr() && "Unexpected target type!"); |
293 | | |
294 | 0 | MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_ldr_pcrel_imm19); |
295 | 0 | Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc())); |
296 | | |
297 | | // All of the information is in the fixup. |
298 | 0 | return 0; |
299 | 0 | } |
300 | | |
301 | | uint32_t |
302 | | AArch64MCCodeEmitter::getMemExtendOpValue(const MCInst &MI, unsigned OpIdx, |
303 | | SmallVectorImpl<MCFixup> &Fixups, |
304 | 0 | const MCSubtargetInfo &STI) const { |
305 | 0 | unsigned SignExtend = MI.getOperand(OpIdx).getImm(); |
306 | 0 | unsigned DoShift = MI.getOperand(OpIdx + 1).getImm(); |
307 | 0 | return (SignExtend << 1) | DoShift; |
308 | 0 | } |
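
The two suboperands collapse into a two-bit field; a minimal sketch of that packing (hypothetical helper, not part of this file):

    // Bit 1 = sign extend, bit 0 = shift present, matching (SignExtend << 1) | DoShift.
    static unsigned packMemExtend(bool SignExtend, bool DoShift) {
      return (static_cast<unsigned>(SignExtend) << 1) |
             static_cast<unsigned>(DoShift);
    }
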
309 | | |
310 | | uint32_t |
311 | | AArch64MCCodeEmitter::getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx, |
312 | | SmallVectorImpl<MCFixup> &Fixups, |
313 | 0 | const MCSubtargetInfo &STI) const { |
314 | 0 | const MCOperand &MO = MI.getOperand(OpIdx); |
315 | |
|
316 | 0 | if (MO.isImm()) |
317 | 0 | return MO.getImm(); |
318 | 0 | assert(MO.isExpr() && "Unexpected movz/movk immediate"); |
319 | | |
320 | 0 | Fixups.push_back(MCFixup::create( |
321 | 0 | 0, MO.getExpr(), MCFixupKind(AArch64::fixup_aarch64_movw), MI.getLoc())); |
322 | |
|
323 | 0 | return 0; |
324 | 0 | } |
325 | | |
326 | | /// getTestBranchTargetOpValue - Return the encoded value for a test-bit-and- |
327 | | /// branch target. |
328 | | uint32_t AArch64MCCodeEmitter::getTestBranchTargetOpValue( |
329 | | const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups, |
330 | 0 | const MCSubtargetInfo &STI) const { |
331 | 0 | const MCOperand &MO = MI.getOperand(OpIdx); |
332 | | |
333 | | // If the destination is an immediate, we have nothing to do. |
334 | 0 | if (MO.isImm()) |
335 | 0 | return (MO.getImm() * 4 - MI.getAddress()) / 4; |
336 | 0 | assert(MO.isExpr() && "Unexpected ADR target type!"); |
337 | | |
338 | 0 | MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch14); |
339 | 0 | Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc())); |
340 | | |
341 | | // All of the information is in the fixup. |
342 | 0 | return 0; |
343 | 0 | } |
344 | | |
345 | | /// getBranchTargetOpValue - Return the encoded value for an unconditional |
346 | | /// branch target. |
347 | | uint32_t |
348 | | AArch64MCCodeEmitter::getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx, |
349 | | SmallVectorImpl<MCFixup> &Fixups, |
350 | 493 | const MCSubtargetInfo &STI) const { |
351 | 493 | const MCOperand &MO = MI.getOperand(OpIdx); |
352 | | |
353 | | // If the destination is an immediate, we have nothing to do. |
354 | 493 | if (MO.isImm()) { |
355 | | // encode relative address |
356 | 0 | return (MO.getImm() * 4 - MI.getAddress()) / 4; |
357 | 0 | } |
358 | 493 | assert(MO.isExpr() && "Unexpected ADR target type!"); |
359 | | |
360 | 493 | MCFixupKind Kind = MI.getOpcode() == AArch64::BL |
361 | 493 | ? MCFixupKind(AArch64::fixup_aarch64_pcrel_call26) |
362 | 493 | : MCFixupKind(AArch64::fixup_aarch64_pcrel_branch26); |
363 | 493 | Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc())); |
364 | | |
365 | | // All of the information is in the fixup. |
366 | 493 | return 0; |
367 | 493 | } |
368 | | |
369 | | /// getVecShifterOpValue - Return the encoded value for the vector shifter: |
370 | | /// |
371 | | /// 00 -> 0 |
372 | | /// 01 -> 8 |
373 | | /// 10 -> 16 |
374 | | /// 11 -> 24 |
375 | | uint32_t |
376 | | AArch64MCCodeEmitter::getVecShifterOpValue(const MCInst &MI, unsigned OpIdx, |
377 | | SmallVectorImpl<MCFixup> &Fixups, |
378 | 0 | const MCSubtargetInfo &STI) const { |
379 | 0 | const MCOperand &MO = MI.getOperand(OpIdx); |
380 | 0 | assert(MO.isImm() && "Expected an immediate value for the shift amount!"); |
381 | | |
382 | 0 | switch (MO.getImm()) { |
383 | 0 | default: |
384 | 0 | break; |
385 | 0 | case 0: |
386 | 0 | return 0; |
387 | 0 | case 8: |
388 | 0 | return 1; |
389 | 0 | case 16: |
390 | 0 | return 2; |
391 | 0 | case 24: |
392 | 0 | return 3; |
393 | 0 | } |
394 | | |
395 | 0 | llvm_unreachable("Invalid value for vector shift amount!"); |
396 | 0 | } |
397 | | |
398 | | /// getFixedPointScaleOpValue - Return the encoded value for the |
399 | | // FP-to-fixed-point scale factor. |
400 | | uint32_t AArch64MCCodeEmitter::getFixedPointScaleOpValue( |
401 | | const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups, |
402 | 0 | const MCSubtargetInfo &STI) const { |
403 | 0 | const MCOperand &MO = MI.getOperand(OpIdx); |
404 | 0 | assert(MO.isImm() && "Expected an immediate value for the scale amount!"); |
405 | 0 | return 64 - MO.getImm(); |
406 | 0 | } |
407 | | |
408 | | uint32_t |
409 | | AArch64MCCodeEmitter::getVecShiftR64OpValue(const MCInst &MI, unsigned OpIdx, |
410 | | SmallVectorImpl<MCFixup> &Fixups, |
411 | 0 | const MCSubtargetInfo &STI) const { |
412 | 0 | const MCOperand &MO = MI.getOperand(OpIdx); |
413 | 0 | assert(MO.isImm() && "Expected an immediate value for the scale amount!"); |
414 | 0 | return 64 - MO.getImm(); |
415 | 0 | } |
416 | | |
417 | | uint32_t |
418 | | AArch64MCCodeEmitter::getVecShiftR32OpValue(const MCInst &MI, unsigned OpIdx, |
419 | | SmallVectorImpl<MCFixup> &Fixups, |
420 | 0 | const MCSubtargetInfo &STI) const { |
421 | 0 | const MCOperand &MO = MI.getOperand(OpIdx); |
422 | 0 | assert(MO.isImm() && "Expected an immediate value for the scale amount!"); |
423 | 0 | return 32 - MO.getImm(); |
424 | 0 | } |
425 | | |
426 | | uint32_t |
427 | | AArch64MCCodeEmitter::getVecShiftR16OpValue(const MCInst &MI, unsigned OpIdx, |
428 | | SmallVectorImpl<MCFixup> &Fixups, |
429 | 0 | const MCSubtargetInfo &STI) const { |
430 | 0 | const MCOperand &MO = MI.getOperand(OpIdx); |
431 | 0 | assert(MO.isImm() && "Expected an immediate value for the scale amount!"); |
432 | 0 | return 16 - MO.getImm(); |
433 | 0 | } |
434 | | |
435 | | uint32_t |
436 | | AArch64MCCodeEmitter::getVecShiftR8OpValue(const MCInst &MI, unsigned OpIdx, |
437 | | SmallVectorImpl<MCFixup> &Fixups, |
438 | 0 | const MCSubtargetInfo &STI) const { |
439 | 0 | const MCOperand &MO = MI.getOperand(OpIdx); |
440 | 0 | assert(MO.isImm() && "Expected an immediate value for the scale amount!"); |
441 | 0 | return 8 - MO.getImm(); |
442 | 0 | } |
443 | | |
444 | | uint32_t |
445 | | AArch64MCCodeEmitter::getVecShiftL64OpValue(const MCInst &MI, unsigned OpIdx, |
446 | | SmallVectorImpl<MCFixup> &Fixups, |
447 | 0 | const MCSubtargetInfo &STI) const { |
448 | 0 | const MCOperand &MO = MI.getOperand(OpIdx); |
449 | 0 | assert(MO.isImm() && "Expected an immediate value for the scale amount!"); |
450 | 0 | return MO.getImm() - 64; |
451 | 0 | } |
452 | | |
453 | | uint32_t |
454 | | AArch64MCCodeEmitter::getVecShiftL32OpValue(const MCInst &MI, unsigned OpIdx, |
455 | | SmallVectorImpl<MCFixup> &Fixups, |
456 | 0 | const MCSubtargetInfo &STI) const { |
457 | 0 | const MCOperand &MO = MI.getOperand(OpIdx); |
458 | 0 | assert(MO.isImm() && "Expected an immediate value for the scale amount!"); |
459 | 0 | return MO.getImm() - 32; |
460 | 0 | } |
461 | | |
462 | | uint32_t |
463 | | AArch64MCCodeEmitter::getVecShiftL16OpValue(const MCInst &MI, unsigned OpIdx, |
464 | | SmallVectorImpl<MCFixup> &Fixups, |
465 | 0 | const MCSubtargetInfo &STI) const { |
466 | 0 | const MCOperand &MO = MI.getOperand(OpIdx); |
467 | 0 | assert(MO.isImm() && "Expected an immediate value for the scale amount!"); |
468 | 0 | return MO.getImm() - 16; |
469 | 0 | } |
470 | | |
471 | | uint32_t |
472 | | AArch64MCCodeEmitter::getVecShiftL8OpValue(const MCInst &MI, unsigned OpIdx, |
473 | | SmallVectorImpl<MCFixup> &Fixups, |
474 | 0 | const MCSubtargetInfo &STI) const { |
475 | 0 | const MCOperand &MO = MI.getOperand(OpIdx); |
476 | 0 | assert(MO.isImm() && "Expected an immediate value for the scale amount!"); |
477 | 0 | return MO.getImm() - 8; |
478 | 0 | } |
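
The eight getVecShiftR*/L* helpers above follow two patterns: right shifts encode as the element width minus the shift amount, left shifts as the operand value minus the element width. A standalone sketch of the right-shift form, with the element width as an assumed parameter:

    #include <cstdint>

    // Hypothetical helper generalizing getVecShiftR{8,16,32,64}OpValue.
    static uint32_t encodeVecShiftRight(unsigned ElementBits, uint64_t ShiftAmount) {
      return ElementBits - static_cast<uint32_t>(ShiftAmount);
    }
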
479 | | |
480 | | /// getMoveVecShifterOpValue - Return the encoded value for the vector move |
481 | | /// shifter (MSL). |
482 | | uint32_t AArch64MCCodeEmitter::getMoveVecShifterOpValue( |
483 | | const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups, |
484 | 0 | const MCSubtargetInfo &STI) const { |
485 | 0 | const MCOperand &MO = MI.getOperand(OpIdx); |
486 | 0 | assert(MO.isImm() && |
487 | 0 | "Expected an immediate value for the move shift amount!"); |
488 | 0 | unsigned ShiftVal = AArch64_AM::getShiftValue(MO.getImm()); |
489 | 0 | assert((ShiftVal == 8 || ShiftVal == 16) && "Invalid shift amount!"); |
490 | 0 | return ShiftVal == 8 ? 0 : 1; |
491 | 0 | } |
492 | | |
493 | | unsigned AArch64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue, |
494 | 0 | const MCSubtargetInfo &STI) const { |
495 | | // If one of the signed fixup kinds is applied to a MOVZ instruction, the |
496 | | // eventual result could be either a MOVZ or a MOVN. It's the MCCodeEmitter's |
497 | | // job to ensure that any bits possibly affected by this are 0. This means we |
498 | | // must zero out bit 30 (essentially emitting a MOVN). |
499 | 0 | MCOperand UImm16MO = MI.getOperand(1); |
500 | | |
501 | | // Nothing to do if there's no fixup. |
502 | 0 | if (UImm16MO.isImm()) |
503 | 0 | return EncodedValue; |
504 | | |
505 | 0 | const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr()); |
506 | 0 | switch (A64E->getKind()) { |
507 | 0 | case AArch64MCExpr::VK_DTPREL_G2: |
508 | 0 | case AArch64MCExpr::VK_DTPREL_G1: |
509 | 0 | case AArch64MCExpr::VK_DTPREL_G0: |
510 | 0 | case AArch64MCExpr::VK_GOTTPREL_G1: |
511 | 0 | case AArch64MCExpr::VK_TPREL_G2: |
512 | 0 | case AArch64MCExpr::VK_TPREL_G1: |
513 | 0 | case AArch64MCExpr::VK_TPREL_G0: |
514 | 0 | return EncodedValue & ~(1u << 30); |
515 | 0 | default: |
516 | | // Nothing to do for an unsigned fixup. |
517 | 0 | return EncodedValue; |
518 | 0 | } |
519 | | |
520 | | |
521 | 0 | return EncodedValue & ~(1u << 30); |
522 | 0 | } |
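
Clearing bit 30 moves the encoding out of the MOVZ opcode space toward MOVN, which is what the signed TLS fixup kinds require; a one-line sketch (hypothetical helper) of the masking used above:

    #include <cstdint>

    // Matches the EncodedValue & ~(1u << 30) applied by fixMOVZ.
    static uint32_t clearMovzOpcBit(uint32_t EncodedValue) {
      return EncodedValue & ~(1u << 30);
    }
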
523 | | |
524 | | void AArch64MCCodeEmitter::encodeInstruction(MCInst &MI, raw_ostream &OS, |
525 | | SmallVectorImpl<MCFixup> &Fixups, |
526 | | const MCSubtargetInfo &STI, |
527 | | unsigned int &KsError) const |
528 | 2.71k | { |
529 | 2.71k | KsError = 0; |
530 | | #if 0 |
531 | | if (MI.getOpcode() == AArch64::TLSDESCCALL) { |
532 | | // This is a directive which applies an R_AARCH64_TLSDESC_CALL to the |
533 | | // following (BLR) instruction. It doesn't emit any code itself so it |
534 | | // doesn't go through the normal TableGenerated channels. |
535 | | MCFixupKind Fixup = MCFixupKind(AArch64::fixup_aarch64_tlsdesc_call); |
536 | | Fixups.push_back(MCFixup::create(0, MI.getOperand(0).getExpr(), Fixup)); |
537 | | return; |
538 | | } |
539 | | #endif |
540 | | |
541 | 2.71k | uint64_t Binary = getBinaryCodeForInstr(MI, Fixups, STI); |
542 | 2.71k | support::endian::Writer<support::little>(OS).write<uint32_t>(Binary); |
543 | | |
544 | | // Keystone: update Inst.Address to point to the next instruction |
545 | 2.71k | MI.setAddress(MI.getAddress() + 4); |
546 | 2.71k | } |
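
Every AArch64 instruction is a single 32-bit word emitted little-endian; a standalone sketch of that write, using std::ostream in place of LLVM's raw_ostream:

    #include <cstdint>
    #include <ostream>

    // Hypothetical equivalent of the support::endian::Writer call above.
    static void writeInstrWordLE(std::ostream &OS, uint32_t Binary) {
      for (int i = 0; i < 4; ++i)   // least significant byte first
        OS.put(static_cast<char>((Binary >> (8 * i)) & 0xff));
    }
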
547 | | |
548 | | unsigned |
549 | | AArch64MCCodeEmitter::fixMulHigh(const MCInst &MI, |
550 | | unsigned EncodedValue, |
551 | 0 | const MCSubtargetInfo &STI) const { |
552 | | // The Ra field of SMULH and UMULH is unused: it should be assembled as 31 |
553 | | // (i.e. all bits 1) but is ignored by the processor. |
554 | 0 | EncodedValue |= 0x1f << 10; |
555 | 0 | return EncodedValue; |
556 | 0 | } |
557 | | |
558 | | template<int hasRs, int hasRt2> unsigned |
559 | | AArch64MCCodeEmitter::fixLoadStoreExclusive(const MCInst &MI, |
560 | | unsigned EncodedValue, |
561 | 0 | const MCSubtargetInfo &STI) const { |
562 | 0 | if (!hasRs) EncodedValue |= 0x001F0000; |
563 | 0 | if (!hasRt2) EncodedValue |= 0x00007C00; |
564 | |
|
565 | 0 | return EncodedValue; |
566 | 0 | } 
Unexecuted instantiation: AArch64MCCodeEmitter.cpp:unsigned int (anonymous namespace)::AArch64MCCodeEmitter::fixLoadStoreExclusive<0, 0>(llvm_ks::MCInst const&, unsigned int, llvm_ks::MCSubtargetInfo const&) const
Unexecuted instantiation: AArch64MCCodeEmitter.cpp:unsigned int (anonymous namespace)::AArch64MCCodeEmitter::fixLoadStoreExclusive<0, 1>(llvm_ks::MCInst const&, unsigned int, llvm_ks::MCSubtargetInfo const&) const
Unexecuted instantiation: AArch64MCCodeEmitter.cpp:unsigned int (anonymous namespace)::AArch64MCCodeEmitter::fixLoadStoreExclusive<1, 0>(llvm_ks::MCInst const&, unsigned int, llvm_ks::MCSubtargetInfo const&) const
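
A sketch of the fix-up performed by fixLoadStoreExclusive: the unused Rs (bits 20:16) and Rt2 (bits 14:10) fields are forced to all ones. This is a hypothetical non-template helper with the same masks:

    static unsigned fixLdStExclusiveFields(unsigned Encoded, bool HasRs, bool HasRt2) {
      if (!HasRs)  Encoded |= 0x001F0000; // Rs  (bits 20:16) <- 0b11111
      if (!HasRt2) Encoded |= 0x00007C00; // Rt2 (bits 14:10) <- 0b11111
      return Encoded;
    }
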
567 | | |
568 | | unsigned AArch64MCCodeEmitter::fixOneOperandFPComparison( |
569 | 0 | const MCInst &MI, unsigned EncodedValue, const MCSubtargetInfo &STI) const { |
570 | | // The Rm field of FCMP and friends is unused - it should be assembled |
571 | | // as 0, but is ignored by the processor. |
572 | 0 | EncodedValue &= ~(0x1f << 16); |
573 | 0 | return EncodedValue; |
574 | 0 | } |
575 | | |
576 | | #include "AArch64GenMCCodeEmitter.inc" |