/src/keystone/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
Line | Count | Source |
1 | | //===-- AArch64AsmBackend.cpp - AArch64 Assembler Backend -----------------===// |
2 | | // |
3 | | // The LLVM Compiler Infrastructure |
4 | | // |
5 | | // This file is distributed under the University of Illinois Open Source |
6 | | // License. See LICENSE.TXT for details. |
7 | | // |
8 | | //===----------------------------------------------------------------------===// |
9 | | |
10 | | #include "llvm/MC/MCRegisterInfo.h" |
11 | | #include "Utils/AArch64BaseInfo.h" |
12 | | #include "MCTargetDesc/AArch64MCTargetDesc.h" |
13 | | #include "MCTargetDesc/AArch64FixupKinds.h" |
14 | | #include "llvm/ADT/Triple.h" |
15 | | #include "llvm/MC/MCAsmBackend.h" |
16 | | #include "llvm/MC/MCDirectives.h" |
17 | | #include "llvm/MC/MCELFObjectWriter.h" |
18 | | #include "llvm/MC/MCFixupKindInfo.h" |
19 | | #include "llvm/MC/MCObjectWriter.h" |
20 | | #include "llvm/MC/MCSectionELF.h" |
21 | | #include "llvm/MC/MCSectionMachO.h" |
22 | | #include "llvm/MC/MCValue.h" |
23 | | #include "llvm/Support/ErrorHandling.h" |
24 | | #include "llvm/Support/MachO.h" |
25 | | |
26 | | #include <keystone/keystone.h> |
27 | | |
28 | | using namespace llvm_ks; |
29 | | |
30 | | namespace { |
31 | | |
32 | | class AArch64AsmBackend : public MCAsmBackend { |
33 | | static const unsigned PCRelFlagVal = |
34 | | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits | MCFixupKindInfo::FKF_IsPCRel; |
35 | | public: |
36 | | bool IsLittleEndian; |
37 | | |
38 | | public: |
39 | | AArch64AsmBackend(const Target &T, bool IsLittleEndian) |
40 | 2.03k | : MCAsmBackend(), IsLittleEndian(IsLittleEndian) {} |
41 | | |
42 | 1.87k | unsigned getNumFixupKinds() const override { |
43 | 1.87k | return AArch64::NumTargetFixupKinds; |
44 | 1.87k | } |
45 | | |
46 | 6.68k | const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override { |
47 | 6.68k | const static MCFixupKindInfo Infos[AArch64::NumTargetFixupKinds] = { |
48 | | // This table *must* be in the order that the fixup_* kinds are defined in |
49 | | // AArch64FixupKinds.h. |
50 | | // |
51 | | // Name Offset (bits) Size (bits) Flags |
52 | 6.68k | { "fixup_aarch64_pcrel_adr_imm21", 0, 32, PCRelFlagVal }, |
53 | 6.68k | { "fixup_aarch64_pcrel_adrp_imm21", 0, 32, PCRelFlagVal }, |
54 | 6.68k | { "fixup_aarch64_add_imm12", 10, 12, 0 }, |
55 | 6.68k | { "fixup_aarch64_ldst_imm12_scale1", 10, 12, 0 }, |
56 | 6.68k | { "fixup_aarch64_ldst_imm12_scale2", 10, 12, 0 }, |
57 | 6.68k | { "fixup_aarch64_ldst_imm12_scale4", 10, 12, 0 }, |
58 | 6.68k | { "fixup_aarch64_ldst_imm12_scale8", 10, 12, 0 }, |
59 | 6.68k | { "fixup_aarch64_ldst_imm12_scale16", 10, 12, 0 }, |
60 | 6.68k | { "fixup_aarch64_ldr_pcrel_imm19", 5, 19, PCRelFlagVal }, |
61 | 6.68k | { "fixup_aarch64_movw", 5, 16, 0 }, |
62 | 6.68k | { "fixup_aarch64_pcrel_branch14", 5, 14, PCRelFlagVal }, |
63 | 6.68k | { "fixup_aarch64_pcrel_branch19", 5, 19, PCRelFlagVal }, |
64 | 6.68k | { "fixup_aarch64_pcrel_branch26", 0, 26, PCRelFlagVal }, |
65 | 6.68k | { "fixup_aarch64_pcrel_call26", 0, 26, PCRelFlagVal }, |
66 | 6.68k | { "fixup_aarch64_tlsdesc_call", 0, 0, 0 } |
67 | 6.68k | }; |
68 | | |
69 | 6.68k | if (Kind < FirstTargetFixupKind) |
70 | 4.80k | return MCAsmBackend::getFixupKindInfo(Kind); |
71 | | |
72 | 1.87k | assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() && |
73 | 1.87k | "Invalid kind!"); |
74 | 0 | return Infos[Kind - FirstTargetFixupKind]; |
75 | 6.68k | } |
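 | | // Example of how this table is consumed: fixup_aarch64_add_imm12 has
 | | // TargetOffset 10 and TargetSize 12, so applyFixup() below shifts the
 | | // resolved immediate left by 10 bits to land it in bits [21:10], the
 | | // imm12 field of an ADD/SUB (immediate) instruction; the PC-relative
 | | // kinds likewise use TargetOffset 0 or 5 to match their encodings.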
76 | | |
77 | | void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize, |
78 | | uint64_t Value, bool IsPCRel, unsigned int &KsError) const override; |
79 | | |
80 | | bool mayNeedRelaxation(const MCInst &Inst) const override; |
81 | | bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value, |
82 | | const MCRelaxableFragment *DF, |
83 | | const MCAsmLayout &Layout, unsigned &KsError) const override; |
84 | | void relaxInstruction(const MCInst &Inst, MCInst &Res) const override; |
85 | | bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override; |
86 | | |
87 | 0 | void HandleAssemblerFlag(MCAssemblerFlag Flag) {} |
88 | | |
89 | 0 | unsigned getPointerSize() const { return 8; } |
90 | | |
91 | | unsigned getFixupKindContainereSizeInBytes(unsigned Kind) const; |
92 | | }; |
93 | | |
94 | | } // end anonymous namespace |
95 | | |
96 | | /// \brief The number of bytes the fixup may change. |
97 | 1.77k | static unsigned getFixupKindNumBytes(unsigned Kind) { |
98 | 1.77k | switch (Kind) { |
99 | 0 | default: |
100 | 0 | llvm_unreachable("Unknown fixup kind!"); |
101 | | |
102 | 0 | case AArch64::fixup_aarch64_tlsdesc_call: |
103 | 0 | return 0; |
104 | | |
105 | 0 | case FK_Data_1: |
106 | 0 | return 1; |
107 | | |
108 | 33 | case FK_Data_2: |
109 | 33 | case AArch64::fixup_aarch64_movw: |
110 | 33 | return 2; |
111 | | |
112 | 0 | case AArch64::fixup_aarch64_pcrel_branch14: |
113 | 0 | case AArch64::fixup_aarch64_add_imm12: |
114 | 0 | case AArch64::fixup_aarch64_ldst_imm12_scale1: |
115 | 0 | case AArch64::fixup_aarch64_ldst_imm12_scale2: |
116 | 0 | case AArch64::fixup_aarch64_ldst_imm12_scale4: |
117 | 0 | case AArch64::fixup_aarch64_ldst_imm12_scale8: |
118 | 0 | case AArch64::fixup_aarch64_ldst_imm12_scale16: |
119 | 1 | case AArch64::fixup_aarch64_ldr_pcrel_imm19: |
120 | 179 | case AArch64::fixup_aarch64_pcrel_branch19: |
121 | 179 | return 3; |
122 | | |
123 | 0 | case AArch64::fixup_aarch64_pcrel_adr_imm21: |
124 | 0 | case AArch64::fixup_aarch64_pcrel_adrp_imm21: |
125 | 108 | case AArch64::fixup_aarch64_pcrel_branch26: |
126 | 402 | case AArch64::fixup_aarch64_pcrel_call26: |
127 | 1.56k | case FK_Data_4: |
128 | 1.56k | return 4; |
129 | | |
130 | 0 | case FK_Data_8: |
131 | 0 | return 8; |
132 | 1.77k | } |
133 | 1.77k | } |
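 | | // The sizes above count only the bytes a fixup can touch once it is
 | | // shifted into place: e.g. branch19 occupies bits [23:5] (offset 5,
 | | // size 19), i.e. the low 3 bytes of the 4-byte instruction, hence 3.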
134 | | |
135 | 0 | static unsigned AdrImmBits(unsigned Value) { |
136 | 0 | unsigned lo2 = Value & 0x3; |
137 | 0 | unsigned hi19 = (Value & 0x1ffffc) >> 2; |
138 | 0 | return (hi19 << 5) | (lo2 << 29); |
139 | 0 | } |
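 | | // Worked example: for Value = 0x5, lo2 = 0b01 and hi19 = 0b1, giving
 | | // (1 << 5) | (1 << 29) = 0x20000020 -- immlo lands in bits [30:29] and
 | | // immhi in bits [23:5], matching the ADR/ADRP instruction encoding.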
140 | | |
141 | 1.17k | static uint64_t adjustFixupValue(unsigned Kind, uint64_t Value) { |
142 | 1.17k | int64_t SignedValue = static_cast<int64_t>(Value); |
143 | 1.17k | switch (Kind) { |
144 | 0 | default: |
145 | 0 | llvm_unreachable("Unknown fixup kind!"); |
146 | 0 | case AArch64::fixup_aarch64_pcrel_adr_imm21: |
147 | 0 | if (SignedValue > 2097151 || SignedValue < -2097152) |
148 | 0 | report_fatal_error("fixup value out of range"); |
149 | 0 | return AdrImmBits(Value & 0x1fffffULL); |
150 | 0 | case AArch64::fixup_aarch64_pcrel_adrp_imm21: |
151 | 0 | return AdrImmBits((Value & 0x1fffff000ULL) >> 12); |
152 | 0 | case AArch64::fixup_aarch64_ldr_pcrel_imm19: |
153 | 18 | case AArch64::fixup_aarch64_pcrel_branch19: |
154 | | // Signed 21-bit immediate |
155 | 18 | if (SignedValue > 2097151 || SignedValue < -2097152) |
156 | 0 | report_fatal_error("fixup value out of range"); |
157 | | // Low two bits are not encoded. |
158 | 18 | return (Value >> 2) & 0x7ffff; |
159 | 0 | case AArch64::fixup_aarch64_add_imm12: |
160 | 0 | case AArch64::fixup_aarch64_ldst_imm12_scale1: |
161 | | // Unsigned 12-bit immediate |
162 | 0 | if (Value >= 0x1000) |
163 | 0 | report_fatal_error("invalid imm12 fixup value"); |
164 | 0 | return Value; |
165 | 0 | case AArch64::fixup_aarch64_ldst_imm12_scale2: |
166 | | // Unsigned 12-bit immediate which gets multiplied by 2 |
167 | 0 | if (Value & 1 || Value >= 0x2000) |
168 | 0 | report_fatal_error("invalid imm12 fixup value"); |
169 | 0 | return Value >> 1; |
170 | 0 | case AArch64::fixup_aarch64_ldst_imm12_scale4: |
171 | | // Unsigned 12-bit immediate which gets multiplied by 4 |
172 | 0 | if (Value & 3 || Value >= 0x4000) |
173 | 0 | report_fatal_error("invalid imm12 fixup value"); |
174 | 0 | return Value >> 2; |
175 | 0 | case AArch64::fixup_aarch64_ldst_imm12_scale8: |
176 | | // Unsigned 12-bit immediate which gets multiplied by 8 |
177 | 0 | if (Value & 7 || Value >= 0x8000) |
178 | 0 | report_fatal_error("invalid imm12 fixup value"); |
179 | 0 | return Value >> 3; |
180 | 0 | case AArch64::fixup_aarch64_ldst_imm12_scale16: |
181 | | // Unsigned 12-bit immediate which gets multiplied by 16 |
182 | 0 | if (Value & 15 || Value >= 0x10000) |
183 | 0 | report_fatal_error("invalid imm12 fixup value"); |
184 | 0 | return Value >> 4; |
185 | 0 | case AArch64::fixup_aarch64_movw: |
186 | 0 | report_fatal_error("no resolvable MOVZ/MOVK fixups supported yet"); |
187 | 0 | return Value; |
188 | 0 | case AArch64::fixup_aarch64_pcrel_branch14: |
189 | | // Signed 16-bit immediate |
190 | 0 | if (SignedValue > 32767 || SignedValue < -32768) |
191 | 0 | report_fatal_error("fixup value out of range"); |
192 | | // Low two bits are not encoded (4-byte alignment assumed). |
193 | 0 | if (Value & 0x3) |
194 | 0 | report_fatal_error("fixup not sufficiently aligned"); |
195 | 0 | return (Value >> 2) & 0x3fff; |
196 | 0 | case AArch64::fixup_aarch64_pcrel_branch26: |
197 | 64 | case AArch64::fixup_aarch64_pcrel_call26: |
198 | | // Signed 28-bit immediate |
199 | 64 | if (SignedValue > 134217727 || SignedValue < -134217728) |
200 | 0 | report_fatal_error("fixup value out of range"); |
201 | | // Low two bits are not encoded (4-byte alignment assumed). |
202 | 64 | if (Value & 0x3) |
203 | 0 | report_fatal_error("fixup not sufficiently aligned"); |
204 | 64 | return (Value >> 2) & 0x3ffffff; |
205 | 0 | case FK_Data_1: |
206 | 33 | case FK_Data_2: |
207 | 1.09k | case FK_Data_4: |
208 | 1.09k | case FK_Data_8: |
209 | 1.09k | return Value; |
210 | 1.17k | } |
211 | 1.17k | } |
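 | | // Worked examples of the adjustments above (Value is the raw byte
 | | // offset or immediate before adjustment):
 | | //   branch26/call26: Value 0x1000 -> (0x1000 >> 2) & 0x3ffffff = 0x400
 | | //   ldst_imm12_scale8: Value 0x40 -> 0x40 >> 3 = 8
 | | //   branch19: Value 0x40          -> (0x40 >> 2) & 0x7ffff = 0x10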
212 | | |
213 | | /// getFixupKindContainereSizeInBytes - The number of bytes of the |
214 | | /// container involved for big-endian fixups, or 0 if the item is little-endian. |
215 | 1.17k | unsigned AArch64AsmBackend::getFixupKindContainereSizeInBytes(unsigned Kind) const { |
216 | 1.17k | if (IsLittleEndian) |
217 | 1.17k | return 0; |
218 | | |
219 | 0 | switch (Kind) { |
220 | 0 | default: |
221 | 0 | llvm_unreachable("Unknown fixup kind!"); |
222 | | |
223 | 0 | case FK_Data_1: |
224 | 0 | return 1; |
225 | 0 | case FK_Data_2: |
226 | 0 | return 2; |
227 | 0 | case FK_Data_4: |
228 | 0 | return 4; |
229 | 0 | case FK_Data_8: |
230 | 0 | return 8; |
231 | | |
232 | 0 | case AArch64::fixup_aarch64_tlsdesc_call: |
233 | 0 | case AArch64::fixup_aarch64_movw: |
234 | 0 | case AArch64::fixup_aarch64_pcrel_branch14: |
235 | 0 | case AArch64::fixup_aarch64_add_imm12: |
236 | 0 | case AArch64::fixup_aarch64_ldst_imm12_scale1: |
237 | 0 | case AArch64::fixup_aarch64_ldst_imm12_scale2: |
238 | 0 | case AArch64::fixup_aarch64_ldst_imm12_scale4: |
239 | 0 | case AArch64::fixup_aarch64_ldst_imm12_scale8: |
240 | 0 | case AArch64::fixup_aarch64_ldst_imm12_scale16: |
241 | 0 | case AArch64::fixup_aarch64_ldr_pcrel_imm19: |
242 | 0 | case AArch64::fixup_aarch64_pcrel_branch19: |
243 | 0 | case AArch64::fixup_aarch64_pcrel_adr_imm21: |
244 | 0 | case AArch64::fixup_aarch64_pcrel_adrp_imm21: |
245 | 0 | case AArch64::fixup_aarch64_pcrel_branch26: |
246 | 0 | case AArch64::fixup_aarch64_pcrel_call26: |
247 | | // Instructions are always little endian |
248 | 0 | return 0; |
249 | 0 | } |
250 | 0 | } |
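 | | // In other words: on big-endian targets only the FK_Data_* fixups need
 | | // byte reversal within their container; AArch64 instruction words are
 | | // stored little-endian regardless of data endianness, so every
 | | // instruction fixup reports a container size of 0.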
251 | | |
252 | | void AArch64AsmBackend::applyFixup(const MCFixup &Fixup, char *Data, |
253 | | unsigned DataSize, uint64_t Value, |
254 | 1.77k | bool IsPCRel, unsigned int &KsError) const { |
255 | 1.77k | unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind()); |
256 | 1.77k | if (!Value) |
257 | 604 | return; // Doesn't change encoding. |
258 | 1.17k | MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind()); |
259 | | // Apply any target-specific value adjustments. |
260 | 1.17k | Value = adjustFixupValue(Fixup.getKind(), Value); |
261 | | |
262 | | // Shift the value into position. |
263 | 1.17k | Value <<= Info.TargetOffset; |
264 | | |
265 | 1.17k | unsigned Offset = Fixup.getOffset(); |
266 | | //assert(Offset + NumBytes <= DataSize && "Invalid fixup offset!"); |
267 | 1.17k | if (Offset + NumBytes > DataSize) { |
268 | 0 | KsError = KS_ERR_ASM_FIXUP_INVALID; |
269 | 0 | return; |
270 | 0 | } |
271 | | |
272 | | // Used to point to big endian bytes. |
273 | 1.17k | unsigned FulleSizeInBytes = getFixupKindContainereSizeInBytes(Fixup.getKind()); |
274 | | |
275 | | // For each byte of the fragment that the fixup touches, mask in the |
276 | | // bits from the fixup value. |
277 | 1.17k | if (FulleSizeInBytes == 0) { |
278 | | // Handle as little-endian |
279 | 5.79k | for (unsigned i = 0; i != NumBytes; ++i) { |
280 | 4.61k | Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff); |
281 | 4.61k | } |
282 | 1.17k | } else { |
283 | | // Handle as big-endian |
284 | | //assert((Offset + FulleSizeInBytes) <= DataSize && "Invalid fixup size!"); |
285 | | //assert(NumBytes <= FulleSizeInBytes && "Invalid fixup size!"); |
286 | 0 | if ((Offset + FulleSizeInBytes) > DataSize || |
287 | 0 | NumBytes > FulleSizeInBytes) { |
288 | 0 | KsError = KS_ERR_ASM_FIXUP_INVALID; |
289 | 0 | return; |
290 | 0 | } |
291 | 0 | for (unsigned i = 0; i != NumBytes; ++i) { |
292 | 0 | unsigned Idx = FulleSizeInBytes - 1 - i; |
293 | 0 | Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff); |
294 | 0 | } |
295 | 0 | } |
296 | 1.17k | } |
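 | | // Putting the pieces together for a little-endian "b target" that jumps
 | | // forward 0x1000 bytes: adjustFixupValue() yields 0x400, TargetOffset is
 | | // 0 and NumBytes is 4, so the loop ORs bytes 00 04 00 00 into the
 | | // instruction, turning 0x14000000 (B #0) into 0x14000400 (B #+0x1000).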
297 | | |
298 | 1.97k | bool AArch64AsmBackend::mayNeedRelaxation(const MCInst &Inst) const { |
299 | 1.97k | return false; |
300 | 1.97k | } |
301 | | |
302 | | bool AArch64AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, |
303 | | uint64_t Value, |
304 | | const MCRelaxableFragment *DF, |
305 | 0 | const MCAsmLayout &Layout, unsigned &KsError) const { |
306 | | // FIXME: This isn't correct for AArch64. Just moving the "generic" logic |
307 | | // into the targets for now. |
308 | | // |
309 | | // Relax if the value is too big for a (signed) i8. |
310 | 0 | return int64_t(Value) != int64_t(int8_t(Value)); |
311 | 0 | } |
312 | | |
313 | | void AArch64AsmBackend::relaxInstruction(const MCInst &Inst, |
314 | 0 | MCInst &Res) const { |
315 | 0 | llvm_unreachable("AArch64AsmBackend::relaxInstruction() unimplemented"); |
316 | 0 | } |
317 | | |
318 | 359 | bool AArch64AsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const { |
319 | | // If the count is not 4-byte aligned, we must be writing data into the text |
320 | | // section (otherwise we have unaligned instructions, and thus have far |
321 | | // bigger problems), so just write zeros instead. |
322 | 359 | OW->WriteZeros(Count % 4); |
323 | | |
324 | | // We are properly aligned, so write NOPs as requested. |
325 | 359 | Count /= 4; |
326 | 484 | for (uint64_t i = 0; i != Count; ++i) |
327 | 125 | OW->write32(0xd503201f); |
328 | 359 | return true; |
329 | 359 | } |
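 | | // For example, a request for Count = 10 emits 2 zero bytes followed by
 | | // two NOPs (0xd503201f encodes the AArch64 "nop" / HINT #0 instruction),
 | | // for 10 bytes in total.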
330 | | |
331 | | namespace { |
332 | | |
333 | | namespace CU { |
334 | | |
335 | | /// \brief Compact unwind encoding values. |
336 | | enum CompactUnwindEncodings { |
337 | | /// \brief A "frameless" leaf function, where no non-volatile registers are |
338 | | /// saved. The return address remains in LR throughout the function. |
339 | | UNWIND_AArch64_MODE_FRAMELESS = 0x02000000, |
340 | | |
341 | | /// \brief No compact unwind encoding available. Instead, the low 23 bits of |
342 | | /// the compact unwind encoding are the offset of the DWARF FDE in the |
343 | | /// __eh_frame section. This mode is never used in object files. It is only |
344 | | /// generated by the linker in final linked images, which have only DWARF info |
345 | | /// for a function. |
346 | | UNWIND_AArch64_MODE_DWARF = 0x03000000, |
347 | | |
348 | | /// \brief This is a standard arm64 prologue where FP/LR are immediately |
349 | | /// pushed on the stack, then SP is copied to FP. If there are any |
350 | | /// non-volatile registers saved, they are copied into the stack frame in pairs |
351 | | /// in a contiguous range right below the saved FP/LR pair. Any subset of the |
352 | | /// five X pairs and four D pairs can be saved, but the memory layout must be |
353 | | /// in register number order. |
354 | | UNWIND_AArch64_MODE_FRAME = 0x04000000, |
355 | | |
356 | | /// \brief Frame register pair encodings. |
357 | | UNWIND_AArch64_FRAME_X19_X20_PAIR = 0x00000001, |
358 | | UNWIND_AArch64_FRAME_X21_X22_PAIR = 0x00000002, |
359 | | UNWIND_AArch64_FRAME_X23_X24_PAIR = 0x00000004, |
360 | | UNWIND_AArch64_FRAME_X25_X26_PAIR = 0x00000008, |
361 | | UNWIND_AArch64_FRAME_X27_X28_PAIR = 0x00000010, |
362 | | UNWIND_AArch64_FRAME_D8_D9_PAIR = 0x00000100, |
363 | | UNWIND_AArch64_FRAME_D10_D11_PAIR = 0x00000200, |
364 | | UNWIND_AArch64_FRAME_D12_D13_PAIR = 0x00000400, |
365 | | UNWIND_AArch64_FRAME_D14_D15_PAIR = 0x00000800 |
366 | | }; |
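 | | // In the FRAME mode, a complete compact unwind encoding is the mode
 | | // value OR'd with the flags for each saved register pair, e.g.
 | | // UNWIND_AArch64_MODE_FRAME | UNWIND_AArch64_FRAME_X19_X20_PAIR |
 | | // UNWIND_AArch64_FRAME_D8_D9_PAIR == 0x04000101.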
367 | | |
368 | | } // end CU namespace |
369 | | |
370 | | } // end anonymous namespace |
371 | | |
372 | | namespace { |
373 | | |
374 | | class ELFAArch64AsmBackend : public AArch64AsmBackend { |
375 | | public: |
376 | | uint8_t OSABI; |
377 | | |
378 | | ELFAArch64AsmBackend(const Target &T, uint8_t OSABI, bool IsLittleEndian) |
379 | 2.03k | : AArch64AsmBackend(T, IsLittleEndian), OSABI(OSABI) {} |
380 | | |
381 | 2.03k | MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override { |
382 | 2.03k | return createAArch64ELFObjectWriter(OS, OSABI, IsLittleEndian); |
383 | 2.03k | } |
384 | | |
385 | | void processFixupValue(const MCAssembler &Asm, const MCAsmLayout &Layout, |
386 | | const MCFixup &Fixup, const MCFragment *DF, |
387 | | const MCValue &Target, uint64_t &Value, |
388 | | bool &IsResolved) override; |
389 | | }; |
390 | | |
391 | | void ELFAArch64AsmBackend::processFixupValue( |
392 | | const MCAssembler &Asm, const MCAsmLayout &Layout, const MCFixup &Fixup, |
393 | | const MCFragment *DF, const MCValue &Target, uint64_t &Value, |
394 | 1.77k | bool &IsResolved) { |
395 | | // The ADRP instruction adds some multiple of 0x1000 to the current PC & |
396 | | // ~0xfff. This means that the required offset to reach a symbol can vary by |
397 | | // up to one step depending on where the ADRP is in memory. For example: |
398 | | // |
399 | | // ADRP x0, there |
400 | | // there: |
401 | | // |
402 | | // If the ADRP occurs at address 0xffc then "there" will be at 0x1000 and |
403 | | // we'll need that as an offset. At any other address "there" will be in the |
404 | | // same page as the ADRP and the instruction should encode 0x0. Assuming the |
405 | | // section isn't 0x1000-aligned, we therefore need to delegate this decision |
406 | | // to the linker -- a relocation! |
407 | 1.77k | if ((uint32_t)Fixup.getKind() == AArch64::fixup_aarch64_pcrel_adrp_imm21) |
408 | 0 | IsResolved = false; |
409 | 1.77k | } |
410 | | |
411 | | } |
412 | | |
413 | | MCAsmBackend *llvm_ks::createAArch64leAsmBackend(const Target &T, |
414 | | const MCRegisterInfo &MRI, |
415 | | const Triple &TheTriple, |
416 | 2.03k | StringRef CPU) { |
417 | 2.03k | assert(TheTriple.isOSBinFormatELF() && "Expect an ELF target"); |
418 | 0 | uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS()); |
419 | 2.03k | return new ELFAArch64AsmBackend(T, OSABI, /*IsLittleEndian=*/true); |
420 | 2.03k | } |
421 | | |
422 | | MCAsmBackend *llvm_ks::createAArch64beAsmBackend(const Target &T, |
423 | | const MCRegisterInfo &MRI, |
424 | | const Triple &TheTriple, |
425 | 0 | StringRef CPU) { |
426 | 0 | assert(TheTriple.isOSBinFormatELF() && |
427 | 0 | "Big endian is only supported for ELF targets!"); |
428 | 0 | uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS()); |
429 | 0 | return new ELFAArch64AsmBackend(T, OSABI, |
430 | 0 | /*IsLittleEndian=*/false); |
431 | 0 | } |