Coverage Report

Created: 2024-01-17 10:31

/src/llvm-project/clang/lib/Basic/Targets/AArch64.cpp
Line
Count
Source (jump to first uncovered line)
1
//===--- AArch64.cpp - Implement AArch64 target feature support -----------===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
//
9
// This file implements AArch64 TargetInfo objects.
10
//
11
//===----------------------------------------------------------------------===//
12
13
#include "AArch64.h"
14
#include "clang/Basic/LangOptions.h"
15
#include "clang/Basic/TargetBuiltins.h"
16
#include "clang/Basic/TargetInfo.h"
17
#include "llvm/ADT/ArrayRef.h"
18
#include "llvm/ADT/StringExtras.h"
19
#include "llvm/ADT/StringSwitch.h"
20
#include "llvm/TargetParser/AArch64TargetParser.h"
21
#include "llvm/TargetParser/ARMTargetParserCommon.h"
22
#include <optional>
23
24
using namespace clang;
25
using namespace clang::targets;
26
27
static constexpr Builtin::Info BuiltinInfo[] = {
28
#define BUILTIN(ID, TYPE, ATTRS)                                               \
29
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
30
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
31
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
32
#include "clang/Basic/BuiltinsNEON.def"
33
34
#define BUILTIN(ID, TYPE, ATTRS)                                               \
35
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
36
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
37
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
38
#include "clang/Basic/BuiltinsSVE.def"
39
40
#define BUILTIN(ID, TYPE, ATTRS)                                               \
41
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
42
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
43
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
44
#include "clang/Basic/BuiltinsSME.def"
45
46
#define BUILTIN(ID, TYPE, ATTRS)                                               \
47
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
48
#define LANGBUILTIN(ID, TYPE, ATTRS, LANG)                                     \
49
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, LANG},
50
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
51
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
52
#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE)         \
53
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::HEADER, LANGS},
54
#include "clang/Basic/BuiltinsAArch64.def"
55
};
56
57
0
void AArch64TargetInfo::setArchFeatures() {
58
0
  if (*ArchInfo == llvm::AArch64::ARMV8R) {
59
0
    HasDotProd = true;
60
0
    HasDIT = true;
61
0
    HasFlagM = true;
62
0
    HasRCPC = true;
63
0
    FPU |= NeonMode;
64
0
    HasCCPP = true;
65
0
    HasCRC = true;
66
0
    HasLSE = true;
67
0
    HasRDM = true;
68
0
  } else if (ArchInfo->Version.getMajor() == 8) {
69
0
    if (ArchInfo->Version.getMinor() >= 7u) {
70
0
      HasWFxT = true;
71
0
    }
72
0
    if (ArchInfo->Version.getMinor() >= 6u) {
73
0
      HasBFloat16 = true;
74
0
      HasMatMul = true;
75
0
    }
76
0
    if (ArchInfo->Version.getMinor() >= 5u) {
77
0
      HasAlternativeNZCV = true;
78
0
      HasFRInt3264 = true;
79
0
      HasSSBS = true;
80
0
      HasSB = true;
81
0
      HasPredRes = true;
82
0
      HasBTI = true;
83
0
    }
84
0
    if (ArchInfo->Version.getMinor() >= 4u) {
85
0
      HasDotProd = true;
86
0
      HasDIT = true;
87
0
      HasFlagM = true;
88
0
    }
89
0
    if (ArchInfo->Version.getMinor() >= 3u) {
90
0
      HasRCPC = true;
91
0
      FPU |= NeonMode;
92
0
    }
93
0
    if (ArchInfo->Version.getMinor() >= 2u) {
94
0
      HasCCPP = true;
95
0
    }
96
0
    if (ArchInfo->Version.getMinor() >= 1u) {
97
0
      HasCRC = true;
98
0
      HasLSE = true;
99
0
      HasRDM = true;
100
0
    }
101
0
  } else if (ArchInfo->Version.getMajor() == 9) {
102
0
    if (ArchInfo->Version.getMinor() >= 2u) {
103
0
      HasWFxT = true;
104
0
    }
105
0
    if (ArchInfo->Version.getMinor() >= 1u) {
106
0
      HasBFloat16 = true;
107
0
      HasMatMul = true;
108
0
    }
109
0
    FPU |= SveMode;
110
0
    HasSVE2 = true;
111
0
    HasFullFP16 = true;
112
0
    HasAlternativeNZCV = true;
113
0
    HasFRInt3264 = true;
114
0
    HasSSBS = true;
115
0
    HasSB = true;
116
0
    HasPredRes = true;
117
0
    HasBTI = true;
118
0
    HasDotProd = true;
119
0
    HasDIT = true;
120
0
    HasFlagM = true;
121
0
    HasRCPC = true;
122
0
    FPU |= NeonMode;
123
0
    HasCCPP = true;
124
0
    HasCRC = true;
125
0
    HasLSE = true;
126
0
    HasRDM = true;
127
0
  }
128
0
}
129
130
AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
131
                                     const TargetOptions &Opts)
132
0
    : TargetInfo(Triple), ABI("aapcs") {
133
0
  if (getTriple().isOSOpenBSD()) {
134
0
    Int64Type = SignedLongLong;
135
0
    IntMaxType = SignedLongLong;
136
0
  } else {
137
0
    if (!getTriple().isOSDarwin() && !getTriple().isOSNetBSD())
138
0
      WCharType = UnsignedInt;
139
140
0
    Int64Type = SignedLong;
141
0
    IntMaxType = SignedLong;
142
0
  }
143
144
  // All AArch64 implementations support ARMv8 FP, which makes half a legal type.
145
0
  HasLegalHalfType = true;
146
0
  HalfArgsAndReturns = true;
147
0
  HasFloat16 = true;
148
0
  HasStrictFP = true;
149
150
0
  if (Triple.isArch64Bit())
151
0
    LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
152
0
  else
153
0
    LongWidth = LongAlign = PointerWidth = PointerAlign = 32;
154
155
0
  MaxVectorAlign = 128;
156
0
  MaxAtomicInlineWidth = 128;
157
0
  MaxAtomicPromoteWidth = 128;
158
159
0
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
160
0
  LongDoubleFormat = &llvm::APFloat::IEEEquad();
161
162
0
  BFloat16Width = BFloat16Align = 16;
163
0
  BFloat16Format = &llvm::APFloat::BFloat();
164
165
  // Make __builtin_ms_va_list available.
166
0
  HasBuiltinMSVaList = true;
167
168
  // Make the SVE types available.  Note that this deliberately doesn't
169
  // depend on SveMode, since in principle it should be possible to turn
170
  // SVE on and off within a translation unit.  It should also be possible
171
  // to compile the global declaration:
172
  //
173
  // __SVInt8_t *ptr;
174
  //
175
  // even without SVE.
176
0
  HasAArch64SVETypes = true;
177
178
  // {} in inline assembly are neon specifiers, not assembly variant
179
  // specifiers.
180
0
  NoAsmVariants = true;
181
182
  // AAPCS gives rules for bitfields. 7.1.7 says: "The container type
183
  // contributes to the alignment of the containing aggregate in the same way
184
  // a plain (non bit-field) member of that type would, without exception for
185
  // zero-sized or anonymous bit-fields."
186
0
  assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
187
0
  UseZeroLengthBitfieldAlignment = true;
188
189
  // AArch64 targets default to using the ARM C++ ABI.
190
0
  TheCXXABI.set(TargetCXXABI::GenericAArch64);
191
192
0
  if (Triple.getOS() == llvm::Triple::Linux)
193
0
    this->MCountName = "\01_mcount";
194
0
  else if (Triple.getOS() == llvm::Triple::UnknownOS)
195
0
    this->MCountName =
196
0
        Opts.EABIVersion == llvm::EABI::GNU ? "\01_mcount" : "mcount";
197
0
}
198
199
0
StringRef AArch64TargetInfo::getABI() const { return ABI; }
200
201
0
bool AArch64TargetInfo::setABI(const std::string &Name) {
202
0
  if (Name != "aapcs" && Name != "darwinpcs")
203
0
    return false;
204
205
0
  ABI = Name;
206
0
  return true;
207
0
}
208
209
bool AArch64TargetInfo::validateBranchProtection(StringRef Spec, StringRef,
210
                                                 BranchProtectionInfo &BPI,
211
0
                                                 StringRef &Err) const {
212
0
  llvm::ARM::ParsedBranchProtection PBP;
213
0
  if (!llvm::ARM::parseBranchProtection(Spec, PBP, Err))
214
0
    return false;
215
216
0
  BPI.SignReturnAddr =
217
0
      llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
218
0
          .Case("non-leaf", LangOptions::SignReturnAddressScopeKind::NonLeaf)
219
0
          .Case("all", LangOptions::SignReturnAddressScopeKind::All)
220
0
          .Default(LangOptions::SignReturnAddressScopeKind::None);
221
222
0
  if (PBP.Key == "a_key")
223
0
    BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
224
0
  else
225
0
    BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;
226
227
0
  BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
228
0
  BPI.BranchProtectionPAuthLR = PBP.BranchProtectionPAuthLR;
229
0
  BPI.GuardedControlStack = PBP.GuardedControlStack;
230
0
  return true;
231
0
}
232
233
0
bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
234
0
  return Name == "generic" || llvm::AArch64::parseCpu(Name);
235
0
}
236
237
0
bool AArch64TargetInfo::setCPU(const std::string &Name) {
238
0
  return isValidCPUName(Name);
239
0
}
240
241
void AArch64TargetInfo::fillValidCPUList(
242
0
    SmallVectorImpl<StringRef> &Values) const {
243
0
  llvm::AArch64::fillValidCPUArchList(Values);
244
0
}
245
246
void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
247
0
                                                MacroBuilder &Builder) const {
248
0
  Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
249
0
}
250
251
void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
252
0
                                                MacroBuilder &Builder) const {
253
  // Also include the ARMv8.1 defines
254
0
  getTargetDefinesARMV81A(Opts, Builder);
255
0
}
256
257
void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
258
0
                                                MacroBuilder &Builder) const {
259
0
  Builder.defineMacro("__ARM_FEATURE_COMPLEX", "1");
260
0
  Builder.defineMacro("__ARM_FEATURE_JCVT", "1");
261
0
  Builder.defineMacro("__ARM_FEATURE_PAUTH", "1");
262
  // Also include the Armv8.2 defines
263
0
  getTargetDefinesARMV82A(Opts, Builder);
264
0
}
265
266
void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
267
0
                                                MacroBuilder &Builder) const {
268
  // Also include the Armv8.3 defines
269
0
  getTargetDefinesARMV83A(Opts, Builder);
270
0
}
271
272
void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
273
0
                                                MacroBuilder &Builder) const {
274
0
  Builder.defineMacro("__ARM_FEATURE_FRINT", "1");
275
0
  Builder.defineMacro("__ARM_FEATURE_BTI", "1");
276
  // Also include the Armv8.4 defines
277
0
  getTargetDefinesARMV84A(Opts, Builder);
278
0
}
279
280
void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
281
0
                                                MacroBuilder &Builder) const {
282
  // Also include the Armv8.5 defines
283
  // FIXME: Armv8.6 makes the following extensions mandatory:
284
  // - __ARM_FEATURE_BF16
285
  // - __ARM_FEATURE_MATMUL_INT8
286
  // Handle them here.
287
0
  getTargetDefinesARMV85A(Opts, Builder);
288
0
}
289
290
void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions &Opts,
291
0
                                                MacroBuilder &Builder) const {
292
  // Also include the Armv8.6 defines
293
0
  getTargetDefinesARMV86A(Opts, Builder);
294
0
}
295
296
void AArch64TargetInfo::getTargetDefinesARMV88A(const LangOptions &Opts,
297
0
                                                MacroBuilder &Builder) const {
298
  // Also include the Armv8.7 defines
299
0
  getTargetDefinesARMV87A(Opts, Builder);
300
0
}
301
302
void AArch64TargetInfo::getTargetDefinesARMV89A(const LangOptions &Opts,
303
0
                                                MacroBuilder &Builder) const {
304
  // Also include the Armv8.8 defines
305
0
  getTargetDefinesARMV88A(Opts, Builder);
306
0
}
307
308
void AArch64TargetInfo::getTargetDefinesARMV9A(const LangOptions &Opts,
309
0
                                               MacroBuilder &Builder) const {
310
  // Armv9-A maps to Armv8.5-A
311
0
  getTargetDefinesARMV85A(Opts, Builder);
312
0
}
313
314
void AArch64TargetInfo::getTargetDefinesARMV91A(const LangOptions &Opts,
315
0
                                                MacroBuilder &Builder) const {
316
  // Armv9.1-A maps to Armv8.6-A
317
0
  getTargetDefinesARMV86A(Opts, Builder);
318
0
}
319
320
void AArch64TargetInfo::getTargetDefinesARMV92A(const LangOptions &Opts,
321
0
                                                MacroBuilder &Builder) const {
322
  // Armv9.2-A maps to Armv8.7-A
323
0
  getTargetDefinesARMV87A(Opts, Builder);
324
0
}
325
326
void AArch64TargetInfo::getTargetDefinesARMV93A(const LangOptions &Opts,
327
0
                                                MacroBuilder &Builder) const {
328
  // Armv9.3-A maps to Armv8.8-A
329
0
  getTargetDefinesARMV88A(Opts, Builder);
330
0
}
331
332
void AArch64TargetInfo::getTargetDefinesARMV94A(const LangOptions &Opts,
333
0
                                                MacroBuilder &Builder) const {
334
  // Armv9.4-A maps to Armv8.9-A
335
0
  getTargetDefinesARMV89A(Opts, Builder);
336
0
}
337
338
void AArch64TargetInfo::getTargetDefinesARMV95A(const LangOptions &Opts,
339
0
                                                MacroBuilder &Builder) const {
340
  // Armv9.5-A does not have a v8.* equivalent, but is a superset of v9.4-A.
341
0
  getTargetDefinesARMV94A(Opts, Builder);
342
0
}
343
344
void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
345
0
                                         MacroBuilder &Builder) const {
346
  // Target identification.
347
0
  if (getTriple().isWindowsArm64EC()) {
348
    // Define the same set of macros as would be defined on x86_64 to ensure that
349
    // ARM64EC datatype layouts match those of x86_64 compiled code
350
0
    Builder.defineMacro("__amd64__");
351
0
    Builder.defineMacro("__amd64");
352
0
    Builder.defineMacro("__x86_64");
353
0
    Builder.defineMacro("__x86_64__");
354
0
    Builder.defineMacro("__arm64ec__");
355
0
  } else {
356
0
    Builder.defineMacro("__aarch64__");
357
0
  }
358
359
  // Inline assembly supports AArch64 flag outputs.
360
0
  Builder.defineMacro("__GCC_ASM_FLAG_OUTPUTS__");
361
362
0
  std::string CodeModel = getTargetOpts().CodeModel;
363
0
  if (CodeModel == "default")
364
0
    CodeModel = "small";
365
0
  for (char &c : CodeModel)
366
0
    c = toupper(c);
367
0
  Builder.defineMacro("__AARCH64_CMODEL_" + CodeModel + "__");
368
369
  // ACLE predefines. Many can only have one possible value on v8 AArch64.
370
0
  Builder.defineMacro("__ARM_ACLE", "200");
371
0
  Builder.defineMacro("__ARM_ARCH",
372
0
                      std::to_string(ArchInfo->Version.getMajor()));
373
0
  Builder.defineMacro("__ARM_ARCH_PROFILE",
374
0
                      std::string("'") + (char)ArchInfo->Profile + "'");
375
376
0
  Builder.defineMacro("__ARM_64BIT_STATE", "1");
377
0
  Builder.defineMacro("__ARM_PCS_AAPCS64", "1");
378
0
  Builder.defineMacro("__ARM_ARCH_ISA_A64", "1");
379
380
0
  Builder.defineMacro("__ARM_FEATURE_CLZ", "1");
381
0
  Builder.defineMacro("__ARM_FEATURE_FMA", "1");
382
0
  Builder.defineMacro("__ARM_FEATURE_LDREX", "0xF");
383
0
  Builder.defineMacro("__ARM_FEATURE_IDIV", "1"); // As specified in ACLE
384
0
  Builder.defineMacro("__ARM_FEATURE_DIV");       // For backwards compatibility
385
0
  Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN", "1");
386
0
  Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING", "1");
387
388
0
  Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");
389
390
  // 0xe implies support for half, single and double precision operations.
391
0
  if (FPU & FPUMode)
392
0
    Builder.defineMacro("__ARM_FP", "0xE");
393
394
  // PCS specifies this for SysV variants, which is all we support. Other ABIs
395
  // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
396
0
  Builder.defineMacro("__ARM_FP16_FORMAT_IEEE", "1");
397
0
  Builder.defineMacro("__ARM_FP16_ARGS", "1");
398
399
0
  if (Opts.UnsafeFPMath)
400
0
    Builder.defineMacro("__ARM_FP_FAST", "1");
401
402
0
  Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
403
0
                      Twine(Opts.WCharSize ? Opts.WCharSize : 4));
404
405
0
  Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM", Opts.ShortEnums ? "1" : "4");
406
407
0
  if (FPU & NeonMode) {
408
0
    Builder.defineMacro("__ARM_NEON", "1");
409
    // 64-bit NEON supports half, single and double precision operations.
410
0
    Builder.defineMacro("__ARM_NEON_FP", "0xE");
411
0
  }
412
413
0
  if (FPU & SveMode)
414
0
    Builder.defineMacro("__ARM_FEATURE_SVE", "1");
415
416
0
  if ((FPU & NeonMode) && (FPU & SveMode))
417
0
    Builder.defineMacro("__ARM_NEON_SVE_BRIDGE", "1");
418
419
0
  if (HasSVE2)
420
0
    Builder.defineMacro("__ARM_FEATURE_SVE2", "1");
421
422
0
  if (HasSVE2 && HasSVE2AES)
423
0
    Builder.defineMacro("__ARM_FEATURE_SVE2_AES", "1");
424
425
0
  if (HasSVE2 && HasSVE2BitPerm)
426
0
    Builder.defineMacro("__ARM_FEATURE_SVE2_BITPERM", "1");
427
428
0
  if (HasSVE2 && HasSVE2SHA3)
429
0
    Builder.defineMacro("__ARM_FEATURE_SVE2_SHA3", "1");
430
431
0
  if (HasSVE2 && HasSVE2SM4)
432
0
    Builder.defineMacro("__ARM_FEATURE_SVE2_SM4", "1");
433
434
0
  if (HasCRC)
435
0
    Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
436
437
0
  if (HasRCPC3)
438
0
    Builder.defineMacro("__ARM_FEATURE_RCPC", "3");
439
0
  else if (HasRCPC)
440
0
    Builder.defineMacro("__ARM_FEATURE_RCPC", "1");
441
442
0
  if (HasFMV)
443
0
    Builder.defineMacro("__HAVE_FUNCTION_MULTI_VERSIONING", "1");
444
445
  // The __ARM_FEATURE_CRYPTO is deprecated in favor of finer grained feature
446
  // macros for AES, SHA2, SHA3 and SM4
447
0
  if (HasAES && HasSHA2)
448
0
    Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");
449
450
0
  if (HasAES)
451
0
    Builder.defineMacro("__ARM_FEATURE_AES", "1");
452
453
0
  if (HasSHA2)
454
0
    Builder.defineMacro("__ARM_FEATURE_SHA2", "1");
455
456
0
  if (HasSHA3) {
457
0
    Builder.defineMacro("__ARM_FEATURE_SHA3", "1");
458
0
    Builder.defineMacro("__ARM_FEATURE_SHA512", "1");
459
0
  }
460
461
0
  if (HasSM4) {
462
0
    Builder.defineMacro("__ARM_FEATURE_SM3", "1");
463
0
    Builder.defineMacro("__ARM_FEATURE_SM4", "1");
464
0
  }
465
466
0
  if (HasPAuth)
467
0
    Builder.defineMacro("__ARM_FEATURE_PAUTH", "1");
468
469
0
  if (HasUnaligned)
470
0
    Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");
471
472
0
  if ((FPU & NeonMode) && HasFullFP16)
473
0
    Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
474
0
  if (HasFullFP16)
475
0
   Builder.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");
476
477
0
  if (HasDotProd)
478
0
    Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");
479
480
0
  if (HasMTE)
481
0
    Builder.defineMacro("__ARM_FEATURE_MEMORY_TAGGING", "1");
482
483
0
  if (HasTME)
484
0
    Builder.defineMacro("__ARM_FEATURE_TME", "1");
485
486
0
  if (HasMatMul)
487
0
    Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");
488
489
0
  if (HasLSE)
490
0
    Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");
491
492
0
  if (HasBFloat16) {
493
0
    Builder.defineMacro("__ARM_FEATURE_BF16", "1");
494
0
    Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
495
0
    Builder.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
496
0
    Builder.defineMacro("__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", "1");
497
0
  }
498
499
0
  if ((FPU & SveMode) && HasBFloat16) {
500
0
    Builder.defineMacro("__ARM_FEATURE_SVE_BF16", "1");
501
0
  }
502
503
0
  if ((FPU & SveMode) && HasMatmulFP64)
504
0
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP64", "1");
505
506
0
  if ((FPU & SveMode) && HasMatmulFP32)
507
0
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP32", "1");
508
509
0
  if ((FPU & SveMode) && HasMatMul)
510
0
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_INT8", "1");
511
512
0
  if ((FPU & NeonMode) && HasFP16FML)
513
0
    Builder.defineMacro("__ARM_FEATURE_FP16_FML", "1");
514
515
0
  if (Opts.hasSignReturnAddress()) {
516
    // Bitmask:
517
    // 0: Protection using the A key
518
    // 1: Protection using the B key
519
    // 2: Protection including leaf functions
520
0
    unsigned Value = 0;
521
522
0
    if (Opts.isSignReturnAddressWithAKey())
523
0
      Value |= (1 << 0);
524
0
    else
525
0
      Value |= (1 << 1);
526
527
0
    if (Opts.isSignReturnAddressScopeAll())
528
0
      Value |= (1 << 2);
529
530
0
    Builder.defineMacro("__ARM_FEATURE_PAC_DEFAULT", std::to_string(Value));
531
0
  }
532
533
0
  if (Opts.BranchTargetEnforcement)
534
0
    Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");
535
536
0
  if (Opts.GuardedControlStack)
537
0
    Builder.defineMacro("__ARM_FEATURE_GCS_DEFAULT", "1");
538
539
0
  if (HasLS64)
540
0
    Builder.defineMacro("__ARM_FEATURE_LS64", "1");
541
542
0
  if (HasRandGen)
543
0
    Builder.defineMacro("__ARM_FEATURE_RNG", "1");
544
545
0
  if (HasMOPS)
546
0
    Builder.defineMacro("__ARM_FEATURE_MOPS", "1");
547
548
0
  if (HasD128)
549
0
    Builder.defineMacro("__ARM_FEATURE_SYSREG128", "1");
550
551
0
  if (HasGCS)
552
0
    Builder.defineMacro("__ARM_FEATURE_GCS", "1");
553
554
0
  if (*ArchInfo == llvm::AArch64::ARMV8_1A)
555
0
    getTargetDefinesARMV81A(Opts, Builder);
556
0
  else if (*ArchInfo == llvm::AArch64::ARMV8_2A)
557
0
    getTargetDefinesARMV82A(Opts, Builder);
558
0
  else if (*ArchInfo == llvm::AArch64::ARMV8_3A)
559
0
    getTargetDefinesARMV83A(Opts, Builder);
560
0
  else if (*ArchInfo == llvm::AArch64::ARMV8_4A)
561
0
    getTargetDefinesARMV84A(Opts, Builder);
562
0
  else if (*ArchInfo == llvm::AArch64::ARMV8_5A)
563
0
    getTargetDefinesARMV85A(Opts, Builder);
564
0
  else if (*ArchInfo == llvm::AArch64::ARMV8_6A)
565
0
    getTargetDefinesARMV86A(Opts, Builder);
566
0
  else if (*ArchInfo == llvm::AArch64::ARMV8_7A)
567
0
    getTargetDefinesARMV87A(Opts, Builder);
568
0
  else if (*ArchInfo == llvm::AArch64::ARMV8_8A)
569
0
    getTargetDefinesARMV88A(Opts, Builder);
570
0
  else if (*ArchInfo == llvm::AArch64::ARMV8_9A)
571
0
    getTargetDefinesARMV89A(Opts, Builder);
572
0
  else if (*ArchInfo == llvm::AArch64::ARMV9A)
573
0
    getTargetDefinesARMV9A(Opts, Builder);
574
0
  else if (*ArchInfo == llvm::AArch64::ARMV9_1A)
575
0
    getTargetDefinesARMV91A(Opts, Builder);
576
0
  else if (*ArchInfo == llvm::AArch64::ARMV9_2A)
577
0
    getTargetDefinesARMV92A(Opts, Builder);
578
0
  else if (*ArchInfo == llvm::AArch64::ARMV9_3A)
579
0
    getTargetDefinesARMV93A(Opts, Builder);
580
0
  else if (*ArchInfo == llvm::AArch64::ARMV9_4A)
581
0
    getTargetDefinesARMV94A(Opts, Builder);
582
0
  else if (*ArchInfo == llvm::AArch64::ARMV9_5A)
583
0
    getTargetDefinesARMV95A(Opts, Builder);
584
585
  // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8|16) builtins work.
586
0
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
587
0
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
588
0
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
589
0
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
590
0
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16");
591
592
  // Allow detection of fast FMA support.
593
0
  Builder.defineMacro("__FP_FAST_FMA", "1");
594
0
  Builder.defineMacro("__FP_FAST_FMAF", "1");
595
596
  // C/C++ operators work on both VLS and VLA SVE types
597
0
  if (FPU & SveMode)
598
0
    Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS", "2");
599
600
0
  if (Opts.VScaleMin && Opts.VScaleMin == Opts.VScaleMax) {
601
0
    Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.VScaleMin * 128));
602
0
  }
603
0
}
604
605
0
ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
606
0
  return llvm::ArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
607
0
                                         Builtin::FirstTSBuiltin);
608
0
}
609
610
std::optional<std::pair<unsigned, unsigned>>
611
0
AArch64TargetInfo::getVScaleRange(const LangOptions &LangOpts) const {
612
0
  if (LangOpts.VScaleMin || LangOpts.VScaleMax)
613
0
    return std::pair<unsigned, unsigned>(
614
0
        LangOpts.VScaleMin ? LangOpts.VScaleMin : 1, LangOpts.VScaleMax);
615
616
0
  if (hasFeature("sve"))
617
0
    return std::pair<unsigned, unsigned>(1, 16);
618
619
0
  return std::nullopt;
620
0
}
621
622
0
unsigned AArch64TargetInfo::multiVersionSortPriority(StringRef Name) const {
623
0
  if (Name == "default")
624
0
    return 0;
625
0
  for (const auto &E : llvm::AArch64::Extensions)
626
0
    if (Name == E.Name)
627
0
      return E.FmvPriority;
628
0
  return 0;
629
0
}
630
631
0
unsigned AArch64TargetInfo::multiVersionFeatureCost() const {
632
  // Take the maximum priority as per feature cost, so more features win.
633
0
  return llvm::AArch64::ExtensionInfo::MaxFMVPriority;
634
0
}
635
636
0
bool AArch64TargetInfo::doesFeatureAffectCodeGen(StringRef Name) const {
637
0
  auto F = llvm::find_if(llvm::AArch64::Extensions, [&](const auto &E) {
638
0
    return Name == E.Name && !E.DependentFeatures.empty();
639
0
  });
640
0
  return F != std::end(llvm::AArch64::Extensions);
641
0
}
642
643
0
StringRef AArch64TargetInfo::getFeatureDependencies(StringRef Name) const {
644
0
  auto F = llvm::find_if(llvm::AArch64::Extensions,
645
0
                         [&](const auto &E) { return Name == E.Name; });
646
0
  return F != std::end(llvm::AArch64::Extensions) ? F->DependentFeatures
647
0
                                                  : StringRef();
648
0
}
649
650
0
bool AArch64TargetInfo::validateCpuSupports(StringRef FeatureStr) const {
651
0
  for (const auto &E : llvm::AArch64::Extensions)
652
0
    if (FeatureStr == E.Name)
653
0
      return true;
654
0
  return false;
655
0
}
656
657
0
bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
658
0
  return llvm::StringSwitch<bool>(Feature)
659
0
      .Cases("aarch64", "arm64", "arm", true)
660
0
      .Case("fmv", HasFMV)
661
0
      .Cases("neon", "fp", "simd", FPU & NeonMode)
662
0
      .Case("jscvt", HasJSCVT)
663
0
      .Case("fcma", HasFCMA)
664
0
      .Case("rng", HasRandGen)
665
0
      .Case("flagm", HasFlagM)
666
0
      .Case("flagm2", HasAlternativeNZCV)
667
0
      .Case("fp16fml", HasFP16FML)
668
0
      .Case("dotprod", HasDotProd)
669
0
      .Case("sm4", HasSM4)
670
0
      .Case("rdm", HasRDM)
671
0
      .Case("lse", HasLSE)
672
0
      .Case("crc", HasCRC)
673
0
      .Case("sha2", HasSHA2)
674
0
      .Case("sha3", HasSHA3)
675
0
      .Cases("aes", "pmull", HasAES)
676
0
      .Cases("fp16", "fullfp16", HasFullFP16)
677
0
      .Case("dit", HasDIT)
678
0
      .Case("dpb", HasCCPP)
679
0
      .Case("dpb2", HasCCDP)
680
0
      .Case("rcpc", HasRCPC)
681
0
      .Case("frintts", HasFRInt3264)
682
0
      .Case("i8mm", HasMatMul)
683
0
      .Case("bf16", HasBFloat16)
684
0
      .Case("sve", FPU & SveMode)
685
0
      .Case("sve-bf16", FPU & SveMode && HasBFloat16)
686
0
      .Case("sve-i8mm", FPU & SveMode && HasMatMul)
687
0
      .Case("f32mm", FPU & SveMode && HasMatmulFP32)
688
0
      .Case("f64mm", FPU & SveMode && HasMatmulFP64)
689
0
      .Case("sve2", FPU & SveMode && HasSVE2)
690
0
      .Case("sve2-pmull128", FPU & SveMode && HasSVE2AES)
691
0
      .Case("sve2-bitperm", FPU & SveMode && HasSVE2BitPerm)
692
0
      .Case("sve2-sha3", FPU & SveMode && HasSVE2SHA3)
693
0
      .Case("sve2-sm4", FPU & SveMode && HasSVE2SM4)
694
0
      .Case("sme", HasSME)
695
0
      .Case("sme-f64f64", HasSMEF64F64)
696
0
      .Case("sme-i16i64", HasSMEI16I64)
697
0
      .Case("sme-fa64", HasSMEFA64)
698
0
      .Cases("memtag", "memtag2", HasMTE)
699
0
      .Case("sb", HasSB)
700
0
      .Case("predres", HasPredRes)
701
0
      .Cases("ssbs", "ssbs2", HasSSBS)
702
0
      .Case("bti", HasBTI)
703
0
      .Cases("ls64", "ls64_v", "ls64_accdata", HasLS64)
704
0
      .Case("wfxt", HasWFxT)
705
0
      .Case("rcpc3", HasRCPC3)
706
0
      .Default(false);
707
0
}
708
709
void AArch64TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
710
0
                                          StringRef Name, bool Enabled) const {
711
0
  Features[Name] = Enabled;
712
  // If the feature is an architecture feature (like v8.2a), add all previous
713
  // architecture versions and any dependant target features.
714
0
  const std::optional<llvm::AArch64::ArchInfo> ArchInfo =
715
0
      llvm::AArch64::ArchInfo::findBySubArch(Name);
716
717
0
  if (!ArchInfo)
718
0
    return; // Not an architecture, nothing more to do.
719
720
  // Disabling an architecture feature does not affect dependent features
721
0
  if (!Enabled)
722
0
    return;
723
724
0
  for (const auto *OtherArch : llvm::AArch64::ArchInfos)
725
0
    if (ArchInfo->implies(*OtherArch))
726
0
      Features[OtherArch->getSubArch()] = true;
727
728
  // Set any features implied by the architecture
729
0
  std::vector<StringRef> CPUFeats;
730
0
  if (llvm::AArch64::getExtensionFeatures(ArchInfo->DefaultExts, CPUFeats)) {
731
0
    for (auto F : CPUFeats) {
732
0
      assert(F[0] == '+' && "Expected + in target feature!");
733
0
      Features[F.drop_front(1)] = true;
734
0
    }
735
0
  }
736
0
}
737
738
// Translate the final, resolved list of "+feat"/"-feat" target-feature
// strings into the boolean/bitmask member state this TargetInfo exposes
// (HasSVE2, FPU, ArchInfo, ...). Features that structurally imply others
// (e.g. any SVE2 variant implies NEON, SVE, SVE2 and fullfp16) set all of
// the implied members here as well. Always returns true.
bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
                                             DiagnosticsEngine &Diags) {
  for (const auto &Feature : Features) {
    // Explicit disables are remembered and applied to FPU at the end, so
    // that later "+..." entries in the list cannot re-enable them.
    if (Feature == "-fp-armv8")
      HasNoFP = true;
    if (Feature == "-neon")
      HasNoNeon = true;
    if (Feature == "-sve")
      HasNoSVE = true;

    if (Feature == "+neon" || Feature == "+fp-armv8")
      FPU |= NeonMode;
    if (Feature == "+jscvt") {
      HasJSCVT = true;
      FPU |= NeonMode;
    }
    if (Feature == "+fcma") {
      HasFCMA = true;
      FPU |= NeonMode;
    }

    // Every SVE(2) variant implies NEON, SVE and full FP16 support.
    if (Feature == "+sve") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
    }
    if (Feature == "+sve2") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
    }
    if (Feature == "+sve2-aes") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2AES = true;
    }
    if (Feature == "+sve2-sha3") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2SHA3 = true;
    }
    if (Feature == "+sve2-sm4") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2SM4 = true;
    }
    if (Feature == "+sve2-bitperm") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2BitPerm = true;
    }
    if (Feature == "+f32mm") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasMatmulFP32 = true;
    }
    if (Feature == "+f64mm") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasMatmulFP64 = true;
    }
    // SME variants imply BF16 and full FP16.
    if (Feature == "+sme") {
      HasSME = true;
      HasBFloat16 = true;
      HasFullFP16 = true;
    }
    if (Feature == "+sme-f64f64") {
      HasSME = true;
      HasSMEF64F64 = true;
      HasBFloat16 = true;
      HasFullFP16 = true;
    }
    if (Feature == "+sme-i16i64") {
      HasSME = true;
      HasSMEI16I64 = true;
      HasBFloat16 = true;
      HasFullFP16 = true;
    }
    if (Feature == "+sme-fa64") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasSME = true;
      HasSVE2 = true;
      HasSMEFA64 = true;
    }
    if (Feature == "+sb")
      HasSB = true;
    if (Feature == "+predres")
      HasPredRes = true;
    if (Feature == "+ssbs")
      HasSSBS = true;
    if (Feature == "+bti")
      HasBTI = true;
    if (Feature == "+wfxt")
      HasWFxT = true;
    if (Feature == "-fmv")
      HasFMV = false;
    if (Feature == "+crc")
      HasCRC = true;
    if (Feature == "+rcpc")
      HasRCPC = true;
    if (Feature == "+aes") {
      FPU |= NeonMode;
      HasAES = true;
    }
    if (Feature == "+sha2") {
      FPU |= NeonMode;
      HasSHA2 = true;
    }
    if (Feature == "+sha3") {
      FPU |= NeonMode;
      HasSHA2 = true;
      HasSHA3 = true;
    }
    if (Feature == "+rdm") {
      FPU |= NeonMode;
      HasRDM = true;
    }
    if (Feature == "+dit")
      HasDIT = true;
    // NOTE(review): "+cccp" looks like it may be a typo for "+ccpp" — confirm
    // against the backend's SubtargetFeature name before relying on it.
    if (Feature == "+cccp")
      HasCCPP = true;
    if (Feature == "+ccdp") {
      HasCCPP = true;
      HasCCDP = true;
    }
    if (Feature == "+fptoint")
      HasFRInt3264 = true;
    if (Feature == "+sm4") {
      FPU |= NeonMode;
      HasSM4 = true;
    }
    if (Feature == "+strict-align")
      HasUnaligned = false;
    // All predecessor archs are added but select the latest one for ArchKind.
    if (Feature == "+v8a" && ArchInfo->Version < llvm::AArch64::ARMV8A.Version)
      ArchInfo = &llvm::AArch64::ARMV8A;
    if (Feature == "+v8.1a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_1A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_1A;
    if (Feature == "+v8.2a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_2A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_2A;
    if (Feature == "+v8.3a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_3A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_3A;
    if (Feature == "+v8.4a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_4A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_4A;
    if (Feature == "+v8.5a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_5A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_5A;
    if (Feature == "+v8.6a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_6A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_6A;
    if (Feature == "+v8.7a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_7A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_7A;
    if (Feature == "+v8.8a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_8A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_8A;
    if (Feature == "+v8.9a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_9A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_9A;
    if (Feature == "+v9a" && ArchInfo->Version < llvm::AArch64::ARMV9A.Version)
      ArchInfo = &llvm::AArch64::ARMV9A;
    if (Feature == "+v9.1a" &&
        ArchInfo->Version < llvm::AArch64::ARMV9_1A.Version)
      ArchInfo = &llvm::AArch64::ARMV9_1A;
    if (Feature == "+v9.2a" &&
        ArchInfo->Version < llvm::AArch64::ARMV9_2A.Version)
      ArchInfo = &llvm::AArch64::ARMV9_2A;
    if (Feature == "+v9.3a" &&
        ArchInfo->Version < llvm::AArch64::ARMV9_3A.Version)
      ArchInfo = &llvm::AArch64::ARMV9_3A;
    if (Feature == "+v9.4a" &&
        ArchInfo->Version < llvm::AArch64::ARMV9_4A.Version)
      ArchInfo = &llvm::AArch64::ARMV9_4A;
    if (Feature == "+v9.5a" &&
        ArchInfo->Version < llvm::AArch64::ARMV9_5A.Version)
      ArchInfo = &llvm::AArch64::ARMV9_5A;
    // v8-R is selected unconditionally (no version comparison).
    if (Feature == "+v8r")
      ArchInfo = &llvm::AArch64::ARMV8R;
    if (Feature == "+fullfp16") {
      FPU |= NeonMode;
      HasFullFP16 = true;
    }
    if (Feature == "+dotprod") {
      FPU |= NeonMode;
      HasDotProd = true;
    }
    if (Feature == "+fp16fml") {
      FPU |= NeonMode;
      HasFullFP16 = true;
      HasFP16FML = true;
    }
    if (Feature == "+mte")
      HasMTE = true;
    if (Feature == "+tme")
      HasTME = true;
    if (Feature == "+pauth")
      HasPAuth = true;
    if (Feature == "+i8mm")
      HasMatMul = true;
    if (Feature == "+bf16")
      HasBFloat16 = true;
    if (Feature == "+lse")
      HasLSE = true;
    if (Feature == "+ls64")
      HasLS64 = true;
    if (Feature == "+rand")
      HasRandGen = true;
    if (Feature == "+flagm")
      HasFlagM = true;
    if (Feature == "+altnzcv") {
      HasFlagM = true;
      HasAlternativeNZCV = true;
    }
    if (Feature == "+mops")
      HasMOPS = true;
    if (Feature == "+d128")
      HasD128 = true;
    if (Feature == "+gcs")
      HasGCS = true;
    if (Feature == "+rcpc3")
      HasRCPC3 = true;
  }

  // Check features that are manually disabled by command line options.
  // This needs to be checked after architecture-related features are handled,
  // making sure they are properly disabled when required.
  for (const auto &Feature : Features) {
    if (Feature == "-d128")
      HasD128 = false;
  }

  setDataLayout();
  setArchFeatures();

  // Apply the remembered explicit disables last so they win over any
  // implication performed above. No-FP strips everything; no-NEON also
  // strips SVE; no-SVE strips only SVE.
  if (HasNoFP) {
    FPU &= ~FPUMode;
    FPU &= ~NeonMode;
    FPU &= ~SveMode;
  }
  if (HasNoNeon) {
    FPU &= ~NeonMode;
    FPU &= ~SveMode;
  }
  if (HasNoSVE)
    FPU &= ~SveMode;

  return true;
}
1002
1003
// Build the initial feature map for a given CPU plus a raw feature vector.
// CPU-implied features are added first, then dependent features, then the
// caller's explicit '+/-' features (which may override earlier entries,
// since later entries win in TargetInfo::initFeatureMap).
bool AArch64TargetInfo::initFeatureMap(
    llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
    const std::vector<std::string> &FeaturesVec) const {
  std::vector<std::string> UpdatedFeaturesVec;
  // Parse the CPU and add any implied features.
  std::optional<llvm::AArch64::CpuInfo> CpuInfo = llvm::AArch64::parseCpu(CPU);
  if (CpuInfo) {
    auto Exts = CpuInfo->getImpliedExtensions();
    std::vector<StringRef> CPUFeats;
    llvm::AArch64::getExtensionFeatures(Exts, CPUFeats);
    for (auto F : CPUFeats) {
      assert((F[0] == '+' || F[0] == '-') && "Expected +/- in target feature!");
      UpdatedFeaturesVec.push_back(F.str());
    }
  }

  // Process target and dependent features. This is done in two loops collecting
  // them into UpdatedFeaturesVec: first to add dependent '+'features, second to
  // add target '+/-'features that can later disable some of features added on
  // the first loop. Function Multi Versioning features begin with '?'.
  for (const auto &Feature : FeaturesVec)
    if (((Feature[0] == '?' || Feature[0] == '+')) &&
        AArch64TargetInfo::doesFeatureAffectCodeGen(Feature.substr(1))) {
      // Expand the comma-separated dependency list for this feature.
      StringRef DepFeatures =
          AArch64TargetInfo::getFeatureDependencies(Feature.substr(1));
      SmallVector<StringRef, 1> AttrFeatures;
      DepFeatures.split(AttrFeatures, ",");
      for (auto F : AttrFeatures)
        UpdatedFeaturesVec.push_back(F.str());
    }
  for (const auto &Feature : FeaturesVec)
    if (Feature[0] != '?') {
      std::string UpdatedFeature = Feature;
      if (Feature[0] == '+') {
        // Canonicalize a '+ext' name to its backend feature spelling when the
        // extension is known; unknown names pass through unchanged.
        std::optional<llvm::AArch64::ExtensionInfo> Extension =
          llvm::AArch64::parseArchExtension(Feature.substr(1));
        if (Extension)
          UpdatedFeature = Extension->Feature.str();
      }
      UpdatedFeaturesVec.push_back(UpdatedFeature);
    }

  return TargetInfo::initFeatureMap(Features, Diags, CPU, UpdatedFeaturesVec);
}
1047
1048
// Parse AArch64 Target attributes, which are a comma separated list of:
1049
//  "arch=<arch>" - parsed to features as per -march=..
1050
//  "cpu=<cpu>" - parsed to features as per -mcpu=.., with CPU set to <cpu>
1051
//  "tune=<cpu>" - TuneCPU set to <cpu>
1052
//  "feature", "no-feature" - Add (or remove) feature.
1053
//  "+feature", "+nofeature" - Add (or remove) feature.
1054
0
// Parse one __attribute__((target("..."))) string into a ParsedTargetAttr.
// See the comment block above for the accepted grammar. Duplicate "arch=",
// "cpu=" or "tune=" entries are recorded in Ret.Duplicate for later
// diagnosis; unknown feature names are pushed through verbatim so Sema can
// reject them.
ParsedTargetAttr AArch64TargetInfo::parseTargetAttr(StringRef Features) const {
  ParsedTargetAttr Ret;
  if (Features == "default")
    return Ret;
  SmallVector<StringRef, 1> AttrFeatures;
  Features.split(AttrFeatures, ",");
  bool FoundArch = false;

  // Splits a "+feat1+feat2..." tail and appends each piece to Features,
  // translating known extension names to backend feature names.
  auto SplitAndAddFeatures = [](StringRef FeatString,
                                std::vector<std::string> &Features) {
    SmallVector<StringRef, 8> SplitFeatures;
    FeatString.split(SplitFeatures, StringRef("+"), -1, false);
    for (StringRef Feature : SplitFeatures) {
      StringRef FeatureName = llvm::AArch64::getArchExtFeature(Feature);
      if (!FeatureName.empty())
        Features.push_back(FeatureName.str());
      else
        // Pushing the original feature string to give a sema error later on
        // when they get checked.
        if (Feature.starts_with("no"))
          Features.push_back("-" + Feature.drop_front(2).str());
        else
          Features.push_back("+" + Feature.str());
    }
  };

  for (auto &Feature : AttrFeatures) {
    Feature = Feature.trim();
    // "fpmath=" is accepted but ignored on AArch64.
    if (Feature.starts_with("fpmath="))
      continue;

    if (Feature.starts_with("branch-protection=")) {
      Ret.BranchProtection = Feature.split('=').second.trim();
      continue;
    }

    if (Feature.starts_with("arch=")) {
      if (FoundArch)
        Ret.Duplicate = "arch=";
      FoundArch = true;
      // Split into the architecture name and any trailing "+feat" list.
      std::pair<StringRef, StringRef> Split =
          Feature.split("=").second.trim().split("+");
      const std::optional<llvm::AArch64::ArchInfo> AI =
          llvm::AArch64::parseArch(Split.first);

      // Parse the architecture version, adding the required features to
      // Ret.Features.
      if (!AI)
        continue;
      Ret.Features.push_back(AI->ArchFeature.str());
      // Add any extra features, after the +
      SplitAndAddFeatures(Split.second, Ret.Features);
    } else if (Feature.starts_with("cpu=")) {
      if (!Ret.CPU.empty())
        Ret.Duplicate = "cpu=";
      else {
        // Split the cpu string into "cpu=", "cortex-a710" and any remaining
        // "+feat" features.
        std::pair<StringRef, StringRef> Split =
            Feature.split("=").second.trim().split("+");
        Ret.CPU = Split.first;
        SplitAndAddFeatures(Split.second, Ret.Features);
      }
    } else if (Feature.starts_with("tune=")) {
      if (!Ret.Tune.empty())
        Ret.Duplicate = "tune=";
      else
        Ret.Tune = Feature.split("=").second.trim();
    } else if (Feature.starts_with("+")) {
      SplitAndAddFeatures(Feature, Ret.Features);
    } else if (Feature.starts_with("no-")) {
      // "no-feat": translate to a "-" backend feature when known.
      StringRef FeatureName =
          llvm::AArch64::getArchExtFeature(Feature.split("-").second);
      if (!FeatureName.empty())
        Ret.Features.push_back("-" + FeatureName.drop_front(1).str());
      else
        Ret.Features.push_back("-" + Feature.split("-").second.str());
    } else {
      // Try parsing the string to the internal target feature name. If it is
      // invalid, add the original string (which could already be an internal
      // name). These should be checked later by isValidFeatureName.
      StringRef FeatureName = llvm::AArch64::getArchExtFeature(Feature);
      if (!FeatureName.empty())
        Ret.Features.push_back(FeatureName.str());
      else
        Ret.Features.push_back("+" + Feature.str());
    }
  }
  return Ret;
}
1144
1145
0
// The __bf16 type is made available unconditionally on AArch64.
bool AArch64TargetInfo::hasBFloat16Type() const {
  return true;
}
1148
1149
// Report which calling conventions the AArch64 target accepts; anything not
// explicitly listed produces a warning.
TargetInfo::CallingConvCheckResult
AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
  switch (CC) {
  default:
    return CCCR_Warning;
  case CC_C:
  case CC_Swift:
  case CC_SwiftAsync:
  case CC_PreserveMost:
  case CC_PreserveAll:
  case CC_OpenCLKernel:
  case CC_AArch64VectorCall:
  case CC_AArch64SVEPCS:
  case CC_Win64:
    return CCCR_OK;
  }
}
1166
1167
0
bool AArch64TargetInfo::isCLZForZeroUndef() const {
  // Counting leading zeros of zero is well-defined for this target.
  return false;
}
1168
1169
0
// AAPCS64 defines a structured va_list; use the dedicated builtin kind.
TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::AArch64ABIBuiltinVaList;
}
1172
1173
// Register names accepted in GCC-style inline assembly, grouped by class.
const char *const AArch64TargetInfo::GCCRegNames[] = {
    // 32-bit Integer registers
    "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
    "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
    "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",

    // 64-bit Integer registers
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
    "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22",
    "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp",

    // 32-bit floating point registers
    "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
    "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22",
    "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",

    // 64-bit floating point registers
    "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
    "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",

    // Neon vector registers
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
    "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22",
    "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",

    // SVE vector registers
    "z0",  "z1",  "z2",  "z3",  "z4",  "z5",  "z6",  "z7",  "z8",  "z9",  "z10",
    "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21",
    "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",

    // SVE predicate registers
    "p0",  "p1",  "p2",  "p3",  "p4",  "p5",  "p6",  "p7",  "p8",  "p9",  "p10",
    "p11", "p12", "p13", "p14", "p15",

    // SVE predicate-as-counter registers
    "pn0",  "pn1",  "pn2",  "pn3",  "pn4",  "pn5",  "pn6",  "pn7",  "pn8",
    "pn9",  "pn10", "pn11", "pn12", "pn13", "pn14", "pn15"
};
1212
1213
0
// Expose the GCC inline-asm register name table above.
ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
  return llvm::ArrayRef(GCCRegNames);
}
1216
1217
// Alternate spellings accepted for registers in GCC-style inline assembly.
const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
    {{"w31"}, "wsp"},
    {{"x31"}, "sp"},
    // GCC rN registers are aliases of xN registers.
    {{"r0"}, "x0"},
    {{"r1"}, "x1"},
    {{"r2"}, "x2"},
    {{"r3"}, "x3"},
    {{"r4"}, "x4"},
    {{"r5"}, "x5"},
    {{"r6"}, "x6"},
    {{"r7"}, "x7"},
    {{"r8"}, "x8"},
    {{"r9"}, "x9"},
    {{"r10"}, "x10"},
    {{"r11"}, "x11"},
    {{"r12"}, "x12"},
    {{"r13"}, "x13"},
    {{"r14"}, "x14"},
    {{"r15"}, "x15"},
    {{"r16"}, "x16"},
    {{"r17"}, "x17"},
    {{"r18"}, "x18"},
    {{"r19"}, "x19"},
    {{"r20"}, "x20"},
    {{"r21"}, "x21"},
    {{"r22"}, "x22"},
    {{"r23"}, "x23"},
    {{"r24"}, "x24"},
    {{"r25"}, "x25"},
    {{"r26"}, "x26"},
    {{"r27"}, "x27"},
    {{"r28"}, "x28"},
    {{"r29", "x29"}, "fp"},
    {{"r30", "x30"}, "lr"},
    // The S/D/Q and W/X registers overlap, but aren't really aliases; we
    // don't want to substitute one of these for a different-sized one.
};
1255
1256
0
// Expose the register alias table above.
ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
  return llvm::ArrayRef(GCCRegAliases);
}
1259
1260
// Returns the length of cc constraint.
1261
0
static unsigned matchAsmCCConstraint(const char *Name) {
1262
0
  constexpr unsigned len = 5;
1263
0
  auto RV = llvm::StringSwitch<unsigned>(Name)
1264
0
                .Case("@cceq", len)
1265
0
                .Case("@ccne", len)
1266
0
                .Case("@cchs", len)
1267
0
                .Case("@cccs", len)
1268
0
                .Case("@cccc", len)
1269
0
                .Case("@cclo", len)
1270
0
                .Case("@ccmi", len)
1271
0
                .Case("@ccpl", len)
1272
0
                .Case("@ccvs", len)
1273
0
                .Case("@ccvc", len)
1274
0
                .Case("@cchi", len)
1275
0
                .Case("@ccls", len)
1276
0
                .Case("@ccge", len)
1277
0
                .Case("@cclt", len)
1278
0
                .Case("@ccgt", len)
1279
0
                .Case("@ccle", len)
1280
0
                .Default(0);
1281
0
  return RV;
1282
0
}
1283
1284
// Canonicalize one inline-asm constraint, advancing Constraint past any
// extra characters consumed (the caller presumably advances past the first
// character itself — hence the "+= Len - 1" style below; confirm against
// TargetInfo's constraint-walking loop).
std::string
AArch64TargetInfo::convertConstraint(const char *&Constraint) const {
  std::string R;
  switch (*Constraint) {
  case 'U': // Three-character constraint; add "@3" hint for later parsing.
    R = std::string("@3") + std::string(Constraint, 3);
    Constraint += 2;
    break;
  case '@':
    // "@cc<cond>" becomes "{@cc<cond>}"; otherwise pass the '@' through.
    if (const unsigned Len = matchAsmCCConstraint(Constraint)) {
      std::string Converted = "{" + std::string(Constraint, Len) + "}";
      Constraint += Len - 1;
      return Converted;
    }
    return std::string(1, *Constraint);
  default:
    R = TargetInfo::convertConstraint(Constraint);
    break;
  }
  return R;
}
1305
1306
// Validate one GCC inline-asm constraint character (or multi-character
// constraint starting at Name), recording allowed operand kinds in Info and
// advancing Name past any extra characters consumed. Returns false for
// unrecognized constraints.
bool AArch64TargetInfo::validateAsmConstraint(
    const char *&Name, TargetInfo::ConstraintInfo &Info) const {
  switch (*Name) {
  default:
    return false;
  case 'w': // Floating point and SIMD registers (V0-V31)
    Info.setAllowsRegister();
    return true;
  case 'I': // Constant that can be used with an ADD instruction
  case 'J': // Constant that can be used with a SUB instruction
  case 'K': // Constant that can be used with a 32-bit logical instruction
  case 'L': // Constant that can be used with a 64-bit logical instruction
  case 'M': // Constant that can be used as a 32-bit MOV immediate
  case 'N': // Constant that can be used as a 64-bit MOV immediate
  case 'Y': // Floating point constant zero
  case 'Z': // Integer constant zero
    return true;
  case 'Q': // A memory reference with base register and no offset
    Info.setAllowsMemory();
    return true;
  case 'S': // A symbolic address
    Info.setAllowsRegister();
    return true;
  case 'U':
    if (Name[1] == 'p' &&
        (Name[2] == 'l' || Name[2] == 'a' || Name[2] == 'h')) {
      // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7, "Uph"=P8-P15)
      Info.setAllowsRegister();
      Name += 2;
      return true;
    }
    if (Name[1] == 'c' && (Name[2] == 'i' || Name[2] == 'j')) {
      // Gpr registers ("Uci"=w8-11, "Ucj"=w12-15)
      Info.setAllowsRegister();
      Name += 2;
      return true;
    }
    // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
    // Utf: A memory address suitable for ldp/stp in TF mode.
    // Usa: An absolute symbolic address.
    // Ush: The high part (bits 32:12) of a pc-relative symbolic address.

    // Better to return an error saying that it's an unrecognised constraint
    // even if this is a valid constraint in gcc.
    return false;
  case 'z': // Zero register, wzr or xzr
    Info.setAllowsRegister();
    return true;
  case 'x': // Floating point and SIMD registers (V0-V15)
    Info.setAllowsRegister();
    return true;
  case 'y': // SVE registers (V0-V7)
    Info.setAllowsRegister();
    return true;
  case '@':
    // CC condition
    if (const unsigned Len = matchAsmCCConstraint(Name)) {
      Name += Len - 1;
      Info.setAllowsRegister();
      return true;
    }
    // Unrecognized "@..." falls through to the failure return below.
  }
  return false;
}
1370
1371
// Check whether an operand modifier makes sense for a constraint at the
// given operand bit-size; on mismatch, suggest the modifier to use instead.
bool AArch64TargetInfo::validateConstraintModifier(
    StringRef Constraint, char Modifier, unsigned Size,
    std::string &SuggestedModifier) const {
  // Strip off constraint modifiers.
  Constraint = Constraint.ltrim("=+&");

  switch (Constraint[0]) {
  default:
    return true;
  case 'z':
  case 'r': {
    switch (Modifier) {
    case 'x':
    case 'w':
      // For now assume that the person knows what they're
      // doing with the modifier.
      return true;
    default:
      // By default an 'r' constraint will be in the 'x'
      // registers.
      if (Size == 64)
        return true;

      // 512-bit operands are only valid with the LS64 extension.
      if (Size == 512)
        return HasLS64;

      SuggestedModifier = "w";
      return false;
    }
  }
  }
}
1403
1404
0
// No implicit inline-asm clobbers for this target.
std::string_view AArch64TargetInfo::getClobbers() const { return ""; }
1405
1406
0
// The first two exception-handling data registers map to DWARF regs 0 and 1
// (x0/x1); any other index is unsupported.
int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
  return RegNo <= 1 ? static_cast<int>(RegNo) : -1;
}
1413
1414
0
// __int128 is always available on AArch64.
bool AArch64TargetInfo::hasInt128Type() const { return true; }
1415
1416
// Little-endian AArch64: everything is inherited from the generic target.
AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
1419
1420
0
void AArch64leTargetInfo::setDataLayout() {
1421
0
  if (getTriple().isOSBinFormatMachO()) {
1422
0
    if(getTriple().isArch32Bit())
1423
0
      resetDataLayout("e-m:o-p:32:32-i64:64-i128:128-n32:64-S128", "_");
1424
0
    else
1425
0
      resetDataLayout("e-m:o-i64:64-i128:128-n32:64-S128", "_");
1426
0
  } else
1427
0
    resetDataLayout("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
1428
0
}
1429
1430
// Define the little-endian marker macro before the common AArch64 defines.
void AArch64leTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EL__");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
1435
1436
// Big-endian AArch64: everything is inherited from the generic target.
AArch64beTargetInfo::AArch64beTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
1439
1440
// Define the big-endian marker macros before the common AArch64 defines.
void AArch64beTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EB__");
  Builder.defineMacro("__AARCH_BIG_ENDIAN");
  Builder.defineMacro("__ARM_BIG_ENDIAN");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
1447
1448
0
// Big-endian layout; Mach-O does not support big-endian AArch64.
void AArch64beTargetInfo::setDataLayout() {
  assert(!getTriple().isOSBinFormatMachO());
  resetDataLayout("E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
}
1452
1453
// Windows-on-ARM64 target: applies the LLP64 type model on top of the
// little-endian AArch64 target.
WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
                                               const TargetOptions &Opts)
    : WindowsTargetInfo<AArch64leTargetInfo>(Triple, Opts), Triple(Triple) {

  // This is an LLP64 platform.
  // int:4, long:4, long long:8, long double:8.
  IntWidth = IntAlign = 32;
  LongWidth = LongAlign = 32;
  DoubleAlign = LongLongAlign = 64;
  LongDoubleWidth = LongDoubleAlign = 64;
  // long double is plain IEEE double on Windows.
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();
  // 64-bit-wide integer typedefs (since long is only 32 bits here).
  IntMaxType = SignedLongLong;
  Int64Type = SignedLongLong;
  SizeType = UnsignedLongLong;
  PtrDiffType = SignedLongLong;
  IntPtrType = SignedLongLong;
}
1470
1471
0
void WindowsARM64TargetInfo::setDataLayout() {
1472
0
  resetDataLayout(Triple.isOSBinFormatMachO()
1473
0
                      ? "e-m:o-i64:64-i128:128-n32:64-S128"
1474
0
                      : "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128",
1475
0
                  Triple.isOSBinFormatMachO() ? "_" : "");
1476
0
}
1477
1478
// Windows on ARM64 uses a plain char* va_list rather than the AAPCS64 one.
TargetInfo::BuiltinVaListKind
WindowsARM64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}
1482
1483
// Calling-convention acceptance for Windows on ARM64: x86-specific
// conventions are silently ignored (MSVC compatibility), a known-good set is
// accepted, and everything else warns.
TargetInfo::CallingConvCheckResult
WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
  switch (CC) {
  case CC_X86StdCall:
  case CC_X86ThisCall:
  case CC_X86FastCall:
  case CC_X86VectorCall:
    return CCCR_Ignore;
  case CC_C:
  case CC_OpenCLKernel:
  case CC_PreserveMost:
  case CC_PreserveAll:
  case CC_Swift:
  case CC_SwiftAsync:
  case CC_Win64:
    return CCCR_OK;
  default:
    return CCCR_Warning;
  }
}
1503
1504
// MSVC-environment ARM64 target: Windows ARM64 with the Microsoft C++ ABI.
MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::Microsoft);
}
1509
1510
// MSVC-style architecture macros. Arm64EC builds masquerade as x64
// (_M_X64/_M_AMD64) in addition to _M_ARM64EC; native builds get _M_ARM64.
void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
  if (getTriple().isWindowsArm64EC()) {
    Builder.defineMacro("_M_X64", "100");
    Builder.defineMacro("_M_AMD64", "100");
    Builder.defineMacro("_M_ARM64EC", "1");
  } else {
    Builder.defineMacro("_M_ARM64", "1");
  }
}
1521
1522
// Always use the Microsoft Win64 convention kind, regardless of the
// ClangABICompat4 setting (parameter intentionally unused).
TargetInfo::CallingConvKind
MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
  return CCK_MicrosoftWin64;
}
1526
1527
0
unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize) const {
1528
0
  unsigned Align = WindowsARM64TargetInfo::getMinGlobalAlign(TypeSize);
1529
1530
  // MSVC does size based alignment for arm64 based on alignment section in
1531
  // below document, replicate that to keep alignment consistent with object
1532
  // files compiled by MSVC.
1533
  // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
1534
0
  if (TypeSize >= 512) {              // TypeSize >= 64 bytes
1535
0
    Align = std::max(Align, 128u);    // align type at least 16 bytes
1536
0
  } else if (TypeSize >= 64) {        // TypeSize >= 8 bytes
1537
0
    Align = std::max(Align, 64u);     // align type at least 8 butes
1538
0
  } else if (TypeSize >= 16) {        // TypeSize >= 2 bytes
1539
0
    Align = std::max(Align, 32u);     // align type at least 4 bytes
1540
0
  }
1541
0
  return Align;
1542
0
}
1543
1544
// MinGW ARM64 target: Windows ARM64 with the generic (Itanium-family)
// AArch64 C++ ABI instead of the Microsoft one.
MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
                                           const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::GenericAArch64);
}
1549
1550
// Darwin (macOS/iOS/watchOS) AArch64 target: Darwin-specific type and ABI
// adjustments on top of the little-endian target.
DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple &Triple,
                                                 const TargetOptions &Opts)
    : DarwinTargetInfo<AArch64leTargetInfo>(Triple, Opts) {
  Int64Type = SignedLongLong;
  if (getTriple().isArch32Bit())
    IntMaxType = SignedLongLong;

  WCharType = SignedInt;
  // On Darwin, BOOL remains a plain (unsigned) char-compatible type.
  UseSignedCharForObjCBool = false;

  // long double is IEEE double on Darwin AArch64.
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();

  UseZeroLengthBitfieldAlignment = false;

  if (getTriple().isArch32Bit()) {
    // 32-bit arm64_32 (the WatchOS C++ ABI is selected below) uses ARM-style
    // bitfield layout rules.
    UseBitFieldTypeAlignment = false;
    ZeroLengthBitfieldBoundary = 32;
    UseZeroLengthBitfieldAlignment = true;
    TheCXXABI.set(TargetCXXABI::WatchOS);
  } else
    TheCXXABI.set(TargetCXXABI::AppleARM64);
}
1573
1574
// Darwin-specific predefined macros (arch markers, arm64e, plus the common
// Darwin platform defines).
void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
                                           const llvm::Triple &Triple,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64_SIMD__");
  if (Triple.isArch32Bit())
    Builder.defineMacro("__ARM64_ARCH_8_32__");
  else
    Builder.defineMacro("__ARM64_ARCH_8__");
  Builder.defineMacro("__ARM_NEON__");
  Builder.defineMacro("__REGISTER_PREFIX__", "");
  Builder.defineMacro("__arm64", "1");
  Builder.defineMacro("__arm64__", "1");

  // Pointer-authenticating arm64e flavour.
  if (Triple.isArm64e())
    Builder.defineMacro("__arm64e__", "1");

  getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion);
}
1592
1593
// Darwin AArch64 uses a plain char* va_list rather than the AAPCS64 one.
TargetInfo::BuiltinVaListKind
DarwinAArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}
1597
1598
// 64-bit RenderScript is aarch64: rewrite the triple's architecture to
// "aarch64" (keeping vendor/OS/environment) and mark the target.
RenderScript64TargetInfo::RenderScript64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : AArch64leTargetInfo(llvm::Triple("aarch64", Triple.getVendorName(),
                                       Triple.getOSName(),
                                       Triple.getEnvironmentName()),
                          Opts) {
  IsRenderScriptTarget = true;
}
1607
1608
// Define the RenderScript marker macro before the little-endian defines.
void RenderScript64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__RENDERSCRIPT__");
  AArch64leTargetInfo::getTargetDefines(Opts, Builder);
}