Coverage Report

Created: 2024-01-17 10:31

/src/llvm-project/clang/lib/CodeGen/Targets/X86.cpp
Line | Count | Source
1
//===- X86.cpp ------------------------------------------------------------===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
9
#include "ABIInfoImpl.h"
10
#include "TargetInfo.h"
11
#include "clang/Basic/DiagnosticFrontend.h"
12
#include "llvm/ADT/SmallBitVector.h"
13
14
using namespace clang;
15
using namespace clang::CodeGen;
16
17
namespace {
18
19
/// IsX86_MMXType - Return true if this is an MMX type.
20
0
bool IsX86_MMXType(llvm::Type *IRType) {
21
  // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
22
0
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
23
0
    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
24
0
    IRType->getScalarSizeInBits() != 64;
25
0
}
26
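A minimal sketch, assuming the LLVM C++ API, of what the predicate above accepts; the demo helper below is hypothetical and only illustrates the rule (64-bit vectors with an integer element type narrower than 64 bits count as MMX):

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"

// Hypothetical demonstration of IsX86_MMXType (defined above in this file).
static bool DemoIsMMX() {
  llvm::LLVMContext Ctx;
  llvm::Type *V4I16 = llvm::FixedVectorType::get(llvm::Type::getInt16Ty(Ctx), 4); // <4 x i16>
  llvm::Type *V1I64 = llvm::FixedVectorType::get(llvm::Type::getInt64Ty(Ctx), 1); // <1 x i64>
  llvm::Type *V2F32 = llvm::FixedVectorType::get(llvm::Type::getFloatTy(Ctx), 2); // <2 x float>
  // <4 x i16>: 64-bit integer vector with 16-bit elements -> MMX.
  // <1 x i64>: element scalar size is 64                   -> not MMX.
  // <2 x float>: element type is not an integer            -> not MMX.
  return IsX86_MMXType(V4I16) && !IsX86_MMXType(V1I64) && !IsX86_MMXType(V2F32);
}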
27
static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
28
                                          StringRef Constraint,
29
0
                                          llvm::Type* Ty) {
30
0
  bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
31
0
                     .Cases("y", "&y", "^Ym", true)
32
0
                     .Default(false);
33
0
  if (IsMMXCons && Ty->isVectorTy()) {
34
0
    if (cast<llvm::VectorType>(Ty)->getPrimitiveSizeInBits().getFixedValue() !=
35
0
        64) {
36
      // Invalid MMX constraint
37
0
      return nullptr;
38
0
    }
39
40
0
    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
41
0
  }
42
43
0
  if (Constraint == "k") {
44
0
    llvm::Type *Int1Ty = llvm::Type::getInt1Ty(CGF.getLLVMContext());
45
0
    return llvm::FixedVectorType::get(Int1Ty, Ty->getScalarSizeInBits());
46
0
  }
47
48
  // No operation needed
49
0
  return Ty;
50
0
}
51
52
/// Returns true if this type can be passed in SSE registers with the
53
/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
54
0
static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
55
0
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
56
0
    if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
57
0
      if (BT->getKind() == BuiltinType::LongDouble) {
58
0
        if (&Context.getTargetInfo().getLongDoubleFormat() ==
59
0
            &llvm::APFloat::x87DoubleExtended())
60
0
          return false;
61
0
      }
62
0
      return true;
63
0
    }
64
0
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
65
    // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
66
    // registers specially.
67
0
    unsigned VecSize = Context.getTypeSize(VT);
68
0
    if (VecSize == 128 || VecSize == 256 || VecSize == 512)
69
0
      return true;
70
0
  }
71
0
  return false;
72
0
}
73
74
/// Returns true if this aggregate is small enough to be passed in SSE registers
75
/// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64.
76
0
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
77
0
  return NumMembers <= 4;
78
0
}
79
80
/// Returns a Homogeneous Vector Aggregate ABIArgInfo, used in X86.
81
0
static ABIArgInfo getDirectX86Hva(llvm::Type* T = nullptr) {
82
0
  auto AI = ABIArgInfo::getDirect(T);
83
0
  AI.setInReg(true);
84
0
  AI.setCanBeFlattened(false);
85
0
  return AI;
86
0
}
87
88
//===----------------------------------------------------------------------===//
89
// X86-32 ABI Implementation
90
//===----------------------------------------------------------------------===//
91
92
/// Similar to llvm::CCState, but for Clang.
93
struct CCState {
94
  CCState(CGFunctionInfo &FI)
95
      : IsPreassigned(FI.arg_size()), CC(FI.getCallingConvention()),
96
0
  Required(FI.getRequiredArgs()), IsDelegateCall(FI.isDelegateCall()) {}
97
98
  llvm::SmallBitVector IsPreassigned;
99
  unsigned CC = CallingConv::CC_C;
100
  unsigned FreeRegs = 0;
101
  unsigned FreeSSERegs = 0;
102
  RequiredArgs Required;
103
  bool IsDelegateCall = false;
104
};
105
106
/// X86_32ABIInfo - The X86-32 ABI information.
107
class X86_32ABIInfo : public ABIInfo {
108
  enum Class {
109
    Integer,
110
    Float
111
  };
112
113
  static const unsigned MinABIStackAlignInBytes = 4;
114
115
  bool IsDarwinVectorABI;
116
  bool IsRetSmallStructInRegABI;
117
  bool IsWin32StructABI;
118
  bool IsSoftFloatABI;
119
  bool IsMCUABI;
120
  bool IsLinuxABI;
121
  unsigned DefaultNumRegisterParameters;
122
123
0
  static bool isRegisterSize(unsigned Size) {
124
0
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
125
0
  }
126
127
0
  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
128
    // FIXME: Assumes vectorcall is in use.
129
0
    return isX86VectorTypeForVectorCall(getContext(), Ty);
130
0
  }
131
132
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
133
0
                                         uint64_t NumMembers) const override {
134
    // FIXME: Assumes vectorcall is in use.
135
0
    return isX86VectorCallAggregateSmallEnough(NumMembers);
136
0
  }
137
138
  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;
139
140
  /// getIndirectResult - Give a source type \arg Ty, return a suitable result
141
  /// such that the argument will be passed in memory.
142
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
143
144
  ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;
145
146
  /// Return the alignment to use for the given type on the stack.
147
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
148
149
  Class classify(QualType Ty) const;
150
  ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
151
  ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State,
152
                                  unsigned ArgIndex) const;
153
154
  /// Updates the number of available free registers, returns
155
  /// true if any registers were allocated.
156
  bool updateFreeRegs(QualType Ty, CCState &State) const;
157
158
  bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
159
                                bool &NeedsPadding) const;
160
  bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;
161
162
  bool canExpandIndirectArgument(QualType Ty) const;
163
164
  /// Rewrite the function info so that all memory arguments use
165
  /// inalloca.
166
  void rewriteWithInAlloca(CGFunctionInfo &FI) const;
167
168
  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
169
                           CharUnits &StackOffset, ABIArgInfo &Info,
170
                           QualType Type) const;
171
  void runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const;
172
173
public:
174
175
  void computeInfo(CGFunctionInfo &FI) const override;
176
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
177
                    QualType Ty) const override;
178
179
  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
180
                bool RetSmallStructInRegABI, bool Win32StructABI,
181
                unsigned NumRegisterParameters, bool SoftFloatABI)
182
      : ABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
183
        IsRetSmallStructInRegABI(RetSmallStructInRegABI),
184
        IsWin32StructABI(Win32StructABI), IsSoftFloatABI(SoftFloatABI),
185
        IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
186
        IsLinuxABI(CGT.getTarget().getTriple().isOSLinux() ||
187
                   CGT.getTarget().getTriple().isOSCygMing()),
188
0
        DefaultNumRegisterParameters(NumRegisterParameters) {}
189
};
190
191
class X86_32SwiftABIInfo : public SwiftABIInfo {
192
public:
193
  explicit X86_32SwiftABIInfo(CodeGenTypes &CGT)
194
0
      : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/false) {}
195
196
  bool shouldPassIndirectly(ArrayRef<llvm::Type *> ComponentTys,
197
0
                            bool AsReturnValue) const override {
198
    // LLVM's x86-32 lowering currently only assigns up to three
199
    // integer registers and three fp registers.  Oddly, it'll use up to
200
    // four vector registers for vectors, but those can overlap with the
201
    // scalar registers.
202
0
    return occupiesMoreThan(ComponentTys, /*total=*/3);
203
0
  }
204
};
205
206
class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
207
public:
208
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
209
                          bool RetSmallStructInRegABI, bool Win32StructABI,
210
                          unsigned NumRegisterParameters, bool SoftFloatABI)
211
      : TargetCodeGenInfo(std::make_unique<X86_32ABIInfo>(
212
            CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
213
0
            NumRegisterParameters, SoftFloatABI)) {
214
0
    SwiftInfo = std::make_unique<X86_32SwiftABIInfo>(CGT);
215
0
  }
216
217
  static bool isStructReturnInRegABI(
218
      const llvm::Triple &Triple, const CodeGenOptions &Opts);
219
220
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
221
                           CodeGen::CodeGenModule &CGM) const override;
222
223
0
  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
224
    // Darwin uses different dwarf register numbers for EH.
225
0
    if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
226
0
    return 4;
227
0
  }
228
229
  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
230
                               llvm::Value *Address) const override;
231
232
  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
233
                                  StringRef Constraint,
234
0
                                  llvm::Type* Ty) const override {
235
0
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
236
0
  }
237
238
  void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
239
                                std::string &Constraints,
240
                                std::vector<llvm::Type *> &ResultRegTypes,
241
                                std::vector<llvm::Type *> &ResultTruncRegTypes,
242
                                std::vector<LValue> &ResultRegDests,
243
                                std::string &AsmString,
244
                                unsigned NumOutputs) const override;
245
246
0
  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
247
0
    return "movl\t%ebp, %ebp"
248
0
           "\t\t// marker for objc_retainAutoreleaseReturnValue";
249
0
  }
250
};
251
252
}
253
254
/// Rewrite input constraint references after adding some output constraints.
255
/// In the case where there is one output and one input and we add one output,
256
/// we need to replace all operand references greater than or equal to 1:
257
///     mov $0, $1
258
///     mov eax, $1
259
/// The result will be:
260
///     mov $0, $2
261
///     mov eax, $2
262
static void rewriteInputConstraintReferences(unsigned FirstIn,
263
                                             unsigned NumNewOuts,
264
0
                                             std::string &AsmString) {
265
0
  std::string Buf;
266
0
  llvm::raw_string_ostream OS(Buf);
267
0
  size_t Pos = 0;
268
0
  while (Pos < AsmString.size()) {
269
0
    size_t DollarStart = AsmString.find('$', Pos);
270
0
    if (DollarStart == std::string::npos)
271
0
      DollarStart = AsmString.size();
272
0
    size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
273
0
    if (DollarEnd == std::string::npos)
274
0
      DollarEnd = AsmString.size();
275
0
    OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
276
0
    Pos = DollarEnd;
277
0
    size_t NumDollars = DollarEnd - DollarStart;
278
0
    if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
279
      // We have an operand reference.
280
0
      size_t DigitStart = Pos;
281
0
      if (AsmString[DigitStart] == '{') {
282
0
        OS << '{';
283
0
        ++DigitStart;
284
0
      }
285
0
      size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
286
0
      if (DigitEnd == std::string::npos)
287
0
        DigitEnd = AsmString.size();
288
0
      StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
289
0
      unsigned OperandIndex;
290
0
      if (!OperandStr.getAsInteger(10, OperandIndex)) {
291
0
        if (OperandIndex >= FirstIn)
292
0
          OperandIndex += NumNewOuts;
293
0
        OS << OperandIndex;
294
0
      } else {
295
0
        OS << OperandStr;
296
0
      }
297
0
      Pos = DigitEnd;
298
0
    }
299
0
  }
300
0
  AsmString = std::move(OS.str());
301
0
}
302
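A small sketch of the rewrite above, mirroring the example in the doc comment; the demo function and values are hypothetical:

// Hypothetical usage of rewriteInputConstraintReferences (defined above).
static void DemoRewrite() {
  std::string Asm = "movl $1, $0";   // $0 = existing output, $1 = input
  // One new output constraint was appended, so every reference >= FirstIn (1)
  // is shifted up by NumNewOuts (1).
  rewriteInputConstraintReferences(/*FirstIn=*/1, /*NumNewOuts=*/1, Asm);
  // Asm is now "movl $2, $0".
}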
303
/// Add output constraints for EAX:EDX because they are return registers.
304
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
305
    CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
306
    std::vector<llvm::Type *> &ResultRegTypes,
307
    std::vector<llvm::Type *> &ResultTruncRegTypes,
308
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
309
0
    unsigned NumOutputs) const {
310
0
  uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());
311
312
  // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
313
  // larger.
314
0
  if (!Constraints.empty())
315
0
    Constraints += ',';
316
0
  if (RetWidth <= 32) {
317
0
    Constraints += "={eax}";
318
0
    ResultRegTypes.push_back(CGF.Int32Ty);
319
0
  } else {
320
    // Use the 'A' constraint for EAX:EDX.
321
0
    Constraints += "=A";
322
0
    ResultRegTypes.push_back(CGF.Int64Ty);
323
0
  }
324
325
  // Truncate EAX or EAX:EDX to an integer of the appropriate size.
326
0
  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
327
0
  ResultTruncRegTypes.push_back(CoerceTy);
328
329
  // Coerce the integer by bitcasting the return slot pointer.
330
0
  ReturnSlot.setAddress(ReturnSlot.getAddress(CGF).withElementType(CoerceTy));
331
0
  ResultRegDests.push_back(ReturnSlot);
332
333
0
  rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
334
0
}
335
336
/// shouldReturnTypeInRegister - Determine if the given type should be
337
/// returned in a register (for the Darwin and MCU ABI).
338
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
339
0
                                               ASTContext &Context) const {
340
0
  uint64_t Size = Context.getTypeSize(Ty);
341
342
  // For i386, type must be register sized.
343
  // For the MCU ABI, it only needs to be <= 8 bytes.
344
0
  if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
345
0
   return false;
346
347
0
  if (Ty->isVectorType()) {
348
    // 64- and 128- bit vectors inside structures are not returned in
349
    // registers.
350
0
    if (Size == 64 || Size == 128)
351
0
      return false;
352
353
0
    return true;
354
0
  }
355
356
  // If this is a builtin, pointer, enum, complex type, member pointer, or
357
  // member function pointer it is ok.
358
0
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
359
0
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
360
0
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
361
0
    return true;
362
363
  // Arrays are treated like records.
364
0
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
365
0
    return shouldReturnTypeInRegister(AT->getElementType(), Context);
366
367
  // Otherwise, it must be a record type.
368
0
  const RecordType *RT = Ty->getAs<RecordType>();
369
0
  if (!RT) return false;
370
371
  // FIXME: Traverse bases here too.
372
373
  // Structure types are passed in register if all fields would be
374
  // passed in a register.
375
0
  for (const auto *FD : RT->getDecl()->fields()) {
376
    // Empty fields are ignored.
377
0
    if (isEmptyField(Context, FD, true))
378
0
      continue;
379
380
    // Check fields recursively.
381
0
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
382
0
      return false;
383
0
  }
384
0
  return true;
385
0
}
386
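A few hypothetical i386 examples (not from the source) of how the rule above classifies small return types when small-struct-in-register returns are enabled:

struct A { int x; };              // 32 bits, register sized, field passes the check -> returned in a register
struct B { short a, b; int c; };  // 64 bits, every field passes the recursive check -> returned in registers
struct C { char a, b, c; };       // 24 bits, not a register size on i386            -> returned indirectly (sret)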
387
0
static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
388
  // Treat complex types as the element type.
389
0
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
390
0
    Ty = CTy->getElementType();
391
392
  // Check for a type which we know has a simple scalar argument-passing
393
  // convention without any padding.  (We're specifically looking for 32
394
  // and 64-bit integer and integer-equivalents, float, and double.)
395
0
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
396
0
      !Ty->isEnumeralType() && !Ty->isBlockPointerType())
397
0
    return false;
398
399
0
  uint64_t Size = Context.getTypeSize(Ty);
400
0
  return Size == 32 || Size == 64;
401
0
}
402
403
static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD,
404
0
                          uint64_t &Size) {
405
0
  for (const auto *FD : RD->fields()) {
406
    // Scalar arguments on the stack get 4 byte alignment on x86. If the
407
    // argument is smaller than 32-bits, expanding the struct will create
408
    // alignment padding.
409
0
    if (!is32Or64BitBasicType(FD->getType(), Context))
410
0
      return false;
411
412
    // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
413
    // how to expand them yet, and the predicate for telling if a bitfield still
414
    // counts as "basic" is more complicated than what we were doing previously.
415
0
    if (FD->isBitField())
416
0
      return false;
417
418
0
    Size += Context.getTypeSize(FD->getType());
419
0
  }
420
0
  return true;
421
0
}
422
423
static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD,
424
0
                                 uint64_t &Size) {
425
  // Don't do this if there are any non-empty bases.
426
0
  for (const CXXBaseSpecifier &Base : RD->bases()) {
427
0
    if (!addBaseAndFieldSizes(Context, Base.getType()->getAsCXXRecordDecl(),
428
0
                              Size))
429
0
      return false;
430
0
  }
431
0
  if (!addFieldSizes(Context, RD, Size))
432
0
    return false;
433
0
  return true;
434
0
}
435
436
/// Test whether an argument type which is to be passed indirectly (on the
437
/// stack) would have the equivalent layout if it was expanded into separate
438
/// arguments. If so, we prefer to do the latter to avoid inhibiting
439
/// optimizations.
440
0
bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
441
  // We can only expand structure types.
442
0
  const RecordType *RT = Ty->getAs<RecordType>();
443
0
  if (!RT)
444
0
    return false;
445
0
  const RecordDecl *RD = RT->getDecl();
446
0
  uint64_t Size = 0;
447
0
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
448
0
    if (!IsWin32StructABI) {
449
      // On non-Windows, we have to conservatively match our old bitcode
450
      // prototypes in order to be ABI-compatible at the bitcode level.
451
0
      if (!CXXRD->isCLike())
452
0
        return false;
453
0
    } else {
454
      // Don't do this for dynamic classes.
455
0
      if (CXXRD->isDynamicClass())
456
0
        return false;
457
0
    }
458
0
    if (!addBaseAndFieldSizes(getContext(), CXXRD, Size))
459
0
      return false;
460
0
  } else {
461
0
    if (!addFieldSizes(getContext(), RD, Size))
462
0
      return false;
463
0
  }
464
465
  // We can do this if there was no alignment padding.
466
0
  return Size == getContext().getTypeSize(Ty);
467
0
}
468
469
0
ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy, CCState &State) const {
470
  // If the return value is indirect, then the hidden argument is consuming one
471
  // integer register.
472
0
  if (State.FreeRegs) {
473
0
    --State.FreeRegs;
474
0
    if (!IsMCUABI)
475
0
      return getNaturalAlignIndirectInReg(RetTy);
476
0
  }
477
0
  return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
478
0
}
479
480
ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
481
0
                                             CCState &State) const {
482
0
  if (RetTy->isVoidType())
483
0
    return ABIArgInfo::getIgnore();
484
485
0
  const Type *Base = nullptr;
486
0
  uint64_t NumElts = 0;
487
0
  if ((State.CC == llvm::CallingConv::X86_VectorCall ||
488
0
       State.CC == llvm::CallingConv::X86_RegCall) &&
489
0
      isHomogeneousAggregate(RetTy, Base, NumElts)) {
490
    // The LLVM struct type for such an aggregate should lower properly.
491
0
    return ABIArgInfo::getDirect();
492
0
  }
493
494
0
  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
495
    // On Darwin, some vectors are returned in registers.
496
0
    if (IsDarwinVectorABI) {
497
0
      uint64_t Size = getContext().getTypeSize(RetTy);
498
499
      // 128-bit vectors are a special case; they are returned in
500
      // registers and we need to make sure to pick a type the LLVM
501
      // backend will like.
502
0
      if (Size == 128)
503
0
        return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
504
0
            llvm::Type::getInt64Ty(getVMContext()), 2));
505
506
      // Always return in register if it fits in a general purpose
507
      // register, or if it is 64 bits and has a single element.
508
0
      if ((Size == 8 || Size == 16 || Size == 32) ||
509
0
          (Size == 64 && VT->getNumElements() == 1))
510
0
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
511
0
                                                            Size));
512
513
0
      return getIndirectReturnResult(RetTy, State);
514
0
    }
515
516
0
    return ABIArgInfo::getDirect();
517
0
  }
518
519
0
  if (isAggregateTypeForABI(RetTy)) {
520
0
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
521
      // Structures with flexible arrays are always indirect.
522
0
      if (RT->getDecl()->hasFlexibleArrayMember())
523
0
        return getIndirectReturnResult(RetTy, State);
524
0
    }
525
526
    // If specified, structs and unions are always indirect.
527
0
    if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType())
528
0
      return getIndirectReturnResult(RetTy, State);
529
530
    // Ignore empty structs/unions.
531
0
    if (isEmptyRecord(getContext(), RetTy, true))
532
0
      return ABIArgInfo::getIgnore();
533
534
    // Return complex of _Float16 as <2 x half> so the backend will use xmm0.
535
0
    if (const ComplexType *CT = RetTy->getAs<ComplexType>()) {
536
0
      QualType ET = getContext().getCanonicalType(CT->getElementType());
537
0
      if (ET->isFloat16Type())
538
0
        return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
539
0
            llvm::Type::getHalfTy(getVMContext()), 2));
540
0
    }
541
542
    // Small structures which are register sized are generally returned
543
    // in a register.
544
0
    if (shouldReturnTypeInRegister(RetTy, getContext())) {
545
0
      uint64_t Size = getContext().getTypeSize(RetTy);
546
547
      // As a special-case, if the struct is a "single-element" struct, and
548
      // the field is of type "float" or "double", return it in a
549
      // floating-point register. (MSVC does not apply this special case.)
550
      // We apply a similar transformation for pointer types to improve the
551
      // quality of the generated IR.
552
0
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
553
0
        if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
554
0
            || SeltTy->hasPointerRepresentation())
555
0
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
556
557
      // FIXME: We should be able to narrow this integer in cases with dead
558
      // padding.
559
0
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
560
0
    }
561
562
0
    return getIndirectReturnResult(RetTy, State);
563
0
  }
564
565
  // Treat an enum type as its underlying type.
566
0
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
567
0
    RetTy = EnumTy->getDecl()->getIntegerType();
568
569
0
  if (const auto *EIT = RetTy->getAs<BitIntType>())
570
0
    if (EIT->getNumBits() > 64)
571
0
      return getIndirectReturnResult(RetTy, State);
572
573
0
  return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
574
0
                                               : ABIArgInfo::getDirect());
575
0
}
576
577
unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
578
0
                                                 unsigned Align) const {
579
  // Otherwise, if the alignment is less than or equal to the minimum ABI
580
  // alignment, just use the default; the backend will handle this.
581
0
  if (Align <= MinABIStackAlignInBytes)
582
0
    return 0; // Use default alignment.
583
584
0
  if (IsLinuxABI) {
585
    // Exclude other System V OS (e.g Darwin, PS4 and FreeBSD) since we don't
586
    // want to spend any effort dealing with the ramifications of ABI breaks.
587
    //
588
    // If the vector type is __m128/__m256/__m512, return the default alignment.
589
0
    if (Ty->isVectorType() && (Align == 16 || Align == 32 || Align == 64))
590
0
      return Align;
591
0
  }
592
  // On non-Darwin, the stack type alignment is always 4.
593
0
  if (!IsDarwinVectorABI) {
594
    // Set explicit alignment, since we may need to realign the top.
595
0
    return MinABIStackAlignInBytes;
596
0
  }
597
598
  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
599
0
  if (Align >= 16 && (isSIMDVectorType(getContext(), Ty) ||
600
0
                      isRecordWithSIMDVectorType(getContext(), Ty)))
601
0
    return 16;
602
603
0
  return MinABIStackAlignInBytes;
604
0
}
605
606
ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
607
0
                                            CCState &State) const {
608
0
  if (!ByVal) {
609
0
    if (State.FreeRegs) {
610
0
      --State.FreeRegs; // Non-byval indirects just use one pointer.
611
0
      if (!IsMCUABI)
612
0
        return getNaturalAlignIndirectInReg(Ty);
613
0
    }
614
0
    return getNaturalAlignIndirect(Ty, false);
615
0
  }
616
617
  // Compute the byval alignment.
618
0
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
619
0
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
620
0
  if (StackAlign == 0)
621
0
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true);
622
623
  // If the stack alignment is less than the type alignment, realign the
624
  // argument.
625
0
  bool Realign = TypeAlign > StackAlign;
626
0
  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign),
627
0
                                 /*ByVal=*/true, Realign);
628
0
}
629
630
0
X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
631
0
  const Type *T = isSingleElementStruct(Ty, getContext());
632
0
  if (!T)
633
0
    T = Ty.getTypePtr();
634
635
0
  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
636
0
    BuiltinType::Kind K = BT->getKind();
637
0
    if (K == BuiltinType::Float || K == BuiltinType::Double)
638
0
      return Float;
639
0
  }
640
0
  return Integer;
641
0
}
642
643
0
bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
644
0
  if (!IsSoftFloatABI) {
645
0
    Class C = classify(Ty);
646
0
    if (C == Float)
647
0
      return false;
648
0
  }
649
650
0
  unsigned Size = getContext().getTypeSize(Ty);
651
0
  unsigned SizeInRegs = (Size + 31) / 32;
652
653
0
  if (SizeInRegs == 0)
654
0
    return false;
655
656
0
  if (!IsMCUABI) {
657
0
    if (SizeInRegs > State.FreeRegs) {
658
0
      State.FreeRegs = 0;
659
0
      return false;
660
0
    }
661
0
  } else {
662
    // The MCU psABI allows passing parameters in-reg even if there are
663
    // earlier parameters that are passed on the stack. Also,
664
    // it does not allow passing >8-byte structs in-register,
665
    // even if there are 3 free registers available.
666
0
    if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
667
0
      return false;
668
0
  }
669
670
0
  State.FreeRegs -= SizeInRegs;
671
0
  return true;
672
0
}
673
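A hedged sketch of the accounting above, assuming a non-MCU target with three free integer registers (for example -mregparm=3):

//   void f(int a, struct { int x, y; } b, int c);
//   a: SizeInRegs = 1, FreeRegs 3 -> 2
//   b: SizeInRegs = 2, FreeRegs 2 -> 0
//   c: SizeInRegs = 1 > FreeRegs, allocation fails and FreeRegs is cleared to 0
// On the MCU psABI a failing argument leaves FreeRegs untouched, and anything
// wider than 64 bits (SizeInRegs > 2) is never passed in registers.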
674
bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
675
                                             bool &InReg,
676
0
                                             bool &NeedsPadding) const {
677
  // On Windows, aggregates other than HFAs are never passed in registers, and
678
  // they do not consume register slots. Homogeneous floating-point aggregates
679
  // (HFAs) have already been dealt with at this point.
680
0
  if (IsWin32StructABI && isAggregateTypeForABI(Ty))
681
0
    return false;
682
683
0
  NeedsPadding = false;
684
0
  InReg = !IsMCUABI;
685
686
0
  if (!updateFreeRegs(Ty, State))
687
0
    return false;
688
689
0
  if (IsMCUABI)
690
0
    return true;
691
692
0
  if (State.CC == llvm::CallingConv::X86_FastCall ||
693
0
      State.CC == llvm::CallingConv::X86_VectorCall ||
694
0
      State.CC == llvm::CallingConv::X86_RegCall) {
695
0
    if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
696
0
      NeedsPadding = true;
697
698
0
    return false;
699
0
  }
700
701
0
  return true;
702
0
}
703
704
0
bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
705
0
  bool IsPtrOrInt = (getContext().getTypeSize(Ty) <= 32) &&
706
0
                    (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() ||
707
0
                     Ty->isReferenceType());
708
709
0
  if (!IsPtrOrInt && (State.CC == llvm::CallingConv::X86_FastCall ||
710
0
                      State.CC == llvm::CallingConv::X86_VectorCall))
711
0
    return false;
712
713
0
  if (!updateFreeRegs(Ty, State))
714
0
    return false;
715
716
0
  if (!IsPtrOrInt && State.CC == llvm::CallingConv::X86_RegCall)
717
0
    return false;
718
719
  // Return true to apply inreg to all legal parameters except for MCU targets.
720
0
  return !IsMCUABI;
721
0
}
722
723
0
void X86_32ABIInfo::runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const {
724
  // Vectorcall x86 works subtly different than in x64, so the format is
725
  // a bit different than the x64 version.  First, all vector types (not HVAs)
726
  // are assigned, with the first 6 ending up in the [XYZ]MM0-5 registers.
727
  // This differs from the x64 implementation, where the first 6 by INDEX get
728
  // registers.
729
  // In the second pass over the arguments, HVAs are passed in the remaining
730
  // vector registers if possible, or indirectly by address. The address will be
731
  // passed in ECX/EDX if available. Any other arguments are passed according to
732
  // the usual fastcall rules.
733
0
  MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
734
0
  for (int I = 0, E = Args.size(); I < E; ++I) {
735
0
    const Type *Base = nullptr;
736
0
    uint64_t NumElts = 0;
737
0
    const QualType &Ty = Args[I].type;
738
0
    if ((Ty->isVectorType() || Ty->isBuiltinType()) &&
739
0
        isHomogeneousAggregate(Ty, Base, NumElts)) {
740
0
      if (State.FreeSSERegs >= NumElts) {
741
0
        State.FreeSSERegs -= NumElts;
742
0
        Args[I].info = ABIArgInfo::getDirectInReg();
743
0
        State.IsPreassigned.set(I);
744
0
      }
745
0
    }
746
0
  }
747
0
}
748
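A hypothetical illustration of the first pass above (the HVA second pass happens later, in classifyArgumentType):

// For: void __vectorcall f(__m128 a, struct HVA2 b, __m128 c);
//   where HVA2 is a struct with two __m128 members.
// First pass : a and c are plain vector types, so they take XMM registers in
//              declaration order and are marked pre-assigned.
// Second pass: b, the HVA, uses the remaining XMM registers if enough are still
//              free; otherwise it is passed indirectly, with the address in
//              ECX or EDX when one of those is available.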
749
ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, CCState &State,
750
0
                                               unsigned ArgIndex) const {
751
  // FIXME: Set alignment on indirect arguments.
752
0
  bool IsFastCall = State.CC == llvm::CallingConv::X86_FastCall;
753
0
  bool IsRegCall = State.CC == llvm::CallingConv::X86_RegCall;
754
0
  bool IsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall;
755
756
0
  Ty = useFirstFieldIfTransparentUnion(Ty);
757
0
  TypeInfo TI = getContext().getTypeInfo(Ty);
758
759
  // Check with the C++ ABI first.
760
0
  const RecordType *RT = Ty->getAs<RecordType>();
761
0
  if (RT) {
762
0
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
763
0
    if (RAA == CGCXXABI::RAA_Indirect) {
764
0
      return getIndirectResult(Ty, false, State);
765
0
    } else if (State.IsDelegateCall) {
766
      // Avoid having different alignments on delegate call args by always
767
      // setting the alignment to 4, which is what we do for inallocas.
768
0
      ABIArgInfo Res = getIndirectResult(Ty, false, State);
769
0
      Res.setIndirectAlign(CharUnits::fromQuantity(4));
770
0
      return Res;
771
0
    } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
772
      // The field index doesn't matter, we'll fix it up later.
773
0
      return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
774
0
    }
775
0
  }
776
777
  // Regcall uses the concept of a homogeneous vector aggregate, similar
778
  // to other targets.
779
0
  const Type *Base = nullptr;
780
0
  uint64_t NumElts = 0;
781
0
  if ((IsRegCall || IsVectorCall) &&
782
0
      isHomogeneousAggregate(Ty, Base, NumElts)) {
783
0
    if (State.FreeSSERegs >= NumElts) {
784
0
      State.FreeSSERegs -= NumElts;
785
786
      // Vectorcall passes HVAs directly and does not flatten them, but regcall
787
      // does.
788
0
      if (IsVectorCall)
789
0
        return getDirectX86Hva();
790
791
0
      if (Ty->isBuiltinType() || Ty->isVectorType())
792
0
        return ABIArgInfo::getDirect();
793
0
      return ABIArgInfo::getExpand();
794
0
    }
795
0
    return getIndirectResult(Ty, /*ByVal=*/false, State);
796
0
  }
797
798
0
  if (isAggregateTypeForABI(Ty)) {
799
    // Structures with flexible arrays are always indirect.
800
    // FIXME: This should not be byval!
801
0
    if (RT && RT->getDecl()->hasFlexibleArrayMember())
802
0
      return getIndirectResult(Ty, true, State);
803
804
    // Ignore empty structs/unions on non-Windows.
805
0
    if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))
806
0
      return ABIArgInfo::getIgnore();
807
808
0
    llvm::LLVMContext &LLVMContext = getVMContext();
809
0
    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
810
0
    bool NeedsPadding = false;
811
0
    bool InReg;
812
0
    if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
813
0
      unsigned SizeInRegs = (TI.Width + 31) / 32;
814
0
      SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
815
0
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
816
0
      if (InReg)
817
0
        return ABIArgInfo::getDirectInReg(Result);
818
0
      else
819
0
        return ABIArgInfo::getDirect(Result);
820
0
    }
821
0
    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;
822
823
    // Pass over-aligned aggregates to non-variadic functions on Windows
824
    // indirectly. This behavior was added in MSVC 2015. Use the required
825
    // alignment from the record layout, since that may be less than the
826
    // regular type alignment, and types with required alignment of less than 4
827
    // bytes are not passed indirectly.
828
0
    if (IsWin32StructABI && State.Required.isRequiredArg(ArgIndex)) {
829
0
      unsigned AlignInBits = 0;
830
0
      if (RT) {
831
0
        const ASTRecordLayout &Layout =
832
0
          getContext().getASTRecordLayout(RT->getDecl());
833
0
        AlignInBits = getContext().toBits(Layout.getRequiredAlignment());
834
0
      } else if (TI.isAlignRequired()) {
835
0
        AlignInBits = TI.Align;
836
0
      }
837
0
      if (AlignInBits > 32)
838
0
        return getIndirectResult(Ty, /*ByVal=*/false, State);
839
0
    }
840
841
    // Expand small (<= 128-bit) record types when we know that the stack layout
842
    // of those arguments will match the struct. This is important because the
843
    // LLVM backend isn't smart enough to remove byval, which inhibits many
844
    // optimizations.
845
    // Don't do this for the MCU if there are still free integer registers
846
    // (see X86_64 ABI for full explanation).
847
0
    if (TI.Width <= 4 * 32 && (!IsMCUABI || State.FreeRegs == 0) &&
848
0
        canExpandIndirectArgument(Ty))
849
0
      return ABIArgInfo::getExpandWithPadding(
850
0
          IsFastCall || IsVectorCall || IsRegCall, PaddingType);
851
852
0
    return getIndirectResult(Ty, true, State);
853
0
  }
854
855
0
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
856
    // On Windows, vectors are passed directly if registers are available, or
857
    // indirectly if not. This avoids the need to align argument memory. Pass
858
    // user-defined vector types larger than 512 bits indirectly for simplicity.
859
0
    if (IsWin32StructABI) {
860
0
      if (TI.Width <= 512 && State.FreeSSERegs > 0) {
861
0
        --State.FreeSSERegs;
862
0
        return ABIArgInfo::getDirectInReg();
863
0
      }
864
0
      return getIndirectResult(Ty, /*ByVal=*/false, State);
865
0
    }
866
867
    // On Darwin, some vectors are passed in memory, we handle this by passing
868
    // it as an i8/i16/i32/i64.
869
0
    if (IsDarwinVectorABI) {
870
0
      if ((TI.Width == 8 || TI.Width == 16 || TI.Width == 32) ||
871
0
          (TI.Width == 64 && VT->getNumElements() == 1))
872
0
        return ABIArgInfo::getDirect(
873
0
            llvm::IntegerType::get(getVMContext(), TI.Width));
874
0
    }
875
876
0
    if (IsX86_MMXType(CGT.ConvertType(Ty)))
877
0
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));
878
879
0
    return ABIArgInfo::getDirect();
880
0
  }
881
882
883
0
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
884
0
    Ty = EnumTy->getDecl()->getIntegerType();
885
886
0
  bool InReg = shouldPrimitiveUseInReg(Ty, State);
887
888
0
  if (isPromotableIntegerTypeForABI(Ty)) {
889
0
    if (InReg)
890
0
      return ABIArgInfo::getExtendInReg(Ty);
891
0
    return ABIArgInfo::getExtend(Ty);
892
0
  }
893
894
0
  if (const auto *EIT = Ty->getAs<BitIntType>()) {
895
0
    if (EIT->getNumBits() <= 64) {
896
0
      if (InReg)
897
0
        return ABIArgInfo::getDirectInReg();
898
0
      return ABIArgInfo::getDirect();
899
0
    }
900
0
    return getIndirectResult(Ty, /*ByVal=*/false, State);
901
0
  }
902
903
0
  if (InReg)
904
0
    return ABIArgInfo::getDirectInReg();
905
0
  return ABIArgInfo::getDirect();
906
0
}
907
908
0
void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
909
0
  CCState State(FI);
910
0
  if (IsMCUABI)
911
0
    State.FreeRegs = 3;
912
0
  else if (State.CC == llvm::CallingConv::X86_FastCall) {
913
0
    State.FreeRegs = 2;
914
0
    State.FreeSSERegs = 3;
915
0
  } else if (State.CC == llvm::CallingConv::X86_VectorCall) {
916
0
    State.FreeRegs = 2;
917
0
    State.FreeSSERegs = 6;
918
0
  } else if (FI.getHasRegParm())
919
0
    State.FreeRegs = FI.getRegParm();
920
0
  else if (State.CC == llvm::CallingConv::X86_RegCall) {
921
0
    State.FreeRegs = 5;
922
0
    State.FreeSSERegs = 8;
923
0
  } else if (IsWin32StructABI) {
924
    // Since MSVC 2015, the first three SSE vectors have been passed in
925
    // registers. The rest are passed indirectly.
926
0
    State.FreeRegs = DefaultNumRegisterParameters;
927
0
    State.FreeSSERegs = 3;
928
0
  } else
929
0
    State.FreeRegs = DefaultNumRegisterParameters;
930
931
0
  if (!::classifyReturnType(getCXXABI(), FI, *this)) {
932
0
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
933
0
  } else if (FI.getReturnInfo().isIndirect()) {
934
    // The C++ ABI is not aware of register usage, so we have to check if the
935
    // return value was sret and put it in a register ourselves if appropriate.
936
0
    if (State.FreeRegs) {
937
0
      --State.FreeRegs;  // The sret parameter consumes a register.
938
0
      if (!IsMCUABI)
939
0
        FI.getReturnInfo().setInReg(true);
940
0
    }
941
0
  }
942
943
  // The chain argument effectively gives us another free register.
944
0
  if (FI.isChainCall())
945
0
    ++State.FreeRegs;
946
947
  // For vectorcall, do a first pass over the arguments, assigning FP and vector
948
  // arguments to XMM registers as available.
949
0
  if (State.CC == llvm::CallingConv::X86_VectorCall)
950
0
    runVectorCallFirstPass(FI, State);
951
952
0
  bool UsedInAlloca = false;
953
0
  MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
954
0
  for (unsigned I = 0, E = Args.size(); I < E; ++I) {
955
    // Skip arguments that have already been assigned.
956
0
    if (State.IsPreassigned.test(I))
957
0
      continue;
958
959
0
    Args[I].info =
960
0
        classifyArgumentType(Args[I].type, State, I);
961
0
    UsedInAlloca |= (Args[I].info.getKind() == ABIArgInfo::InAlloca);
962
0
  }
963
964
  // If we needed to use inalloca for any argument, do a second pass and rewrite
965
  // all the memory arguments to use inalloca.
966
0
  if (UsedInAlloca)
967
0
    rewriteWithInAlloca(FI);
968
0
}
969
970
void
971
X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
972
                                   CharUnits &StackOffset, ABIArgInfo &Info,
973
0
                                   QualType Type) const {
974
  // Arguments are always 4-byte-aligned.
975
0
  CharUnits WordSize = CharUnits::fromQuantity(4);
976
0
  assert(StackOffset.isMultipleOf(WordSize) && "unaligned inalloca struct");
977
978
  // sret pointers and indirect things will require an extra pointer
979
  // indirection, unless they are byval. Most things are byval, and will not
980
  // require this indirection.
981
0
  bool IsIndirect = false;
982
0
  if (Info.isIndirect() && !Info.getIndirectByVal())
983
0
    IsIndirect = true;
984
0
  Info = ABIArgInfo::getInAlloca(FrameFields.size(), IsIndirect);
985
0
  llvm::Type *LLTy = CGT.ConvertTypeForMem(Type);
986
0
  if (IsIndirect)
987
0
    LLTy = llvm::PointerType::getUnqual(getVMContext());
988
0
  FrameFields.push_back(LLTy);
989
0
  StackOffset += IsIndirect ? WordSize : getContext().getTypeSizeInChars(Type);
990
991
  // Insert padding bytes to respect alignment.
992
0
  CharUnits FieldEnd = StackOffset;
993
0
  StackOffset = FieldEnd.alignTo(WordSize);
994
0
  if (StackOffset != FieldEnd) {
995
0
    CharUnits NumBytes = StackOffset - FieldEnd;
996
0
    llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
997
0
    Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
998
0
    FrameFields.push_back(Ty);
999
0
  }
1000
0
}
1001
1002
0
static bool isArgInAlloca(const ABIArgInfo &Info) {
1003
  // Leave ignored and inreg arguments alone.
1004
0
  switch (Info.getKind()) {
1005
0
  case ABIArgInfo::InAlloca:
1006
0
    return true;
1007
0
  case ABIArgInfo::Ignore:
1008
0
  case ABIArgInfo::IndirectAliased:
1009
0
    return false;
1010
0
  case ABIArgInfo::Indirect:
1011
0
  case ABIArgInfo::Direct:
1012
0
  case ABIArgInfo::Extend:
1013
0
    return !Info.getInReg();
1014
0
  case ABIArgInfo::Expand:
1015
0
  case ABIArgInfo::CoerceAndExpand:
1016
    // These are aggregate types which are never passed in registers when
1017
    // inalloca is involved.
1018
0
    return true;
1019
0
  }
1020
0
  llvm_unreachable("invalid enum");
1021
0
}
1022
1023
0
void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
1024
0
  assert(IsWin32StructABI && "inalloca only supported on win32");
1025
1026
  // Build a packed struct type for all of the arguments in memory.
1027
0
  SmallVector<llvm::Type *, 6> FrameFields;
1028
1029
  // The stack alignment is always 4.
1030
0
  CharUnits StackAlign = CharUnits::fromQuantity(4);
1031
1032
0
  CharUnits StackOffset;
1033
0
  CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();
1034
1035
  // Put 'this' into the struct before 'sret', if necessary.
1036
0
  bool IsThisCall =
1037
0
      FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
1038
0
  ABIArgInfo &Ret = FI.getReturnInfo();
1039
0
  if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
1040
0
      isArgInAlloca(I->info)) {
1041
0
    addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
1042
0
    ++I;
1043
0
  }
1044
1045
  // Put the sret parameter into the inalloca struct if it's in memory.
1046
0
  if (Ret.isIndirect() && !Ret.getInReg()) {
1047
0
    addFieldToArgStruct(FrameFields, StackOffset, Ret, FI.getReturnType());
1048
    // On Windows, the hidden sret parameter is always returned in eax.
1049
0
    Ret.setInAllocaSRet(IsWin32StructABI);
1050
0
  }
1051
1052
  // Skip the 'this' parameter in ecx.
1053
0
  if (IsThisCall)
1054
0
    ++I;
1055
1056
  // Put arguments passed in memory into the struct.
1057
0
  for (; I != E; ++I) {
1058
0
    if (isArgInAlloca(I->info))
1059
0
      addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
1060
0
  }
1061
1062
0
  FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
1063
0
                                        /*isPacked=*/true),
1064
0
                  StackAlign);
1065
0
}
1066
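An illustrative sketch (hypothetical types, not from the source) of the frame struct the rewrite above builds for the memory arguments:

// For a win32 function taking a 12-byte POD struct plus an over-aligned struct
// that is passed indirectly, the packed frame might be
//   %frame = type <{ %struct.ThreeInts, ptr }>
// Each rewritten argument becomes ABIArgInfo::getInAlloca(FieldIndex); indirect
// non-byval arguments are stored as a pointer field, and i8 array padding is
// inserted so every field starts on a 4-byte boundary.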
1067
Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF,
1068
0
                                 Address VAListAddr, QualType Ty) const {
1069
1070
0
  auto TypeInfo = getContext().getTypeInfoInChars(Ty);
1071
1072
  // x86-32 changes the alignment of certain arguments on the stack.
1073
  //
1074
  // Just messing with TypeInfo like this works because we never pass
1075
  // anything indirectly.
1076
0
  TypeInfo.Align = CharUnits::fromQuantity(
1077
0
                getTypeStackAlignInBytes(Ty, TypeInfo.Align.getQuantity()));
1078
1079
0
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
1080
0
                          TypeInfo, CharUnits::fromQuantity(4),
1081
0
                          /*AllowHigherAlign*/ true);
1082
0
}
1083
1084
bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
1085
0
    const llvm::Triple &Triple, const CodeGenOptions &Opts) {
1086
0
  assert(Triple.getArch() == llvm::Triple::x86);
1087
1088
0
  switch (Opts.getStructReturnConvention()) {
1089
0
  case CodeGenOptions::SRCK_Default:
1090
0
    break;
1091
0
  case CodeGenOptions::SRCK_OnStack:  // -fpcc-struct-return
1092
0
    return false;
1093
0
  case CodeGenOptions::SRCK_InRegs:  // -freg-struct-return
1094
0
    return true;
1095
0
  }
1096
1097
0
  if (Triple.isOSDarwin() || Triple.isOSIAMCU())
1098
0
    return true;
1099
1100
0
  switch (Triple.getOS()) {
1101
0
  case llvm::Triple::DragonFly:
1102
0
  case llvm::Triple::FreeBSD:
1103
0
  case llvm::Triple::OpenBSD:
1104
0
  case llvm::Triple::Win32:
1105
0
    return true;
1106
0
  default:
1107
0
    return false;
1108
0
  }
1109
0
}
1110
1111
static void addX86InterruptAttrs(const FunctionDecl *FD, llvm::GlobalValue *GV,
1112
0
                                 CodeGen::CodeGenModule &CGM) {
1113
0
  if (!FD->hasAttr<AnyX86InterruptAttr>())
1114
0
    return;
1115
1116
0
  llvm::Function *Fn = cast<llvm::Function>(GV);
1117
0
  Fn->setCallingConv(llvm::CallingConv::X86_INTR);
1118
0
  if (FD->getNumParams() == 0)
1119
0
    return;
1120
1121
0
  auto PtrTy = cast<PointerType>(FD->getParamDecl(0)->getType());
1122
0
  llvm::Type *ByValTy = CGM.getTypes().ConvertType(PtrTy->getPointeeType());
1123
0
  llvm::Attribute NewAttr = llvm::Attribute::getWithByValType(
1124
0
    Fn->getContext(), ByValTy);
1125
0
  Fn->addParamAttr(0, NewAttr);
1126
0
}
1127
1128
void X86_32TargetCodeGenInfo::setTargetAttributes(
1129
0
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
1130
0
  if (GV->isDeclaration())
1131
0
    return;
1132
0
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
1133
0
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1134
0
      llvm::Function *Fn = cast<llvm::Function>(GV);
1135
0
      Fn->addFnAttr("stackrealign");
1136
0
    }
1137
1138
0
    addX86InterruptAttrs(FD, GV, CGM);
1139
0
  }
1140
0
}
1141
1142
bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
1143
                                               CodeGen::CodeGenFunction &CGF,
1144
0
                                               llvm::Value *Address) const {
1145
0
  CodeGen::CGBuilderTy &Builder = CGF.Builder;
1146
1147
0
  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
1148
1149
  // 0-7 are the eight integer registers;  the order is different
1150
  //   on Darwin (for EH), but the range is the same.
1151
  // 8 is %eip.
1152
0
  AssignToArrayRange(Builder, Address, Four8, 0, 8);
1153
1154
0
  if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
1155
    // 12-16 are st(0..4).  Not sure why we stop at 4.
1156
    // These have size 16, which is sizeof(long double) on
1157
    // platforms with 8-byte alignment for that type.
1158
0
    llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
1159
0
    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
1160
1161
0
  } else {
1162
    // 9 is %eflags, which doesn't get a size on Darwin for some
1163
    // reason.
1164
0
    Builder.CreateAlignedStore(
1165
0
        Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
1166
0
                               CharUnits::One());
1167
1168
    // 11-16 are st(0..5).  Not sure why we stop at 5.
1169
    // These have size 12, which is sizeof(long double) on
1170
    // platforms with 4-byte alignment for that type.
1171
0
    llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
1172
0
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
1173
0
  }
1174
1175
0
  return false;
1176
0
}
1177
1178
//===----------------------------------------------------------------------===//
1179
// X86-64 ABI Implementation
1180
//===----------------------------------------------------------------------===//
1181
1182
1183
namespace {
1184
1185
/// \returns the size in bits of the largest (native) vector for \p AVXLevel.
1186
0
static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
1187
0
  switch (AVXLevel) {
1188
0
  case X86AVXABILevel::AVX512:
1189
0
    return 512;
1190
0
  case X86AVXABILevel::AVX:
1191
0
    return 256;
1192
0
  case X86AVXABILevel::None:
1193
0
    return 128;
1194
0
  }
1195
0
  llvm_unreachable("Unknown AVXLevel");
1196
0
}
1197
1198
/// X86_64ABIInfo - The X86_64 ABI information.
1199
class X86_64ABIInfo : public ABIInfo {
1200
  enum Class {
1201
    Integer = 0,
1202
    SSE,
1203
    SSEUp,
1204
    X87,
1205
    X87Up,
1206
    ComplexX87,
1207
    NoClass,
1208
    Memory
1209
  };
1210
1211
  /// merge - Implement the X86_64 ABI merging algorithm.
1212
  ///
1213
  /// Merge an accumulating classification \arg Accum with a field
1214
  /// classification \arg Field.
1215
  ///
1216
  /// \param Accum - The accumulating classification. This should
1217
  /// always be either NoClass or the result of a previous merge
1218
  /// call. In addition, this should never be Memory (the caller
1219
  /// should just return Memory for the aggregate).
1220
  static Class merge(Class Accum, Class Field);
1221
1222
  /// postMerge - Implement the X86_64 ABI post merging algorithm.
1223
  ///
1224
  /// Post merger cleanup, reduces a malformed Hi and Lo pair to
1225
  /// final MEMORY or SSE classes when necessary.
1226
  ///
1227
  /// \param AggregateSize - The size of the current aggregate in
1228
  /// the classification process.
1229
  ///
1230
  /// \param Lo - The classification for the parts of the type
1231
  /// residing in the low word of the containing object.
1232
  ///
1233
  /// \param Hi - The classification for the parts of the type
1234
  /// residing in the higher words of the containing object.
1235
  ///
1236
  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
1237
1238
  /// classify - Determine the x86_64 register classes in which the
1239
  /// given type T should be passed.
1240
  ///
1241
  /// \param Lo - The classification for the parts of the type
1242
  /// residing in the low word of the containing object.
1243
  ///
1244
  /// \param Hi - The classification for the parts of the type
1245
  /// residing in the high word of the containing object.
1246
  ///
1247
  /// \param OffsetBase - The bit offset of this type in the
1248
  /// containing object.  Some parameters are classified different
1249
  /// depending on whether they straddle an eightbyte boundary.
1250
  ///
1251
  /// \param isNamedArg - Whether the argument in question is a "named"
1252
  /// argument, as used in AMD64-ABI 3.5.7.
1253
  ///
1254
  /// \param IsRegCall - Whether the calling conversion is regcall.
1255
  ///
1256
  /// If a word is unused its result will be NoClass; if a type should
1257
  /// be passed in Memory then at least the classification of \arg Lo
1258
  /// will be Memory.
1259
  ///
1260
  /// The \arg Lo class will be NoClass iff the argument is ignored.
1261
  ///
1262
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
1263
  /// also be ComplexX87.
1264
  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
1265
                bool isNamedArg, bool IsRegCall = false) const;
1266
1267
  llvm::Type *GetByteVectorType(QualType Ty) const;
1268
  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
1269
                                 unsigned IROffset, QualType SourceTy,
1270
                                 unsigned SourceOffset) const;
1271
  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
1272
                                     unsigned IROffset, QualType SourceTy,
1273
                                     unsigned SourceOffset) const;
1274
1275
  /// getIndirectResult - Give a source type \arg Ty, return a suitable result
1276
  /// such that the argument will be returned in memory.
1277
  ABIArgInfo getIndirectReturnResult(QualType Ty) const;
1278
1279
  /// getIndirectResult - Give a source type \arg Ty, return a suitable result
1280
  /// such that the argument will be passed in memory.
1281
  ///
1282
  /// \param freeIntRegs - The number of free integer registers remaining
1283
  /// available.
1284
  ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
1285
1286
  ABIArgInfo classifyReturnType(QualType RetTy) const;
1287
1288
  ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs,
1289
                                  unsigned &neededInt, unsigned &neededSSE,
1290
                                  bool isNamedArg,
1291
                                  bool IsRegCall = false) const;
1292
1293
  ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
1294
                                       unsigned &NeededSSE,
1295
                                       unsigned &MaxVectorWidth) const;
1296
1297
  ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
1298
                                           unsigned &NeededSSE,
1299
                                           unsigned &MaxVectorWidth) const;
1300
1301
  bool IsIllegalVectorType(QualType Ty) const;
1302
1303
  /// The 0.98 ABI revision clarified a lot of ambiguities,
1304
  /// unfortunately in ways that were not always consistent with
1305
  /// certain previous compilers.  In particular, platforms which
1306
  /// required strict binary compatibility with older versions of GCC
1307
  /// may need to exempt themselves.
1308
0
  bool honorsRevision0_98() const {
1309
0
    return !getTarget().getTriple().isOSDarwin();
1310
0
  }
1311
1312
  /// GCC classifies <1 x long long> as SSE but some platform ABIs choose to
1313
  /// classify it as INTEGER (for compatibility with older clang compilers).
1314
0
  bool classifyIntegerMMXAsSSE() const {
1315
    // Clang <= 3.8 did not do this.
1316
0
    if (getContext().getLangOpts().getClangABICompat() <=
1317
0
        LangOptions::ClangABI::Ver3_8)
1318
0
      return false;
1319
1320
0
    const llvm::Triple &Triple = getTarget().getTriple();
1321
0
    if (Triple.isOSDarwin() || Triple.isPS() || Triple.isOSFreeBSD())
1322
0
      return false;
1323
0
    return true;
1324
0
  }
1325
1326
  // GCC classifies vectors of __int128 as memory.
1327
0
  bool passInt128VectorsInMem() const {
1328
    // Clang <= 9.0 did not do this.
1329
0
    if (getContext().getLangOpts().getClangABICompat() <=
1330
0
        LangOptions::ClangABI::Ver9)
1331
0
      return false;
1332
1333
0
    const llvm::Triple &T = getTarget().getTriple();
1334
0
    return T.isOSLinux() || T.isOSNetBSD();
1335
0
  }
1336
1337
  X86AVXABILevel AVXLevel;
1338
  // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
1339
  // 64-bit hardware.
1340
  bool Has64BitPointers;
1341
1342
public:
1343
  X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
1344
      : ABIInfo(CGT), AVXLevel(AVXLevel),
1345
46
        Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {}
1346
1347
0
  bool isPassedUsingAVXType(QualType type) const {
1348
0
    unsigned neededInt, neededSSE;
1349
    // The freeIntRegs argument doesn't matter here.
1350
0
    ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
1351
0
                                           /*isNamedArg*/true);
1352
0
    if (info.isDirect()) {
1353
0
      llvm::Type *ty = info.getCoerceToType();
1354
0
      if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
1355
0
        return vectorTy->getPrimitiveSizeInBits().getFixedValue() > 128;
1356
0
    }
1357
0
    return false;
1358
0
  }
1359
1360
  void computeInfo(CGFunctionInfo &FI) const override;
1361
1362
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
1363
                    QualType Ty) const override;
1364
  Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
1365
                      QualType Ty) const override;
1366
1367
0
  bool has64BitPointers() const {
1368
0
    return Has64BitPointers;
1369
0
  }
1370
};
1371
1372
/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
1373
class WinX86_64ABIInfo : public ABIInfo {
1374
public:
1375
  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
1376
      : ABIInfo(CGT), AVXLevel(AVXLevel),
1377
0
        IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}
1378
1379
  void computeInfo(CGFunctionInfo &FI) const override;
1380
1381
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
1382
                    QualType Ty) const override;
1383
1384
0
  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
1385
    // FIXME: Assumes vectorcall is in use.
1386
0
    return isX86VectorTypeForVectorCall(getContext(), Ty);
1387
0
  }
1388
1389
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
1390
0
                                         uint64_t NumMembers) const override {
1391
    // FIXME: Assumes vectorcall is in use.
1392
0
    return isX86VectorCallAggregateSmallEnough(NumMembers);
1393
0
  }
1394
1395
private:
1396
  ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType,
1397
                      bool IsVectorCall, bool IsRegCall) const;
1398
  ABIArgInfo reclassifyHvaArgForVectorCall(QualType Ty, unsigned &FreeSSERegs,
1399
                                           const ABIArgInfo &current) const;
1400
1401
  X86AVXABILevel AVXLevel;
1402
1403
  bool IsMingw64;
1404
};
1405
1406
class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
1407
public:
1408
  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
1409
46
      : TargetCodeGenInfo(std::make_unique<X86_64ABIInfo>(CGT, AVXLevel)) {
1410
46
    SwiftInfo =
1411
46
        std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/true);
1412
46
  }
1413
1414
  /// Disable tail call on x86-64. The epilogue code before the tail jump blocks
1415
  /// autoreleaseRV/retainRV and autoreleaseRV/unsafeClaimRV optimizations.
1416
0
  bool markARCOptimizedReturnCallsAsNoTail() const override { return true; }
1417
1418
0
  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
1419
0
    return 7;
1420
0
  }
1421
1422
  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
1423
0
                               llvm::Value *Address) const override {
1424
0
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
1425
1426
    // 0-15 are the 16 integer registers.
1427
    // 16 is %rip.
1428
0
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
1429
0
    return false;
1430
0
  }
1431
1432
  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
1433
                                  StringRef Constraint,
1434
0
                                  llvm::Type* Ty) const override {
1435
0
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
1436
0
  }
1437
1438
  bool isNoProtoCallVariadic(const CallArgList &args,
1439
0
                             const FunctionNoProtoType *fnType) const override {
1440
    // The default CC on x86-64 sets %al to the number of SSE
1441
    // registers used, and GCC sets this when calling an unprototyped
1442
    // function, so we override the default behavior.  However, don't do
1443
    // that when AVX types are involved: the ABI explicitly states it is
1444
    // undefined, and it doesn't work in practice because of how the ABI
1445
    // defines varargs anyway.
1446
0
    if (fnType->getCallConv() == CC_C) {
1447
0
      bool HasAVXType = false;
1448
0
      for (CallArgList::const_iterator
1449
0
             it = args.begin(), ie = args.end(); it != ie; ++it) {
1450
0
        if (getABIInfo<X86_64ABIInfo>().isPassedUsingAVXType(it->Ty)) {
1451
0
          HasAVXType = true;
1452
0
          break;
1453
0
        }
1454
0
      }
1455
1456
0
      if (!HasAVXType)
1457
0
        return true;
1458
0
    }
1459
1460
0
    return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
1461
0
  }
1462
1463
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
1464
0
                           CodeGen::CodeGenModule &CGM) const override {
1465
0
    if (GV->isDeclaration())
1466
0
      return;
1467
0
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
1468
0
      if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1469
0
        llvm::Function *Fn = cast<llvm::Function>(GV);
1470
0
        Fn->addFnAttr("stackrealign");
1471
0
      }
1472
1473
0
      addX86InterruptAttrs(FD, GV, CGM);
1474
0
    }
1475
0
  }
1476
1477
  void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
1478
                            const FunctionDecl *Caller,
1479
                            const FunctionDecl *Callee,
1480
                            const CallArgList &Args) const override;
1481
};
1482
} // namespace
1483
1484
static void initFeatureMaps(const ASTContext &Ctx,
1485
                            llvm::StringMap<bool> &CallerMap,
1486
                            const FunctionDecl *Caller,
1487
                            llvm::StringMap<bool> &CalleeMap,
1488
0
                            const FunctionDecl *Callee) {
1489
0
  if (CalleeMap.empty() && CallerMap.empty()) {
1490
    // The caller is potentially nullptr in the case where the call isn't in a
1491
    // function.  In this case, the getFunctionFeatureMap ensures we just get
1492
    // the TU level setting (since it cannot be modified by 'target').
1493
0
    Ctx.getFunctionFeatureMap(CallerMap, Caller);
1494
0
    Ctx.getFunctionFeatureMap(CalleeMap, Callee);
1495
0
  }
1496
0
}
1497
1498
static bool checkAVXParamFeature(DiagnosticsEngine &Diag,
1499
                                 SourceLocation CallLoc,
1500
                                 const llvm::StringMap<bool> &CallerMap,
1501
                                 const llvm::StringMap<bool> &CalleeMap,
1502
                                 QualType Ty, StringRef Feature,
1503
0
                                 bool IsArgument) {
1504
0
  bool CallerHasFeat = CallerMap.lookup(Feature);
1505
0
  bool CalleeHasFeat = CalleeMap.lookup(Feature);
1506
0
  if (!CallerHasFeat && !CalleeHasFeat)
1507
0
    return Diag.Report(CallLoc, diag::warn_avx_calling_convention)
1508
0
           << IsArgument << Ty << Feature;
1509
1510
  // Mixing calling conventions here is very clearly an error.
1511
0
  if (!CallerHasFeat || !CalleeHasFeat)
1512
0
    return Diag.Report(CallLoc, diag::err_avx_calling_convention)
1513
0
           << IsArgument << Ty << Feature;
1514
1515
  // Else, both caller and callee have the required feature, so there is no need
1516
  // to diagnose.
1517
0
  return false;
1518
0
}
1519
1520
static bool checkAVX512ParamFeature(DiagnosticsEngine &Diag,
1521
                                    SourceLocation CallLoc,
1522
                                    const llvm::StringMap<bool> &CallerMap,
1523
                                    const llvm::StringMap<bool> &CalleeMap,
1524
0
                                    QualType Ty, bool IsArgument) {
1525
0
  bool Caller256 = CallerMap.lookup("avx512f") && !CallerMap.lookup("evex512");
1526
0
  bool Callee256 = CalleeMap.lookup("avx512f") && !CalleeMap.lookup("evex512");
1527
1528
  // Forbid 512-bit or larger vector pass or return when we disabled ZMM
1529
  // instructions.
1530
0
  if (Caller256 || Callee256)
1531
0
    return Diag.Report(CallLoc, diag::err_avx_calling_convention)
1532
0
           << IsArgument << Ty << "evex512";
1533
1534
0
  return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty,
1535
0
                              "avx512f", IsArgument);
1536
0
}
1537
1538
static bool checkAVXParam(DiagnosticsEngine &Diag, ASTContext &Ctx,
1539
                          SourceLocation CallLoc,
1540
                          const llvm::StringMap<bool> &CallerMap,
1541
                          const llvm::StringMap<bool> &CalleeMap, QualType Ty,
1542
0
                          bool IsArgument) {
1543
0
  uint64_t Size = Ctx.getTypeSize(Ty);
1544
0
  if (Size > 256)
1545
0
    return checkAVX512ParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty,
1546
0
                                   IsArgument);
1547
1548
0
  if (Size > 128)
1549
0
    return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty, "avx",
1550
0
                                IsArgument);
1551
1552
0
  return false;
1553
0
}
1554
1555
void X86_64TargetCodeGenInfo::checkFunctionCallABI(
1556
    CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
1557
0
    const FunctionDecl *Callee, const CallArgList &Args) const {
1558
0
  llvm::StringMap<bool> CallerMap;
1559
0
  llvm::StringMap<bool> CalleeMap;
1560
0
  unsigned ArgIndex = 0;
1561
1562
  // We need to loop through the actual call arguments rather than the
1563
  // function's parameters, in case this is variadic.
1564
0
  for (const CallArg &Arg : Args) {
1565
    // The "avx" feature changes how vectors >128 in size are passed. "avx512f"
1566
    // additionally changes how vectors >256 in size are passed. Like GCC, we
1567
    // warn when a function is called with an argument where this will change.
1568
    // Unlike GCC, we also error when it is an obvious ABI mismatch, that is,
1569
    // the caller and callee features are mismatched.
1570
    // Unfortunately, we cannot do this diagnostic in SEMA, since the callee can
1571
    // change its ABI with attribute-target after this call.
1572
0
    if (Arg.getType()->isVectorType() &&
1573
0
        CGM.getContext().getTypeSize(Arg.getType()) > 128) {
1574
0
      initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
1575
0
      QualType Ty = Arg.getType();
1576
      // The CallArg seems to have desugared the type already, so for clearer
1577
      // diagnostics, replace it with the type in the FunctionDecl if possible.
1578
0
      if (ArgIndex < Callee->getNumParams())
1579
0
        Ty = Callee->getParamDecl(ArgIndex)->getType();
1580
1581
0
      if (checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap,
1582
0
                        CalleeMap, Ty, /*IsArgument*/ true))
1583
0
        return;
1584
0
    }
1585
0
    ++ArgIndex;
1586
0
  }
1587
1588
  // Check return always, as we don't have a good way of knowing in codegen
1589
  // whether this value is used, tail-called, etc.
1590
0
  if (Callee->getReturnType()->isVectorType() &&
1591
0
      CGM.getContext().getTypeSize(Callee->getReturnType()) > 128) {
1592
0
    initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
1593
0
    checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap,
1594
0
                  CalleeMap, Callee->getReturnType(),
1595
0
                  /*IsArgument*/ false);
1596
0
  }
1597
0
}
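The following is an editor-added sketch (not part of the covered source) of a call the check above diagnoses; the type alias and function names are hypothetical.

// A 256-bit vector crosses the call, so the caller's and callee's "avx"
// features are compared.
typedef double v4df __attribute__((vector_size(32)));

v4df callee(v4df);                        // assume neither side enables AVX
void caller(v4df x) { (void)callee(x); }  // warn_avx_calling_convention fires here
// If exactly one side enables AVX (e.g. __attribute__((target("avx"))) on the
// callee), the mismatch is reported as err_avx_calling_convention instead.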
1598
1599
0
std::string TargetCodeGenInfo::qualifyWindowsLibrary(StringRef Lib) {
1600
  // If the argument does not end in .lib, automatically add the suffix.
1601
  // If the argument contains a space, enclose it in quotes.
1602
  // This matches the behavior of MSVC.
1603
0
  bool Quote = Lib.contains(' ');
1604
0
  std::string ArgStr = Quote ? "\"" : "";
1605
0
  ArgStr += Lib;
1606
0
  if (!Lib.ends_with_insensitive(".lib") && !Lib.ends_with_insensitive(".a"))
1607
0
    ArgStr += ".lib";
1608
0
  ArgStr += Quote ? "\"" : "";
1609
0
  return ArgStr;
1610
0
}
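For reference, an editor-added illustration of the mapping implemented above (not part of the covered source):

// qualifyWindowsLibrary("foo")     -> "foo.lib"
// qualifyWindowsLibrary("foo.lib") -> "foo.lib"         (suffix already present)
// qualifyWindowsLibrary("libm.a")  -> "libm.a"          (.a is also accepted)
// qualifyWindowsLibrary("my lib")  -> "\"my lib.lib\""  (quoted because of the space)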
1611
1612
namespace {
1613
class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
1614
public:
1615
  WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
1616
        bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI,
1617
        unsigned NumRegisterParameters)
1618
    : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
1619
0
        Win32StructABI, NumRegisterParameters, false) {}
1620
1621
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
1622
                           CodeGen::CodeGenModule &CGM) const override;
1623
1624
  void getDependentLibraryOption(llvm::StringRef Lib,
1625
0
                                 llvm::SmallString<24> &Opt) const override {
1626
0
    Opt = "/DEFAULTLIB:";
1627
0
    Opt += qualifyWindowsLibrary(Lib);
1628
0
  }
1629
1630
  void getDetectMismatchOption(llvm::StringRef Name,
1631
                               llvm::StringRef Value,
1632
0
                               llvm::SmallString<32> &Opt) const override {
1633
0
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
1634
0
  }
1635
};
1636
} // namespace
1637
1638
void WinX86_32TargetCodeGenInfo::setTargetAttributes(
1639
0
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
1640
0
  X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
1641
0
  if (GV->isDeclaration())
1642
0
    return;
1643
0
  addStackProbeTargetAttributes(D, GV, CGM);
1644
0
}
1645
1646
namespace {
1647
class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
1648
public:
1649
  WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
1650
                             X86AVXABILevel AVXLevel)
1651
0
      : TargetCodeGenInfo(std::make_unique<WinX86_64ABIInfo>(CGT, AVXLevel)) {
1652
0
    SwiftInfo =
1653
0
        std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/true);
1654
0
  }
1655
1656
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
1657
                           CodeGen::CodeGenModule &CGM) const override;
1658
1659
0
  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
1660
0
    return 7;
1661
0
  }
1662
1663
  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
1664
0
                               llvm::Value *Address) const override {
1665
0
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
1666
1667
    // 0-15 are the 16 integer registers.
1668
    // 16 is %rip.
1669
0
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
1670
0
    return false;
1671
0
  }
1672
1673
  void getDependentLibraryOption(llvm::StringRef Lib,
1674
0
                                 llvm::SmallString<24> &Opt) const override {
1675
0
    Opt = "/DEFAULTLIB:";
1676
0
    Opt += qualifyWindowsLibrary(Lib);
1677
0
  }
1678
1679
  void getDetectMismatchOption(llvm::StringRef Name,
1680
                               llvm::StringRef Value,
1681
0
                               llvm::SmallString<32> &Opt) const override {
1682
0
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
1683
0
  }
1684
};
1685
} // namespace
1686
1687
void WinX86_64TargetCodeGenInfo::setTargetAttributes(
1688
0
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
1689
0
  TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
1690
0
  if (GV->isDeclaration())
1691
0
    return;
1692
0
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
1693
0
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1694
0
      llvm::Function *Fn = cast<llvm::Function>(GV);
1695
0
      Fn->addFnAttr("stackrealign");
1696
0
    }
1697
1698
0
    addX86InterruptAttrs(FD, GV, CGM);
1699
0
  }
1700
1701
0
  addStackProbeTargetAttributes(D, GV, CGM);
1702
0
}
1703
1704
void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
1705
0
                              Class &Hi) const {
1706
  // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
1707
  //
1708
  // (a) If one of the classes is Memory, the whole argument is passed in
1709
  //     memory.
1710
  //
1711
  // (b) If X87UP is not preceded by X87, the whole argument is passed in
1712
  //     memory.
1713
  //
1714
  // (c) If the size of the aggregate exceeds two eightbytes and the first
1715
  //     eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
1716
  //     argument is passed in memory. NOTE: This is necessary to keep the
1717
  //     ABI working for processors that don't support the __m256 type.
1718
  //
1719
  // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
1720
  //
1721
  // Some of these are enforced by the merging logic.  Others can arise
1722
  // only with unions; for example:
1723
  //   union { _Complex double; unsigned; }
1724
  //
1725
  // Note that clauses (b) and (c) were added in 0.98.
1726
  //
1727
0
  if (Hi == Memory)
1728
0
    Lo = Memory;
1729
0
  if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
1730
0
    Lo = Memory;
1731
0
  if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
1732
0
    Lo = Memory;
1733
0
  if (Hi == SSEUp && Lo != SSE)
1734
0
    Hi = SSE;
1735
0
}
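An editor-added worked example of clean-up rule (b), assuming the usual Linux x86-64 layout; the union name is hypothetical:

// For the union below, the field merge yields Lo = INTEGER (the int merged
// over the long double's X87) while Hi stays X87UP from the long double.
// X87UP is then not preceded by X87, so postMerge demotes the whole argument
// to MEMORY on targets that honor revision 0.98 (everywhere except Darwin).
union U { long double ld; int i; };   // passed in memory on Linux, for example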
1736
1737
0
X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
1738
  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
1739
  // classified recursively so that always two fields are
1740
  // considered. The resulting class is calculated according to
1741
  // the classes of the fields in the eightbyte:
1742
  //
1743
  // (a) If both classes are equal, this is the resulting class.
1744
  //
1745
  // (b) If one of the classes is NO_CLASS, the resulting class is
1746
  // the other class.
1747
  //
1748
  // (c) If one of the classes is MEMORY, the result is the MEMORY
1749
  // class.
1750
  //
1751
  // (d) If one of the classes is INTEGER, the result is the
1752
  // INTEGER.
1753
  //
1754
  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
1755
  // MEMORY is used as class.
1756
  //
1757
  // (f) Otherwise class SSE is used.
1758
1759
  // Accum should never be memory (we should have returned) or
1760
  // ComplexX87 (because this cannot be passed in a structure).
1761
0
  assert((Accum != Memory && Accum != ComplexX87) &&
1762
0
         "Invalid accumulated classification during merge.");
1763
0
  if (Accum == Field || Field == NoClass)
1764
0
    return Accum;
1765
0
  if (Field == Memory)
1766
0
    return Memory;
1767
0
  if (Accum == NoClass)
1768
0
    return Field;
1769
0
  if (Accum == Integer || Field == Integer)
1770
0
    return Integer;
1771
0
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
1772
0
      Accum == X87 || Accum == X87Up)
1773
0
    return Memory;
1774
0
  return SSE;
1775
0
}
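An editor-added example (hypothetical struct name) of the merge rules within a single eightbyte:

// Both fields of the struct below land in the first eightbyte, so
//   merge(SSE /*float*/, INTEGER /*int*/) == INTEGER          (rule d)
// and the whole struct is classified INTEGER, i.e. passed in one GPR.
struct FI { float f; int i; };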
1776
1777
void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo,
1778
0
                             Class &Hi, bool isNamedArg, bool IsRegCall) const {
1779
  // FIXME: This code can be simplified by introducing a simple value class for
1780
  // Class pairs with appropriate constructor methods for the various
1781
  // situations.
1782
1783
  // FIXME: Some of the split computations are wrong; unaligned vectors
1784
  // shouldn't be passed in registers for example, so there is no chance they
1785
  // can straddle an eightbyte. Verify & simplify.
1786
1787
0
  Lo = Hi = NoClass;
1788
1789
0
  Class &Current = OffsetBase < 64 ? Lo : Hi;
1790
0
  Current = Memory;
1791
1792
0
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
1793
0
    BuiltinType::Kind k = BT->getKind();
1794
1795
0
    if (k == BuiltinType::Void) {
1796
0
      Current = NoClass;
1797
0
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
1798
0
      Lo = Integer;
1799
0
      Hi = Integer;
1800
0
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
1801
0
      Current = Integer;
1802
0
    } else if (k == BuiltinType::Float || k == BuiltinType::Double ||
1803
0
               k == BuiltinType::Float16 || k == BuiltinType::BFloat16) {
1804
0
      Current = SSE;
1805
0
    } else if (k == BuiltinType::Float128) {
1806
0
      Lo = SSE;
1807
0
      Hi = SSEUp;
1808
0
    } else if (k == BuiltinType::LongDouble) {
1809
0
      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
1810
0
      if (LDF == &llvm::APFloat::IEEEquad()) {
1811
0
        Lo = SSE;
1812
0
        Hi = SSEUp;
1813
0
      } else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
1814
0
        Lo = X87;
1815
0
        Hi = X87Up;
1816
0
      } else if (LDF == &llvm::APFloat::IEEEdouble()) {
1817
0
        Current = SSE;
1818
0
      } else
1819
0
        llvm_unreachable("unexpected long double representation!");
1820
0
    }
1821
    // FIXME: _Decimal32 and _Decimal64 are SSE.
1822
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
1823
0
    return;
1824
0
  }
1825
1826
0
  if (const EnumType *ET = Ty->getAs<EnumType>()) {
1827
    // Classify the underlying integer type.
1828
0
    classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
1829
0
    return;
1830
0
  }
1831
1832
0
  if (Ty->hasPointerRepresentation()) {
1833
0
    Current = Integer;
1834
0
    return;
1835
0
  }
1836
1837
0
  if (Ty->isMemberPointerType()) {
1838
0
    if (Ty->isMemberFunctionPointerType()) {
1839
0
      if (Has64BitPointers) {
1840
        // If Has64BitPointers, this is an {i64, i64}, so classify both
1841
        // Lo and Hi now.
1842
0
        Lo = Hi = Integer;
1843
0
      } else {
1844
        // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that
1845
        // straddles an eightbyte boundary, Hi should be classified as well.
1846
0
        uint64_t EB_FuncPtr = (OffsetBase) / 64;
1847
0
        uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
1848
0
        if (EB_FuncPtr != EB_ThisAdj) {
1849
0
          Lo = Hi = Integer;
1850
0
        } else {
1851
0
          Current = Integer;
1852
0
        }
1853
0
      }
1854
0
    } else {
1855
0
      Current = Integer;
1856
0
    }
1857
0
    return;
1858
0
  }
1859
1860
0
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
1861
0
    uint64_t Size = getContext().getTypeSize(VT);
1862
0
    if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
1863
      // gcc passes the following as integer:
1864
      // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float>
1865
      // 2 bytes - <2 x char>, <1 x short>
1866
      // 1 byte  - <1 x char>
1867
0
      Current = Integer;
1868
1869
      // If this type crosses an eightbyte boundary, it should be
1870
      // split.
1871
0
      uint64_t EB_Lo = (OffsetBase) / 64;
1872
0
      uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
1873
0
      if (EB_Lo != EB_Hi)
1874
0
        Hi = Lo;
1875
0
    } else if (Size == 64) {
1876
0
      QualType ElementType = VT->getElementType();
1877
1878
      // gcc passes <1 x double> in memory. :(
1879
0
      if (ElementType->isSpecificBuiltinType(BuiltinType::Double))
1880
0
        return;
1881
1882
      // gcc passes <1 x long long> as SSE but clang used to unconditionally
1883
      // pass them as integer.  For platforms where clang is the de facto
1884
      // platform compiler, we must continue to use integer.
1885
0
      if (!classifyIntegerMMXAsSSE() &&
1886
0
          (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) ||
1887
0
           ElementType->isSpecificBuiltinType(BuiltinType::ULongLong) ||
1888
0
           ElementType->isSpecificBuiltinType(BuiltinType::Long) ||
1889
0
           ElementType->isSpecificBuiltinType(BuiltinType::ULong)))
1890
0
        Current = Integer;
1891
0
      else
1892
0
        Current = SSE;
1893
1894
      // If this type crosses an eightbyte boundary, it should be
1895
      // split.
1896
0
      if (OffsetBase && OffsetBase != 64)
1897
0
        Hi = Lo;
1898
0
    } else if (Size == 128 ||
1899
0
               (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
1900
0
      QualType ElementType = VT->getElementType();
1901
1902
      // gcc passes 256 and 512 bit <X x __int128> vectors in memory. :(
1903
0
      if (passInt128VectorsInMem() && Size != 128 &&
1904
0
          (ElementType->isSpecificBuiltinType(BuiltinType::Int128) ||
1905
0
           ElementType->isSpecificBuiltinType(BuiltinType::UInt128)))
1906
0
        return;
1907
1908
      // Arguments of 256-bits are split into four eightbyte chunks. The
1909
      // least significant one belongs to class SSE and all the others to class
1910
      // SSEUP. The original Lo and Hi design considers that types can't be
1911
      // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
1912
      // This design isn't correct for 256 bits, but since there are no cases
1913
      // where the upper parts would need to be inspected, avoid adding
1914
      // complexity and just consider Hi to match the 64-256 part.
1915
      //
1916
      // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
1917
      // registers if they are "named", i.e. not part of the "..." of a
1918
      // variadic function.
1919
      //
1920
      // Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are
1921
      // split into eight eightbyte chunks, one SSE and seven SSEUP.
1922
0
      Lo = SSE;
1923
0
      Hi = SSEUp;
1924
0
    }
1925
0
    return;
1926
0
  }
1927
1928
0
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
1929
0
    QualType ET = getContext().getCanonicalType(CT->getElementType());
1930
1931
0
    uint64_t Size = getContext().getTypeSize(Ty);
1932
0
    if (ET->isIntegralOrEnumerationType()) {
1933
0
      if (Size <= 64)
1934
0
        Current = Integer;
1935
0
      else if (Size <= 128)
1936
0
        Lo = Hi = Integer;
1937
0
    } else if (ET->isFloat16Type() || ET == getContext().FloatTy ||
1938
0
               ET->isBFloat16Type()) {
1939
0
      Current = SSE;
1940
0
    } else if (ET == getContext().DoubleTy) {
1941
0
      Lo = Hi = SSE;
1942
0
    } else if (ET == getContext().LongDoubleTy) {
1943
0
      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
1944
0
      if (LDF == &llvm::APFloat::IEEEquad())
1945
0
        Current = Memory;
1946
0
      else if (LDF == &llvm::APFloat::x87DoubleExtended())
1947
0
        Current = ComplexX87;
1948
0
      else if (LDF == &llvm::APFloat::IEEEdouble())
1949
0
        Lo = Hi = SSE;
1950
0
      else
1951
0
        llvm_unreachable("unexpected long double representation!");
1952
0
    }
1953
1954
    // If this complex type crosses an eightbyte boundary then it
1955
    // should be split.
1956
0
    uint64_t EB_Real = (OffsetBase) / 64;
1957
0
    uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
1958
0
    if (Hi == NoClass && EB_Real != EB_Imag)
1959
0
      Hi = Lo;
1960
1961
0
    return;
1962
0
  }
1963
1964
0
  if (const auto *EITy = Ty->getAs<BitIntType>()) {
1965
0
    if (EITy->getNumBits() <= 64)
1966
0
      Current = Integer;
1967
0
    else if (EITy->getNumBits() <= 128)
1968
0
      Lo = Hi = Integer;
1969
    // Larger values need to get passed in memory.
1970
0
    return;
1971
0
  }
1972
1973
0
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
1974
    // Arrays are treated like structures.
1975
1976
0
    uint64_t Size = getContext().getTypeSize(Ty);
1977
1978
    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
1979
    // than eight eightbytes, ..., it has class MEMORY.
1980
    // The regcall ABI doesn't place a size limit on an object; the only limit
1981
    // is the number of free registers, which is checked in computeInfo.
1982
0
    if (!IsRegCall && Size > 512)
1983
0
      return;
1984
1985
    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
1986
    // fields, it has class MEMORY.
1987
    //
1988
    // Only need to check alignment of array base.
1989
0
    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
1990
0
      return;
1991
1992
    // Otherwise implement simplified merge. We could be smarter about
1993
    // this, but it isn't worth it and would be harder to verify.
1994
0
    Current = NoClass;
1995
0
    uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
1996
0
    uint64_t ArraySize = AT->getSize().getZExtValue();
1997
1998
    // The only case a 256-bit wide vector could be used is when the array
1999
    // contains a single 256-bit element. Since Lo and Hi logic isn't extended
2000
    // to work for sizes wider than 128, early check and fallback to memory.
2001
    //
2002
0
    if (Size > 128 &&
2003
0
        (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
2004
0
      return;
2005
2006
0
    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
2007
0
      Class FieldLo, FieldHi;
2008
0
      classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
2009
0
      Lo = merge(Lo, FieldLo);
2010
0
      Hi = merge(Hi, FieldHi);
2011
0
      if (Lo == Memory || Hi == Memory)
2012
0
        break;
2013
0
    }
2014
2015
0
    postMerge(Size, Lo, Hi);
2016
0
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
2017
0
    return;
2018
0
  }
2019
2020
0
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
2021
0
    uint64_t Size = getContext().getTypeSize(Ty);
2022
2023
    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
2024
    // than eight eightbytes, ..., it has class MEMORY.
2025
0
    if (Size > 512)
2026
0
      return;
2027
2028
    // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
2029
    // copy constructor or a non-trivial destructor, it is passed by invisible
2030
    // reference.
2031
0
    if (getRecordArgABI(RT, getCXXABI()))
2032
0
      return;
2033
2034
0
    const RecordDecl *RD = RT->getDecl();
2035
2036
    // Assume variable sized types are passed in memory.
2037
0
    if (RD->hasFlexibleArrayMember())
2038
0
      return;
2039
2040
0
    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
2041
2042
    // Reset Lo class, this will be recomputed.
2043
0
    Current = NoClass;
2044
2045
    // If this is a C++ record, classify the bases first.
2046
0
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2047
0
      for (const auto &I : CXXRD->bases()) {
2048
0
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2049
0
               "Unexpected base class!");
2050
0
        const auto *Base =
2051
0
            cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
2052
2053
        // Classify this field.
2054
        //
2055
        // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
2056
        // single eightbyte, each is classified separately. Each eightbyte gets
2057
        // initialized to class NO_CLASS.
2058
0
        Class FieldLo, FieldHi;
2059
0
        uint64_t Offset =
2060
0
          OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
2061
0
        classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
2062
0
        Lo = merge(Lo, FieldLo);
2063
0
        Hi = merge(Hi, FieldHi);
2064
0
        if (Lo == Memory || Hi == Memory) {
2065
0
          postMerge(Size, Lo, Hi);
2066
0
          return;
2067
0
        }
2068
0
      }
2069
0
    }
2070
2071
    // Classify the fields one at a time, merging the results.
2072
0
    unsigned idx = 0;
2073
0
    bool UseClang11Compat = getContext().getLangOpts().getClangABICompat() <=
2074
0
                                LangOptions::ClangABI::Ver11 ||
2075
0
                            getContext().getTargetInfo().getTriple().isPS();
2076
0
    bool IsUnion = RT->isUnionType() && !UseClang11Compat;
2077
2078
0
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
2079
0
           i != e; ++i, ++idx) {
2080
0
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
2081
0
      bool BitField = i->isBitField();
2082
2083
      // Ignore padding bit-fields.
2084
0
      if (BitField && i->isUnnamedBitfield())
2085
0
        continue;
2086
2087
      // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
2088
      // eight eightbytes, or it contains unaligned fields, it has class MEMORY.
2089
      //
2090
      // The only case a 256-bit or a 512-bit wide vector could be used is when
2091
      // the struct contains a single 256-bit or 512-bit element. Early check
2092
      // and fallback to memory.
2093
      //
2094
      // FIXME: Extend the Lo and Hi logic properly to work for sizes wider
2095
      // than 128.
2096
0
      if (Size > 128 &&
2097
0
          ((!IsUnion && Size != getContext().getTypeSize(i->getType())) ||
2098
0
           Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
2099
0
        Lo = Memory;
2100
0
        postMerge(Size, Lo, Hi);
2101
0
        return;
2102
0
      }
2103
      // Note, skip this test for bit-fields, see below.
2104
0
      if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
2105
0
        Lo = Memory;
2106
0
        postMerge(Size, Lo, Hi);
2107
0
        return;
2108
0
      }
2109
2110
      // Classify this field.
2111
      //
2112
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
2113
      // exceeds a single eightbyte, each is classified
2114
      // separately. Each eightbyte gets initialized to class
2115
      // NO_CLASS.
2116
0
      Class FieldLo, FieldHi;
2117
2118
      // Bit-fields require special handling, they do not force the
2119
      // structure to be passed in memory even if unaligned, and
2120
      // therefore they can straddle an eightbyte.
2121
0
      if (BitField) {
2122
0
        assert(!i->isUnnamedBitfield());
2123
0
        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
2124
0
        uint64_t Size = i->getBitWidthValue(getContext());
2125
2126
0
        uint64_t EB_Lo = Offset / 64;
2127
0
        uint64_t EB_Hi = (Offset + Size - 1) / 64;
2128
2129
0
        if (EB_Lo) {
2130
0
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
2131
0
          FieldLo = NoClass;
2132
0
          FieldHi = Integer;
2133
0
        } else {
2134
0
          FieldLo = Integer;
2135
0
          FieldHi = EB_Hi ? Integer : NoClass;
2136
0
        }
2137
0
      } else
2138
0
        classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
2139
0
      Lo = merge(Lo, FieldLo);
2140
0
      Hi = merge(Hi, FieldHi);
2141
0
      if (Lo == Memory || Hi == Memory)
2142
0
        break;
2143
0
    }
2144
2145
0
    postMerge(Size, Lo, Hi);
2146
0
  }
2147
0
}
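Two editor-added examples of what this routine produces (the struct name is hypothetical):

// struct DI { double d; int i; };  ->  Lo = SSE, Hi = INTEGER
//   (the double fills the first eightbyte, the int the second), so the struct
//   is passed in one XMM register plus one GPR.
// __int128                         ->  Lo = INTEGER, Hi = INTEGER
//   (handled explicitly above), so it occupies two GPRs.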
2148
2149
0
ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
2150
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
2151
  // place naturally.
2152
0
  if (!isAggregateTypeForABI(Ty)) {
2153
    // Treat an enum type as its underlying type.
2154
0
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2155
0
      Ty = EnumTy->getDecl()->getIntegerType();
2156
2157
0
    if (Ty->isBitIntType())
2158
0
      return getNaturalAlignIndirect(Ty);
2159
2160
0
    return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
2161
0
                                              : ABIArgInfo::getDirect());
2162
0
  }
2163
2164
0
  return getNaturalAlignIndirect(Ty);
2165
0
}
2166
2167
0
bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
2168
0
  if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
2169
0
    uint64_t Size = getContext().getTypeSize(VecTy);
2170
0
    unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
2171
0
    if (Size <= 64 || Size > LargestVector)
2172
0
      return true;
2173
0
    QualType EltTy = VecTy->getElementType();
2174
0
    if (passInt128VectorsInMem() &&
2175
0
        (EltTy->isSpecificBuiltinType(BuiltinType::Int128) ||
2176
0
         EltTy->isSpecificBuiltinType(BuiltinType::UInt128)))
2177
0
      return true;
2178
0
  }
2179
2180
0
  return false;
2181
0
}
2182
2183
ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
2184
0
                                            unsigned freeIntRegs) const {
2185
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
2186
  // place naturally.
2187
  //
2188
  // This assumption is optimistic, as there could be free registers available
2189
  // when we need to pass this argument in memory, and LLVM could try to pass
2190
  // the argument in the free register. This does not seem to happen currently,
2191
  // but this code would be much safer if we could mark the argument with
2192
  // 'onstack'. See PR12193.
2193
0
  if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty) &&
2194
0
      !Ty->isBitIntType()) {
2195
    // Treat an enum type as its underlying type.
2196
0
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2197
0
      Ty = EnumTy->getDecl()->getIntegerType();
2198
2199
0
    return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
2200
0
                                              : ABIArgInfo::getDirect());
2201
0
  }
2202
2203
0
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
2204
0
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
2205
2206
  // Compute the byval alignment. We specify the alignment of the byval in all
2207
  // cases so that the mid-level optimizer knows the alignment of the byval.
2208
0
  unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
2209
2210
  // Attempt to avoid passing indirect results using byval when possible. This
2211
  // is important for good codegen.
2212
  //
2213
  // We do this by coercing the value into a scalar type which the backend can
2214
  // handle naturally (i.e., without using byval).
2215
  //
2216
  // For simplicity, we currently only do this when we have exhausted all of the
2217
  // free integer registers. Doing this when there are free integer registers
2218
  // would require more care, as we would have to ensure that the coerced value
2219
  // did not claim the unused register. That would require either reordering the
2220
  // arguments to the function (so that any subsequent inreg values came first),
2221
  // or only doing this optimization when there were no following arguments that
2222
  // might be inreg.
2223
  //
2224
  // We currently expect it to be rare (particularly in well written code) for
2225
  // arguments to be passed on the stack when there are still free integer
2226
  // registers available (this would typically imply large structs being passed
2227
  // by value), so this seems like a fair tradeoff for now.
2228
  //
2229
  // We can revisit this if the backend grows support for 'onstack' parameter
2230
  // attributes. See PR12193.
2231
0
  if (freeIntRegs == 0) {
2232
0
    uint64_t Size = getContext().getTypeSize(Ty);
2233
2234
    // If this type fits in an eightbyte, coerce it into the matching integral
2235
    // type, which will end up on the stack (with alignment 8).
2236
0
    if (Align == 8 && Size <= 64)
2237
0
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2238
0
                                                          Size));
2239
0
  }
2240
2241
0
  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align));
2242
0
}
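An editor-added illustration of the freeIntRegs == 0 path above (the struct name is hypothetical):

// Once the six integer argument registers are exhausted, the 8-byte aggregate
struct II { int a; int b; };
// is not passed byval: Align == 8 and Size <= 64, so it is coerced to a single
// i64 and simply lands on the stack with 8-byte alignment.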
2243
2244
/// The ABI specifies that a value should be passed in a full vector XMM/YMM
2245
/// register. Pick an LLVM IR type that will be passed as a vector register.
2246
0
llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
2247
  // Wrapper structs/arrays that only contain vectors are passed just like
2248
  // vectors; strip them off if present.
2249
0
  if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
2250
0
    Ty = QualType(InnerTy, 0);
2251
2252
0
  llvm::Type *IRType = CGT.ConvertType(Ty);
2253
0
  if (isa<llvm::VectorType>(IRType)) {
2254
    // Don't pass vXi128 vectors in their native type, the backend can't
2255
    // legalize them.
2256
0
    if (passInt128VectorsInMem() &&
2257
0
        cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy(128)) {
2258
      // Use a vXi64 vector.
2259
0
      uint64_t Size = getContext().getTypeSize(Ty);
2260
0
      return llvm::FixedVectorType::get(llvm::Type::getInt64Ty(getVMContext()),
2261
0
                                        Size / 64);
2262
0
    }
2263
2264
0
    return IRType;
2265
0
  }
2266
2267
0
  if (IRType->getTypeID() == llvm::Type::FP128TyID)
2268
0
    return IRType;
2269
2270
  // We couldn't find the preferred IR vector type for 'Ty'.
2271
0
  uint64_t Size = getContext().getTypeSize(Ty);
2272
0
  assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");
2273
2274
2275
  // Return a LLVM IR vector type based on the size of 'Ty'.
2276
0
  return llvm::FixedVectorType::get(llvm::Type::getDoubleTy(getVMContext()),
2277
0
                                    Size / 64);
2278
0
}
2279
2280
/// BitsContainNoUserData - Return true if the specified [start,end) bit range
2281
/// is known to either be off the end of the specified type or to be in
2282
/// alignment padding.  The user type specified is known to be at most 128 bits
2283
/// in size, and have passed through X86_64ABIInfo::classify with a successful
2284
/// classification that put one of the two halves in the INTEGER class.
2285
///
2286
/// It is conservatively correct to return false.
2287
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
2288
0
                                  unsigned EndBit, ASTContext &Context) {
2289
  // If the bits being queried are off the end of the type, there is no user
2290
  // data hiding here.  This handles analysis of builtins, vectors and other
2291
  // types that don't contain interesting padding.
2292
0
  unsigned TySize = (unsigned)Context.getTypeSize(Ty);
2293
0
  if (TySize <= StartBit)
2294
0
    return true;
2295
2296
0
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
2297
0
    unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
2298
0
    unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
2299
2300
    // Check each element to see if the element overlaps with the queried range.
2301
0
    for (unsigned i = 0; i != NumElts; ++i) {
2302
      // If the element is after the span we care about, then we're done.
2303
0
      unsigned EltOffset = i*EltSize;
2304
0
      if (EltOffset >= EndBit) break;
2305
2306
0
      unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
2307
0
      if (!BitsContainNoUserData(AT->getElementType(), EltStart,
2308
0
                                 EndBit-EltOffset, Context))
2309
0
        return false;
2310
0
    }
2311
    // If it overlaps no elements, then it is safe to process as padding.
2312
0
    return true;
2313
0
  }
2314
2315
0
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
2316
0
    const RecordDecl *RD = RT->getDecl();
2317
0
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
2318
2319
    // If this is a C++ record, check the bases first.
2320
0
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2321
0
      for (const auto &I : CXXRD->bases()) {
2322
0
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2323
0
               "Unexpected base class!");
2324
0
        const auto *Base =
2325
0
            cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
2326
2327
        // If the base is after the span we care about, ignore it.
2328
0
        unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
2329
0
        if (BaseOffset >= EndBit) continue;
2330
2331
0
        unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
2332
0
        if (!BitsContainNoUserData(I.getType(), BaseStart,
2333
0
                                   EndBit-BaseOffset, Context))
2334
0
          return false;
2335
0
      }
2336
0
    }
2337
2338
    // Verify that no field has data that overlaps the region of interest.  Yes
2339
    // this could be sped up a lot by being smarter about queried fields,
2340
    // however we're only looking at structs up to 16 bytes, so we don't care
2341
    // much.
2342
0
    unsigned idx = 0;
2343
0
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
2344
0
         i != e; ++i, ++idx) {
2345
0
      unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
2346
2347
      // If we found a field after the region we care about, then we're done.
2348
0
      if (FieldOffset >= EndBit) break;
2349
2350
0
      unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
2351
0
      if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
2352
0
                                 Context))
2353
0
        return false;
2354
0
    }
2355
2356
    // If nothing in this record overlapped the area of interest, then we're
2357
    // clean.
2358
0
    return true;
2359
0
  }
2360
2361
0
  return false;
2362
0
}
2363
2364
/// getFPTypeAtOffset - Return a floating point type at the specified offset.
2365
static llvm::Type *getFPTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
2366
0
                                     const llvm::DataLayout &TD) {
2367
0
  if (IROffset == 0 && IRType->isFloatingPointTy())
2368
0
    return IRType;
2369
2370
  // If this is a struct, recurse into the field at the specified offset.
2371
0
  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2372
0
    if (!STy->getNumContainedTypes())
2373
0
      return nullptr;
2374
2375
0
    const llvm::StructLayout *SL = TD.getStructLayout(STy);
2376
0
    unsigned Elt = SL->getElementContainingOffset(IROffset);
2377
0
    IROffset -= SL->getElementOffset(Elt);
2378
0
    return getFPTypeAtOffset(STy->getElementType(Elt), IROffset, TD);
2379
0
  }
2380
2381
  // If this is an array, recurse into the field at the specified offset.
2382
0
  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2383
0
    llvm::Type *EltTy = ATy->getElementType();
2384
0
    unsigned EltSize = TD.getTypeAllocSize(EltTy);
2385
0
    IROffset -= IROffset / EltSize * EltSize;
2386
0
    return getFPTypeAtOffset(EltTy, IROffset, TD);
2387
0
  }
2388
2389
0
  return nullptr;
2390
0
}
2391
2392
/// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
2393
/// low 8 bytes of an XMM register, corresponding to the SSE class.
2394
llvm::Type *X86_64ABIInfo::
2395
GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
2396
0
                   QualType SourceTy, unsigned SourceOffset) const {
2397
0
  const llvm::DataLayout &TD = getDataLayout();
2398
0
  unsigned SourceSize =
2399
0
      (unsigned)getContext().getTypeSize(SourceTy) / 8 - SourceOffset;
2400
0
  llvm::Type *T0 = getFPTypeAtOffset(IRType, IROffset, TD);
2401
0
  if (!T0 || T0->isDoubleTy())
2402
0
    return llvm::Type::getDoubleTy(getVMContext());
2403
2404
  // Get the adjacent FP type.
2405
0
  llvm::Type *T1 = nullptr;
2406
0
  unsigned T0Size = TD.getTypeAllocSize(T0);
2407
0
  if (SourceSize > T0Size)
2408
0
      T1 = getFPTypeAtOffset(IRType, IROffset + T0Size, TD);
2409
0
  if (T1 == nullptr) {
2410
    // Check if IRType is a half/bfloat + float pair; the float will be at IROffset+4 due
2411
    // to its alignment.
2412
0
    if (T0->is16bitFPTy() && SourceSize > 4)
2413
0
      T1 = getFPTypeAtOffset(IRType, IROffset + 4, TD);
2414
    // If we can't get a second FP type, return a simple half or float.
2415
    // avx512fp16-abi.c:pr51813_2 shows it works to return float for
2416
    // {float, i8} too.
2417
0
    if (T1 == nullptr)
2418
0
      return T0;
2419
0
  }
2420
2421
0
  if (T0->isFloatTy() && T1->isFloatTy())
2422
0
    return llvm::FixedVectorType::get(T0, 2);
2423
2424
0
  if (T0->is16bitFPTy() && T1->is16bitFPTy()) {
2425
0
    llvm::Type *T2 = nullptr;
2426
0
    if (SourceSize > 4)
2427
0
      T2 = getFPTypeAtOffset(IRType, IROffset + 4, TD);
2428
0
    if (T2 == nullptr)
2429
0
      return llvm::FixedVectorType::get(T0, 2);
2430
0
    return llvm::FixedVectorType::get(T0, 4);
2431
0
  }
2432
2433
0
  if (T0->is16bitFPTy() || T1->is16bitFPTy())
2434
0
    return llvm::FixedVectorType::get(llvm::Type::getHalfTy(getVMContext()), 4);
2435
2436
0
  return llvm::Type::getDoubleTy(getVMContext());
2437
0
}
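A few editor-added examples of the coercions chosen above:

// struct { float a; float b; }     -> <2 x float>  (two floats in one eightbyte)
// struct { double d; }             -> double
// struct { _Float16 h; float f; }  -> <4 x half>   (mixed 16-bit FP and float)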
2438
2439
2440
/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
2441
/// an 8-byte GPR.  This means that we either have a scalar or we are talking
2442
/// about the high or low part of an up-to-16-byte struct.  This routine picks
2443
/// the best LLVM IR type to represent this, which may be i64 or may be anything
2444
/// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
2445
/// etc).
2446
///
2447
/// IRType is an LLVM IR type that corresponds to (part of) the IR type for
2448
/// the source type.  IROffset is an offset in bytes into the LLVM IR type that
2449
/// the 8-byte value references.
2450
///
2451
/// SourceTy is the source-level type for the entire argument.  SourceOffset is
2452
/// an offset into this that we're processing (which is always either 0 or 8).
2453
///
2454
llvm::Type *X86_64ABIInfo::
2455
GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
2456
0
                       QualType SourceTy, unsigned SourceOffset) const {
2457
  // If we're dealing with an un-offset LLVM IR type, then it means that we're
2458
  // returning an 8-byte unit starting with it.  See if we can safely use it.
2459
0
  if (IROffset == 0) {
2460
    // Pointers and int64's always fill the 8-byte unit.
2461
0
    if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
2462
0
        IRType->isIntegerTy(64))
2463
0
      return IRType;
2464
2465
    // If we have a 1/2/4-byte integer, we can use it only if the rest of the
2466
    // goodness in the source type is just tail padding.  This is allowed to
2467
    // kick in for struct {double,int} on the int, but not on
2468
    // struct{double,int,int} because we wouldn't return the second int.  We
2469
    // have to do this analysis on the source type because we can't depend on
2470
    // unions being lowered a specific way etc.
2471
0
    if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
2472
0
        IRType->isIntegerTy(32) ||
2473
0
        (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
2474
0
      unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
2475
0
          cast<llvm::IntegerType>(IRType)->getBitWidth();
2476
2477
0
      if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
2478
0
                                SourceOffset*8+64, getContext()))
2479
0
        return IRType;
2480
0
    }
2481
0
  }
2482
2483
0
  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2484
    // If this is a struct, recurse into the field at the specified offset.
2485
0
    const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
2486
0
    if (IROffset < SL->getSizeInBytes()) {
2487
0
      unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
2488
0
      IROffset -= SL->getElementOffset(FieldIdx);
2489
2490
0
      return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
2491
0
                                    SourceTy, SourceOffset);
2492
0
    }
2493
0
  }
2494
2495
0
  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2496
0
    llvm::Type *EltTy = ATy->getElementType();
2497
0
    unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
2498
0
    unsigned EltOffset = IROffset/EltSize*EltSize;
2499
0
    return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
2500
0
                                  SourceOffset);
2501
0
  }
2502
2503
  // Okay, we don't have any better idea of what to pass, so we pass this in an
2504
  // integer register that isn't too big to fit the rest of the struct.
2505
0
  unsigned TySizeInBytes =
2506
0
    (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
2507
2508
0
  assert(TySizeInBytes != SourceOffset && "Empty field?");
2509
2510
  // It is always safe to classify this as an integer type up to i64 that
2511
  // isn't larger than the structure.
2512
0
  return llvm::IntegerType::get(getVMContext(),
2513
0
                                std::min(TySizeInBytes-SourceOffset, 8U)*8);
2514
0
}
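Two editor-added examples of the high-eightbyte choice discussed in the comment above:

// struct { double d; int i; }         -> high part is i32; bits [96,128) are
//                                        tail padding (BitsContainNoUserData).
// struct { double d; int i; int j; }  -> high part is i64; the second int
//                                        occupies bits [96,128), so i32 would
//                                        drop it.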
2515
2516
2517
/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
2518
/// be used as elements of a two register pair to pass or return, return a
2519
/// first class aggregate to represent them.  For example, if the low part of
2520
/// a by-value argument should be passed as i32* and the high part as float,
2521
/// return {i32*, float}.
2522
static llvm::Type *
2523
GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
2524
0
                           const llvm::DataLayout &TD) {
2525
  // In order to correctly satisfy the ABI, we need the high part to start
2526
  // at offset 8.  If the high and low parts we inferred are both 4-byte types
2527
  // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
2528
  // the second element at offset 8.  Check for this:
2529
0
  unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
2530
0
  llvm::Align HiAlign = TD.getABITypeAlign(Hi);
2531
0
  unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
2532
0
  assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
2533
2534
  // To handle this, we have to increase the size of the low part so that the
2535
  // second element will start at an 8 byte offset.  We can't increase the size
2536
  // of the second element because it might make us access off the end of the
2537
  // struct.
2538
0
  if (HiStart != 8) {
2539
    // There are usually two sorts of types the ABI generation code can produce
2540
    // for the low part of a pair that aren't 8 bytes in size: half, float or
2541
    // i8/i16/i32.  This can also include pointers when they are 32-bit (X32 and
2542
    // NaCl).
2543
    // Promote these to a larger type.
2544
0
    if (Lo->isHalfTy() || Lo->isFloatTy())
2545
0
      Lo = llvm::Type::getDoubleTy(Lo->getContext());
2546
0
    else {
2547
0
      assert((Lo->isIntegerTy() || Lo->isPointerTy())
2548
0
             && "Invalid/unknown lo type");
2549
0
      Lo = llvm::Type::getInt64Ty(Lo->getContext());
2550
0
    }
2551
0
  }
2552
2553
0
  llvm::StructType *Result = llvm::StructType::get(Lo, Hi);
2554
2555
  // Verify that the second element is at an 8-byte offset.
2556
0
  assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
2557
0
         "Invalid x86-64 argument pair!");
2558
0
  return Result;
2559
0
}
2560
2561
ABIArgInfo X86_64ABIInfo::
2562
0
classifyReturnType(QualType RetTy) const {
2563
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
2564
  // classification algorithm.
2565
0
  X86_64ABIInfo::Class Lo, Hi;
2566
0
  classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);
2567
2568
  // Check some invariants.
2569
0
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
2570
0
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
2571
2572
0
  llvm::Type *ResType = nullptr;
2573
0
  switch (Lo) {
2574
0
  case NoClass:
2575
0
    if (Hi == NoClass)
2576
0
      return ABIArgInfo::getIgnore();
2577
    // If the low part is just padding, it takes no register, leave ResType
2578
    // null.
2579
0
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
2580
0
           "Unknown missing lo part");
2581
0
    break;
2582
2583
0
  case SSEUp:
2584
0
  case X87Up:
2585
0
    llvm_unreachable("Invalid classification for lo word.");
2586
2587
    // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
2588
    // hidden argument.
2589
0
  case Memory:
2590
0
    return getIndirectReturnResult(RetTy);
2591
2592
    // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
2593
    // available register of the sequence %rax, %rdx is used.
2594
0
  case Integer:
2595
0
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
2596
2597
    // If we have a sign or zero extended integer, make sure to return Extend
2598
    // so that the parameter gets the right LLVM IR attributes.
2599
0
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
2600
      // Treat an enum type as its underlying type.
2601
0
      if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
2602
0
        RetTy = EnumTy->getDecl()->getIntegerType();
2603
2604
0
      if (RetTy->isIntegralOrEnumerationType() &&
2605
0
          isPromotableIntegerTypeForABI(RetTy))
2606
0
        return ABIArgInfo::getExtend(RetTy);
2607
0
    }
2608
0
    break;
2609
2610
    // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
2611
    // available SSE register of the sequence %xmm0, %xmm1 is used.
2612
0
  case SSE:
2613
0
    ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
2614
0
    break;
2615
2616
    // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
2617
    // returned on the X87 stack in %st0 as 80-bit x87 number.
2618
0
  case X87:
2619
0
    ResType = llvm::Type::getX86_FP80Ty(getVMContext());
2620
0
    break;
2621
2622
    // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
2623
    // part of the value is returned in %st0 and the imaginary part in
2624
    // %st1.
2625
0
  case ComplexX87:
2626
0
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
2627
0
    ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
2628
0
                                    llvm::Type::getX86_FP80Ty(getVMContext()));
2629
0
    break;
2630
0
  }
2631
2632
0
  llvm::Type *HighPart = nullptr;
2633
0
  switch (Hi) {
2634
    // Memory was handled previously and X87 should
2635
    // never occur as a hi class.
2636
0
  case Memory:
2637
0
  case X87:
2638
0
    llvm_unreachable("Invalid classification for hi word.");
2639
2640
0
  case ComplexX87: // Previously handled.
2641
0
  case NoClass:
2642
0
    break;
2643
2644
0
  case Integer:
2645
0
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2646
0
    if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
2647
0
      return ABIArgInfo::getDirect(HighPart, 8);
2648
0
    break;
2649
0
  case SSE:
2650
0
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2651
0
    if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
2652
0
      return ABIArgInfo::getDirect(HighPart, 8);
2653
0
    break;
2654
2655
    // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
2656
    // is passed in the next available eightbyte chunk of the last used
2657
    // vector register.
2658
    //
2659
    // SSEUP should always be preceded by SSE, just widen.
2660
0
  case SSEUp:
2661
0
    assert(Lo == SSE && "Unexpected SSEUp classification.");
2662
0
    ResType = GetByteVectorType(RetTy);
2663
0
    break;
2664
2665
    // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
2666
    // returned together with the previous X87 value in %st0.
2667
0
  case X87Up:
2668
    // If X87Up is preceded by X87, we don't need to do
2669
    // anything. However, in some cases with unions it may not be
2670
    // preceded by X87. In such situations we follow gcc and pass the
2671
    // extra bits in an SSE reg.
2672
0
    if (Lo != X87) {
2673
0
      HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2674
0
      if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
2675
0
        return ABIArgInfo::getDirect(HighPart, 8);
2676
0
    }
2677
0
    break;
2678
0
  }
2679
2680
  // If a high part was specified, merge it together with the low part.  It is
2681
  // known to pass in the high eightbyte of the result.  We do this by forming a
2682
  // first class struct aggregate with the high and low part: {low, high}
2683
0
  if (HighPart)
2684
0
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
2685
2686
0
  return ABIArgInfo::getDirect(ResType);
2687
0
}
2688
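As a source-level illustration of the return classification above (a hedged sketch, not part of the covered source): a 16-byte POD whose first eightbyte is floating point and whose second is integer is returned in registers rather than via a hidden pointer.

// Lo = SSE, Hi = Integer under the SysV classification: the first eightbyte
// is returned in %xmm0 and the second in %rax.
struct Pair { double d; long i; };
Pair makePair() { return {1.0, 2}; }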
2689
ABIArgInfo
2690
X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned freeIntRegs,
2691
                                    unsigned &neededInt, unsigned &neededSSE,
2692
0
                                    bool isNamedArg, bool IsRegCall) const {
2693
0
  Ty = useFirstFieldIfTransparentUnion(Ty);
2694
2695
0
  X86_64ABIInfo::Class Lo, Hi;
2696
0
  classify(Ty, 0, Lo, Hi, isNamedArg, IsRegCall);
2697
2698
  // Check some invariants.
2699
  // FIXME: Enforce these by construction.
2700
0
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
2701
0
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
2702
2703
0
  neededInt = 0;
2704
0
  neededSSE = 0;
2705
0
  llvm::Type *ResType = nullptr;
2706
0
  switch (Lo) {
2707
0
  case NoClass:
2708
0
    if (Hi == NoClass)
2709
0
      return ABIArgInfo::getIgnore();
2710
    // If the low part is just padding, it takes no register; leave ResType
2711
    // null.
2712
0
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
2713
0
           "Unknown missing lo part");
2714
0
    break;
2715
2716
    // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
2717
    // on the stack.
2718
0
  case Memory:
2719
2720
    // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
2721
    // COMPLEX_X87, it is passed in memory.
2722
0
  case X87:
2723
0
  case ComplexX87:
2724
0
    if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
2725
0
      ++neededInt;
2726
0
    return getIndirectResult(Ty, freeIntRegs);
2727
2728
0
  case SSEUp:
2729
0
  case X87Up:
2730
0
    llvm_unreachable("Invalid classification for lo word.");
2731
2732
    // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
2733
    // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
2734
    // and %r9 is used.
2735
0
  case Integer:
2736
0
    ++neededInt;
2737
2738
    // Pick an 8-byte type based on the preferred type.
2739
0
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
2740
2741
    // If we have a sign or zero extended integer, make sure to return Extend
2742
    // so that the parameter gets the right LLVM IR attributes.
2743
0
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
2744
      // Treat an enum type as its underlying type.
2745
0
      if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2746
0
        Ty = EnumTy->getDecl()->getIntegerType();
2747
2748
0
      if (Ty->isIntegralOrEnumerationType() &&
2749
0
          isPromotableIntegerTypeForABI(Ty))
2750
0
        return ABIArgInfo::getExtend(Ty);
2751
0
    }
2752
2753
0
    break;
2754
2755
    // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
2756
    // available SSE register is used, the registers are taken in the
2757
    // order from %xmm0 to %xmm7.
2758
0
  case SSE: {
2759
0
    llvm::Type *IRType = CGT.ConvertType(Ty);
2760
0
    ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
2761
0
    ++neededSSE;
2762
0
    break;
2763
0
  }
2764
0
  }
2765
2766
0
  llvm::Type *HighPart = nullptr;
2767
0
  switch (Hi) {
2768
    // Memory was handled previously, ComplexX87 and X87 should
2769
    // never occur as hi classes, and X87Up must be preceded by X87,
2770
    // which is passed in memory.
2771
0
  case Memory:
2772
0
  case X87:
2773
0
  case ComplexX87:
2774
0
    llvm_unreachable("Invalid classification for hi word.");
2775
2776
0
  case NoClass: break;
2777
2778
0
  case Integer:
2779
0
    ++neededInt;
2780
    // Pick an 8-byte type based on the preferred type.
2781
0
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
2782
2783
0
    if (Lo == NoClass)  // Pass HighPart at offset 8 in memory.
2784
0
      return ABIArgInfo::getDirect(HighPart, 8);
2785
0
    break;
2786
2787
    // X87Up generally doesn't occur here (long double is passed in
2788
    // memory), except in situations involving unions.
2789
0
  case X87Up:
2790
0
  case SSE:
2791
0
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
2792
2793
0
    if (Lo == NoClass)  // Pass HighPart at offset 8 in memory.
2794
0
      return ABIArgInfo::getDirect(HighPart, 8);
2795
2796
0
    ++neededSSE;
2797
0
    break;
2798
2799
    // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
2800
    // eightbyte is passed in the upper half of the last used SSE
2801
    // register.  This only happens when 128-bit vectors are passed.
2802
0
  case SSEUp:
2803
0
    assert(Lo == SSE && "Unexpected SSEUp classification");
2804
0
    ResType = GetByteVectorType(Ty);
2805
0
    break;
2806
0
  }
2807
2808
  // If a high part was specified, merge it together with the low part.  It is
2809
  // known to pass in the high eightbyte of the result.  We do this by forming a
2810
  // first class struct aggregate with the high and low part: {low, high}
2811
0
  if (HighPart)
2812
0
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
2813
2814
0
  return ABIArgInfo::getDirect(ResType);
2815
0
}
2816
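The same classification drives the neededInt/neededSSE accounting for arguments. A minimal sketch of the caller-side register assignment for a mixed aggregate (illustrative only):

// Lo = Integer, Hi = SSE: passing this struct consumes one GPR and one XMM.
struct Mixed { long a; double b; };
void callee(Mixed m);                // neededInt = 1, neededSSE = 1
void caller() { callee({1, 2.0}); }  // first eightbyte in %rdi, second in %xmm0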
2817
ABIArgInfo
2818
X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
2819
                                             unsigned &NeededSSE,
2820
0
                                             unsigned &MaxVectorWidth) const {
2821
0
  auto RT = Ty->getAs<RecordType>();
2822
0
  assert(RT && "classifyRegCallStructType only valid with struct types");
2823
2824
0
  if (RT->getDecl()->hasFlexibleArrayMember())
2825
0
    return getIndirectReturnResult(Ty);
2826
2827
  // Sum up bases
2828
0
  if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
2829
0
    if (CXXRD->isDynamicClass()) {
2830
0
      NeededInt = NeededSSE = 0;
2831
0
      return getIndirectReturnResult(Ty);
2832
0
    }
2833
2834
0
    for (const auto &I : CXXRD->bases())
2835
0
      if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE,
2836
0
                                        MaxVectorWidth)
2837
0
              .isIndirect()) {
2838
0
        NeededInt = NeededSSE = 0;
2839
0
        return getIndirectReturnResult(Ty);
2840
0
      }
2841
0
  }
2842
2843
  // Sum up members
2844
0
  for (const auto *FD : RT->getDecl()->fields()) {
2845
0
    QualType MTy = FD->getType();
2846
0
    if (MTy->isRecordType() && !MTy->isUnionType()) {
2847
0
      if (classifyRegCallStructTypeImpl(MTy, NeededInt, NeededSSE,
2848
0
                                        MaxVectorWidth)
2849
0
              .isIndirect()) {
2850
0
        NeededInt = NeededSSE = 0;
2851
0
        return getIndirectReturnResult(Ty);
2852
0
      }
2853
0
    } else {
2854
0
      unsigned LocalNeededInt, LocalNeededSSE;
2855
0
      if (classifyArgumentType(MTy, UINT_MAX, LocalNeededInt, LocalNeededSSE,
2856
0
                               true, true)
2857
0
              .isIndirect()) {
2858
0
        NeededInt = NeededSSE = 0;
2859
0
        return getIndirectReturnResult(Ty);
2860
0
      }
2861
0
      if (const auto *AT = getContext().getAsConstantArrayType(MTy))
2862
0
        MTy = AT->getElementType();
2863
0
      if (const auto *VT = MTy->getAs<VectorType>())
2864
0
        if (getContext().getTypeSize(VT) > MaxVectorWidth)
2865
0
          MaxVectorWidth = getContext().getTypeSize(VT);
2866
0
      NeededInt += LocalNeededInt;
2867
0
      NeededSSE += LocalNeededSSE;
2868
0
    }
2869
0
  }
2870
2871
0
  return ABIArgInfo::getDirect();
2872
0
}
2873
2874
ABIArgInfo
2875
X86_64ABIInfo::classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
2876
                                         unsigned &NeededSSE,
2877
0
                                         unsigned &MaxVectorWidth) const {
2878
2879
0
  NeededInt = 0;
2880
0
  NeededSSE = 0;
2881
0
  MaxVectorWidth = 0;
2882
2883
0
  return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE,
2884
0
                                       MaxVectorWidth);
2885
0
}
2886
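For regcall, the walk above sums register needs field by field instead of classifying the record as a whole. A hedged example of what that summation yields (attribute spelling per Clang's regcall support; exact counts assume an x86-64 SysV target):

struct RegCallRet { double x[2]; int i; };
// The double[2] field contributes two SSE eightbytes and the int one integer
// eightbyte, so NeededSSE = 2 and NeededInt = 1 for this return type.
__attribute__((regcall)) RegCallRet getRegCallRet();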
2887
0
void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
2888
2889
0
  const unsigned CallingConv = FI.getCallingConvention();
2890
  // It is possible to force Win64 calling convention on any x86_64 target by
2891
  // using __attribute__((ms_abi)). In such a case, to correctly emit Win64-
2893
  // compatible code, delegate this call to WinX86_64ABIInfo::computeInfo.
2893
0
  if (CallingConv == llvm::CallingConv::Win64) {
2894
0
    WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel);
2895
0
    Win64ABIInfo.computeInfo(FI);
2896
0
    return;
2897
0
  }
2898
2899
0
  bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall;
2900
2901
  // Keep track of the number of assigned registers.
2902
0
  unsigned FreeIntRegs = IsRegCall ? 11 : 6;
2903
0
  unsigned FreeSSERegs = IsRegCall ? 16 : 8;
2904
0
  unsigned NeededInt = 0, NeededSSE = 0, MaxVectorWidth = 0;
2905
2906
0
  if (!::classifyReturnType(getCXXABI(), FI, *this)) {
2907
0
    if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() &&
2908
0
        !FI.getReturnType()->getTypePtr()->isUnionType()) {
2909
0
      FI.getReturnInfo() = classifyRegCallStructType(
2910
0
          FI.getReturnType(), NeededInt, NeededSSE, MaxVectorWidth);
2911
0
      if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
2912
0
        FreeIntRegs -= NeededInt;
2913
0
        FreeSSERegs -= NeededSSE;
2914
0
      } else {
2915
0
        FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
2916
0
      }
2917
0
    } else if (IsRegCall && FI.getReturnType()->getAs<ComplexType>() &&
2918
0
               getContext().getCanonicalType(FI.getReturnType()
2919
0
                                                 ->getAs<ComplexType>()
2920
0
                                                 ->getElementType()) ==
2921
0
                   getContext().LongDoubleTy)
2922
      // A complex long double is passed in memory when the regcall
2923
      // calling convention is used.
2924
0
      FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
2925
0
    else
2926
0
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
2927
0
  }
2928
2929
  // If the return value is indirect, then the hidden argument is consuming one
2930
  // integer register.
2931
0
  if (FI.getReturnInfo().isIndirect())
2932
0
    --FreeIntRegs;
2933
0
  else if (NeededSSE && MaxVectorWidth > 0)
2934
0
    FI.setMaxVectorWidth(MaxVectorWidth);
2935
2936
  // The chain argument effectively gives us another free register.
2937
0
  if (FI.isChainCall())
2938
0
    ++FreeIntRegs;
2939
2940
0
  unsigned NumRequiredArgs = FI.getNumRequiredArgs();
2941
  // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
2942
  // get assigned (in left-to-right order) for passing as follows...
2943
0
  unsigned ArgNo = 0;
2944
0
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2945
0
       it != ie; ++it, ++ArgNo) {
2946
0
    bool IsNamedArg = ArgNo < NumRequiredArgs;
2947
2948
0
    if (IsRegCall && it->type->isStructureOrClassType())
2949
0
      it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE,
2950
0
                                           MaxVectorWidth);
2951
0
    else
2952
0
      it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt,
2953
0
                                      NeededSSE, IsNamedArg);
2954
2955
    // AMD64-ABI 3.2.3p3: If there are no registers available for any
2956
    // eightbyte of an argument, the whole argument is passed on the
2957
    // stack. If registers have already been assigned for some
2958
    // eightbytes of such an argument, the assignments get reverted.
2959
0
    if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
2960
0
      FreeIntRegs -= NeededInt;
2961
0
      FreeSSERegs -= NeededSSE;
2962
0
      if (MaxVectorWidth > FI.getMaxVectorWidth())
2963
0
        FI.setMaxVectorWidth(MaxVectorWidth);
2964
0
    } else {
2965
0
      it->info = getIndirectResult(it->type, FreeIntRegs);
2966
0
    }
2967
0
  }
2968
0
}
2969
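A small illustration of the free-register bookkeeping above under the standard SysV convention (six integer registers, eight SSE registers): once the integer registers are exhausted, the remaining arguments go to the stack.

// %rdi, %rsi, %rdx, %rcx, %r8 and %r9 cover the first six integer arguments;
// the seventh is passed on the stack because FreeIntRegs has reached zero.
void seven(long a, long b, long c, long d, long e, long f, long g);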
2970
static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
2971
0
                                         Address VAListAddr, QualType Ty) {
2972
0
  Address overflow_arg_area_p =
2973
0
      CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
2974
0
  llvm::Value *overflow_arg_area =
2975
0
    CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
2976
2977
  // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
2978
  // byte boundary if alignment needed by type exceeds 8 byte boundary.
2979
  // It isn't stated explicitly in the standard, but in practice we use
2980
  // alignment greater than 16 where necessary.
2981
0
  CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
2982
0
  if (Align > CharUnits::fromQuantity(8)) {
2983
0
    overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area,
2984
0
                                                      Align);
2985
0
  }
2986
2987
  // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
2988
0
  llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
2989
0
  llvm::Value *Res = overflow_arg_area;
2990
2991
  // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
2992
  // l->overflow_arg_area + sizeof(type).
2993
  // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
2994
  // an 8 byte boundary.
2995
2996
0
  uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
2997
0
  llvm::Value *Offset =
2998
0
      llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7)  & ~7);
2999
0
  overflow_arg_area = CGF.Builder.CreateGEP(CGF.Int8Ty, overflow_arg_area,
3000
0
                                            Offset, "overflow_arg_area.next");
3001
0
  CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
3002
3003
  // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
3004
0
  return Address(Res, LTy, Align);
3005
0
}
3006
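The pointer bump at the end of the memory path rounds the argument size up to an 8-byte multiple, per Step 10. A minimal sketch of that arithmetic:

#include <cstdint>

// Mirrors (SizeInBytes + 7) & ~7 above: overflow_arg_area always advances in
// 8-byte steps, regardless of the argument's exact size.
constexpr uint64_t roundUpToEight(uint64_t SizeInBytes) {
  return (SizeInBytes + 7) & ~UINT64_C(7);
}
static_assert(roundUpToEight(1) == 8, "a 1-byte arg still consumes a full slot");
static_assert(roundUpToEight(12) == 16, "a 12-byte arg consumes two slots");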
3007
Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3008
0
                                 QualType Ty) const {
3009
  // Assume that va_list type is correct; should be pointer to LLVM type:
3010
  // struct {
3011
  //   i32 gp_offset;
3012
  //   i32 fp_offset;
3013
  //   i8* overflow_arg_area;
3014
  //   i8* reg_save_area;
3015
  // };
3016
0
  unsigned neededInt, neededSSE;
3017
3018
0
  Ty = getContext().getCanonicalType(Ty);
3019
0
  ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
3020
0
                                       /*isNamedArg*/false);
3021
3022
  // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
3023
  // in the registers. If not go to step 7.
3024
0
  if (!neededInt && !neededSSE)
3025
0
    return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
3026
3027
  // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
3028
  // general purpose registers needed to pass type and num_fp to hold
3029
  // the number of floating point registers needed.
3030
3031
  // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
3032
  // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
3033
  // l->fp_offset > 304 - num_fp * 16 go to step 7.
3034
  //
3035
  // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
3036
  // register save space.
3037
3038
0
  llvm::Value *InRegs = nullptr;
3039
0
  Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
3040
0
  llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
3041
0
  if (neededInt) {
3042
0
    gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
3043
0
    gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
3044
0
    InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
3045
0
    InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
3046
0
  }
3047
3048
0
  if (neededSSE) {
3049
0
    fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
3050
0
    fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
3051
0
    llvm::Value *FitsInFP =
3052
0
      llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
3053
0
    FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
3054
0
    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
3055
0
  }
3056
3057
0
  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
3058
0
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
3059
0
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
3060
0
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
3061
3062
  // Emit code to load the value if it was passed in registers.
3063
3064
0
  CGF.EmitBlock(InRegBlock);
3065
3066
  // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
3067
  // an offset of l->gp_offset and/or l->fp_offset. This may require
3068
  // copying to a temporary location in case the parameter is passed
3069
  // in different register classes or requires an alignment greater
3070
  // than 8 for general purpose registers and 16 for XMM registers.
3071
  //
3072
  // FIXME: This really results in shameful code when we end up needing to
3073
  // collect arguments from different places; often what should result in a
3074
  // simple assembling of a structure from scattered addresses has many more
3075
  // loads than necessary. Can we clean this up?
3076
0
  llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
3077
0
  llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(
3078
0
      CGF.Builder.CreateStructGEP(VAListAddr, 3), "reg_save_area");
3079
3080
0
  Address RegAddr = Address::invalid();
3081
0
  if (neededInt && neededSSE) {
3082
    // FIXME: Cleanup.
3083
0
    assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
3084
0
    llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
3085
0
    Address Tmp = CGF.CreateMemTemp(Ty);
3086
0
    Tmp = Tmp.withElementType(ST);
3087
0
    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
3088
0
    llvm::Type *TyLo = ST->getElementType(0);
3089
0
    llvm::Type *TyHi = ST->getElementType(1);
3090
0
    assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
3091
0
           "Unexpected ABI info for mixed regs");
3092
0
    llvm::Value *GPAddr =
3093
0
        CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, gp_offset);
3094
0
    llvm::Value *FPAddr =
3095
0
        CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, fp_offset);
3096
0
    llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
3097
0
    llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
3098
3099
    // Copy the first element.
3100
    // FIXME: Our choice of alignment here and below is probably pessimistic.
3101
0
    llvm::Value *V = CGF.Builder.CreateAlignedLoad(
3102
0
        TyLo, RegLoAddr,
3103
0
        CharUnits::fromQuantity(getDataLayout().getABITypeAlign(TyLo)));
3104
0
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
3105
3106
    // Copy the second element.
3107
0
    V = CGF.Builder.CreateAlignedLoad(
3108
0
        TyHi, RegHiAddr,
3109
0
        CharUnits::fromQuantity(getDataLayout().getABITypeAlign(TyHi)));
3110
0
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
3111
3112
0
    RegAddr = Tmp.withElementType(LTy);
3113
0
  } else if (neededInt) {
3114
0
    RegAddr = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, gp_offset),
3115
0
                      LTy, CharUnits::fromQuantity(8));
3116
3117
    // Copy to a temporary if necessary to ensure the appropriate alignment.
3118
0
    auto TInfo = getContext().getTypeInfoInChars(Ty);
3119
0
    uint64_t TySize = TInfo.Width.getQuantity();
3120
0
    CharUnits TyAlign = TInfo.Align;
3121
3122
    // Copy into a temporary if the type is more aligned than the
3123
    // register save area.
3124
0
    if (TyAlign.getQuantity() > 8) {
3125
0
      Address Tmp = CGF.CreateMemTemp(Ty);
3126
0
      CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false);
3127
0
      RegAddr = Tmp;
3128
0
    }
3129
3130
0
  } else if (neededSSE == 1) {
3131
0
    RegAddr = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, fp_offset),
3132
0
                      LTy, CharUnits::fromQuantity(16));
3133
0
  } else {
3134
0
    assert(neededSSE == 2 && "Invalid number of needed registers!");
3135
    // SSE registers are spaced 16 bytes apart in the register save
3136
    // area, so we need to collect the two eightbytes together.
3137
    // The ABI isn't explicit about this, but it seems reasonable
3138
    // to assume that the slots are 16-byte aligned, since the stack is
3139
    // naturally 16-byte aligned and the prologue is expected to store
3140
    // all the SSE registers to the RSA.
3141
0
    Address RegAddrLo = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea,
3142
0
                                                      fp_offset),
3143
0
                                CGF.Int8Ty, CharUnits::fromQuantity(16));
3144
0
    Address RegAddrHi =
3145
0
      CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo,
3146
0
                                             CharUnits::fromQuantity(16));
3147
0
    llvm::Type *ST = AI.canHaveCoerceToType()
3148
0
                         ? AI.getCoerceToType()
3149
0
                         : llvm::StructType::get(CGF.DoubleTy, CGF.DoubleTy);
3150
0
    llvm::Value *V;
3151
0
    Address Tmp = CGF.CreateMemTemp(Ty);
3152
0
    Tmp = Tmp.withElementType(ST);
3153
0
    V = CGF.Builder.CreateLoad(
3154
0
        RegAddrLo.withElementType(ST->getStructElementType(0)));
3155
0
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
3156
0
    V = CGF.Builder.CreateLoad(
3157
0
        RegAddrHi.withElementType(ST->getStructElementType(1)));
3158
0
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
3159
3160
0
    RegAddr = Tmp.withElementType(LTy);
3161
0
  }
3162
3163
  // AMD64-ABI 3.5.7p5: Step 5. Set:
3164
  // l->gp_offset = l->gp_offset + num_gp * 8
3165
  // l->fp_offset = l->fp_offset + num_fp * 16.
3166
0
  if (neededInt) {
3167
0
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
3168
0
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
3169
0
                            gp_offset_p);
3170
0
  }
3171
0
  if (neededSSE) {
3172
0
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
3173
0
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
3174
0
                            fp_offset_p);
3175
0
  }
3176
0
  CGF.EmitBranch(ContBlock);
3177
3178
  // Emit code to load the value if it was passed in memory.
3179
3180
0
  CGF.EmitBlock(InMemBlock);
3181
0
  Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
3182
3183
  // Return the appropriate result.
3184
3185
0
  CGF.EmitBlock(ContBlock);
3186
0
  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
3187
0
                                 "vaarg.addr");
3188
0
  return ResAddr;
3189
0
}
3190
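For reference, the va_list record that both the register path and the memory path index into has the following shape at the C level (a sketch matching the comment at the top of EmitVAArg; the field names here are illustrative):

struct sysv_va_list_tag {
  unsigned int gp_offset;   // offset into reg_save_area of the next GPR slot (0..48)
  unsigned int fp_offset;   // offset into reg_save_area of the next XMM slot (48..176)
  void *overflow_arg_area;  // next stack-passed (memory class) argument
  void *reg_save_area;      // prologue spill area: 6 * 8 + 8 * 16 = 176 bytes
};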
3191
Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
3192
0
                                   QualType Ty) const {
3193
  // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
3194
  // not 1, 2, 4, or 8 bytes, must be passed by reference."
3195
0
  uint64_t Width = getContext().getTypeSize(Ty);
3196
0
  bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
3197
3198
0
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
3199
0
                          CGF.getContext().getTypeInfoInChars(Ty),
3200
0
                          CharUnits::fromQuantity(8),
3201
0
                          /*allowHigherAlign*/ false);
3202
0
}
3203
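A quick illustration of the Win64 size rule applied above (sketch only): an argument that is not exactly 1, 2, 4, or 8 bytes wide travels by reference, so the corresponding va_arg load is indirect.

struct Twelve { int a, b, c; };  // 96 bits: wider than 8 bytes -> IsIndirect = true
struct Eight  { int a, b; };     // 64 bits: fits in one slot   -> IsIndirect = false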
3204
ABIArgInfo WinX86_64ABIInfo::reclassifyHvaArgForVectorCall(
3205
0
    QualType Ty, unsigned &FreeSSERegs, const ABIArgInfo &current) const {
3206
0
  const Type *Base = nullptr;
3207
0
  uint64_t NumElts = 0;
3208
3209
0
  if (!Ty->isBuiltinType() && !Ty->isVectorType() &&
3210
0
      isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) {
3211
0
    FreeSSERegs -= NumElts;
3212
0
    return getDirectX86Hva();
3213
0
  }
3214
0
  return current;
3215
0
}
3216
3217
ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
3218
                                      bool IsReturnType, bool IsVectorCall,
3219
0
                                      bool IsRegCall) const {
3220
3221
0
  if (Ty->isVoidType())
3222
0
    return ABIArgInfo::getIgnore();
3223
3224
0
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3225
0
    Ty = EnumTy->getDecl()->getIntegerType();
3226
3227
0
  TypeInfo Info = getContext().getTypeInfo(Ty);
3228
0
  uint64_t Width = Info.Width;
3229
0
  CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);
3230
3231
0
  const RecordType *RT = Ty->getAs<RecordType>();
3232
0
  if (RT) {
3233
0
    if (!IsReturnType) {
3234
0
      if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
3235
0
        return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
3236
0
    }
3237
3238
0
    if (RT->getDecl()->hasFlexibleArrayMember())
3239
0
      return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
3240
3241
0
  }
3242
3243
0
  const Type *Base = nullptr;
3244
0
  uint64_t NumElts = 0;
3245
  // vectorcall adds the concept of a homogeneous vector aggregate, similar to
3246
  // other targets.
3247
0
  if ((IsVectorCall || IsRegCall) &&
3248
0
      isHomogeneousAggregate(Ty, Base, NumElts)) {
3249
0
    if (IsRegCall) {
3250
0
      if (FreeSSERegs >= NumElts) {
3251
0
        FreeSSERegs -= NumElts;
3252
0
        if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
3253
0
          return ABIArgInfo::getDirect();
3254
0
        return ABIArgInfo::getExpand();
3255
0
      }
3256
0
      return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3257
0
    } else if (IsVectorCall) {
3258
0
      if (FreeSSERegs >= NumElts &&
3259
0
          (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())) {
3260
0
        FreeSSERegs -= NumElts;
3261
0
        return ABIArgInfo::getDirect();
3262
0
      } else if (IsReturnType) {
3263
0
        return ABIArgInfo::getExpand();
3264
0
      } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) {
3265
        // HVAs are delayed and reclassified in the 2nd step.
3266
0
        return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3267
0
      }
3268
0
    }
3269
0
  }
3270
3271
0
  if (Ty->isMemberPointerType()) {
3272
    // If the member pointer is represented by an LLVM int or ptr, pass it
3273
    // directly.
3274
0
    llvm::Type *LLTy = CGT.ConvertType(Ty);
3275
0
    if (LLTy->isPointerTy() || LLTy->isIntegerTy())
3276
0
      return ABIArgInfo::getDirect();
3277
0
  }
3278
3279
0
  if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
3280
    // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
3281
    // not 1, 2, 4, or 8 bytes, must be passed by reference."
3282
0
    if (Width > 64 || !llvm::isPowerOf2_64(Width))
3283
0
      return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
3284
3285
    // Otherwise, coerce it to a small integer.
3286
0
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
3287
0
  }
3288
3289
0
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
3290
0
    switch (BT->getKind()) {
3291
0
    case BuiltinType::Bool:
3292
      // Bool type is always extended per the ABI; other builtin types are not
3293
      // extended.
3294
0
      return ABIArgInfo::getExtend(Ty);
3295
3296
0
    case BuiltinType::LongDouble:
3297
      // Mingw64 GCC uses the old 80 bit extended precision floating point
3298
      // unit. It passes long doubles indirectly through memory.
3299
0
      if (IsMingw64) {
3300
0
        const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
3301
0
        if (LDF == &llvm::APFloat::x87DoubleExtended())
3302
0
          return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3303
0
      }
3304
0
      break;
3305
3306
0
    case BuiltinType::Int128:
3307
0
    case BuiltinType::UInt128:
3308
      // If it's a parameter type, the normal ABI rule is that arguments larger
3309
      // than 8 bytes are passed indirectly. GCC follows it. We follow it too,
3310
      // even though it isn't particularly efficient.
3311
0
      if (!IsReturnType)
3312
0
        return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3313
3314
      // Mingw64 GCC returns i128 in XMM0. Coerce to v2i64 to handle that.
3315
      // Clang matches them for compatibility.
3316
0
      return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
3317
0
          llvm::Type::getInt64Ty(getVMContext()), 2));
3318
3319
0
    default:
3320
0
      break;
3321
0
    }
3322
0
  }
3323
3324
0
  if (Ty->isBitIntType()) {
3325
    // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
3326
    // not 1, 2, 4, or 8 bytes, must be passed by reference."
3327
    // However, non-power-of-two bit-precise integers will be passed as 1, 2, 4,
3328
    // or 8 bytes anyway as long as it fits in them, so we don't have to check
3329
    // the power of 2.
3330
0
    if (Width <= 64)
3331
0
      return ABIArgInfo::getDirect();
3332
0
    return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3333
0
  }
3334
3335
0
  return ABIArgInfo::getDirect();
3336
0
}
3337
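As a hedged example of the homogeneous-vector-aggregate branch above (assuming a Windows x86-64 target where __vectorcall is accepted): a struct of identical vector members is first marked indirect here, and the second pass in computeInfo below moves it into XMM/YMM registers if enough remain free.

typedef float f32x4 __attribute__((vector_size(16)));
struct HVA2 { f32x4 lo, hi; };   // homogeneous vector aggregate, NumElts = 2
void __vectorcall take(HVA2 v);  // two XMM registers once FreeSSERegs >= 2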
3338
0
void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3339
0
  const unsigned CC = FI.getCallingConvention();
3340
0
  bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
3341
0
  bool IsRegCall = CC == llvm::CallingConv::X86_RegCall;
3342
3343
  // If __attribute__((sysv_abi)) is in use, use the SysV argument
3344
  // classification rules.
3345
0
  if (CC == llvm::CallingConv::X86_64_SysV) {
3346
0
    X86_64ABIInfo SysVABIInfo(CGT, AVXLevel);
3347
0
    SysVABIInfo.computeInfo(FI);
3348
0
    return;
3349
0
  }
3350
3351
0
  unsigned FreeSSERegs = 0;
3352
0
  if (IsVectorCall) {
3353
    // We can use up to 4 SSE return registers with vectorcall.
3354
0
    FreeSSERegs = 4;
3355
0
  } else if (IsRegCall) {
3356
    // RegCall gives us 16 SSE registers.
3357
0
    FreeSSERegs = 16;
3358
0
  }
3359
3360
0
  if (!getCXXABI().classifyReturnType(FI))
3361
0
    FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true,
3362
0
                                  IsVectorCall, IsRegCall);
3363
3364
0
  if (IsVectorCall) {
3365
    // We can use up to 6 SSE register parameters with vectorcall.
3366
0
    FreeSSERegs = 6;
3367
0
  } else if (IsRegCall) {
3368
    // RegCall gives us 16 SSE registers; we can reuse the return registers.
3369
0
    FreeSSERegs = 16;
3370
0
  }
3371
3372
0
  unsigned ArgNum = 0;
3373
0
  unsigned ZeroSSERegs = 0;
3374
0
  for (auto &I : FI.arguments()) {
3375
    // Vectorcall in x64 only permits the first 6 arguments to be passed in
3376
    // XMM/YMM registers. After the sixth argument, pretend no vector
3377
    // registers are left.
3378
0
    unsigned *MaybeFreeSSERegs =
3379
0
        (IsVectorCall && ArgNum >= 6) ? &ZeroSSERegs : &FreeSSERegs;
3380
0
    I.info =
3381
0
        classify(I.type, *MaybeFreeSSERegs, false, IsVectorCall, IsRegCall);
3382
0
    ++ArgNum;
3383
0
  }
3384
3385
0
  if (IsVectorCall) {
3386
    // For vectorcall, assign aggregate HVAs to any free vector registers in a
3387
    // second pass.
3388
0
    for (auto &I : FI.arguments())
3389
0
      I.info = reclassifyHvaArgForVectorCall(I.type, FreeSSERegs, I.info);
3390
0
  }
3391
0
}
3392
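The sysv_abi delegation at the top of computeInfo above means a single translation unit can mix conventions. A minimal sketch (attribute spelling as supported by GCC and Clang): on a Windows x86-64 target, the declaration below is classified with the SysV rules earlier in this file rather than the Win64 rules.

// Classified by X86_64ABIInfo::computeInfo even when the default ABI is Win64.
__attribute__((sysv_abi)) void posixStyle(long a, double b);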
3393
Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3394
0
                                    QualType Ty) const {
3395
  // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
3396
  // not 1, 2, 4, or 8 bytes, must be passed by reference."
3397
0
  uint64_t Width = getContext().getTypeSize(Ty);
3398
0
  bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
3399
3400
0
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
3401
0
                          CGF.getContext().getTypeInfoInChars(Ty),
3402
0
                          CharUnits::fromQuantity(8),
3403
0
                          /*allowHigherAlign*/ false);
3404
0
}
3405
3406
std::unique_ptr<TargetCodeGenInfo> CodeGen::createX86_32TargetCodeGenInfo(
3407
    CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI,
3408
0
    unsigned NumRegisterParameters, bool SoftFloatABI) {
3409
0
  bool RetSmallStructInRegABI = X86_32TargetCodeGenInfo::isStructReturnInRegABI(
3410
0
      CGM.getTriple(), CGM.getCodeGenOpts());
3411
0
  return std::make_unique<X86_32TargetCodeGenInfo>(
3412
0
      CGM.getTypes(), DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
3413
0
      NumRegisterParameters, SoftFloatABI);
3414
0
}
3415
3416
std::unique_ptr<TargetCodeGenInfo> CodeGen::createWinX86_32TargetCodeGenInfo(
3417
    CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI,
3418
0
    unsigned NumRegisterParameters) {
3419
0
  bool RetSmallStructInRegABI = X86_32TargetCodeGenInfo::isStructReturnInRegABI(
3420
0
      CGM.getTriple(), CGM.getCodeGenOpts());
3421
0
  return std::make_unique<WinX86_32TargetCodeGenInfo>(
3422
0
      CGM.getTypes(), DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
3423
0
      NumRegisterParameters);
3424
0
}
3425
3426
std::unique_ptr<TargetCodeGenInfo>
3427
CodeGen::createX86_64TargetCodeGenInfo(CodeGenModule &CGM,
3428
46
                                       X86AVXABILevel AVXLevel) {
3429
46
  return std::make_unique<X86_64TargetCodeGenInfo>(CGM.getTypes(), AVXLevel);
3430
46
}
3431
3432
std::unique_ptr<TargetCodeGenInfo>
3433
CodeGen::createWinX86_64TargetCodeGenInfo(CodeGenModule &CGM,
3434
0
                                          X86AVXABILevel AVXLevel) {
3435
0
  return std::make_unique<WinX86_64TargetCodeGenInfo>(CGM.getTypes(), AVXLevel);
3436
0
}