Coverage Report

Created: 2024-01-17 10:31

/src/llvm-project/clang/lib/CodeGen/Targets/PPC.cpp
Line
Count
Source (jump to first uncovered line)
1
//===- PPC.cpp ------------------------------------------------------------===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
9
#include "ABIInfoImpl.h"
10
#include "TargetInfo.h"
11
12
using namespace clang;
13
using namespace clang::CodeGen;
14
15
// Load the real and imaginary parts of a complex va_arg value that the ABI
// stores right-adjusted in two consecutive register-size slots, and repack
// them tightly into a temporary of type Ty, whose address is returned.
static Address complexTempStructure(CodeGenFunction &CGF, Address VAListAddr,
                                    QualType Ty, CharUnits SlotSize,
                                    CharUnits EltSize, const ComplexType *CTy) {
  // Advance the va_list past both slots and get the raw address of the pair.
  Address Addr =
      emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty, SlotSize * 2,
                             SlotSize, SlotSize, /*AllowHigher*/ true);

  Address RealAddr = Addr;
  Address ImagAddr = RealAddr;
  if (CGF.CGM.getDataLayout().isBigEndian()) {
    // Big-endian: each part is right-adjusted within its slot, so skip the
    // leading padding of each slot.
    RealAddr =
        CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize - EltSize);
    ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr,
                                                      2 * SlotSize - EltSize);
  } else {
    // Little-endian: each part sits at the start of its slot.
    ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize);
  }

  llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType());
  RealAddr = RealAddr.withElementType(EltTy);
  ImagAddr = ImagAddr.withElementType(EltTy);
  llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal");
  llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag");

  // Store both parts contiguously into a fresh temporary, which is the
  // layout Clang's complex-value handling expects.
  Address Temp = CGF.CreateMemTemp(Ty, "vacplx");
  CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty),
                         /*init*/ true);
  return Temp;
}
44
45
static bool PPC_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
46
                                        llvm::Value *Address, bool Is64Bit,
47
0
                                        bool IsAIX) {
48
  // This is calculated from the LLVM and GCC tables and verified
49
  // against gcc output.  AFAIK all PPC ABIs use the same encoding.
50
51
0
  CodeGen::CGBuilderTy &Builder = CGF.Builder;
52
53
0
  llvm::IntegerType *i8 = CGF.Int8Ty;
54
0
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
55
0
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
56
0
  llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
57
58
  // 0-31: r0-31, the 4-byte or 8-byte general-purpose registers
59
0
  AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 0, 31);
60
61
  // 32-63: fp0-31, the 8-byte floating-point registers
62
0
  AssignToArrayRange(Builder, Address, Eight8, 32, 63);
63
64
  // 64-67 are various 4-byte or 8-byte special-purpose registers:
65
  // 64: mq
66
  // 65: lr
67
  // 66: ctr
68
  // 67: ap
69
0
  AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 64, 67);
70
71
  // 68-76 are various 4-byte special-purpose registers:
72
  // 68-75 cr0-7
73
  // 76: xer
74
0
  AssignToArrayRange(Builder, Address, Four8, 68, 76);
75
76
  // 77-108: v0-31, the 16-byte vector registers
77
0
  AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
78
79
  // 109: vrsave
80
  // 110: vscr
81
0
  AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 109, 110);
82
83
  // AIX does not utilize the rest of the registers.
84
0
  if (IsAIX)
85
0
    return false;
86
87
  // 111: spe_acc
88
  // 112: spefscr
89
  // 113: sfp
90
0
  AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 111, 113);
91
92
0
  if (!Is64Bit)
93
0
    return false;
94
95
  // TODO: Need to verify if these registers are used on 64 bit AIX with Power8
96
  // or above CPU.
97
  // 64-bit only registers:
98
  // 114: tfhar
99
  // 115: tfiar
100
  // 116: texasr
101
0
  AssignToArrayRange(Builder, Address, Eight8, 114, 116);
102
103
0
  return false;
104
0
}
105
106
// AIX
namespace {
/// AIXABIInfo - The AIX XCOFF ABI information.
class AIXABIInfo : public ABIInfo {
  // True when targeting 64-bit AIX; enables the 64-bit promotion rules.
  const bool Is64Bit;
  // Pointer size in bytes: 8 in 64-bit mode, 4 in 32-bit mode.
  const unsigned PtrByteSize;
  // Alignment a value of type Ty receives in the parameter save area.
  CharUnits getParamTypeAlignment(QualType Ty) const;

public:
  AIXABIInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
      : ABIInfo(CGT), Is64Bit(Is64Bit), PtrByteSize(Is64Bit ? 8 : 4) {}

  // Returns true if the ABI requires Ty to be sign-/zero-extended when
  // passed or returned.
  bool isPromotableTypeForABI(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  // Classify the return value and every argument of the function, letting
  // the C++ ABI take the return type first if it wants it.
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class AIXTargetCodeGenInfo : public TargetCodeGenInfo {
  const bool Is64Bit;

public:
  AIXTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
      : TargetCodeGenInfo(std::make_unique<AIXABIInfo>(CGT, Is64Bit)),
        Is64Bit(Is64Bit) {}
  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
} // namespace
150
151
// Return true if the ABI requires Ty to be passed sign- or zero-
152
// extended to 32/64 bits.
153
0
bool AIXABIInfo::isPromotableTypeForABI(QualType Ty) const {
154
  // Treat an enum type as its underlying type.
155
0
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
156
0
    Ty = EnumTy->getDecl()->getIntegerType();
157
158
  // Promotable integer types are required to be promoted by the ABI.
159
0
  if (getContext().isPromotableIntegerType(Ty))
160
0
    return true;
161
162
0
  if (!Is64Bit)
163
0
    return false;
164
165
  // For 64 bit mode, in addition to the usual promotable integer types, we also
166
  // need to extend all 32-bit types, since the ABI requires promotion to 64
167
  // bits.
168
0
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
169
0
    switch (BT->getKind()) {
170
0
    case BuiltinType::Int:
171
0
    case BuiltinType::UInt:
172
0
      return true;
173
0
    default:
174
0
      break;
175
0
    }
176
177
0
  return false;
178
0
}
179
180
0
// Decide how a value of type RetTy is returned under the AIX ABI.
ABIArgInfo AIXABIInfo::classifyReturnType(QualType RetTy) const {
  // Complex and vector values come back directly in registers.
  if (RetTy->isAnyComplexType() || RetTy->isVectorType())
    return ABIArgInfo::getDirect();

  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Aggregates are returned through an sret pointer.
  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  // Scalars: extend when the ABI demands promotion, otherwise return as-is.
  if (isPromotableTypeForABI(RetTy))
    return ABIArgInfo::getExtend(RetTy);
  return ABIArgInfo::getDirect();
}
196
197
0
// Decide how an argument of type Ty is passed under the AIX ABI.
ABIArgInfo AIXABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Complex and vector arguments go directly in registers.
  if (Ty->isAnyComplexType() || Ty->isVectorType())
    return ABIArgInfo::getDirect();

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

    // Pass the aggregate byval at the calling-convention alignment; realign
    // the copy when the type itself is more strictly aligned than that.
    const CharUnits CCAlign = getParamTypeAlignment(Ty);
    const CharUnits TyAlign = getContext().getTypeAlignInChars(Ty);
    return ABIArgInfo::getIndirect(CCAlign, /*ByVal*/ true,
                                   /*Realign*/ TyAlign > CCAlign);
  }

  // Scalars: extend when the ABI demands promotion, otherwise pass as-is.
  if (isPromotableTypeForABI(Ty))
    return ABIArgInfo::getExtend(Ty);
  return ABIArgInfo::getDirect();
}
222
223
0
// Alignment a value of type Ty receives in the AIX parameter save area.
CharUnits AIXABIInfo::getParamTypeAlignment(QualType Ty) const {
  // A complex value is aligned exactly like its element type.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // Vectors, and any record containing a SIMD vector, are 16-byte aligned.
  if (Ty->isVectorType() || isRecordWithSIMDVectorType(getContext(), Ty))
    return CharUnits::fromQuantity(16);

  // Everything else uses the pointer-size alignment for the mode.
  return CharUnits::fromQuantity(PtrByteSize);
}
237
238
// Lower a va_arg read for the AIX ABI.  Values live in pointer-size stack
// slots; narrow complex values need repacking (see comment below).
Address AIXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                              QualType Ty) const {

  auto TypeInfo = getContext().getTypeInfoInChars(Ty);
  // Use the parameter-area alignment rather than the type's natural one.
  TypeInfo.Align = getParamTypeAlignment(Ty);

  CharUnits SlotSize = CharUnits::fromQuantity(PtrByteSize);

  // If we have a complex type and the base type is smaller than the register
  // size, the ABI calls for the real and imaginary parts to be right-adjusted
  // in separate words in 32bit mode or doublewords in 64bit mode. However,
  // Clang expects us to produce a pointer to a structure with the two parts
  // packed tightly. So generate loads of the real and imaginary parts relative
  // to the va_list pointer, and store them to a temporary structure. We do the
  // same as the PPC64ABI here.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    CharUnits EltSize = TypeInfo.Width / 2;
    if (EltSize < SlotSize)
      return complexTempStructure(CGF, VAListAddr, Ty, SlotSize, EltSize, CTy);
  }

  // Common case: read the value (or, for aggregates, its copy) straight out
  // of the overflow area.
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, TypeInfo,
                          SlotSize, /*AllowHigher*/ true);
}
262
263
// Populate the DWARF EH register-size table for AIX by delegating to the
// shared PPC helper.
bool AIXTargetCodeGenInfo::initDwarfEHRegSizeTable(
    CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const {
  return PPC_initDwarfEHRegSizeTable(CGF, Address, Is64Bit, /*IsAIX*/ true);
}
267
268
// PowerPC-32
namespace {
/// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
  // True when floating point is done in software (no FPRs).
  bool IsSoftFloatABI;
  // True when small aggregates are returned in r3/r4 (-msvr4-struct-return).
  bool IsRetSmallStructInRegABI;

  // Alignment a value of type Ty receives in the parameter area.
  CharUnits getParamTypeAlignment(QualType Ty) const;

public:
  PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI,
                     bool RetSmallStructInRegABI)
      : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI),
        IsRetSmallStructInRegABI(RetSmallStructInRegABI) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;

  // Classify the return value and every argument, letting the C++ ABI take
  // the return type first if it wants it.
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI,
                         bool RetSmallStructInRegABI)
      : TargetCodeGenInfo(std::make_unique<PPC32_SVR4_ABIInfo>(
            CGT, SoftFloatABI, RetSmallStructInRegABI)) {}

  // Decide the small-struct-return convention from the triple and the
  // -maix-struct-return / -msvr4-struct-return options.
  static bool isStructReturnInRegABI(const llvm::Triple &Triple,
                                     const CodeGenOptions &Opts);

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
}
315
316
0
// Alignment a value of type Ty receives in the 32-bit SVR4 parameter area.
CharUnits PPC32_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
  // A complex value is aligned exactly like its element type.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // 128-bit (Altivec) vectors are 16-byte aligned; other vectors get 4.
  if (Ty->isVectorType())
    return CharUnits::fromQuantity(
        getContext().getTypeSize(Ty) == 128 ? 16 : 4);

  // A struct wrapping a single float or single 128-bit vector inherits the
  // alignment of that lone element.
  if (const Type *EltType = isSingleElementStruct(Ty, getContext())) {
    if (EltType->isVectorType() && getContext().getTypeSize(EltType) == 128)
      return CharUnits::fromQuantity(16);
    const BuiltinType *BT = EltType->getAs<BuiltinType>();
    if (BT && BT->isFloatingPoint())
      return CharUnits::fromQuantity(4);
  }

  // Default parameter alignment is one 4-byte word.
  return CharUnits::fromQuantity(4);
}
339
340
0
// Decide how a value of type RetTy is returned under the 32-bit SVR4 ABI.
ABIArgInfo PPC32_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
  // -msvr4-struct-return puts small aggregates in GPR3 and GPR4.
  if (isAggregateTypeForABI(RetTy) && IsRetSmallStructInRegABI) {
    const uint64_t Bits = getContext().getTypeSize(RetTy);
    if (Bits <= 64) {
      // System V ABI (1995), page 3-22, specified:
      // > A structure or union whose size is less than or equal to 8 bytes
      // > shall be returned in r3 and r4, as if it were first stored in the
      // > 8-byte aligned memory area and then the low addressed word were
      // > loaded into r3 and the high-addressed word into r4.  Bits beyond
      // > the last member of the structure or union are not defined.
      //
      // GCC for big-endian PPC32 inserts the pad before the first member,
      // not "beyond the last member" of the struct.  To stay compatible
      // with GCC, we coerce the struct to an integer of the same size.
      // LLVM will extend it and return i32 in r3, or i64 in r3:r4.
      if (Bits == 0)
        return ABIArgInfo::getIgnore();
      return ABIArgInfo::getDirect(
          llvm::Type::getIntNTy(getVMContext(), Bits));
    }
  }

  return DefaultABIInfo::classifyReturnType(RetTy);
}
367
368
// TODO: this implementation is now likely redundant with
// DefaultABIInfo::EmitVAArg.
//
// Lower a va_arg read for 32-bit SVR4: arguments are first drawn from a
// register save area (8 GPRs / 8 FPRs), then from an overflow area on the
// stack once the register quota is exhausted.
Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
                                      QualType Ty) const {
  // Darwin uses a simple char* va_list, handled generically.
  if (getTarget().getTriple().isOSDarwin()) {
    auto TI = getContext().getTypeInfoInChars(Ty);
    TI.Align = getParamTypeAlignment(Ty);

    CharUnits SlotSize = CharUnits::fromQuantity(4);
    return emitVoidPtrVAArg(CGF, VAList, Ty,
                            classifyArgumentType(Ty).isIndirect(), TI, SlotSize,
                            /*AllowHigherAlign=*/true);
  }

  // 8 registers of each class are available before spilling to the stack.
  const unsigned OverflowLimit = 8;
  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    // TODO: Implement this. For now ignore.
    (void)CTy;
    return Address::invalid(); // FIXME?
  }

  // struct __va_list_tag {
  //   unsigned char gpr;
  //   unsigned char fpr;
  //   unsigned short reserved;
  //   void *overflow_arg_area;
  //   void *reg_save_area;
  // };

  bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
  bool isInt = !Ty->isFloatingType();
  bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64;

  // All aggregates are passed indirectly?  That doesn't seem consistent
  // with the argument-lowering code.
  bool isIndirect = isAggregateTypeForABI(Ty);

  CGBuilderTy &Builder = CGF.Builder;

  // The calling convention either uses 1-2 GPRs or 1 FPR.
  Address NumRegsAddr = Address::invalid();
  if (isInt || IsSoftFloatABI) {
    NumRegsAddr = Builder.CreateStructGEP(VAList, 0, "gpr");
  } else {
    NumRegsAddr = Builder.CreateStructGEP(VAList, 1, "fpr");
  }

  llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs");

  // "Align" the register count when TY is i64: a 64-bit value occupies an
  // even/odd GPR pair, so round the count up to even.
  if (isI64 || (isF64 && IsSoftFloatABI)) {
    NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
    NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
  }

  llvm::Value *CC =
      Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond");

  llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
  llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
  llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");

  Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);

  llvm::Type *DirectTy = CGF.ConvertType(Ty), *ElementTy = DirectTy;
  if (isIndirect)
    DirectTy = CGF.UnqualPtrTy;

  // Case 1: consume registers.
  Address RegAddr = Address::invalid();
  {
    CGF.EmitBlock(UsingRegs);

    Address RegSaveAreaPtr = Builder.CreateStructGEP(VAList, 4);
    RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr), CGF.Int8Ty,
                      CharUnits::fromQuantity(8));
    assert(RegAddr.getElementType() == CGF.Int8Ty);

    // Floating-point registers start after the general-purpose registers.
    if (!(isInt || IsSoftFloatABI)) {
      RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr,
                                                   CharUnits::fromQuantity(32));
    }

    // Get the address of the saved value by scaling the number of
    // registers we've used by the size of each register slot.
    CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
    llvm::Value *RegOffset =
        Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
    RegAddr = Address(
        Builder.CreateInBoundsGEP(CGF.Int8Ty, RegAddr.getPointer(), RegOffset),
        DirectTy, RegAddr.getAlignment().alignmentOfArrayElement(RegSize));

    // Increase the used-register count.
    NumRegs =
      Builder.CreateAdd(NumRegs,
                        Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
    Builder.CreateStore(NumRegs, NumRegsAddr);

    CGF.EmitBranch(Cont);
  }

  // Case 2: consume space in the overflow area.
  Address MemAddr = Address::invalid();
  {
    CGF.EmitBlock(UsingOverflow);

    // Once we overflow, all remaining arguments come from memory.
    Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);

    // Everything in the overflow area is rounded up to a size of at least 4.
    CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4);

    CharUnits Size;
    if (!isIndirect) {
      auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
      Size = TypeInfo.Width.alignTo(OverflowAreaAlign);
    } else {
      Size = CGF.getPointerSize();
    }

    Address OverflowAreaAddr = Builder.CreateStructGEP(VAList, 3);
    Address OverflowArea =
        Address(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"), CGF.Int8Ty,
                OverflowAreaAlign);
    // Round up address of argument to alignment
    CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
    if (Align > OverflowAreaAlign) {
      llvm::Value *Ptr = OverflowArea.getPointer();
      OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
                             OverflowArea.getElementType(), Align);
    }

    MemAddr = OverflowArea.withElementType(DirectTy);

    // Increase the overflow area.
    OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
    Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
    CGF.EmitBranch(Cont);
  }

  CGF.EmitBlock(Cont);

  // Merge the cases with a phi.
  Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,
                                "vaarg.addr");

  // Load the pointer if the argument was passed indirectly.
  if (isIndirect) {
    Result = Address(Builder.CreateLoad(Result, "aggr"), ElementTy,
                     getContext().getTypeAlignInChars(Ty));
  }

  return Result;
}
522
523
bool PPC32TargetCodeGenInfo::isStructReturnInRegABI(
524
0
    const llvm::Triple &Triple, const CodeGenOptions &Opts) {
525
0
  assert(Triple.isPPC32());
526
527
0
  switch (Opts.getStructReturnConvention()) {
528
0
  case CodeGenOptions::SRCK_Default:
529
0
    break;
530
0
  case CodeGenOptions::SRCK_OnStack: // -maix-struct-return
531
0
    return false;
532
0
  case CodeGenOptions::SRCK_InRegs: // -msvr4-struct-return
533
0
    return true;
534
0
  }
535
536
0
  if (Triple.isOSBinFormatELF() && !Triple.isOSLinux())
537
0
    return true;
538
539
0
  return false;
540
0
}
541
542
// Populate the DWARF EH register-size table for 32-bit SVR4 by delegating
// to the shared PPC helper.
bool
PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                llvm::Value *Address) const {
  return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ false,
                                     /*IsAIX*/ false);
}
548
549
// PowerPC-64

namespace {

/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
class PPC64_SVR4_ABIInfo : public ABIInfo {
  // Width of a general-purpose register in bits.
  static const unsigned GPRBits = 64;
  // ELFv1 vs. ELFv2 flavor of the 64-bit ELF ABI.
  PPC64_SVR4_ABIKind Kind;
  // True when floating point is done in software (no FPRs).
  bool IsSoftFloatABI;

public:
  PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, PPC64_SVR4_ABIKind Kind,
                     bool SoftFloatABI)
      : ABIInfo(CGT), Kind(Kind), IsSoftFloatABI(SoftFloatABI) {}

  // Returns true if the ABI requires Ty to be sign-/zero-extended to 64 bits.
  bool isPromotableTypeForABI(QualType Ty) const;
  // Alignment a value of type Ty receives in the parameter save area.
  CharUnits getParamTypeAlignment(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;

  // TODO: We can add more logic to computeInfo to improve performance.
  // Example: For aggregate arguments that fit in a register, we could
  // use getDirectInReg (as is done below for structs containing a single
  // floating-point value) to avoid pushing them to memory on function
  // entry.  This would require changing the logic in PPCISelLowering
  // when lowering the parameters in the caller and args in the callee.
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments()) {
      // We rely on the default argument classification for the most part.
      // One exception:  An aggregate containing a single floating-point
      // or vector item must be passed in a register if one is available.
      const Type *T = isSingleElementStruct(I.type, getContext());
      if (T) {
        const BuiltinType *BT = T->getAs<BuiltinType>();
        if ((T->isVectorType() && getContext().getTypeSize(T) == 128) ||
            (BT && BT->isFloatingPoint())) {
          QualType QT(T, 0);
          I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
          continue;
        }
      }
      I.info = classifyArgumentType(I.type);
    }
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {

public:
  PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT, PPC64_SVR4_ABIKind Kind,
                               bool SoftFloatABI)
      : TargetCodeGenInfo(
            std::make_unique<PPC64_SVR4_ABIInfo>(CGT, Kind, SoftFloatABI)) {
    SwiftInfo =
        std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/false);
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
  void emitTargetMetadata(CodeGen::CodeGenModule &CGM,
                          const llvm::MapVector<GlobalDecl, StringRef>
                              &MangledDeclNames) const override;
};

// Fallback codegen info for non-SVR4 64-bit PPC targets; uses the default
// ABI lowering.
class PPC64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PPC64TargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
}
642
643
// Return true if the ABI requires Ty to be passed sign- or zero-
644
// extended to 64 bits.
645
bool
646
0
PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
647
  // Treat an enum type as its underlying type.
648
0
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
649
0
    Ty = EnumTy->getDecl()->getIntegerType();
650
651
  // Promotable integer types are required to be promoted by the ABI.
652
0
  if (isPromotableIntegerTypeForABI(Ty))
653
0
    return true;
654
655
  // In addition to the usual promotable integer types, we also need to
656
  // extend all 32-bit types, since the ABI requires promotion to 64 bits.
657
0
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
658
0
    switch (BT->getKind()) {
659
0
    case BuiltinType::Int:
660
0
    case BuiltinType::UInt:
661
0
      return true;
662
0
    default:
663
0
      break;
664
0
    }
665
666
0
  if (const auto *EIT = Ty->getAs<BitIntType>())
667
0
    if (EIT->getNumBits() < 64)
668
0
      return true;
669
670
0
  return false;
671
0
}
672
673
/// isAlignedParamType - Determine whether a type requires 16-byte or
/// higher alignment in the parameter area.  Always returns at least 8.
CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
  // Complex types are passed just like their elements.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // True for floating-point types lowered to IEEE binary128 (quad
  // precision), which travel in vector registers.
  auto FloatUsesVector = [this](QualType Ty){
    return Ty->isRealFloatingType() && &getContext().getFloatTypeSemantics(
                                           Ty) == &llvm::APFloat::IEEEquad();
  };

  // Only vector types of size 16 bytes need alignment (larger types are
  // passed via reference, smaller types are not aligned).
  if (Ty->isVectorType()) {
    return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8);
  } else if (FloatUsesVector(Ty)) {
    // According to ABI document section 'Optional Save Areas': If extended
    // precision floating-point values in IEEE BINARY 128 QUADRUPLE PRECISION
    // format are supported, map them to a single quadword, quadword aligned.
    return CharUnits::fromQuantity(16);
  }

  // For single-element float/vector structs, we consider the whole type
  // to have the same alignment requirements as its single element.
  const Type *AlignAsType = nullptr;
  const Type *EltType = isSingleElementStruct(Ty, getContext());
  if (EltType) {
    const BuiltinType *BT = EltType->getAs<BuiltinType>();
    if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
        (BT && BT->isFloatingPoint()))
      AlignAsType = EltType;
  }

  // Likewise for ELFv2 homogeneous aggregates.
  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (!AlignAsType && Kind == PPC64_SVR4_ABIKind::ELFv2 &&
      isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
    AlignAsType = Base;

  // With special case aggregates, only vector base types need alignment.
  if (AlignAsType) {
    bool UsesVector = AlignAsType->isVectorType() ||
                      FloatUsesVector(QualType(AlignAsType, 0));
    return CharUnits::fromQuantity(UsesVector ? 16 : 8);
  }

  // Otherwise, we only need alignment for any aggregate type that
  // has an alignment requirement of >= 16 bytes.
  if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) {
    return CharUnits::fromQuantity(16);
  }

  return CharUnits::fromQuantity(8);
}
729
730
0
// A type may serve as the base of an ELFv2 homogeneous aggregate if it is
// float, double, long double, __ibm128, __float128 (when available), or a
// 128-bit vector.  Floating bases are rejected under the soft-float ABI.
bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    case BuiltinType::Float:
    case BuiltinType::Double:
    case BuiltinType::LongDouble:
    case BuiltinType::Ibm128:
      return !IsSoftFloatABI;
    case BuiltinType::Float128:
      return getContext().getTargetInfo().hasFloat128Type() && !IsSoftFloatABI;
    default:
      break;
    }
  }
  if (const VectorType *VT = Ty->getAs<VectorType>())
    return getContext().getTypeSize(VT) == 128;
  return false;
}
751
752
bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
753
0
    const Type *Base, uint64_t Members) const {
754
  // Vector and fp128 types require one register, other floating point types
755
  // require one or two registers depending on their size.
756
0
  uint32_t NumRegs =
757
0
      ((getContext().getTargetInfo().hasFloat128Type() &&
758
0
          Base->isFloat128Type()) ||
759
0
        Base->isVectorType()) ? 1
760
0
                              : (getContext().getTypeSize(Base) + 63) / 64;
761
762
  // Homogeneous Aggregates may occupy at most 8 registers.
763
0
  return Members * NumRegs <= 8;
764
0
}
765
766
ABIArgInfo
PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
  // A transparent union is classified as its first member.
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Complex values are passed directly.
  if (Ty->isAnyComplexType())
    return ABIArgInfo::getDirect();

  // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
  // or via reference (larger than 16 bytes).  An exactly-128-bit vector
  // falls through and is passed directly at the bottom.
  if (Ty->isVectorType()) {
    uint64_t VecBits = getContext().getTypeSize(Ty);
    if (VecBits > 128)
      return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
    if (VecBits < 128)
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), VecBits));
  }

  // Oversized _BitInt values are passed indirectly, byval.
  if (const auto *EIT = Ty->getAs<BitIntType>())
    if (EIT->getNumBits() > 128)
      return getNaturalAlignIndirect(Ty, /*ByVal=*/true);

  if (!isAggregateTypeForABI(Ty))
    // Scalars: small integer types are promoted; everything else is
    // passed directly.
    return isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                      : ABIArgInfo::getDirect();

  // Records with non-trivial copy/destruction semantics must go
  // indirectly, as the C++ ABI dictates.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

  uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
  uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();

  // ELFv2 homogeneous aggregates are passed as array types.
  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (Kind == PPC64_SVR4_ABIKind::ELFv2 &&
      isHomogeneousAggregate(Ty, Base, Members))
    return ABIArgInfo::getDirect(
        llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));

  // If an aggregate may end up fully in registers, we do not
  // use the ByVal method, but pass the aggregate as array.
  // This is usually beneficial since we avoid forcing the
  // back-end to store the argument to memory.
  uint64_t Bits = getContext().getTypeSize(Ty);
  if (Bits > 0 && Bits <= 8 * GPRBits) {
    // Types up to 8 bytes are passed as integer type (which will be
    // properly aligned in the argument save area doubleword).
    if (Bits <= GPRBits)
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8)));

    // Larger types are passed as arrays, with the base type selected
    // according to the required alignment in the save area.
    uint64_t RegBits = ABIAlign * 8;
    uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
    llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
    return ABIArgInfo::getDirect(llvm::ArrayType::get(RegTy, NumRegs));
  }

  // All other aggregates are passed ByVal, realigned if the type's
  // natural alignment exceeds the ABI alignment.
  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
                                 /*ByVal=*/true,
                                 /*Realign=*/TyAlign > ABIAlign);
}
840
841
ABIArgInfo
PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Complex values are returned directly.
  if (RetTy->isAnyComplexType())
    return ABIArgInfo::getDirect();

  // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
  // or via reference (larger than 16 bytes).  An exactly-128-bit vector
  // falls through and is returned directly at the bottom.
  if (RetTy->isVectorType()) {
    uint64_t VecBits = getContext().getTypeSize(RetTy);
    if (VecBits > 128)
      return getNaturalAlignIndirect(RetTy);
    if (VecBits < 128)
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), VecBits));
  }

  // Oversized _BitInt values are returned indirectly.
  if (const auto *EIT = RetTy->getAs<BitIntType>())
    if (EIT->getNumBits() > 128)
      return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);

  if (!isAggregateTypeForABI(RetTy))
    // Scalars: small integer types are promoted; everything else is
    // returned directly.
    return isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                         : ABIArgInfo::getDirect();

  // ELFv2 homogeneous aggregates are returned as array types.
  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (Kind == PPC64_SVR4_ABIKind::ELFv2 &&
      isHomogeneousAggregate(RetTy, Base, Members))
    return ABIArgInfo::getDirect(
        llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));

  // ELFv2 small aggregates are returned in up to two GPRs.
  uint64_t Bits = getContext().getTypeSize(RetTy);
  if (Kind == PPC64_SVR4_ABIKind::ELFv2 && Bits <= 2 * GPRBits) {
    if (Bits == 0)
      return ABIArgInfo::getIgnore();

    llvm::Type *CoerceTy;
    if (Bits > GPRBits) {
      // Two-register return: a struct of two GPR-sized integers.
      llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), GPRBits);
      CoerceTy = llvm::StructType::get(RegTy, RegTy);
    } else {
      // Single register, rounded up to a whole number of bytes.
      CoerceTy = llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
    }
    return ABIArgInfo::getDirect(CoerceTy);
  }

  // All other aggregates are returned indirectly.
  return getNaturalAlignIndirect(RetTy);
}
899
900
// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
901
Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
902
0
                                      QualType Ty) const {
903
0
  auto TypeInfo = getContext().getTypeInfoInChars(Ty);
904
0
  TypeInfo.Align = getParamTypeAlignment(Ty);
905
906
0
  CharUnits SlotSize = CharUnits::fromQuantity(8);
907
908
  // If we have a complex type and the base type is smaller than 8 bytes,
909
  // the ABI calls for the real and imaginary parts to be right-adjusted
910
  // in separate doublewords.  However, Clang expects us to produce a
911
  // pointer to a structure with the two parts packed tightly.  So generate
912
  // loads of the real and imaginary parts relative to the va_list pointer,
913
  // and store them to a temporary structure.
914
0
  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
915
0
    CharUnits EltSize = TypeInfo.Width / 2;
916
0
    if (EltSize < SlotSize)
917
0
      return complexTempStructure(CGF, VAListAddr, Ty, SlotSize, EltSize, CTy);
918
0
  }
919
920
  // Otherwise, just use the general rule.
921
  //
922
  // The PPC64 ABI passes some arguments in integer registers, even to variadic
923
  // functions. To allow va_list to use the simple "void*" representation,
924
  // variadic calls allocate space in the argument area for the integer argument
925
  // registers, and variadic functions spill their integer argument registers to
926
  // this area in their prologues. When aggregates smaller than a register are
927
  // passed this way, they are passed in the least significant bits of the
928
  // register, which means that after spilling on big-endian targets they will
929
  // be right-aligned in their argument slot. This is uncommon; for a variety of
930
  // reasons, other big-endian targets don't end up right-aligning aggregate
931
  // types this way, and so right-alignment only applies to fundamental types.
932
  // So on PPC64, we must force the use of right-alignment even for aggregates.
933
0
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, TypeInfo,
934
0
                          SlotSize, /*AllowHigher*/ true,
935
0
                          /*ForceRightAdjust*/ true);
936
0
}
937
938
bool
939
PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
940
  CodeGen::CodeGenFunction &CGF,
941
0
  llvm::Value *Address) const {
942
0
  return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
943
0
                                     /*IsAIX*/ false);
944
0
}
945
946
void PPC64_SVR4_TargetCodeGenInfo::emitTargetMetadata(
947
    CodeGen::CodeGenModule &CGM,
948
0
    const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames) const {
949
0
  if (CGM.getTypes().isLongDoubleReferenced()) {
950
0
    llvm::LLVMContext &Ctx = CGM.getLLVMContext();
951
0
    const auto *flt = &CGM.getTarget().getLongDoubleFormat();
952
0
    if (flt == &llvm::APFloat::PPCDoubleDouble())
953
0
      CGM.getModule().addModuleFlag(llvm::Module::Error, "float-abi",
954
0
                                    llvm::MDString::get(Ctx, "doubledouble"));
955
0
    else if (flt == &llvm::APFloat::IEEEquad())
956
0
      CGM.getModule().addModuleFlag(llvm::Module::Error, "float-abi",
957
0
                                    llvm::MDString::get(Ctx, "ieeequad"));
958
0
    else if (flt == &llvm::APFloat::IEEEdouble())
959
0
      CGM.getModule().addModuleFlag(llvm::Module::Error, "float-abi",
960
0
                                    llvm::MDString::get(Ctx, "ieeedouble"));
961
0
  }
962
0
}
963
964
bool
965
PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
966
0
                                                llvm::Value *Address) const {
967
0
  return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
968
0
                                     /*IsAIX*/ false);
969
0
}
970
971
std::unique_ptr<TargetCodeGenInfo>
972
0
CodeGen::createAIXTargetCodeGenInfo(CodeGenModule &CGM, bool Is64Bit) {
973
0
  return std::make_unique<AIXTargetCodeGenInfo>(CGM.getTypes(), Is64Bit);
974
0
}
975
976
std::unique_ptr<TargetCodeGenInfo>
977
0
CodeGen::createPPC32TargetCodeGenInfo(CodeGenModule &CGM, bool SoftFloatABI) {
978
0
  bool RetSmallStructInRegABI = PPC32TargetCodeGenInfo::isStructReturnInRegABI(
979
0
      CGM.getTriple(), CGM.getCodeGenOpts());
980
0
  return std::make_unique<PPC32TargetCodeGenInfo>(CGM.getTypes(), SoftFloatABI,
981
0
                                                  RetSmallStructInRegABI);
982
0
}
983
984
std::unique_ptr<TargetCodeGenInfo>
985
0
CodeGen::createPPC64TargetCodeGenInfo(CodeGenModule &CGM) {
986
0
  return std::make_unique<PPC64TargetCodeGenInfo>(CGM.getTypes());
987
0
}
988
989
std::unique_ptr<TargetCodeGenInfo> CodeGen::createPPC64_SVR4_TargetCodeGenInfo(
990
0
    CodeGenModule &CGM, PPC64_SVR4_ABIKind Kind, bool SoftFloatABI) {
991
0
  return std::make_unique<PPC64_SVR4_TargetCodeGenInfo>(CGM.getTypes(), Kind,
992
0
                                                        SoftFloatABI);
993
0
}