/src/llvm-project/clang/lib/CodeGen/ABIInfo.cpp
Line | Count | Source
1 | | //===- ABIInfo.cpp --------------------------------------------------------===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | |
9 | | #include "ABIInfo.h" |
10 | | #include "ABIInfoImpl.h" |
11 | | |
12 | | using namespace clang; |
13 | | using namespace clang::CodeGen; |
14 | | |
15 | | // Pin the vtable to this file. |
16 | 46 | ABIInfo::~ABIInfo() = default; |
17 | | |
18 | 0 | CGCXXABI &ABIInfo::getCXXABI() const { return CGT.getCXXABI(); } |
19 | | |
20 | 0 | ASTContext &ABIInfo::getContext() const { return CGT.getContext(); } |
21 | | |
22 | 0 | llvm::LLVMContext &ABIInfo::getVMContext() const { |
23 | 0 | return CGT.getLLVMContext(); |
24 | 0 | } |
25 | | |
26 | 0 | const llvm::DataLayout &ABIInfo::getDataLayout() const { |
27 | 0 | return CGT.getDataLayout(); |
28 | 0 | } |
29 | | |
30 | 0 | const TargetInfo &ABIInfo::getTarget() const { return CGT.getTarget(); } |
31 | | |
32 | 0 | const CodeGenOptions &ABIInfo::getCodeGenOpts() const { |
33 | 0 | return CGT.getCodeGenOpts(); |
34 | 0 | } |
35 | | |
36 | 0 | bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); } |
37 | | |
38 | 0 | bool ABIInfo::isOHOSFamily() const { |
39 | 0 | return getTarget().getTriple().isOHOSFamily(); |
40 | 0 | } |
41 | | |
42 | | Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, |
43 | 0 | QualType Ty) const { |
44 | 0 | return Address::invalid(); |
45 | 0 | } |
46 | | |
47 | 0 | bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { |
48 | 0 | return false; |
49 | 0 | } |
50 | | |
51 | | bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base, |
52 | 0 | uint64_t Members) const { |
53 | 0 | return false; |
54 | 0 | } |
55 | | |
56 | 0 | bool ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate() const { |
57 | | // For compatibility with GCC, ignore empty bitfields in C++ mode. |
58 | 0 | return getContext().getLangOpts().CPlusPlus; |
59 | 0 | } |
60 | | |
61 | | bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base, |
62 | 0 | uint64_t &Members) const { |
63 | 0 | if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { |
64 | 0 | uint64_t NElements = AT->getSize().getZExtValue(); |
65 | 0 | if (NElements == 0) |
66 | 0 | return false; |
67 | 0 | if (!isHomogeneousAggregate(AT->getElementType(), Base, Members)) |
68 | 0 | return false; |
69 | 0 | Members *= NElements; |
70 | 0 | } else if (const RecordType *RT = Ty->getAs<RecordType>()) { |
71 | 0 | const RecordDecl *RD = RT->getDecl(); |
72 | 0 | if (RD->hasFlexibleArrayMember()) |
73 | 0 | return false; |
74 | | |
75 | 0 | Members = 0; |
76 | | |
77 | | // If this is a C++ record, check the properties of the record, such as
78 | | // bases and ABI-specific restrictions.
79 | 0 | if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { |
80 | 0 | if (!getCXXABI().isPermittedToBeHomogeneousAggregate(CXXRD)) |
81 | 0 | return false; |
82 | | |
83 | 0 | for (const auto &I : CXXRD->bases()) { |
84 | | // Ignore empty records. |
85 | 0 | if (isEmptyRecord(getContext(), I.getType(), true)) |
86 | 0 | continue; |
87 | | |
88 | 0 | uint64_t FldMembers; |
89 | 0 | if (!isHomogeneousAggregate(I.getType(), Base, FldMembers)) |
90 | 0 | return false; |
91 | | |
92 | 0 | Members += FldMembers; |
93 | 0 | } |
94 | 0 | } |
95 | | |
96 | 0 | for (const auto *FD : RD->fields()) { |
97 | | // Ignore (non-zero arrays of) empty records. |
98 | 0 | QualType FT = FD->getType(); |
99 | 0 | while (const ConstantArrayType *AT = |
100 | 0 | getContext().getAsConstantArrayType(FT)) { |
101 | 0 | if (AT->getSize().getZExtValue() == 0) |
102 | 0 | return false; |
103 | 0 | FT = AT->getElementType(); |
104 | 0 | } |
105 | 0 | if (isEmptyRecord(getContext(), FT, true)) |
106 | 0 | continue; |
107 | | |
108 | 0 | if (isZeroLengthBitfieldPermittedInHomogeneousAggregate() && |
109 | 0 | FD->isZeroLengthBitField(getContext())) |
110 | 0 | continue; |
111 | | |
112 | 0 | uint64_t FldMembers; |
113 | 0 | if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers)) |
114 | 0 | return false; |
115 | | |
116 | 0 | Members = (RD->isUnion() ? |
117 | 0 | std::max(Members, FldMembers) : Members + FldMembers); |
118 | 0 | } |
119 | | |
120 | 0 | if (!Base) |
121 | 0 | return false; |
122 | | |
123 | | // Ensure there is no padding. |
124 | 0 | if (getContext().getTypeSize(Base) * Members != |
125 | 0 | getContext().getTypeSize(Ty)) |
126 | 0 | return false; |
127 | 0 | } else { |
128 | 0 | Members = 1; |
129 | 0 | if (const ComplexType *CT = Ty->getAs<ComplexType>()) { |
130 | 0 | Members = 2; |
131 | 0 | Ty = CT->getElementType(); |
132 | 0 | } |
133 | | |
134 | | // Most ABIs only support float, double, and some vector type widths. |
135 | 0 | if (!isHomogeneousAggregateBaseType(Ty)) |
136 | 0 | return false; |
137 | | |
138 | | // The base type must be the same for all members. Types that |
139 | | // agree in both total size and mode (float vs. vector) are |
140 | | // treated as being equivalent here. |
141 | 0 | const Type *TyPtr = Ty.getTypePtr(); |
142 | 0 | if (!Base) { |
143 | 0 | Base = TyPtr; |
144 | | // A non-power-of-2 vector's ABI size is already rounded up to a power of 2,
145 | | // so explicitly widen the base type to match that padded size.
146 | 0 | if (const VectorType *VT = Base->getAs<VectorType>()) { |
147 | 0 | QualType EltTy = VT->getElementType(); |
148 | 0 | unsigned NumElements = |
149 | 0 | getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy); |
150 | 0 | Base = getContext() |
151 | 0 | .getVectorType(EltTy, NumElements, VT->getVectorKind()) |
152 | 0 | .getTypePtr(); |
153 | 0 | } |
154 | 0 | } |
155 | |
156 | 0 | if (Base->isVectorType() != TyPtr->isVectorType() || |
157 | 0 | getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr)) |
158 | 0 | return false; |
159 | 0 | } |
160 | 0 | return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members); |
161 | 0 | } |
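// --- Editor's illustrative sketch (not part of ABIInfo.cpp) -----------------
// The member-counting rule above sums members for structs, takes the maximum
// for unions, and finally requires size(Base) * Members == size(Ty) so that
// any padding disqualifies the aggregate. A minimal standalone analogue of
// that rule, assuming a toy flat field description instead of clang's AST:
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

// Returns the homogeneous member count for a record, or 0 if it is rejected.
// FieldMembers holds, per field, how many homogeneous members it contributes
// (0 meaning the field itself is not homogeneous).
uint64_t countHomogeneousMembers(const std::vector<uint64_t> &FieldMembers,
                                 bool IsUnion, uint64_t BaseSizeBits,
                                 uint64_t RecordSizeBits) {
  uint64_t Members = 0;
  for (uint64_t FldMembers : FieldMembers) {
    if (FldMembers == 0)
      return 0; // a non-homogeneous field poisons the whole record
    Members = IsUnion ? std::max(Members, FldMembers) : Members + FldMembers;
  }
  // Mirror of "getTypeSize(Base) * Members != getTypeSize(Ty)": reject padding.
  return BaseSizeBits * Members == RecordSizeBits ? Members : 0;
}

int main() {
  // struct { float x, y, z, w; }: four 32-bit members, 128 bits total -> 4.
  assert(countHomogeneousMembers({1, 1, 1, 1}, /*IsUnion=*/false, 32, 128) == 4);
  // union { float f; float v[2]; }: max(1, 2) members, 64 bits total -> 2.
  assert(countHomogeneousMembers({1, 2}, /*IsUnion=*/true, 32, 64) == 2);
  // Mixed base types (e.g. float next to double) are not modeled here; the
  // real code rejects them earlier via the size/mode comparison on Base.
}
// ----------------------------------------------------------------------------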
162 | | |
163 | 0 | bool ABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const { |
164 | 0 | if (getContext().isPromotableIntegerType(Ty)) |
165 | 0 | return true; |
166 | | |
167 | 0 | if (const auto *EIT = Ty->getAs<BitIntType>()) |
168 | 0 | if (EIT->getNumBits() < getContext().getTypeSize(getContext().IntTy)) |
169 | 0 | return true; |
170 | | |
171 | 0 | return false; |
172 | 0 | } |
173 | | |
174 | | ABIArgInfo ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByVal, |
175 | | bool Realign, |
176 | 0 | llvm::Type *Padding) const { |
177 | 0 | return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty), ByVal, |
178 | 0 | Realign, Padding); |
179 | 0 | } |
180 | | |
181 | | ABIArgInfo ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, |
182 | 0 | bool Realign) const { |
183 | 0 | return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty), |
184 | 0 | /*ByVal*/ false, Realign); |
185 | 0 | } |
186 | | |
187 | | // Pin the vtable to this file. |
188 | 46 | SwiftABIInfo::~SwiftABIInfo() = default; |
189 | | |
190 | | /// Does the given lowering require more than the given number of |
191 | | /// registers when expanded? |
192 | | /// |
193 | | /// This is intended to serve as the basis of a reasonable default
194 | | /// implementation of should{Pass,Return}Indirectly.
195 | | /// |
196 | | /// For most targets, a limit of four total registers is reasonable; this |
197 | | /// limits the amount of code required in order to move around the value |
198 | | /// in case it wasn't produced immediately prior to the call by the caller |
199 | | /// (or wasn't produced in exactly the right registers) or isn't used |
200 | | /// immediately within the callee. But some targets may need to further |
201 | | /// limit the register count due to an inability to support that many |
202 | | /// return registers. |
203 | | bool SwiftABIInfo::occupiesMoreThan(ArrayRef<llvm::Type *> scalarTypes, |
204 | 0 | unsigned maxAllRegisters) const { |
205 | 0 | unsigned intCount = 0, fpCount = 0; |
206 | 0 | for (llvm::Type *type : scalarTypes) { |
207 | 0 | if (type->isPointerTy()) { |
208 | 0 | intCount++; |
209 | 0 | } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) { |
210 | 0 | auto ptrWidth = CGT.getTarget().getPointerWidth(LangAS::Default); |
211 | 0 | intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth; |
212 | 0 | } else { |
213 | 0 | assert(type->isVectorTy() || type->isFloatingPointTy()); |
214 | 0 | fpCount++; |
215 | 0 | } |
216 | 0 | } |
217 | |
218 | 0 | return (intCount + fpCount > maxAllRegisters); |
219 | 0 | } |
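// Editor's note (worked example, not part of ABIInfo.cpp): on a hypothetical
// 64-bit target, lowering a value to the scalar types { ptr, i64, i128, float }
// gives intCount = 1 (ptr) + 1 (i64) + 2 (i128 spans two 64-bit registers) = 4
// and fpCount = 1, so 4 + 1 > 4 and shouldPassIndirectly() below returns true.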
220 | | |
221 | | bool SwiftABIInfo::shouldPassIndirectly(ArrayRef<llvm::Type *> ComponentTys, |
222 | 0 | bool AsReturnValue) const { |
223 | 0 | return occupiesMoreThan(ComponentTys, /*total=*/4); |
224 | 0 | } |
225 | | |
226 | | bool SwiftABIInfo::isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy, |
227 | 0 | unsigned NumElts) const { |
228 | | // The default implementation of this assumes that the target guarantees |
229 | | // 128-bit SIMD support but nothing more. |
230 | 0 | return (VectorSize.getQuantity() > 8 && VectorSize.getQuantity() <= 16); |
231 | 0 | } |