/src/llvm-project/clang/lib/CodeGen/Targets/RISCV.cpp
//===- RISCV.cpp ----------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "TargetInfo.h"

using namespace clang;
using namespace clang::CodeGen;

//===----------------------------------------------------------------------===//
// RISC-V ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
class RISCVABIInfo : public DefaultABIInfo {
private:
  // Size of the integer ('x') registers in bits.
  unsigned XLen;
  // Size of the floating point ('f') registers in bits. Note that the target
  // ISA might have a wider FLen than the selected ABI (e.g. an RV32IF target
  // with soft float ABI has FLen==0).
  unsigned FLen;
  const int NumArgGPRs;
  const int NumArgFPRs;
  const bool EABI;
  bool detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
                                      llvm::Type *&Field1Ty,
                                      CharUnits &Field1Off,
                                      llvm::Type *&Field2Ty,
                                      CharUnits &Field2Off) const;

public:
  RISCVABIInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen, unsigned FLen,
               bool EABI)
      : DefaultABIInfo(CGT), XLen(XLen), FLen(FLen), NumArgGPRs(EABI ? 6 : 8),
        NumArgFPRs(FLen != 0 ? 8 : 0), EABI(EABI) {}

  // DefaultABIInfo's classifyReturnType and classifyArgumentType are
  // non-virtual, but computeInfo is virtual, so we override it.
  void computeInfo(CGFunctionInfo &FI) const override;

  ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, int &ArgGPRsLeft,
                                  int &ArgFPRsLeft) const;
  ABIArgInfo classifyReturnType(QualType RetTy) const;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  ABIArgInfo extendType(QualType Ty) const;

  bool detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
                                CharUnits &Field1Off, llvm::Type *&Field2Ty,
                                CharUnits &Field2Off, int &NeededArgGPRs,
                                int &NeededArgFPRs) const;
  ABIArgInfo coerceAndExpandFPCCEligibleStruct(llvm::Type *Field1Ty,
                                               CharUnits Field1Off,
                                               llvm::Type *Field2Ty,
                                               CharUnits Field2Off) const;

  ABIArgInfo coerceVLSVector(QualType Ty) const;
};
} // end anonymous namespace

void RISCVABIInfo::computeInfo(CGFunctionInfo &FI) const {
  QualType RetTy = FI.getReturnType();
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(RetTy);

  // IsRetIndirect is true if classifyArgumentType indicated the value should
  // be passed indirect, or if the type size is a scalar greater than 2*XLen
  // and not a complex type with elements <= FLen. e.g. fp128 is passed direct
  // in LLVM IR, relying on the backend lowering code to rewrite the argument
  // list and pass indirectly on RV32.
  bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect;
  if (!IsRetIndirect && RetTy->isScalarType() &&
      getContext().getTypeSize(RetTy) > (2 * XLen)) {
    if (RetTy->isComplexType() && FLen) {
      QualType EltTy = RetTy->castAs<ComplexType>()->getElementType();
      IsRetIndirect = getContext().getTypeSize(EltTy) > FLen;
    } else {
      // This is a normal scalar > 2*XLen, such as fp128 on RV32.
      IsRetIndirect = true;
    }
  }

  int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
  int ArgFPRsLeft = NumArgFPRs;
  int NumFixedArgs = FI.getNumRequiredArgs();

  int ArgNum = 0;
  for (auto &ArgInfo : FI.arguments()) {
    bool IsFixed = ArgNum < NumFixedArgs;
    ArgInfo.info =
        classifyArgumentType(ArgInfo.type, IsFixed, ArgGPRsLeft, ArgFPRsLeft);
    ArgNum++;
  }
}

// Returns true if the struct is a potential candidate for the floating point
// calling convention. If this function returns true, the caller is
// responsible for checking that if there is only a single field then that
// field is a float.
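// For example, struct { float f; } and (given FLen >= 64 and XLen >= 32)
// struct { double d; int32_t i; } flatten to eligible fp and fp+int shapes,
// while struct { int a; int b; } is rejected because int+int pairs are not
// eligible.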
bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
                                                  llvm::Type *&Field1Ty,
                                                  CharUnits &Field1Off,
                                                  llvm::Type *&Field2Ty,
                                                  CharUnits &Field2Off) const {
  bool IsInt = Ty->isIntegralOrEnumerationType();
  bool IsFloat = Ty->isRealFloatingType();

  if (IsInt || IsFloat) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (IsInt && Size > XLen)
      return false;
    // Can't be eligible if larger than the FP registers. Handling of half
    // precision values has been specified in the ABI, so don't block those.
    if (IsFloat && Size > FLen)
      return false;
    // Can't be eligible if an integer type was already found (int+int pairs
    // are not eligible).
    if (IsInt && Field1Ty && Field1Ty->isIntegerTy())
      return false;
    if (!Field1Ty) {
      Field1Ty = CGT.ConvertType(Ty);
      Field1Off = CurOff;
      return true;
    }
    if (!Field2Ty) {
      Field2Ty = CGT.ConvertType(Ty);
      Field2Off = CurOff;
      return true;
    }
    return false;
  }

  if (auto CTy = Ty->getAs<ComplexType>()) {
    if (Field1Ty)
      return false;
    QualType EltTy = CTy->getElementType();
    if (getContext().getTypeSize(EltTy) > FLen)
      return false;
    Field1Ty = CGT.ConvertType(EltTy);
    Field1Off = CurOff;
    Field2Ty = Field1Ty;
    Field2Off = Field1Off + getContext().getTypeSizeInChars(EltTy);
    return true;
  }

  if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
    uint64_t ArraySize = ATy->getSize().getZExtValue();
    QualType EltTy = ATy->getElementType();
    // Non-zero-length arrays of empty records make the struct ineligible for
    // the FP calling convention in C++.
    if (const auto *RTy = EltTy->getAs<RecordType>()) {
      if (ArraySize != 0 && isa<CXXRecordDecl>(RTy->getDecl()) &&
          isEmptyRecord(getContext(), EltTy, true, true))
        return false;
    }
    CharUnits EltSize = getContext().getTypeSizeInChars(EltTy);
    for (uint64_t i = 0; i < ArraySize; ++i) {
      bool Ret = detectFPCCEligibleStructHelper(EltTy, CurOff, Field1Ty,
                                                Field1Off, Field2Ty, Field2Off);
      if (!Ret)
        return false;
      CurOff += EltSize;
    }
    return true;
  }

  if (const auto *RTy = Ty->getAs<RecordType>()) {
    // Structures with either a non-trivial destructor or a non-trivial
    // copy constructor are not eligible for the FP calling convention.
    if (getRecordArgABI(Ty, CGT.getCXXABI()))
      return false;
    if (isEmptyRecord(getContext(), Ty, true, true))
      return true;
    const RecordDecl *RD = RTy->getDecl();
    // Unions aren't eligible unless they're empty (which is caught above).
    if (RD->isUnion())
      return false;
    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const CXXBaseSpecifier &B : CXXRD->bases()) {
        const auto *BDecl =
            cast<CXXRecordDecl>(B.getType()->castAs<RecordType>()->getDecl());
        CharUnits BaseOff = Layout.getBaseClassOffset(BDecl);
        bool Ret = detectFPCCEligibleStructHelper(B.getType(), CurOff + BaseOff,
                                                  Field1Ty, Field1Off, Field2Ty,
                                                  Field2Off);
        if (!Ret)
          return false;
      }
    }
    int ZeroWidthBitFieldCount = 0;
    for (const FieldDecl *FD : RD->fields()) {
      uint64_t FieldOffInBits = Layout.getFieldOffset(FD->getFieldIndex());
      QualType QTy = FD->getType();
      if (FD->isBitField()) {
        unsigned BitWidth = FD->getBitWidthValue(getContext());
        // Allow a bitfield with a type greater than XLen as long as the
        // bitwidth is XLen or less.
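        // For example, on RV32 a 'long long x : 20' member is treated as a
        // 20-bit field of a 32-bit unsigned type for eligibility purposes.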
        if (getContext().getTypeSize(QTy) > XLen && BitWidth <= XLen)
          QTy = getContext().getIntTypeForBitwidth(XLen, false);
        if (BitWidth == 0) {
          ZeroWidthBitFieldCount++;
          continue;
        }
      }

      bool Ret = detectFPCCEligibleStructHelper(
          QTy, CurOff + getContext().toCharUnitsFromBits(FieldOffInBits),
          Field1Ty, Field1Off, Field2Ty, Field2Off);
      if (!Ret)
        return false;

      // As a quirk of the ABI, zero-width bitfields aren't ignored for fp+fp
      // or int+fp structs, but are ignored for a struct with an fp field and
      // any number of zero-width bitfields.
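      // For example, struct { float f; int : 0; } remains eligible, while
      // struct { float f; int : 0; float g; } does not.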
      if (Field2Ty && ZeroWidthBitFieldCount > 0)
        return false;
    }
    return Field1Ty != nullptr;
  }

  return false;
}

// Determine if a struct is eligible for passing according to the floating
// point calling convention (i.e., when flattened it contains a single fp
// value, fp+fp, or int+fp of appropriate size). If so, NeededArgFPRs and
// NeededArgGPRs are incremented appropriately.
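// For example, an eligible struct { double d; int i; } needs one FPR and one
// GPR, while struct { double d; double e; } needs two FPRs.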
bool RISCVABIInfo::detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
                                            CharUnits &Field1Off,
                                            llvm::Type *&Field2Ty,
                                            CharUnits &Field2Off,
                                            int &NeededArgGPRs,
                                            int &NeededArgFPRs) const {
  Field1Ty = nullptr;
  Field2Ty = nullptr;
  NeededArgGPRs = 0;
  NeededArgFPRs = 0;
  bool IsCandidate = detectFPCCEligibleStructHelper(
      Ty, CharUnits::Zero(), Field1Ty, Field1Off, Field2Ty, Field2Off);
  if (!Field1Ty)
    return false;
  // Not really a candidate if we have a single int but no float.
  if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy())
    return false;
  if (!IsCandidate)
    return false;
  if (Field1Ty && Field1Ty->isFloatingPointTy())
    NeededArgFPRs++;
  else if (Field1Ty)
    NeededArgGPRs++;
  if (Field2Ty && Field2Ty->isFloatingPointTy())
    NeededArgFPRs++;
  else if (Field2Ty)
    NeededArgGPRs++;
  return true;
}

// Call getCoerceAndExpand for the two-element flattened struct described by
// Field1Ty, Field1Off, Field2Ty, Field2Off. This method will create an
// appropriate coerceToType and unpaddedCoerceToType.
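// For example, struct { float f; double d; } on lp64d coerces to
// { float, double }: no [N x i8] filler is needed because the natural layout
// already places the double at offset 8.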
ABIArgInfo RISCVABIInfo::coerceAndExpandFPCCEligibleStruct(
    llvm::Type *Field1Ty, CharUnits Field1Off, llvm::Type *Field2Ty,
    CharUnits Field2Off) const {
  SmallVector<llvm::Type *, 3> CoerceElts;
  SmallVector<llvm::Type *, 2> UnpaddedCoerceElts;
  if (!Field1Off.isZero())
    CoerceElts.push_back(llvm::ArrayType::get(
        llvm::Type::getInt8Ty(getVMContext()), Field1Off.getQuantity()));

  CoerceElts.push_back(Field1Ty);
  UnpaddedCoerceElts.push_back(Field1Ty);

  if (!Field2Ty) {
    return ABIArgInfo::getCoerceAndExpand(
        llvm::StructType::get(getVMContext(), CoerceElts, !Field1Off.isZero()),
        UnpaddedCoerceElts[0]);
  }

  CharUnits Field2Align =
      CharUnits::fromQuantity(getDataLayout().getABITypeAlign(Field2Ty));
  CharUnits Field1End = Field1Off +
      CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty));
  CharUnits Field2OffNoPadNoPack = Field1End.alignTo(Field2Align);

  CharUnits Padding = CharUnits::Zero();
  if (Field2Off > Field2OffNoPadNoPack)
    Padding = Field2Off - Field2OffNoPadNoPack;
  else if (Field2Off != Field2Align && Field2Off > Field1End)
    Padding = Field2Off - Field1End;

  bool IsPacked = !Field2Off.isMultipleOf(Field2Align);

  if (!Padding.isZero())
    CoerceElts.push_back(llvm::ArrayType::get(
        llvm::Type::getInt8Ty(getVMContext()), Padding.getQuantity()));

  CoerceElts.push_back(Field2Ty);
  UnpaddedCoerceElts.push_back(Field2Ty);

  auto CoerceToType =
      llvm::StructType::get(getVMContext(), CoerceElts, IsPacked);
  auto UnpaddedCoerceToType =
      llvm::StructType::get(getVMContext(), UnpaddedCoerceElts, IsPacked);

  return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType);
}

// Fixed-length RVV vectors are represented as scalable vectors in function
// args/return and must be coerced from fixed vectors.
ABIArgInfo RISCVABIInfo::coerceVLSVector(QualType Ty) const {
  assert(Ty->isVectorType() && "expected vector type!");

  const auto *VT = Ty->castAs<VectorType>();
  assert(VT->getVectorKind() == VectorKind::RVVFixedLengthData &&
         "Unexpected vector kind");

  assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");

  auto VScale =
      getContext().getTargetInfo().getVScaleRange(getContext().getLangOpts());
  // The MinNumElts is simplified from equation:
  // NumElts / VScale =
  //  (EltSize * NumElts / (VScale * RVVBitsPerBlock))
  //    * (RVVBitsPerBlock / EltSize)
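  // For example, with -mrvv-vector-bits=256 (so VScale->first == 4, since
  // RVVBitsPerBlock is 64), a fixed-length vector of 8 x i32 is coerced to
  // <vscale x 2 x i32>.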
  llvm::ScalableVectorType *ResType =
      llvm::ScalableVectorType::get(CGT.ConvertType(VT->getElementType()),
                                    VT->getNumElements() / VScale->first);
  return ABIArgInfo::getDirect(ResType);
}

ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
                                              int &ArgGPRsLeft,
                                              int &ArgFPRsLeft) const {
  assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow");
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always passed indirectly.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    if (ArgGPRsLeft)
      ArgGPRsLeft -= 1;
    return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
                                           CGCXXABI::RAA_DirectInMemory);
  }

  // Ignore empty structs/unions.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  uint64_t Size = getContext().getTypeSize(Ty);

  // Pass floating point values via FPRs if possible.
  if (IsFixed && Ty->isFloatingType() && !Ty->isComplexType() &&
      FLen >= Size && ArgFPRsLeft) {
    ArgFPRsLeft--;
    return ABIArgInfo::getDirect();
  }

  // Complex types for the hard float ABI must be passed direct rather than
  // using CoerceAndExpand.
  if (IsFixed && Ty->isComplexType() && FLen && ArgFPRsLeft >= 2) {
    QualType EltTy = Ty->castAs<ComplexType>()->getElementType();
    if (getContext().getTypeSize(EltTy) <= FLen) {
      ArgFPRsLeft -= 2;
      return ABIArgInfo::getDirect();
    }
  }

  if (IsFixed && FLen && Ty->isStructureOrClassType()) {
    llvm::Type *Field1Ty = nullptr;
    llvm::Type *Field2Ty = nullptr;
    CharUnits Field1Off = CharUnits::Zero();
    CharUnits Field2Off = CharUnits::Zero();
    int NeededArgGPRs = 0;
    int NeededArgFPRs = 0;
    bool IsCandidate =
        detectFPCCEligibleStruct(Ty, Field1Ty, Field1Off, Field2Ty, Field2Off,
                                 NeededArgGPRs, NeededArgFPRs);
    if (IsCandidate && NeededArgGPRs <= ArgGPRsLeft &&
        NeededArgFPRs <= ArgFPRsLeft) {
      ArgGPRsLeft -= NeededArgGPRs;
      ArgFPRsLeft -= NeededArgFPRs;
      return coerceAndExpandFPCCEligibleStruct(Field1Ty, Field1Off, Field2Ty,
                                               Field2Off);
    }
  }

  uint64_t NeededAlign = getContext().getTypeAlign(Ty);
  // Determine the number of GPRs needed to pass the current argument
  // according to the ABI. 2*XLen-aligned varargs are passed in "aligned"
  // register pairs, so may consume 3 registers.
  // TODO: To be compatible with GCC's behaviors, we don't align registers
  // currently if we are using ILP32E calling convention. This behavior may be
  // changed when RV32E/ILP32E is ratified.
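  // For example, on RV32 a variadic double must go in an even-odd GPR pair;
  // if an odd number of argument GPRs remain, one is skipped, so the argument
  // effectively consumes three registers.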
  int NeededArgGPRs = 1;
  if (!IsFixed && NeededAlign == 2 * XLen)
    NeededArgGPRs = 2 + (EABI && XLen == 32 ? 0 : (ArgGPRsLeft % 2));
  else if (Size > XLen && Size <= 2 * XLen)
    NeededArgGPRs = 2;

  if (NeededArgGPRs > ArgGPRsLeft) {
    NeededArgGPRs = ArgGPRsLeft;
  }

  ArgGPRsLeft -= NeededArgGPRs;

  if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    // All integral types are promoted to XLen width.
    if (Size < XLen && Ty->isIntegralOrEnumerationType()) {
      return extendType(Ty);
    }

    if (const auto *EIT = Ty->getAs<BitIntType>()) {
      if (EIT->getNumBits() < XLen)
        return extendType(Ty);
      if (EIT->getNumBits() > 128 ||
          (!getContext().getTargetInfo().hasInt128Type() &&
           EIT->getNumBits() > 64))
        return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
    }

    return ABIArgInfo::getDirect();
  }

  if (const VectorType *VT = Ty->getAs<VectorType>())
    if (VT->getVectorKind() == VectorKind::RVVFixedLengthData)
      return coerceVLSVector(Ty);

  // Aggregates which are <= 2*XLen will be passed in registers if possible,
  // so coerce to integers.
  if (Size <= 2 * XLen) {
    unsigned Alignment = getContext().getTypeAlign(Ty);

    // Use a single XLen int if possible, 2*XLen if 2*XLen alignment is
    // required, and a 2-element XLen array if only XLen alignment is required.
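    // For example, on RV64 a 16-byte struct with 8-byte alignment is coerced
    // to [2 x i64], while one with 16-byte alignment is coerced to i128.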
    if (Size <= XLen) {
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), XLen));
    } else if (Alignment == 2 * XLen) {
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), 2 * XLen));
    } else {
      return ABIArgInfo::getDirect(llvm::ArrayType::get(
          llvm::IntegerType::get(getVMContext(), XLen), 2));
    }
  }
  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}

ABIArgInfo RISCVABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  int ArgGPRsLeft = 2;
  int ArgFPRsLeft = FLen ? 2 : 0;

  // The rules for return and argument types are the same, so defer to
  // classifyArgumentType.
  return classifyArgumentType(RetTy, /*IsFixed=*/true, ArgGPRsLeft,
                              ArgFPRsLeft);
}

Address RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true)) {
    return Address(CGF.Builder.CreateLoad(VAListAddr),
                   CGF.ConvertTypeForMem(Ty), SlotSize);
  }

  auto TInfo = getContext().getTypeInfoInChars(Ty);

  // TODO: To be compatible with GCC's behaviors, we force arguments with
  // 2×XLEN-bit alignment and size at most 2×XLEN bits like `long long`,
  // `unsigned long long` and `double` to have 4-byte alignment. This
  // behavior may be changed when RV32E/ILP32E is ratified.
  if (EABI && XLen == 32)
    TInfo.Align = std::min(TInfo.Align, CharUnits::fromQuantity(4));

  // Arguments bigger than 2*XLen bits are passed indirectly.
  bool IsIndirect = TInfo.Width > 2 * SlotSize;

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TInfo,
                          SlotSize, /*AllowHigherAlign=*/true);
}

ABIArgInfo RISCVABIInfo::extendType(QualType Ty) const {
  int TySize = getContext().getTypeSize(Ty);
  // The RV64 ABI requires unsigned 32-bit integers to be sign-extended.
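  // For example, an 'unsigned int' argument becomes signext i32 on RV64,
  // matching the convention that 32-bit values are kept sign-extended in
  // 64-bit registers.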
  if (XLen == 64 && Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
    return ABIArgInfo::getSignExtend(Ty);
  return ABIArgInfo::getExtend(Ty);
}

namespace {
class RISCVTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen,
                         unsigned FLen, bool EABI)
      : TargetCodeGenInfo(
            std::make_unique<RISCVABIInfo>(CGT, XLen, FLen, EABI)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD) return;

    const auto *Attr = FD->getAttr<RISCVInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
    case RISCVInterruptAttr::supervisor: Kind = "supervisor"; break;
    case RISCVInterruptAttr::machine: Kind = "machine"; break;
    }

    auto *Fn = cast<llvm::Function>(GV);

    Fn->addFnAttr("interrupt", Kind);
  }
};
} // namespace

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createRISCVTargetCodeGenInfo(CodeGenModule &CGM, unsigned XLen,
                                      unsigned FLen, bool EABI) {
  return std::make_unique<RISCVTargetCodeGenInfo>(CGM.getTypes(), XLen, FLen,
                                                  EABI);
}