/src/llvm-project/clang/lib/CodeGen/CGCall.cpp
Line | Count | Source |
1 | | //===--- CGCall.cpp - Encapsulate calling convention details --------------===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | // |
9 | | // These classes wrap the information about a call or function |
10 | | // definition used to handle ABI compliancy. |
11 | | // |
12 | | //===----------------------------------------------------------------------===// |
13 | | |
14 | | #include "CGCall.h" |
15 | | #include "ABIInfo.h" |
16 | | #include "ABIInfoImpl.h" |
17 | | #include "CGBlocks.h" |
18 | | #include "CGCXXABI.h" |
19 | | #include "CGCleanup.h" |
20 | | #include "CGRecordLayout.h" |
21 | | #include "CodeGenFunction.h" |
22 | | #include "CodeGenModule.h" |
23 | | #include "TargetInfo.h" |
24 | | #include "clang/AST/Attr.h" |
25 | | #include "clang/AST/Decl.h" |
26 | | #include "clang/AST/DeclCXX.h" |
27 | | #include "clang/AST/DeclObjC.h" |
28 | | #include "clang/Basic/CodeGenOptions.h" |
29 | | #include "clang/Basic/TargetInfo.h" |
30 | | #include "clang/CodeGen/CGFunctionInfo.h" |
31 | | #include "clang/CodeGen/SwiftCallingConv.h" |
32 | | #include "llvm/ADT/StringExtras.h" |
33 | | #include "llvm/Analysis/ValueTracking.h" |
34 | | #include "llvm/IR/Assumptions.h" |
35 | | #include "llvm/IR/AttributeMask.h" |
36 | | #include "llvm/IR/Attributes.h" |
37 | | #include "llvm/IR/CallingConv.h" |
38 | | #include "llvm/IR/DataLayout.h" |
39 | | #include "llvm/IR/InlineAsm.h" |
40 | | #include "llvm/IR/IntrinsicInst.h" |
41 | | #include "llvm/IR/Intrinsics.h" |
42 | | #include "llvm/IR/Type.h" |
43 | | #include "llvm/Transforms/Utils/Local.h" |
44 | | #include <optional> |
45 | | using namespace clang; |
46 | | using namespace CodeGen; |
47 | | |
48 | | /***/ |
49 | | |
50 | 0 | unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) { |
51 | 0 | switch (CC) { |
52 | 0 | default: return llvm::CallingConv::C; |
53 | 0 | case CC_X86StdCall: return llvm::CallingConv::X86_StdCall; |
54 | 0 | case CC_X86FastCall: return llvm::CallingConv::X86_FastCall; |
55 | 0 | case CC_X86RegCall: return llvm::CallingConv::X86_RegCall; |
56 | 0 | case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall; |
57 | 0 | case CC_Win64: return llvm::CallingConv::Win64; |
58 | 0 | case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV; |
59 | 0 | case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS; |
60 | 0 | case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; |
61 | 0 | case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI; |
62 | | // TODO: Add support for __pascal to LLVM. |
63 | 0 | case CC_X86Pascal: return llvm::CallingConv::C; |
64 | | // TODO: Add support for __vectorcall to LLVM. |
65 | 0 | case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall; |
66 | 0 | case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall; |
67 | 0 | case CC_AArch64SVEPCS: return llvm::CallingConv::AArch64_SVE_VectorCall; |
68 | 0 | case CC_AMDGPUKernelCall: return llvm::CallingConv::AMDGPU_KERNEL; |
69 | 0 | case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC; |
70 | 0 | case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv(); |
71 | 0 | case CC_PreserveMost: return llvm::CallingConv::PreserveMost; |
72 | 0 | case CC_PreserveAll: return llvm::CallingConv::PreserveAll; |
73 | 0 | case CC_Swift: return llvm::CallingConv::Swift; |
74 | 0 | case CC_SwiftAsync: return llvm::CallingConv::SwiftTail; |
75 | 0 | case CC_M68kRTD: return llvm::CallingConv::M68k_RTD; |
76 | 0 | } |
77 | 0 | } |
78 | | |
79 | | /// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR |
80 | | /// qualification. Either or both of RD and MD may be null. A null RD indicates |
81 | | /// that there is no meaningful 'this' type, and a null MD can occur when |
82 | | /// calling a method pointer. |
83 | | CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD, |
84 | 0 | const CXXMethodDecl *MD) { |
85 | 0 | QualType RecTy; |
86 | 0 | if (RD) |
87 | 0 | RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal(); |
88 | 0 | else |
89 | 0 | RecTy = Context.VoidTy; |
90 | |
91 | 0 | if (MD) |
92 | 0 | RecTy = Context.getAddrSpaceQualType(RecTy, MD->getMethodQualifiers().getAddressSpace()); |
93 | 0 | return Context.getPointerType(CanQualType::CreateUnsafe(RecTy)); |
94 | 0 | } |
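// Illustrative sketch (editorial example): given
//   struct S { void f() const volatile; };
// DeriveThisType(S, S::f) yields the type 'S *'. The method's CVR qualifiers
// are dropped, while an address-space qualifier on the method, if present,
// is re-applied to the pointee before the pointer type is formed.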
95 | | |
96 | | /// Returns the canonical formal type of the given C++ method. |
97 | 0 | static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) { |
98 | 0 | return MD->getType()->getCanonicalTypeUnqualified() |
99 | 0 | .getAs<FunctionProtoType>(); |
100 | 0 | } |
101 | | |
102 | | /// Returns the "extra-canonicalized" return type, which discards |
103 | | /// qualifiers on the return type. Codegen doesn't care about them, |
104 | | /// and it makes ABI code a little easier to be able to assume that |
105 | | /// all parameter and return types are top-level unqualified. |
106 | 0 | static CanQualType GetReturnType(QualType RetTy) { |
107 | 0 | return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType(); |
108 | 0 | } |
109 | | |
110 | | /// Arrange the argument and result information for a value of the given |
111 | | /// unprototyped freestanding function type. |
112 | | const CGFunctionInfo & |
113 | 0 | CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) { |
114 | | // When translating an unprototyped function type, always use a |
115 | | // variadic type. |
116 | 0 | return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(), |
117 | 0 | FnInfoOpts::None, std::nullopt, |
118 | 0 | FTNP->getExtInfo(), {}, RequiredArgs(0)); |
119 | 0 | } |
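// Illustrative sketch: a K&R-style declaration such as
//   int f();   /* unprototyped in C */
// is arranged with RequiredArgs(0), so every argument at a call site like
// f(1, 2.0) counts as optional and is passed using the target's variadic
// conventions.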
120 | | |
121 | | static void addExtParameterInfosForCall( |
122 | | llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> ¶mInfos, |
123 | | const FunctionProtoType *proto, |
124 | | unsigned prefixArgs, |
125 | 0 | unsigned totalArgs) { |
126 | 0 | assert(proto->hasExtParameterInfos()); |
127 | 0 | assert(paramInfos.size() <= prefixArgs); |
128 | 0 | assert(proto->getNumParams() + prefixArgs <= totalArgs); |
129 | | |
130 | 0 | paramInfos.reserve(totalArgs); |
131 | | |
132 | | // Add default infos for any prefix args that don't already have infos. |
133 | 0 | paramInfos.resize(prefixArgs); |
134 | | |
135 | | // Add infos for the prototype. |
136 | 0 | for (const auto &ParamInfo : proto->getExtParameterInfos()) { |
137 | 0 | paramInfos.push_back(ParamInfo); |
138 | | // pass_object_size params have no parameter info. |
139 | 0 | if (ParamInfo.hasPassObjectSize()) |
140 | 0 | paramInfos.emplace_back(); |
141 | 0 | } |
142 | |
143 | 0 | assert(paramInfos.size() <= totalArgs && |
144 | 0 | "Did we forget to insert pass_object_size args?"); |
145 | | // Add default infos for the variadic and/or suffix arguments. |
146 | 0 | paramInfos.resize(totalArgs); |
147 | 0 | } |
148 | | |
149 | | /// Adds the formal parameters in FPT to the given prefix. If any parameter in |
150 | | /// FPT has pass_object_size attrs, then we'll add parameters for those, too. |
151 | | static void appendParameterTypes(const CodeGenTypes &CGT, |
152 | | SmallVectorImpl<CanQualType> &prefix, |
153 | | SmallVectorImpl<FunctionProtoType::ExtParameterInfo> ¶mInfos, |
154 | 0 | CanQual<FunctionProtoType> FPT) { |
155 | | // Fast path: don't touch param info if we don't need to. |
156 | 0 | if (!FPT->hasExtParameterInfos()) { |
157 | 0 | assert(paramInfos.empty() && |
158 | 0 | "We have paramInfos, but the prototype doesn't?"); |
159 | 0 | prefix.append(FPT->param_type_begin(), FPT->param_type_end()); |
160 | 0 | return; |
161 | 0 | } |
162 | | |
163 | 0 | unsigned PrefixSize = prefix.size(); |
164 | | // In the vast majority of cases, we'll have precisely FPT->getNumParams() |
165 | | // parameters; the only thing that can change this is the presence of |
166 | | // pass_object_size. So, we preallocate for the common case. |
167 | 0 | prefix.reserve(prefix.size() + FPT->getNumParams()); |
168 | |
169 | 0 | auto ExtInfos = FPT->getExtParameterInfos(); |
170 | 0 | assert(ExtInfos.size() == FPT->getNumParams()); |
171 | 0 | for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) { |
172 | 0 | prefix.push_back(FPT->getParamType(I)); |
173 | 0 | if (ExtInfos[I].hasPassObjectSize()) |
174 | 0 | prefix.push_back(CGT.getContext().getSizeType()); |
175 | 0 | } |
176 | |
177 | 0 | addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize, |
178 | 0 | prefix.size()); |
179 | 0 | } |
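// Illustrative sketch: a parameter carrying pass_object_size expands into two
// arranged arguments, e.g.
//   void f(void *p __attribute__((pass_object_size(0))));
// appends the pointer type plus an extra getSizeType() slot for the implicit
// object-size value, and addExtParameterInfosForCall() then adds an empty
// ExtParameterInfo entry for that synthetic argument.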
180 | | |
181 | | /// Arrange the LLVM function layout for a value of the given function |
182 | | /// type, on top of any implicit parameters already stored. |
183 | | static const CGFunctionInfo & |
184 | | arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, |
185 | | SmallVectorImpl<CanQualType> &prefix, |
186 | 0 | CanQual<FunctionProtoType> FTP) { |
187 | 0 | SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos; |
188 | 0 | RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size()); |
189 | | // FIXME: Kill copy. |
190 | 0 | appendParameterTypes(CGT, prefix, paramInfos, FTP); |
191 | 0 | CanQualType resultType = FTP->getReturnType().getUnqualifiedType(); |
192 | |
193 | 0 | FnInfoOpts opts = |
194 | 0 | instanceMethod ? FnInfoOpts::IsInstanceMethod : FnInfoOpts::None; |
195 | 0 | return CGT.arrangeLLVMFunctionInfo(resultType, opts, prefix, |
196 | 0 | FTP->getExtInfo(), paramInfos, Required); |
197 | 0 | } |
198 | | |
199 | | /// Arrange the argument and result information for a value of the |
200 | | /// given freestanding function type. |
201 | | const CGFunctionInfo & |
202 | 0 | CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) { |
203 | 0 | SmallVector<CanQualType, 16> argTypes; |
204 | 0 | return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes, |
205 | 0 | FTP); |
206 | 0 | } |
207 | | |
208 | | static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D, |
209 | 0 | bool IsWindows) { |
210 | | // Set the appropriate calling convention for the Function. |
211 | 0 | if (D->hasAttr<StdCallAttr>()) |
212 | 0 | return CC_X86StdCall; |
213 | | |
214 | 0 | if (D->hasAttr<FastCallAttr>()) |
215 | 0 | return CC_X86FastCall; |
216 | | |
217 | 0 | if (D->hasAttr<RegCallAttr>()) |
218 | 0 | return CC_X86RegCall; |
219 | | |
220 | 0 | if (D->hasAttr<ThisCallAttr>()) |
221 | 0 | return CC_X86ThisCall; |
222 | | |
223 | 0 | if (D->hasAttr<VectorCallAttr>()) |
224 | 0 | return CC_X86VectorCall; |
225 | | |
226 | 0 | if (D->hasAttr<PascalAttr>()) |
227 | 0 | return CC_X86Pascal; |
228 | | |
229 | 0 | if (PcsAttr *PCS = D->getAttr<PcsAttr>()) |
230 | 0 | return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP); |
231 | | |
232 | 0 | if (D->hasAttr<AArch64VectorPcsAttr>()) |
233 | 0 | return CC_AArch64VectorCall; |
234 | | |
235 | 0 | if (D->hasAttr<AArch64SVEPcsAttr>()) |
236 | 0 | return CC_AArch64SVEPCS; |
237 | | |
238 | 0 | if (D->hasAttr<AMDGPUKernelCallAttr>()) |
239 | 0 | return CC_AMDGPUKernelCall; |
240 | | |
241 | 0 | if (D->hasAttr<IntelOclBiccAttr>()) |
242 | 0 | return CC_IntelOclBicc; |
243 | | |
244 | 0 | if (D->hasAttr<MSABIAttr>()) |
245 | 0 | return IsWindows ? CC_C : CC_Win64; |
246 | | |
247 | 0 | if (D->hasAttr<SysVABIAttr>()) |
248 | 0 | return IsWindows ? CC_X86_64SysV : CC_C; |
249 | | |
250 | 0 | if (D->hasAttr<PreserveMostAttr>()) |
251 | 0 | return CC_PreserveMost; |
252 | | |
253 | 0 | if (D->hasAttr<PreserveAllAttr>()) |
254 | 0 | return CC_PreserveAll; |
255 | | |
256 | 0 | if (D->hasAttr<M68kRTDAttr>()) |
257 | 0 | return CC_M68kRTD; |
258 | | |
259 | 0 | return CC_C; |
260 | 0 | } |
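// Illustrative sketch: the ms_abi / sysv_abi attributes only matter when they
// differ from the platform default. An Objective-C method marked
// __attribute__((ms_abi)) maps to CC_Win64 on a non-Windows target but to
// plain CC_C on Windows, where that convention is already the default.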
261 | | |
262 | | /// Arrange the argument and result information for a call to an |
263 | | /// unknown C++ non-static member function of the given abstract type. |
264 | | /// (A null RD means we don't have any meaningful "this" argument type, |
265 | | /// so fall back to a generic pointer type). |
266 | | /// The member function must be an ordinary function, i.e. not a |
267 | | /// constructor or destructor. |
268 | | const CGFunctionInfo & |
269 | | CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD, |
270 | | const FunctionProtoType *FTP, |
271 | 0 | const CXXMethodDecl *MD) { |
272 | 0 | SmallVector<CanQualType, 16> argTypes; |
273 | | |
274 | | // Add the 'this' pointer. |
275 | 0 | argTypes.push_back(DeriveThisType(RD, MD)); |
276 | |
277 | 0 | return ::arrangeLLVMFunctionInfo( |
278 | 0 | *this, /*instanceMethod=*/true, argTypes, |
279 | 0 | FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>()); |
280 | 0 | } |
281 | | |
282 | | /// Set calling convention for CUDA/HIP kernel. |
283 | | static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM, |
284 | 0 | const FunctionDecl *FD) { |
285 | 0 | if (FD->hasAttr<CUDAGlobalAttr>()) { |
286 | 0 | const FunctionType *FT = FTy->getAs<FunctionType>(); |
287 | 0 | CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT); |
288 | 0 | FTy = FT->getCanonicalTypeUnqualified(); |
289 | 0 | } |
290 | 0 | } |
291 | | |
292 | | /// Arrange the argument and result information for a declaration or |
293 | | /// definition of the given C++ non-static member function. The |
294 | | /// member function must be an ordinary function, i.e. not a |
295 | | /// constructor or destructor. |
296 | | const CGFunctionInfo & |
297 | 0 | CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) { |
298 | 0 | assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!"); |
299 | 0 | assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!"); |
300 | | |
301 | 0 | CanQualType FT = GetFormalType(MD).getAs<Type>(); |
302 | 0 | setCUDAKernelCallingConvention(FT, CGM, MD); |
303 | 0 | auto prototype = FT.getAs<FunctionProtoType>(); |
304 | |
305 | 0 | if (MD->isImplicitObjectMemberFunction()) { |
306 | | // The abstract case is perfectly fine. |
307 | 0 | const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD); |
308 | 0 | return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD); |
309 | 0 | } |
310 | | |
311 | 0 | return arrangeFreeFunctionType(prototype); |
312 | 0 | } |
313 | | |
314 | | bool CodeGenTypes::inheritingCtorHasParams( |
315 | 0 | const InheritedConstructor &Inherited, CXXCtorType Type) { |
316 | | // Parameters are unnecessary if we're constructing a base class subobject |
317 | | // and the inherited constructor lives in a virtual base. |
318 | 0 | return Type == Ctor_Complete || |
319 | 0 | !Inherited.getShadowDecl()->constructsVirtualBase() || |
320 | 0 | !Target.getCXXABI().hasConstructorVariants(); |
321 | 0 | } |
322 | | |
323 | | const CGFunctionInfo & |
324 | 0 | CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) { |
325 | 0 | auto *MD = cast<CXXMethodDecl>(GD.getDecl()); |
326 | |
327 | 0 | SmallVector<CanQualType, 16> argTypes; |
328 | 0 | SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos; |
329 | |
330 | 0 | const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(GD); |
331 | 0 | argTypes.push_back(DeriveThisType(ThisType, MD)); |
332 | |
333 | 0 | bool PassParams = true; |
334 | |
335 | 0 | if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) { |
336 | | // A base class inheriting constructor doesn't get forwarded arguments |
337 | | // needed to construct a virtual base (or base class thereof). |
338 | 0 | if (auto Inherited = CD->getInheritedConstructor()) |
339 | 0 | PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType()); |
340 | 0 | } |
341 | |
342 | 0 | CanQual<FunctionProtoType> FTP = GetFormalType(MD); |
343 | | |
344 | | // Add the formal parameters. |
345 | 0 | if (PassParams) |
346 | 0 | appendParameterTypes(*this, argTypes, paramInfos, FTP); |
347 | |
348 | 0 | CGCXXABI::AddedStructorArgCounts AddedArgs = |
349 | 0 | TheCXXABI.buildStructorSignature(GD, argTypes); |
350 | 0 | if (!paramInfos.empty()) { |
351 | | // Note: prefix implies after the first param. |
352 | 0 | if (AddedArgs.Prefix) |
353 | 0 | paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix, |
354 | 0 | FunctionProtoType::ExtParameterInfo{}); |
355 | 0 | if (AddedArgs.Suffix) |
356 | 0 | paramInfos.append(AddedArgs.Suffix, |
357 | 0 | FunctionProtoType::ExtParameterInfo{}); |
358 | 0 | } |
359 | |
360 | 0 | RequiredArgs required = |
361 | 0 | (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size()) |
362 | 0 | : RequiredArgs::All); |
363 | |
|
364 | 0 | FunctionType::ExtInfo extInfo = FTP->getExtInfo(); |
365 | 0 | CanQualType resultType = TheCXXABI.HasThisReturn(GD) |
366 | 0 | ? argTypes.front() |
367 | 0 | : TheCXXABI.hasMostDerivedReturn(GD) |
368 | 0 | ? CGM.getContext().VoidPtrTy |
369 | 0 | : Context.VoidTy; |
370 | 0 | return arrangeLLVMFunctionInfo(resultType, FnInfoOpts::IsInstanceMethod, |
371 | 0 | argTypes, extInfo, paramInfos, required); |
372 | 0 | } |
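// Illustrative sketch: the structor's arranged return type follows the C++
// ABI in use. Where HasThisReturn() is true (e.g. constructors under the
// Microsoft C++ ABI) the return type is the 'this' pointer type
// (argTypes.front()); where hasMostDerivedReturn() is true it is void*;
// otherwise it is void.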
373 | | |
374 | | static SmallVector<CanQualType, 16> |
375 | 0 | getArgTypesForCall(ASTContext &ctx, const CallArgList &args) { |
376 | 0 | SmallVector<CanQualType, 16> argTypes; |
377 | 0 | for (auto &arg : args) |
378 | 0 | argTypes.push_back(ctx.getCanonicalParamType(arg.Ty)); |
379 | 0 | return argTypes; |
380 | 0 | } |
381 | | |
382 | | static SmallVector<CanQualType, 16> |
383 | 0 | getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) { |
384 | 0 | SmallVector<CanQualType, 16> argTypes; |
385 | 0 | for (auto &arg : args) |
386 | 0 | argTypes.push_back(ctx.getCanonicalParamType(arg->getType())); |
387 | 0 | return argTypes; |
388 | 0 | } |
389 | | |
390 | | static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> |
391 | | getExtParameterInfosForCall(const FunctionProtoType *proto, |
392 | 0 | unsigned prefixArgs, unsigned totalArgs) { |
393 | 0 | llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result; |
394 | 0 | if (proto->hasExtParameterInfos()) { |
395 | 0 | addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs); |
396 | 0 | } |
397 | 0 | return result; |
398 | 0 | } |
399 | | |
400 | | /// Arrange a call to a C++ method, passing the given arguments. |
401 | | /// |
402 | | /// ExtraPrefixArgs is the number of ABI-specific args passed after the `this` |
403 | | /// parameter. |
404 | | /// ExtraSuffixArgs is the number of ABI-specific args passed at the end of |
405 | | /// args. |
406 | | /// PassProtoArgs indicates whether `args` has args for the parameters in the |
407 | | /// given CXXConstructorDecl. |
408 | | const CGFunctionInfo & |
409 | | CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args, |
410 | | const CXXConstructorDecl *D, |
411 | | CXXCtorType CtorKind, |
412 | | unsigned ExtraPrefixArgs, |
413 | | unsigned ExtraSuffixArgs, |
414 | 0 | bool PassProtoArgs) { |
415 | | // FIXME: Kill copy. |
416 | 0 | SmallVector<CanQualType, 16> ArgTypes; |
417 | 0 | for (const auto &Arg : args) |
418 | 0 | ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty)); |
419 | | |
420 | | // +1 for implicit this, which should always be args[0]. |
421 | 0 | unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs; |
422 | |
423 | 0 | CanQual<FunctionProtoType> FPT = GetFormalType(D); |
424 | 0 | RequiredArgs Required = PassProtoArgs |
425 | 0 | ? RequiredArgs::forPrototypePlus( |
426 | 0 | FPT, TotalPrefixArgs + ExtraSuffixArgs) |
427 | 0 | : RequiredArgs::All; |
428 | |
429 | 0 | GlobalDecl GD(D, CtorKind); |
430 | 0 | CanQualType ResultType = TheCXXABI.HasThisReturn(GD) |
431 | 0 | ? ArgTypes.front() |
432 | 0 | : TheCXXABI.hasMostDerivedReturn(GD) |
433 | 0 | ? CGM.getContext().VoidPtrTy |
434 | 0 | : Context.VoidTy; |
435 | |
436 | 0 | FunctionType::ExtInfo Info = FPT->getExtInfo(); |
437 | 0 | llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos; |
438 | | // If the prototype args are elided, we should only have ABI-specific args, |
439 | | // which never have param info. |
440 | 0 | if (PassProtoArgs && FPT->hasExtParameterInfos()) { |
441 | | // ABI-specific suffix arguments are treated the same as variadic arguments. |
442 | 0 | addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs, |
443 | 0 | ArgTypes.size()); |
444 | 0 | } |
445 | |
446 | 0 | return arrangeLLVMFunctionInfo(ResultType, FnInfoOpts::IsInstanceMethod, |
447 | 0 | ArgTypes, Info, ParamInfos, Required); |
448 | 0 | } |
449 | | |
450 | | /// Arrange the argument and result information for the declaration or |
451 | | /// definition of the given function. |
452 | | const CGFunctionInfo & |
453 | 0 | CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) { |
454 | 0 | if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) |
455 | 0 | if (MD->isImplicitObjectMemberFunction()) |
456 | 0 | return arrangeCXXMethodDeclaration(MD); |
457 | | |
458 | 0 | CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified(); |
459 | |
460 | 0 | assert(isa<FunctionType>(FTy)); |
461 | 0 | setCUDAKernelCallingConvention(FTy, CGM, FD); |
462 | | |
463 | | // When declaring a function without a prototype, always use a |
464 | | // non-variadic type. |
465 | 0 | if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) { |
466 | 0 | return arrangeLLVMFunctionInfo(noProto->getReturnType(), FnInfoOpts::None, |
467 | 0 | std::nullopt, noProto->getExtInfo(), {}, |
468 | 0 | RequiredArgs::All); |
469 | 0 | } |
470 | | |
471 | 0 | return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>()); |
472 | 0 | } |
473 | | |
474 | | /// Arrange the argument and result information for the declaration or |
475 | | /// definition of an Objective-C method. |
476 | | const CGFunctionInfo & |
477 | 0 | CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) { |
478 | | // It happens that this is the same as a call with no optional |
479 | | // arguments, except also using the formal 'self' type. |
480 | 0 | return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType()); |
481 | 0 | } |
482 | | |
483 | | /// Arrange the argument and result information for the function type |
484 | | /// through which to perform a send to the given Objective-C method, |
485 | | /// using the given receiver type. The receiver type is not always |
486 | | /// the 'self' type of the method or even an Objective-C pointer type. |
487 | | /// This is *not* the right method for actually performing such a |
488 | | /// message send, due to the possibility of optional arguments. |
489 | | const CGFunctionInfo & |
490 | | CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, |
491 | 0 | QualType receiverType) { |
492 | 0 | SmallVector<CanQualType, 16> argTys; |
493 | 0 | SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos( |
494 | 0 | MD->isDirectMethod() ? 1 : 2); |
495 | 0 | argTys.push_back(Context.getCanonicalParamType(receiverType)); |
496 | 0 | if (!MD->isDirectMethod()) |
497 | 0 | argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType())); |
498 | | // FIXME: Kill copy? |
499 | 0 | for (const auto *I : MD->parameters()) { |
500 | 0 | argTys.push_back(Context.getCanonicalParamType(I->getType())); |
501 | 0 | auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape( |
502 | 0 | I->hasAttr<NoEscapeAttr>()); |
503 | 0 | extParamInfos.push_back(extParamInfo); |
504 | 0 | } |
505 | |
506 | 0 | FunctionType::ExtInfo einfo; |
507 | 0 | bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows(); |
508 | 0 | einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows)); |
509 | |
510 | 0 | if (getContext().getLangOpts().ObjCAutoRefCount && |
511 | 0 | MD->hasAttr<NSReturnsRetainedAttr>()) |
512 | 0 | einfo = einfo.withProducesResult(true); |
513 | |
514 | 0 | RequiredArgs required = |
515 | 0 | (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All); |
516 | |
517 | 0 | return arrangeLLVMFunctionInfo(GetReturnType(MD->getReturnType()), |
518 | 0 | FnInfoOpts::None, argTys, einfo, extParamInfos, |
519 | 0 | required); |
520 | 0 | } |
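// Illustrative sketch: for a non-direct method such as
//   - (void)setX:(int)x;
// the arranged arguments are (receiver, SEL _cmd, int x). A method marked
// __attribute__((objc_direct)) omits the implicit selector slot, which is why
// extParamInfos is seeded with one entry instead of two above.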
521 | | |
522 | | const CGFunctionInfo & |
523 | | CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType, |
524 | 0 | const CallArgList &args) { |
525 | 0 | auto argTypes = getArgTypesForCall(Context, args); |
526 | 0 | FunctionType::ExtInfo einfo; |
527 | |
528 | 0 | return arrangeLLVMFunctionInfo(GetReturnType(returnType), FnInfoOpts::None, |
529 | 0 | argTypes, einfo, {}, RequiredArgs::All); |
530 | 0 | } |
531 | | |
532 | | const CGFunctionInfo & |
533 | 0 | CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) { |
534 | | // FIXME: Do we need to handle ObjCMethodDecl? |
535 | 0 | const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl()); |
536 | |
537 | 0 | if (isa<CXXConstructorDecl>(GD.getDecl()) || |
538 | 0 | isa<CXXDestructorDecl>(GD.getDecl())) |
539 | 0 | return arrangeCXXStructorDeclaration(GD); |
540 | | |
541 | 0 | return arrangeFunctionDeclaration(FD); |
542 | 0 | } |
543 | | |
544 | | /// Arrange a thunk that takes 'this' as the first parameter followed by |
545 | | /// varargs. Return a void pointer, regardless of the actual return type. |
546 | | /// The body of the thunk will end in a musttail call to a function of the |
547 | | /// correct type, and the caller will bitcast the function to the correct |
548 | | /// prototype. |
549 | | const CGFunctionInfo & |
550 | 0 | CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) { |
551 | 0 | assert(MD->isVirtual() && "only methods have thunks"); |
552 | 0 | CanQual<FunctionProtoType> FTP = GetFormalType(MD); |
553 | 0 | CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)}; |
554 | 0 | return arrangeLLVMFunctionInfo(Context.VoidTy, FnInfoOpts::None, ArgTys, |
555 | 0 | FTP->getExtInfo(), {}, RequiredArgs(1)); |
556 | 0 | } |
557 | | |
558 | | const CGFunctionInfo & |
559 | | CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD, |
560 | 0 | CXXCtorType CT) { |
561 | 0 | assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure); |
562 | | |
563 | 0 | CanQual<FunctionProtoType> FTP = GetFormalType(CD); |
564 | 0 | SmallVector<CanQualType, 2> ArgTys; |
565 | 0 | const CXXRecordDecl *RD = CD->getParent(); |
566 | 0 | ArgTys.push_back(DeriveThisType(RD, CD)); |
567 | 0 | if (CT == Ctor_CopyingClosure) |
568 | 0 | ArgTys.push_back(*FTP->param_type_begin()); |
569 | 0 | if (RD->getNumVBases() > 0) |
570 | 0 | ArgTys.push_back(Context.IntTy); |
571 | 0 | CallingConv CC = Context.getDefaultCallingConvention( |
572 | 0 | /*IsVariadic=*/false, /*IsCXXMethod=*/true); |
573 | 0 | return arrangeLLVMFunctionInfo(Context.VoidTy, FnInfoOpts::IsInstanceMethod, |
574 | 0 | ArgTys, FunctionType::ExtInfo(CC), {}, |
575 | 0 | RequiredArgs::All); |
576 | 0 | } |
577 | | |
578 | | /// Arrange a call as unto a free function, except possibly with an |
579 | | /// additional number of formal parameters considered required. |
580 | | static const CGFunctionInfo & |
581 | | arrangeFreeFunctionLikeCall(CodeGenTypes &CGT, |
582 | | CodeGenModule &CGM, |
583 | | const CallArgList &args, |
584 | | const FunctionType *fnType, |
585 | | unsigned numExtraRequiredArgs, |
586 | 0 | bool chainCall) { |
587 | 0 | assert(args.size() >= numExtraRequiredArgs); |
588 | | |
589 | 0 | llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos; |
590 | | |
591 | | // In most cases, there are no optional arguments. |
592 | 0 | RequiredArgs required = RequiredArgs::All; |
593 | | |
594 | | // If we have a variadic prototype, the required arguments are the |
595 | | // extra prefix plus the arguments in the prototype. |
596 | 0 | if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) { |
597 | 0 | if (proto->isVariadic()) |
598 | 0 | required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs); |
599 | |
600 | 0 | if (proto->hasExtParameterInfos()) |
601 | 0 | addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs, |
602 | 0 | args.size()); |
603 | | |
604 | | // If we don't have a prototype at all, but we're supposed to |
605 | | // explicitly use the variadic convention for unprototyped calls, |
606 | | // treat all of the arguments as required but preserve the nominal |
607 | | // possibility of variadics. |
608 | 0 | } else if (CGM.getTargetCodeGenInfo() |
609 | 0 | .isNoProtoCallVariadic(args, |
610 | 0 | cast<FunctionNoProtoType>(fnType))) { |
611 | 0 | required = RequiredArgs(args.size()); |
612 | 0 | } |
613 | | |
614 | | // FIXME: Kill copy. |
615 | 0 | SmallVector<CanQualType, 16> argTypes; |
616 | 0 | for (const auto &arg : args) |
617 | 0 | argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty)); |
618 | 0 | FnInfoOpts opts = chainCall ? FnInfoOpts::IsChainCall : FnInfoOpts::None; |
619 | 0 | return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()), |
620 | 0 | opts, argTypes, fnType->getExtInfo(), |
621 | 0 | paramInfos, required); |
622 | 0 | } |
623 | | |
624 | | /// Figure out the rules for calling a function with the given formal |
625 | | /// type using the given arguments. The arguments are necessary |
626 | | /// because the function might be unprototyped, in which case it's |
627 | | /// target-dependent in crazy ways. |
628 | | const CGFunctionInfo & |
629 | | CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args, |
630 | | const FunctionType *fnType, |
631 | 0 | bool chainCall) { |
632 | 0 | return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, |
633 | 0 | chainCall ? 1 : 0, chainCall); |
634 | 0 | } |
635 | | |
636 | | /// A block function is essentially a free function with an |
637 | | /// extra implicit argument. |
638 | | const CGFunctionInfo & |
639 | | CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args, |
640 | 0 | const FunctionType *fnType) { |
641 | 0 | return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1, |
642 | 0 | /*chainCall=*/false); |
643 | 0 | } |
644 | | |
645 | | const CGFunctionInfo & |
646 | | CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto, |
647 | 0 | const FunctionArgList ¶ms) { |
648 | 0 | auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size()); |
649 | 0 | auto argTypes = getArgTypesForDeclaration(Context, params); |
650 | |
651 | 0 | return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()), |
652 | 0 | FnInfoOpts::None, argTypes, |
653 | 0 | proto->getExtInfo(), paramInfos, |
654 | 0 | RequiredArgs::forPrototypePlus(proto, 1)); |
655 | 0 | } |
656 | | |
657 | | const CGFunctionInfo & |
658 | | CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType, |
659 | 0 | const CallArgList &args) { |
660 | | // FIXME: Kill copy. |
661 | 0 | SmallVector<CanQualType, 16> argTypes; |
662 | 0 | for (const auto &Arg : args) |
663 | 0 | argTypes.push_back(Context.getCanonicalParamType(Arg.Ty)); |
664 | 0 | return arrangeLLVMFunctionInfo(GetReturnType(resultType), FnInfoOpts::None, |
665 | 0 | argTypes, FunctionType::ExtInfo(), |
666 | 0 | /*paramInfos=*/{}, RequiredArgs::All); |
667 | 0 | } |
668 | | |
669 | | const CGFunctionInfo & |
670 | | CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType, |
671 | 0 | const FunctionArgList &args) { |
672 | 0 | auto argTypes = getArgTypesForDeclaration(Context, args); |
673 | |
674 | 0 | return arrangeLLVMFunctionInfo(GetReturnType(resultType), FnInfoOpts::None, |
675 | 0 | argTypes, FunctionType::ExtInfo(), {}, |
676 | 0 | RequiredArgs::All); |
677 | 0 | } |
678 | | |
679 | | const CGFunctionInfo & |
680 | | CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType, |
681 | 0 | ArrayRef<CanQualType> argTypes) { |
682 | 0 | return arrangeLLVMFunctionInfo(resultType, FnInfoOpts::None, argTypes, |
683 | 0 | FunctionType::ExtInfo(), {}, |
684 | 0 | RequiredArgs::All); |
685 | 0 | } |
686 | | |
687 | | /// Arrange a call to a C++ method, passing the given arguments. |
688 | | /// |
689 | | /// numPrefixArgs is the number of ABI-specific prefix arguments we have. It |
690 | | /// does not count `this`. |
691 | | const CGFunctionInfo & |
692 | | CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args, |
693 | | const FunctionProtoType *proto, |
694 | | RequiredArgs required, |
695 | 0 | unsigned numPrefixArgs) { |
696 | 0 | assert(numPrefixArgs + 1 <= args.size() && |
697 | 0 | "Emitting a call with less args than the required prefix?"); |
698 | | // Add one to account for `this`. It's a bit awkward here, but we don't count |
699 | | // `this` in similar places elsewhere. |
700 | 0 | auto paramInfos = |
701 | 0 | getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size()); |
702 | | |
703 | | // FIXME: Kill copy. |
704 | 0 | auto argTypes = getArgTypesForCall(Context, args); |
705 | |
706 | 0 | FunctionType::ExtInfo info = proto->getExtInfo(); |
707 | 0 | return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()), |
708 | 0 | FnInfoOpts::IsInstanceMethod, argTypes, info, |
709 | 0 | paramInfos, required); |
710 | 0 | } |
711 | | |
712 | 0 | const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() { |
713 | 0 | return arrangeLLVMFunctionInfo(getContext().VoidTy, FnInfoOpts::None, |
714 | 0 | std::nullopt, FunctionType::ExtInfo(), {}, |
715 | 0 | RequiredArgs::All); |
716 | 0 | } |
717 | | |
718 | | const CGFunctionInfo & |
719 | | CodeGenTypes::arrangeCall(const CGFunctionInfo &signature, |
720 | 0 | const CallArgList &args) { |
721 | 0 | assert(signature.arg_size() <= args.size()); |
722 | 0 | if (signature.arg_size() == args.size()) |
723 | 0 | return signature; |
724 | | |
725 | 0 | SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos; |
726 | 0 | auto sigParamInfos = signature.getExtParameterInfos(); |
727 | 0 | if (!sigParamInfos.empty()) { |
728 | 0 | paramInfos.append(sigParamInfos.begin(), sigParamInfos.end()); |
729 | 0 | paramInfos.resize(args.size()); |
730 | 0 | } |
731 | |
732 | 0 | auto argTypes = getArgTypesForCall(Context, args); |
733 | |
734 | 0 | assert(signature.getRequiredArgs().allowsOptionalArgs()); |
735 | 0 | FnInfoOpts opts = FnInfoOpts::None; |
736 | 0 | if (signature.isInstanceMethod()) |
737 | 0 | opts |= FnInfoOpts::IsInstanceMethod; |
738 | 0 | if (signature.isChainCall()) |
739 | 0 | opts |= FnInfoOpts::IsChainCall; |
740 | 0 | if (signature.isDelegateCall()) |
741 | 0 | opts |= FnInfoOpts::IsDelegateCall; |
742 | 0 | return arrangeLLVMFunctionInfo(signature.getReturnType(), opts, argTypes, |
743 | 0 | signature.getExtInfo(), paramInfos, |
744 | 0 | signature.getRequiredArgs()); |
745 | 0 | } |
746 | | |
747 | | namespace clang { |
748 | | namespace CodeGen { |
749 | | void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI); |
750 | | } |
751 | | } |
752 | | |
753 | | /// Arrange the argument and result information for an abstract value |
754 | | /// of a given function type. This is the method which all of the |
755 | | /// above functions ultimately defer to. |
756 | | const CGFunctionInfo &CodeGenTypes::arrangeLLVMFunctionInfo( |
757 | | CanQualType resultType, FnInfoOpts opts, ArrayRef<CanQualType> argTypes, |
758 | | FunctionType::ExtInfo info, |
759 | | ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos, |
760 | 0 | RequiredArgs required) { |
761 | 0 | assert(llvm::all_of(argTypes, |
762 | 0 | [](CanQualType T) { return T.isCanonicalAsParam(); })); |
763 | | |
764 | | // Lookup or create unique function info. |
765 | 0 | llvm::FoldingSetNodeID ID; |
766 | 0 | bool isInstanceMethod = |
767 | 0 | (opts & FnInfoOpts::IsInstanceMethod) == FnInfoOpts::IsInstanceMethod; |
768 | 0 | bool isChainCall = |
769 | 0 | (opts & FnInfoOpts::IsChainCall) == FnInfoOpts::IsChainCall; |
770 | 0 | bool isDelegateCall = |
771 | 0 | (opts & FnInfoOpts::IsDelegateCall) == FnInfoOpts::IsDelegateCall; |
772 | 0 | CGFunctionInfo::Profile(ID, isInstanceMethod, isChainCall, isDelegateCall, |
773 | 0 | info, paramInfos, required, resultType, argTypes); |
774 | |
775 | 0 | void *insertPos = nullptr; |
776 | 0 | CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos); |
777 | 0 | if (FI) |
778 | 0 | return *FI; |
779 | | |
780 | 0 | unsigned CC = ClangCallConvToLLVMCallConv(info.getCC()); |
781 | | |
782 | | // Construct the function info. We co-allocate the ArgInfos. |
783 | 0 | FI = CGFunctionInfo::create(CC, isInstanceMethod, isChainCall, isDelegateCall, |
784 | 0 | info, paramInfos, resultType, argTypes, required); |
785 | 0 | FunctionInfos.InsertNode(FI, insertPos); |
786 | |
787 | 0 | bool inserted = FunctionsBeingProcessed.insert(FI).second; |
788 | 0 | (void)inserted; |
789 | 0 | assert(inserted && "Recursively being processed?"); |
790 | | |
791 | | // Compute ABI information. |
792 | 0 | if (CC == llvm::CallingConv::SPIR_KERNEL) { |
793 | | // Force target independent argument handling for the host visible |
794 | | // kernel functions. |
795 | 0 | computeSPIRKernelABIInfo(CGM, *FI); |
796 | 0 | } else if (info.getCC() == CC_Swift || info.getCC() == CC_SwiftAsync) { |
797 | 0 | swiftcall::computeABIInfo(CGM, *FI); |
798 | 0 | } else { |
799 | 0 | getABIInfo().computeInfo(*FI); |
800 | 0 | } |
801 | | |
802 | | // Loop over all of the computed argument and return value info. If any of |
803 | | // them are direct or extend without a specified coerce type, specify the |
804 | | // default now. |
805 | 0 | ABIArgInfo &retInfo = FI->getReturnInfo(); |
806 | 0 | if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr) |
807 | 0 | retInfo.setCoerceToType(ConvertType(FI->getReturnType())); |
808 | |
809 | 0 | for (auto &I : FI->arguments()) |
810 | 0 | if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr) |
811 | 0 | I.info.setCoerceToType(ConvertType(I.type)); |
812 | |
813 | 0 | bool erased = FunctionsBeingProcessed.erase(FI); (void)erased; |
814 | 0 | assert(erased && "Not in set?"); |
815 | | |
816 | 0 | return *FI; |
817 | 0 | } |
818 | | |
819 | | CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC, bool instanceMethod, |
820 | | bool chainCall, bool delegateCall, |
821 | | const FunctionType::ExtInfo &info, |
822 | | ArrayRef<ExtParameterInfo> paramInfos, |
823 | | CanQualType resultType, |
824 | | ArrayRef<CanQualType> argTypes, |
825 | 0 | RequiredArgs required) { |
826 | 0 | assert(paramInfos.empty() || paramInfos.size() == argTypes.size()); |
827 | 0 | assert(!required.allowsOptionalArgs() || |
828 | 0 | required.getNumRequiredArgs() <= argTypes.size()); |
829 | | |
830 | 0 | void *buffer = |
831 | 0 | operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>( |
832 | 0 | argTypes.size() + 1, paramInfos.size())); |
833 | |
834 | 0 | CGFunctionInfo *FI = new(buffer) CGFunctionInfo(); |
835 | 0 | FI->CallingConvention = llvmCC; |
836 | 0 | FI->EffectiveCallingConvention = llvmCC; |
837 | 0 | FI->ASTCallingConvention = info.getCC(); |
838 | 0 | FI->InstanceMethod = instanceMethod; |
839 | 0 | FI->ChainCall = chainCall; |
840 | 0 | FI->DelegateCall = delegateCall; |
841 | 0 | FI->CmseNSCall = info.getCmseNSCall(); |
842 | 0 | FI->NoReturn = info.getNoReturn(); |
843 | 0 | FI->ReturnsRetained = info.getProducesResult(); |
844 | 0 | FI->NoCallerSavedRegs = info.getNoCallerSavedRegs(); |
845 | 0 | FI->NoCfCheck = info.getNoCfCheck(); |
846 | 0 | FI->Required = required; |
847 | 0 | FI->HasRegParm = info.getHasRegParm(); |
848 | 0 | FI->RegParm = info.getRegParm(); |
849 | 0 | FI->ArgStruct = nullptr; |
850 | 0 | FI->ArgStructAlign = 0; |
851 | 0 | FI->NumArgs = argTypes.size(); |
852 | 0 | FI->HasExtParameterInfos = !paramInfos.empty(); |
853 | 0 | FI->getArgsBuffer()[0].type = resultType; |
854 | 0 | FI->MaxVectorWidth = 0; |
855 | 0 | for (unsigned i = 0, e = argTypes.size(); i != e; ++i) |
856 | 0 | FI->getArgsBuffer()[i + 1].type = argTypes[i]; |
857 | 0 | for (unsigned i = 0, e = paramInfos.size(); i != e; ++i) |
858 | 0 | FI->getExtParameterInfosBuffer()[i] = paramInfos[i]; |
859 | 0 | return FI; |
860 | 0 | } |
861 | | |
862 | | /***/ |
863 | | |
864 | | namespace { |
865 | | // ABIArgInfo::Expand implementation. |
866 | | |
867 | | // Specifies the way QualType passed as ABIArgInfo::Expand is expanded. |
868 | | struct TypeExpansion { |
869 | | enum TypeExpansionKind { |
870 | | // Elements of constant arrays are expanded recursively. |
871 | | TEK_ConstantArray, |
872 | | // Record fields are expanded recursively (but if record is a union, only |
873 | | // the field with the largest size is expanded). |
874 | | TEK_Record, |
875 | | // For complex types, real and imaginary parts are expanded recursively. |
876 | | TEK_Complex, |
877 | | // All other types are not expandable. |
878 | | TEK_None |
879 | | }; |
880 | | |
881 | | const TypeExpansionKind Kind; |
882 | | |
883 | 0 | TypeExpansion(TypeExpansionKind K) : Kind(K) {} |
884 | 0 | virtual ~TypeExpansion() {} |
885 | | }; |
886 | | |
887 | | struct ConstantArrayExpansion : TypeExpansion { |
888 | | QualType EltTy; |
889 | | uint64_t NumElts; |
890 | | |
891 | | ConstantArrayExpansion(QualType EltTy, uint64_t NumElts) |
892 | 0 | : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {} |
893 | 0 | static bool classof(const TypeExpansion *TE) { |
894 | 0 | return TE->Kind == TEK_ConstantArray; |
895 | 0 | } |
896 | | }; |
897 | | |
898 | | struct RecordExpansion : TypeExpansion { |
899 | | SmallVector<const CXXBaseSpecifier *, 1> Bases; |
900 | | |
901 | | SmallVector<const FieldDecl *, 1> Fields; |
902 | | |
903 | | RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases, |
904 | | SmallVector<const FieldDecl *, 1> &&Fields) |
905 | | : TypeExpansion(TEK_Record), Bases(std::move(Bases)), |
906 | 0 | Fields(std::move(Fields)) {} |
907 | 0 | static bool classof(const TypeExpansion *TE) { |
908 | 0 | return TE->Kind == TEK_Record; |
909 | 0 | } |
910 | | }; |
911 | | |
912 | | struct ComplexExpansion : TypeExpansion { |
913 | | QualType EltTy; |
914 | | |
915 | 0 | ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {} |
916 | 0 | static bool classof(const TypeExpansion *TE) { |
917 | 0 | return TE->Kind == TEK_Complex; |
918 | 0 | } |
919 | | }; |
920 | | |
921 | | struct NoExpansion : TypeExpansion { |
922 | 0 | NoExpansion() : TypeExpansion(TEK_None) {} |
923 | 0 | static bool classof(const TypeExpansion *TE) { |
924 | 0 | return TE->Kind == TEK_None; |
925 | 0 | } |
926 | | }; |
927 | | } // namespace |
928 | | |
929 | | static std::unique_ptr<TypeExpansion> |
930 | 0 | getTypeExpansion(QualType Ty, const ASTContext &Context) { |
931 | 0 | if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { |
932 | 0 | return std::make_unique<ConstantArrayExpansion>( |
933 | 0 | AT->getElementType(), AT->getSize().getZExtValue()); |
934 | 0 | } |
935 | 0 | if (const RecordType *RT = Ty->getAs<RecordType>()) { |
936 | 0 | SmallVector<const CXXBaseSpecifier *, 1> Bases; |
937 | 0 | SmallVector<const FieldDecl *, 1> Fields; |
938 | 0 | const RecordDecl *RD = RT->getDecl(); |
939 | 0 | assert(!RD->hasFlexibleArrayMember() && |
940 | 0 | "Cannot expand structure with flexible array."); |
941 | 0 | if (RD->isUnion()) { |
942 | | // Unions can be here only in degenerate cases - all the fields are the same |
943 | | // after flattening. Thus we have to use the "largest" field. |
944 | 0 | const FieldDecl *LargestFD = nullptr; |
945 | 0 | CharUnits UnionSize = CharUnits::Zero(); |
946 | |
947 | 0 | for (const auto *FD : RD->fields()) { |
948 | 0 | if (FD->isZeroLengthBitField(Context)) |
949 | 0 | continue; |
950 | 0 | assert(!FD->isBitField() && |
951 | 0 | "Cannot expand structure with bit-field members."); |
952 | 0 | CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType()); |
953 | 0 | if (UnionSize < FieldSize) { |
954 | 0 | UnionSize = FieldSize; |
955 | 0 | LargestFD = FD; |
956 | 0 | } |
957 | 0 | } |
958 | 0 | if (LargestFD) |
959 | 0 | Fields.push_back(LargestFD); |
960 | 0 | } else { |
961 | 0 | if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { |
962 | 0 | assert(!CXXRD->isDynamicClass() && |
963 | 0 | "cannot expand vtable pointers in dynamic classes"); |
964 | 0 | llvm::append_range(Bases, llvm::make_pointer_range(CXXRD->bases())); |
965 | 0 | } |
966 | | |
967 | 0 | for (const auto *FD : RD->fields()) { |
968 | 0 | if (FD->isZeroLengthBitField(Context)) |
969 | 0 | continue; |
970 | 0 | assert(!FD->isBitField() && |
971 | 0 | "Cannot expand structure with bit-field members."); |
972 | 0 | Fields.push_back(FD); |
973 | 0 | } |
974 | 0 | } |
975 | 0 | return std::make_unique<RecordExpansion>(std::move(Bases), |
976 | 0 | std::move(Fields)); |
977 | 0 | } |
978 | 0 | if (const ComplexType *CT = Ty->getAs<ComplexType>()) { |
979 | 0 | return std::make_unique<ComplexExpansion>(CT->getElementType()); |
980 | 0 | } |
981 | 0 | return std::make_unique<NoExpansion>(); |
982 | 0 | } |
983 | | |
984 | 0 | static int getExpansionSize(QualType Ty, const ASTContext &Context) { |
985 | 0 | auto Exp = getTypeExpansion(Ty, Context); |
986 | 0 | if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) { |
987 | 0 | return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context); |
988 | 0 | } |
989 | 0 | if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) { |
990 | 0 | int Res = 0; |
991 | 0 | for (auto BS : RExp->Bases) |
992 | 0 | Res += getExpansionSize(BS->getType(), Context); |
993 | 0 | for (auto FD : RExp->Fields) |
994 | 0 | Res += getExpansionSize(FD->getType(), Context); |
995 | 0 | return Res; |
996 | 0 | } |
997 | 0 | if (isa<ComplexExpansion>(Exp.get())) |
998 | 0 | return 2; |
999 | 0 | assert(isa<NoExpansion>(Exp.get())); |
1000 | 0 | return 1; |
1001 | 0 | } |
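// Illustrative sketch: getExpansionSize() counts the flattened scalar slots
// produced by ABIArgInfo::Expand. For
//   struct S { int a[3]; _Complex double c; float f; };
// the result is 3 (array elements) + 2 (real and imaginary parts) + 1 = 6.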
1002 | | |
1003 | | void |
1004 | | CodeGenTypes::getExpandedTypes(QualType Ty, |
1005 | 0 | SmallVectorImpl<llvm::Type *>::iterator &TI) { |
1006 | 0 | auto Exp = getTypeExpansion(Ty, Context); |
1007 | 0 | if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) { |
1008 | 0 | for (int i = 0, n = CAExp->NumElts; i < n; i++) { |
1009 | 0 | getExpandedTypes(CAExp->EltTy, TI); |
1010 | 0 | } |
1011 | 0 | } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) { |
1012 | 0 | for (auto BS : RExp->Bases) |
1013 | 0 | getExpandedTypes(BS->getType(), TI); |
1014 | 0 | for (auto FD : RExp->Fields) |
1015 | 0 | getExpandedTypes(FD->getType(), TI); |
1016 | 0 | } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) { |
1017 | 0 | llvm::Type *EltTy = ConvertType(CExp->EltTy); |
1018 | 0 | *TI++ = EltTy; |
1019 | 0 | *TI++ = EltTy; |
1020 | 0 | } else { |
1021 | 0 | assert(isa<NoExpansion>(Exp.get())); |
1022 | 0 | *TI++ = ConvertType(Ty); |
1023 | 0 | } |
1024 | 0 | } |
1025 | | |
1026 | | static void forConstantArrayExpansion(CodeGenFunction &CGF, |
1027 | | ConstantArrayExpansion *CAE, |
1028 | | Address BaseAddr, |
1029 | 0 | llvm::function_ref<void(Address)> Fn) { |
1030 | 0 | CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy); |
1031 | 0 | CharUnits EltAlign = |
1032 | 0 | BaseAddr.getAlignment().alignmentOfArrayElement(EltSize); |
1033 | 0 | llvm::Type *EltTy = CGF.ConvertTypeForMem(CAE->EltTy); |
1034 | |
1035 | 0 | for (int i = 0, n = CAE->NumElts; i < n; i++) { |
1036 | 0 | llvm::Value *EltAddr = CGF.Builder.CreateConstGEP2_32( |
1037 | 0 | BaseAddr.getElementType(), BaseAddr.getPointer(), 0, i); |
1038 | 0 | Fn(Address(EltAddr, EltTy, EltAlign)); |
1039 | 0 | } |
1040 | 0 | } |
1041 | | |
1042 | | void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV, |
1043 | 0 | llvm::Function::arg_iterator &AI) { |
1044 | 0 | assert(LV.isSimple() && |
1045 | 0 | "Unexpected non-simple lvalue during struct expansion."); |
1046 | | |
1047 | 0 | auto Exp = getTypeExpansion(Ty, getContext()); |
1048 | 0 | if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) { |
1049 | 0 | forConstantArrayExpansion( |
1050 | 0 | *this, CAExp, LV.getAddress(*this), [&](Address EltAddr) { |
1051 | 0 | LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy); |
1052 | 0 | ExpandTypeFromArgs(CAExp->EltTy, LV, AI); |
1053 | 0 | }); |
1054 | 0 | } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) { |
1055 | 0 | Address This = LV.getAddress(*this); |
1056 | 0 | for (const CXXBaseSpecifier *BS : RExp->Bases) { |
1057 | | // Perform a single step derived-to-base conversion. |
1058 | 0 | Address Base = |
1059 | 0 | GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1, |
1060 | 0 | /*NullCheckValue=*/false, SourceLocation()); |
1061 | 0 | LValue SubLV = MakeAddrLValue(Base, BS->getType()); |
1062 | | |
1063 | | // Recurse onto bases. |
1064 | 0 | ExpandTypeFromArgs(BS->getType(), SubLV, AI); |
1065 | 0 | } |
1066 | 0 | for (auto FD : RExp->Fields) { |
1067 | | // FIXME: What are the right qualifiers here? |
1068 | 0 | LValue SubLV = EmitLValueForFieldInitialization(LV, FD); |
1069 | 0 | ExpandTypeFromArgs(FD->getType(), SubLV, AI); |
1070 | 0 | } |
1071 | 0 | } else if (isa<ComplexExpansion>(Exp.get())) { |
1072 | 0 | auto realValue = &*AI++; |
1073 | 0 | auto imagValue = &*AI++; |
1074 | 0 | EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true); |
1075 | 0 | } else { |
1076 | | // Call EmitStoreOfScalar except when the lvalue is a bitfield to emit a |
1077 | | // primitive store. |
1078 | 0 | assert(isa<NoExpansion>(Exp.get())); |
1079 | 0 | llvm::Value *Arg = &*AI++; |
1080 | 0 | if (LV.isBitField()) { |
1081 | 0 | EmitStoreThroughLValue(RValue::get(Arg), LV); |
1082 | 0 | } else { |
1083 | | // TODO: currently there are some places that are inconsistent in what LLVM |
1084 | | // pointer type they use (see D118744). Once clang uses opaque pointers |
1085 | | // all LLVM pointer types will be the same and we can remove this check. |
1086 | 0 | if (Arg->getType()->isPointerTy()) { |
1087 | 0 | Address Addr = LV.getAddress(*this); |
1088 | 0 | Arg = Builder.CreateBitCast(Arg, Addr.getElementType()); |
1089 | 0 | } |
1090 | 0 | EmitStoreOfScalar(Arg, LV); |
1091 | 0 | } |
1092 | 0 | } |
1093 | 0 | } |
1094 | | |
1095 | | void CodeGenFunction::ExpandTypeToArgs( |
1096 | | QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy, |
1097 | 0 | SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) { |
1098 | 0 | auto Exp = getTypeExpansion(Ty, getContext()); |
1099 | 0 | if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) { |
1100 | 0 | Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this) |
1101 | 0 | : Arg.getKnownRValue().getAggregateAddress(); |
1102 | 0 | forConstantArrayExpansion( |
1103 | 0 | *this, CAExp, Addr, [&](Address EltAddr) { |
1104 | 0 | CallArg EltArg = CallArg( |
1105 | 0 | convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()), |
1106 | 0 | CAExp->EltTy); |
1107 | 0 | ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs, |
1108 | 0 | IRCallArgPos); |
1109 | 0 | }); |
1110 | 0 | } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) { |
1111 | 0 | Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this) |
1112 | 0 | : Arg.getKnownRValue().getAggregateAddress(); |
1113 | 0 | for (const CXXBaseSpecifier *BS : RExp->Bases) { |
1114 | | // Perform a single step derived-to-base conversion. |
1115 | 0 | Address Base = |
1116 | 0 | GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1, |
1117 | 0 | /*NullCheckValue=*/false, SourceLocation()); |
1118 | 0 | CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType()); |
1119 | | |
1120 | | // Recurse onto bases. |
1121 | 0 | ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs, |
1122 | 0 | IRCallArgPos); |
1123 | 0 | } |
1124 | |
1125 | 0 | LValue LV = MakeAddrLValue(This, Ty); |
1126 | 0 | for (auto FD : RExp->Fields) { |
1127 | 0 | CallArg FldArg = |
1128 | 0 | CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType()); |
1129 | 0 | ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs, |
1130 | 0 | IRCallArgPos); |
1131 | 0 | } |
1132 | 0 | } else if (isa<ComplexExpansion>(Exp.get())) { |
1133 | 0 | ComplexPairTy CV = Arg.getKnownRValue().getComplexVal(); |
1134 | 0 | IRCallArgs[IRCallArgPos++] = CV.first; |
1135 | 0 | IRCallArgs[IRCallArgPos++] = CV.second; |
1136 | 0 | } else { |
1137 | 0 | assert(isa<NoExpansion>(Exp.get())); |
1138 | 0 | auto RV = Arg.getKnownRValue(); |
1139 | 0 | assert(RV.isScalar() && |
1140 | 0 | "Unexpected non-scalar rvalue during struct expansion."); |
1141 | | |
1142 | | // Insert a bitcast as needed. |
1143 | 0 | llvm::Value *V = RV.getScalarVal(); |
1144 | 0 | if (IRCallArgPos < IRFuncTy->getNumParams() && |
1145 | 0 | V->getType() != IRFuncTy->getParamType(IRCallArgPos)) |
1146 | 0 | V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos)); |
1147 | |
1148 | 0 | IRCallArgs[IRCallArgPos++] = V; |
1149 | 0 | } |
1150 | 0 | } |
1151 | | |
1152 | | /// Create a temporary allocation for the purposes of coercion. |
1153 | | static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty, |
1154 | | CharUnits MinAlign, |
1155 | 0 | const Twine &Name = "tmp") { |
1156 | | // Don't use an alignment that's worse than what LLVM would prefer. |
1157 | 0 | auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(Ty); |
1158 | 0 | CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign)); |
1159 | |
1160 | 0 | return CGF.CreateTempAlloca(Ty, Align, Name + ".coerce"); |
1161 | 0 | } |
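// Illustrative sketch: the std::max above only ever raises the alignment; a
// request for a 4-byte-aligned temporary whose type has a preferred alignment
// of 8 in the DataLayout produces a ".coerce" alloca with align 8.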
1162 | | |
1163 | | /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are |
1164 | | /// accessing some number of bytes out of it, try to gep into the struct to get |
1165 | | /// at its inner goodness. Dive as deep as possible without entering an element |
1166 | | /// with an in-memory size smaller than DstSize. |
1167 | | static Address |
1168 | | EnterStructPointerForCoercedAccess(Address SrcPtr, |
1169 | | llvm::StructType *SrcSTy, |
1170 | 0 | uint64_t DstSize, CodeGenFunction &CGF) { |
1171 | | // We can't dive into a zero-element struct. |
1172 | 0 | if (SrcSTy->getNumElements() == 0) return SrcPtr; |
1173 | | |
1174 | 0 | llvm::Type *FirstElt = SrcSTy->getElementType(0); |
1175 | | |
1176 | | // If the first elt is at least as large as what we're looking for, or if the |
1177 | | // first element is the same size as the whole struct, we can enter it. The |
1178 | | // comparison must be made on the store size and not the alloca size. Using |
1179 | | // the alloca size may overstate the size of the load. |
1180 | 0 | uint64_t FirstEltSize = |
1181 | 0 | CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt); |
1182 | 0 | if (FirstEltSize < DstSize && |
1183 | 0 | FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy)) |
1184 | 0 | return SrcPtr; |
1185 | | |
1186 | | // GEP into the first element. |
1187 | 0 | SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive"); |
1188 | | |
1189 | | // If the first element is a struct, recurse. |
1190 | 0 | llvm::Type *SrcTy = SrcPtr.getElementType(); |
1191 | 0 | if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) |
1192 | 0 | return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF); |
1193 | | |
1194 | 0 | return SrcPtr; |
1195 | 0 | } |
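// Illustrative sketch, assuming the LLVM struct layouts shown: for a source
// of type { i64, i8 } and DstSize == 4, the first element's store size (8) is
// at least DstSize, so a "coerce.dive" GEP is emitted and the address of the
// leading i64 is returned. For { i8, i64 } with the same DstSize, the first
// element (store size 1) is smaller than both DstSize and the struct's store
// size, so the original pointer is returned unchanged.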
1196 | | |
1197 | | /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both |
1198 | | /// are either integers or pointers. This does a truncation of the value if it |
1199 | | /// is too large or a zero extension if it is too small. |
1200 | | /// |
1201 | | /// This behaves as if the value were coerced through memory, so on big-endian |
1202 | | /// targets the high bits are preserved in a truncation, while little-endian |
1203 | | /// targets preserve the low bits. |
1204 | | static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, |
1205 | | llvm::Type *Ty, |
1206 | 0 | CodeGenFunction &CGF) { |
1207 | 0 | if (Val->getType() == Ty) |
1208 | 0 | return Val; |
1209 | | |
1210 | 0 | if (isa<llvm::PointerType>(Val->getType())) { |
1211 | | // If this is Pointer->Pointer avoid conversion to and from int. |
1212 | 0 | if (isa<llvm::PointerType>(Ty)) |
1213 | 0 | return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val"); |
1214 | | |
1215 | | // Convert the pointer to an integer so we can play with its width. |
1216 | 0 | Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi"); |
1217 | 0 | } |
1218 | | |
1219 | 0 | llvm::Type *DestIntTy = Ty; |
1220 | 0 | if (isa<llvm::PointerType>(DestIntTy)) |
1221 | 0 | DestIntTy = CGF.IntPtrTy; |
1222 | |
1223 | 0 | if (Val->getType() != DestIntTy) { |
1224 | 0 | const llvm::DataLayout &DL = CGF.CGM.getDataLayout(); |
1225 | 0 | if (DL.isBigEndian()) { |
1226 | | // Preserve the high bits on big-endian targets. |
1227 | | // That is what memory coercion does. |
1228 | 0 | uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType()); |
1229 | 0 | uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy); |
1230 | |
1231 | 0 | if (SrcSize > DstSize) { |
1232 | 0 | Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits"); |
1233 | 0 | Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii"); |
1234 | 0 | } else { |
1235 | 0 | Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii"); |
1236 | 0 | Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits"); |
1237 | 0 | } |
1238 | 0 | } else { |
1239 | | // Little-endian targets preserve the low bits. No shifts required. |
1240 | 0 | Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii"); |
1241 | 0 | } |
1242 | 0 | } |
1243 | |
1244 | 0 | if (isa<llvm::PointerType>(Ty)) |
1245 | 0 | Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip"); |
1246 | 0 | return Val; |
1247 | 0 | } |
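A minimal standalone sketch of the endianness rule just described, assuming plain C++ arithmetic rather than LLVM IR (the helper name narrowAsIfThroughMemory and the fixed 64-to-32-bit widths are inventions for illustration): big-endian narrowing keeps the high bits by shifting right before truncating, while little-endian narrowing truncates directly.

// Standalone sketch only: emulates the bit selection CoerceIntOrPtrToIntOrPtr
// performs when narrowing an integer as if the value had been stored to memory
// and reloaded at a smaller width.
#include <cstdint>
#include <iostream>

uint32_t narrowAsIfThroughMemory(uint64_t Val, bool BigEndian) {
  if (BigEndian)
    return static_cast<uint32_t>(Val >> 32); // keep the high bits, like LShr + Trunc
  return static_cast<uint32_t>(Val);         // keep the low bits, like a plain Trunc
}

int main() {
  uint64_t V = 0x1122334455667788ULL;
  std::cout << std::hex << narrowAsIfThroughMemory(V, /*BigEndian=*/true) << '\n';  // 11223344
  std::cout << std::hex << narrowAsIfThroughMemory(V, /*BigEndian=*/false) << '\n'; // 55667788
  return 0;
}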
1248 | | |
1249 | | |
1250 | | |
1251 | | /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as |
1252 | | /// a pointer to an object of type \arg Ty, known to be aligned to |
1253 | | /// \arg SrcAlign bytes. |
1254 | | /// |
1255 | | /// This safely handles the case when the src type is smaller than the |
1256 | | /// destination type; in this situation the values of bits which are not |
1257 | | /// present in the src are undefined. |
1258 | | static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty, |
1259 | 0 | CodeGenFunction &CGF) { |
1260 | 0 | llvm::Type *SrcTy = Src.getElementType(); |
1261 | | |
1262 | | // If SrcTy and Ty are the same, just do a load. |
1263 | 0 | if (SrcTy == Ty) |
1264 | 0 | return CGF.Builder.CreateLoad(Src); |
1265 | | |
1266 | 0 | llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty); |
1267 | |
1268 | 0 | if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) { |
1269 | 0 | Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, |
1270 | 0 | DstSize.getFixedValue(), CGF); |
1271 | 0 | SrcTy = Src.getElementType(); |
1272 | 0 | } |
1273 | |
1274 | 0 | llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy); |
1275 | | |
1276 | | // If the source and destination are integer or pointer types, just do an |
1277 | | // extension or truncation to the desired type. |
1278 | 0 | if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) && |
1279 | 0 | (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) { |
1280 | 0 | llvm::Value *Load = CGF.Builder.CreateLoad(Src); |
1281 | 0 | return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF); |
1282 | 0 | } |
1283 | | |
1284 | | // If load is legal, just bitcast the src pointer. |
1285 | 0 | if (!SrcSize.isScalable() && !DstSize.isScalable() && |
1286 | 0 | SrcSize.getFixedValue() >= DstSize.getFixedValue()) { |
1287 | | // Generally SrcSize is never greater than DstSize, since this means we are |
1288 | | // losing bits. However, this can happen in cases where the structure has |
1289 | | // additional padding, for example due to a user specified alignment. |
1290 | | // |
1291 | | // FIXME: Assert that we aren't truncating non-padding bits when we have access |
1292 | | // to that information. |
1293 | 0 | Src = Src.withElementType(Ty); |
1294 | 0 | return CGF.Builder.CreateLoad(Src); |
1295 | 0 | } |
1296 | | |
1297 | | // If coercing a fixed vector to a scalable vector for ABI compatibility, and |
1298 | | // the types match, use the llvm.vector.insert intrinsic to perform the |
1299 | | // conversion. |
1300 | 0 | if (auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(Ty)) { |
1301 | 0 | if (auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) { |
1302 | | // If we are casting a fixed i8 vector to a scalable 16 x i1 predicate |
1303 | | // vector, use a vector insert and bitcast the result. |
1304 | 0 | bool NeedsBitcast = false; |
1305 | 0 | auto PredType = |
1306 | 0 | llvm::ScalableVectorType::get(CGF.Builder.getInt1Ty(), 16); |
1307 | 0 | llvm::Type *OrigType = Ty; |
1308 | 0 | if (ScalableDst == PredType && |
1309 | 0 | FixedSrc->getElementType() == CGF.Builder.getInt8Ty()) { |
1310 | 0 | ScalableDst = llvm::ScalableVectorType::get(CGF.Builder.getInt8Ty(), 2); |
1311 | 0 | NeedsBitcast = true; |
1312 | 0 | } |
1313 | 0 | if (ScalableDst->getElementType() == FixedSrc->getElementType()) { |
1314 | 0 | auto *Load = CGF.Builder.CreateLoad(Src); |
1315 | 0 | auto *UndefVec = llvm::UndefValue::get(ScalableDst); |
1316 | 0 | auto *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty); |
1317 | 0 | llvm::Value *Result = CGF.Builder.CreateInsertVector( |
1318 | 0 | ScalableDst, UndefVec, Load, Zero, "cast.scalable"); |
1319 | 0 | if (NeedsBitcast) |
1320 | 0 | Result = CGF.Builder.CreateBitCast(Result, OrigType); |
1321 | 0 | return Result; |
1322 | 0 | } |
1323 | 0 | } |
1324 | 0 | } |
1325 | | |
1326 | | // Otherwise do coercion through memory. This is stupid, but simple. |
1327 | 0 | Address Tmp = |
1328 | 0 | CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment(), Src.getName()); |
1329 | 0 | CGF.Builder.CreateMemCpy( |
1330 | 0 | Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), Src.getPointer(), |
1331 | 0 | Src.getAlignment().getAsAlign(), |
1332 | 0 | llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinValue())); |
1333 | 0 | return CGF.Builder.CreateLoad(Tmp); |
1334 | 0 | } |
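The memcpy fallback at the end of CreateCoercedLoad behaves like the standalone sketch below: the source bytes are copied into temporary storage shaped like the destination type and the result is read back from there. The types and sizes are illustrative assumptions, not Clang's.

// Standalone sketch only: emulates the "coercion through memory" fallback with a
// temporary object shaped like the destination type.
#include <cstdint>
#include <cstring>
#include <iostream>

struct SrcPair { uint32_t A, B; };       // 8 bytes of source data

int main() {
  SrcPair Src{1, 2};
  uint64_t DstTmp = 0;                   // stands in for the temporary alloca
  static_assert(sizeof(Src) <= sizeof(DstTmp), "copy is bounded by the source size");
  std::memcpy(&DstTmp, &Src, sizeof(Src));  // the memcpy the fallback path emits
  // The reloaded value depends on host endianness, just as the IR-level
  // "as if through memory" coercion does.
  std::cout << std::hex << DstTmp << '\n';
  return 0;
}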
1335 | | |
1336 | | // Function to store a first-class aggregate into memory. We prefer to |
1337 | | // store the elements rather than the aggregate to be more friendly to |
1338 | | // fast-isel. |
1339 | | // FIXME: Do we need to recurse here? |
1340 | | void CodeGenFunction::EmitAggregateStore(llvm::Value *Val, Address Dest, |
1341 | 0 | bool DestIsVolatile) { |
1342 | | // Prefer scalar stores to first-class aggregate stores. |
1343 | 0 | if (llvm::StructType *STy = dyn_cast<llvm::StructType>(Val->getType())) { |
1344 | 0 | for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { |
1345 | 0 | Address EltPtr = Builder.CreateStructGEP(Dest, i); |
1346 | 0 | llvm::Value *Elt = Builder.CreateExtractValue(Val, i); |
1347 | 0 | Builder.CreateStore(Elt, EltPtr, DestIsVolatile); |
1348 | 0 | } |
1349 | 0 | } else { |
1350 | 0 | Builder.CreateStore(Val, Dest, DestIsVolatile); |
1351 | 0 | } |
1352 | 0 | } |
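A standalone analogue of the per-element preference above: rather than copying an aggregate in one shot, each member is stored individually, which is what the extractvalue plus scalar-store sequence amounts to. The struct and helper below are illustrative only.

// Standalone sketch only: store an aggregate member by member, mirroring the
// per-element stores preferred over a single first-class aggregate store.
#include <iostream>

struct Pair { int First; long Second; };

void storeElements(const Pair &Val, Pair &Dest) {
  Dest.First = Val.First;      // one scalar store per element
  Dest.Second = Val.Second;
}

int main() {
  Pair Src{7, 11}, Dst{};
  storeElements(Src, Dst);
  std::cout << Dst.First << ' ' << Dst.Second << '\n';  // 7 11
  return 0;
}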
1353 | | |
1354 | | /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src, |
1355 | | /// where the source and destination may have different types. The |
1356 | | /// destination is known to be aligned to \arg DstAlign bytes. |
1357 | | /// |
1358 | | /// This safely handles the case when the src type is larger than the |
1359 | | /// destination type; the upper bits of the src will be lost. |
1360 | | static void CreateCoercedStore(llvm::Value *Src, |
1361 | | Address Dst, |
1362 | | bool DstIsVolatile, |
1363 | 0 | CodeGenFunction &CGF) { |
1364 | 0 | llvm::Type *SrcTy = Src->getType(); |
1365 | 0 | llvm::Type *DstTy = Dst.getElementType(); |
1366 | 0 | if (SrcTy == DstTy) { |
1367 | 0 | CGF.Builder.CreateStore(Src, Dst, DstIsVolatile); |
1368 | 0 | return; |
1369 | 0 | } |
1370 | | |
1371 | 0 | llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy); |
1372 | |
1373 | 0 | if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) { |
1374 | 0 | Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, |
1375 | 0 | SrcSize.getFixedValue(), CGF); |
1376 | 0 | DstTy = Dst.getElementType(); |
1377 | 0 | } |
1378 | |
1379 | 0 | llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy); |
1380 | 0 | llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy); |
1381 | 0 | if (SrcPtrTy && DstPtrTy && |
1382 | 0 | SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) { |
1383 | 0 | Src = CGF.Builder.CreateAddrSpaceCast(Src, DstTy); |
1384 | 0 | CGF.Builder.CreateStore(Src, Dst, DstIsVolatile); |
1385 | 0 | return; |
1386 | 0 | } |
1387 | | |
1388 | | // If the source and destination are integer or pointer types, just do an |
1389 | | // extension or truncation to the desired type. |
1390 | 0 | if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) && |
1391 | 0 | (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) { |
1392 | 0 | Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF); |
1393 | 0 | CGF.Builder.CreateStore(Src, Dst, DstIsVolatile); |
1394 | 0 | return; |
1395 | 0 | } |
1396 | | |
1397 | 0 | llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy); |
1398 | | |
1399 | | // If store is legal, just bitcast the src pointer. |
1400 | 0 | if (isa<llvm::ScalableVectorType>(SrcTy) || |
1401 | 0 | isa<llvm::ScalableVectorType>(DstTy) || |
1402 | 0 | SrcSize.getFixedValue() <= DstSize.getFixedValue()) { |
1403 | 0 | Dst = Dst.withElementType(SrcTy); |
1404 | 0 | CGF.EmitAggregateStore(Src, Dst, DstIsVolatile); |
1405 | 0 | } else { |
1406 | | // Otherwise do coercion through memory. This is stupid, but |
1407 | | // simple. |
1408 | | |
1409 | | // Generally SrcSize is never greater than DstSize, since this means we are |
1410 | | // losing bits. However, this can happen in cases where the structure has |
1411 | | // additional padding, for example due to a user specified alignment. |
1412 | | // |
1413 | | // FIXME: Assert that we aren't truncating non-padding bits when we have access |
1414 | | // to that information. |
1415 | 0 | Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment()); |
1416 | 0 | CGF.Builder.CreateStore(Src, Tmp); |
1417 | 0 | CGF.Builder.CreateMemCpy( |
1418 | 0 | Dst.getPointer(), Dst.getAlignment().getAsAlign(), Tmp.getPointer(), |
1419 | 0 | Tmp.getAlignment().getAsAlign(), |
1420 | 0 | llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedValue())); |
1421 | 0 | } |
1422 | 0 | } |
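The lossy direction noted in the doc comment ("the upper bits of the src will be lost") can be seen in the standalone sketch below: a copy bounded by the destination size simply never transfers the extra source bytes. Widths here are illustrative assumptions.

// Standalone sketch only: a store of a wider value into narrower storage keeps
// just the leading destination-sized bytes.
#include <cstdint>
#include <cstring>
#include <iostream>

int main() {
  uint64_t Src = 0x1122334455667788ULL;  // wider than the destination
  uint32_t Dst = 0;
  // memcpy bounded by the destination size: the upper source bytes never reach Dst.
  std::memcpy(&Dst, &Src, sizeof(Dst));
  std::cout << std::hex << Dst << '\n';  // 55667788 on a little-endian host
  return 0;
}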
1423 | | |
1424 | | static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, |
1425 | 0 | const ABIArgInfo &info) { |
1426 | 0 | if (unsigned offset = info.getDirectOffset()) { |
1427 | 0 | addr = addr.withElementType(CGF.Int8Ty); |
1428 | 0 | addr = CGF.Builder.CreateConstInBoundsByteGEP(addr, |
1429 | 0 | CharUnits::fromQuantity(offset)); |
1430 | 0 | addr = addr.withElementType(info.getCoerceToType()); |
1431 | 0 | } |
1432 | 0 | return addr; |
1433 | 0 | } |
1434 | | |
1435 | | namespace { |
1436 | | |
1437 | | /// Encapsulates information about the way function arguments from |
1438 | | /// CGFunctionInfo should be passed to the actual LLVM IR function. |
1439 | | class ClangToLLVMArgMapping { |
1440 | | static const unsigned InvalidIndex = ~0U; |
1441 | | unsigned InallocaArgNo; |
1442 | | unsigned SRetArgNo; |
1443 | | unsigned TotalIRArgs; |
1444 | | |
1445 | | /// Arguments of LLVM IR function corresponding to single Clang argument. |
1446 | | struct IRArgs { |
1447 | | unsigned PaddingArgIndex; |
1448 | | // Argument is expanded to IR arguments at positions |
1449 | | // [FirstArgIndex, FirstArgIndex + NumberOfArgs). |
1450 | | unsigned FirstArgIndex; |
1451 | | unsigned NumberOfArgs; |
1452 | | |
1453 | | IRArgs() |
1454 | | : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex), |
1455 | 0 | NumberOfArgs(0) {} |
1456 | | }; |
1457 | | |
1458 | | SmallVector<IRArgs, 8> ArgInfo; |
1459 | | |
1460 | | public: |
1461 | | ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI, |
1462 | | bool OnlyRequiredArgs = false) |
1463 | | : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0), |
1464 | 0 | ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) { |
1465 | 0 | construct(Context, FI, OnlyRequiredArgs); |
1466 | 0 | } |
1467 | | |
1468 | 0 | bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; } |
1469 | 0 | unsigned getInallocaArgNo() const { |
1470 | 0 | assert(hasInallocaArg()); |
1471 | 0 | return InallocaArgNo; |
1472 | 0 | } |
1473 | | |
1474 | 0 | bool hasSRetArg() const { return SRetArgNo != InvalidIndex; } |
1475 | 0 | unsigned getSRetArgNo() const { |
1476 | 0 | assert(hasSRetArg()); |
1477 | 0 | return SRetArgNo; |
1478 | 0 | } |
1479 | | |
1480 | 0 | unsigned totalIRArgs() const { return TotalIRArgs; } |
1481 | | |
1482 | 0 | bool hasPaddingArg(unsigned ArgNo) const { |
1483 | 0 | assert(ArgNo < ArgInfo.size()); |
1484 | 0 | return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex; |
1485 | 0 | } |
1486 | 0 | unsigned getPaddingArgNo(unsigned ArgNo) const { |
1487 | 0 | assert(hasPaddingArg(ArgNo)); |
1488 | 0 | return ArgInfo[ArgNo].PaddingArgIndex; |
1489 | 0 | } |
1490 | | |
1491 | | /// Returns the index of the first IR argument corresponding to ArgNo, and |
1492 | | /// the number of IR arguments it occupies. |
1493 | 0 | std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const { |
1494 | 0 | assert(ArgNo < ArgInfo.size()); |
1495 | 0 | return std::make_pair(ArgInfo[ArgNo].FirstArgIndex, |
1496 | 0 | ArgInfo[ArgNo].NumberOfArgs); |
1497 | 0 | } |
1498 | | |
1499 | | private: |
1500 | | void construct(const ASTContext &Context, const CGFunctionInfo &FI, |
1501 | | bool OnlyRequiredArgs); |
1502 | | }; |
1503 | | |
1504 | | void ClangToLLVMArgMapping::construct(const ASTContext &Context, |
1505 | | const CGFunctionInfo &FI, |
1506 | 0 | bool OnlyRequiredArgs) { |
1507 | 0 | unsigned IRArgNo = 0; |
1508 | 0 | bool SwapThisWithSRet = false; |
1509 | 0 | const ABIArgInfo &RetAI = FI.getReturnInfo(); |
1510 | |
1511 | 0 | if (RetAI.getKind() == ABIArgInfo::Indirect) { |
1512 | 0 | SwapThisWithSRet = RetAI.isSRetAfterThis(); |
1513 | 0 | SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++; |
1514 | 0 | } |
1515 | |
1516 | 0 | unsigned ArgNo = 0; |
1517 | 0 | unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size(); |
1518 | 0 | for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs; |
1519 | 0 | ++I, ++ArgNo) { |
1520 | 0 | assert(I != FI.arg_end()); |
1521 | 0 | QualType ArgType = I->type; |
1522 | 0 | const ABIArgInfo &AI = I->info; |
1523 | | // Collect data about IR arguments corresponding to Clang argument ArgNo. |
1524 | 0 | auto &IRArgs = ArgInfo[ArgNo]; |
1525 | |
1526 | 0 | if (AI.getPaddingType()) |
1527 | 0 | IRArgs.PaddingArgIndex = IRArgNo++; |
1528 | |
1529 | 0 | switch (AI.getKind()) { |
1530 | 0 | case ABIArgInfo::Extend: |
1531 | 0 | case ABIArgInfo::Direct: { |
1532 | | // FIXME: handle sseregparm someday... |
1533 | 0 | llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType()); |
1534 | 0 | if (AI.isDirect() && AI.getCanBeFlattened() && STy) { |
1535 | 0 | IRArgs.NumberOfArgs = STy->getNumElements(); |
1536 | 0 | } else { |
1537 | 0 | IRArgs.NumberOfArgs = 1; |
1538 | 0 | } |
1539 | 0 | break; |
1540 | 0 | } |
1541 | 0 | case ABIArgInfo::Indirect: |
1542 | 0 | case ABIArgInfo::IndirectAliased: |
1543 | 0 | IRArgs.NumberOfArgs = 1; |
1544 | 0 | break; |
1545 | 0 | case ABIArgInfo::Ignore: |
1546 | 0 | case ABIArgInfo::InAlloca: |
1547 | | // Ignore and InAlloca don't have matching LLVM parameters. |
1548 | 0 | IRArgs.NumberOfArgs = 0; |
1549 | 0 | break; |
1550 | 0 | case ABIArgInfo::CoerceAndExpand: |
1551 | 0 | IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size(); |
1552 | 0 | break; |
1553 | 0 | case ABIArgInfo::Expand: |
1554 | 0 | IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context); |
1555 | 0 | break; |
1556 | 0 | } |
1557 | | |
1558 | 0 | if (IRArgs.NumberOfArgs > 0) { |
1559 | 0 | IRArgs.FirstArgIndex = IRArgNo; |
1560 | 0 | IRArgNo += IRArgs.NumberOfArgs; |
1561 | 0 | } |
1562 | | |
1563 | | // Skip over the sret parameter when it comes second. We already handled it |
1564 | | // above. |
1565 | 0 | if (IRArgNo == 1 && SwapThisWithSRet) |
1566 | 0 | IRArgNo++; |
1567 | 0 | } |
1568 | 0 | assert(ArgNo == ArgInfo.size()); |
1569 | | |
1570 | 0 | if (FI.usesInAlloca()) |
1571 | 0 | InallocaArgNo = IRArgNo++; |
1572 | |
1573 | 0 | TotalIRArgs = IRArgNo; |
1574 | 0 | } |
1575 | | } // namespace |
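The bookkeeping ClangToLLVMArgMapping::construct performs can be reduced to the standalone sketch below: each source-level argument records the first IR argument index it occupies and how many IR arguments it expands to, while slots such as sret consume indices of their own. The expansion counts and the sret position are made-up inputs for illustration.

// Standalone sketch only: per-argument IR-slot bookkeeping with illustrative data.
#include <iostream>
#include <vector>

struct IRSlots { unsigned FirstArgIndex; unsigned NumberOfArgs; };

int main() {
  // Suppose arg0 is flattened into 2 scalars and arg1 stays a single value,
  // with an sret pointer occupying IR argument 0.
  unsigned IRArgNo = 1;                        // slot 0 taken by sret
  std::vector<unsigned> ExpandedCounts = {2, 1};
  std::vector<IRSlots> Map;
  for (unsigned N : ExpandedCounts) {
    Map.push_back({IRArgNo, N});               // record [first, first + count)
    IRArgNo += N;
  }
  for (const IRSlots &S : Map)
    std::cout << "[" << S.FirstArgIndex << ", " << S.FirstArgIndex + S.NumberOfArgs << ")\n";
  std::cout << "total IR args: " << IRArgNo << '\n';   // 4
  return 0;
}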
1576 | | |
1577 | | /***/ |
1578 | | |
1579 | 0 | bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) { |
1580 | 0 | const auto &RI = FI.getReturnInfo(); |
1581 | 0 | return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet()); |
1582 | 0 | } |
1583 | | |
1584 | 0 | bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) { |
1585 | 0 | return ReturnTypeUsesSRet(FI) && |
1586 | 0 | getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs(); |
1587 | 0 | } |
1588 | | |
1589 | 0 | bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) { |
1590 | 0 | if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) { |
1591 | 0 | switch (BT->getKind()) { |
1592 | 0 | default: |
1593 | 0 | return false; |
1594 | 0 | case BuiltinType::Float: |
1595 | 0 | return getTarget().useObjCFPRetForRealType(FloatModeKind::Float); |
1596 | 0 | case BuiltinType::Double: |
1597 | 0 | return getTarget().useObjCFPRetForRealType(FloatModeKind::Double); |
1598 | 0 | case BuiltinType::LongDouble: |
1599 | 0 | return getTarget().useObjCFPRetForRealType(FloatModeKind::LongDouble); |
1600 | 0 | } |
1601 | 0 | } |
1602 | | |
1603 | 0 | return false; |
1604 | 0 | } |
1605 | | |
1606 | 0 | bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) { |
1607 | 0 | if (const ComplexType *CT = ResultType->getAs<ComplexType>()) { |
1608 | 0 | if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) { |
1609 | 0 | if (BT->getKind() == BuiltinType::LongDouble) |
1610 | 0 | return getTarget().useObjCFP2RetForComplexLongDouble(); |
1611 | 0 | } |
1612 | 0 | } |
1613 | | |
1614 | 0 | return false; |
1615 | 0 | } |
1616 | | |
1617 | 0 | llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) { |
1618 | 0 | const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD); |
1619 | 0 | return GetFunctionType(FI); |
1620 | 0 | } |
1621 | | |
1622 | | llvm::FunctionType * |
1623 | 0 | CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) { |
1624 | |
1625 | 0 | bool Inserted = FunctionsBeingProcessed.insert(&FI).second; |
1626 | 0 | (void)Inserted; |
1627 | 0 | assert(Inserted && "Recursively being processed?"); |
1628 | | |
1629 | 0 | llvm::Type *resultType = nullptr; |
1630 | 0 | const ABIArgInfo &retAI = FI.getReturnInfo(); |
1631 | 0 | switch (retAI.getKind()) { |
1632 | 0 | case ABIArgInfo::Expand: |
1633 | 0 | case ABIArgInfo::IndirectAliased: |
1634 | 0 | llvm_unreachable("Invalid ABI kind for return argument"); |
1635 | |
1636 | 0 | case ABIArgInfo::Extend: |
1637 | 0 | case ABIArgInfo::Direct: |
1638 | 0 | resultType = retAI.getCoerceToType(); |
1639 | 0 | break; |
1640 | | |
1641 | 0 | case ABIArgInfo::InAlloca: |
1642 | 0 | if (retAI.getInAllocaSRet()) { |
1643 | | // sret things on win32 aren't void, they return the sret pointer. |
1644 | 0 | QualType ret = FI.getReturnType(); |
1645 | 0 | unsigned addressSpace = CGM.getTypes().getTargetAddressSpace(ret); |
1646 | 0 | resultType = llvm::PointerType::get(getLLVMContext(), addressSpace); |
1647 | 0 | } else { |
1648 | 0 | resultType = llvm::Type::getVoidTy(getLLVMContext()); |
1649 | 0 | } |
1650 | 0 | break; |
1651 | | |
1652 | 0 | case ABIArgInfo::Indirect: |
1653 | 0 | case ABIArgInfo::Ignore: |
1654 | 0 | resultType = llvm::Type::getVoidTy(getLLVMContext()); |
1655 | 0 | break; |
1656 | | |
1657 | 0 | case ABIArgInfo::CoerceAndExpand: |
1658 | 0 | resultType = retAI.getUnpaddedCoerceAndExpandType(); |
1659 | 0 | break; |
1660 | 0 | } |
1661 | | |
1662 | 0 | ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true); |
1663 | 0 | SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs()); |
1664 | | |
1665 | | // Add type for sret argument. |
1666 | 0 | if (IRFunctionArgs.hasSRetArg()) { |
1667 | 0 | QualType Ret = FI.getReturnType(); |
1668 | 0 | unsigned AddressSpace = CGM.getTypes().getTargetAddressSpace(Ret); |
1669 | 0 | ArgTypes[IRFunctionArgs.getSRetArgNo()] = |
1670 | 0 | llvm::PointerType::get(getLLVMContext(), AddressSpace); |
1671 | 0 | } |
1672 | | |
1673 | | // Add type for inalloca argument. |
1674 | 0 | if (IRFunctionArgs.hasInallocaArg()) |
1675 | 0 | ArgTypes[IRFunctionArgs.getInallocaArgNo()] = |
1676 | 0 | llvm::PointerType::getUnqual(getLLVMContext()); |
1677 | | |
1678 | | // Add in all of the required arguments. |
1679 | 0 | unsigned ArgNo = 0; |
1680 | 0 | CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), |
1681 | 0 | ie = it + FI.getNumRequiredArgs(); |
1682 | 0 | for (; it != ie; ++it, ++ArgNo) { |
1683 | 0 | const ABIArgInfo &ArgInfo = it->info; |
1684 | | |
1685 | | // Insert a padding type to ensure proper alignment. |
1686 | 0 | if (IRFunctionArgs.hasPaddingArg(ArgNo)) |
1687 | 0 | ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] = |
1688 | 0 | ArgInfo.getPaddingType(); |
1689 | |
1690 | 0 | unsigned FirstIRArg, NumIRArgs; |
1691 | 0 | std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); |
1692 | |
1693 | 0 | switch (ArgInfo.getKind()) { |
1694 | 0 | case ABIArgInfo::Ignore: |
1695 | 0 | case ABIArgInfo::InAlloca: |
1696 | 0 | assert(NumIRArgs == 0); |
1697 | 0 | break; |
1698 | | |
1699 | 0 | case ABIArgInfo::Indirect: |
1700 | 0 | assert(NumIRArgs == 1); |
1701 | | // indirect arguments are always on the stack, which is alloca addr space. |
1702 | 0 | ArgTypes[FirstIRArg] = llvm::PointerType::get( |
1703 | 0 | getLLVMContext(), CGM.getDataLayout().getAllocaAddrSpace()); |
1704 | 0 | break; |
1705 | 0 | case ABIArgInfo::IndirectAliased: |
1706 | 0 | assert(NumIRArgs == 1); |
1707 | 0 | ArgTypes[FirstIRArg] = llvm::PointerType::get( |
1708 | 0 | getLLVMContext(), ArgInfo.getIndirectAddrSpace()); |
1709 | 0 | break; |
1710 | 0 | case ABIArgInfo::Extend: |
1711 | 0 | case ABIArgInfo::Direct: { |
1712 | | // Fast-isel and the optimizer generally like scalar values better than |
1713 | | // FCAs, so we flatten them if this is safe to do for this argument. |
1714 | 0 | llvm::Type *argType = ArgInfo.getCoerceToType(); |
1715 | 0 | llvm::StructType *st = dyn_cast<llvm::StructType>(argType); |
1716 | 0 | if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { |
1717 | 0 | assert(NumIRArgs == st->getNumElements()); |
1718 | 0 | for (unsigned i = 0, e = st->getNumElements(); i != e; ++i) |
1719 | 0 | ArgTypes[FirstIRArg + i] = st->getElementType(i); |
1720 | 0 | } else { |
1721 | 0 | assert(NumIRArgs == 1); |
1722 | 0 | ArgTypes[FirstIRArg] = argType; |
1723 | 0 | } |
1724 | 0 | break; |
1725 | 0 | } |
1726 | | |
1727 | 0 | case ABIArgInfo::CoerceAndExpand: { |
1728 | 0 | auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; |
1729 | 0 | for (auto *EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) { |
1730 | 0 | *ArgTypesIter++ = EltTy; |
1731 | 0 | } |
1732 | 0 | assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); |
1733 | 0 | break; |
1734 | 0 | } |
1735 | | |
1736 | 0 | case ABIArgInfo::Expand: |
1737 | 0 | auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; |
1738 | 0 | getExpandedTypes(it->type, ArgTypesIter); |
1739 | 0 | assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); |
1740 | 0 | break; |
1741 | 0 | } |
1742 | 0 | } |
1743 | | |
1744 | 0 | bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased; |
1745 | 0 | assert(Erased && "Not in set?"); |
1746 | | |
1747 | 0 | return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic()); |
1748 | 0 | } |
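To see the Extend/Direct flattening branch above in source terms, consider a small aggregate whose coerce-to type is a flattenable LLVM struct. The example below is hedged: whether flattening happens is entirely target-ABI dependent, and the IR signature in the comment is what a typical x86-64 SysV lowering commonly produces; it is an assumption of this sketch, not something stated by this report.

// Hedged illustration only; the lowering in the comment assumes a typical
// x86-64 SysV target.
struct TwoDoubles { double X, Y; };

// On such targets the coerce-to type for 'P' is commonly { double, double }, which
// can be flattened, so this function usually lowers to an LLVM signature along the
// lines of:  define double @_Z9takeFirst10TwoDoubles(double, double)
double takeFirst(TwoDoubles P) { return P.X; }

int main() { return static_cast<int>(takeFirst({1.0, 2.0})); }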
1749 | | |
1750 | 0 | llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) { |
1751 | 0 | const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); |
1752 | 0 | const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>(); |
1753 | |
1754 | 0 | if (!isFuncTypeConvertible(FPT)) |
1755 | 0 | return llvm::StructType::get(getLLVMContext()); |
1756 | | |
1757 | 0 | return GetFunctionType(GD); |
1758 | 0 | } |
1759 | | |
1760 | | static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, |
1761 | | llvm::AttrBuilder &FuncAttrs, |
1762 | 0 | const FunctionProtoType *FPT) { |
1763 | 0 | if (!FPT) |
1764 | 0 | return; |
1765 | | |
1766 | 0 | if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) && |
1767 | 0 | FPT->isNothrow()) |
1768 | 0 | FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); |
1769 | |
1770 | 0 | unsigned SMEBits = FPT->getAArch64SMEAttributes(); |
1771 | 0 | if (SMEBits & FunctionType::SME_PStateSMEnabledMask) |
1772 | 0 | FuncAttrs.addAttribute("aarch64_pstate_sm_enabled"); |
1773 | 0 | if (SMEBits & FunctionType::SME_PStateSMCompatibleMask) |
1774 | 0 | FuncAttrs.addAttribute("aarch64_pstate_sm_compatible"); |
1775 | | |
1776 | | // ZA |
1777 | 0 | if (FunctionType::getArmZAState(SMEBits) == FunctionType::ARM_Out || |
1778 | 0 | FunctionType::getArmZAState(SMEBits) == FunctionType::ARM_InOut) |
1779 | 0 | FuncAttrs.addAttribute("aarch64_pstate_za_shared"); |
1780 | 0 | if (FunctionType::getArmZAState(SMEBits) == FunctionType::ARM_Preserves || |
1781 | 0 | FunctionType::getArmZAState(SMEBits) == FunctionType::ARM_In) { |
1782 | 0 | FuncAttrs.addAttribute("aarch64_pstate_za_shared"); |
1783 | 0 | FuncAttrs.addAttribute("aarch64_pstate_za_preserved"); |
1784 | 0 | } |
1785 | 0 | } |
1786 | | |
1787 | | static void AddAttributesFromAssumes(llvm::AttrBuilder &FuncAttrs, |
1788 | 0 | const Decl *Callee) { |
1789 | 0 | if (!Callee) |
1790 | 0 | return; |
1791 | | |
1792 | 0 | SmallVector<StringRef, 4> Attrs; |
1793 | |
1794 | 0 | for (const AssumptionAttr *AA : Callee->specific_attrs<AssumptionAttr>()) |
1795 | 0 | AA->getAssumption().split(Attrs, ","); |
1796 | |
1797 | 0 | if (!Attrs.empty()) |
1798 | 0 | FuncAttrs.addAttribute(llvm::AssumptionAttrKey, |
1799 | 0 | llvm::join(Attrs.begin(), Attrs.end(), ",")); |
1800 | 0 | } |
1801 | | |
1802 | | bool CodeGenModule::MayDropFunctionReturn(const ASTContext &Context, |
1803 | 0 | QualType ReturnType) const { |
1804 | | // We can't just discard the return value for a record type with a |
1805 | | // complex destructor or a non-trivially copyable type. |
1806 | 0 | if (const RecordType *RT = |
1807 | 0 | ReturnType.getCanonicalType()->getAs<RecordType>()) { |
1808 | 0 | if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) |
1809 | 0 | return ClassDecl->hasTrivialDestructor(); |
1810 | 0 | } |
1811 | 0 | return ReturnType.isTriviallyCopyableType(Context); |
1812 | 0 | } |
1813 | | |
1814 | | static bool HasStrictReturn(const CodeGenModule &Module, QualType RetTy, |
1815 | 0 | const Decl *TargetDecl) { |
1816 | | // As-is, msan cannot tolerate a noundef mismatch between caller and |
1817 | | // implementation. Mismatch is possible for e.g. indirect calls from C-caller |
1818 | | // into C++. Such mismatches lead to confusing false reports. To avoid |
1819 | | // an expensive workaround in msan, we enforce initialization even in uncommon |
1820 | | // cases where it's allowed. |
1821 | 0 | if (Module.getLangOpts().Sanitize.has(SanitizerKind::Memory)) |
1822 | 0 | return true; |
1823 | | // C++ explicitly makes returning undefined values UB. C's rule only applies |
1824 | | // to used values, so we never mark them noundef for now. |
1825 | 0 | if (!Module.getLangOpts().CPlusPlus) |
1826 | 0 | return false; |
1827 | 0 | if (TargetDecl) { |
1828 | 0 | if (const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(TargetDecl)) { |
1829 | 0 | if (FDecl->isExternC()) |
1830 | 0 | return false; |
1831 | 0 | } else if (const VarDecl *VDecl = dyn_cast<VarDecl>(TargetDecl)) { |
1832 | | // Function pointer. |
1833 | 0 | if (VDecl->isExternC()) |
1834 | 0 | return false; |
1835 | 0 | } |
1836 | 0 | } |
1837 | | |
1838 | | // We don't want to be too aggressive with the return checking, unless |
1839 | | // it's explicit in the code opts or we're using an appropriate sanitizer. |
1840 | | // Try to respect what the programmer intended. |
1841 | 0 | return Module.getCodeGenOpts().StrictReturn || |
1842 | 0 | !Module.MayDropFunctionReturn(Module.getContext(), RetTy) || |
1843 | 0 | Module.getLangOpts().Sanitize.has(SanitizerKind::Return); |
1844 | 0 | } |
1845 | | |
1846 | | /// Add denormal-fp-math and denormal-fp-math-f32 as appropriate for the |
1847 | | /// requested denormal behavior, accounting for the overriding behavior of the |
1848 | | /// -f32 case. |
1849 | | static void addDenormalModeAttrs(llvm::DenormalMode FPDenormalMode, |
1850 | | llvm::DenormalMode FP32DenormalMode, |
1851 | 0 | llvm::AttrBuilder &FuncAttrs) { |
1852 | 0 | if (FPDenormalMode != llvm::DenormalMode::getDefault()) |
1853 | 0 | FuncAttrs.addAttribute("denormal-fp-math", FPDenormalMode.str()); |
1854 | |
1855 | 0 | if (FP32DenormalMode != FPDenormalMode && FP32DenormalMode.isValid()) |
1856 | 0 | FuncAttrs.addAttribute("denormal-fp-math-f32", FP32DenormalMode.str()); |
1857 | 0 | } |
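The f32 override rule above boils down to: always record the general mode when it is non-default, and record the f32-specific mode only when it is valid and differs from the general one. Below is a standalone sketch with strings standing in for llvm::DenormalMode; the concrete values and the attribute map are illustrative assumptions.

// Standalone sketch only: mirrors the two conditions used when emitting the
// denormal-fp-math / denormal-fp-math-f32 string attributes.
#include <iostream>
#include <map>
#include <string>

int main() {
  const std::string Default = "ieee,ieee";
  std::string FPDenormalMode = "preserve-sign,preserve-sign";
  std::string FP32DenormalMode = "ieee,ieee";   // differs from the general mode

  std::map<std::string, std::string> FuncAttrs;
  if (FPDenormalMode != Default)
    FuncAttrs["denormal-fp-math"] = FPDenormalMode;
  if (FP32DenormalMode != FPDenormalMode && !FP32DenormalMode.empty())
    FuncAttrs["denormal-fp-math-f32"] = FP32DenormalMode;

  for (const auto &KV : FuncAttrs)
    std::cout << KV.first << "=" << KV.second << '\n';
  return 0;
}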
1858 | | |
1859 | | /// Add default attributes to a function, which have merge semantics under |
1860 | | /// -mlink-builtin-bitcode and should not simply overwrite any existing |
1861 | | /// attributes in the linked library. |
1862 | | static void |
1863 | | addMergableDefaultFunctionAttributes(const CodeGenOptions &CodeGenOpts, |
1864 | 0 | llvm::AttrBuilder &FuncAttrs) { |
1865 | 0 | addDenormalModeAttrs(CodeGenOpts.FPDenormalMode, CodeGenOpts.FP32DenormalMode, |
1866 | 0 | FuncAttrs); |
1867 | 0 | } |
1868 | | |
1869 | | static void getTrivialDefaultFunctionAttributes( |
1870 | | StringRef Name, bool HasOptnone, const CodeGenOptions &CodeGenOpts, |
1871 | | const LangOptions &LangOpts, bool AttrOnCallSite, |
1872 | 0 | llvm::AttrBuilder &FuncAttrs) { |
1873 | | // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed. |
1874 | 0 | if (!HasOptnone) { |
1875 | 0 | if (CodeGenOpts.OptimizeSize) |
1876 | 0 | FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize); |
1877 | 0 | if (CodeGenOpts.OptimizeSize == 2) |
1878 | 0 | FuncAttrs.addAttribute(llvm::Attribute::MinSize); |
1879 | 0 | } |
1880 | |
1881 | 0 | if (CodeGenOpts.DisableRedZone) |
1882 | 0 | FuncAttrs.addAttribute(llvm::Attribute::NoRedZone); |
1883 | 0 | if (CodeGenOpts.IndirectTlsSegRefs) |
1884 | 0 | FuncAttrs.addAttribute("indirect-tls-seg-refs"); |
1885 | 0 | if (CodeGenOpts.NoImplicitFloat) |
1886 | 0 | FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat); |
1887 | |
1888 | 0 | if (AttrOnCallSite) { |
1889 | | // Attributes that should go on the call site only. |
1890 | | // FIXME: Look for 'BuiltinAttr' on the function rather than re-checking |
1891 | | // the -fno-builtin-foo list. |
1892 | 0 | if (!CodeGenOpts.SimplifyLibCalls || LangOpts.isNoBuiltinFunc(Name)) |
1893 | 0 | FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin); |
1894 | 0 | if (!CodeGenOpts.TrapFuncName.empty()) |
1895 | 0 | FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName); |
1896 | 0 | } else { |
1897 | 0 | switch (CodeGenOpts.getFramePointer()) { |
1898 | 0 | case CodeGenOptions::FramePointerKind::None: |
1899 | | // This is the default behavior. |
1900 | 0 | break; |
1901 | 0 | case CodeGenOptions::FramePointerKind::NonLeaf: |
1902 | 0 | case CodeGenOptions::FramePointerKind::All: |
1903 | 0 | FuncAttrs.addAttribute("frame-pointer", |
1904 | 0 | CodeGenOptions::getFramePointerKindName( |
1905 | 0 | CodeGenOpts.getFramePointer())); |
1906 | 0 | } |
1907 | | |
1908 | 0 | if (CodeGenOpts.LessPreciseFPMAD) |
1909 | 0 | FuncAttrs.addAttribute("less-precise-fpmad", "true"); |
1910 | |
1911 | 0 | if (CodeGenOpts.NullPointerIsValid) |
1912 | 0 | FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid); |
1913 | |
1914 | 0 | if (LangOpts.getDefaultExceptionMode() == LangOptions::FPE_Ignore) |
1915 | 0 | FuncAttrs.addAttribute("no-trapping-math", "true"); |
1916 | | |
1917 | | // TODO: Are these all needed? |
1918 | | // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags. |
1919 | 0 | if (LangOpts.NoHonorInfs) |
1920 | 0 | FuncAttrs.addAttribute("no-infs-fp-math", "true"); |
1921 | 0 | if (LangOpts.NoHonorNaNs) |
1922 | 0 | FuncAttrs.addAttribute("no-nans-fp-math", "true"); |
1923 | 0 | if (LangOpts.ApproxFunc) |
1924 | 0 | FuncAttrs.addAttribute("approx-func-fp-math", "true"); |
1925 | 0 | if (LangOpts.AllowFPReassoc && LangOpts.AllowRecip && |
1926 | 0 | LangOpts.NoSignedZero && LangOpts.ApproxFunc && |
1927 | 0 | (LangOpts.getDefaultFPContractMode() == |
1928 | 0 | LangOptions::FPModeKind::FPM_Fast || |
1929 | 0 | LangOpts.getDefaultFPContractMode() == |
1930 | 0 | LangOptions::FPModeKind::FPM_FastHonorPragmas)) |
1931 | 0 | FuncAttrs.addAttribute("unsafe-fp-math", "true"); |
1932 | 0 | if (CodeGenOpts.SoftFloat) |
1933 | 0 | FuncAttrs.addAttribute("use-soft-float", "true"); |
1934 | 0 | FuncAttrs.addAttribute("stack-protector-buffer-size", |
1935 | 0 | llvm::utostr(CodeGenOpts.SSPBufferSize)); |
1936 | 0 | if (LangOpts.NoSignedZero) |
1937 | 0 | FuncAttrs.addAttribute("no-signed-zeros-fp-math", "true"); |
1938 | | |
1939 | | // TODO: Reciprocal estimate codegen options should apply to instructions? |
1940 | 0 | const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals; |
1941 | 0 | if (!Recips.empty()) |
1942 | 0 | FuncAttrs.addAttribute("reciprocal-estimates", |
1943 | 0 | llvm::join(Recips, ",")); |
1944 | |
1945 | 0 | if (!CodeGenOpts.PreferVectorWidth.empty() && |
1946 | 0 | CodeGenOpts.PreferVectorWidth != "none") |
1947 | 0 | FuncAttrs.addAttribute("prefer-vector-width", |
1948 | 0 | CodeGenOpts.PreferVectorWidth); |
1949 | |
1950 | 0 | if (CodeGenOpts.StackRealignment) |
1951 | 0 | FuncAttrs.addAttribute("stackrealign"); |
1952 | 0 | if (CodeGenOpts.Backchain) |
1953 | 0 | FuncAttrs.addAttribute("backchain"); |
1954 | 0 | if (CodeGenOpts.EnableSegmentedStacks) |
1955 | 0 | FuncAttrs.addAttribute("split-stack"); |
1956 | |
1957 | 0 | if (CodeGenOpts.SpeculativeLoadHardening) |
1958 | 0 | FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening); |
1959 | | |
1960 | | // Add zero-call-used-regs attribute. |
1961 | 0 | switch (CodeGenOpts.getZeroCallUsedRegs()) { |
1962 | 0 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Skip: |
1963 | 0 | FuncAttrs.removeAttribute("zero-call-used-regs"); |
1964 | 0 | break; |
1965 | 0 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPRArg: |
1966 | 0 | FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr-arg"); |
1967 | 0 | break; |
1968 | 0 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPR: |
1969 | 0 | FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr"); |
1970 | 0 | break; |
1971 | 0 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedArg: |
1972 | 0 | FuncAttrs.addAttribute("zero-call-used-regs", "used-arg"); |
1973 | 0 | break; |
1974 | 0 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Used: |
1975 | 0 | FuncAttrs.addAttribute("zero-call-used-regs", "used"); |
1976 | 0 | break; |
1977 | 0 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPRArg: |
1978 | 0 | FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr-arg"); |
1979 | 0 | break; |
1980 | 0 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPR: |
1981 | 0 | FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr"); |
1982 | 0 | break; |
1983 | 0 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllArg: |
1984 | 0 | FuncAttrs.addAttribute("zero-call-used-regs", "all-arg"); |
1985 | 0 | break; |
1986 | 0 | case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::All: |
1987 | 0 | FuncAttrs.addAttribute("zero-call-used-regs", "all"); |
1988 | 0 | break; |
1989 | 0 | } |
1990 | 0 | } |
1991 | | |
1992 | 0 | if (LangOpts.assumeFunctionsAreConvergent()) { |
1993 | | // Conservatively, mark all functions and calls in CUDA and OpenCL as |
1994 | | // convergent (meaning, they may call an intrinsically convergent op, such |
1995 | | // as __syncthreads() / barrier(), and so can't have certain optimizations |
1996 | | // applied around them). LLVM will remove this attribute where it safely |
1997 | | // can. |
1998 | 0 | FuncAttrs.addAttribute(llvm::Attribute::Convergent); |
1999 | 0 | } |
2000 | | |
2001 | | // TODO: The NoUnwind attribute should also be added for other GPU modes (HIP, |
2002 | | // OpenMP offload). AFAIK, neither of them supports exceptions in device code. |
2003 | 0 | if ((LangOpts.CUDA && LangOpts.CUDAIsDevice) || LangOpts.OpenCL || |
2004 | 0 | LangOpts.SYCLIsDevice) { |
2005 | 0 | FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); |
2006 | 0 | } |
2007 | |
2008 | 0 | for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) { |
2009 | 0 | StringRef Var, Value; |
2010 | 0 | std::tie(Var, Value) = Attr.split('='); |
2011 | 0 | FuncAttrs.addAttribute(Var, Value); |
2012 | 0 | } |
2013 | 0 | } |
2014 | | |
2015 | | /// Merges `target-features` from \TargetOpts and \F, and sets the result in |
2016 | | /// \FuncAttr. |
2017 | | /// * Features from \F are always kept. |
2018 | | /// * A feature from \TargetOpts is kept only if neither it nor its opposite |
2019 | | /// is present in \F. |
2020 | | static void |
2021 | | overrideFunctionFeaturesWithTargetFeatures(llvm::AttrBuilder &FuncAttr, |
2022 | | const llvm::Function &F, |
2023 | 0 | const TargetOptions &TargetOpts) { |
2024 | 0 | auto FFeatures = F.getFnAttribute("target-features"); |
2025 | |
2026 | 0 | llvm::StringSet<> MergedNames; |
2027 | 0 | SmallVector<StringRef> MergedFeatures; |
2028 | 0 | MergedFeatures.reserve(TargetOpts.Features.size()); |
2029 | |
2030 | 0 | auto AddUnmergedFeatures = [&](auto &&FeatureRange) { |
2031 | 0 | for (StringRef Feature : FeatureRange) { |
2032 | 0 | if (Feature.empty()) |
2033 | 0 | continue; |
2034 | 0 | assert(Feature[0] == '+' || Feature[0] == '-'); |
2035 | 0 | StringRef Name = Feature.drop_front(1); |
2036 | 0 | bool Merged = !MergedNames.insert(Name).second; |
2037 | 0 | if (!Merged) |
2038 | 0 | MergedFeatures.push_back(Feature); |
2039 | 0 | } |
2040 | 0 | }; |
Unexecuted instantiation: CGCall.cpp:auto overrideFunctionFeaturesWithTargetFeatures(llvm::AttrBuilder&, llvm::Function const&, clang::TargetOptions const&)::$_3::operator()<llvm::iterator_range<llvm::SplittingIterator> >(llvm::iterator_range<llvm::SplittingIterator>&&) const
Unexecuted instantiation: CGCall.cpp:auto overrideFunctionFeaturesWithTargetFeatures(llvm::AttrBuilder&, llvm::Function const&, clang::TargetOptions const&)::$_3::operator()<std::__1::vector<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::allocator<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > > > const&>(std::__1::vector<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::allocator<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > > > const&) const
2041 | |
2042 | 0 | if (FFeatures.isValid()) |
2043 | 0 | AddUnmergedFeatures(llvm::split(FFeatures.getValueAsString(), ',')); |
2044 | 0 | AddUnmergedFeatures(TargetOpts.Features); |
2045 | |
2046 | 0 | if (!MergedFeatures.empty()) { |
2047 | 0 | llvm::sort(MergedFeatures); |
2048 | 0 | FuncAttr.addAttribute("target-features", llvm::join(MergedFeatures, ",")); |
2049 | 0 | } |
2050 | 0 | } |
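The merge policy documented above (features already on the function always win; a target-level feature survives only when neither "+name" nor "-name" already appears on the function) can be exercised with the standalone sketch below. The feature strings are made up, and std::set stands in for llvm::StringSet.

// Standalone sketch only: first writer of a feature name wins, and the function's
// own features are processed first.
#include <algorithm>
#include <iostream>
#include <set>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> FromFunction = {"+sse4.2", "-avx"};
  std::vector<std::string> FromTargetOpts = {"+avx", "+fma"};

  std::set<std::string> SeenNames;
  std::vector<std::string> Merged;
  auto AddUnmerged = [&](const std::vector<std::string> &Features) {
    for (const std::string &F : Features) {
      std::string Name = F.substr(1);        // drop the leading '+' or '-'
      if (SeenNames.insert(Name).second)     // only keep the first mention of a name
        Merged.push_back(F);
    }
  };
  AddUnmerged(FromFunction);                 // function features win over TargetOpts
  AddUnmerged(FromTargetOpts);

  std::sort(Merged.begin(), Merged.end());
  for (size_t I = 0; I < Merged.size(); ++I)
    std::cout << Merged[I] << (I + 1 < Merged.size() ? "," : "\n");  // +fma,+sse4.2,-avx
  return 0;
}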
2051 | | |
2052 | | void CodeGen::mergeDefaultFunctionDefinitionAttributes( |
2053 | | llvm::Function &F, const CodeGenOptions &CodeGenOpts, |
2054 | | const LangOptions &LangOpts, const TargetOptions &TargetOpts, |
2055 | 0 | bool WillInternalize) { |
2056 | |
2057 | 0 | llvm::AttrBuilder FuncAttrs(F.getContext()); |
2058 | | // Here we only extract the options that are relevant compared to the version |
2059 | | // from GetCPUAndFeaturesAttributes. |
2060 | 0 | if (!TargetOpts.CPU.empty()) |
2061 | 0 | FuncAttrs.addAttribute("target-cpu", TargetOpts.CPU); |
2062 | 0 | if (!TargetOpts.TuneCPU.empty()) |
2063 | 0 | FuncAttrs.addAttribute("tune-cpu", TargetOpts.TuneCPU); |
2064 | |
2065 | 0 | ::getTrivialDefaultFunctionAttributes(F.getName(), F.hasOptNone(), |
2066 | 0 | CodeGenOpts, LangOpts, |
2067 | 0 | /*AttrOnCallSite=*/false, FuncAttrs); |
2068 | |
2069 | 0 | if (!WillInternalize && F.isInterposable()) { |
2070 | | // Do not promote "dynamic" denormal-fp-math to this translation unit's |
2071 | | // setting for weak functions that won't be internalized. The user has no |
2072 | | // real control over how builtin bitcode is linked, so we shouldn't assume |
2073 | | // later copies will use a consistent mode. |
2074 | 0 | F.addFnAttrs(FuncAttrs); |
2075 | 0 | return; |
2076 | 0 | } |
2077 | | |
2078 | 0 | llvm::AttributeMask AttrsToRemove; |
2079 | |
2080 | 0 | llvm::DenormalMode DenormModeToMerge = F.getDenormalModeRaw(); |
2081 | 0 | llvm::DenormalMode DenormModeToMergeF32 = F.getDenormalModeF32Raw(); |
2082 | 0 | llvm::DenormalMode Merged = |
2083 | 0 | CodeGenOpts.FPDenormalMode.mergeCalleeMode(DenormModeToMerge); |
2084 | 0 | llvm::DenormalMode MergedF32 = CodeGenOpts.FP32DenormalMode; |
2085 | |
2086 | 0 | if (DenormModeToMergeF32.isValid()) { |
2087 | 0 | MergedF32 = |
2088 | 0 | CodeGenOpts.FP32DenormalMode.mergeCalleeMode(DenormModeToMergeF32); |
2089 | 0 | } |
2090 | |
2091 | 0 | if (Merged == llvm::DenormalMode::getDefault()) { |
2092 | 0 | AttrsToRemove.addAttribute("denormal-fp-math"); |
2093 | 0 | } else if (Merged != DenormModeToMerge) { |
2094 | | // Overwrite existing attribute |
2095 | 0 | FuncAttrs.addAttribute("denormal-fp-math", |
2096 | 0 | CodeGenOpts.FPDenormalMode.str()); |
2097 | 0 | } |
2098 | |
2099 | 0 | if (MergedF32 == llvm::DenormalMode::getDefault()) { |
2100 | 0 | AttrsToRemove.addAttribute("denormal-fp-math-f32"); |
2101 | 0 | } else if (MergedF32 != DenormModeToMergeF32) { |
2102 | | // Overwrite existing attribute |
2103 | 0 | FuncAttrs.addAttribute("denormal-fp-math-f32", |
2104 | 0 | CodeGenOpts.FP32DenormalMode.str()); |
2105 | 0 | } |
2106 | |
2107 | 0 | F.removeFnAttrs(AttrsToRemove); |
2108 | 0 | addDenormalModeAttrs(Merged, MergedF32, FuncAttrs); |
2109 | |
2110 | 0 | overrideFunctionFeaturesWithTargetFeatures(FuncAttrs, F, TargetOpts); |
2111 | |
2112 | 0 | F.addFnAttrs(FuncAttrs); |
2113 | 0 | } |
2114 | | |
2115 | | void CodeGenModule::getTrivialDefaultFunctionAttributes( |
2116 | | StringRef Name, bool HasOptnone, bool AttrOnCallSite, |
2117 | 0 | llvm::AttrBuilder &FuncAttrs) { |
2118 | 0 | ::getTrivialDefaultFunctionAttributes(Name, HasOptnone, getCodeGenOpts(), |
2119 | 0 | getLangOpts(), AttrOnCallSite, |
2120 | 0 | FuncAttrs); |
2121 | 0 | } |
2122 | | |
2123 | | void CodeGenModule::getDefaultFunctionAttributes(StringRef Name, |
2124 | | bool HasOptnone, |
2125 | | bool AttrOnCallSite, |
2126 | 0 | llvm::AttrBuilder &FuncAttrs) { |
2127 | 0 | getTrivialDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, |
2128 | 0 | FuncAttrs); |
2129 | | // If we're just getting the default, get the default values for mergeable |
2130 | | // attributes. |
2131 | 0 | if (!AttrOnCallSite) |
2132 | 0 | addMergableDefaultFunctionAttributes(CodeGenOpts, FuncAttrs); |
2133 | 0 | } |
2134 | | |
2135 | | void CodeGenModule::addDefaultFunctionDefinitionAttributes( |
2136 | 0 | llvm::AttrBuilder &attrs) { |
2137 | 0 | getDefaultFunctionAttributes(/*function name*/ "", /*optnone*/ false, |
2138 | 0 | /*for call*/ false, attrs); |
2139 | 0 | GetCPUAndFeaturesAttributes(GlobalDecl(), attrs); |
2140 | 0 | } |
2141 | | |
2142 | | static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs, |
2143 | | const LangOptions &LangOpts, |
2144 | 0 | const NoBuiltinAttr *NBA = nullptr) { |
2145 | 0 | auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) { |
2146 | 0 | SmallString<32> AttributeName; |
2147 | 0 | AttributeName += "no-builtin-"; |
2148 | 0 | AttributeName += BuiltinName; |
2149 | 0 | FuncAttrs.addAttribute(AttributeName); |
2150 | 0 | }; |
2151 | | |
2152 | | // First, handle the language options passed through -fno-builtin. |
2153 | 0 | if (LangOpts.NoBuiltin) { |
2154 | | // -fno-builtin disables them all. |
2155 | 0 | FuncAttrs.addAttribute("no-builtins"); |
2156 | 0 | return; |
2157 | 0 | } |
2158 | | |
2159 | | // Then, add attributes for builtins specified through -fno-builtin-<name>. |
2160 | 0 | llvm::for_each(LangOpts.NoBuiltinFuncs, AddNoBuiltinAttr); |
2161 | | |
2162 | | // Now, let's check the __attribute__((no_builtin("...")) attribute added to |
2163 | | // the source. |
2164 | 0 | if (!NBA) |
2165 | 0 | return; |
2166 | | |
2167 | | // If there is a wildcard in the builtin names specified through the |
2168 | | // attribute, disable them all. |
2169 | 0 | if (llvm::is_contained(NBA->builtinNames(), "*")) { |
2170 | 0 | FuncAttrs.addAttribute("no-builtins"); |
2171 | 0 | return; |
2172 | 0 | } |
2173 | | |
2174 | | // And last, add the rest of the builtin names. |
2175 | 0 | llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr); |
2176 | 0 | } |
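Reduced to its decision tree, the helper above does roughly the following. The sketch is standalone and hedged: the option values and containers are illustrative, not Clang's data structures.

// Standalone sketch only: precedence of -fno-builtin, per-name -fno-builtin-<name>
// options, and a '*' wildcard inside __attribute__((no_builtin(...))).
#include <algorithm>
#include <iostream>
#include <set>
#include <string>
#include <vector>

int main() {
  bool NoBuiltin = false;                                  // -fno-builtin given?
  std::vector<std::string> NoBuiltinFuncs = {"memcpy"};    // -fno-builtin-memcpy
  std::vector<std::string> AttrNames = {"printf"};         // no_builtin("printf")

  std::set<std::string> FuncAttrs;
  if (NoBuiltin) {
    FuncAttrs.insert("no-builtins");                       // blanket attribute, done
  } else {
    for (const std::string &Name : NoBuiltinFuncs)
      FuncAttrs.insert("no-builtin-" + Name);
    if (std::find(AttrNames.begin(), AttrNames.end(), "*") != AttrNames.end())
      FuncAttrs.insert("no-builtins");                     // wildcard disables all
    else
      for (const std::string &Name : AttrNames)
        FuncAttrs.insert("no-builtin-" + Name);
  }
  for (const std::string &A : FuncAttrs)
    std::cout << A << '\n';                 // no-builtin-memcpy, no-builtin-printf
  return 0;
}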
2177 | | |
2178 | | static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types, |
2179 | | const llvm::DataLayout &DL, const ABIArgInfo &AI, |
2180 | 0 | bool CheckCoerce = true) { |
2181 | 0 | llvm::Type *Ty = Types.ConvertTypeForMem(QTy); |
2182 | 0 | if (AI.getKind() == ABIArgInfo::Indirect || |
2183 | 0 | AI.getKind() == ABIArgInfo::IndirectAliased) |
2184 | 0 | return true; |
2185 | 0 | if (AI.getKind() == ABIArgInfo::Extend) |
2186 | 0 | return true; |
2187 | 0 | if (!DL.typeSizeEqualsStoreSize(Ty)) |
2188 | | // TODO: This will result in a modest amount of values not marked noundef |
2189 | | // when they could be. We care about values that *invisibly* contain undef |
2190 | | // bits from the perspective of LLVM IR. |
2191 | 0 | return false; |
2192 | 0 | if (CheckCoerce && AI.canHaveCoerceToType()) { |
2193 | 0 | llvm::Type *CoerceTy = AI.getCoerceToType(); |
2194 | 0 | if (llvm::TypeSize::isKnownGT(DL.getTypeSizeInBits(CoerceTy), |
2195 | 0 | DL.getTypeSizeInBits(Ty))) |
2196 | | // If we're coercing to a type with a greater size than the canonical one, |
2197 | | // we're introducing new undef bits. |
2198 | | // Coercing to a type of smaller or equal size is ok, as we know that |
2199 | | // there's no internal padding (typeSizeEqualsStoreSize). |
2200 | 0 | return false; |
2201 | 0 | } |
2202 | 0 | if (QTy->isBitIntType()) |
2203 | 0 | return true; |
2204 | 0 | if (QTy->isReferenceType()) |
2205 | 0 | return true; |
2206 | 0 | if (QTy->isNullPtrType()) |
2207 | 0 | return false; |
2208 | 0 | if (QTy->isMemberPointerType()) |
2209 | | // TODO: Some member pointers are `noundef`, but it depends on the ABI. For |
2210 | | // now, never mark them. |
2211 | 0 | return false; |
2212 | 0 | if (QTy->isScalarType()) { |
2213 | 0 | if (const ComplexType *Complex = dyn_cast<ComplexType>(QTy)) |
2214 | 0 | return DetermineNoUndef(Complex->getElementType(), Types, DL, AI, false); |
2215 | 0 | return true; |
2216 | 0 | } |
2217 | 0 | if (const VectorType *Vector = dyn_cast<VectorType>(QTy)) |
2218 | 0 | return DetermineNoUndef(Vector->getElementType(), Types, DL, AI, false); |
2219 | 0 | if (const MatrixType *Matrix = dyn_cast<MatrixType>(QTy)) |
2220 | 0 | return DetermineNoUndef(Matrix->getElementType(), Types, DL, AI, false); |
2221 | 0 | if (const ArrayType *Array = dyn_cast<ArrayType>(QTy)) |
2222 | 0 | return DetermineNoUndef(Array->getElementType(), Types, DL, AI, false); |
2223 | | |
2224 | | // TODO: Some structs may be `noundef`, in specific situations. |
2225 | 0 | return false; |
2226 | 0 | } |
2227 | | |
2228 | | /// Check if the argument of a function has maybe_undef attribute. |
2229 | | static bool IsArgumentMaybeUndef(const Decl *TargetDecl, |
2230 | 0 | unsigned NumRequiredArgs, unsigned ArgNo) { |
2231 | 0 | const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl); |
2232 | 0 | if (!FD) |
2233 | 0 | return false; |
2234 | | |
2235 | | // Assume variadic arguments do not have maybe_undef attribute. |
2236 | 0 | if (ArgNo >= NumRequiredArgs) |
2237 | 0 | return false; |
2238 | | |
2239 | | // Check if argument has maybe_undef attribute. |
2240 | 0 | if (ArgNo < FD->getNumParams()) { |
2241 | 0 | const ParmVarDecl *Param = FD->getParamDecl(ArgNo); |
2242 | 0 | if (Param && Param->hasAttr<MaybeUndefAttr>()) |
2243 | 0 | return true; |
2244 | 0 | } |
2245 | | |
2246 | 0 | return false; |
2247 | 0 | } |
2248 | | |
2249 | | /// Test if it's legal to apply nofpclass for the given parameter type and its |
2250 | | /// lowered IR type. |
2251 | | static bool canApplyNoFPClass(const ABIArgInfo &AI, QualType ParamType, |
2252 | 0 | bool IsReturn) { |
2253 | | // Should only apply to FP types in the source, not ABI promoted. |
2254 | 0 | if (!ParamType->hasFloatingRepresentation()) |
2255 | 0 | return false; |
2256 | | |
2257 | | // The promoted-to IR type also needs to support nofpclass. |
2258 | 0 | llvm::Type *IRTy = AI.getCoerceToType(); |
2259 | 0 | if (llvm::AttributeFuncs::isNoFPClassCompatibleType(IRTy)) |
2260 | 0 | return true; |
2261 | | |
2262 | 0 | if (llvm::StructType *ST = dyn_cast<llvm::StructType>(IRTy)) { |
2263 | 0 | return !IsReturn && AI.getCanBeFlattened() && |
2264 | 0 | llvm::all_of(ST->elements(), [](llvm::Type *Ty) { |
2265 | 0 | return llvm::AttributeFuncs::isNoFPClassCompatibleType(Ty); |
2266 | 0 | }); |
2267 | 0 | } |
2268 | | |
2269 | 0 | return false; |
2270 | 0 | } |
2271 | | |
2272 | | /// Return the nofpclass mask that can be applied to floating-point parameters. |
2273 | 0 | static llvm::FPClassTest getNoFPClassTestMask(const LangOptions &LangOpts) { |
2274 | 0 | llvm::FPClassTest Mask = llvm::fcNone; |
2275 | 0 | if (LangOpts.NoHonorInfs) |
2276 | 0 | Mask |= llvm::fcInf; |
2277 | 0 | if (LangOpts.NoHonorNaNs) |
2278 | 0 | Mask |= llvm::fcNan; |
2279 | 0 | return Mask; |
2280 | 0 | } |
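The mask construction above is just two conditional bit-ors. The standalone sketch below uses illustrative enum values as stand-ins for llvm::FPClassTest (the real bit values differ), with the two language options as plain booleans.

// Standalone sketch only: each "don't honor" option contributes one bit to the
// nofpclass test mask.
#include <cstdio>

enum FPClassBits : unsigned { fcNone = 0, fcNan = 1u << 0, fcInf = 1u << 1 };

int main() {
  bool NoHonorInfs = true, NoHonorNaNs = false;
  unsigned Mask = fcNone;
  if (NoHonorInfs) Mask |= fcInf;
  if (NoHonorNaNs) Mask |= fcNan;
  std::printf("nofpclass mask: %u\n", Mask);   // 2: only infinities are excluded
  return 0;
}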
2281 | | |
2282 | | void CodeGenModule::AdjustMemoryAttribute(StringRef Name, |
2283 | | CGCalleeInfo CalleeInfo, |
2284 | 0 | llvm::AttributeList &Attrs) { |
2285 | 0 | if (Attrs.getMemoryEffects().getModRef() == llvm::ModRefInfo::NoModRef) { |
2286 | 0 | Attrs = Attrs.removeFnAttribute(getLLVMContext(), llvm::Attribute::Memory); |
2287 | 0 | llvm::Attribute MemoryAttr = llvm::Attribute::getWithMemoryEffects( |
2288 | 0 | getLLVMContext(), llvm::MemoryEffects::writeOnly()); |
2289 | 0 | Attrs = Attrs.addFnAttribute(getLLVMContext(), MemoryAttr); |
2290 | 0 | } |
2291 | 0 | } |
2292 | | |
2293 | | /// Construct the IR attribute list of a function or call. |
2294 | | /// |
2295 | | /// When adding an attribute, please consider where it should be handled: |
2296 | | /// |
2297 | | /// - getDefaultFunctionAttributes is for attributes that are essentially |
2298 | | /// part of the global target configuration (but perhaps can be |
2299 | | /// overridden on a per-function basis). Adding attributes there |
2300 | | /// will cause them to also be set in frontends that build on Clang's |
2301 | | /// target-configuration logic, as well as for code defined in library |
2302 | | /// modules such as CUDA's libdevice. |
2303 | | /// |
2304 | | /// - ConstructAttributeList builds on top of getDefaultFunctionAttributes |
2305 | | /// and adds declaration-specific, convention-specific, and |
2306 | | /// frontend-specific logic. The last is of particular importance: |
2307 | | /// attributes that restrict how the frontend generates code must be |
2308 | | /// added here rather than getDefaultFunctionAttributes. |
2309 | | /// |
2310 | | void CodeGenModule::ConstructAttributeList(StringRef Name, |
2311 | | const CGFunctionInfo &FI, |
2312 | | CGCalleeInfo CalleeInfo, |
2313 | | llvm::AttributeList &AttrList, |
2314 | | unsigned &CallingConv, |
2315 | 0 | bool AttrOnCallSite, bool IsThunk) { |
2316 | 0 | llvm::AttrBuilder FuncAttrs(getLLVMContext()); |
2317 | 0 | llvm::AttrBuilder RetAttrs(getLLVMContext()); |
2318 | | |
2319 | | // Collect function IR attributes from the CC lowering. |
2320 | | // We'll collect the parameter and result attributes later. |
2321 | 0 | CallingConv = FI.getEffectiveCallingConvention(); |
2322 | 0 | if (FI.isNoReturn()) |
2323 | 0 | FuncAttrs.addAttribute(llvm::Attribute::NoReturn); |
2324 | 0 | if (FI.isCmseNSCall()) |
2325 | 0 | FuncAttrs.addAttribute("cmse_nonsecure_call"); |
2326 | | |
2327 | | // Collect function IR attributes from the callee prototype if we have one. |
2328 | 0 | AddAttributesFromFunctionProtoType(getContext(), FuncAttrs, |
2329 | 0 | CalleeInfo.getCalleeFunctionProtoType()); |
2330 | |
2331 | 0 | const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl(); |
2332 | | |
2333 | | // Attach assumption attributes to the declaration. If this is a call |
2334 | | // site, attach assumptions from the caller to the call as well. |
2335 | 0 | AddAttributesFromAssumes(FuncAttrs, TargetDecl); |
2336 | |
2337 | 0 | bool HasOptnone = false; |
2338 | | // The NoBuiltinAttr attached to the target FunctionDecl. |
2339 | 0 | const NoBuiltinAttr *NBA = nullptr; |
2340 | | |
2341 | | // Some ABIs may result in additional accesses to arguments that may |
2342 | | // otherwise not be present. |
2343 | 0 | auto AddPotentialArgAccess = [&]() { |
2344 | 0 | llvm::Attribute A = FuncAttrs.getAttribute(llvm::Attribute::Memory); |
2345 | 0 | if (A.isValid()) |
2346 | 0 | FuncAttrs.addMemoryAttr(A.getMemoryEffects() | |
2347 | 0 | llvm::MemoryEffects::argMemOnly()); |
2348 | 0 | }; |
2349 | | |
2350 | | // Collect function IR attributes based on declaration-specific |
2351 | | // information. |
2352 | | // FIXME: handle sseregparm someday... |
2353 | 0 | if (TargetDecl) { |
2354 | 0 | if (TargetDecl->hasAttr<ReturnsTwiceAttr>()) |
2355 | 0 | FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice); |
2356 | 0 | if (TargetDecl->hasAttr<NoThrowAttr>()) |
2357 | 0 | FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); |
2358 | 0 | if (TargetDecl->hasAttr<NoReturnAttr>()) |
2359 | 0 | FuncAttrs.addAttribute(llvm::Attribute::NoReturn); |
2360 | 0 | if (TargetDecl->hasAttr<ColdAttr>()) |
2361 | 0 | FuncAttrs.addAttribute(llvm::Attribute::Cold); |
2362 | 0 | if (TargetDecl->hasAttr<HotAttr>()) |
2363 | 0 | FuncAttrs.addAttribute(llvm::Attribute::Hot); |
2364 | 0 | if (TargetDecl->hasAttr<NoDuplicateAttr>()) |
2365 | 0 | FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate); |
2366 | 0 | if (TargetDecl->hasAttr<ConvergentAttr>()) |
2367 | 0 | FuncAttrs.addAttribute(llvm::Attribute::Convergent); |
2368 | |
2369 | 0 | if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) { |
2370 | 0 | AddAttributesFromFunctionProtoType( |
2371 | 0 | getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>()); |
2372 | 0 | if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) { |
2373 | | // A sane operator new returns a non-aliasing pointer. |
2374 | 0 | auto Kind = Fn->getDeclName().getCXXOverloadedOperator(); |
2375 | 0 | if (getCodeGenOpts().AssumeSaneOperatorNew && |
2376 | 0 | (Kind == OO_New || Kind == OO_Array_New)) |
2377 | 0 | RetAttrs.addAttribute(llvm::Attribute::NoAlias); |
2378 | 0 | } |
2379 | 0 | const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn); |
2380 | 0 | const bool IsVirtualCall = MD && MD->isVirtual(); |
2381 | | // Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a |
2382 | | // virtual function. These attributes are not inherited by overrides. |
2383 | 0 | if (!(AttrOnCallSite && IsVirtualCall)) { |
2384 | 0 | if (Fn->isNoReturn()) |
2385 | 0 | FuncAttrs.addAttribute(llvm::Attribute::NoReturn); |
2386 | 0 | NBA = Fn->getAttr<NoBuiltinAttr>(); |
2387 | 0 | } |
2388 | 0 | } |
2389 | |
2390 | 0 | if (isa<FunctionDecl>(TargetDecl) || isa<VarDecl>(TargetDecl)) { |
2391 | | // Only place nomerge attribute on call sites, never functions. This |
2392 | | // allows it to work on indirect virtual function calls. |
2393 | 0 | if (AttrOnCallSite && TargetDecl->hasAttr<NoMergeAttr>()) |
2394 | 0 | FuncAttrs.addAttribute(llvm::Attribute::NoMerge); |
2395 | 0 | } |
2396 | | |
2397 | | // 'const', 'pure' and 'noalias' attributed functions are also nounwind. |
2398 | 0 | if (TargetDecl->hasAttr<ConstAttr>()) { |
2399 | 0 | FuncAttrs.addMemoryAttr(llvm::MemoryEffects::none()); |
2400 | 0 | FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); |
2401 | | // gcc specifies that 'const' functions have greater restrictions than |
2402 | | // 'pure' functions, so they also cannot have infinite loops. |
2403 | 0 | FuncAttrs.addAttribute(llvm::Attribute::WillReturn); |
2404 | 0 | } else if (TargetDecl->hasAttr<PureAttr>()) { |
2405 | 0 | FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly()); |
2406 | 0 | FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); |
2407 | | // gcc specifies that 'pure' functions cannot have infinite loops. |
2408 | 0 | FuncAttrs.addAttribute(llvm::Attribute::WillReturn); |
2409 | 0 | } else if (TargetDecl->hasAttr<NoAliasAttr>()) { |
2410 | 0 | FuncAttrs.addMemoryAttr(llvm::MemoryEffects::inaccessibleOrArgMemOnly()); |
2411 | 0 | FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); |
2412 | 0 | } |
2413 | 0 | if (TargetDecl->hasAttr<RestrictAttr>()) |
2414 | 0 | RetAttrs.addAttribute(llvm::Attribute::NoAlias); |
2415 | 0 | if (TargetDecl->hasAttr<ReturnsNonNullAttr>() && |
2416 | 0 | !CodeGenOpts.NullPointerIsValid) |
2417 | 0 | RetAttrs.addAttribute(llvm::Attribute::NonNull); |
2418 | 0 | if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>()) |
2419 | 0 | FuncAttrs.addAttribute("no_caller_saved_registers"); |
2420 | 0 | if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>()) |
2421 | 0 | FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck); |
2422 | 0 | if (TargetDecl->hasAttr<LeafAttr>()) |
2423 | 0 | FuncAttrs.addAttribute(llvm::Attribute::NoCallback); |
2424 | |
|
2425 | 0 | HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>(); |
2426 | 0 | if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) { |
2427 | 0 | std::optional<unsigned> NumElemsParam; |
2428 | 0 | if (AllocSize->getNumElemsParam().isValid()) |
2429 | 0 | NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex(); |
2430 | 0 | FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(), |
2431 | 0 | NumElemsParam); |
2432 | 0 | } |
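 | | // Illustrative example (hypothetical declaration): for
 | | // `__attribute__((alloc_size(1, 2))) void *my_calloc(size_t n, size_t m);`
 | | // the attribute's 1-based parameter indices are converted by getLLVMIndex()
 | | // into the 0-based IR form `allocsize(0, 1)`.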
2433 | |
|
2434 | 0 | if (TargetDecl->hasAttr<OpenCLKernelAttr>()) { |
2435 | 0 | if (getLangOpts().OpenCLVersion <= 120) { |
2436 | | // OpenCL v1.2: work groups are always uniform.
2437 | 0 | FuncAttrs.addAttribute("uniform-work-group-size", "true"); |
2438 | 0 | } else { |
2439 | | // OpenCL v2.0: work groups may or may not be uniform. The
2440 | | // '-cl-uniform-work-group-size' compile option gives the compiler a
2441 | | // hint that the global work-size is a multiple of the work-group
2442 | | // size specified to clEnqueueNDRangeKernel (i.e. work groups are
2443 | | // uniform).
2444 | 0 | FuncAttrs.addAttribute( |
2445 | 0 | "uniform-work-group-size", |
2446 | 0 | llvm::toStringRef(getLangOpts().OffloadUniformBlock)); |
2447 | 0 | } |
2448 | 0 | } |
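 | | // Illustrative example: an OpenCL C 1.2 kernel always gets
 | | // "uniform-work-group-size"="true", whereas for OpenCL 2.0+ the value
 | | // follows the -cl-uniform-work-group-size hint recorded in
 | | // getLangOpts().OffloadUniformBlock.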
2449 | |
|
2450 | 0 | if (TargetDecl->hasAttr<CUDAGlobalAttr>() && |
2451 | 0 | getLangOpts().OffloadUniformBlock) |
2452 | 0 | FuncAttrs.addAttribute("uniform-work-group-size", "true"); |
2453 | |
|
2454 | 0 | if (TargetDecl->hasAttr<ArmLocallyStreamingAttr>()) |
2455 | 0 | FuncAttrs.addAttribute("aarch64_pstate_sm_body"); |
2456 | 0 | } |
2457 | | |
2458 | | // Attach "no-builtins" attributes to: |
2459 | | // * call sites: both `nobuiltin` and "no-builtins" or "no-builtin-<name>". |
2460 | | // * definitions: "no-builtins" or "no-builtin-<name>" only. |
2461 | | // The attributes can come from: |
2462 | | // * LangOpts: -ffreestanding, -fno-builtin, -fno-builtin-<name> |
2463 | | // * FunctionDecl attributes: __attribute__((no_builtin(...))) |
2464 | 0 | addNoBuiltinAttributes(FuncAttrs, getLangOpts(), NBA); |
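 | | // Illustrative example: -fno-builtin-memcpy produces the string attribute
 | | // "no-builtin-memcpy", -fno-builtin / -ffreestanding produce "no-builtins",
 | | // and call sites additionally receive the `nobuiltin` IR attribute.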
2465 | | |
2466 | | // Collect function IR attributes based on global settings.
2467 | 0 | getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs); |
2468 | | |
2469 | | // Override some default IR attributes based on declaration-specific |
2470 | | // information. |
2471 | 0 | if (TargetDecl) { |
2472 | 0 | if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>()) |
2473 | 0 | FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening); |
2474 | 0 | if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>()) |
2475 | 0 | FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening); |
2476 | 0 | if (TargetDecl->hasAttr<NoSplitStackAttr>()) |
2477 | 0 | FuncAttrs.removeAttribute("split-stack"); |
2478 | 0 | if (TargetDecl->hasAttr<ZeroCallUsedRegsAttr>()) { |
2479 | | // A function "__attribute__((...))" overrides the command-line flag. |
2480 | 0 | auto Kind = |
2481 | 0 | TargetDecl->getAttr<ZeroCallUsedRegsAttr>()->getZeroCallUsedRegs(); |
2482 | 0 | FuncAttrs.removeAttribute("zero-call-used-regs"); |
2483 | 0 | FuncAttrs.addAttribute( |
2484 | 0 | "zero-call-used-regs", |
2485 | 0 | ZeroCallUsedRegsAttr::ConvertZeroCallUsedRegsKindToStr(Kind)); |
2486 | 0 | } |
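 | | // Illustrative example (hypothetical function): with
 | | // `__attribute__((zero_call_used_regs("used-gpr"))) void f(void);`
 | | // the default chosen by -fzero-call-used-regs is replaced by
 | | // "zero-call-used-regs"="used-gpr" on the emitted function.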
2487 | | |
2488 | | // Add NonLazyBind attribute to function declarations when -fno-plt |
2489 | | // is used. |
2490 | | // FIXME: what if we just haven't processed the function definition |
2491 | | // yet, or if it's an external definition like C99 inline? |
2492 | 0 | if (CodeGenOpts.NoPLT) { |
2493 | 0 | if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) { |
2494 | 0 | if (!Fn->isDefined() && !AttrOnCallSite) { |
2495 | 0 | FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind); |
2496 | 0 | } |
2497 | 0 | } |
2498 | 0 | } |
2499 | 0 | } |
2500 | | |
2501 | | // Add "sample-profile-suffix-elision-policy" attribute for internal linkage |
2502 | | // functions with -funique-internal-linkage-names. |
2503 | 0 | if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) { |
2504 | 0 | if (const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) { |
2505 | 0 | if (!FD->isExternallyVisible()) |
2506 | 0 | FuncAttrs.addAttribute("sample-profile-suffix-elision-policy", |
2507 | 0 | "selected"); |
2508 | 0 | } |
2509 | 0 | } |
2510 | | |
2511 | | // Collect non-call-site function IR attributes from declaration-specific |
2512 | | // information. |
2513 | 0 | if (!AttrOnCallSite) { |
2514 | 0 | if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>()) |
2515 | 0 | FuncAttrs.addAttribute("cmse_nonsecure_entry"); |
2516 | | |
2517 | | // Decide whether tail calls should be disabled for this function.
2518 | 0 | auto shouldDisableTailCalls = [&] { |
2519 | | // Should this be honored in getDefaultFunctionAttributes? |
2520 | 0 | if (CodeGenOpts.DisableTailCalls) |
2521 | 0 | return true; |
2522 | | |
2523 | 0 | if (!TargetDecl) |
2524 | 0 | return false; |
2525 | | |
2526 | 0 | if (TargetDecl->hasAttr<DisableTailCallsAttr>() || |
2527 | 0 | TargetDecl->hasAttr<AnyX86InterruptAttr>()) |
2528 | 0 | return true; |
2529 | | |
2530 | 0 | if (CodeGenOpts.NoEscapingBlockTailCalls) { |
2531 | 0 | if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl)) |
2532 | 0 | if (!BD->doesNotEscape()) |
2533 | 0 | return true; |
2534 | 0 | } |
2535 | | |
2536 | 0 | return false; |
2537 | 0 | }; |
2538 | 0 | if (shouldDisableTailCalls()) |
2539 | 0 | FuncAttrs.addAttribute("disable-tail-calls", "true"); |
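 | | // Illustrative example: a function marked
 | | // `__attribute__((disable_tail_calls))` (or every function when
 | | // CodeGenOpts.DisableTailCalls is set) is emitted with
 | | // "disable-tail-calls"="true" on its definition.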
2540 | | |
2541 | | // CPU/feature overrides. addDefaultFunctionDefinitionAttributes |
2542 | | // handles these separately to set them based on the global defaults. |
2543 | 0 | GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs); |
2544 | 0 | } |
2545 | | |
2546 | | // Collect attributes from arguments and return values. |
2547 | 0 | ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI); |
2548 | |
|
2549 | 0 | QualType RetTy = FI.getReturnType(); |
2550 | 0 | const ABIArgInfo &RetAI = FI.getReturnInfo(); |
2551 | 0 | const llvm::DataLayout &DL = getDataLayout(); |
2552 | | |
2553 | | // Determine if the return type could be partially undef |
2554 | 0 | if (CodeGenOpts.EnableNoundefAttrs && |
2555 | 0 | HasStrictReturn(*this, RetTy, TargetDecl)) { |
2556 | 0 | if (!RetTy->isVoidType() && RetAI.getKind() != ABIArgInfo::Indirect && |
2557 | 0 | DetermineNoUndef(RetTy, getTypes(), DL, RetAI)) |
2558 | 0 | RetAttrs.addAttribute(llvm::Attribute::NoUndef); |
2559 | 0 | } |
2560 | |
|
2561 | 0 | switch (RetAI.getKind()) { |
2562 | 0 | case ABIArgInfo::Extend: |
2563 | 0 | if (RetAI.isSignExt()) |
2564 | 0 | RetAttrs.addAttribute(llvm::Attribute::SExt); |
2565 | 0 | else |
2566 | 0 | RetAttrs.addAttribute(llvm::Attribute::ZExt); |
2567 | 0 | [[fallthrough]]; |
2568 | 0 | case ABIArgInfo::Direct: |
2569 | 0 | if (RetAI.getInReg()) |
2570 | 0 | RetAttrs.addAttribute(llvm::Attribute::InReg); |
2571 | |
|
2572 | 0 | if (canApplyNoFPClass(RetAI, RetTy, true)) |
2573 | 0 | RetAttrs.addNoFPClassAttr(getNoFPClassTestMask(getLangOpts())); |
2574 | |
|
2575 | 0 | break; |
2576 | 0 | case ABIArgInfo::Ignore: |
2577 | 0 | break; |
2578 | | |
2579 | 0 | case ABIArgInfo::InAlloca: |
2580 | 0 | case ABIArgInfo::Indirect: { |
2581 | | // inalloca and sret disable readnone and readonly |
2582 | 0 | AddPotentialArgAccess(); |
2583 | 0 | break; |
2584 | 0 | } |
2585 | | |
2586 | 0 | case ABIArgInfo::CoerceAndExpand: |
2587 | 0 | break; |
2588 | | |
2589 | 0 | case ABIArgInfo::Expand: |
2590 | 0 | case ABIArgInfo::IndirectAliased: |
2591 | 0 | llvm_unreachable("Invalid ABI kind for return argument"); |
2592 | 0 | } |
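 | | // Illustrative example: on a target whose ABI extends narrow integer
 | | // returns (e.g. x86-64), `signed char f();` is emitted roughly as
 | | // `define signext i8 @f()`, and an unsigned narrow return gets `zeroext`.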
2593 | | |
2594 | 0 | if (!IsThunk) { |
2595 | | // FIXME: fix this properly, https://reviews.llvm.org/D100388 |
2596 | 0 | if (const auto *RefTy = RetTy->getAs<ReferenceType>()) { |
2597 | 0 | QualType PTy = RefTy->getPointeeType(); |
2598 | 0 | if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) |
2599 | 0 | RetAttrs.addDereferenceableAttr( |
2600 | 0 | getMinimumObjectSize(PTy).getQuantity()); |
2601 | 0 | if (getTypes().getTargetAddressSpace(PTy) == 0 && |
2602 | 0 | !CodeGenOpts.NullPointerIsValid) |
2603 | 0 | RetAttrs.addAttribute(llvm::Attribute::NonNull); |
2604 | 0 | if (PTy->isObjectType()) { |
2605 | 0 | llvm::Align Alignment = |
2606 | 0 | getNaturalPointeeTypeAlignment(RetTy).getAsAlign(); |
2607 | 0 | RetAttrs.addAlignmentAttr(Alignment); |
2608 | 0 | } |
2609 | 0 | } |
2610 | 0 | } |
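 | | // Illustrative example (hypothetical declaration): on a typical target,
 | | // `int &get();` returns with `nonnull dereferenceable(4) align 4` under
 | | // the default configuration (NullPointerIsValid off).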
2611 | |
|
2612 | 0 | bool hasUsedSRet = false; |
2613 | 0 | SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs()); |
2614 | | |
2615 | | // Attach attributes to sret. |
2616 | 0 | if (IRFunctionArgs.hasSRetArg()) { |
2617 | 0 | llvm::AttrBuilder SRETAttrs(getLLVMContext()); |
2618 | 0 | SRETAttrs.addStructRetAttr(getTypes().ConvertTypeForMem(RetTy)); |
2619 | 0 | SRETAttrs.addAttribute(llvm::Attribute::Writable); |
2620 | 0 | SRETAttrs.addAttribute(llvm::Attribute::DeadOnUnwind); |
2621 | 0 | hasUsedSRet = true; |
2622 | 0 | if (RetAI.getInReg()) |
2623 | 0 | SRETAttrs.addAttribute(llvm::Attribute::InReg); |
2624 | 0 | SRETAttrs.addAlignmentAttr(RetAI.getIndirectAlign().getQuantity()); |
2625 | 0 | ArgAttrs[IRFunctionArgs.getSRetArgNo()] = |
2626 | 0 | llvm::AttributeSet::get(getLLVMContext(), SRETAttrs); |
2627 | 0 | } |
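 | | // Illustrative example: an indirect (sret) return such as
 | | // `struct Big { int a[16]; } make();` yields a leading IR argument roughly
 | | // of the form `ptr sret(%struct.Big) writable dead_on_unwind align 4`,
 | | // plus `inreg` when the ABI requests it.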
2628 | | |
2629 | | // Attach attributes to inalloca argument. |
2630 | 0 | if (IRFunctionArgs.hasInallocaArg()) { |
2631 | 0 | llvm::AttrBuilder Attrs(getLLVMContext()); |
2632 | 0 | Attrs.addInAllocaAttr(FI.getArgStruct()); |
2633 | 0 | ArgAttrs[IRFunctionArgs.getInallocaArgNo()] = |
2634 | 0 | llvm::AttributeSet::get(getLLVMContext(), Attrs); |
2635 | 0 | } |
2636 | | |
2637 | | // Apply `nonnull`, `dereferencable(N)` and `align N` to the `this` argument, |
2638 | | // unless this is a thunk function. |
2639 | | // FIXME: fix this properly, https://reviews.llvm.org/D100388 |
2640 | 0 | if (FI.isInstanceMethod() && !IRFunctionArgs.hasInallocaArg() && |
2641 | 0 | !FI.arg_begin()->type->isVoidPointerType() && !IsThunk) { |
2642 | 0 | auto IRArgs = IRFunctionArgs.getIRArgs(0); |
2643 | |
|
2644 | 0 | assert(IRArgs.second == 1 && "Expected only a single `this` pointer."); |
2645 | | |
2646 | 0 | llvm::AttrBuilder Attrs(getLLVMContext()); |
2647 | |
|
2648 | 0 | QualType ThisTy = |
2649 | 0 | FI.arg_begin()->type.getTypePtr()->getPointeeType(); |
2650 | |
|
2651 | 0 | if (!CodeGenOpts.NullPointerIsValid && |
2652 | 0 | getTypes().getTargetAddressSpace(FI.arg_begin()->type) == 0) { |
2653 | 0 | Attrs.addAttribute(llvm::Attribute::NonNull); |
2654 | 0 | Attrs.addDereferenceableAttr(getMinimumObjectSize(ThisTy).getQuantity()); |
2655 | 0 | } else { |
2656 | | // FIXME dereferenceable should be correct here, regardless of |
2657 | | // NullPointerIsValid. However, dereferenceable currently does not always |
2658 | | // respect NullPointerIsValid and may imply nonnull and break the program. |
2659 | | // See https://reviews.llvm.org/D66618 for discussions. |
2660 | 0 | Attrs.addDereferenceableOrNullAttr( |
2661 | 0 | getMinimumObjectSize( |
2662 | 0 | FI.arg_begin()->type.castAs<PointerType>()->getPointeeType()) |
2663 | 0 | .getQuantity()); |
2664 | 0 | } |
2665 | |
|
2666 | 0 | llvm::Align Alignment = |
2667 | 0 | getNaturalTypeAlignment(ThisTy, /*BaseInfo=*/nullptr, |
2668 | 0 | /*TBAAInfo=*/nullptr, /*forPointeeType=*/true) |
2669 | 0 | .getAsAlign(); |
2670 | 0 | Attrs.addAlignmentAttr(Alignment); |
2671 | |
|
2672 | 0 | ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(getLLVMContext(), Attrs); |
2673 | 0 | } |
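 | | // Illustrative example: for a non-thunk instance method `void C::m()`,
 | | // `%this` typically gets `nonnull dereferenceable(sizeof(C))` plus an
 | | // `align` matching C's natural alignment; under
 | | // -fno-delete-null-pointer-checks it gets `dereferenceable_or_null` instead.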
2674 | | |
2675 | 0 | unsigned ArgNo = 0; |
2676 | 0 | for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(), |
2677 | 0 | E = FI.arg_end(); |
2678 | 0 | I != E; ++I, ++ArgNo) { |
2679 | 0 | QualType ParamType = I->type; |
2680 | 0 | const ABIArgInfo &AI = I->info; |
2681 | 0 | llvm::AttrBuilder Attrs(getLLVMContext()); |
2682 | | |
2683 | | // Add attribute for padding argument, if necessary. |
2684 | 0 | if (IRFunctionArgs.hasPaddingArg(ArgNo)) { |
2685 | 0 | if (AI.getPaddingInReg()) { |
2686 | 0 | ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] = |
2687 | 0 | llvm::AttributeSet::get( |
2688 | 0 | getLLVMContext(), |
2689 | 0 | llvm::AttrBuilder(getLLVMContext()).addAttribute(llvm::Attribute::InReg)); |
2690 | 0 | } |
2691 | 0 | } |
2692 | | |
2693 | | // Decide whether the argument we're handling could be partially undef |
2694 | 0 | if (CodeGenOpts.EnableNoundefAttrs && |
2695 | 0 | DetermineNoUndef(ParamType, getTypes(), DL, AI)) { |
2696 | 0 | Attrs.addAttribute(llvm::Attribute::NoUndef); |
2697 | 0 | } |
2698 | | |
2699 | | // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we |
2700 | | // have the corresponding parameter variable. It doesn't make |
2701 | | // sense to do it here because parameters are so messed up. |
2702 | 0 | switch (AI.getKind()) { |
2703 | 0 | case ABIArgInfo::Extend: |
2704 | 0 | if (AI.isSignExt()) |
2705 | 0 | Attrs.addAttribute(llvm::Attribute::SExt); |
2706 | 0 | else |
2707 | 0 | Attrs.addAttribute(llvm::Attribute::ZExt); |
2708 | 0 | [[fallthrough]]; |
2709 | 0 | case ABIArgInfo::Direct: |
2710 | 0 | if (ArgNo == 0 && FI.isChainCall()) |
2711 | 0 | Attrs.addAttribute(llvm::Attribute::Nest); |
2712 | 0 | else if (AI.getInReg()) |
2713 | 0 | Attrs.addAttribute(llvm::Attribute::InReg); |
2714 | 0 | Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.getDirectAlign())); |
2715 | |
|
2716 | 0 | if (canApplyNoFPClass(AI, ParamType, false)) |
2717 | 0 | Attrs.addNoFPClassAttr(getNoFPClassTestMask(getLangOpts())); |
2718 | 0 | break; |
2719 | 0 | case ABIArgInfo::Indirect: { |
2720 | 0 | if (AI.getInReg()) |
2721 | 0 | Attrs.addAttribute(llvm::Attribute::InReg); |
2722 | |
|
2723 | 0 | if (AI.getIndirectByVal()) |
2724 | 0 | Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType)); |
2725 | |
|
2726 | 0 | auto *Decl = ParamType->getAsRecordDecl(); |
2727 | 0 | if (CodeGenOpts.PassByValueIsNoAlias && Decl && |
2728 | 0 | Decl->getArgPassingRestrictions() == |
2729 | 0 | RecordArgPassingKind::CanPassInRegs) |
2730 | | // When calling the function, the pointer passed in will be the only |
2731 | | // reference to the underlying object. Mark it accordingly. |
2732 | 0 | Attrs.addAttribute(llvm::Attribute::NoAlias); |
2733 | | |
2734 | | // TODO: We could add the byref attribute if not byval, but it would |
2735 | | // require updating many testcases. |
2736 | |
|
2737 | 0 | CharUnits Align = AI.getIndirectAlign(); |
2738 | | |
2739 | | // In a byval argument, it is important that the required |
2740 | | // alignment of the type is honored, as LLVM might be creating a |
2741 | | // *new* stack object, and needs to know what alignment to give |
2742 | | // it. (Sometimes it can deduce a sensible alignment on its own, |
2743 | | // but not if clang decides it must emit a packed struct, or the |
2744 | | // user specifies increased alignment requirements.) |
2745 | | // |
2746 | | // This is different from indirect *not* byval, where the object |
2747 | | // exists already, and the align attribute is purely |
2748 | | // informative. |
2749 | 0 | assert(!Align.isZero()); |
2750 | | |
2751 | | // For now, only add this when we have a byval argument. |
2752 | | // TODO: be less lazy about updating test cases. |
2753 | 0 | if (AI.getIndirectByVal()) |
2754 | 0 | Attrs.addAlignmentAttr(Align.getQuantity()); |
2755 | | |
2756 | | // byval disables readnone and readonly. |
2757 | 0 | AddPotentialArgAccess(); |
2758 | 0 | break; |
2759 | 0 | } |
2760 | 0 | case ABIArgInfo::IndirectAliased: { |
2761 | 0 | CharUnits Align = AI.getIndirectAlign(); |
2762 | 0 | Attrs.addByRefAttr(getTypes().ConvertTypeForMem(ParamType)); |
2763 | 0 | Attrs.addAlignmentAttr(Align.getQuantity()); |
2764 | 0 | break; |
2765 | 0 | } |
2766 | 0 | case ABIArgInfo::Ignore: |
2767 | 0 | case ABIArgInfo::Expand: |
2768 | 0 | case ABIArgInfo::CoerceAndExpand: |
2769 | 0 | break; |
2770 | | |
2771 | 0 | case ABIArgInfo::InAlloca: |
2772 | | // inalloca disables readnone and readonly. |
2773 | 0 | AddPotentialArgAccess(); |
2774 | 0 | continue; |
2775 | 0 | } |
2776 | | |
2777 | 0 | if (const auto *RefTy = ParamType->getAs<ReferenceType>()) { |
2778 | 0 | QualType PTy = RefTy->getPointeeType(); |
2779 | 0 | if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) |
2780 | 0 | Attrs.addDereferenceableAttr( |
2781 | 0 | getMinimumObjectSize(PTy).getQuantity()); |
2782 | 0 | if (getTypes().getTargetAddressSpace(PTy) == 0 && |
2783 | 0 | !CodeGenOpts.NullPointerIsValid) |
2784 | 0 | Attrs.addAttribute(llvm::Attribute::NonNull); |
2785 | 0 | if (PTy->isObjectType()) { |
2786 | 0 | llvm::Align Alignment = |
2787 | 0 | getNaturalPointeeTypeAlignment(ParamType).getAsAlign(); |
2788 | 0 | Attrs.addAlignmentAttr(Alignment); |
2789 | 0 | } |
2790 | 0 | } |
2791 | | |
2792 | | // From OpenCL spec v3.0.10 section 6.3.5 Alignment of Types: |
2793 | | // > For arguments to a __kernel function declared to be a pointer to a |
2794 | | // > data type, the OpenCL compiler can assume that the pointee is always |
2795 | | // > appropriately aligned as required by the data type. |
2796 | 0 | if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>() && |
2797 | 0 | ParamType->isPointerType()) { |
2798 | 0 | QualType PTy = ParamType->getPointeeType(); |
2799 | 0 | if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) { |
2800 | 0 | llvm::Align Alignment = |
2801 | 0 | getNaturalPointeeTypeAlignment(ParamType).getAsAlign(); |
2802 | 0 | Attrs.addAlignmentAttr(Alignment); |
2803 | 0 | } |
2804 | 0 | } |
2805 | |
|
2806 | 0 | switch (FI.getExtParameterInfo(ArgNo).getABI()) { |
2807 | 0 | case ParameterABI::Ordinary: |
2808 | 0 | break; |
2809 | | |
2810 | 0 | case ParameterABI::SwiftIndirectResult: { |
2811 | | // Add 'sret' if we haven't already used it for something, but |
2812 | | // only if the result is void. |
2813 | 0 | if (!hasUsedSRet && RetTy->isVoidType()) { |
2814 | 0 | Attrs.addStructRetAttr(getTypes().ConvertTypeForMem(ParamType)); |
2815 | 0 | hasUsedSRet = true; |
2816 | 0 | } |
2817 | | |
2818 | | // Add 'noalias' in either case. |
2819 | 0 | Attrs.addAttribute(llvm::Attribute::NoAlias); |
2820 | | |
2821 | | // Add 'dereferenceable' and 'alignment'. |
2822 | 0 | auto PTy = ParamType->getPointeeType(); |
2823 | 0 | if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) { |
2824 | 0 | auto info = getContext().getTypeInfoInChars(PTy); |
2825 | 0 | Attrs.addDereferenceableAttr(info.Width.getQuantity()); |
2826 | 0 | Attrs.addAlignmentAttr(info.Align.getAsAlign()); |
2827 | 0 | } |
2828 | 0 | break; |
2829 | 0 | } |
2830 | | |
2831 | 0 | case ParameterABI::SwiftErrorResult: |
2832 | 0 | Attrs.addAttribute(llvm::Attribute::SwiftError); |
2833 | 0 | break; |
2834 | | |
2835 | 0 | case ParameterABI::SwiftContext: |
2836 | 0 | Attrs.addAttribute(llvm::Attribute::SwiftSelf); |
2837 | 0 | break; |
2838 | | |
2839 | 0 | case ParameterABI::SwiftAsyncContext: |
2840 | 0 | Attrs.addAttribute(llvm::Attribute::SwiftAsync); |
2841 | 0 | break; |
2842 | 0 | } |
2843 | | |
2844 | 0 | if (FI.getExtParameterInfo(ArgNo).isNoEscape()) |
2845 | 0 | Attrs.addAttribute(llvm::Attribute::NoCapture); |
2846 | |
|
2847 | 0 | if (Attrs.hasAttributes()) { |
2848 | 0 | unsigned FirstIRArg, NumIRArgs; |
2849 | 0 | std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); |
2850 | 0 | for (unsigned i = 0; i < NumIRArgs; i++) |
2851 | 0 | ArgAttrs[FirstIRArg + i] = ArgAttrs[FirstIRArg + i].addAttributes( |
2852 | 0 | getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), Attrs)); |
2853 | 0 | } |
2854 | 0 | } |
2855 | 0 | assert(ArgNo == FI.arg_size()); |
2856 | | |
2857 | 0 | AttrList = llvm::AttributeList::get( |
2858 | 0 | getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs), |
2859 | 0 | llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs); |
2860 | 0 | } |
2861 | | |
2862 | | /// An argument came in as a promoted argument; demote it back to its |
2863 | | /// declared type. |
2864 | | static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF, |
2865 | | const VarDecl *var, |
2866 | 0 | llvm::Value *value) { |
2867 | 0 | llvm::Type *varType = CGF.ConvertType(var->getType()); |
2868 | | |
2869 | | // This can happen with promotions that actually don't change the |
2870 | | // underlying type, like the enum promotions. |
2871 | 0 | if (value->getType() == varType) return value; |
2872 | | |
2873 | 0 | assert((varType->isIntegerTy() || varType->isFloatingPointTy()) |
2874 | 0 | && "unexpected promotion type"); |
2875 | | |
2876 | 0 | if (isa<llvm::IntegerType>(varType)) |
2877 | 0 | return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote"); |
2878 | | |
2879 | 0 | return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote"); |
2880 | 0 | } |
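 | | // Illustrative example: for a K&R definition `int f(c) char c; { ... }`
 | | // the caller promotes the argument to int, and this helper truncates it
 | | // back to i8 in the prologue (the "arg.unpromote" value above).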
2881 | | |
2882 | | /// Returns the attribute (either parameter attribute, or function |
2883 | | /// attribute), which declares argument ArgNo to be non-null. |
2884 | | static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, |
2885 | 0 | QualType ArgType, unsigned ArgNo) { |
2886 | | // FIXME: __attribute__((nonnull)) can also be applied to: |
2887 | | // - references to pointers, where the pointee is known to be |
2888 | | // nonnull (apparently a Clang extension) |
2889 | | // - transparent unions containing pointers |
2890 | | // In the former case, LLVM IR cannot represent the constraint. In |
2891 | | // the latter case, we have no guarantee that the transparent union |
2892 | | // is in fact passed as a pointer. |
2893 | 0 | if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType()) |
2894 | 0 | return nullptr; |
2895 | | // First, check attribute on parameter itself. |
2896 | 0 | if (PVD) { |
2897 | 0 | if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>()) |
2898 | 0 | return ParmNNAttr; |
2899 | 0 | } |
2900 | | // Check function attributes. |
2901 | 0 | if (!FD) |
2902 | 0 | return nullptr; |
2903 | 0 | for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) { |
2904 | 0 | if (NNAttr->isNonNull(ArgNo)) |
2905 | 0 | return NNAttr; |
2906 | 0 | } |
2907 | 0 | return nullptr; |
2908 | 0 | } |
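 | | // Illustrative example (hypothetical declaration): with
 | | // `__attribute__((nonnull(2))) void f(int n, int *p);` the lookup for the
 | | // second parameter finds the function-level NonNullAttr, and (unless
 | | // NullPointerIsValid is set) the IR argument is marked `nonnull` below.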
2909 | | |
2910 | | namespace { |
2911 | | struct CopyBackSwiftError final : EHScopeStack::Cleanup { |
2912 | | Address Temp; |
2913 | | Address Arg; |
2914 | 0 | CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {} |
2915 | 0 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
2916 | 0 | llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp); |
2917 | 0 | CGF.Builder.CreateStore(errorValue, Arg); |
2918 | 0 | } |
2919 | | }; |
2920 | | } |
2921 | | |
2922 | | void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, |
2923 | | llvm::Function *Fn, |
2924 | 0 | const FunctionArgList &Args) { |
2925 | 0 | if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) |
2926 | | // Naked functions don't have prologues. |
2927 | 0 | return; |
2928 | | |
2929 | | // If this is an implicit-return-zero function, go ahead and |
2930 | | // initialize the return value. TODO: it might be nice to have |
2931 | | // a more general mechanism for this that didn't require synthesized |
2932 | | // return statements. |
2933 | 0 | if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) { |
2934 | 0 | if (FD->hasImplicitReturnZero()) { |
2935 | 0 | QualType RetTy = FD->getReturnType().getUnqualifiedType(); |
2936 | 0 | llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy); |
2937 | 0 | llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy); |
2938 | 0 | Builder.CreateStore(Zero, ReturnValue); |
2939 | 0 | } |
2940 | 0 | } |
2941 | | |
2942 | | // FIXME: We no longer need the types from FunctionArgList; lift up and |
2943 | | // simplify. |
2944 | |
|
2945 | 0 | ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI); |
2946 | 0 | assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs()); |
2947 | | |
2948 | | // If we're using inalloca, all the memory arguments are GEPs off of the last |
2949 | | // parameter, which is a pointer to the complete memory area. |
2950 | 0 | Address ArgStruct = Address::invalid(); |
2951 | 0 | if (IRFunctionArgs.hasInallocaArg()) |
2952 | 0 | ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()), |
2953 | 0 | FI.getArgStruct(), FI.getArgStructAlignment()); |
2954 | | |
2955 | | // Name the struct return parameter. |
2956 | 0 | if (IRFunctionArgs.hasSRetArg()) { |
2957 | 0 | auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo()); |
2958 | 0 | AI->setName("agg.result"); |
2959 | 0 | AI->addAttr(llvm::Attribute::NoAlias); |
2960 | 0 | } |
2961 | | |
2962 | | // Track if we received the parameter as a pointer (indirect, byval, or |
2963 | | // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy it
2964 | | // into a local alloca for us. |
2965 | 0 | SmallVector<ParamValue, 16> ArgVals; |
2966 | 0 | ArgVals.reserve(Args.size()); |
2967 | | |
2968 | | // Create a pointer value for every parameter declaration. This usually |
2969 | | // entails copying one or more LLVM IR arguments into an alloca. Don't push |
2970 | | // any cleanups or do anything that might unwind. We do that separately, so |
2971 | | // we can push the cleanups in the correct order for the ABI. |
2972 | 0 | assert(FI.arg_size() == Args.size() && |
2973 | 0 | "Mismatch between function signature & arguments."); |
2974 | 0 | unsigned ArgNo = 0; |
2975 | 0 | CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin(); |
2976 | 0 | for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); |
2977 | 0 | i != e; ++i, ++info_it, ++ArgNo) { |
2978 | 0 | const VarDecl *Arg = *i; |
2979 | 0 | const ABIArgInfo &ArgI = info_it->info; |
2980 | |
|
2981 | 0 | bool isPromoted = |
2982 | 0 | isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted(); |
2983 | | // We are converting from ABIArgInfo type to VarDecl type directly, unless |
2984 | | // the parameter is promoted. In this case we convert to |
2985 | | // CGFunctionInfo::ArgInfo type with subsequent argument demotion. |
2986 | 0 | QualType Ty = isPromoted ? info_it->type : Arg->getType(); |
2987 | 0 | assert(hasScalarEvaluationKind(Ty) == |
2988 | 0 | hasScalarEvaluationKind(Arg->getType())); |
2989 | | |
2990 | 0 | unsigned FirstIRArg, NumIRArgs; |
2991 | 0 | std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); |
2992 | |
|
2993 | 0 | switch (ArgI.getKind()) { |
2994 | 0 | case ABIArgInfo::InAlloca: { |
2995 | 0 | assert(NumIRArgs == 0); |
2996 | 0 | auto FieldIndex = ArgI.getInAllocaFieldIndex(); |
2997 | 0 | Address V = |
2998 | 0 | Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName()); |
2999 | 0 | if (ArgI.getInAllocaIndirect()) |
3000 | 0 | V = Address(Builder.CreateLoad(V), ConvertTypeForMem(Ty), |
3001 | 0 | getContext().getTypeAlignInChars(Ty)); |
3002 | 0 | ArgVals.push_back(ParamValue::forIndirect(V)); |
3003 | 0 | break; |
3004 | 0 | } |
3005 | | |
3006 | 0 | case ABIArgInfo::Indirect: |
3007 | 0 | case ABIArgInfo::IndirectAliased: { |
3008 | 0 | assert(NumIRArgs == 1); |
3009 | 0 | Address ParamAddr = Address(Fn->getArg(FirstIRArg), ConvertTypeForMem(Ty), |
3010 | 0 | ArgI.getIndirectAlign(), KnownNonNull); |
3011 | |
|
3012 | 0 | if (!hasScalarEvaluationKind(Ty)) { |
3013 | | // Aggregates and complex variables are accessed by reference. All we |
3014 | | // need to do is realign the value, if requested. Also, if the address |
3015 | | // may be aliased, copy it to ensure that the parameter variable is |
3016 | | // mutable and has a unique address, as C requires.
3017 | 0 | Address V = ParamAddr; |
3018 | 0 | if (ArgI.getIndirectRealign() || ArgI.isIndirectAliased()) { |
3019 | 0 | Address AlignedTemp = CreateMemTemp(Ty, "coerce"); |
3020 | | |
3021 | | // Copy from the incoming argument pointer to the temporary with the |
3022 | | // appropriate alignment. |
3023 | | // |
3024 | | // FIXME: We should have a common utility for generating an aggregate |
3025 | | // copy. |
3026 | 0 | CharUnits Size = getContext().getTypeSizeInChars(Ty); |
3027 | 0 | Builder.CreateMemCpy( |
3028 | 0 | AlignedTemp.getPointer(), AlignedTemp.getAlignment().getAsAlign(), |
3029 | 0 | ParamAddr.getPointer(), ParamAddr.getAlignment().getAsAlign(), |
3030 | 0 | llvm::ConstantInt::get(IntPtrTy, Size.getQuantity())); |
3031 | 0 | V = AlignedTemp; |
3032 | 0 | } |
3033 | 0 | ArgVals.push_back(ParamValue::forIndirect(V)); |
3034 | 0 | } else { |
3035 | | // Load scalar value from indirect argument. |
3036 | 0 | llvm::Value *V = |
3037 | 0 | EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc()); |
3038 | |
|
3039 | 0 | if (isPromoted) |
3040 | 0 | V = emitArgumentDemotion(*this, Arg, V); |
3041 | 0 | ArgVals.push_back(ParamValue::forDirect(V)); |
3042 | 0 | } |
3043 | 0 | break; |
3044 | 0 | } |
3045 | | |
3046 | 0 | case ABIArgInfo::Extend: |
3047 | 0 | case ABIArgInfo::Direct: { |
3048 | 0 | auto AI = Fn->getArg(FirstIRArg); |
3049 | 0 | llvm::Type *LTy = ConvertType(Arg->getType()); |
3050 | | |
3051 | | // Prepare parameter attributes. So far, only attributes for pointer |
3052 | | // parameters are prepared. See |
3053 | | // http://llvm.org/docs/LangRef.html#paramattrs. |
3054 | 0 | if (ArgI.getDirectOffset() == 0 && LTy->isPointerTy() && |
3055 | 0 | ArgI.getCoerceToType()->isPointerTy()) { |
3056 | 0 | assert(NumIRArgs == 1); |
3057 | | |
3058 | 0 | if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) { |
3059 | | // Set `nonnull` attribute if any. |
3060 | 0 | if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(), |
3061 | 0 | PVD->getFunctionScopeIndex()) && |
3062 | 0 | !CGM.getCodeGenOpts().NullPointerIsValid) |
3063 | 0 | AI->addAttr(llvm::Attribute::NonNull); |
3064 | |
|
3065 | 0 | QualType OTy = PVD->getOriginalType(); |
3066 | 0 | if (const auto *ArrTy = |
3067 | 0 | getContext().getAsConstantArrayType(OTy)) { |
3068 | | // A C99 array parameter declaration with the static keyword also |
3069 | | // indicates dereferenceability, and if the size is constant we can |
3070 | | // use the dereferenceable attribute (which requires the size in |
3071 | | // bytes). |
3072 | 0 | if (ArrTy->getSizeModifier() == ArraySizeModifier::Static) { |
3073 | 0 | QualType ETy = ArrTy->getElementType(); |
3074 | 0 | llvm::Align Alignment = |
3075 | 0 | CGM.getNaturalTypeAlignment(ETy).getAsAlign(); |
3076 | 0 | AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment)); |
3077 | 0 | uint64_t ArrSize = ArrTy->getSize().getZExtValue(); |
3078 | 0 | if (!ETy->isIncompleteType() && ETy->isConstantSizeType() && |
3079 | 0 | ArrSize) { |
3080 | 0 | llvm::AttrBuilder Attrs(getLLVMContext()); |
3081 | 0 | Attrs.addDereferenceableAttr( |
3082 | 0 | getContext().getTypeSizeInChars(ETy).getQuantity() * |
3083 | 0 | ArrSize); |
3084 | 0 | AI->addAttrs(Attrs); |
3085 | 0 | } else if (getContext().getTargetInfo().getNullPointerValue( |
3086 | 0 | ETy.getAddressSpace()) == 0 && |
3087 | 0 | !CGM.getCodeGenOpts().NullPointerIsValid) { |
3088 | 0 | AI->addAttr(llvm::Attribute::NonNull); |
3089 | 0 | } |
3090 | 0 | } |
3091 | 0 | } else if (const auto *ArrTy = |
3092 | 0 | getContext().getAsVariableArrayType(OTy)) { |
3093 | | // For C99 VLAs with the static keyword, we don't know the size so |
3094 | | // we can't use the dereferenceable attribute, but in addrspace(0) |
3095 | | // we know that it must be nonnull. |
3096 | 0 | if (ArrTy->getSizeModifier() == ArraySizeModifier::Static) { |
3097 | 0 | QualType ETy = ArrTy->getElementType(); |
3098 | 0 | llvm::Align Alignment = |
3099 | 0 | CGM.getNaturalTypeAlignment(ETy).getAsAlign(); |
3100 | 0 | AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment)); |
3101 | 0 | if (!getTypes().getTargetAddressSpace(ETy) && |
3102 | 0 | !CGM.getCodeGenOpts().NullPointerIsValid) |
3103 | 0 | AI->addAttr(llvm::Attribute::NonNull); |
3104 | 0 | } |
3105 | 0 | } |
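 | | // Illustrative example: `void f(int a[static 4])` gives `a`
 | | // `align 4 dereferenceable(16)` here, while `void g(int n, int a[static n])`
 | | // only gets `align 4` (plus `nonnull` in addrspace 0 when null pointers are
 | | // not considered valid).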
3106 | | |
3107 | | // Set `align` attribute if any. |
3108 | 0 | const auto *AVAttr = PVD->getAttr<AlignValueAttr>(); |
3109 | 0 | if (!AVAttr) |
3110 | 0 | if (const auto *TOTy = OTy->getAs<TypedefType>()) |
3111 | 0 | AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>(); |
3112 | 0 | if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) { |
3113 | | // If the alignment-assumption sanitizer is enabled, we do *not* add the
3114 | | // alignment attribute here; instead we emit a normal alignment assumption
3115 | | // so that the UBSan check can still function.
3116 | 0 | llvm::ConstantInt *AlignmentCI = |
3117 | 0 | cast<llvm::ConstantInt>(EmitScalarExpr(AVAttr->getAlignment())); |
3118 | 0 | uint64_t AlignmentInt = |
3119 | 0 | AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment); |
3120 | 0 | if (AI->getParamAlign().valueOrOne() < AlignmentInt) { |
3121 | 0 | AI->removeAttr(llvm::Attribute::AttrKind::Alignment); |
3122 | 0 | AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr( |
3123 | 0 | llvm::Align(AlignmentInt))); |
3124 | 0 | } |
3125 | 0 | } |
3126 | 0 | } |
3127 | | |
3128 | | // Set 'noalias' if an argument type has the `restrict` qualifier. |
3129 | 0 | if (Arg->getType().isRestrictQualified()) |
3130 | 0 | AI->addAttr(llvm::Attribute::NoAlias); |
3131 | 0 | } |
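 | | // Illustrative example: `void f(int *restrict p)` is emitted roughly as
 | | // `define void @f(ptr noalias %p)`, modulo the other attributes added above.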
3132 | | |
3133 | | // Prepare the argument value. If we have the trivial case, handle it |
3134 | | // with no muss and fuss. |
3135 | 0 | if (!isa<llvm::StructType>(ArgI.getCoerceToType()) && |
3136 | 0 | ArgI.getCoerceToType() == ConvertType(Ty) && |
3137 | 0 | ArgI.getDirectOffset() == 0) { |
3138 | 0 | assert(NumIRArgs == 1); |
3139 | | |
3140 | | // LLVM expects swifterror parameters to be used in very restricted |
3141 | | // ways. Copy the value into a less-restricted temporary. |
3142 | 0 | llvm::Value *V = AI; |
3143 | 0 | if (FI.getExtParameterInfo(ArgNo).getABI() |
3144 | 0 | == ParameterABI::SwiftErrorResult) { |
3145 | 0 | QualType pointeeTy = Ty->getPointeeType(); |
3146 | 0 | assert(pointeeTy->isPointerType()); |
3147 | 0 | Address temp = |
3148 | 0 | CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); |
3149 | 0 | Address arg(V, ConvertTypeForMem(pointeeTy), |
3150 | 0 | getContext().getTypeAlignInChars(pointeeTy)); |
3151 | 0 | llvm::Value *incomingErrorValue = Builder.CreateLoad(arg); |
3152 | 0 | Builder.CreateStore(incomingErrorValue, temp); |
3153 | 0 | V = temp.getPointer(); |
3154 | | |
3155 | | // Push a cleanup to copy the value back at the end of the function. |
3156 | | // The convention does not guarantee that the value will be written |
3157 | | // back if the function exits with an unwind exception. |
3158 | 0 | EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg); |
3159 | 0 | } |
3160 | | |
3161 | | // Ensure the argument is the correct type. |
3162 | 0 | if (V->getType() != ArgI.getCoerceToType()) |
3163 | 0 | V = Builder.CreateBitCast(V, ArgI.getCoerceToType()); |
3164 | |
|
3165 | 0 | if (isPromoted) |
3166 | 0 | V = emitArgumentDemotion(*this, Arg, V); |
3167 | | |
3168 | | // Because of merging of function types from multiple decls it is |
3169 | | // possible for the type of an argument to not match the corresponding |
3170 | | // type in the function type. Since we are codegening the callee |
3171 | | // in here, add a cast to the argument type. |
3172 | 0 | llvm::Type *LTy = ConvertType(Arg->getType()); |
3173 | 0 | if (V->getType() != LTy) |
3174 | 0 | V = Builder.CreateBitCast(V, LTy); |
3175 | |
|
3176 | 0 | ArgVals.push_back(ParamValue::forDirect(V)); |
3177 | 0 | break; |
3178 | 0 | } |
3179 | | |
3180 | | // VLST arguments are coerced to VLATs at the function boundary for |
3181 | | // ABI consistency. If this is a VLST that was coerced to |
3182 | | // a VLAT at the function boundary and the types match up, use |
3183 | | // llvm.vector.extract to convert back to the original VLST. |
3184 | 0 | if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(ConvertType(Ty))) { |
3185 | 0 | llvm::Value *Coerced = Fn->getArg(FirstIRArg); |
3186 | 0 | if (auto *VecTyFrom = |
3187 | 0 | dyn_cast<llvm::ScalableVectorType>(Coerced->getType())) { |
3188 | | // If we are casting a scalable 16 x i1 predicate vector to a fixed i8 |
3189 | | // vector, bitcast the source and use a vector extract. |
3190 | 0 | auto PredType = |
3191 | 0 | llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16); |
3192 | 0 | if (VecTyFrom == PredType && |
3193 | 0 | VecTyTo->getElementType() == Builder.getInt8Ty()) { |
3194 | 0 | VecTyFrom = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2); |
3195 | 0 | Coerced = Builder.CreateBitCast(Coerced, VecTyFrom); |
3196 | 0 | } |
3197 | 0 | if (VecTyFrom->getElementType() == VecTyTo->getElementType()) { |
3198 | 0 | llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty); |
3199 | |
|
3200 | 0 | assert(NumIRArgs == 1); |
3201 | 0 | Coerced->setName(Arg->getName() + ".coerce"); |
3202 | 0 | ArgVals.push_back(ParamValue::forDirect(Builder.CreateExtractVector( |
3203 | 0 | VecTyTo, Coerced, Zero, "cast.fixed"))); |
3204 | 0 | break; |
3205 | 0 | } |
3206 | 0 | } |
3207 | 0 | } |
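 | | // Illustrative example (assuming -msve-vector-bits=512): a fixed-length
 | | // `svint32_t` parameter declared with arm_sve_vector_bits arrives as
 | | // `<vscale x 4 x i32>` and is converted back to `<16 x i32>` by an
 | | // llvm.vector.extract call named "cast.fixed", as above.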
3208 | | |
3209 | 0 | Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg), |
3210 | 0 | Arg->getName()); |
3211 | | |
3212 | | // Pointer to store into. |
3213 | 0 | Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI); |
3214 | | |
3215 | | // Fast-isel and the optimizer generally like scalar values better than |
3216 | | // FCAs, so we flatten them if this is safe to do for this argument. |
3217 | 0 | llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType()); |
3218 | 0 | if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy && |
3219 | 0 | STy->getNumElements() > 1) { |
3220 | 0 | llvm::TypeSize StructSize = CGM.getDataLayout().getTypeAllocSize(STy); |
3221 | 0 | llvm::TypeSize PtrElementSize = |
3222 | 0 | CGM.getDataLayout().getTypeAllocSize(Ptr.getElementType()); |
3223 | 0 | if (StructSize.isScalable()) { |
3224 | 0 | assert(STy->containsHomogeneousScalableVectorTypes() && |
3225 | 0 | "ABI only supports structure with homogeneous scalable vector " |
3226 | 0 | "type"); |
3227 | 0 | assert(StructSize == PtrElementSize && |
3228 | 0 | "Only allow non-fractional movement of structure with" |
3229 | 0 | "homogeneous scalable vector type"); |
3230 | 0 | assert(STy->getNumElements() == NumIRArgs); |
3231 | | |
3232 | 0 | llvm::Value *LoadedStructValue = llvm::PoisonValue::get(STy); |
3233 | 0 | for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { |
3234 | 0 | auto *AI = Fn->getArg(FirstIRArg + i); |
3235 | 0 | AI->setName(Arg->getName() + ".coerce" + Twine(i)); |
3236 | 0 | LoadedStructValue = |
3237 | 0 | Builder.CreateInsertValue(LoadedStructValue, AI, i); |
3238 | 0 | } |
3239 | |
|
3240 | 0 | Builder.CreateStore(LoadedStructValue, Ptr); |
3241 | 0 | } else { |
3242 | 0 | uint64_t SrcSize = StructSize.getFixedValue(); |
3243 | 0 | uint64_t DstSize = PtrElementSize.getFixedValue(); |
3244 | |
|
3245 | 0 | Address AddrToStoreInto = Address::invalid(); |
3246 | 0 | if (SrcSize <= DstSize) { |
3247 | 0 | AddrToStoreInto = Ptr.withElementType(STy); |
3248 | 0 | } else { |
3249 | 0 | AddrToStoreInto = |
3250 | 0 | CreateTempAlloca(STy, Alloca.getAlignment(), "coerce"); |
3251 | 0 | } |
3252 | |
|
3253 | 0 | assert(STy->getNumElements() == NumIRArgs); |
3254 | 0 | for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { |
3255 | 0 | auto AI = Fn->getArg(FirstIRArg + i); |
3256 | 0 | AI->setName(Arg->getName() + ".coerce" + Twine(i)); |
3257 | 0 | Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i); |
3258 | 0 | Builder.CreateStore(AI, EltPtr); |
3259 | 0 | } |
3260 | |
|
3261 | 0 | if (SrcSize > DstSize) { |
3262 | 0 | Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize); |
3263 | 0 | } |
3264 | 0 | } |
3265 | 0 | } else { |
3266 | | // Simple case, just do a coerced store of the argument into the alloca. |
3267 | 0 | assert(NumIRArgs == 1); |
3268 | 0 | auto AI = Fn->getArg(FirstIRArg); |
3269 | 0 | AI->setName(Arg->getName() + ".coerce"); |
3270 | 0 | CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this); |
3271 | 0 | } |
3272 | | |
3273 | | // Match to what EmitParmDecl is expecting for this type. |
3274 | 0 | if (CodeGenFunction::hasScalarEvaluationKind(Ty)) { |
3275 | 0 | llvm::Value *V = |
3276 | 0 | EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc()); |
3277 | 0 | if (isPromoted) |
3278 | 0 | V = emitArgumentDemotion(*this, Arg, V); |
3279 | 0 | ArgVals.push_back(ParamValue::forDirect(V)); |
3280 | 0 | } else { |
3281 | 0 | ArgVals.push_back(ParamValue::forIndirect(Alloca)); |
3282 | 0 | } |
3283 | 0 | break; |
3284 | 0 | } |
3285 | | |
3286 | 0 | case ABIArgInfo::CoerceAndExpand: { |
3287 | | // Reconstruct into a temporary. |
3288 | 0 | Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg)); |
3289 | 0 | ArgVals.push_back(ParamValue::forIndirect(alloca)); |
3290 | |
|
3291 | 0 | auto coercionType = ArgI.getCoerceAndExpandType(); |
3292 | 0 | alloca = alloca.withElementType(coercionType); |
3293 | |
|
3294 | 0 | unsigned argIndex = FirstIRArg; |
3295 | 0 | for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { |
3296 | 0 | llvm::Type *eltType = coercionType->getElementType(i); |
3297 | 0 | if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) |
3298 | 0 | continue; |
3299 | | |
3300 | 0 | auto eltAddr = Builder.CreateStructGEP(alloca, i); |
3301 | 0 | auto elt = Fn->getArg(argIndex++); |
3302 | 0 | Builder.CreateStore(elt, eltAddr); |
3303 | 0 | } |
3304 | 0 | assert(argIndex == FirstIRArg + NumIRArgs); |
3305 | 0 | break; |
3306 | 0 | } |
3307 | | |
3308 | 0 | case ABIArgInfo::Expand: { |
3309 | | // If this structure was expanded into multiple arguments then |
3310 | | // we need to create a temporary and reconstruct it from the |
3311 | | // arguments. |
3312 | 0 | Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg)); |
3313 | 0 | LValue LV = MakeAddrLValue(Alloca, Ty); |
3314 | 0 | ArgVals.push_back(ParamValue::forIndirect(Alloca)); |
3315 | |
|
3316 | 0 | auto FnArgIter = Fn->arg_begin() + FirstIRArg; |
3317 | 0 | ExpandTypeFromArgs(Ty, LV, FnArgIter); |
3318 | 0 | assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs); |
3319 | 0 | for (unsigned i = 0, e = NumIRArgs; i != e; ++i) { |
3320 | 0 | auto AI = Fn->getArg(FirstIRArg + i); |
3321 | 0 | AI->setName(Arg->getName() + "." + Twine(i)); |
3322 | 0 | } |
3323 | 0 | break; |
3324 | 0 | } |
3325 | | |
3326 | 0 | case ABIArgInfo::Ignore: |
3327 | 0 | assert(NumIRArgs == 0); |
3328 | | // Initialize the local variable appropriately. |
3329 | 0 | if (!hasScalarEvaluationKind(Ty)) { |
3330 | 0 | ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty))); |
3331 | 0 | } else { |
3332 | 0 | llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType())); |
3333 | 0 | ArgVals.push_back(ParamValue::forDirect(U)); |
3334 | 0 | } |
3335 | 0 | break; |
3336 | 0 | } |
3337 | 0 | } |
3338 | | |
3339 | 0 | if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { |
3340 | 0 | for (int I = Args.size() - 1; I >= 0; --I) |
3341 | 0 | EmitParmDecl(*Args[I], ArgVals[I], I + 1); |
3342 | 0 | } else { |
3343 | 0 | for (unsigned I = 0, E = Args.size(); I != E; ++I) |
3344 | 0 | EmitParmDecl(*Args[I], ArgVals[I], I + 1); |
3345 | 0 | } |
3346 | 0 | } |
3347 | | |
3348 | 0 | static void eraseUnusedBitCasts(llvm::Instruction *insn) { |
3349 | 0 | while (insn->use_empty()) { |
3350 | 0 | llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn); |
3351 | 0 | if (!bitcast) return; |
3352 | | |
3353 | | // This is "safe" because we would have used a ConstantExpr otherwise. |
3354 | 0 | insn = cast<llvm::Instruction>(bitcast->getOperand(0)); |
3355 | 0 | bitcast->eraseFromParent(); |
3356 | 0 | } |
3357 | 0 | } |
3358 | | |
3359 | | /// Try to emit a fused autorelease of a return result. |
3360 | | static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, |
3361 | 0 | llvm::Value *result) { |
3362 | | // We must immediately follow the cast.
3363 | 0 | llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock(); |
3364 | 0 | if (BB->empty()) return nullptr; |
3365 | 0 | if (&BB->back() != result) return nullptr; |
3366 | | |
3367 | 0 | llvm::Type *resultType = result->getType(); |
3368 | | |
3369 | | // result is in a BasicBlock and is therefore an Instruction. |
3370 | 0 | llvm::Instruction *generator = cast<llvm::Instruction>(result); |
3371 | |
|
3372 | 0 | SmallVector<llvm::Instruction *, 4> InstsToKill; |
3373 | | |
3374 | | // Look for: |
3375 | | // %generator = bitcast %type1* %generator2 to %type2* |
3376 | 0 | while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) { |
3377 | | // We would have emitted this as a constant if the operand weren't |
3378 | | // an Instruction. |
3379 | 0 | generator = cast<llvm::Instruction>(bitcast->getOperand(0)); |
3380 | | |
3381 | | // Require the generator to be immediately followed by the cast. |
3382 | 0 | if (generator->getNextNode() != bitcast) |
3383 | 0 | return nullptr; |
3384 | | |
3385 | 0 | InstsToKill.push_back(bitcast); |
3386 | 0 | } |
3387 | | |
3388 | | // Look for: |
3389 | | // %generator = call i8* @objc_retain(i8* %originalResult) |
3390 | | // or |
3391 | | // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult) |
3392 | 0 | llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator); |
3393 | 0 | if (!call) return nullptr; |
3394 | | |
3395 | 0 | bool doRetainAutorelease; |
3396 | |
|
3397 | 0 | if (call->getCalledOperand() == CGF.CGM.getObjCEntrypoints().objc_retain) { |
3398 | 0 | doRetainAutorelease = true; |
3399 | 0 | } else if (call->getCalledOperand() == |
3400 | 0 | CGF.CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue) { |
3401 | 0 | doRetainAutorelease = false; |
3402 | | |
3403 | | // If we emitted an assembly marker for this call (and the |
3404 | | // ARCEntrypoints field should have been set if so), go looking |
3405 | | // for that call. If we can't find it, we can't do this |
3406 | | // optimization. But it should always be the immediately previous |
3407 | | // instruction, unless we needed bitcasts around the call. |
3408 | 0 | if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) { |
3409 | 0 | llvm::Instruction *prev = call->getPrevNode(); |
3410 | 0 | assert(prev); |
3411 | 0 | if (isa<llvm::BitCastInst>(prev)) { |
3412 | 0 | prev = prev->getPrevNode(); |
3413 | 0 | assert(prev); |
3414 | 0 | } |
3415 | 0 | assert(isa<llvm::CallInst>(prev)); |
3416 | 0 | assert(cast<llvm::CallInst>(prev)->getCalledOperand() == |
3417 | 0 | CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker); |
3418 | 0 | InstsToKill.push_back(prev); |
3419 | 0 | } |
3420 | 0 | } else { |
3421 | 0 | return nullptr; |
3422 | 0 | } |
3423 | | |
3424 | 0 | result = call->getArgOperand(0); |
3425 | 0 | InstsToKill.push_back(call); |
3426 | | |
3427 | | // Keep killing bitcasts, for sanity. Note that we no longer care |
3428 | | // about precise ordering as long as there's exactly one use. |
3429 | 0 | while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) { |
3430 | 0 | if (!bitcast->hasOneUse()) break; |
3431 | 0 | InstsToKill.push_back(bitcast); |
3432 | 0 | result = bitcast->getOperand(0); |
3433 | 0 | } |
3434 | | |
3435 | | // Delete all the unnecessary instructions, from latest to earliest. |
3436 | 0 | for (auto *I : InstsToKill) |
3437 | 0 | I->eraseFromParent(); |
3438 | | |
3439 | | // Do the fused retain/autorelease if we were asked to. |
3440 | 0 | if (doRetainAutorelease) |
3441 | 0 | result = CGF.EmitARCRetainAutoreleaseReturnValue(result); |
3442 | | |
3443 | | // Cast back to the result type. |
3444 | 0 | return CGF.Builder.CreateBitCast(result, resultType); |
3445 | 0 | } |
3446 | | |
3447 | | /// If this is a +1 of the value of an immutable 'self', remove it. |
3448 | | static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF, |
3449 | 0 | llvm::Value *result) { |
3450 | | // This is only applicable to a method with an immutable 'self'. |
3451 | 0 | const ObjCMethodDecl *method = |
3452 | 0 | dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl); |
3453 | 0 | if (!method) return nullptr; |
3454 | 0 | const VarDecl *self = method->getSelfDecl(); |
3455 | 0 | if (!self->getType().isConstQualified()) return nullptr; |
3456 | | |
3457 | | // Look for a retain call. Note: stripPointerCasts looks through returned arg |
3458 | | // functions, which would cause us to miss the retain. |
3459 | 0 | llvm::CallInst *retainCall = dyn_cast<llvm::CallInst>(result); |
3460 | 0 | if (!retainCall || retainCall->getCalledOperand() != |
3461 | 0 | CGF.CGM.getObjCEntrypoints().objc_retain) |
3462 | 0 | return nullptr; |
3463 | | |
3464 | | // Look for an ordinary load of 'self'. |
3465 | 0 | llvm::Value *retainedValue = retainCall->getArgOperand(0); |
3466 | 0 | llvm::LoadInst *load = |
3467 | 0 | dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts()); |
3468 | 0 | if (!load || load->isAtomic() || load->isVolatile() || |
3469 | 0 | load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer()) |
3470 | 0 | return nullptr; |
3471 | | |
3472 | | // Okay! Burn it all down. This relies for correctness on the |
3473 | | // assumption that the retain is emitted as part of the return and |
3474 | | // that thereafter everything is used "linearly". |
3475 | 0 | llvm::Type *resultType = result->getType(); |
3476 | 0 | eraseUnusedBitCasts(cast<llvm::Instruction>(result)); |
3477 | 0 | assert(retainCall->use_empty()); |
3478 | 0 | retainCall->eraseFromParent(); |
3479 | 0 | eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue)); |
3480 | |
|
3481 | 0 | return CGF.Builder.CreateBitCast(load, resultType); |
3482 | 0 | } |
3483 | | |
3484 | | /// Emit an ARC autorelease of the result of a function. |
3485 | | /// |
3486 | | /// \return the value to actually return from the function |
3487 | | static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF, |
3488 | 0 | llvm::Value *result) { |
3489 | | // If we're returning 'self', kill the initial retain. This is a |
3490 | | // heuristic attempt to "encourage correctness" in the really unfortunate |
3491 | | // case where we have a return of self during a dealloc and we desperately |
3492 | | // need to avoid the possible autorelease. |
3493 | 0 | if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result)) |
3494 | 0 | return self; |
3495 | | |
3496 | | // At -O0, try to emit a fused retain/autorelease. |
3497 | 0 | if (CGF.shouldUseFusedARCCalls()) |
3498 | 0 | if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result)) |
3499 | 0 | return fused; |
3500 | | |
3501 | 0 | return CGF.EmitARCAutoreleaseReturnValue(result); |
3502 | 0 | } |
3503 | | |
3504 | | /// Heuristically search for a dominating store to the return-value slot. |
3505 | 0 | static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) { |
3506 | | // Check if a User is a store whose pointerOperand is the ReturnValue.
3507 | | // We are looking for stores to the ReturnValue, not for stores of the |
3508 | | // ReturnValue to some other location. |
3509 | 0 | auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * { |
3510 | 0 | auto *SI = dyn_cast<llvm::StoreInst>(U); |
3511 | 0 | if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer() || |
3512 | 0 | SI->getValueOperand()->getType() != CGF.ReturnValue.getElementType()) |
3513 | 0 | return nullptr; |
3514 | | // These aren't actually possible for non-coerced returns, and we |
3515 | | // only care about non-coerced returns on this code path. |
3516 | | // All memory instructions inside a __try block are volatile.
3517 | 0 | assert(!SI->isAtomic() && |
3518 | 0 | (!SI->isVolatile() || CGF.currentFunctionUsesSEHTry())); |
3519 | 0 | return SI; |
3520 | 0 | }; |
3521 | | // If there are multiple uses of the return-value slot, just check |
3522 | | // for something immediately preceding the IP. Sometimes this can |
3523 | | // happen with how we generate implicit-returns; it can also happen |
3524 | | // with noreturn cleanups. |
3525 | 0 | if (!CGF.ReturnValue.getPointer()->hasOneUse()) { |
3526 | 0 | llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock(); |
3527 | 0 | if (IP->empty()) return nullptr; |
3528 | | |
3529 | | // Look at directly preceding instruction, skipping bitcasts and lifetime |
3530 | | // markers. |
3531 | 0 | for (llvm::Instruction &I : make_range(IP->rbegin(), IP->rend())) { |
3532 | 0 | if (isa<llvm::BitCastInst>(&I)) |
3533 | 0 | continue; |
3534 | 0 | if (auto *II = dyn_cast<llvm::IntrinsicInst>(&I)) |
3535 | 0 | if (II->getIntrinsicID() == llvm::Intrinsic::lifetime_end) |
3536 | 0 | continue; |
3537 | | |
3538 | 0 | return GetStoreIfValid(&I); |
3539 | 0 | } |
3540 | 0 | return nullptr; |
3541 | 0 | } |
3542 | | |
3543 | 0 | llvm::StoreInst *store = |
3544 | 0 | GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back()); |
3545 | 0 | if (!store) return nullptr; |
3546 | | |
3547 | | // Now do a quick-and-dirty dominance check: just walk up the
3548 | | // single-predecessor chain from the current insertion point.
3549 | 0 | llvm::BasicBlock *StoreBB = store->getParent(); |
3550 | 0 | llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock(); |
3551 | 0 | llvm::SmallPtrSet<llvm::BasicBlock *, 4> SeenBBs; |
3552 | 0 | while (IP != StoreBB) { |
3553 | 0 | if (!SeenBBs.insert(IP).second || !(IP = IP->getSinglePredecessor())) |
3554 | 0 | return nullptr; |
3555 | 0 | } |
3556 | | |
3557 | | // Okay, the store's basic block dominates the insertion point; we |
3558 | | // can do our thing. |
3559 | 0 | return store; |
3560 | 0 | } |
3561 | | |
3562 | | // Helper functions for EmitCMSEClearRecord |
3563 | | |
3564 | | // Set the bits corresponding to a field having width `BitWidth` and located at |
3565 | | // offset `BitOffset` (from the least significant bit) within a storage unit of |
3566 | | // `Bits.size()` bytes. Each element of `Bits` corresponds to one target byte. |
3567 | | // Use little-endian layout, i.e.`Bits[0]` is the LSB. |
3568 | | // Use little-endian layout, i.e. `Bits[0]` is the LSB.
3569 | 0 | int BitWidth, int CharWidth) { |
3570 | 0 | assert(CharWidth <= 64); |
3571 | 0 | assert(static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth); |
3572 | | |
3573 | 0 | int Pos = 0; |
3574 | 0 | if (BitOffset >= CharWidth) { |
3575 | 0 | Pos += BitOffset / CharWidth; |
3576 | 0 | BitOffset = BitOffset % CharWidth; |
3577 | 0 | } |
3578 | |
|
3579 | 0 | const uint64_t Used = (uint64_t(1) << CharWidth) - 1; |
3580 | 0 | if (BitOffset + BitWidth >= CharWidth) { |
3581 | 0 | Bits[Pos++] |= (Used << BitOffset) & Used; |
3582 | 0 | BitWidth -= CharWidth - BitOffset; |
3583 | 0 | BitOffset = 0; |
3584 | 0 | } |
3585 | |
|
3586 | 0 | while (BitWidth >= CharWidth) { |
3587 | 0 | Bits[Pos++] = Used; |
3588 | 0 | BitWidth -= CharWidth; |
3589 | 0 | } |
3590 | |
|
3591 | 0 | if (BitWidth > 0) |
3592 | 0 | Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset; |
3593 | 0 | } |
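 | | // Worked example (illustrative): with CharWidth == 8, calling
 | | // setBitRange(Bits, /*BitOffset=*/4, /*BitWidth=*/11, /*CharWidth=*/8)
 | | // sets Bits[0] |= 0xF0 and Bits[1] |= 0x7F, i.e. eleven bits starting at
 | | // bit 4 of the little-endian storage unit.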
3594 | | |
3595 | | // Set the bits corresponding to a field having width `BitWidth` and located at |
3596 | | // offset `BitOffset` (from the least significant bit) within a storage unit of |
3597 | | // `StorageSize` bytes, located at `StorageOffset` in `Bits`. Each element of |
3598 | | // `Bits` corresponds to one target byte. Use target endian layout. |
3599 | | static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int StorageOffset, |
3600 | | int StorageSize, int BitOffset, int BitWidth, |
3601 | 0 | int CharWidth, bool BigEndian) { |
3602 | |
|
3603 | 0 | SmallVector<uint64_t, 8> TmpBits(StorageSize); |
3604 | 0 | setBitRange(TmpBits, BitOffset, BitWidth, CharWidth); |
3605 | |
|
3606 | 0 | if (BigEndian) |
3607 | 0 | std::reverse(TmpBits.begin(), TmpBits.end()); |
3608 | |
|
3609 | 0 | for (uint64_t V : TmpBits) |
3610 | 0 | Bits[StorageOffset++] |= V; |
3611 | 0 | } |
3612 | | |
3613 | | static void setUsedBits(CodeGenModule &, QualType, int, |
3614 | | SmallVectorImpl<uint64_t> &); |
3615 | | |
3616 | | // Set the bits in `Bits`, which correspond to the value representations of |
3617 | | // the actual members of the record type `RTy`. Note that this function does |
3618 | | // not handle base classes, virtual tables, etc., since they cannot appear in
3619 | | // CMSE function arguments or return values. The bit mask corresponds to the
3620 | | // target memory layout, i.e. it is endian-dependent.
3621 | | static void setUsedBits(CodeGenModule &CGM, const RecordType *RTy, int Offset, |
3622 | 0 | SmallVectorImpl<uint64_t> &Bits) { |
3623 | 0 | ASTContext &Context = CGM.getContext(); |
3624 | 0 | int CharWidth = Context.getCharWidth(); |
3625 | 0 | const RecordDecl *RD = RTy->getDecl()->getDefinition(); |
3626 | 0 | const ASTRecordLayout &ASTLayout = Context.getASTRecordLayout(RD); |
3627 | 0 | const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(RD); |
3628 | |
3629 | 0 | int Idx = 0; |
3630 | 0 | for (auto I = RD->field_begin(), E = RD->field_end(); I != E; ++I, ++Idx) { |
3631 | 0 | const FieldDecl *F = *I; |
3632 | |
3633 | 0 | if (F->isUnnamedBitfield() || F->isZeroLengthBitField(Context) || |
3634 | 0 | F->getType()->isIncompleteArrayType()) |
3635 | 0 | continue; |
3636 | | |
3637 | 0 | if (F->isBitField()) { |
3638 | 0 | const CGBitFieldInfo &BFI = Layout.getBitFieldInfo(F); |
3639 | 0 | setBitRange(Bits, Offset + BFI.StorageOffset.getQuantity(), |
3640 | 0 | BFI.StorageSize / CharWidth, BFI.Offset, |
3641 | 0 | BFI.Size, CharWidth, |
3642 | 0 | CGM.getDataLayout().isBigEndian()); |
3643 | 0 | continue; |
3644 | 0 | } |
3645 | | |
3646 | 0 | setUsedBits(CGM, F->getType(), |
3647 | 0 | Offset + ASTLayout.getFieldOffset(Idx) / CharWidth, Bits); |
3648 | 0 | } |
3649 | 0 | } |
3650 | | |
3651 | | // Set the bits in `Bits`, which correspond to the value representations of |
3652 | | // the elements of an array type `ATy`. |
3653 | | static void setUsedBits(CodeGenModule &CGM, const ConstantArrayType *ATy, |
3654 | 0 | int Offset, SmallVectorImpl<uint64_t> &Bits) { |
3655 | 0 | const ASTContext &Context = CGM.getContext(); |
3656 | |
3657 | 0 | QualType ETy = Context.getBaseElementType(ATy); |
3658 | 0 | int Size = Context.getTypeSizeInChars(ETy).getQuantity(); |
3659 | 0 | SmallVector<uint64_t, 4> TmpBits(Size); |
3660 | 0 | setUsedBits(CGM, ETy, 0, TmpBits); |
3661 | |
3662 | 0 | for (int I = 0, N = Context.getConstantArrayElementCount(ATy); I < N; ++I) { |
3663 | 0 | auto Src = TmpBits.begin(); |
3664 | 0 | auto Dst = Bits.begin() + Offset + I * Size; |
3665 | 0 | for (int J = 0; J < Size; ++J) |
3666 | 0 | *Dst++ |= *Src++; |
3667 | 0 | } |
3668 | 0 | } |
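The array case above computes the used-bit pattern of a single element once and then OR-s that pattern into the destination at each element's byte offset. A minimal sketch of that tiling step with plain byte vectors (illustrative names; 8-bit bytes assumed):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // OR a per-element byte mask into the destination once per array element,
    // at each element's byte offset -- the same tiling loop as above.
    static void tileElementMask(std::vector<uint8_t> &Bits, int Offset,
                                const std::vector<uint8_t> &EltMask, int NumElts) {
      const int Size = static_cast<int>(EltMask.size());
      for (int I = 0; I < NumElts; ++I)
        for (int J = 0; J < Size; ++J)
          Bits[Offset + I * Size + J] |= EltMask[J];
    }

    int main() {
      std::vector<uint8_t> Bits(6, 0);
      // A made-up 3-byte element whose middle byte is padding.
      tileElementMask(Bits, /*Offset=*/0, /*EltMask=*/{0xFF, 0x00, 0xFF}, /*NumElts=*/2);
      for (uint8_t B : Bits)
        std::printf("%02x ", B); // prints: ff 00 ff ff 00 ff
      std::printf("\n");
    }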
3669 | | |
3670 | | // Set the bits in `Bits`, which correspond to the value representations of |
3671 | | // the type `QTy`. |
3672 | | static void setUsedBits(CodeGenModule &CGM, QualType QTy, int Offset, |
3673 | 0 | SmallVectorImpl<uint64_t> &Bits) { |
3674 | 0 | if (const auto *RTy = QTy->getAs<RecordType>()) |
3675 | 0 | return setUsedBits(CGM, RTy, Offset, Bits); |
3676 | | |
3677 | 0 | ASTContext &Context = CGM.getContext(); |
3678 | 0 | if (const auto *ATy = Context.getAsConstantArrayType(QTy)) |
3679 | 0 | return setUsedBits(CGM, ATy, Offset, Bits); |
3680 | | |
3681 | 0 | int Size = Context.getTypeSizeInChars(QTy).getQuantity(); |
3682 | 0 | if (Size <= 0) |
3683 | 0 | return; |
3684 | | |
3685 | 0 | std::fill_n(Bits.begin() + Offset, Size, |
3686 | 0 | (uint64_t(1) << Context.getCharWidth()) - 1); |
3687 | 0 | } |
3688 | | |
3689 | | static uint64_t buildMultiCharMask(const SmallVectorImpl<uint64_t> &Bits,
3690 | | int Pos, int Size, int CharWidth,
3691 | 0 | bool BigEndian) { |
3692 | 0 | assert(Size > 0); |
3693 | 0 | uint64_t Mask = 0; |
3694 | 0 | if (BigEndian) { |
3695 | 0 | for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E; |
3696 | 0 | ++P) |
3697 | 0 | Mask = (Mask << CharWidth) | *P; |
3698 | 0 | } else { |
3699 | 0 | auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos; |
3700 | 0 | do |
3701 | 0 | Mask = (Mask << CharWidth) | *--P; |
3702 | 0 | while (P != End); |
3703 | 0 | } |
3704 | 0 | return Mask; |
3705 | 0 | } |
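A small standalone illustration of the fold above, fixed to 8-bit bytes: the same four mask bytes produce 0xffe0 when folded for a little-endian target and 0xe0ff0000 for a big-endian one.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Fold `Size` bytes starting at `Pos` into one integer. Little-endian puts
    // Bits[Pos] in the least significant byte, big-endian in the most significant.
    static uint64_t buildMask8(const std::vector<uint8_t> &Bits, int Pos, int Size,
                               bool BigEndian) {
      uint64_t Mask = 0;
      if (BigEndian) {
        for (int I = 0; I < Size; ++I)
          Mask = (Mask << 8) | Bits[Pos + I];
      } else {
        for (int I = Size - 1; I >= 0; --I)
          Mask = (Mask << 8) | Bits[Pos + I];
      }
      return Mask;
    }

    int main() {
      std::vector<uint8_t> Bits = {0xE0, 0xFF, 0x00, 0x00};
      std::printf("%#llx\n", (unsigned long long)buildMask8(Bits, 0, 4, /*BigEndian=*/false)); // 0xffe0
      std::printf("%#llx\n", (unsigned long long)buildMask8(Bits, 0, 4, /*BigEndian=*/true));  // 0xe0ff0000
    }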
3706 | | |
3707 | | // Emit code to clear the bits in a record, which aren't a part of any user |
3708 | | // declared member, when the record is a function return. |
3709 | | llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
3710 | | llvm::IntegerType *ITy,
3711 | 0 | QualType QTy) { |
3712 | 0 | assert(Src->getType() == ITy); |
3713 | 0 | assert(ITy->getScalarSizeInBits() <= 64); |
3714 | | |
3715 | 0 | const llvm::DataLayout &DataLayout = CGM.getDataLayout(); |
3716 | 0 | int Size = DataLayout.getTypeStoreSize(ITy); |
3717 | 0 | SmallVector<uint64_t, 4> Bits(Size); |
3718 | 0 | setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits); |
3719 | |
3720 | 0 | int CharWidth = CGM.getContext().getCharWidth(); |
3721 | 0 | uint64_t Mask = |
3722 | 0 | buildMultiCharMask(Bits, 0, Size, CharWidth, DataLayout.isBigEndian()); |
3723 | |
3724 | 0 | return Builder.CreateAnd(Src, Mask, "cmse.clear"); |
3725 | 0 | } |
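What the emitted code amounts to is a single AND of the returned bits with a constant mask covering the declared members, so padding bytes are zeroed before the value crosses the secure/non-secure boundary. A hedged example: assuming a typical 32-bit little-endian layout where struct S { char c; short s; } occupies 4 bytes with one padding byte at offset 1, the mask is 0xFFFF00FF and the clearing step looks like this in plain C++ (the struct and helper names are invented for the sketch, not taken from this file):

    #include <cstdint>
    #include <cstring>

    // Invented return type for the sketch: 4 bytes, one padding byte at offset 1
    // under the assumed 32-bit little-endian layout.
    struct S { char c; short s; };
    static_assert(sizeof(S) == sizeof(uint32_t), "sketch assumes the 4-byte layout");

    // Equivalent of the emitted "cmse.clear": view the struct's bytes as an
    // integer and zero the padding byte before returning it.
    uint32_t returnCleared(const S &Val) {
      uint32_t Bits;
      std::memcpy(&Bits, &Val, sizeof(Bits));
      return Bits & 0xFFFF00FFu; // bytes 0, 2, 3 hold members; byte 1 is padding
    }

    int main() {
      S V;
      V.c = 'x';
      V.s = 7;
      return returnCleared(V) != 0 ? 0 : 1;
    }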
3726 | | |
3727 | | // Emit code to clear the bits in a record, which aren't a part of any user |
3728 | | // declared member, when the record is a function argument. |
3729 | | llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src, |
3730 | | llvm::ArrayType *ATy, |
3731 | 0 | QualType QTy) { |
3732 | 0 | const llvm::DataLayout &DataLayout = CGM.getDataLayout(); |
3733 | 0 | int Size = DataLayout.getTypeStoreSize(ATy); |
3734 | 0 | SmallVector<uint64_t, 16> Bits(Size); |
3735 | 0 | setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits); |
3736 | | |
3737 | | // Clear each element of the LLVM array. |
3738 | 0 | int CharWidth = CGM.getContext().getCharWidth(); |
3739 | 0 | int CharsPerElt = |
3740 | 0 | ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth; |
3741 | 0 | int MaskIndex = 0; |
3742 | 0 | llvm::Value *R = llvm::PoisonValue::get(ATy); |
3743 | 0 | for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) { |
3744 | 0 | uint64_t Mask = buildMultiCharMask(Bits, MaskIndex, CharsPerElt, CharWidth, |
3745 | 0 | DataLayout.isBigEndian()); |
3746 | 0 | MaskIndex += CharsPerElt; |
3747 | 0 | llvm::Value *T0 = Builder.CreateExtractValue(Src, I); |
3748 | 0 | llvm::Value *T1 = Builder.CreateAnd(T0, Mask, "cmse.clear"); |
3749 | 0 | R = Builder.CreateInsertValue(R, T1, I); |
3750 | 0 | } |
3751 | |
3752 | 0 | return R; |
3753 | 0 | } |
3754 | | |
3755 | | void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI, |
3756 | | bool EmitRetDbgLoc, |
3757 | 0 | SourceLocation EndLoc) { |
3758 | 0 | if (FI.isNoReturn()) { |
3759 | | // Noreturn functions don't return. |
3760 | 0 | EmitUnreachable(EndLoc); |
3761 | 0 | return; |
3762 | 0 | } |
3763 | | |
3764 | 0 | if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) { |
3765 | | // Naked functions don't have epilogues. |
3766 | 0 | Builder.CreateUnreachable(); |
3767 | 0 | return; |
3768 | 0 | } |
3769 | | |
3770 | | // Functions with no result always return void. |
3771 | 0 | if (!ReturnValue.isValid()) { |
3772 | 0 | Builder.CreateRetVoid(); |
3773 | 0 | return; |
3774 | 0 | } |
3775 | | |
3776 | 0 | llvm::DebugLoc RetDbgLoc; |
3777 | 0 | llvm::Value *RV = nullptr; |
3778 | 0 | QualType RetTy = FI.getReturnType(); |
3779 | 0 | const ABIArgInfo &RetAI = FI.getReturnInfo(); |
3780 | |
3781 | 0 | switch (RetAI.getKind()) { |
3782 | 0 | case ABIArgInfo::InAlloca: |
3783 | | // Aggregates get evaluated directly into the destination. Sometimes we |
3784 | | // need to return the sret value in a register, though. |
3785 | 0 | assert(hasAggregateEvaluationKind(RetTy)); |
3786 | 0 | if (RetAI.getInAllocaSRet()) { |
3787 | 0 | llvm::Function::arg_iterator EI = CurFn->arg_end(); |
3788 | 0 | --EI; |
3789 | 0 | llvm::Value *ArgStruct = &*EI; |
3790 | 0 | llvm::Value *SRet = Builder.CreateStructGEP( |
3791 | 0 | FI.getArgStruct(), ArgStruct, RetAI.getInAllocaFieldIndex()); |
3792 | 0 | llvm::Type *Ty = |
3793 | 0 | cast<llvm::GetElementPtrInst>(SRet)->getResultElementType(); |
3794 | 0 | RV = Builder.CreateAlignedLoad(Ty, SRet, getPointerAlign(), "sret"); |
3795 | 0 | } |
3796 | 0 | break; |
3797 | | |
3798 | 0 | case ABIArgInfo::Indirect: { |
3799 | 0 | auto AI = CurFn->arg_begin(); |
3800 | 0 | if (RetAI.isSRetAfterThis()) |
3801 | 0 | ++AI; |
3802 | 0 | switch (getEvaluationKind(RetTy)) { |
3803 | 0 | case TEK_Complex: { |
3804 | 0 | ComplexPairTy RT = |
3805 | 0 | EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc); |
3806 | 0 | EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy), |
3807 | 0 | /*isInit*/ true); |
3808 | 0 | break; |
3809 | 0 | } |
3810 | 0 | case TEK_Aggregate: |
3811 | | // Do nothing; aggregates get evaluated directly into the destination. |
3812 | 0 | break; |
3813 | 0 | case TEK_Scalar: { |
3814 | 0 | LValueBaseInfo BaseInfo; |
3815 | 0 | TBAAAccessInfo TBAAInfo; |
3816 | 0 | CharUnits Alignment = |
3817 | 0 | CGM.getNaturalTypeAlignment(RetTy, &BaseInfo, &TBAAInfo); |
3818 | 0 | Address ArgAddr(&*AI, ConvertType(RetTy), Alignment); |
3819 | 0 | LValue ArgVal = |
3820 | 0 | LValue::MakeAddr(ArgAddr, RetTy, getContext(), BaseInfo, TBAAInfo); |
3821 | 0 | EmitStoreOfScalar( |
3822 | 0 | Builder.CreateLoad(ReturnValue), ArgVal, /*isInit*/ true); |
3823 | 0 | break; |
3824 | 0 | } |
3825 | 0 | } |
3826 | 0 | break; |
3827 | 0 | } |
3828 | | |
3829 | 0 | case ABIArgInfo::Extend: |
3830 | 0 | case ABIArgInfo::Direct: |
3831 | 0 | if (RetAI.getCoerceToType() == ConvertType(RetTy) && |
3832 | 0 | RetAI.getDirectOffset() == 0) { |
3833 | | // The internal return value temp always will have pointer-to-return-type |
3834 | | // type, just do a load. |
3835 | | |
3836 | | // If there is a dominating store to ReturnValue, we can elide |
3837 | | // the load, zap the store, and usually zap the alloca. |
3838 | 0 | if (llvm::StoreInst *SI = |
3839 | 0 | findDominatingStoreToReturnValue(*this)) { |
3840 | | // Reuse the debug location from the store unless there is |
3841 | | // cleanup code to be emitted between the store and return |
3842 | | // instruction. |
3843 | 0 | if (EmitRetDbgLoc && !AutoreleaseResult) |
3844 | 0 | RetDbgLoc = SI->getDebugLoc(); |
3845 | | // Get the stored value and nuke the now-dead store. |
3846 | 0 | RV = SI->getValueOperand(); |
3847 | 0 | SI->eraseFromParent(); |
3848 | | |
3849 | | // Otherwise, we have to do a simple load. |
3850 | 0 | } else { |
3851 | 0 | RV = Builder.CreateLoad(ReturnValue); |
3852 | 0 | } |
3853 | 0 | } else { |
3854 | | // If the value is offset in memory, apply the offset now. |
3855 | 0 | Address V = emitAddressAtOffset(*this, ReturnValue, RetAI); |
3856 | |
3857 | 0 | RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this); |
3858 | 0 | } |
3859 | | |
3860 | | // In ARC, end functions that return a retainable type with a call |
3861 | | // to objc_autoreleaseReturnValue. |
3862 | 0 | if (AutoreleaseResult) { |
3863 | 0 | #ifndef NDEBUG |
3864 | | // Type::isObjCRetainableType has to be called on a QualType that hasn't
3865 | | // been stripped of the typedefs, so we cannot use RetTy here. Get the |
3866 | | // original return type of FunctionDecl, CurCodeDecl, and BlockDecl from |
3867 | | // CurCodeDecl or BlockInfo. |
3868 | 0 | QualType RT; |
3869 | |
3870 | 0 | if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl)) |
3871 | 0 | RT = FD->getReturnType(); |
3872 | 0 | else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl)) |
3873 | 0 | RT = MD->getReturnType(); |
3874 | 0 | else if (isa<BlockDecl>(CurCodeDecl)) |
3875 | 0 | RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType(); |
3876 | 0 | else |
3877 | 0 | llvm_unreachable("Unexpected function/method type"); |
3878 | |
3879 | 0 | assert(getLangOpts().ObjCAutoRefCount && |
3880 | 0 | !FI.isReturnsRetained() && |
3881 | 0 | RT->isObjCRetainableType()); |
3882 | 0 | #endif |
3883 | 0 | RV = emitAutoreleaseOfResult(*this, RV); |
3884 | 0 | } |
3885 | | |
3886 | 0 | break; |
3887 | | |
3888 | 0 | case ABIArgInfo::Ignore: |
3889 | 0 | break; |
3890 | | |
3891 | 0 | case ABIArgInfo::CoerceAndExpand: { |
3892 | 0 | auto coercionType = RetAI.getCoerceAndExpandType(); |
3893 | | |
3894 | | // Load all of the coerced elements out into results. |
3895 | 0 | llvm::SmallVector<llvm::Value*, 4> results; |
3896 | 0 | Address addr = ReturnValue.withElementType(coercionType); |
3897 | 0 | for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { |
3898 | 0 | auto coercedEltType = coercionType->getElementType(i); |
3899 | 0 | if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType)) |
3900 | 0 | continue; |
3901 | | |
3902 | 0 | auto eltAddr = Builder.CreateStructGEP(addr, i); |
3903 | 0 | auto elt = Builder.CreateLoad(eltAddr); |
3904 | 0 | results.push_back(elt); |
3905 | 0 | } |
3906 | | |
3907 | | // If we have one result, it's the single direct result type. |
3908 | 0 | if (results.size() == 1) { |
3909 | 0 | RV = results[0]; |
3910 | | |
3911 | | // Otherwise, we need to make a first-class aggregate. |
3912 | 0 | } else { |
3913 | | // Construct a return type that lacks padding elements. |
3914 | 0 | llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType(); |
3915 | |
3916 | 0 | RV = llvm::PoisonValue::get(returnType); |
3917 | 0 | for (unsigned i = 0, e = results.size(); i != e; ++i) { |
3918 | 0 | RV = Builder.CreateInsertValue(RV, results[i], i); |
3919 | 0 | } |
3920 | 0 | } |
3921 | 0 | break; |
3922 | 0 | } |
3923 | 0 | case ABIArgInfo::Expand: |
3924 | 0 | case ABIArgInfo::IndirectAliased: |
3925 | 0 | llvm_unreachable("Invalid ABI kind for return argument"); |
3926 | 0 | } |
3927 | | |
3928 | 0 | llvm::Instruction *Ret; |
3929 | 0 | if (RV) { |
3930 | 0 | if (CurFuncDecl && CurFuncDecl->hasAttr<CmseNSEntryAttr>()) { |
3931 | | // For certain return types, clear padding bits, as they may reveal |
3932 | | // sensitive information. |
3933 | | // Small struct/union types are passed as integers. |
3934 | 0 | auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType()); |
3935 | 0 | if (ITy != nullptr && isa<RecordType>(RetTy.getCanonicalType())) |
3936 | 0 | RV = EmitCMSEClearRecord(RV, ITy, RetTy); |
3937 | 0 | } |
3938 | 0 | EmitReturnValueCheck(RV); |
3939 | 0 | Ret = Builder.CreateRet(RV); |
3940 | 0 | } else { |
3941 | 0 | Ret = Builder.CreateRetVoid(); |
3942 | 0 | } |
3943 | |
3944 | 0 | if (RetDbgLoc) |
3945 | 0 | Ret->setDebugLoc(std::move(RetDbgLoc)); |
3946 | 0 | } |
3947 | | |
3948 | 0 | void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) { |
3949 | | // A current decl may not be available when emitting vtable thunks. |
3950 | 0 | if (!CurCodeDecl) |
3951 | 0 | return; |
3952 | | |
3953 | | // If the return block isn't reachable, neither is this check, so don't emit |
3954 | | // it. |
3955 | 0 | if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) |
3956 | 0 | return; |
3957 | | |
3958 | 0 | ReturnsNonNullAttr *RetNNAttr = nullptr; |
3959 | 0 | if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) |
3960 | 0 | RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>(); |
3961 | |
3962 | 0 | if (!RetNNAttr && !requiresReturnValueNullabilityCheck()) |
3963 | 0 | return; |
3964 | | |
3965 | | // Prefer the returns_nonnull attribute if it's present. |
3966 | 0 | SourceLocation AttrLoc; |
3967 | 0 | SanitizerMask CheckKind; |
3968 | 0 | SanitizerHandler Handler; |
3969 | 0 | if (RetNNAttr) { |
3970 | 0 | assert(!requiresReturnValueNullabilityCheck() && |
3971 | 0 | "Cannot check nullability and the nonnull attribute"); |
3972 | 0 | AttrLoc = RetNNAttr->getLocation(); |
3973 | 0 | CheckKind = SanitizerKind::ReturnsNonnullAttribute; |
3974 | 0 | Handler = SanitizerHandler::NonnullReturn; |
3975 | 0 | } else { |
3976 | 0 | if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl)) |
3977 | 0 | if (auto *TSI = DD->getTypeSourceInfo()) |
3978 | 0 | if (auto FTL = TSI->getTypeLoc().getAsAdjusted<FunctionTypeLoc>()) |
3979 | 0 | AttrLoc = FTL.getReturnLoc().findNullabilityLoc(); |
3980 | 0 | CheckKind = SanitizerKind::NullabilityReturn; |
3981 | 0 | Handler = SanitizerHandler::NullabilityReturn; |
3982 | 0 | } |
3983 | | |
3984 | 0 | SanitizerScope SanScope(this); |
3985 | | |
3986 | | // Make sure the "return" source location is valid. If we're checking a |
3987 | | // nullability annotation, make sure the preconditions for the check are met. |
3988 | 0 | llvm::BasicBlock *Check = createBasicBlock("nullcheck"); |
3989 | 0 | llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck"); |
3990 | 0 | llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load"); |
3991 | 0 | llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr); |
3992 | 0 | if (requiresReturnValueNullabilityCheck()) |
3993 | 0 | CanNullCheck = |
3994 | 0 | Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition); |
3995 | 0 | Builder.CreateCondBr(CanNullCheck, Check, NoCheck); |
3996 | 0 | EmitBlock(Check); |
3997 | | |
3998 | | // Now do the null check. |
3999 | 0 | llvm::Value *Cond = Builder.CreateIsNotNull(RV); |
4000 | 0 | llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)}; |
4001 | 0 | llvm::Value *DynamicData[] = {SLocPtr}; |
4002 | 0 | EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData); |
4003 | |
4004 | 0 | EmitBlock(NoCheck); |
4005 | |
4006 | 0 | #ifndef NDEBUG |
4007 | | // The return location should not be used after the check has been emitted. |
4008 | 0 | ReturnLocation = Address::invalid(); |
4009 | 0 | #endif |
4010 | 0 | } |
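For reference, a hypothetical example (not from this file) of source that the check above instruments, assuming a build with -fsanitize=returns-nonnull-attribute; with -fsanitize=nullability-return the same shape applies to a _Nonnull return annotation:

    // Hypothetical translation unit; build with
    //   clang++ -fsanitize=returns-nonnull-attribute example.cpp
    __attribute__((returns_nonnull)) int *pick(int *p, bool ok) {
      return ok ? p : nullptr; // the nullptr branch is what the inserted check catches
    }

    int main() {
      int x = 0;
      int *q = pick(&x, /*ok=*/false); // sanitizer reports a null return here
      return q == nullptr;
    }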
4011 | | |
4012 | 0 | static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) { |
4013 | 0 | const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); |
4014 | 0 | return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory; |
4015 | 0 | } |
4016 | | |
4017 | | static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, |
4018 | 0 | QualType Ty) { |
4019 | | // FIXME: Generate IR in one pass, rather than going back and fixing up these |
4020 | | // placeholders. |
4021 | 0 | llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty); |
4022 | 0 | llvm::Type *IRPtrTy = llvm::PointerType::getUnqual(CGF.getLLVMContext()); |
4023 | 0 | llvm::Value *Placeholder = llvm::PoisonValue::get(IRPtrTy); |
4024 | | |
4025 | | // FIXME: When we generate this IR in one pass, we shouldn't need |
4026 | | // this win32-specific alignment hack. |
4027 | 0 | CharUnits Align = CharUnits::fromQuantity(4); |
4028 | 0 | Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align); |
4029 | |
4030 | 0 | return AggValueSlot::forAddr(Address(Placeholder, IRTy, Align), |
4031 | 0 | Ty.getQualifiers(), |
4032 | 0 | AggValueSlot::IsNotDestructed, |
4033 | 0 | AggValueSlot::DoesNotNeedGCBarriers, |
4034 | 0 | AggValueSlot::IsNotAliased, |
4035 | 0 | AggValueSlot::DoesNotOverlap); |
4036 | 0 | } |
4037 | | |
4038 | | void CodeGenFunction::EmitDelegateCallArg(CallArgList &args, |
4039 | | const VarDecl *param, |
4040 | 0 | SourceLocation loc) { |
4041 | | // StartFunction converted the ABI-lowered parameter(s) into a |
4042 | | // local alloca. We need to turn that into an r-value suitable |
4043 | | // for EmitCall. |
4044 | 0 | Address local = GetAddrOfLocalVar(param); |
4045 | |
4046 | 0 | QualType type = param->getType(); |
4047 | | |
4048 | | // GetAddrOfLocalVar returns a pointer-to-pointer for references, |
4049 | | // but the argument needs to be the original pointer. |
4050 | 0 | if (type->isReferenceType()) { |
4051 | 0 | args.add(RValue::get(Builder.CreateLoad(local)), type); |
4052 | | |
4053 | | // In ARC, move out of consumed arguments so that the release cleanup |
4054 | | // entered by StartFunction doesn't cause an over-release. This isn't |
4055 | | // optimal -O0 code generation, but it should get cleaned up when |
4056 | | // optimization is enabled. This also assumes that delegate calls are |
4057 | | // performed exactly once for a set of arguments, but that should be safe. |
4058 | 0 | } else if (getLangOpts().ObjCAutoRefCount && |
4059 | 0 | param->hasAttr<NSConsumedAttr>() && |
4060 | 0 | type->isObjCRetainableType()) { |
4061 | 0 | llvm::Value *ptr = Builder.CreateLoad(local); |
4062 | 0 | auto null = |
4063 | 0 | llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType())); |
4064 | 0 | Builder.CreateStore(null, local); |
4065 | 0 | args.add(RValue::get(ptr), type); |
4066 | | |
4067 | | // For the most part, we just need to load the alloca, except that |
4068 | | // aggregate r-values are actually pointers to temporaries. |
4069 | 0 | } else { |
4070 | 0 | args.add(convertTempToRValue(local, type, loc), type); |
4071 | 0 | } |
4072 | | |
4073 | | // Deactivate the cleanup for the callee-destructed param that was pushed. |
4074 | 0 | if (type->isRecordType() && !CurFuncIsThunk && |
4075 | 0 | type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() && |
4076 | 0 | param->needsDestruction(getContext())) { |
4077 | 0 | EHScopeStack::stable_iterator cleanup = |
4078 | 0 | CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param)); |
4079 | 0 | assert(cleanup.isValid() && |
4080 | 0 | "cleanup for callee-destructed param not recorded"); |
4081 | | // This unreachable is a temporary marker which will be removed later. |
4082 | 0 | llvm::Instruction *isActive = Builder.CreateUnreachable(); |
4083 | 0 | args.addArgCleanupDeactivation(cleanup, isActive); |
4084 | 0 | } |
4085 | 0 | } |
4086 | | |
4087 | 0 | static bool isProvablyNull(llvm::Value *addr) { |
4088 | 0 | return isa<llvm::ConstantPointerNull>(addr); |
4089 | 0 | } |
4090 | | |
4091 | | /// Emit the actual writing-back of a writeback. |
4092 | | static void emitWriteback(CodeGenFunction &CGF, |
4093 | 0 | const CallArgList::Writeback &writeback) { |
4094 | 0 | const LValue &srcLV = writeback.Source; |
4095 | 0 | Address srcAddr = srcLV.getAddress(CGF); |
4096 | 0 | assert(!isProvablyNull(srcAddr.getPointer()) && |
4097 | 0 | "shouldn't have writeback for provably null argument"); |
4098 | | |
4099 | 0 | llvm::BasicBlock *contBB = nullptr; |
4100 | | |
4101 | | // If the argument wasn't provably non-null, we need to null check |
4102 | | // before doing the store. |
4103 | 0 | bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(), |
4104 | 0 | CGF.CGM.getDataLayout()); |
4105 | 0 | if (!provablyNonNull) { |
4106 | 0 | llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback"); |
4107 | 0 | contBB = CGF.createBasicBlock("icr.done"); |
4108 | |
4109 | 0 | llvm::Value *isNull = |
4110 | 0 | CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); |
4111 | 0 | CGF.Builder.CreateCondBr(isNull, contBB, writebackBB); |
4112 | 0 | CGF.EmitBlock(writebackBB); |
4113 | 0 | } |
4114 | | |
4115 | | // Load the value to writeback. |
4116 | 0 | llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary); |
4117 | | |
4118 | | // Cast it back, in case we're writing an id to a Foo* or something. |
4119 | 0 | value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(), |
4120 | 0 | "icr.writeback-cast"); |
4121 | | |
4122 | | // Perform the writeback. |
4123 | | |
4124 | | // If we have a "to use" value, it's something we need to emit a use |
4125 | | // of. This has to be carefully threaded in: if it's done after the |
4126 | | // release it's potentially undefined behavior (and the optimizer |
4127 | | // will ignore it), and if it happens before the retain then the |
4128 | | // optimizer could move the release there. |
4129 | 0 | if (writeback.ToUse) { |
4130 | 0 | assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong); |
4131 | | |
4132 | | // Retain the new value. No need to block-copy here: the block's |
4133 | | // being passed up the stack. |
4134 | 0 | value = CGF.EmitARCRetainNonBlock(value); |
4135 | | |
4136 | | // Emit the intrinsic use here. |
4137 | 0 | CGF.EmitARCIntrinsicUse(writeback.ToUse); |
4138 | | |
4139 | | // Load the old value (primitively). |
4140 | 0 | llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation()); |
4141 | | |
4142 | | // Put the new value in place (primitively). |
4143 | 0 | CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false); |
4144 | | |
4145 | | // Release the old value. |
4146 | 0 | CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime()); |
4147 | | |
4148 | | // Otherwise, we can just do a normal lvalue store. |
4149 | 0 | } else { |
4150 | 0 | CGF.EmitStoreThroughLValue(RValue::get(value), srcLV); |
4151 | 0 | } |
4152 | | |
4153 | | // Jump to the continuation block. |
4154 | 0 | if (!provablyNonNull) |
4155 | 0 | CGF.EmitBlock(contBB); |
4156 | 0 | } |
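Stripped of the ARC retain/release bookkeeping, the shape of the emitted writeback is a null-guarded store of the temporary back through the original pointer. A minimal C++ sketch of just that control flow (illustrative names, not the emitted IR):

    // `src` plays the role of the original, possibly null, out-parameter
    // address and `temp` the icr.temp slot loaded above.
    template <typename T>
    void writeBack(T *src, const T &temp) {
      if (src != nullptr) // the icr.isnull guard around icr.writeback
        *src = temp;      // the store back through the original pointer
    }

    int main() {
      int out = 0;
      writeBack(&out, 42);         // non-null: out becomes 42
      writeBack<int>(nullptr, 42); // null: the store is skipped, as in icr.done
      return out == 42 ? 0 : 1;
    }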
4157 | | |
4158 | | static void emitWritebacks(CodeGenFunction &CGF, |
4159 | 0 | const CallArgList &args) { |
4160 | 0 | for (const auto &I : args.writebacks()) |
4161 | 0 | emitWriteback(CGF, I); |
4162 | 0 | } |
4163 | | |
4164 | | static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, |
4165 | 0 | const CallArgList &CallArgs) { |
4166 | 0 | ArrayRef<CallArgList::CallArgCleanup> Cleanups = |
4167 | 0 | CallArgs.getCleanupsToDeactivate(); |
4168 | | // Iterate in reverse to increase the likelihood of popping the cleanup. |
4169 | 0 | for (const auto &I : llvm::reverse(Cleanups)) { |
4170 | 0 | CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP); |
4171 | 0 | I.IsActiveIP->eraseFromParent(); |
4172 | 0 | } |
4173 | 0 | } |
4174 | | |
4175 | 0 | static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) { |
4176 | 0 | if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens())) |
4177 | 0 | if (uop->getOpcode() == UO_AddrOf) |
4178 | 0 | return uop->getSubExpr(); |
4179 | 0 | return nullptr; |
4180 | 0 | } |
4181 | | |
4182 | | /// Emit an argument that's being passed call-by-writeback. That is, |
4183 | | /// we are passing the address of an __autoreleased temporary; it |
4184 | | /// might be copy-initialized with the current value of the given |
4185 | | /// address, but it will definitely be copied out of after the call. |
4186 | | static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, |
4187 | 0 | const ObjCIndirectCopyRestoreExpr *CRE) { |
4188 | 0 | LValue srcLV; |
4189 | | |
4190 | | // Make an optimistic effort to emit the address as an l-value. |
4191 | | // This can fail if the argument expression is more complicated. |
4192 | 0 | if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) { |
4193 | 0 | srcLV = CGF.EmitLValue(lvExpr); |
4194 | | |
4195 | | // Otherwise, just emit it as a scalar. |
4196 | 0 | } else { |
4197 | 0 | Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr()); |
4198 | |
4199 | 0 | QualType srcAddrType = |
4200 | 0 | CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType(); |
4201 | 0 | srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType); |
4202 | 0 | } |
4203 | 0 | Address srcAddr = srcLV.getAddress(CGF); |
4204 | | |
4205 | | // The dest and src types don't necessarily match in LLVM terms |
4206 | | // because of the crazy ObjC compatibility rules. |
4207 | |
4208 | 0 | llvm::PointerType *destType = |
4209 | 0 | cast<llvm::PointerType>(CGF.ConvertType(CRE->getType())); |
4210 | 0 | llvm::Type *destElemType = |
4211 | 0 | CGF.ConvertTypeForMem(CRE->getType()->getPointeeType()); |
4212 | | |
4213 | | // If the address is a constant null, just pass the appropriate null. |
4214 | 0 | if (isProvablyNull(srcAddr.getPointer())) { |
4215 | 0 | args.add(RValue::get(llvm::ConstantPointerNull::get(destType)), |
4216 | 0 | CRE->getType()); |
4217 | 0 | return; |
4218 | 0 | } |
4219 | | |
4220 | | // Create the temporary. |
4221 | 0 | Address temp = |
4222 | 0 | CGF.CreateTempAlloca(destElemType, CGF.getPointerAlign(), "icr.temp"); |
4223 | | // Loading an l-value can introduce a cleanup if the l-value is __weak, |
4224 | | // and that cleanup will be conditional if we can't prove that the l-value |
4225 | | // isn't null, so we need to register a dominating point so that the cleanups |
4226 | | // system will make valid IR. |
4227 | 0 | CodeGenFunction::ConditionalEvaluation condEval(CGF); |
4228 | | |
4229 | | // Zero-initialize it if we're not doing a copy-initialization. |
4230 | 0 | bool shouldCopy = CRE->shouldCopy(); |
4231 | 0 | if (!shouldCopy) { |
4232 | 0 | llvm::Value *null = |
4233 | 0 | llvm::ConstantPointerNull::get(cast<llvm::PointerType>(destElemType)); |
4234 | 0 | CGF.Builder.CreateStore(null, temp); |
4235 | 0 | } |
4236 | |
4237 | 0 | llvm::BasicBlock *contBB = nullptr; |
4238 | 0 | llvm::BasicBlock *originBB = nullptr; |
4239 | | |
4240 | | // If the address is *not* known to be non-null, we need to switch. |
4241 | 0 | llvm::Value *finalArgument; |
4242 | |
|
4243 | 0 | bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(), |
4244 | 0 | CGF.CGM.getDataLayout()); |
4245 | 0 | if (provablyNonNull) { |
4246 | 0 | finalArgument = temp.getPointer(); |
4247 | 0 | } else { |
4248 | 0 | llvm::Value *isNull = |
4249 | 0 | CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); |
4250 | |
4251 | 0 | finalArgument = CGF.Builder.CreateSelect(isNull, |
4252 | 0 | llvm::ConstantPointerNull::get(destType), |
4253 | 0 | temp.getPointer(), "icr.argument"); |
4254 | | |
4255 | | // If we need to copy, then the load has to be conditional, which |
4256 | | // means we need control flow. |
4257 | 0 | if (shouldCopy) { |
4258 | 0 | originBB = CGF.Builder.GetInsertBlock(); |
4259 | 0 | contBB = CGF.createBasicBlock("icr.cont"); |
4260 | 0 | llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy"); |
4261 | 0 | CGF.Builder.CreateCondBr(isNull, contBB, copyBB); |
4262 | 0 | CGF.EmitBlock(copyBB); |
4263 | 0 | condEval.begin(CGF); |
4264 | 0 | } |
4265 | 0 | } |
4266 | |
4267 | 0 | llvm::Value *valueToUse = nullptr; |
4268 | | |
4269 | | // Perform a copy if necessary. |
4270 | 0 | if (shouldCopy) { |
4271 | 0 | RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation()); |
4272 | 0 | assert(srcRV.isScalar()); |
4273 | | |
4274 | 0 | llvm::Value *src = srcRV.getScalarVal(); |
4275 | 0 | src = CGF.Builder.CreateBitCast(src, destElemType, "icr.cast"); |
4276 | | |
4277 | | // Use an ordinary store, not a store-to-lvalue. |
4278 | 0 | CGF.Builder.CreateStore(src, temp); |
4279 | | |
4280 | | // If optimization is enabled, and the value was held in a |
4281 | | // __strong variable, we need to tell the optimizer that this |
4282 | | // value has to stay alive until we're doing the store back. |
4283 | | // This is because the temporary is effectively unretained, |
4284 | | // and so otherwise we can violate the high-level semantics. |
4285 | 0 | if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 && |
4286 | 0 | srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) { |
4287 | 0 | valueToUse = src; |
4288 | 0 | } |
4289 | 0 | } |
4290 | | |
4291 | | // Finish the control flow if we needed it. |
4292 | 0 | if (shouldCopy && !provablyNonNull) { |
4293 | 0 | llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock(); |
4294 | 0 | CGF.EmitBlock(contBB); |
4295 | | |
4296 | | // Make a phi for the value to intrinsically use. |
4297 | 0 | if (valueToUse) { |
4298 | 0 | llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2, |
4299 | 0 | "icr.to-use"); |
4300 | 0 | phiToUse->addIncoming(valueToUse, copyBB); |
4301 | 0 | phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()), |
4302 | 0 | originBB); |
4303 | 0 | valueToUse = phiToUse; |
4304 | 0 | } |
4305 | |
4306 | 0 | condEval.end(CGF); |
4307 | 0 | } |
4308 | |
4309 | 0 | args.addWriteback(srcLV, temp, valueToUse); |
4310 | 0 | args.add(RValue::get(finalArgument), CRE->getType()); |
4311 | 0 | } |
4312 | | |
4313 | 0 | void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) { |
4314 | 0 | assert(!StackBase); |
4315 | | |
4316 | | // Save the stack. |
4317 | 0 | StackBase = CGF.Builder.CreateStackSave("inalloca.save"); |
4318 | 0 | } |
4319 | | |
4320 | 0 | void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const { |
4321 | 0 | if (StackBase) { |
4322 | | // Restore the stack after the call. |
4323 | 0 | CGF.Builder.CreateStackRestore(StackBase); |
4324 | 0 | } |
4325 | 0 | } |
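The two helpers above bracket a call: save the stack pointer before the inalloca argument area is built, restore it once the call has returned. The CallArgList invokes them explicitly rather than via RAII, but the intended pairing is the same as in this small sketch (the stand-in save/restore functions are invented for illustration; the real code emits @llvm.stacksave / @llvm.stackrestore):

    #include <cstdio>

    // Stand-ins for the stack save/restore emitted above.
    static int saveStack() { std::puts("inalloca.save"); return 42; }
    static void restoreStack(int token) { std::printf("restore %d\n", token); }

    // Guard mirroring allocateArgumentMemory / freeArgumentMemory: save when
    // the argument area is set up, restore after the call returns.
    struct ArgMemoryScope {
      int Token;
      ArgMemoryScope() : Token(saveStack()) {}
      ~ArgMemoryScope() { restoreStack(Token); }
    };

    int main() {
      ArgMemoryScope Scope; // allocateArgumentMemory
      std::puts("call");    // the call that consumes the inalloca argument area
    }                       // freeArgumentMemory on scope exit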
4326 | | |
4327 | | void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType, |
4328 | | SourceLocation ArgLoc, |
4329 | | AbstractCallee AC, |
4330 | 0 | unsigned ParmNum) { |
4331 | 0 | if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) || |
4332 | 0 | SanOpts.has(SanitizerKind::NullabilityArg))) |
4333 | 0 | return; |
4334 | | |
4335 | | // The param decl may be missing in a variadic function. |
4336 | 0 | auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr; |
4337 | 0 | unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum; |
4338 | | |
4339 | | // Prefer the nonnull attribute if it's present. |
4340 | 0 | const NonNullAttr *NNAttr = nullptr; |
4341 | 0 | if (SanOpts.has(SanitizerKind::NonnullAttribute)) |
4342 | 0 | NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo); |
4343 | |
4344 | 0 | bool CanCheckNullability = false; |
4345 | 0 | if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) { |
4346 | 0 | auto Nullability = PVD->getType()->getNullability(); |
4347 | 0 | CanCheckNullability = Nullability && |
4348 | 0 | *Nullability == NullabilityKind::NonNull && |
4349 | 0 | PVD->getTypeSourceInfo(); |
4350 | 0 | } |
4351 | |
4352 | 0 | if (!NNAttr && !CanCheckNullability) |
4353 | 0 | return; |
4354 | | |
4355 | 0 | SourceLocation AttrLoc; |
4356 | 0 | SanitizerMask CheckKind; |
4357 | 0 | SanitizerHandler Handler; |
4358 | 0 | if (NNAttr) { |
4359 | 0 | AttrLoc = NNAttr->getLocation(); |
4360 | 0 | CheckKind = SanitizerKind::NonnullAttribute; |
4361 | 0 | Handler = SanitizerHandler::NonnullArg; |
4362 | 0 | } else { |
4363 | 0 | AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc(); |
4364 | 0 | CheckKind = SanitizerKind::NullabilityArg; |
4365 | 0 | Handler = SanitizerHandler::NullabilityArg; |
4366 | 0 | } |
4367 | |
4368 | 0 | SanitizerScope SanScope(this); |
4369 | 0 | llvm::Value *Cond = EmitNonNullRValueCheck(RV, ArgType); |
4370 | 0 | llvm::Constant *StaticData[] = { |
4371 | 0 | EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc), |
4372 | 0 | llvm::ConstantInt::get(Int32Ty, ArgNo + 1), |
4373 | 0 | }; |
4374 | 0 | EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, std::nullopt); |
4375 | 0 | } |
4376 | | |
4377 | | // Check if the call is going to use the inalloca convention. This needs to |
4378 | | // agree with CGFunctionInfo::usesInAlloca. The CGFunctionInfo is arranged |
4379 | | // later, so we can't check it directly. |
4380 | | static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC, |
4381 | 0 | ArrayRef<QualType> ArgTypes) { |
4382 | | // The Swift calling conventions don't go through the target-specific |
4383 | | // argument classification, they never use inalloca. |
4384 | | // TODO: Consider limiting inalloca use to only calling conventions supported |
4385 | | // by MSVC. |
4386 | 0 | if (ExplicitCC == CC_Swift || ExplicitCC == CC_SwiftAsync) |
4387 | 0 | return false; |
4388 | 0 | if (!CGM.getTarget().getCXXABI().isMicrosoft()) |
4389 | 0 | return false; |
4390 | 0 | return llvm::any_of(ArgTypes, [&](QualType Ty) { |
4391 | 0 | return isInAllocaArgument(CGM.getCXXABI(), Ty); |
4392 | 0 | }); |
4393 | 0 | } |
4394 | | |
4395 | | #ifndef NDEBUG |
4396 | | // Determine whether the given argument is an Objective-C method |
4397 | | // that may have type parameters in its signature. |
4398 | 0 | static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method) { |
4399 | 0 | const DeclContext *dc = method->getDeclContext(); |
4400 | 0 | if (const ObjCInterfaceDecl *classDecl = dyn_cast<ObjCInterfaceDecl>(dc)) { |
4401 | 0 | return classDecl->getTypeParamListAsWritten(); |
4402 | 0 | } |
4403 | | |
4404 | 0 | if (const ObjCCategoryDecl *catDecl = dyn_cast<ObjCCategoryDecl>(dc)) { |
4405 | 0 | return catDecl->getTypeParamList(); |
4406 | 0 | } |
4407 | | |
4408 | 0 | return false; |
4409 | 0 | } |
4410 | | #endif |
4411 | | |
4412 | | /// EmitCallArgs - Emit call arguments for a function. |
4413 | | void CodeGenFunction::EmitCallArgs( |
4414 | | CallArgList &Args, PrototypeWrapper Prototype, |
4415 | | llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange, |
4416 | 0 | AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) { |
4417 | 0 | SmallVector<QualType, 16> ArgTypes; |
4418 | |
4419 | 0 | assert((ParamsToSkip == 0 || Prototype.P) && |
4420 | 0 | "Can't skip parameters if type info is not provided"); |
4421 | | |
4422 | | // This variable only captures *explicitly* written conventions, not those |
4423 | | // applied by default via command line flags or target defaults, such as |
4424 | | // thiscall, aapcs, stdcall via -mrtd, etc. Computing that correctly would |
4425 | | // require knowing if this is a C++ instance method or being able to see |
4426 | | // unprototyped FunctionTypes. |
4427 | 0 | CallingConv ExplicitCC = CC_C; |
4428 | | |
4429 | | // First, if a prototype was provided, use those argument types. |
4430 | 0 | bool IsVariadic = false; |
4431 | 0 | if (Prototype.P) { |
4432 | 0 | const auto *MD = Prototype.P.dyn_cast<const ObjCMethodDecl *>(); |
4433 | 0 | if (MD) { |
4434 | 0 | IsVariadic = MD->isVariadic(); |
4435 | 0 | ExplicitCC = getCallingConventionForDecl( |
4436 | 0 | MD, CGM.getTarget().getTriple().isOSWindows()); |
4437 | 0 | ArgTypes.assign(MD->param_type_begin() + ParamsToSkip, |
4438 | 0 | MD->param_type_end()); |
4439 | 0 | } else { |
4440 | 0 | const auto *FPT = Prototype.P.get<const FunctionProtoType *>(); |
4441 | 0 | IsVariadic = FPT->isVariadic(); |
4442 | 0 | ExplicitCC = FPT->getExtInfo().getCC(); |
4443 | 0 | ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip, |
4444 | 0 | FPT->param_type_end()); |
4445 | 0 | } |
4446 | |
4447 | 0 | #ifndef NDEBUG |
4448 | | // Check that the prototyped types match the argument expression types. |
4449 | 0 | bool isGenericMethod = MD && isObjCMethodWithTypeParams(MD); |
4450 | 0 | CallExpr::const_arg_iterator Arg = ArgRange.begin(); |
4451 | 0 | for (QualType Ty : ArgTypes) { |
4452 | 0 | assert(Arg != ArgRange.end() && "Running over edge of argument list!"); |
4453 | 0 | assert( |
4454 | 0 | (isGenericMethod || Ty->isVariablyModifiedType() || |
4455 | 0 | Ty.getNonReferenceType()->isObjCRetainableType() || |
4456 | 0 | getContext() |
4457 | 0 | .getCanonicalType(Ty.getNonReferenceType()) |
4458 | 0 | .getTypePtr() == |
4459 | 0 | getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) && |
4460 | 0 | "type mismatch in call argument!"); |
4461 | 0 | ++Arg; |
4462 | 0 | } |
4463 | | |
4464 | | // Either we've emitted all the call args, or we have a call to variadic |
4465 | | // function. |
4466 | 0 | assert((Arg == ArgRange.end() || IsVariadic) && |
4467 | 0 | "Extra arguments in non-variadic function!"); |
4468 | 0 | #endif |
4469 | 0 | } |
4470 | | |
4471 | | // If we still have any arguments, emit them using the type of the argument. |
4472 | 0 | for (auto *A : llvm::drop_begin(ArgRange, ArgTypes.size())) |
4473 | 0 | ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType()); |
4474 | 0 | assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin())); |
4475 | | |
4476 | | // We must evaluate arguments from right to left in the MS C++ ABI, |
4477 | | // because arguments are destroyed left to right in the callee. As a special |
4478 | | // case, there are certain language constructs that require left-to-right |
4479 | | // evaluation, and in those cases we consider the evaluation order requirement |
4480 | | // to trump the "destruction order is reverse construction order" guarantee. |
4481 | 0 | bool LeftToRight = |
4482 | 0 | CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee() |
4483 | 0 | ? Order == EvaluationOrder::ForceLeftToRight |
4484 | 0 | : Order != EvaluationOrder::ForceRightToLeft; |
4485 | |
4486 | 0 | auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg, |
4487 | 0 | RValue EmittedArg) { |
4488 | 0 | if (!AC.hasFunctionDecl() || I >= AC.getNumParams()) |
4489 | 0 | return; |
4490 | 0 | auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>(); |
4491 | 0 | if (PS == nullptr) |
4492 | 0 | return; |
4493 | | |
4494 | 0 | const auto &Context = getContext(); |
4495 | 0 | auto SizeTy = Context.getSizeType(); |
4496 | 0 | auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy)); |
4497 | 0 | assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?"); |
4498 | 0 | llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T, |
4499 | 0 | EmittedArg.getScalarVal(), |
4500 | 0 | PS->isDynamic()); |
4501 | 0 | Args.add(RValue::get(V), SizeTy); |
4502 | | // If we're emitting args in reverse, be sure to do so with |
4503 | | // pass_object_size, as well. |
4504 | 0 | if (!LeftToRight) |
4505 | 0 | std::swap(Args.back(), *(&Args.back() - 1)); |
4506 | 0 | }; |
4507 | | |
4508 | | // Insert a stack save if we're going to need any inalloca args. |
4509 | 0 | if (hasInAllocaArgs(CGM, ExplicitCC, ArgTypes)) { |
4510 | 0 | assert(getTarget().getTriple().getArch() == llvm::Triple::x86 && |
4511 | 0 | "inalloca only supported on x86"); |
4512 | 0 | Args.allocateArgumentMemory(*this); |
4513 | 0 | } |
4514 | | |
4515 | | // Evaluate each argument in the appropriate order. |
4516 | 0 | size_t CallArgsStart = Args.size(); |
4517 | 0 | for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) { |
4518 | 0 | unsigned Idx = LeftToRight ? I : E - I - 1; |
4519 | 0 | CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx; |
4520 | 0 | unsigned InitialArgSize = Args.size(); |
4521 | | // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of |
4522 | | // the argument and parameter match or the objc method is parameterized. |
4523 | 0 | assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) || |
4524 | 0 | getContext().hasSameUnqualifiedType((*Arg)->getType(), |
4525 | 0 | ArgTypes[Idx]) || |
4526 | 0 | (isa<ObjCMethodDecl>(AC.getDecl()) && |
4527 | 0 | isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) && |
4528 | 0 | "Argument and parameter types don't match"); |
4529 | 0 | EmitCallArg(Args, *Arg, ArgTypes[Idx]); |
4530 | | // In particular, we depend on it being the last arg in Args, and the |
4531 | | // objectsize bits depend on there only being one arg if !LeftToRight. |
4532 | 0 | assert(InitialArgSize + 1 == Args.size() && |
4533 | 0 | "The code below depends on only adding one arg per EmitCallArg"); |
4534 | 0 | (void)InitialArgSize; |
4535 | | // Since pointer arguments are never emitted as LValue, it is safe to emit
4536 | | // non-null argument check for r-value only. |
4537 | 0 | if (!Args.back().hasLValue()) { |
4538 | 0 | RValue RVArg = Args.back().getKnownRValue(); |
4539 | 0 | EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC, |
4540 | 0 | ParamsToSkip + Idx); |
4541 | | // @llvm.objectsize should never have side-effects and shouldn't need |
4542 | | // destruction/cleanups, so we can safely "emit" it after its arg, |
4543 | | // regardless of right-to-leftness |
4544 | 0 | MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg); |
4545 | 0 | } |
4546 | 0 | } |
4547 | |
4548 | 0 | if (!LeftToRight) { |
4549 | | // Un-reverse the arguments we just evaluated so they match up with the LLVM |
4550 | | // IR function. |
4551 | 0 | std::reverse(Args.begin() + CallArgsStart, Args.end()); |
4552 | 0 | } |
4553 | 0 | } |
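The key trick in EmitCallArgs is that arguments may be evaluated right-to-left (as the MS C++ ABI requires) and the collected entries are then reversed, so index i still lines up with parameter i of the IR function. A standalone sketch of that evaluate-in-reverse, then un-reverse pattern (illustrative types, not the CallArgList machinery):

    #include <algorithm>
    #include <cstdio>
    #include <functional>
    #include <vector>

    int main() {
      // Thunks standing in for argument expressions with side effects.
      std::vector<std::function<int()>> argExprs = {
          [] { std::puts("evaluate arg 0"); return 10; },
          [] { std::puts("evaluate arg 1"); return 20; },
          [] { std::puts("evaluate arg 2"); return 30; },
      };

      // Evaluate right-to-left, collecting results in evaluation order.
      std::vector<int> args;
      for (auto it = argExprs.rbegin(); it != argExprs.rend(); ++it)
        args.push_back((*it)());

      // Un-reverse so args[i] is again the value for parameter i.
      std::reverse(args.begin(), args.end());
      std::printf("%d %d %d\n", args[0], args[1], args[2]); // 10 20 30
    }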
4554 | | |
4555 | | namespace { |
4556 | | |
4557 | | struct DestroyUnpassedArg final : EHScopeStack::Cleanup { |
4558 | | DestroyUnpassedArg(Address Addr, QualType Ty) |
4559 | 0 | : Addr(Addr), Ty(Ty) {} |
4560 | | |
4561 | | Address Addr; |
4562 | | QualType Ty; |
4563 | | |
4564 | 0 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
4565 | 0 | QualType::DestructionKind DtorKind = Ty.isDestructedType(); |
4566 | 0 | if (DtorKind == QualType::DK_cxx_destructor) { |
4567 | 0 | const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor(); |
4568 | 0 | assert(!Dtor->isTrivial()); |
4569 | 0 | CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false, |
4570 | 0 | /*Delegating=*/false, Addr, Ty); |
4571 | 0 | } else { |
4572 | 0 | CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty)); |
4573 | 0 | } |
4574 | 0 | } |
4575 | | }; |
4576 | | |
4577 | | struct DisableDebugLocationUpdates { |
4578 | | CodeGenFunction &CGF; |
4579 | | bool disabledDebugInfo; |
4580 | 0 | DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) { |
4581 | 0 | if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo())) |
4582 | 0 | CGF.disableDebugInfo(); |
4583 | 0 | } |
4584 | 0 | ~DisableDebugLocationUpdates() { |
4585 | 0 | if (disabledDebugInfo) |
4586 | 0 | CGF.enableDebugInfo(); |
4587 | 0 | } |
4588 | | }; |
4589 | | |
4590 | | } // end anonymous namespace |
4591 | | |
4592 | 0 | RValue CallArg::getRValue(CodeGenFunction &CGF) const { |
4593 | 0 | if (!HasLV) |
4594 | 0 | return RV; |
4595 | 0 | LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty); |
4596 | 0 | CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap, |
4597 | 0 | LV.isVolatile()); |
4598 | 0 | IsUsed = true; |
4599 | 0 | return RValue::getAggregate(Copy.getAddress(CGF)); |
4600 | 0 | } |
4601 | | |
4602 | 0 | void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const { |
4603 | 0 | LValue Dst = CGF.MakeAddrLValue(Addr, Ty); |
4604 | 0 | if (!HasLV && RV.isScalar()) |
4605 | 0 | CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*isInit=*/true); |
4606 | 0 | else if (!HasLV && RV.isComplex()) |
4607 | 0 | CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true); |
4608 | 0 | else { |
4609 | 0 | auto Addr = HasLV ? LV.getAddress(CGF) : RV.getAggregateAddress(); |
4610 | 0 | LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty); |
4611 | | // We assume that call args are never copied into subobjects. |
4612 | 0 | CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap, |
4613 | 0 | HasLV ? LV.isVolatileQualified() |
4614 | 0 | : RV.isVolatileQualified()); |
4615 | 0 | } |
4616 | 0 | IsUsed = true; |
4617 | 0 | } |
4618 | | |
4619 | | void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E, |
4620 | 0 | QualType type) { |
4621 | 0 | DisableDebugLocationUpdates Dis(*this, E); |
4622 | 0 | if (const ObjCIndirectCopyRestoreExpr *CRE |
4623 | 0 | = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) { |
4624 | 0 | assert(getLangOpts().ObjCAutoRefCount); |
4625 | 0 | return emitWritebackArg(*this, args, CRE); |
4626 | 0 | } |
4627 | | |
4628 | 0 | assert(type->isReferenceType() == E->isGLValue() && |
4629 | 0 | "reference binding to unmaterialized r-value!"); |
4630 | | |
4631 | 0 | if (E->isGLValue()) { |
4632 | 0 | assert(E->getObjectKind() == OK_Ordinary); |
4633 | 0 | return args.add(EmitReferenceBindingToExpr(E), type); |
4634 | 0 | } |
4635 | | |
4636 | 0 | bool HasAggregateEvalKind = hasAggregateEvaluationKind(type); |
4637 | | |
4638 | | // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee. |
4639 | | // However, we still have to push an EH-only cleanup in case we unwind before |
4640 | | // we make it to the call. |
4641 | 0 | if (type->isRecordType() && |
4642 | 0 | type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) { |
4643 | | // If we're using inalloca, use the argument memory. Otherwise, use a |
4644 | | // temporary. |
4645 | 0 | AggValueSlot Slot = args.isUsingInAlloca() |
4646 | 0 | ? createPlaceholderSlot(*this, type) : CreateAggTemp(type, "agg.tmp"); |
4647 | |
4648 | 0 | bool DestroyedInCallee = true, NeedsEHCleanup = true; |
4649 | 0 | if (const auto *RD = type->getAsCXXRecordDecl()) |
4650 | 0 | DestroyedInCallee = RD->hasNonTrivialDestructor(); |
4651 | 0 | else |
4652 | 0 | NeedsEHCleanup = needsEHCleanup(type.isDestructedType()); |
4653 | |
4654 | 0 | if (DestroyedInCallee) |
4655 | 0 | Slot.setExternallyDestructed(); |
4656 | |
4657 | 0 | EmitAggExpr(E, Slot); |
4658 | 0 | RValue RV = Slot.asRValue(); |
4659 | 0 | args.add(RV, type); |
4660 | |
4661 | 0 | if (DestroyedInCallee && NeedsEHCleanup) { |
4662 | | // Create a no-op GEP between the placeholder and the cleanup so we can |
4663 | | // RAUW it successfully. It also serves as a marker of the first |
4664 | | // instruction where the cleanup is active. |
4665 | 0 | pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(), |
4666 | 0 | type); |
4667 | | // This unreachable is a temporary marker which will be removed later. |
4668 | 0 | llvm::Instruction *IsActive = Builder.CreateUnreachable(); |
4669 | 0 | args.addArgCleanupDeactivation(EHStack.stable_begin(), IsActive); |
4670 | 0 | } |
4671 | 0 | return; |
4672 | 0 | } |
4673 | | |
4674 | 0 | if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) && |
4675 | 0 | cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) { |
4676 | 0 | LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr()); |
4677 | 0 | assert(L.isSimple()); |
4678 | 0 | args.addUncopiedAggregate(L, type); |
4679 | 0 | return; |
4680 | 0 | } |
4681 | | |
4682 | 0 | args.add(EmitAnyExprToTemp(E), type); |
4683 | 0 | } |
4684 | | |
4685 | 0 | QualType CodeGenFunction::getVarArgType(const Expr *Arg) { |
4686 | | // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC |
4687 | | // implicitly widens null pointer constants that are arguments to varargs |
4688 | | // functions to pointer-sized ints. |
4689 | 0 | if (!getTarget().getTriple().isOSWindows()) |
4690 | 0 | return Arg->getType(); |
4691 | | |
4692 | 0 | if (Arg->getType()->isIntegerType() && |
4693 | 0 | getContext().getTypeSize(Arg->getType()) < |
4694 | 0 | getContext().getTargetInfo().getPointerWidth(LangAS::Default) && |
4695 | 0 | Arg->isNullPointerConstant(getContext(), |
4696 | 0 | Expr::NPC_ValueDependentIsNotNull)) { |
4697 | 0 | return getContext().getIntPtrType(); |
4698 | 0 | } |
4699 | | |
4700 | 0 | return Arg->getType(); |
4701 | 0 | } |
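The widening above matters because MSVC system headers may define NULL as plain 0 (a 32-bit int), while a varargs callee on Win64 reads a full pointer-sized slot. A hedged illustration of the effect (it assumes intptr_t and pointers share a representation, as they do on Win64; the function names are invented for the sketch):

    #include <cstdarg>
    #include <cstdint>
    #include <cstdio>

    // A varargs callee that consumes one pointer-sized slot.
    void printPtr(int count, ...) {
      va_list ap;
      va_start(ap, count);
      intptr_t bits = va_arg(ap, intptr_t); // reads a full pointer-sized slot
      va_end(ap);
      std::printf("%p\n", (void *)bits);
    }

    int main() {
      // If the null constant stayed a 32-bit int, only half of the Win64 slot
      // would be well defined; widening it to intptr_t, as getVarArgType does,
      // makes the whole slot a zero.
      printPtr(1, (intptr_t)0);
      return 0;
    }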
4702 | | |
4703 | | // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC |
4704 | | // optimizer it can aggressively ignore unwind edges. |
4705 | | void |
4706 | 0 | CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) { |
4707 | 0 | if (CGM.getCodeGenOpts().OptimizationLevel != 0 && |
4708 | 0 | !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions) |
4709 | 0 | Inst->setMetadata("clang.arc.no_objc_arc_exceptions", |
4710 | 0 | CGM.getNoObjCARCExceptionsMetadata()); |
4711 | 0 | } |
4712 | | |
4713 | | /// Emits a call to the given no-arguments nounwind runtime function. |
4714 | | llvm::CallInst * |
4715 | | CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee, |
4716 | 0 | const llvm::Twine &name) { |
4717 | 0 | return EmitNounwindRuntimeCall(callee, std::nullopt, name); |
4718 | 0 | } |
4719 | | |
4720 | | /// Emits a call to the given nounwind runtime function. |
4721 | | llvm::CallInst * |
4722 | | CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee, |
4723 | | ArrayRef<llvm::Value *> args, |
4724 | 0 | const llvm::Twine &name) { |
4725 | 0 | llvm::CallInst *call = EmitRuntimeCall(callee, args, name); |
4726 | 0 | call->setDoesNotThrow(); |
4727 | 0 | return call; |
4728 | 0 | } |
4729 | | |
4730 | | /// Emits a simple call (never an invoke) to the given no-arguments |
4731 | | /// runtime function. |
4732 | | llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee, |
4733 | 0 | const llvm::Twine &name) { |
4734 | 0 | return EmitRuntimeCall(callee, std::nullopt, name); |
4735 | 0 | } |
4736 | | |
4737 | | // Calls which may throw must have operand bundles indicating which funclet |
4738 | | // they are nested within. |
4739 | | SmallVector<llvm::OperandBundleDef, 1> |
4740 | 0 | CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) { |
4741 | | // There is no need for a funclet operand bundle if we aren't inside a |
4742 | | // funclet. |
4743 | 0 | if (!CurrentFuncletPad) |
4744 | 0 | return (SmallVector<llvm::OperandBundleDef, 1>()); |
4745 | | |
4746 | | // Skip intrinsics which cannot throw (as long as they don't lower into |
4747 | | // regular function calls in the course of IR transformations). |
4748 | 0 | if (auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts())) { |
4749 | 0 | if (CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) { |
4750 | 0 | auto IID = CalleeFn->getIntrinsicID(); |
4751 | 0 | if (!llvm::IntrinsicInst::mayLowerToFunctionCall(IID)) |
4752 | 0 | return (SmallVector<llvm::OperandBundleDef, 1>()); |
4753 | 0 | } |
4754 | 0 | } |
4755 | | |
4756 | 0 | SmallVector<llvm::OperandBundleDef, 1> BundleList; |
4757 | 0 | BundleList.emplace_back("funclet", CurrentFuncletPad); |
4758 | 0 | return BundleList; |
4759 | 0 | } |
4760 | | |
4761 | | /// Emits a simple call (never an invoke) to the given runtime function. |
4762 | | llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee, |
4763 | | ArrayRef<llvm::Value *> args, |
4764 | 0 | const llvm::Twine &name) { |
4765 | 0 | llvm::CallInst *call = Builder.CreateCall( |
4766 | 0 | callee, args, getBundlesForFunclet(callee.getCallee()), name); |
4767 | 0 | call->setCallingConv(getRuntimeCC()); |
4768 | 0 | return call; |
4769 | 0 | } |
4770 | | |
4771 | | /// Emits a call or invoke to the given noreturn runtime function. |
4772 | | void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke( |
4773 | 0 | llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) { |
4774 | 0 | SmallVector<llvm::OperandBundleDef, 1> BundleList = |
4775 | 0 | getBundlesForFunclet(callee.getCallee()); |
4776 | |
4777 | 0 | if (getInvokeDest()) { |
4778 | 0 | llvm::InvokeInst *invoke = |
4779 | 0 | Builder.CreateInvoke(callee, |
4780 | 0 | getUnreachableBlock(), |
4781 | 0 | getInvokeDest(), |
4782 | 0 | args, |
4783 | 0 | BundleList); |
4784 | 0 | invoke->setDoesNotReturn(); |
4785 | 0 | invoke->setCallingConv(getRuntimeCC()); |
4786 | 0 | } else { |
4787 | 0 | llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList); |
4788 | 0 | call->setDoesNotReturn(); |
4789 | 0 | call->setCallingConv(getRuntimeCC()); |
4790 | 0 | Builder.CreateUnreachable(); |
4791 | 0 | } |
4792 | 0 | } |
4793 | | |
4794 | | /// Emits a call or invoke instruction to the given nullary runtime function. |
4795 | | llvm::CallBase * |
4796 | | CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, |
4797 | 0 | const Twine &name) { |
4798 | 0 | return EmitRuntimeCallOrInvoke(callee, std::nullopt, name); |
4799 | 0 | } |
4800 | | |
4801 | | /// Emits a call or invoke instruction to the given runtime function. |
4802 | | llvm::CallBase * |
4803 | | CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, |
4804 | | ArrayRef<llvm::Value *> args, |
4805 | 0 | const Twine &name) { |
4806 | 0 | llvm::CallBase *call = EmitCallOrInvoke(callee, args, name); |
4807 | 0 | call->setCallingConv(getRuntimeCC()); |
4808 | 0 | return call; |
4809 | 0 | } |
4810 | | |
4811 | | /// Emits a call or invoke instruction to the given function, depending |
4812 | | /// on the current state of the EH stack. |
4813 | | llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee, |
4814 | | ArrayRef<llvm::Value *> Args, |
4815 | 0 | const Twine &Name) { |
4816 | 0 | llvm::BasicBlock *InvokeDest = getInvokeDest(); |
4817 | 0 | SmallVector<llvm::OperandBundleDef, 1> BundleList = |
4818 | 0 | getBundlesForFunclet(Callee.getCallee()); |
4819 | |
4820 | 0 | llvm::CallBase *Inst; |
4821 | 0 | if (!InvokeDest) |
4822 | 0 | Inst = Builder.CreateCall(Callee, Args, BundleList, Name); |
4823 | 0 | else { |
4824 | 0 | llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont"); |
4825 | 0 | Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList, |
4826 | 0 | Name); |
4827 | 0 | EmitBlock(ContBB); |
4828 | 0 | } |
4829 | | |
4830 | | // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC |
4831 | | // optimizer it can aggressively ignore unwind edges. |
4832 | 0 | if (CGM.getLangOpts().ObjCAutoRefCount) |
4833 | 0 | AddObjCARCExceptionMetadata(Inst); |
4834 | |
4835 | 0 | return Inst; |
4836 | 0 | } |
4837 | | |
4838 | | void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old, |
4839 | 0 | llvm::Value *New) { |
4840 | 0 | DeferredReplacements.push_back( |
4841 | 0 | std::make_pair(llvm::WeakTrackingVH(Old), New)); |
4842 | 0 | } |
4843 | | |
4844 | | namespace { |
4845 | | |
4846 | | /// Specify given \p NewAlign as the alignment of return value attribute. If |
4847 | | /// such attribute already exists, re-set it to the maximal one of two options. |
4848 | | [[nodiscard]] llvm::AttributeList |
4849 | | maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx, |
4850 | | const llvm::AttributeList &Attrs, |
4851 | 0 | llvm::Align NewAlign) { |
4852 | 0 | llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne(); |
4853 | 0 | if (CurAlign >= NewAlign) |
4854 | 0 | return Attrs; |
4855 | 0 | llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign); |
4856 | 0 | return Attrs.removeRetAttribute(Ctx, llvm::Attribute::AttrKind::Alignment) |
4857 | 0 | .addRetAttribute(Ctx, AlignAttr); |
4858 | 0 | } |
4859 | | |
4860 | | template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter { |
4861 | | protected: |
4862 | | CodeGenFunction &CGF; |
4863 | | |
4864 | | /// We do nothing if this is, or becomes, nullptr. |
4865 | | const AlignedAttrTy *AA = nullptr; |
4866 | | |
4867 | | llvm::Value *Alignment = nullptr; // May or may not be a constant. |
4868 | | llvm::ConstantInt *OffsetCI = nullptr; // Constant, hopefully zero. |
4869 | | |
4870 | | AbstractAssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl) |
4871 | 0 | : CGF(CGF_) { |
4872 | 0 | if (!FuncDecl) |
4873 | 0 | return; |
4874 | 0 | AA = FuncDecl->getAttr<AlignedAttrTy>(); |
4875 | 0 | }
  Unexecuted instantiation: CGCall.cpp:(anonymous namespace)::AbstractAssumeAlignedAttrEmitter<clang::AssumeAlignedAttr>::AbstractAssumeAlignedAttrEmitter(clang::CodeGen::CodeGenFunction&, clang::Decl const*)
  Unexecuted instantiation: CGCall.cpp:(anonymous namespace)::AbstractAssumeAlignedAttrEmitter<clang::AllocAlignAttr>::AbstractAssumeAlignedAttrEmitter(clang::CodeGen::CodeGenFunction&, clang::Decl const*)
4876 | | |
4877 | | public: |
4878 | | /// If we can, materialize the alignment as an attribute on return value. |
4879 | | [[nodiscard]] llvm::AttributeList |
4880 | 0 | TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) { |
4881 | 0 | if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment)) |
4882 | 0 | return Attrs; |
4883 | 0 | const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment); |
4884 | 0 | if (!AlignmentCI) |
4885 | 0 | return Attrs; |
4886 | | // We may legitimately have non-power-of-2 alignment here. |
4887 | | // If so, this is UB land; emit it via `@llvm.assume` instead.
4888 | 0 | if (!AlignmentCI->getValue().isPowerOf2()) |
4889 | 0 | return Attrs; |
4890 | 0 | llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute( |
4891 | 0 | CGF.getLLVMContext(), Attrs, |
4892 | 0 | llvm::Align( |
4893 | 0 | AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment))); |
4894 | 0 | AA = nullptr; // We're done. Disallow doing anything else. |
4895 | 0 | return NewAttrs; |
4896 | 0 | }
  Unexecuted instantiation: CGCall.cpp:(anonymous namespace)::AbstractAssumeAlignedAttrEmitter<clang::AssumeAlignedAttr>::TryEmitAsCallSiteAttribute(llvm::AttributeList const&)
  Unexecuted instantiation: CGCall.cpp:(anonymous namespace)::AbstractAssumeAlignedAttrEmitter<clang::AllocAlignAttr>::TryEmitAsCallSiteAttribute(llvm::AttributeList const&)
4897 | | |
4898 | | /// Emit alignment assumption. |
4899 | | /// This is a general fallback that we take if there is an offset, the
4900 | | /// alignment is variable, or we are sanitizing for alignment.
4901 | 0 | void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) { |
4902 | 0 | if (!AA) |
4903 | 0 | return; |
4904 | 0 | CGF.emitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, |
4905 | 0 | AA->getLocation(), Alignment, OffsetCI); |
4906 | 0 | AA = nullptr; // We're done. Disallow doing anything else. |
4907 | 0 | }
  Unexecuted instantiation: CGCall.cpp:(anonymous namespace)::AbstractAssumeAlignedAttrEmitter<clang::AssumeAlignedAttr>::EmitAsAnAssumption(clang::SourceLocation, clang::QualType, clang::CodeGen::RValue&)
  Unexecuted instantiation: CGCall.cpp:(anonymous namespace)::AbstractAssumeAlignedAttrEmitter<clang::AllocAlignAttr>::EmitAsAnAssumption(clang::SourceLocation, clang::QualType, clang::CodeGen::RValue&)
4908 | | }; |
4909 | | |
4910 | | /// Helper data structure to emit `AssumeAlignedAttr`. |
4911 | | class AssumeAlignedAttrEmitter final |
4912 | | : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> { |
4913 | | public: |
4914 | | AssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl) |
4915 | 0 | : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) { |
4916 | 0 | if (!AA) |
4917 | 0 | return; |
4918 | | // It is guaranteed that the alignment/offset are constants. |
4919 | 0 | Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment())); |
4920 | 0 | if (Expr *Offset = AA->getOffset()) { |
4921 | 0 | OffsetCI = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(Offset)); |
4922 | 0 | if (OffsetCI->isNullValue()) // Canonicalize zero offset to no offset. |
4923 | 0 | OffsetCI = nullptr; |
4924 | 0 | } |
4925 | 0 | } |
4926 | | }; |
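As a hedged illustration of what this emitter consumes, the declarations below (names invented) use the GCC/Clang `assume_aligned` attribute. A constant power-of-two alignment with no offset can become a call-site return `align` attribute; the offset form falls back to the `@llvm.assume`-based path.

    #include <cstddef>

    // Result is assumed to be 64-byte aligned.
    void *arena_alloc(std::size_t n) __attribute__((assume_aligned(64)));

    // Result is 16 bytes past a 64-byte-aligned address, so only the
    // assumption form (not a plain 'align' return attribute) applies.
    void *arena_alloc_hdr(std::size_t n) __attribute__((assume_aligned(64, 16)));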
4927 | | |
4928 | | /// Helper data structure to emit `AllocAlignAttr`. |
4929 | | class AllocAlignAttrEmitter final |
4930 | | : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> { |
4931 | | public: |
4932 | | AllocAlignAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl, |
4933 | | const CallArgList &CallArgs) |
4934 | 0 | : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) { |
4935 | 0 | if (!AA) |
4936 | 0 | return; |
4937 | | // Alignment may or may not be a constant, and that is okay. |
4938 | 0 | Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()] |
4939 | 0 | .getRValue(CGF) |
4940 | 0 | .getScalarVal(); |
4941 | 0 | } |
4942 | | }; |
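For comparison, a sketch of a declaration carrying `alloc_align` (function name invented): the alignment comes from an argument and may only be known at run time, which is why this emitter reads it out of `CallArgs`.

    #include <cstddef>

    // The first parameter (1-based index) holds the returned pointer's alignment.
    void *aligned_arena_alloc(std::size_t alignment, std::size_t size)
        __attribute__((alloc_align(1)));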
4943 | | |
4944 | | } // namespace |
4945 | | |
4946 | 0 | static unsigned getMaxVectorWidth(const llvm::Type *Ty) { |
4947 | 0 | if (auto *VT = dyn_cast<llvm::VectorType>(Ty)) |
4948 | 0 | return VT->getPrimitiveSizeInBits().getKnownMinValue(); |
4949 | 0 | if (auto *AT = dyn_cast<llvm::ArrayType>(Ty)) |
4950 | 0 | return getMaxVectorWidth(AT->getElementType()); |
4951 | | |
4952 | 0 | unsigned MaxVectorWidth = 0; |
4953 | 0 | if (auto *ST = dyn_cast<llvm::StructType>(Ty)) |
4954 | 0 | for (auto *I : ST->elements()) |
4955 | 0 | MaxVectorWidth = std::max(MaxVectorWidth, getMaxVectorWidth(I)); |
4956 | 0 | return MaxVectorWidth; |
4957 | 0 | } |
4958 | | |
4959 | | RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, |
4960 | | const CGCallee &Callee, |
4961 | | ReturnValueSlot ReturnValue, |
4962 | | const CallArgList &CallArgs, |
4963 | | llvm::CallBase **callOrInvoke, bool IsMustTail, |
4964 | 0 | SourceLocation Loc) { |
4965 | | // FIXME: We no longer need the types from CallArgs; lift up and simplify. |
4966 | |
|
4967 | 0 | assert(Callee.isOrdinary() || Callee.isVirtual()); |
4968 | | |
4969 | | // Handle struct-return functions by passing a pointer to the |
4970 | | // location that we would like to return into. |
4971 | 0 | QualType RetTy = CallInfo.getReturnType(); |
4972 | 0 | const ABIArgInfo &RetAI = CallInfo.getReturnInfo(); |
4973 | |
|
4974 | 0 | llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo); |
4975 | |
|
4976 | 0 | const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl(); |
4977 | 0 | if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) { |
4978 | | // We can only guarantee that a function is called from the correct
4979 | | // context/function based on the appropriate target attributes, so only
4980 | | // check when we have both always_inline and target attributes;
4981 | | // otherwise we could be making a conditional call after a check for
4982 | | // the proper cpu features (and it won't cause code generation issues due
4983 | | // to function-based code generation).
4984 | 0 | if (TargetDecl->hasAttr<AlwaysInlineAttr>() && |
4985 | 0 | (TargetDecl->hasAttr<TargetAttr>() || |
4986 | 0 | (CurFuncDecl && CurFuncDecl->hasAttr<TargetAttr>()))) |
4987 | 0 | checkTargetFeatures(Loc, FD); |
4988 | | |
4989 | | // Some architectures (such as x86-64) have the ABI changed based on |
4990 | | // attribute-target/features. Give them a chance to diagnose. |
4991 | 0 | CGM.getTargetCodeGenInfo().checkFunctionCallABI( |
4992 | 0 | CGM, Loc, dyn_cast_or_null<FunctionDecl>(CurCodeDecl), FD, CallArgs); |
4993 | 0 | } |
4994 | | |
4995 | | // 1. Set up the arguments. |
4996 | | |
4997 | | // If we're using inalloca, insert the allocation after the stack save. |
4998 | | // FIXME: Do this earlier rather than hacking it in here! |
4999 | 0 | Address ArgMemory = Address::invalid(); |
5000 | 0 | if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) { |
5001 | 0 | const llvm::DataLayout &DL = CGM.getDataLayout(); |
5002 | 0 | llvm::Instruction *IP = CallArgs.getStackBase(); |
5003 | 0 | llvm::AllocaInst *AI; |
5004 | 0 | if (IP) { |
5005 | 0 | IP = IP->getNextNode(); |
5006 | 0 | AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(), |
5007 | 0 | "argmem", IP); |
5008 | 0 | } else { |
5009 | 0 | AI = CreateTempAlloca(ArgStruct, "argmem"); |
5010 | 0 | } |
5011 | 0 | auto Align = CallInfo.getArgStructAlignment(); |
5012 | 0 | AI->setAlignment(Align.getAsAlign()); |
5013 | 0 | AI->setUsedWithInAlloca(true); |
5014 | 0 | assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca()); |
5015 | 0 | ArgMemory = Address(AI, ArgStruct, Align); |
5016 | 0 | } |
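A hedged sketch of source code that typically reaches this inalloca path, assuming an i686-pc-windows-msvc target (type and function names invented): a by-value argument with a non-trivial copy constructor is constructed directly into the single "argmem" allocation set up above.

    struct Widget {
      Widget(int i) : id(i) {}
      Widget(const Widget &other) : id(other.id) {} // user-provided: non-trivial
      int id;
    };

    void consume(Widget w); // defined elsewhere

    void caller(const Widget &w) {
      consume(w); // the copy of 'w' is built in the inalloca argument area
    }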
5017 | | |
5018 | 0 | ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo); |
5019 | 0 | SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs()); |
5020 | | |
5021 | | // If the call returns a temporary with struct return, create a temporary |
5022 | | // alloca to hold the result, unless one is given to us. |
5023 | 0 | Address SRetPtr = Address::invalid(); |
5024 | 0 | Address SRetAlloca = Address::invalid(); |
5025 | 0 | llvm::Value *UnusedReturnSizePtr = nullptr; |
5026 | 0 | if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) { |
5027 | 0 | if (!ReturnValue.isNull()) { |
5028 | 0 | SRetPtr = ReturnValue.getValue(); |
5029 | 0 | } else { |
5030 | 0 | SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca); |
5031 | 0 | if (HaveInsertPoint() && ReturnValue.isUnused()) { |
5032 | 0 | llvm::TypeSize size = |
5033 | 0 | CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy)); |
5034 | 0 | UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer()); |
5035 | 0 | } |
5036 | 0 | } |
5037 | 0 | if (IRFunctionArgs.hasSRetArg()) { |
5038 | 0 | IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer(); |
5039 | 0 | } else if (RetAI.isInAlloca()) { |
5040 | 0 | Address Addr = |
5041 | 0 | Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex()); |
5042 | 0 | Builder.CreateStore(SRetPtr.getPointer(), Addr); |
5043 | 0 | } |
5044 | 0 | } |
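A small example of a call that commonly takes the indirect (sret) return path above on mainstream C++ ABIs (names invented): a large aggregate returned by value comes back through a hidden pointer to caller-provided storage.

    struct Big { long parts[8]; };

    Big makeBig(); // defined elsewhere

    long useBig() {
      Big b = makeBig(); // 'b' (or an unused temporary) backs the sret slot
      return b.parts[0];
    }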
5045 | |
|
5046 | 0 | Address swiftErrorTemp = Address::invalid(); |
5047 | 0 | Address swiftErrorArg = Address::invalid(); |
5048 | | |
5049 | | // When passing arguments using temporary allocas, we need to add the |
5050 | | // appropriate lifetime markers. This vector keeps track of all the lifetime |
5051 | | // markers that need to be ended right after the call. |
5052 | 0 | SmallVector<CallLifetimeEnd, 2> CallLifetimeEndAfterCall; |
5053 | | |
5054 | | // Translate all of the arguments as necessary to match the IR lowering. |
5055 | 0 | assert(CallInfo.arg_size() == CallArgs.size() && |
5056 | 0 | "Mismatch between function signature & arguments."); |
5057 | 0 | unsigned ArgNo = 0; |
5058 | 0 | CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin(); |
5059 | 0 | for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end(); |
5060 | 0 | I != E; ++I, ++info_it, ++ArgNo) { |
5061 | 0 | const ABIArgInfo &ArgInfo = info_it->info; |
5062 | | |
5063 | | // Insert a padding argument to ensure proper alignment. |
5064 | 0 | if (IRFunctionArgs.hasPaddingArg(ArgNo)) |
5065 | 0 | IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] = |
5066 | 0 | llvm::UndefValue::get(ArgInfo.getPaddingType()); |
5067 | |
|
5068 | 0 | unsigned FirstIRArg, NumIRArgs; |
5069 | 0 | std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); |
5070 | |
|
5071 | 0 | bool ArgHasMaybeUndefAttr = |
5072 | 0 | IsArgumentMaybeUndef(TargetDecl, CallInfo.getNumRequiredArgs(), ArgNo); |
5073 | |
|
5074 | 0 | switch (ArgInfo.getKind()) { |
5075 | 0 | case ABIArgInfo::InAlloca: { |
5076 | 0 | assert(NumIRArgs == 0); |
5077 | 0 | assert(getTarget().getTriple().getArch() == llvm::Triple::x86); |
5078 | 0 | if (I->isAggregate()) { |
5079 | 0 | Address Addr = I->hasLValue() |
5080 | 0 | ? I->getKnownLValue().getAddress(*this) |
5081 | 0 | : I->getKnownRValue().getAggregateAddress(); |
5082 | 0 | llvm::Instruction *Placeholder = |
5083 | 0 | cast<llvm::Instruction>(Addr.getPointer()); |
5084 | |
|
5085 | 0 | if (!ArgInfo.getInAllocaIndirect()) { |
5086 | | // Replace the placeholder with the appropriate argument slot GEP. |
5087 | 0 | CGBuilderTy::InsertPoint IP = Builder.saveIP(); |
5088 | 0 | Builder.SetInsertPoint(Placeholder); |
5089 | 0 | Addr = Builder.CreateStructGEP(ArgMemory, |
5090 | 0 | ArgInfo.getInAllocaFieldIndex()); |
5091 | 0 | Builder.restoreIP(IP); |
5092 | 0 | } else { |
5093 | | // For indirect things such as overaligned structs, replace the |
5094 | | // placeholder with a regular aggregate temporary alloca. Store the |
5095 | | // address of this alloca into the struct. |
5096 | 0 | Addr = CreateMemTemp(info_it->type, "inalloca.indirect.tmp"); |
5097 | 0 | Address ArgSlot = Builder.CreateStructGEP( |
5098 | 0 | ArgMemory, ArgInfo.getInAllocaFieldIndex()); |
5099 | 0 | Builder.CreateStore(Addr.getPointer(), ArgSlot); |
5100 | 0 | } |
5101 | 0 | deferPlaceholderReplacement(Placeholder, Addr.getPointer()); |
5102 | 0 | } else if (ArgInfo.getInAllocaIndirect()) { |
5103 | | // Make a temporary alloca and store the address of it into the argument |
5104 | | // struct. |
5105 | 0 | Address Addr = CreateMemTempWithoutCast( |
5106 | 0 | I->Ty, getContext().getTypeAlignInChars(I->Ty), |
5107 | 0 | "indirect-arg-temp"); |
5108 | 0 | I->copyInto(*this, Addr); |
5109 | 0 | Address ArgSlot = |
5110 | 0 | Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex()); |
5111 | 0 | Builder.CreateStore(Addr.getPointer(), ArgSlot); |
5112 | 0 | } else { |
5113 | | // Store the RValue into the argument struct. |
5114 | 0 | Address Addr = |
5115 | 0 | Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex()); |
5116 | 0 | Addr = Addr.withElementType(ConvertTypeForMem(I->Ty)); |
5117 | 0 | I->copyInto(*this, Addr); |
5118 | 0 | } |
5119 | 0 | break; |
5120 | 0 | } |
5121 | | |
5122 | 0 | case ABIArgInfo::Indirect: |
5123 | 0 | case ABIArgInfo::IndirectAliased: { |
5124 | 0 | assert(NumIRArgs == 1); |
5125 | 0 | if (!I->isAggregate()) { |
5126 | | // Make a temporary alloca to pass the argument. |
5127 | 0 | Address Addr = CreateMemTempWithoutCast( |
5128 | 0 | I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp"); |
5129 | |
|
5130 | 0 | llvm::Value *Val = Addr.getPointer(); |
5131 | 0 | if (ArgHasMaybeUndefAttr) |
5132 | 0 | Val = Builder.CreateFreeze(Addr.getPointer()); |
5133 | 0 | IRCallArgs[FirstIRArg] = Val; |
5134 | |
|
5135 | 0 | I->copyInto(*this, Addr); |
5136 | 0 | } else { |
5137 | | // We want to avoid creating an unnecessary temporary+copy here; |
5138 | | // however, we need one in three cases: |
5139 | | // 1. If the argument is not byval, and we are required to copy the |
5140 | | // source. (This case doesn't occur on any common architecture.) |
5141 | | // 2. If the argument is byval, RV is not sufficiently aligned, and |
5142 | | // we cannot force it to be sufficiently aligned. |
5143 | | // 3. If the argument is byval, but RV is not located in default |
5144 | | // or alloca address space. |
5145 | 0 | Address Addr = I->hasLValue() |
5146 | 0 | ? I->getKnownLValue().getAddress(*this) |
5147 | 0 | : I->getKnownRValue().getAggregateAddress(); |
5148 | 0 | llvm::Value *V = Addr.getPointer(); |
5149 | 0 | CharUnits Align = ArgInfo.getIndirectAlign(); |
5150 | 0 | const llvm::DataLayout *TD = &CGM.getDataLayout(); |
5151 | |
|
5152 | 0 | assert((FirstIRArg >= IRFuncTy->getNumParams() || |
5153 | 0 | IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() == |
5154 | 0 | TD->getAllocaAddrSpace()) && |
5155 | 0 | "indirect argument must be in alloca address space"); |
5156 | | |
5157 | 0 | bool NeedCopy = false; |
5158 | 0 | if (Addr.getAlignment() < Align && |
5159 | 0 | llvm::getOrEnforceKnownAlignment(V, Align.getAsAlign(), *TD) < |
5160 | 0 | Align.getAsAlign()) { |
5161 | 0 | NeedCopy = true; |
5162 | 0 | } else if (I->hasLValue()) { |
5163 | 0 | auto LV = I->getKnownLValue(); |
5164 | 0 | auto AS = LV.getAddressSpace(); |
5165 | |
|
5166 | 0 | bool isByValOrRef = |
5167 | 0 | ArgInfo.isIndirectAliased() || ArgInfo.getIndirectByVal(); |
5168 | |
|
5169 | 0 | if (!isByValOrRef || |
5170 | 0 | (LV.getAlignment() < getContext().getTypeAlignInChars(I->Ty))) { |
5171 | 0 | NeedCopy = true; |
5172 | 0 | } |
5173 | 0 | if (!getLangOpts().OpenCL) { |
5174 | 0 | if ((isByValOrRef && |
5175 | 0 | (AS != LangAS::Default && |
5176 | 0 | AS != CGM.getASTAllocaAddressSpace()))) { |
5177 | 0 | NeedCopy = true; |
5178 | 0 | } |
5179 | 0 | } |
5180 | | // For OpenCL, even if RV is located in the default or alloca address
5181 | | // space, we don't want to perform an address space cast for it.
5182 | 0 | else if ((isByValOrRef && |
5183 | 0 | Addr.getType()->getAddressSpace() != IRFuncTy-> |
5184 | 0 | getParamType(FirstIRArg)->getPointerAddressSpace())) { |
5185 | 0 | NeedCopy = true; |
5186 | 0 | } |
5187 | 0 | } |
5188 | |
|
5189 | 0 | if (NeedCopy) { |
5190 | | // Create an aligned temporary, and copy to it. |
5191 | 0 | Address AI = CreateMemTempWithoutCast( |
5192 | 0 | I->Ty, ArgInfo.getIndirectAlign(), "byval-temp"); |
5193 | 0 | llvm::Value *Val = AI.getPointer(); |
5194 | 0 | if (ArgHasMaybeUndefAttr) |
5195 | 0 | Val = Builder.CreateFreeze(AI.getPointer()); |
5196 | 0 | IRCallArgs[FirstIRArg] = Val; |
5197 | | |
5198 | | // Emit lifetime markers for the temporary alloca. |
5199 | 0 | llvm::TypeSize ByvalTempElementSize = |
5200 | 0 | CGM.getDataLayout().getTypeAllocSize(AI.getElementType()); |
5201 | 0 | llvm::Value *LifetimeSize = |
5202 | 0 | EmitLifetimeStart(ByvalTempElementSize, AI.getPointer()); |
5203 | | |
5204 | | // Add cleanup code to emit the end lifetime marker after the call. |
5205 | 0 | if (LifetimeSize) // In case we disabled lifetime markers. |
5206 | 0 | CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize); |
5207 | | |
5208 | | // Generate the copy. |
5209 | 0 | I->copyInto(*this, AI); |
5210 | 0 | } else { |
5211 | | // Skip the extra memcpy call. |
5212 | 0 | auto *T = llvm::PointerType::get( |
5213 | 0 | CGM.getLLVMContext(), CGM.getDataLayout().getAllocaAddrSpace()); |
5214 | |
|
5215 | 0 | llvm::Value *Val = getTargetHooks().performAddrSpaceCast( |
5216 | 0 | *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T, |
5217 | 0 | true); |
5218 | 0 | if (ArgHasMaybeUndefAttr) |
5219 | 0 | Val = Builder.CreateFreeze(Val); |
5220 | 0 | IRCallArgs[FirstIRArg] = Val; |
5221 | 0 | } |
5222 | 0 | } |
5223 | 0 | break; |
5224 | 0 | } |
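Sketch of an argument that plausibly takes the indirect path above, assuming a target that passes large aggregates in memory (names invented): the callee receives a pointer to a caller-owned copy, and the "byval-temp" branch covers the case where a fresh, sufficiently aligned temporary has to be created.

    struct Matrix { double m[16]; };

    void transform(Matrix by_value); // defined elsewhere

    void callTransform(const Matrix &src) {
      transform(src); // may emit a copy into a "byval-temp" before the call
    }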
5225 | | |
5226 | 0 | case ABIArgInfo::Ignore: |
5227 | 0 | assert(NumIRArgs == 0); |
5228 | 0 | break; |
5229 | | |
5230 | 0 | case ABIArgInfo::Extend: |
5231 | 0 | case ABIArgInfo::Direct: { |
5232 | 0 | if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) && |
5233 | 0 | ArgInfo.getCoerceToType() == ConvertType(info_it->type) && |
5234 | 0 | ArgInfo.getDirectOffset() == 0) { |
5235 | 0 | assert(NumIRArgs == 1); |
5236 | 0 | llvm::Value *V; |
5237 | 0 | if (!I->isAggregate()) |
5238 | 0 | V = I->getKnownRValue().getScalarVal(); |
5239 | 0 | else |
5240 | 0 | V = Builder.CreateLoad( |
5241 | 0 | I->hasLValue() ? I->getKnownLValue().getAddress(*this) |
5242 | 0 | : I->getKnownRValue().getAggregateAddress()); |
5243 | | |
5244 | | // Implement swifterror by copying into a new swifterror argument. |
5245 | | // We'll write back in the normal path out of the call. |
5246 | 0 | if (CallInfo.getExtParameterInfo(ArgNo).getABI() |
5247 | 0 | == ParameterABI::SwiftErrorResult) { |
5248 | 0 | assert(!swiftErrorTemp.isValid() && "multiple swifterror args"); |
5249 | | |
5250 | 0 | QualType pointeeTy = I->Ty->getPointeeType(); |
5251 | 0 | swiftErrorArg = Address(V, ConvertTypeForMem(pointeeTy), |
5252 | 0 | getContext().getTypeAlignInChars(pointeeTy)); |
5253 | |
|
5254 | 0 | swiftErrorTemp = |
5255 | 0 | CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); |
5256 | 0 | V = swiftErrorTemp.getPointer(); |
5257 | 0 | cast<llvm::AllocaInst>(V)->setSwiftError(true); |
5258 | |
|
5259 | 0 | llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg); |
5260 | 0 | Builder.CreateStore(errorValue, swiftErrorTemp); |
5261 | 0 | } |
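A hedged sketch of a declaration that exercises the SwiftErrorResult handling above, using the attribute spellings documented for Clang's Swift calling-convention support (the struct and function names are invented): the caller's error slot is copied into a fresh swifterror alloca before the call and written back afterwards.

    struct SwiftError; // opaque error type for the sketch

    __attribute__((swiftcall)) void
    mayFail(int value, void *ctx __attribute__((swift_context)),
            SwiftError **error __attribute__((swift_error_result)));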
5262 | | |
5263 | | // We might have to widen integers, but we should never truncate. |
5264 | 0 | if (ArgInfo.getCoerceToType() != V->getType() && |
5265 | 0 | V->getType()->isIntegerTy()) |
5266 | 0 | V = Builder.CreateZExt(V, ArgInfo.getCoerceToType()); |
5267 | | |
5268 | | // If the argument doesn't match, perform a bitcast to coerce it. This |
5269 | | // can happen due to trivial type mismatches. |
5270 | 0 | if (FirstIRArg < IRFuncTy->getNumParams() && |
5271 | 0 | V->getType() != IRFuncTy->getParamType(FirstIRArg)) |
5272 | 0 | V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg)); |
5273 | |
|
5274 | 0 | if (ArgHasMaybeUndefAttr) |
5275 | 0 | V = Builder.CreateFreeze(V); |
5276 | 0 | IRCallArgs[FirstIRArg] = V; |
5277 | 0 | break; |
5278 | 0 | } |
5279 | | |
5280 | | // FIXME: Avoid the conversion through memory if possible. |
5281 | 0 | Address Src = Address::invalid(); |
5282 | 0 | if (!I->isAggregate()) { |
5283 | 0 | Src = CreateMemTemp(I->Ty, "coerce"); |
5284 | 0 | I->copyInto(*this, Src); |
5285 | 0 | } else { |
5286 | 0 | Src = I->hasLValue() ? I->getKnownLValue().getAddress(*this) |
5287 | 0 | : I->getKnownRValue().getAggregateAddress(); |
5288 | 0 | } |
5289 | | |
5290 | | // If the value is offset in memory, apply the offset now. |
5291 | 0 | Src = emitAddressAtOffset(*this, Src, ArgInfo); |
5292 | | |
5293 | | // Fast-isel and the optimizer generally like scalar values better than |
5294 | | // FCAs, so we flatten them if this is safe to do for this argument. |
5295 | 0 | llvm::StructType *STy = |
5296 | 0 | dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType()); |
5297 | 0 | if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { |
5298 | 0 | llvm::Type *SrcTy = Src.getElementType(); |
5299 | 0 | llvm::TypeSize SrcTypeSize = |
5300 | 0 | CGM.getDataLayout().getTypeAllocSize(SrcTy); |
5301 | 0 | llvm::TypeSize DstTypeSize = CGM.getDataLayout().getTypeAllocSize(STy); |
5302 | 0 | if (SrcTypeSize.isScalable()) { |
5303 | 0 | assert(STy->containsHomogeneousScalableVectorTypes() && |
5304 | 0 | "ABI only supports structure with homogeneous scalable vector " |
5305 | 0 | "type"); |
5306 | 0 | assert(SrcTypeSize == DstTypeSize && |
5307 | 0 | "Only allow non-fractional movement of structure with " |
5308 | 0 | "homogeneous scalable vector type"); |
5309 | 0 | assert(NumIRArgs == STy->getNumElements()); |
5310 | | |
5311 | 0 | llvm::Value *StoredStructValue = |
5312 | 0 | Builder.CreateLoad(Src, Src.getName() + ".tuple"); |
5313 | 0 | for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { |
5314 | 0 | llvm::Value *Extract = Builder.CreateExtractValue( |
5315 | 0 | StoredStructValue, i, Src.getName() + ".extract" + Twine(i)); |
5316 | 0 | IRCallArgs[FirstIRArg + i] = Extract; |
5317 | 0 | } |
5318 | 0 | } else { |
5319 | 0 | uint64_t SrcSize = SrcTypeSize.getFixedValue(); |
5320 | 0 | uint64_t DstSize = DstTypeSize.getFixedValue(); |
5321 | | |
5322 | | // If the source type is smaller than the destination type of the |
5323 | | // coerce-to logic, copy the source value into a temp alloca the size |
5324 | | // of the destination type to allow loading all of it. The bits past |
5325 | | // the source value are left undef. |
5326 | 0 | if (SrcSize < DstSize) { |
5327 | 0 | Address TempAlloca = CreateTempAlloca(STy, Src.getAlignment(), |
5328 | 0 | Src.getName() + ".coerce"); |
5329 | 0 | Builder.CreateMemCpy(TempAlloca, Src, SrcSize); |
5330 | 0 | Src = TempAlloca; |
5331 | 0 | } else { |
5332 | 0 | Src = Src.withElementType(STy); |
5333 | 0 | } |
5334 | |
|
5335 | 0 | assert(NumIRArgs == STy->getNumElements()); |
5336 | 0 | for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { |
5337 | 0 | Address EltPtr = Builder.CreateStructGEP(Src, i); |
5338 | 0 | llvm::Value *LI = Builder.CreateLoad(EltPtr); |
5339 | 0 | if (ArgHasMaybeUndefAttr) |
5340 | 0 | LI = Builder.CreateFreeze(LI); |
5341 | 0 | IRCallArgs[FirstIRArg + i] = LI; |
5342 | 0 | } |
5343 | 0 | } |
5344 | 0 | } else { |
5345 | | // In the simple case, just pass the coerced loaded value. |
5346 | 0 | assert(NumIRArgs == 1); |
5347 | 0 | llvm::Value *Load = |
5348 | 0 | CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this); |
5349 | |
|
5350 | 0 | if (CallInfo.isCmseNSCall()) { |
5351 | | // For certain parameter types, clear padding bits, as they may reveal |
5352 | | // sensitive information. |
5353 | | // Small struct/union types are passed as integer arrays. |
5354 | 0 | auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType()); |
5355 | 0 | if (ATy != nullptr && isa<RecordType>(I->Ty.getCanonicalType())) |
5356 | 0 | Load = EmitCMSEClearRecord(Load, ATy, I->Ty); |
5357 | 0 | } |
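A sketch of the cmse_nonsecure_call case, assuming an Armv8-M target built with -mcmse (names invented): small aggregates crossing the security boundary are passed as integer arrays, and their padding bits are cleared so no secure-state data leaks through them.

    struct Pair { char tag; int value; }; // has padding between the fields

    typedef void (*NSConsumer)(Pair) __attribute__((cmse_nonsecure_call));

    void publish(NSConsumer sink, Pair p) {
      sink(p); // padding is zeroed (EmitCMSEClearRecord) before this call
    }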
5358 | |
|
5359 | 0 | if (ArgHasMaybeUndefAttr) |
5360 | 0 | Load = Builder.CreateFreeze(Load); |
5361 | 0 | IRCallArgs[FirstIRArg] = Load; |
5362 | 0 | } |
5363 | | |
5364 | 0 | break; |
5365 | 0 | } |
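As a rough example of the flattening above, assuming the x86-64 SysV ABI (names invented): a 16-byte aggregate coerced to `{ double, double }` is passed as two scalar IR arguments rather than one first-class aggregate.

    struct Point { double x, y; };

    double dot(Point a, Point b); // defined elsewhere

    double callDot(Point a, Point b) {
      return dot(a, b); // each Point typically becomes two double arguments
    }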
5366 | | |
5367 | 0 | case ABIArgInfo::CoerceAndExpand: { |
5368 | 0 | auto coercionType = ArgInfo.getCoerceAndExpandType(); |
5369 | 0 | auto layout = CGM.getDataLayout().getStructLayout(coercionType); |
5370 | |
|
5371 | 0 | llvm::Value *tempSize = nullptr; |
5372 | 0 | Address addr = Address::invalid(); |
5373 | 0 | Address AllocaAddr = Address::invalid(); |
5374 | 0 | if (I->isAggregate()) { |
5375 | 0 | addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this) |
5376 | 0 | : I->getKnownRValue().getAggregateAddress(); |
5377 | |
|
5378 | 0 | } else { |
5379 | 0 | RValue RV = I->getKnownRValue(); |
5380 | 0 | assert(RV.isScalar()); // complex should always just be direct |
5381 | | |
5382 | 0 | llvm::Type *scalarType = RV.getScalarVal()->getType(); |
5383 | 0 | auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType); |
5384 | 0 | auto scalarAlign = CGM.getDataLayout().getPrefTypeAlign(scalarType); |
5385 | | |
5386 | | // Materialize to a temporary. |
5387 | 0 | addr = CreateTempAlloca( |
5388 | 0 | RV.getScalarVal()->getType(), |
5389 | 0 | CharUnits::fromQuantity(std::max(layout->getAlignment(), scalarAlign)), |
5390 | 0 | "tmp", |
5391 | 0 | /*ArraySize=*/nullptr, &AllocaAddr); |
5392 | 0 | tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer()); |
5393 | |
|
5394 | 0 | Builder.CreateStore(RV.getScalarVal(), addr); |
5395 | 0 | } |
5396 | | |
5397 | 0 | addr = addr.withElementType(coercionType); |
5398 | |
|
5399 | 0 | unsigned IRArgPos = FirstIRArg; |
5400 | 0 | for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { |
5401 | 0 | llvm::Type *eltType = coercionType->getElementType(i); |
5402 | 0 | if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue; |
5403 | 0 | Address eltAddr = Builder.CreateStructGEP(addr, i); |
5404 | 0 | llvm::Value *elt = Builder.CreateLoad(eltAddr); |
5405 | 0 | if (ArgHasMaybeUndefAttr) |
5406 | 0 | elt = Builder.CreateFreeze(elt); |
5407 | 0 | IRCallArgs[IRArgPos++] = elt; |
5408 | 0 | } |
5409 | 0 | assert(IRArgPos == FirstIRArg + NumIRArgs); |
5410 | | |
5411 | 0 | if (tempSize) { |
5412 | 0 | EmitLifetimeEnd(tempSize, AllocaAddr.getPointer()); |
5413 | 0 | } |
5414 | |
|
5415 | 0 | break; |
5416 | 0 | } |
5417 | | |
5418 | 0 | case ABIArgInfo::Expand: { |
5419 | 0 | unsigned IRArgPos = FirstIRArg; |
5420 | 0 | ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos); |
5421 | 0 | assert(IRArgPos == FirstIRArg + NumIRArgs); |
5422 | 0 | break; |
5423 | 0 | } |
5424 | 0 | } |
5425 | 0 | } |
5426 | | |
5427 | 0 | const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this); |
5428 | 0 | llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer(); |
5429 | | |
5430 | | // If we're using inalloca, set up that argument. |
5431 | 0 | if (ArgMemory.isValid()) { |
5432 | 0 | llvm::Value *Arg = ArgMemory.getPointer(); |
5433 | 0 | assert(IRFunctionArgs.hasInallocaArg()); |
5434 | 0 | IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg; |
5435 | 0 | } |
5436 | | |
5437 | | // 2. Prepare the function pointer. |
5438 | | |
5439 | | // If the callee is a bitcast of a non-variadic function to have a |
5440 | | // variadic function pointer type, check to see if we can remove the |
5441 | | // bitcast. This comes up with unprototyped functions. |
5442 | | // |
5443 | | // This makes the IR nicer, but more importantly it ensures that we |
5444 | | // can inline the function at -O0 if it is marked always_inline. |
5445 | 0 | auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT, |
5446 | 0 | llvm::Value *Ptr) -> llvm::Function * { |
5447 | 0 | if (!CalleeFT->isVarArg()) |
5448 | 0 | return nullptr; |
5449 | | |
5450 | | // Get underlying value if it's a bitcast |
5451 | 0 | if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) { |
5452 | 0 | if (CE->getOpcode() == llvm::Instruction::BitCast) |
5453 | 0 | Ptr = CE->getOperand(0); |
5454 | 0 | } |
5455 | |
|
5456 | 0 | llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr); |
5457 | 0 | if (!OrigFn) |
5458 | 0 | return nullptr; |
5459 | | |
5460 | 0 | llvm::FunctionType *OrigFT = OrigFn->getFunctionType(); |
5461 | | |
5462 | | // If the original type is variadic, or if any of the component types |
5463 | | // disagree, we cannot remove the cast. |
5464 | 0 | if (OrigFT->isVarArg() || |
5465 | 0 | OrigFT->getNumParams() != CalleeFT->getNumParams() || |
5466 | 0 | OrigFT->getReturnType() != CalleeFT->getReturnType()) |
5467 | 0 | return nullptr; |
5468 | | |
5469 | 0 | for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i) |
5470 | 0 | if (OrigFT->getParamType(i) != CalleeFT->getParamType(i)) |
5471 | 0 | return nullptr; |
5472 | | |
5473 | 0 | return OrigFn; |
5474 | 0 | }; |
5475 | |
|
5476 | 0 | if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) { |
5477 | 0 | CalleePtr = OrigFn; |
5478 | 0 | IRFuncTy = OrigFn->getFunctionType(); |
5479 | 0 | } |
5480 | | |
5481 | | // 3. Perform the actual call. |
5482 | | |
5483 | | // Deactivate any cleanups that we're supposed to do immediately before |
5484 | | // the call. |
5485 | 0 | if (!CallArgs.getCleanupsToDeactivate().empty()) |
5486 | 0 | deactivateArgCleanupsBeforeCall(*this, CallArgs); |
5487 | | |
5488 | | // Assert that the arguments we computed match up. The IR verifier |
5489 | | // will catch this, but this is a common enough source of problems |
5490 | | // during IRGen changes that it's way better for debugging to catch |
5491 | | // it ourselves here. |
5492 | 0 | #ifndef NDEBUG |
5493 | 0 | assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg()); |
5494 | 0 | for (unsigned i = 0; i < IRCallArgs.size(); ++i) { |
5495 | | // Inalloca argument can have different type. |
5496 | 0 | if (IRFunctionArgs.hasInallocaArg() && |
5497 | 0 | i == IRFunctionArgs.getInallocaArgNo()) |
5498 | 0 | continue; |
5499 | 0 | if (i < IRFuncTy->getNumParams()) |
5500 | 0 | assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i)); |
5501 | 0 | } |
5502 | 0 | #endif |
5503 | | |
5504 | | // Update the largest vector width if any arguments have vector types. |
5505 | 0 | for (unsigned i = 0; i < IRCallArgs.size(); ++i) |
5506 | 0 | LargestVectorWidth = std::max(LargestVectorWidth, |
5507 | 0 | getMaxVectorWidth(IRCallArgs[i]->getType())); |
5508 | | |
5509 | | // Compute the calling convention and attributes. |
5510 | 0 | unsigned CallingConv; |
5511 | 0 | llvm::AttributeList Attrs; |
5512 | 0 | CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo, |
5513 | 0 | Callee.getAbstractInfo(), Attrs, CallingConv, |
5514 | 0 | /*AttrOnCallSite=*/true, |
5515 | 0 | /*IsThunk=*/false); |
5516 | |
|
5517 | 0 | if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) { |
5518 | 0 | if (FD->hasAttr<StrictFPAttr>()) |
5519 | | // All calls within a strictfp function are marked strictfp |
5520 | 0 | Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP); |
5521 | | |
5522 | | // If -ffast-math is enabled and the function is guarded by an |
5523 | | // '__attribute__((optnone))', adjust the memory attribute so the BE emits the
5524 | | // library call instead of the intrinsic. |
5525 | 0 | if (FD->hasAttr<OptimizeNoneAttr>() && getLangOpts().FastMath) |
5526 | 0 | CGM.AdjustMemoryAttribute(CalleePtr->getName(), Callee.getAbstractInfo(), |
5527 | 0 | Attrs); |
5528 | 0 | } |
5529 | | // Add the call-site nomerge attribute if present.
5530 | 0 | if (InNoMergeAttributedStmt) |
5531 | 0 | Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoMerge); |
5532 | | |
5533 | | // Add the call-site noinline attribute if present.
5534 | 0 | if (InNoInlineAttributedStmt) |
5535 | 0 | Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline); |
5536 | | |
5537 | | // Add the call-site always_inline attribute if present.
5538 | 0 | if (InAlwaysInlineAttributedStmt) |
5539 | 0 | Attrs = |
5540 | 0 | Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline); |
5541 | | |
5542 | | // Apply some call-site-specific attributes. |
5543 | | // TODO: work this into building the attribute set. |
5544 | | |
5545 | | // Apply always_inline to all calls within flatten functions. |
5546 | | // FIXME: should this really take priority over __try, below? |
5547 | 0 | if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() && |
5548 | 0 | !InNoInlineAttributedStmt && |
5549 | 0 | !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) { |
5550 | 0 | Attrs = |
5551 | 0 | Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline); |
5552 | 0 | } |
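Sketch of the source-level `flatten` attribute this block responds to (names invented): every call emitted inside the flattened function gets a call-site always_inline, subject to the noinline exceptions checked above.

    double accumulate(const double *xs, int n); // defined elsewhere

    __attribute__((flatten)) double hotPath(const double *xs, int n) {
      return accumulate(xs, n); // this call site receives AlwaysInline
    }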
5553 | | |
5554 | | // Disable inlining inside SEH __try blocks. |
5555 | 0 | if (isSEHTryScope()) { |
5556 | 0 | Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline); |
5557 | 0 | } |
5558 | | |
5559 | | // Decide whether to use a call or an invoke. |
5560 | 0 | bool CannotThrow; |
5561 | 0 | if (currentFunctionUsesSEHTry()) { |
5562 | | // SEH cares about asynchronous exceptions, so everything can "throw." |
5563 | 0 | CannotThrow = false; |
5564 | 0 | } else if (isCleanupPadScope() && |
5565 | 0 | EHPersonality::get(*this).isMSVCXXPersonality()) { |
5566 | | // The MSVC++ personality will implicitly terminate the program if an |
5567 | | // exception is thrown during a cleanup outside of a try/catch. |
5568 | | // We don't need to model anything in IR to get this behavior. |
5569 | 0 | CannotThrow = true; |
5570 | 0 | } else { |
5571 | | // Otherwise, nounwind call sites will never throw. |
5572 | 0 | CannotThrow = Attrs.hasFnAttr(llvm::Attribute::NoUnwind); |
5573 | |
|
5574 | 0 | if (auto *FPtr = dyn_cast<llvm::Function>(CalleePtr)) |
5575 | 0 | if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind)) |
5576 | 0 | CannotThrow = true; |
5577 | 0 | } |
5578 | | |
5579 | | // If we made a temporary, be sure to clean up after ourselves. Note that we |
5580 | | // can't depend on being inside of an ExprWithCleanups, so we need to manually |
5581 | | // pop this cleanup later on. Being eager about this is OK, since this |
5582 | | // temporary is 'invisible' outside of the callee. |
5583 | 0 | if (UnusedReturnSizePtr) |
5584 | 0 | pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca, |
5585 | 0 | UnusedReturnSizePtr); |
5586 | |
|
5587 | 0 | llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest(); |
5588 | |
|
5589 | 0 | SmallVector<llvm::OperandBundleDef, 1> BundleList = |
5590 | 0 | getBundlesForFunclet(CalleePtr); |
5591 | |
|
5592 | 0 | if (SanOpts.has(SanitizerKind::KCFI) && |
5593 | 0 | !isa_and_nonnull<FunctionDecl>(TargetDecl)) |
5594 | 0 | EmitKCFIOperandBundle(ConcreteCallee, BundleList); |
5595 | |
|
5596 | 0 | if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) |
5597 | 0 | if (FD->hasAttr<StrictFPAttr>()) |
5598 | | // All calls within a strictfp function are marked strictfp |
5599 | 0 | Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP); |
5600 | |
|
5601 | 0 | AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl); |
5602 | 0 | Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs); |
5603 | |
|
5604 | 0 | AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs); |
5605 | 0 | Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs); |
5606 | | |
5607 | | // Emit the actual call/invoke instruction. |
5608 | 0 | llvm::CallBase *CI; |
5609 | 0 | if (!InvokeDest) { |
5610 | 0 | CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList); |
5611 | 0 | } else { |
5612 | 0 | llvm::BasicBlock *Cont = createBasicBlock("invoke.cont"); |
5613 | 0 | CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs, |
5614 | 0 | BundleList); |
5615 | 0 | EmitBlock(Cont); |
5616 | 0 | } |
5617 | 0 | if (CI->getCalledFunction() && CI->getCalledFunction()->hasName() && |
5618 | 0 | CI->getCalledFunction()->getName().starts_with("_Z4sqrt")) { |
5619 | 0 | SetSqrtFPAccuracy(CI); |
5620 | 0 | } |
5621 | 0 | if (callOrInvoke) |
5622 | 0 | *callOrInvoke = CI; |
5623 | | |
5624 | | // If this is within a function that has the guard(nocf) attribute and is an |
5625 | | // indirect call, add the "guard_nocf" attribute to this call to indicate that |
5626 | | // Control Flow Guard checks should not be added, even if the call is inlined. |
5627 | 0 | if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) { |
5628 | 0 | if (const auto *A = FD->getAttr<CFGuardAttr>()) { |
5629 | 0 | if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction()) |
5630 | 0 | Attrs = Attrs.addFnAttribute(getLLVMContext(), "guard_nocf"); |
5631 | 0 | } |
5632 | 0 | } |
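A hedged sketch of the CFG opt-out described above, assuming a Windows target built with Control Flow Guard (names invented): indirect calls inside a `guard(nocf)` function carry the "guard_nocf" call-site attribute so no CFG check is inserted for them.

    __declspec(guard(nocf)) void dispatch(void (*handler)()) {
      handler(); // indirect call: tagged "guard_nocf", no CFG check added
    }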
5633 | | |
5634 | | // Apply the attributes and calling convention. |
5635 | 0 | CI->setAttributes(Attrs); |
5636 | 0 | CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv)); |
5637 | | |
5638 | | // Apply various metadata. |
5639 | |
|
5640 | 0 | if (!CI->getType()->isVoidTy()) |
5641 | 0 | CI->setName("call"); |
5642 | | |
5643 | | // Update largest vector width from the return type. |
5644 | 0 | LargestVectorWidth = |
5645 | 0 | std::max(LargestVectorWidth, getMaxVectorWidth(CI->getType())); |
5646 | | |
5647 | | // Insert instrumentation or attach profile metadata at indirect call sites. |
5648 | | // For more details, see the comment before the definition of |
5649 | | // IPVK_IndirectCallTarget in InstrProfData.inc. |
5650 | 0 | if (!CI->getCalledFunction()) |
5651 | 0 | PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget, |
5652 | 0 | CI, CalleePtr); |
5653 | | |
5654 | | // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC |
5655 | | // optimizer it can aggressively ignore unwind edges. |
5656 | 0 | if (CGM.getLangOpts().ObjCAutoRefCount) |
5657 | 0 | AddObjCARCExceptionMetadata(CI); |
5658 | | |
5659 | | // Set tail call kind if necessary. |
5660 | 0 | if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) { |
5661 | 0 | if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>()) |
5662 | 0 | Call->setTailCallKind(llvm::CallInst::TCK_NoTail); |
5663 | 0 | else if (IsMustTail) |
5664 | 0 | Call->setTailCallKind(llvm::CallInst::TCK_MustTail); |
5665 | 0 | } |
5666 | | |
5667 | | // Add metadata for calls to MSAllocator functions |
5668 | 0 | if (getDebugInfo() && TargetDecl && |
5669 | 0 | TargetDecl->hasAttr<MSAllocatorAttr>()) |
5670 | 0 | getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy->getPointeeType(), Loc); |
5671 | | |
5672 | | // Add metadata if calling an __attribute__((error(""))) or warning fn. |
5673 | 0 | if (TargetDecl && TargetDecl->hasAttr<ErrorAttr>()) { |
5674 | 0 | llvm::ConstantInt *Line = |
5675 | 0 | llvm::ConstantInt::get(Int32Ty, Loc.getRawEncoding()); |
5676 | 0 | llvm::ConstantAsMetadata *MD = llvm::ConstantAsMetadata::get(Line); |
5677 | 0 | llvm::MDTuple *MDT = llvm::MDNode::get(getLLVMContext(), {MD}); |
5678 | 0 | CI->setMetadata("srcloc", MDT); |
5679 | 0 | } |
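Sketch of the `error`/`warning` function attributes whose call sites receive the "srcloc" metadata above (names invented); the backend uses that metadata to point its diagnostic at the offending call.

    __attribute__((error("not available in this configuration"))) void forbidden();
    __attribute__((warning("slow fallback path"))) void slowPath();

    void maybeCall(bool ok) {
      if (!ok)
        slowPath(); // diagnostic is reported against this call's "srcloc"
    }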
5680 | | |
5681 | | // 4. Finish the call. |
5682 | | |
5683 | | // If the call doesn't return, finish the basic block and clear the |
5684 | | // insertion point; this allows the rest of IRGen to discard |
5685 | | // unreachable code. |
5686 | 0 | if (CI->doesNotReturn()) { |
5687 | 0 | if (UnusedReturnSizePtr) |
5688 | 0 | PopCleanupBlock(); |
5689 | | |
5690 | | // Strip away the noreturn attribute to better diagnose unreachable UB. |
5691 | 0 | if (SanOpts.has(SanitizerKind::Unreachable)) { |
5692 | | // Also remove from function since CallBase::hasFnAttr additionally checks |
5693 | | // attributes of the called function. |
5694 | 0 | if (auto *F = CI->getCalledFunction()) |
5695 | 0 | F->removeFnAttr(llvm::Attribute::NoReturn); |
5696 | 0 | CI->removeFnAttr(llvm::Attribute::NoReturn); |
5697 | | |
5698 | | // Avoid incompatibility with ASan which relies on the `noreturn` |
5699 | | // attribute to insert handler calls. |
5700 | 0 | if (SanOpts.hasOneOf(SanitizerKind::Address | |
5701 | 0 | SanitizerKind::KernelAddress)) { |
5702 | 0 | SanitizerScope SanScope(this); |
5703 | 0 | llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder); |
5704 | 0 | Builder.SetInsertPoint(CI); |
5705 | 0 | auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false); |
5706 | 0 | llvm::FunctionCallee Fn = |
5707 | 0 | CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return"); |
5708 | 0 | EmitNounwindRuntimeCall(Fn); |
5709 | 0 | } |
5710 | 0 | } |
5711 | |
|
5712 | 0 | EmitUnreachable(Loc); |
5713 | 0 | Builder.ClearInsertionPoint(); |
5714 | | |
5715 | | // FIXME: For now, emit a dummy basic block because expr emitters in |
5716 | | // general are not ready to handle emitting expressions at unreachable
5717 | | // points. |
5718 | 0 | EnsureInsertPoint(); |
5719 | | |
5720 | | // Return a reasonable RValue. |
5721 | 0 | return GetUndefRValue(RetTy); |
5722 | 0 | } |
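A minimal example of a call that reaches the doesNotReturn() handling above (names invented): after the call to a `[[noreturn]]` function, the block is terminated with `unreachable` and the insertion point is cleared.

    [[noreturn]] void fatal(const char *msg); // defined elsewhere

    int checkedDiv(int a, int b) {
      if (b == 0)
        fatal("division by zero"); // noreturn call: block ends in 'unreachable'
      return a / b;
    }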
5723 | | |
5724 | | // If this is a musttail call, return immediately. We do not branch to the |
5725 | | // epilogue in this case. |
5726 | 0 | if (IsMustTail) { |
5727 | 0 | for (auto it = EHStack.find(CurrentCleanupScopeDepth); it != EHStack.end(); |
5728 | 0 | ++it) { |
5729 | 0 | EHCleanupScope *Cleanup = dyn_cast<EHCleanupScope>(&*it); |
5730 | 0 | if (!(Cleanup && Cleanup->getCleanup()->isRedundantBeforeReturn())) |
5731 | 0 | CGM.ErrorUnsupported(MustTailCall, "tail call skipping over cleanups"); |
5732 | 0 | } |
5733 | 0 | if (CI->getType()->isVoidTy()) |
5734 | 0 | Builder.CreateRetVoid(); |
5735 | 0 | else |
5736 | 0 | Builder.CreateRet(CI); |
5737 | 0 | Builder.ClearInsertionPoint(); |
5738 | 0 | EnsureInsertPoint(); |
5739 | 0 | return GetUndefRValue(RetTy); |
5740 | 0 | } |
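Sketch of a source-level musttail call that takes the early-return path above, using Clang's `[[clang::musttail]]` statement attribute (names invented): the call is marked TCK_MustTail and the function returns its result without branching to the normal epilogue.

    int step(int n); // defined elsewhere; prototype must match the caller's

    int drive(int n) {
      if (n <= 0)
        return 0;
      [[clang::musttail]] return step(n - 1); // guaranteed tail call
    }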
5741 | | |
5742 | | // Perform the swifterror writeback. |
5743 | 0 | if (swiftErrorTemp.isValid()) { |
5744 | 0 | llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp); |
5745 | 0 | Builder.CreateStore(errorResult, swiftErrorArg); |
5746 | 0 | } |
5747 | | |
5748 | | // Emit any call-associated writebacks immediately. Arguably this |
5749 | | // should happen after any return-value munging. |
5750 | 0 | if (CallArgs.hasWritebacks()) |
5751 | 0 | emitWritebacks(*this, CallArgs); |
5752 | | |
5753 | | // The stack cleanup for inalloca arguments has to run out of the normal |
5754 | | // lexical order, so deactivate it and run it manually here. |
5755 | 0 | CallArgs.freeArgumentMemory(*this); |
5756 | | |
5757 | | // Extract the return value. |
5758 | 0 | RValue Ret = [&] { |
5759 | 0 | switch (RetAI.getKind()) { |
5760 | 0 | case ABIArgInfo::CoerceAndExpand: { |
5761 | 0 | auto coercionType = RetAI.getCoerceAndExpandType(); |
5762 | |
|
5763 | 0 | Address addr = SRetPtr.withElementType(coercionType); |
5764 | |
|
5765 | 0 | assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType()); |
5766 | 0 | bool requiresExtract = isa<llvm::StructType>(CI->getType()); |
5767 | |
|
5768 | 0 | unsigned unpaddedIndex = 0; |
5769 | 0 | for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { |
5770 | 0 | llvm::Type *eltType = coercionType->getElementType(i); |
5771 | 0 | if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue; |
5772 | 0 | Address eltAddr = Builder.CreateStructGEP(addr, i); |
5773 | 0 | llvm::Value *elt = CI; |
5774 | 0 | if (requiresExtract) |
5775 | 0 | elt = Builder.CreateExtractValue(elt, unpaddedIndex++); |
5776 | 0 | else |
5777 | 0 | assert(unpaddedIndex == 0); |
5778 | 0 | Builder.CreateStore(elt, eltAddr); |
5779 | 0 | } |
5780 | 0 | [[fallthrough]]; |
5781 | 0 | } |
5782 | | |
5783 | 0 | case ABIArgInfo::InAlloca: |
5784 | 0 | case ABIArgInfo::Indirect: { |
5785 | 0 | RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation()); |
5786 | 0 | if (UnusedReturnSizePtr) |
5787 | 0 | PopCleanupBlock(); |
5788 | 0 | return ret; |
5789 | 0 | } |
5790 | | |
5791 | 0 | case ABIArgInfo::Ignore: |
5792 | | // If we are ignoring the call's result, still construct an
5793 | | // appropriate return value for our caller.
5794 | 0 | return GetUndefRValue(RetTy); |
5795 | | |
5796 | 0 | case ABIArgInfo::Extend: |
5797 | 0 | case ABIArgInfo::Direct: { |
5798 | 0 | llvm::Type *RetIRTy = ConvertType(RetTy); |
5799 | 0 | if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) { |
5800 | 0 | switch (getEvaluationKind(RetTy)) { |
5801 | 0 | case TEK_Complex: { |
5802 | 0 | llvm::Value *Real = Builder.CreateExtractValue(CI, 0); |
5803 | 0 | llvm::Value *Imag = Builder.CreateExtractValue(CI, 1); |
5804 | 0 | return RValue::getComplex(std::make_pair(Real, Imag)); |
5805 | 0 | } |
5806 | 0 | case TEK_Aggregate: { |
5807 | 0 | Address DestPtr = ReturnValue.getValue(); |
5808 | 0 | bool DestIsVolatile = ReturnValue.isVolatile(); |
5809 | |
|
5810 | 0 | if (!DestPtr.isValid()) { |
5811 | 0 | DestPtr = CreateMemTemp(RetTy, "agg.tmp"); |
5812 | 0 | DestIsVolatile = false; |
5813 | 0 | } |
5814 | 0 | EmitAggregateStore(CI, DestPtr, DestIsVolatile); |
5815 | 0 | return RValue::getAggregate(DestPtr); |
5816 | 0 | } |
5817 | 0 | case TEK_Scalar: { |
5818 | | // If the argument doesn't match, perform a bitcast to coerce it. This |
5819 | | // can happen due to trivial type mismatches. |
5820 | 0 | llvm::Value *V = CI; |
5821 | 0 | if (V->getType() != RetIRTy) |
5822 | 0 | V = Builder.CreateBitCast(V, RetIRTy); |
5823 | 0 | return RValue::get(V); |
5824 | 0 | } |
5825 | 0 | } |
5826 | 0 | llvm_unreachable("bad evaluation kind"); |
5827 | 0 | } |
5828 | | |
5829 | | // If coercing a fixed vector from a scalable vector for ABI |
5830 | | // compatibility, and the types match, use the llvm.vector.extract |
5831 | | // intrinsic to perform the conversion. |
5832 | 0 | if (auto *FixedDst = dyn_cast<llvm::FixedVectorType>(RetIRTy)) { |
5833 | 0 | llvm::Value *V = CI; |
5834 | 0 | if (auto *ScalableSrc = dyn_cast<llvm::ScalableVectorType>(V->getType())) { |
5835 | 0 | if (FixedDst->getElementType() == ScalableSrc->getElementType()) { |
5836 | 0 | llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty); |
5837 | 0 | V = Builder.CreateExtractVector(FixedDst, V, Zero, "cast.fixed"); |
5838 | 0 | return RValue::get(V); |
5839 | 0 | } |
5840 | 0 | } |
5841 | 0 | } |
5842 | | |
5843 | 0 | Address DestPtr = ReturnValue.getValue(); |
5844 | 0 | bool DestIsVolatile = ReturnValue.isVolatile(); |
5845 | |
|
5846 | 0 | if (!DestPtr.isValid()) { |
5847 | 0 | DestPtr = CreateMemTemp(RetTy, "coerce"); |
5848 | 0 | DestIsVolatile = false; |
5849 | 0 | } |
5850 | | |
5851 | | // An empty record can overlap other data (if declared with |
5852 | | // no_unique_address); omit the store for such types - as there is no |
5853 | | // actual data to store. |
5854 | 0 | if (!isEmptyRecord(getContext(), RetTy, true)) { |
5855 | | // If the value is offset in memory, apply the offset now. |
5856 | 0 | Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI); |
5857 | 0 | CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this); |
5858 | 0 | } |
5859 | |
|
5860 | 0 | return convertTempToRValue(DestPtr, RetTy, SourceLocation()); |
5861 | 0 | } |
5862 | | |
5863 | 0 | case ABIArgInfo::Expand: |
5864 | 0 | case ABIArgInfo::IndirectAliased: |
5865 | 0 | llvm_unreachable("Invalid ABI kind for return argument"); |
5866 | 0 | } |
5867 | | |
5868 | 0 | llvm_unreachable("Unhandled ABIArgInfo::Kind"); |
5869 | 0 | } (); |
5870 | | |
5871 | | // Emit the assume_aligned check on the return value. |
5872 | 0 | if (Ret.isScalar() && TargetDecl) { |
5873 | 0 | AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret); |
5874 | 0 | AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret); |
5875 | 0 | } |
5876 | | |
5877 | | // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though |
5878 | | // we can't use the full cleanup mechanism. |
5879 | 0 | for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall) |
5880 | 0 | LifetimeEnd.Emit(*this, /*Flags=*/{}); |
5881 | |
|
5882 | 0 | if (!ReturnValue.isExternallyDestructed() && |
5883 | 0 | RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct) |
5884 | 0 | pushDestroy(QualType::DK_nontrivial_c_struct, Ret.getAggregateAddress(), |
5885 | 0 | RetTy); |
5886 | |
|
5887 | 0 | return Ret; |
5888 | 0 | } |
5889 | | |
5890 | 0 | CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const { |
5891 | 0 | if (isVirtual()) { |
5892 | 0 | const CallExpr *CE = getVirtualCallExpr(); |
5893 | 0 | return CGF.CGM.getCXXABI().getVirtualFunctionPointer( |
5894 | 0 | CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(), |
5895 | 0 | CE ? CE->getBeginLoc() : SourceLocation()); |
5896 | 0 | } |
5897 | | |
5898 | 0 | return *this; |
5899 | 0 | } |
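A small example of the virtual case handled above (names invented): the concrete callee is only materialized at the call site, by loading the function pointer for `area` from the object's vtable.

    struct Shape {
      virtual double area() const = 0;
      virtual ~Shape() = default;
    };

    double measure(const Shape &s) {
      return s.area(); // callee pointer fetched from the vtable at the call
    }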
5900 | | |
5901 | | /* VarArg handling */ |
5902 | | |
5903 | 0 | Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) { |
5904 | 0 | VAListAddr = VE->isMicrosoftABI() |
5905 | 0 | ? EmitMSVAListRef(VE->getSubExpr()) |
5906 | 0 | : EmitVAListRef(VE->getSubExpr()); |
5907 | 0 | QualType Ty = VE->getType(); |
5908 | 0 | if (VE->isMicrosoftABI()) |
5909 | 0 | return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty); |
5910 | 0 | return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty); |
5911 | 0 | } |
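For reference, a plain variadic function whose `va_arg` uses are lowered through EmitVAArg above (via EmitMSVAListRef on Microsoft ABIs, EmitVAListRef otherwise); the function name is invented.

    #include <cstdarg>

    int sumInts(int count, ...) {
      va_list ap;
      va_start(ap, count);
      int total = 0;
      for (int i = 0; i < count; ++i)
        total += va_arg(ap, int); // lowered by the target's ABIInfo
      va_end(ap);
      return total;
    }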