/src/llvm-project/clang/lib/CodeGen/CGExpr.cpp
Line | Count | Source |
1 | | //===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | // |
9 | | // This contains code to emit Expr nodes as LLVM code. |
10 | | // |
11 | | //===----------------------------------------------------------------------===// |
12 | | |
13 | | #include "CGCUDARuntime.h" |
14 | | #include "CGCXXABI.h" |
15 | | #include "CGCall.h" |
16 | | #include "CGCleanup.h" |
17 | | #include "CGDebugInfo.h" |
18 | | #include "CGObjCRuntime.h" |
19 | | #include "CGOpenMPRuntime.h" |
20 | | #include "CGRecordLayout.h" |
21 | | #include "CodeGenFunction.h" |
22 | | #include "CodeGenModule.h" |
23 | | #include "ConstantEmitter.h" |
24 | | #include "TargetInfo.h" |
25 | | #include "clang/AST/ASTContext.h" |
26 | | #include "clang/AST/Attr.h" |
27 | | #include "clang/AST/DeclObjC.h" |
28 | | #include "clang/AST/NSAPI.h" |
29 | | #include "clang/AST/StmtVisitor.h" |
30 | | #include "clang/Basic/Builtins.h" |
31 | | #include "clang/Basic/CodeGenOptions.h" |
32 | | #include "clang/Basic/SourceManager.h" |
33 | | #include "llvm/ADT/Hashing.h" |
34 | | #include "llvm/ADT/STLExtras.h" |
35 | | #include "llvm/ADT/StringExtras.h" |
36 | | #include "llvm/IR/DataLayout.h" |
37 | | #include "llvm/IR/Intrinsics.h" |
38 | | #include "llvm/IR/IntrinsicsWebAssembly.h" |
39 | | #include "llvm/IR/LLVMContext.h" |
40 | | #include "llvm/IR/MDBuilder.h" |
41 | | #include "llvm/IR/MatrixBuilder.h" |
42 | | #include "llvm/Passes/OptimizationLevel.h" |
43 | | #include "llvm/Support/ConvertUTF.h" |
44 | | #include "llvm/Support/MathExtras.h" |
45 | | #include "llvm/Support/Path.h" |
46 | | #include "llvm/Support/SaveAndRestore.h" |
47 | | #include "llvm/Support/xxhash.h" |
48 | | #include "llvm/Transforms/Utils/SanitizerStats.h" |
49 | | |
50 | | #include <optional> |
51 | | #include <string> |
52 | | |
53 | | using namespace clang; |
54 | | using namespace CodeGen; |
55 | | |
56 | | // Experiment to make sanitizers easier to debug |
57 | | static llvm::cl::opt<bool> ClSanitizeDebugDeoptimization( |
58 | | "ubsan-unique-traps", llvm::cl::Optional, |
59 | | llvm::cl::desc("Deoptimize traps for UBSAN so there is 1 trap per check"), |
60 | | llvm::cl::init(false)); |
61 | | |
62 | | //===--------------------------------------------------------------------===// |
63 | | // Miscellaneous Helper Methods |
64 | | //===--------------------------------------------------------------------===// |
65 | | |
66 | | /// CreateTempAllocaWithoutCast - This creates an alloca and inserts it into
67 | | /// the entry block, without casting it to the default address space.
68 | | Address CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty, |
69 | | CharUnits Align, |
70 | | const Twine &Name, |
71 | 0 | llvm::Value *ArraySize) { |
72 | 0 | auto Alloca = CreateTempAlloca(Ty, Name, ArraySize); |
73 | 0 | Alloca->setAlignment(Align.getAsAlign()); |
74 | 0 | return Address(Alloca, Ty, Align, KnownNonNull); |
75 | 0 | } |
76 | | |
77 | | /// CreateTempAlloca - This creates an alloca and inserts it into the entry
78 | | /// block. The alloca is cast to the default address space if necessary.
79 | | Address CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align, |
80 | | const Twine &Name, |
81 | | llvm::Value *ArraySize, |
82 | 0 | Address *AllocaAddr) { |
83 | 0 | auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize); |
84 | 0 | if (AllocaAddr) |
85 | 0 | *AllocaAddr = Alloca; |
86 | 0 | llvm::Value *V = Alloca.getPointer(); |
87 | | // Alloca always returns a pointer in alloca address space, which may |
88 | | // be different from the type defined by the language. For example, |
89 | | // in C++ the auto variables are in the default address space. Therefore |
90 | | // cast alloca to the default address space when necessary. |
91 | 0 | if (getASTAllocaAddressSpace() != LangAS::Default) { |
92 | 0 | auto DestAddrSpace = getContext().getTargetAddressSpace(LangAS::Default); |
93 | 0 | llvm::IRBuilderBase::InsertPointGuard IPG(Builder); |
94 | | // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt, |
95 | | // otherwise alloca is inserted at the current insertion point of the |
96 | | // builder. |
97 | 0 | if (!ArraySize) |
98 | 0 | Builder.SetInsertPoint(getPostAllocaInsertPoint()); |
99 | 0 | V = getTargetHooks().performAddrSpaceCast( |
100 | 0 | *this, V, getASTAllocaAddressSpace(), LangAS::Default, |
101 | 0 | Ty->getPointerTo(DestAddrSpace), /*non-null*/ true); |
102 | 0 | } |
103 | |
104 | 0 | return Address(V, Ty, Align, KnownNonNull); |
105 | 0 | } |
106 | | |
107 | | /// CreateTempAlloca - This creates an alloca and inserts it into the entry |
108 | | /// block if \p ArraySize is nullptr, otherwise inserts it at the current |
109 | | /// insertion point of the builder. |
110 | | llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, |
111 | | const Twine &Name, |
112 | 0 | llvm::Value *ArraySize) { |
113 | 0 | if (ArraySize) |
114 | 0 | return Builder.CreateAlloca(Ty, ArraySize, Name); |
115 | 0 | return new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(), |
116 | 0 | ArraySize, Name, AllocaInsertPt); |
117 | 0 | } |
118 | | |
119 | | /// CreateDefaultAlignTempAlloca - This creates an alloca with the |
120 | | /// default alignment of the corresponding LLVM type, which is *not* |
121 | | /// guaranteed to be related in any way to the expected alignment of |
122 | | /// an AST type that might have been lowered to Ty. |
123 | | Address CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty, |
124 | 0 | const Twine &Name) { |
125 | 0 | CharUnits Align = |
126 | 0 | CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlign(Ty)); |
127 | 0 | return CreateTempAlloca(Ty, Align, Name); |
128 | 0 | } |
129 | | |
130 | 0 | Address CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) { |
131 | 0 | CharUnits Align = getContext().getTypeAlignInChars(Ty); |
132 | 0 | return CreateTempAlloca(ConvertType(Ty), Align, Name); |
133 | 0 | } |
134 | | |
135 | | Address CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name, |
136 | 0 | Address *Alloca) { |
137 | | // FIXME: Should we prefer the preferred type alignment here? |
138 | 0 | return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca); |
139 | 0 | } |
140 | | |
141 | | Address CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align, |
142 | 0 | const Twine &Name, Address *Alloca) { |
143 | 0 | Address Result = CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name, |
144 | 0 | /*ArraySize=*/nullptr, Alloca); |
145 | |
146 | 0 | if (Ty->isConstantMatrixType()) { |
147 | 0 | auto *ArrayTy = cast<llvm::ArrayType>(Result.getElementType()); |
148 | 0 | auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(), |
149 | 0 | ArrayTy->getNumElements()); |
150 | |
151 | 0 | Result = Address(Result.getPointer(), VectorTy, Result.getAlignment(), |
152 | 0 | KnownNonNull); |
153 | 0 | } |
154 | 0 | return Result; |
155 | 0 | } |
156 | | |
157 | | Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty, CharUnits Align, |
158 | 0 | const Twine &Name) { |
159 | 0 | return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name); |
160 | 0 | } |
161 | | |
162 | | Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty, |
163 | 0 | const Twine &Name) { |
164 | 0 | return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty), |
165 | 0 | Name); |
166 | 0 | } |
167 | | |
168 | | /// EvaluateExprAsBool - Perform the usual unary conversions on the specified |
169 | | /// expression and compare the result against zero, returning an Int1Ty value. |
170 | 0 | llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) { |
171 | 0 | PGO.setCurrentStmt(E); |
172 | 0 | if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) { |
173 | 0 | llvm::Value *MemPtr = EmitScalarExpr(E); |
174 | 0 | return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT); |
175 | 0 | } |
176 | | |
177 | 0 | QualType BoolTy = getContext().BoolTy; |
178 | 0 | SourceLocation Loc = E->getExprLoc(); |
179 | 0 | CGFPOptionsRAII FPOptsRAII(*this, E); |
180 | 0 | if (!E->getType()->isAnyComplexType()) |
181 | 0 | return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc); |
182 | | |
183 | 0 | return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy, |
184 | 0 | Loc); |
185 | 0 | } |
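 | | // The three paths above correspond to conditions like the following
 | | // (an illustrative sketch):
 | | //
 | | //   int S::*mp = ...;  if (mp) {}  // member pointer: ABI-specific null test
 | | //   _Complex double z; if (z) {}   // complex: compare both parts to zero
 | | //   double d;          if (d) {}   // scalar: usual conversion to bool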
186 | | |
187 | | /// EmitIgnoredExpr - Emit code to compute the specified expression, |
188 | | /// ignoring the result. |
189 | 0 | void CodeGenFunction::EmitIgnoredExpr(const Expr *E) { |
190 | 0 | if (E->isPRValue()) |
191 | 0 | return (void)EmitAnyExpr(E, AggValueSlot::ignored(), true); |
192 | | |
193 | | // If this is a conditional operator that results in a bitfield, we can
194 | | // special-case its emission. The normal 'EmitLValue' version of this is
195 | | // particularly difficult to codegen for, since creating a single "LValue"
196 | | // for two different-sized arguments here is not really doable.
197 | 0 | if (const auto *CondOp = dyn_cast<AbstractConditionalOperator>( |
198 | 0 | E->IgnoreParenNoopCasts(getContext()))) { |
199 | 0 | if (CondOp->getObjectKind() == OK_BitField) |
200 | 0 | return EmitIgnoredConditionalOperator(CondOp); |
201 | 0 | } |
202 | | |
203 | | // Just emit it as an l-value and drop the result. |
204 | 0 | EmitLValue(E); |
205 | 0 | } |
206 | | |
207 | | /// EmitAnyExpr - Emit code to compute the specified expression which |
208 | | /// can have any type. The result is returned as an RValue struct. |
209 | | /// If this is an aggregate expression, AggSlot indicates where the |
210 | | /// result should be returned. |
211 | | RValue CodeGenFunction::EmitAnyExpr(const Expr *E, |
212 | | AggValueSlot aggSlot, |
213 | 0 | bool ignoreResult) { |
214 | 0 | switch (getEvaluationKind(E->getType())) { |
215 | 0 | case TEK_Scalar: |
216 | 0 | return RValue::get(EmitScalarExpr(E, ignoreResult)); |
217 | 0 | case TEK_Complex: |
218 | 0 | return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult)); |
219 | 0 | case TEK_Aggregate: |
220 | 0 | if (!ignoreResult && aggSlot.isIgnored()) |
221 | 0 | aggSlot = CreateAggTemp(E->getType(), "agg-temp"); |
222 | 0 | EmitAggExpr(E, aggSlot); |
223 | 0 | return aggSlot.asRValue(); |
224 | 0 | } |
225 | 0 | llvm_unreachable("bad evaluation kind"); |
226 | 0 | } |
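 | | // Illustrative mapping from source types to evaluation kinds (a sketch,
 | | // not an exhaustive rule):
 | | //
 | | //   int, double, T*        -> TEK_Scalar    (a single llvm::Value)
 | | //   _Complex float         -> TEK_Complex   (a real/imaginary pair)
 | | //   struct P { int x, y; } -> TEK_Aggregate (evaluated into a memory slot)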
227 | | |
228 | | /// EmitAnyExprToTemp - Similar to EmitAnyExpr(); however, the result will
229 | | /// always be accessible even if no aggregate location is provided.
230 | 0 | RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) { |
231 | 0 | AggValueSlot AggSlot = AggValueSlot::ignored(); |
232 | |
233 | 0 | if (hasAggregateEvaluationKind(E->getType())) |
234 | 0 | AggSlot = CreateAggTemp(E->getType(), "agg.tmp"); |
235 | 0 | return EmitAnyExpr(E, AggSlot); |
236 | 0 | } |
237 | | |
238 | | /// EmitAnyExprToMem - Evaluate an expression into a given memory |
239 | | /// location. |
240 | | void CodeGenFunction::EmitAnyExprToMem(const Expr *E, |
241 | | Address Location, |
242 | | Qualifiers Quals, |
243 | 0 | bool IsInit) { |
244 | | // FIXME: This function should take an LValue as an argument. |
245 | 0 | switch (getEvaluationKind(E->getType())) { |
246 | 0 | case TEK_Complex: |
247 | 0 | EmitComplexExprIntoLValue(E, MakeAddrLValue(Location, E->getType()), |
248 | 0 | /*isInit*/ false); |
249 | 0 | return; |
250 | | |
251 | 0 | case TEK_Aggregate: { |
252 | 0 | EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals, |
253 | 0 | AggValueSlot::IsDestructed_t(IsInit), |
254 | 0 | AggValueSlot::DoesNotNeedGCBarriers, |
255 | 0 | AggValueSlot::IsAliased_t(!IsInit), |
256 | 0 | AggValueSlot::MayOverlap)); |
257 | 0 | return; |
258 | 0 | } |
259 | | |
260 | 0 | case TEK_Scalar: { |
261 | 0 | RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false)); |
262 | 0 | LValue LV = MakeAddrLValue(Location, E->getType()); |
263 | 0 | EmitStoreThroughLValue(RV, LV); |
264 | 0 | return; |
265 | 0 | } |
266 | 0 | } |
267 | 0 | llvm_unreachable("bad evaluation kind"); |
268 | 0 | } |
269 | | |
270 | | static void |
271 | | pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M, |
272 | 0 | const Expr *E, Address ReferenceTemporary) { |
273 | | // Objective-C++ ARC: |
274 | | // If we are binding a reference to a temporary that has ownership, we |
275 | | // need to perform retain/release operations on the temporary. |
276 | | // |
277 | | // FIXME: This should be looking at E, not M. |
278 | 0 | if (auto Lifetime = M->getType().getObjCLifetime()) { |
279 | 0 | switch (Lifetime) { |
280 | 0 | case Qualifiers::OCL_None: |
281 | 0 | case Qualifiers::OCL_ExplicitNone: |
282 | | // Carry on to normal cleanup handling. |
283 | 0 | break; |
284 | | |
285 | 0 | case Qualifiers::OCL_Autoreleasing: |
286 | | // Nothing to do; cleaned up by an autorelease pool. |
287 | 0 | return; |
288 | | |
289 | 0 | case Qualifiers::OCL_Strong: |
290 | 0 | case Qualifiers::OCL_Weak: |
291 | 0 | switch (StorageDuration Duration = M->getStorageDuration()) { |
292 | 0 | case SD_Static: |
293 | | // Note: we intentionally do not register a cleanup to release |
294 | | // the object on program termination. |
295 | 0 | return; |
296 | | |
297 | 0 | case SD_Thread: |
298 | | // FIXME: We should probably register a cleanup in this case. |
299 | 0 | return; |
300 | | |
301 | 0 | case SD_Automatic: |
302 | 0 | case SD_FullExpression: |
303 | 0 | CodeGenFunction::Destroyer *Destroy; |
304 | 0 | CleanupKind CleanupKind; |
305 | 0 | if (Lifetime == Qualifiers::OCL_Strong) { |
306 | 0 | const ValueDecl *VD = M->getExtendingDecl(); |
307 | 0 | bool Precise = |
308 | 0 | VD && isa<VarDecl>(VD) && VD->hasAttr<ObjCPreciseLifetimeAttr>(); |
309 | 0 | CleanupKind = CGF.getARCCleanupKind(); |
310 | 0 | Destroy = Precise ? &CodeGenFunction::destroyARCStrongPrecise |
311 | 0 | : &CodeGenFunction::destroyARCStrongImprecise; |
312 | 0 | } else { |
313 | | // __weak objects always get EH cleanups; otherwise, exceptions |
314 | | // could cause really nasty crashes instead of mere leaks. |
315 | 0 | CleanupKind = NormalAndEHCleanup; |
316 | 0 | Destroy = &CodeGenFunction::destroyARCWeak; |
317 | 0 | } |
318 | 0 | if (Duration == SD_FullExpression) |
319 | 0 | CGF.pushDestroy(CleanupKind, ReferenceTemporary, |
320 | 0 | M->getType(), *Destroy, |
321 | 0 | CleanupKind & EHCleanup); |
322 | 0 | else |
323 | 0 | CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary, |
324 | 0 | M->getType(), |
325 | 0 | *Destroy, CleanupKind & EHCleanup); |
326 | 0 | return; |
327 | | |
328 | 0 | case SD_Dynamic: |
329 | 0 | llvm_unreachable("temporary cannot have dynamic storage duration"); |
330 | 0 | } |
331 | 0 | llvm_unreachable("unknown storage duration"); |
332 | 0 | } |
333 | 0 | } |
334 | | |
335 | 0 | CXXDestructorDecl *ReferenceTemporaryDtor = nullptr; |
336 | 0 | if (const RecordType *RT = |
337 | 0 | E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) { |
338 | | // Get the destructor for the reference temporary. |
339 | 0 | auto *ClassDecl = cast<CXXRecordDecl>(RT->getDecl()); |
340 | 0 | if (!ClassDecl->hasTrivialDestructor()) |
341 | 0 | ReferenceTemporaryDtor = ClassDecl->getDestructor(); |
342 | 0 | } |
343 | |
344 | 0 | if (!ReferenceTemporaryDtor) |
345 | 0 | return; |
346 | | |
347 | | // Call the destructor for the temporary. |
348 | 0 | switch (M->getStorageDuration()) { |
349 | 0 | case SD_Static: |
350 | 0 | case SD_Thread: { |
351 | 0 | llvm::FunctionCallee CleanupFn; |
352 | 0 | llvm::Constant *CleanupArg; |
353 | 0 | if (E->getType()->isArrayType()) { |
354 | 0 | CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper( |
355 | 0 | ReferenceTemporary, E->getType(), |
356 | 0 | CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions, |
357 | 0 | dyn_cast_or_null<VarDecl>(M->getExtendingDecl())); |
358 | 0 | CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy); |
359 | 0 | } else { |
360 | 0 | CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor( |
361 | 0 | GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete)); |
362 | 0 | CleanupArg = cast<llvm::Constant>(ReferenceTemporary.getPointer()); |
363 | 0 | } |
364 | 0 | CGF.CGM.getCXXABI().registerGlobalDtor( |
365 | 0 | CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg); |
366 | 0 | break; |
367 | 0 | } |
368 | | |
369 | 0 | case SD_FullExpression: |
370 | 0 | CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(), |
371 | 0 | CodeGenFunction::destroyCXXObject, |
372 | 0 | CGF.getLangOpts().Exceptions); |
373 | 0 | break; |
374 | | |
375 | 0 | case SD_Automatic: |
376 | 0 | CGF.pushLifetimeExtendedDestroy(NormalAndEHCleanup, |
377 | 0 | ReferenceTemporary, E->getType(), |
378 | 0 | CodeGenFunction::destroyCXXObject, |
379 | 0 | CGF.getLangOpts().Exceptions); |
380 | 0 | break; |
381 | | |
382 | 0 | case SD_Dynamic: |
383 | 0 | llvm_unreachable("temporary cannot have dynamic storage duration"); |
384 | 0 | } |
385 | 0 | } |
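 | | // Illustrative examples of the storage durations handled above:
 | | //
 | | //   void f() {
 | | //     const Foo &r = Foo();  // SD_Automatic: cleanup runs at end of scope
 | | //     g(Foo());              // SD_FullExpression: cleanup at the end of
 | | //                            // the enclosing full-expression
 | | //   }
 | | //   const Foo &s = Foo();    // at namespace scope, SD_Static: destructor
 | | //                            // registered through registerGlobalDtor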
386 | | |
387 | | static Address createReferenceTemporary(CodeGenFunction &CGF, |
388 | | const MaterializeTemporaryExpr *M, |
389 | | const Expr *Inner, |
390 | 0 | Address *Alloca = nullptr) { |
391 | 0 | auto &TCG = CGF.getTargetHooks(); |
392 | 0 | switch (M->getStorageDuration()) { |
393 | 0 | case SD_FullExpression: |
394 | 0 | case SD_Automatic: { |
395 | | // If we have a constant temporary array or record, try to promote it into a
396 | | // constant global under the same rules by which a normal constant would've
397 | | // been promoted. This is easier on the optimizer and generally emits fewer
398 | | // instructions.
399 | 0 | QualType Ty = Inner->getType(); |
400 | 0 | if (CGF.CGM.getCodeGenOpts().MergeAllConstants && |
401 | 0 | (Ty->isArrayType() || Ty->isRecordType()) && |
402 | 0 | Ty.isConstantStorage(CGF.getContext(), true, false)) |
403 | 0 | if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) { |
404 | 0 | auto AS = CGF.CGM.GetGlobalConstantAddressSpace(); |
405 | 0 | auto *GV = new llvm::GlobalVariable( |
406 | 0 | CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true, |
407 | 0 | llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr, |
408 | 0 | llvm::GlobalValue::NotThreadLocal, |
409 | 0 | CGF.getContext().getTargetAddressSpace(AS)); |
410 | 0 | CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty); |
411 | 0 | GV->setAlignment(alignment.getAsAlign()); |
412 | 0 | llvm::Constant *C = GV; |
413 | 0 | if (AS != LangAS::Default) |
414 | 0 | C = TCG.performAddrSpaceCast( |
415 | 0 | CGF.CGM, GV, AS, LangAS::Default, |
416 | 0 | GV->getValueType()->getPointerTo( |
417 | 0 | CGF.getContext().getTargetAddressSpace(LangAS::Default))); |
418 | | // FIXME: Should we put the new global into a COMDAT? |
419 | 0 | return Address(C, GV->getValueType(), alignment); |
420 | 0 | } |
421 | 0 | return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca); |
422 | 0 | } |
423 | 0 | case SD_Thread: |
424 | 0 | case SD_Static: |
425 | 0 | return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner); |
426 | | |
427 | 0 | case SD_Dynamic: |
428 | 0 | llvm_unreachable("temporary can't have dynamic storage duration"); |
429 | 0 | } |
430 | 0 | llvm_unreachable("unknown storage duration"); |
431 | 0 | } |
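 | | // A sketch of the promotion case above: with constant merging enabled
 | | // (-fmerge-all-constants), binding a reference to a constant aggregate
 | | // temporary such as
 | | //
 | | //   const int (&r)[3] = {1, 2, 3};
 | | //
 | | // may emit a private constant global named ".ref.tmp" instead of an alloca
 | | // plus per-element stores.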
432 | | |
433 | | /// Helper method to check if the underlying ABI is AAPCS |
434 | 0 | static bool isAAPCS(const TargetInfo &TargetInfo) { |
435 | 0 | return TargetInfo.getABI().starts_with("aapcs"); |
436 | 0 | } |
437 | | |
438 | | LValue CodeGenFunction:: |
439 | 0 | EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) { |
440 | 0 | const Expr *E = M->getSubExpr(); |
441 | |
442 | 0 | assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) || |
443 | 0 | !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) && |
444 | 0 | "Reference should never be pseudo-strong!"); |
445 | | |
446 | | // FIXME: Ideally this would use EmitAnyExprToMem; however, we cannot do so,
447 | | // as that would cause the lifetime adjustment to be lost for ARC.
448 | 0 | auto ownership = M->getType().getObjCLifetime(); |
449 | 0 | if (ownership != Qualifiers::OCL_None && |
450 | 0 | ownership != Qualifiers::OCL_ExplicitNone) { |
451 | 0 | Address Object = createReferenceTemporary(*this, M, E); |
452 | 0 | if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) { |
453 | 0 | llvm::Type *Ty = ConvertTypeForMem(E->getType()); |
454 | 0 | Object = Object.withElementType(Ty); |
455 | | |
456 | | // createReferenceTemporary will promote the temporary to a global with a |
457 | | // constant initializer if it can. It can only do this to a value of |
458 | | // ARC-manageable type if the value is global and therefore "immune" to |
459 | | // ref-counting operations. Therefore we have no need to emit either a |
460 | | // dynamic initialization or a cleanup and we can just return the address |
461 | | // of the temporary. |
462 | 0 | if (Var->hasInitializer()) |
463 | 0 | return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl); |
464 | | |
465 | 0 | Var->setInitializer(CGM.EmitNullConstant(E->getType())); |
466 | 0 | } |
467 | 0 | LValue RefTempDst = MakeAddrLValue(Object, M->getType(), |
468 | 0 | AlignmentSource::Decl); |
469 | |
470 | 0 | switch (getEvaluationKind(E->getType())) { |
471 | 0 | default: llvm_unreachable("expected scalar or aggregate expression"); |
472 | 0 | case TEK_Scalar: |
473 | 0 | EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false); |
474 | 0 | break; |
475 | 0 | case TEK_Aggregate: { |
476 | 0 | EmitAggExpr(E, AggValueSlot::forAddr(Object, |
477 | 0 | E->getType().getQualifiers(), |
478 | 0 | AggValueSlot::IsDestructed, |
479 | 0 | AggValueSlot::DoesNotNeedGCBarriers, |
480 | 0 | AggValueSlot::IsNotAliased, |
481 | 0 | AggValueSlot::DoesNotOverlap)); |
482 | 0 | break; |
483 | 0 | } |
484 | 0 | } |
485 | | |
486 | 0 | pushTemporaryCleanup(*this, M, E, Object); |
487 | 0 | return RefTempDst; |
488 | 0 | } |
489 | | |
490 | 0 | SmallVector<const Expr *, 2> CommaLHSs; |
491 | 0 | SmallVector<SubobjectAdjustment, 2> Adjustments; |
492 | 0 | E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments); |
493 | |
494 | 0 | for (const auto &Ignored : CommaLHSs) |
495 | 0 | EmitIgnoredExpr(Ignored); |
496 | |
497 | 0 | if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) { |
498 | 0 | if (opaque->getType()->isRecordType()) { |
499 | 0 | assert(Adjustments.empty()); |
500 | 0 | return EmitOpaqueValueLValue(opaque); |
501 | 0 | } |
502 | 0 | } |
503 | | |
504 | | // Create and initialize the reference temporary. |
505 | 0 | Address Alloca = Address::invalid(); |
506 | 0 | Address Object = createReferenceTemporary(*this, M, E, &Alloca); |
507 | 0 | if (auto *Var = dyn_cast<llvm::GlobalVariable>( |
508 | 0 | Object.getPointer()->stripPointerCasts())) { |
509 | 0 | llvm::Type *TemporaryType = ConvertTypeForMem(E->getType()); |
510 | 0 | Object = Object.withElementType(TemporaryType); |
511 | | // If the temporary is a global and has a constant initializer or is a |
512 | | // constant temporary that we promoted to a global, we may have already |
513 | | // initialized it. |
514 | 0 | if (!Var->hasInitializer()) { |
515 | 0 | Var->setInitializer(CGM.EmitNullConstant(E->getType())); |
516 | 0 | EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true); |
517 | 0 | } |
518 | 0 | } else { |
519 | 0 | switch (M->getStorageDuration()) { |
520 | 0 | case SD_Automatic: |
521 | 0 | if (auto *Size = EmitLifetimeStart( |
522 | 0 | CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()), |
523 | 0 | Alloca.getPointer())) { |
524 | 0 | pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker, |
525 | 0 | Alloca, Size); |
526 | 0 | } |
527 | 0 | break; |
528 | | |
529 | 0 | case SD_FullExpression: { |
530 | 0 | if (!ShouldEmitLifetimeMarkers) |
531 | 0 | break; |
532 | | |
533 | | // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
534 | | // marker. Instead, start the lifetime of a conditional temporary earlier
535 | | // so that it's unconditional. Don't do this with sanitizers, which need
536 | | // more precise lifetime marks. However, when inside an "await.suspend"
537 | | // block, we should always avoid the conditional cleanup, because it creates
538 | | // a boolean marker that lives across await_suspend and can destroy the
539 | | // coro frame.
540 | 0 | ConditionalEvaluation *OldConditional = nullptr; |
541 | 0 | CGBuilderTy::InsertPoint OldIP; |
542 | 0 | if (isInConditionalBranch() && !E->getType().isDestructedType() && |
543 | 0 | ((!SanOpts.has(SanitizerKind::HWAddress) && |
544 | 0 | !SanOpts.has(SanitizerKind::Memory) && |
545 | 0 | !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) || |
546 | 0 | inSuspendBlock())) { |
547 | 0 | OldConditional = OutermostConditional; |
548 | 0 | OutermostConditional = nullptr; |
549 | |
550 | 0 | OldIP = Builder.saveIP(); |
551 | 0 | llvm::BasicBlock *Block = OldConditional->getStartingBlock(); |
552 | 0 | Builder.restoreIP(CGBuilderTy::InsertPoint( |
553 | 0 | Block, llvm::BasicBlock::iterator(Block->back()))); |
554 | 0 | } |
555 | |
556 | 0 | if (auto *Size = EmitLifetimeStart( |
557 | 0 | CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()), |
558 | 0 | Alloca.getPointer())) { |
559 | 0 | pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca, |
560 | 0 | Size); |
561 | 0 | } |
562 | |
563 | 0 | if (OldConditional) { |
564 | 0 | OutermostConditional = OldConditional; |
565 | 0 | Builder.restoreIP(OldIP); |
566 | 0 | } |
567 | 0 | break; |
568 | 0 | } |
569 | | |
570 | 0 | default: |
571 | 0 | break; |
572 | 0 | } |
573 | 0 | EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true); |
574 | 0 | } |
575 | 0 | pushTemporaryCleanup(*this, M, E, Object); |
576 | | |
577 | | // Perform derived-to-base casts and/or field accesses, to get from the |
578 | | // temporary object we created (and, potentially, for which we extended |
579 | | // the lifetime) to the subobject we're binding the reference to. |
580 | 0 | for (SubobjectAdjustment &Adjustment : llvm::reverse(Adjustments)) { |
581 | 0 | switch (Adjustment.Kind) { |
582 | 0 | case SubobjectAdjustment::DerivedToBaseAdjustment: |
583 | 0 | Object = |
584 | 0 | GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass, |
585 | 0 | Adjustment.DerivedToBase.BasePath->path_begin(), |
586 | 0 | Adjustment.DerivedToBase.BasePath->path_end(), |
587 | 0 | /*NullCheckValue=*/ false, E->getExprLoc()); |
588 | 0 | break; |
589 | | |
590 | 0 | case SubobjectAdjustment::FieldAdjustment: { |
591 | 0 | LValue LV = MakeAddrLValue(Object, E->getType(), AlignmentSource::Decl); |
592 | 0 | LV = EmitLValueForField(LV, Adjustment.Field); |
593 | 0 | assert(LV.isSimple() && |
594 | 0 | "materialized temporary field is not a simple lvalue"); |
595 | 0 | Object = LV.getAddress(*this); |
596 | 0 | break; |
597 | 0 | } |
598 | | |
599 | 0 | case SubobjectAdjustment::MemberPointerAdjustment: { |
600 | 0 | llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS); |
601 | 0 | Object = EmitCXXMemberDataPointerAddress(E, Object, Ptr, |
602 | 0 | Adjustment.Ptr.MPT); |
603 | 0 | break; |
604 | 0 | } |
605 | 0 | } |
606 | 0 | } |
607 | | |
608 | 0 | return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl); |
609 | 0 | } |
610 | | |
611 | | RValue |
612 | 0 | CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) { |
613 | | // Emit the expression as an lvalue. |
614 | 0 | LValue LV = EmitLValue(E); |
615 | 0 | assert(LV.isSimple()); |
616 | 0 | llvm::Value *Value = LV.getPointer(*this); |
617 | |
618 | 0 | if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) { |
619 | | // C++11 [dcl.ref]p5 (as amended by core issue 453): |
620 | | // If a glvalue to which a reference is directly bound designates neither |
621 | | // an existing object or function of an appropriate type nor a region of |
622 | | // storage of suitable size and alignment to contain an object of the |
623 | | // reference's type, the behavior is undefined. |
624 | 0 | QualType Ty = E->getType(); |
625 | 0 | EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty); |
626 | 0 | } |
627 | |
628 | 0 | return RValue::get(Value); |
629 | 0 | } |
630 | | |
631 | | |
632 | | /// getAccessedFieldNo - Given an encoded value and a result number, return the |
633 | | /// input field number being accessed. |
634 | | unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx, |
635 | 0 | const llvm::Constant *Elts) { |
636 | 0 | return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx)) |
637 | 0 | ->getZExtValue(); |
638 | 0 | } |
639 | | |
640 | | /// Emit the hash_16_bytes function from include/llvm/ADT/Hashing.h. |
641 | | static llvm::Value *emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low, |
642 | 0 | llvm::Value *High) { |
643 | 0 | llvm::Value *KMul = Builder.getInt64(0x9ddfea08eb382d69ULL); |
644 | 0 | llvm::Value *K47 = Builder.getInt64(47); |
645 | 0 | llvm::Value *A0 = Builder.CreateMul(Builder.CreateXor(Low, High), KMul); |
646 | 0 | llvm::Value *A1 = Builder.CreateXor(Builder.CreateLShr(A0, K47), A0); |
647 | 0 | llvm::Value *B0 = Builder.CreateMul(Builder.CreateXor(High, A1), KMul); |
648 | 0 | llvm::Value *B1 = Builder.CreateXor(Builder.CreateLShr(B0, K47), B0); |
649 | 0 | return Builder.CreateMul(B1, KMul); |
650 | 0 | } |
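 | | // For reference, the scalar computation the IR above mirrors (hash_16_bytes
 | | // from llvm/ADT/Hashing.h) is roughly:
 | | //
 | | //   uint64_t hash16(uint64_t Low, uint64_t High) {
 | | //     const uint64_t KMul = 0x9ddfea08eb382d69ULL;
 | | //     uint64_t A = (Low ^ High) * KMul;
 | | //     A ^= (A >> 47);
 | | //     uint64_t B = (High ^ A) * KMul;
 | | //     B ^= (B >> 47);
 | | //     return B * KMul;
 | | //   }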
651 | | |
652 | 0 | bool CodeGenFunction::isNullPointerAllowed(TypeCheckKind TCK) { |
653 | 0 | return TCK == TCK_DowncastPointer || TCK == TCK_Upcast || |
654 | 0 | TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation; |
655 | 0 | } |
656 | | |
657 | 0 | bool CodeGenFunction::isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) { |
658 | 0 | CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); |
659 | 0 | return (RD && RD->hasDefinition() && RD->isDynamicClass()) && |
660 | 0 | (TCK == TCK_MemberAccess || TCK == TCK_MemberCall || |
661 | 0 | TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference || |
662 | 0 | TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation); |
663 | 0 | } |
664 | | |
665 | 0 | bool CodeGenFunction::sanitizePerformTypeCheck() const { |
666 | 0 | return SanOpts.has(SanitizerKind::Null) || |
667 | 0 | SanOpts.has(SanitizerKind::Alignment) || |
668 | 0 | SanOpts.has(SanitizerKind::ObjectSize) || |
669 | 0 | SanOpts.has(SanitizerKind::Vptr); |
670 | 0 | } |
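 | | // These correspond to the UBSan groups -fsanitize=null, -fsanitize=alignment,
 | | // -fsanitize=object-size, and -fsanitize=vptr; enabling any one of them is
 | | // enough to make EmitTypeCheck below emit real checks.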
671 | | |
672 | | void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, |
673 | | llvm::Value *Ptr, QualType Ty, |
674 | | CharUnits Alignment, |
675 | | SanitizerSet SkippedChecks, |
676 | 0 | llvm::Value *ArraySize) { |
677 | 0 | if (!sanitizePerformTypeCheck()) |
678 | 0 | return; |
679 | | |
680 | | // Don't check pointers outside the default address space. The null check |
681 | | // isn't correct, the object-size check isn't supported by LLVM, and we can't |
682 | | // communicate the addresses to the runtime handler for the vptr check. |
683 | 0 | if (Ptr->getType()->getPointerAddressSpace()) |
684 | 0 | return; |
685 | | |
686 | | // Don't check pointers to volatile data. The behavior here is implementation- |
687 | | // defined. |
688 | 0 | if (Ty.isVolatileQualified()) |
689 | 0 | return; |
690 | | |
691 | 0 | SanitizerScope SanScope(this); |
692 | |
693 | 0 | SmallVector<std::pair<llvm::Value *, SanitizerMask>, 3> Checks; |
694 | 0 | llvm::BasicBlock *Done = nullptr; |
695 | | |
696 | | // Quickly determine whether we have a pointer to an alloca. It's possible |
697 | | // to skip null checks, and some alignment checks, for these pointers. This |
698 | | // can reduce compile time significantly.
699 | 0 | auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCasts()); |
700 | |
701 | 0 | llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext()); |
702 | 0 | llvm::Value *IsNonNull = nullptr; |
703 | 0 | bool IsGuaranteedNonNull = |
704 | 0 | SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca; |
705 | 0 | bool AllowNullPointers = isNullPointerAllowed(TCK); |
706 | 0 | if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) && |
707 | 0 | !IsGuaranteedNonNull) { |
708 | | // The glvalue must not be an empty glvalue. |
709 | 0 | IsNonNull = Builder.CreateIsNotNull(Ptr); |
710 | | |
711 | | // The IR builder can constant-fold the null check if the pointer points to |
712 | | // a constant. |
713 | 0 | IsGuaranteedNonNull = IsNonNull == True; |
714 | | |
715 | | // Skip the null check if the pointer is known to be non-null. |
716 | 0 | if (!IsGuaranteedNonNull) { |
717 | 0 | if (AllowNullPointers) { |
718 | | // When performing pointer casts, it's OK if the value is null. |
719 | | // Skip the remaining checks in that case. |
720 | 0 | Done = createBasicBlock("null"); |
721 | 0 | llvm::BasicBlock *Rest = createBasicBlock("not.null"); |
722 | 0 | Builder.CreateCondBr(IsNonNull, Rest, Done); |
723 | 0 | EmitBlock(Rest); |
724 | 0 | } else { |
725 | 0 | Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::Null)); |
726 | 0 | } |
727 | 0 | } |
728 | 0 | } |
729 | |
730 | 0 | if (SanOpts.has(SanitizerKind::ObjectSize) && |
731 | 0 | !SkippedChecks.has(SanitizerKind::ObjectSize) && |
732 | 0 | !Ty->isIncompleteType()) { |
733 | 0 | uint64_t TySize = CGM.getMinimumObjectSize(Ty).getQuantity(); |
734 | 0 | llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize); |
735 | 0 | if (ArraySize) |
736 | 0 | Size = Builder.CreateMul(Size, ArraySize); |
737 | | |
738 | | // Degenerate case: new X[0] does not need an objectsize check. |
739 | 0 | llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size); |
740 | 0 | if (!ConstantSize || !ConstantSize->isNullValue()) { |
741 | | // The glvalue must refer to a large enough storage region. |
742 | | // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation |
743 | | // to check this. |
744 | | // FIXME: Get object address space |
745 | 0 | llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy }; |
746 | 0 | llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys); |
747 | 0 | llvm::Value *Min = Builder.getFalse(); |
748 | 0 | llvm::Value *NullIsUnknown = Builder.getFalse(); |
749 | 0 | llvm::Value *Dynamic = Builder.getFalse(); |
750 | 0 | llvm::Value *LargeEnough = Builder.CreateICmpUGE( |
751 | 0 | Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic}), Size); |
752 | 0 | Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize)); |
753 | 0 | } |
754 | 0 | } |
755 | |
756 | 0 | llvm::MaybeAlign AlignVal; |
757 | 0 | llvm::Value *PtrAsInt = nullptr; |
758 | |
759 | 0 | if (SanOpts.has(SanitizerKind::Alignment) && |
760 | 0 | !SkippedChecks.has(SanitizerKind::Alignment)) { |
761 | 0 | AlignVal = Alignment.getAsMaybeAlign(); |
762 | 0 | if (!Ty->isIncompleteType() && !AlignVal) |
763 | 0 | AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr, |
764 | 0 | /*ForPointeeType=*/true) |
765 | 0 | .getAsMaybeAlign(); |
766 | | |
767 | | // The glvalue must be suitably aligned. |
768 | 0 | if (AlignVal && *AlignVal > llvm::Align(1) && |
769 | 0 | (!PtrToAlloca || PtrToAlloca->getAlign() < *AlignVal)) { |
770 | 0 | PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy); |
771 | 0 | llvm::Value *Align = Builder.CreateAnd( |
772 | 0 | PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal->value() - 1)); |
773 | 0 | llvm::Value *Aligned = |
774 | 0 | Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0)); |
775 | 0 | if (Aligned != True) |
776 | 0 | Checks.push_back(std::make_pair(Aligned, SanitizerKind::Alignment)); |
777 | 0 | } |
778 | 0 | } |
779 | |
780 | 0 | if (Checks.size() > 0) { |
781 | 0 | llvm::Constant *StaticData[] = { |
782 | 0 | EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(Ty), |
783 | 0 | llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2(*AlignVal) : 1), |
784 | 0 | llvm::ConstantInt::get(Int8Ty, TCK)}; |
785 | 0 | EmitCheck(Checks, SanitizerHandler::TypeMismatch, StaticData, |
786 | 0 | PtrAsInt ? PtrAsInt : Ptr); |
787 | 0 | } |
788 | | |
789 | | // If possible, check that the vptr indicates that there is a subobject of |
790 | | // type Ty at offset zero within this object. |
791 | | // |
792 | | // C++11 [basic.life]p5,6: |
793 | | // [For storage which does not refer to an object within its lifetime] |
794 | | // The program has undefined behavior if: |
795 | | // -- the [pointer or glvalue] is used to access a non-static data member |
796 | | // or call a non-static member function |
797 | 0 | if (SanOpts.has(SanitizerKind::Vptr) && |
798 | 0 | !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) { |
799 | | // Ensure that the pointer is non-null before loading it. If there is no |
800 | | // compile-time guarantee, reuse the run-time null check or emit a new one. |
801 | 0 | if (!IsGuaranteedNonNull) { |
802 | 0 | if (!IsNonNull) |
803 | 0 | IsNonNull = Builder.CreateIsNotNull(Ptr); |
804 | 0 | if (!Done) |
805 | 0 | Done = createBasicBlock("vptr.null"); |
806 | 0 | llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null"); |
807 | 0 | Builder.CreateCondBr(IsNonNull, VptrNotNull, Done); |
808 | 0 | EmitBlock(VptrNotNull); |
809 | 0 | } |
810 | | |
811 | | // Compute a hash of the mangled name of the type. |
812 | | // |
813 | | // FIXME: This is not guaranteed to be deterministic! Move to a |
814 | | // fingerprinting mechanism once LLVM provides one. For the time |
815 | | // being the implementation happens to be deterministic. |
816 | 0 | SmallString<64> MangledName; |
817 | 0 | llvm::raw_svector_ostream Out(MangledName); |
818 | 0 | CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(), |
819 | 0 | Out); |
820 | | |
821 | | // Contained in NoSanitizeList based on the mangled type. |
822 | 0 | if (!CGM.getContext().getNoSanitizeList().containsType(SanitizerKind::Vptr, |
823 | 0 | Out.str())) { |
824 | 0 | llvm::hash_code TypeHash = hash_value(Out.str()); |
825 | | |
826 | | // Load the vptr, and compute hash_16_bytes(TypeHash, vptr). |
827 | 0 | llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash); |
828 | 0 | Address VPtrAddr(Ptr, IntPtrTy, getPointerAlign()); |
829 | 0 | llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr); |
830 | 0 | llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty); |
831 | |
832 | 0 | llvm::Value *Hash = emitHash16Bytes(Builder, Low, High); |
833 | 0 | Hash = Builder.CreateTrunc(Hash, IntPtrTy); |
834 | | |
835 | | // Look the hash up in our cache. |
836 | 0 | const int CacheSize = 128; |
837 | 0 | llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize); |
838 | 0 | llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable, |
839 | 0 | "__ubsan_vptr_type_cache"); |
840 | 0 | llvm::Value *Slot = Builder.CreateAnd(Hash, |
841 | 0 | llvm::ConstantInt::get(IntPtrTy, |
842 | 0 | CacheSize-1)); |
843 | 0 | llvm::Value *Indices[] = { Builder.getInt32(0), Slot }; |
844 | 0 | llvm::Value *CacheVal = Builder.CreateAlignedLoad( |
845 | 0 | IntPtrTy, Builder.CreateInBoundsGEP(HashTable, Cache, Indices), |
846 | 0 | getPointerAlign()); |
847 | | |
848 | | // If the hash isn't in the cache, call a runtime handler to perform the |
849 | | // hard work of checking whether the vptr is for an object of the right |
850 | | // type. This will either fill in the cache and return, or produce a |
851 | | // diagnostic. |
852 | 0 | llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash); |
853 | 0 | llvm::Constant *StaticData[] = { |
854 | 0 | EmitCheckSourceLocation(Loc), |
855 | 0 | EmitCheckTypeDescriptor(Ty), |
856 | 0 | CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()), |
857 | 0 | llvm::ConstantInt::get(Int8Ty, TCK) |
858 | 0 | }; |
859 | 0 | llvm::Value *DynamicData[] = { Ptr, Hash }; |
860 | 0 | EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr), |
861 | 0 | SanitizerHandler::DynamicTypeCacheMiss, StaticData, |
862 | 0 | DynamicData); |
863 | 0 | } |
864 | 0 | } |
865 | |
866 | 0 | if (Done) { |
867 | 0 | Builder.CreateBr(Done); |
868 | 0 | EmitBlock(Done); |
869 | 0 | } |
870 | 0 | } |
871 | | |
872 | | llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E, |
873 | 0 | QualType EltTy) { |
874 | 0 | ASTContext &C = getContext(); |
875 | 0 | uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity(); |
876 | 0 | if (!EltSize) |
877 | 0 | return nullptr; |
878 | | |
879 | 0 | auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts()); |
880 | 0 | if (!ArrayDeclRef) |
881 | 0 | return nullptr; |
882 | | |
883 | 0 | auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl()); |
884 | 0 | if (!ParamDecl) |
885 | 0 | return nullptr; |
886 | | |
887 | 0 | auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>(); |
888 | 0 | if (!POSAttr) |
889 | 0 | return nullptr; |
890 | | |
891 | | // Don't load the size if it's a lower bound. |
892 | 0 | int POSType = POSAttr->getType(); |
893 | 0 | if (POSType != 0 && POSType != 1) |
894 | 0 | return nullptr; |
895 | | |
896 | | // Find the implicit size parameter. |
897 | 0 | auto PassedSizeIt = SizeArguments.find(ParamDecl); |
898 | 0 | if (PassedSizeIt == SizeArguments.end()) |
899 | 0 | return nullptr; |
900 | | |
901 | 0 | const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second; |
902 | 0 | assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable"); |
903 | 0 | Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second; |
904 | 0 | llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false, |
905 | 0 | C.getSizeType(), E->getExprLoc()); |
906 | 0 | llvm::Value *SizeOfElement = |
907 | 0 | llvm::ConstantInt::get(SizeInBytes->getType(), EltSize); |
908 | 0 | return Builder.CreateUDiv(SizeInBytes, SizeOfElement); |
909 | 0 | } |
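 | | // The attribute walked back to above looks like this (a sketch):
 | | //
 | | //   void fill(char *buf __attribute__((pass_object_size(0))), char v);
 | | //
 | | // For types 0 and 1 the implicit size parameter carries a usable
 | | // __builtin_object_size-style byte count, which is divided by the element
 | | // size to recover an element bound; types 2 and 3 only promise a minimum,
 | | // so they are rejected early.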
910 | | |
911 | | /// If Base is known to point to the start of an array, return the length of
912 | | /// that array. Return nullptr if the length cannot be determined.
913 | | static llvm::Value *getArrayIndexingBound(CodeGenFunction &CGF, |
914 | | const Expr *Base, |
915 | | QualType &IndexedType, |
916 | | LangOptions::StrictFlexArraysLevelKind |
917 | 0 | StrictFlexArraysLevel) { |
918 | | // For the vector indexing extension, the bound is the number of elements. |
919 | 0 | if (const VectorType *VT = Base->getType()->getAs<VectorType>()) { |
920 | 0 | IndexedType = Base->getType(); |
921 | 0 | return CGF.Builder.getInt32(VT->getNumElements()); |
922 | 0 | } |
923 | | |
924 | 0 | Base = Base->IgnoreParens(); |
925 | |
926 | 0 | if (const auto *CE = dyn_cast<CastExpr>(Base)) { |
927 | 0 | if (CE->getCastKind() == CK_ArrayToPointerDecay && |
928 | 0 | !CE->getSubExpr()->isFlexibleArrayMemberLike(CGF.getContext(), |
929 | 0 | StrictFlexArraysLevel)) { |
930 | 0 | CodeGenFunction::SanitizerScope SanScope(&CGF); |
931 | |
932 | 0 | IndexedType = CE->getSubExpr()->getType(); |
933 | 0 | const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe(); |
934 | 0 | if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) |
935 | 0 | return CGF.Builder.getInt(CAT->getSize()); |
936 | | |
937 | 0 | if (const auto *VAT = dyn_cast<VariableArrayType>(AT)) |
938 | 0 | return CGF.getVLASize(VAT).NumElts; |
939 | | // Ignore pass_object_size here. It's not applicable on decayed pointers. |
940 | 0 | } |
941 | 0 | } |
942 | | |
943 | 0 | CodeGenFunction::SanitizerScope SanScope(&CGF); |
944 | |
945 | 0 | QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0}; |
946 | 0 | if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) { |
947 | 0 | IndexedType = Base->getType(); |
948 | 0 | return POS; |
949 | 0 | } |
950 | | |
951 | 0 | return nullptr; |
952 | 0 | } |
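 | | // Illustrative cases for the bound computation above:
 | | //
 | | //   int a[10];                       // a[i]: constant bound of 10 elements
 | | //   int v[n];                        // v[i]: VLA bound from getVLASize()
 | | //   struct S { int n; char fam[]; }; // s->fam[i]: no bound (flexible array
 | | //                                    // member, per StrictFlexArraysLevel)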
953 | | |
954 | | namespace { |
955 | | |
956 | | /// \p StructAccessBase returns the base \p Expr of a field access. It returns |
957 | | /// either a \p DeclRefExpr, representing the base pointer to the struct, i.e.: |
958 | | /// |
959 | | /// p in p->a.b.c
960 | | /// |
961 | | /// or a \p MemberExpr, if the \p MemberExpr has the \p RecordDecl we're |
962 | | /// looking for: |
963 | | /// |
964 | | /// struct s { |
965 | | /// struct s *ptr; |
966 | | /// int count; |
967 | | /// char array[] __attribute__((counted_by(count))); |
968 | | /// }; |
969 | | /// |
970 | | /// If we have an expression like \p p->ptr->array[index], we want the |
971 | | /// \p MemberExpr for \p p->ptr instead of \p p. |
972 | | class StructAccessBase |
973 | | : public ConstStmtVisitor<StructAccessBase, const Expr *> { |
974 | | const RecordDecl *ExpectedRD; |
975 | | |
976 | 0 | bool IsExpectedRecordDecl(const Expr *E) const { |
977 | 0 | QualType Ty = E->getType(); |
978 | 0 | if (Ty->isPointerType()) |
979 | 0 | Ty = Ty->getPointeeType(); |
980 | 0 | return ExpectedRD == Ty->getAsRecordDecl(); |
981 | 0 | } |
982 | | |
983 | | public: |
984 | 0 | StructAccessBase(const RecordDecl *ExpectedRD) : ExpectedRD(ExpectedRD) {} |
985 | | |
986 | | //===--------------------------------------------------------------------===// |
987 | | // Visitor Methods |
988 | | //===--------------------------------------------------------------------===// |
989 | | |
990 | | // NOTE: If we build C++ support for counted_by, then we'll have to handle |
991 | | // horrors like this: |
992 | | // |
993 | | // struct S { |
994 | | // int x, y; |
995 | | // int blah[] __attribute__((counted_by(x))); |
996 | | // } s; |
997 | | // |
998 | | // int foo(int index, int val) { |
999 | | // int (S::*IHatePMDs)[] = &S::blah; |
1000 | | // (s.*IHatePMDs)[index] = val; |
1001 | | // } |
1002 | | |
1003 | 0 | const Expr *Visit(const Expr *E) { |
1004 | 0 | return ConstStmtVisitor<StructAccessBase, const Expr *>::Visit(E); |
1005 | 0 | } |
1006 | | |
1007 | 0 | const Expr *VisitStmt(const Stmt *S) { return nullptr; } |
1008 | | |
1009 | | // These are the types we expect to return (in order of most to least |
1010 | | // likely): |
1011 | | // |
1012 | | // 1. DeclRefExpr - This is the expression for the base of the structure. |
1013 | | // It's exactly what we want to build an access to the \p counted_by |
1014 | | // field. |
1015 | | // 2. MemberExpr - This is the expression that has the same \p RecordDecl |
1016 | | // as the flexible array member's lexical enclosing \p RecordDecl. This
1017 | | // allows us to catch things like: "p->p->array" |
1018 | | // 3. CompoundLiteralExpr - This is for people who create something |
1019 | | // heretical like (struct foo has a flexible array member): |
1020 | | // |
1021 | | // (struct foo){ 1, 2 }.blah[idx]; |
1022 | 0 | const Expr *VisitDeclRefExpr(const DeclRefExpr *E) { |
1023 | 0 | return IsExpectedRecordDecl(E) ? E : nullptr; |
1024 | 0 | } |
1025 | 0 | const Expr *VisitMemberExpr(const MemberExpr *E) { |
1026 | 0 | if (IsExpectedRecordDecl(E) && E->isArrow()) |
1027 | 0 | return E; |
1028 | 0 | const Expr *Res = Visit(E->getBase()); |
1029 | 0 | return !Res && IsExpectedRecordDecl(E) ? E : Res; |
1030 | 0 | } |
1031 | 0 | const Expr *VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) { |
1032 | 0 | return IsExpectedRecordDecl(E) ? E : nullptr; |
1033 | 0 | } |
1034 | 0 | const Expr *VisitCallExpr(const CallExpr *E) { |
1035 | 0 | return IsExpectedRecordDecl(E) ? E : nullptr; |
1036 | 0 | } |
1037 | | |
1038 | 0 | const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) { |
1039 | 0 | if (IsExpectedRecordDecl(E)) |
1040 | 0 | return E; |
1041 | 0 | return Visit(E->getBase()); |
1042 | 0 | } |
1043 | 0 | const Expr *VisitCastExpr(const CastExpr *E) { |
1044 | 0 | return Visit(E->getSubExpr()); |
1045 | 0 | } |
1046 | 0 | const Expr *VisitParenExpr(const ParenExpr *E) { |
1047 | 0 | return Visit(E->getSubExpr()); |
1048 | 0 | } |
1049 | 0 | const Expr *VisitUnaryAddrOf(const UnaryOperator *E) { |
1050 | 0 | return Visit(E->getSubExpr()); |
1051 | 0 | } |
1052 | 0 | const Expr *VisitUnaryDeref(const UnaryOperator *E) { |
1053 | 0 | return Visit(E->getSubExpr()); |
1054 | 0 | } |
1055 | | }; |
1056 | | |
1057 | | } // end anonymous namespace |
1058 | | |
1059 | | using RecIndicesTy = |
1060 | | SmallVector<std::pair<const RecordDecl *, llvm::Value *>, 8>; |
1061 | | |
1062 | | static bool getGEPIndicesToField(CodeGenFunction &CGF, const RecordDecl *RD, |
1063 | 0 | const FieldDecl *FD, RecIndicesTy &Indices) { |
1064 | 0 | const CGRecordLayout &Layout = CGF.CGM.getTypes().getCGRecordLayout(RD); |
1065 | 0 | int64_t FieldNo = -1; |
1066 | 0 | for (const Decl *D : RD->decls()) { |
1067 | 0 | if (const auto *Field = dyn_cast<FieldDecl>(D)) { |
1068 | 0 | FieldNo = Layout.getLLVMFieldNo(Field); |
1069 | 0 | if (FD == Field) { |
1070 | 0 | Indices.emplace_back(std::make_pair(RD, CGF.Builder.getInt32(FieldNo))); |
1071 | 0 | return true; |
1072 | 0 | } |
1073 | 0 | } |
1074 | | |
1075 | 0 | if (const auto *Record = dyn_cast<RecordDecl>(D)) { |
1076 | 0 | ++FieldNo; |
1077 | 0 | if (getGEPIndicesToField(CGF, Record, FD, Indices)) { |
1078 | 0 | if (RD->isUnion()) |
1079 | 0 | FieldNo = 0; |
1080 | 0 | Indices.emplace_back(std::make_pair(RD, CGF.Builder.getInt32(FieldNo))); |
1081 | 0 | return true; |
1082 | 0 | } |
1083 | 0 | } |
1084 | 0 | } |
1085 | | |
1086 | 0 | return false; |
1087 | 0 | } |
1088 | | |
1089 | | /// This method is typically called in contexts where we can't generate |
1090 | | /// side effects, like in __builtin_dynamic_object_size. When finding
1091 | | /// expressions, only choose those that have either already been emitted or
1092 | | /// can be loaded without side effects.
1093 | | /// |
1094 | | /// - \p FAMDecl: the \p Decl for the flexible array member. It may not be |
1095 | | /// within the top-level struct. |
1096 | | /// - \p CountDecl: must be within the same non-anonymous struct as \p FAMDecl. |
1097 | | llvm::Value *CodeGenFunction::EmitCountedByFieldExpr( |
1098 | 0 | const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) { |
1099 | 0 | const RecordDecl *RD = CountDecl->getParent()->getOuterLexicalRecordContext(); |
1100 | | |
1101 | | // Find the base struct expr (i.e. p in p->a.b.c.d). |
1102 | 0 | const Expr *StructBase = StructAccessBase(RD).Visit(Base); |
1103 | 0 | if (!StructBase || StructBase->HasSideEffects(getContext())) |
1104 | 0 | return nullptr; |
1105 | | |
1106 | 0 | llvm::Value *Res = nullptr; |
1107 | 0 | if (const auto *DRE = dyn_cast<DeclRefExpr>(StructBase)) { |
1108 | 0 | Res = EmitDeclRefLValue(DRE).getPointer(*this); |
1109 | 0 | Res = Builder.CreateAlignedLoad(ConvertType(DRE->getType()), Res, |
1110 | 0 | getPointerAlign(), "dre.load"); |
1111 | 0 | } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(StructBase)) { |
1112 | 0 | LValue LV = EmitMemberExpr(ME); |
1113 | 0 | Address Addr = LV.getAddress(*this); |
1114 | 0 | Res = Addr.getPointer(); |
1115 | 0 | } else if (StructBase->getType()->isPointerType()) { |
1116 | 0 | LValueBaseInfo BaseInfo; |
1117 | 0 | TBAAAccessInfo TBAAInfo; |
1118 | 0 | Address Addr = EmitPointerWithAlignment(StructBase, &BaseInfo, &TBAAInfo); |
1119 | 0 | Res = Addr.getPointer(); |
1120 | 0 | } else { |
1121 | 0 | return nullptr; |
1122 | 0 | } |
1123 | | |
1124 | 0 | llvm::Value *Zero = Builder.getInt32(0); |
1125 | 0 | RecIndicesTy Indices; |
1126 | |
1127 | 0 | getGEPIndicesToField(*this, RD, CountDecl, Indices); |
1128 | |
1129 | 0 | for (auto I = Indices.rbegin(), E = Indices.rend(); I != E; ++I) |
1130 | 0 | Res = Builder.CreateInBoundsGEP( |
1131 | 0 | ConvertType(QualType(I->first->getTypeForDecl(), 0)), Res, |
1132 | 0 | {Zero, I->second}, "..counted_by.gep"); |
1133 | |
1134 | 0 | return Builder.CreateAlignedLoad(ConvertType(CountDecl->getType()), Res, |
1135 | 0 | getIntAlign(), "..counted_by.load"); |
1136 | 0 | } |
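 | | // A sketch of the source pattern this serves:
 | | //
 | | //   struct S {
 | | //     int count;
 | | //     char array[] __attribute__((counted_by(count)));
 | | //   };
 | | //
 | | // Given p->array[idx], the GEP chain built above loads p->count, so a
 | | // caller such as __builtin_dynamic_object_size(p->array, 0) or an
 | | // array-bounds check can bound the flexible array member.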
1137 | | |
1138 | 0 | const FieldDecl *CodeGenFunction::FindCountedByField(const FieldDecl *FD) { |
1139 | 0 | if (!FD || !FD->hasAttr<CountedByAttr>()) |
1140 | 0 | return nullptr; |
1141 | | |
1142 | 0 | const auto *CBA = FD->getAttr<CountedByAttr>(); |
1143 | 0 | if (!CBA) |
1144 | 0 | return nullptr; |
1145 | | |
1146 | 0 | auto GetNonAnonStructOrUnion = |
1147 | 0 | [](const RecordDecl *RD) -> const RecordDecl * { |
1148 | 0 | while (RD && RD->isAnonymousStructOrUnion()) { |
1149 | 0 | const auto *R = dyn_cast<RecordDecl>(RD->getDeclContext()); |
1150 | 0 | if (!R) |
1151 | 0 | return nullptr; |
1152 | 0 | RD = R; |
1153 | 0 | } |
1154 | 0 | return RD; |
1155 | 0 | }; |
1156 | 0 | const RecordDecl *EnclosingRD = GetNonAnonStructOrUnion(FD->getParent()); |
1157 | 0 | if (!EnclosingRD) |
1158 | 0 | return nullptr; |
1159 | | |
1160 | 0 | DeclarationName DName(CBA->getCountedByField()); |
1161 | 0 | DeclContext::lookup_result Lookup = EnclosingRD->lookup(DName); |
1162 | |
1163 | 0 | if (Lookup.empty()) |
1164 | 0 | return nullptr; |
1165 | | |
1166 | 0 | const NamedDecl *ND = Lookup.front(); |
1167 | 0 | if (const auto *IFD = dyn_cast<IndirectFieldDecl>(ND)) |
1168 | 0 | ND = IFD->getAnonField(); |
1169 | |
1170 | 0 | return dyn_cast<FieldDecl>(ND); |
1171 | 0 | } |
1172 | | |
1173 | | void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base, |
1174 | | llvm::Value *Index, QualType IndexType, |
1175 | 0 | bool Accessed) { |
1176 | 0 | assert(SanOpts.has(SanitizerKind::ArrayBounds) && |
1177 | 0 | "should not be called unless adding bounds checks"); |
1178 | 0 | const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel = |
1179 | 0 | getLangOpts().getStrictFlexArraysLevel(); |
1180 | 0 | QualType IndexedType; |
1181 | 0 | llvm::Value *Bound = |
1182 | 0 | getArrayIndexingBound(*this, Base, IndexedType, StrictFlexArraysLevel); |
1183 | |
1184 | 0 | EmitBoundsCheckImpl(E, Bound, Index, IndexType, IndexedType, Accessed); |
1185 | 0 | } |
1186 | | |
1187 | | void CodeGenFunction::EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound, |
1188 | | llvm::Value *Index, |
1189 | | QualType IndexType, |
1190 | 0 | QualType IndexedType, bool Accessed) { |
1191 | 0 | if (!Bound) |
1192 | 0 | return; |
1193 | | |
1194 | 0 | SanitizerScope SanScope(this); |
1195 | |
1196 | 0 | bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType(); |
1197 | 0 | llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned); |
1198 | 0 | llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false); |
1199 | |
1200 | 0 | llvm::Constant *StaticData[] = { |
1201 | 0 | EmitCheckSourceLocation(E->getExprLoc()), |
1202 | 0 | EmitCheckTypeDescriptor(IndexedType), |
1203 | 0 | EmitCheckTypeDescriptor(IndexType) |
1204 | 0 | }; |
1205 | 0 | llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal) |
1206 | 0 | : Builder.CreateICmpULE(IndexVal, BoundVal); |
1207 | 0 | EmitCheck(std::make_pair(Check, SanitizerKind::ArrayBounds), |
1208 | 0 | SanitizerHandler::OutOfBounds, StaticData, Index); |
1209 | 0 | } |
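 | | // The Accessed flag picks between the two comparisons above because forming
 | | // a one-past-the-end address is valid while accessing it is not:
 | | //
 | | //   int a[10];
 | | //   int *p = &a[10];  // not an access: index <= bound (ULE) is acceptable
 | | //   int x = a[10];    // an access:     index <  bound (ULT) is required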
1210 | | |
1211 | | CodeGenFunction::ComplexPairTy CodeGenFunction:: |
1212 | | EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV, |
1213 | 0 | bool isInc, bool isPre) { |
1214 | 0 | ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc()); |
1215 | |
1216 | 0 | llvm::Value *NextVal; |
1217 | 0 | if (isa<llvm::IntegerType>(InVal.first->getType())) { |
1218 | 0 | uint64_t AmountVal = isInc ? 1 : -1; |
1219 | 0 | NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true); |
1220 | | |
1221 | | // Add the inc/dec to the real part. |
1222 | 0 | NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec"); |
1223 | 0 | } else { |
1224 | 0 | QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType(); |
1225 | 0 | llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1); |
1226 | 0 | if (!isInc) |
1227 | 0 | FVal.changeSign(); |
1228 | 0 | NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal); |
1229 | | |
1230 | | // Add the inc/dec to the real part. |
1231 | 0 | NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec"); |
1232 | 0 | } |
1233 | |
|
1234 | 0 | ComplexPairTy IncVal(NextVal, InVal.second); |
1235 | | |
1236 | | // Store the updated result through the lvalue. |
1237 | 0 | EmitStoreOfComplex(IncVal, LV, /*init*/ false); |
1238 | 0 | if (getLangOpts().OpenMP) |
1239 | 0 | CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this, |
1240 | 0 | E->getSubExpr()); |
1241 | | |
1242 | | // If this is a postinc, return the value read from memory, otherwise use the |
1243 | | // updated value. |
1244 | 0 | return isPre ? IncVal : InVal; |
1245 | 0 | } |
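 | | // Illustrative: only the real part is stepped, and pre/post differ in the
 | | // value they yield:
 | | //
 | | //   _Complex double z = 1.0 + 2.0i;
 | | //   ++z;  // z becomes 2.0 + 2.0i; '++z' yields the updated value, while
 | | //         // 'z++' would yield the original 1.0 + 2.0i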
1246 | | |
1247 | | void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E, |
1248 | 0 | CodeGenFunction *CGF) { |
1249 | | // Bind VLAs in the cast type. |
1250 | 0 | if (CGF && E->getType()->isVariablyModifiedType()) |
1251 | 0 | CGF->EmitVariablyModifiedType(E->getType()); |
1252 | |
1253 | 0 | if (CGDebugInfo *DI = getModuleDebugInfo()) |
1254 | 0 | DI->EmitExplicitCastType(E->getType()); |
1255 | 0 | } |
1256 | | |
1257 | | //===----------------------------------------------------------------------===// |
1258 | | // LValue Expression Emission |
1259 | | //===----------------------------------------------------------------------===// |
1260 | | |
1261 | | static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo, |
1262 | | TBAAAccessInfo *TBAAInfo, |
1263 | | KnownNonNull_t IsKnownNonNull, |
1264 | 0 | CodeGenFunction &CGF) { |
1265 | | // We allow this with ObjC object pointers because of fragile ABIs. |
1266 | 0 | assert(E->getType()->isPointerType() || |
1267 | 0 | E->getType()->isObjCObjectPointerType()); |
1268 | 0 | E = E->IgnoreParens(); |
1269 | | |
1270 | | // Casts: |
1271 | 0 | if (const CastExpr *CE = dyn_cast<CastExpr>(E)) { |
1272 | 0 | if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE)) |
1273 | 0 | CGF.CGM.EmitExplicitCastExprType(ECE, &CGF); |
1274 | |
1275 | 0 | switch (CE->getCastKind()) { |
1276 | | // Non-converting casts (but not C's implicit conversion from void*). |
1277 | 0 | case CK_BitCast: |
1278 | 0 | case CK_NoOp: |
1279 | 0 | case CK_AddressSpaceConversion: |
1280 | 0 | if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) { |
1281 | 0 | if (PtrTy->getPointeeType()->isVoidType()) |
1282 | 0 | break; |
1283 | | |
1284 | 0 | LValueBaseInfo InnerBaseInfo; |
1285 | 0 | TBAAAccessInfo InnerTBAAInfo; |
1286 | 0 | Address Addr = CGF.EmitPointerWithAlignment( |
1287 | 0 | CE->getSubExpr(), &InnerBaseInfo, &InnerTBAAInfo, IsKnownNonNull); |
1288 | 0 | if (BaseInfo) *BaseInfo = InnerBaseInfo; |
1289 | 0 | if (TBAAInfo) *TBAAInfo = InnerTBAAInfo; |
1290 | |
1291 | 0 | if (isa<ExplicitCastExpr>(CE)) { |
1292 | 0 | LValueBaseInfo TargetTypeBaseInfo; |
1293 | 0 | TBAAAccessInfo TargetTypeTBAAInfo; |
1294 | 0 | CharUnits Align = CGF.CGM.getNaturalPointeeTypeAlignment( |
1295 | 0 | E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo); |
1296 | 0 | if (TBAAInfo) |
1297 | 0 | *TBAAInfo = |
1298 | 0 | CGF.CGM.mergeTBAAInfoForCast(*TBAAInfo, TargetTypeTBAAInfo); |
1299 | | // If the source l-value is opaque, honor the alignment of the |
1300 | | // casted-to type. |
1301 | 0 | if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) { |
1302 | 0 | if (BaseInfo) |
1303 | 0 | BaseInfo->mergeForCast(TargetTypeBaseInfo); |
1304 | 0 | Addr = Address(Addr.getPointer(), Addr.getElementType(), Align, |
1305 | 0 | IsKnownNonNull); |
1306 | 0 | } |
1307 | 0 | } |
1308 | |
1309 | 0 | if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) && |
1310 | 0 | CE->getCastKind() == CK_BitCast) { |
1311 | 0 | if (auto PT = E->getType()->getAs<PointerType>()) |
1312 | 0 | CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr, |
1313 | 0 | /*MayBeNull=*/true, |
1314 | 0 | CodeGenFunction::CFITCK_UnrelatedCast, |
1315 | 0 | CE->getBeginLoc()); |
1316 | 0 | } |
1317 | |
1318 | 0 | llvm::Type *ElemTy = |
1319 | 0 | CGF.ConvertTypeForMem(E->getType()->getPointeeType()); |
1320 | 0 | Addr = Addr.withElementType(ElemTy); |
1321 | 0 | if (CE->getCastKind() == CK_AddressSpaceConversion) |
1322 | 0 | Addr = CGF.Builder.CreateAddrSpaceCast(Addr, |
1323 | 0 | CGF.ConvertType(E->getType())); |
1324 | 0 | return Addr; |
1325 | 0 | } |
1326 | 0 | break; |
1327 | | |
1328 | | // Array-to-pointer decay. |
1329 | 0 | case CK_ArrayToPointerDecay: |
1330 | 0 | return CGF.EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo); |
1331 | | |
1332 | | // Derived-to-base conversions. |
1333 | 0 | case CK_UncheckedDerivedToBase: |
1334 | 0 | case CK_DerivedToBase: { |
1335 | | // TODO: Support accesses to members of base classes in TBAA. For now, we |
1336 | | // conservatively pretend that the complete object is of the base class |
1337 | | // type. |
1338 | 0 | if (TBAAInfo) |
1339 | 0 | *TBAAInfo = CGF.CGM.getTBAAAccessInfo(E->getType()); |
1340 | 0 | Address Addr = CGF.EmitPointerWithAlignment( |
1341 | 0 | CE->getSubExpr(), BaseInfo, nullptr, |
1342 | 0 | (KnownNonNull_t)(IsKnownNonNull || |
1343 | 0 | CE->getCastKind() == CK_UncheckedDerivedToBase)); |
1344 | 0 | auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl(); |
1345 | 0 | return CGF.GetAddressOfBaseClass( |
1346 | 0 | Addr, Derived, CE->path_begin(), CE->path_end(), |
1347 | 0 | CGF.ShouldNullCheckClassCastValue(CE), CE->getExprLoc()); |
1348 | 0 | } |
1349 | | |
1350 | | // TODO: Is there any reason to treat base-to-derived conversions |
1351 | | // specially? |
1352 | 0 | default: |
1353 | 0 | break; |
1354 | 0 | } |
1355 | 0 | } |
1356 | | |
1357 | | // Unary &. |
1358 | 0 | if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { |
1359 | 0 | if (UO->getOpcode() == UO_AddrOf) { |
1360 | 0 | LValue LV = CGF.EmitLValue(UO->getSubExpr(), IsKnownNonNull); |
1361 | 0 | if (BaseInfo) *BaseInfo = LV.getBaseInfo(); |
1362 | 0 | if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo(); |
1363 | 0 | return LV.getAddress(CGF); |
1364 | 0 | } |
1365 | 0 | } |
1366 | | |
1367 | | // std::addressof and variants. |
1368 | 0 | if (auto *Call = dyn_cast<CallExpr>(E)) { |
1369 | 0 | switch (Call->getBuiltinCallee()) { |
1370 | 0 | default: |
1371 | 0 | break; |
1372 | 0 | case Builtin::BIaddressof: |
1373 | 0 | case Builtin::BI__addressof: |
1374 | 0 | case Builtin::BI__builtin_addressof: { |
1375 | 0 | LValue LV = CGF.EmitLValue(Call->getArg(0), IsKnownNonNull); |
1376 | 0 | if (BaseInfo) *BaseInfo = LV.getBaseInfo(); |
1377 | 0 | if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo(); |
1378 | 0 | return LV.getAddress(CGF); |
1379 | 0 | } |
1380 | 0 | } |
1381 | 0 | } |
1382 | | |
1383 | | // TODO: conditional operators, comma. |
1384 | | |
1385 | | // Otherwise, use the alignment of the type. |
1386 | 0 | CharUnits Align = |
1387 | 0 | CGF.CGM.getNaturalPointeeTypeAlignment(E->getType(), BaseInfo, TBAAInfo); |
1388 | 0 | llvm::Type *ElemTy = CGF.ConvertTypeForMem(E->getType()->getPointeeType()); |
1389 | 0 | return Address(CGF.EmitScalarExpr(E), ElemTy, Align, IsKnownNonNull); |
1390 | 0 | } |
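A hedged sketch of the expression shapes this helper pattern-matches before falling back to the pointee type's natural alignment:

    int g(int (*m)[4]) {
      int *a = *m;                            // CK_ArrayToPointerDecay case
      int *b = &(*m)[0];                      // unary '&' case
      int *c = __builtin_addressof((*m)[0]);  // addressof builtin case
      return *a + *b + *c;                    // other forms: natural alignment
    }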
1391 | | |
1392 | | /// EmitPointerWithAlignment - Given an expression of pointer type, try to |
1393 | | /// derive a more accurate bound on the alignment of the pointer. |
1394 | | Address CodeGenFunction::EmitPointerWithAlignment( |
1395 | | const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo, |
1396 | 0 | KnownNonNull_t IsKnownNonNull) { |
1397 | 0 | Address Addr = |
1398 | 0 | ::EmitPointerWithAlignment(E, BaseInfo, TBAAInfo, IsKnownNonNull, *this); |
1399 | 0 | if (IsKnownNonNull && !Addr.isKnownNonNull()) |
1400 | 0 | Addr.setKnownNonNull(); |
1401 | 0 | return Addr; |
1402 | 0 | } |
1403 | | |
1404 | 0 | llvm::Value *CodeGenFunction::EmitNonNullRValueCheck(RValue RV, QualType T) { |
1405 | 0 | llvm::Value *V = RV.getScalarVal(); |
1406 | 0 | if (auto MPT = T->getAs<MemberPointerType>()) |
1407 | 0 | return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, V, MPT); |
1408 | 0 | return Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType())); |
1409 | 0 | } |
1410 | | |
1411 | 0 | RValue CodeGenFunction::GetUndefRValue(QualType Ty) { |
1412 | 0 | if (Ty->isVoidType()) |
1413 | 0 | return RValue::get(nullptr); |
1414 | | |
1415 | 0 | switch (getEvaluationKind(Ty)) { |
1416 | 0 | case TEK_Complex: { |
1417 | 0 | llvm::Type *EltTy = |
1418 | 0 | ConvertType(Ty->castAs<ComplexType>()->getElementType()); |
1419 | 0 | llvm::Value *U = llvm::UndefValue::get(EltTy); |
1420 | 0 | return RValue::getComplex(std::make_pair(U, U)); |
1421 | 0 | } |
1422 | | |
1423 | | // If this is a use of an undefined aggregate type, the aggregate must have an |
1424 | | // identifiable address. Just because the contents of the value are undefined |
1425 | | // doesn't mean that the address can't be taken and compared. |
1426 | 0 | case TEK_Aggregate: { |
1427 | 0 | Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp"); |
1428 | 0 | return RValue::getAggregate(DestPtr); |
1429 | 0 | } |
1430 | | |
1431 | 0 | case TEK_Scalar: |
1432 | 0 | return RValue::get(llvm::UndefValue::get(ConvertType(Ty))); |
1433 | 0 | } |
1434 | 0 | llvm_unreachable("bad evaluation kind"); |
1435 | 0 | } |
1436 | | |
1437 | | RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E, |
1438 | 0 | const char *Name) { |
1439 | 0 | ErrorUnsupported(E, Name); |
1440 | 0 | return GetUndefRValue(E->getType()); |
1441 | 0 | } |
1442 | | |
1443 | | LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E, |
1444 | 0 | const char *Name) { |
1445 | 0 | ErrorUnsupported(E, Name); |
1446 | 0 | llvm::Type *ElTy = ConvertType(E->getType()); |
1447 | 0 | llvm::Type *Ty = UnqualPtrTy; |
1448 | 0 | return MakeAddrLValue( |
1449 | 0 | Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType()); |
1450 | 0 | } |
1451 | | |
1452 | 0 | bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) { |
1453 | 0 | const Expr *Base = Obj; |
1454 | 0 | while (!isa<CXXThisExpr>(Base)) { |
1455 | | // The result of a dynamic_cast can be null. |
1456 | 0 | if (isa<CXXDynamicCastExpr>(Base)) |
1457 | 0 | return false; |
1458 | | |
1459 | 0 | if (const auto *CE = dyn_cast<CastExpr>(Base)) { |
1460 | 0 | Base = CE->getSubExpr(); |
1461 | 0 | } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) { |
1462 | 0 | Base = PE->getSubExpr(); |
1463 | 0 | } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) { |
1464 | 0 | if (UO->getOpcode() == UO_Extension) |
1465 | 0 | Base = UO->getSubExpr(); |
1466 | 0 | else |
1467 | 0 | return false; |
1468 | 0 | } else { |
1469 | 0 | return false; |
1470 | 0 | } |
1471 | 0 | } |
1472 | 0 | return true; |
1473 | 0 | } |
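A sketch of what the loop above accepts and rejects as a wrapped 'this':

    struct S {
      int x;
      int f() {
        int a = (this)->x;                // parens are looked through: wrapped
        int b = (__extension__ this)->x;  // UO_Extension is looked through too
        S *p = dynamic_cast<S *>(this);   // may yield null: not wrapped
        return a + b + (p ? p->x : 0);
      }
    };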
1474 | | |
1475 | 0 | LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) { |
1476 | 0 | LValue LV; |
1477 | 0 | if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E)) |
1478 | 0 | LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true); |
1479 | 0 | else |
1480 | 0 | LV = EmitLValue(E); |
1481 | 0 | if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) { |
1482 | 0 | SanitizerSet SkippedChecks; |
1483 | 0 | if (const auto *ME = dyn_cast<MemberExpr>(E)) { |
1484 | 0 | bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase()); |
1485 | 0 | if (IsBaseCXXThis) |
1486 | 0 | SkippedChecks.set(SanitizerKind::Alignment, true); |
1487 | 0 | if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase())) |
1488 | 0 | SkippedChecks.set(SanitizerKind::Null, true); |
1489 | 0 | } |
1490 | 0 | EmitTypeCheck(TCK, E->getExprLoc(), LV.getPointer(*this), E->getType(), |
1491 | 0 | LV.getAlignment(), SkippedChecks); |
1492 | 0 | } |
1493 | 0 | return LV; |
1494 | 0 | } |
1495 | | |
1496 | | /// EmitLValue - Emit code to compute a designator that specifies the location |
1497 | | /// of the expression. |
1498 | | /// |
1499 | | /// This can return one of two things: a simple address or a bitfield reference. |
1500 | | /// In either case, the LLVM Value* in the LValue structure is guaranteed to be |
1501 | | /// an LLVM pointer type. |
1502 | | /// |
1503 | | /// If this returns a bitfield reference, nothing about the pointee type of the |
1504 | | /// LLVM value is known: For example, it may not be a pointer to an integer. |
1505 | | /// |
1506 | | /// If this returns a normal address, and if the lvalue's C type is fixed size, |
1507 | | /// this method guarantees that the returned pointer type will point to an LLVM |
1508 | | /// type of the same size of the lvalue's type. If the lvalue has a variable |
1509 | | /// length type, this is not possible. |
1510 | | /// |
1511 | | LValue CodeGenFunction::EmitLValue(const Expr *E, |
1512 | 0 | KnownNonNull_t IsKnownNonNull) { |
1513 | 0 | LValue LV = EmitLValueHelper(E, IsKnownNonNull); |
1514 | 0 | if (IsKnownNonNull && !LV.isKnownNonNull()) |
1515 | 0 | LV.setKnownNonNull(); |
1516 | 0 | return LV; |
1517 | 0 | } |
1518 | | |
1519 | | LValue CodeGenFunction::EmitLValueHelper(const Expr *E, |
1520 | 0 | KnownNonNull_t IsKnownNonNull) { |
1521 | 0 | ApplyDebugLocation DL(*this, E); |
1522 | 0 | switch (E->getStmtClass()) { |
1523 | 0 | default: return EmitUnsupportedLValue(E, "l-value expression"); |
1524 | | |
1525 | 0 | case Expr::ObjCPropertyRefExprClass: |
1526 | 0 | llvm_unreachable("cannot emit a property reference directly"); |
1527 | |
1528 | 0 | case Expr::ObjCSelectorExprClass: |
1529 | 0 | return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E)); |
1530 | 0 | case Expr::ObjCIsaExprClass: |
1531 | 0 | return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E)); |
1532 | 0 | case Expr::BinaryOperatorClass: |
1533 | 0 | return EmitBinaryOperatorLValue(cast<BinaryOperator>(E)); |
1534 | 0 | case Expr::CompoundAssignOperatorClass: { |
1535 | 0 | QualType Ty = E->getType(); |
1536 | 0 | if (const AtomicType *AT = Ty->getAs<AtomicType>()) |
1537 | 0 | Ty = AT->getValueType(); |
1538 | 0 | if (!Ty->isAnyComplexType()) |
1539 | 0 | return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E)); |
1540 | 0 | return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E)); |
1541 | 0 | } |
1542 | 0 | case Expr::CallExprClass: |
1543 | 0 | case Expr::CXXMemberCallExprClass: |
1544 | 0 | case Expr::CXXOperatorCallExprClass: |
1545 | 0 | case Expr::UserDefinedLiteralClass: |
1546 | 0 | return EmitCallExprLValue(cast<CallExpr>(E)); |
1547 | 0 | case Expr::CXXRewrittenBinaryOperatorClass: |
1548 | 0 | return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm(), |
1549 | 0 | IsKnownNonNull); |
1550 | 0 | case Expr::VAArgExprClass: |
1551 | 0 | return EmitVAArgExprLValue(cast<VAArgExpr>(E)); |
1552 | 0 | case Expr::DeclRefExprClass: |
1553 | 0 | return EmitDeclRefLValue(cast<DeclRefExpr>(E)); |
1554 | 0 | case Expr::ConstantExprClass: { |
1555 | 0 | const ConstantExpr *CE = cast<ConstantExpr>(E); |
1556 | 0 | if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) { |
1557 | 0 | QualType RetType = cast<CallExpr>(CE->getSubExpr()->IgnoreImplicit()) |
1558 | 0 | ->getCallReturnType(getContext()) |
1559 | 0 | ->getPointeeType(); |
1560 | 0 | return MakeNaturalAlignAddrLValue(Result, RetType); |
1561 | 0 | } |
1562 | 0 | return EmitLValue(cast<ConstantExpr>(E)->getSubExpr(), IsKnownNonNull); |
1563 | 0 | } |
1564 | 0 | case Expr::ParenExprClass: |
1565 | 0 | return EmitLValue(cast<ParenExpr>(E)->getSubExpr(), IsKnownNonNull); |
1566 | 0 | case Expr::GenericSelectionExprClass: |
1567 | 0 | return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr(), |
1568 | 0 | IsKnownNonNull); |
1569 | 0 | case Expr::PredefinedExprClass: |
1570 | 0 | return EmitPredefinedLValue(cast<PredefinedExpr>(E)); |
1571 | 0 | case Expr::StringLiteralClass: |
1572 | 0 | return EmitStringLiteralLValue(cast<StringLiteral>(E)); |
1573 | 0 | case Expr::ObjCEncodeExprClass: |
1574 | 0 | return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E)); |
1575 | 0 | case Expr::PseudoObjectExprClass: |
1576 | 0 | return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E)); |
1577 | 0 | case Expr::InitListExprClass: |
1578 | 0 | return EmitInitListLValue(cast<InitListExpr>(E)); |
1579 | 0 | case Expr::CXXTemporaryObjectExprClass: |
1580 | 0 | case Expr::CXXConstructExprClass: |
1581 | 0 | return EmitCXXConstructLValue(cast<CXXConstructExpr>(E)); |
1582 | 0 | case Expr::CXXBindTemporaryExprClass: |
1583 | 0 | return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E)); |
1584 | 0 | case Expr::CXXUuidofExprClass: |
1585 | 0 | return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E)); |
1586 | 0 | case Expr::LambdaExprClass: |
1587 | 0 | return EmitAggExprToLValue(E); |
1588 | | |
1589 | 0 | case Expr::ExprWithCleanupsClass: { |
1590 | 0 | const auto *cleanups = cast<ExprWithCleanups>(E); |
1591 | 0 | RunCleanupsScope Scope(*this); |
1592 | 0 | LValue LV = EmitLValue(cleanups->getSubExpr(), IsKnownNonNull); |
1593 | 0 | if (LV.isSimple()) { |
1594 | | // Defend against branches out of GNU statement expressions surrounded by
1595 | | // cleanups. |
1596 | 0 | Address Addr = LV.getAddress(*this); |
1597 | 0 | llvm::Value *V = Addr.getPointer(); |
1598 | 0 | Scope.ForceCleanup({&V}); |
1599 | 0 | return LValue::MakeAddr(Addr.withPointer(V, Addr.isKnownNonNull()), |
1600 | 0 | LV.getType(), getContext(), LV.getBaseInfo(), |
1601 | 0 | LV.getTBAAInfo()); |
1602 | 0 | } |
1603 | | // FIXME: Is it possible to create an ExprWithCleanups that produces a |
1604 | | // bitfield lvalue or some other non-simple lvalue? |
1605 | 0 | return LV; |
1606 | 0 | } |
1607 | | |
1608 | 0 | case Expr::CXXDefaultArgExprClass: { |
1609 | 0 | auto *DAE = cast<CXXDefaultArgExpr>(E); |
1610 | 0 | CXXDefaultArgExprScope Scope(*this, DAE); |
1611 | 0 | return EmitLValue(DAE->getExpr(), IsKnownNonNull); |
1612 | 0 | } |
1613 | 0 | case Expr::CXXDefaultInitExprClass: { |
1614 | 0 | auto *DIE = cast<CXXDefaultInitExpr>(E); |
1615 | 0 | CXXDefaultInitExprScope Scope(*this, DIE); |
1616 | 0 | return EmitLValue(DIE->getExpr(), IsKnownNonNull); |
1617 | 0 | } |
1618 | 0 | case Expr::CXXTypeidExprClass: |
1619 | 0 | return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E)); |
1620 | | |
1621 | 0 | case Expr::ObjCMessageExprClass: |
1622 | 0 | return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E)); |
1623 | 0 | case Expr::ObjCIvarRefExprClass: |
1624 | 0 | return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E)); |
1625 | 0 | case Expr::StmtExprClass: |
1626 | 0 | return EmitStmtExprLValue(cast<StmtExpr>(E)); |
1627 | 0 | case Expr::UnaryOperatorClass: |
1628 | 0 | return EmitUnaryOpLValue(cast<UnaryOperator>(E)); |
1629 | 0 | case Expr::ArraySubscriptExprClass: |
1630 | 0 | return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E)); |
1631 | 0 | case Expr::MatrixSubscriptExprClass: |
1632 | 0 | return EmitMatrixSubscriptExpr(cast<MatrixSubscriptExpr>(E)); |
1633 | 0 | case Expr::OMPArraySectionExprClass: |
1634 | 0 | return EmitOMPArraySectionExpr(cast<OMPArraySectionExpr>(E)); |
1635 | 0 | case Expr::ExtVectorElementExprClass: |
1636 | 0 | return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E)); |
1637 | 0 | case Expr::CXXThisExprClass: |
1638 | 0 | return MakeAddrLValue(LoadCXXThisAddress(), E->getType()); |
1639 | 0 | case Expr::MemberExprClass: |
1640 | 0 | return EmitMemberExpr(cast<MemberExpr>(E)); |
1641 | 0 | case Expr::CompoundLiteralExprClass: |
1642 | 0 | return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E)); |
1643 | 0 | case Expr::ConditionalOperatorClass: |
1644 | 0 | return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E)); |
1645 | 0 | case Expr::BinaryConditionalOperatorClass: |
1646 | 0 | return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E)); |
1647 | 0 | case Expr::ChooseExprClass: |
1648 | 0 | return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(), IsKnownNonNull); |
1649 | 0 | case Expr::OpaqueValueExprClass: |
1650 | 0 | return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E)); |
1651 | 0 | case Expr::SubstNonTypeTemplateParmExprClass: |
1652 | 0 | return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(), |
1653 | 0 | IsKnownNonNull); |
1654 | 0 | case Expr::ImplicitCastExprClass: |
1655 | 0 | case Expr::CStyleCastExprClass: |
1656 | 0 | case Expr::CXXFunctionalCastExprClass: |
1657 | 0 | case Expr::CXXStaticCastExprClass: |
1658 | 0 | case Expr::CXXDynamicCastExprClass: |
1659 | 0 | case Expr::CXXReinterpretCastExprClass: |
1660 | 0 | case Expr::CXXConstCastExprClass: |
1661 | 0 | case Expr::CXXAddrspaceCastExprClass: |
1662 | 0 | case Expr::ObjCBridgedCastExprClass: |
1663 | 0 | return EmitCastLValue(cast<CastExpr>(E)); |
1664 | | |
1665 | 0 | case Expr::MaterializeTemporaryExprClass: |
1666 | 0 | return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E)); |
1667 | | |
1668 | 0 | case Expr::CoawaitExprClass: |
1669 | 0 | return EmitCoawaitLValue(cast<CoawaitExpr>(E)); |
1670 | 0 | case Expr::CoyieldExprClass: |
1671 | 0 | return EmitCoyieldLValue(cast<CoyieldExpr>(E)); |
1672 | 0 | } |
1673 | 0 | } |
1674 | | |
1675 | | /// Given an object of the given canonical type, can we safely copy a |
1676 | | /// value out of it based on its initializer? |
1677 | 0 | static bool isConstantEmittableObjectType(QualType type) { |
1678 | 0 | assert(type.isCanonical()); |
1679 | 0 | assert(!type->isReferenceType()); |
1680 | | |
1681 | | // Must be const-qualified but non-volatile. |
1682 | 0 | Qualifiers qs = type.getLocalQualifiers(); |
1683 | 0 | if (!qs.hasConst() || qs.hasVolatile()) return false; |
1684 | | |
1685 | | // Otherwise, all object types satisfy this except C++ classes with |
1686 | | // mutable subobjects or non-trivial copy/destroy behavior. |
1687 | 0 | if (const auto *RT = dyn_cast<RecordType>(type)) |
1688 | 0 | if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl())) |
1689 | 0 | if (RD->hasMutableFields() || !RD->isTrivial()) |
1690 | 0 | return false; |
1691 | | |
1692 | 0 | return true; |
1693 | 0 | } |
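A few illustrative types against this predicate:

    const int N = 42;             // const and trivial: emittable
    volatile const int V = 1;     // volatile-qualified: rejected
    struct M { mutable int c; };
    const M m{};                  // mutable subobject: rejected
    int plain = 0;                // not const-qualified: rejected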
1694 | | |
1695 | | /// Can we constant-emit a load of a reference to a variable of the |
1696 | | /// given type? This is different from predicates like |
1697 | | /// Decl::mightBeUsableInConstantExpressions because we do want it to apply |
1698 | | /// in situations that don't necessarily satisfy the language's rules |
1699 | | /// for this (e.g. C++'s ODR-use rules). For example, we want to be able
1700 | | /// to do this with const float variables even if those variables |
1701 | | /// aren't marked 'constexpr'. |
1702 | | enum ConstantEmissionKind { |
1703 | | CEK_None, |
1704 | | CEK_AsReferenceOnly, |
1705 | | CEK_AsValueOrReference, |
1706 | | CEK_AsValueOnly |
1707 | | }; |
1708 | 0 | static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) { |
1709 | 0 | type = type.getCanonicalType(); |
1710 | 0 | if (const auto *ref = dyn_cast<ReferenceType>(type)) { |
1711 | 0 | if (isConstantEmittableObjectType(ref->getPointeeType())) |
1712 | 0 | return CEK_AsValueOrReference; |
1713 | 0 | return CEK_AsReferenceOnly; |
1714 | 0 | } |
1715 | 0 | if (isConstantEmittableObjectType(type)) |
1716 | 0 | return CEK_AsValueOnly; |
1717 | 0 | return CEK_None; |
1718 | 0 | } |
1719 | | |
1720 | | /// Try to emit a reference to the given value without producing it as |
1721 | | /// an l-value. This is just an optimization, but it avoids us needing |
1722 | | /// to emit global copies of variables if they're named without triggering |
1723 | | /// a formal use in a context where we can't emit a direct reference to them, |
1724 | | /// for instance if a block or lambda or a member of a local class uses a |
1725 | | /// const int variable or constexpr variable from an enclosing function. |
1726 | | CodeGenFunction::ConstantEmission |
1727 | 0 | CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) { |
1728 | 0 | ValueDecl *value = refExpr->getDecl(); |
1729 | | |
1730 | | // The value needs to be an enum constant or a constant variable. |
1731 | 0 | ConstantEmissionKind CEK; |
1732 | 0 | if (isa<ParmVarDecl>(value)) { |
1733 | 0 | CEK = CEK_None; |
1734 | 0 | } else if (auto *var = dyn_cast<VarDecl>(value)) { |
1735 | 0 | CEK = checkVarTypeForConstantEmission(var->getType()); |
1736 | 0 | } else if (isa<EnumConstantDecl>(value)) { |
1737 | 0 | CEK = CEK_AsValueOnly; |
1738 | 0 | } else { |
1739 | 0 | CEK = CEK_None; |
1740 | 0 | } |
1741 | 0 | if (CEK == CEK_None) return ConstantEmission(); |
1742 | | |
1743 | 0 | Expr::EvalResult result; |
1744 | 0 | bool resultIsReference; |
1745 | 0 | QualType resultType; |
1746 | | |
1747 | | // It's best to evaluate all the way as an r-value if that's permitted. |
1748 | 0 | if (CEK != CEK_AsReferenceOnly && |
1749 | 0 | refExpr->EvaluateAsRValue(result, getContext())) { |
1750 | 0 | resultIsReference = false; |
1751 | 0 | resultType = refExpr->getType(); |
1752 | | |
1753 | | // Otherwise, try to evaluate as an l-value. |
1754 | 0 | } else if (CEK != CEK_AsValueOnly && |
1755 | 0 | refExpr->EvaluateAsLValue(result, getContext())) { |
1756 | 0 | resultIsReference = true; |
1757 | 0 | resultType = value->getType(); |
1758 | | |
1759 | | // Failure. |
1760 | 0 | } else { |
1761 | 0 | return ConstantEmission(); |
1762 | 0 | } |
1763 | | |
1764 | | // In any case, if the initializer has side-effects, abandon ship. |
1765 | 0 | if (result.HasSideEffects) |
1766 | 0 | return ConstantEmission(); |
1767 | | |
1768 | | // In CUDA/HIP device compilation, a lambda may capture a reference variable |
1769 | | // referencing a global host variable by copy. In this case the lambda should |
1770 | | // make a copy of the value of the global host variable. The DRE of the |
1771 | | // captured reference variable cannot be emitted as load from the host |
1772 | | // global variable as compile time constant, since the host variable is not |
1773 | | // accessible on device. The DRE of the captured reference variable has to be |
1774 | | // loaded from captures. |
1775 | 0 | if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() && |
1776 | 0 | refExpr->refersToEnclosingVariableOrCapture()) { |
1777 | 0 | auto *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl); |
1778 | 0 | if (MD && MD->getParent()->isLambda() && |
1779 | 0 | MD->getOverloadedOperator() == OO_Call) { |
1780 | 0 | const APValue::LValueBase &base = result.Val.getLValueBase(); |
1781 | 0 | if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) { |
1782 | 0 | if (const VarDecl *VD = dyn_cast<const VarDecl>(D)) { |
1783 | 0 | if (!VD->hasAttr<CUDADeviceAttr>()) { |
1784 | 0 | return ConstantEmission(); |
1785 | 0 | } |
1786 | 0 | } |
1787 | 0 | } |
1788 | 0 | } |
1789 | 0 | } |
1790 | | |
1791 | | // Emit as a constant. |
1792 | 0 | auto C = ConstantEmitter(*this).emitAbstract(refExpr->getLocation(), |
1793 | 0 | result.Val, resultType); |
1794 | | |
1795 | | // Make sure we emit a debug reference to the global variable. |
1796 | | // This should probably fire even for |
1797 | 0 | if (isa<VarDecl>(value)) { |
1798 | 0 | if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value))) |
1799 | 0 | EmitDeclRefExprDbgValue(refExpr, result.Val); |
1800 | 0 | } else { |
1801 | 0 | assert(isa<EnumConstantDecl>(value)); |
1802 | 0 | EmitDeclRefExprDbgValue(refExpr, result.Val); |
1803 | 0 | } |
1804 | | |
1805 | | // If we emitted a reference constant, we need to dereference that. |
1806 | 0 | if (resultIsReference) |
1807 | 0 | return ConstantEmission::forReference(C); |
1808 | | |
1809 | 0 | return ConstantEmission::forValue(C); |
1810 | 0 | } |
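A sketch of the lambda case motivating this path: a capture-less lambda may read a const local whose initializer is constant, and the DeclRefExpr is then emitted as the value itself rather than through a (nonexistent) capture:

    int caller() {
      const int bound = 10;           // const int, not marked constexpr
      auto g = [] { return bound; };  // reading 'bound' is not an odr-use
      return g();                     // the reference folds to the constant 10
    }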
1811 | | |
1812 | | static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF, |
1813 | 0 | const MemberExpr *ME) { |
1814 | 0 | if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) { |
1815 | | // Try to emit static variable member expressions as DREs. |
1816 | 0 | return DeclRefExpr::Create( |
1817 | 0 | CGF.getContext(), NestedNameSpecifierLoc(), SourceLocation(), VD, |
1818 | 0 | /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(), |
1819 | 0 | ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse()); |
1820 | 0 | } |
1821 | 0 | return nullptr; |
1822 | 0 | } |
1823 | | |
1824 | | CodeGenFunction::ConstantEmission |
1825 | 0 | CodeGenFunction::tryEmitAsConstant(const MemberExpr *ME) { |
1826 | 0 | if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, ME)) |
1827 | 0 | return tryEmitAsConstant(DRE); |
1828 | 0 | return ConstantEmission(); |
1829 | 0 | } |
1830 | | |
1831 | | llvm::Value *CodeGenFunction::emitScalarConstant( |
1832 | 0 | const CodeGenFunction::ConstantEmission &Constant, Expr *E) { |
1833 | 0 | assert(Constant && "not a constant"); |
1834 | 0 | if (Constant.isReference()) |
1835 | 0 | return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E), |
1836 | 0 | E->getExprLoc()) |
1837 | 0 | .getScalarVal(); |
1838 | 0 | return Constant.getValue(); |
1839 | 0 | } |
1840 | | |
1841 | | llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue, |
1842 | 0 | SourceLocation Loc) { |
1843 | 0 | return EmitLoadOfScalar(lvalue.getAddress(*this), lvalue.isVolatile(), |
1844 | 0 | lvalue.getType(), Loc, lvalue.getBaseInfo(), |
1845 | 0 | lvalue.getTBAAInfo(), lvalue.isNontemporal()); |
1846 | 0 | } |
1847 | | |
1848 | 0 | static bool hasBooleanRepresentation(QualType Ty) { |
1849 | 0 | if (Ty->isBooleanType()) |
1850 | 0 | return true; |
1851 | | |
1852 | 0 | if (const EnumType *ET = Ty->getAs<EnumType>()) |
1853 | 0 | return ET->getDecl()->getIntegerType()->isBooleanType(); |
1854 | | |
1855 | 0 | if (const AtomicType *AT = Ty->getAs<AtomicType>()) |
1856 | 0 | return hasBooleanRepresentation(AT->getValueType()); |
1857 | | |
1858 | 0 | return false; |
1859 | 0 | } |
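Types this helper treats as boolean-represented, sketched (the _Atomic spelling is a C11 feature Clang also accepts in C++):

    bool b;                     // trivially boolean
    enum E : bool { No, Yes };  // enum whose underlying type is bool
    _Atomic(bool) flag;         // unwrapped to its value type, also boolean
    enum F { A, B };            // underlying type int: not boolean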
1860 | | |
1861 | | static bool getRangeForType(CodeGenFunction &CGF, QualType Ty, |
1862 | | llvm::APInt &Min, llvm::APInt &End, |
1863 | 0 | bool StrictEnums, bool IsBool) { |
1864 | 0 | const EnumType *ET = Ty->getAs<EnumType>(); |
1865 | 0 | bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums && |
1866 | 0 | ET && !ET->getDecl()->isFixed(); |
1867 | 0 | if (!IsBool && !IsRegularCPlusPlusEnum) |
1868 | 0 | return false; |
1869 | | |
1870 | 0 | if (IsBool) { |
1871 | 0 | Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0); |
1872 | 0 | End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2); |
1873 | 0 | } else { |
1874 | 0 | const EnumDecl *ED = ET->getDecl(); |
1875 | 0 | ED->getValueRange(End, Min); |
1876 | 0 | } |
1877 | 0 | return true; |
1878 | 0 | } |
1879 | | |
1880 | 0 | llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) { |
1881 | 0 | llvm::APInt Min, End; |
1882 | 0 | if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums, |
1883 | 0 | hasBooleanRepresentation(Ty))) |
1884 | 0 | return nullptr; |
1885 | | |
1886 | 0 | llvm::MDBuilder MDHelper(getLLVMContext()); |
1887 | 0 | return MDHelper.createRange(Min, End); |
1888 | 0 | } |
1889 | | |
1890 | | bool CodeGenFunction::EmitScalarRangeCheck(llvm::Value *Value, QualType Ty, |
1891 | 0 | SourceLocation Loc) { |
1892 | 0 | bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool); |
1893 | 0 | bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum); |
1894 | 0 | if (!HasBoolCheck && !HasEnumCheck) |
1895 | 0 | return false; |
1896 | | |
1897 | 0 | bool IsBool = hasBooleanRepresentation(Ty) || |
1898 | 0 | NSAPI(CGM.getContext()).isObjCBOOLType(Ty); |
1899 | 0 | bool NeedsBoolCheck = HasBoolCheck && IsBool; |
1900 | 0 | bool NeedsEnumCheck = HasEnumCheck && Ty->getAs<EnumType>(); |
1901 | 0 | if (!NeedsBoolCheck && !NeedsEnumCheck) |
1902 | 0 | return false; |
1903 | | |
1904 | | // Single-bit booleans don't need to be checked. Special-case this to avoid |
1905 | | // a bit width mismatch when handling bitfield values. This is handled by |
1906 | | // EmitFromMemory for the non-bitfield case. |
1907 | 0 | if (IsBool && |
1908 | 0 | cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1) |
1909 | 0 | return false; |
1910 | | |
1911 | 0 | llvm::APInt Min, End; |
1912 | 0 | if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool)) |
1913 | 0 | return true; |
1914 | | |
1915 | 0 | auto &Ctx = getLLVMContext(); |
1916 | 0 | SanitizerScope SanScope(this); |
1917 | 0 | llvm::Value *Check; |
1918 | 0 | --End; |
1919 | 0 | if (!Min) { |
1920 | 0 | Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End)); |
1921 | 0 | } else { |
1922 | 0 | llvm::Value *Upper = |
1923 | 0 | Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End)); |
1924 | 0 | llvm::Value *Lower = |
1925 | 0 | Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min)); |
1926 | 0 | Check = Builder.CreateAnd(Upper, Lower); |
1927 | 0 | } |
1928 | 0 | llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc), |
1929 | 0 | EmitCheckTypeDescriptor(Ty)}; |
1930 | 0 | SanitizerMask Kind = |
1931 | 0 | NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool; |
1932 | 0 | EmitCheck(std::make_pair(Check, Kind), SanitizerHandler::LoadInvalidValue, |
1933 | 0 | StaticArgs, EmitCheckValue(Value)); |
1934 | 0 | return true; |
1935 | 0 | } |
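For orientation, a sketch of loads this check guards under -fsanitize=bool,enum; the handler fires when memory holds a value outside the type's range:

    // clang++ -fsanitize=bool,enum
    enum Color { Red, Green, Blue };
    Color first(Color *p) {
      return *p;  // checked against Color's representable value range
    }
    bool flag(char *raw) {
      return *reinterpret_cast<bool *>(raw);  // checked to be 0 or 1
    }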
1936 | | |
1937 | | llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile, |
1938 | | QualType Ty, |
1939 | | SourceLocation Loc, |
1940 | | LValueBaseInfo BaseInfo, |
1941 | | TBAAAccessInfo TBAAInfo, |
1942 | 0 | bool isNontemporal) { |
1943 | 0 | if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getPointer())) |
1944 | 0 | if (GV->isThreadLocal()) |
1945 | 0 | Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV), |
1946 | 0 | NotKnownNonNull); |
1947 | |
1948 | 0 | if (const auto *ClangVecTy = Ty->getAs<VectorType>()) { |
1949 | | // Boolean vectors use `iN` as storage type. |
1950 | 0 | if (ClangVecTy->isExtVectorBoolType()) { |
1951 | 0 | llvm::Type *ValTy = ConvertType(Ty); |
1952 | 0 | unsigned ValNumElems = |
1953 | 0 | cast<llvm::FixedVectorType>(ValTy)->getNumElements(); |
1954 | | // Load the `iP` storage object (P is the padded vector size). |
1955 | 0 | auto *RawIntV = Builder.CreateLoad(Addr, Volatile, "load_bits"); |
1956 | 0 | const auto *RawIntTy = RawIntV->getType(); |
1957 | 0 | assert(RawIntTy->isIntegerTy() && "compressed iN storage for bitvectors"); |
1958 | | // Bitcast iP --> <P x i1>. |
1959 | 0 | auto *PaddedVecTy = llvm::FixedVectorType::get( |
1960 | 0 | Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits()); |
1961 | 0 | llvm::Value *V = Builder.CreateBitCast(RawIntV, PaddedVecTy); |
1962 | | // Shuffle <P x i1> --> <N x i1> (N is the actual bit size). |
1963 | 0 | V = emitBoolVecConversion(V, ValNumElems, "extractvec"); |
1964 | |
1965 | 0 | return EmitFromMemory(V, Ty); |
1966 | 0 | } |
1967 | | |
1968 | | // Handle vectors of size 3 like size 4 for better performance. |
1969 | 0 | const llvm::Type *EltTy = Addr.getElementType(); |
1970 | 0 | const auto *VTy = cast<llvm::FixedVectorType>(EltTy); |
1971 | |
1972 | 0 | if (!CGM.getCodeGenOpts().PreserveVec3Type && VTy->getNumElements() == 3) { |
1973 | |
1974 | 0 | llvm::VectorType *vec4Ty = |
1975 | 0 | llvm::FixedVectorType::get(VTy->getElementType(), 4); |
1976 | 0 | Address Cast = Addr.withElementType(vec4Ty); |
1977 | | // Now load value. |
1978 | 0 | llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4"); |
1979 | | |
1980 | | // Shuffle vector to get vec3. |
1981 | 0 | V = Builder.CreateShuffleVector(V, ArrayRef<int>{0, 1, 2}, "extractVec"); |
1982 | 0 | return EmitFromMemory(V, Ty); |
1983 | 0 | } |
1984 | 0 | } |
1985 | | |
1986 | | // Atomic operations have to be done on integral types. |
1987 | 0 | LValue AtomicLValue = |
1988 | 0 | LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo); |
1989 | 0 | if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) { |
1990 | 0 | return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal(); |
1991 | 0 | } |
1992 | | |
1993 | 0 | llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile); |
1994 | 0 | if (isNontemporal) { |
1995 | 0 | llvm::MDNode *Node = llvm::MDNode::get( |
1996 | 0 | Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1))); |
1997 | 0 | Load->setMetadata(llvm::LLVMContext::MD_nontemporal, Node); |
1998 | 0 | } |
1999 | |
2000 | 0 | CGM.DecorateInstructionWithTBAA(Load, TBAAInfo); |
2001 | |
2002 | 0 | if (EmitScalarRangeCheck(Load, Ty, Loc)) { |
2003 | | // In order to prevent the optimizer from throwing away the check, don't |
2004 | | // attach range metadata to the load. |
2005 | 0 | } else if (CGM.getCodeGenOpts().OptimizationLevel > 0) |
2006 | 0 | if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty)) { |
2007 | 0 | Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo); |
2008 | 0 | Load->setMetadata(llvm::LLVMContext::MD_noundef, |
2009 | 0 | llvm::MDNode::get(getLLVMContext(), std::nullopt)); |
2010 | 0 | } |
2011 | |
2012 | 0 | return EmitFromMemory(Load, Ty); |
2013 | 0 | } |
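A sketch of the vec3 special case above: unless the vec3 type is preserved (the PreserveVec3Type codegen option), a 3-element vector is loaded as four elements and shuffled down:

    typedef float float3 __attribute__((ext_vector_type(3)));
    float first(float3 *p) {
      float3 v = *p;  // load <4 x float>, then shufflevector to <3 x float>
      return v.x;
    }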
2014 | | |
2015 | 0 | llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) { |
2016 | | // Bool has a different representation in memory than in registers. |
2017 | 0 | if (hasBooleanRepresentation(Ty)) { |
2018 | | // This should really always be an i1, but sometimes it's already |
2019 | | // an i8, and it's awkward to track those cases down. |
2020 | 0 | if (Value->getType()->isIntegerTy(1)) |
2021 | 0 | return Builder.CreateZExt(Value, ConvertTypeForMem(Ty), "frombool"); |
2022 | 0 | assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) && |
2023 | 0 | "wrong value rep of bool"); |
2024 | 0 | } |
2025 | | |
2026 | 0 | return Value; |
2027 | 0 | } |
2028 | | |
2029 | 0 | llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) { |
2030 | | // Bool has a different representation in memory than in registers. |
2031 | 0 | if (hasBooleanRepresentation(Ty)) { |
2032 | 0 | assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) && |
2033 | 0 | "wrong value rep of bool"); |
2034 | 0 | return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool"); |
2035 | 0 | } |
2036 | 0 | if (Ty->isExtVectorBoolType()) { |
2037 | 0 | const auto *RawIntTy = Value->getType(); |
2038 | | // Bitcast iP --> <P x i1>. |
2039 | 0 | auto *PaddedVecTy = llvm::FixedVectorType::get( |
2040 | 0 | Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits()); |
2041 | 0 | auto *V = Builder.CreateBitCast(Value, PaddedVecTy); |
2042 | | // Shuffle <P x i1> --> <N x i1> (N is the actual bit size). |
2043 | 0 | llvm::Type *ValTy = ConvertType(Ty); |
2044 | 0 | unsigned ValNumElems = cast<llvm::FixedVectorType>(ValTy)->getNumElements(); |
2045 | 0 | return emitBoolVecConversion(V, ValNumElems, "extractvec"); |
2046 | 0 | } |
2047 | | |
2048 | 0 | return Value; |
2049 | 0 | } |
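These two helpers exist because bool differs between memory and registers; a small sketch (assuming the usual i8 in-memory representation):

    bool flip(bool *p) {
      bool v = *p;  // load i8, then trunc to i1 (EmitFromMemory)
      *p = !v;      // negate the i1, then zext back to i8 on store (EmitToMemory)
      return v;
    }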
2050 | | |
2051 | | // Convert the pointer of \p Addr to a pointer to a vector (the value type of |
2052 | | // MatrixType), if it points to an array (the memory type of MatrixType).
2053 | | static Address MaybeConvertMatrixAddress(Address Addr, CodeGenFunction &CGF, |
2054 | 0 | bool IsVector = true) { |
2055 | 0 | auto *ArrayTy = dyn_cast<llvm::ArrayType>(Addr.getElementType()); |
2056 | 0 | if (ArrayTy && IsVector) { |
2057 | 0 | auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(), |
2058 | 0 | ArrayTy->getNumElements()); |
2059 | |
2060 | 0 | return Addr.withElementType(VectorTy); |
2061 | 0 | } |
2062 | 0 | auto *VectorTy = dyn_cast<llvm::VectorType>(Addr.getElementType()); |
2063 | 0 | if (VectorTy && !IsVector) { |
2064 | 0 | auto *ArrayTy = llvm::ArrayType::get( |
2065 | 0 | VectorTy->getElementType(), |
2066 | 0 | cast<llvm::FixedVectorType>(VectorTy)->getNumElements()); |
2067 | |
2068 | 0 | return Addr.withElementType(ArrayTy); |
2069 | 0 | } |
2070 | | |
2071 | 0 | return Addr; |
2072 | 0 | } |
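A sketch of why this retyping is needed for the matrix extension (enabled with -fenable-matrix): the object is stored as an array but computed on as a flat vector:

    typedef float m2x2 __attribute__((matrix_type(2, 2)));
    float corner(m2x2 *p) {
      m2x2 m = *p;  // memory type [4 x float], value type <4 x float>
      return m[0][0];
    }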
2073 | | |
2074 | | // Emit a store of a matrix LValue. This may require casting the original |
2075 | | // pointer to memory address (ArrayType) to a pointer to the value type |
2076 | | // (VectorType). |
2077 | | static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue, |
2078 | 0 | bool isInit, CodeGenFunction &CGF) { |
2079 | 0 | Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(CGF), CGF, |
2080 | 0 | value->getType()->isVectorTy()); |
2081 | 0 | CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(), |
2082 | 0 | lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit, |
2083 | 0 | lvalue.isNontemporal()); |
2084 | 0 | } |
2085 | | |
2086 | | void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr, |
2087 | | bool Volatile, QualType Ty, |
2088 | | LValueBaseInfo BaseInfo, |
2089 | | TBAAAccessInfo TBAAInfo, |
2090 | 0 | bool isInit, bool isNontemporal) { |
2091 | 0 | if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getPointer())) |
2092 | 0 | if (GV->isThreadLocal()) |
2093 | 0 | Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV), |
2094 | 0 | NotKnownNonNull); |
2095 | |
2096 | 0 | llvm::Type *SrcTy = Value->getType(); |
2097 | 0 | if (const auto *ClangVecTy = Ty->getAs<VectorType>()) { |
2098 | 0 | auto *VecTy = dyn_cast<llvm::FixedVectorType>(SrcTy); |
2099 | 0 | if (VecTy && ClangVecTy->isExtVectorBoolType()) { |
2100 | 0 | auto *MemIntTy = cast<llvm::IntegerType>(Addr.getElementType()); |
2101 | | // Expand to the memory bit width. |
2102 | 0 | unsigned MemNumElems = MemIntTy->getPrimitiveSizeInBits(); |
2103 | | // <N x i1> --> <P x i1>. |
2104 | 0 | Value = emitBoolVecConversion(Value, MemNumElems, "insertvec"); |
2105 | | // <P x i1> --> iP. |
2106 | 0 | Value = Builder.CreateBitCast(Value, MemIntTy); |
2107 | 0 | } else if (!CGM.getCodeGenOpts().PreserveVec3Type) { |
2108 | | // Handle vec3 special. |
2109 | 0 | if (VecTy && cast<llvm::FixedVectorType>(VecTy)->getNumElements() == 3) { |
2110 | | // Our source is a vec3, do a shuffle vector to make it a vec4. |
2111 | 0 | Value = Builder.CreateShuffleVector(Value, ArrayRef<int>{0, 1, 2, -1}, |
2112 | 0 | "extractVec"); |
2113 | 0 | SrcTy = llvm::FixedVectorType::get(VecTy->getElementType(), 4); |
2114 | 0 | } |
2115 | 0 | if (Addr.getElementType() != SrcTy) { |
2116 | 0 | Addr = Addr.withElementType(SrcTy); |
2117 | 0 | } |
2118 | 0 | } |
2119 | 0 | } |
2120 | |
2121 | 0 | Value = EmitToMemory(Value, Ty); |
2122 | |
2123 | 0 | LValue AtomicLValue = |
2124 | 0 | LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo); |
2125 | 0 | if (Ty->isAtomicType() || |
2126 | 0 | (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) { |
2127 | 0 | EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit); |
2128 | 0 | return; |
2129 | 0 | } |
2130 | | |
2131 | 0 | llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile); |
2132 | 0 | if (isNontemporal) { |
2133 | 0 | llvm::MDNode *Node = |
2134 | 0 | llvm::MDNode::get(Store->getContext(), |
2135 | 0 | llvm::ConstantAsMetadata::get(Builder.getInt32(1))); |
2136 | 0 | Store->setMetadata(llvm::LLVMContext::MD_nontemporal, Node); |
2137 | 0 | } |
2138 | |
2139 | 0 | CGM.DecorateInstructionWithTBAA(Store, TBAAInfo); |
2140 | 0 | } |
2141 | | |
2142 | | void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue, |
2143 | 0 | bool isInit) { |
2144 | 0 | if (lvalue.getType()->isConstantMatrixType()) { |
2145 | 0 | EmitStoreOfMatrixScalar(value, lvalue, isInit, *this); |
2146 | 0 | return; |
2147 | 0 | } |
2148 | | |
2149 | 0 | EmitStoreOfScalar(value, lvalue.getAddress(*this), lvalue.isVolatile(), |
2150 | 0 | lvalue.getType(), lvalue.getBaseInfo(), |
2151 | 0 | lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal()); |
2152 | 0 | } |
2153 | | |
2154 | | // Emit a load of an LValue of matrix type. This may require casting the pointer
2155 | | // to memory address (ArrayType) to a pointer to the value type (VectorType). |
2156 | | static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc, |
2157 | 0 | CodeGenFunction &CGF) { |
2158 | 0 | assert(LV.getType()->isConstantMatrixType()); |
2159 | 0 | Address Addr = MaybeConvertMatrixAddress(LV.getAddress(CGF), CGF); |
2160 | 0 | LV.setAddress(Addr); |
2161 | 0 | return RValue::get(CGF.EmitLoadOfScalar(LV, Loc)); |
2162 | 0 | } |
2163 | | |
2164 | | /// EmitLoadOfLValue - Given an expression that represents a value lvalue, this |
2165 | | /// method emits the address of the lvalue, then loads the result as an rvalue, |
2166 | | /// returning the rvalue. |
2167 | 0 | RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) { |
2168 | 0 | if (LV.isObjCWeak()) { |
2169 | | // load of a __weak object. |
2170 | 0 | Address AddrWeakObj = LV.getAddress(*this); |
2171 | 0 | return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this, |
2172 | 0 | AddrWeakObj)); |
2173 | 0 | } |
2174 | 0 | if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) { |
2175 | | // In MRC mode, we do a load+autorelease. |
2176 | 0 | if (!getLangOpts().ObjCAutoRefCount) { |
2177 | 0 | return RValue::get(EmitARCLoadWeak(LV.getAddress(*this))); |
2178 | 0 | } |
2179 | | |
2180 | | // In ARC mode, we load retained and then consume the value. |
2181 | 0 | llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress(*this)); |
2182 | 0 | Object = EmitObjCConsumeObject(LV.getType(), Object); |
2183 | 0 | return RValue::get(Object); |
2184 | 0 | } |
2185 | | |
2186 | 0 | if (LV.isSimple()) { |
2187 | 0 | assert(!LV.getType()->isFunctionType()); |
2188 | | |
2189 | 0 | if (LV.getType()->isConstantMatrixType()) |
2190 | 0 | return EmitLoadOfMatrixLValue(LV, Loc, *this); |
2191 | | |
2192 | | // Everything needs a load. |
2193 | 0 | return RValue::get(EmitLoadOfScalar(LV, Loc)); |
2194 | 0 | } |
2195 | | |
2196 | 0 | if (LV.isVectorElt()) { |
2197 | 0 | llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(), |
2198 | 0 | LV.isVolatileQualified()); |
2199 | 0 | return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(), |
2200 | 0 | "vecext")); |
2201 | 0 | } |
2202 | | |
2203 | | // If this is a reference to a subset of the elements of a vector, either |
2204 | | // shuffle the input or extract/insert them as appropriate. |
2205 | 0 | if (LV.isExtVectorElt()) { |
2206 | 0 | return EmitLoadOfExtVectorElementLValue(LV); |
2207 | 0 | } |
2208 | | |
2209 | | // Global register variables always invoke intrinsics.
2210 | 0 | if (LV.isGlobalReg()) |
2211 | 0 | return EmitLoadOfGlobalRegLValue(LV); |
2212 | | |
2213 | 0 | if (LV.isMatrixElt()) { |
2214 | 0 | llvm::Value *Idx = LV.getMatrixIdx(); |
2215 | 0 | if (CGM.getCodeGenOpts().OptimizationLevel > 0) { |
2216 | 0 | const auto *const MatTy = LV.getType()->castAs<ConstantMatrixType>(); |
2217 | 0 | llvm::MatrixBuilder MB(Builder); |
2218 | 0 | MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened()); |
2219 | 0 | } |
2220 | 0 | llvm::LoadInst *Load = |
2221 | 0 | Builder.CreateLoad(LV.getMatrixAddress(), LV.isVolatileQualified()); |
2222 | 0 | return RValue::get(Builder.CreateExtractElement(Load, Idx, "matrixext")); |
2223 | 0 | } |
2224 | | |
2225 | 0 | assert(LV.isBitField() && "Unknown LValue type!"); |
2226 | 0 | return EmitLoadOfBitfieldLValue(LV, Loc); |
2227 | 0 | } |
2228 | | |
2229 | | RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV, |
2230 | 0 | SourceLocation Loc) { |
2231 | 0 | const CGBitFieldInfo &Info = LV.getBitFieldInfo(); |
2232 | | |
2233 | | // Get the output type. |
2234 | 0 | llvm::Type *ResLTy = ConvertType(LV.getType()); |
2235 | |
2236 | 0 | Address Ptr = LV.getBitFieldAddress(); |
2237 | 0 | llvm::Value *Val = |
2238 | 0 | Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load"); |
2239 | |
2240 | 0 | bool UseVolatile = LV.isVolatileQualified() && |
2241 | 0 | Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget()); |
2242 | 0 | const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset; |
2243 | 0 | const unsigned StorageSize = |
2244 | 0 | UseVolatile ? Info.VolatileStorageSize : Info.StorageSize; |
2245 | 0 | if (Info.IsSigned) { |
2246 | 0 | assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize); |
2247 | 0 | unsigned HighBits = StorageSize - Offset - Info.Size; |
2248 | 0 | if (HighBits) |
2249 | 0 | Val = Builder.CreateShl(Val, HighBits, "bf.shl"); |
2250 | 0 | if (Offset + HighBits) |
2251 | 0 | Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr"); |
2252 | 0 | } else { |
2253 | 0 | if (Offset) |
2254 | 0 | Val = Builder.CreateLShr(Val, Offset, "bf.lshr"); |
2255 | 0 | if (static_cast<unsigned>(Offset) + Info.Size < StorageSize) |
2256 | 0 | Val = Builder.CreateAnd( |
2257 | 0 | Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear"); |
2258 | 0 | } |
2259 | 0 | Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast"); |
2260 | 0 | EmitScalarRangeCheck(Val, LV.getType(), Loc); |
2261 | 0 | return RValue::get(Val); |
2262 | 0 | } |
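To make the shift arithmetic concrete, a hedged example assuming both fields share a single 8-bit storage unit:

    struct Flags { unsigned a : 3; int b : 4; };
    int getB(Flags f) {
      // 'b' is signed with Size 4, Offset 3, StorageSize 8, so HighBits == 1:
      // the container is shifted left by 1, then arithmetic-shifted right by 4
      // (Offset + HighBits) to extract and sign-extend the field.
      return f.b;
    }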
2263 | | |
2264 | | // If this is a reference to a subset of the elements of a vector, create an |
2265 | | // appropriate shufflevector. |
2266 | 0 | RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) { |
2267 | 0 | llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(), |
2268 | 0 | LV.isVolatileQualified()); |
2269 | | |
2270 | | // HLSL allows treating scalars as one-element vectors. Converting the scalar |
2271 | | // IR value to a vector here allows the rest of codegen to behave as normal. |
2272 | 0 | if (getLangOpts().HLSL && !Vec->getType()->isVectorTy()) { |
2273 | 0 | llvm::Type *DstTy = llvm::FixedVectorType::get(Vec->getType(), 1); |
2274 | 0 | llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty); |
2275 | 0 | Vec = Builder.CreateInsertElement(DstTy, Vec, Zero, "cast.splat"); |
2276 | 0 | } |
2277 | |
2278 | 0 | const llvm::Constant *Elts = LV.getExtVectorElts(); |
2279 | | |
2280 | | // If the result of the expression is a non-vector type, we must be extracting |
2281 | | // a single element. Just codegen as an extractelement. |
2282 | 0 | const VectorType *ExprVT = LV.getType()->getAs<VectorType>(); |
2283 | 0 | if (!ExprVT) { |
2284 | 0 | unsigned InIdx = getAccessedFieldNo(0, Elts); |
2285 | 0 | llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx); |
2286 | 0 | return RValue::get(Builder.CreateExtractElement(Vec, Elt)); |
2287 | 0 | } |
2288 | | |
2289 | | // Always use a shufflevector to try to retain the original program structure.
2290 | 0 | unsigned NumResultElts = ExprVT->getNumElements(); |
2291 | |
2292 | 0 | SmallVector<int, 4> Mask; |
2293 | 0 | for (unsigned i = 0; i != NumResultElts; ++i) |
2294 | 0 | Mask.push_back(getAccessedFieldNo(i, Elts)); |
2295 | |
2296 | 0 | Vec = Builder.CreateShuffleVector(Vec, Mask); |
2297 | 0 | return RValue::get(Vec); |
2298 | 0 | } |
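A sketch of the two result shapes handled above for ext_vector_type swizzles:

    typedef float float4 __attribute__((ext_vector_type(4)));
    typedef float float2 __attribute__((ext_vector_type(2)));
    float2 xy(float4 v) { return v.xy; }  // vector result: shufflevector <0, 1>
    float w(float4 v) { return v.w; }     // scalar result: extractelement index 3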
2299 | | |
2300 | | /// Generates lvalue for partial ext_vector access. |
2301 | 0 | Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) { |
2302 | 0 | Address VectorAddress = LV.getExtVectorAddress(); |
2303 | 0 | QualType EQT = LV.getType()->castAs<VectorType>()->getElementType(); |
2304 | 0 | llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT); |
2305 | |
2306 | 0 | Address CastToPointerElement = VectorAddress.withElementType(VectorElementTy); |
2307 | |
2308 | 0 | const llvm::Constant *Elts = LV.getExtVectorElts(); |
2309 | 0 | unsigned ix = getAccessedFieldNo(0, Elts); |
2310 | |
2311 | 0 | Address VectorBasePtrPlusIx = |
2312 | 0 | Builder.CreateConstInBoundsGEP(CastToPointerElement, ix, |
2313 | 0 | "vector.elt"); |
2314 | |
2315 | 0 | return VectorBasePtrPlusIx; |
2316 | 0 | } |
2317 | | |
2318 | | /// Loads of global named registers are always calls to intrinsics.
2319 | 0 | RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) { |
2320 | 0 | assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) && |
2321 | 0 | "Bad type for register variable"); |
2322 | 0 | llvm::MDNode *RegName = cast<llvm::MDNode>( |
2323 | 0 | cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata()); |
2324 | | |
2325 | | // We accept integer and pointer types only.
2326 | 0 | llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType()); |
2327 | 0 | llvm::Type *Ty = OrigTy; |
2328 | 0 | if (OrigTy->isPointerTy()) |
2329 | 0 | Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy); |
2330 | 0 | llvm::Type *Types[] = { Ty }; |
2331 | |
2332 | 0 | llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types); |
2333 | 0 | llvm::Value *Call = Builder.CreateCall( |
2334 | 0 | F, llvm::MetadataAsValue::get(Ty->getContext(), RegName)); |
2335 | 0 | if (OrigTy->isPointerTy()) |
2336 | 0 | Call = Builder.CreateIntToPtr(Call, OrigTy); |
2337 | 0 | return RValue::get(Call); |
2338 | 0 | } |
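For context, a sketch of the GNU global named register extension this lowers, assuming a target where "sp" is a valid register name:

    register unsigned long current_sp asm("sp");
    unsigned long get_sp() {
      return current_sp;  // becomes a call to the llvm.read_register intrinsic
    }                     // with metadata !"sp" naming the register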
2339 | | |
2340 | | /// EmitStoreThroughLValue - Store the specified rvalue into the specified |
2341 | | /// lvalue, where both are guaranteed to have the same type, and that type
2342 | | /// is 'Ty'. |
2343 | | void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, |
2344 | 0 | bool isInit) { |
2345 | 0 | if (!Dst.isSimple()) { |
2346 | 0 | if (Dst.isVectorElt()) { |
2347 | | // Read/modify/write the vector, inserting the new element. |
2348 | 0 | llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(), |
2349 | 0 | Dst.isVolatileQualified()); |
2350 | 0 | auto *IRStoreTy = dyn_cast<llvm::IntegerType>(Vec->getType()); |
2351 | 0 | if (IRStoreTy) { |
2352 | 0 | auto *IRVecTy = llvm::FixedVectorType::get( |
2353 | 0 | Builder.getInt1Ty(), IRStoreTy->getPrimitiveSizeInBits()); |
2354 | 0 | Vec = Builder.CreateBitCast(Vec, IRVecTy); |
2355 | | // iN --> <N x i1>. |
2356 | 0 | } |
2357 | 0 | Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(), |
2358 | 0 | Dst.getVectorIdx(), "vecins"); |
2359 | 0 | if (IRStoreTy) { |
2360 | | // <N x i1> --> <iN>. |
2361 | 0 | Vec = Builder.CreateBitCast(Vec, IRStoreTy); |
2362 | 0 | } |
2363 | 0 | Builder.CreateStore(Vec, Dst.getVectorAddress(), |
2364 | 0 | Dst.isVolatileQualified()); |
2365 | 0 | return; |
2366 | 0 | } |
2367 | | |
2368 | | // If this is an update of extended vector elements, insert them as |
2369 | | // appropriate. |
2370 | 0 | if (Dst.isExtVectorElt()) |
2371 | 0 | return EmitStoreThroughExtVectorComponentLValue(Src, Dst); |
2372 | | |
2373 | 0 | if (Dst.isGlobalReg()) |
2374 | 0 | return EmitStoreThroughGlobalRegLValue(Src, Dst); |
2375 | | |
2376 | 0 | if (Dst.isMatrixElt()) { |
2377 | 0 | llvm::Value *Idx = Dst.getMatrixIdx(); |
2378 | 0 | if (CGM.getCodeGenOpts().OptimizationLevel > 0) { |
2379 | 0 | const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>(); |
2380 | 0 | llvm::MatrixBuilder MB(Builder); |
2381 | 0 | MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened()); |
2382 | 0 | } |
2383 | 0 | llvm::Instruction *Load = Builder.CreateLoad(Dst.getMatrixAddress()); |
2384 | 0 | llvm::Value *Vec = |
2385 | 0 | Builder.CreateInsertElement(Load, Src.getScalarVal(), Idx, "matins"); |
2386 | 0 | Builder.CreateStore(Vec, Dst.getMatrixAddress(), |
2387 | 0 | Dst.isVolatileQualified()); |
2388 | 0 | return; |
2389 | 0 | } |
2390 | | |
2391 | 0 | assert(Dst.isBitField() && "Unknown LValue type"); |
2392 | 0 | return EmitStoreThroughBitfieldLValue(Src, Dst); |
2393 | 0 | } |
2394 | | |
2395 | | // There's special magic for assigning into an ARC-qualified l-value. |
2396 | 0 | if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) { |
2397 | 0 | switch (Lifetime) { |
2398 | 0 | case Qualifiers::OCL_None: |
2399 | 0 | llvm_unreachable("present but none"); |
2400 | |
2401 | 0 | case Qualifiers::OCL_ExplicitNone: |
2402 | | // nothing special |
2403 | 0 | break; |
2404 | | |
2405 | 0 | case Qualifiers::OCL_Strong: |
2406 | 0 | if (isInit) { |
2407 | 0 | Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal())); |
2408 | 0 | break; |
2409 | 0 | } |
2410 | 0 | EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true); |
2411 | 0 | return; |
2412 | | |
2413 | 0 | case Qualifiers::OCL_Weak: |
2414 | 0 | if (isInit) |
2415 | | // Initialize and then skip the primitive store. |
2416 | 0 | EmitARCInitWeak(Dst.getAddress(*this), Src.getScalarVal()); |
2417 | 0 | else |
2418 | 0 | EmitARCStoreWeak(Dst.getAddress(*this), Src.getScalarVal(), |
2419 | 0 | /*ignore*/ true); |
2420 | 0 | return; |
2421 | | |
2422 | 0 | case Qualifiers::OCL_Autoreleasing: |
2423 | 0 | Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(), |
2424 | 0 | Src.getScalarVal())); |
2425 | | // fall into the normal path |
2426 | 0 | break; |
2427 | 0 | } |
2428 | 0 | } |
2429 | | |
2430 | 0 | if (Dst.isObjCWeak() && !Dst.isNonGC()) { |
2431 | | // load of a __weak object. |
2432 | 0 | Address LvalueDst = Dst.getAddress(*this); |
2433 | 0 | llvm::Value *src = Src.getScalarVal(); |
2434 | 0 | CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst); |
2435 | 0 | return; |
2436 | 0 | } |
2437 | | |
2438 | 0 | if (Dst.isObjCStrong() && !Dst.isNonGC()) { |
2439 | | // load of a __strong object. |
2440 | 0 | Address LvalueDst = Dst.getAddress(*this); |
2441 | 0 | llvm::Value *src = Src.getScalarVal(); |
2442 | 0 | if (Dst.isObjCIvar()) { |
2443 | 0 | assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL"); |
2444 | 0 | llvm::Type *ResultType = IntPtrTy; |
2445 | 0 | Address dst = EmitPointerWithAlignment(Dst.getBaseIvarExp()); |
2446 | 0 | llvm::Value *RHS = dst.getPointer(); |
2447 | 0 | RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast"); |
2448 | 0 | llvm::Value *LHS = |
2449 | 0 | Builder.CreatePtrToInt(LvalueDst.getPointer(), ResultType, |
2450 | 0 | "sub.ptr.lhs.cast"); |
2451 | 0 | llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset"); |
2452 | 0 | CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst, |
2453 | 0 | BytesBetween); |
2454 | 0 | } else if (Dst.isGlobalObjCRef()) { |
2455 | 0 | CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst, |
2456 | 0 | Dst.isThreadLocalRef()); |
2457 | 0 | } |
2458 | 0 | else |
2459 | 0 | CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst); |
2460 | 0 | return; |
2461 | 0 | } |
2462 | | |
2463 | 0 | assert(Src.isScalar() && "Can't emit an agg store with this method"); |
2464 | 0 | EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit); |
2465 | 0 | } |
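// Illustration (not part of CGExpr.cpp): a hedged sketch of how the ARC
// cases above map to runtime behavior for an assignment `x = v;` -- not the
// authoritative lowering, just the rough shape:
//
//   __strong id x;        // EmitARCStoreStrong: retain v, store, release old
//   __weak id w;          // EmitARCStoreWeak / EmitARCInitWeak, i.e.
//                         //   objc_storeWeak(&w, v) / objc_initWeak(&w, v)
//   __autoreleasing id a; // the value's lifetime is extended
//                         // (retain+autorelease), then stored plainly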
2466 | | |
2467 | | void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, |
2468 | 0 | llvm::Value **Result) { |
2469 | 0 | const CGBitFieldInfo &Info = Dst.getBitFieldInfo(); |
2470 | 0 | llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType()); |
2471 | 0 | Address Ptr = Dst.getBitFieldAddress(); |
2472 | | |
2473 | | // Get the source value, truncated to the width of the bit-field. |
2474 | 0 | llvm::Value *SrcVal = Src.getScalarVal(); |
2475 | | |
2476 | | // Cast the source to the storage type and shift it into place. |
2477 | 0 | SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(), |
2478 | 0 | /*isSigned=*/false); |
2479 | 0 | llvm::Value *MaskedVal = SrcVal; |
2480 | |
2481 | 0 | const bool UseVolatile = |
2482 | 0 | CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() && |
2483 | 0 | Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget()); |
2484 | 0 | const unsigned StorageSize = |
2485 | 0 | UseVolatile ? Info.VolatileStorageSize : Info.StorageSize; |
2486 | 0 | const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset; |
2487 | |  // See if there are other bits in the bit-field's storage that we'll need
2488 | |  // to load and mask together with the source before storing.
2489 | 0 | if (StorageSize != Info.Size) { |
2490 | 0 | assert(StorageSize > Info.Size && "Invalid bitfield size."); |
2491 | 0 | llvm::Value *Val = |
2492 | 0 | Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load"); |
2493 | | |
2494 | | // Mask the source value as needed. |
2495 | 0 | if (!hasBooleanRepresentation(Dst.getType())) |
2496 | 0 | SrcVal = Builder.CreateAnd( |
2497 | 0 | SrcVal, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), |
2498 | 0 | "bf.value"); |
2499 | 0 | MaskedVal = SrcVal; |
2500 | 0 | if (Offset) |
2501 | 0 | SrcVal = Builder.CreateShl(SrcVal, Offset, "bf.shl"); |
2502 | | |
2503 | | // Mask out the original value. |
2504 | 0 | Val = Builder.CreateAnd( |
2505 | 0 | Val, ~llvm::APInt::getBitsSet(StorageSize, Offset, Offset + Info.Size), |
2506 | 0 | "bf.clear"); |
2507 | | |
2508 | | // Or together the unchanged values and the source value. |
2509 | 0 | SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set"); |
2510 | 0 | } else { |
2511 | 0 | assert(Offset == 0); |
2512 | |    // According to the AAPCS:
2513 | | // When a volatile bit-field is written, and its container does not overlap |
2514 | | // with any non-bit-field member, its container must be read exactly once |
2515 | | // and written exactly once using the access width appropriate to the type |
2516 | | // of the container. The two accesses are not atomic. |
2517 | 0 | if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) && |
2518 | 0 | CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad) |
2519 | 0 | Builder.CreateLoad(Ptr, true, "bf.load"); |
2520 | 0 | } |
2521 | | |
2522 | | // Write the new value back out. |
2523 | 0 | Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified()); |
2524 | | |
2525 | | // Return the new value of the bit-field, if requested. |
2526 | 0 | if (Result) { |
2527 | 0 | llvm::Value *ResultVal = MaskedVal; |
2528 | | |
2529 | | // Sign extend the value if needed. |
2530 | 0 | if (Info.IsSigned) { |
2531 | 0 | assert(Info.Size <= StorageSize); |
2532 | 0 | unsigned HighBits = StorageSize - Info.Size; |
2533 | 0 | if (HighBits) { |
2534 | 0 | ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl"); |
2535 | 0 | ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr"); |
2536 | 0 | } |
2537 | 0 | } |
2538 | | |
2539 | 0 | ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned, |
2540 | 0 | "bf.result.cast"); |
2541 | 0 | *Result = EmitFromMemory(ResultVal, Dst.getType()); |
2542 | 0 | } |
2543 | 0 | } |
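// Illustration (not part of CGExpr.cpp): a minimal standalone sketch of the
// bf.load / bf.value / bf.shl / bf.clear / bf.set sequence emitted above,
// specialized to a 32-bit storage unit. The helper name is hypothetical, and
// the result sign-extension path is omitted.
#include <cstdint>

inline void storeBitField32(uint32_t *Storage, uint32_t Value,
                            unsigned Offset, unsigned Size) {
  uint32_t Old = *Storage;                                // bf.load
  uint32_t LowMask = Size >= 32 ? ~0u : ((1u << Size) - 1u);
  uint32_t Src = (Value & LowMask) << Offset;             // bf.value, bf.shl
  uint32_t Cleared = Old & ~(LowMask << Offset);          // bf.clear
  *Storage = Cleared | Src;                               // bf.set, then store
}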
2544 | | |
2545 | | void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src, |
2546 | 0 | LValue Dst) { |
2547 | | // HLSL allows storing to scalar values through ExtVector component LValues. |
2548 | | // To support this we need to handle the case where the destination address is |
2549 | | // a scalar. |
2550 | 0 | Address DstAddr = Dst.getExtVectorAddress(); |
2551 | 0 | if (!DstAddr.getElementType()->isVectorTy()) { |
2552 | 0 | assert(!Dst.getType()->isVectorType() && |
2553 | 0 | "this should only occur for non-vector l-values"); |
2554 | 0 | Builder.CreateStore(Src.getScalarVal(), DstAddr, Dst.isVolatileQualified()); |
2555 | 0 | return; |
2556 | 0 | } |
2557 | | |
2558 | | // This access turns into a read/modify/write of the vector. Load the input |
2559 | | // value now. |
2560 | 0 | llvm::Value *Vec = Builder.CreateLoad(DstAddr, Dst.isVolatileQualified()); |
2561 | 0 | const llvm::Constant *Elts = Dst.getExtVectorElts(); |
2562 | |
2563 | 0 | llvm::Value *SrcVal = Src.getScalarVal(); |
2564 | |
2565 | 0 | if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) { |
2566 | 0 | unsigned NumSrcElts = VTy->getNumElements(); |
2567 | 0 | unsigned NumDstElts = |
2568 | 0 | cast<llvm::FixedVectorType>(Vec->getType())->getNumElements(); |
2569 | 0 | if (NumDstElts == NumSrcElts) { |
2570 | |      // Use a shuffle vector if the source and destination have the same
2571 | |      // number of elements; invert the swizzle mask so each source element
2572 | |      // lands in the destination lane it is being stored to.
2573 | 0 | SmallVector<int, 4> Mask(NumDstElts); |
2574 | 0 | for (unsigned i = 0; i != NumSrcElts; ++i) |
2575 | 0 | Mask[getAccessedFieldNo(i, Elts)] = i; |
2576 | |
2577 | 0 | Vec = Builder.CreateShuffleVector(SrcVal, Mask); |
2578 | 0 | } else if (NumDstElts > NumSrcElts) { |
2579 | |      // Extend the source vector to the same length, then shuffle it
2580 | |      // into the destination.
2581 | | // FIXME: since we're shuffling with undef, can we just use the indices |
2582 | | // into that? This could be simpler. |
2583 | 0 | SmallVector<int, 4> ExtMask; |
2584 | 0 | for (unsigned i = 0; i != NumSrcElts; ++i) |
2585 | 0 | ExtMask.push_back(i); |
2586 | 0 | ExtMask.resize(NumDstElts, -1); |
2587 | 0 | llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(SrcVal, ExtMask); |
2588 | |      // Build an identity mask over the destination elements.
2589 | 0 | SmallVector<int, 4> Mask; |
2590 | 0 | for (unsigned i = 0; i != NumDstElts; ++i) |
2591 | 0 | Mask.push_back(i); |
2592 | | |
2593 | | // When the vector size is odd and .odd or .hi is used, the last element |
2594 | | // of the Elts constant array will be one past the size of the vector. |
2595 | | // Ignore the last element here, if it is greater than the mask size. |
2596 | 0 | if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size()) |
2597 | 0 | NumSrcElts--; |
2598 | | |
2599 | |      // Modify the mask so the accessed lanes take their values from the
2600 | |      // source vector.
2600 | 0 | for (unsigned i = 0; i != NumSrcElts; ++i) |
2601 | 0 | Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts; |
2602 | 0 | Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, Mask); |
2603 | 0 | } else { |
2604 | | // We should never shorten the vector |
2605 | 0 | llvm_unreachable("unexpected shorten vector length"); |
2606 | 0 | } |
2607 | 0 | } else { |
2608 | |    // If the Src is a scalar (not a vector) and the target is a vector,
2609 | |    // it must be updating exactly one element.
2610 | 0 | unsigned InIdx = getAccessedFieldNo(0, Elts); |
2611 | 0 | llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx); |
2612 | 0 | Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt); |
2613 | 0 | } |
2614 | |
2615 | 0 | Builder.CreateStore(Vec, Dst.getExtVectorAddress(), |
2616 | 0 | Dst.isVolatileQualified()); |
2617 | 0 | } |
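// Illustration (not part of CGExpr.cpp): how the same-size shuffle mask above
// is built. For a store through `v.yzx` on a 3-element vector, Elts encodes
// {1, 2, 0}; inverting it yields the mask {2, 0, 1}, i.e. destination lane
// Elts[i] receives source element i. A hedged standalone sketch (the helper
// name is hypothetical):
#include <vector>

inline std::vector<int> buildSwizzleStoreMask(const std::vector<int> &Elts) {
  std::vector<int> Mask(Elts.size());
  for (unsigned i = 0; i != Elts.size(); ++i)
    Mask[Elts[i]] = i;   // mirrors Mask[getAccessedFieldNo(i, Elts)] = i
  return Mask;
}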
2618 | | |
2619 | | /// Stores to global named registers are always calls to intrinsics.
2620 | 0 | void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) { |
2621 | 0 | assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) && |
2622 | 0 | "Bad type for register variable"); |
2623 | 0 | llvm::MDNode *RegName = cast<llvm::MDNode>( |
2624 | 0 | cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata()); |
2625 | 0 | assert(RegName && "Register LValue is not metadata"); |
2626 | | |
2627 | | // We accept integer and pointer types only |
2628 | 0 | llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType()); |
2629 | 0 | llvm::Type *Ty = OrigTy; |
2630 | 0 | if (OrigTy->isPointerTy()) |
2631 | 0 | Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy); |
2632 | 0 | llvm::Type *Types[] = { Ty }; |
2633 | |
2634 | 0 | llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types); |
2635 | 0 | llvm::Value *Value = Src.getScalarVal(); |
2636 | 0 | if (OrigTy->isPointerTy()) |
2637 | 0 | Value = Builder.CreatePtrToInt(Value, Ty); |
2638 | 0 | Builder.CreateCall( |
2639 | 0 | F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value}); |
2640 | 0 | } |
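// Illustration (not part of CGExpr.cpp): a store to a GNU global register
// variable, e.g.
//
//   register unsigned long sp asm("sp");
//   void set_sp(unsigned long v) { sp = v; }
//
// is lowered by the function above to roughly
//
//   call void @llvm.write_register.i64(metadata !0, i64 %v)
//   !0 = !{!"sp"}
//
// with an extra ptrtoint first when the variable has pointer type.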
2641 | | |
2642 | | // setObjCGCLValueClass - sets the class of the lvalue for the purpose of
2643 | | // generating the write-barrier API. It is currently a global, an ivar,
2644 | | // or neither.
2645 | | static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E, |
2646 | | LValue &LV, |
2647 | 0 | bool IsMemberAccess=false) { |
2648 | 0 | if (Ctx.getLangOpts().getGC() == LangOptions::NonGC) |
2649 | 0 | return; |
2650 | | |
2651 | 0 | if (isa<ObjCIvarRefExpr>(E)) { |
2652 | 0 | QualType ExpTy = E->getType(); |
2653 | 0 | if (IsMemberAccess && ExpTy->isPointerType()) { |
2654 | |      // If the ivar is a structure pointer, assigning to a field of this
2655 | |      // struct follows gcc's behavior and conservatively makes it a
2656 | |      // non-ivar write-barrier.
2657 | 0 | ExpTy = ExpTy->castAs<PointerType>()->getPointeeType(); |
2658 | 0 | if (ExpTy->isRecordType()) { |
2659 | 0 | LV.setObjCIvar(false); |
2660 | 0 | return; |
2661 | 0 | } |
2662 | 0 | } |
2663 | 0 | LV.setObjCIvar(true); |
2664 | 0 | auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E)); |
2665 | 0 | LV.setBaseIvarExp(Exp->getBase()); |
2666 | 0 | LV.setObjCArray(E->getType()->isArrayType()); |
2667 | 0 | return; |
2668 | 0 | } |
2669 | | |
2670 | 0 | if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) { |
2671 | 0 | if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) { |
2672 | 0 | if (VD->hasGlobalStorage()) { |
2673 | 0 | LV.setGlobalObjCRef(true); |
2674 | 0 | LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None); |
2675 | 0 | } |
2676 | 0 | } |
2677 | 0 | LV.setObjCArray(E->getType()->isArrayType()); |
2678 | 0 | return; |
2679 | 0 | } |
2680 | | |
2681 | 0 | if (const auto *Exp = dyn_cast<UnaryOperator>(E)) { |
2682 | 0 | setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess); |
2683 | 0 | return; |
2684 | 0 | } |
2685 | | |
2686 | 0 | if (const auto *Exp = dyn_cast<ParenExpr>(E)) { |
2687 | 0 | setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess); |
2688 | 0 | if (LV.isObjCIvar()) { |
2689 | | // If cast is to a structure pointer, follow gcc's behavior and make it |
2690 | | // a non-ivar write-barrier. |
2691 | 0 | QualType ExpTy = E->getType(); |
2692 | 0 | if (ExpTy->isPointerType()) |
2693 | 0 | ExpTy = ExpTy->castAs<PointerType>()->getPointeeType(); |
2694 | 0 | if (ExpTy->isRecordType()) |
2695 | 0 | LV.setObjCIvar(false); |
2696 | 0 | } |
2697 | 0 | return; |
2698 | 0 | } |
2699 | | |
2700 | 0 | if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) { |
2701 | 0 | setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV); |
2702 | 0 | return; |
2703 | 0 | } |
2704 | | |
2705 | 0 | if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) { |
2706 | 0 | setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess); |
2707 | 0 | return; |
2708 | 0 | } |
2709 | | |
2710 | 0 | if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) { |
2711 | 0 | setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess); |
2712 | 0 | return; |
2713 | 0 | } |
2714 | | |
2715 | 0 | if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) { |
2716 | 0 | setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess); |
2717 | 0 | return; |
2718 | 0 | } |
2719 | | |
2720 | 0 | if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) { |
2721 | 0 | setObjCGCLValueClass(Ctx, Exp->getBase(), LV); |
2722 | 0 | if (LV.isObjCIvar() && !LV.isObjCArray()) |
2723 | |      // Using array syntax to assign to what an ivar points to is not the
2724 | |      // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
2725 | 0 | LV.setObjCIvar(false); |
2726 | 0 | else if (LV.isGlobalObjCRef() && !LV.isObjCArray()) |
2727 | |      // Using array syntax to assign to what a global points to is not the
2728 | |      // same as assigning to the global itself. {id *G;} G[i] = 0;
2729 | 0 | LV.setGlobalObjCRef(false); |
2730 | 0 | return; |
2731 | 0 | } |
2732 | | |
2733 | 0 | if (const auto *Exp = dyn_cast<MemberExpr>(E)) { |
2734 | 0 | setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true); |
2735 | |    // We don't know if the member is an 'ivar', but this flag is looked at
2736 | |    // only in the context of LV.isObjCIvar().
2737 | 0 | LV.setObjCArray(E->getType()->isArrayType()); |
2738 | 0 | return; |
2739 | 0 | } |
2740 | 0 | } |
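// Illustration (not part of CGExpr.cpp): under -fobjc-gc, the classification
// computed above selects which write barrier EmitStoreThroughLValue uses;
// loosely (a hedged sketch of the mapping, not an exhaustive list):
//
//   self->ivar = v;   // ivar       -> EmitObjCIvarAssign   (objc_assign_ivar)
//   gGlobal = v;      // global ref -> EmitObjCGlobalAssign (objc_assign_global)
//   p->field = v;     // neither    -> EmitObjCStrongCastAssign
//                     //               (objc_assign_strongCast)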
2741 | | |
2742 | | static LValue EmitThreadPrivateVarDeclLValue( |
2743 | | CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr, |
2744 | 0 | llvm::Type *RealVarTy, SourceLocation Loc) { |
2745 | 0 | if (CGF.CGM.getLangOpts().OpenMPIRBuilder) |
2746 | 0 | Addr = CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate( |
2747 | 0 | CGF, VD, Addr, Loc); |
2748 | 0 | else |
2749 | 0 | Addr = |
2750 | 0 | CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc); |
2751 | |
2752 | 0 | Addr = Addr.withElementType(RealVarTy); |
2753 | 0 | return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl); |
2754 | 0 | } |
2755 | | |
2756 | | static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF, |
2757 | 0 | const VarDecl *VD, QualType T) { |
2758 | 0 | std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res = |
2759 | 0 | OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD); |
2760 | | // Return an invalid address if variable is MT_To (or MT_Enter starting with |
2761 | | // OpenMP 5.2) and unified memory is not enabled. For all other cases: MT_Link |
2762 | | // and MT_To (or MT_Enter) with unified memory, return a valid address. |
2763 | 0 | if (!Res || ((*Res == OMPDeclareTargetDeclAttr::MT_To || |
2764 | 0 | *Res == OMPDeclareTargetDeclAttr::MT_Enter) && |
2765 | 0 | !CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) |
2766 | 0 | return Address::invalid(); |
2767 | 0 | assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) || |
2768 | 0 | ((*Res == OMPDeclareTargetDeclAttr::MT_To || |
2769 | 0 | *Res == OMPDeclareTargetDeclAttr::MT_Enter) && |
2770 | 0 | CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) && |
2771 | 0 | "Expected link clause OR to clause with unified memory enabled."); |
2772 | 0 | QualType PtrTy = CGF.getContext().getPointerType(VD->getType()); |
2773 | 0 | Address Addr = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD); |
2774 | 0 | return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>()); |
2775 | 0 | } |
2776 | | |
2777 | | Address |
2778 | | CodeGenFunction::EmitLoadOfReference(LValue RefLVal, |
2779 | | LValueBaseInfo *PointeeBaseInfo, |
2780 | 0 | TBAAAccessInfo *PointeeTBAAInfo) { |
2781 | 0 | llvm::LoadInst *Load = |
2782 | 0 | Builder.CreateLoad(RefLVal.getAddress(*this), RefLVal.isVolatile()); |
2783 | 0 | CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo()); |
2784 | |
2785 | 0 | QualType PointeeType = RefLVal.getType()->getPointeeType(); |
2786 | 0 | CharUnits Align = CGM.getNaturalTypeAlignment( |
2787 | 0 | PointeeType, PointeeBaseInfo, PointeeTBAAInfo, |
2788 | 0 | /* forPointeeType= */ true); |
2789 | 0 | return Address(Load, ConvertTypeForMem(PointeeType), Align); |
2790 | 0 | } |
2791 | | |
2792 | 0 | LValue CodeGenFunction::EmitLoadOfReferenceLValue(LValue RefLVal) { |
2793 | 0 | LValueBaseInfo PointeeBaseInfo; |
2794 | 0 | TBAAAccessInfo PointeeTBAAInfo; |
2795 | 0 | Address PointeeAddr = EmitLoadOfReference(RefLVal, &PointeeBaseInfo, |
2796 | 0 | &PointeeTBAAInfo); |
2797 | 0 | return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(), |
2798 | 0 | PointeeBaseInfo, PointeeTBAAInfo); |
2799 | 0 | } |
2800 | | |
2801 | | Address CodeGenFunction::EmitLoadOfPointer(Address Ptr, |
2802 | | const PointerType *PtrTy, |
2803 | | LValueBaseInfo *BaseInfo, |
2804 | 0 | TBAAAccessInfo *TBAAInfo) { |
2805 | 0 | llvm::Value *Addr = Builder.CreateLoad(Ptr); |
2806 | 0 | return Address(Addr, ConvertTypeForMem(PtrTy->getPointeeType()), |
2807 | 0 | CGM.getNaturalTypeAlignment(PtrTy->getPointeeType(), BaseInfo, |
2808 | 0 | TBAAInfo, |
2809 | 0 | /*forPointeeType=*/true)); |
2810 | 0 | } |
2811 | | |
2812 | | LValue CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr, |
2813 | 0 | const PointerType *PtrTy) { |
2814 | 0 | LValueBaseInfo BaseInfo; |
2815 | 0 | TBAAAccessInfo TBAAInfo; |
2816 | 0 | Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo); |
2817 | 0 | return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo); |
2818 | 0 | } |
2819 | | |
2820 | | static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF, |
2821 | 0 | const Expr *E, const VarDecl *VD) { |
2822 | 0 | QualType T = E->getType(); |
2823 | | |
2824 | | // If it's thread_local, emit a call to its wrapper function instead. |
2825 | 0 | if (VD->getTLSKind() == VarDecl::TLS_Dynamic && |
2826 | 0 | CGF.CGM.getCXXABI().usesThreadWrapperFunction(VD)) |
2827 | 0 | return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T); |
2828 | | // Check if the variable is marked as declare target with link clause in |
2829 | | // device codegen. |
2830 | 0 | if (CGF.getLangOpts().OpenMPIsTargetDevice) { |
2831 | 0 | Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T); |
2832 | 0 | if (Addr.isValid()) |
2833 | 0 | return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl); |
2834 | 0 | } |
2835 | | |
2836 | 0 | llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD); |
2837 | |
2838 | 0 | if (VD->getTLSKind() != VarDecl::TLS_None) |
2839 | 0 | V = CGF.Builder.CreateThreadLocalAddress(V); |
2840 | |
2841 | 0 | llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType()); |
2842 | 0 | CharUnits Alignment = CGF.getContext().getDeclAlign(VD); |
2843 | 0 | Address Addr(V, RealVarTy, Alignment); |
2844 | | // Emit reference to the private copy of the variable if it is an OpenMP |
2845 | | // threadprivate variable. |
2846 | 0 | if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd && |
2847 | 0 | VD->hasAttr<OMPThreadPrivateDeclAttr>()) { |
2848 | 0 | return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy, |
2849 | 0 | E->getExprLoc()); |
2850 | 0 | } |
2851 | 0 | LValue LV = VD->getType()->isReferenceType() ? |
2852 | 0 | CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(), |
2853 | 0 | AlignmentSource::Decl) : |
2854 | 0 | CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl); |
2855 | 0 | setObjCGCLValueClass(CGF.getContext(), E, LV); |
2856 | 0 | return LV; |
2857 | 0 | } |
2858 | | |
2859 | | static llvm::Constant *EmitFunctionDeclPointer(CodeGenModule &CGM, |
2860 | 0 | GlobalDecl GD) { |
2861 | 0 | const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl()); |
2862 | 0 | if (FD->hasAttr<WeakRefAttr>()) { |
2863 | 0 | ConstantAddress aliasee = CGM.GetWeakRefReference(FD); |
2864 | 0 | return aliasee.getPointer(); |
2865 | 0 | } |
2866 | | |
2867 | 0 | llvm::Constant *V = CGM.GetAddrOfFunction(GD); |
2868 | 0 | return V; |
2869 | 0 | } |
2870 | | |
2871 | | static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E, |
2872 | 0 | GlobalDecl GD) { |
2873 | 0 | const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl()); |
2874 | 0 | llvm::Value *V = EmitFunctionDeclPointer(CGF.CGM, GD); |
2875 | 0 | CharUnits Alignment = CGF.getContext().getDeclAlign(FD); |
2876 | 0 | return CGF.MakeAddrLValue(V, E->getType(), Alignment, |
2877 | 0 | AlignmentSource::Decl); |
2878 | 0 | } |
2879 | | |
2880 | | static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD, |
2881 | 0 | llvm::Value *ThisValue) { |
2882 | |
2883 | 0 | return CGF.EmitLValueForLambdaField(FD, ThisValue); |
2884 | 0 | } |
2885 | | |
2886 | | /// Named Registers are named metadata pointing to the register name |
2887 | | /// which will be read from/written to as an argument to the intrinsic |
2888 | | /// @llvm.read/write_register. |
2889 | | /// So far, only the name is being passed down, but other options such as |
2890 | | /// register type, allocation type or even optimization options could be |
2891 | | /// passed down via the metadata node. |
2892 | 0 | static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) { |
2893 | 0 | SmallString<64> Name("llvm.named.register."); |
2894 | 0 | AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>(); |
2895 | 0 | assert(Asm->getLabel().size() < 64-Name.size() && |
2896 | 0 | "Register name too big"); |
2897 | 0 | Name.append(Asm->getLabel()); |
2898 | 0 | llvm::NamedMDNode *M = |
2899 | 0 | CGM.getModule().getOrInsertNamedMetadata(Name); |
2900 | 0 | if (M->getNumOperands() == 0) { |
2901 | 0 | llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(), |
2902 | 0 | Asm->getLabel()); |
2903 | 0 | llvm::Metadata *Ops[] = {Str}; |
2904 | 0 | M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops)); |
2905 | 0 | } |
2906 | |
2907 | 0 | CharUnits Alignment = CGM.getContext().getDeclAlign(VD); |
2908 | |
2909 | 0 | llvm::Value *Ptr = |
2910 | 0 | llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0)); |
2911 | 0 | return LValue::MakeGlobalReg(Ptr, Alignment, VD->getType()); |
2912 | 0 | } |
2913 | | |
2914 | | /// Determine whether we can emit a reference to \p VD from the current |
2915 | | /// context, despite not necessarily having seen an odr-use of the variable in |
2916 | | /// this context. |
2917 | | static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF, |
2918 | | const DeclRefExpr *E, |
2919 | 0 | const VarDecl *VD) { |
2920 | | // For a variable declared in an enclosing scope, do not emit a spurious |
2921 | | // reference even if we have a capture, as that will emit an unwarranted |
2922 | | // reference to our capture state, and will likely generate worse code than |
2923 | | // emitting a local copy. |
2924 | 0 | if (E->refersToEnclosingVariableOrCapture()) |
2925 | 0 | return false; |
2926 | | |
2927 | | // For a local declaration declared in this function, we can always reference |
2928 | | // it even if we don't have an odr-use. |
2929 | 0 | if (VD->hasLocalStorage()) { |
2930 | 0 | return VD->getDeclContext() == |
2931 | 0 | dyn_cast_or_null<DeclContext>(CGF.CurCodeDecl); |
2932 | 0 | } |
2933 | | |
2934 | | // For a global declaration, we can emit a reference to it if we know |
2935 | | // for sure that we are able to emit a definition of it. |
2936 | 0 | VD = VD->getDefinition(CGF.getContext()); |
2937 | 0 | if (!VD) |
2938 | 0 | return false; |
2939 | | |
2940 | | // Don't emit a spurious reference if it might be to a variable that only |
2941 | | // exists on a different device / target. |
2942 | | // FIXME: This is unnecessarily broad. Check whether this would actually be a |
2943 | | // cross-target reference. |
2944 | 0 | if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA || |
2945 | 0 | CGF.getLangOpts().OpenCL) { |
2946 | 0 | return false; |
2947 | 0 | } |
2948 | | |
2949 | | // We can emit a spurious reference only if the linkage implies that we'll |
2950 | | // be emitting a non-interposable symbol that will be retained until link |
2951 | | // time. |
2952 | 0 | switch (CGF.CGM.getLLVMLinkageVarDefinition(VD)) { |
2953 | 0 | case llvm::GlobalValue::ExternalLinkage: |
2954 | 0 | case llvm::GlobalValue::LinkOnceODRLinkage: |
2955 | 0 | case llvm::GlobalValue::WeakODRLinkage: |
2956 | 0 | case llvm::GlobalValue::InternalLinkage: |
2957 | 0 | case llvm::GlobalValue::PrivateLinkage: |
2958 | 0 | return true; |
2959 | 0 | default: |
2960 | 0 | return false; |
2961 | 0 | } |
2962 | 0 | } |
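// Illustration (not part of CGExpr.cpp): the kind of "spurious" reference this
// guards. Given
//
//   static const int N = 4;
//   int f() { return N; }     // N is not odr-used here
//
// the caller may either fold N to the constant 4 or still emit a reference to
// the global; the linkage switch above ensures that, if a reference is
// emitted anyway, the symbol is guaranteed to exist and be non-interposable.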
2963 | | |
2964 | 0 | LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) { |
2965 | 0 | const NamedDecl *ND = E->getDecl(); |
2966 | 0 | QualType T = E->getType(); |
2967 | |
2968 | 0 | assert(E->isNonOdrUse() != NOUR_Unevaluated && |
2969 | 0 | "should not emit an unevaluated operand"); |
2970 | | |
2971 | 0 | if (const auto *VD = dyn_cast<VarDecl>(ND)) { |
2972 | |    // Global named registers are accessed via intrinsics only.
2973 | 0 | if (VD->getStorageClass() == SC_Register && |
2974 | 0 | VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl()) |
2975 | 0 | return EmitGlobalNamedRegister(VD, CGM); |
2976 | | |
2977 | | // If this DeclRefExpr does not constitute an odr-use of the variable, |
2978 | | // we're not permitted to emit a reference to it in general, and it might |
2979 | | // not be captured if capture would be necessary for a use. Emit the |
2980 | | // constant value directly instead. |
2981 | 0 | if (E->isNonOdrUse() == NOUR_Constant && |
2982 | 0 | (VD->getType()->isReferenceType() || |
2983 | 0 | !canEmitSpuriousReferenceToVariable(*this, E, VD))) { |
2984 | 0 | VD->getAnyInitializer(VD); |
2985 | 0 | llvm::Constant *Val = ConstantEmitter(*this).emitAbstract( |
2986 | 0 | E->getLocation(), *VD->evaluateValue(), VD->getType()); |
2987 | 0 | assert(Val && "failed to emit constant expression"); |
2988 | | |
2989 | 0 | Address Addr = Address::invalid(); |
2990 | 0 | if (!VD->getType()->isReferenceType()) { |
2991 | | // Spill the constant value to a global. |
2992 | 0 | Addr = CGM.createUnnamedGlobalFrom(*VD, Val, |
2993 | 0 | getContext().getDeclAlign(VD)); |
2994 | 0 | llvm::Type *VarTy = getTypes().ConvertTypeForMem(VD->getType()); |
2995 | 0 | auto *PTy = llvm::PointerType::get( |
2996 | 0 | VarTy, getTypes().getTargetAddressSpace(VD->getType())); |
2997 | 0 | Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy, VarTy); |
2998 | 0 | } else { |
2999 | | // Should we be using the alignment of the constant pointer we emitted? |
3000 | 0 | CharUnits Alignment = |
3001 | 0 | CGM.getNaturalTypeAlignment(E->getType(), |
3002 | 0 | /* BaseInfo= */ nullptr, |
3003 | 0 | /* TBAAInfo= */ nullptr, |
3004 | 0 | /* forPointeeType= */ true); |
3005 | 0 | Addr = Address(Val, ConvertTypeForMem(E->getType()), Alignment); |
3006 | 0 | } |
3007 | 0 | return MakeAddrLValue(Addr, T, AlignmentSource::Decl); |
3008 | 0 | } |
3009 | | |
3010 | | // FIXME: Handle other kinds of non-odr-use DeclRefExprs. |
3011 | | |
3012 | | // Check for captured variables. |
3013 | 0 | if (E->refersToEnclosingVariableOrCapture()) { |
3014 | 0 | VD = VD->getCanonicalDecl(); |
3015 | 0 | if (auto *FD = LambdaCaptureFields.lookup(VD)) |
3016 | 0 | return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue); |
3017 | 0 | if (CapturedStmtInfo) { |
3018 | 0 | auto I = LocalDeclMap.find(VD); |
3019 | 0 | if (I != LocalDeclMap.end()) { |
3020 | 0 | LValue CapLVal; |
3021 | 0 | if (VD->getType()->isReferenceType()) |
3022 | 0 | CapLVal = EmitLoadOfReferenceLValue(I->second, VD->getType(), |
3023 | 0 | AlignmentSource::Decl); |
3024 | 0 | else |
3025 | 0 | CapLVal = MakeAddrLValue(I->second, T); |
3026 | | // Mark lvalue as nontemporal if the variable is marked as nontemporal |
3027 | | // in simd context. |
3028 | 0 | if (getLangOpts().OpenMP && |
3029 | 0 | CGM.getOpenMPRuntime().isNontemporalDecl(VD)) |
3030 | 0 | CapLVal.setNontemporal(/*Value=*/true); |
3031 | 0 | return CapLVal; |
3032 | 0 | } |
3033 | 0 | LValue CapLVal = |
3034 | 0 | EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD), |
3035 | 0 | CapturedStmtInfo->getContextValue()); |
3036 | 0 | Address LValueAddress = CapLVal.getAddress(*this); |
3037 | 0 | CapLVal = MakeAddrLValue( |
3038 | 0 | Address(LValueAddress.getPointer(), LValueAddress.getElementType(), |
3039 | 0 | getContext().getDeclAlign(VD)), |
3040 | 0 | CapLVal.getType(), LValueBaseInfo(AlignmentSource::Decl), |
3041 | 0 | CapLVal.getTBAAInfo()); |
3042 | | // Mark lvalue as nontemporal if the variable is marked as nontemporal |
3043 | | // in simd context. |
3044 | 0 | if (getLangOpts().OpenMP && |
3045 | 0 | CGM.getOpenMPRuntime().isNontemporalDecl(VD)) |
3046 | 0 | CapLVal.setNontemporal(/*Value=*/true); |
3047 | 0 | return CapLVal; |
3048 | 0 | } |
3049 | | |
3050 | 0 | assert(isa<BlockDecl>(CurCodeDecl)); |
3051 | 0 | Address addr = GetAddrOfBlockDecl(VD); |
3052 | 0 | return MakeAddrLValue(addr, T, AlignmentSource::Decl); |
3053 | 0 | } |
3054 | 0 | } |
3055 | | |
3056 | | // FIXME: We should be able to assert this for FunctionDecls as well! |
3057 | | // FIXME: We should be able to assert this for all DeclRefExprs, not just |
3058 | | // those with a valid source location. |
3059 | 0 | assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() || |
3060 | 0 | !E->getLocation().isValid()) && |
3061 | 0 | "Should not use decl without marking it used!"); |
3062 | | |
3063 | 0 | if (ND->hasAttr<WeakRefAttr>()) { |
3064 | 0 | const auto *VD = cast<ValueDecl>(ND); |
3065 | 0 | ConstantAddress Aliasee = CGM.GetWeakRefReference(VD); |
3066 | 0 | return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl); |
3067 | 0 | } |
3068 | | |
3069 | 0 | if (const auto *VD = dyn_cast<VarDecl>(ND)) { |
3070 | | // Check if this is a global variable. |
3071 | 0 | if (VD->hasLinkage() || VD->isStaticDataMember()) |
3072 | 0 | return EmitGlobalVarDeclLValue(*this, E, VD); |
3073 | | |
3074 | 0 | Address addr = Address::invalid(); |
3075 | | |
3076 | | // The variable should generally be present in the local decl map. |
3077 | 0 | auto iter = LocalDeclMap.find(VD); |
3078 | 0 | if (iter != LocalDeclMap.end()) { |
3079 | 0 | addr = iter->second; |
3080 | | |
3081 | |      // Otherwise, it might be a static local we haven't emitted yet for
3082 | |      // some reason; most likely because it's in an outer function.
3083 | 0 | } else if (VD->isStaticLocal()) { |
3084 | 0 | llvm::Constant *var = CGM.getOrCreateStaticVarDecl( |
3085 | 0 | *VD, CGM.getLLVMLinkageVarDefinition(VD)); |
3086 | 0 | addr = Address( |
3087 | 0 | var, ConvertTypeForMem(VD->getType()), getContext().getDeclAlign(VD)); |
3088 | | |
3089 | | // No other cases for now. |
3090 | 0 | } else { |
3091 | 0 | llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?"); |
3092 | 0 | } |
3093 | | |
3094 | | // Handle threadlocal function locals. |
3095 | 0 | if (VD->getTLSKind() != VarDecl::TLS_None) |
3096 | 0 | addr = addr.withPointer( |
3097 | 0 | Builder.CreateThreadLocalAddress(addr.getPointer()), NotKnownNonNull); |
3098 | | |
3099 | | // Check for OpenMP threadprivate variables. |
3100 | 0 | if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd && |
3101 | 0 | VD->hasAttr<OMPThreadPrivateDeclAttr>()) { |
3102 | 0 | return EmitThreadPrivateVarDeclLValue( |
3103 | 0 | *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()), |
3104 | 0 | E->getExprLoc()); |
3105 | 0 | } |
3106 | | |
3107 | | // Drill into block byref variables. |
3108 | 0 | bool isBlockByref = VD->isEscapingByref(); |
3109 | 0 | if (isBlockByref) { |
3110 | 0 | addr = emitBlockByrefAddress(addr, VD); |
3111 | 0 | } |
3112 | | |
3113 | | // Drill into reference types. |
3114 | 0 | LValue LV = VD->getType()->isReferenceType() ? |
3115 | 0 | EmitLoadOfReferenceLValue(addr, VD->getType(), AlignmentSource::Decl) : |
3116 | 0 | MakeAddrLValue(addr, T, AlignmentSource::Decl); |
3117 | |
3118 | 0 | bool isLocalStorage = VD->hasLocalStorage(); |
3119 | |
3120 | 0 | bool NonGCable = isLocalStorage && |
3121 | 0 | !VD->getType()->isReferenceType() && |
3122 | 0 | !isBlockByref; |
3123 | 0 | if (NonGCable) { |
3124 | 0 | LV.getQuals().removeObjCGCAttr(); |
3125 | 0 | LV.setNonGC(true); |
3126 | 0 | } |
3127 | |
3128 | 0 | bool isImpreciseLifetime = |
3129 | 0 | (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>()); |
3130 | 0 | if (isImpreciseLifetime) |
3131 | 0 | LV.setARCPreciseLifetime(ARCImpreciseLifetime); |
3132 | 0 | setObjCGCLValueClass(getContext(), E, LV); |
3133 | 0 | return LV; |
3134 | 0 | } |
3135 | | |
3136 | 0 | if (const auto *FD = dyn_cast<FunctionDecl>(ND)) { |
3137 | 0 | LValue LV = EmitFunctionDeclLValue(*this, E, FD); |
3138 | | |
3139 | | // Emit debuginfo for the function declaration if the target wants to. |
3140 | 0 | if (getContext().getTargetInfo().allowDebugInfoForExternalRef()) { |
3141 | 0 | if (CGDebugInfo *DI = CGM.getModuleDebugInfo()) { |
3142 | 0 | auto *Fn = |
3143 | 0 | cast<llvm::Function>(LV.getPointer(*this)->stripPointerCasts()); |
3144 | 0 | if (!Fn->getSubprogram()) |
3145 | 0 | DI->EmitFunctionDecl(FD, FD->getLocation(), T, Fn); |
3146 | 0 | } |
3147 | 0 | } |
3148 | |
3149 | 0 | return LV; |
3150 | 0 | } |
3151 | | |
3152 | | // FIXME: While we're emitting a binding from an enclosing scope, all other |
3153 | | // DeclRefExprs we see should be implicitly treated as if they also refer to |
3154 | | // an enclosing scope. |
3155 | 0 | if (const auto *BD = dyn_cast<BindingDecl>(ND)) { |
3156 | 0 | if (E->refersToEnclosingVariableOrCapture()) { |
3157 | 0 | auto *FD = LambdaCaptureFields.lookup(BD); |
3158 | 0 | return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue); |
3159 | 0 | } |
3160 | 0 | return EmitLValue(BD->getBinding()); |
3161 | 0 | } |
3162 | | |
3163 | | // We can form DeclRefExprs naming GUID declarations when reconstituting |
3164 | | // non-type template parameters into expressions. |
3165 | 0 | if (const auto *GD = dyn_cast<MSGuidDecl>(ND)) |
3166 | 0 | return MakeAddrLValue(CGM.GetAddrOfMSGuidDecl(GD), T, |
3167 | 0 | AlignmentSource::Decl); |
3168 | | |
3169 | 0 | if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) { |
3170 | 0 | auto ATPO = CGM.GetAddrOfTemplateParamObject(TPO); |
3171 | 0 | auto AS = getLangASFromTargetAS(ATPO.getAddressSpace()); |
3172 | |
3173 | 0 | if (AS != T.getAddressSpace()) { |
3174 | 0 | auto TargetAS = getContext().getTargetAddressSpace(T.getAddressSpace()); |
3175 | 0 | auto PtrTy = ATPO.getElementType()->getPointerTo(TargetAS); |
3176 | 0 | auto ASC = getTargetHooks().performAddrSpaceCast( |
3177 | 0 | CGM, ATPO.getPointer(), AS, T.getAddressSpace(), PtrTy); |
3178 | 0 | ATPO = ConstantAddress(ASC, ATPO.getElementType(), ATPO.getAlignment()); |
3179 | 0 | } |
3180 | |
3181 | 0 | return MakeAddrLValue(ATPO, T, AlignmentSource::Decl); |
3182 | 0 | } |
3183 | | |
3184 | 0 | llvm_unreachable("Unhandled DeclRefExpr"); |
3185 | 0 | } |
3186 | | |
3187 | 0 | LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) { |
3188 | | // __extension__ doesn't affect lvalue-ness. |
3189 | 0 | if (E->getOpcode() == UO_Extension) |
3190 | 0 | return EmitLValue(E->getSubExpr()); |
3191 | | |
3192 | 0 | QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType()); |
3193 | 0 | switch (E->getOpcode()) { |
3194 | 0 | default: llvm_unreachable("Unknown unary operator lvalue!"); |
3195 | 0 | case UO_Deref: { |
3196 | 0 | QualType T = E->getSubExpr()->getType()->getPointeeType(); |
3197 | 0 | assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type"); |
3198 | | |
3199 | 0 | LValueBaseInfo BaseInfo; |
3200 | 0 | TBAAAccessInfo TBAAInfo; |
3201 | 0 | Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &BaseInfo, |
3202 | 0 | &TBAAInfo); |
3203 | 0 | LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo); |
3204 | 0 | LV.getQuals().setAddressSpace(ExprTy.getAddressSpace()); |
3205 | | |
3206 | |    // We should not generate a __weak write barrier on an indirect
3207 | |    // reference to a pointer to object, as in void foo(__weak id *param);
3208 | |    // *param = 0;. But we continue to generate a __strong write barrier
3209 | |    // on an indirect write into a pointer to object.
3210 | 0 | if (getLangOpts().ObjC && |
3211 | 0 | getLangOpts().getGC() != LangOptions::NonGC && |
3212 | 0 | LV.isObjCWeak()) |
3213 | 0 | LV.setNonGC(!E->isOBJCGCCandidate(getContext())); |
3214 | 0 | return LV; |
3215 | 0 | } |
3216 | 0 | case UO_Real: |
3217 | 0 | case UO_Imag: { |
3218 | 0 | LValue LV = EmitLValue(E->getSubExpr()); |
3219 | 0 | assert(LV.isSimple() && "real/imag on non-ordinary l-value"); |
3220 | | |
3221 | | // __real is valid on scalars. This is a faster way of testing that. |
3222 | | // __imag can only produce an rvalue on scalars. |
3223 | 0 | if (E->getOpcode() == UO_Real && |
3224 | 0 | !LV.getAddress(*this).getElementType()->isStructTy()) { |
3225 | 0 | assert(E->getSubExpr()->getType()->isArithmeticType()); |
3226 | 0 | return LV; |
3227 | 0 | } |
3228 | | |
3229 | 0 | QualType T = ExprTy->castAs<ComplexType>()->getElementType(); |
3230 | |
3231 | 0 | Address Component = |
3232 | 0 | (E->getOpcode() == UO_Real |
3233 | 0 | ? emitAddrOfRealComponent(LV.getAddress(*this), LV.getType()) |
3234 | 0 | : emitAddrOfImagComponent(LV.getAddress(*this), LV.getType())); |
3235 | 0 | LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(), |
3236 | 0 | CGM.getTBAAInfoForSubobject(LV, T)); |
3237 | 0 | ElemLV.getQuals().addQualifiers(LV.getQuals()); |
3238 | 0 | return ElemLV; |
3239 | 0 | } |
3240 | 0 | case UO_PreInc: |
3241 | 0 | case UO_PreDec: { |
3242 | 0 | LValue LV = EmitLValue(E->getSubExpr()); |
3243 | 0 | bool isInc = E->getOpcode() == UO_PreInc; |
3244 | |
3245 | 0 | if (E->getType()->isAnyComplexType()) |
3246 | 0 | EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/); |
3247 | 0 | else |
3248 | 0 | EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/); |
3249 | 0 | return LV; |
3250 | 0 | } |
3251 | 0 | } |
3252 | 0 | } |
3253 | | |
3254 | 0 | LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) { |
3255 | 0 | return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E), |
3256 | 0 | E->getType(), AlignmentSource::Decl); |
3257 | 0 | } |
3258 | | |
3259 | 0 | LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) { |
3260 | 0 | return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E), |
3261 | 0 | E->getType(), AlignmentSource::Decl); |
3262 | 0 | } |
3263 | | |
3264 | 0 | LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) { |
3265 | 0 | auto SL = E->getFunctionName(); |
3266 | 0 | assert(SL != nullptr && "No StringLiteral name in PredefinedExpr"); |
3267 | 0 | StringRef FnName = CurFn->getName(); |
3268 | 0 | if (FnName.starts_with("\01")) |
3269 | 0 | FnName = FnName.substr(1); |
3270 | 0 | StringRef NameItems[] = { |
3271 | 0 | PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName}; |
3272 | 0 | std::string GVName = llvm::join(NameItems, NameItems + 2, "."); |
3273 | 0 | if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) { |
3274 | 0 | std::string Name = std::string(SL->getString()); |
3275 | 0 | if (!Name.empty()) { |
3276 | 0 | unsigned Discriminator = |
3277 | 0 | CGM.getCXXABI().getMangleContext().getBlockId(BD, true); |
3278 | 0 | if (Discriminator) |
3279 | 0 | Name += "_" + Twine(Discriminator + 1).str(); |
3280 | 0 | auto C = CGM.GetAddrOfConstantCString(Name, GVName.c_str()); |
3281 | 0 | return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl); |
3282 | 0 | } else { |
3283 | 0 | auto C = |
3284 | 0 | CGM.GetAddrOfConstantCString(std::string(FnName), GVName.c_str()); |
3285 | 0 | return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl); |
3286 | 0 | } |
3287 | 0 | } |
3288 | 0 | auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName); |
3289 | 0 | return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl); |
3290 | 0 | } |
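// Illustration (not part of CGExpr.cpp): for
//
//   void foo() { const char *s = __func__; }
//
// the code above emits the string "foo" into a global named "__func__.foo"
// (the ident-kind name joined with the function name); inside a block
// literal, a discriminator suffix such as "_2" may be appended to keep the
// names unique.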
3291 | | |
3292 | | /// Emit a type description suitable for use by a runtime sanitizer library. The |
3293 | | /// format of a type descriptor is |
3294 | | /// |
3295 | | /// \code |
3296 | | /// { i16 TypeKind, i16 TypeInfo } |
3297 | | /// \endcode |
3298 | | /// |
3299 | | /// followed by an array of i8 containing the type name. TypeKind is 0 for an |
3300 | | /// integer, 1 for a floating point value, and -1 for anything else. |
3301 | 0 | llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) { |
3302 | | // Only emit each type's descriptor once. |
3303 | 0 | if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T)) |
3304 | 0 | return C; |
3305 | | |
3306 | 0 | uint16_t TypeKind = -1; |
3307 | 0 | uint16_t TypeInfo = 0; |
3308 | |
3309 | 0 | if (T->isIntegerType()) { |
3310 | 0 | TypeKind = 0; |
3311 | 0 | TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) | |
3312 | 0 | (T->isSignedIntegerType() ? 1 : 0); |
3313 | 0 | } else if (T->isFloatingType()) { |
3314 | 0 | TypeKind = 1; |
3315 | 0 | TypeInfo = getContext().getTypeSize(T); |
3316 | 0 | } |
3317 | | |
3318 | | // Format the type name as if for a diagnostic, including quotes and |
3319 | | // optionally an 'aka'. |
3320 | 0 | SmallString<32> Buffer; |
3321 | 0 | CGM.getDiags().ConvertArgToString( |
3322 | 0 | DiagnosticsEngine::ak_qualtype, (intptr_t)T.getAsOpaquePtr(), StringRef(), |
3323 | 0 | StringRef(), std::nullopt, Buffer, std::nullopt); |
3324 | |
3325 | 0 | llvm::Constant *Components[] = { |
3326 | 0 | Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo), |
3327 | 0 | llvm::ConstantDataArray::getString(getLLVMContext(), Buffer) |
3328 | 0 | }; |
3329 | 0 | llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components); |
3330 | |
3331 | 0 | auto *GV = new llvm::GlobalVariable( |
3332 | 0 | CGM.getModule(), Descriptor->getType(), |
3333 | 0 | /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor); |
3334 | 0 | GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
3335 | 0 | CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV); |
3336 | | |
3337 | | // Remember the descriptor for this type. |
3338 | 0 | CGM.setTypeDescriptorInMap(T, GV); |
3339 | |
3340 | 0 | return GV; |
3341 | 0 | } |
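// Illustration (not part of CGExpr.cpp): the TypeInfo packing above, worked
// for a 32-bit signed int: (log2(32) << 1) | 1 == (5 << 1) | 1 == 11, so the
// descriptor starts { i16 0, i16 11, ... }. A standalone sketch of the
// integer case (the helper name is hypothetical):
#include <cstdint>

inline uint16_t packIntTypeInfo(unsigned BitWidth, bool IsSigned) {
  unsigned Log2 = 0;
  while ((1u << (Log2 + 1)) <= BitWidth) // integer log2 of the bit width
    ++Log2;
  return static_cast<uint16_t>((Log2 << 1) | (IsSigned ? 1u : 0u));
}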
3342 | | |
3343 | 0 | llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) { |
3344 | 0 | llvm::Type *TargetTy = IntPtrTy; |
3345 | |
3346 | 0 | if (V->getType() == TargetTy) |
3347 | 0 | return V; |
3348 | | |
3349 | | // Floating-point types which fit into intptr_t are bitcast to integers |
3350 | | // and then passed directly (after zero-extension, if necessary). |
3351 | 0 | if (V->getType()->isFloatingPointTy()) { |
3352 | 0 | unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedValue(); |
3353 | 0 | if (Bits <= TargetTy->getIntegerBitWidth()) |
3354 | 0 | V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(), |
3355 | 0 | Bits)); |
3356 | 0 | } |
3357 | | |
3358 | | // Integers which fit in intptr_t are zero-extended and passed directly. |
3359 | 0 | if (V->getType()->isIntegerTy() && |
3360 | 0 | V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth()) |
3361 | 0 | return Builder.CreateZExt(V, TargetTy); |
3362 | | |
3363 | | // Pointers are passed directly, everything else is passed by address. |
3364 | 0 | if (!V->getType()->isPointerTy()) { |
3365 | 0 | Address Ptr = CreateDefaultAlignTempAlloca(V->getType()); |
3366 | 0 | Builder.CreateStore(V, Ptr); |
3367 | 0 | V = Ptr.getPointer(); |
3368 | 0 | } |
3369 | 0 | return Builder.CreatePtrToInt(V, TargetTy); |
3370 | 0 | } |
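// Illustration (not part of CGExpr.cpp): how a few value kinds reach the
// handler as an intptr_t under the rules above (assuming 64-bit intptr_t):
//
//   i32 %x    ->  zext i32 %x to i64                  ; small int: zero-extend
//   float %f  ->  bitcast float %f to i32, then zext  ; FP that fits
//   ptr %p    ->  ptrtoint ptr %p to i64              ; pointers pass directly
//   i128 %w   ->  spilled to a stack slot; its address is passed instead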
3371 | | |
3372 | | /// Emit a representation of a SourceLocation for passing to a handler |
3373 | | /// in a sanitizer runtime library. The format for this data is: |
3374 | | /// \code |
3375 | | /// struct SourceLocation { |
3376 | | /// const char *Filename; |
3377 | | /// int32_t Line, Column; |
3378 | | /// }; |
3379 | | /// \endcode |
3380 | | /// For an invalid SourceLocation, the Filename pointer is null. |
3381 | 0 | llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) { |
3382 | 0 | llvm::Constant *Filename; |
3383 | 0 | int Line, Column; |
3384 | |
3385 | 0 | PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc); |
3386 | 0 | if (PLoc.isValid()) { |
3387 | 0 | StringRef FilenameString = PLoc.getFilename(); |
3388 | |
3389 | 0 | int PathComponentsToStrip = |
3390 | 0 | CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip; |
3391 | 0 | if (PathComponentsToStrip < 0) { |
3392 | 0 | assert(PathComponentsToStrip != INT_MIN); |
3393 | 0 | int PathComponentsToKeep = -PathComponentsToStrip; |
3394 | 0 | auto I = llvm::sys::path::rbegin(FilenameString); |
3395 | 0 | auto E = llvm::sys::path::rend(FilenameString); |
3396 | 0 | while (I != E && --PathComponentsToKeep) |
3397 | 0 | ++I; |
3398 | |
3399 | 0 | FilenameString = FilenameString.substr(I - E); |
3400 | 0 | } else if (PathComponentsToStrip > 0) { |
3401 | 0 | auto I = llvm::sys::path::begin(FilenameString); |
3402 | 0 | auto E = llvm::sys::path::end(FilenameString); |
3403 | 0 | while (I != E && PathComponentsToStrip--) |
3404 | 0 | ++I; |
3405 | |
3406 | 0 | if (I != E) |
3407 | 0 | FilenameString = |
3408 | 0 | FilenameString.substr(I - llvm::sys::path::begin(FilenameString)); |
3409 | 0 | else |
3410 | 0 | FilenameString = llvm::sys::path::filename(FilenameString); |
3411 | 0 | } |
3412 | | |
3413 | 0 | auto FilenameGV = |
3414 | 0 | CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src"); |
3415 | 0 | CGM.getSanitizerMetadata()->disableSanitizerForGlobal( |
3416 | 0 | cast<llvm::GlobalVariable>( |
3417 | 0 | FilenameGV.getPointer()->stripPointerCasts())); |
3418 | 0 | Filename = FilenameGV.getPointer(); |
3419 | 0 | Line = PLoc.getLine(); |
3420 | 0 | Column = PLoc.getColumn(); |
3421 | 0 | } else { |
3422 | 0 | Filename = llvm::Constant::getNullValue(Int8PtrTy); |
3423 | 0 | Line = Column = 0; |
3424 | 0 | } |
3425 | | |
3426 | 0 | llvm::Constant *Data[] = {Filename, Builder.getInt32(Line), |
3427 | 0 | Builder.getInt32(Column)}; |
3428 | |
3429 | 0 | return llvm::ConstantStruct::getAnon(Data); |
3430 | 0 | } |
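// Illustration (not part of CGExpr.cpp): the path-stripping logic above,
// worked for the file "src/lib/foo.c", where N is the value of
// -fsanitize-undefined-strip-path-components=N:
//
//   N =  1   drop one leading component   -> "lib/foo.c"
//   N = -2   keep only the last two       -> "lib/foo.c"
//   N =  0   leave the path unchanged.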
3431 | | |
3432 | | namespace { |
3433 | | /// Specify under what conditions this check can be recovered from.
3434 | | enum class CheckRecoverableKind { |
3435 | | /// Always terminate program execution if this check fails. |
3436 | | Unrecoverable, |
3437 | | /// Check supports recovering, runtime has both fatal (noreturn) and |
3438 | | /// non-fatal handlers for this check. |
3439 | | Recoverable, |
3440 | | /// Runtime conditionally aborts, always need to support recovery. |
3441 | | AlwaysRecoverable |
3442 | | }; |
3443 | | } |
3444 | | |
3445 | 0 | static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind) { |
3446 | 0 | assert(Kind.countPopulation() == 1); |
3447 | 0 | if (Kind == SanitizerKind::Vptr) |
3448 | 0 | return CheckRecoverableKind::AlwaysRecoverable; |
3449 | 0 | else if (Kind == SanitizerKind::Return || Kind == SanitizerKind::Unreachable) |
3450 | 0 | return CheckRecoverableKind::Unrecoverable; |
3451 | 0 | else |
3452 | 0 | return CheckRecoverableKind::Recoverable; |
3453 | 0 | } |
3454 | | |
3455 | | namespace { |
3456 | | struct SanitizerHandlerInfo { |
3457 | | char const *const Name; |
3458 | | unsigned Version; |
3459 | | }; |
3460 | | } |
3461 | | |
3462 | | const SanitizerHandlerInfo SanitizerHandlers[] = { |
3463 | | #define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version}, |
3464 | | LIST_SANITIZER_CHECKS |
3465 | | #undef SANITIZER_CHECK |
3466 | | }; |
3467 | | |
3468 | | static void emitCheckHandlerCall(CodeGenFunction &CGF, |
3469 | | llvm::FunctionType *FnType, |
3470 | | ArrayRef<llvm::Value *> FnArgs, |
3471 | | SanitizerHandler CheckHandler, |
3472 | | CheckRecoverableKind RecoverKind, bool IsFatal, |
3473 | 0 | llvm::BasicBlock *ContBB) { |
3474 | 0 | assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable); |
3475 | 0 | std::optional<ApplyDebugLocation> DL; |
3476 | 0 | if (!CGF.Builder.getCurrentDebugLocation()) { |
3477 | | // Ensure that the call has at least an artificial debug location. |
3478 | 0 | DL.emplace(CGF, SourceLocation()); |
3479 | 0 | } |
3480 | 0 | bool NeedsAbortSuffix = |
3481 | 0 | IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable; |
3482 | 0 | bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime; |
3483 | 0 | const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler]; |
3484 | 0 | const StringRef CheckName = CheckInfo.Name; |
3485 | 0 | std::string FnName = "__ubsan_handle_" + CheckName.str(); |
3486 | 0 | if (CheckInfo.Version && !MinimalRuntime) |
3487 | 0 | FnName += "_v" + llvm::utostr(CheckInfo.Version); |
3488 | 0 | if (MinimalRuntime) |
3489 | 0 | FnName += "_minimal"; |
3490 | 0 | if (NeedsAbortSuffix) |
3491 | 0 | FnName += "_abort"; |
3492 | 0 | bool MayReturn = |
3493 | 0 | !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable; |
3494 | |
3495 | 0 | llvm::AttrBuilder B(CGF.getLLVMContext()); |
3496 | 0 | if (!MayReturn) { |
3497 | 0 | B.addAttribute(llvm::Attribute::NoReturn) |
3498 | 0 | .addAttribute(llvm::Attribute::NoUnwind); |
3499 | 0 | } |
3500 | 0 | B.addUWTableAttr(llvm::UWTableKind::Default); |
3501 | |
3502 | 0 | llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction( |
3503 | 0 | FnType, FnName, |
3504 | 0 | llvm::AttributeList::get(CGF.getLLVMContext(), |
3505 | 0 | llvm::AttributeList::FunctionIndex, B), |
3506 | 0 | /*Local=*/true); |
3507 | 0 | llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs); |
3508 | 0 | if (!MayReturn) { |
3509 | 0 | HandlerCall->setDoesNotReturn(); |
3510 | 0 | CGF.Builder.CreateUnreachable(); |
3511 | 0 | } else { |
3512 | 0 | CGF.Builder.CreateBr(ContBB); |
3513 | 0 | } |
3514 | 0 | } |
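// Illustration (not part of CGExpr.cpp): handler names assembled above for a
// check named "type_mismatch" with Version == 1 (an example, not an
// exhaustive list):
//
//   __ubsan_handle_type_mismatch_v1          // recoverable, full runtime
//   __ubsan_handle_type_mismatch_v1_abort    // fatal: abort suffix appended
//   __ubsan_handle_type_mismatch_minimal     // -fsanitize-minimal-runtime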
3515 | | |
3516 | | void CodeGenFunction::EmitCheck( |
3517 | | ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked, |
3518 | | SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs, |
3519 | 0 | ArrayRef<llvm::Value *> DynamicArgs) { |
3520 | 0 | assert(IsSanitizerScope); |
3521 | 0 | assert(Checked.size() > 0); |
3522 | 0 | assert(CheckHandler >= 0 && |
3523 | 0 | size_t(CheckHandler) < std::size(SanitizerHandlers)); |
3524 | 0 | const StringRef CheckName = SanitizerHandlers[CheckHandler].Name; |
3525 | |
3526 | 0 | llvm::Value *FatalCond = nullptr; |
3527 | 0 | llvm::Value *RecoverableCond = nullptr; |
3528 | 0 | llvm::Value *TrapCond = nullptr; |
3529 | 0 | for (int i = 0, n = Checked.size(); i < n; ++i) { |
3530 | 0 | llvm::Value *Check = Checked[i].first; |
3531 | | // -fsanitize-trap= overrides -fsanitize-recover=. |
3532 | 0 | llvm::Value *&Cond = |
3533 | 0 | CGM.getCodeGenOpts().SanitizeTrap.has(Checked[i].second) |
3534 | 0 | ? TrapCond |
3535 | 0 | : CGM.getCodeGenOpts().SanitizeRecover.has(Checked[i].second) |
3536 | 0 | ? RecoverableCond |
3537 | 0 | : FatalCond; |
3538 | 0 | Cond = Cond ? Builder.CreateAnd(Cond, Check) : Check; |
3539 | 0 | } |
3540 | |
3541 | 0 | if (TrapCond) |
3542 | 0 | EmitTrapCheck(TrapCond, CheckHandler); |
3543 | 0 | if (!FatalCond && !RecoverableCond) |
3544 | 0 | return; |
3545 | | |
3546 | 0 | llvm::Value *JointCond; |
3547 | 0 | if (FatalCond && RecoverableCond) |
3548 | 0 | JointCond = Builder.CreateAnd(FatalCond, RecoverableCond); |
3549 | 0 | else |
3550 | 0 | JointCond = FatalCond ? FatalCond : RecoverableCond; |
3551 | 0 | assert(JointCond); |
3552 | | |
3553 | 0 | CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second); |
3554 | 0 | assert(SanOpts.has(Checked[0].second)); |
3555 | 0 | #ifndef NDEBUG |
3556 | 0 | for (int i = 1, n = Checked.size(); i < n; ++i) { |
3557 | 0 | assert(RecoverKind == getRecoverableKind(Checked[i].second) && |
3558 | 0 | "All recoverable kinds in a single check must be same!"); |
3559 | 0 | assert(SanOpts.has(Checked[i].second)); |
3560 | 0 | } |
3561 | 0 | #endif |
3562 | |
3563 | 0 | llvm::BasicBlock *Cont = createBasicBlock("cont"); |
3564 | 0 | llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName); |
3565 | 0 | llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers); |
3566 | |  // Give a hint that we very much don't expect to execute the handler.
3567 | | // Value chosen to match UR_NONTAKEN_WEIGHT, see BranchProbabilityInfo.cpp |
3568 | 0 | llvm::MDBuilder MDHelper(getLLVMContext()); |
3569 | 0 | llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1); |
3570 | 0 | Branch->setMetadata(llvm::LLVMContext::MD_prof, Node); |
3571 | 0 | EmitBlock(Handlers); |
3572 | | |
3573 | | // Handler functions take an i8* pointing to the (handler-specific) static |
3574 | | // information block, followed by a sequence of intptr_t arguments |
3575 | | // representing operand values. |
3576 | 0 | SmallVector<llvm::Value *, 4> Args; |
3577 | 0 | SmallVector<llvm::Type *, 4> ArgTypes; |
3578 | 0 | if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) { |
3579 | 0 | Args.reserve(DynamicArgs.size() + 1); |
3580 | 0 | ArgTypes.reserve(DynamicArgs.size() + 1); |
3581 | | |
3582 | | // Emit handler arguments and create handler function type. |
3583 | 0 | if (!StaticArgs.empty()) { |
3584 | 0 | llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs); |
3585 | 0 | auto *InfoPtr = new llvm::GlobalVariable( |
3586 | 0 | CGM.getModule(), Info->getType(), false, |
3587 | 0 | llvm::GlobalVariable::PrivateLinkage, Info, "", nullptr, |
3588 | 0 | llvm::GlobalVariable::NotThreadLocal, |
3589 | 0 | CGM.getDataLayout().getDefaultGlobalsAddressSpace()); |
3590 | 0 | InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
3591 | 0 | CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr); |
3592 | 0 | Args.push_back(InfoPtr); |
3593 | 0 | ArgTypes.push_back(Args.back()->getType()); |
3594 | 0 | } |
3595 | |
3596 | 0 | for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) { |
3597 | 0 | Args.push_back(EmitCheckValue(DynamicArgs[i])); |
3598 | 0 | ArgTypes.push_back(IntPtrTy); |
3599 | 0 | } |
3600 | 0 | } |
3601 | |
3602 | 0 | llvm::FunctionType *FnType = |
3603 | 0 | llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false); |
3604 | |
3605 | 0 | if (!FatalCond || !RecoverableCond) { |
3606 | | // Simple case: we need to generate a single handler call, either |
3607 | | // fatal, or non-fatal. |
3608 | 0 | emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, |
3609 | 0 | (FatalCond != nullptr), Cont); |
3610 | 0 | } else { |
3611 | |    // Emit two handler calls: the first for the set of unrecoverable
3612 | |    // checks, the second for the recoverable ones.
3613 | 0 | llvm::BasicBlock *NonFatalHandlerBB = |
3614 | 0 | createBasicBlock("non_fatal." + CheckName); |
3615 | 0 | llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName); |
3616 | 0 | Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB); |
3617 | 0 | EmitBlock(FatalHandlerBB); |
3618 | 0 | emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true, |
3619 | 0 | NonFatalHandlerBB); |
3620 | 0 | EmitBlock(NonFatalHandlerBB); |
3621 | 0 | emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false, |
3622 | 0 | Cont); |
3623 | 0 | } |
3624 | |
3625 | 0 | EmitBlock(Cont); |
3626 | 0 | } |
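// Illustration (not part of CGExpr.cpp): the control flow emitted above for a
// single recoverable check looks roughly like
//
//     %ok = ...                          ; the checked condition
//     br i1 %ok, label %cont, label %handler, !prof !{...unlikely...}
//   handler:
//     call void @__ubsan_handle_<check>(ptr @static_data, i64 %arg)
//     br label %cont
//   cont:
//     ...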
3627 | | |
3628 | | void CodeGenFunction::EmitCfiSlowPathCheck( |
3629 | | SanitizerMask Kind, llvm::Value *Cond, llvm::ConstantInt *TypeId, |
3630 | 0 | llvm::Value *Ptr, ArrayRef<llvm::Constant *> StaticArgs) { |
3631 | 0 | llvm::BasicBlock *Cont = createBasicBlock("cfi.cont"); |
3632 | |
3633 | 0 | llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath"); |
3634 | 0 | llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB); |
3635 | |
3636 | 0 | llvm::MDBuilder MDHelper(getLLVMContext()); |
3637 | 0 | llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1); |
3638 | 0 | BI->setMetadata(llvm::LLVMContext::MD_prof, Node); |
3639 | |
3640 | 0 | EmitBlock(CheckBB); |
3641 | |
3642 | 0 | bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Kind); |
3643 | |
3644 | 0 | llvm::CallInst *CheckCall; |
3645 | 0 | llvm::FunctionCallee SlowPathFn; |
3646 | 0 | if (WithDiag) { |
3647 | 0 | llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs); |
3648 | 0 | auto *InfoPtr = |
3649 | 0 | new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false, |
3650 | 0 | llvm::GlobalVariable::PrivateLinkage, Info); |
3651 | 0 | InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
3652 | 0 | CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr); |
3653 | |
3654 | 0 | SlowPathFn = CGM.getModule().getOrInsertFunction( |
3655 | 0 | "__cfi_slowpath_diag", |
3656 | 0 | llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy}, |
3657 | 0 | false)); |
3658 | 0 | CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr, InfoPtr}); |
3659 | 0 | } else { |
3660 | 0 | SlowPathFn = CGM.getModule().getOrInsertFunction( |
3661 | 0 | "__cfi_slowpath", |
3662 | 0 | llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false)); |
3663 | 0 | CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr}); |
3664 | 0 | } |
3665 | |
3666 | 0 | CGM.setDSOLocal( |
3667 | 0 | cast<llvm::GlobalValue>(SlowPathFn.getCallee()->stripPointerCasts())); |
3668 | 0 | CheckCall->setDoesNotThrow(); |
3669 | |
3670 | 0 | EmitBlock(Cont); |
3671 | 0 | } |
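For reference, the two runtime entry points called above have this C-level shape, reconstructed from the FunctionType::get calls in this function (parameter names are illustrative; only the types come from the code):

#include <cstdint>

extern "C" void __cfi_slowpath(uint64_t CallSiteTypeId, void *Ptr);
extern "C" void __cfi_slowpath_diag(uint64_t CallSiteTypeId, void *Ptr,
                                    void *DiagData); // the InfoPtr global above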
3672 | | |
3673 | | // Emit a stub for __cfi_check function so that the linker knows about this |
3674 | | // symbol in LTO mode. |
3675 | 0 | void CodeGenFunction::EmitCfiCheckStub() { |
3676 | 0 | llvm::Module *M = &CGM.getModule(); |
3677 | 0 | auto &Ctx = M->getContext(); |
3678 | 0 | llvm::Function *F = llvm::Function::Create( |
3679 | 0 | llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy}, false), |
3680 | 0 | llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M); |
3681 | 0 | F->setAlignment(llvm::Align(4096)); |
3682 | 0 | CGM.setDSOLocal(F); |
3683 | 0 | llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F); |
3684 | | // The CrossDSOCFI pass is not executed if there is no executable code. |
3685 | 0 | SmallVector<llvm::Value*> Args{F->getArg(2), F->getArg(1)}; |
3686 | 0 | llvm::CallInst::Create(M->getFunction("__cfi_check_fail"), Args, "", BB); |
3687 | 0 | llvm::ReturnInst::Create(Ctx, nullptr, BB); |
3688 | 0 | } |
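A C++-level equivalent of the stub emitted above (a sketch, not compiler output): a weak, page-aligned __cfi_check whose body forwards arguments 2 and 1 to __cfi_check_fail, giving the CrossDSOCFI LTO pass executable code to rewrite.

#include <cstdint>

extern "C" void __cfi_check_fail(void *Data, void *Addr);

extern "C" __attribute__((weak, aligned(4096))) void
__cfi_check(uint64_t CallSiteTypeId, void *Addr, void *Data) {
  __cfi_check_fail(Data, Addr); // Args{F->getArg(2), F->getArg(1)} above
}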
3689 | | |
3690 | | // This function is basically a switch over the CFI failure kind, which is |
3691 | | // extracted from CFICheckFailData (1st function argument). Each case is either |
3692 | | // llvm.trap or a call to one of the two runtime handlers, based on |
3693 | | // -fsanitize-trap and -fsanitize-recover settings. Default case (invalid |
3694 | | // failure kind) traps, but this should really never happen. CFICheckFailData |
3695 | | // can be nullptr if the calling module has -fsanitize-trap behavior for this |
3696 | | // check kind; in this case __cfi_check_fail traps as well. |
3697 | 0 | void CodeGenFunction::EmitCfiCheckFail() { |
3698 | 0 | SanitizerScope SanScope(this); |
3699 | 0 | FunctionArgList Args; |
3700 | 0 | ImplicitParamDecl ArgData(getContext(), getContext().VoidPtrTy, |
3701 | 0 | ImplicitParamKind::Other); |
3702 | 0 | ImplicitParamDecl ArgAddr(getContext(), getContext().VoidPtrTy, |
3703 | 0 | ImplicitParamKind::Other); |
3704 | 0 | Args.push_back(&ArgData); |
3705 | 0 | Args.push_back(&ArgAddr); |
3706 | |
3707 | 0 | const CGFunctionInfo &FI = |
3708 | 0 | CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, Args); |
3709 | |
3710 | 0 | llvm::Function *F = llvm::Function::Create( |
3711 | 0 | llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false), |
3712 | 0 | llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule()); |
3713 | |
3714 | 0 | CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false); |
3715 | 0 | CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F); |
3716 | 0 | F->setVisibility(llvm::GlobalValue::HiddenVisibility); |
3717 | |
3718 | 0 | StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args, |
3719 | 0 | SourceLocation()); |
3720 | | |
3721 | | // This function is not affected by NoSanitizeList. It has no source |
3722 | | // location, but "src:*" would still apply. Revert any changes to |
3723 | | // SanOpts made in StartFunction. |
3724 | 0 | SanOpts = CGM.getLangOpts().Sanitize; |
3725 | |
3726 | 0 | llvm::Value *Data = |
3727 | 0 | EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false, |
3728 | 0 | CGM.getContext().VoidPtrTy, ArgData.getLocation()); |
3729 | 0 | llvm::Value *Addr = |
3730 | 0 | EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false, |
3731 | 0 | CGM.getContext().VoidPtrTy, ArgAddr.getLocation()); |
3732 | | |
3733 | | // Data == nullptr means the calling module has trap behavior for this check. |
3734 | 0 | llvm::Value *DataIsNotNullPtr = |
3735 | 0 | Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy)); |
3736 | 0 | EmitTrapCheck(DataIsNotNullPtr, SanitizerHandler::CFICheckFail); |
3737 | |
3738 | 0 | llvm::StructType *SourceLocationTy = |
3739 | 0 | llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty); |
3740 | 0 | llvm::StructType *CfiCheckFailDataTy = |
3741 | 0 | llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy); |
3742 | |
3743 | 0 | llvm::Value *V = Builder.CreateConstGEP2_32( |
3744 | 0 | CfiCheckFailDataTy, |
3745 | 0 | Builder.CreatePointerCast(Data, CfiCheckFailDataTy->getPointerTo(0)), 0, |
3746 | 0 | 0); |
3747 | |
3748 | 0 | Address CheckKindAddr(V, Int8Ty, getIntAlign()); |
3749 | 0 | llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr); |
3750 | |
3751 | 0 | llvm::Value *AllVtables = llvm::MetadataAsValue::get( |
3752 | 0 | CGM.getLLVMContext(), |
3753 | 0 | llvm::MDString::get(CGM.getLLVMContext(), "all-vtables")); |
3754 | 0 | llvm::Value *ValidVtable = Builder.CreateZExt( |
3755 | 0 | Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test), |
3756 | 0 | {Addr, AllVtables}), |
3757 | 0 | IntPtrTy); |
3758 | |
3759 | 0 | const std::pair<int, SanitizerMask> CheckKinds[] = { |
3760 | 0 | {CFITCK_VCall, SanitizerKind::CFIVCall}, |
3761 | 0 | {CFITCK_NVCall, SanitizerKind::CFINVCall}, |
3762 | 0 | {CFITCK_DerivedCast, SanitizerKind::CFIDerivedCast}, |
3763 | 0 | {CFITCK_UnrelatedCast, SanitizerKind::CFIUnrelatedCast}, |
3764 | 0 | {CFITCK_ICall, SanitizerKind::CFIICall}}; |
3765 | |
3766 | 0 | SmallVector<std::pair<llvm::Value *, SanitizerMask>, 5> Checks; |
3767 | 0 | for (auto CheckKindMaskPair : CheckKinds) { |
3768 | 0 | int Kind = CheckKindMaskPair.first; |
3769 | 0 | SanitizerMask Mask = CheckKindMaskPair.second; |
3770 | 0 | llvm::Value *Cond = |
3771 | 0 | Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind)); |
3772 | 0 | if (CGM.getLangOpts().Sanitize.has(Mask)) |
3773 | 0 | EmitCheck(std::make_pair(Cond, Mask), SanitizerHandler::CFICheckFail, {}, |
3774 | 0 | {Data, Addr, ValidVtable}); |
3775 | 0 | else |
3776 | 0 | EmitTrapCheck(Cond, SanitizerHandler::CFICheckFail); |
3777 | 0 | } |
3778 | |
3779 | 0 | FinishFunction(); |
3780 | | // The only reference to this function will be created during LTO link. |
3781 | | // Make sure it survives until then. |
3782 | 0 | CGM.addUsedGlobal(F); |
3783 | 0 | } |
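The handler reads Data through CfiCheckFailDataTy, which corresponds to roughly this layout (field names are illustrative; the shape is taken from the two StructType::get calls above):

#include <cstdint>

struct SrcLoc {            // SourceLocationTy = {VoidPtrTy, Int32Ty, Int32Ty}
  void *Filename;
  uint32_t Line;
  uint32_t Column;
};

struct CFICheckFailData {  // {Int8Ty, SourceLocationTy, VoidPtrTy}
  uint8_t CheckKind;       // matched against CFITCK_VCall, CFITCK_NVCall, ...
  SrcLoc Loc;
  void *Type;
};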
3784 | | |
3785 | 0 | void CodeGenFunction::EmitUnreachable(SourceLocation Loc) { |
3786 | 0 | if (SanOpts.has(SanitizerKind::Unreachable)) { |
3787 | 0 | SanitizerScope SanScope(this); |
3788 | 0 | EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()), |
3789 | 0 | SanitizerKind::Unreachable), |
3790 | 0 | SanitizerHandler::BuiltinUnreachable, |
3791 | 0 | EmitCheckSourceLocation(Loc), std::nullopt); |
3792 | 0 | } |
3793 | 0 | Builder.CreateUnreachable(); |
3794 | 0 | } |
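A minimal source pattern that lands here (illustrative): with -fsanitize=unreachable the EmitCheck call above guards the terminator; otherwise only the unreachable instruction is emitted.

int classify(int x) {
  if (x >= 0) return 1;
  if (x < 0) return -1;
  __builtin_unreachable(); // lowered through EmitUnreachable
}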
3795 | | |
3796 | | void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked, |
3797 | 0 | SanitizerHandler CheckHandlerID) { |
3798 | 0 | llvm::BasicBlock *Cont = createBasicBlock("cont"); |
3799 | | |
3800 | | // If we're optimizing, collapse all calls to trap down to just one per |
3801 | | // check-type per function to save on code size. |
3802 | 0 | if (TrapBBs.size() <= CheckHandlerID) |
3803 | 0 | TrapBBs.resize(CheckHandlerID + 1); |
3804 | |
3805 | 0 | llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID]; |
3806 | |
3807 | 0 | if (!ClSanitizeDebugDeoptimization && |
3808 | 0 | CGM.getCodeGenOpts().OptimizationLevel && TrapBB && |
3809 | 0 | (!CurCodeDecl || !CurCodeDecl->hasAttr<OptimizeNoneAttr>())) { |
3810 | 0 | auto Call = TrapBB->begin(); |
3811 | 0 | assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB"); |
3812 | | |
3813 | 0 | Call->applyMergedLocation(Call->getDebugLoc(), |
3814 | 0 | Builder.getCurrentDebugLocation()); |
3815 | 0 | Builder.CreateCondBr(Checked, Cont, TrapBB); |
3816 | 0 | } else { |
3817 | 0 | TrapBB = createBasicBlock("trap"); |
3818 | 0 | Builder.CreateCondBr(Checked, Cont, TrapBB); |
3819 | 0 | EmitBlock(TrapBB); |
3820 | |
3821 | 0 | llvm::CallInst *TrapCall = Builder.CreateCall( |
3822 | 0 | CGM.getIntrinsic(llvm::Intrinsic::ubsantrap), |
3823 | 0 | llvm::ConstantInt::get(CGM.Int8Ty, ClSanitizeDebugDeoptimization |
3824 | 0 | ? TrapBB->getParent()->size() |
3825 | 0 | : CheckHandlerID)); |
3826 | |
3827 | 0 | if (!CGM.getCodeGenOpts().TrapFuncName.empty()) { |
3828 | 0 | auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name", |
3829 | 0 | CGM.getCodeGenOpts().TrapFuncName); |
3830 | 0 | TrapCall->addFnAttr(A); |
3831 | 0 | } |
3832 | 0 | TrapCall->setDoesNotReturn(); |
3833 | 0 | TrapCall->setDoesNotThrow(); |
3834 | 0 | Builder.CreateUnreachable(); |
3835 | 0 | } |
3836 | | |
3837 | 0 | EmitBlock(Cont); |
3838 | 0 | } |
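The merging above is observable on code like this (a sketch of the intent, not verified output): at -O1 and higher, both overflow checks in the function branch to one shared trap block per handler ID, carrying a single llvm.ubsantrap call; under -mllvm -ubsan-unique-traps, or in an optnone function, every check gets its own trap block.

// clang -O1 -fsanitize=signed-integer-overflow -fsanitize-trap=all
int sum3(int a, int b, int c) {
  return a + b + c; // two checks, one CheckHandlerID, one shared "trap" BB
}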
3839 | | |
3840 | 0 | llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) { |
3841 | 0 | llvm::CallInst *TrapCall = |
3842 | 0 | Builder.CreateCall(CGM.getIntrinsic(IntrID)); |
3843 | |
3844 | 0 | if (!CGM.getCodeGenOpts().TrapFuncName.empty()) { |
3845 | 0 | auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name", |
3846 | 0 | CGM.getCodeGenOpts().TrapFuncName); |
3847 | 0 | TrapCall->addFnAttr(A); |
3848 | 0 | } |
3849 | |
3850 | 0 | return TrapCall; |
3851 | 0 | } |
3852 | | |
3853 | | Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E, |
3854 | | LValueBaseInfo *BaseInfo, |
3855 | 0 | TBAAAccessInfo *TBAAInfo) { |
3856 | 0 | assert(E->getType()->isArrayType() && |
3857 | 0 | "Array to pointer decay must have array source type!"); |
3858 | | |
3859 | | // Expressions of array type can't be bitfields or vector elements. |
3860 | 0 | LValue LV = EmitLValue(E); |
3861 | 0 | Address Addr = LV.getAddress(*this); |
3862 | | |
3863 | | // If the array type was an incomplete type, we need to make sure |
3864 | | // the decay ends up being the right type. |
3865 | 0 | llvm::Type *NewTy = ConvertType(E->getType()); |
3866 | 0 | Addr = Addr.withElementType(NewTy); |
3867 | | |
3868 | | // Note that VLA pointers are always decayed, so we don't need to do |
3869 | | // anything here. |
3870 | 0 | if (!E->getType()->isVariableArrayType()) { |
3871 | 0 | assert(isa<llvm::ArrayType>(Addr.getElementType()) && |
3872 | 0 | "Expected pointer to array"); |
3873 | 0 | Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay"); |
3874 | 0 | } |
3875 | | |
3876 | | // The result of this decay conversion points to an array element within the |
3877 | | // base lvalue. However, since TBAA currently does not support representing |
3878 | | // accesses to elements of member arrays, we conservatively represent accesses |
3879 | | // to the pointee object as if it had no base lvalue specified. |
3880 | | // TODO: Support TBAA for member arrays. |
3881 | 0 | QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType(); |
3882 | 0 | if (BaseInfo) *BaseInfo = LV.getBaseInfo(); |
3883 | 0 | if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType); |
3884 | |
3885 | 0 | return Addr.withElementType(ConvertTypeForMem(EltType)); |
3886 | 0 | } |
3887 | | |
3888 | | /// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an |
3889 | | /// array to pointer, return the array subexpression. |
3890 | 0 | static const Expr *isSimpleArrayDecayOperand(const Expr *E) { |
3891 | | // If this isn't just an array->pointer decay, bail out. |
3892 | 0 | const auto *CE = dyn_cast<CastExpr>(E); |
3893 | 0 | if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay) |
3894 | 0 | return nullptr; |
3895 | | |
3896 | | // If this is a decay from variable width array, bail out. |
3897 | 0 | const Expr *SubExpr = CE->getSubExpr(); |
3898 | 0 | if (SubExpr->getType()->isVariableArrayType()) |
3899 | 0 | return nullptr; |
3900 | | |
3901 | 0 | return SubExpr; |
3902 | 0 | } |
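The two cases this helper distinguishes, in source terms (illustrative):

int A[10];
int fixed(int i) { return A[i]; } // fixed-size decay: returns the 'A' subexpr

int vla(int n, int i) {
  int V[n];
  V[i] = 1;
  return V[i];                    // variable-width decay: returns nullptr
}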
3903 | | |
3904 | | static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF, |
3905 | | llvm::Type *elemType, |
3906 | | llvm::Value *ptr, |
3907 | | ArrayRef<llvm::Value*> indices, |
3908 | | bool inbounds, |
3909 | | bool signedIndices, |
3910 | | SourceLocation loc, |
3911 | 0 | const llvm::Twine &name = "arrayidx") { |
3912 | 0 | if (inbounds) { |
3913 | 0 | return CGF.EmitCheckedInBoundsGEP(elemType, ptr, indices, signedIndices, |
3914 | 0 | CodeGenFunction::NotSubtraction, loc, |
3915 | 0 | name); |
3916 | 0 | } else { |
3917 | 0 | return CGF.Builder.CreateGEP(elemType, ptr, indices, name); |
3918 | 0 | } |
3919 | 0 | } |
3920 | | |
3921 | | static CharUnits getArrayElementAlign(CharUnits arrayAlign, |
3922 | | llvm::Value *idx, |
3923 | 0 | CharUnits eltSize) { |
3924 | | // If we have a constant index, we can use the exact offset of the |
3925 | | // element we're accessing. |
3926 | 0 | if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) { |
3927 | 0 | CharUnits offset = constantIdx->getZExtValue() * eltSize; |
3928 | 0 | return arrayAlign.alignmentAtOffset(offset); |
3929 | | |
3930 | | // Otherwise, use the worst-case alignment for any element. |
3931 | 0 | } else { |
3932 | 0 | return arrayAlign.alignmentOfArrayElement(eltSize); |
3933 | 0 | } |
3934 | 0 | } |
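Worked example (arithmetic only): for an array aligned to 16 bytes with 4-byte elements, a constant index of 2 gives offset 2 * 4 = 8, and alignmentAtOffset yields 8-byte alignment; for a non-constant index, alignmentOfArrayElement falls back to the worst case over all elements, here 4 bytes.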
3935 | | |
3936 | | static QualType getFixedSizeElementType(const ASTContext &ctx, |
3937 | 0 | const VariableArrayType *vla) { |
3938 | 0 | QualType eltType; |
3939 | 0 | do { |
3940 | 0 | eltType = vla->getElementType(); |
3941 | 0 | } while ((vla = ctx.getAsVariableArrayType(eltType))); |
3942 | 0 | return eltType; |
3943 | 0 | } |
3944 | | |
3945 | 0 | static bool hasBPFPreserveStaticOffset(const RecordDecl *D) { |
3946 | 0 | return D && D->hasAttr<BPFPreserveStaticOffsetAttr>(); |
3947 | 0 | } |
3948 | | |
3949 | 0 | static bool hasBPFPreserveStaticOffset(const Expr *E) { |
3950 | 0 | if (!E) |
3951 | 0 | return false; |
3952 | 0 | QualType PointeeType = E->getType()->getPointeeType(); |
3953 | 0 | if (PointeeType.isNull()) |
3954 | 0 | return false; |
3955 | 0 | if (const auto *BaseDecl = PointeeType->getAsRecordDecl()) |
3956 | 0 | return hasBPFPreserveStaticOffset(BaseDecl); |
3957 | 0 | return false; |
3958 | 0 | } |
3959 | | |
3960 | | // Wraps Addr with a call to llvm.preserve.static.offset intrinsic. |
3961 | | static Address wrapWithBPFPreserveStaticOffset(CodeGenFunction &CGF, |
3962 | 0 | Address &Addr) { |
3963 | 0 | if (!CGF.getTarget().getTriple().isBPF()) |
3964 | 0 | return Addr; |
3965 | | |
3966 | 0 | llvm::Function *Fn = |
3967 | 0 | CGF.CGM.getIntrinsic(llvm::Intrinsic::preserve_static_offset); |
3968 | 0 | llvm::CallInst *Call = CGF.Builder.CreateCall(Fn, {Addr.getPointer()}); |
3969 | 0 | return Address(Call, Addr.getElementType(), Addr.getAlignment()); |
3970 | 0 | } |
3971 | | |
3972 | | /// Given an array base, check whether its member access belongs to a record |
3973 | | /// with preserve_access_index attribute or not. |
3974 | 0 | static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) { |
3975 | 0 | if (!ArrayBase || !CGF.getDebugInfo()) |
3976 | 0 | return false; |
3977 | | |
3978 | | // Only support base as either a MemberExpr or DeclRefExpr. |
3979 | | // DeclRefExpr to cover cases like: |
3980 | | // struct s { int a; int b[10]; }; |
3981 | | // struct s *p; |
3982 | | // p[1].a |
3983 | | // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr. |
3984 | | // p->b[5] is a MemberExpr example. |
3985 | 0 | const Expr *E = ArrayBase->IgnoreImpCasts(); |
3986 | 0 | if (const auto *ME = dyn_cast<MemberExpr>(E)) |
3987 | 0 | return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>(); |
3988 | | |
3989 | 0 | if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) { |
3990 | 0 | const auto *VarDef = dyn_cast<VarDecl>(DRE->getDecl()); |
3991 | 0 | if (!VarDef) |
3992 | 0 | return false; |
3993 | | |
3994 | 0 | const auto *PtrT = VarDef->getType()->getAs<PointerType>(); |
3995 | 0 | if (!PtrT) |
3996 | 0 | return false; |
3997 | | |
3998 | 0 | const auto *PointeeT = PtrT->getPointeeType() |
3999 | 0 | ->getUnqualifiedDesugaredType(); |
4000 | 0 | if (const auto *RecT = dyn_cast<RecordType>(PointeeT)) |
4001 | 0 | return RecT->getDecl()->hasAttr<BPFPreserveAccessIndexAttr>(); |
4002 | 0 | return false; |
4003 | 0 | } |
4004 | | |
4005 | 0 | return false; |
4006 | 0 | } |
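Source shapes this predicate accepts, assuming the record carries the BPF preserve_access_index attribute (an illustrative CO-RE sketch):

struct __attribute__((preserve_access_index)) S {
  int a;
  int b[10];
};

int f(struct S *p) { return p->b[5]; } // MemberExpr base
int g(struct S *p) { return p[1].a; }  // DeclRefExpr base, as in the comment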
4007 | | |
4008 | | static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr, |
4009 | | ArrayRef<llvm::Value *> indices, |
4010 | | QualType eltType, bool inbounds, |
4011 | | bool signedIndices, SourceLocation loc, |
4012 | | QualType *arrayType = nullptr, |
4013 | | const Expr *Base = nullptr, |
4014 | 0 | const llvm::Twine &name = "arrayidx") { |
4015 | | // All the indices except the last must be zero. |
4016 | 0 | #ifndef NDEBUG |
4017 | 0 | for (auto *idx : indices.drop_back()) |
4018 | 0 | assert(isa<llvm::ConstantInt>(idx) && |
4019 | 0 | cast<llvm::ConstantInt>(idx)->isZero()); |
4020 | 0 | #endif |
4021 | | |
4022 | | // Determine the element size of the statically-sized base. This is |
4023 | | // the thing that the indices are expressed in terms of. |
4024 | 0 | if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) { |
4025 | 0 | eltType = getFixedSizeElementType(CGF.getContext(), vla); |
4026 | 0 | } |
4027 | | |
4028 | | // We can use that to compute the best alignment of the element. |
4029 | 0 | CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType); |
4030 | 0 | CharUnits eltAlign = |
4031 | 0 | getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize); |
4032 | |
4033 | 0 | if (hasBPFPreserveStaticOffset(Base)) |
4034 | 0 | addr = wrapWithBPFPreserveStaticOffset(CGF, addr); |
4035 | |
4036 | 0 | llvm::Value *eltPtr; |
4037 | 0 | auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back()); |
4038 | 0 | if (!LastIndex || |
4039 | 0 | (!CGF.IsInPreservedAIRegion && !IsPreserveAIArrayBase(CGF, Base))) { |
4040 | 0 | eltPtr = emitArraySubscriptGEP( |
4041 | 0 | CGF, addr.getElementType(), addr.getPointer(), indices, inbounds, |
4042 | 0 | signedIndices, loc, name); |
4043 | 0 | } else { |
4044 | | // Remember the original array subscript for bpf target |
4045 | 0 | unsigned idx = LastIndex->getZExtValue(); |
4046 | 0 | llvm::DIType *DbgInfo = nullptr; |
4047 | 0 | if (arrayType) |
4048 | 0 | DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc); |
4049 | 0 | eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(addr.getElementType(), |
4050 | 0 | addr.getPointer(), |
4051 | 0 | indices.size() - 1, |
4052 | 0 | idx, DbgInfo); |
4053 | 0 | } |
4054 | |
4055 | 0 | return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign); |
4056 | 0 | } |
4057 | | |
4058 | | /// The offset of a field from the beginning of the record. |
4059 | | static bool getFieldOffsetInBits(CodeGenFunction &CGF, const RecordDecl *RD, |
4060 | 0 | const FieldDecl *FD, int64_t &Offset) { |
4061 | 0 | ASTContext &Ctx = CGF.getContext(); |
4062 | 0 | const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD); |
4063 | 0 | unsigned FieldNo = 0; |
4064 | |
4065 | 0 | for (const Decl *D : RD->decls()) { |
4066 | 0 | if (const auto *Record = dyn_cast<RecordDecl>(D)) |
4067 | 0 | if (getFieldOffsetInBits(CGF, Record, FD, Offset)) { |
4068 | 0 | Offset += Layout.getFieldOffset(FieldNo); |
4069 | 0 | return true; |
4070 | 0 | } |
4071 | | |
4072 | 0 | if (const auto *Field = dyn_cast<FieldDecl>(D)) |
4073 | 0 | if (FD == Field) { |
4074 | 0 | Offset += Layout.getFieldOffset(FieldNo); |
4075 | 0 | return true; |
4076 | 0 | } |
4077 | | |
4078 | 0 | if (isa<FieldDecl>(D)) |
4079 | 0 | ++FieldNo; |
4080 | 0 | } |
4081 | | |
4082 | 0 | return false; |
4083 | 0 | } |
4084 | | |
4085 | | /// Returns the relative offset difference between \p FD1 and \p FD2. |
4086 | | /// \code |
4087 | | /// offsetof(struct foo, FD1) - offsetof(struct foo, FD2) |
4088 | | /// \endcode |
4089 | | /// Both fields must be within the same struct. |
4090 | | static std::optional<int64_t> getOffsetDifferenceInBits(CodeGenFunction &CGF, |
4091 | | const FieldDecl *FD1, |
4092 | 0 | const FieldDecl *FD2) { |
4093 | 0 | const RecordDecl *FD1OuterRec = |
4094 | 0 | FD1->getParent()->getOuterLexicalRecordContext(); |
4095 | 0 | const RecordDecl *FD2OuterRec = |
4096 | 0 | FD2->getParent()->getOuterLexicalRecordContext(); |
4097 | |
4098 | 0 | if (FD1OuterRec != FD2OuterRec) |
4099 | | // Fields must be within the same RecordDecl. |
4100 | 0 | return std::optional<int64_t>(); |
4101 | | |
4102 | 0 | int64_t FD1Offset = 0; |
4103 | 0 | if (!getFieldOffsetInBits(CGF, FD1OuterRec, FD1, FD1Offset)) |
4104 | 0 | return std::optional<int64_t>(); |
4105 | | |
4106 | 0 | int64_t FD2Offset = 0; |
4107 | 0 | if (!getFieldOffsetInBits(CGF, FD2OuterRec, FD2, FD2Offset)) |
4108 | 0 | return std::optional<int64_t>(); |
4109 | | |
4110 | 0 | return std::make_optional<int64_t>(FD1Offset - FD2Offset); |
4111 | 0 | } |
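Concretely (pure arithmetic on the example below): with 4-byte int, count sits at bit offset 0 and fam at bit offset 32, so getOffsetDifferenceInBits(CGF, count, fam) returns 0 - 32 = -32.

struct blob {
  int count; // bit offset 0
  int fam[]; // bit offset 32
};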
4112 | | |
4113 | | LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E, |
4114 | 0 | bool Accessed) { |
4115 | | // The index must always be an integer, which is not an aggregate. Emit it |
4116 | | // in lexical order (this complexity is, sadly, required by C++17). |
4117 | 0 | llvm::Value *IdxPre = |
4118 | 0 | (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr; |
4119 | 0 | bool SignedIndices = false; |
4120 | 0 | auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * { |
4121 | 0 | auto *Idx = IdxPre; |
4122 | 0 | if (E->getLHS() != E->getIdx()) { |
4123 | 0 | assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS"); |
4124 | 0 | Idx = EmitScalarExpr(E->getIdx()); |
4125 | 0 | } |
4126 | | |
4127 | 0 | QualType IdxTy = E->getIdx()->getType(); |
4128 | 0 | bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType(); |
4129 | 0 | SignedIndices |= IdxSigned; |
4130 | |
4131 | 0 | if (SanOpts.has(SanitizerKind::ArrayBounds)) |
4132 | 0 | EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed); |
4133 | | |
4134 | | // Extend or truncate the index type to 32 or 64-bits. |
4135 | 0 | if (Promote && Idx->getType() != IntPtrTy) |
4136 | 0 | Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom"); |
4137 | |
4138 | 0 | return Idx; |
4139 | 0 | }; |
4140 | 0 | IdxPre = nullptr; |
4141 | | |
4142 | | // If the base is a vector type, then we are forming a vector element lvalue |
4143 | | // with this subscript. |
4144 | 0 | if (E->getBase()->getType()->isVectorType() && |
4145 | 0 | !isa<ExtVectorElementExpr>(E->getBase())) { |
4146 | | // Emit the vector as an lvalue to get its address. |
4147 | 0 | LValue LHS = EmitLValue(E->getBase()); |
4148 | 0 | auto *Idx = EmitIdxAfterBase(/*Promote*/false); |
4149 | 0 | assert(LHS.isSimple() && "Can only subscript lvalue vectors here!"); |
4150 | 0 | return LValue::MakeVectorElt(LHS.getAddress(*this), Idx, |
4151 | 0 | E->getBase()->getType(), LHS.getBaseInfo(), |
4152 | 0 | TBAAAccessInfo()); |
4153 | 0 | } |
4154 | | |
4155 | | // All the other cases basically behave like simple offsetting. |
4156 | | |
4157 | | // Handle the extvector case we ignored above. |
4158 | 0 | if (isa<ExtVectorElementExpr>(E->getBase())) { |
4159 | 0 | LValue LV = EmitLValue(E->getBase()); |
4160 | 0 | auto *Idx = EmitIdxAfterBase(/*Promote*/true); |
4161 | 0 | Address Addr = EmitExtVectorElementLValue(LV); |
4162 | |
4163 | 0 | QualType EltType = LV.getType()->castAs<VectorType>()->getElementType(); |
4164 | 0 | Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true, |
4165 | 0 | SignedIndices, E->getExprLoc()); |
4166 | 0 | return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(), |
4167 | 0 | CGM.getTBAAInfoForSubobject(LV, EltType)); |
4168 | 0 | } |
4169 | | |
4170 | 0 | LValueBaseInfo EltBaseInfo; |
4171 | 0 | TBAAAccessInfo EltTBAAInfo; |
4172 | 0 | Address Addr = Address::invalid(); |
4173 | 0 | if (const VariableArrayType *vla = |
4174 | 0 | getContext().getAsVariableArrayType(E->getType())) { |
4175 | | // The base must be a pointer, which is not an aggregate. Emit |
4176 | | // it. It needs to be emitted first in case it's what captures |
4177 | | // the VLA bounds. |
4178 | 0 | Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo); |
4179 | 0 | auto *Idx = EmitIdxAfterBase(/*Promote*/true); |
4180 | | |
4181 | | // The element count here is the total number of non-VLA elements. |
4182 | 0 | llvm::Value *numElements = getVLASize(vla).NumElts; |
4183 | | |
4184 | | // Effectively, the multiply by the VLA size is part of the GEP. |
4185 | | // GEP indexes are signed, and scaling an index isn't permitted to |
4186 | | // signed-overflow, so we use the same semantics for our explicit |
4187 | | // multiply. We suppress this if overflow is not undefined behavior. |
4188 | 0 | if (getLangOpts().isSignedOverflowDefined()) { |
4189 | 0 | Idx = Builder.CreateMul(Idx, numElements); |
4190 | 0 | } else { |
4191 | 0 | Idx = Builder.CreateNSWMul(Idx, numElements); |
4192 | 0 | } |
4193 | |
4194 | 0 | Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(), |
4195 | 0 | !getLangOpts().isSignedOverflowDefined(), |
4196 | 0 | SignedIndices, E->getExprLoc()); |
4197 | |
4198 | 0 | } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){ |
4199 | | // Indexing over an interface, as in "NSString *P; P[4];" |
4200 | | |
4201 | | // Emit the base pointer. |
4202 | 0 | Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo); |
4203 | 0 | auto *Idx = EmitIdxAfterBase(/*Promote*/true); |
4204 | |
4205 | 0 | CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT); |
4206 | 0 | llvm::Value *InterfaceSizeVal = |
4207 | 0 | llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity()); |
4208 | |
4209 | 0 | llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal); |
4210 | | |
4211 | | // We don't necessarily build correct LLVM struct types for ObjC |
4212 | | // interfaces, so we can't rely on GEP to do this scaling |
4213 | | // correctly; instead we cast to i8*. FIXME: is this actually |
4214 | | // true? A lot of other things in the fragile ABI would break... |
4215 | 0 | llvm::Type *OrigBaseElemTy = Addr.getElementType(); |
4216 | | |
4217 | | // Do the GEP. |
4218 | 0 | CharUnits EltAlign = |
4219 | 0 | getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize); |
4220 | 0 | llvm::Value *EltPtr = |
4221 | 0 | emitArraySubscriptGEP(*this, Int8Ty, Addr.getPointer(), ScaledIdx, |
4222 | 0 | false, SignedIndices, E->getExprLoc()); |
4223 | 0 | Addr = Address(EltPtr, OrigBaseElemTy, EltAlign); |
4224 | 0 | } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) { |
4225 | | // If this is A[i] where A is an array, the frontend will have decayed the |
4226 | | // base to be an ArrayToPointerDecay implicit cast. While correct, it is |
4227 | | // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a |
4228 | | // "gep x, i" here. Emit one "gep A, 0, i". |
4229 | 0 | assert(Array->getType()->isArrayType() && |
4230 | 0 | "Array to pointer decay must have array source type!"); |
4231 | 0 | LValue ArrayLV; |
4232 | | // For simple multidimensional array indexing, set the 'accessed' flag for |
4233 | | // better bounds-checking of the base expression. |
4234 | 0 | if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array)) |
4235 | 0 | ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true); |
4236 | 0 | else |
4237 | 0 | ArrayLV = EmitLValue(Array); |
4238 | 0 | auto *Idx = EmitIdxAfterBase(/*Promote*/true); |
4239 | |
4240 | 0 | if (SanOpts.has(SanitizerKind::ArrayBounds)) { |
4241 | | // If the array being accessed has a "counted_by" attribute, generate |
4242 | | // bounds checking code. The "count" field is at the top level of the |
4243 | | // struct or in an anonymous struct, that's also at the top level. Future |
4244 | | // expansions may allow the "count" to reside at any place in the struct, |
4245 | | // but the value of "counted_by" will be a "simple" path to the count, |
4246 | | // i.e. "a.b.count", so we shouldn't need the full force of EmitLValue or |
4247 | | // similar to emit the correct GEP. |
4248 | 0 | const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel = |
4249 | 0 | getLangOpts().getStrictFlexArraysLevel(); |
4250 | |
4251 | 0 | if (const auto *ME = dyn_cast<MemberExpr>(Array); |
4252 | 0 | ME && |
4253 | 0 | ME->isFlexibleArrayMemberLike(getContext(), StrictFlexArraysLevel) && |
4254 | 0 | ME->getMemberDecl()->hasAttr<CountedByAttr>()) { |
4255 | 0 | const FieldDecl *FAMDecl = dyn_cast<FieldDecl>(ME->getMemberDecl()); |
4256 | 0 | if (const FieldDecl *CountFD = FindCountedByField(FAMDecl)) { |
4257 | 0 | if (std::optional<int64_t> Diff = |
4258 | 0 | getOffsetDifferenceInBits(*this, CountFD, FAMDecl)) { |
4259 | 0 | CharUnits OffsetDiff = CGM.getContext().toCharUnitsFromBits(*Diff); |
4260 | | |
4261 | | // Create a GEP with a byte offset between the FAM and count and |
4262 | | // use that to load the count value. |
4263 | 0 | Addr = Builder.CreatePointerBitCastOrAddrSpaceCast( |
4264 | 0 | ArrayLV.getAddress(*this), Int8PtrTy, Int8Ty); |
4265 | |
4266 | 0 | llvm::Type *CountTy = ConvertType(CountFD->getType()); |
4267 | 0 | llvm::Value *Res = Builder.CreateInBoundsGEP( |
4268 | 0 | Int8Ty, Addr.getPointer(), |
4269 | 0 | Builder.getInt32(OffsetDiff.getQuantity()), ".counted_by.gep"); |
4270 | 0 | Res = Builder.CreateAlignedLoad(CountTy, Res, getIntAlign(), |
4271 | 0 | ".counted_by.load"); |
4272 | | |
4273 | | // Now emit the bounds checking. |
4274 | 0 | EmitBoundsCheckImpl(E, Res, Idx, E->getIdx()->getType(), |
4275 | 0 | Array->getType(), Accessed); |
4276 | 0 | } |
4277 | 0 | } |
4278 | 0 | } |
4279 | 0 | } |
4280 | | |
4281 | | // Propagate the alignment from the array itself to the result. |
4282 | 0 | QualType arrayType = Array->getType(); |
4283 | 0 | Addr = emitArraySubscriptGEP( |
4284 | 0 | *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx}, |
4285 | 0 | E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices, |
4286 | 0 | E->getExprLoc(), &arrayType, E->getBase()); |
4287 | 0 | EltBaseInfo = ArrayLV.getBaseInfo(); |
4288 | 0 | EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType()); |
4289 | 0 | } else { |
4290 | | // The base must be a pointer; emit it with an estimate of its alignment. |
4291 | 0 | Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo); |
4292 | 0 | auto *Idx = EmitIdxAfterBase(/*Promote*/true); |
4293 | 0 | QualType ptrType = E->getBase()->getType(); |
4294 | 0 | Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(), |
4295 | 0 | !getLangOpts().isSignedOverflowDefined(), |
4296 | 0 | SignedIndices, E->getExprLoc(), &ptrType, |
4297 | 0 | E->getBase()); |
4298 | 0 | } |
4299 | | |
4300 | 0 | LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo); |
4301 | |
4302 | 0 | if (getLangOpts().ObjC && |
4303 | 0 | getLangOpts().getGC() != LangOptions::NonGC) { |
4304 | 0 | LV.setNonGC(!E->isOBJCGCCandidate(getContext())); |
4305 | 0 | setObjCGCLValueClass(getContext(), E, LV); |
4306 | 0 | } |
4307 | 0 | return LV; |
4308 | 0 | } |
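The counted_by branch above targets flexible array members annotated as below (attribute spelling per the CountedByAttr referenced in this function; a sketch that also assumes -fsanitize=array-bounds and a strict-flex-arrays level under which the member qualifies):

struct buf {
  int count;
  char data[] __attribute__((counted_by(count)));
};

char get(struct buf *b, int i) {
  return b->data[i]; // i is checked against the count loaded via the
                     // ".counted_by.gep" / ".counted_by.load" pair above
}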
4309 | | |
4310 | 0 | LValue CodeGenFunction::EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E) { |
4311 | 0 | assert( |
4312 | 0 | !E->isIncomplete() && |
4313 | 0 | "incomplete matrix subscript expressions should be rejected during Sema"); |
4314 | 0 | LValue Base = EmitLValue(E->getBase()); |
4315 | 0 | llvm::Value *RowIdx = EmitScalarExpr(E->getRowIdx()); |
4316 | 0 | llvm::Value *ColIdx = EmitScalarExpr(E->getColumnIdx()); |
4317 | 0 | llvm::Value *NumRows = Builder.getIntN( |
4318 | 0 | RowIdx->getType()->getScalarSizeInBits(), |
4319 | 0 | E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows()); |
4320 | 0 | llvm::Value *FinalIdx = |
4321 | 0 | Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx); |
4322 | 0 | return LValue::MakeMatrixElt( |
4323 | 0 | MaybeConvertMatrixAddress(Base.getAddress(*this), *this), FinalIdx, |
4324 | 0 | E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo()); |
4325 | 0 | } |
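The index math above reflects column-major storage: element (row, col) lives at col * NumRows + row in the flattened value. Illustrative use of the matrix extension (compile with -fenable-matrix):

using m4x2 = float __attribute__((matrix_type(4, 2))); // 4 rows, 2 columns

float pick(m4x2 M) {
  return M[3][1]; // FinalIdx = ColIdx * NumRows + RowIdx = 1 * 4 + 3 = 7
}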
4326 | | |
4327 | | static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base, |
4328 | | LValueBaseInfo &BaseInfo, |
4329 | | TBAAAccessInfo &TBAAInfo, |
4330 | | QualType BaseTy, QualType ElTy, |
4331 | 0 | bool IsLowerBound) { |
4332 | 0 | LValue BaseLVal; |
4333 | 0 | if (auto *ASE = dyn_cast<OMPArraySectionExpr>(Base->IgnoreParenImpCasts())) { |
4334 | 0 | BaseLVal = CGF.EmitOMPArraySectionExpr(ASE, IsLowerBound); |
4335 | 0 | if (BaseTy->isArrayType()) { |
4336 | 0 | Address Addr = BaseLVal.getAddress(CGF); |
4337 | 0 | BaseInfo = BaseLVal.getBaseInfo(); |
4338 | | |
4339 | | // If the array type was an incomplete type, we need to make sure |
4340 | | // the decay ends up being the right type. |
4341 | 0 | llvm::Type *NewTy = CGF.ConvertType(BaseTy); |
4342 | 0 | Addr = Addr.withElementType(NewTy); |
4343 | | |
4344 | | // Note that VLA pointers are always decayed, so we don't need to do |
4345 | | // anything here. |
4346 | 0 | if (!BaseTy->isVariableArrayType()) { |
4347 | 0 | assert(isa<llvm::ArrayType>(Addr.getElementType()) && |
4348 | 0 | "Expected pointer to array"); |
4349 | 0 | Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay"); |
4350 | 0 | } |
4351 | | |
4352 | 0 | return Addr.withElementType(CGF.ConvertTypeForMem(ElTy)); |
4353 | 0 | } |
4354 | 0 | LValueBaseInfo TypeBaseInfo; |
4355 | 0 | TBAAAccessInfo TypeTBAAInfo; |
4356 | 0 | CharUnits Align = |
4357 | 0 | CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo); |
4358 | 0 | BaseInfo.mergeForCast(TypeBaseInfo); |
4359 | 0 | TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo); |
4360 | 0 | return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress(CGF)), |
4361 | 0 | CGF.ConvertTypeForMem(ElTy), Align); |
4362 | 0 | } |
4363 | 0 | return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo); |
4364 | 0 | } |
4365 | | |
4366 | | LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E, |
4367 | 0 | bool IsLowerBound) { |
4368 | 0 | QualType BaseTy = OMPArraySectionExpr::getBaseOriginalType(E->getBase()); |
4369 | 0 | QualType ResultExprTy; |
4370 | 0 | if (auto *AT = getContext().getAsArrayType(BaseTy)) |
4371 | 0 | ResultExprTy = AT->getElementType(); |
4372 | 0 | else |
4373 | 0 | ResultExprTy = BaseTy->getPointeeType(); |
4374 | 0 | llvm::Value *Idx = nullptr; |
4375 | 0 | if (IsLowerBound || E->getColonLocFirst().isInvalid()) { |
4376 | | // Requesting the lower or upper bound without a provided length and |
4377 | | // without a ':' for the default length -> length = 1. |
4378 | | // Idx = LowerBound ?: 0; |
4379 | 0 | if (auto *LowerBound = E->getLowerBound()) { |
4380 | 0 | Idx = Builder.CreateIntCast( |
4381 | 0 | EmitScalarExpr(LowerBound), IntPtrTy, |
4382 | 0 | LowerBound->getType()->hasSignedIntegerRepresentation()); |
4383 | 0 | } else |
4384 | 0 | Idx = llvm::ConstantInt::getNullValue(IntPtrTy); |
4385 | 0 | } else { |
4386 | | // Try to emit length or lower bound as constant. If this is possible, 1 |
4387 | | // is subtracted from constant length or lower bound. Otherwise, emit LLVM |
4388 | | // IR (LB + Len) - 1. |
4389 | 0 | auto &C = CGM.getContext(); |
4390 | 0 | auto *Length = E->getLength(); |
4391 | 0 | llvm::APSInt ConstLength; |
4392 | 0 | if (Length) { |
4393 | | // Idx = LowerBound + Length - 1; |
4394 | 0 | if (std::optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(C)) { |
4395 | 0 | ConstLength = CL->zextOrTrunc(PointerWidthInBits); |
4396 | 0 | Length = nullptr; |
4397 | 0 | } |
4398 | 0 | auto *LowerBound = E->getLowerBound(); |
4399 | 0 | llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false); |
4400 | 0 | if (LowerBound) { |
4401 | 0 | if (std::optional<llvm::APSInt> LB = |
4402 | 0 | LowerBound->getIntegerConstantExpr(C)) { |
4403 | 0 | ConstLowerBound = LB->zextOrTrunc(PointerWidthInBits); |
4404 | 0 | LowerBound = nullptr; |
4405 | 0 | } |
4406 | 0 | } |
4407 | 0 | if (!Length) |
4408 | 0 | --ConstLength; |
4409 | 0 | else if (!LowerBound) |
4410 | 0 | --ConstLowerBound; |
4411 | |
4412 | 0 | if (Length || LowerBound) { |
4413 | 0 | auto *LowerBoundVal = |
4414 | 0 | LowerBound |
4415 | 0 | ? Builder.CreateIntCast( |
4416 | 0 | EmitScalarExpr(LowerBound), IntPtrTy, |
4417 | 0 | LowerBound->getType()->hasSignedIntegerRepresentation()) |
4418 | 0 | : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound); |
4419 | 0 | auto *LengthVal = |
4420 | 0 | Length |
4421 | 0 | ? Builder.CreateIntCast( |
4422 | 0 | EmitScalarExpr(Length), IntPtrTy, |
4423 | 0 | Length->getType()->hasSignedIntegerRepresentation()) |
4424 | 0 | : llvm::ConstantInt::get(IntPtrTy, ConstLength); |
4425 | 0 | Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len", |
4426 | 0 | /*HasNUW=*/false, |
4427 | 0 | !getLangOpts().isSignedOverflowDefined()); |
4428 | 0 | if (Length && LowerBound) { |
4429 | 0 | Idx = Builder.CreateSub( |
4430 | 0 | Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1", |
4431 | 0 | /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined()); |
4432 | 0 | } |
4433 | 0 | } else |
4434 | 0 | Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound); |
4435 | 0 | } else { |
4436 | | // Idx = ArraySize - 1; |
4437 | 0 | QualType ArrayTy = BaseTy->isPointerType() |
4438 | 0 | ? E->getBase()->IgnoreParenImpCasts()->getType() |
4439 | 0 | : BaseTy; |
4440 | 0 | if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) { |
4441 | 0 | Length = VAT->getSizeExpr(); |
4442 | 0 | if (std::optional<llvm::APSInt> L = Length->getIntegerConstantExpr(C)) { |
4443 | 0 | ConstLength = *L; |
4444 | 0 | Length = nullptr; |
4445 | 0 | } |
4446 | 0 | } else { |
4447 | 0 | auto *CAT = C.getAsConstantArrayType(ArrayTy); |
4448 | 0 | assert(CAT && "unexpected type for array initializer"); |
4449 | 0 | ConstLength = CAT->getSize(); |
4450 | 0 | } |
4451 | 0 | if (Length) { |
4452 | 0 | auto *LengthVal = Builder.CreateIntCast( |
4453 | 0 | EmitScalarExpr(Length), IntPtrTy, |
4454 | 0 | Length->getType()->hasSignedIntegerRepresentation()); |
4455 | 0 | Idx = Builder.CreateSub( |
4456 | 0 | LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1", |
4457 | 0 | /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined()); |
4458 | 0 | } else { |
4459 | 0 | ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits); |
4460 | 0 | --ConstLength; |
4461 | 0 | Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength); |
4462 | 0 | } |
4463 | 0 | } |
4464 | 0 | } |
4465 | 0 | assert(Idx); |
4466 | | |
4467 | 0 | Address EltPtr = Address::invalid(); |
4468 | 0 | LValueBaseInfo BaseInfo; |
4469 | 0 | TBAAAccessInfo TBAAInfo; |
4470 | 0 | if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) { |
4471 | | // The base must be a pointer, which is not an aggregate. Emit |
4472 | | // it. It needs to be emitted first in case it's what captures |
4473 | | // the VLA bounds. |
4474 | 0 | Address Base = |
4475 | 0 | emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo, |
4476 | 0 | BaseTy, VLA->getElementType(), IsLowerBound); |
4477 | | // The element count here is the total number of non-VLA elements. |
4478 | 0 | llvm::Value *NumElements = getVLASize(VLA).NumElts; |
4479 | | |
4480 | | // Effectively, the multiply by the VLA size is part of the GEP. |
4481 | | // GEP indexes are signed, and scaling an index isn't permitted to |
4482 | | // signed-overflow, so we use the same semantics for our explicit |
4483 | | // multiply. We suppress this if overflow is not undefined behavior. |
4484 | 0 | if (getLangOpts().isSignedOverflowDefined()) |
4485 | 0 | Idx = Builder.CreateMul(Idx, NumElements); |
4486 | 0 | else |
4487 | 0 | Idx = Builder.CreateNSWMul(Idx, NumElements); |
4488 | 0 | EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(), |
4489 | 0 | !getLangOpts().isSignedOverflowDefined(), |
4490 | 0 | /*signedIndices=*/false, E->getExprLoc()); |
4491 | 0 | } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) { |
4492 | | // If this is A[i] where A is an array, the frontend will have decayed the |
4493 | | // base to be an ArrayToPointerDecay implicit cast. While correct, it is |
4494 | | // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a |
4495 | | // "gep x, i" here. Emit one "gep A, 0, i". |
4496 | 0 | assert(Array->getType()->isArrayType() && |
4497 | 0 | "Array to pointer decay must have array source type!"); |
4498 | 0 | LValue ArrayLV; |
4499 | | // For simple multidimensional array indexing, set the 'accessed' flag for |
4500 | | // better bounds-checking of the base expression. |
4501 | 0 | if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array)) |
4502 | 0 | ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true); |
4503 | 0 | else |
4504 | 0 | ArrayLV = EmitLValue(Array); |
4505 | | |
4506 | | // Propagate the alignment from the array itself to the result. |
4507 | 0 | EltPtr = emitArraySubscriptGEP( |
4508 | 0 | *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx}, |
4509 | 0 | ResultExprTy, !getLangOpts().isSignedOverflowDefined(), |
4510 | 0 | /*signedIndices=*/false, E->getExprLoc()); |
4511 | 0 | BaseInfo = ArrayLV.getBaseInfo(); |
4512 | 0 | TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy); |
4513 | 0 | } else { |
4514 | 0 | Address Base = emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, |
4515 | 0 | TBAAInfo, BaseTy, ResultExprTy, |
4516 | 0 | IsLowerBound); |
4517 | 0 | EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy, |
4518 | 0 | !getLangOpts().isSignedOverflowDefined(), |
4519 | 0 | /*signedIndices=*/false, E->getExprLoc()); |
4520 | 0 | } |
4521 | | |
4522 | 0 | return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo); |
4523 | 0 | } |
4524 | | |
4525 | | LValue CodeGenFunction:: |
4526 | 0 | EmitExtVectorElementExpr(const ExtVectorElementExpr *E) { |
4527 | | // Emit the base vector as an l-value. |
4528 | 0 | LValue Base; |
4529 | | |
4530 | | // ExtVectorElementExpr's base can either be a vector or pointer to vector. |
4531 | 0 | if (E->isArrow()) { |
4532 | | // If it is a pointer to a vector, emit the address and form an lvalue with |
4533 | | // it. |
4534 | 0 | LValueBaseInfo BaseInfo; |
4535 | 0 | TBAAAccessInfo TBAAInfo; |
4536 | 0 | Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo); |
4537 | 0 | const auto *PT = E->getBase()->getType()->castAs<PointerType>(); |
4538 | 0 | Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo); |
4539 | 0 | Base.getQuals().removeObjCGCAttr(); |
4540 | 0 | } else if (E->getBase()->isGLValue()) { |
4541 | | // Otherwise, if the base is an lvalue (as in the case of foo.x.x), |
4542 | | // emit the base as an lvalue. |
4543 | 0 | assert(E->getBase()->getType()->isVectorType()); |
4544 | 0 | Base = EmitLValue(E->getBase()); |
4545 | 0 | } else { |
4546 | | // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such. |
4547 | 0 | assert(E->getBase()->getType()->isVectorType() && |
4548 | 0 | "Result must be a vector"); |
4549 | 0 | llvm::Value *Vec = EmitScalarExpr(E->getBase()); |
4550 | | |
4551 | | // Store the vector to memory (because LValue wants an address). |
4552 | 0 | Address VecMem = CreateMemTemp(E->getBase()->getType()); |
4553 | 0 | Builder.CreateStore(Vec, VecMem); |
4554 | 0 | Base = MakeAddrLValue(VecMem, E->getBase()->getType(), |
4555 | 0 | AlignmentSource::Decl); |
4556 | 0 | } |
4557 | | |
4558 | 0 | QualType type = |
4559 | 0 | E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers()); |
4560 | | |
4561 | | // Encode the element access list into a vector of unsigned indices. |
4562 | 0 | SmallVector<uint32_t, 4> Indices; |
4563 | 0 | E->getEncodedElementAccess(Indices); |
4564 | |
4565 | 0 | if (Base.isSimple()) { |
4566 | 0 | llvm::Constant *CV = |
4567 | 0 | llvm::ConstantDataVector::get(getLLVMContext(), Indices); |
4568 | 0 | return LValue::MakeExtVectorElt(Base.getAddress(*this), CV, type, |
4569 | 0 | Base.getBaseInfo(), TBAAAccessInfo()); |
4570 | 0 | } |
4571 | 0 | assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!"); |
4572 | | |
4573 | 0 | llvm::Constant *BaseElts = Base.getExtVectorElts(); |
4574 | 0 | SmallVector<llvm::Constant *, 4> CElts; |
4575 | |
4576 | 0 | for (unsigned i = 0, e = Indices.size(); i != e; ++i) |
4577 | 0 | CElts.push_back(BaseElts->getAggregateElement(Indices[i])); |
4578 | 0 | llvm::Constant *CV = llvm::ConstantVector::get(CElts); |
4579 | 0 | return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type, |
4580 | 0 | Base.getBaseInfo(), TBAAAccessInfo()); |
4581 | 0 | } |
4582 | | |
4583 | 0 | LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) { |
4584 | 0 | if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) { |
4585 | 0 | EmitIgnoredExpr(E->getBase()); |
4586 | 0 | return EmitDeclRefLValue(DRE); |
4587 | 0 | } |
4588 | | |
4589 | 0 | Expr *BaseExpr = E->getBase(); |
4590 | | // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar. |
4591 | 0 | LValue BaseLV; |
4592 | 0 | if (E->isArrow()) { |
4593 | 0 | LValueBaseInfo BaseInfo; |
4594 | 0 | TBAAAccessInfo TBAAInfo; |
4595 | 0 | Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo); |
4596 | 0 | QualType PtrTy = BaseExpr->getType()->getPointeeType(); |
4597 | 0 | SanitizerSet SkippedChecks; |
4598 | 0 | bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr); |
4599 | 0 | if (IsBaseCXXThis) |
4600 | 0 | SkippedChecks.set(SanitizerKind::Alignment, true); |
4601 | 0 | if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr)) |
4602 | 0 | SkippedChecks.set(SanitizerKind::Null, true); |
4603 | 0 | EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr.getPointer(), PtrTy, |
4604 | 0 | /*Alignment=*/CharUnits::Zero(), SkippedChecks); |
4605 | 0 | BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo); |
4606 | 0 | } else |
4607 | 0 | BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess); |
4608 | |
4609 | 0 | NamedDecl *ND = E->getMemberDecl(); |
4610 | 0 | if (auto *Field = dyn_cast<FieldDecl>(ND)) { |
4611 | 0 | LValue LV = EmitLValueForField(BaseLV, Field); |
4612 | 0 | setObjCGCLValueClass(getContext(), E, LV); |
4613 | 0 | if (getLangOpts().OpenMP) { |
4614 | | // If the member was explicitly marked as nontemporal, mark it as |
4615 | | // nontemporal. If the base lvalue is marked as nontemporal, mark access |
4616 | | // to children as nontemporal too. |
4617 | 0 | if ((IsWrappedCXXThis(BaseExpr) && |
4618 | 0 | CGM.getOpenMPRuntime().isNontemporalDecl(Field)) || |
4619 | 0 | BaseLV.isNontemporal()) |
4620 | 0 | LV.setNontemporal(/*Value=*/true); |
4621 | 0 | } |
4622 | 0 | return LV; |
4623 | 0 | } |
4624 | | |
4625 | 0 | if (const auto *FD = dyn_cast<FunctionDecl>(ND)) |
4626 | 0 | return EmitFunctionDeclLValue(*this, E, FD); |
4627 | | |
4628 | 0 | llvm_unreachable("Unhandled member declaration!"); |
4629 | 0 | } |
4630 | | |
4631 | | /// Given that we are currently emitting a lambda, emit an l-value for |
4632 | | /// one of its members. |
4633 | | /// |
4634 | | LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field, |
4635 | 0 | llvm::Value *ThisValue) { |
4636 | 0 | bool HasExplicitObjectParameter = false; |
4637 | 0 | if (const auto *MD = dyn_cast_if_present<CXXMethodDecl>(CurCodeDecl)) { |
4638 | 0 | HasExplicitObjectParameter = MD->isExplicitObjectMemberFunction(); |
4639 | 0 | assert(MD->getParent()->isLambda()); |
4640 | 0 | assert(MD->getParent() == Field->getParent()); |
4641 | 0 | } |
4642 | 0 | LValue LambdaLV; |
4643 | 0 | if (HasExplicitObjectParameter) { |
4644 | 0 | const VarDecl *D = cast<CXXMethodDecl>(CurCodeDecl)->getParamDecl(0); |
4645 | 0 | auto It = LocalDeclMap.find(D); |
4646 | 0 | assert(It != LocalDeclMap.end() && "explicit parameter not loaded?"); |
4647 | 0 | Address AddrOfExplicitObject = It->getSecond(); |
4648 | 0 | if (D->getType()->isReferenceType()) |
4649 | 0 | LambdaLV = EmitLoadOfReferenceLValue(AddrOfExplicitObject, D->getType(), |
4650 | 0 | AlignmentSource::Decl); |
4651 | 0 | else |
4652 | 0 | LambdaLV = MakeNaturalAlignAddrLValue(AddrOfExplicitObject.getPointer(), |
4653 | 0 | D->getType().getNonReferenceType()); |
4654 | 0 | } else { |
4655 | 0 | QualType LambdaTagType = getContext().getTagDeclType(Field->getParent()); |
4656 | 0 | LambdaLV = MakeNaturalAlignAddrLValue(ThisValue, LambdaTagType); |
4657 | 0 | } |
4658 | 0 | return EmitLValueForField(LambdaLV, Field); |
4659 | 0 | } |
4660 | | |
4661 | 0 | LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) { |
4662 | 0 | return EmitLValueForLambdaField(Field, CXXABIThisValue); |
4663 | 0 | } |
4664 | | |
4665 | | /// Get the field index in the debug info. The debug info for a |
4666 | | /// structure/union ignores unnamed bitfields. |
4667 | | unsigned CodeGenFunction::getDebugInfoFIndex(const RecordDecl *Rec, |
4668 | 0 | unsigned FieldIndex) { |
4669 | 0 | unsigned I = 0, Skipped = 0; |
4670 | |
4671 | 0 | for (auto *F : Rec->getDefinition()->fields()) { |
4672 | 0 | if (I == FieldIndex) |
4673 | 0 | break; |
4674 | 0 | if (F->isUnnamedBitfield()) |
4675 | 0 | Skipped++; |
4676 | 0 | I++; |
4677 | 0 | } |
4678 | |
4679 | 0 | return FieldIndex - Skipped; |
4680 | 0 | } |
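Example of the adjustment (follows directly from the loop above): the AST counts unnamed bitfields, debug info does not, so the returned index is lowered by the number skipped.

struct S {
  int a;   // AST field index 0 -> debug index 0
  int : 8; // unnamed bitfield: in the AST, absent from debug info
  int b;   // AST field index 2 -> getDebugInfoFIndex returns 2 - 1 = 1
};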
4681 | | |
4682 | | /// Get the address of a zero-sized field within a record. The resulting |
4683 | | /// address doesn't necessarily have the right type. |
4684 | | static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base, |
4685 | 0 | const FieldDecl *Field) { |
4686 | 0 | CharUnits Offset = CGF.getContext().toCharUnitsFromBits( |
4687 | 0 | CGF.getContext().getFieldOffset(Field)); |
4688 | 0 | if (Offset.isZero()) |
4689 | 0 | return Base; |
4690 | 0 | Base = Base.withElementType(CGF.Int8Ty); |
4691 | 0 | return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset); |
4692 | 0 | } |
4693 | | |
4694 | | /// Drill down to the storage of a field without walking into |
4695 | | /// reference types. |
4696 | | /// |
4697 | | /// The resulting address doesn't necessarily have the right type. |
4698 | | static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base, |
4699 | 0 | const FieldDecl *field) { |
4700 | 0 | if (field->isZeroSize(CGF.getContext())) |
4701 | 0 | return emitAddrOfZeroSizeField(CGF, base, field); |
4702 | | |
4703 | 0 | const RecordDecl *rec = field->getParent(); |
4704 | |
4705 | 0 | unsigned idx = |
4706 | 0 | CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field); |
4707 | |
4708 | 0 | return CGF.Builder.CreateStructGEP(base, idx, field->getName()); |
4709 | 0 | } |
4710 | | |
4711 | | static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base, |
4712 | 0 | Address addr, const FieldDecl *field) { |
4713 | 0 | const RecordDecl *rec = field->getParent(); |
4714 | 0 | llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType( |
4715 | 0 | base.getType(), rec->getLocation()); |
4716 | |
4717 | 0 | unsigned idx = |
4718 | 0 | CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field); |
4719 | |
4720 | 0 | return CGF.Builder.CreatePreserveStructAccessIndex( |
4721 | 0 | addr, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo); |
4722 | 0 | } |
4723 | | |
4724 | 0 | static bool hasAnyVptr(const QualType Type, const ASTContext &Context) { |
4725 | 0 | const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl(); |
4726 | 0 | if (!RD) |
4727 | 0 | return false; |
4728 | | |
4729 | 0 | if (RD->isDynamicClass()) |
4730 | 0 | return true; |
4731 | | |
4732 | 0 | for (const auto &Base : RD->bases()) |
4733 | 0 | if (hasAnyVptr(Base.getType(), Context)) |
4734 | 0 | return true; |
4735 | | |
4736 | 0 | for (const FieldDecl *Field : RD->fields()) |
4737 | 0 | if (hasAnyVptr(Field->getType(), Context)) |
4738 | 0 | return true; |
4739 | | |
4740 | 0 | return false; |
4741 | 0 | } |
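An illustrative case this predicate catches (a sketch): a union member whose type is, or transitively contains, a dynamic class, which is what forces the launder on union field accesses below under -fstrict-vtable-pointers.

struct Dyn { virtual ~Dyn() {} }; // dynamic class: has a vptr
struct Holder { Dyn d; };         // contains one through a field

union U {
  Holder h; // hasAnyVptr(Holder) is true -> accesses get laundered
  int i;
  ~U() {}   // user-provided: Holder has a non-trivial destructor
};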
4742 | | |
4743 | | LValue CodeGenFunction::EmitLValueForField(LValue base, |
4744 | 0 | const FieldDecl *field) { |
4745 | 0 | LValueBaseInfo BaseInfo = base.getBaseInfo(); |
4746 | |
4747 | 0 | if (field->isBitField()) { |
4748 | 0 | const CGRecordLayout &RL = |
4749 | 0 | CGM.getTypes().getCGRecordLayout(field->getParent()); |
4750 | 0 | const CGBitFieldInfo &Info = RL.getBitFieldInfo(field); |
4751 | 0 | const bool UseVolatile = isAAPCS(CGM.getTarget()) && |
4752 | 0 | CGM.getCodeGenOpts().AAPCSBitfieldWidth && |
4753 | 0 | Info.VolatileStorageSize != 0 && |
4754 | 0 | field->getType() |
4755 | 0 | .withCVRQualifiers(base.getVRQualifiers()) |
4756 | 0 | .isVolatileQualified(); |
4757 | 0 | Address Addr = base.getAddress(*this); |
4758 | 0 | unsigned Idx = RL.getLLVMFieldNo(field); |
4759 | 0 | const RecordDecl *rec = field->getParent(); |
4760 | 0 | if (hasBPFPreserveStaticOffset(rec)) |
4761 | 0 | Addr = wrapWithBPFPreserveStaticOffset(*this, Addr); |
4762 | 0 | if (!UseVolatile) { |
4763 | 0 | if (!IsInPreservedAIRegion && |
4764 | 0 | (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) { |
4765 | 0 | if (Idx != 0) |
4766 | | // For structs, we GEP to the field that the record layout suggests. |
4767 | 0 | Addr = Builder.CreateStructGEP(Addr, Idx, field->getName()); |
4768 | 0 | } else { |
4769 | 0 | llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType( |
4770 | 0 | getContext().getRecordType(rec), rec->getLocation()); |
4771 | 0 | Addr = Builder.CreatePreserveStructAccessIndex( |
4772 | 0 | Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()), |
4773 | 0 | DbgInfo); |
4774 | 0 | } |
4775 | 0 | } |
4776 | 0 | const unsigned SS = |
4777 | 0 | UseVolatile ? Info.VolatileStorageSize : Info.StorageSize; |
4778 | | // Get the access type. |
4779 | 0 | llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS); |
4780 | 0 | Addr = Addr.withElementType(FieldIntTy); |
4781 | 0 | if (UseVolatile) { |
4782 | 0 | const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity(); |
4783 | 0 | if (VolatileOffset) |
4784 | 0 | Addr = Builder.CreateConstInBoundsGEP(Addr, VolatileOffset); |
4785 | 0 | } |
4786 | |
4787 | 0 | QualType fieldType = |
4788 | 0 | field->getType().withCVRQualifiers(base.getVRQualifiers()); |
4789 | | // TODO: Support TBAA for bit fields. |
4790 | 0 | LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource()); |
4791 | 0 | return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo, |
4792 | 0 | TBAAAccessInfo()); |
4793 | 0 | } |
4794 | | |
4795 | | // Fields of may-alias structures are may-alias themselves. |
4796 | | // FIXME: this should get propagated down through anonymous structs |
4797 | | // and unions. |
4798 | 0 | QualType FieldType = field->getType(); |
4799 | 0 | const RecordDecl *rec = field->getParent(); |
4800 | 0 | AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource(); |
4801 | 0 | LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource)); |
4802 | 0 | TBAAAccessInfo FieldTBAAInfo; |
4803 | 0 | if (base.getTBAAInfo().isMayAlias() || |
4804 | 0 | rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) { |
4805 | 0 | FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo(); |
4806 | 0 | } else if (rec->isUnion()) { |
4807 | | // TODO: Support TBAA for unions. |
4808 | 0 | FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo(); |
4809 | 0 | } else { |
4810 | | // If no base type has been assigned for the base access, then try to generate |
4811 | | // one for this base lvalue. |
4812 | 0 | FieldTBAAInfo = base.getTBAAInfo(); |
4813 | 0 | if (!FieldTBAAInfo.BaseType) { |
4814 | 0 | FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(base.getType()); |
4815 | 0 | assert(!FieldTBAAInfo.Offset && |
4816 | 0 | "Nonzero offset for an access with no base type!"); |
4817 | 0 | } |
4818 | | |
4819 | | // Adjust offset to be relative to the base type. |
4820 | 0 | const ASTRecordLayout &Layout = |
4821 | 0 | getContext().getASTRecordLayout(field->getParent()); |
4822 | 0 | unsigned CharWidth = getContext().getCharWidth(); |
4823 | 0 | if (FieldTBAAInfo.BaseType) |
4824 | 0 | FieldTBAAInfo.Offset += |
4825 | 0 | Layout.getFieldOffset(field->getFieldIndex()) / CharWidth; |
4826 | | |
4827 | | // Update the final access type and size. |
4828 | 0 | FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType); |
4829 | 0 | FieldTBAAInfo.Size = |
4830 | 0 | getContext().getTypeSizeInChars(FieldType).getQuantity(); |
4831 | 0 | } |
4832 | | |
4833 | 0 | Address addr = base.getAddress(*this); |
4834 | 0 | if (hasBPFPreserveStaticOffset(rec)) |
4835 | 0 | addr = wrapWithBPFPreserveStaticOffset(*this, addr); |
4836 | 0 | if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) { |
4837 | 0 | if (CGM.getCodeGenOpts().StrictVTablePointers && |
4838 | 0 | ClassDef->isDynamicClass()) { |
4839 | | // Getting to any field of a dynamic object requires stripping the dynamic |
4840 | | // information provided by invariant.group, because accessing fields may |
4841 | | // leak the real address of the dynamic object, which could result in |
4842 | | // miscompilation when the leaked pointer is compared. |
4843 | 0 | auto *stripped = Builder.CreateStripInvariantGroup(addr.getPointer()); |
4844 | 0 | addr = Address(stripped, addr.getElementType(), addr.getAlignment()); |
4845 | 0 | } |
4846 | 0 | } |
4847 | |
|
4848 | 0 | unsigned RecordCVR = base.getVRQualifiers(); |
4849 | 0 | if (rec->isUnion()) { |
4850 | | // For unions, there is no pointer adjustment. |
4851 | 0 | if (CGM.getCodeGenOpts().StrictVTablePointers && |
4852 | 0 | hasAnyVptr(FieldType, getContext())) |
4853 | | // Because unions can easily skip invariant.barriers, we need to add
4854 | | // a barrier every time a CXXRecord field with a vptr is referenced.
4855 | 0 | addr = Builder.CreateLaunderInvariantGroup(addr); |
4856 | |
|
4857 | 0 | if (IsInPreservedAIRegion || |
4858 | 0 | (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) { |
4859 | | // Remember the original union field index |
4860 | 0 | llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(base.getType(), |
4861 | 0 | rec->getLocation()); |
4862 | 0 | addr = Address( |
4863 | 0 | Builder.CreatePreserveUnionAccessIndex( |
4864 | 0 | addr.getPointer(), getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo), |
4865 | 0 | addr.getElementType(), addr.getAlignment()); |
4866 | 0 | } |
4867 | |
|
4868 | 0 | if (FieldType->isReferenceType()) |
4869 | 0 | addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType)); |
4870 | 0 | } else { |
4871 | 0 | if (!IsInPreservedAIRegion && |
4872 | 0 | (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) |
4873 | | // For structs, we GEP to the field that the record layout suggests. |
4874 | 0 | addr = emitAddrOfFieldStorage(*this, addr, field); |
4875 | 0 | else |
4876 | | // Remember the original struct field index |
4877 | 0 | addr = emitPreserveStructAccess(*this, base, addr, field); |
4878 | 0 | } |
4879 | | |
4880 | | // If this is a reference field, load the reference right now. |
4881 | 0 | if (FieldType->isReferenceType()) { |
4882 | 0 | LValue RefLVal = |
4883 | 0 | MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo); |
4884 | 0 | if (RecordCVR & Qualifiers::Volatile) |
4885 | 0 | RefLVal.getQuals().addVolatile(); |
4886 | 0 | addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo); |
4887 | | |
4888 | | // Qualifiers on the struct don't apply to the referencee. |
4889 | 0 | RecordCVR = 0; |
4890 | 0 | FieldType = FieldType->getPointeeType(); |
4891 | 0 | } |
4892 | | |
4893 | | // Make sure that the address is pointing to the right type. This is critical |
4894 | | // for both unions and structs. |
4895 | 0 | addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType)); |
4896 | |
|
4897 | 0 | if (field->hasAttr<AnnotateAttr>()) |
4898 | 0 | addr = EmitFieldAnnotations(field, addr); |
4899 | |
|
4900 | 0 | LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo); |
4901 | 0 | LV.getQuals().addCVRQualifiers(RecordCVR); |
4902 | | |
4903 | | // __weak attribute on a field is ignored. |
4904 | 0 | if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak) |
4905 | 0 | LV.getQuals().removeObjCGCAttr(); |
4906 | |
|
4907 | 0 | return LV; |
4908 | 0 | } |
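
For orientation, a minimal C++ sketch (hypothetical types, not from this file) of two special cases handled above: a may_alias record whose field accesses get may-alias TBAA, and a reference field whose pointer is loaded while forming the member lvalue.

    // Hypothetical input illustrating EmitLValueForField's paths.
    struct __attribute__((may_alias)) Blob { int raw; };
    struct Holder { int &ref; };

    int readBlob(Blob *b) { return b->raw; }  // field access gets may-alias TBAA
    int readRef(Holder *h) { return h->ref; } // loads the reference pointer first,
                                              // then loads the referenced int
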
4909 | | |
4910 | | LValue |
4911 | | CodeGenFunction::EmitLValueForFieldInitialization(LValue Base, |
4912 | 0 | const FieldDecl *Field) { |
4913 | 0 | QualType FieldType = Field->getType(); |
4914 | |
|
4915 | 0 | if (!FieldType->isReferenceType()) |
4916 | 0 | return EmitLValueForField(Base, Field); |
4917 | | |
4918 | 0 | Address V = emitAddrOfFieldStorage(*this, Base.getAddress(*this), Field); |
4919 | | |
4920 | | // Make sure that the address is pointing to the right type. |
4921 | 0 | llvm::Type *llvmType = ConvertTypeForMem(FieldType); |
4922 | 0 | V = V.withElementType(llvmType); |
4923 | | |
4924 | | // TODO: Generate TBAA information that describes this access as a structure |
4925 | | // member access and not just an access to an object of the field's type. This |
4926 | | // should be similar to what we do in EmitLValueForField(). |
4927 | 0 | LValueBaseInfo BaseInfo = Base.getBaseInfo(); |
4928 | 0 | AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource(); |
4929 | 0 | LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource)); |
4930 | 0 | return MakeAddrLValue(V, FieldType, FieldBaseInfo, |
4931 | 0 | CGM.getTBAAInfoForSubobject(Base, FieldType)); |
4932 | 0 | } |
4933 | | |
4934 | 0 | LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){ |
4935 | 0 | if (E->isFileScope()) { |
4936 | 0 | ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E); |
4937 | 0 | return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl); |
4938 | 0 | } |
4939 | 0 | if (E->getType()->isVariablyModifiedType()) |
4940 | | // make sure to emit the VLA size. |
4941 | 0 | EmitVariablyModifiedType(E->getType()); |
4942 | |
|
4943 | 0 | Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral"); |
4944 | 0 | const Expr *InitExpr = E->getInitializer(); |
4945 | 0 | LValue Result = MakeAddrLValue(DeclPtr, E->getType(), AlignmentSource::Decl); |
4946 | |
|
4947 | 0 | EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(), |
4948 | 0 | /*Init*/ true); |
4949 | | |
4950 | | // Block-scope compound literals are destroyed at the end of the enclosing |
4951 | | // scope in C. |
4952 | 0 | if (!getLangOpts().CPlusPlus) |
4953 | 0 | if (QualType::DestructionKind DtorKind = E->getType().isDestructedType()) |
4954 | 0 | pushLifetimeExtendedDestroy(getCleanupKind(DtorKind), DeclPtr, |
4955 | 0 | E->getType(), getDestroyer(DtorKind), |
4956 | 0 | DtorKind & EHCleanup); |
4957 | |
|
4958 | 0 | return Result; |
4959 | 0 | } |
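
A short C sketch (hypothetical names) of the two cases distinguished above; the destructor cleanup is registered only for C, where block-scope compound literals end their lifetime with the enclosing scope.

    struct Point { int x, y; };
    struct Point *gp = &(struct Point){1, 2};   /* file scope: lowered to a
                                                   constant global            */
    void f(void) {
      struct Point *lp = &(struct Point){3, 4}; /* block scope: a
                                                   ".compoundliteral" temporary,
                                                   initialized in place        */
      (void)lp;
    }
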
4960 | | |
4961 | 0 | LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) { |
4962 | 0 | if (!E->isGLValue()) |
4963 | | // Initializing an aggregate temporary in C++11: T{...}. |
4964 | 0 | return EmitAggExprToLValue(E); |
4965 | | |
4966 | | // An lvalue initializer list must be initializing a reference. |
4967 | 0 | assert(E->isTransparent() && "non-transparent glvalue init list"); |
4968 | 0 | return EmitLValue(E->getInit(0)); |
4969 | 0 | } |
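
A hypothetical C++ example of the transparent case asserted above, where the list is just a wrapper around the single element a reference binds to:

    int i = 0;
    int &r = {i};   // glvalue InitListExpr; emits just the lvalue for 'i'
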
4970 | | |
4971 | | /// Emit the operand of a glvalue conditional operator. This is either a glvalue |
4972 | | /// or a (possibly-parenthesized) throw-expression. If this is a throw, no |
4973 | | /// LValue is returned and the current block has been terminated. |
4974 | | static std::optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF, |
4975 | 0 | const Expr *Operand) { |
4976 | 0 | if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) { |
4977 | 0 | CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false); |
4978 | 0 | return std::nullopt; |
4979 | 0 | } |
4980 | | |
4981 | 0 | return CGF.EmitLValue(Operand); |
4982 | 0 | } |
4983 | | |
4984 | | namespace { |
4985 | | // Handle the case where the condition folds to a simple integer constant,
4986 | | // which means we don't have to separately handle the true/false blocks. |
4987 | | std::optional<LValue> HandleConditionalOperatorLValueSimpleCase( |
4988 | 0 | CodeGenFunction &CGF, const AbstractConditionalOperator *E) { |
4989 | 0 | const Expr *condExpr = E->getCond(); |
4990 | 0 | bool CondExprBool; |
4991 | 0 | if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) { |
4992 | 0 | const Expr *Live = E->getTrueExpr(), *Dead = E->getFalseExpr(); |
4993 | 0 | if (!CondExprBool) |
4994 | 0 | std::swap(Live, Dead); |
4995 | |
|
4996 | 0 | if (!CGF.ContainsLabel(Dead)) { |
4997 | | // If the true case is live, we need to track its region. |
4998 | 0 | if (CondExprBool) |
4999 | 0 | CGF.incrementProfileCounter(E); |
5000 | | // If it is a throw expression, we emit it and return an undefined lvalue
5001 | | // because its value cannot be used.
5002 | 0 | if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) { |
5003 | 0 | CGF.EmitCXXThrowExpr(ThrowExpr); |
5004 | 0 | llvm::Type *ElemTy = CGF.ConvertType(Dead->getType()); |
5005 | 0 | llvm::Type *Ty = CGF.UnqualPtrTy; |
5006 | 0 | return CGF.MakeAddrLValue( |
5007 | 0 | Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()), |
5008 | 0 | Dead->getType()); |
5009 | 0 | } |
5010 | 0 | return CGF.EmitLValue(Live); |
5011 | 0 | } |
5012 | 0 | } |
5013 | 0 | return std::nullopt; |
5014 | 0 | } |
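
For example (hypothetical input), a condition that folds to a constant takes this early path and emits only the live arm, with no branching:

    extern int a, b;
    int &pick() { return true ? a : b; }  // folds: only the lvalue for 'a'
                                          // is emitted, no branch
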
5015 | | struct ConditionalInfo { |
5016 | | llvm::BasicBlock *lhsBlock, *rhsBlock; |
5017 | | std::optional<LValue> LHS, RHS; |
5018 | | }; |
5019 | | |
5020 | | // Create and emit the three basic blocks for a conditional operator.
5021 | | // Leaves the 'current block' in the continuation basic block. |
5022 | | template<typename FuncTy> |
5023 | | ConditionalInfo EmitConditionalBlocks(CodeGenFunction &CGF, |
5024 | | const AbstractConditionalOperator *E, |
5025 | 0 | const FuncTy &BranchGenFunc) { |
5026 | 0 | ConditionalInfo Info{CGF.createBasicBlock("cond.true"), |
5027 | 0 | CGF.createBasicBlock("cond.false"), std::nullopt, |
5028 | 0 | std::nullopt}; |
5029 | 0 | llvm::BasicBlock *endBlock = CGF.createBasicBlock("cond.end"); |
5030 | |
|
5031 | 0 | CodeGenFunction::ConditionalEvaluation eval(CGF); |
5032 | 0 | CGF.EmitBranchOnBoolExpr(E->getCond(), Info.lhsBlock, Info.rhsBlock, |
5033 | 0 | CGF.getProfileCount(E)); |
5034 | | |
5035 | | // Any temporaries created here are conditional. |
5036 | 0 | CGF.EmitBlock(Info.lhsBlock); |
5037 | 0 | CGF.incrementProfileCounter(E); |
5038 | 0 | eval.begin(CGF); |
5039 | 0 | Info.LHS = BranchGenFunc(CGF, E->getTrueExpr()); |
5040 | 0 | eval.end(CGF); |
5041 | 0 | Info.lhsBlock = CGF.Builder.GetInsertBlock(); |
5042 | |
|
5043 | 0 | if (Info.LHS) |
5044 | 0 | CGF.Builder.CreateBr(endBlock); |
5045 | | |
5046 | | // Any temporaries created here are conditional. |
5047 | 0 | CGF.EmitBlock(Info.rhsBlock); |
5048 | 0 | eval.begin(CGF); |
5049 | 0 | Info.RHS = BranchGenFunc(CGF, E->getFalseExpr()); |
5050 | 0 | eval.end(CGF); |
5051 | 0 | Info.rhsBlock = CGF.Builder.GetInsertBlock(); |
5052 | 0 | CGF.EmitBlock(endBlock); |
5053 | |
|
5054 | 0 | return Info; |
5055 | 0 | }
 | | Unexecuted instantiation: CGExpr.cpp:(anonymous namespace)::ConditionalInfo (anonymous namespace)::EmitConditionalBlocks<clang::CodeGen::CodeGenFunction::EmitIgnoredConditionalOperator(clang::AbstractConditionalOperator const*)::$_2>(clang::CodeGen::CodeGenFunction&, clang::AbstractConditionalOperator const*, clang::CodeGen::CodeGenFunction::EmitIgnoredConditionalOperator(clang::AbstractConditionalOperator const*)::$_2 const&)
 | | Unexecuted instantiation: CGExpr.cpp:(anonymous namespace)::ConditionalInfo (anonymous namespace)::EmitConditionalBlocks<clang::CodeGen::CodeGenFunction::EmitConditionalOperatorLValue(clang::AbstractConditionalOperator const*)::$_3>(clang::CodeGen::CodeGenFunction&, clang::AbstractConditionalOperator const*, clang::CodeGen::CodeGenFunction::EmitConditionalOperatorLValue(clang::AbstractConditionalOperator const*)::$_3 const&)
5056 | | } // namespace |
5057 | | |
5058 | | void CodeGenFunction::EmitIgnoredConditionalOperator( |
5059 | 0 | const AbstractConditionalOperator *E) { |
5060 | 0 | if (!E->isGLValue()) { |
5061 | | // ?: here should be an aggregate. |
5062 | 0 | assert(hasAggregateEvaluationKind(E->getType()) && |
5063 | 0 | "Unexpected conditional operator!"); |
5064 | 0 | return (void)EmitAggExprToLValue(E); |
5065 | 0 | } |
5066 | | |
5067 | 0 | OpaqueValueMapping binding(*this, E); |
5068 | 0 | if (HandleConditionalOperatorLValueSimpleCase(*this, E)) |
5069 | 0 | return; |
5070 | | |
5071 | 0 | EmitConditionalBlocks(*this, E, [](CodeGenFunction &CGF, const Expr *E) { |
5072 | 0 | CGF.EmitIgnoredExpr(E); |
5073 | 0 | return LValue{}; |
5074 | 0 | }); |
5075 | 0 | } |
5076 | | LValue CodeGenFunction::EmitConditionalOperatorLValue( |
5077 | 0 | const AbstractConditionalOperator *expr) { |
5078 | 0 | if (!expr->isGLValue()) { |
5079 | | // ?: here should be an aggregate. |
5080 | 0 | assert(hasAggregateEvaluationKind(expr->getType()) && |
5081 | 0 | "Unexpected conditional operator!"); |
5082 | 0 | return EmitAggExprToLValue(expr); |
5083 | 0 | } |
5084 | | |
5085 | 0 | OpaqueValueMapping binding(*this, expr); |
5086 | 0 | if (std::optional<LValue> Res = |
5087 | 0 | HandleConditionalOperatorLValueSimpleCase(*this, expr)) |
5088 | 0 | return *Res; |
5089 | | |
5090 | 0 | ConditionalInfo Info = EmitConditionalBlocks( |
5091 | 0 | *this, expr, [](CodeGenFunction &CGF, const Expr *E) { |
5092 | 0 | return EmitLValueOrThrowExpression(CGF, E); |
5093 | 0 | }); |
5094 | |
|
5095 | 0 | if ((Info.LHS && !Info.LHS->isSimple()) || |
5096 | 0 | (Info.RHS && !Info.RHS->isSimple())) |
5097 | 0 | return EmitUnsupportedLValue(expr, "conditional operator"); |
5098 | | |
5099 | 0 | if (Info.LHS && Info.RHS) { |
5100 | 0 | Address lhsAddr = Info.LHS->getAddress(*this); |
5101 | 0 | Address rhsAddr = Info.RHS->getAddress(*this); |
5102 | 0 | llvm::PHINode *phi = Builder.CreatePHI(lhsAddr.getType(), 2, "cond-lvalue"); |
5103 | 0 | phi->addIncoming(lhsAddr.getPointer(), Info.lhsBlock); |
5104 | 0 | phi->addIncoming(rhsAddr.getPointer(), Info.rhsBlock); |
5105 | 0 | Address result(phi, lhsAddr.getElementType(), |
5106 | 0 | std::min(lhsAddr.getAlignment(), rhsAddr.getAlignment())); |
5107 | 0 | AlignmentSource alignSource = |
5108 | 0 | std::max(Info.LHS->getBaseInfo().getAlignmentSource(), |
5109 | 0 | Info.RHS->getBaseInfo().getAlignmentSource()); |
5110 | 0 | TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator( |
5111 | 0 | Info.LHS->getTBAAInfo(), Info.RHS->getTBAAInfo()); |
5112 | 0 | return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource), |
5113 | 0 | TBAAInfo); |
5114 | 0 | } else { |
5115 | 0 | assert((Info.LHS || Info.RHS) && |
5116 | 0 | "both operands of glvalue conditional are throw-expressions?"); |
5117 | 0 | return Info.LHS ? *Info.LHS : *Info.RHS; |
5118 | 0 | } |
5119 | 0 | } |
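
A hypothetical C++ input that needs the full three-block form and the address phi built above:

    void set(bool c, int &x, int &y) {
      (c ? x : y) = 7;  // cond.true yields &x, cond.false yields &y;
                        // cond.end merges them with the "cond-lvalue" phi
    }
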
5120 | | |
5121 | | /// EmitCastLValue - Casts are never lvalues unless that cast is to a reference |
5122 | | /// type. If the cast is to a reference, we can have the usual lvalue result, |
5123 | | /// otherwise if a cast is needed by the code generator in an lvalue context, |
5124 | | /// then it must mean that we need the address of an aggregate in order to |
5125 | | /// access one of its members. This can happen for all the reasons that casts |
5126 | | // are permitted with an aggregate result, including no-op aggregate casts, and
5127 | | // casts from scalar to union.
5128 | 0 | LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { |
5129 | 0 | switch (E->getCastKind()) { |
5130 | 0 | case CK_ToVoid: |
5131 | 0 | case CK_BitCast: |
5132 | 0 | case CK_LValueToRValueBitCast: |
5133 | 0 | case CK_ArrayToPointerDecay: |
5134 | 0 | case CK_FunctionToPointerDecay: |
5135 | 0 | case CK_NullToMemberPointer: |
5136 | 0 | case CK_NullToPointer: |
5137 | 0 | case CK_IntegralToPointer: |
5138 | 0 | case CK_PointerToIntegral: |
5139 | 0 | case CK_PointerToBoolean: |
5140 | 0 | case CK_IntegralCast: |
5141 | 0 | case CK_BooleanToSignedIntegral: |
5142 | 0 | case CK_IntegralToBoolean: |
5143 | 0 | case CK_IntegralToFloating: |
5144 | 0 | case CK_FloatingToIntegral: |
5145 | 0 | case CK_FloatingToBoolean: |
5146 | 0 | case CK_FloatingCast: |
5147 | 0 | case CK_FloatingRealToComplex: |
5148 | 0 | case CK_FloatingComplexToReal: |
5149 | 0 | case CK_FloatingComplexToBoolean: |
5150 | 0 | case CK_FloatingComplexCast: |
5151 | 0 | case CK_FloatingComplexToIntegralComplex: |
5152 | 0 | case CK_IntegralRealToComplex: |
5153 | 0 | case CK_IntegralComplexToReal: |
5154 | 0 | case CK_IntegralComplexToBoolean: |
5155 | 0 | case CK_IntegralComplexCast: |
5156 | 0 | case CK_IntegralComplexToFloatingComplex: |
5157 | 0 | case CK_DerivedToBaseMemberPointer: |
5158 | 0 | case CK_BaseToDerivedMemberPointer: |
5159 | 0 | case CK_MemberPointerToBoolean: |
5160 | 0 | case CK_ReinterpretMemberPointer: |
5161 | 0 | case CK_AnyPointerToBlockPointerCast: |
5162 | 0 | case CK_ARCProduceObject: |
5163 | 0 | case CK_ARCConsumeObject: |
5164 | 0 | case CK_ARCReclaimReturnedObject: |
5165 | 0 | case CK_ARCExtendBlockObject: |
5166 | 0 | case CK_CopyAndAutoreleaseBlockObject: |
5167 | 0 | case CK_IntToOCLSampler: |
5168 | 0 | case CK_FloatingToFixedPoint: |
5169 | 0 | case CK_FixedPointToFloating: |
5170 | 0 | case CK_FixedPointCast: |
5171 | 0 | case CK_FixedPointToBoolean: |
5172 | 0 | case CK_FixedPointToIntegral: |
5173 | 0 | case CK_IntegralToFixedPoint: |
5174 | 0 | case CK_MatrixCast: |
5175 | 0 | return EmitUnsupportedLValue(E, "unexpected cast lvalue"); |
5176 | | |
5177 | 0 | case CK_Dependent: |
5178 | 0 | llvm_unreachable("dependent cast kind in IR gen!"); |
5179 | |
|
5180 | 0 | case CK_BuiltinFnToFnPtr: |
5181 | 0 | llvm_unreachable("builtin functions are handled elsewhere"); |
5182 | | |
5183 | | // These are never l-values; just use the aggregate emission code. |
5184 | 0 | case CK_NonAtomicToAtomic: |
5185 | 0 | case CK_AtomicToNonAtomic: |
5186 | 0 | return EmitAggExprToLValue(E); |
5187 | | |
5188 | 0 | case CK_Dynamic: { |
5189 | 0 | LValue LV = EmitLValue(E->getSubExpr()); |
5190 | 0 | Address V = LV.getAddress(*this); |
5191 | 0 | const auto *DCE = cast<CXXDynamicCastExpr>(E); |
5192 | 0 | return MakeNaturalAlignAddrLValue(EmitDynamicCast(V, DCE), E->getType()); |
5193 | 0 | } |
5194 | | |
5195 | 0 | case CK_ConstructorConversion: |
5196 | 0 | case CK_UserDefinedConversion: |
5197 | 0 | case CK_CPointerToObjCPointerCast: |
5198 | 0 | case CK_BlockPointerToObjCPointerCast: |
5199 | 0 | case CK_LValueToRValue: |
5200 | 0 | return EmitLValue(E->getSubExpr()); |
5201 | | |
5202 | 0 | case CK_NoOp: { |
5203 | | // CK_NoOp can model a qualification conversion, which can remove an array |
5204 | | // bound and change the IR type. |
5205 | | // FIXME: Once pointee types are removed from IR, remove this. |
5206 | 0 | LValue LV = EmitLValue(E->getSubExpr()); |
5207 | | // Propagate the volatile qualifier to the LValue, if present in E.
5208 | 0 | if (E->changesVolatileQualification()) |
5209 | 0 | LV.getQuals() = E->getType().getQualifiers(); |
5210 | 0 | if (LV.isSimple()) { |
5211 | 0 | Address V = LV.getAddress(*this); |
5212 | 0 | if (V.isValid()) { |
5213 | 0 | llvm::Type *T = ConvertTypeForMem(E->getType()); |
5214 | 0 | if (V.getElementType() != T) |
5215 | 0 | LV.setAddress(V.withElementType(T)); |
5216 | 0 | } |
5217 | 0 | } |
5218 | 0 | return LV; |
5219 | 0 | } |
5220 | | |
5221 | 0 | case CK_UncheckedDerivedToBase: |
5222 | 0 | case CK_DerivedToBase: { |
5223 | 0 | const auto *DerivedClassTy = |
5224 | 0 | E->getSubExpr()->getType()->castAs<RecordType>(); |
5225 | 0 | auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl()); |
5226 | |
|
5227 | 0 | LValue LV = EmitLValue(E->getSubExpr()); |
5228 | 0 | Address This = LV.getAddress(*this); |
5229 | | |
5230 | | // Perform the derived-to-base conversion |
5231 | 0 | Address Base = GetAddressOfBaseClass( |
5232 | 0 | This, DerivedClassDecl, E->path_begin(), E->path_end(), |
5233 | 0 | /*NullCheckValue=*/false, E->getExprLoc()); |
5234 | | |
5235 | | // TODO: Support accesses to members of base classes in TBAA. For now, we |
5236 | | // conservatively pretend that the complete object is of the base class |
5237 | | // type. |
5238 | 0 | return MakeAddrLValue(Base, E->getType(), LV.getBaseInfo(), |
5239 | 0 | CGM.getTBAAInfoForSubobject(LV, E->getType())); |
5240 | 0 | } |
5241 | 0 | case CK_ToUnion: |
5242 | 0 | return EmitAggExprToLValue(E); |
5243 | 0 | case CK_BaseToDerived: { |
5244 | 0 | const auto *DerivedClassTy = E->getType()->castAs<RecordType>(); |
5245 | 0 | auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl()); |
5246 | |
|
5247 | 0 | LValue LV = EmitLValue(E->getSubExpr()); |
5248 | | |
5249 | | // Perform the base-to-derived conversion |
5250 | 0 | Address Derived = GetAddressOfDerivedClass( |
5251 | 0 | LV.getAddress(*this), DerivedClassDecl, E->path_begin(), E->path_end(), |
5252 | 0 | /*NullCheckValue=*/false); |
5253 | | |
5254 | | // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is |
5255 | | // performed and the object is not of the derived type. |
5256 | 0 | if (sanitizePerformTypeCheck()) |
5257 | 0 | EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(), |
5258 | 0 | Derived.getPointer(), E->getType()); |
5259 | |
|
5260 | 0 | if (SanOpts.has(SanitizerKind::CFIDerivedCast)) |
5261 | 0 | EmitVTablePtrCheckForCast(E->getType(), Derived, |
5262 | 0 | /*MayBeNull=*/false, CFITCK_DerivedCast, |
5263 | 0 | E->getBeginLoc()); |
5264 | |
|
5265 | 0 | return MakeAddrLValue(Derived, E->getType(), LV.getBaseInfo(), |
5266 | 0 | CGM.getTBAAInfoForSubobject(LV, E->getType())); |
5267 | 0 | } |
5268 | 0 | case CK_LValueBitCast: { |
5269 | | // This must be a reinterpret_cast (or c-style equivalent). |
5270 | 0 | const auto *CE = cast<ExplicitCastExpr>(E); |
5271 | |
|
5272 | 0 | CGM.EmitExplicitCastExprType(CE, this); |
5273 | 0 | LValue LV = EmitLValue(E->getSubExpr()); |
5274 | 0 | Address V = LV.getAddress(*this).withElementType( |
5275 | 0 | ConvertTypeForMem(CE->getTypeAsWritten()->getPointeeType())); |
5276 | |
|
5277 | 0 | if (SanOpts.has(SanitizerKind::CFIUnrelatedCast)) |
5278 | 0 | EmitVTablePtrCheckForCast(E->getType(), V, |
5279 | 0 | /*MayBeNull=*/false, CFITCK_UnrelatedCast, |
5280 | 0 | E->getBeginLoc()); |
5281 | |
|
5282 | 0 | return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(), |
5283 | 0 | CGM.getTBAAInfoForSubobject(LV, E->getType())); |
5284 | 0 | } |
5285 | 0 | case CK_AddressSpaceConversion: { |
5286 | 0 | LValue LV = EmitLValue(E->getSubExpr()); |
5287 | 0 | QualType DestTy = getContext().getPointerType(E->getType()); |
5288 | 0 | llvm::Value *V = getTargetHooks().performAddrSpaceCast( |
5289 | 0 | *this, LV.getPointer(*this), |
5290 | 0 | E->getSubExpr()->getType().getAddressSpace(), |
5291 | 0 | E->getType().getAddressSpace(), ConvertType(DestTy)); |
5292 | 0 | return MakeAddrLValue(Address(V, ConvertTypeForMem(E->getType()), |
5293 | 0 | LV.getAddress(*this).getAlignment()), |
5294 | 0 | E->getType(), LV.getBaseInfo(), LV.getTBAAInfo()); |
5295 | 0 | } |
5296 | 0 | case CK_ObjCObjectLValueCast: { |
5297 | 0 | LValue LV = EmitLValue(E->getSubExpr()); |
5298 | 0 | Address V = LV.getAddress(*this).withElementType(ConvertType(E->getType())); |
5299 | 0 | return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(), |
5300 | 0 | CGM.getTBAAInfoForSubobject(LV, E->getType())); |
5301 | 0 | } |
5302 | 0 | case CK_ZeroToOCLOpaqueType: |
5303 | 0 | llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid"); |
5304 | |
|
5305 | 0 | case CK_VectorSplat: { |
5306 | | // LValue results of vector splats are only supported in HLSL. |
5307 | 0 | if (!getLangOpts().HLSL) |
5308 | 0 | return EmitUnsupportedLValue(E, "unexpected cast lvalue"); |
5309 | 0 | return EmitLValue(E->getSubExpr()); |
5310 | 0 | } |
5311 | 0 | } |
5312 | | |
5313 | 0 | llvm_unreachable("Unhandled lvalue cast kind?"); |
5314 | 0 | } |
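
A concrete, hypothetical example of the CK_DerivedToBase path above:

    struct Base { int b; };
    struct Derived : Base { int d; };
    // GetAddressOfBaseClass computes the Base subobject address; TBAA then
    // conservatively pretends the complete object has type Base.
    int &baseField(Derived &d) { return static_cast<Base &>(d).b; }
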
5315 | | |
5316 | 0 | LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) { |
5317 | 0 | assert(OpaqueValueMappingData::shouldBindAsLValue(e)); |
5318 | 0 | return getOrCreateOpaqueLValueMapping(e); |
5319 | 0 | } |
5320 | | |
5321 | | LValue |
5322 | 0 | CodeGenFunction::getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e) { |
5323 | 0 | assert(OpaqueValueMapping::shouldBindAsLValue(e)); |
5324 | | |
5325 | 0 | llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator |
5326 | 0 | it = OpaqueLValues.find(e); |
5327 | |
|
5328 | 0 | if (it != OpaqueLValues.end()) |
5329 | 0 | return it->second; |
5330 | | |
5331 | 0 | assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted"); |
5332 | 0 | return EmitLValue(e->getSourceExpr()); |
5333 | 0 | } |
5334 | | |
5335 | | RValue |
5336 | 0 | CodeGenFunction::getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e) { |
5337 | 0 | assert(!OpaqueValueMapping::shouldBindAsLValue(e)); |
5338 | | |
5339 | 0 | llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator |
5340 | 0 | it = OpaqueRValues.find(e); |
5341 | |
|
5342 | 0 | if (it != OpaqueRValues.end()) |
5343 | 0 | return it->second; |
5344 | | |
5345 | 0 | assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted"); |
5346 | 0 | return EmitAnyExpr(e->getSourceExpr()); |
5347 | 0 | } |
5348 | | |
5349 | | RValue CodeGenFunction::EmitRValueForField(LValue LV, |
5350 | | const FieldDecl *FD, |
5351 | 0 | SourceLocation Loc) { |
5352 | 0 | QualType FT = FD->getType(); |
5353 | 0 | LValue FieldLV = EmitLValueForField(LV, FD); |
5354 | 0 | switch (getEvaluationKind(FT)) { |
5355 | 0 | case TEK_Complex: |
5356 | 0 | return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc)); |
5357 | 0 | case TEK_Aggregate: |
5358 | 0 | return FieldLV.asAggregateRValue(*this); |
5359 | 0 | case TEK_Scalar: |
5360 | | // This routine is used to load fields one-by-one to perform a copy, so |
5361 | | // don't load reference fields. |
5362 | 0 | if (FD->getType()->isReferenceType()) |
5363 | 0 | return RValue::get(FieldLV.getPointer(*this)); |
5364 | | // Emit a primitive load via EmitLoadOfScalar, except when the lvalue
5365 | | // is a bitfield, which needs EmitLoadOfLValue.
5366 | 0 | if (FieldLV.isBitField()) |
5367 | 0 | return EmitLoadOfLValue(FieldLV, Loc); |
5368 | 0 | return RValue::get(EmitLoadOfScalar(FieldLV, Loc)); |
5369 | 0 | } |
5370 | 0 | llvm_unreachable("bad evaluation kind"); |
5371 | 0 | } |
5372 | | |
5373 | | //===--------------------------------------------------------------------===// |
5374 | | // Expression Emission |
5375 | | //===--------------------------------------------------------------------===// |
5376 | | |
5377 | | RValue CodeGenFunction::EmitCallExpr(const CallExpr *E, |
5378 | 0 | ReturnValueSlot ReturnValue) { |
5379 | | // Builtins never have block type. |
5380 | 0 | if (E->getCallee()->getType()->isBlockPointerType()) |
5381 | 0 | return EmitBlockCallExpr(E, ReturnValue); |
5382 | | |
5383 | 0 | if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E)) |
5384 | 0 | return EmitCXXMemberCallExpr(CE, ReturnValue); |
5385 | | |
5386 | 0 | if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E)) |
5387 | 0 | return EmitCUDAKernelCallExpr(CE, ReturnValue); |
5388 | | |
5389 | | // A CXXOperatorCallExpr is created even for explicit object methods, but |
5390 | | // these should be treated like static function calls.
5391 | 0 | if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E)) |
5392 | 0 | if (const auto *MD = |
5393 | 0 | dyn_cast_if_present<CXXMethodDecl>(CE->getCalleeDecl()); |
5394 | 0 | MD && MD->isImplicitObjectMemberFunction()) |
5395 | 0 | return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue); |
5396 | | |
5397 | 0 | CGCallee callee = EmitCallee(E->getCallee()); |
5398 | |
|
5399 | 0 | if (callee.isBuiltin()) { |
5400 | 0 | return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(), |
5401 | 0 | E, ReturnValue); |
5402 | 0 | } |
5403 | | |
5404 | 0 | if (callee.isPseudoDestructor()) { |
5405 | 0 | return EmitCXXPseudoDestructorExpr(callee.getPseudoDestructorExpr()); |
5406 | 0 | } |
5407 | | |
5408 | 0 | return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue); |
5409 | 0 | } |
5410 | | |
5411 | | /// Emit a CallExpr without considering whether it might be a subclass. |
5412 | | RValue CodeGenFunction::EmitSimpleCallExpr(const CallExpr *E, |
5413 | 0 | ReturnValueSlot ReturnValue) { |
5414 | 0 | CGCallee Callee = EmitCallee(E->getCallee()); |
5415 | 0 | return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue); |
5416 | 0 | } |
5417 | | |
5418 | | // Detect the unusual situation where an inline version is shadowed by a |
5419 | | // non-inline version. In that case we should pick the external one |
5420 | | // everywhere. That's GCC behavior too. |
5421 | 0 | static bool OnlyHasInlineBuiltinDeclaration(const FunctionDecl *FD) { |
5422 | 0 | for (const FunctionDecl *PD = FD; PD; PD = PD->getPreviousDecl()) |
5423 | 0 | if (!PD->isInlineBuiltinDeclaration()) |
5424 | 0 | return false; |
5425 | 0 | return true; |
5426 | 0 | } |
5427 | | |
5428 | 0 | static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD) { |
5429 | 0 | const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl()); |
5430 | |
|
5431 | 0 | if (auto builtinID = FD->getBuiltinID()) { |
5432 | 0 | std::string NoBuiltinFD = ("no-builtin-" + FD->getName()).str(); |
5433 | 0 | std::string NoBuiltins = "no-builtins"; |
5434 | |
|
5435 | 0 | StringRef Ident = CGF.CGM.getMangledName(GD); |
5436 | 0 | std::string FDInlineName = (Ident + ".inline").str(); |
5437 | |
|
5438 | 0 | bool IsPredefinedLibFunction = |
5439 | 0 | CGF.getContext().BuiltinInfo.isPredefinedLibFunction(builtinID); |
5440 | 0 | bool HasAttributeNoBuiltin = |
5441 | 0 | CGF.CurFn->getAttributes().hasFnAttr(NoBuiltinFD) || |
5442 | 0 | CGF.CurFn->getAttributes().hasFnAttr(NoBuiltins); |
5443 | | |
5444 | | // When directly calling an inline builtin, call it through its mangled
5445 | | // name to make it clear it's not the actual builtin. |
5446 | 0 | if (CGF.CurFn->getName() != FDInlineName && |
5447 | 0 | OnlyHasInlineBuiltinDeclaration(FD)) { |
5448 | 0 | llvm::Constant *CalleePtr = EmitFunctionDeclPointer(CGF.CGM, GD); |
5449 | 0 | llvm::Function *Fn = llvm::cast<llvm::Function>(CalleePtr); |
5450 | 0 | llvm::Module *M = Fn->getParent(); |
5451 | 0 | llvm::Function *Clone = M->getFunction(FDInlineName); |
5452 | 0 | if (!Clone) { |
5453 | 0 | Clone = llvm::Function::Create(Fn->getFunctionType(), |
5454 | 0 | llvm::GlobalValue::InternalLinkage, |
5455 | 0 | Fn->getAddressSpace(), FDInlineName, M); |
5456 | 0 | Clone->addFnAttr(llvm::Attribute::AlwaysInline); |
5457 | 0 | } |
5458 | 0 | return CGCallee::forDirect(Clone, GD); |
5459 | 0 | } |
5460 | | |
5461 | | // Replaceable builtins provide their own implementation of a builtin. If we
5462 | | // are in an inline builtin implementation, avoid trivial infinite
5463 | | // recursion. Honor __attribute__((no_builtin("foo"))) or
5464 | | // __attribute__((no_builtin)) on the current function, unless foo is
5465 | | // not a predefined library function, in which case we must generate the
5466 | | // builtin no matter what.
5467 | 0 | else if (!IsPredefinedLibFunction || !HasAttributeNoBuiltin) |
5468 | 0 | return CGCallee::forBuiltin(builtinID, FD); |
5469 | 0 | } |
5470 | | |
5471 | 0 | llvm::Constant *CalleePtr = EmitFunctionDeclPointer(CGF.CGM, GD); |
5472 | 0 | if (CGF.CGM.getLangOpts().CUDA && !CGF.CGM.getLangOpts().CUDAIsDevice && |
5473 | 0 | FD->hasAttr<CUDAGlobalAttr>()) |
5474 | 0 | CalleePtr = CGF.CGM.getCUDARuntime().getKernelStub( |
5475 | 0 | cast<llvm::GlobalValue>(CalleePtr->stripPointerCasts())); |
5476 | |
|
5477 | 0 | return CGCallee::forDirect(CalleePtr, GD); |
5478 | 0 | } |
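
The ".inline" clone logic above targets the extern gnu_inline pattern used, for example, by glibc's fortified string headers; a reduced C sketch (hypothetical):

    extern inline __attribute__((always_inline, gnu_inline))
    void *memcpy(void *d, const void *s, unsigned long n) {
      return __builtin_memcpy(d, s, n);
    }
    void copy(void *d, const void *s) {
      /* Emitted as a call to "memcpy.inline", an internal always_inline
         clone, so the external symbol "memcpy" still names the library
         function. */
      memcpy(d, s, 16);
    }
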
5479 | | |
5480 | 0 | CGCallee CodeGenFunction::EmitCallee(const Expr *E) { |
5481 | 0 | E = E->IgnoreParens(); |
5482 | | |
5483 | | // Look through function-to-pointer decay. |
5484 | 0 | if (auto ICE = dyn_cast<ImplicitCastExpr>(E)) { |
5485 | 0 | if (ICE->getCastKind() == CK_FunctionToPointerDecay || |
5486 | 0 | ICE->getCastKind() == CK_BuiltinFnToFnPtr) { |
5487 | 0 | return EmitCallee(ICE->getSubExpr()); |
5488 | 0 | } |
5489 | | |
5490 | | // Resolve direct calls. |
5491 | 0 | } else if (auto DRE = dyn_cast<DeclRefExpr>(E)) { |
5492 | 0 | if (auto FD = dyn_cast<FunctionDecl>(DRE->getDecl())) { |
5493 | 0 | return EmitDirectCallee(*this, FD); |
5494 | 0 | } |
5495 | 0 | } else if (auto ME = dyn_cast<MemberExpr>(E)) { |
5496 | 0 | if (auto FD = dyn_cast<FunctionDecl>(ME->getMemberDecl())) { |
5497 | 0 | EmitIgnoredExpr(ME->getBase()); |
5498 | 0 | return EmitDirectCallee(*this, FD); |
5499 | 0 | } |
5500 | | |
5501 | | // Look through template substitutions. |
5502 | 0 | } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) { |
5503 | 0 | return EmitCallee(NTTP->getReplacement()); |
5504 | | |
5505 | | // Treat pseudo-destructor calls differently. |
5506 | 0 | } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(E)) { |
5507 | 0 | return CGCallee::forPseudoDestructor(PDE); |
5508 | 0 | } |
5509 | | |
5510 | | // Otherwise, we have an indirect reference. |
5511 | 0 | llvm::Value *calleePtr; |
5512 | 0 | QualType functionType; |
5513 | 0 | if (auto ptrType = E->getType()->getAs<PointerType>()) { |
5514 | 0 | calleePtr = EmitScalarExpr(E); |
5515 | 0 | functionType = ptrType->getPointeeType(); |
5516 | 0 | } else { |
5517 | 0 | functionType = E->getType(); |
5518 | 0 | calleePtr = EmitLValue(E, KnownNonNull).getPointer(*this); |
5519 | 0 | } |
5520 | 0 | assert(functionType->isFunctionType()); |
5521 | | |
5522 | 0 | GlobalDecl GD; |
5523 | 0 | if (const auto *VD = |
5524 | 0 | dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee())) |
5525 | 0 | GD = GlobalDecl(VD); |
5526 | |
|
5527 | 0 | CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD); |
5528 | 0 | CGCallee callee(calleeInfo, calleePtr); |
5529 | 0 | return callee; |
5530 | 0 | } |
5531 | | |
5532 | 0 | LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) { |
5533 | | // Comma expressions just emit their LHS then their RHS as an l-value. |
5534 | 0 | if (E->getOpcode() == BO_Comma) { |
5535 | 0 | EmitIgnoredExpr(E->getLHS()); |
5536 | 0 | EnsureInsertPoint(); |
5537 | 0 | return EmitLValue(E->getRHS()); |
5538 | 0 | } |
5539 | | |
5540 | 0 | if (E->getOpcode() == BO_PtrMemD || |
5541 | 0 | E->getOpcode() == BO_PtrMemI) |
5542 | 0 | return EmitPointerToDataMemberBinaryExpr(E); |
5543 | | |
5544 | 0 | assert(E->getOpcode() == BO_Assign && "unexpected binary l-value"); |
5545 | | |
5546 | | // Note that in all of these cases, __block variables need the RHS |
5547 | | // evaluated first just in case the variable gets moved by the RHS. |
5548 | | |
5549 | 0 | switch (getEvaluationKind(E->getType())) { |
5550 | 0 | case TEK_Scalar: { |
5551 | 0 | switch (E->getLHS()->getType().getObjCLifetime()) { |
5552 | 0 | case Qualifiers::OCL_Strong: |
5553 | 0 | return EmitARCStoreStrong(E, /*ignored*/ false).first; |
5554 | | |
5555 | 0 | case Qualifiers::OCL_Autoreleasing: |
5556 | 0 | return EmitARCStoreAutoreleasing(E).first; |
5557 | | |
5558 | | // No reason to do any of these differently. |
5559 | 0 | case Qualifiers::OCL_None: |
5560 | 0 | case Qualifiers::OCL_ExplicitNone: |
5561 | 0 | case Qualifiers::OCL_Weak: |
5562 | 0 | break; |
5563 | 0 | } |
5564 | | |
5565 | 0 | RValue RV = EmitAnyExpr(E->getRHS()); |
5566 | 0 | LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store); |
5567 | 0 | if (RV.isScalar()) |
5568 | 0 | EmitNullabilityCheck(LV, RV.getScalarVal(), E->getExprLoc()); |
5569 | 0 | EmitStoreThroughLValue(RV, LV); |
5570 | 0 | if (getLangOpts().OpenMP) |
5571 | 0 | CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this, |
5572 | 0 | E->getLHS()); |
5573 | 0 | return LV; |
5574 | 0 | } |
5575 | | |
5576 | 0 | case TEK_Complex: |
5577 | 0 | return EmitComplexAssignmentLValue(E); |
5578 | | |
5579 | 0 | case TEK_Aggregate: |
5580 | 0 | return EmitAggExprToLValue(E); |
5581 | 0 | } |
5582 | 0 | llvm_unreachable("bad evaluation kind"); |
5583 | 0 | } |
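
Because a C++ assignment is itself an lvalue, the scalar path above stores through the LHS and then returns it; a hypothetical example:

    void chain(int &a, int &b, int &c) {
      (a = b) = c;  // 'b' is evaluated and stored to 'a'; the returned
                    // lvalue 'a' is then assigned 'c'
    }
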
5584 | | |
5585 | 0 | LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) { |
5586 | 0 | RValue RV = EmitCallExpr(E); |
5587 | |
|
5588 | 0 | if (!RV.isScalar()) |
5589 | 0 | return MakeAddrLValue(RV.getAggregateAddress(), E->getType(), |
5590 | 0 | AlignmentSource::Decl); |
5591 | | |
5592 | 0 | assert(E->getCallReturnType(getContext())->isReferenceType() && |
5593 | 0 | "Can't have a scalar return unless the return type is a " |
5594 | 0 | "reference type!"); |
5595 | | |
5596 | 0 | return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType()); |
5597 | 0 | } |
5598 | | |
5599 | 0 | LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) { |
5600 | | // FIXME: This shouldn't require another copy. |
5601 | 0 | return EmitAggExprToLValue(E); |
5602 | 0 | } |
5603 | | |
5604 | 0 | LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) { |
5605 | 0 | assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor() |
5606 | 0 | && "binding l-value to type which needs a temporary"); |
5607 | 0 | AggValueSlot Slot = CreateAggTemp(E->getType()); |
5608 | 0 | EmitCXXConstructExpr(E, Slot); |
5609 | 0 | return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl); |
5610 | 0 | } |
5611 | | |
5612 | | LValue |
5613 | 0 | CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) { |
5614 | 0 | return MakeNaturalAlignAddrLValue(EmitCXXTypeidExpr(E), E->getType()); |
5615 | 0 | } |
5616 | | |
5617 | 0 | Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) { |
5618 | 0 | return CGM.GetAddrOfMSGuidDecl(E->getGuidDecl()) |
5619 | 0 | .withElementType(ConvertType(E->getType())); |
5620 | 0 | } |
5621 | | |
5622 | 0 | LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) { |
5623 | 0 | return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType(), |
5624 | 0 | AlignmentSource::Decl); |
5625 | 0 | } |
5626 | | |
5627 | | LValue |
5628 | 0 | CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) { |
5629 | 0 | AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue"); |
5630 | 0 | Slot.setExternallyDestructed(); |
5631 | 0 | EmitAggExpr(E->getSubExpr(), Slot); |
5632 | 0 | EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddress()); |
5633 | 0 | return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl); |
5634 | 0 | } |
5635 | | |
5636 | 0 | LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) { |
5637 | 0 | RValue RV = EmitObjCMessageExpr(E); |
5638 | |
|
5639 | 0 | if (!RV.isScalar()) |
5640 | 0 | return MakeAddrLValue(RV.getAggregateAddress(), E->getType(), |
5641 | 0 | AlignmentSource::Decl); |
5642 | | |
5643 | 0 | assert(E->getMethodDecl()->getReturnType()->isReferenceType() && |
5644 | 0 | "Can't have a scalar return unless the return type is a " |
5645 | 0 | "reference type!"); |
5646 | | |
5647 | 0 | return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType()); |
5648 | 0 | } |
5649 | | |
5650 | 0 | LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) { |
5651 | 0 | Address V = |
5652 | 0 | CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector()); |
5653 | 0 | return MakeAddrLValue(V, E->getType(), AlignmentSource::Decl); |
5654 | 0 | } |
5655 | | |
5656 | | llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface, |
5657 | 0 | const ObjCIvarDecl *Ivar) { |
5658 | 0 | return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar); |
5659 | 0 | } |
5660 | | |
5661 | | llvm::Value * |
5662 | | CodeGenFunction::EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface, |
5663 | 0 | const ObjCIvarDecl *Ivar) { |
5664 | 0 | llvm::Value *OffsetValue = EmitIvarOffset(Interface, Ivar); |
5665 | 0 | QualType PointerDiffType = getContext().getPointerDiffType(); |
5666 | 0 | return Builder.CreateZExtOrTrunc(OffsetValue, |
5667 | 0 | getTypes().ConvertType(PointerDiffType)); |
5668 | 0 | } |
5669 | | |
5670 | | LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy, |
5671 | | llvm::Value *BaseValue, |
5672 | | const ObjCIvarDecl *Ivar, |
5673 | 0 | unsigned CVRQualifiers) { |
5674 | 0 | return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue, |
5675 | 0 | Ivar, CVRQualifiers); |
5676 | 0 | } |
5677 | | |
5678 | 0 | LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) { |
5679 | | // FIXME: A lot of the code below could be shared with EmitMemberExpr. |
5680 | 0 | llvm::Value *BaseValue = nullptr; |
5681 | 0 | const Expr *BaseExpr = E->getBase(); |
5682 | 0 | Qualifiers BaseQuals; |
5683 | 0 | QualType ObjectTy; |
5684 | 0 | if (E->isArrow()) { |
5685 | 0 | BaseValue = EmitScalarExpr(BaseExpr); |
5686 | 0 | ObjectTy = BaseExpr->getType()->getPointeeType(); |
5687 | 0 | BaseQuals = ObjectTy.getQualifiers(); |
5688 | 0 | } else { |
5689 | 0 | LValue BaseLV = EmitLValue(BaseExpr); |
5690 | 0 | BaseValue = BaseLV.getPointer(*this); |
5691 | 0 | ObjectTy = BaseExpr->getType(); |
5692 | 0 | BaseQuals = ObjectTy.getQualifiers(); |
5693 | 0 | } |
5694 | |
|
5695 | 0 | LValue LV = |
5696 | 0 | EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(), |
5697 | 0 | BaseQuals.getCVRQualifiers()); |
5698 | 0 | setObjCGCLValueClass(getContext(), E, LV); |
5699 | 0 | return LV; |
5700 | 0 | } |
5701 | | |
5702 | 0 | LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) { |
5703 | | // We can only get an l-value for a statement expression returning an aggregate.
5704 | 0 | RValue RV = EmitAnyExprToTemp(E); |
5705 | 0 | return MakeAddrLValue(RV.getAggregateAddress(), E->getType(), |
5706 | 0 | AlignmentSource::Decl); |
5707 | 0 | } |
5708 | | |
5709 | | RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee, |
5710 | | const CallExpr *E, ReturnValueSlot ReturnValue, |
5711 | 0 | llvm::Value *Chain) { |
5712 | | // Get the actual function type. The callee type will always be a pointer to |
5713 | | // function type or a block pointer type. |
5714 | 0 | assert(CalleeType->isFunctionPointerType() && |
5715 | 0 | "Call must have function pointer type!"); |
5716 | | |
5717 | 0 | const Decl *TargetDecl = |
5718 | 0 | OrigCallee.getAbstractInfo().getCalleeDecl().getDecl(); |
5719 | |
|
5720 | 0 | assert((!isa_and_present<FunctionDecl>(TargetDecl) || |
5721 | 0 | !cast<FunctionDecl>(TargetDecl)->isImmediateFunction()) && |
5722 | 0 | "trying to emit a call to an immediate function"); |
5723 | | |
5724 | 0 | CalleeType = getContext().getCanonicalType(CalleeType); |
5725 | |
|
5726 | 0 | auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType(); |
5727 | |
|
5728 | 0 | CGCallee Callee = OrigCallee; |
5729 | |
|
5730 | 0 | if (SanOpts.has(SanitizerKind::Function) && |
5731 | 0 | (!TargetDecl || !isa<FunctionDecl>(TargetDecl)) && |
5732 | 0 | !isa<FunctionNoProtoType>(PointeeType)) { |
5733 | 0 | if (llvm::Constant *PrefixSig = |
5734 | 0 | CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) { |
5735 | 0 | SanitizerScope SanScope(this); |
5736 | 0 | auto *TypeHash = getUBSanFunctionTypeHash(PointeeType); |
5737 | |
|
5738 | 0 | llvm::Type *PrefixSigType = PrefixSig->getType(); |
5739 | 0 | llvm::StructType *PrefixStructTy = llvm::StructType::get( |
5740 | 0 | CGM.getLLVMContext(), {PrefixSigType, Int32Ty}, /*isPacked=*/true); |
5741 | |
|
5742 | 0 | llvm::Value *CalleePtr = Callee.getFunctionPointer(); |
5743 | | |
5744 | | // On 32-bit Arm, the low bit of a function pointer indicates whether |
5745 | | // it's using the Arm or Thumb instruction set. The actual first |
5746 | | // instruction lives at the same address either way, so we must clear |
5747 | | // that low bit before using the function address to find the prefix |
5748 | | // structure. |
5749 | | // |
5750 | | // This applies to both Arm and Thumb target triples, because |
5751 | | // either one could be used in an interworking context where it |
5752 | | // might be passed function pointers of both types. |
5753 | 0 | llvm::Value *AlignedCalleePtr; |
5754 | 0 | if (CGM.getTriple().isARM() || CGM.getTriple().isThumb()) { |
5755 | 0 | llvm::Value *CalleeAddress = |
5756 | 0 | Builder.CreatePtrToInt(CalleePtr, IntPtrTy); |
5757 | 0 | llvm::Value *Mask = llvm::ConstantInt::get(IntPtrTy, ~1); |
5758 | 0 | llvm::Value *AlignedCalleeAddress = |
5759 | 0 | Builder.CreateAnd(CalleeAddress, Mask); |
5760 | 0 | AlignedCalleePtr = |
5761 | 0 | Builder.CreateIntToPtr(AlignedCalleeAddress, CalleePtr->getType()); |
5762 | 0 | } else { |
5763 | 0 | AlignedCalleePtr = CalleePtr; |
5764 | 0 | } |
5765 | |
|
5766 | 0 | llvm::Value *CalleePrefixStruct = AlignedCalleePtr; |
5767 | 0 | llvm::Value *CalleeSigPtr = |
5768 | 0 | Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 0); |
5769 | 0 | llvm::Value *CalleeSig = |
5770 | 0 | Builder.CreateAlignedLoad(PrefixSigType, CalleeSigPtr, getIntAlign()); |
5771 | 0 | llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig); |
5772 | |
|
5773 | 0 | llvm::BasicBlock *Cont = createBasicBlock("cont"); |
5774 | 0 | llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck"); |
5775 | 0 | Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont); |
5776 | |
|
5777 | 0 | EmitBlock(TypeCheck); |
5778 | 0 | llvm::Value *CalleeTypeHash = Builder.CreateAlignedLoad( |
5779 | 0 | Int32Ty, |
5780 | 0 | Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 1), |
5781 | 0 | getPointerAlign()); |
5782 | 0 | llvm::Value *CalleeTypeHashMatch = |
5783 | 0 | Builder.CreateICmpEQ(CalleeTypeHash, TypeHash); |
5784 | 0 | llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()), |
5785 | 0 | EmitCheckTypeDescriptor(CalleeType)}; |
5786 | 0 | EmitCheck(std::make_pair(CalleeTypeHashMatch, SanitizerKind::Function), |
5787 | 0 | SanitizerHandler::FunctionTypeMismatch, StaticData, |
5788 | 0 | {CalleePtr}); |
5789 | |
|
5790 | 0 | Builder.CreateBr(Cont); |
5791 | 0 | EmitBlock(Cont); |
5792 | 0 | } |
5793 | 0 | } |
5794 | |
|
5795 | 0 | const auto *FnType = cast<FunctionType>(PointeeType); |
5796 | | |
5797 | | // If we are checking indirect calls and this call is indirect, check that the |
5798 | | // function pointer is a member of the bit set for the function type. |
5799 | 0 | if (SanOpts.has(SanitizerKind::CFIICall) && |
5800 | 0 | (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) { |
5801 | 0 | SanitizerScope SanScope(this); |
5802 | 0 | EmitSanitizerStatReport(llvm::SanStat_CFI_ICall); |
5803 | |
|
5804 | 0 | llvm::Metadata *MD; |
5805 | 0 | if (CGM.getCodeGenOpts().SanitizeCfiICallGeneralizePointers) |
5806 | 0 | MD = CGM.CreateMetadataIdentifierGeneralized(QualType(FnType, 0)); |
5807 | 0 | else |
5808 | 0 | MD = CGM.CreateMetadataIdentifierForType(QualType(FnType, 0)); |
5809 | |
|
5810 | 0 | llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD); |
5811 | |
|
5812 | 0 | llvm::Value *CalleePtr = Callee.getFunctionPointer(); |
5813 | 0 | llvm::Value *TypeTest = Builder.CreateCall( |
5814 | 0 | CGM.getIntrinsic(llvm::Intrinsic::type_test), {CalleePtr, TypeId}); |
5815 | |
|
5816 | 0 | auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD); |
5817 | 0 | llvm::Constant *StaticData[] = { |
5818 | 0 | llvm::ConstantInt::get(Int8Ty, CFITCK_ICall), |
5819 | 0 | EmitCheckSourceLocation(E->getBeginLoc()), |
5820 | 0 | EmitCheckTypeDescriptor(QualType(FnType, 0)), |
5821 | 0 | }; |
5822 | 0 | if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) { |
5823 | 0 | EmitCfiSlowPathCheck(SanitizerKind::CFIICall, TypeTest, CrossDsoTypeId, |
5824 | 0 | CalleePtr, StaticData); |
5825 | 0 | } else { |
5826 | 0 | EmitCheck(std::make_pair(TypeTest, SanitizerKind::CFIICall), |
5827 | 0 | SanitizerHandler::CFICheckFail, StaticData, |
5828 | 0 | {CalleePtr, llvm::UndefValue::get(IntPtrTy)}); |
5829 | 0 | } |
5830 | 0 | } |
5831 | |
|
5832 | 0 | CallArgList Args; |
5833 | 0 | if (Chain) |
5834 | 0 | Args.add(RValue::get(Chain), CGM.getContext().VoidPtrTy); |
5835 | | |
5836 | | // C++17 requires that we evaluate arguments to a call using assignment syntax |
5837 | | // right-to-left, and that we evaluate arguments to certain other operators |
5838 | | // left-to-right. Note that we allow this to override the order dictated by |
5839 | | // the calling convention on the MS ABI, which means that parameter |
5840 | | // destruction order is not necessarily reverse construction order. |
5841 | | // FIXME: Revisit this based on C++ committee response to unimplementability. |
5842 | 0 | EvaluationOrder Order = EvaluationOrder::Default; |
5843 | 0 | if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) { |
5844 | 0 | if (OCE->isAssignmentOp()) |
5845 | 0 | Order = EvaluationOrder::ForceRightToLeft; |
5846 | 0 | else { |
5847 | 0 | switch (OCE->getOperator()) { |
5848 | 0 | case OO_LessLess: |
5849 | 0 | case OO_GreaterGreater: |
5850 | 0 | case OO_AmpAmp: |
5851 | 0 | case OO_PipePipe: |
5852 | 0 | case OO_Comma: |
5853 | 0 | case OO_ArrowStar: |
5854 | 0 | Order = EvaluationOrder::ForceLeftToRight; |
5855 | 0 | break; |
5856 | 0 | default: |
5857 | 0 | break; |
5858 | 0 | } |
5859 | 0 | } |
5860 | 0 | } |
5861 | | |
5862 | 0 | EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), E->arguments(), |
5863 | 0 | E->getDirectCallee(), /*ParamsToSkip*/ 0, Order); |
5864 | |
|
5865 | 0 | const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall( |
5866 | 0 | Args, FnType, /*ChainCall=*/Chain); |
5867 | | |
5868 | | // C99 6.5.2.2p6: |
5869 | | // If the expression that denotes the called function has a type |
5870 | | // that does not include a prototype, [the default argument |
5871 | | // promotions are performed]. If the number of arguments does not |
5872 | | // equal the number of parameters, the behavior is undefined. If |
5873 | | // the function is defined with a type that includes a prototype, |
5874 | | // and either the prototype ends with an ellipsis (, ...) or the |
5875 | | // types of the arguments after promotion are not compatible with |
5876 | | // the types of the parameters, the behavior is undefined. If the |
5877 | | // function is defined with a type that does not include a |
5878 | | // prototype, and the types of the arguments after promotion are |
5879 | | // not compatible with those of the parameters after promotion, |
5880 | | // the behavior is undefined [except in some trivial cases]. |
5881 | | // That is, in the general case, we should assume that a call |
5882 | | // through an unprototyped function type works like a *non-variadic* |
5883 | | // call. The way we make this work is to cast to the exact type |
5884 | | // of the promoted arguments. |
5885 | | // |
5886 | | // Chain calls use this same code path to add the invisible chain parameter |
5887 | | // to the function type. |
5888 | 0 | if (isa<FunctionNoProtoType>(FnType) || Chain) { |
5889 | 0 | llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo); |
5890 | 0 | int AS = Callee.getFunctionPointer()->getType()->getPointerAddressSpace(); |
5891 | 0 | CalleeTy = CalleeTy->getPointerTo(AS); |
5892 | |
|
5893 | 0 | llvm::Value *CalleePtr = Callee.getFunctionPointer(); |
5894 | 0 | CalleePtr = Builder.CreateBitCast(CalleePtr, CalleeTy, "callee.knr.cast"); |
5895 | 0 | Callee.setFunctionPointer(CalleePtr); |
5896 | 0 | } |
5897 | | |
5898 | | // A HIP function pointer contains a kernel handle when it is used in a
5899 | | // triple-chevron launch. The kernel stub needs to be loaded from the
5900 | | // kernel handle and used as the callee.
5901 | 0 | if (CGM.getLangOpts().HIP && !CGM.getLangOpts().CUDAIsDevice && |
5902 | 0 | isa<CUDAKernelCallExpr>(E) && |
5903 | 0 | (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) { |
5904 | 0 | llvm::Value *Handle = Callee.getFunctionPointer(); |
5905 | 0 | auto *Stub = Builder.CreateLoad( |
5906 | 0 | Address(Handle, Handle->getType(), CGM.getPointerAlign())); |
5907 | 0 | Callee.setFunctionPointer(Stub); |
5908 | 0 | } |
5909 | 0 | llvm::CallBase *CallOrInvoke = nullptr; |
5910 | 0 | RValue Call = EmitCall(FnInfo, Callee, ReturnValue, Args, &CallOrInvoke, |
5911 | 0 | E == MustTailCall, E->getExprLoc()); |
5912 | | |
5913 | | // Generate a function declaration DISubprogram so it can be used in
5914 | | // debug info about call sites.
5915 | 0 | if (CGDebugInfo *DI = getDebugInfo()) { |
5916 | 0 | if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl)) { |
5917 | 0 | FunctionArgList Args; |
5918 | 0 | QualType ResTy = BuildFunctionArgList(CalleeDecl, Args); |
5919 | 0 | DI->EmitFuncDeclForCallSite(CallOrInvoke, |
5920 | 0 | DI->getFunctionType(CalleeDecl, ResTy, Args), |
5921 | 0 | CalleeDecl); |
5922 | 0 | } |
5923 | 0 | } |
5924 | |
|
5925 | 0 | return Call; |
5926 | 0 | } |
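
The C++17 sequencing rules EmitCall applies to operator calls, illustrated with hypothetical declarations:

    #include <ostream>
    int f();
    int g();
    int rhs();
    struct S { S &operator=(int); };
    S &lhs();
    void demo(std::ostream &os) {
      os << f() << g();  // OO_LessLess forces left-to-right: f() before g()
      lhs() = rhs();     // overloaded '=' forces right-to-left: rhs() first
    }
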
5927 | | |
5928 | | LValue CodeGenFunction:: |
5929 | 0 | EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) { |
5930 | 0 | Address BaseAddr = Address::invalid(); |
5931 | 0 | if (E->getOpcode() == BO_PtrMemI) { |
5932 | 0 | BaseAddr = EmitPointerWithAlignment(E->getLHS()); |
5933 | 0 | } else { |
5934 | 0 | BaseAddr = EmitLValue(E->getLHS()).getAddress(*this); |
5935 | 0 | } |
5936 | |
|
5937 | 0 | llvm::Value *OffsetV = EmitScalarExpr(E->getRHS()); |
5938 | 0 | const auto *MPT = E->getRHS()->getType()->castAs<MemberPointerType>(); |
5939 | |
|
5940 | 0 | LValueBaseInfo BaseInfo; |
5941 | 0 | TBAAAccessInfo TBAAInfo; |
5942 | 0 | Address MemberAddr = |
5943 | 0 | EmitCXXMemberDataPointerAddress(E, BaseAddr, OffsetV, MPT, &BaseInfo, |
5944 | 0 | &TBAAInfo); |
5945 | |
|
5946 | 0 | return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), BaseInfo, TBAAInfo); |
5947 | 0 | } |
5948 | | |
5949 | | /// Given the address of a temporary variable, produce an r-value of |
5950 | | /// its type. |
5951 | | RValue CodeGenFunction::convertTempToRValue(Address addr, |
5952 | | QualType type, |
5953 | 0 | SourceLocation loc) { |
5954 | 0 | LValue lvalue = MakeAddrLValue(addr, type, AlignmentSource::Decl); |
5955 | 0 | switch (getEvaluationKind(type)) { |
5956 | 0 | case TEK_Complex: |
5957 | 0 | return RValue::getComplex(EmitLoadOfComplex(lvalue, loc)); |
5958 | 0 | case TEK_Aggregate: |
5959 | 0 | return lvalue.asAggregateRValue(*this); |
5960 | 0 | case TEK_Scalar: |
5961 | 0 | return RValue::get(EmitLoadOfScalar(lvalue, loc)); |
5962 | 0 | } |
5963 | 0 | llvm_unreachable("bad evaluation kind"); |
5964 | 0 | } |
5965 | | |
5966 | 0 | void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) { |
5967 | 0 | assert(Val->getType()->isFPOrFPVectorTy()); |
5968 | 0 | if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val)) |
5969 | 0 | return; |
5970 | | |
5971 | 0 | llvm::MDBuilder MDHelper(getLLVMContext()); |
5972 | 0 | llvm::MDNode *Node = MDHelper.createFPMath(Accuracy); |
5973 | |
|
5974 | 0 | cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node); |
5975 | 0 | } |
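
A standalone sketch of the metadata this helper attaches; the wrapper function below is illustrative, not part of this file, and assumes an existing llvm::Instruction *I:

    #include "llvm/IR/Instruction.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/MDBuilder.h"

    void attachFPMath(llvm::Instruction *I) {
      llvm::MDBuilder MDB(I->getContext());
      // Equivalent of SetFPAccuracy(I, 2.5f); resulting IR:
      //   %quot = fdiv float %x, %y, !fpmath !0
      //   !0 = !{float 2.500000e+00}
      I->setMetadata(llvm::LLVMContext::MD_fpmath, MDB.createFPMath(2.5f));
    }
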
5976 | | |
5977 | 0 | void CodeGenFunction::SetSqrtFPAccuracy(llvm::Value *Val) { |
5978 | 0 | llvm::Type *EltTy = Val->getType()->getScalarType(); |
5979 | 0 | if (!EltTy->isFloatTy()) |
5980 | 0 | return; |
5981 | | |
5982 | 0 | if ((getLangOpts().OpenCL && |
5983 | 0 | !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) || |
5984 | 0 | (getLangOpts().HIP && getLangOpts().CUDAIsDevice && |
5985 | 0 | !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) { |
5986 | | // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 3ulp |
5987 | | // |
5988 | | // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt |
5989 | | // build option allows an application to specify that single precision |
5990 | | // floating-point divide (x/y and 1/x) and sqrt used in the program |
5991 | | // source are correctly rounded. |
5992 | | // |
5993 | | // TODO: CUDA has a prec-sqrt flag |
5994 | 0 | SetFPAccuracy(Val, 3.0f); |
5995 | 0 | } |
5996 | 0 | } |
5997 | | |
5998 | 0 | void CodeGenFunction::SetDivFPAccuracy(llvm::Value *Val) { |
5999 | 0 | llvm::Type *EltTy = Val->getType()->getScalarType(); |
6000 | 0 | if (!EltTy->isFloatTy()) |
6001 | 0 | return; |
6002 | | |
6003 | 0 | if ((getLangOpts().OpenCL && |
6004 | 0 | !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) || |
6005 | 0 | (getLangOpts().HIP && getLangOpts().CUDAIsDevice && |
6006 | 0 | !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) { |
6007 | | // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp |
6008 | | // |
6009 | | // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt |
6010 | | // build option allows an application to specify that single precision |
6011 | | // floating-point divide (x/y and 1/x) and sqrt used in the program |
6012 | | // source are correctly rounded. |
6013 | | // |
6014 | | // TODO: CUDA has a prec-div flag |
6015 | 0 | SetFPAccuracy(Val, 2.5f); |
6016 | 0 | } |
6017 | 0 | } |
6018 | | |
6019 | | namespace { |
6020 | | struct LValueOrRValue { |
6021 | | LValue LV; |
6022 | | RValue RV; |
6023 | | }; |
6024 | | } |
6025 | | |
6026 | | static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF, |
6027 | | const PseudoObjectExpr *E, |
6028 | | bool forLValue, |
6029 | 0 | AggValueSlot slot) { |
6030 | 0 | SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques; |
6031 | | |
6032 | | // Find the result expression, if any. |
6033 | 0 | const Expr *resultExpr = E->getResultExpr(); |
6034 | 0 | LValueOrRValue result; |
6035 | |
|
6036 | 0 | for (PseudoObjectExpr::const_semantics_iterator |
6037 | 0 | i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) { |
6038 | 0 | const Expr *semantic = *i; |
6039 | | |
6040 | | // If this semantic expression is an opaque value, bind it |
6041 | | // to the result of its source expression. |
6042 | 0 | if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) { |
6043 | | // Skip unique OVEs. |
6044 | 0 | if (ov->isUnique()) { |
6045 | 0 | assert(ov != resultExpr && |
6046 | 0 | "A unique OVE cannot be used as the result expression"); |
6047 | 0 | continue; |
6048 | 0 | } |
6049 | | |
6050 | | // If this is the result expression, we may need to evaluate |
6051 | | // directly into the slot. |
6052 | 0 | typedef CodeGenFunction::OpaqueValueMappingData OVMA; |
6053 | 0 | OVMA opaqueData; |
6054 | 0 | if (ov == resultExpr && ov->isPRValue() && !forLValue && |
6055 | 0 | CodeGenFunction::hasAggregateEvaluationKind(ov->getType())) { |
6056 | 0 | CGF.EmitAggExpr(ov->getSourceExpr(), slot); |
6057 | 0 | LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(), |
6058 | 0 | AlignmentSource::Decl); |
6059 | 0 | opaqueData = OVMA::bind(CGF, ov, LV); |
6060 | 0 | result.RV = slot.asRValue(); |
6061 | | |
6062 | | // Otherwise, emit as normal. |
6063 | 0 | } else { |
6064 | 0 | opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr()); |
6065 | | |
6066 | | // If this is the result, also evaluate the result now. |
6067 | 0 | if (ov == resultExpr) { |
6068 | 0 | if (forLValue) |
6069 | 0 | result.LV = CGF.EmitLValue(ov); |
6070 | 0 | else |
6071 | 0 | result.RV = CGF.EmitAnyExpr(ov, slot); |
6072 | 0 | } |
6073 | 0 | } |
6074 | |
|
6075 | 0 | opaques.push_back(opaqueData); |
6076 | | |
6077 | | // Otherwise, if the expression is the result, evaluate it |
6078 | | // and remember the result. |
6079 | 0 | } else if (semantic == resultExpr) { |
6080 | 0 | if (forLValue) |
6081 | 0 | result.LV = CGF.EmitLValue(semantic); |
6082 | 0 | else |
6083 | 0 | result.RV = CGF.EmitAnyExpr(semantic, slot); |
6084 | | |
6085 | | // Otherwise, evaluate the expression in an ignored context. |
6086 | 0 | } else { |
6087 | 0 | CGF.EmitIgnoredExpr(semantic); |
6088 | 0 | } |
6089 | 0 | } |
6090 | | |
6091 | | // Unbind all the opaques now. |
6092 | 0 | for (unsigned i = 0, e = opaques.size(); i != e; ++i) |
6093 | 0 | opaques[i].unbind(CGF); |
6094 | |
|
6095 | 0 | return result; |
6096 | 0 | } |
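
For orientation, a hypothetical Objective-C property increment and (roughly) the semantic form the loop above walks:

    // obj.count += 1;
    //
    //   OVE(obj)                       -- base bound once as an opaque value
    //   tmp = [OVE(obj) count]         -- getter (semantic expression)
    //   result: tmp + 1                -- the result expression, if used
    //   [OVE(obj) setCount: tmp + 1]   -- setter (semantic expression)
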
6097 | | |
6098 | | RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E, |
6099 | 0 | AggValueSlot slot) { |
6100 | 0 | return emitPseudoObjectExpr(*this, E, false, slot).RV; |
6101 | 0 | } |
6102 | | |
6103 | 0 | LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) { |
6104 | 0 | return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV; |
6105 | 0 | } |