/src/llvm-project/clang/lib/CodeGen/CGObjC.cpp
Line | Count | Source |
1 | | //===---- CGObjC.cpp - Emit LLVM Code for Objective-C ---------------------===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | // |
9 | | // This contains code to emit Objective-C code as LLVM code. |
10 | | // |
11 | | //===----------------------------------------------------------------------===// |
12 | | |
13 | | #include "CGDebugInfo.h" |
14 | | #include "CGObjCRuntime.h" |
15 | | #include "CodeGenFunction.h" |
16 | | #include "CodeGenModule.h" |
17 | | #include "ConstantEmitter.h" |
18 | | #include "TargetInfo.h" |
19 | | #include "clang/AST/ASTContext.h" |
20 | | #include "clang/AST/Attr.h" |
21 | | #include "clang/AST/DeclObjC.h" |
22 | | #include "clang/AST/StmtObjC.h" |
23 | | #include "clang/Basic/Diagnostic.h" |
24 | | #include "clang/CodeGen/CGFunctionInfo.h" |
25 | | #include "clang/CodeGen/CodeGenABITypes.h" |
26 | | #include "llvm/ADT/STLExtras.h" |
27 | | #include "llvm/Analysis/ObjCARCUtil.h" |
28 | | #include "llvm/BinaryFormat/MachO.h" |
29 | | #include "llvm/IR/Constants.h" |
30 | | #include "llvm/IR/DataLayout.h" |
31 | | #include "llvm/IR/InlineAsm.h" |
32 | | #include <optional> |
33 | | using namespace clang; |
34 | | using namespace CodeGen; |
35 | | |
36 | | typedef llvm::PointerIntPair<llvm::Value*,1,bool> TryEmitResult; |
37 | | static TryEmitResult |
38 | | tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e); |
39 | | static RValue AdjustObjCObjectType(CodeGenFunction &CGF, |
40 | | QualType ET, |
41 | | RValue Result); |
42 | | |
43 | | /// Given the address of a variable of pointer type, find the correct |
44 | | /// null to store into it. |
45 | 0 | static llvm::Constant *getNullForVariable(Address addr) { |
46 | 0 | llvm::Type *type = addr.getElementType(); |
47 | 0 | return llvm::ConstantPointerNull::get(cast<llvm::PointerType>(type)); |
48 | 0 | } |
49 | | |
50 | | /// Emits an instance of NSConstantString representing the object. |
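   | | // Editor's note (illustrative, not in the original source): a string
   | | // literal such as @"hello" compiles to a reference to a constant
   | | // NSConstantString object built at compile time, so no message send
   | | // or runtime allocation is needed.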
51 | | llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E) |
52 | 0 | { |
53 | 0 | llvm::Constant *C = |
54 | 0 | CGM.getObjCRuntime().GenerateConstantString(E->getString()).getPointer(); |
55 | 0 | return C; |
56 | 0 | } |
57 | | |
58 | | /// EmitObjCBoxedExpr - This routine generates code to call |
59 | | /// the appropriate expression boxing method. This will be one of
60 | | /// +[NSNumber numberWith<Type>:], +[NSString stringWithUTF8String:],
61 | | /// or +[NSValue valueWithBytes:objCType:].
62 | | /// |
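   | | // Editor's illustration (not in the original source): under this scheme,
   | | //   @3.14      lowers to  [NSNumber numberWithDouble:3.14]
   | | //   @("txt")   lowers to  [NSString stringWithUTF8String:"txt"]
   | | // and boxing an objc_boxable record, e.g. @(someRange) for an NSRange,
   | | // lowers to [NSValue valueWithBytes:&someRange objCType:@encode(NSRange)].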
63 | | llvm::Value * |
64 | 0 | CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) { |
65 | | // Generate the correct selector for this literal's concrete type. |
66 | | // Get the method. |
67 | 0 | const ObjCMethodDecl *BoxingMethod = E->getBoxingMethod(); |
68 | 0 | const Expr *SubExpr = E->getSubExpr(); |
69 | |
70 | 0 | if (E->isExpressibleAsConstantInitializer()) { |
71 | 0 | ConstantEmitter ConstEmitter(CGM); |
72 | 0 | return ConstEmitter.tryEmitAbstract(E, E->getType()); |
73 | 0 | } |
74 | | |
75 | 0 | assert(BoxingMethod->isClassMethod() && "BoxingMethod must be a class method"); |
76 | 0 | Selector Sel = BoxingMethod->getSelector(); |
77 | | |
78 | | // Generate a reference to the class pointer, which will be the receiver. |
79 | | // Assumes that the method was introduced in the class that should be |
80 | | // messaged (avoids pulling it out of the result type). |
81 | 0 | CGObjCRuntime &Runtime = CGM.getObjCRuntime(); |
82 | 0 | const ObjCInterfaceDecl *ClassDecl = BoxingMethod->getClassInterface(); |
83 | 0 | llvm::Value *Receiver = Runtime.GetClass(*this, ClassDecl); |
84 | |
85 | 0 | CallArgList Args; |
86 | 0 | const ParmVarDecl *ArgDecl = *BoxingMethod->param_begin(); |
87 | 0 | QualType ArgQT = ArgDecl->getType().getUnqualifiedType(); |
88 | | |
89 | | // ObjCBoxedExpr supports boxing of structs and unions |
90 | | // via [NSValue valueWithBytes:objCType:] |
91 | 0 | const QualType ValueType(SubExpr->getType().getCanonicalType()); |
92 | 0 | if (ValueType->isObjCBoxableRecordType()) { |
93 | | // Emit CodeGen for first parameter |
94 | | // and cast value to correct type |
95 | 0 | Address Temporary = CreateMemTemp(SubExpr->getType()); |
96 | 0 | EmitAnyExprToMem(SubExpr, Temporary, Qualifiers(), /*isInit*/ true); |
97 | 0 | llvm::Value *BitCast = |
98 | 0 | Builder.CreateBitCast(Temporary.getPointer(), ConvertType(ArgQT)); |
99 | 0 | Args.add(RValue::get(BitCast), ArgQT); |
100 | | |
101 | | // Create char array to store type encoding |
102 | 0 | std::string Str; |
103 | 0 | getContext().getObjCEncodingForType(ValueType, Str); |
104 | 0 | llvm::Constant *GV = CGM.GetAddrOfConstantCString(Str).getPointer(); |
105 | | |
106 | | // Cast type encoding to correct type |
107 | 0 | const ParmVarDecl *EncodingDecl = BoxingMethod->parameters()[1]; |
108 | 0 | QualType EncodingQT = EncodingDecl->getType().getUnqualifiedType(); |
109 | 0 | llvm::Value *Cast = Builder.CreateBitCast(GV, ConvertType(EncodingQT)); |
110 | |
111 | 0 | Args.add(RValue::get(Cast), EncodingQT); |
112 | 0 | } else { |
113 | 0 | Args.add(EmitAnyExpr(SubExpr), ArgQT); |
114 | 0 | } |
115 | |
116 | 0 | RValue result = Runtime.GenerateMessageSend( |
117 | 0 | *this, ReturnValueSlot(), BoxingMethod->getReturnType(), Sel, Receiver, |
118 | 0 | Args, ClassDecl, BoxingMethod); |
119 | 0 | return Builder.CreateBitCast(result.getScalarVal(), |
120 | 0 | ConvertType(E->getType())); |
121 | 0 | } |
122 | | |
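   | | // Editor's sketch of what this function lowers (derived from the code
   | | // below; `objects`/`keys` are the stack temporaries it allocates):
   | | //   @[a, b]   ->  [NSArray arrayWithObjects:objects count:2]
   | | //   @{k: v}   ->  [NSDictionary dictionaryWithObjects:objects
   | | //                                             forKeys:keys count:1]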
123 | | llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E, |
124 | 0 | const ObjCMethodDecl *MethodWithObjects) { |
125 | 0 | ASTContext &Context = CGM.getContext(); |
126 | 0 | const ObjCDictionaryLiteral *DLE = nullptr; |
127 | 0 | const ObjCArrayLiteral *ALE = dyn_cast<ObjCArrayLiteral>(E); |
128 | 0 | if (!ALE) |
129 | 0 | DLE = cast<ObjCDictionaryLiteral>(E); |
130 | | |
131 | | // Optimize empty collections by referencing constants, when available. |
132 | 0 | uint64_t NumElements = |
133 | 0 | ALE ? ALE->getNumElements() : DLE->getNumElements(); |
134 | 0 | if (NumElements == 0 && CGM.getLangOpts().ObjCRuntime.hasEmptyCollections()) { |
135 | 0 | StringRef ConstantName = ALE ? "__NSArray0__" : "__NSDictionary0__"; |
136 | 0 | QualType IdTy(CGM.getContext().getObjCIdType()); |
137 | 0 | llvm::Constant *Constant = |
138 | 0 | CGM.CreateRuntimeVariable(ConvertType(IdTy), ConstantName); |
139 | 0 | LValue LV = MakeNaturalAlignAddrLValue(Constant, IdTy); |
140 | 0 | llvm::Value *Ptr = EmitLoadOfScalar(LV, E->getBeginLoc()); |
141 | 0 | cast<llvm::LoadInst>(Ptr)->setMetadata( |
142 | 0 | llvm::LLVMContext::MD_invariant_load, |
143 | 0 | llvm::MDNode::get(getLLVMContext(), std::nullopt)); |
144 | 0 | return Builder.CreateBitCast(Ptr, ConvertType(E->getType())); |
145 | 0 | } |
146 | | |
147 | | // Compute the type of the array we're initializing. |
148 | 0 | llvm::APInt APNumElements(Context.getTypeSize(Context.getSizeType()), |
149 | 0 | NumElements); |
150 | 0 | QualType ElementType = Context.getObjCIdType().withConst(); |
151 | 0 | QualType ElementArrayType = Context.getConstantArrayType( |
152 | 0 | ElementType, APNumElements, nullptr, ArraySizeModifier::Normal, |
153 | 0 | /*IndexTypeQuals=*/0); |
154 | | |
155 | | // Allocate the temporary array(s). |
156 | 0 | Address Objects = CreateMemTemp(ElementArrayType, "objects"); |
157 | 0 | Address Keys = Address::invalid(); |
158 | 0 | if (DLE) |
159 | 0 | Keys = CreateMemTemp(ElementArrayType, "keys"); |
160 | | |
161 | | // In ARC, we may need to do extra work to keep all the keys and |
162 | | // values alive until after the call. |
163 | 0 | SmallVector<llvm::Value *, 16> NeededObjects; |
164 | 0 | bool TrackNeededObjects = |
165 | 0 | (getLangOpts().ObjCAutoRefCount && |
166 | 0 | CGM.getCodeGenOpts().OptimizationLevel != 0); |
167 | | |
168 | | // Perform the actual initialization of the array(s).
169 | 0 | for (uint64_t i = 0; i < NumElements; i++) { |
170 | 0 | if (ALE) { |
171 | | // Emit the element and store it to the appropriate array slot. |
172 | 0 | const Expr *Rhs = ALE->getElement(i); |
173 | 0 | LValue LV = MakeAddrLValue(Builder.CreateConstArrayGEP(Objects, i), |
174 | 0 | ElementType, AlignmentSource::Decl); |
175 | |
176 | 0 | llvm::Value *value = EmitScalarExpr(Rhs); |
177 | 0 | EmitStoreThroughLValue(RValue::get(value), LV, true); |
178 | 0 | if (TrackNeededObjects) { |
179 | 0 | NeededObjects.push_back(value); |
180 | 0 | } |
181 | 0 | } else { |
182 | | // Emit the key and store it to the appropriate array slot. |
183 | 0 | const Expr *Key = DLE->getKeyValueElement(i).Key; |
184 | 0 | LValue KeyLV = MakeAddrLValue(Builder.CreateConstArrayGEP(Keys, i), |
185 | 0 | ElementType, AlignmentSource::Decl); |
186 | 0 | llvm::Value *keyValue = EmitScalarExpr(Key); |
187 | 0 | EmitStoreThroughLValue(RValue::get(keyValue), KeyLV, /*isInit=*/true); |
188 | | |
189 | | // Emit the value and store it to the appropriate array slot. |
190 | 0 | const Expr *Value = DLE->getKeyValueElement(i).Value; |
191 | 0 | LValue ValueLV = MakeAddrLValue(Builder.CreateConstArrayGEP(Objects, i), |
192 | 0 | ElementType, AlignmentSource::Decl); |
193 | 0 | llvm::Value *valueValue = EmitScalarExpr(Value); |
194 | 0 | EmitStoreThroughLValue(RValue::get(valueValue), ValueLV, /*isInit=*/true); |
195 | 0 | if (TrackNeededObjects) { |
196 | 0 | NeededObjects.push_back(keyValue); |
197 | 0 | NeededObjects.push_back(valueValue); |
198 | 0 | } |
199 | 0 | } |
200 | 0 | } |
201 | | |
202 | | // Generate the argument list. |
203 | 0 | CallArgList Args; |
204 | 0 | ObjCMethodDecl::param_const_iterator PI = MethodWithObjects->param_begin(); |
205 | 0 | const ParmVarDecl *argDecl = *PI++; |
206 | 0 | QualType ArgQT = argDecl->getType().getUnqualifiedType(); |
207 | 0 | Args.add(RValue::get(Objects.getPointer()), ArgQT); |
208 | 0 | if (DLE) { |
209 | 0 | argDecl = *PI++; |
210 | 0 | ArgQT = argDecl->getType().getUnqualifiedType(); |
211 | 0 | Args.add(RValue::get(Keys.getPointer()), ArgQT); |
212 | 0 | } |
213 | 0 | argDecl = *PI; |
214 | 0 | ArgQT = argDecl->getType().getUnqualifiedType(); |
215 | 0 | llvm::Value *Count = |
216 | 0 | llvm::ConstantInt::get(CGM.getTypes().ConvertType(ArgQT), NumElements); |
217 | 0 | Args.add(RValue::get(Count), ArgQT); |
218 | | |
219 | | // Generate a reference to the class pointer, which will be the receiver. |
220 | 0 | Selector Sel = MethodWithObjects->getSelector(); |
221 | 0 | QualType ResultType = E->getType(); |
222 | 0 | const ObjCObjectPointerType *InterfacePointerType |
223 | 0 | = ResultType->getAsObjCInterfacePointerType(); |
224 | 0 | assert(InterfacePointerType && "Unexpected InterfacePointerType - null"); |
225 | 0 | ObjCInterfaceDecl *Class |
226 | 0 | = InterfacePointerType->getObjectType()->getInterface(); |
227 | 0 | CGObjCRuntime &Runtime = CGM.getObjCRuntime(); |
228 | 0 | llvm::Value *Receiver = Runtime.GetClass(*this, Class); |
229 | | |
230 | | // Generate the message send. |
231 | 0 | RValue result = Runtime.GenerateMessageSend( |
232 | 0 | *this, ReturnValueSlot(), MethodWithObjects->getReturnType(), Sel, |
233 | 0 | Receiver, Args, Class, MethodWithObjects); |
234 | | |
235 | | // The above message send needs these objects, but in ARC they are |
236 | | // passed in a buffer that is essentially __unsafe_unretained. |
237 | | // Therefore we must prevent the optimizer from releasing them until |
238 | | // after the call. |
239 | 0 | if (TrackNeededObjects) { |
240 | 0 | EmitARCIntrinsicUse(NeededObjects); |
241 | 0 | } |
242 | |
243 | 0 | return Builder.CreateBitCast(result.getScalarVal(), |
244 | 0 | ConvertType(E->getType())); |
245 | 0 | } |
246 | | |
247 | 0 | llvm::Value *CodeGenFunction::EmitObjCArrayLiteral(const ObjCArrayLiteral *E) { |
248 | 0 | return EmitObjCCollectionLiteral(E, E->getArrayWithObjectsMethod()); |
249 | 0 | } |
250 | | |
251 | | llvm::Value *CodeGenFunction::EmitObjCDictionaryLiteral( |
252 | 0 | const ObjCDictionaryLiteral *E) { |
253 | 0 | return EmitObjCCollectionLiteral(E, E->getDictWithObjectsMethod()); |
254 | 0 | } |
255 | | |
256 | | /// Emit a selector. |
257 | 0 | llvm::Value *CodeGenFunction::EmitObjCSelectorExpr(const ObjCSelectorExpr *E) { |
258 | | // Untyped selector. |
259 | | // Note that this implementation allows for non-constant strings to be passed |
260 | | // as arguments to @selector(). Currently, the only thing preventing this |
261 | | // behaviour is the type checking in the front end. |
262 | 0 | return CGM.getObjCRuntime().GetSelector(*this, E->getSelector()); |
263 | 0 | } |
264 | | |
265 | 0 | llvm::Value *CodeGenFunction::EmitObjCProtocolExpr(const ObjCProtocolExpr *E) { |
266 | | // FIXME: This should pass the Decl not the name. |
267 | 0 | return CGM.getObjCRuntime().GenerateProtocolRef(*this, E->getProtocol()); |
268 | 0 | } |
269 | | |
270 | | /// Adjust the type of an Objective-C object that doesn't match up due |
271 | | /// to type erasure at various points, e.g., related result types or the use |
272 | | /// of parameterized classes. |
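   | | // Example (editor's addition): `[[NSMutableArray alloc] init]` is typed
   | | // through -init's related result type, so the call may be emitted with an
   | | // `id`-typed result while the expression's static type is
   | | // `NSMutableArray *`; the bitcast below reconciles the two LLVM types.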
273 | | static RValue AdjustObjCObjectType(CodeGenFunction &CGF, QualType ExpT, |
274 | 0 | RValue Result) { |
275 | 0 | if (!ExpT->isObjCRetainableType()) |
276 | 0 | return Result; |
277 | | |
278 | | // If the converted types are the same, we're done. |
279 | 0 | llvm::Type *ExpLLVMTy = CGF.ConvertType(ExpT); |
280 | 0 | if (ExpLLVMTy == Result.getScalarVal()->getType()) |
281 | 0 | return Result; |
282 | | |
283 | | // We have applied a substitution. Cast the rvalue appropriately. |
284 | 0 | return RValue::get(CGF.Builder.CreateBitCast(Result.getScalarVal(), |
285 | 0 | ExpLLVMTy)); |
286 | 0 | } |
287 | | |
288 | | /// Decide whether to extend the lifetime of the receiver of a |
289 | | /// returns-inner-pointer message. |
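   | | // Editor's illustration (assuming -UTF8String carries
   | | // objc_returns_inner_pointer, as it does in Foundation): in
   | | //   const char *p = [[self name] UTF8String];
   | | // the receiver must outlive uses of `p`, so it is retained+autoreleased
   | | // unless it already has precise lifetime.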
290 | | static bool |
291 | 0 | shouldExtendReceiverForInnerPointerMessage(const ObjCMessageExpr *message) { |
292 | 0 | switch (message->getReceiverKind()) { |
293 | | |
294 | | // For a normal instance message, we should extend unless the |
295 | | // receiver is loaded from a variable with precise lifetime. |
296 | 0 | case ObjCMessageExpr::Instance: { |
297 | 0 | const Expr *receiver = message->getInstanceReceiver(); |
298 | | |
299 | | // Look through OVEs. |
300 | 0 | if (auto opaque = dyn_cast<OpaqueValueExpr>(receiver)) { |
301 | 0 | if (opaque->getSourceExpr()) |
302 | 0 | receiver = opaque->getSourceExpr()->IgnoreParens(); |
303 | 0 | } |
304 | |
305 | 0 | const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(receiver); |
306 | 0 | if (!ice || ice->getCastKind() != CK_LValueToRValue) return true; |
307 | 0 | receiver = ice->getSubExpr()->IgnoreParens(); |
308 | | |
309 | | // Look through OVEs. |
310 | 0 | if (auto opaque = dyn_cast<OpaqueValueExpr>(receiver)) { |
311 | 0 | if (opaque->getSourceExpr()) |
312 | 0 | receiver = opaque->getSourceExpr()->IgnoreParens(); |
313 | 0 | } |
314 | | |
315 | | // Only __strong variables. |
316 | 0 | if (receiver->getType().getObjCLifetime() != Qualifiers::OCL_Strong) |
317 | 0 | return true; |
318 | | |
319 | | // All ivars and fields have precise lifetime. |
320 | 0 | if (isa<MemberExpr>(receiver) || isa<ObjCIvarRefExpr>(receiver)) |
321 | 0 | return false; |
322 | | |
323 | | // Otherwise, check for variables. |
324 | 0 | const DeclRefExpr *declRef = dyn_cast<DeclRefExpr>(ice->getSubExpr()); |
325 | 0 | if (!declRef) return true; |
326 | 0 | const VarDecl *var = dyn_cast<VarDecl>(declRef->getDecl()); |
327 | 0 | if (!var) return true; |
328 | | |
329 | | // All variables have precise lifetime except local variables with |
330 | | // automatic storage duration that aren't specially marked. |
331 | 0 | return (var->hasLocalStorage() && |
332 | 0 | !var->hasAttr<ObjCPreciseLifetimeAttr>()); |
333 | 0 | } |
334 | | |
335 | 0 | case ObjCMessageExpr::Class: |
336 | 0 | case ObjCMessageExpr::SuperClass: |
337 | | // It's never necessary for class objects. |
338 | 0 | return false; |
339 | | |
340 | 0 | case ObjCMessageExpr::SuperInstance: |
341 | | // We generally assume that 'self' lives throughout a method call. |
342 | 0 | return false; |
343 | 0 | } |
344 | | |
345 | 0 | llvm_unreachable("invalid receiver kind"); |
346 | 0 | } |
347 | | |
348 | | /// Given an expression of ObjC pointer type, check whether it was |
349 | | /// immediately loaded from an ARC __weak l-value. |
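   | | // Editor's note: this feeds the peephole in EmitObjCMessageExpr, where a
   | | // retain of a value just loaded from a __weak variable is folded into a
   | | // single objc_loadWeakRetained call instead of a load followed by a retain.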
350 | 0 | static const Expr *findWeakLValue(const Expr *E) { |
351 | 0 | assert(E->getType()->isObjCRetainableType()); |
352 | 0 | E = E->IgnoreParens(); |
353 | 0 | if (auto CE = dyn_cast<CastExpr>(E)) { |
354 | 0 | if (CE->getCastKind() == CK_LValueToRValue) { |
355 | 0 | if (CE->getSubExpr()->getType().getObjCLifetime() == Qualifiers::OCL_Weak) |
356 | 0 | return CE->getSubExpr(); |
357 | 0 | } |
358 | 0 | } |
359 | | |
360 | 0 | return nullptr; |
361 | 0 | } |
362 | | |
363 | | /// The ObjC runtime may provide entrypoints that are likely to be faster |
364 | | /// than an ordinary message send of the appropriate selector. |
365 | | /// |
366 | | /// The entrypoints are guaranteed to be equivalent to just sending the |
367 | | /// corresponding message. If the entrypoint is implemented naively as just a |
368 | | /// message send, using it is a trade-off: it sacrifices a few cycles of |
369 | | /// overhead to save a small amount of code. However, it's possible for |
370 | | /// runtimes to detect and special-case classes that use "standard" |
371 | | /// behavior; if that's dynamically a large proportion of all objects, using |
372 | | /// the entrypoint will also be faster than using a message send. |
373 | | /// |
374 | | /// If the runtime does support a required entrypoint, then this method will |
375 | | /// generate a call and return the resulting value. Otherwise it will return |
376 | | /// std::nullopt and the caller can generate a msgSend instead. |
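   | | // Editor's summary of the lowerings handled below, when the target runtime
   | | // supports them:
   | | //   [Foo alloc]              ->  objc_alloc(Foo)
   | | //   [Foo allocWithZone:nil]  ->  objc_allocWithZone(Foo)
   | | //   [obj retain]             ->  objc_retain(obj)
   | | //   [obj release]            ->  objc_release(obj)
   | | //   [obj autorelease]        ->  objc_autorelease(obj)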
377 | | static std::optional<llvm::Value *> tryGenerateSpecializedMessageSend( |
378 | | CodeGenFunction &CGF, QualType ResultType, llvm::Value *Receiver, |
379 | | const CallArgList &Args, Selector Sel, const ObjCMethodDecl *method, |
380 | 0 | bool isClassMessage) { |
381 | 0 | auto &CGM = CGF.CGM; |
382 | 0 | if (!CGM.getCodeGenOpts().ObjCConvertMessagesToRuntimeCalls) |
383 | 0 | return std::nullopt; |
384 | | |
385 | 0 | auto &Runtime = CGM.getLangOpts().ObjCRuntime; |
386 | 0 | switch (Sel.getMethodFamily()) { |
387 | 0 | case OMF_alloc: |
388 | 0 | if (isClassMessage && |
389 | 0 | Runtime.shouldUseRuntimeFunctionsForAlloc() && |
390 | 0 | ResultType->isObjCObjectPointerType()) { |
391 | | // [Foo alloc] -> objc_alloc(Foo) or |
392 | | // [self alloc] -> objc_alloc(self) |
393 | 0 | if (Sel.isUnarySelector() && Sel.getNameForSlot(0) == "alloc") |
394 | 0 | return CGF.EmitObjCAlloc(Receiver, CGF.ConvertType(ResultType)); |
395 | | // [Foo allocWithZone:nil] -> objc_allocWithZone(Foo) or |
396 | | // [self allocWithZone:nil] -> objc_allocWithZone(self) |
397 | 0 | if (Sel.isKeywordSelector() && Sel.getNumArgs() == 1 && |
398 | 0 | Args.size() == 1 && Args.front().getType()->isPointerType() && |
399 | 0 | Sel.getNameForSlot(0) == "allocWithZone") { |
400 | 0 | const llvm::Value* arg = Args.front().getKnownRValue().getScalarVal(); |
401 | 0 | if (isa<llvm::ConstantPointerNull>(arg)) |
402 | 0 | return CGF.EmitObjCAllocWithZone(Receiver, |
403 | 0 | CGF.ConvertType(ResultType)); |
404 | 0 | return std::nullopt; |
405 | 0 | } |
406 | 0 | } |
407 | 0 | break; |
408 | | |
409 | 0 | case OMF_autorelease: |
410 | 0 | if (ResultType->isObjCObjectPointerType() && |
411 | 0 | CGM.getLangOpts().getGC() == LangOptions::NonGC && |
412 | 0 | Runtime.shouldUseARCFunctionsForRetainRelease()) |
413 | 0 | return CGF.EmitObjCAutorelease(Receiver, CGF.ConvertType(ResultType)); |
414 | 0 | break; |
415 | | |
416 | 0 | case OMF_retain: |
417 | 0 | if (ResultType->isObjCObjectPointerType() && |
418 | 0 | CGM.getLangOpts().getGC() == LangOptions::NonGC && |
419 | 0 | Runtime.shouldUseARCFunctionsForRetainRelease()) |
420 | 0 | return CGF.EmitObjCRetainNonBlock(Receiver, CGF.ConvertType(ResultType)); |
421 | 0 | break; |
422 | | |
423 | 0 | case OMF_release: |
424 | 0 | if (ResultType->isVoidType() && |
425 | 0 | CGM.getLangOpts().getGC() == LangOptions::NonGC && |
426 | 0 | Runtime.shouldUseARCFunctionsForRetainRelease()) { |
427 | 0 | CGF.EmitObjCRelease(Receiver, ARCPreciseLifetime); |
428 | 0 | return nullptr; |
429 | 0 | } |
430 | 0 | break; |
431 | | |
432 | 0 | default: |
433 | 0 | break; |
434 | 0 | } |
435 | 0 | return std::nullopt; |
436 | 0 | } |
437 | | |
438 | | CodeGen::RValue CGObjCRuntime::GeneratePossiblySpecializedMessageSend( |
439 | | CodeGenFunction &CGF, ReturnValueSlot Return, QualType ResultType, |
440 | | Selector Sel, llvm::Value *Receiver, const CallArgList &Args, |
441 | | const ObjCInterfaceDecl *OID, const ObjCMethodDecl *Method, |
442 | 0 | bool isClassMessage) { |
443 | 0 | if (std::optional<llvm::Value *> SpecializedResult = |
444 | 0 | tryGenerateSpecializedMessageSend(CGF, ResultType, Receiver, Args, |
445 | 0 | Sel, Method, isClassMessage)) { |
446 | 0 | return RValue::get(*SpecializedResult); |
447 | 0 | } |
448 | 0 | return GenerateMessageSend(CGF, Return, ResultType, Sel, Receiver, Args, OID, |
449 | 0 | Method); |
450 | 0 | } |
451 | | |
452 | | static void AppendFirstImpliedRuntimeProtocols( |
453 | | const ObjCProtocolDecl *PD, |
454 | 0 | llvm::UniqueVector<const ObjCProtocolDecl *> &PDs) { |
455 | 0 | if (!PD->isNonRuntimeProtocol()) { |
456 | 0 | const auto *Can = PD->getCanonicalDecl(); |
457 | 0 | PDs.insert(Can); |
458 | 0 | return; |
459 | 0 | } |
460 | | |
461 | 0 | for (const auto *ParentPD : PD->protocols()) |
462 | 0 | AppendFirstImpliedRuntimeProtocols(ParentPD, PDs); |
463 | 0 | } |
464 | | |
465 | | std::vector<const ObjCProtocolDecl *> |
466 | | CGObjCRuntime::GetRuntimeProtocolList(ObjCProtocolDecl::protocol_iterator begin, |
467 | 0 | ObjCProtocolDecl::protocol_iterator end) { |
468 | 0 | std::vector<const ObjCProtocolDecl *> RuntimePds; |
469 | 0 | llvm::DenseSet<const ObjCProtocolDecl *> NonRuntimePDs; |
470 | |
471 | 0 | for (; begin != end; ++begin) { |
472 | 0 | const auto *It = *begin; |
473 | 0 | const auto *Can = It->getCanonicalDecl(); |
474 | 0 | if (Can->isNonRuntimeProtocol()) |
475 | 0 | NonRuntimePDs.insert(Can); |
476 | 0 | else |
477 | 0 | RuntimePds.push_back(Can); |
478 | 0 | } |
479 | | |
480 | | // If there are no non-runtime protocols then we can just stop now. |
481 | 0 | if (NonRuntimePDs.empty()) |
482 | 0 | return RuntimePds; |
483 | | |
484 | | // Else we have to search through the non-runtime protocol's inheritance
485 | | // hierarchy DAG, stopping whenever a branch either finds a runtime protocol or
486 | | // a non-runtime protocol without any parents. These are the "first-implied" |
487 | | // protocols from a non-runtime protocol. |
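   | | // Illustration (editor's addition): if non-runtime protocol P inherits
   | | // from runtime protocols A and B, then {A, B} are P's first-implied
   | | // protocols and stand in for P in the emitted protocol list.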
488 | 0 | llvm::UniqueVector<const ObjCProtocolDecl *> FirstImpliedProtos; |
489 | 0 | for (const auto *PD : NonRuntimePDs) |
490 | 0 | AppendFirstImpliedRuntimeProtocols(PD, FirstImpliedProtos); |
491 | | |
492 | | // Walk the Runtime list to get all protocols implied via the inclusion of |
493 | | // this protocol, i.e. all protocols it inherits from, including itself.
494 | 0 | llvm::DenseSet<const ObjCProtocolDecl *> AllImpliedProtocols; |
495 | 0 | for (const auto *PD : RuntimePds) { |
496 | 0 | const auto *Can = PD->getCanonicalDecl(); |
497 | 0 | AllImpliedProtocols.insert(Can); |
498 | 0 | Can->getImpliedProtocols(AllImpliedProtocols); |
499 | 0 | } |
500 | | |
501 | | // Similar to above, walk the list of first-implied protocols to find the set |
502 | | // of all the protocols implied, excluding the listed protocols themselves since
503 | | // they are not yet a part of the `RuntimePds` list. |
504 | 0 | for (const auto *PD : FirstImpliedProtos) { |
505 | 0 | PD->getImpliedProtocols(AllImpliedProtocols); |
506 | 0 | } |
507 | | |
508 | | // From the first-implied list we have to finish building the final protocol |
509 | | // list. If a protocol in the first-implied list was already implied via some |
510 | | // inheritance path through some other protocols then it would be redundant to |
511 | | // add it here and so we skip over it. |
512 | 0 | for (const auto *PD : FirstImpliedProtos) { |
513 | 0 | if (!AllImpliedProtocols.contains(PD)) { |
514 | 0 | RuntimePds.push_back(PD); |
515 | 0 | } |
516 | 0 | } |
517 | |
518 | 0 | return RuntimePds; |
519 | 0 | } |
520 | | |
521 | | /// Instead of '[[MyClass alloc] init]', try to generate |
522 | | /// 'objc_alloc_init(MyClass)'. This provides a code size improvement on the |
523 | | /// caller side, as well as the optimized objc_alloc. |
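   | | // Editor's note: the pattern match below is deliberately exact; for
   | | // example, [[MyClass alloc] initWithFoo:x] or an alloc/init pair sent to
   | | // `super` falls through to the generic message-send path.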
524 | | static std::optional<llvm::Value *> |
525 | 0 | tryEmitSpecializedAllocInit(CodeGenFunction &CGF, const ObjCMessageExpr *OME) { |
526 | 0 | auto &Runtime = CGF.getLangOpts().ObjCRuntime; |
527 | 0 | if (!Runtime.shouldUseRuntimeFunctionForCombinedAllocInit()) |
528 | 0 | return std::nullopt; |
529 | | |
530 | | // Match the exact pattern '[[MyClass alloc] init]'. |
531 | 0 | Selector Sel = OME->getSelector(); |
532 | 0 | if (OME->getReceiverKind() != ObjCMessageExpr::Instance || |
533 | 0 | !OME->getType()->isObjCObjectPointerType() || !Sel.isUnarySelector() || |
534 | 0 | Sel.getNameForSlot(0) != "init") |
535 | 0 | return std::nullopt; |
536 | | |
537 | | // Okay, this is '[receiver init]', check if 'receiver' is '[cls alloc]' |
538 | | // with 'cls' a Class. |
539 | 0 | auto *SubOME = |
540 | 0 | dyn_cast<ObjCMessageExpr>(OME->getInstanceReceiver()->IgnoreParenCasts()); |
541 | 0 | if (!SubOME) |
542 | 0 | return std::nullopt; |
543 | 0 | Selector SubSel = SubOME->getSelector(); |
544 | |
545 | 0 | if (!SubOME->getType()->isObjCObjectPointerType() || |
546 | 0 | !SubSel.isUnarySelector() || SubSel.getNameForSlot(0) != "alloc") |
547 | 0 | return std::nullopt; |
548 | | |
549 | 0 | llvm::Value *Receiver = nullptr; |
550 | 0 | switch (SubOME->getReceiverKind()) { |
551 | 0 | case ObjCMessageExpr::Instance: |
552 | 0 | if (!SubOME->getInstanceReceiver()->getType()->isObjCClassType()) |
553 | 0 | return std::nullopt; |
554 | 0 | Receiver = CGF.EmitScalarExpr(SubOME->getInstanceReceiver()); |
555 | 0 | break; |
556 | | |
557 | 0 | case ObjCMessageExpr::Class: { |
558 | 0 | QualType ReceiverType = SubOME->getClassReceiver(); |
559 | 0 | const ObjCObjectType *ObjTy = ReceiverType->castAs<ObjCObjectType>(); |
560 | 0 | const ObjCInterfaceDecl *ID = ObjTy->getInterface(); |
561 | 0 | assert(ID && "null interface should be impossible here"); |
562 | 0 | Receiver = CGF.CGM.getObjCRuntime().GetClass(CGF, ID); |
563 | 0 | break; |
564 | 0 | } |
565 | 0 | case ObjCMessageExpr::SuperInstance: |
566 | 0 | case ObjCMessageExpr::SuperClass: |
567 | 0 | return std::nullopt; |
568 | 0 | } |
569 | | |
570 | 0 | return CGF.EmitObjCAllocInit(Receiver, CGF.ConvertType(OME->getType())); |
571 | 0 | } |
572 | | |
573 | | RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E, |
574 | 0 | ReturnValueSlot Return) { |
575 | | // Only the lookup mechanism and first two arguments of the method |
576 | | // implementation vary between runtimes. We can get the receiver and |
577 | | // arguments in generic code. |
578 | |
579 | 0 | bool isDelegateInit = E->isDelegateInitCall(); |
580 | |
581 | 0 | const ObjCMethodDecl *method = E->getMethodDecl(); |
582 | | |
583 | | // If the method is -retain, and the receiver's being loaded from |
584 | | // a __weak variable, peephole the entire operation to objc_loadWeakRetained. |
585 | 0 | if (method && E->getReceiverKind() == ObjCMessageExpr::Instance && |
586 | 0 | method->getMethodFamily() == OMF_retain) { |
587 | 0 | if (auto lvalueExpr = findWeakLValue(E->getInstanceReceiver())) { |
588 | 0 | LValue lvalue = EmitLValue(lvalueExpr); |
589 | 0 | llvm::Value *result = EmitARCLoadWeakRetained(lvalue.getAddress(*this)); |
590 | 0 | return AdjustObjCObjectType(*this, E->getType(), RValue::get(result)); |
591 | 0 | } |
592 | 0 | } |
593 | | |
594 | 0 | if (std::optional<llvm::Value *> Val = tryEmitSpecializedAllocInit(*this, E)) |
595 | 0 | return AdjustObjCObjectType(*this, E->getType(), RValue::get(*Val)); |
596 | | |
597 | | // We don't retain the receiver in delegate init calls, and this is |
598 | | // safe because the receiver value is always loaded from 'self', |
599 | | // which we zero out. We don't want to Block_copy block receivers, |
600 | | // though. |
601 | 0 | bool retainSelf = |
602 | 0 | (!isDelegateInit && |
603 | 0 | CGM.getLangOpts().ObjCAutoRefCount && |
604 | 0 | method && |
605 | 0 | method->hasAttr<NSConsumesSelfAttr>()); |
606 | |
607 | 0 | CGObjCRuntime &Runtime = CGM.getObjCRuntime(); |
608 | 0 | bool isSuperMessage = false; |
609 | 0 | bool isClassMessage = false; |
610 | 0 | ObjCInterfaceDecl *OID = nullptr; |
611 | | // Find the receiver |
612 | 0 | QualType ReceiverType; |
613 | 0 | llvm::Value *Receiver = nullptr; |
614 | 0 | switch (E->getReceiverKind()) { |
615 | 0 | case ObjCMessageExpr::Instance: |
616 | 0 | ReceiverType = E->getInstanceReceiver()->getType(); |
617 | 0 | isClassMessage = ReceiverType->isObjCClassType(); |
618 | 0 | if (retainSelf) { |
619 | 0 | TryEmitResult ter = tryEmitARCRetainScalarExpr(*this, |
620 | 0 | E->getInstanceReceiver()); |
621 | 0 | Receiver = ter.getPointer(); |
622 | 0 | if (ter.getInt()) retainSelf = false; |
623 | 0 | } else |
624 | 0 | Receiver = EmitScalarExpr(E->getInstanceReceiver()); |
625 | 0 | break; |
626 | | |
627 | 0 | case ObjCMessageExpr::Class: { |
628 | 0 | ReceiverType = E->getClassReceiver(); |
629 | 0 | OID = ReceiverType->castAs<ObjCObjectType>()->getInterface(); |
630 | 0 | assert(OID && "Invalid Objective-C class message send"); |
631 | 0 | Receiver = Runtime.GetClass(*this, OID); |
632 | 0 | isClassMessage = true; |
633 | 0 | break; |
634 | 0 | } |
635 | | |
636 | 0 | case ObjCMessageExpr::SuperInstance: |
637 | 0 | ReceiverType = E->getSuperType(); |
638 | 0 | Receiver = LoadObjCSelf(); |
639 | 0 | isSuperMessage = true; |
640 | 0 | break; |
641 | | |
642 | 0 | case ObjCMessageExpr::SuperClass: |
643 | 0 | ReceiverType = E->getSuperType(); |
644 | 0 | Receiver = LoadObjCSelf(); |
645 | 0 | isSuperMessage = true; |
646 | 0 | isClassMessage = true; |
647 | 0 | break; |
648 | 0 | } |
649 | | |
650 | 0 | if (retainSelf) |
651 | 0 | Receiver = EmitARCRetainNonBlock(Receiver); |
652 | | |
653 | | // In ARC, we sometimes want to "extend the lifetime" |
654 | | // (i.e. retain+autorelease) of receivers of returns-inner-pointer |
655 | | // messages. |
656 | 0 | if (getLangOpts().ObjCAutoRefCount && method && |
657 | 0 | method->hasAttr<ObjCReturnsInnerPointerAttr>() && |
658 | 0 | shouldExtendReceiverForInnerPointerMessage(E)) |
659 | 0 | Receiver = EmitARCRetainAutorelease(ReceiverType, Receiver); |
660 | |
661 | 0 | QualType ResultType = method ? method->getReturnType() : E->getType(); |
662 | |
663 | 0 | CallArgList Args; |
664 | 0 | EmitCallArgs(Args, method, E->arguments(), /*AC*/AbstractCallee(method)); |
665 | | |
666 | | // For delegate init calls in ARC, do an unsafe store of null into |
667 | | // self. This represents the call taking direct ownership of that |
668 | | // value. We have to do this after emitting the other call |
669 | | // arguments because they might also reference self, but we don't |
670 | | // have to worry about any of them modifying self because that would |
671 | | // be an undefined read and write of an object in unordered |
672 | | // expressions. |
673 | 0 | if (isDelegateInit) { |
674 | 0 | assert(getLangOpts().ObjCAutoRefCount && |
675 | 0 | "delegate init calls should only be marked in ARC"); |
676 | | |
677 | | // Do an unsafe store of null into self. |
678 | 0 | Address selfAddr = |
679 | 0 | GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl()); |
680 | 0 | Builder.CreateStore(getNullForVariable(selfAddr), selfAddr); |
681 | 0 | } |
682 | | |
683 | 0 | RValue result; |
684 | 0 | if (isSuperMessage) { |
685 | | // super is only valid in an Objective-C method |
686 | 0 | const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl); |
687 | 0 | bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext()); |
688 | 0 | result = Runtime.GenerateMessageSendSuper(*this, Return, ResultType, |
689 | 0 | E->getSelector(), |
690 | 0 | OMD->getClassInterface(), |
691 | 0 | isCategoryImpl, |
692 | 0 | Receiver, |
693 | 0 | isClassMessage, |
694 | 0 | Args, |
695 | 0 | method); |
696 | 0 | } else { |
697 | | // Call runtime methods directly if we can. |
698 | 0 | result = Runtime.GeneratePossiblySpecializedMessageSend( |
699 | 0 | *this, Return, ResultType, E->getSelector(), Receiver, Args, OID, |
700 | 0 | method, isClassMessage); |
701 | 0 | } |
702 | | |
703 | | // For delegate init calls in ARC, implicitly store the result of |
704 | | // the call back into self. This takes ownership of the value. |
705 | 0 | if (isDelegateInit) { |
706 | 0 | Address selfAddr = |
707 | 0 | GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl()); |
708 | 0 | llvm::Value *newSelf = result.getScalarVal(); |
709 | | |
710 | | // The delegate return type isn't necessarily a matching type; in |
711 | | // fact, it's quite likely to be 'id'. |
712 | 0 | llvm::Type *selfTy = selfAddr.getElementType(); |
713 | 0 | newSelf = Builder.CreateBitCast(newSelf, selfTy); |
714 | |
715 | 0 | Builder.CreateStore(newSelf, selfAddr); |
716 | 0 | } |
717 | |
718 | 0 | return AdjustObjCObjectType(*this, E->getType(), result); |
719 | 0 | } |
720 | | |
721 | | namespace { |
722 | | struct FinishARCDealloc final : EHScopeStack::Cleanup { |
723 | 0 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
724 | 0 | const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CGF.CurCodeDecl); |
725 | |
726 | 0 | const ObjCImplDecl *impl = cast<ObjCImplDecl>(method->getDeclContext()); |
727 | 0 | const ObjCInterfaceDecl *iface = impl->getClassInterface(); |
728 | 0 | if (!iface->getSuperClass()) return; |
729 | | |
730 | 0 | bool isCategory = isa<ObjCCategoryImplDecl>(impl); |
731 | | |
732 | | // Call [super dealloc] if we have a superclass. |
733 | 0 | llvm::Value *self = CGF.LoadObjCSelf(); |
734 | |
735 | 0 | CallArgList args; |
736 | 0 | CGF.CGM.getObjCRuntime().GenerateMessageSendSuper(CGF, ReturnValueSlot(), |
737 | 0 | CGF.getContext().VoidTy, |
738 | 0 | method->getSelector(), |
739 | 0 | iface, |
740 | 0 | isCategory, |
741 | 0 | self, |
742 | 0 | /*is class msg*/ false, |
743 | 0 | args, |
744 | 0 | method); |
745 | 0 | } |
746 | | }; |
747 | | } |
748 | | |
749 | | /// StartObjCMethod - Begin emission of an ObjCMethod. This generates |
750 | | /// the LLVM function and sets the other context used by |
751 | | /// CodeGenFunction. |
752 | | void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD, |
753 | 0 | const ObjCContainerDecl *CD) { |
754 | 0 | SourceLocation StartLoc = OMD->getBeginLoc(); |
755 | 0 | FunctionArgList args; |
756 | | // Check if we should generate debug info for this method. |
757 | 0 | if (OMD->hasAttr<NoDebugAttr>()) |
758 | 0 | DebugInfo = nullptr; // disable debug info indefinitely for this function |
759 | |
760 | 0 | llvm::Function *Fn = CGM.getObjCRuntime().GenerateMethod(OMD, CD); |
761 | |
762 | 0 | const CGFunctionInfo &FI = CGM.getTypes().arrangeObjCMethodDeclaration(OMD); |
763 | 0 | if (OMD->isDirectMethod()) { |
764 | 0 | Fn->setVisibility(llvm::Function::HiddenVisibility); |
765 | 0 | CGM.SetLLVMFunctionAttributes(OMD, FI, Fn, /*IsThunk=*/false); |
766 | 0 | CGM.SetLLVMFunctionAttributesForDefinition(OMD, Fn); |
767 | 0 | } else { |
768 | 0 | CGM.SetInternalFunctionAttributes(OMD, Fn, FI); |
769 | 0 | } |
770 | |
771 | 0 | args.push_back(OMD->getSelfDecl()); |
772 | 0 | if (!OMD->isDirectMethod()) |
773 | 0 | args.push_back(OMD->getCmdDecl()); |
774 | |
775 | 0 | args.append(OMD->param_begin(), OMD->param_end()); |
776 | |
777 | 0 | CurGD = OMD; |
778 | 0 | CurEHLocation = OMD->getEndLoc(); |
779 | |
780 | 0 | StartFunction(OMD, OMD->getReturnType(), Fn, FI, args, |
781 | 0 | OMD->getLocation(), StartLoc); |
782 | |
783 | 0 | if (OMD->isDirectMethod()) { |
784 | | // This function is a direct call, so it has to implement a nil check
785 | | // on entry. |
786 | | // |
787 | | // TODO: possibly have several entry points to elide the check |
788 | 0 | CGM.getObjCRuntime().GenerateDirectMethodPrologue(*this, Fn, OMD, CD); |
789 | 0 | } |
790 | | |
791 | | // In ARC, certain methods get an extra cleanup. |
792 | 0 | if (CGM.getLangOpts().ObjCAutoRefCount && |
793 | 0 | OMD->isInstanceMethod() && |
794 | 0 | OMD->getSelector().isUnarySelector()) { |
795 | 0 | const IdentifierInfo *ident = |
796 | 0 | OMD->getSelector().getIdentifierInfoForSlot(0); |
797 | 0 | if (ident->isStr("dealloc")) |
798 | 0 | EHStack.pushCleanup<FinishARCDealloc>(getARCCleanupKind()); |
799 | 0 | } |
800 | 0 | } |
801 | | |
802 | | static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF, |
803 | | LValue lvalue, QualType type); |
804 | | |
805 | | /// Generate an Objective-C method. An Objective-C method is a C function with |
806 | | /// its pointer, name, and types registered in the class structure. |
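   | | // Editor's illustration: an instance method declared as
   | | //   - (int)countForKey:(id)key;
   | | // is emitted as a C function shaped roughly like
   | | //   int impl(id self, SEL _cmd, id key);
   | | // whose pointer is registered in the class's method list (direct methods
   | | // omit the _cmd parameter, as StartObjCMethod above shows).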
807 | 0 | void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) { |
808 | 0 | StartObjCMethod(OMD, OMD->getClassInterface()); |
809 | 0 | PGO.assignRegionCounters(GlobalDecl(OMD), CurFn); |
810 | 0 | assert(isa<CompoundStmt>(OMD->getBody())); |
811 | 0 | incrementProfileCounter(OMD->getBody()); |
812 | 0 | EmitCompoundStmtWithoutScope(*cast<CompoundStmt>(OMD->getBody())); |
813 | 0 | FinishFunction(OMD->getBodyRBrace()); |
814 | 0 | } |
815 | | |
816 | | /// emitStructGetterCall - Call the runtime function to load a property |
817 | | /// into the return value slot. |
818 | | static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar, |
819 | 0 | bool isAtomic, bool hasStrong) { |
820 | 0 | ASTContext &Context = CGF.getContext(); |
821 | |
822 | 0 | llvm::Value *src = |
823 | 0 | CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0) |
824 | 0 | .getPointer(CGF); |
825 | | |
826 | | // objc_copyStruct (ReturnValue, &structIvar, |
827 | | // sizeof (Type of Ivar), isAtomic, false); |
828 | 0 | CallArgList args; |
829 | |
830 | 0 | llvm::Value *dest = CGF.ReturnValue.getPointer(); |
831 | 0 | args.add(RValue::get(dest), Context.VoidPtrTy); |
832 | 0 | args.add(RValue::get(src), Context.VoidPtrTy); |
833 | |
834 | 0 | CharUnits size = CGF.getContext().getTypeSizeInChars(ivar->getType()); |
835 | 0 | args.add(RValue::get(CGF.CGM.getSize(size)), Context.getSizeType()); |
836 | 0 | args.add(RValue::get(CGF.Builder.getInt1(isAtomic)), Context.BoolTy); |
837 | 0 | args.add(RValue::get(CGF.Builder.getInt1(hasStrong)), Context.BoolTy); |
838 | |
839 | 0 | llvm::FunctionCallee fn = CGF.CGM.getObjCRuntime().GetGetStructFunction(); |
840 | 0 | CGCallee callee = CGCallee::forDirect(fn); |
841 | 0 | CGF.EmitCall(CGF.getTypes().arrangeBuiltinFunctionCall(Context.VoidTy, args), |
842 | 0 | callee, ReturnValueSlot(), args); |
843 | 0 | } |
844 | | |
845 | | /// Determine whether the given architecture supports unaligned atomic |
846 | | /// accesses. They don't have to be fast, just faster than a function |
847 | | /// call and a mutex. |
848 | 0 | static bool hasUnalignedAtomics(llvm::Triple::ArchType arch) { |
849 | | // FIXME: Allow unaligned atomic load/store on x86. (It is not |
850 | | // currently supported by the backend.) |
851 | 0 | return false; |
852 | 0 | } |
853 | | |
854 | | /// Return the maximum size that permits atomic accesses for the given |
855 | | /// architecture. |
856 | | static CharUnits getMaxAtomicAccessSize(CodeGenModule &CGM, |
857 | 0 | llvm::Triple::ArchType arch) { |
858 | | // ARM has 8-byte atomic accesses, but it's not clear whether we |
859 | | // want to rely on them here. |
860 | | |
861 | | // In the default case, just assume that any size up to a pointer is |
862 | | // fine given adequate alignment. |
863 | 0 | return CharUnits::fromQuantity(CGM.PointerSizeInBytes); |
864 | 0 | } |
865 | | |
866 | | namespace { |
867 | | class PropertyImplStrategy { |
868 | | public: |
869 | | enum StrategyKind { |
870 | | /// The 'native' strategy is to use the architecture's provided |
871 | | /// reads and writes. |
872 | | Native, |
873 | | |
874 | | /// Use objc_setProperty and objc_getProperty. |
875 | | GetSetProperty, |
876 | | |
877 | | /// Use objc_setProperty for the setter, but use expression |
878 | | /// evaluation for the getter. |
879 | | SetPropertyAndExpressionGet, |
880 | | |
881 | | /// Use objc_copyStruct. |
882 | | CopyStruct, |
883 | | |
884 | | /// The 'expression' strategy is to emit normal assignment or |
885 | | /// lvalue-to-rvalue expressions. |
886 | | Expression |
887 | | }; |
888 | | |
889 | 0 | StrategyKind getKind() const { return StrategyKind(Kind); } |
890 | | |
891 | 0 | bool hasStrongMember() const { return HasStrong; } |
892 | 0 | bool isAtomic() const { return IsAtomic; } |
893 | 0 | bool isCopy() const { return IsCopy; } |
894 | | |
895 | 0 | CharUnits getIvarSize() const { return IvarSize; } |
896 | 0 | CharUnits getIvarAlignment() const { return IvarAlignment; } |
897 | | |
898 | | PropertyImplStrategy(CodeGenModule &CGM, |
899 | | const ObjCPropertyImplDecl *propImpl); |
900 | | |
901 | | private: |
902 | | unsigned Kind : 8; |
903 | | unsigned IsAtomic : 1; |
904 | | unsigned IsCopy : 1; |
905 | | unsigned HasStrong : 1; |
906 | | |
907 | | CharUnits IvarSize; |
908 | | CharUnits IvarAlignment; |
909 | | }; |
910 | | } |
911 | | |
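   | | // Editor's illustration of how common properties map to strategies,
   | | // derived from the constructor below (assuming ARC, no GC):
   | | //   @property (nonatomic, strong) id x;  ->  Expression
   | | //   @property (nonatomic, copy) id x;    ->  SetPropertyAndExpressionGet
   | | //   @property (atomic, copy) id x;       ->  GetSetProperty
   | | //   @property (atomic) NSRange r;        ->  Native or CopyStruct,
   | | //       depending on the ivar's size/alignment vs. atomic access limits.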
912 | | /// Pick an implementation strategy for the given property synthesis. |
913 | | PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM, |
914 | 0 | const ObjCPropertyImplDecl *propImpl) { |
915 | 0 | const ObjCPropertyDecl *prop = propImpl->getPropertyDecl(); |
916 | 0 | ObjCPropertyDecl::SetterKind setterKind = prop->getSetterKind(); |
917 | |
918 | 0 | IsCopy = (setterKind == ObjCPropertyDecl::Copy); |
919 | 0 | IsAtomic = prop->isAtomic(); |
920 | 0 | HasStrong = false; // doesn't matter here. |
921 | | |
922 | | // Evaluate the ivar's size and alignment. |
923 | 0 | ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl(); |
924 | 0 | QualType ivarType = ivar->getType(); |
925 | 0 | auto TInfo = CGM.getContext().getTypeInfoInChars(ivarType); |
926 | 0 | IvarSize = TInfo.Width; |
927 | 0 | IvarAlignment = TInfo.Align; |
928 | | |
929 | | // If we have a copy property, we always have to use setProperty. |
930 | | // If the property is atomic we need to use getProperty, but in |
931 | | // the nonatomic case we can just use expression. |
932 | 0 | if (IsCopy) { |
933 | 0 | Kind = IsAtomic ? GetSetProperty : SetPropertyAndExpressionGet; |
934 | 0 | return; |
935 | 0 | } |
936 | | |
937 | | // Handle retain. |
938 | 0 | if (setterKind == ObjCPropertyDecl::Retain) { |
939 | | // In GC-only, there's nothing special that needs to be done. |
940 | 0 | if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) { |
941 | | // fallthrough |
942 | | |
943 | | // In ARC, if the property is non-atomic, use expression emission, |
944 | | // which translates to objc_storeStrong. This isn't required, but |
945 | | // it's slightly nicer. |
946 | 0 | } else if (CGM.getLangOpts().ObjCAutoRefCount && !IsAtomic) { |
947 | | // Using standard expression emission for the setter is only |
948 | | // acceptable if the ivar is __strong, which won't be true if |
949 | | // the property is annotated with __attribute__((NSObject)). |
950 | | // TODO: falling all the way back to objc_setProperty here is |
951 | | // just laziness, though; we could still use objc_storeStrong |
952 | | // if we hacked it right. |
953 | 0 | if (ivarType.getObjCLifetime() == Qualifiers::OCL_Strong) |
954 | 0 | Kind = Expression; |
955 | 0 | else |
956 | 0 | Kind = SetPropertyAndExpressionGet; |
957 | 0 | return; |
958 | | |
959 | | // Otherwise, we need to at least use setProperty. However, if |
960 | | // the property isn't atomic, we can use normal expression |
961 | | // emission for the getter. |
962 | 0 | } else if (!IsAtomic) { |
963 | 0 | Kind = SetPropertyAndExpressionGet; |
964 | 0 | return; |
965 | | |
966 | | // Otherwise, we have to use both setProperty and getProperty. |
967 | 0 | } else { |
968 | 0 | Kind = GetSetProperty; |
969 | 0 | return; |
970 | 0 | } |
971 | 0 | } |
972 | | |
973 | | // If we're not atomic, just use expression accesses. |
974 | 0 | if (!IsAtomic) { |
975 | 0 | Kind = Expression; |
976 | 0 | return; |
977 | 0 | } |
978 | | |
979 | | // Properties on bitfield ivars need to be emitted using expression |
980 | | // accesses even if they're nominally atomic. |
981 | 0 | if (ivar->isBitField()) { |
982 | 0 | Kind = Expression; |
983 | 0 | return; |
984 | 0 | } |
985 | | |
986 | | // GC-qualified or ARC-qualified ivars need to be emitted as |
987 | | // expressions. This actually works out to being atomic anyway, |
988 | | // except for ARC __strong, but that should trigger the above code. |
989 | 0 | if (ivarType.hasNonTrivialObjCLifetime() || |
990 | 0 | (CGM.getLangOpts().getGC() && |
991 | 0 | CGM.getContext().getObjCGCAttrKind(ivarType))) { |
992 | 0 | Kind = Expression; |
993 | 0 | return; |
994 | 0 | } |
995 | | |
996 | | // Compute whether the ivar has strong members. |
997 | 0 | if (CGM.getLangOpts().getGC()) |
998 | 0 | if (const RecordType *recordType = ivarType->getAs<RecordType>()) |
999 | 0 | HasStrong = recordType->getDecl()->hasObjectMember(); |
1000 | | |
1001 | | // We can never access structs with object members with a native |
1002 | | // access, because we need to use write barriers. This is what |
1003 | | // objc_copyStruct is for. |
1004 | 0 | if (HasStrong) { |
1005 | 0 | Kind = CopyStruct; |
1006 | 0 | return; |
1007 | 0 | } |
1008 | | |
1009 | | // Otherwise, this is target-dependent and based on the size and |
1010 | | // alignment of the ivar. |
1011 | | |
1012 | | // If the size of the ivar is not a power of two, give up. We don't |
1013 | | // want to get into the business of doing compare-and-swaps. |
1014 | 0 | if (!IvarSize.isPowerOfTwo()) { |
1015 | 0 | Kind = CopyStruct; |
1016 | 0 | return; |
1017 | 0 | } |
1018 | | |
1019 | 0 | llvm::Triple::ArchType arch = |
1020 | 0 | CGM.getTarget().getTriple().getArch(); |
1021 | | |
1022 | | // Most architectures require memory to fit within a single cache |
1023 | | // line, so the alignment has to be at least the size of the access. |
1024 | | // Otherwise we have to grab a lock. |
1025 | 0 | if (IvarAlignment < IvarSize && !hasUnalignedAtomics(arch)) { |
1026 | 0 | Kind = CopyStruct; |
1027 | 0 | return; |
1028 | 0 | } |
1029 | | |
1030 | | // If the ivar's size exceeds the architecture's maximum atomic |
1031 | | // access size, we have to use CopyStruct. |
1032 | 0 | if (IvarSize > getMaxAtomicAccessSize(CGM, arch)) { |
1033 | 0 | Kind = CopyStruct; |
1034 | 0 | return; |
1035 | 0 | } |
1036 | | |
1037 | | // Otherwise, we can use native loads and stores. |
1038 | 0 | Kind = Native; |
1039 | 0 | } |
1040 | | |
1041 | | /// Generate an Objective-C property getter function. |
1042 | | /// |
1043 | | /// The given Decl must be an ObjCImplementationDecl. \@synthesize |
1044 | | /// is illegal within a category. |
1045 | | void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP, |
1046 | 0 | const ObjCPropertyImplDecl *PID) { |
1047 | 0 | llvm::Constant *AtomicHelperFn = |
1048 | 0 | CodeGenFunction(CGM).GenerateObjCAtomicGetterCopyHelperFunction(PID); |
1049 | 0 | ObjCMethodDecl *OMD = PID->getGetterMethodDecl(); |
1050 | 0 | assert(OMD && "Invalid call to generate getter (empty method)"); |
1051 | 0 | StartObjCMethod(OMD, IMP->getClassInterface()); |
1052 | |
1053 | 0 | generateObjCGetterBody(IMP, PID, OMD, AtomicHelperFn); |
1054 | |
1055 | 0 | FinishFunction(OMD->getEndLoc()); |
1056 | 0 | } |
1057 | | |
1058 | 0 | static bool hasTrivialGetExpr(const ObjCPropertyImplDecl *propImpl) { |
1059 | 0 | const Expr *getter = propImpl->getGetterCXXConstructor(); |
1060 | 0 | if (!getter) return true; |
1061 | | |
1062 | | // Sema only makes one of these when the ivar has a C++ class type,
1063 | | // so the form is pretty constrained. |
1064 | | |
1065 | | // If the property has a reference type, we might just be binding a |
1066 | | // reference, in which case the result will be a gl-value. We should |
1067 | | // treat this as a non-trivial operation. |
1068 | 0 | if (getter->isGLValue()) |
1069 | 0 | return false; |
1070 | | |
1071 | | // If we selected a trivial copy-constructor, we're okay. |
1072 | 0 | if (const CXXConstructExpr *construct = dyn_cast<CXXConstructExpr>(getter)) |
1073 | 0 | return (construct->getConstructor()->isTrivial()); |
1074 | | |
1075 | | // The constructor might require cleanups (in which case it's never |
1076 | | // trivial). |
1077 | 0 | assert(isa<ExprWithCleanups>(getter)); |
1078 | 0 | return false; |
1079 | 0 | } |
1080 | | |
1081 | | /// emitCPPObjectAtomicGetterCall - Call the runtime function to |
1082 | | /// copy the ivar into the return slot.
1083 | | static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF, |
1084 | | llvm::Value *returnAddr, |
1085 | | ObjCIvarDecl *ivar, |
1086 | 0 | llvm::Constant *AtomicHelperFn) { |
1087 | | // objc_copyCppObjectAtomic (&returnSlot, &CppObjectIvar, |
1088 | | // AtomicHelperFn); |
1089 | 0 | CallArgList args; |
1090 | | |
1091 | | // The 1st argument is the return Slot. |
1092 | 0 | args.add(RValue::get(returnAddr), CGF.getContext().VoidPtrTy); |
1093 | | |
1094 | | // The 2nd argument is the address of the ivar. |
1095 | 0 | llvm::Value *ivarAddr = |
1096 | 0 | CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0) |
1097 | 0 | .getPointer(CGF); |
1098 | 0 | args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy); |
1099 | | |
1100 | | // Third argument is the helper function. |
1101 | 0 | args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy); |
1102 | |
1103 | 0 | llvm::FunctionCallee copyCppAtomicObjectFn = |
1104 | 0 | CGF.CGM.getObjCRuntime().GetCppAtomicObjectGetFunction(); |
1105 | 0 | CGCallee callee = CGCallee::forDirect(copyCppAtomicObjectFn); |
1106 | 0 | CGF.EmitCall( |
1107 | 0 | CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args), |
1108 | 0 | callee, ReturnValueSlot(), args); |
1109 | 0 | } |
1110 | | |
1111 | | // emitCmdValueForGetterSetterBody - Handle emitting the load necessary for |
1112 | | // the `_cmd` selector argument for getter/setter bodies. For direct methods, |
1113 | | // this returns an undefined/poison value; this matches behavior prior to `_cmd` |
1114 | | // being removed from the direct method ABI as the getter/setter caller would |
1115 | | // never load one. For non-direct methods, this emits a load of the implicit |
1116 | | // `_cmd` storage. |
1117 | | static llvm::Value *emitCmdValueForGetterSetterBody(CodeGenFunction &CGF, |
1118 | 0 | ObjCMethodDecl *MD) { |
1119 | 0 | if (MD->isDirectMethod()) { |
1120 | | // Direct methods do not have a `_cmd` argument. Emit an undefined/poison |
1121 | | // value. This will be passed to objc_getProperty/objc_setProperty, which |
1122 | | // has not appeared bothered by the `_cmd` argument being undefined before. |
1123 | 0 | llvm::Type *selType = CGF.ConvertType(CGF.getContext().getObjCSelType()); |
1124 | 0 | return llvm::PoisonValue::get(selType); |
1125 | 0 | } |
1126 | | |
1127 | 0 | return CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(MD->getCmdDecl()), "cmd"); |
1128 | 0 | } |
1129 | | |
1130 | | void |
1131 | | CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl, |
1132 | | const ObjCPropertyImplDecl *propImpl, |
1133 | | const ObjCMethodDecl *GetterMethodDecl, |
1134 | 0 | llvm::Constant *AtomicHelperFn) { |
1135 | |
1136 | 0 | ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl(); |
1137 | |
1138 | 0 | if (ivar->getType().isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) { |
1139 | 0 | if (!AtomicHelperFn) { |
1140 | 0 | LValue Src = |
1141 | 0 | EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0); |
1142 | 0 | LValue Dst = MakeAddrLValue(ReturnValue, ivar->getType()); |
1143 | 0 | callCStructCopyConstructor(Dst, Src); |
1144 | 0 | } else { |
1145 | 0 | ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl(); |
1146 | 0 | emitCPPObjectAtomicGetterCall(*this, ReturnValue.getPointer(), ivar, |
1147 | 0 | AtomicHelperFn); |
1148 | 0 | } |
1149 | 0 | return; |
1150 | 0 | } |
1151 | | |
1152 | | // If there's a non-trivial 'get' expression, we just have to emit that. |
1153 | 0 | if (!hasTrivialGetExpr(propImpl)) { |
1154 | 0 | if (!AtomicHelperFn) { |
1155 | 0 | auto *ret = ReturnStmt::Create(getContext(), SourceLocation(), |
1156 | 0 | propImpl->getGetterCXXConstructor(), |
1157 | 0 | /* NRVOCandidate=*/nullptr); |
1158 | 0 | EmitReturnStmt(*ret); |
1159 | 0 | } |
1160 | 0 | else { |
1161 | 0 | ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl(); |
1162 | 0 | emitCPPObjectAtomicGetterCall(*this, ReturnValue.getPointer(), |
1163 | 0 | ivar, AtomicHelperFn); |
1164 | 0 | } |
1165 | 0 | return; |
1166 | 0 | } |
1167 | | |
1168 | 0 | const ObjCPropertyDecl *prop = propImpl->getPropertyDecl(); |
1169 | 0 | QualType propType = prop->getType(); |
1170 | 0 | ObjCMethodDecl *getterMethod = propImpl->getGetterMethodDecl(); |
1171 | | |
1172 | | // Pick an implementation strategy. |
1173 | 0 | PropertyImplStrategy strategy(CGM, propImpl); |
1174 | 0 | switch (strategy.getKind()) { |
1175 | 0 | case PropertyImplStrategy::Native: { |
1176 | | // We don't need to do anything for a zero-size struct. |
1177 | 0 | if (strategy.getIvarSize().isZero()) |
1178 | 0 | return; |
1179 | | |
1180 | 0 | LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0); |
1181 | | |
1182 | | // Currently, all atomic accesses have to be through integer |
1183 | | // types, so there's no point in trying to pick a prettier type. |
1184 | 0 | uint64_t ivarSize = getContext().toBits(strategy.getIvarSize()); |
1185 | 0 | llvm::Type *bitcastType = llvm::Type::getIntNTy(getLLVMContext(), ivarSize); |
1186 | | |
1187 | | // Perform an atomic load. This does not impose ordering constraints. |
1188 | 0 | Address ivarAddr = LV.getAddress(*this); |
1189 | 0 | ivarAddr = ivarAddr.withElementType(bitcastType); |
1190 | 0 | llvm::LoadInst *load = Builder.CreateLoad(ivarAddr, "load"); |
1191 | 0 | load->setAtomic(llvm::AtomicOrdering::Unordered); |
1192 | | |
1193 | | // Store that value into the return address. Doing this with a |
1194 | | // bitcast is likely to produce some pretty ugly IR, but it's not |
1195 | | // the *most* terrible thing in the world. |
1196 | 0 | llvm::Type *retTy = ConvertType(getterMethod->getReturnType()); |
1197 | 0 | uint64_t retTySize = CGM.getDataLayout().getTypeSizeInBits(retTy); |
1198 | 0 | llvm::Value *ivarVal = load; |
1199 | 0 | if (ivarSize > retTySize) { |
1200 | 0 | bitcastType = llvm::Type::getIntNTy(getLLVMContext(), retTySize); |
1201 | 0 | ivarVal = Builder.CreateTrunc(load, bitcastType); |
1202 | 0 | } |
1203 | 0 | Builder.CreateStore(ivarVal, ReturnValue.withElementType(bitcastType)); |
1204 | | |
1205 | | // Make sure we don't do an autorelease. |
1206 | 0 | AutoreleaseResult = false; |
1207 | 0 | return; |
1208 | 0 | } |
1209 | | |
1210 | 0 | case PropertyImplStrategy::GetSetProperty: { |
1211 | 0 | llvm::FunctionCallee getPropertyFn = |
1212 | 0 | CGM.getObjCRuntime().GetPropertyGetFunction(); |
1213 | 0 | if (!getPropertyFn) { |
1214 | 0 | CGM.ErrorUnsupported(propImpl, "Obj-C getter requiring atomic copy"); |
1215 | 0 | return; |
1216 | 0 | } |
1217 | 0 | CGCallee callee = CGCallee::forDirect(getPropertyFn); |
1218 | | |
1219 | | // Return (ivar-type) objc_getProperty((id) self, _cmd, offset, true). |
1220 | | // FIXME: Can't this be simpler? This might even be worse than the |
1221 | | // corresponding gcc code. |
1222 | 0 | llvm::Value *cmd = emitCmdValueForGetterSetterBody(*this, getterMethod); |
1223 | 0 | llvm::Value *self = Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy); |
1224 | 0 | llvm::Value *ivarOffset = |
1225 | 0 | EmitIvarOffsetAsPointerDiff(classImpl->getClassInterface(), ivar); |
1226 | |
1227 | 0 | CallArgList args; |
1228 | 0 | args.add(RValue::get(self), getContext().getObjCIdType()); |
1229 | 0 | args.add(RValue::get(cmd), getContext().getObjCSelType()); |
1230 | 0 | args.add(RValue::get(ivarOffset), getContext().getPointerDiffType()); |
1231 | 0 | args.add(RValue::get(Builder.getInt1(strategy.isAtomic())), |
1232 | 0 | getContext().BoolTy); |
1233 | | |
1234 | | // FIXME: We shouldn't need to get the function info here, the |
1235 | | // runtime already should have computed it to build the function. |
1236 | 0 | llvm::CallBase *CallInstruction; |
1237 | 0 | RValue RV = EmitCall(getTypes().arrangeBuiltinFunctionCall( |
1238 | 0 | getContext().getObjCIdType(), args), |
1239 | 0 | callee, ReturnValueSlot(), args, &CallInstruction); |
1240 | 0 | if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(CallInstruction)) |
1241 | 0 | call->setTailCall(); |
1242 | | |
1243 | | // We need to fix the type here. Ivars with copy & retain are |
1244 | | // always objects, so we don't need to worry about complex or
1245 | | // aggregate values.
1246 | 0 | RV = RValue::get(Builder.CreateBitCast( |
1247 | 0 | RV.getScalarVal(), |
1248 | 0 | getTypes().ConvertType(getterMethod->getReturnType()))); |
1249 | |
1250 | 0 | EmitReturnOfRValue(RV, propType); |
1251 | | |
1252 | | // objc_getProperty does an autorelease, so we should suppress ours. |
1253 | 0 | AutoreleaseResult = false; |
1254 | |
1255 | 0 | return; |
1256 | 0 | } |
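For reference, the runtime entry point this strategy calls has the shape below; the sketch uses void* in place of id/SEL to stay plain C++, and the argument order matches the CallArgList built above:

    #include <cstddef>

    // id objc_getProperty(id self, SEL _cmd, ptrdiff_t offset, BOOL atomic);
    extern "C" void *objc_getProperty(void *self, void *_cmd,
                                      std::ptrdiff_t offset, bool atomic);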
1257 | | |
1258 | 0 | case PropertyImplStrategy::CopyStruct: |
1259 | 0 | emitStructGetterCall(*this, ivar, strategy.isAtomic(), |
1260 | 0 | strategy.hasStrongMember()); |
1261 | 0 | return; |
1262 | | |
1263 | 0 | case PropertyImplStrategy::Expression: |
1264 | 0 | case PropertyImplStrategy::SetPropertyAndExpressionGet: { |
1265 | 0 | LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0); |
1266 | |
1267 | 0 | QualType ivarType = ivar->getType(); |
1268 | 0 | switch (getEvaluationKind(ivarType)) { |
1269 | 0 | case TEK_Complex: { |
1270 | 0 | ComplexPairTy pair = EmitLoadOfComplex(LV, SourceLocation()); |
1271 | 0 | EmitStoreOfComplex(pair, MakeAddrLValue(ReturnValue, ivarType), |
1272 | 0 | /*init*/ true); |
1273 | 0 | return; |
1274 | 0 | } |
1275 | 0 | case TEK_Aggregate: { |
1276 | | // The return value slot is guaranteed to not be aliased, but |
1277 | | // that's not necessarily the same as "on the stack", so |
1278 | | // we still potentially need objc_memmove_collectable. |
1279 | 0 | EmitAggregateCopy(/* Dest= */ MakeAddrLValue(ReturnValue, ivarType), |
1280 | 0 | /* Src= */ LV, ivarType, getOverlapForReturnValue()); |
1281 | 0 | return; |
1282 | 0 | } |
1283 | 0 | case TEK_Scalar: { |
1284 | 0 | llvm::Value *value; |
1285 | 0 | if (propType->isReferenceType()) { |
1286 | 0 | value = LV.getAddress(*this).getPointer(); |
1287 | 0 | } else { |
1288 | | // We want to load and autoreleaseReturnValue ARC __weak ivars. |
1289 | 0 | if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) { |
1290 | 0 | if (getLangOpts().ObjCAutoRefCount) { |
1291 | 0 | value = emitARCRetainLoadOfScalar(*this, LV, ivarType); |
1292 | 0 | } else { |
1293 | 0 | value = EmitARCLoadWeak(LV.getAddress(*this)); |
1294 | 0 | } |
1295 | | |
1296 | | // Otherwise we want to do a simple load, suppressing the |
1297 | | // final autorelease. |
1298 | 0 | } else { |
1299 | 0 | value = EmitLoadOfLValue(LV, SourceLocation()).getScalarVal(); |
1300 | 0 | AutoreleaseResult = false; |
1301 | 0 | } |
1302 | |
1303 | 0 | value = Builder.CreateBitCast( |
1304 | 0 | value, ConvertType(GetterMethodDecl->getReturnType())); |
1305 | 0 | } |
1306 | |
1307 | 0 | EmitReturnOfRValue(RValue::get(value), propType); |
1308 | 0 | return; |
1309 | 0 | } |
1310 | 0 | } |
1311 | 0 | llvm_unreachable("bad evaluation kind"); |
1312 | 0 | } |
1313 | |
1314 | 0 | } |
1315 | 0 | llvm_unreachable("bad @property implementation strategy!"); |
1316 | 0 | } |
1317 | | |
1318 | | /// emitStructSetterCall - Call the runtime function to store the value |
1319 | | /// from the first formal parameter into the given ivar. |
1320 | | static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD, |
1321 | 0 | ObjCIvarDecl *ivar) { |
1322 | | // objc_copyStruct (&structIvar, &Arg, |
1323 | | // sizeof (struct something), true, false); |
1324 | 0 | CallArgList args; |
1325 | | |
1326 | | // The first argument is the address of the ivar. |
1327 | 0 | llvm::Value *ivarAddr = |
1328 | 0 | CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0) |
1329 | 0 | .getPointer(CGF); |
1330 | 0 | ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy); |
1331 | 0 | args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy); |
1332 | | |
1333 | | // The second argument is the address of the parameter variable. |
1334 | 0 | ParmVarDecl *argVar = *OMD->param_begin(); |
1335 | 0 | DeclRefExpr argRef(CGF.getContext(), argVar, false, |
1336 | 0 | argVar->getType().getNonReferenceType(), VK_LValue, |
1337 | 0 | SourceLocation()); |
1338 | 0 | llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer(CGF); |
1339 | 0 | args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy); |
1340 | | |
1341 | | // The third argument is the sizeof the type. |
1342 | 0 | llvm::Value *size = |
1343 | 0 | CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(ivar->getType())); |
1344 | 0 | args.add(RValue::get(size), CGF.getContext().getSizeType()); |
1345 | | |
1346 | | // The fourth argument is the 'isAtomic' flag. |
1347 | 0 | args.add(RValue::get(CGF.Builder.getTrue()), CGF.getContext().BoolTy); |
1348 | | |
1349 | | // The fifth argument is the 'hasStrong' flag. |
1350 | | // FIXME: should this really always be false? |
1351 | 0 | args.add(RValue::get(CGF.Builder.getFalse()), CGF.getContext().BoolTy); |
1352 | |
1353 | 0 | llvm::FunctionCallee fn = CGF.CGM.getObjCRuntime().GetSetStructFunction(); |
1354 | 0 | CGCallee callee = CGCallee::forDirect(fn); |
1355 | 0 | CGF.EmitCall( |
1356 | 0 | CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args), |
1357 | 0 | callee, ReturnValueSlot(), args); |
1358 | 0 | } |
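A sketch of the callee, matching the five arguments marshalled above (void*/bool are stand-ins for the runtime's untyped pointers and BOOL):

    #include <cstddef>

    // void objc_copyStruct(void *dest, const void *src, ptrdiff_t size,
    //                      BOOL atomic, BOOL hasStrong);
    extern "C" void objc_copyStruct(void *dest, const void *src,
                                    std::ptrdiff_t size, bool atomic,
                                    bool hasStrong);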
1359 | | |
1360 | | /// emitCPPObjectAtomicSetterCall - Call the runtime function to store |
1361 | | /// the value from the first formal parameter into the given ivar, using |
1362 | | /// the Cpp API for atomic Cpp objects with non-trivial copy assignment. |
1363 | | static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF, |
1364 | | ObjCMethodDecl *OMD, |
1365 | | ObjCIvarDecl *ivar, |
1366 | 0 | llvm::Constant *AtomicHelperFn) { |
1367 | | // objc_copyCppObjectAtomic (&CppObjectIvar, &Arg, |
1368 | | // AtomicHelperFn); |
1369 | 0 | CallArgList args; |
1370 | | |
1371 | | // The first argument is the address of the ivar. |
1372 | 0 | llvm::Value *ivarAddr = |
1373 | 0 | CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0) |
1374 | 0 | .getPointer(CGF); |
1375 | 0 | args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy); |
1376 | | |
1377 | | // The second argument is the address of the parameter variable. |
1378 | 0 | ParmVarDecl *argVar = *OMD->param_begin(); |
1379 | 0 | DeclRefExpr argRef(CGF.getContext(), argVar, false, |
1380 | 0 | argVar->getType().getNonReferenceType(), VK_LValue, |
1381 | 0 | SourceLocation()); |
1382 | 0 | llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer(CGF); |
1383 | 0 | args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy); |
1384 | | |
1385 | | // Third argument is the helper function. |
1386 | 0 | args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy); |
1387 | |
1388 | 0 | llvm::FunctionCallee fn = |
1389 | 0 | CGF.CGM.getObjCRuntime().GetCppAtomicObjectSetFunction(); |
1390 | 0 | CGCallee callee = CGCallee::forDirect(fn); |
1391 | 0 | CGF.EmitCall( |
1392 | 0 | CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args), |
1393 | 0 | callee, ReturnValueSlot(), args); |
1394 | 0 | } |
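The corresponding entry point, sketched the same way; its third argument is the helper produced by GenerateObjCAtomicSetterCopyHelperFunction:

    // void objc_copyCppObjectAtomic(void *dest, const void *src,
    //                               void (*copyHelper)(void *, const void *));
    extern "C" void objc_copyCppObjectAtomic(
        void *dest, const void *src,
        void (*copyHelper)(void *dest, const void *src));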
1395 | | |
1396 | | |
1397 | 0 | static bool hasTrivialSetExpr(const ObjCPropertyImplDecl *PID) { |
1398 | 0 | Expr *setter = PID->getSetterCXXAssignment(); |
1399 | 0 | if (!setter) return true; |
1400 | | |
1401 | | // Sema only makes one of these when the ivar has a C++ class type,
1402 | | // so the form is pretty constrained. |
1403 | | |
1404 | | // An operator call is trivial if the function it calls is trivial. |
1405 | | // This also implies that there's nothing non-trivial going on with |
1406 | | // the arguments, because operator= can only be trivial if it's a |
1407 | | // synthesized assignment operator and therefore both parameters are |
1408 | | // references. |
1409 | 0 | if (CallExpr *call = dyn_cast<CallExpr>(setter)) { |
1410 | 0 | if (const FunctionDecl *callee |
1411 | 0 | = dyn_cast_or_null<FunctionDecl>(call->getCalleeDecl())) |
1412 | 0 | if (callee->isTrivial()) |
1413 | 0 | return true; |
1414 | 0 | return false; |
1415 | 0 | } |
1416 | | |
1417 | 0 | assert(isa<ExprWithCleanups>(setter)); |
1418 | 0 | return false; |
1419 | 0 | } |
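The triviality test above lines up with the C++ notion of a trivial copy assignment; a rough compile-time analogue (PlainIvar/FancyIvar are illustrative types, not anything from this file):

    #include <type_traits>

    struct PlainIvar { int x; };  // synthesized, trivial operator=
    struct FancyIvar { FancyIvar &operator=(const FancyIvar &); };

    static_assert(std::is_trivially_copy_assignable_v<PlainIvar>);
    static_assert(!std::is_trivially_copy_assignable_v<FancyIvar>);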
1420 | | |
1421 | 0 | static bool UseOptimizedSetter(CodeGenModule &CGM) { |
1422 | 0 | if (CGM.getLangOpts().getGC() != LangOptions::NonGC) |
1423 | 0 | return false; |
1424 | 0 | return CGM.getLangOpts().ObjCRuntime.hasOptimizedSetter(); |
1425 | 0 | } |
1426 | | |
1427 | | void |
1428 | | CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl, |
1429 | | const ObjCPropertyImplDecl *propImpl, |
1430 | 0 | llvm::Constant *AtomicHelperFn) { |
1431 | 0 | ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl(); |
1432 | 0 | ObjCMethodDecl *setterMethod = propImpl->getSetterMethodDecl(); |
1433 | |
1434 | 0 | if (ivar->getType().isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) { |
1435 | 0 | ParmVarDecl *PVD = *setterMethod->param_begin(); |
1436 | 0 | if (!AtomicHelperFn) { |
1437 | | // Call the move assignment operator instead of calling the copy |
1438 | | // assignment operator and destructor. |
1439 | 0 | LValue Dst = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, |
1440 | 0 | /*quals*/ 0); |
1441 | 0 | LValue Src = MakeAddrLValue(GetAddrOfLocalVar(PVD), ivar->getType()); |
1442 | 0 | callCStructMoveAssignmentOperator(Dst, Src); |
1443 | 0 | } else { |
1444 | | // If atomic, assignment is called via a locking api. |
1445 | 0 | emitCPPObjectAtomicSetterCall(*this, setterMethod, ivar, AtomicHelperFn); |
1446 | 0 | } |
1447 | | // Deactivate the destructor for the setter parameter.
1448 | 0 | DeactivateCleanupBlock(CalleeDestructedParamCleanups[PVD], AllocaInsertPt); |
1449 | 0 | return; |
1450 | 0 | } |
1451 | | |
1452 | | // Just use the setter expression if Sema gave us one and it's |
1453 | | // non-trivial. |
1454 | 0 | if (!hasTrivialSetExpr(propImpl)) { |
1455 | 0 | if (!AtomicHelperFn) |
1456 | | // If non-atomic, assignment is called directly. |
1457 | 0 | EmitStmt(propImpl->getSetterCXXAssignment()); |
1458 | 0 | else |
1459 | | // If atomic, assignment is called via a locking api. |
1460 | 0 | emitCPPObjectAtomicSetterCall(*this, setterMethod, ivar, |
1461 | 0 | AtomicHelperFn); |
1462 | 0 | return; |
1463 | 0 | } |
1464 | | |
1465 | 0 | PropertyImplStrategy strategy(CGM, propImpl); |
1466 | 0 | switch (strategy.getKind()) { |
1467 | 0 | case PropertyImplStrategy::Native: { |
1468 | | // We don't need to do anything for a zero-size struct. |
1469 | 0 | if (strategy.getIvarSize().isZero()) |
1470 | 0 | return; |
1471 | | |
1472 | 0 | Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin()); |
1473 | |
1474 | 0 | LValue ivarLValue = |
1475 | 0 | EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, /*quals*/ 0); |
1476 | 0 | Address ivarAddr = ivarLValue.getAddress(*this); |
1477 | | |
1478 | | // Currently, all atomic accesses have to be through integer |
1479 | | // types, so there's no point in trying to pick a prettier type. |
1480 | 0 | llvm::Type *castType = llvm::Type::getIntNTy( |
1481 | 0 | getLLVMContext(), getContext().toBits(strategy.getIvarSize())); |
1482 | | |
1483 | | // Cast both arguments to the chosen operation type. |
1484 | 0 | argAddr = argAddr.withElementType(castType); |
1485 | 0 | ivarAddr = ivarAddr.withElementType(castType); |
1486 | |
1487 | 0 | llvm::Value *load = Builder.CreateLoad(argAddr); |
1488 | | |
1489 | | // Perform an atomic store. There are no memory ordering requirements. |
1490 | 0 | llvm::StoreInst *store = Builder.CreateStore(load, ivarAddr); |
1491 | 0 | store->setAtomic(llvm::AtomicOrdering::Unordered); |
1492 | 0 | return; |
1493 | 0 | } |
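Store-side counterpart of the getter's Native case: the argument's bits go into the ivar with a single relaxed atomic store. A sketch under the same assumptions as the getter example (illustrative names, 64-bit ivar):

    #include <atomic>
    #include <cstdint>

    void nativeAtomicSet(std::atomic<uint64_t> &ivarBits, uint64_t argBits) {
      // One atomic store, no ordering constraints (the Unordered store above).
      ivarBits.store(argBits, std::memory_order_relaxed);
    }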
1494 | | |
1495 | 0 | case PropertyImplStrategy::GetSetProperty: |
1496 | 0 | case PropertyImplStrategy::SetPropertyAndExpressionGet: { |
1497 | |
1498 | 0 | llvm::FunctionCallee setOptimizedPropertyFn = nullptr; |
1499 | 0 | llvm::FunctionCallee setPropertyFn = nullptr; |
1500 | 0 | if (UseOptimizedSetter(CGM)) { |
1501 | | // Available on OS X 10.8 / iOS 6.0 and later, when GC is off.
1502 | 0 | setOptimizedPropertyFn = |
1503 | 0 | CGM.getObjCRuntime().GetOptimizedPropertySetFunction( |
1504 | 0 | strategy.isAtomic(), strategy.isCopy()); |
1505 | 0 | if (!setOptimizedPropertyFn) { |
1506 | 0 | CGM.ErrorUnsupported(propImpl, "Obj-C optimized setter - NYI"); |
1507 | 0 | return; |
1508 | 0 | } |
1509 | 0 | } |
1510 | 0 | else { |
1511 | 0 | setPropertyFn = CGM.getObjCRuntime().GetPropertySetFunction(); |
1512 | 0 | if (!setPropertyFn) { |
1513 | 0 | CGM.ErrorUnsupported(propImpl, "Obj-C setter requiring atomic copy"); |
1514 | 0 | return; |
1515 | 0 | } |
1516 | 0 | } |
1517 | | |
1518 | | // Emit objc_setProperty((id) self, _cmd, offset, arg, |
1519 | | // <is-atomic>, <is-copy>). |
1520 | 0 | llvm::Value *cmd = emitCmdValueForGetterSetterBody(*this, setterMethod); |
1521 | 0 | llvm::Value *self = |
1522 | 0 | Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy); |
1523 | 0 | llvm::Value *ivarOffset = |
1524 | 0 | EmitIvarOffsetAsPointerDiff(classImpl->getClassInterface(), ivar); |
1525 | 0 | Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin()); |
1526 | 0 | llvm::Value *arg = Builder.CreateLoad(argAddr, "arg"); |
1527 | 0 | arg = Builder.CreateBitCast(arg, VoidPtrTy); |
1528 | |
1529 | 0 | CallArgList args; |
1530 | 0 | args.add(RValue::get(self), getContext().getObjCIdType()); |
1531 | 0 | args.add(RValue::get(cmd), getContext().getObjCSelType()); |
1532 | 0 | if (setOptimizedPropertyFn) { |
1533 | 0 | args.add(RValue::get(arg), getContext().getObjCIdType()); |
1534 | 0 | args.add(RValue::get(ivarOffset), getContext().getPointerDiffType()); |
1535 | 0 | CGCallee callee = CGCallee::forDirect(setOptimizedPropertyFn); |
1536 | 0 | EmitCall(getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, args), |
1537 | 0 | callee, ReturnValueSlot(), args); |
1538 | 0 | } else { |
1539 | 0 | args.add(RValue::get(ivarOffset), getContext().getPointerDiffType()); |
1540 | 0 | args.add(RValue::get(arg), getContext().getObjCIdType()); |
1541 | 0 | args.add(RValue::get(Builder.getInt1(strategy.isAtomic())), |
1542 | 0 | getContext().BoolTy); |
1543 | 0 | args.add(RValue::get(Builder.getInt1(strategy.isCopy())), |
1544 | 0 | getContext().BoolTy); |
1545 | | // FIXME: We shouldn't need to get the function info here, the runtime |
1546 | | // already should have computed it to build the function. |
1547 | 0 | CGCallee callee = CGCallee::forDirect(setPropertyFn); |
1548 | 0 | EmitCall(getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, args), |
1549 | 0 | callee, ReturnValueSlot(), args); |
1550 | 0 | } |
1551 | |
1552 | 0 | return; |
1553 | 0 | } |
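Shapes of the two setter entry points selected between above, again with void* standing in for id/SEL. The optimized forms bake the atomic/copy flags into the symbol name and reorder the arguments, which is why the CallArgList is built differently in the two branches:

    #include <cstddef>

    // void objc_setProperty(id self, SEL _cmd, ptrdiff_t offset, id newValue,
    //                       BOOL atomic, BOOL shouldCopy);
    extern "C" void objc_setProperty(void *self, void *_cmd,
                                     std::ptrdiff_t offset, void *newValue,
                                     bool atomic, bool shouldCopy);

    // One of the four optimized variants, e.g.:
    // void objc_setProperty_nonatomic(id self, SEL _cmd, id newValue,
    //                                 ptrdiff_t offset);
    extern "C" void objc_setProperty_nonatomic(void *self, void *_cmd,
                                               void *newValue,
                                               std::ptrdiff_t offset);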
1554 | | |
1555 | 0 | case PropertyImplStrategy::CopyStruct: |
1556 | 0 | emitStructSetterCall(*this, setterMethod, ivar); |
1557 | 0 | return; |
1558 | | |
1559 | 0 | case PropertyImplStrategy::Expression: |
1560 | 0 | break; |
1561 | 0 | } |
1562 | | |
1563 | | // Otherwise, fake up some ASTs and emit a normal assignment. |
1564 | 0 | ValueDecl *selfDecl = setterMethod->getSelfDecl(); |
1565 | 0 | DeclRefExpr self(getContext(), selfDecl, false, selfDecl->getType(), |
1566 | 0 | VK_LValue, SourceLocation()); |
1567 | 0 | ImplicitCastExpr selfLoad(ImplicitCastExpr::OnStack, selfDecl->getType(), |
1568 | 0 | CK_LValueToRValue, &self, VK_PRValue, |
1569 | 0 | FPOptionsOverride()); |
1570 | 0 | ObjCIvarRefExpr ivarRef(ivar, ivar->getType().getNonReferenceType(), |
1571 | 0 | SourceLocation(), SourceLocation(), |
1572 | 0 | &selfLoad, true, true); |
1573 | |
1574 | 0 | ParmVarDecl *argDecl = *setterMethod->param_begin(); |
1575 | 0 | QualType argType = argDecl->getType().getNonReferenceType(); |
1576 | 0 | DeclRefExpr arg(getContext(), argDecl, false, argType, VK_LValue, |
1577 | 0 | SourceLocation()); |
1578 | 0 | ImplicitCastExpr argLoad(ImplicitCastExpr::OnStack, |
1579 | 0 | argType.getUnqualifiedType(), CK_LValueToRValue, |
1580 | 0 | &arg, VK_PRValue, FPOptionsOverride()); |
1581 | | |
1582 | | // The property type can differ from the ivar type in some situations with |
1583 | | // Objective-C pointer types; we can always bitcast the RHS in these cases.
1584 | | // The following absurdity is just to ensure well-formed IR. |
1585 | 0 | CastKind argCK = CK_NoOp; |
1586 | 0 | if (ivarRef.getType()->isObjCObjectPointerType()) { |
1587 | 0 | if (argLoad.getType()->isObjCObjectPointerType()) |
1588 | 0 | argCK = CK_BitCast; |
1589 | 0 | else if (argLoad.getType()->isBlockPointerType()) |
1590 | 0 | argCK = CK_BlockPointerToObjCPointerCast; |
1591 | 0 | else |
1592 | 0 | argCK = CK_CPointerToObjCPointerCast; |
1593 | 0 | } else if (ivarRef.getType()->isBlockPointerType()) { |
1594 | 0 | if (argLoad.getType()->isBlockPointerType()) |
1595 | 0 | argCK = CK_BitCast; |
1596 | 0 | else |
1597 | 0 | argCK = CK_AnyPointerToBlockPointerCast; |
1598 | 0 | } else if (ivarRef.getType()->isPointerType()) { |
1599 | 0 | argCK = CK_BitCast; |
1600 | 0 | } else if (argLoad.getType()->isAtomicType() && |
1601 | 0 | !ivarRef.getType()->isAtomicType()) { |
1602 | 0 | argCK = CK_AtomicToNonAtomic; |
1603 | 0 | } else if (!argLoad.getType()->isAtomicType() && |
1604 | 0 | ivarRef.getType()->isAtomicType()) { |
1605 | 0 | argCK = CK_NonAtomicToAtomic; |
1606 | 0 | } |
1607 | 0 | ImplicitCastExpr argCast(ImplicitCastExpr::OnStack, ivarRef.getType(), argCK, |
1608 | 0 | &argLoad, VK_PRValue, FPOptionsOverride()); |
1609 | 0 | Expr *finalArg = &argLoad; |
1610 | 0 | if (!getContext().hasSameUnqualifiedType(ivarRef.getType(), |
1611 | 0 | argLoad.getType())) |
1612 | 0 | finalArg = &argCast; |
1613 | |
1614 | 0 | BinaryOperator *assign = BinaryOperator::Create( |
1615 | 0 | getContext(), &ivarRef, finalArg, BO_Assign, ivarRef.getType(), |
1616 | 0 | VK_PRValue, OK_Ordinary, SourceLocation(), FPOptionsOverride()); |
1617 | 0 | EmitStmt(assign); |
1618 | 0 | } |
1619 | | |
1620 | | /// Generate an Objective-C property setter function. |
1621 | | /// |
1622 | | /// The given Decl must be an ObjCImplementationDecl. \@synthesize |
1623 | | /// is illegal within a category. |
1624 | | void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP, |
1625 | 0 | const ObjCPropertyImplDecl *PID) { |
1626 | 0 | llvm::Constant *AtomicHelperFn = |
1627 | 0 | CodeGenFunction(CGM).GenerateObjCAtomicSetterCopyHelperFunction(PID); |
1628 | 0 | ObjCMethodDecl *OMD = PID->getSetterMethodDecl(); |
1629 | 0 | assert(OMD && "Invalid call to generate setter (empty method)"); |
1630 | 0 | StartObjCMethod(OMD, IMP->getClassInterface()); |
1631 | |
1632 | 0 | generateObjCSetterBody(IMP, PID, AtomicHelperFn); |
1633 | |
1634 | 0 | FinishFunction(OMD->getEndLoc()); |
1635 | 0 | } |
1636 | | |
1637 | | namespace { |
1638 | | struct DestroyIvar final : EHScopeStack::Cleanup { |
1639 | | private: |
1640 | | llvm::Value *addr; |
1641 | | const ObjCIvarDecl *ivar; |
1642 | | CodeGenFunction::Destroyer *destroyer; |
1643 | | bool useEHCleanupForArray; |
1644 | | public: |
1645 | | DestroyIvar(llvm::Value *addr, const ObjCIvarDecl *ivar, |
1646 | | CodeGenFunction::Destroyer *destroyer, |
1647 | | bool useEHCleanupForArray) |
1648 | | : addr(addr), ivar(ivar), destroyer(destroyer), |
1649 | 0 | useEHCleanupForArray(useEHCleanupForArray) {} |
1650 | | |
1651 | 0 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
1652 | 0 | LValue lvalue |
1653 | 0 | = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), addr, ivar, /*CVR*/ 0); |
1654 | 0 | CGF.emitDestroy(lvalue.getAddress(CGF), ivar->getType(), destroyer, |
1655 | 0 | flags.isForNormalCleanup() && useEHCleanupForArray); |
1656 | 0 | } |
1657 | | }; |
1658 | | } |
1659 | | |
1660 | | /// Like CodeGenFunction::destroyARCStrong, but do it with a call. |
1661 | | static void destroyARCStrongWithStore(CodeGenFunction &CGF, |
1662 | | Address addr, |
1663 | 0 | QualType type) { |
1664 | 0 | llvm::Value *null = getNullForVariable(addr); |
1665 | 0 | CGF.EmitARCStoreStrongCall(addr, null, /*ignored*/ true); |
1666 | 0 | } |
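objc_storeStrong retains the incoming value and releases the one previously stored, so storing null is exactly a destroy; its shape (void** standing in for id*):

    // void objc_storeStrong(id *object, id value);
    extern "C" void objc_storeStrong(void **object, void *value);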
1667 | | |
1668 | | static void emitCXXDestructMethod(CodeGenFunction &CGF, |
1669 | 0 | ObjCImplementationDecl *impl) { |
1670 | 0 | CodeGenFunction::RunCleanupsScope scope(CGF); |
1671 | |
1672 | 0 | llvm::Value *self = CGF.LoadObjCSelf(); |
1673 | |
1674 | 0 | const ObjCInterfaceDecl *iface = impl->getClassInterface(); |
1675 | 0 | for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin(); |
1676 | 0 | ivar; ivar = ivar->getNextIvar()) { |
1677 | 0 | QualType type = ivar->getType(); |
1678 | | |
1679 | | // Check whether the ivar is a destructible type. |
1680 | 0 | QualType::DestructionKind dtorKind = type.isDestructedType(); |
1681 | 0 | if (!dtorKind) continue; |
1682 | | |
1683 | 0 | CodeGenFunction::Destroyer *destroyer = nullptr; |
1684 | | |
1685 | | // Use a call to objc_storeStrong to destroy strong ivars, for the |
1686 | | // general benefit of the tools. |
1687 | 0 | if (dtorKind == QualType::DK_objc_strong_lifetime) { |
1688 | 0 | destroyer = destroyARCStrongWithStore; |
1689 | | |
1690 | | // Otherwise use the default for the destruction kind. |
1691 | 0 | } else { |
1692 | 0 | destroyer = CGF.getDestroyer(dtorKind); |
1693 | 0 | } |
1694 | |
1695 | 0 | CleanupKind cleanupKind = CGF.getCleanupKind(dtorKind); |
1696 | |
1697 | 0 | CGF.EHStack.pushCleanup<DestroyIvar>(cleanupKind, self, ivar, destroyer, |
1698 | 0 | cleanupKind & EHCleanup); |
1699 | 0 | } |
1700 | |
1701 | 0 | assert(scope.requiresCleanups() && "nothing to do in .cxx_destruct?"); |
1702 | 0 | } |
1703 | | |
1704 | | void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP, |
1705 | | ObjCMethodDecl *MD, |
1706 | 0 | bool ctor) { |
1707 | 0 | MD->createImplicitParams(CGM.getContext(), IMP->getClassInterface()); |
1708 | 0 | StartObjCMethod(MD, IMP->getClassInterface()); |
1709 | | |
1710 | | // Emit .cxx_construct. |
1711 | 0 | if (ctor) { |
1712 | | // Suppress the final autorelease in ARC. |
1713 | 0 | AutoreleaseResult = false; |
1714 | |
1715 | 0 | for (const auto *IvarInit : IMP->inits()) { |
1716 | 0 | FieldDecl *Field = IvarInit->getAnyMember(); |
1717 | 0 | ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(Field); |
1718 | 0 | LValue LV = EmitLValueForIvar(TypeOfSelfObject(), |
1719 | 0 | LoadObjCSelf(), Ivar, 0); |
1720 | 0 | EmitAggExpr(IvarInit->getInit(), |
1721 | 0 | AggValueSlot::forLValue(LV, *this, AggValueSlot::IsDestructed, |
1722 | 0 | AggValueSlot::DoesNotNeedGCBarriers, |
1723 | 0 | AggValueSlot::IsNotAliased, |
1724 | 0 | AggValueSlot::DoesNotOverlap)); |
1725 | 0 | } |
1726 | | // constructor returns 'self'. |
1727 | 0 | CodeGenTypes &Types = CGM.getTypes(); |
1728 | 0 | QualType IdTy(CGM.getContext().getObjCIdType()); |
1729 | 0 | llvm::Value *SelfAsId = |
1730 | 0 | Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy)); |
1731 | 0 | EmitReturnOfRValue(RValue::get(SelfAsId), IdTy); |
1732 | | |
1733 | | // Emit .cxx_destruct. |
1734 | 0 | } else { |
1735 | 0 | emitCXXDestructMethod(*this, IMP); |
1736 | 0 | } |
1737 | 0 | FinishFunction(); |
1738 | 0 | } |
1739 | | |
1740 | 0 | llvm::Value *CodeGenFunction::LoadObjCSelf() { |
1741 | 0 | VarDecl *Self = cast<ObjCMethodDecl>(CurFuncDecl)->getSelfDecl(); |
1742 | 0 | DeclRefExpr DRE(getContext(), Self, |
1743 | 0 | /*is enclosing local*/ (CurFuncDecl != CurCodeDecl), |
1744 | 0 | Self->getType(), VK_LValue, SourceLocation()); |
1745 | 0 | return EmitLoadOfScalar(EmitDeclRefLValue(&DRE), SourceLocation()); |
1746 | 0 | } |
1747 | | |
1748 | 0 | QualType CodeGenFunction::TypeOfSelfObject() { |
1749 | 0 | const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl); |
1750 | 0 | ImplicitParamDecl *selfDecl = OMD->getSelfDecl(); |
1751 | 0 | const ObjCObjectPointerType *PTy = cast<ObjCObjectPointerType>( |
1752 | 0 | getContext().getCanonicalType(selfDecl->getType())); |
1753 | 0 | return PTy->getPointeeType(); |
1754 | 0 | } |
1755 | | |
1756 | 0 | void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){ |
1757 | 0 | llvm::FunctionCallee EnumerationMutationFnPtr = |
1758 | 0 | CGM.getObjCRuntime().EnumerationMutationFunction(); |
1759 | 0 | if (!EnumerationMutationFnPtr) { |
1760 | 0 | CGM.ErrorUnsupported(&S, "Obj-C fast enumeration for this runtime"); |
1761 | 0 | return; |
1762 | 0 | } |
1763 | 0 | CGCallee EnumerationMutationFn = |
1764 | 0 | CGCallee::forDirect(EnumerationMutationFnPtr); |
1765 | |
1766 | 0 | CGDebugInfo *DI = getDebugInfo(); |
1767 | 0 | if (DI) |
1768 | 0 | DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin()); |
1769 | |
|
1770 | 0 | RunCleanupsScope ForScope(*this); |
1771 | | |
1772 | | // The local variable comes into scope immediately. |
1773 | 0 | AutoVarEmission variable = AutoVarEmission::invalid(); |
1774 | 0 | if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) |
1775 | 0 | variable = EmitAutoVarAlloca(*cast<VarDecl>(SD->getSingleDecl())); |
1776 | |
1777 | 0 | JumpDest LoopEnd = getJumpDestInCurrentScope("forcoll.end"); |
1778 | | |
1779 | | // Fast enumeration state. |
1780 | 0 | QualType StateTy = CGM.getObjCFastEnumerationStateType(); |
1781 | 0 | Address StatePtr = CreateMemTemp(StateTy, "state.ptr"); |
1782 | 0 | EmitNullInitialization(StatePtr, StateTy); |
1783 | | |
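The record being zero-initialized here follows the public NSFastEnumerationState layout; a C++ rendering (id* shown as void**). The struct GEPs below pick out itemsPtr (index 1) and mutationsPtr (index 2):

    struct FastEnumerationState {   // mirrors NSFastEnumerationState
      unsigned long state;
      void **itemsPtr;              // batch buffer; may point at 'items.ptr'
      unsigned long *mutationsPtr;  // watched to detect concurrent mutation
      unsigned long extra[5];
    };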
1784 | | // Number of elements in the items array. |
1785 | 0 | static const unsigned NumItems = 16; |
1786 | | |
1787 | | // Fetch the countByEnumeratingWithState:objects:count: selector. |
1788 | 0 | IdentifierInfo *II[] = { |
1789 | 0 | &CGM.getContext().Idents.get("countByEnumeratingWithState"), |
1790 | 0 | &CGM.getContext().Idents.get("objects"), |
1791 | 0 | &CGM.getContext().Idents.get("count") |
1792 | 0 | }; |
1793 | 0 | Selector FastEnumSel = |
1794 | 0 | CGM.getContext().Selectors.getSelector(std::size(II), &II[0]); |
1795 | |
1796 | 0 | QualType ItemsTy = getContext().getConstantArrayType( |
1797 | 0 | getContext().getObjCIdType(), llvm::APInt(32, NumItems), nullptr, |
1798 | 0 | ArraySizeModifier::Normal, 0); |
1799 | 0 | Address ItemsPtr = CreateMemTemp(ItemsTy, "items.ptr"); |
1800 | | |
1801 | | // Emit the collection pointer. In ARC, we do a retain. |
1802 | 0 | llvm::Value *Collection; |
1803 | 0 | if (getLangOpts().ObjCAutoRefCount) { |
1804 | 0 | Collection = EmitARCRetainScalarExpr(S.getCollection()); |
1805 | | |
1806 | | // Enter a cleanup to do the release. |
1807 | 0 | EmitObjCConsumeObject(S.getCollection()->getType(), Collection); |
1808 | 0 | } else { |
1809 | 0 | Collection = EmitScalarExpr(S.getCollection()); |
1810 | 0 | } |
1811 | | |
1812 | | // The 'continue' label needs to appear within the cleanup for the |
1813 | | // collection object. |
1814 | 0 | JumpDest AfterBody = getJumpDestInCurrentScope("forcoll.next"); |
1815 | | |
1816 | | // Send it our message: |
1817 | 0 | CallArgList Args; |
1818 | | |
1819 | | // The first argument is a temporary of the enumeration-state type. |
1820 | 0 | Args.add(RValue::get(StatePtr.getPointer()), |
1821 | 0 | getContext().getPointerType(StateTy)); |
1822 | | |
1823 | | // The second argument is a temporary array with space for NumItems |
1824 | | // pointers. We'll actually be loading elements from the array |
1825 | | // pointer written into the control state; this buffer is so that |
1826 | | // collections that *aren't* backed by arrays can still queue up |
1827 | | // batches of elements. |
1828 | 0 | Args.add(RValue::get(ItemsPtr.getPointer()), |
1829 | 0 | getContext().getPointerType(ItemsTy)); |
1830 | | |
1831 | | // The third argument is the capacity of that temporary array. |
1832 | 0 | llvm::Type *NSUIntegerTy = ConvertType(getContext().getNSUIntegerType()); |
1833 | 0 | llvm::Constant *Count = llvm::ConstantInt::get(NSUIntegerTy, NumItems); |
1834 | 0 | Args.add(RValue::get(Count), getContext().getNSUIntegerType()); |
1835 | | |
1836 | | // Start the enumeration. |
1837 | 0 | RValue CountRV = |
1838 | 0 | CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(), |
1839 | 0 | getContext().getNSUIntegerType(), |
1840 | 0 | FastEnumSel, Collection, Args); |
1841 | | |
1842 | | // The initial number of objects that were returned in the buffer. |
1843 | 0 | llvm::Value *initialBufferLimit = CountRV.getScalarVal(); |
1844 | |
1845 | 0 | llvm::BasicBlock *EmptyBB = createBasicBlock("forcoll.empty"); |
1846 | 0 | llvm::BasicBlock *LoopInitBB = createBasicBlock("forcoll.loopinit"); |
1847 | |
1848 | 0 | llvm::Value *zero = llvm::Constant::getNullValue(NSUIntegerTy); |
1849 | | |
1850 | | // If the limit was zero to begin with, the collection is
1851 | | // empty; skip all this. Set the branch weight assuming this has the same |
1852 | | // probability of exiting the loop as any other loop exit. |
1853 | 0 | uint64_t EntryCount = getCurrentProfileCount(); |
1854 | 0 | Builder.CreateCondBr( |
1855 | 0 | Builder.CreateICmpEQ(initialBufferLimit, zero, "iszero"), EmptyBB, |
1856 | 0 | LoopInitBB, |
1857 | 0 | createProfileWeights(EntryCount, getProfileCount(S.getBody()))); |
1858 | | |
1859 | | // Otherwise, initialize the loop. |
1860 | 0 | EmitBlock(LoopInitBB); |
1861 | | |
1862 | | // Save the initial mutations value. This is the value at an |
1863 | | // address that was written into the state object by |
1864 | | // countByEnumeratingWithState:objects:count:. |
1865 | 0 | Address StateMutationsPtrPtr = |
1866 | 0 | Builder.CreateStructGEP(StatePtr, 2, "mutationsptr.ptr"); |
1867 | 0 | llvm::Value *StateMutationsPtr |
1868 | 0 | = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr"); |
1869 | |
1870 | 0 | llvm::Type *UnsignedLongTy = ConvertType(getContext().UnsignedLongTy); |
1871 | 0 | llvm::Value *initialMutations = |
1872 | 0 | Builder.CreateAlignedLoad(UnsignedLongTy, StateMutationsPtr, |
1873 | 0 | getPointerAlign(), "forcoll.initial-mutations"); |
1874 | | |
1875 | | // Start looping. This is the point we return to whenever we have a |
1876 | | // fresh, non-empty batch of objects. |
1877 | 0 | llvm::BasicBlock *LoopBodyBB = createBasicBlock("forcoll.loopbody"); |
1878 | 0 | EmitBlock(LoopBodyBB); |
1879 | | |
1880 | | // The current index into the buffer. |
1881 | 0 | llvm::PHINode *index = Builder.CreatePHI(NSUIntegerTy, 3, "forcoll.index"); |
1882 | 0 | index->addIncoming(zero, LoopInitBB); |
1883 | | |
1884 | | // The current buffer size. |
1885 | 0 | llvm::PHINode *count = Builder.CreatePHI(NSUIntegerTy, 3, "forcoll.count"); |
1886 | 0 | count->addIncoming(initialBufferLimit, LoopInitBB); |
1887 | |
1888 | 0 | incrementProfileCounter(&S); |
1889 | | |
1890 | | // Check whether the mutations value has changed from where it was |
1891 | | // at start. StateMutationsPtr should actually be invariant between |
1892 | | // refreshes. |
1893 | 0 | StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr"); |
1894 | 0 | llvm::Value *currentMutations |
1895 | 0 | = Builder.CreateAlignedLoad(UnsignedLongTy, StateMutationsPtr, |
1896 | 0 | getPointerAlign(), "statemutations"); |
1897 | |
1898 | 0 | llvm::BasicBlock *WasMutatedBB = createBasicBlock("forcoll.mutated"); |
1899 | 0 | llvm::BasicBlock *WasNotMutatedBB = createBasicBlock("forcoll.notmutated"); |
1900 | |
1901 | 0 | Builder.CreateCondBr(Builder.CreateICmpEQ(currentMutations, initialMutations), |
1902 | 0 | WasNotMutatedBB, WasMutatedBB); |
1903 | | |
1904 | | // If so, call the enumeration-mutation function. |
1905 | 0 | EmitBlock(WasMutatedBB); |
1906 | 0 | llvm::Type *ObjCIdType = ConvertType(getContext().getObjCIdType()); |
1907 | 0 | llvm::Value *V = |
1908 | 0 | Builder.CreateBitCast(Collection, ObjCIdType); |
1909 | 0 | CallArgList Args2; |
1910 | 0 | Args2.add(RValue::get(V), getContext().getObjCIdType()); |
1911 | | // FIXME: We shouldn't need to get the function info here, the runtime already |
1912 | | // should have computed it to build the function. |
1913 | 0 | EmitCall( |
1914 | 0 | CGM.getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, Args2), |
1915 | 0 | EnumerationMutationFn, ReturnValueSlot(), Args2); |
1916 | | |
1917 | | // Otherwise, or if the mutation function returns, just continue. |
1918 | 0 | EmitBlock(WasNotMutatedBB); |
1919 | | |
1920 | | // Initialize the element variable. |
1921 | 0 | RunCleanupsScope elementVariableScope(*this); |
1922 | 0 | bool elementIsVariable; |
1923 | 0 | LValue elementLValue; |
1924 | 0 | QualType elementType; |
1925 | 0 | if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) { |
1926 | | // Initialize the variable, in case it's a __block variable or something. |
1927 | 0 | EmitAutoVarInit(variable); |
1928 | |
1929 | 0 | const VarDecl *D = cast<VarDecl>(SD->getSingleDecl()); |
1930 | 0 | DeclRefExpr tempDRE(getContext(), const_cast<VarDecl *>(D), false, |
1931 | 0 | D->getType(), VK_LValue, SourceLocation()); |
1932 | 0 | elementLValue = EmitLValue(&tempDRE); |
1933 | 0 | elementType = D->getType(); |
1934 | 0 | elementIsVariable = true; |
1935 | |
|
1936 | 0 | if (D->isARCPseudoStrong()) |
1937 | 0 | elementLValue.getQuals().setObjCLifetime(Qualifiers::OCL_ExplicitNone); |
1938 | 0 | } else { |
1939 | 0 | elementLValue = LValue(); // suppress warning |
1940 | 0 | elementType = cast<Expr>(S.getElement())->getType(); |
1941 | 0 | elementIsVariable = false; |
1942 | 0 | } |
1943 | 0 | llvm::Type *convertedElementType = ConvertType(elementType); |
1944 | | |
1945 | | // Fetch the buffer out of the enumeration state. |
1946 | | // TODO: this pointer should actually be invariant between |
1947 | | // refreshes, which would help us do certain loop optimizations. |
1948 | 0 | Address StateItemsPtr = |
1949 | 0 | Builder.CreateStructGEP(StatePtr, 1, "stateitems.ptr"); |
1950 | 0 | llvm::Value *EnumStateItems = |
1951 | 0 | Builder.CreateLoad(StateItemsPtr, "stateitems"); |
1952 | | |
1953 | | // Fetch the value at the current index from the buffer. |
1954 | 0 | llvm::Value *CurrentItemPtr = Builder.CreateGEP( |
1955 | 0 | ObjCIdType, EnumStateItems, index, "currentitem.ptr"); |
1956 | 0 | llvm::Value *CurrentItem = |
1957 | 0 | Builder.CreateAlignedLoad(ObjCIdType, CurrentItemPtr, getPointerAlign()); |
1958 | |
1959 | 0 | if (SanOpts.has(SanitizerKind::ObjCCast)) { |
1960 | | // Before using an item from the collection, check that the implicit cast |
1961 | | // from id to the element type is valid. This is done with instrumentation |
1962 | | // roughly corresponding to: |
1963 | | // |
1964 | | // if (![item isKindOfClass:expectedCls]) { /* emit diagnostic */ } |
1965 | 0 | const ObjCObjectPointerType *ObjPtrTy = |
1966 | 0 | elementType->getAsObjCInterfacePointerType(); |
1967 | 0 | const ObjCInterfaceType *InterfaceTy = |
1968 | 0 | ObjPtrTy ? ObjPtrTy->getInterfaceType() : nullptr; |
1969 | 0 | if (InterfaceTy) { |
1970 | 0 | SanitizerScope SanScope(this); |
1971 | 0 | auto &C = CGM.getContext(); |
1972 | 0 | assert(InterfaceTy->getDecl() && "No decl for ObjC interface type"); |
1973 | 0 | Selector IsKindOfClassSel = GetUnarySelector("isKindOfClass", C); |
1974 | 0 | CallArgList IsKindOfClassArgs; |
1975 | 0 | llvm::Value *Cls = |
1976 | 0 | CGM.getObjCRuntime().GetClass(*this, InterfaceTy->getDecl()); |
1977 | 0 | IsKindOfClassArgs.add(RValue::get(Cls), C.getObjCClassType()); |
1978 | 0 | llvm::Value *IsClass = |
1979 | 0 | CGM.getObjCRuntime() |
1980 | 0 | .GenerateMessageSend(*this, ReturnValueSlot(), C.BoolTy, |
1981 | 0 | IsKindOfClassSel, CurrentItem, |
1982 | 0 | IsKindOfClassArgs) |
1983 | 0 | .getScalarVal(); |
1984 | 0 | llvm::Constant *StaticData[] = { |
1985 | 0 | EmitCheckSourceLocation(S.getBeginLoc()), |
1986 | 0 | EmitCheckTypeDescriptor(QualType(InterfaceTy, 0))}; |
1987 | 0 | EmitCheck({{IsClass, SanitizerKind::ObjCCast}}, |
1988 | 0 | SanitizerHandler::InvalidObjCCast, |
1989 | 0 | ArrayRef<llvm::Constant *>(StaticData), CurrentItem); |
1990 | 0 | } |
1991 | 0 | } |
1992 | | |
1993 | | // Cast that value to the right type. |
1994 | 0 | CurrentItem = Builder.CreateBitCast(CurrentItem, convertedElementType, |
1995 | 0 | "currentitem"); |
1996 | | |
1997 | | // Make sure we have an l-value. Yes, this gets evaluated every |
1998 | | // time through the loop. |
1999 | 0 | if (!elementIsVariable) { |
2000 | 0 | elementLValue = EmitLValue(cast<Expr>(S.getElement())); |
2001 | 0 | EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue); |
2002 | 0 | } else { |
2003 | 0 | EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue, |
2004 | 0 | /*isInit*/ true); |
2005 | 0 | } |
2006 | | |
2007 | | // If we do have an element variable, this assignment is the end of |
2008 | | // its initialization. |
2009 | 0 | if (elementIsVariable) |
2010 | 0 | EmitAutoVarCleanups(variable); |
2011 | | |
2012 | | // Perform the loop body, setting up break and continue labels. |
2013 | 0 | BreakContinueStack.push_back(BreakContinue(LoopEnd, AfterBody)); |
2014 | 0 | { |
2015 | 0 | RunCleanupsScope Scope(*this); |
2016 | 0 | EmitStmt(S.getBody()); |
2017 | 0 | } |
2018 | 0 | BreakContinueStack.pop_back(); |
2019 | | |
2020 | | // Destroy the element variable now. |
2021 | 0 | elementVariableScope.ForceCleanup(); |
2022 | | |
2023 | | // Check whether there are more elements. |
2024 | 0 | EmitBlock(AfterBody.getBlock()); |
2025 | |
2026 | 0 | llvm::BasicBlock *FetchMoreBB = createBasicBlock("forcoll.refetch"); |
2027 | | |
2028 | | // First we check in the local buffer. |
2029 | 0 | llvm::Value *indexPlusOne = |
2030 | 0 | Builder.CreateAdd(index, llvm::ConstantInt::get(NSUIntegerTy, 1)); |
2031 | | |
2032 | | // If we haven't overrun the buffer yet, we can continue. |
2033 | | // Set the branch weights based on the simplifying assumption that this is |
2034 | | // like a while-loop, i.e., ignoring that the false branch fetches more |
2035 | | // elements and then returns to the loop. |
2036 | 0 | Builder.CreateCondBr( |
2037 | 0 | Builder.CreateICmpULT(indexPlusOne, count), LoopBodyBB, FetchMoreBB, |
2038 | 0 | createProfileWeights(getProfileCount(S.getBody()), EntryCount)); |
2039 | |
2040 | 0 | index->addIncoming(indexPlusOne, AfterBody.getBlock()); |
2041 | 0 | count->addIncoming(count, AfterBody.getBlock()); |
2042 | | |
2043 | | // Otherwise, we have to fetch more elements. |
2044 | 0 | EmitBlock(FetchMoreBB); |
2045 | |
2046 | 0 | CountRV = |
2047 | 0 | CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(), |
2048 | 0 | getContext().getNSUIntegerType(), |
2049 | 0 | FastEnumSel, Collection, Args); |
2050 | | |
2051 | | // If we got a zero count, we're done. |
2052 | 0 | llvm::Value *refetchCount = CountRV.getScalarVal(); |
2053 | | |
2054 | | // (note that the message send might split FetchMoreBB) |
2055 | 0 | index->addIncoming(zero, Builder.GetInsertBlock()); |
2056 | 0 | count->addIncoming(refetchCount, Builder.GetInsertBlock()); |
2057 | |
|
2058 | 0 | Builder.CreateCondBr(Builder.CreateICmpEQ(refetchCount, zero), |
2059 | 0 | EmptyBB, LoopBodyBB); |
2060 | | |
2061 | | // No more elements. |
2062 | 0 | EmitBlock(EmptyBB); |
2063 | |
2064 | 0 | if (!elementIsVariable) { |
2065 | | // If the element was not a declaration, set it to be null. |
2066 | |
2067 | 0 | llvm::Value *null = llvm::Constant::getNullValue(convertedElementType); |
2068 | 0 | elementLValue = EmitLValue(cast<Expr>(S.getElement())); |
2069 | 0 | EmitStoreThroughLValue(RValue::get(null), elementLValue); |
2070 | 0 | } |
2071 | |
2072 | 0 | if (DI) |
2073 | 0 | DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd()); |
2074 | |
2075 | 0 | ForScope.ForceCleanup(); |
2076 | 0 | EmitBlock(LoopEnd.getBlock()); |
2077 | 0 | } |
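Stripped of the mutation check and the element stores, the emitted control flow has the shape below; a runnable C++ sketch in which fetch stands in for the countByEnumeratingWithState:objects:count: message send (all names illustrative):

    #include <cstddef>

    template <typename Fetch, typename Body>
    void forCollection(Fetch fetch, Body body) {
      void *buffer[16];                       // the 'items.ptr' temporary
      std::size_t count = fetch(buffer, 16);  // initial send
      while (count != 0) {                    // forcoll.empty check
        for (std::size_t i = 0; i != count; ++i)
          body(buffer[i]);                    // forcoll.loopbody
        count = fetch(buffer, 16);            // forcoll.refetch
      }
    }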
2078 | | |
2079 | 0 | void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S) { |
2080 | 0 | CGM.getObjCRuntime().EmitTryStmt(*this, S); |
2081 | 0 | } |
2082 | | |
2083 | 0 | void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) { |
2084 | 0 | CGM.getObjCRuntime().EmitThrowStmt(*this, S); |
2085 | 0 | } |
2086 | | |
2087 | | void CodeGenFunction::EmitObjCAtSynchronizedStmt( |
2088 | 0 | const ObjCAtSynchronizedStmt &S) { |
2089 | 0 | CGM.getObjCRuntime().EmitSynchronizedStmt(*this, S); |
2090 | 0 | } |
2091 | | |
2092 | | namespace { |
2093 | | struct CallObjCRelease final : EHScopeStack::Cleanup { |
2094 | 0 | CallObjCRelease(llvm::Value *object) : object(object) {} |
2095 | | llvm::Value *object; |
2096 | | |
2097 | 0 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
2098 | | // Releases at the end of the full-expression are imprecise. |
2099 | 0 | CGF.EmitARCRelease(object, ARCImpreciseLifetime); |
2100 | 0 | } |
2101 | | }; |
2102 | | } |
2103 | | |
2104 | | /// Produce the code for a CK_ARCConsumeObject. Does a primitive |
2105 | | /// release at the end of the full-expression. |
2106 | | llvm::Value *CodeGenFunction::EmitObjCConsumeObject(QualType type, |
2107 | 0 | llvm::Value *object) { |
2108 | | // If we're in a conditional branch, we need to make the cleanup |
2109 | | // conditional. |
2110 | 0 | pushFullExprCleanup<CallObjCRelease>(getARCCleanupKind(), object); |
2111 | 0 | return object; |
2112 | 0 | } |
2113 | | |
2114 | | llvm::Value *CodeGenFunction::EmitObjCExtendObjectLifetime(QualType type, |
2115 | 0 | llvm::Value *value) { |
2116 | 0 | return EmitARCRetainAutorelease(type, value); |
2117 | 0 | } |
2118 | | |
2119 | | /// Given a number of pointers, inform the optimizer that they're |
2120 | | /// being intrinsically used up until this point in the program. |
2121 | 0 | void CodeGenFunction::EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values) { |
2122 | 0 | llvm::Function *&fn = CGM.getObjCEntrypoints().clang_arc_use; |
2123 | 0 | if (!fn) |
2124 | 0 | fn = CGM.getIntrinsic(llvm::Intrinsic::objc_clang_arc_use); |
2125 | | |
2126 | | // This isn't really a "runtime" function, but as an intrinsic it |
2127 | | // doesn't really matter as long as we align things up. |
2128 | 0 | EmitNounwindRuntimeCall(fn, values); |
2129 | 0 | } |
2130 | | |
2131 | | /// Emit a call to "clang.arc.noop.use", which consumes the result of a call |
2132 | | /// that has operand bundle "clang.arc.attachedcall". |
2133 | 0 | void CodeGenFunction::EmitARCNoopIntrinsicUse(ArrayRef<llvm::Value *> values) { |
2134 | 0 | llvm::Function *&fn = CGM.getObjCEntrypoints().clang_arc_noop_use; |
2135 | 0 | if (!fn) |
2136 | 0 | fn = CGM.getIntrinsic(llvm::Intrinsic::objc_clang_arc_noop_use); |
2137 | 0 | EmitNounwindRuntimeCall(fn, values); |
2138 | 0 | } |
2139 | | |
2140 | 0 | static void setARCRuntimeFunctionLinkage(CodeGenModule &CGM, llvm::Value *RTF) { |
2141 | 0 | if (auto *F = dyn_cast<llvm::Function>(RTF)) { |
2142 | | // If the target runtime doesn't naturally support ARC, emit weak |
2143 | | // references to the runtime support library. We don't really |
2144 | | // permit this to fail, but we need a particular relocation style. |
2145 | 0 | if (!CGM.getLangOpts().ObjCRuntime.hasNativeARC() && |
2146 | 0 | !CGM.getTriple().isOSBinFormatCOFF()) { |
2147 | 0 | F->setLinkage(llvm::Function::ExternalWeakLinkage); |
2148 | 0 | } |
2149 | 0 | } |
2150 | 0 | } |
2151 | | |
2152 | | static void setARCRuntimeFunctionLinkage(CodeGenModule &CGM, |
2153 | 0 | llvm::FunctionCallee RTF) { |
2154 | 0 | setARCRuntimeFunctionLinkage(CGM, RTF.getCallee()); |
2155 | 0 | } |
2156 | | |
2157 | | static llvm::Function *getARCIntrinsic(llvm::Intrinsic::ID IntID, |
2158 | 0 | CodeGenModule &CGM) { |
2159 | 0 | llvm::Function *fn = CGM.getIntrinsic(IntID); |
2160 | 0 | setARCRuntimeFunctionLinkage(CGM, fn); |
2161 | 0 | return fn; |
2162 | 0 | } |
2163 | | |
2164 | | /// Perform an operation having the signature |
2165 | | /// i8* (i8*) |
2166 | | /// where a null input causes a no-op and returns null. |
2167 | | static llvm::Value *emitARCValueOperation( |
2168 | | CodeGenFunction &CGF, llvm::Value *value, llvm::Type *returnType, |
2169 | | llvm::Function *&fn, llvm::Intrinsic::ID IntID, |
2170 | 0 | llvm::CallInst::TailCallKind tailKind = llvm::CallInst::TCK_None) { |
2171 | 0 | if (isa<llvm::ConstantPointerNull>(value)) |
2172 | 0 | return value; |
2173 | | |
2174 | 0 | if (!fn) |
2175 | 0 | fn = getARCIntrinsic(IntID, CGF.CGM); |
2176 | | |
2177 | | // Cast the argument to 'id'. |
2178 | 0 | llvm::Type *origType = returnType ? returnType : value->getType(); |
2179 | 0 | value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy); |
2180 | | |
2181 | | // Call the function. |
2182 | 0 | llvm::CallInst *call = CGF.EmitNounwindRuntimeCall(fn, value); |
2183 | 0 | call->setTailCallKind(tailKind); |
2184 | | |
2185 | | // Cast the result back to the original type. |
2186 | 0 | return CGF.Builder.CreateBitCast(call, origType); |
2187 | 0 | } |
2188 | | |
2189 | | /// Perform an operation having the following signature: |
2190 | | /// i8* (i8**) |
2191 | | static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF, Address addr, |
2192 | | llvm::Function *&fn, |
2193 | 0 | llvm::Intrinsic::ID IntID) { |
2194 | 0 | if (!fn) |
2195 | 0 | fn = getARCIntrinsic(IntID, CGF.CGM); |
2196 | |
2197 | 0 | return CGF.EmitNounwindRuntimeCall(fn, addr.getPointer()); |
2198 | 0 | } |
2199 | | |
2200 | | /// Perform an operation having the following signature: |
2201 | | /// i8* (i8**, i8*) |
2202 | | static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF, Address addr, |
2203 | | llvm::Value *value, |
2204 | | llvm::Function *&fn, |
2205 | | llvm::Intrinsic::ID IntID, |
2206 | 0 | bool ignored) { |
2207 | 0 | assert(addr.getElementType() == value->getType()); |
2208 | | |
2209 | 0 | if (!fn) |
2210 | 0 | fn = getARCIntrinsic(IntID, CGF.CGM); |
2211 | |
2212 | 0 | llvm::Type *origType = value->getType(); |
2213 | |
2214 | 0 | llvm::Value *args[] = { |
2215 | 0 | CGF.Builder.CreateBitCast(addr.getPointer(), CGF.Int8PtrPtrTy), |
2216 | 0 | CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy) |
2217 | 0 | }; |
2218 | 0 | llvm::CallInst *result = CGF.EmitNounwindRuntimeCall(fn, args); |
2219 | |
2220 | 0 | if (ignored) return nullptr; |
2221 | | |
2222 | 0 | return CGF.Builder.CreateBitCast(result, origType); |
2223 | 0 | } |
2224 | | |
2225 | | /// Perform an operation having the following signature: |
2226 | | /// void (i8**, i8**) |
2227 | | static void emitARCCopyOperation(CodeGenFunction &CGF, Address dst, Address src, |
2228 | | llvm::Function *&fn, |
2229 | 0 | llvm::Intrinsic::ID IntID) { |
2230 | 0 | assert(dst.getType() == src.getType()); |
2231 | | |
2232 | 0 | if (!fn) |
2233 | 0 | fn = getARCIntrinsic(IntID, CGF.CGM); |
2234 | |
2235 | 0 | llvm::Value *args[] = { |
2236 | 0 | CGF.Builder.CreateBitCast(dst.getPointer(), CGF.Int8PtrPtrTy), |
2237 | 0 | CGF.Builder.CreateBitCast(src.getPointer(), CGF.Int8PtrPtrTy) |
2238 | 0 | }; |
2239 | 0 | CGF.EmitNounwindRuntimeCall(fn, args); |
2240 | 0 | } |
2241 | | |
2242 | | /// Perform an operation having the signature |
2243 | | /// i8* (i8*) |
2244 | | /// where a null input causes a no-op and returns null. |
2245 | | static llvm::Value *emitObjCValueOperation(CodeGenFunction &CGF, |
2246 | | llvm::Value *value, |
2247 | | llvm::Type *returnType, |
2248 | | llvm::FunctionCallee &fn, |
2249 | 0 | StringRef fnName) { |
2250 | 0 | if (isa<llvm::ConstantPointerNull>(value)) |
2251 | 0 | return value; |
2252 | | |
2253 | 0 | if (!fn) { |
2254 | 0 | llvm::FunctionType *fnType = |
2255 | 0 | llvm::FunctionType::get(CGF.Int8PtrTy, CGF.Int8PtrTy, false); |
2256 | 0 | fn = CGF.CGM.CreateRuntimeFunction(fnType, fnName); |
2257 | | |
2258 | | // We have Native ARC, so set nonlazybind attribute for performance |
2259 | 0 | if (llvm::Function *f = dyn_cast<llvm::Function>(fn.getCallee())) |
2260 | 0 | if (fnName == "objc_retain") |
2261 | 0 | f->addFnAttr(llvm::Attribute::NonLazyBind); |
2262 | 0 | } |
2263 | | |
2264 | | // Cast the argument to 'id'. |
2265 | 0 | llvm::Type *origType = returnType ? returnType : value->getType(); |
2266 | 0 | value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy); |
2267 | | |
2268 | | // Call the function. |
2269 | 0 | llvm::CallBase *Inst = CGF.EmitCallOrInvoke(fn, value); |
2270 | | |
2271 | | // Mark calls to objc_autorelease as tail on the assumption that methods |
2272 | | // overriding autorelease do not touch anything on the stack. |
2273 | 0 | if (fnName == "objc_autorelease") |
2274 | 0 | if (auto *Call = dyn_cast<llvm::CallInst>(Inst)) |
2275 | 0 | Call->setTailCall(); |
2276 | | |
2277 | | // Cast the result back to the original type. |
2278 | 0 | return CGF.Builder.CreateBitCast(Inst, origType); |
2279 | 0 | } |
2280 | | |
2281 | | /// Produce the code to do a retain. Based on the type, calls one of: |
2282 | | /// call i8* \@objc_retain(i8* %value) |
2283 | | /// call i8* \@objc_retainBlock(i8* %value) |
2284 | 0 | llvm::Value *CodeGenFunction::EmitARCRetain(QualType type, llvm::Value *value) { |
2285 | 0 | if (type->isBlockPointerType()) |
2286 | 0 | return EmitARCRetainBlock(value, /*mandatory*/ false); |
2287 | 0 | else |
2288 | 0 | return EmitARCRetainNonBlock(value); |
2289 | 0 | } |
2290 | | |
2291 | | /// Retain the given object, with normal retain semantics. |
2292 | | /// call i8* \@objc_retain(i8* %value) |
2293 | 0 | llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) { |
2294 | 0 | return emitARCValueOperation(*this, value, nullptr, |
2295 | 0 | CGM.getObjCEntrypoints().objc_retain, |
2296 | 0 | llvm::Intrinsic::objc_retain); |
2297 | 0 | } |
2298 | | |
2299 | | /// Retain the given block, with _Block_copy semantics. |
2300 | | /// call i8* \@objc_retainBlock(i8* %value) |
2301 | | /// |
2302 | | /// \param mandatory - If false, emit the call with metadata |
2303 | | /// indicating that it's okay for the optimizer to eliminate this call |
2304 | | /// if it can prove that the block never escapes except down the stack. |
2305 | | llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value, |
2306 | 0 | bool mandatory) { |
2307 | 0 | llvm::Value *result |
2308 | 0 | = emitARCValueOperation(*this, value, nullptr, |
2309 | 0 | CGM.getObjCEntrypoints().objc_retainBlock, |
2310 | 0 | llvm::Intrinsic::objc_retainBlock); |
2311 | | |
2312 | | // If the copy isn't mandatory, add !clang.arc.copy_on_escape to |
2313 | | // tell the optimizer that it doesn't need to do this copy if the |
2314 | | // block doesn't escape, where being passed as an argument doesn't |
2315 | | // count as escaping. |
2316 | 0 | if (!mandatory && isa<llvm::Instruction>(result)) { |
2317 | 0 | llvm::CallInst *call |
2318 | 0 | = cast<llvm::CallInst>(result->stripPointerCasts()); |
2319 | 0 | assert(call->getCalledOperand() == |
2320 | 0 | CGM.getObjCEntrypoints().objc_retainBlock); |
2321 | | |
2322 | 0 | call->setMetadata("clang.arc.copy_on_escape", |
2323 | 0 | llvm::MDNode::get(Builder.getContext(), std::nullopt)); |
2324 | 0 | } |
2325 | | |
2326 | 0 | return result; |
2327 | 0 | } |
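The two retain entry points chosen between above, sketched with void* for id:

    // id objc_retain(id value);       returns its argument, retained
    // id objc_retainBlock(id value);  may copy a stack block to the heap
    extern "C" void *objc_retain(void *value);
    extern "C" void *objc_retainBlock(void *value);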
2328 | | |
2329 | 0 | static void emitAutoreleasedReturnValueMarker(CodeGenFunction &CGF) { |
2330 | | // Fetch the void(void) inline asm which marks that we're going to |
2331 | | // do something with the autoreleased return value. |
2332 | 0 | llvm::InlineAsm *&marker |
2333 | 0 | = CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker; |
2334 | 0 | if (!marker) { |
2335 | 0 | StringRef assembly |
2336 | 0 | = CGF.CGM.getTargetCodeGenInfo() |
2337 | 0 | .getARCRetainAutoreleasedReturnValueMarker(); |
2338 | | |
2339 | | // If we have an empty assembly string, there's nothing to do. |
2340 | 0 | if (assembly.empty()) { |
2341 | | |
2342 | | // Otherwise, at -O0, build an inline asm that we're going to call |
2343 | | // in a moment. |
2344 | 0 | } else if (CGF.CGM.getCodeGenOpts().OptimizationLevel == 0) { |
2345 | 0 | llvm::FunctionType *type = |
2346 | 0 | llvm::FunctionType::get(CGF.VoidTy, /*variadic*/false); |
2347 | |
2348 | 0 | marker = llvm::InlineAsm::get(type, assembly, "", /*sideeffects*/ true); |
2349 | | |
2350 | | // If we're at -O1 and above, we don't want to litter the code |
2351 | | // with this marker yet, so leave a breadcrumb for the ARC |
2352 | | // optimizer to pick up. |
2353 | 0 | } else { |
2354 | 0 | const char *retainRVMarkerKey = llvm::objcarc::getRVMarkerModuleFlagStr(); |
2355 | 0 | if (!CGF.CGM.getModule().getModuleFlag(retainRVMarkerKey)) { |
2356 | 0 | auto *str = llvm::MDString::get(CGF.getLLVMContext(), assembly); |
2357 | 0 | CGF.CGM.getModule().addModuleFlag(llvm::Module::Error, |
2358 | 0 | retainRVMarkerKey, str); |
2359 | 0 | } |
2360 | 0 | } |
2361 | 0 | } |
2362 | | |
2363 | | // Call the marker asm if we made one, which we do only at -O0. |
2364 | 0 | if (marker) |
2365 | 0 | CGF.Builder.CreateCall(marker, std::nullopt, |
2366 | 0 | CGF.getBundlesForFunclet(marker)); |
2367 | 0 | } |
2368 | | |
2369 | | static llvm::Value *emitOptimizedARCReturnCall(llvm::Value *value, |
2370 | | bool IsRetainRV, |
2371 | 0 | CodeGenFunction &CGF) { |
2372 | 0 | emitAutoreleasedReturnValueMarker(CGF); |
2373 | | |
2374 | | // Add operand bundle "clang.arc.attachedcall" to the call instead of emitting |
2375 | | // retainRV or claimRV calls in the IR. We currently do this only when the |
2376 | | // optimization level isn't -O0 since global-isel, which is currently run at |
2377 | | // -O0, doesn't know about the operand bundle. |
2378 | 0 | ObjCEntrypoints &EPs = CGF.CGM.getObjCEntrypoints(); |
2379 | 0 | llvm::Function *&EP = IsRetainRV |
2380 | 0 | ? EPs.objc_retainAutoreleasedReturnValue |
2381 | 0 | : EPs.objc_unsafeClaimAutoreleasedReturnValue; |
2382 | 0 | llvm::Intrinsic::ID IID = |
2383 | 0 | IsRetainRV ? llvm::Intrinsic::objc_retainAutoreleasedReturnValue |
2384 | 0 | : llvm::Intrinsic::objc_unsafeClaimAutoreleasedReturnValue; |
2385 | 0 | EP = getARCIntrinsic(IID, CGF.CGM); |
2386 | |
2387 | 0 | llvm::Triple::ArchType Arch = CGF.CGM.getTriple().getArch(); |
2388 | | |
2389 | | // FIXME: Do this on all targets and at -O0 too. This can be enabled only if |
2390 | | // the target backend knows how to handle the operand bundle. |
2391 | 0 | if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0 && |
2392 | 0 | (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::x86_64)) { |
2393 | 0 | llvm::Value *bundleArgs[] = {EP}; |
2394 | 0 | llvm::OperandBundleDef OB("clang.arc.attachedcall", bundleArgs); |
2395 | 0 | auto *oldCall = cast<llvm::CallBase>(value); |
2396 | 0 | llvm::CallBase *newCall = llvm::CallBase::addOperandBundle( |
2397 | 0 | oldCall, llvm::LLVMContext::OB_clang_arc_attachedcall, OB, oldCall); |
2398 | 0 | newCall->copyMetadata(*oldCall); |
2399 | 0 | oldCall->replaceAllUsesWith(newCall); |
2400 | 0 | oldCall->eraseFromParent(); |
2401 | 0 | CGF.EmitARCNoopIntrinsicUse(newCall); |
2402 | 0 | return newCall; |
2403 | 0 | } |
2404 | | |
2405 | 0 | bool isNoTail = |
2406 | 0 | CGF.CGM.getTargetCodeGenInfo().markARCOptimizedReturnCallsAsNoTail(); |
2407 | 0 | llvm::CallInst::TailCallKind tailKind = |
2408 | 0 | isNoTail ? llvm::CallInst::TCK_NoTail : llvm::CallInst::TCK_None; |
2409 | 0 | return emitARCValueOperation(CGF, value, nullptr, EP, IID, tailKind); |
2410 | 0 | } |
2411 | | |
2412 | | /// Retain the given object which is the result of a function call. |
2413 | | /// call i8* \@objc_retainAutoreleasedReturnValue(i8* %value) |
2414 | | /// |
2415 | | /// Yes, this function name is one character away from a different |
2416 | | /// call with completely different semantics. |
2417 | | llvm::Value * |
2418 | 0 | CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) { |
2419 | 0 | return emitOptimizedARCReturnCall(value, true, *this); |
2420 | 0 | } |
2421 | | |
2422 | | /// Claim a possibly-autoreleased return value at +0. This is only |
2423 | | /// valid to do in contexts which do not rely on the retain to keep |
2424 | | /// the object valid for all of its uses; for example, when |
2425 | | /// the value is ignored, or when it is being assigned to an |
2426 | | /// __unsafe_unretained variable. |
2427 | | /// |
2428 | | /// call i8* \@objc_unsafeClaimAutoreleasedReturnValue(i8* %value) |
2429 | | llvm::Value * |
2430 | 0 | CodeGenFunction::EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value) { |
2431 | 0 | return emitOptimizedARCReturnCall(value, false, *this); |
2432 | 0 | } |
2433 | | |
2434 | | /// Release the given object. |
2435 | | /// call void \@objc_release(i8* %value) |
2436 | | void CodeGenFunction::EmitARCRelease(llvm::Value *value, |
2437 | 0 | ARCPreciseLifetime_t precise) { |
2438 | 0 | if (isa<llvm::ConstantPointerNull>(value)) return; |
2439 | | |
2440 | 0 | llvm::Function *&fn = CGM.getObjCEntrypoints().objc_release; |
2441 | 0 | if (!fn) |
2442 | 0 | fn = getARCIntrinsic(llvm::Intrinsic::objc_release, CGM); |
2443 | | |
2444 | | // Cast the argument to 'id'. |
2445 | 0 | value = Builder.CreateBitCast(value, Int8PtrTy); |
2446 | | |
2447 | | // Call objc_release. |
2448 | 0 | llvm::CallInst *call = EmitNounwindRuntimeCall(fn, value); |
2449 | |
2450 | 0 | if (precise == ARCImpreciseLifetime) { |
2451 | 0 | call->setMetadata("clang.imprecise_release", |
2452 | 0 | llvm::MDNode::get(Builder.getContext(), std::nullopt)); |
2453 | 0 | } |
2454 | 0 | } |
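
For reference, the two shapes this emits, depending on the precision argument (illustrative IR under the same naming assumptions as the sketch above; the metadata string is taken verbatim from the code):

    //   call void @llvm.objc.release(ptr %value)                            ; precise
    //   call void @llvm.objc.release(ptr %value), !clang.imprecise_release !0
    // where !0 is an empty metadata node. The marker licenses the ARC
    // optimizer to move or delete the release under imprecise-lifetime rules.
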
2455 | | |
2456 | | /// Destroy a __strong variable. |
2457 | | /// |
2458 | | /// At -O0, emit a call to store 'null' into the address; |
2459 | | /// instrumenting tools prefer this because the address is exposed, |
2460 | | /// but it's relatively cumbersome to optimize. |
2461 | | /// |
2462 | | /// At -O1 and above, just load and call objc_release. |
2463 | | /// |
2464 | | /// call void \@objc_storeStrong(i8** %addr, i8* null) |
2465 | | void CodeGenFunction::EmitARCDestroyStrong(Address addr, |
2466 | 0 | ARCPreciseLifetime_t precise) { |
2467 | 0 | if (CGM.getCodeGenOpts().OptimizationLevel == 0) { |
2468 | 0 | llvm::Value *null = getNullForVariable(addr); |
2469 | 0 | EmitARCStoreStrongCall(addr, null, /*ignored*/ true); |
2470 | 0 | return; |
2471 | 0 | } |
2472 | | |
2473 | 0 | llvm::Value *value = Builder.CreateLoad(addr); |
2474 | 0 | EmitARCRelease(value, precise); |
2475 | 0 | } |
2476 | | |
2477 | | /// Store into a strong object. Always calls this: |
2478 | | /// call void \@objc_storeStrong(i8** %addr, i8* %value) |
2479 | | llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(Address addr, |
2480 | | llvm::Value *value, |
2481 | 0 | bool ignored) { |
2482 | 0 | assert(addr.getElementType() == value->getType()); |
2483 | | |
2484 | 0 | llvm::Function *&fn = CGM.getObjCEntrypoints().objc_storeStrong; |
2485 | 0 | if (!fn) |
2486 | 0 | fn = getARCIntrinsic(llvm::Intrinsic::objc_storeStrong, CGM); |
2487 | |
2488 | 0 | llvm::Value *args[] = { |
2489 | 0 | Builder.CreateBitCast(addr.getPointer(), Int8PtrPtrTy), |
2490 | 0 | Builder.CreateBitCast(value, Int8PtrTy) |
2491 | 0 | }; |
2492 | 0 | EmitNounwindRuntimeCall(fn, args); |
2493 | |
2494 | 0 | if (ignored) return nullptr; |
2495 | 0 | return value; |
2496 | 0 | } |
2497 | | |
2498 | | /// Store into a strong object. Sometimes calls this: |
2499 | | /// call void \@objc_storeStrong(i8** %addr, i8* %value) |
2500 | | /// Other times, breaks it down into components. |
2501 | | llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst, |
2502 | | llvm::Value *newValue, |
2503 | 0 | bool ignored) { |
2504 | 0 | QualType type = dst.getType(); |
2505 | 0 | bool isBlock = type->isBlockPointerType(); |
2506 | | |
2507 | | // Use a store barrier at -O0 unless this is a block type or the |
2508 | | // lvalue is inadequately aligned. |
2509 | 0 | if (shouldUseFusedARCCalls() && |
2510 | 0 | !isBlock && |
2511 | 0 | (dst.getAlignment().isZero() || |
2512 | 0 | dst.getAlignment() >= CharUnits::fromQuantity(PointerAlignInBytes))) { |
2513 | 0 | return EmitARCStoreStrongCall(dst.getAddress(*this), newValue, ignored); |
2514 | 0 | } |
2515 | | |
2516 | | // Otherwise, split it out. |
2517 | | |
2518 | | // Retain the new value. |
2519 | 0 | newValue = EmitARCRetain(type, newValue); |
2520 | | |
2521 | | // Read the old value. |
2522 | 0 | llvm::Value *oldValue = EmitLoadOfScalar(dst, SourceLocation()); |
2523 | | |
2524 | | // Store. We do this before the release so that any deallocs won't |
2525 | | // see the old value. |
2526 | 0 | EmitStoreOfScalar(newValue, dst); |
2527 | | |
2528 | | // Finally, release the old value. |
2529 | 0 | EmitARCRelease(oldValue, dst.isARCPreciseLifetime()); |
2530 | |
2531 | 0 | return newValue; |
2532 | 0 | } |
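
In outline, the two paths above produce (a sketch under the same naming assumptions as the earlier IR snippets):

    // Fused (at -O0, non-block, adequately aligned l-value):
    //   call void @llvm.objc.storeStrong(ptr %addr, ptr %newValue)
    // Split (otherwise): retain the new value, load the old value, store the
    // new value, then release the old value, in exactly that order.
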
2533 | | |
2534 | | /// Autorelease the given object. |
2535 | | /// call i8* \@objc_autorelease(i8* %value) |
2536 | 0 | llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) { |
2537 | 0 | return emitARCValueOperation(*this, value, nullptr, |
2538 | 0 | CGM.getObjCEntrypoints().objc_autorelease, |
2539 | 0 | llvm::Intrinsic::objc_autorelease); |
2540 | 0 | } |
2541 | | |
2542 | | /// Autorelease the given object. |
2543 | | /// call i8* \@objc_autoreleaseReturnValue(i8* %value) |
2544 | | llvm::Value * |
2545 | 0 | CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) { |
2546 | 0 | return emitARCValueOperation(*this, value, nullptr, |
2547 | 0 | CGM.getObjCEntrypoints().objc_autoreleaseReturnValue, |
2548 | 0 | llvm::Intrinsic::objc_autoreleaseReturnValue, |
2549 | 0 | llvm::CallInst::TCK_Tail); |
2550 | 0 | } |
2551 | | |
2552 | | /// Do a fused retain/autorelease of the given object. |
2553 | | /// call i8* \@objc_retainAutoreleaseReturnValue(i8* %value) |
2554 | | llvm::Value * |
2555 | 0 | CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) { |
2556 | 0 | return emitARCValueOperation(*this, value, nullptr, |
2557 | 0 | CGM.getObjCEntrypoints().objc_retainAutoreleaseReturnValue, |
2558 | 0 | llvm::Intrinsic::objc_retainAutoreleaseReturnValue, |
2559 | 0 | llvm::CallInst::TCK_Tail); |
2560 | 0 | } |
2561 | | |
2562 | | /// Do a fused retain/autorelease of the given object. |
2563 | | /// call i8* \@objc_retainAutorelease(i8* %value) |
2564 | | /// or |
2565 | | /// %retain = call i8* \@objc_retainBlock(i8* %value) |
2566 | | /// call i8* \@objc_autorelease(i8* %retain) |
2567 | | llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type, |
2568 | 0 | llvm::Value *value) { |
2569 | 0 | if (!type->isBlockPointerType()) |
2570 | 0 | return EmitARCRetainAutoreleaseNonBlock(value); |
2571 | | |
2572 | 0 | if (isa<llvm::ConstantPointerNull>(value)) return value; |
2573 | | |
2574 | 0 | llvm::Type *origType = value->getType(); |
2575 | 0 | value = Builder.CreateBitCast(value, Int8PtrTy); |
2576 | 0 | value = EmitARCRetainBlock(value, /*mandatory*/ true); |
2577 | 0 | value = EmitARCAutorelease(value); |
2578 | 0 | return Builder.CreateBitCast(value, origType); |
2579 | 0 | } |
2580 | | |
2581 | | /// Do a fused retain/autorelease of the given object. |
2582 | | /// call i8* \@objc_retainAutorelease(i8* %value) |
2583 | | llvm::Value * |
2584 | 0 | CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) { |
2585 | 0 | return emitARCValueOperation(*this, value, nullptr, |
2586 | 0 | CGM.getObjCEntrypoints().objc_retainAutorelease, |
2587 | 0 | llvm::Intrinsic::objc_retainAutorelease); |
2588 | 0 | } |
2589 | | |
2590 | | /// i8* \@objc_loadWeak(i8** %addr) |
2591 | | /// Essentially objc_autorelease(objc_loadWeakRetained(addr)). |
2592 | 0 | llvm::Value *CodeGenFunction::EmitARCLoadWeak(Address addr) { |
2593 | 0 | return emitARCLoadOperation(*this, addr, |
2594 | 0 | CGM.getObjCEntrypoints().objc_loadWeak, |
2595 | 0 | llvm::Intrinsic::objc_loadWeak); |
2596 | 0 | } |
2597 | | |
2598 | | /// i8* \@objc_loadWeakRetained(i8** %addr) |
2599 | 0 | llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(Address addr) { |
2600 | 0 | return emitARCLoadOperation(*this, addr, |
2601 | 0 | CGM.getObjCEntrypoints().objc_loadWeakRetained, |
2602 | 0 | llvm::Intrinsic::objc_loadWeakRetained); |
2603 | 0 | } |
2604 | | |
2605 | | /// i8* \@objc_storeWeak(i8** %addr, i8* %value) |
2606 | | /// Returns %value. |
2607 | | llvm::Value *CodeGenFunction::EmitARCStoreWeak(Address addr, |
2608 | | llvm::Value *value, |
2609 | 0 | bool ignored) { |
2610 | 0 | return emitARCStoreOperation(*this, addr, value, |
2611 | 0 | CGM.getObjCEntrypoints().objc_storeWeak, |
2612 | 0 | llvm::Intrinsic::objc_storeWeak, ignored); |
2613 | 0 | } |
2614 | | |
2615 | | /// i8* \@objc_initWeak(i8** %addr, i8* %value) |
2616 | | /// Returns %value. %addr is known to not have a current weak entry. |
2617 | | /// Essentially equivalent to: |
2618 | | /// *addr = nil; objc_storeWeak(addr, value); |
2619 | 0 | void CodeGenFunction::EmitARCInitWeak(Address addr, llvm::Value *value) { |
2620 | | // If we're initializing to null, just write null to memory; no need |
2621 | | // to get the runtime involved. But don't do this if optimization |
2622 | | // is enabled, because accounting for this would make the optimizer |
2623 | | // much more complicated. |
2624 | 0 | if (isa<llvm::ConstantPointerNull>(value) && |
2625 | 0 | CGM.getCodeGenOpts().OptimizationLevel == 0) { |
2626 | 0 | Builder.CreateStore(value, addr); |
2627 | 0 | return; |
2628 | 0 | } |
2629 | | |
2630 | 0 | emitARCStoreOperation(*this, addr, value, |
2631 | 0 | CGM.getObjCEntrypoints().objc_initWeak, |
2632 | 0 | llvm::Intrinsic::objc_initWeak, /*ignored*/ true); |
2633 | 0 | } |
2634 | | |
2635 | | /// void \@objc_destroyWeak(i8** %addr) |
2636 | | /// Essentially objc_storeWeak(addr, nil). |
2637 | 0 | void CodeGenFunction::EmitARCDestroyWeak(Address addr) { |
2638 | 0 | llvm::Function *&fn = CGM.getObjCEntrypoints().objc_destroyWeak; |
2639 | 0 | if (!fn) |
2640 | 0 | fn = getARCIntrinsic(llvm::Intrinsic::objc_destroyWeak, CGM); |
2641 | |
2642 | 0 | EmitNounwindRuntimeCall(fn, addr.getPointer()); |
2643 | 0 | } |
2644 | | |
2645 | | /// void \@objc_moveWeak(i8** %dest, i8** %src) |
2646 | | /// Disregards the current value in %dest. Leaves %src pointing to nothing. |
2647 | | /// Essentially (objc_copyWeak(dest, src), objc_destroyWeak(src)). |
2648 | 0 | void CodeGenFunction::EmitARCMoveWeak(Address dst, Address src) { |
2649 | 0 | emitARCCopyOperation(*this, dst, src, |
2650 | 0 | CGM.getObjCEntrypoints().objc_moveWeak, |
2651 | 0 | llvm::Intrinsic::objc_moveWeak); |
2652 | 0 | } |
2653 | | |
2654 | | /// void \@objc_copyWeak(i8** %dest, i8** %src) |
2655 | | /// Disregards the current value in %dest. Essentially |
2656 | | /// objc_release(objc_initWeak(dest, objc_readWeakRetained(src))) |
2657 | 0 | void CodeGenFunction::EmitARCCopyWeak(Address dst, Address src) { |
2658 | 0 | emitARCCopyOperation(*this, dst, src, |
2659 | 0 | CGM.getObjCEntrypoints().objc_copyWeak, |
2660 | 0 | llvm::Intrinsic::objc_copyWeak); |
2661 | 0 | } |
2662 | | |
2663 | | void CodeGenFunction::emitARCCopyAssignWeak(QualType Ty, Address DstAddr, |
2664 | 0 | Address SrcAddr) { |
2665 | 0 | llvm::Value *Object = EmitARCLoadWeakRetained(SrcAddr); |
2666 | 0 | Object = EmitObjCConsumeObject(Ty, Object); |
2667 | 0 | EmitARCStoreWeak(DstAddr, Object, false); |
2668 | 0 | } |
2669 | | |
2670 | | void CodeGenFunction::emitARCMoveAssignWeak(QualType Ty, Address DstAddr, |
2671 | 0 | Address SrcAddr) { |
2672 | 0 | llvm::Value *Object = EmitARCLoadWeakRetained(SrcAddr); |
2673 | 0 | Object = EmitObjCConsumeObject(Ty, Object); |
2674 | 0 | EmitARCStoreWeak(DstAddr, Object, false); |
2675 | 0 | EmitARCDestroyWeak(SrcAddr); |
2676 | 0 | } |
2677 | | |
2678 | | /// Produce the code to do an objc_autoreleasepool_push.
2679 | | /// call i8* \@objc_autoreleasePoolPush(void) |
2680 | 0 | llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() { |
2681 | 0 | llvm::Function *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPush; |
2682 | 0 | if (!fn) |
2683 | 0 | fn = getARCIntrinsic(llvm::Intrinsic::objc_autoreleasePoolPush, CGM); |
2684 | |
2685 | 0 | return EmitNounwindRuntimeCall(fn); |
2686 | 0 | } |
2687 | | |
2688 | | /// Produce the code to do a primitive release. |
2689 | | /// call void \@objc_autoreleasePoolPop(i8* %ptr) |
2690 | 0 | void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) { |
2691 | 0 | assert(value->getType() == Int8PtrTy); |
2692 | | |
2693 | 0 | if (getInvokeDest()) { |
2694 | | // Call the runtime method, not the intrinsic, if we are handling exceptions.
2695 | 0 | llvm::FunctionCallee &fn = |
2696 | 0 | CGM.getObjCEntrypoints().objc_autoreleasePoolPopInvoke; |
2697 | 0 | if (!fn) { |
2698 | 0 | llvm::FunctionType *fnType = |
2699 | 0 | llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false); |
2700 | 0 | fn = CGM.CreateRuntimeFunction(fnType, "objc_autoreleasePoolPop"); |
2701 | 0 | setARCRuntimeFunctionLinkage(CGM, fn); |
2702 | 0 | } |
2703 | | |
2704 | | // objc_autoreleasePoolPop can throw. |
2705 | 0 | EmitRuntimeCallOrInvoke(fn, value); |
2706 | 0 | } else { |
2707 | 0 | llvm::FunctionCallee &fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPop; |
2708 | 0 | if (!fn) |
2709 | 0 | fn = getARCIntrinsic(llvm::Intrinsic::objc_autoreleasePoolPop, CGM); |
2710 | |
2711 | 0 | EmitRuntimeCall(fn, value); |
2712 | 0 | } |
2713 | 0 | } |
2714 | | |
2715 | | /// Produce the code to do the MRR version of objc_autoreleasepool_push,
2716 | | /// which is: [[NSAutoreleasePool alloc] init];
2717 | | /// where alloc is declared as + (id) alloc; in the NSAutoreleasePool class,
2718 | | /// and init is declared as - (id) init; in its NSObject superclass.
2719 | | /// |
2720 | 0 | llvm::Value *CodeGenFunction::EmitObjCMRRAutoreleasePoolPush() { |
2721 | 0 | CGObjCRuntime &Runtime = CGM.getObjCRuntime(); |
2722 | 0 | llvm::Value *Receiver = Runtime.EmitNSAutoreleasePoolClassRef(*this); |
2723 | | // [NSAutoreleasePool alloc] |
2724 | 0 | IdentifierInfo *II = &CGM.getContext().Idents.get("alloc"); |
2725 | 0 | Selector AllocSel = getContext().Selectors.getSelector(0, &II); |
2726 | 0 | CallArgList Args; |
2727 | 0 | RValue AllocRV = |
2728 | 0 | Runtime.GenerateMessageSend(*this, ReturnValueSlot(), |
2729 | 0 | getContext().getObjCIdType(), |
2730 | 0 | AllocSel, Receiver, Args); |
2731 | | |
2732 | | // [Receiver init] |
2733 | 0 | Receiver = AllocRV.getScalarVal(); |
2734 | 0 | II = &CGM.getContext().Idents.get("init"); |
2735 | 0 | Selector InitSel = getContext().Selectors.getSelector(0, &II); |
2736 | 0 | RValue InitRV = |
2737 | 0 | Runtime.GenerateMessageSend(*this, ReturnValueSlot(), |
2738 | 0 | getContext().getObjCIdType(), |
2739 | 0 | InitSel, Receiver, Args); |
2740 | 0 | return InitRV.getScalarVal(); |
2741 | 0 | } |
2742 | | |
2743 | | /// Allocate the given objc object. |
2744 | | /// call i8* \@objc_alloc(i8* %value) |
2745 | | llvm::Value *CodeGenFunction::EmitObjCAlloc(llvm::Value *value, |
2746 | 0 | llvm::Type *resultType) { |
2747 | 0 | return emitObjCValueOperation(*this, value, resultType, |
2748 | 0 | CGM.getObjCEntrypoints().objc_alloc, |
2749 | 0 | "objc_alloc"); |
2750 | 0 | } |
2751 | | |
2752 | | /// Allocate the given objc object. |
2753 | | /// call i8* \@objc_allocWithZone(i8* %value) |
2754 | | llvm::Value *CodeGenFunction::EmitObjCAllocWithZone(llvm::Value *value, |
2755 | 0 | llvm::Type *resultType) { |
2756 | 0 | return emitObjCValueOperation(*this, value, resultType, |
2757 | 0 | CGM.getObjCEntrypoints().objc_allocWithZone, |
2758 | 0 | "objc_allocWithZone"); |
2759 | 0 | } |
2760 | | |
2761 | | llvm::Value *CodeGenFunction::EmitObjCAllocInit(llvm::Value *value, |
2762 | 0 | llvm::Type *resultType) { |
2763 | 0 | return emitObjCValueOperation(*this, value, resultType, |
2764 | 0 | CGM.getObjCEntrypoints().objc_alloc_init, |
2765 | 0 | "objc_alloc_init"); |
2766 | 0 | } |
2767 | | |
2768 | | /// Produce the code to do a primitive release. |
2769 | | /// [tmp drain]; |
2770 | 0 | void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) { |
2771 | 0 | IdentifierInfo *II = &CGM.getContext().Idents.get("drain"); |
2772 | 0 | Selector DrainSel = getContext().Selectors.getSelector(0, &II); |
2773 | 0 | CallArgList Args; |
2774 | 0 | CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(), |
2775 | 0 | getContext().VoidTy, DrainSel, Arg, Args); |
2776 | 0 | } |
2777 | | |
2778 | | void CodeGenFunction::destroyARCStrongPrecise(CodeGenFunction &CGF, |
2779 | | Address addr, |
2780 | 0 | QualType type) { |
2781 | 0 | CGF.EmitARCDestroyStrong(addr, ARCPreciseLifetime); |
2782 | 0 | } |
2783 | | |
2784 | | void CodeGenFunction::destroyARCStrongImprecise(CodeGenFunction &CGF, |
2785 | | Address addr, |
2786 | 0 | QualType type) { |
2787 | 0 | CGF.EmitARCDestroyStrong(addr, ARCImpreciseLifetime); |
2788 | 0 | } |
2789 | | |
2790 | | void CodeGenFunction::destroyARCWeak(CodeGenFunction &CGF, |
2791 | | Address addr, |
2792 | 0 | QualType type) { |
2793 | 0 | CGF.EmitARCDestroyWeak(addr); |
2794 | 0 | } |
2795 | | |
2796 | | void CodeGenFunction::emitARCIntrinsicUse(CodeGenFunction &CGF, Address addr, |
2797 | 0 | QualType type) { |
2798 | 0 | llvm::Value *value = CGF.Builder.CreateLoad(addr); |
2799 | 0 | CGF.EmitARCIntrinsicUse(value); |
2800 | 0 | } |
2801 | | |
2802 | | /// Autorelease the given object. |
2803 | | /// call i8* \@objc_autorelease(i8* %value) |
2804 | | llvm::Value *CodeGenFunction::EmitObjCAutorelease(llvm::Value *value, |
2805 | 0 | llvm::Type *returnType) { |
2806 | 0 | return emitObjCValueOperation( |
2807 | 0 | *this, value, returnType, |
2808 | 0 | CGM.getObjCEntrypoints().objc_autoreleaseRuntimeFunction, |
2809 | 0 | "objc_autorelease"); |
2810 | 0 | } |
2811 | | |
2812 | | /// Retain the given object, with normal retain semantics. |
2813 | | /// call i8* \@objc_retain(i8* %value) |
2814 | | llvm::Value *CodeGenFunction::EmitObjCRetainNonBlock(llvm::Value *value, |
2815 | 0 | llvm::Type *returnType) { |
2816 | 0 | return emitObjCValueOperation( |
2817 | 0 | *this, value, returnType, |
2818 | 0 | CGM.getObjCEntrypoints().objc_retainRuntimeFunction, "objc_retain"); |
2819 | 0 | } |
2820 | | |
2821 | | /// Release the given object. |
2822 | | /// call void \@objc_release(i8* %value) |
2823 | | void CodeGenFunction::EmitObjCRelease(llvm::Value *value, |
2824 | 0 | ARCPreciseLifetime_t precise) { |
2825 | 0 | if (isa<llvm::ConstantPointerNull>(value)) return; |
2826 | | |
2827 | 0 | llvm::FunctionCallee &fn = |
2828 | 0 | CGM.getObjCEntrypoints().objc_releaseRuntimeFunction; |
2829 | 0 | if (!fn) { |
2830 | 0 | llvm::FunctionType *fnType = |
2831 | 0 | llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false); |
2832 | 0 | fn = CGM.CreateRuntimeFunction(fnType, "objc_release"); |
2833 | 0 | setARCRuntimeFunctionLinkage(CGM, fn); |
2834 | | // We have native ARC, so set the nonlazybind attribute for performance.
2835 | 0 | if (llvm::Function *f = dyn_cast<llvm::Function>(fn.getCallee())) |
2836 | 0 | f->addFnAttr(llvm::Attribute::NonLazyBind); |
2837 | 0 | } |
2838 | | |
2839 | | // Cast the argument to 'id'. |
2840 | 0 | value = Builder.CreateBitCast(value, Int8PtrTy); |
2841 | | |
2842 | | // Call objc_release. |
2843 | 0 | llvm::CallBase *call = EmitCallOrInvoke(fn, value); |
2844 | |
2845 | 0 | if (precise == ARCImpreciseLifetime) { |
2846 | 0 | call->setMetadata("clang.imprecise_release", |
2847 | 0 | llvm::MDNode::get(Builder.getContext(), std::nullopt)); |
2848 | 0 | } |
2849 | 0 | } |
2850 | | |
2851 | | namespace { |
2852 | | struct CallObjCAutoreleasePoolObject final : EHScopeStack::Cleanup { |
2853 | | llvm::Value *Token; |
2854 | | |
2855 | 0 | CallObjCAutoreleasePoolObject(llvm::Value *token) : Token(token) {} |
2856 | | |
2857 | 0 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
2858 | 0 | CGF.EmitObjCAutoreleasePoolPop(Token); |
2859 | 0 | } |
2860 | | }; |
2861 | | struct CallObjCMRRAutoreleasePoolObject final : EHScopeStack::Cleanup { |
2862 | | llvm::Value *Token; |
2863 | | |
2864 | 0 | CallObjCMRRAutoreleasePoolObject(llvm::Value *token) : Token(token) {} |
2865 | | |
2866 | 0 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
2867 | 0 | CGF.EmitObjCMRRAutoreleasePoolPop(Token); |
2868 | 0 | } |
2869 | | }; |
2870 | | } |
2871 | | |
2872 | 0 | void CodeGenFunction::EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr) { |
2873 | 0 | if (CGM.getLangOpts().ObjCAutoRefCount) |
2874 | 0 | EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, Ptr); |
2875 | 0 | else |
2876 | 0 | EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, Ptr); |
2877 | 0 | } |
2878 | | |
2879 | 0 | static bool shouldRetainObjCLifetime(Qualifiers::ObjCLifetime lifetime) { |
2880 | 0 | switch (lifetime) { |
2881 | 0 | case Qualifiers::OCL_None: |
2882 | 0 | case Qualifiers::OCL_ExplicitNone: |
2883 | 0 | case Qualifiers::OCL_Strong: |
2884 | 0 | case Qualifiers::OCL_Autoreleasing: |
2885 | 0 | return true; |
2886 | | |
2887 | 0 | case Qualifiers::OCL_Weak: |
2888 | 0 | return false; |
2889 | 0 | } |
2890 | | |
2891 | 0 | llvm_unreachable("impossible lifetime!"); |
2892 | 0 | } |
2893 | | |
2894 | | static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF, |
2895 | | LValue lvalue, |
2896 | 0 | QualType type) { |
2897 | 0 | llvm::Value *result; |
2898 | 0 | bool shouldRetain = shouldRetainObjCLifetime(type.getObjCLifetime()); |
2899 | 0 | if (shouldRetain) { |
2900 | 0 | result = CGF.EmitLoadOfLValue(lvalue, SourceLocation()).getScalarVal(); |
2901 | 0 | } else { |
2902 | 0 | assert(type.getObjCLifetime() == Qualifiers::OCL_Weak); |
2903 | 0 | result = CGF.EmitARCLoadWeakRetained(lvalue.getAddress(CGF)); |
2904 | 0 | } |
2905 | 0 | return TryEmitResult(result, !shouldRetain); |
2906 | 0 | } |
2907 | | |
2908 | | static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF, |
2909 | 0 | const Expr *e) { |
2910 | 0 | e = e->IgnoreParens(); |
2911 | 0 | QualType type = e->getType(); |
2912 | | |
2913 | | // If we're loading retained from a __strong xvalue, we can avoid |
2914 | | // an extra retain/release pair by zeroing out the source of this |
2915 | | // "move" operation. |
2916 | 0 | if (e->isXValue() && |
2917 | 0 | !type.isConstQualified() && |
2918 | 0 | type.getObjCLifetime() == Qualifiers::OCL_Strong) { |
2919 | | // Emit the lvalue. |
2920 | 0 | LValue lv = CGF.EmitLValue(e); |
2921 | | |
2922 | | // Load the object pointer. |
2923 | 0 | llvm::Value *result = CGF.EmitLoadOfLValue(lv, |
2924 | 0 | SourceLocation()).getScalarVal(); |
2925 | | |
2926 | | // Set the source pointer to NULL. |
2927 | 0 | CGF.EmitStoreOfScalar(getNullForVariable(lv.getAddress(CGF)), lv); |
2928 | |
2929 | 0 | return TryEmitResult(result, true); |
2930 | 0 | } |
2931 | | |
2932 | | // As a very special optimization, in ARC++, if the l-value is the |
2933 | | // result of a non-volatile assignment, do a simple retain of the |
2934 | | // result of the call to objc_storeWeak instead of reloading. |
2935 | 0 | if (CGF.getLangOpts().CPlusPlus && |
2936 | 0 | !type.isVolatileQualified() && |
2937 | 0 | type.getObjCLifetime() == Qualifiers::OCL_Weak && |
2938 | 0 | isa<BinaryOperator>(e) && |
2939 | 0 | cast<BinaryOperator>(e)->getOpcode() == BO_Assign) |
2940 | 0 | return TryEmitResult(CGF.EmitScalarExpr(e), false); |
2941 | | |
2942 | | // Try to emit code for a scalar constant instead of emitting an LValue and
2943 | | // loading it, because we are not guaranteed to have an l-value. One such
2944 | | // case is a DeclRefExpr referencing a non-odr-used constant-evaluated variable.
2945 | 0 | if (const auto *decl_expr = dyn_cast<DeclRefExpr>(e)) { |
2946 | 0 | auto *DRE = const_cast<DeclRefExpr *>(decl_expr); |
2947 | 0 | if (CodeGenFunction::ConstantEmission constant = CGF.tryEmitAsConstant(DRE)) |
2948 | 0 | return TryEmitResult(CGF.emitScalarConstant(constant, DRE), |
2949 | 0 | !shouldRetainObjCLifetime(type.getObjCLifetime())); |
2950 | 0 | } |
2951 | | |
2952 | 0 | return tryEmitARCRetainLoadOfScalar(CGF, CGF.EmitLValue(e), type); |
2953 | 0 | } |
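
A concrete instance of the xvalue path above, as hypothetical Objective-C++ source:

    // __strong id a = ...;
    // id b = std::move(a);  // load 'a' as the +1 result, then null out 'a'
    // The usual retain of the loaded value and release of 'a' are both elided.
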
2954 | | |
2955 | | typedef llvm::function_ref<llvm::Value *(CodeGenFunction &CGF, |
2956 | | llvm::Value *value)> |
2957 | | ValueTransform; |
2958 | | |
2959 | | /// Insert code immediately after a call. |
2960 | | |
2961 | | // FIXME: We should find a way to emit the runtime call immediately |
2962 | | // after the call is emitted to eliminate the need for this function. |
2963 | | static llvm::Value *emitARCOperationAfterCall(CodeGenFunction &CGF, |
2964 | | llvm::Value *value, |
2965 | | ValueTransform doAfterCall, |
2966 | 0 | ValueTransform doFallback) { |
2967 | 0 | CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP(); |
2968 | 0 | auto *callBase = dyn_cast<llvm::CallBase>(value); |
2969 | |
2970 | 0 | if (callBase && llvm::objcarc::hasAttachedCallOpBundle(callBase)) { |
2971 | | // Fall back if the call base has operand bundle "clang.arc.attachedcall". |
2972 | 0 | value = doFallback(CGF, value); |
2973 | 0 | } else if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(value)) { |
2974 | | // Place the retain immediately following the call. |
2975 | 0 | CGF.Builder.SetInsertPoint(call->getParent(), |
2976 | 0 | ++llvm::BasicBlock::iterator(call)); |
2977 | 0 | value = doAfterCall(CGF, value); |
2978 | 0 | } else if (llvm::InvokeInst *invoke = dyn_cast<llvm::InvokeInst>(value)) { |
2979 | | // Place the retain at the beginning of the normal destination block. |
2980 | 0 | llvm::BasicBlock *BB = invoke->getNormalDest(); |
2981 | 0 | CGF.Builder.SetInsertPoint(BB, BB->begin()); |
2982 | 0 | value = doAfterCall(CGF, value); |
2983 | | |
2984 | | // Bitcasts can arise because of related-result returns. Rewrite |
2985 | | // the operand. |
2986 | 0 | } else if (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(value)) { |
2987 | | // Change the insert point to avoid emitting the fall-back call after the |
2988 | | // bitcast. |
2989 | 0 | CGF.Builder.SetInsertPoint(bitcast->getParent(), bitcast->getIterator()); |
2990 | 0 | llvm::Value *operand = bitcast->getOperand(0); |
2991 | 0 | operand = emitARCOperationAfterCall(CGF, operand, doAfterCall, doFallback); |
2992 | 0 | bitcast->setOperand(0, operand); |
2993 | 0 | value = bitcast; |
2994 | 0 | } else { |
2995 | 0 | auto *phi = dyn_cast<llvm::PHINode>(value); |
2996 | 0 | if (phi && phi->getNumIncomingValues() == 2 && |
2997 | 0 | isa<llvm::ConstantPointerNull>(phi->getIncomingValue(1)) && |
2998 | 0 | isa<llvm::CallBase>(phi->getIncomingValue(0))) { |
2999 | | // Handle phi instructions that are generated when it's necessary to check |
3000 | | // whether the receiver of a message is null. |
3001 | 0 | llvm::Value *inVal = phi->getIncomingValue(0); |
3002 | 0 | inVal = emitARCOperationAfterCall(CGF, inVal, doAfterCall, doFallback); |
3003 | 0 | phi->setIncomingValue(0, inVal); |
3004 | 0 | value = phi; |
3005 | 0 | } else { |
3006 | | // Generic fall-back case. |
3007 | | // Retain using the non-block variant: we never need to do a copy |
3008 | | // of a block that's been returned to us. |
3009 | 0 | value = doFallback(CGF, value); |
3010 | 0 | } |
3011 | 0 | } |
3012 | |
3013 | 0 | CGF.Builder.restoreIP(ip); |
3014 | 0 | return value; |
3015 | 0 | } |
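
To illustrate the insert-point rewinding (a sketch, assuming additional IR has been emitted after the call before this helper runs):

    // Before:  %v = call ptr @callee(...)  ...later IR...   <- builder here
    // After:   %v = call ptr @callee(...)
    //          %r = (doAfterCall applied to %v)
    //          ...later IR...                                <- builder restored
    // doAfterCall runs immediately after the call, or at the head of an
    // invoke's normal destination; the saved insertion point is then restored.
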
3016 | | |
3017 | | /// Given an expression that is some sort of call (which does
3018 | | /// not return retained), emit a retain following it.
3019 | | static llvm::Value *emitARCRetainCallResult(CodeGenFunction &CGF, |
3020 | 0 | const Expr *e) { |
3021 | 0 | llvm::Value *value = CGF.EmitScalarExpr(e); |
3022 | 0 | return emitARCOperationAfterCall(CGF, value, |
3023 | 0 | [](CodeGenFunction &CGF, llvm::Value *value) { |
3024 | 0 | return CGF.EmitARCRetainAutoreleasedReturnValue(value); |
3025 | 0 | }, |
3026 | 0 | [](CodeGenFunction &CGF, llvm::Value *value) { |
3027 | 0 | return CGF.EmitARCRetainNonBlock(value); |
3028 | 0 | }); |
3029 | 0 | } |
3030 | | |
3031 | | /// Given an expression that is some sort of call (which does
3032 | | /// not return retained), perform an unsafeClaim following it.
3033 | | static llvm::Value *emitARCUnsafeClaimCallResult(CodeGenFunction &CGF, |
3034 | 0 | const Expr *e) { |
3035 | 0 | llvm::Value *value = CGF.EmitScalarExpr(e); |
3036 | 0 | return emitARCOperationAfterCall(CGF, value, |
3037 | 0 | [](CodeGenFunction &CGF, llvm::Value *value) { |
3038 | 0 | return CGF.EmitARCUnsafeClaimAutoreleasedReturnValue(value); |
3039 | 0 | }, |
3040 | 0 | [](CodeGenFunction &CGF, llvm::Value *value) { |
3041 | 0 | return value; |
3042 | 0 | }); |
3043 | 0 | } |
3044 | | |
3045 | | llvm::Value *CodeGenFunction::EmitARCReclaimReturnedObject(const Expr *E, |
3046 | 0 | bool allowUnsafeClaim) { |
3047 | 0 | if (allowUnsafeClaim && |
3048 | 0 | CGM.getLangOpts().ObjCRuntime.hasARCUnsafeClaimAutoreleasedReturnValue()) { |
3049 | 0 | return emitARCUnsafeClaimCallResult(*this, E); |
3050 | 0 | } else { |
3051 | 0 | llvm::Value *value = emitARCRetainCallResult(*this, E); |
3052 | 0 | return EmitObjCConsumeObject(E->getType(), value); |
3053 | 0 | } |
3054 | 0 | } |
3055 | | |
3056 | | /// Determine whether it might be important to emit a separate |
3057 | | /// objc_retain_block on the result of the given expression, or |
3058 | | /// whether it's okay to just emit it in a +1 context. |
3059 | 0 | static bool shouldEmitSeparateBlockRetain(const Expr *e) { |
3060 | 0 | assert(e->getType()->isBlockPointerType()); |
3061 | 0 | e = e->IgnoreParens(); |
3062 | | |
3063 | | // For future goodness, emit block expressions directly in +1 |
3064 | | // contexts if we can. |
3065 | 0 | if (isa<BlockExpr>(e)) |
3066 | 0 | return false; |
3067 | | |
3068 | 0 | if (const CastExpr *cast = dyn_cast<CastExpr>(e)) { |
3069 | 0 | switch (cast->getCastKind()) { |
3070 | | // Emitting these operations in +1 contexts is goodness. |
3071 | 0 | case CK_LValueToRValue: |
3072 | 0 | case CK_ARCReclaimReturnedObject: |
3073 | 0 | case CK_ARCConsumeObject: |
3074 | 0 | case CK_ARCProduceObject: |
3075 | 0 | return false; |
3076 | | |
3077 | | // These operations preserve a block type. |
3078 | 0 | case CK_NoOp: |
3079 | 0 | case CK_BitCast: |
3080 | 0 | return shouldEmitSeparateBlockRetain(cast->getSubExpr()); |
3081 | | |
3082 | | // These operations are known to be bad (or haven't been considered). |
3083 | 0 | case CK_AnyPointerToBlockPointerCast: |
3084 | 0 | default: |
3085 | 0 | return true; |
3086 | 0 | } |
3087 | 0 | } |
3088 | | |
3089 | 0 | return true; |
3090 | 0 | } |
3091 | | |
3092 | | namespace { |
3093 | | /// A CRTP base class for emitting expressions of retainable object |
3094 | | /// pointer type in ARC. |
3095 | | template <typename Impl, typename Result> class ARCExprEmitter { |
3096 | | protected: |
3097 | | CodeGenFunction &CGF; |
3098 | 0 | Impl &asImpl() { return *static_cast<Impl*>(this); }
    Unexecuted instantiation: CGObjC.cpp:(anonymous namespace)::ARCExprEmitter<(anonymous namespace)::ARCRetainExprEmitter, llvm::PointerIntPair<llvm::Value*, 1u, bool, llvm::PointerLikeTypeTraits<llvm::Value*>, llvm::PointerIntPairInfo<llvm::Value*, 1u, llvm::PointerLikeTypeTraits<llvm::Value*> > > >::asImpl()
    Unexecuted instantiation: CGObjC.cpp:(anonymous namespace)::ARCExprEmitter<(anonymous namespace)::ARCUnsafeUnretainedExprEmitter, llvm::Value*>::asImpl()
3099 | | |
3100 | 0 | ARCExprEmitter(CodeGenFunction &CGF) : CGF(CGF) {}
    Unexecuted instantiation: CGObjC.cpp:(anonymous namespace)::ARCExprEmitter<(anonymous namespace)::ARCRetainExprEmitter, llvm::PointerIntPair<llvm::Value*, 1u, bool, llvm::PointerLikeTypeTraits<llvm::Value*>, llvm::PointerIntPairInfo<llvm::Value*, 1u, llvm::PointerLikeTypeTraits<llvm::Value*> > > >::ARCExprEmitter(clang::CodeGen::CodeGenFunction&)
    Unexecuted instantiation: CGObjC.cpp:(anonymous namespace)::ARCExprEmitter<(anonymous namespace)::ARCUnsafeUnretainedExprEmitter, llvm::Value*>::ARCExprEmitter(clang::CodeGen::CodeGenFunction&)
3101 | | |
3102 | | public: |
3103 | | Result visit(const Expr *e); |
3104 | | Result visitCastExpr(const CastExpr *e); |
3105 | | Result visitPseudoObjectExpr(const PseudoObjectExpr *e); |
3106 | | Result visitBlockExpr(const BlockExpr *e); |
3107 | | Result visitBinaryOperator(const BinaryOperator *e); |
3108 | | Result visitBinAssign(const BinaryOperator *e); |
3109 | | Result visitBinAssignUnsafeUnretained(const BinaryOperator *e); |
3110 | | Result visitBinAssignAutoreleasing(const BinaryOperator *e); |
3111 | | Result visitBinAssignWeak(const BinaryOperator *e); |
3112 | | Result visitBinAssignStrong(const BinaryOperator *e); |
3113 | | |
3114 | | // Minimal implementation: |
3115 | | // Result visitLValueToRValue(const Expr *e) |
3116 | | // Result visitConsumeObject(const Expr *e) |
3117 | | // Result visitExtendBlockObject(const Expr *e) |
3118 | | // Result visitReclaimReturnedObject(const Expr *e) |
3119 | | // Result visitCall(const Expr *e) |
3120 | | // Result visitExpr(const Expr *e) |
3121 | | // |
3122 | | // Result emitBitCast(Result result, llvm::Type *resultType) |
3123 | | // llvm::Value *getValueOfResult(Result result) |
3124 | | }; |
3125 | | } |
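
As a minimal sketch (hypothetical, not part of this file), here is what a conforming subclass must supply, wiring every hook from the "Minimal implementation" list above to plain scalar emission:

    struct ARCPlainExprEmitter
        : ARCExprEmitter<ARCPlainExprEmitter, llvm::Value *> {
      ARCPlainExprEmitter(CodeGenFunction &CGF) : ARCExprEmitter(CGF) {}

      // 'Result' is plain llvm::Value*, so these two adapters are trivial.
      llvm::Value *getValueOfResult(llvm::Value *v) { return v; }
      llvm::Value *emitBitCast(llvm::Value *v, llvm::Type *ty) {
        return CGF.Builder.CreateBitCast(v, ty);
      }

      llvm::Value *visitLValueToRValue(const Expr *e) {
        return CGF.EmitScalarExpr(e);
      }
      llvm::Value *visitConsumeObject(const Expr *e) {
        // Emit the operand, then perform the consume as usual.
        llvm::Value *v = CGF.EmitScalarExpr(e);
        return CGF.EmitObjCConsumeObject(e->getType(), v);
      }
      llvm::Value *visitExtendBlockObject(const Expr *e) {
        return CGF.EmitARCExtendBlockObject(e);
      }
      llvm::Value *visitReclaimReturnedObject(const Expr *e) {
        return CGF.EmitARCReclaimReturnedObject(e, /*allowUnsafeClaim*/ false);
      }
      llvm::Value *visitCall(const Expr *e) { return CGF.EmitScalarExpr(e); }
      llvm::Value *visitExpr(const Expr *e) { return CGF.EmitScalarExpr(e); }
    };
    // Usage: llvm::Value *v = ARCPlainExprEmitter(CGF).visit(someExpr);

The two real emitters that follow, ARCRetainExprEmitter and ARCUnsafeUnretainedExprEmitter, specialize exactly these hooks.
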
3126 | | |
3127 | | /// Try to emit a PseudoObjectExpr under special ARC rules. |
3128 | | /// |
3129 | | /// This massively duplicates emitPseudoObjectRValue. |
3130 | | template <typename Impl, typename Result> |
3131 | | Result |
3132 | 0 | ARCExprEmitter<Impl,Result>::visitPseudoObjectExpr(const PseudoObjectExpr *E) { |
3133 | 0 | SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques; |
3134 | | |
3135 | | // Find the result expression. |
3136 | 0 | const Expr *resultExpr = E->getResultExpr(); |
3137 | 0 | assert(resultExpr); |
3138 | 0 | Result result; |
3139 | |
3140 | 0 | for (PseudoObjectExpr::const_semantics_iterator |
3141 | 0 | i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) { |
3142 | 0 | const Expr *semantic = *i; |
3143 | | |
3144 | | // If this semantic expression is an opaque value, bind it |
3145 | | // to the result of its source expression. |
3146 | 0 | if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) { |
3147 | 0 | typedef CodeGenFunction::OpaqueValueMappingData OVMA; |
3148 | 0 | OVMA opaqueData; |
3149 | | |
3150 | | // If this semantic is the result of the pseudo-object |
3151 | | // expression, try to evaluate the source as +1. |
3152 | 0 | if (ov == resultExpr) { |
3153 | 0 | assert(!OVMA::shouldBindAsLValue(ov)); |
3154 | 0 | result = asImpl().visit(ov->getSourceExpr()); |
3155 | 0 | opaqueData = OVMA::bind(CGF, ov, |
3156 | 0 | RValue::get(asImpl().getValueOfResult(result))); |
3157 | | |
3158 | | // Otherwise, just bind it. |
3159 | 0 | } else { |
3160 | 0 | opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr()); |
3161 | 0 | } |
3162 | 0 | opaques.push_back(opaqueData); |
3163 | | |
3164 | | // Otherwise, if the expression is the result, evaluate it |
3165 | | // and remember the result. |
3166 | 0 | } else if (semantic == resultExpr) { |
3167 | 0 | result = asImpl().visit(semantic); |
3168 | | |
3169 | | // Otherwise, evaluate the expression in an ignored context. |
3170 | 0 | } else { |
3171 | 0 | CGF.EmitIgnoredExpr(semantic); |
3172 | 0 | } |
3173 | 0 | } |
3174 | | |
3175 | | // Unbind all the opaques now. |
3176 | 0 | for (unsigned i = 0, e = opaques.size(); i != e; ++i) |
3177 | 0 | opaques[i].unbind(CGF); |
3178 | |
3179 | 0 | return result; |
3180 | 0 | }
    Unexecuted instantiation: CGObjC.cpp:(anonymous namespace)::ARCExprEmitter<(anonymous namespace)::ARCRetainExprEmitter, llvm::PointerIntPair<llvm::Value*, 1u, bool, llvm::PointerLikeTypeTraits<llvm::Value*>, llvm::PointerIntPairInfo<llvm::Value*, 1u, llvm::PointerLikeTypeTraits<llvm::Value*> > > >::visitPseudoObjectExpr(clang::PseudoObjectExpr const*)
    Unexecuted instantiation: CGObjC.cpp:(anonymous namespace)::ARCExprEmitter<(anonymous namespace)::ARCUnsafeUnretainedExprEmitter, llvm::Value*>::visitPseudoObjectExpr(clang::PseudoObjectExpr const*)
3181 | | |
3182 | | template <typename Impl, typename Result> |
3183 | 0 | Result ARCExprEmitter<Impl, Result>::visitBlockExpr(const BlockExpr *e) { |
3184 | | // The default implementation just forwards the expression to visitExpr. |
3185 | 0 | return asImpl().visitExpr(e); |
3186 | 0 | } |
3187 | | |
3188 | | template <typename Impl, typename Result> |
3189 | 0 | Result ARCExprEmitter<Impl,Result>::visitCastExpr(const CastExpr *e) { |
3190 | 0 | switch (e->getCastKind()) { |
3191 | | |
3192 | | // No-op casts don't change the type, so we just ignore them. |
3193 | 0 | case CK_NoOp: |
3194 | 0 | return asImpl().visit(e->getSubExpr()); |
3195 | | |
3196 | | // These casts can change the type. |
3197 | 0 | case CK_CPointerToObjCPointerCast: |
3198 | 0 | case CK_BlockPointerToObjCPointerCast: |
3199 | 0 | case CK_AnyPointerToBlockPointerCast: |
3200 | 0 | case CK_BitCast: { |
3201 | 0 | llvm::Type *resultType = CGF.ConvertType(e->getType()); |
3202 | 0 | assert(e->getSubExpr()->getType()->hasPointerRepresentation()); |
3203 | 0 | Result result = asImpl().visit(e->getSubExpr()); |
3204 | 0 | return asImpl().emitBitCast(result, resultType); |
3205 | 0 | } |
3206 | | |
3207 | | // Handle some casts specially. |
3208 | 0 | case CK_LValueToRValue: |
3209 | 0 | return asImpl().visitLValueToRValue(e->getSubExpr()); |
3210 | 0 | case CK_ARCConsumeObject: |
3211 | 0 | return asImpl().visitConsumeObject(e->getSubExpr()); |
3212 | 0 | case CK_ARCExtendBlockObject: |
3213 | 0 | return asImpl().visitExtendBlockObject(e->getSubExpr()); |
3214 | 0 | case CK_ARCReclaimReturnedObject: |
3215 | 0 | return asImpl().visitReclaimReturnedObject(e->getSubExpr()); |
3216 | | |
3217 | | // Otherwise, use the default logic. |
3218 | 0 | default: |
3219 | 0 | return asImpl().visitExpr(e); |
3220 | 0 | } |
3221 | 0 | }
    Unexecuted instantiation: CGObjC.cpp:(anonymous namespace)::ARCExprEmitter<(anonymous namespace)::ARCRetainExprEmitter, llvm::PointerIntPair<llvm::Value*, 1u, bool, llvm::PointerLikeTypeTraits<llvm::Value*>, llvm::PointerIntPairInfo<llvm::Value*, 1u, llvm::PointerLikeTypeTraits<llvm::Value*> > > >::visitCastExpr(clang::CastExpr const*)
    Unexecuted instantiation: CGObjC.cpp:(anonymous namespace)::ARCExprEmitter<(anonymous namespace)::ARCUnsafeUnretainedExprEmitter, llvm::Value*>::visitCastExpr(clang::CastExpr const*)
3222 | | |
3223 | | template <typename Impl, typename Result> |
3224 | | Result |
3225 | 0 | ARCExprEmitter<Impl,Result>::visitBinaryOperator(const BinaryOperator *e) { |
3226 | 0 | switch (e->getOpcode()) { |
3227 | 0 | case BO_Comma: |
3228 | 0 | CGF.EmitIgnoredExpr(e->getLHS()); |
3229 | 0 | CGF.EnsureInsertPoint(); |
3230 | 0 | return asImpl().visit(e->getRHS()); |
3231 | | |
3232 | 0 | case BO_Assign: |
3233 | 0 | return asImpl().visitBinAssign(e); |
3234 | | |
3235 | 0 | default: |
3236 | 0 | return asImpl().visitExpr(e); |
3237 | 0 | } |
3238 | 0 | }
    Unexecuted instantiation: CGObjC.cpp:(anonymous namespace)::ARCExprEmitter<(anonymous namespace)::ARCRetainExprEmitter, llvm::PointerIntPair<llvm::Value*, 1u, bool, llvm::PointerLikeTypeTraits<llvm::Value*>, llvm::PointerIntPairInfo<llvm::Value*, 1u, llvm::PointerLikeTypeTraits<llvm::Value*> > > >::visitBinaryOperator(clang::BinaryOperator const*)
    Unexecuted instantiation: CGObjC.cpp:(anonymous namespace)::ARCExprEmitter<(anonymous namespace)::ARCUnsafeUnretainedExprEmitter, llvm::Value*>::visitBinaryOperator(clang::BinaryOperator const*)
3239 | | |
3240 | | template <typename Impl, typename Result> |
3241 | 0 | Result ARCExprEmitter<Impl,Result>::visitBinAssign(const BinaryOperator *e) { |
3242 | 0 | switch (e->getLHS()->getType().getObjCLifetime()) { |
3243 | 0 | case Qualifiers::OCL_ExplicitNone: |
3244 | 0 | return asImpl().visitBinAssignUnsafeUnretained(e); |
3245 | | |
3246 | 0 | case Qualifiers::OCL_Weak: |
3247 | 0 | return asImpl().visitBinAssignWeak(e); |
3248 | | |
3249 | 0 | case Qualifiers::OCL_Autoreleasing: |
3250 | 0 | return asImpl().visitBinAssignAutoreleasing(e); |
3251 | | |
3252 | 0 | case Qualifiers::OCL_Strong: |
3253 | 0 | return asImpl().visitBinAssignStrong(e); |
3254 | | |
3255 | 0 | case Qualifiers::OCL_None: |
3256 | 0 | return asImpl().visitExpr(e); |
3257 | 0 | } |
3258 | 0 | llvm_unreachable("bad ObjC ownership qualifier"); |
3259 | 0 | }
    Unexecuted instantiation: CGObjC.cpp:(anonymous namespace)::ARCExprEmitter<(anonymous namespace)::ARCRetainExprEmitter, llvm::PointerIntPair<llvm::Value*, 1u, bool, llvm::PointerLikeTypeTraits<llvm::Value*>, llvm::PointerIntPairInfo<llvm::Value*, 1u, llvm::PointerLikeTypeTraits<llvm::Value*> > > >::visitBinAssign(clang::BinaryOperator const*)
    Unexecuted instantiation: CGObjC.cpp:(anonymous namespace)::ARCExprEmitter<(anonymous namespace)::ARCUnsafeUnretainedExprEmitter, llvm::Value*>::visitBinAssign(clang::BinaryOperator const*)
3260 | | |
3261 | | /// The default rule for __unsafe_unretained emits the RHS recursively, |
3262 | | /// stores into the unsafe variable, and propagates the result outward. |
3263 | | template <typename Impl, typename Result> |
3264 | | Result ARCExprEmitter<Impl,Result>:: |
3265 | 0 | visitBinAssignUnsafeUnretained(const BinaryOperator *e) { |
3266 | | // Recursively emit the RHS. |
3267 | | // For __block safety, do this before emitting the LHS. |
3268 | 0 | Result result = asImpl().visit(e->getRHS()); |
3269 | | |
3270 | | // Perform the store. |
3271 | 0 | LValue lvalue = |
3272 | 0 | CGF.EmitCheckedLValue(e->getLHS(), CodeGenFunction::TCK_Store); |
3273 | 0 | CGF.EmitStoreThroughLValue(RValue::get(asImpl().getValueOfResult(result)), |
3274 | 0 | lvalue); |
3275 | |
3276 | 0 | return result; |
3277 | 0 | }
    Unexecuted instantiation: CGObjC.cpp:(anonymous namespace)::ARCExprEmitter<(anonymous namespace)::ARCRetainExprEmitter, llvm::PointerIntPair<llvm::Value*, 1u, bool, llvm::PointerLikeTypeTraits<llvm::Value*>, llvm::PointerIntPairInfo<llvm::Value*, 1u, llvm::PointerLikeTypeTraits<llvm::Value*> > > >::visitBinAssignUnsafeUnretained(clang::BinaryOperator const*)
    Unexecuted instantiation: CGObjC.cpp:(anonymous namespace)::ARCExprEmitter<(anonymous namespace)::ARCUnsafeUnretainedExprEmitter, llvm::Value*>::visitBinAssignUnsafeUnretained(clang::BinaryOperator const*)
3278 | | |
3279 | | template <typename Impl, typename Result> |
3280 | | Result |
3281 | 0 | ARCExprEmitter<Impl,Result>::visitBinAssignAutoreleasing(const BinaryOperator *e) { |
3282 | 0 | return asImpl().visitExpr(e); |
3283 | 0 | }
    Unexecuted instantiation: CGObjC.cpp:(anonymous namespace)::ARCExprEmitter<(anonymous namespace)::ARCRetainExprEmitter, llvm::PointerIntPair<llvm::Value*, 1u, bool, llvm::PointerLikeTypeTraits<llvm::Value*>, llvm::PointerIntPairInfo<llvm::Value*, 1u, llvm::PointerLikeTypeTraits<llvm::Value*> > > >::visitBinAssignAutoreleasing(clang::BinaryOperator const*)
    Unexecuted instantiation: CGObjC.cpp:(anonymous namespace)::ARCExprEmitter<(anonymous namespace)::ARCUnsafeUnretainedExprEmitter, llvm::Value*>::visitBinAssignAutoreleasing(clang::BinaryOperator const*)
3284 | | |
3285 | | template <typename Impl, typename Result> |
3286 | | Result |
3287 | 0 | ARCExprEmitter<Impl,Result>::visitBinAssignWeak(const BinaryOperator *e) { |
3288 | 0 | return asImpl().visitExpr(e); |
3289 | 0 | }
    Unexecuted instantiation: CGObjC.cpp:(anonymous namespace)::ARCExprEmitter<(anonymous namespace)::ARCRetainExprEmitter, llvm::PointerIntPair<llvm::Value*, 1u, bool, llvm::PointerLikeTypeTraits<llvm::Value*>, llvm::PointerIntPairInfo<llvm::Value*, 1u, llvm::PointerLikeTypeTraits<llvm::Value*> > > >::visitBinAssignWeak(clang::BinaryOperator const*)
    Unexecuted instantiation: CGObjC.cpp:(anonymous namespace)::ARCExprEmitter<(anonymous namespace)::ARCUnsafeUnretainedExprEmitter, llvm::Value*>::visitBinAssignWeak(clang::BinaryOperator const*)
3290 | | |
3291 | | template <typename Impl, typename Result> |
3292 | | Result |
3293 | 0 | ARCExprEmitter<Impl,Result>::visitBinAssignStrong(const BinaryOperator *e) { |
3294 | 0 | return asImpl().visitExpr(e); |
3295 | 0 | }
    Unexecuted instantiation: CGObjC.cpp:(anonymous namespace)::ARCExprEmitter<(anonymous namespace)::ARCRetainExprEmitter, llvm::PointerIntPair<llvm::Value*, 1u, bool, llvm::PointerLikeTypeTraits<llvm::Value*>, llvm::PointerIntPairInfo<llvm::Value*, 1u, llvm::PointerLikeTypeTraits<llvm::Value*> > > >::visitBinAssignStrong(clang::BinaryOperator const*)
    Unexecuted instantiation: CGObjC.cpp:(anonymous namespace)::ARCExprEmitter<(anonymous namespace)::ARCUnsafeUnretainedExprEmitter, llvm::Value*>::visitBinAssignStrong(clang::BinaryOperator const*)
3296 | | |
3297 | | /// The general expression-emission logic. |
3298 | | template <typename Impl, typename Result> |
3299 | 0 | Result ARCExprEmitter<Impl,Result>::visit(const Expr *e) { |
3300 | | // We should *never* see a nested full-expression here, because if |
3301 | | // we fail to emit at +1, our caller must not retain after we close |
3302 | | // out the full-expression. This isn't as important in the unsafe |
3303 | | // emitter. |
3304 | 0 | assert(!isa<ExprWithCleanups>(e)); |
3305 | | |
3306 | | // Look through parens, __extension__, generic selection, etc. |
3307 | 0 | e = e->IgnoreParens(); |
3308 | | |
3309 | | // Handle certain kinds of casts. |
3310 | 0 | if (const CastExpr *ce = dyn_cast<CastExpr>(e)) { |
3311 | 0 | return asImpl().visitCastExpr(ce); |
3312 | | |
3313 | | // Handle the comma operator. |
3314 | 0 | } else if (auto op = dyn_cast<BinaryOperator>(e)) { |
3315 | 0 | return asImpl().visitBinaryOperator(op); |
3316 | | |
3317 | | // TODO: handle conditional operators here |
3318 | | |
3319 | | // For calls and message sends, use the retained-call logic. |
3320 | | // Delegate inits are a special case in that they're the only |
3321 | | // returns-retained expression that *isn't* surrounded by |
3322 | | // a consume. |
3323 | 0 | } else if (isa<CallExpr>(e) || |
3324 | 0 | (isa<ObjCMessageExpr>(e) && |
3325 | 0 | !cast<ObjCMessageExpr>(e)->isDelegateInitCall())) { |
3326 | 0 | return asImpl().visitCall(e); |
3327 | | |
3328 | | // Look through pseudo-object expressions. |
3329 | 0 | } else if (const PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) { |
3330 | 0 | return asImpl().visitPseudoObjectExpr(pseudo); |
3331 | 0 | } else if (auto *be = dyn_cast<BlockExpr>(e)) |
3332 | 0 | return asImpl().visitBlockExpr(be); |
3333 | | |
3334 | 0 | return asImpl().visitExpr(e); |
3335 | 0 | }
    Unexecuted instantiation: CGObjC.cpp:(anonymous namespace)::ARCExprEmitter<(anonymous namespace)::ARCRetainExprEmitter, llvm::PointerIntPair<llvm::Value*, 1u, bool, llvm::PointerLikeTypeTraits<llvm::Value*>, llvm::PointerIntPairInfo<llvm::Value*, 1u, llvm::PointerLikeTypeTraits<llvm::Value*> > > >::visit(clang::Expr const*)
    Unexecuted instantiation: CGObjC.cpp:(anonymous namespace)::ARCExprEmitter<(anonymous namespace)::ARCUnsafeUnretainedExprEmitter, llvm::Value*>::visit(clang::Expr const*)
3336 | | |
3337 | | namespace { |
3338 | | |
3339 | | /// An emitter for +1 results. |
3340 | | struct ARCRetainExprEmitter : |
3341 | | public ARCExprEmitter<ARCRetainExprEmitter, TryEmitResult> { |
3342 | | |
3343 | 0 | ARCRetainExprEmitter(CodeGenFunction &CGF) : ARCExprEmitter(CGF) {} |
3344 | | |
3345 | 0 | llvm::Value *getValueOfResult(TryEmitResult result) { |
3346 | 0 | return result.getPointer(); |
3347 | 0 | } |
3348 | | |
3349 | 0 | TryEmitResult emitBitCast(TryEmitResult result, llvm::Type *resultType) { |
3350 | 0 | llvm::Value *value = result.getPointer(); |
3351 | 0 | value = CGF.Builder.CreateBitCast(value, resultType); |
3352 | 0 | result.setPointer(value); |
3353 | 0 | return result; |
3354 | 0 | } |
3355 | | |
3356 | 0 | TryEmitResult visitLValueToRValue(const Expr *e) { |
3357 | 0 | return tryEmitARCRetainLoadOfScalar(CGF, e); |
3358 | 0 | } |
3359 | | |
3360 | | /// For consumptions, just emit the subexpression and thus elide |
3361 | | /// the retain/release pair. |
3362 | 0 | TryEmitResult visitConsumeObject(const Expr *e) { |
3363 | 0 | llvm::Value *result = CGF.EmitScalarExpr(e); |
3364 | 0 | return TryEmitResult(result, true); |
3365 | 0 | } |
3366 | | |
3367 | 0 | TryEmitResult visitBlockExpr(const BlockExpr *e) { |
3368 | 0 | TryEmitResult result = visitExpr(e); |
3369 | | // Avoid the block-retain if this is a block literal that doesn't need to be |
3370 | | // copied to the heap. |
3371 | 0 | if (CGF.CGM.getCodeGenOpts().ObjCAvoidHeapifyLocalBlocks && |
3372 | 0 | e->getBlockDecl()->canAvoidCopyToHeap()) |
3373 | 0 | result.setInt(true); |
3374 | 0 | return result; |
3375 | 0 | } |
3376 | | |
3377 | | /// Block extends are net +0. Naively, we could just recurse on |
3378 | | /// the subexpression, but actually we need to ensure that the |
3379 | | /// value is copied as a block, so there's a little filter here. |
3380 | 0 | TryEmitResult visitExtendBlockObject(const Expr *e) { |
3381 | 0 | llvm::Value *result; // will be a +0 value |
3382 | | |
3383 | | // If we can't safely assume the sub-expression will produce a |
3384 | | // block-copied value, emit the sub-expression at +0. |
3385 | 0 | if (shouldEmitSeparateBlockRetain(e)) { |
3386 | 0 | result = CGF.EmitScalarExpr(e); |
3387 | | |
3388 | | // Otherwise, try to emit the sub-expression at +1 recursively. |
3389 | 0 | } else { |
3390 | 0 | TryEmitResult subresult = asImpl().visit(e); |
3391 | | |
3392 | | // If that produced a retained value, just use that. |
3393 | 0 | if (subresult.getInt()) { |
3394 | 0 | return subresult; |
3395 | 0 | } |
3396 | | |
3397 | | // Otherwise it's +0. |
3398 | 0 | result = subresult.getPointer(); |
3399 | 0 | } |
3400 | | |
3401 | | // Retain the object as a block. |
3402 | 0 | result = CGF.EmitARCRetainBlock(result, /*mandatory*/ true); |
3403 | 0 | return TryEmitResult(result, true); |
3404 | 0 | } |
3405 | | |
3406 | | /// For reclaims, emit the subexpression as a retained call and |
3407 | | /// skip the consumption. |
3408 | 0 | TryEmitResult visitReclaimReturnedObject(const Expr *e) { |
3409 | 0 | llvm::Value *result = emitARCRetainCallResult(CGF, e); |
3410 | 0 | return TryEmitResult(result, true); |
3411 | 0 | } |
3412 | | |
3413 | | /// When we have an undecorated call, retroactively do a claim. |
3414 | 0 | TryEmitResult visitCall(const Expr *e) { |
3415 | 0 | llvm::Value *result = emitARCRetainCallResult(CGF, e); |
3416 | 0 | return TryEmitResult(result, true); |
3417 | 0 | } |
3418 | | |
3419 | | // TODO: maybe special-case visitBinAssignWeak? |
3420 | | |
3421 | 0 | TryEmitResult visitExpr(const Expr *e) { |
3422 | | // We didn't find an obvious production, so emit what we've got and |
3423 | | // tell the caller that we didn't manage to retain. |
3424 | 0 | llvm::Value *result = CGF.EmitScalarExpr(e); |
3425 | 0 | return TryEmitResult(result, false); |
3426 | 0 | } |
3427 | | }; |
3428 | | } |
3429 | | |
3430 | | static TryEmitResult |
3431 | 0 | tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) { |
3432 | 0 | return ARCRetainExprEmitter(CGF).visit(e); |
3433 | 0 | } |
3434 | | |
3435 | | static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF, |
3436 | | LValue lvalue, |
3437 | 0 | QualType type) { |
3438 | 0 | TryEmitResult result = tryEmitARCRetainLoadOfScalar(CGF, lvalue, type); |
3439 | 0 | llvm::Value *value = result.getPointer(); |
3440 | 0 | if (!result.getInt()) |
3441 | 0 | value = CGF.EmitARCRetain(type, value); |
3442 | 0 | return value; |
3443 | 0 | } |
3444 | | |
3445 | | /// EmitARCRetainScalarExpr - Semantically equivalent to |
3446 | | /// EmitARCRetainObject(e->getType(), EmitScalarExpr(e)), but making a |
3447 | | /// best-effort attempt to peephole expressions that naturally produce |
3448 | | /// retained objects. |
3449 | 0 | llvm::Value *CodeGenFunction::EmitARCRetainScalarExpr(const Expr *e) { |
3450 | | // The retain needs to happen within the full-expression. |
3451 | 0 | if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) { |
3452 | 0 | RunCleanupsScope scope(*this); |
3453 | 0 | return EmitARCRetainScalarExpr(cleanups->getSubExpr()); |
3454 | 0 | } |
3455 | | |
3456 | 0 | TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e); |
3457 | 0 | llvm::Value *value = result.getPointer(); |
3458 | 0 | if (!result.getInt()) |
3459 | 0 | value = EmitARCRetain(e->getType(), value); |
3460 | 0 | return value; |
3461 | 0 | } |
3462 | | |
3463 | | llvm::Value * |
3464 | 0 | CodeGenFunction::EmitARCRetainAutoreleaseScalarExpr(const Expr *e) { |
3465 | | // The retain needs to happen within the full-expression. |
3466 | 0 | if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) { |
3467 | 0 | RunCleanupsScope scope(*this); |
3468 | 0 | return EmitARCRetainAutoreleaseScalarExpr(cleanups->getSubExpr()); |
3469 | 0 | } |
3470 | | |
3471 | 0 | TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e); |
3472 | 0 | llvm::Value *value = result.getPointer(); |
3473 | 0 | if (result.getInt()) |
3474 | 0 | value = EmitARCAutorelease(value); |
3475 | 0 | else |
3476 | 0 | value = EmitARCRetainAutorelease(e->getType(), value); |
3477 | 0 | return value; |
3478 | 0 | } |
3479 | | |
3480 | 0 | llvm::Value *CodeGenFunction::EmitARCExtendBlockObject(const Expr *e) { |
3481 | 0 | llvm::Value *result; |
3482 | 0 | bool doRetain; |
3483 | |
3484 | 0 | if (shouldEmitSeparateBlockRetain(e)) { |
3485 | 0 | result = EmitScalarExpr(e); |
3486 | 0 | doRetain = true; |
3487 | 0 | } else { |
3488 | 0 | TryEmitResult subresult = tryEmitARCRetainScalarExpr(*this, e); |
3489 | 0 | result = subresult.getPointer(); |
3490 | 0 | doRetain = !subresult.getInt(); |
3491 | 0 | } |
3492 | |
3493 | 0 | if (doRetain) |
3494 | 0 | result = EmitARCRetainBlock(result, /*mandatory*/ true); |
3495 | 0 | return EmitObjCConsumeObject(e->getType(), result); |
3496 | 0 | } |
3497 | | |
3498 | 0 | llvm::Value *CodeGenFunction::EmitObjCThrowOperand(const Expr *expr) { |
3499 | | // In ARC, retain and autorelease the expression. |
3500 | 0 | if (getLangOpts().ObjCAutoRefCount) { |
3501 | | // Do so before running any cleanups for the full-expression. |
3502 | | // EmitARCRetainAutoreleaseScalarExpr does this for us. |
3503 | 0 | return EmitARCRetainAutoreleaseScalarExpr(expr); |
3504 | 0 | } |
3505 | | |
3506 | | // Otherwise, use the normal scalar-expression emission. The |
3507 | | // exception machinery doesn't do anything special with the |
3508 | | // exception like retaining it, so there's no safety associated with |
3509 | | // only running cleanups after the throw has started, and when it |
3510 | | // matters it tends to be substantially inferior code. |
3511 | 0 | return EmitScalarExpr(expr); |
3512 | 0 | } |
3513 | | |
3514 | | namespace { |
3515 | | |
3516 | | /// An emitter for assigning into an __unsafe_unretained context. |
3517 | | struct ARCUnsafeUnretainedExprEmitter : |
3518 | | public ARCExprEmitter<ARCUnsafeUnretainedExprEmitter, llvm::Value*> { |
3519 | | |
3520 | 0 | ARCUnsafeUnretainedExprEmitter(CodeGenFunction &CGF) : ARCExprEmitter(CGF) {} |
3521 | | |
3522 | 0 | llvm::Value *getValueOfResult(llvm::Value *value) { |
3523 | 0 | return value; |
3524 | 0 | } |
3525 | | |
3526 | 0 | llvm::Value *emitBitCast(llvm::Value *value, llvm::Type *resultType) { |
3527 | 0 | return CGF.Builder.CreateBitCast(value, resultType); |
3528 | 0 | } |
3529 | | |
3530 | 0 | llvm::Value *visitLValueToRValue(const Expr *e) { |
3531 | 0 | return CGF.EmitScalarExpr(e); |
3532 | 0 | } |
3533 | | |
3534 | | /// For consumptions, just emit the subexpression and perform the |
3535 | | /// consumption like normal. |
3536 | 0 | llvm::Value *visitConsumeObject(const Expr *e) { |
3537 | 0 | llvm::Value *value = CGF.EmitScalarExpr(e); |
3538 | 0 | return CGF.EmitObjCConsumeObject(e->getType(), value); |
3539 | 0 | } |
3540 | | |
3541 | | /// No special logic for block extensions. (This probably can't |
3542 | | /// actually happen in this emitter, though.) |
3543 | 0 | llvm::Value *visitExtendBlockObject(const Expr *e) { |
3544 | 0 | return CGF.EmitARCExtendBlockObject(e); |
3545 | 0 | } |
3546 | | |
3547 | | /// For reclaims, perform an unsafeClaim if that's enabled. |
3548 | 0 | llvm::Value *visitReclaimReturnedObject(const Expr *e) { |
3549 | 0 | return CGF.EmitARCReclaimReturnedObject(e, /*unsafe*/ true); |
3550 | 0 | } |
3551 | | |
3552 | | /// When we have an undecorated call, just emit it without adding |
3553 | | /// the unsafeClaim. |
3554 | 0 | llvm::Value *visitCall(const Expr *e) { |
3555 | 0 | return CGF.EmitScalarExpr(e); |
3556 | 0 | } |
3557 | | |
3558 | | /// Just do normal scalar emission in the default case. |
3559 | 0 | llvm::Value *visitExpr(const Expr *e) { |
3560 | 0 | return CGF.EmitScalarExpr(e); |
3561 | 0 | } |
3562 | | }; |
3563 | | } |
3564 | | |
3565 | | static llvm::Value *emitARCUnsafeUnretainedScalarExpr(CodeGenFunction &CGF, |
3566 | 0 | const Expr *e) { |
3567 | 0 | return ARCUnsafeUnretainedExprEmitter(CGF).visit(e); |
3568 | 0 | } |
3569 | | |
3570 | | /// EmitARCUnsafeUnretainedScalarExpr - Semantically equivalent to |
3571 | | /// immediately releasing the result of EmitARCRetainScalarExpr, but
3572 | | /// avoiding any spurious retains, including by performing reclaims |
3573 | | /// with objc_unsafeClaimAutoreleasedReturnValue. |
3574 | 0 | llvm::Value *CodeGenFunction::EmitARCUnsafeUnretainedScalarExpr(const Expr *e) { |
3575 | | // Look through full-expressions. |
3576 | 0 | if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) { |
3577 | 0 | RunCleanupsScope scope(*this); |
3578 | 0 | return emitARCUnsafeUnretainedScalarExpr(*this, cleanups->getSubExpr()); |
3579 | 0 | } |
3580 | | |
3581 | 0 | return emitARCUnsafeUnretainedScalarExpr(*this, e); |
3582 | 0 | } |
3583 | | |
3584 | | std::pair<LValue,llvm::Value*> |
3585 | | CodeGenFunction::EmitARCStoreUnsafeUnretained(const BinaryOperator *e, |
3586 | 0 | bool ignored) { |
3587 | | // Evaluate the RHS first. If we're ignoring the result, assume |
3588 | | // that we can emit at an unsafe +0. |
3589 | 0 | llvm::Value *value; |
3590 | 0 | if (ignored) { |
3591 | 0 | value = EmitARCUnsafeUnretainedScalarExpr(e->getRHS()); |
3592 | 0 | } else { |
3593 | 0 | value = EmitScalarExpr(e->getRHS()); |
3594 | 0 | } |
3595 | | |
3596 | | // Emit the LHS and perform the store. |
3597 | 0 | LValue lvalue = EmitLValue(e->getLHS()); |
3598 | 0 | EmitStoreOfScalar(value, lvalue); |
3599 | |
3600 | 0 | return std::pair<LValue,llvm::Value*>(std::move(lvalue), value); |
3601 | 0 | } |
3602 | | |
3603 | | std::pair<LValue,llvm::Value*> |
3604 | | CodeGenFunction::EmitARCStoreStrong(const BinaryOperator *e, |
3605 | 0 | bool ignored) { |
3606 | | // Evaluate the RHS first. |
3607 | 0 | TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e->getRHS()); |
3608 | 0 | llvm::Value *value = result.getPointer(); |
3609 | |
3610 | 0 | bool hasImmediateRetain = result.getInt(); |
3611 | | |
3612 | | // If we didn't emit a retained object, and the l-value is of block |
3613 | | // type, then we need to emit the block-retain immediately in case |
3614 | | // it invalidates the l-value. |
3615 | 0 | if (!hasImmediateRetain && e->getType()->isBlockPointerType()) { |
3616 | 0 | value = EmitARCRetainBlock(value, /*mandatory*/ false); |
3617 | 0 | hasImmediateRetain = true; |
3618 | 0 | } |
3619 | |
3620 | 0 | LValue lvalue = EmitLValue(e->getLHS()); |
3621 | | |
3622 | | // If the RHS was emitted retained, expand this into load/store/release.
3623 | 0 | if (hasImmediateRetain) { |
3624 | 0 | llvm::Value *oldValue = EmitLoadOfScalar(lvalue, SourceLocation()); |
3625 | 0 | EmitStoreOfScalar(value, lvalue); |
3626 | 0 | EmitARCRelease(oldValue, lvalue.isARCPreciseLifetime()); |
3627 | 0 | } else { |
3628 | 0 | value = EmitARCStoreStrong(lvalue, value, ignored); |
3629 | 0 | } |
3630 | |
3631 | 0 | return std::pair<LValue,llvm::Value*>(lvalue, value); |
3632 | 0 | } |
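When the RHS arrives retained, the store above expands into load-old / store-new / release-old, with the release deliberately last. A sketch of that expansion, assuming only objc_release from the runtime (the slot-based helper is hypothetical):

    extern "C" void objc_release(void *obj);

    static void storeStrongAtPlusOne(void **slot, void *newValue) {
      void *old = *slot;  // load the previous value
      *slot = newValue;   // store the already-retained new value
      objc_release(old);  // release the old value afterwards
    }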
3633 | | |
3634 | | std::pair<LValue,llvm::Value*> |
3635 | 0 | CodeGenFunction::EmitARCStoreAutoreleasing(const BinaryOperator *e) { |
3636 | 0 | llvm::Value *value = EmitARCRetainAutoreleaseScalarExpr(e->getRHS()); |
3637 | 0 | LValue lvalue = EmitLValue(e->getLHS()); |
3638 | |
3639 | 0 | EmitStoreOfScalar(value, lvalue); |
3640 | |
3641 | 0 | return std::pair<LValue,llvm::Value*>(lvalue, value); |
3642 | 0 | } |
3643 | | |
3644 | | void CodeGenFunction::EmitObjCAutoreleasePoolStmt( |
3645 | 0 | const ObjCAutoreleasePoolStmt &ARPS) { |
3646 | 0 | const Stmt *subStmt = ARPS.getSubStmt(); |
3647 | 0 | const CompoundStmt &S = cast<CompoundStmt>(*subStmt); |
3648 | |
3649 | 0 | CGDebugInfo *DI = getDebugInfo(); |
3650 | 0 | if (DI) |
3651 | 0 | DI->EmitLexicalBlockStart(Builder, S.getLBracLoc()); |
3652 | | |
3653 | | // Keep track of the current cleanup stack depth. |
3654 | 0 | RunCleanupsScope Scope(*this); |
3655 | 0 | if (CGM.getLangOpts().ObjCRuntime.hasNativeARC()) { |
3656 | 0 | llvm::Value *token = EmitObjCAutoreleasePoolPush(); |
3657 | 0 | EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, token); |
3658 | 0 | } else { |
3659 | 0 | llvm::Value *token = EmitObjCMRRAutoreleasePoolPush(); |
3660 | 0 | EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, token); |
3661 | 0 | } |
3662 | |
3663 | 0 | for (const auto *I : S.body()) |
3664 | 0 | EmitStmt(I); |
3665 | |
3666 | 0 | if (DI) |
3667 | 0 | DI->EmitLexicalBlockEnd(Builder, S.getRBracLoc()); |
3668 | 0 | } |
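On a runtime with native ARC, the statement above reduces to an autorelease-pool push on entry and a cleanup that pops the pool on every exit path, including exceptional ones. A rough source-level sketch under that assumption, using the public runtime calls (the body callback is illustrative):

    extern "C" void *objc_autoreleasePoolPush(void);
    extern "C" void objc_autoreleasePoolPop(void *token);

    static void autoreleasePoolShape(void (*body)(void)) {
      void *token = objc_autoreleasePoolPush(); // on scope entry
      body();                                   // the compound statement
      objc_autoreleasePoolPop(token);           // the cleanup pops on exit
    }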
3669 | | |
3670 | | /// EmitExtendGCLifetime - Given a pointer to an Objective-C object, |
3671 | | /// make sure it survives garbage collection until this point. |
3672 | 0 | void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) { |
3673 | | // We just use an empty inline assembly that takes the object as input.
3674 | 0 | llvm::FunctionType *extenderType |
3675 | 0 | = llvm::FunctionType::get(VoidTy, VoidPtrTy, RequiredArgs::All); |
3676 | 0 | llvm::InlineAsm *extender = llvm::InlineAsm::get(extenderType, |
3677 | 0 | /* assembly */ "", |
3678 | 0 | /* constraints */ "r", |
3679 | 0 | /* side effects */ true); |
3680 | |
3681 | 0 | EmitNounwindRuntimeCall(extender, object); |
3682 | 0 | } |
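In source form, the same trick is an empty asm statement with an "r" input and declared side effects, which forces the optimizer to keep the value live (and materialized in a register) up to this point. A sketch using the GCC/Clang extended-asm syntax:

    static void extendLifetime(void *object) {
      // Empty assembly text, "r" input constraint, volatile => side effects.
      asm volatile("" : /* no outputs */ : "r"(object));
    }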
3683 | | |
3684 | | /// GenerateObjCAtomicSetterCopyHelperFunction - Given a C++ object type with
3685 | | /// a non-trivial copy-assignment operator, produce the following helper function:
3686 | | /// static void copyHelper(Ty *dest, const Ty *source) { *dest = *source; } |
3687 | | /// |
3688 | | llvm::Constant * |
3689 | | CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction( |
3690 | 0 | const ObjCPropertyImplDecl *PID) { |
3691 | 0 | const ObjCPropertyDecl *PD = PID->getPropertyDecl(); |
3692 | 0 | if ((!(PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_atomic))) |
3693 | 0 | return nullptr; |
3694 | | |
3695 | 0 | QualType Ty = PID->getPropertyIvarDecl()->getType(); |
3696 | 0 | ASTContext &C = getContext(); |
3697 | |
3698 | 0 | if (Ty.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) { |
3699 | | // Call the move assignment operator instead of calling the copy assignment |
3700 | | // operator and destructor. |
3701 | 0 | CharUnits Alignment = C.getTypeAlignInChars(Ty); |
3702 | 0 | llvm::Constant *Fn = getNonTrivialCStructMoveAssignmentOperator( |
3703 | 0 | CGM, Alignment, Alignment, Ty.isVolatileQualified(), Ty); |
3704 | 0 | return Fn; |
3705 | 0 | } |
3706 | | |
3707 | 0 | if (!getLangOpts().CPlusPlus || |
3708 | 0 | !getLangOpts().ObjCRuntime.hasAtomicCopyHelper()) |
3709 | 0 | return nullptr; |
3710 | 0 | if (!Ty->isRecordType()) |
3711 | 0 | return nullptr; |
3712 | 0 | llvm::Constant *HelperFn = nullptr; |
3713 | 0 | if (hasTrivialSetExpr(PID)) |
3714 | 0 | return nullptr; |
3715 | 0 | assert(PID->getSetterCXXAssignment() && "SetterCXXAssignment - null"); |
3716 | 0 | if ((HelperFn = CGM.getAtomicSetterHelperFnMap(Ty))) |
3717 | 0 | return HelperFn; |
3718 | | |
3719 | 0 | IdentifierInfo *II |
3720 | 0 | = &CGM.getContext().Idents.get("__assign_helper_atomic_property_"); |
3721 | |
3722 | 0 | QualType ReturnTy = C.VoidTy; |
3723 | 0 | QualType DestTy = C.getPointerType(Ty); |
3724 | 0 | QualType SrcTy = Ty; |
3725 | 0 | SrcTy.addConst(); |
3726 | 0 | SrcTy = C.getPointerType(SrcTy); |
3727 | |
3728 | 0 | SmallVector<QualType, 2> ArgTys; |
3729 | 0 | ArgTys.push_back(DestTy); |
3730 | 0 | ArgTys.push_back(SrcTy); |
3731 | 0 | QualType FunctionTy = C.getFunctionType(ReturnTy, ArgTys, {}); |
3732 | |
3733 | 0 | FunctionDecl *FD = FunctionDecl::Create( |
3734 | 0 | C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II, |
3735 | 0 | FunctionTy, nullptr, SC_Static, false, false, false); |
3736 | |
3737 | 0 | FunctionArgList args; |
3738 | 0 | ParmVarDecl *Params[2]; |
3739 | 0 | ParmVarDecl *DstDecl = ParmVarDecl::Create( |
3740 | 0 | C, FD, SourceLocation(), SourceLocation(), nullptr, DestTy, |
3741 | 0 | C.getTrivialTypeSourceInfo(DestTy, SourceLocation()), SC_None, |
3742 | 0 | /*DefArg=*/nullptr); |
3743 | 0 | args.push_back(Params[0] = DstDecl); |
3744 | 0 | ParmVarDecl *SrcDecl = ParmVarDecl::Create( |
3745 | 0 | C, FD, SourceLocation(), SourceLocation(), nullptr, SrcTy, |
3746 | 0 | C.getTrivialTypeSourceInfo(SrcTy, SourceLocation()), SC_None, |
3747 | 0 | /*DefArg=*/nullptr); |
3748 | 0 | args.push_back(Params[1] = SrcDecl); |
3749 | 0 | FD->setParams(Params); |
3750 | |
3751 | 0 | const CGFunctionInfo &FI = |
3752 | 0 | CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, args); |
3753 | |
3754 | 0 | llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI); |
3755 | |
3756 | 0 | llvm::Function *Fn = |
3757 | 0 | llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage, |
3758 | 0 | "__assign_helper_atomic_property_", |
3759 | 0 | &CGM.getModule()); |
3760 | |
3761 | 0 | CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI); |
3762 | |
3763 | 0 | StartFunction(FD, ReturnTy, Fn, FI, args); |
3764 | |
3765 | 0 | DeclRefExpr DstExpr(C, DstDecl, false, DestTy, VK_PRValue, SourceLocation()); |
3766 | 0 | UnaryOperator *DST = UnaryOperator::Create( |
3767 | 0 | C, &DstExpr, UO_Deref, DestTy->getPointeeType(), VK_LValue, OK_Ordinary, |
3768 | 0 | SourceLocation(), false, FPOptionsOverride()); |
3769 | |
3770 | 0 | DeclRefExpr SrcExpr(C, SrcDecl, false, SrcTy, VK_PRValue, SourceLocation()); |
3771 | 0 | UnaryOperator *SRC = UnaryOperator::Create( |
3772 | 0 | C, &SrcExpr, UO_Deref, SrcTy->getPointeeType(), VK_LValue, OK_Ordinary, |
3773 | 0 | SourceLocation(), false, FPOptionsOverride()); |
3774 | |
3775 | 0 | Expr *Args[2] = {DST, SRC}; |
3776 | 0 | CallExpr *CalleeExp = cast<CallExpr>(PID->getSetterCXXAssignment()); |
3777 | 0 | CXXOperatorCallExpr *TheCall = CXXOperatorCallExpr::Create( |
3778 | 0 | C, OO_Equal, CalleeExp->getCallee(), Args, DestTy->getPointeeType(), |
3779 | 0 | VK_LValue, SourceLocation(), FPOptionsOverride()); |
3780 | |
3781 | 0 | EmitStmt(TheCall); |
3782 | |
3783 | 0 | FinishFunction(); |
3784 | 0 | HelperFn = Fn; |
3785 | 0 | CGM.setAtomicSetterHelperFnMap(Ty, HelperFn); |
3786 | 0 | return HelperFn; |
3787 | 0 | } |
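Spelled out in C++, the synthesized setter helper has exactly the shape promised by the doc comment above; Ty stands for a hypothetical property type with a non-trivial copy assignment:

    struct Ty {
      Ty &operator=(const Ty &other); // non-trivial copy assignment
    };

    // Mirrors the emitted __assign_helper_atomic_property_ symbol.
    static void assignHelperAtomicProperty(Ty *dest, const Ty *source) {
      *dest = *source; // invokes Ty::operator=
    }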
3788 | | |
3789 | | llvm::Constant *CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction( |
3790 | 0 | const ObjCPropertyImplDecl *PID) { |
3791 | 0 | const ObjCPropertyDecl *PD = PID->getPropertyDecl(); |
3792 | 0 | if ((!(PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_atomic))) |
3793 | 0 | return nullptr; |
3794 | | |
3795 | 0 | QualType Ty = PD->getType(); |
3796 | 0 | ASTContext &C = getContext(); |
3797 | |
3798 | 0 | if (Ty.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) { |
3799 | 0 | CharUnits Alignment = C.getTypeAlignInChars(Ty); |
3800 | 0 | llvm::Constant *Fn = getNonTrivialCStructCopyConstructor( |
3801 | 0 | CGM, Alignment, Alignment, Ty.isVolatileQualified(), Ty); |
3802 | 0 | return Fn; |
3803 | 0 | } |
3804 | | |
3805 | 0 | if (!getLangOpts().CPlusPlus || |
3806 | 0 | !getLangOpts().ObjCRuntime.hasAtomicCopyHelper()) |
3807 | 0 | return nullptr; |
3808 | 0 | if (!Ty->isRecordType()) |
3809 | 0 | return nullptr; |
3810 | 0 | llvm::Constant *HelperFn = nullptr; |
3811 | 0 | if (hasTrivialGetExpr(PID)) |
3812 | 0 | return nullptr; |
3813 | 0 | assert(PID->getGetterCXXConstructor() && "getGetterCXXConstructor - null"); |
3814 | 0 | if ((HelperFn = CGM.getAtomicGetterHelperFnMap(Ty))) |
3815 | 0 | return HelperFn; |
3816 | | |
3817 | 0 | IdentifierInfo *II = |
3818 | 0 | &CGM.getContext().Idents.get("__copy_helper_atomic_property_"); |
3819 | |
3820 | 0 | QualType ReturnTy = C.VoidTy; |
3821 | 0 | QualType DestTy = C.getPointerType(Ty); |
3822 | 0 | QualType SrcTy = Ty; |
3823 | 0 | SrcTy.addConst(); |
3824 | 0 | SrcTy = C.getPointerType(SrcTy); |
3825 | |
3826 | 0 | SmallVector<QualType, 2> ArgTys; |
3827 | 0 | ArgTys.push_back(DestTy); |
3828 | 0 | ArgTys.push_back(SrcTy); |
3829 | 0 | QualType FunctionTy = C.getFunctionType(ReturnTy, ArgTys, {}); |
3830 | |
3831 | 0 | FunctionDecl *FD = FunctionDecl::Create( |
3832 | 0 | C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II, |
3833 | 0 | FunctionTy, nullptr, SC_Static, false, false, false); |
3834 | |
3835 | 0 | FunctionArgList args; |
3836 | 0 | ParmVarDecl *Params[2]; |
3837 | 0 | ParmVarDecl *DstDecl = ParmVarDecl::Create( |
3838 | 0 | C, FD, SourceLocation(), SourceLocation(), nullptr, DestTy, |
3839 | 0 | C.getTrivialTypeSourceInfo(DestTy, SourceLocation()), SC_None, |
3840 | 0 | /*DefArg=*/nullptr); |
3841 | 0 | args.push_back(Params[0] = DstDecl); |
3842 | 0 | ParmVarDecl *SrcDecl = ParmVarDecl::Create( |
3843 | 0 | C, FD, SourceLocation(), SourceLocation(), nullptr, SrcTy, |
3844 | 0 | C.getTrivialTypeSourceInfo(SrcTy, SourceLocation()), SC_None, |
3845 | 0 | /*DefArg=*/nullptr); |
3846 | 0 | args.push_back(Params[1] = SrcDecl); |
3847 | 0 | FD->setParams(Params); |
3848 | |
3849 | 0 | const CGFunctionInfo &FI = |
3850 | 0 | CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, args); |
3851 | |
3852 | 0 | llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI); |
3853 | |
3854 | 0 | llvm::Function *Fn = llvm::Function::Create( |
3855 | 0 | LTy, llvm::GlobalValue::InternalLinkage, "__copy_helper_atomic_property_", |
3856 | 0 | &CGM.getModule()); |
3857 | |
3858 | 0 | CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI); |
3859 | |
3860 | 0 | StartFunction(FD, ReturnTy, Fn, FI, args); |
3861 | |
3862 | 0 | DeclRefExpr SrcExpr(getContext(), SrcDecl, false, SrcTy, VK_PRValue, |
3863 | 0 | SourceLocation()); |
3864 | |
3865 | 0 | UnaryOperator *SRC = UnaryOperator::Create( |
3866 | 0 | C, &SrcExpr, UO_Deref, SrcTy->getPointeeType(), VK_LValue, OK_Ordinary, |
3867 | 0 | SourceLocation(), false, FPOptionsOverride()); |
3868 | |
3869 | 0 | CXXConstructExpr *CXXConstExpr = |
3870 | 0 | cast<CXXConstructExpr>(PID->getGetterCXXConstructor()); |
3871 | |
3872 | 0 | SmallVector<Expr*, 4> ConstructorArgs; |
3873 | 0 | ConstructorArgs.push_back(SRC); |
3874 | 0 | ConstructorArgs.append(std::next(CXXConstExpr->arg_begin()), |
3875 | 0 | CXXConstExpr->arg_end()); |
3876 | |
3877 | 0 | CXXConstructExpr *TheCXXConstructExpr = |
3878 | 0 | CXXConstructExpr::Create(C, Ty, SourceLocation(), |
3879 | 0 | CXXConstExpr->getConstructor(), |
3880 | 0 | CXXConstExpr->isElidable(), |
3881 | 0 | ConstructorArgs, |
3882 | 0 | CXXConstExpr->hadMultipleCandidates(), |
3883 | 0 | CXXConstExpr->isListInitialization(), |
3884 | 0 | CXXConstExpr->isStdInitListInitialization(), |
3885 | 0 | CXXConstExpr->requiresZeroInitialization(), |
3886 | 0 | CXXConstExpr->getConstructionKind(), |
3887 | 0 | SourceRange()); |
3888 | |
3889 | 0 | DeclRefExpr DstExpr(getContext(), DstDecl, false, DestTy, VK_PRValue, |
3890 | 0 | SourceLocation()); |
3891 | |
3892 | 0 | RValue DV = EmitAnyExpr(&DstExpr); |
3893 | 0 | CharUnits Alignment = |
3894 | 0 | getContext().getTypeAlignInChars(TheCXXConstructExpr->getType()); |
3895 | 0 | EmitAggExpr(TheCXXConstructExpr, |
3896 | 0 | AggValueSlot::forAddr( |
3897 | 0 | Address(DV.getScalarVal(), ConvertTypeForMem(Ty), Alignment), |
3898 | 0 | Qualifiers(), AggValueSlot::IsDestructed, |
3899 | 0 | AggValueSlot::DoesNotNeedGCBarriers, |
3900 | 0 | AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap)); |
3901 | |
3902 | 0 | FinishFunction(); |
3903 | 0 | HelperFn = Fn; |
3904 | 0 | CGM.setAtomicGetterHelperFnMap(Ty, HelperFn); |
3905 | 0 | return HelperFn; |
3906 | 0 | } |
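The getter-side helper differs in that it copy-constructs into uninitialized destination storage (via the getter's CXXConstructExpr) rather than assigning. A sketch of that shape, again with a hypothetical Ty:

    #include <new>

    struct Ty {
      Ty(const Ty &other); // non-trivial copy constructor
    };

    // Mirrors the emitted __copy_helper_atomic_property_ symbol.
    static void copyHelperAtomicProperty(Ty *dest, const Ty *source) {
      ::new (static_cast<void *>(dest)) Ty(*source); // placement copy-construction
    }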
3907 | | |
3908 | | llvm::Value * |
3909 | 0 | CodeGenFunction::EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty) { |
3910 | | // Get selectors for copy/autorelease.
3911 | 0 | IdentifierInfo *CopyID = &getContext().Idents.get("copy"); |
3912 | 0 | Selector CopySelector = |
3913 | 0 | getContext().Selectors.getNullarySelector(CopyID); |
3914 | 0 | IdentifierInfo *AutoreleaseID = &getContext().Idents.get("autorelease"); |
3915 | 0 | Selector AutoreleaseSelector = |
3916 | 0 | getContext().Selectors.getNullarySelector(AutoreleaseID); |
3917 | | |
3918 | | // Emit calls to copy/autorelease.
3919 | 0 | CGObjCRuntime &Runtime = CGM.getObjCRuntime(); |
3920 | 0 | llvm::Value *Val = Block; |
3921 | 0 | RValue Result; |
3922 | 0 | Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(), |
3923 | 0 | Ty, CopySelector, |
3924 | 0 | Val, CallArgList(), nullptr, nullptr); |
3925 | 0 | Val = Result.getScalarVal(); |
3926 | 0 | Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(), |
3927 | 0 | Ty, AutoreleaseSelector, |
3928 | 0 | Val, CallArgList(), nullptr, nullptr); |
3929 | 0 | Val = Result.getScalarVal(); |
3930 | 0 | return Val; |
3931 | 0 | } |
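The two message sends above amount to [[block copy] autorelease]. A sketch of the same sequence spelled through the Darwin runtime's dispatch entry point, using the standard cast-before-call idiom for objc_msgSend (the wrapper name is illustrative):

    #include <objc/message.h>
    #include <objc/runtime.h>

    static id blockCopyAndAutorelease(id block) {
      typedef id (*NullarySend)(id, SEL);
      id copied = ((NullarySend)objc_msgSend)(block, sel_registerName("copy"));
      return ((NullarySend)objc_msgSend)(copied, sel_registerName("autorelease"));
    }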
3932 | | |
3933 | 0 | static unsigned getBaseMachOPlatformID(const llvm::Triple &TT) { |
3934 | 0 | switch (TT.getOS()) { |
3935 | 0 | case llvm::Triple::Darwin: |
3936 | 0 | case llvm::Triple::MacOSX: |
3937 | 0 | return llvm::MachO::PLATFORM_MACOS; |
3938 | 0 | case llvm::Triple::IOS: |
3939 | 0 | return llvm::MachO::PLATFORM_IOS; |
3940 | 0 | case llvm::Triple::TvOS: |
3941 | 0 | return llvm::MachO::PLATFORM_TVOS; |
3942 | 0 | case llvm::Triple::WatchOS: |
3943 | 0 | return llvm::MachO::PLATFORM_WATCHOS; |
3944 | 0 | case llvm::Triple::DriverKit: |
3945 | 0 | return llvm::MachO::PLATFORM_DRIVERKIT; |
3946 | 0 | default: |
3947 | 0 | return llvm::MachO::PLATFORM_UNKNOWN; |
3948 | 0 | } |
3949 | 0 | } |
3950 | | |
3951 | | static llvm::Value *emitIsPlatformVersionAtLeast(CodeGenFunction &CGF, |
3952 | 0 | const VersionTuple &Version) { |
3953 | 0 | CodeGenModule &CGM = CGF.CGM; |
3954 | | // Note: we intend to support multi-platform version checks, so reserve
3955 | | // room for a dual-platform checking invocation that will be
3956 | | // implemented in the future.
3957 | 0 | llvm::SmallVector<llvm::Value *, 8> Args; |
3958 | |
3959 | 0 | auto EmitArgs = [&](const VersionTuple &Version, const llvm::Triple &TT) { |
3960 | 0 | std::optional<unsigned> Min = Version.getMinor(), |
3961 | 0 | SMin = Version.getSubminor(); |
3962 | 0 | Args.push_back( |
3963 | 0 | llvm::ConstantInt::get(CGM.Int32Ty, getBaseMachOPlatformID(TT))); |
3964 | 0 | Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, Version.getMajor())); |
3965 | 0 | Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, Min.value_or(0))); |
3966 | 0 | Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, SMin.value_or(0))); |
3967 | 0 | }; |
3968 | |
3969 | 0 | assert(!Version.empty() && "unexpected empty version"); |
3970 | 0 | EmitArgs(Version, CGM.getTarget().getTriple()); |
3971 | |
3972 | 0 | if (!CGM.IsPlatformVersionAtLeastFn) { |
3973 | 0 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
3974 | 0 | CGM.Int32Ty, {CGM.Int32Ty, CGM.Int32Ty, CGM.Int32Ty, CGM.Int32Ty}, |
3975 | 0 | false); |
3976 | 0 | CGM.IsPlatformVersionAtLeastFn = |
3977 | 0 | CGM.CreateRuntimeFunction(FTy, "__isPlatformVersionAtLeast"); |
3978 | 0 | } |
3979 | |
3980 | 0 | llvm::Value *Check = |
3981 | 0 | CGF.EmitNounwindRuntimeCall(CGM.IsPlatformVersionAtLeastFn, Args); |
3982 | 0 | return CGF.Builder.CreateICmpNE(Check, |
3983 | 0 | llvm::Constant::getNullValue(CGM.Int32Ty)); |
3984 | 0 | } |
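The helper named here lives in compiler-rt, and the generated code simply compares its result against zero. A declaration-level sketch, with integer types simplified to match the Int32Ty arguments built above:

    extern "C" int __isPlatformVersionAtLeast(int platform, int major, int minor,
                                              int subminor);

    static bool platformAtLeast(int platform, int major, int minor, int subminor) {
      return __isPlatformVersionAtLeast(platform, major, minor, subminor) != 0;
    }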
3985 | | |
3986 | | llvm::Value * |
3987 | 0 | CodeGenFunction::EmitBuiltinAvailable(const VersionTuple &Version) { |
3988 | | // Darwin uses the new __isPlatformVersionAtLeast family of routines. |
3989 | 0 | if (CGM.getTarget().getTriple().isOSDarwin()) |
3990 | 0 | return emitIsPlatformVersionAtLeast(*this, Version); |
3991 | | |
3992 | 0 | if (!CGM.IsOSVersionAtLeastFn) { |
3993 | 0 | llvm::FunctionType *FTy = |
3994 | 0 | llvm::FunctionType::get(Int32Ty, {Int32Ty, Int32Ty, Int32Ty}, false); |
3995 | 0 | CGM.IsOSVersionAtLeastFn = |
3996 | 0 | CGM.CreateRuntimeFunction(FTy, "__isOSVersionAtLeast"); |
3997 | 0 | } |
3998 | |
3999 | 0 | std::optional<unsigned> Min = Version.getMinor(), |
4000 | 0 | SMin = Version.getSubminor(); |
4001 | 0 | llvm::Value *Args[] = { |
4002 | 0 | llvm::ConstantInt::get(CGM.Int32Ty, Version.getMajor()), |
4003 | 0 | llvm::ConstantInt::get(CGM.Int32Ty, Min.value_or(0)), |
4004 | 0 | llvm::ConstantInt::get(CGM.Int32Ty, SMin.value_or(0))}; |
4005 | |
4006 | 0 | llvm::Value *CallRes = |
4007 | 0 | EmitNounwindRuntimeCall(CGM.IsOSVersionAtLeastFn, Args); |
4008 | |
4009 | 0 | return Builder.CreateICmpNE(CallRes, llvm::Constant::getNullValue(Int32Ty)); |
4010 | 0 | } |
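The non-Darwin fallback is analogous, minus the platform argument; a matching sketch under the same simplifications:

    extern "C" int __isOSVersionAtLeast(int major, int minor, int subminor);

    static bool osAtLeast(int major, int minor, int subminor) {
      return __isOSVersionAtLeast(major, minor, subminor) != 0;
    }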
4011 | | |
4012 | | static bool isFoundationNeededForDarwinAvailabilityCheck( |
4013 | 0 | const llvm::Triple &TT, const VersionTuple &TargetVersion) { |
4014 | 0 | VersionTuple FoundationDroppedInVersion; |
4015 | 0 | switch (TT.getOS()) { |
4016 | 0 | case llvm::Triple::IOS: |
4017 | 0 | case llvm::Triple::TvOS: |
4018 | 0 | FoundationDroppedInVersion = VersionTuple(/*Major=*/13); |
4019 | 0 | break; |
4020 | 0 | case llvm::Triple::WatchOS: |
4021 | 0 | FoundationDroppedInVersion = VersionTuple(/*Major=*/6); |
4022 | 0 | break; |
4023 | 0 | case llvm::Triple::Darwin: |
4024 | 0 | case llvm::Triple::MacOSX: |
4025 | 0 | FoundationDroppedInVersion = VersionTuple(/*Major=*/10, /*Minor=*/15); |
4026 | 0 | break; |
4027 | 0 | case llvm::Triple::DriverKit: |
4028 | | // DriverKit doesn't need Foundation. |
4029 | 0 | return false; |
4030 | 0 | default: |
4031 | 0 | llvm_unreachable("Unexpected OS"); |
4032 | 0 | } |
4033 | 0 | return TargetVersion < FoundationDroppedInVersion; |
4034 | 0 | } |
4035 | | |
4036 | 0 | void CodeGenModule::emitAtAvailableLinkGuard() { |
4037 | 0 | if (!IsPlatformVersionAtLeastFn) |
4038 | 0 | return; |
4039 | | // @available requires CoreFoundation only on Darwin. |
4040 | 0 | if (!Target.getTriple().isOSDarwin()) |
4041 | 0 | return; |
4042 | | // @available doesn't need Foundation on macOS 10.15+, iOS/tvOS 13+, or |
4043 | | // watchOS 6+. |
4044 | 0 | if (!isFoundationNeededForDarwinAvailabilityCheck( |
4045 | 0 | Target.getTriple(), Target.getPlatformMinVersion())) |
4046 | 0 | return; |
4047 | | // Add -framework CoreFoundation to the linker commands. We still want to
4048 | | // emit the CoreFoundation reference below because, if CoreFoundation is
4049 | | // not used anywhere else in the code, the linker would otherwise not
4050 | | // link the framework at all.
4051 | 0 | auto &Context = getLLVMContext(); |
4052 | 0 | llvm::Metadata *Args[2] = {llvm::MDString::get(Context, "-framework"), |
4053 | 0 | llvm::MDString::get(Context, "CoreFoundation")}; |
4054 | 0 | LinkerOptionsMetadata.push_back(llvm::MDNode::get(Context, Args)); |
4055 | | // Emit a reference to a symbol from CoreFoundation to ensure that |
4056 | | // CoreFoundation is linked into the final binary. |
4057 | 0 | llvm::FunctionType *FTy = |
4058 | 0 | llvm::FunctionType::get(Int32Ty, {VoidPtrTy}, false); |
4059 | 0 | llvm::FunctionCallee CFFunc = |
4060 | 0 | CreateRuntimeFunction(FTy, "CFBundleGetVersionNumber"); |
4061 | |
4062 | 0 | llvm::FunctionType *CheckFTy = llvm::FunctionType::get(VoidTy, {}, false); |
4063 | 0 | llvm::FunctionCallee CFLinkCheckFuncRef = CreateRuntimeFunction( |
4064 | 0 | CheckFTy, "__clang_at_available_requires_core_foundation_framework", |
4065 | 0 | llvm::AttributeList(), /*Local=*/true); |
4066 | 0 | llvm::Function *CFLinkCheckFunc = |
4067 | 0 | cast<llvm::Function>(CFLinkCheckFuncRef.getCallee()->stripPointerCasts()); |
4068 | 0 | if (CFLinkCheckFunc->empty()) { |
4069 | 0 | CFLinkCheckFunc->setLinkage(llvm::GlobalValue::LinkOnceAnyLinkage); |
4070 | 0 | CFLinkCheckFunc->setVisibility(llvm::GlobalValue::HiddenVisibility); |
4071 | 0 | CodeGenFunction CGF(*this); |
4072 | 0 | CGF.Builder.SetInsertPoint(CGF.createBasicBlock("", CFLinkCheckFunc)); |
4073 | 0 | CGF.EmitNounwindRuntimeCall(CFFunc, |
4074 | 0 | llvm::Constant::getNullValue(VoidPtrTy)); |
4075 | 0 | CGF.Builder.CreateUnreachable(); |
4076 | 0 | addCompilerUsedGlobal(CFLinkCheckFunc); |
4077 | 0 | } |
4078 | 0 | } |
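Roughly, the guard built above corresponds to the following source shape: a hidden, never-executed function whose only purpose is to carry an undefined reference into CoreFoundation. The declaration mirrors the Int32Ty/VoidPtrTy FunctionType constructed above rather than the CoreFoundation header, and the linkage/visibility plus llvm.compiler.used registration have no exact source-level equivalent:

    extern "C" int CFBundleGetVersionNumber(void *bundle);

    static void atAvailableLinkGuard(void) {
      CFBundleGetVersionNumber(nullptr); // forces CoreFoundation to be linked
      __builtin_unreachable();           // the body is never meant to run
    }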
4079 | | |
4080 | 23 | CGObjCRuntime::~CGObjCRuntime() {} |