/src/llvm-project/clang/lib/CodeGen/CGCleanup.cpp
Line | Count | Source |
1 | | //===--- CGCleanup.cpp - Bookkeeping and code emission for cleanups -------===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | // |
9 | | // This file contains code dealing with the IR generation for cleanups |
10 | | // and related information. |
11 | | // |
12 | | // A "cleanup" is a piece of code which needs to be executed whenever |
13 | | // control transfers out of a particular scope. This can be |
14 | | // conditionalized to occur only on exceptional control flow, only on |
15 | | // normal control flow, or both. |
16 | | // |
17 | | //===----------------------------------------------------------------------===// |
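To make the notion concrete: a minimal, hypothetical C++ input (not part of this file) whose single cleanup must run on both kinds of exit:

    #include <string>
    void f(bool c) {
      std::string s = "tmp"; // IRGen pushes a cleanup here for ~std::string
      if (c) throw 0;        // exceptional exit: the cleanup runs on the EH path
    }                        // normal exit: the cleanup runs on fallthrough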
18 | | |
19 | | #include "CGCleanup.h" |
20 | | #include "CodeGenFunction.h" |
21 | | #include "llvm/Support/SaveAndRestore.h" |
22 | | |
23 | | using namespace clang; |
24 | | using namespace CodeGen; |
25 | | |
26 | 0 | bool DominatingValue<RValue>::saved_type::needsSaving(RValue rv) { |
27 | 0 | if (rv.isScalar()) |
28 | 0 | return DominatingLLVMValue::needsSaving(rv.getScalarVal()); |
29 | 0 | if (rv.isAggregate()) |
30 | 0 | return DominatingLLVMValue::needsSaving(rv.getAggregatePointer()); |
31 | 0 | return true; |
32 | 0 | } |
33 | | |
34 | | DominatingValue<RValue>::saved_type |
35 | 0 | DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) { |
36 | 0 | if (rv.isScalar()) { |
37 | 0 | llvm::Value *V = rv.getScalarVal(); |
38 | | |
39 | | // These automatically dominate and don't need to be saved. |
40 | 0 | if (!DominatingLLVMValue::needsSaving(V)) |
41 | 0 | return saved_type(V, nullptr, ScalarLiteral); |
42 | | |
43 | | // Everything else needs an alloca. |
44 | 0 | Address addr = |
45 | 0 | CGF.CreateDefaultAlignTempAlloca(V->getType(), "saved-rvalue"); |
46 | 0 | CGF.Builder.CreateStore(V, addr); |
47 | 0 | return saved_type(addr.getPointer(), nullptr, ScalarAddress); |
48 | 0 | } |
49 | | |
50 | 0 | if (rv.isComplex()) { |
51 | 0 | CodeGenFunction::ComplexPairTy V = rv.getComplexVal(); |
52 | 0 | llvm::Type *ComplexTy = |
53 | 0 | llvm::StructType::get(V.first->getType(), V.second->getType()); |
54 | 0 | Address addr = CGF.CreateDefaultAlignTempAlloca(ComplexTy, "saved-complex"); |
55 | 0 | CGF.Builder.CreateStore(V.first, CGF.Builder.CreateStructGEP(addr, 0)); |
56 | 0 | CGF.Builder.CreateStore(V.second, CGF.Builder.CreateStructGEP(addr, 1)); |
57 | 0 | return saved_type(addr.getPointer(), nullptr, ComplexAddress); |
58 | 0 | } |
59 | | |
60 | 0 | assert(rv.isAggregate()); |
61 | 0 | Address V = rv.getAggregateAddress(); // TODO: volatile? |
62 | 0 | if (!DominatingLLVMValue::needsSaving(V.getPointer())) |
63 | 0 | return saved_type(V.getPointer(), V.getElementType(), AggregateLiteral, |
64 | 0 | V.getAlignment().getQuantity()); |
65 | | |
66 | 0 | Address addr = |
67 | 0 | CGF.CreateTempAlloca(V.getType(), CGF.getPointerAlign(), "saved-rvalue"); |
68 | 0 | CGF.Builder.CreateStore(V.getPointer(), addr); |
69 | 0 | return saved_type(addr.getPointer(), V.getElementType(), AggregateAddress, |
70 | 0 | V.getAlignment().getQuantity()); |
71 | 0 | } |
72 | | |
73 | | /// Given a saved r-value produced by SaveRValue, emit the code |
74 | | /// necessary to restore it to usability at the current insertion |
75 | | /// point. |
76 | 0 | RValue DominatingValue<RValue>::saved_type::restore(CodeGenFunction &CGF) { |
77 | 0 | auto getSavingAddress = [&](llvm::Value *value) { |
78 | 0 | auto *AI = cast<llvm::AllocaInst>(value); |
79 | 0 | return Address(value, AI->getAllocatedType(), |
80 | 0 | CharUnits::fromQuantity(AI->getAlign().value())); |
81 | 0 | }; |
82 | 0 | switch (K) { |
83 | 0 | case ScalarLiteral: |
84 | 0 | return RValue::get(Value); |
85 | 0 | case ScalarAddress: |
86 | 0 | return RValue::get(CGF.Builder.CreateLoad(getSavingAddress(Value))); |
87 | 0 | case AggregateLiteral: |
88 | 0 | return RValue::getAggregate( |
89 | 0 | Address(Value, ElementType, CharUnits::fromQuantity(Align))); |
90 | 0 | case AggregateAddress: { |
91 | 0 | auto addr = CGF.Builder.CreateLoad(getSavingAddress(Value)); |
92 | 0 | return RValue::getAggregate( |
93 | 0 | Address(addr, ElementType, CharUnits::fromQuantity(Align))); |
94 | 0 | } |
95 | 0 | case ComplexAddress: { |
96 | 0 | Address address = getSavingAddress(Value); |
97 | 0 | llvm::Value *real = |
98 | 0 | CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(address, 0)); |
99 | 0 | llvm::Value *imag = |
100 | 0 | CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(address, 1)); |
101 | 0 | return RValue::getComplex(real, imag); |
102 | 0 | } |
103 | 0 | } |
104 | | |
105 | 0 | llvm_unreachable("bad saved r-value kind"); |
106 | 0 | } |
107 | | |
108 | | /// Push an entry of the given size onto this protected-scope stack. |
109 | 0 | char *EHScopeStack::allocate(size_t Size) { |
110 | 0 | Size = llvm::alignTo(Size, ScopeStackAlignment); |
111 | 0 | if (!StartOfBuffer) { |
112 | 0 | unsigned Capacity = 1024; |
113 | 0 | while (Capacity < Size) Capacity *= 2; |
114 | 0 | StartOfBuffer = new char[Capacity]; |
115 | 0 | StartOfData = EndOfBuffer = StartOfBuffer + Capacity; |
116 | 0 | } else if (static_cast<size_t>(StartOfData - StartOfBuffer) < Size) { |
117 | 0 | unsigned CurrentCapacity = EndOfBuffer - StartOfBuffer; |
118 | 0 | unsigned UsedCapacity = CurrentCapacity - (StartOfData - StartOfBuffer); |
119 | |
120 | 0 | unsigned NewCapacity = CurrentCapacity; |
121 | 0 | do { |
122 | 0 | NewCapacity *= 2; |
123 | 0 | } while (NewCapacity < UsedCapacity + Size); |
124 | |
125 | 0 | char *NewStartOfBuffer = new char[NewCapacity]; |
126 | 0 | char *NewEndOfBuffer = NewStartOfBuffer + NewCapacity; |
127 | 0 | char *NewStartOfData = NewEndOfBuffer - UsedCapacity; |
128 | 0 | memcpy(NewStartOfData, StartOfData, UsedCapacity); |
129 | 0 | delete [] StartOfBuffer; |
130 | 0 | StartOfBuffer = NewStartOfBuffer; |
131 | 0 | EndOfBuffer = NewEndOfBuffer; |
132 | 0 | StartOfData = NewStartOfData; |
133 | 0 | } |
134 | |
135 | 0 | assert(StartOfBuffer + Size <= StartOfData); |
136 | 0 | StartOfData -= Size; |
137 | 0 | return StartOfData; |
138 | 0 | } |
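In short, the buffer grows downward: allocate() moves StartOfData toward StartOfBuffer and deallocate() moves it back, so the innermost scope always starts at StartOfData. Schematically:

    StartOfBuffer ...free... StartOfData [innermost|...|outermost] EndOfBuffer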
139 | | |
140 | 0 | void EHScopeStack::deallocate(size_t Size) { |
141 | 0 | StartOfData += llvm::alignTo(Size, ScopeStackAlignment); |
142 | 0 | } |
143 | | |
144 | | bool EHScopeStack::containsOnlyLifetimeMarkers( |
145 | 0 | EHScopeStack::stable_iterator Old) const { |
146 | 0 | for (EHScopeStack::iterator it = begin(); stabilize(it) != Old; it++) { |
147 | 0 | EHCleanupScope *cleanup = dyn_cast<EHCleanupScope>(&*it); |
148 | 0 | if (!cleanup || !cleanup->isLifetimeMarker()) |
149 | 0 | return false; |
150 | 0 | } |
151 | | |
152 | 0 | return true; |
153 | 0 | } |
154 | | |
155 | 0 | bool EHScopeStack::requiresLandingPad() const { |
156 | 0 | for (stable_iterator si = getInnermostEHScope(); si != stable_end(); ) { |
157 | | // Skip lifetime markers. |
158 | 0 | if (auto *cleanup = dyn_cast<EHCleanupScope>(&*find(si))) |
159 | 0 | if (cleanup->isLifetimeMarker()) { |
160 | 0 | si = cleanup->getEnclosingEHScope(); |
161 | 0 | continue; |
162 | 0 | } |
163 | 0 | return true; |
164 | 0 | } |
165 | | |
166 | 0 | return false; |
167 | 0 | } |
168 | | |
169 | | EHScopeStack::stable_iterator |
170 | 0 | EHScopeStack::getInnermostActiveNormalCleanup() const { |
171 | 0 | for (stable_iterator si = getInnermostNormalCleanup(), se = stable_end(); |
172 | 0 | si != se; ) { |
173 | 0 | EHCleanupScope &cleanup = cast<EHCleanupScope>(*find(si)); |
174 | 0 | if (cleanup.isActive()) return si; |
175 | 0 | si = cleanup.getEnclosingNormalCleanup(); |
176 | 0 | } |
177 | 0 | return stable_end(); |
178 | 0 | } |
179 | | |
180 | | |
181 | 0 | void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) { |
182 | 0 | char *Buffer = allocate(EHCleanupScope::getSizeForCleanupSize(Size)); |
183 | 0 | bool IsNormalCleanup = Kind & NormalCleanup; |
184 | 0 | bool IsEHCleanup = Kind & EHCleanup; |
185 | 0 | bool IsLifetimeMarker = Kind & LifetimeMarker; |
186 | | |
187 | | // Per C++ [except.terminate], it is implementation-defined whether none, |
188 | | // some, or all cleanups are called before std::terminate. Thus, when |
189 | | // terminate is the current EH scope, we may skip adding any EH cleanup |
190 | | // scopes. |
191 | 0 | if (InnermostEHScope != stable_end() && |
192 | 0 | find(InnermostEHScope)->getKind() == EHScope::Terminate) |
193 | 0 | IsEHCleanup = false; |
194 | |
195 | 0 | EHCleanupScope *Scope = |
196 | 0 | new (Buffer) EHCleanupScope(IsNormalCleanup, |
197 | 0 | IsEHCleanup, |
198 | 0 | Size, |
199 | 0 | BranchFixups.size(), |
200 | 0 | InnermostNormalCleanup, |
201 | 0 | InnermostEHScope); |
202 | 0 | if (IsNormalCleanup) |
203 | 0 | InnermostNormalCleanup = stable_begin(); |
204 | 0 | if (IsEHCleanup) |
205 | 0 | InnermostEHScope = stable_begin(); |
206 | 0 | if (IsLifetimeMarker) |
207 | 0 | Scope->setLifetimeMarker(); |
208 | | |
209 | | // With Windows -EHa, invoke llvm.seh.scope.begin() for EHCleanup. |
210 | | // If exceptions are disabled/ignored and SEH is not in use, then there is no |
211 | | // invoke destination. SEH "works" even if exceptions are off. In practice, |
212 | | // this means that C++ destructors and other EH cleanups don't run, which is |
213 | | // consistent with MSVC's behavior, except in the presence of -EHa. |
214 | | // Check getInvokeDest() to generate llvm.seh.scope.begin() as needed. |
215 | 0 | if (CGF->getLangOpts().EHAsynch && IsEHCleanup && !IsLifetimeMarker && |
216 | 0 | CGF->getTarget().getCXXABI().isMicrosoft() && CGF->getInvokeDest()) |
217 | 0 | CGF->EmitSehCppScopeBegin(); |
218 | |
219 | 0 | return Scope->getCleanupBuffer(); |
220 | 0 | } |
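Callers normally reach this raw allocator through the templated EHScopeStack::pushCleanup<T>() wrapper. A sketch of a typical cleanup under the usual Cleanup interface (CallEndFn and EndFn are invented for illustration):

    struct CallEndFn final : EHScopeStack::Cleanup {
      llvm::FunctionCallee Fn;
      CallEndFn(llvm::FunctionCallee Fn) : Fn(Fn) {}
      void Emit(CodeGenFunction &CGF, Flags flags) override {
        CGF.EmitNounwindRuntimeCall(Fn); // runs whenever the scope is left
      }
    };
    // ...
    CGF.EHStack.pushCleanup<CallEndFn>(NormalAndEHCleanup, EndFn);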
221 | | |
222 | 0 | void EHScopeStack::popCleanup() { |
223 | 0 | assert(!empty() && "popping exception stack when not empty"); |
224 | | |
225 | 0 | assert(isa<EHCleanupScope>(*begin())); |
226 | 0 | EHCleanupScope &Cleanup = cast<EHCleanupScope>(*begin()); |
227 | 0 | InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup(); |
228 | 0 | InnermostEHScope = Cleanup.getEnclosingEHScope(); |
229 | 0 | deallocate(Cleanup.getAllocatedSize()); |
230 | | |
231 | | // Destroy the cleanup. |
232 | 0 | Cleanup.Destroy(); |
233 | | |
234 | | // Check whether we can shrink the branch-fixups stack. |
235 | 0 | if (!BranchFixups.empty()) { |
236 | | // If we no longer have any normal cleanups, all the fixups are |
237 | | // complete. |
238 | 0 | if (!hasNormalCleanups()) |
239 | 0 | BranchFixups.clear(); |
240 | | |
241 | | // Otherwise we can still trim out unnecessary nulls. |
242 | 0 | else |
243 | 0 | popNullFixups(); |
244 | 0 | } |
245 | 0 | } |
246 | | |
247 | 0 | EHFilterScope *EHScopeStack::pushFilter(unsigned numFilters) { |
248 | 0 | assert(getInnermostEHScope() == stable_end()); |
249 | 0 | char *buffer = allocate(EHFilterScope::getSizeForNumFilters(numFilters)); |
250 | 0 | EHFilterScope *filter = new (buffer) EHFilterScope(numFilters); |
251 | 0 | InnermostEHScope = stable_begin(); |
252 | 0 | return filter; |
253 | 0 | } |
254 | | |
255 | 0 | void EHScopeStack::popFilter() { |
256 | 0 | assert(!empty() && "popping exception stack when not empty"); |
257 | | |
258 | 0 | EHFilterScope &filter = cast<EHFilterScope>(*begin()); |
259 | 0 | deallocate(EHFilterScope::getSizeForNumFilters(filter.getNumFilters())); |
260 | |
261 | 0 | InnermostEHScope = filter.getEnclosingEHScope(); |
262 | 0 | } |
263 | | |
264 | 0 | EHCatchScope *EHScopeStack::pushCatch(unsigned numHandlers) { |
265 | 0 | char *buffer = allocate(EHCatchScope::getSizeForNumHandlers(numHandlers)); |
266 | 0 | EHCatchScope *scope = |
267 | 0 | new (buffer) EHCatchScope(numHandlers, InnermostEHScope); |
268 | 0 | InnermostEHScope = stable_begin(); |
269 | 0 | return scope; |
270 | 0 | } |
271 | | |
272 | 0 | void EHScopeStack::pushTerminate() { |
273 | 0 | char *Buffer = allocate(EHTerminateScope::getSize()); |
274 | 0 | new (Buffer) EHTerminateScope(InnermostEHScope); |
275 | 0 | InnermostEHScope = stable_begin(); |
276 | 0 | } |
277 | | |
278 | | /// Remove any 'null' fixups on the stack. However, we can't pop more |
279 | | /// fixups than the fixup depth on the innermost normal cleanup, or |
280 | | /// else fixups that we try to add to that cleanup will end up in the |
281 | | /// wrong place. We *could* try to shrink fixup depths, but that's |
282 | | /// actually a lot of work for little benefit. |
283 | 0 | void EHScopeStack::popNullFixups() { |
284 | | // We expect this to only be called when there's still an innermost |
285 | | // normal cleanup; otherwise there really shouldn't be any fixups. |
286 | 0 | assert(hasNormalCleanups()); |
287 | | |
288 | 0 | EHScopeStack::iterator it = find(InnermostNormalCleanup); |
289 | 0 | unsigned MinSize = cast<EHCleanupScope>(*it).getFixupDepth(); |
290 | 0 | assert(BranchFixups.size() >= MinSize && "fixup stack out of order"); |
291 | | |
292 | 0 | while (BranchFixups.size() > MinSize && |
293 | 0 | BranchFixups.back().Destination == nullptr) |
294 | 0 | BranchFixups.pop_back(); |
295 | 0 | } |
296 | | |
297 | 0 | Address CodeGenFunction::createCleanupActiveFlag() { |
298 | | // Create a variable to decide whether the cleanup needs to be run. |
299 | 0 | Address active = CreateTempAllocaWithoutCast( |
300 | 0 | Builder.getInt1Ty(), CharUnits::One(), "cleanup.cond"); |
301 | | |
302 | | // Initialize it to false at a site that's guaranteed to be run |
303 | | // before each evaluation. |
304 | 0 | setBeforeOutermostConditional(Builder.getFalse(), active); |
305 | | |
306 | | // Initialize it to true at the current location. |
307 | 0 | Builder.CreateStore(Builder.getTrue(), active); |
308 | |
309 | 0 | return active; |
310 | 0 | } |
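Why the flag is needed, with a hypothetical example (T and use() are invented): in `b ? T() : x` the temporary exists only on one branch, so its cleanup must test a flag that is reset before each evaluation and set only where the temporary is actually constructed:

    void g(bool b, const T &x) {
      const T &r = b ? T() : x; // cleanup for T() tests "cleanup.cond"
      use(r);                   // the flag is false unless the true branch ran
    }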
311 | | |
312 | 0 | void CodeGenFunction::initFullExprCleanupWithFlag(Address ActiveFlag) { |
313 | | // Set that as the active flag in the cleanup. |
314 | 0 | EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin()); |
315 | 0 | assert(!cleanup.hasActiveFlag() && "cleanup already has active flag?"); |
316 | 0 | cleanup.setActiveFlag(ActiveFlag); |
317 | |
318 | 0 | if (cleanup.isNormalCleanup()) cleanup.setTestFlagInNormalCleanup(); |
319 | 0 | if (cleanup.isEHCleanup()) cleanup.setTestFlagInEHCleanup(); |
320 | 0 | } |
321 | | |
322 | 0 | void EHScopeStack::Cleanup::anchor() {} |
323 | | |
324 | | static void createStoreInstBefore(llvm::Value *value, Address addr, |
325 | 0 | llvm::Instruction *beforeInst) { |
326 | 0 | auto store = new llvm::StoreInst(value, addr.getPointer(), beforeInst); |
327 | 0 | store->setAlignment(addr.getAlignment().getAsAlign()); |
328 | 0 | } |
329 | | |
330 | | static llvm::LoadInst *createLoadInstBefore(Address addr, const Twine &name, |
331 | 0 | llvm::Instruction *beforeInst) { |
332 | 0 | return new llvm::LoadInst(addr.getElementType(), addr.getPointer(), name, |
333 | 0 | false, addr.getAlignment().getAsAlign(), |
334 | 0 | beforeInst); |
335 | 0 | } |
336 | | |
337 | | /// All the branch fixups on the EH stack have propagated out past the |
338 | | /// outermost normal cleanup; resolve them all by adding cases to the |
339 | | /// given switch instruction. |
340 | | static void ResolveAllBranchFixups(CodeGenFunction &CGF, |
341 | | llvm::SwitchInst *Switch, |
342 | 0 | llvm::BasicBlock *CleanupEntry) { |
343 | 0 | llvm::SmallPtrSet<llvm::BasicBlock*, 4> CasesAdded; |
344 | |
345 | 0 | for (unsigned I = 0, E = CGF.EHStack.getNumBranchFixups(); I != E; ++I) { |
346 | | // Skip this fixup if its destination isn't set. |
347 | 0 | BranchFixup &Fixup = CGF.EHStack.getBranchFixup(I); |
348 | 0 | if (Fixup.Destination == nullptr) continue; |
349 | | |
350 | | // If there isn't an OptimisticBranchBlock, then InitialBranch is |
351 | | // still pointing directly to its destination; forward it to the |
352 | | // appropriate cleanup entry. This is required in the specific |
353 | | // case of |
354 | | // { std::string s; goto lbl; } |
355 | | // lbl: |
356 | | // i.e. where there's an unresolved fixup inside a single cleanup |
357 | | // entry which we're currently popping. |
358 | 0 | if (Fixup.OptimisticBranchBlock == nullptr) { |
359 | 0 | createStoreInstBefore(CGF.Builder.getInt32(Fixup.DestinationIndex), |
360 | 0 | CGF.getNormalCleanupDestSlot(), |
361 | 0 | Fixup.InitialBranch); |
362 | 0 | Fixup.InitialBranch->setSuccessor(0, CleanupEntry); |
363 | 0 | } |
364 | | |
365 | | // Don't add this case to the switch statement twice. |
366 | 0 | if (!CasesAdded.insert(Fixup.Destination).second) |
367 | 0 | continue; |
368 | | |
369 | 0 | Switch->addCase(CGF.Builder.getInt32(Fixup.DestinationIndex), |
370 | 0 | Fixup.Destination); |
371 | 0 | } |
372 | |
373 | 0 | CGF.EHStack.clearFixups(); |
374 | 0 | } |
375 | | |
376 | | /// Transitions the terminator of the given exit-block of a cleanup to |
377 | | /// be a cleanup switch. |
378 | | static llvm::SwitchInst *TransitionToCleanupSwitch(CodeGenFunction &CGF, |
379 | 0 | llvm::BasicBlock *Block) { |
380 | | // If it's a branch, turn it into a switch whose default |
381 | | // destination is its original target. |
382 | 0 | llvm::Instruction *Term = Block->getTerminator(); |
383 | 0 | assert(Term && "can't transition block without terminator"); |
384 | | |
385 | 0 | if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) { |
386 | 0 | assert(Br->isUnconditional()); |
387 | 0 | auto Load = createLoadInstBefore(CGF.getNormalCleanupDestSlot(), |
388 | 0 | "cleanup.dest", Term); |
389 | 0 | llvm::SwitchInst *Switch = |
390 | 0 | llvm::SwitchInst::Create(Load, Br->getSuccessor(0), 4, Block); |
391 | 0 | Br->eraseFromParent(); |
392 | 0 | return Switch; |
393 | 0 | } else { |
394 | 0 | return cast<llvm::SwitchInst>(Term); |
395 | 0 | } |
396 | 0 | } |
397 | | |
398 | 0 | void CodeGenFunction::ResolveBranchFixups(llvm::BasicBlock *Block) { |
399 | 0 | assert(Block && "resolving a null target block"); |
400 | 0 | if (!EHStack.getNumBranchFixups()) return; |
401 | | |
402 | 0 | assert(EHStack.hasNormalCleanups() && |
403 | 0 | "branch fixups exist with no normal cleanups on stack"); |
404 | | |
405 | 0 | llvm::SmallPtrSet<llvm::BasicBlock*, 4> ModifiedOptimisticBlocks; |
406 | 0 | bool ResolvedAny = false; |
407 | |
408 | 0 | for (unsigned I = 0, E = EHStack.getNumBranchFixups(); I != E; ++I) { |
409 | | // Skip this fixup if its destination doesn't match. |
410 | 0 | BranchFixup &Fixup = EHStack.getBranchFixup(I); |
411 | 0 | if (Fixup.Destination != Block) continue; |
412 | | |
413 | 0 | Fixup.Destination = nullptr; |
414 | 0 | ResolvedAny = true; |
415 | | |
416 | | // If it doesn't have an optimistic branch block, LatestBranch is |
417 | | // already pointing to the right place. |
418 | 0 | llvm::BasicBlock *BranchBB = Fixup.OptimisticBranchBlock; |
419 | 0 | if (!BranchBB) |
420 | 0 | continue; |
421 | | |
422 | | // Don't process the same optimistic branch block twice. |
423 | 0 | if (!ModifiedOptimisticBlocks.insert(BranchBB).second) |
424 | 0 | continue; |
425 | | |
426 | 0 | llvm::SwitchInst *Switch = TransitionToCleanupSwitch(*this, BranchBB); |
427 | | |
428 | | // Add a case to the switch. |
429 | 0 | Switch->addCase(Builder.getInt32(Fixup.DestinationIndex), Block); |
430 | 0 | } |
431 | |
432 | 0 | if (ResolvedAny) |
433 | 0 | EHStack.popNullFixups(); |
434 | 0 | } |
435 | | |
436 | | /// Pops cleanup blocks until the given savepoint is reached. |
437 | | void CodeGenFunction::PopCleanupBlocks( |
438 | | EHScopeStack::stable_iterator Old, |
439 | 0 | std::initializer_list<llvm::Value **> ValuesToReload) { |
440 | 0 | assert(Old.isValid()); |
441 | | |
442 | 0 | bool HadBranches = false; |
443 | 0 | while (EHStack.stable_begin() != Old) { |
444 | 0 | EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin()); |
445 | 0 | HadBranches |= Scope.hasBranches(); |
446 | | |
447 | | // As long as Old strictly encloses the scope's enclosing normal |
448 | | // cleanup, we're going to emit another normal cleanup which |
449 | | // fallthrough can propagate through. |
450 | 0 | bool FallThroughIsBranchThrough = |
451 | 0 | Old.strictlyEncloses(Scope.getEnclosingNormalCleanup()); |
452 | |
453 | 0 | PopCleanupBlock(FallThroughIsBranchThrough); |
454 | 0 | } |
455 | | |
456 | | // If we didn't have any branches, the insertion point before cleanups must |
457 | | // dominate the current insertion point and we don't need to reload any |
458 | | // values. |
459 | 0 | if (!HadBranches) |
460 | 0 | return; |
461 | | |
462 | | // Spill and reload all values that the caller wants to be live at the current |
463 | | // insertion point. |
464 | 0 | for (llvm::Value **ReloadedValue : ValuesToReload) { |
465 | 0 | auto *Inst = dyn_cast_or_null<llvm::Instruction>(*ReloadedValue); |
466 | 0 | if (!Inst) |
467 | 0 | continue; |
468 | | |
469 | | // Don't spill static allocas, they dominate all cleanups. These are created |
470 | | // by binding a reference to a local variable or temporary. |
471 | 0 | auto *AI = dyn_cast<llvm::AllocaInst>(Inst); |
472 | 0 | if (AI && AI->isStaticAlloca()) |
473 | 0 | continue; |
474 | | |
475 | 0 | Address Tmp = |
476 | 0 | CreateDefaultAlignTempAlloca(Inst->getType(), "tmp.exprcleanup"); |
477 | | |
478 | | // Find an insertion point after Inst and spill it to the temporary. |
479 | 0 | llvm::BasicBlock::iterator InsertBefore; |
480 | 0 | if (auto *Invoke = dyn_cast<llvm::InvokeInst>(Inst)) |
481 | 0 | InsertBefore = Invoke->getNormalDest()->getFirstInsertionPt(); |
482 | 0 | else |
483 | 0 | InsertBefore = std::next(Inst->getIterator()); |
484 | 0 | CGBuilderTy(CGM, &*InsertBefore).CreateStore(Inst, Tmp); |
485 | | |
486 | | // Reload the value at the current insertion point. |
487 | 0 | *ReloadedValue = Builder.CreateLoad(Tmp); |
488 | 0 | } |
489 | 0 | } |
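The reason for the spill/reload above, briefly: once branches have been threaded through cleanup blocks, the block that defined a value no longer necessarily dominates the post-cleanup insertion point, so each requested value is stored to a fresh alloca immediately after its definition (or after an invoke's normal destination) and loaded again at the current insertion point.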
490 | | |
491 | | /// Pops cleanup blocks until the given savepoint is reached, then add the |
492 | | /// cleanups from the given savepoint in the lifetime-extended cleanups stack. |
493 | | void CodeGenFunction::PopCleanupBlocks( |
494 | | EHScopeStack::stable_iterator Old, size_t OldLifetimeExtendedSize, |
495 | 0 | std::initializer_list<llvm::Value **> ValuesToReload) { |
496 | 0 | PopCleanupBlocks(Old, ValuesToReload); |
497 | | |
498 | | // Move our deferred cleanups onto the EH stack. |
499 | 0 | for (size_t I = OldLifetimeExtendedSize, |
500 | 0 | E = LifetimeExtendedCleanupStack.size(); I != E; /**/) { |
501 | | // Alignment should be guaranteed by the vptrs in the individual cleanups. |
502 | 0 | assert((I % alignof(LifetimeExtendedCleanupHeader) == 0) && |
503 | 0 | "misaligned cleanup stack entry"); |
504 | | |
505 | 0 | LifetimeExtendedCleanupHeader &Header = |
506 | 0 | reinterpret_cast<LifetimeExtendedCleanupHeader&>( |
507 | 0 | LifetimeExtendedCleanupStack[I]); |
508 | 0 | I += sizeof(Header); |
509 | |
510 | 0 | EHStack.pushCopyOfCleanup(Header.getKind(), |
511 | 0 | &LifetimeExtendedCleanupStack[I], |
512 | 0 | Header.getSize()); |
513 | 0 | I += Header.getSize(); |
514 | |
515 | 0 | if (Header.isConditional()) { |
516 | 0 | Address ActiveFlag = |
517 | 0 | reinterpret_cast<Address &>(LifetimeExtendedCleanupStack[I]); |
518 | 0 | initFullExprCleanupWithFlag(ActiveFlag); |
519 | 0 | I += sizeof(ActiveFlag); |
520 | 0 | } |
521 | 0 | } |
522 | 0 | LifetimeExtendedCleanupStack.resize(OldLifetimeExtendedSize); |
523 | 0 | } |
524 | | |
525 | | static llvm::BasicBlock *CreateNormalEntry(CodeGenFunction &CGF, |
526 | 0 | EHCleanupScope &Scope) { |
527 | 0 | assert(Scope.isNormalCleanup()); |
528 | 0 | llvm::BasicBlock *Entry = Scope.getNormalBlock(); |
529 | 0 | if (!Entry) { |
530 | 0 | Entry = CGF.createBasicBlock("cleanup"); |
531 | 0 | Scope.setNormalBlock(Entry); |
532 | 0 | } |
533 | 0 | return Entry; |
534 | 0 | } |
535 | | |
536 | | /// Attempts to reduce a cleanup's entry block to a fallthrough. This |
537 | | /// is basically llvm::MergeBlockIntoPredecessor, except |
538 | | /// simplified/optimized for the tighter constraints on cleanup blocks. |
539 | | /// |
540 | | /// Returns the new block, whatever it is. |
541 | | static llvm::BasicBlock *SimplifyCleanupEntry(CodeGenFunction &CGF, |
542 | 0 | llvm::BasicBlock *Entry) { |
543 | 0 | llvm::BasicBlock *Pred = Entry->getSinglePredecessor(); |
544 | 0 | if (!Pred) return Entry; |
545 | | |
546 | 0 | llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator()); |
547 | 0 | if (!Br || Br->isConditional()) return Entry; |
548 | 0 | assert(Br->getSuccessor(0) == Entry); |
549 | | |
550 | | // If we were previously inserting at the end of the cleanup entry |
551 | | // block, we'll need to continue inserting at the end of the |
552 | | // predecessor. |
553 | 0 | bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry; |
554 | 0 | assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end()); |
555 | | |
556 | | // Kill the branch. |
557 | 0 | Br->eraseFromParent(); |
558 | | |
559 | | // Replace all uses of the entry with the predecessor, in case there |
560 | | // are phis in the cleanup. |
561 | 0 | Entry->replaceAllUsesWith(Pred); |
562 | | |
563 | | // Merge the blocks. |
564 | 0 | Pred->splice(Pred->end(), Entry); |
565 | | |
566 | | // Kill the entry block. |
567 | 0 | Entry->eraseFromParent(); |
568 | |
569 | 0 | if (WasInsertBlock) |
570 | 0 | CGF.Builder.SetInsertPoint(Pred); |
571 | |
572 | 0 | return Pred; |
573 | 0 | } |
574 | | |
575 | | static void EmitCleanup(CodeGenFunction &CGF, |
576 | | EHScopeStack::Cleanup *Fn, |
577 | | EHScopeStack::Cleanup::Flags flags, |
578 | 0 | Address ActiveFlag) { |
579 | | // If there's an active flag, load it and skip the cleanup if it's |
580 | | // false. |
581 | 0 | llvm::BasicBlock *ContBB = nullptr; |
582 | 0 | if (ActiveFlag.isValid()) { |
583 | 0 | ContBB = CGF.createBasicBlock("cleanup.done"); |
584 | 0 | llvm::BasicBlock *CleanupBB = CGF.createBasicBlock("cleanup.action"); |
585 | 0 | llvm::Value *IsActive |
586 | 0 | = CGF.Builder.CreateLoad(ActiveFlag, "cleanup.is_active"); |
587 | 0 | CGF.Builder.CreateCondBr(IsActive, CleanupBB, ContBB); |
588 | 0 | CGF.EmitBlock(CleanupBB); |
589 | 0 | } |
590 | | |
591 | | // Ask the cleanup to emit itself. |
592 | 0 | Fn->Emit(CGF, flags); |
593 | 0 | assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?"); |
594 | | |
595 | | // Emit the continuation block if there was an active flag. |
596 | 0 | if (ActiveFlag.isValid()) |
597 | 0 | CGF.EmitBlock(ContBB); |
598 | 0 | } |
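When an active flag is present, the emitted shape is: load the flag from cleanup.is_active, branch to cleanup.action (the cleanup body) if it is true and directly to cleanup.done otherwise, then continue at cleanup.done. Deactivated executions therefore skip the body entirely.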
599 | | |
600 | | static void ForwardPrebranchedFallthrough(llvm::BasicBlock *Exit, |
601 | | llvm::BasicBlock *From, |
602 | 0 | llvm::BasicBlock *To) { |
603 | | // Exit is the exit block of a cleanup, so it always terminates in |
604 | | // an unconditional branch or a switch. |
605 | 0 | llvm::Instruction *Term = Exit->getTerminator(); |
606 | |
607 | 0 | if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) { |
608 | 0 | assert(Br->isUnconditional() && Br->getSuccessor(0) == From); |
609 | 0 | Br->setSuccessor(0, To); |
610 | 0 | } else { |
611 | 0 | llvm::SwitchInst *Switch = cast<llvm::SwitchInst>(Term); |
612 | 0 | for (unsigned I = 0, E = Switch->getNumSuccessors(); I != E; ++I) |
613 | 0 | if (Switch->getSuccessor(I) == From) |
614 | 0 | Switch->setSuccessor(I, To); |
615 | 0 | } |
616 | 0 | } |
617 | | |
618 | | /// We don't need a normal entry block for the given cleanup. |
619 | | /// Optimistic fixup branches can cause these blocks to come into |
620 | | /// existence anyway; if so, destroy it. |
621 | | /// |
622 | | /// The validity of this transformation is very much specific to the |
623 | | /// exact ways in which we form branches to cleanup entries. |
624 | | static void destroyOptimisticNormalEntry(CodeGenFunction &CGF, |
625 | 0 | EHCleanupScope &scope) { |
626 | 0 | llvm::BasicBlock *entry = scope.getNormalBlock(); |
627 | 0 | if (!entry) return; |
628 | | |
629 | | // Replace all the uses with unreachable. |
630 | 0 | llvm::BasicBlock *unreachableBB = CGF.getUnreachableBlock(); |
631 | 0 | for (llvm::BasicBlock::use_iterator |
632 | 0 | i = entry->use_begin(), e = entry->use_end(); i != e; ) { |
633 | 0 | llvm::Use &use = *i; |
634 | 0 | ++i; |
635 | |
636 | 0 | use.set(unreachableBB); |
637 | | |
638 | | // The only uses should be fixup switches. |
639 | 0 | llvm::SwitchInst *si = cast<llvm::SwitchInst>(use.getUser()); |
640 | 0 | if (si->getNumCases() == 1 && si->getDefaultDest() == unreachableBB) { |
641 | | // Replace the switch with a branch. |
642 | 0 | llvm::BranchInst::Create(si->case_begin()->getCaseSuccessor(), si); |
643 | | |
644 | | // The switch operand is a load from the cleanup-dest alloca. |
645 | 0 | llvm::LoadInst *condition = cast<llvm::LoadInst>(si->getCondition()); |
646 | | |
647 | | // Destroy the switch. |
648 | 0 | si->eraseFromParent(); |
649 | | |
650 | | // Destroy the load. |
651 | 0 | assert(condition->getOperand(0) == CGF.NormalCleanupDest.getPointer()); |
652 | 0 | assert(condition->use_empty()); |
653 | 0 | condition->eraseFromParent(); |
654 | 0 | } |
655 | 0 | } |
656 | |
657 | 0 | assert(entry->use_empty()); |
658 | 0 | delete entry; |
659 | 0 | } |
660 | | |
661 | | /// Pops a cleanup block. If the block includes a normal cleanup, the |
662 | | /// current insertion point is threaded through the cleanup, as are |
663 | | /// any branch fixups on the cleanup. |
664 | 0 | void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { |
665 | 0 | assert(!EHStack.empty() && "cleanup stack is empty!"); |
666 | 0 | assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!"); |
667 | 0 | EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin()); |
668 | 0 | assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups()); |
669 | | |
670 | | // Remember activation information. |
671 | 0 | bool IsActive = Scope.isActive(); |
672 | 0 | Address NormalActiveFlag = |
673 | 0 | Scope.shouldTestFlagInNormalCleanup() ? Scope.getActiveFlag() |
674 | 0 | : Address::invalid(); |
675 | 0 | Address EHActiveFlag = |
676 | 0 | Scope.shouldTestFlagInEHCleanup() ? Scope.getActiveFlag() |
677 | 0 | : Address::invalid(); |
678 | | |
679 | | // Check whether we need an EH cleanup. This is only true if we've |
680 | | // generated a lazy EH cleanup block. |
681 | 0 | llvm::BasicBlock *EHEntry = Scope.getCachedEHDispatchBlock(); |
682 | 0 | assert(Scope.hasEHBranches() == (EHEntry != nullptr)); |
683 | 0 | bool RequiresEHCleanup = (EHEntry != nullptr); |
684 | 0 | EHScopeStack::stable_iterator EHParent = Scope.getEnclosingEHScope(); |
685 | | |
686 | | // Check the three conditions which might require a normal cleanup: |
687 | | |
688 | | // - whether there are branch fix-ups through this cleanup |
689 | 0 | unsigned FixupDepth = Scope.getFixupDepth(); |
690 | 0 | bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth; |
691 | | |
692 | | // - whether there are branch-throughs or branch-afters |
693 | 0 | bool HasExistingBranches = Scope.hasBranches(); |
694 | | |
695 | | // - whether there's a fallthrough |
696 | 0 | llvm::BasicBlock *FallthroughSource = Builder.GetInsertBlock(); |
697 | 0 | bool HasFallthrough = (FallthroughSource != nullptr && IsActive); |
698 | | |
699 | | // Branch-through fall-throughs leave the insertion point set to the |
700 | | // end of the last cleanup, which points to the current scope. The |
701 | | // rest of IR gen doesn't need to worry about this; it only happens |
702 | | // during the execution of PopCleanupBlocks(). |
703 | 0 | bool HasPrebranchedFallthrough = |
704 | 0 | (FallthroughSource && FallthroughSource->getTerminator()); |
705 | | |
706 | | // If this is a normal cleanup, then having a prebranched |
707 | | // fallthrough implies that the fallthrough source unconditionally |
708 | | // jumps here. |
709 | 0 | assert(!Scope.isNormalCleanup() || !HasPrebranchedFallthrough || |
710 | 0 | (Scope.getNormalBlock() && |
711 | 0 | FallthroughSource->getTerminator()->getSuccessor(0) |
712 | 0 | == Scope.getNormalBlock())); |
713 | | |
714 | 0 | bool RequiresNormalCleanup = false; |
715 | 0 | if (Scope.isNormalCleanup() && |
716 | 0 | (HasFixups || HasExistingBranches || HasFallthrough)) { |
717 | 0 | RequiresNormalCleanup = true; |
718 | 0 | } |
719 | | |
720 | | // If we have a prebranched fallthrough into an inactive normal |
721 | | // cleanup, rewrite it so that it leads to the appropriate place. |
722 | 0 | if (Scope.isNormalCleanup() && HasPrebranchedFallthrough && !IsActive) { |
723 | 0 | llvm::BasicBlock *prebranchDest; |
724 | | |
725 | | // If the prebranch is semantically branching through the next |
726 | | // cleanup, just forward it to the next block, leaving the |
727 | | // insertion point in the prebranched block. |
728 | 0 | if (FallthroughIsBranchThrough) { |
729 | 0 | EHScope &enclosing = *EHStack.find(Scope.getEnclosingNormalCleanup()); |
730 | 0 | prebranchDest = CreateNormalEntry(*this, cast<EHCleanupScope>(enclosing)); |
731 | | |
732 | | // Otherwise, we need to make a new block. If the normal cleanup |
733 | | // isn't being used at all, we could actually reuse the normal |
734 | | // entry block, but this is simpler, and it avoids conflicts with |
735 | | // dead optimistic fixup branches. |
736 | 0 | } else { |
737 | 0 | prebranchDest = createBasicBlock("forwarded-prebranch"); |
738 | 0 | EmitBlock(prebranchDest); |
739 | 0 | } |
740 | |
741 | 0 | llvm::BasicBlock *normalEntry = Scope.getNormalBlock(); |
742 | 0 | assert(normalEntry && !normalEntry->use_empty()); |
743 | | |
744 | 0 | ForwardPrebranchedFallthrough(FallthroughSource, |
745 | 0 | normalEntry, prebranchDest); |
746 | 0 | } |
747 | | |
748 | | // If we don't need the cleanup at all, we're done. |
749 | 0 | if (!RequiresNormalCleanup && !RequiresEHCleanup) { |
750 | 0 | destroyOptimisticNormalEntry(*this, Scope); |
751 | 0 | EHStack.popCleanup(); // safe because there are no fixups |
752 | 0 | assert(EHStack.getNumBranchFixups() == 0 || |
753 | 0 | EHStack.hasNormalCleanups()); |
754 | 0 | return; |
755 | 0 | } |
756 | | |
757 | | // Copy the cleanup emission data out. This uses either a stack |
758 | | // array or malloc'd memory, depending on the size, which is |
759 | | // behavior that SmallVector would provide, if we could use it |
760 | | // here. Unfortunately, if you ask for a SmallVector<char>, the |
761 | | // alignment isn't sufficient. |
762 | 0 | auto *CleanupSource = reinterpret_cast<char *>(Scope.getCleanupBuffer()); |
763 | 0 | alignas(EHScopeStack::ScopeStackAlignment) char |
764 | 0 | CleanupBufferStack[8 * sizeof(void *)]; |
765 | 0 | std::unique_ptr<char[]> CleanupBufferHeap; |
766 | 0 | size_t CleanupSize = Scope.getCleanupSize(); |
767 | 0 | EHScopeStack::Cleanup *Fn; |
768 | |
769 | 0 | if (CleanupSize <= sizeof(CleanupBufferStack)) { |
770 | 0 | memcpy(CleanupBufferStack, CleanupSource, CleanupSize); |
771 | 0 | Fn = reinterpret_cast<EHScopeStack::Cleanup *>(CleanupBufferStack); |
772 | 0 | } else { |
773 | 0 | CleanupBufferHeap.reset(new char[CleanupSize]); |
774 | 0 | memcpy(CleanupBufferHeap.get(), CleanupSource, CleanupSize); |
775 | 0 | Fn = reinterpret_cast<EHScopeStack::Cleanup *>(CleanupBufferHeap.get()); |
776 | 0 | } |
777 | |
778 | 0 | EHScopeStack::Cleanup::Flags cleanupFlags; |
779 | 0 | if (Scope.isNormalCleanup()) |
780 | 0 | cleanupFlags.setIsNormalCleanupKind(); |
781 | 0 | if (Scope.isEHCleanup()) |
782 | 0 | cleanupFlags.setIsEHCleanupKind(); |
783 | | |
784 | | // Under -EHa, invoke seh.scope.end() to mark scope end before dtor |
785 | 0 | bool IsEHa = getLangOpts().EHAsynch && !Scope.isLifetimeMarker(); |
786 | 0 | const EHPersonality &Personality = EHPersonality::get(*this); |
787 | 0 | if (!RequiresNormalCleanup) { |
788 | | // Mark the C++ scope end for a passed-by-value argument temporary, |
789 | | // which under the Windows ABI is "normally" cleaned up in the callee. |
790 | 0 | if (IsEHa && getInvokeDest() && Builder.GetInsertBlock()) { |
791 | 0 | if (Personality.isMSVCXXPersonality()) |
792 | 0 | EmitSehCppScopeEnd(); |
793 | 0 | } |
794 | 0 | destroyOptimisticNormalEntry(*this, Scope); |
795 | 0 | EHStack.popCleanup(); |
796 | 0 | } else { |
797 | | // If we have a fallthrough and no other need for the cleanup, |
798 | | // emit it directly. |
799 | 0 | if (HasFallthrough && !HasPrebranchedFallthrough && !HasFixups && |
800 | 0 | !HasExistingBranches) { |
801 | | |
802 | | // mark SEH scope end for fall-through flow |
803 | 0 | if (IsEHa && getInvokeDest()) { |
804 | 0 | if (Personality.isMSVCXXPersonality()) |
805 | 0 | EmitSehCppScopeEnd(); |
806 | 0 | else |
807 | 0 | EmitSehTryScopeEnd(); |
808 | 0 | } |
809 | |
810 | 0 | destroyOptimisticNormalEntry(*this, Scope); |
811 | 0 | EHStack.popCleanup(); |
812 | |
813 | 0 | EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag); |
814 | | |
815 | | // Otherwise, the best approach is to thread everything through |
816 | | // the cleanup block and then try to clean up after ourselves. |
817 | 0 | } else { |
818 | | // Force the entry block to exist. |
819 | 0 | llvm::BasicBlock *NormalEntry = CreateNormalEntry(*this, Scope); |
820 | | |
821 | | // I. Set up the fallthrough edge in. |
822 | |
823 | 0 | CGBuilderTy::InsertPoint savedInactiveFallthroughIP; |
824 | | |
825 | | // If there's a fallthrough, we need to store the cleanup |
826 | | // destination index. For fall-throughs this is always zero. |
827 | 0 | if (HasFallthrough) { |
828 | 0 | if (!HasPrebranchedFallthrough) |
829 | 0 | Builder.CreateStore(Builder.getInt32(0), getNormalCleanupDestSlot()); |
830 | | |
831 | | // Otherwise, save and clear the IP if we don't have fallthrough |
832 | | // because the cleanup is inactive. |
833 | 0 | } else if (FallthroughSource) { |
834 | 0 | assert(!IsActive && "source without fallthrough for active cleanup"); |
835 | 0 | savedInactiveFallthroughIP = Builder.saveAndClearIP(); |
836 | 0 | } |
837 | | |
838 | | // II. Emit the entry block. This implicitly branches to it if |
839 | | // we have fallthrough. All the fixups and existing branches |
840 | | // should already be branched to it. |
841 | 0 | EmitBlock(NormalEntry); |
842 | | |
843 | | // intercept normal cleanup to mark SEH scope end |
844 | 0 | if (IsEHa && getInvokeDest()) { |
845 | 0 | if (Personality.isMSVCXXPersonality()) |
846 | 0 | EmitSehCppScopeEnd(); |
847 | 0 | else |
848 | 0 | EmitSehTryScopeEnd(); |
849 | 0 | } |
850 | | |
851 | | // III. Figure out where we're going and build the cleanup |
852 | | // epilogue. |
853 | |
854 | 0 | bool HasEnclosingCleanups = |
855 | 0 | (Scope.getEnclosingNormalCleanup() != EHStack.stable_end()); |
856 | | |
857 | | // Compute the branch-through dest if we need it: |
858 | | // - if there are branch-throughs threaded through the scope |
859 | | // - if fall-through is a branch-through |
860 | | // - if there are fixups that will be optimistically forwarded |
861 | | // to the enclosing cleanup |
862 | 0 | llvm::BasicBlock *BranchThroughDest = nullptr; |
863 | 0 | if (Scope.hasBranchThroughs() || |
864 | 0 | (FallthroughSource && FallthroughIsBranchThrough) || |
865 | 0 | (HasFixups && HasEnclosingCleanups)) { |
866 | 0 | assert(HasEnclosingCleanups); |
867 | 0 | EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup()); |
868 | 0 | BranchThroughDest = CreateNormalEntry(*this, cast<EHCleanupScope>(S)); |
869 | 0 | } |
870 | | |
871 | 0 | llvm::BasicBlock *FallthroughDest = nullptr; |
872 | 0 | SmallVector<llvm::Instruction*, 2> InstsToAppend; |
873 | | |
874 | | // If there's exactly one branch-after and no other threads, |
875 | | // we can route it without a switch. |
876 | | // Skip this for SEH, since the exit switch is used to generate code |
877 | | // indicating abnormal termination. (In SEH, every exit from a _try |
878 | | // other than _leave or fall-through at the end, i.e. |
879 | | // return/goto/continue/break, is considered an abnormal termination |
880 | | // and uses NormalCleanupDestSlot to indicate that.) |
881 | 0 | if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough && |
882 | 0 | !currentFunctionUsesSEHTry() && Scope.getNumBranchAfters() == 1) { |
883 | 0 | assert(!BranchThroughDest || !IsActive); |
884 | | |
885 | | // Clean up the possibly dead store to the cleanup dest slot. |
886 | 0 | llvm::Instruction *NormalCleanupDestSlot = |
887 | 0 | cast<llvm::Instruction>(getNormalCleanupDestSlot().getPointer()); |
888 | 0 | if (NormalCleanupDestSlot->hasOneUse()) { |
889 | 0 | NormalCleanupDestSlot->user_back()->eraseFromParent(); |
890 | 0 | NormalCleanupDestSlot->eraseFromParent(); |
891 | 0 | NormalCleanupDest = Address::invalid(); |
892 | 0 | } |
893 | |
894 | 0 | llvm::BasicBlock *BranchAfter = Scope.getBranchAfterBlock(0); |
895 | 0 | InstsToAppend.push_back(llvm::BranchInst::Create(BranchAfter)); |
896 | | |
897 | | // Build a switch-out if we need it: |
898 | | // - if there are branch-afters threaded through the scope |
899 | | // - if fall-through is a branch-after |
900 | | // - if there are fixups that have nowhere left to go and |
901 | | // so must be immediately resolved |
902 | 0 | } else if (Scope.getNumBranchAfters() || |
903 | 0 | (HasFallthrough && !FallthroughIsBranchThrough) || |
904 | 0 | (HasFixups && !HasEnclosingCleanups)) { |
905 | |
906 | 0 | llvm::BasicBlock *Default = |
907 | 0 | (BranchThroughDest ? BranchThroughDest : getUnreachableBlock()); |
908 | | |
909 | | // TODO: base this on the number of branch-afters and fixups |
910 | 0 | const unsigned SwitchCapacity = 10; |
911 | | |
912 | | // pass the abnormal exit flag to Fn (SEH cleanup) |
913 | 0 | cleanupFlags.setHasExitSwitch(); |
914 | |
915 | 0 | llvm::LoadInst *Load = |
916 | 0 | createLoadInstBefore(getNormalCleanupDestSlot(), "cleanup.dest", |
917 | 0 | nullptr); |
918 | 0 | llvm::SwitchInst *Switch = |
919 | 0 | llvm::SwitchInst::Create(Load, Default, SwitchCapacity); |
920 | |
921 | 0 | InstsToAppend.push_back(Load); |
922 | 0 | InstsToAppend.push_back(Switch); |
923 | | |
924 | | // Branch-after fallthrough. |
925 | 0 | if (FallthroughSource && !FallthroughIsBranchThrough) { |
926 | 0 | FallthroughDest = createBasicBlock("cleanup.cont"); |
927 | 0 | if (HasFallthrough) |
928 | 0 | Switch->addCase(Builder.getInt32(0), FallthroughDest); |
929 | 0 | } |
930 | |
931 | 0 | for (unsigned I = 0, E = Scope.getNumBranchAfters(); I != E; ++I) { |
932 | 0 | Switch->addCase(Scope.getBranchAfterIndex(I), |
933 | 0 | Scope.getBranchAfterBlock(I)); |
934 | 0 | } |
935 | | |
936 | | // If there aren't any enclosing cleanups, we can resolve all |
937 | | // the fixups now. |
938 | 0 | if (HasFixups && !HasEnclosingCleanups) |
939 | 0 | ResolveAllBranchFixups(*this, Switch, NormalEntry); |
940 | 0 | } else { |
941 | | // We should always have a branch-through destination in this case. |
942 | 0 | assert(BranchThroughDest); |
943 | 0 | InstsToAppend.push_back(llvm::BranchInst::Create(BranchThroughDest)); |
944 | 0 | } |
945 | | |
946 | | // IV. Pop the cleanup and emit it. |
947 | 0 | EHStack.popCleanup(); |
948 | 0 | assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups); |
949 | | |
950 | 0 | EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag); |
951 | | |
952 | | // Append the prepared cleanup prologue from above. |
953 | 0 | llvm::BasicBlock *NormalExit = Builder.GetInsertBlock(); |
954 | 0 | for (unsigned I = 0, E = InstsToAppend.size(); I != E; ++I) |
955 | 0 | InstsToAppend[I]->insertInto(NormalExit, NormalExit->end()); |
956 | | |
957 | | // Optimistically hope that any fixups will continue falling through. |
958 | 0 | for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups(); |
959 | 0 | I < E; ++I) { |
960 | 0 | BranchFixup &Fixup = EHStack.getBranchFixup(I); |
961 | 0 | if (!Fixup.Destination) continue; |
962 | 0 | if (!Fixup.OptimisticBranchBlock) { |
963 | 0 | createStoreInstBefore(Builder.getInt32(Fixup.DestinationIndex), |
964 | 0 | getNormalCleanupDestSlot(), |
965 | 0 | Fixup.InitialBranch); |
966 | 0 | Fixup.InitialBranch->setSuccessor(0, NormalEntry); |
967 | 0 | } |
968 | 0 | Fixup.OptimisticBranchBlock = NormalExit; |
969 | 0 | } |
970 | | |
971 | | // V. Set up the fallthrough edge out. |
972 | | |
973 | | // Case 1: a fallthrough source exists but doesn't branch to the |
974 | | // cleanup because the cleanup is inactive. |
975 | 0 | if (!HasFallthrough && FallthroughSource) { |
976 | | // Prebranched fallthrough was forwarded earlier. |
977 | | // Non-prebranched fallthrough doesn't need to be forwarded. |
978 | | // Either way, all we need to do is restore the IP we cleared before. |
979 | 0 | assert(!IsActive); |
980 | 0 | Builder.restoreIP(savedInactiveFallthroughIP); |
981 | | |
982 | | // Case 2: a fallthrough source exists and should branch to the |
983 | | // cleanup, but we're not supposed to branch through to the next |
984 | | // cleanup. |
985 | 0 | } else if (HasFallthrough && FallthroughDest) { |
986 | 0 | assert(!FallthroughIsBranchThrough); |
987 | 0 | EmitBlock(FallthroughDest); |
988 | | |
989 | | // Case 3: a fallthrough source exists and should branch to the |
990 | | // cleanup and then through to the next. |
991 | 0 | } else if (HasFallthrough) { |
992 | | // Everything is already set up for this. |
993 | | |
994 | | // Case 4: no fallthrough source exists. |
995 | 0 | } else { |
996 | 0 | Builder.ClearInsertionPoint(); |
997 | 0 | } |
998 | | |
999 | | // VI. Assorted cleaning. |
1000 | | |
1001 | | // Check whether we can merge NormalEntry into a single predecessor. |
1002 | | // This might invalidate (non-IR) pointers to NormalEntry. |
1003 | 0 | llvm::BasicBlock *NewNormalEntry = |
1004 | 0 | SimplifyCleanupEntry(*this, NormalEntry); |
1005 | | |
1006 | | // If it did invalidate those pointers, and NormalEntry was the same |
1007 | | // as NormalExit, go back and patch up the fixups. |
1008 | 0 | if (NewNormalEntry != NormalEntry && NormalEntry == NormalExit) |
1009 | 0 | for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups(); |
1010 | 0 | I < E; ++I) |
1011 | 0 | EHStack.getBranchFixup(I).OptimisticBranchBlock = NewNormalEntry; |
1012 | 0 | } |
1013 | 0 | } |
1014 | | |
1015 | 0 | assert(EHStack.hasNormalCleanups() || EHStack.getNumBranchFixups() == 0); |
1016 | | |
1017 | | // Emit the EH cleanup if required. |
1018 | 0 | if (RequiresEHCleanup) { |
1019 | 0 | CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP(); |
1020 | |
1021 | 0 | EmitBlock(EHEntry); |
1022 | |
1023 | 0 | llvm::BasicBlock *NextAction = getEHDispatchBlock(EHParent); |
1024 | | |
1025 | | // Push a terminate scope or cleanupendpad scope around the potentially |
1026 | | // throwing cleanups. For funclet EH personalities, the cleanupendpad models |
1027 | | // program termination when cleanups throw. |
1028 | 0 | bool PushedTerminate = false; |
1029 | 0 | SaveAndRestore RestoreCurrentFuncletPad(CurrentFuncletPad); |
1030 | 0 | llvm::CleanupPadInst *CPI = nullptr; |
1031 | |
1032 | 0 | const EHPersonality &Personality = EHPersonality::get(*this); |
1033 | 0 | if (Personality.usesFuncletPads()) { |
1034 | 0 | llvm::Value *ParentPad = CurrentFuncletPad; |
1035 | 0 | if (!ParentPad) |
1036 | 0 | ParentPad = llvm::ConstantTokenNone::get(CGM.getLLVMContext()); |
1037 | 0 | CurrentFuncletPad = CPI = Builder.CreateCleanupPad(ParentPad); |
1038 | 0 | } |
1039 | | |
1040 | | // Non-MSVC personalities need to terminate when an EH cleanup throws. |
1041 | 0 | if (!Personality.isMSVCPersonality()) { |
1042 | 0 | EHStack.pushTerminate(); |
1043 | 0 | PushedTerminate = true; |
1044 | 0 | } else if (IsEHa && getInvokeDest()) { |
1045 | 0 | EmitSehCppScopeEnd(); |
1046 | 0 | } |
1047 | | |
1048 | | // We only actually emit the cleanup code if the cleanup is either |
1049 | | // active or was used before it was deactivated. |
1050 | 0 | if (EHActiveFlag.isValid() || IsActive) { |
1051 | 0 | cleanupFlags.setIsForEHCleanup(); |
1052 | 0 | EmitCleanup(*this, Fn, cleanupFlags, EHActiveFlag); |
1053 | 0 | } |
1054 | |
1055 | 0 | if (CPI) |
1056 | 0 | Builder.CreateCleanupRet(CPI, NextAction); |
1057 | 0 | else |
1058 | 0 | Builder.CreateBr(NextAction); |
1059 | | |
1060 | | // Leave the terminate scope. |
1061 | 0 | if (PushedTerminate) |
1062 | 0 | EHStack.popTerminate(); |
1063 | |
1064 | 0 | Builder.restoreIP(SavedIP); |
1065 | |
1066 | 0 | SimplifyCleanupEntry(*this, EHEntry); |
1067 | 0 | } |
1068 | 0 | } |
1069 | | |
1070 | | /// isObviouslyBranchWithoutCleanups - Return true if a branch to the |
1071 | | /// specified destination obviously has no cleanups to run. 'false' is always |
1072 | | /// a conservatively correct answer for this method. |
1073 | 0 | bool CodeGenFunction::isObviouslyBranchWithoutCleanups(JumpDest Dest) const { |
1074 | 0 | assert(Dest.getScopeDepth().encloses(EHStack.stable_begin()) |
1075 | 0 | && "stale jump destination"); |
1076 | | |
1077 | | // Calculate the innermost active normal cleanup. |
1078 | 0 | EHScopeStack::stable_iterator TopCleanup = |
1079 | 0 | EHStack.getInnermostActiveNormalCleanup(); |
1080 | | |
1081 | | // If we're not in an active normal cleanup scope, or if the |
1082 | | // destination scope is within the innermost active normal cleanup |
1083 | | // scope, we don't need to worry about fixups. |
1084 | 0 | if (TopCleanup == EHStack.stable_end() || |
1085 | 0 | TopCleanup.encloses(Dest.getScopeDepth())) // works for invalid |
1086 | 0 | return true; |
1087 | | |
1088 | | // Otherwise, we might need some cleanups. |
1089 | 0 | return false; |
1090 | 0 | } |
1091 | | |
1092 | | |
1093 | | /// Terminate the current block by emitting a branch which might leave |
1094 | | /// the current cleanup-protected scope. The target scope may not yet |
1095 | | /// be known, in which case this will require a fixup. |
1096 | | /// |
1097 | | /// As a side-effect, this method clears the insertion point. |
1098 | 0 | void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) { |
1099 | 0 | assert(Dest.getScopeDepth().encloses(EHStack.stable_begin()) |
1100 | 0 | && "stale jump destination"); |
1101 | | |
1102 | 0 | if (!HaveInsertPoint()) |
1103 | 0 | return; |
1104 | | |
1105 | | // Create the branch. |
1106 | 0 | llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock()); |
1107 | | |
1108 | | // Calculate the innermost active normal cleanup. |
1109 | 0 | EHScopeStack::stable_iterator |
1110 | 0 | TopCleanup = EHStack.getInnermostActiveNormalCleanup(); |
1111 | | |
1112 | | // If we're not in an active normal cleanup scope, or if the |
1113 | | // destination scope is within the innermost active normal cleanup |
1114 | | // scope, we don't need to worry about fixups. |
1115 | 0 | if (TopCleanup == EHStack.stable_end() || |
1116 | 0 | TopCleanup.encloses(Dest.getScopeDepth())) { // works for invalid |
1117 | 0 | Builder.ClearInsertionPoint(); |
1118 | 0 | return; |
1119 | 0 | } |
1120 | | |
1121 | | // If we can't resolve the destination cleanup scope, just add this |
1122 | | // to the current cleanup scope as a branch fixup. |
1123 | 0 | if (!Dest.getScopeDepth().isValid()) { |
1124 | 0 | BranchFixup &Fixup = EHStack.addBranchFixup(); |
1125 | 0 | Fixup.Destination = Dest.getBlock(); |
1126 | 0 | Fixup.DestinationIndex = Dest.getDestIndex(); |
1127 | 0 | Fixup.InitialBranch = BI; |
1128 | 0 | Fixup.OptimisticBranchBlock = nullptr; |
1129 | |
1130 | 0 | Builder.ClearInsertionPoint(); |
1131 | 0 | return; |
1132 | 0 | } |
1133 | | |
1134 | | // Otherwise, thread through all the normal cleanups in scope. |
1135 | | |
1136 | | // Store the index at the start. |
1137 | 0 | llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex()); |
1138 | 0 | createStoreInstBefore(Index, getNormalCleanupDestSlot(), BI); |
1139 | | |
1140 | | // Adjust BI to point to the first cleanup block. |
1141 | 0 | { |
1142 | 0 | EHCleanupScope &Scope = |
1143 | 0 | cast<EHCleanupScope>(*EHStack.find(TopCleanup)); |
1144 | 0 | BI->setSuccessor(0, CreateNormalEntry(*this, Scope)); |
1145 | 0 | } |
1146 | | |
1147 | | // Add this destination to all the scopes involved. |
1148 | 0 | EHScopeStack::stable_iterator I = TopCleanup; |
1149 | 0 | EHScopeStack::stable_iterator E = Dest.getScopeDepth(); |
1150 | 0 | if (E.strictlyEncloses(I)) { |
1151 | 0 | while (true) { |
1152 | 0 | EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I)); |
1153 | 0 | assert(Scope.isNormalCleanup()); |
1154 | 0 | I = Scope.getEnclosingNormalCleanup(); |
1155 | | |
1156 | | // If this is the last cleanup we're propagating through, tell it |
1157 | | // that there's a resolved jump moving through it. |
1158 | 0 | if (!E.strictlyEncloses(I)) { |
1159 | 0 | Scope.addBranchAfter(Index, Dest.getBlock()); |
1160 | 0 | break; |
1161 | 0 | } |
1162 | | |
1163 | | // Otherwise, tell the scope that there's a jump propagating |
1164 | | // through it. If this isn't new information, all the rest of |
1165 | | // the work has been done before. |
1166 | 0 | if (!Scope.addBranchThrough(Dest.getBlock())) |
1167 | 0 | break; |
1168 | 0 | } |
1169 | 0 | } |
1170 | |
1171 | 0 | Builder.ClearInsertionPoint(); |
1172 | 0 | } |
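A hypothetical input that exercises this threading: the break below must first run ~std::string(), so the branch stores its destination index into the cleanup destination slot, jumps into the cleanup, and lets the cleanup's exit switch route control onward:

    #include <string>
    bool cond();
    void h() {
      while (true) {
        std::string s;     // normal cleanup for ~std::string
        if (cond()) break; // branches through the cleanup to the loop exit
      }
    }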
1173 | | |
1174 | | static bool IsUsedAsNormalCleanup(EHScopeStack &EHStack, |
1175 | 0 | EHScopeStack::stable_iterator C) { |
1176 | | // If we needed a normal block for any reason, that counts. |
1177 | 0 | if (cast<EHCleanupScope>(*EHStack.find(C)).getNormalBlock()) |
1178 | 0 | return true; |
1179 | | |
1180 | | // Check whether any enclosed cleanups were needed. |
1181 | 0 | for (EHScopeStack::stable_iterator |
1182 | 0 | I = EHStack.getInnermostNormalCleanup(); |
1183 | 0 | I != C; ) { |
1184 | 0 | assert(C.strictlyEncloses(I)); |
1185 | 0 | EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I)); |
1186 | 0 | if (S.getNormalBlock()) return true; |
1187 | 0 | I = S.getEnclosingNormalCleanup(); |
1188 | 0 | } |
1189 | | |
1190 | 0 | return false; |
1191 | 0 | } |
1192 | | |
1193 | | static bool IsUsedAsEHCleanup(EHScopeStack &EHStack, |
1194 | 0 | EHScopeStack::stable_iterator cleanup) { |
1195 | | // If we needed an EH block for any reason, that counts. |
1196 | 0 | if (EHStack.find(cleanup)->hasEHBranches()) |
1197 | 0 | return true; |
1198 | | |
1199 | | // Check whether any enclosed cleanups were needed. |
1200 | 0 | for (EHScopeStack::stable_iterator |
1201 | 0 | i = EHStack.getInnermostEHScope(); i != cleanup; ) { |
1202 | 0 | assert(cleanup.strictlyEncloses(i)); |
1203 | | |
1204 | 0 | EHScope &scope = *EHStack.find(i); |
1205 | 0 | if (scope.hasEHBranches()) |
1206 | 0 | return true; |
1207 | | |
1208 | 0 | i = scope.getEnclosingEHScope(); |
1209 | 0 | } |
1210 | | |
1211 | 0 | return false; |
1212 | 0 | } |
1213 | | |
1214 | | enum ForActivation_t { |
1215 | | ForActivation, |
1216 | | ForDeactivation |
1217 | | }; |
1218 | | |
1219 | | /// The given cleanup block is changing activation state. Configure a |
1220 | | /// cleanup variable if necessary. |
1221 | | /// |
1222 | | /// It would be good if we had some way of determining if there were |
1223 | | /// extra uses *after* the change-over point. |
1224 | | static void SetupCleanupBlockActivation(CodeGenFunction &CGF, |
1225 | | EHScopeStack::stable_iterator C, |
1226 | | ForActivation_t kind, |
1227 | 0 | llvm::Instruction *dominatingIP) { |
1228 | 0 | EHCleanupScope &Scope = cast<EHCleanupScope>(*CGF.EHStack.find(C)); |
1229 | | |
1230 | | // We always need the flag if we're activating the cleanup in a |
1231 | | // conditional context, because we have to assume that the current |
1232 | | // location doesn't necessarily dominate the cleanup's code. |
1233 | 0 | bool isActivatedInConditional = |
1234 | 0 | (kind == ForActivation && CGF.isInConditionalBranch()); |
1235 | |
1236 | 0 | bool needFlag = false; |
1237 | | |
1238 | | // Calculate whether the cleanup was used: |
1239 | | |
1240 | | // - as a normal cleanup |
1241 | 0 | if (Scope.isNormalCleanup() && |
1242 | 0 | (isActivatedInConditional || IsUsedAsNormalCleanup(CGF.EHStack, C))) { |
1243 | 0 | Scope.setTestFlagInNormalCleanup(); |
1244 | 0 | needFlag = true; |
1245 | 0 | } |
1246 | | |
1247 | | // - as an EH cleanup |
1248 | 0 | if (Scope.isEHCleanup() && |
1249 | 0 | (isActivatedInConditional || IsUsedAsEHCleanup(CGF.EHStack, C))) { |
1250 | 0 | Scope.setTestFlagInEHCleanup(); |
1251 | 0 | needFlag = true; |
1252 | 0 | } |
1253 | | |
1254 | | // If it hasn't yet been used as either, we're done. |
1255 | 0 | if (!needFlag) return; |
1256 | | |
1257 | 0 | Address var = Scope.getActiveFlag(); |
1258 | 0 | if (!var.isValid()) { |
1259 | 0 | var = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), CharUnits::One(), |
1260 | 0 | "cleanup.isactive"); |
1261 | 0 | Scope.setActiveFlag(var); |
1262 | |
1263 | 0 | assert(dominatingIP && "no existing variable and no dominating IP!"); |
1264 | | |
1265 | | // Initialize to true or false depending on whether it was |
1266 | | // active up to this point. |
1267 | 0 | llvm::Constant *value = CGF.Builder.getInt1(kind == ForDeactivation); |
1268 | | |
1269 | | // If we're in a conditional block, ignore the dominating IP and |
1270 | | // use the outermost conditional branch. |
1271 | 0 | if (CGF.isInConditionalBranch()) { |
1272 | 0 | CGF.setBeforeOutermostConditional(value, var); |
1273 | 0 | } else { |
1274 | 0 | createStoreInstBefore(value, var, dominatingIP); |
1275 | 0 | } |
1276 | 0 | } |
1277 | | |
1278 | 0 | CGF.Builder.CreateStore(CGF.Builder.getInt1(kind == ForActivation), var); |
1279 | 0 | } |
1280 | | |
1281 | | /// Activate a cleanup that was created in an inactivated state. |
1282 | | void CodeGenFunction::ActivateCleanupBlock(EHScopeStack::stable_iterator C, |
1283 | 0 | llvm::Instruction *dominatingIP) { |
1284 | 0 | assert(C != EHStack.stable_end() && "activating bottom of stack?"); |
1285 | 0 | EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C)); |
1286 | 0 | assert(!Scope.isActive() && "double activation"); |
1287 | | |
1288 | 0 | SetupCleanupBlockActivation(*this, C, ForActivation, dominatingIP); |
1289 | |
1290 | 0 | Scope.setActive(true); |
1291 | 0 | } |
1292 | | |
1293 | | /// Deactivate a cleanup that was created in an active state. |
1294 | | void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C, |
1295 | 0 | llvm::Instruction *dominatingIP) { |
1296 | 0 | assert(C != EHStack.stable_end() && "deactivating bottom of stack?"); |
1297 | 0 | EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C)); |
1298 | 0 | assert(Scope.isActive() && "double deactivation"); |
1299 | | |
1300 | | // If it's the top of the stack, just pop it, but do so only if it belongs |
1301 | | // to the current RunCleanupsScope. |
1302 | 0 | if (C == EHStack.stable_begin() && |
1303 | 0 | CurrentCleanupScopeDepth.strictlyEncloses(C)) { |
1304 | | // Checking EHAsynch here is not strictly necessary; it is only |
1305 | | // there to ensure zero impact when the EHAsynch option is off. |
1306 | 0 | if (!Scope.isNormalCleanup() && getLangOpts().EHAsynch) { |
1307 | 0 | PopCleanupBlock(); |
1308 | 0 | } else { |
1309 | | // If it's a normal cleanup, we need to pretend that the |
1310 | | // fallthrough is unreachable. |
1311 | 0 | CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP(); |
1312 | 0 | PopCleanupBlock(); |
1313 | 0 | Builder.restoreIP(SavedIP); |
1314 | 0 | } |
1315 | 0 | return; |
1316 | 0 | } |
1317 | | |
1318 | | // Otherwise, follow the general case. |
1319 | 0 | SetupCleanupBlockActivation(*this, C, ForDeactivation, dominatingIP); |
1320 | |
|
1321 | 0 | Scope.setActive(false); |
1322 | 0 | } |
1323 | | |
1324 | 0 | Address CodeGenFunction::getNormalCleanupDestSlot() { |
1325 | 0 | if (!NormalCleanupDest.isValid()) |
1326 | 0 | NormalCleanupDest = |
1327 | 0 | CreateDefaultAlignTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot"); |
1328 | 0 | return NormalCleanupDest; |
1329 | 0 | } |
1330 | | |
1331 | | /// Emits all the code to cause the given temporary to be cleaned up. |
1332 | | void CodeGenFunction::EmitCXXTemporary(const CXXTemporary *Temporary, |
1333 | | QualType TempType, |
1334 | 0 | Address Ptr) { |
1335 | 0 | pushDestroy(NormalAndEHCleanup, Ptr, TempType, destroyCXXObject, |
1336 | 0 | /*useEHCleanup*/ true); |
1337 | 0 | } |
1338 | | |
1339 | | // The "funclet" operand bundle needs to be set properly for the |
1340 | | // noThrow intrinsic (see CGCall.cpp). |
1341 | | static void EmitSehScope(CodeGenFunction &CGF, |
1342 | 0 | llvm::FunctionCallee &SehCppScope) { |
1343 | 0 | llvm::BasicBlock *InvokeDest = CGF.getInvokeDest(); |
1344 | 0 | assert(CGF.Builder.GetInsertBlock() && InvokeDest); |
1345 | 0 | llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont"); |
1346 | 0 | SmallVector<llvm::OperandBundleDef, 1> BundleList = |
1347 | 0 | CGF.getBundlesForFunclet(SehCppScope.getCallee()); |
1348 | 0 | if (CGF.CurrentFuncletPad) |
1349 | 0 | BundleList.emplace_back("funclet", CGF.CurrentFuncletPad); |
1350 | 0 | CGF.Builder.CreateInvoke(SehCppScope, Cont, InvokeDest, std::nullopt, |
1351 | 0 | BundleList); |
1352 | 0 | CGF.EmitBlock(Cont); |
1353 | 0 | } |
1354 | | |
1355 | | // Invoke an llvm.seh.scope.begin at the beginning of a C++ scope for -EHa. |
1356 | 0 | void CodeGenFunction::EmitSehCppScopeBegin() { |
1357 | 0 | assert(getLangOpts().EHAsynch); |
1358 | 0 | llvm::FunctionType *FTy = |
1359 | 0 | llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false); |
1360 | 0 | llvm::FunctionCallee SehCppScope = |
1361 | 0 | CGM.CreateRuntimeFunction(FTy, "llvm.seh.scope.begin"); |
1362 | 0 | EmitSehScope(*this, SehCppScope); |
1363 | 0 | } |
1364 | | |
1365 | | // Invoke an llvm.seh.scope.end at the end of a C++ scope for -EHa. |
1366 | | // llvm.seh.scope.end is emitted before popCleanup, so it is "invoked". |
1367 | 0 | void CodeGenFunction::EmitSehCppScopeEnd() { |
1368 | 0 | assert(getLangOpts().EHAsynch); |
1369 | 0 | llvm::FunctionType *FTy = |
1370 | 0 | llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false); |
1371 | 0 | llvm::FunctionCallee SehCppScope = |
1372 | 0 | CGM.CreateRuntimeFunction(FTy, "llvm.seh.scope.end"); |
1373 | 0 | EmitSehScope(*this, SehCppScope); |
1374 | 0 | } |
1375 | | |
1376 | | // Invoke an llvm.seh.try.begin at the beginning of an SEH scope for -EHa. |
1377 | 0 | void CodeGenFunction::EmitSehTryScopeBegin() { |
1378 | 0 | assert(getLangOpts().EHAsynch); |
1379 | 0 | llvm::FunctionType *FTy = |
1380 | 0 | llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false); |
1381 | 0 | llvm::FunctionCallee SehCppScope = |
1382 | 0 | CGM.CreateRuntimeFunction(FTy, "llvm.seh.try.begin"); |
1383 | 0 | EmitSehScope(*this, SehCppScope); |
1384 | 0 | } |
1385 | | |
1386 | | // Invoke an llvm.seh.try.end at the end of an SEH scope for -EHa. |
1387 | 0 | void CodeGenFunction::EmitSehTryScopeEnd() { |
1388 | 0 | assert(getLangOpts().EHAsynch); |
1389 | 0 | llvm::FunctionType *FTy = |
1390 | 0 | llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false); |
1391 | 0 | llvm::FunctionCallee SehCppScope = |
1392 | 0 | CGM.CreateRuntimeFunction(FTy, "llvm.seh.try.end"); |
1393 | 0 | EmitSehScope(*this, SehCppScope); |
1394 | 0 | } |
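Taken together, under -EHa (MSVC's asynchronous exception model) these markers bracket object lifetimes so that hardware faults can still unwind through C++ cleanups. A hypothetical input (Obj and may_fault() are invented):

    struct Obj { ~Obj(); };
    void may_fault();
    void f() {
      Obj o;       // llvm.seh.scope.begin() is invoked after construction
      may_fault(); // under -EHa, a fault here still runs ~Obj
    }              // llvm.seh.scope.end() is invoked before ~Obj runs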