/src/llvm-project/clang/lib/CodeGen/CGStmt.cpp
Line | Count | Source
1 | | //===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | // |
9 | | // This contains code to emit Stmt nodes as LLVM code. |
10 | | // |
11 | | //===----------------------------------------------------------------------===// |
12 | | |
13 | | #include "CGDebugInfo.h" |
14 | | #include "CGOpenMPRuntime.h" |
15 | | #include "CodeGenFunction.h" |
16 | | #include "CodeGenModule.h" |
17 | | #include "TargetInfo.h" |
18 | | #include "clang/AST/Attr.h" |
19 | | #include "clang/AST/Expr.h" |
20 | | #include "clang/AST/Stmt.h" |
21 | | #include "clang/AST/StmtVisitor.h" |
22 | | #include "clang/Basic/Builtins.h" |
23 | | #include "clang/Basic/DiagnosticSema.h" |
24 | | #include "clang/Basic/PrettyStackTrace.h" |
25 | | #include "clang/Basic/SourceManager.h" |
26 | | #include "clang/Basic/TargetInfo.h" |
27 | | #include "llvm/ADT/ArrayRef.h" |
28 | | #include "llvm/ADT/DenseMap.h" |
29 | | #include "llvm/ADT/SmallSet.h" |
30 | | #include "llvm/ADT/StringExtras.h" |
31 | | #include "llvm/IR/Assumptions.h" |
32 | | #include "llvm/IR/DataLayout.h" |
33 | | #include "llvm/IR/InlineAsm.h" |
34 | | #include "llvm/IR/Intrinsics.h" |
35 | | #include "llvm/IR/MDBuilder.h" |
36 | | #include "llvm/Support/SaveAndRestore.h" |
37 | | #include <optional> |
38 | | |
39 | | using namespace clang; |
40 | | using namespace CodeGen; |
41 | | |
42 | | //===----------------------------------------------------------------------===// |
43 | | // Statement Emission |
44 | | //===----------------------------------------------------------------------===// |
45 | | |
46 | 0 | void CodeGenFunction::EmitStopPoint(const Stmt *S) { |
47 | 0 | if (CGDebugInfo *DI = getDebugInfo()) { |
48 | 0 | SourceLocation Loc; |
49 | 0 | Loc = S->getBeginLoc(); |
50 | 0 | DI->EmitLocation(Builder, Loc); |
51 | |
52 | 0 | LastStopPoint = Loc; |
53 | 0 | } |
54 | 0 | } |
55 | | |
56 | 0 | void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) { |
57 | 0 | assert(S && "Null statement?"); |
58 | 0 | PGO.setCurrentStmt(S); |
59 | | |
60 | | // These statements have their own debug info handling. |
61 | 0 | if (EmitSimpleStmt(S, Attrs)) |
62 | 0 | return; |
63 | | |
64 | | // Check if we are generating unreachable code. |
65 | 0 | if (!HaveInsertPoint()) { |
66 | | // If so, and the statement doesn't contain a label, then we do not need to |
67 | | // generate actual code. This is safe because (1) the current point is |
68 | | // unreachable, so we don't need to execute the code, and (2) we've already |
69 | | // handled the statements which update internal data structures (like the |
70 | | // local variable map) which could be used by subsequent statements. |
71 | 0 | if (!ContainsLabel(S)) { |
72 | | // Verify that any decl statements were handled as simple, since they may
73 | | // be in scope of subsequent reachable statements.
74 | 0 | assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!"); |
75 | 0 | return; |
76 | 0 | } |
77 | | |
78 | | // Otherwise, make a new block to hold the code. |
79 | 0 | EnsureInsertPoint(); |
80 | 0 | } |
81 | | |
82 | | // Generate a stoppoint if we are emitting debug info. |
83 | 0 | EmitStopPoint(S); |
84 | | |
85 | | // Ignore all OpenMP directives except for simd if OpenMP with Simd is |
86 | | // enabled. |
87 | 0 | if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) { |
88 | 0 | if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) { |
89 | 0 | EmitSimpleOMPExecutableDirective(*D); |
90 | 0 | return; |
91 | 0 | } |
92 | 0 | } |
93 | | |
94 | 0 | switch (S->getStmtClass()) { |
95 | 0 | case Stmt::NoStmtClass: |
96 | 0 | case Stmt::CXXCatchStmtClass: |
97 | 0 | case Stmt::SEHExceptStmtClass: |
98 | 0 | case Stmt::SEHFinallyStmtClass: |
99 | 0 | case Stmt::MSDependentExistsStmtClass: |
100 | 0 | llvm_unreachable("invalid statement class to emit generically"); |
101 | 0 | case Stmt::NullStmtClass: |
102 | 0 | case Stmt::CompoundStmtClass: |
103 | 0 | case Stmt::DeclStmtClass: |
104 | 0 | case Stmt::LabelStmtClass: |
105 | 0 | case Stmt::AttributedStmtClass: |
106 | 0 | case Stmt::GotoStmtClass: |
107 | 0 | case Stmt::BreakStmtClass: |
108 | 0 | case Stmt::ContinueStmtClass: |
109 | 0 | case Stmt::DefaultStmtClass: |
110 | 0 | case Stmt::CaseStmtClass: |
111 | 0 | case Stmt::SEHLeaveStmtClass: |
112 | 0 | llvm_unreachable("should have emitted these statements as simple"); |
113 | |
114 | 0 | #define STMT(Type, Base) |
115 | 0 | #define ABSTRACT_STMT(Op) |
116 | 0 | #define EXPR(Type, Base) \ |
117 | 0 | case Stmt::Type##Class: |
118 | 0 | #include "clang/AST/StmtNodes.inc" |
119 | 0 | { |
120 | | // Remember the block we came in on. |
121 | 0 | llvm::BasicBlock *incoming = Builder.GetInsertBlock(); |
122 | 0 | assert(incoming && "expression emission must have an insertion point"); |
123 | | |
124 | 0 | EmitIgnoredExpr(cast<Expr>(S)); |
125 | |
126 | 0 | llvm::BasicBlock *outgoing = Builder.GetInsertBlock(); |
127 | 0 | assert(outgoing && "expression emission cleared block!"); |
128 | | |
129 | | // The expression emitters assume (reasonably!) that the insertion |
130 | | // point is always set. To maintain that, the call-emission code |
131 | | // for noreturn functions has to enter a new block with no |
132 | | // predecessors. We want to kill that block and mark the current |
133 | | // insertion point unreachable in the common case of a call like |
134 | | // "exit();". Since expression emission doesn't otherwise create |
135 | | // blocks with no predecessors, we can just test for that. |
136 | | // However, we must be careful not to do this to our incoming |
137 | | // block, because *statement* emission does sometimes create |
138 | | // reachable blocks which will have no predecessors until later in |
139 | | // the function. This occurs with, e.g., labels that are not |
140 | | // reachable by fallthrough. |
141 | 0 | if (incoming != outgoing && outgoing->use_empty()) { |
142 | 0 | outgoing->eraseFromParent(); |
143 | 0 | Builder.ClearInsertionPoint(); |
144 | 0 | } |
145 | 0 | break; |
146 | 0 | } |
147 | | |
148 | 0 | case Stmt::IndirectGotoStmtClass: |
149 | 0 | EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break; |
150 | | |
151 | 0 | case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break; |
152 | 0 | case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break; |
153 | 0 | case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S), Attrs); break; |
154 | 0 | case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S), Attrs); break; |
155 | | |
156 | 0 | case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break; |
157 | | |
158 | 0 | case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break; |
159 | 0 | case Stmt::GCCAsmStmtClass: // Intentional fall-through. |
160 | 0 | case Stmt::MSAsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break; |
161 | 0 | case Stmt::CoroutineBodyStmtClass: |
162 | 0 | EmitCoroutineBody(cast<CoroutineBodyStmt>(*S)); |
163 | 0 | break; |
164 | 0 | case Stmt::CoreturnStmtClass: |
165 | 0 | EmitCoreturnStmt(cast<CoreturnStmt>(*S)); |
166 | 0 | break; |
167 | 0 | case Stmt::CapturedStmtClass: { |
168 | 0 | const CapturedStmt *CS = cast<CapturedStmt>(S); |
169 | 0 | EmitCapturedStmt(*CS, CS->getCapturedRegionKind()); |
170 | 0 | } |
171 | 0 | break; |
172 | 0 | case Stmt::ObjCAtTryStmtClass: |
173 | 0 | EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S)); |
174 | 0 | break; |
175 | 0 | case Stmt::ObjCAtCatchStmtClass: |
176 | 0 | llvm_unreachable( |
177 | 0 | "@catch statements should be handled by EmitObjCAtTryStmt"); |
178 | 0 | case Stmt::ObjCAtFinallyStmtClass: |
179 | 0 | llvm_unreachable( |
180 | 0 | "@finally statements should be handled by EmitObjCAtTryStmt"); |
181 | 0 | case Stmt::ObjCAtThrowStmtClass: |
182 | 0 | EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S)); |
183 | 0 | break; |
184 | 0 | case Stmt::ObjCAtSynchronizedStmtClass: |
185 | 0 | EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S)); |
186 | 0 | break; |
187 | 0 | case Stmt::ObjCForCollectionStmtClass: |
188 | 0 | EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S)); |
189 | 0 | break; |
190 | 0 | case Stmt::ObjCAutoreleasePoolStmtClass: |
191 | 0 | EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S)); |
192 | 0 | break; |
193 | | |
194 | 0 | case Stmt::CXXTryStmtClass: |
195 | 0 | EmitCXXTryStmt(cast<CXXTryStmt>(*S)); |
196 | 0 | break; |
197 | 0 | case Stmt::CXXForRangeStmtClass: |
198 | 0 | EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs); |
199 | 0 | break; |
200 | 0 | case Stmt::SEHTryStmtClass: |
201 | 0 | EmitSEHTryStmt(cast<SEHTryStmt>(*S)); |
202 | 0 | break; |
203 | 0 | case Stmt::OMPMetaDirectiveClass: |
204 | 0 | EmitOMPMetaDirective(cast<OMPMetaDirective>(*S)); |
205 | 0 | break; |
206 | 0 | case Stmt::OMPCanonicalLoopClass: |
207 | 0 | EmitOMPCanonicalLoop(cast<OMPCanonicalLoop>(S)); |
208 | 0 | break; |
209 | 0 | case Stmt::OMPParallelDirectiveClass: |
210 | 0 | EmitOMPParallelDirective(cast<OMPParallelDirective>(*S)); |
211 | 0 | break; |
212 | 0 | case Stmt::OMPSimdDirectiveClass: |
213 | 0 | EmitOMPSimdDirective(cast<OMPSimdDirective>(*S)); |
214 | 0 | break; |
215 | 0 | case Stmt::OMPTileDirectiveClass: |
216 | 0 | EmitOMPTileDirective(cast<OMPTileDirective>(*S)); |
217 | 0 | break; |
218 | 0 | case Stmt::OMPUnrollDirectiveClass: |
219 | 0 | EmitOMPUnrollDirective(cast<OMPUnrollDirective>(*S)); |
220 | 0 | break; |
221 | 0 | case Stmt::OMPForDirectiveClass: |
222 | 0 | EmitOMPForDirective(cast<OMPForDirective>(*S)); |
223 | 0 | break; |
224 | 0 | case Stmt::OMPForSimdDirectiveClass: |
225 | 0 | EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S)); |
226 | 0 | break; |
227 | 0 | case Stmt::OMPSectionsDirectiveClass: |
228 | 0 | EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S)); |
229 | 0 | break; |
230 | 0 | case Stmt::OMPSectionDirectiveClass: |
231 | 0 | EmitOMPSectionDirective(cast<OMPSectionDirective>(*S)); |
232 | 0 | break; |
233 | 0 | case Stmt::OMPSingleDirectiveClass: |
234 | 0 | EmitOMPSingleDirective(cast<OMPSingleDirective>(*S)); |
235 | 0 | break; |
236 | 0 | case Stmt::OMPMasterDirectiveClass: |
237 | 0 | EmitOMPMasterDirective(cast<OMPMasterDirective>(*S)); |
238 | 0 | break; |
239 | 0 | case Stmt::OMPCriticalDirectiveClass: |
240 | 0 | EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S)); |
241 | 0 | break; |
242 | 0 | case Stmt::OMPParallelForDirectiveClass: |
243 | 0 | EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S)); |
244 | 0 | break; |
245 | 0 | case Stmt::OMPParallelForSimdDirectiveClass: |
246 | 0 | EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S)); |
247 | 0 | break; |
248 | 0 | case Stmt::OMPParallelMasterDirectiveClass: |
249 | 0 | EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S)); |
250 | 0 | break; |
251 | 0 | case Stmt::OMPParallelSectionsDirectiveClass: |
252 | 0 | EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S)); |
253 | 0 | break; |
254 | 0 | case Stmt::OMPTaskDirectiveClass: |
255 | 0 | EmitOMPTaskDirective(cast<OMPTaskDirective>(*S)); |
256 | 0 | break; |
257 | 0 | case Stmt::OMPTaskyieldDirectiveClass: |
258 | 0 | EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S)); |
259 | 0 | break; |
260 | 0 | case Stmt::OMPErrorDirectiveClass: |
261 | 0 | EmitOMPErrorDirective(cast<OMPErrorDirective>(*S)); |
262 | 0 | break; |
263 | 0 | case Stmt::OMPBarrierDirectiveClass: |
264 | 0 | EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S)); |
265 | 0 | break; |
266 | 0 | case Stmt::OMPTaskwaitDirectiveClass: |
267 | 0 | EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S)); |
268 | 0 | break; |
269 | 0 | case Stmt::OMPTaskgroupDirectiveClass: |
270 | 0 | EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S)); |
271 | 0 | break; |
272 | 0 | case Stmt::OMPFlushDirectiveClass: |
273 | 0 | EmitOMPFlushDirective(cast<OMPFlushDirective>(*S)); |
274 | 0 | break; |
275 | 0 | case Stmt::OMPDepobjDirectiveClass: |
276 | 0 | EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S)); |
277 | 0 | break; |
278 | 0 | case Stmt::OMPScanDirectiveClass: |
279 | 0 | EmitOMPScanDirective(cast<OMPScanDirective>(*S)); |
280 | 0 | break; |
281 | 0 | case Stmt::OMPOrderedDirectiveClass: |
282 | 0 | EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S)); |
283 | 0 | break; |
284 | 0 | case Stmt::OMPAtomicDirectiveClass: |
285 | 0 | EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S)); |
286 | 0 | break; |
287 | 0 | case Stmt::OMPTargetDirectiveClass: |
288 | 0 | EmitOMPTargetDirective(cast<OMPTargetDirective>(*S)); |
289 | 0 | break; |
290 | 0 | case Stmt::OMPTeamsDirectiveClass: |
291 | 0 | EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S)); |
292 | 0 | break; |
293 | 0 | case Stmt::OMPCancellationPointDirectiveClass: |
294 | 0 | EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S)); |
295 | 0 | break; |
296 | 0 | case Stmt::OMPCancelDirectiveClass: |
297 | 0 | EmitOMPCancelDirective(cast<OMPCancelDirective>(*S)); |
298 | 0 | break; |
299 | 0 | case Stmt::OMPTargetDataDirectiveClass: |
300 | 0 | EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S)); |
301 | 0 | break; |
302 | 0 | case Stmt::OMPTargetEnterDataDirectiveClass: |
303 | 0 | EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S)); |
304 | 0 | break; |
305 | 0 | case Stmt::OMPTargetExitDataDirectiveClass: |
306 | 0 | EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S)); |
307 | 0 | break; |
308 | 0 | case Stmt::OMPTargetParallelDirectiveClass: |
309 | 0 | EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S)); |
310 | 0 | break; |
311 | 0 | case Stmt::OMPTargetParallelForDirectiveClass: |
312 | 0 | EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S)); |
313 | 0 | break; |
314 | 0 | case Stmt::OMPTaskLoopDirectiveClass: |
315 | 0 | EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S)); |
316 | 0 | break; |
317 | 0 | case Stmt::OMPTaskLoopSimdDirectiveClass: |
318 | 0 | EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S)); |
319 | 0 | break; |
320 | 0 | case Stmt::OMPMasterTaskLoopDirectiveClass: |
321 | 0 | EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S)); |
322 | 0 | break; |
323 | 0 | case Stmt::OMPMaskedTaskLoopDirectiveClass: |
324 | 0 | llvm_unreachable("masked taskloop directive not supported yet."); |
325 | 0 | break; |
326 | 0 | case Stmt::OMPMasterTaskLoopSimdDirectiveClass: |
327 | 0 | EmitOMPMasterTaskLoopSimdDirective( |
328 | 0 | cast<OMPMasterTaskLoopSimdDirective>(*S)); |
329 | 0 | break; |
330 | 0 | case Stmt::OMPMaskedTaskLoopSimdDirectiveClass: |
331 | 0 | llvm_unreachable("masked taskloop simd directive not supported yet."); |
332 | 0 | break; |
333 | 0 | case Stmt::OMPParallelMasterTaskLoopDirectiveClass: |
334 | 0 | EmitOMPParallelMasterTaskLoopDirective( |
335 | 0 | cast<OMPParallelMasterTaskLoopDirective>(*S)); |
336 | 0 | break; |
337 | 0 | case Stmt::OMPParallelMaskedTaskLoopDirectiveClass: |
338 | 0 | llvm_unreachable("parallel masked taskloop directive not supported yet."); |
339 | 0 | break; |
340 | 0 | case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass: |
341 | 0 | EmitOMPParallelMasterTaskLoopSimdDirective( |
342 | 0 | cast<OMPParallelMasterTaskLoopSimdDirective>(*S)); |
343 | 0 | break; |
344 | 0 | case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass: |
345 | 0 | llvm_unreachable( |
346 | 0 | "parallel masked taskloop simd directive not supported yet."); |
347 | 0 | break; |
348 | 0 | case Stmt::OMPDistributeDirectiveClass: |
349 | 0 | EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S)); |
350 | 0 | break; |
351 | 0 | case Stmt::OMPTargetUpdateDirectiveClass: |
352 | 0 | EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S)); |
353 | 0 | break; |
354 | 0 | case Stmt::OMPDistributeParallelForDirectiveClass: |
355 | 0 | EmitOMPDistributeParallelForDirective( |
356 | 0 | cast<OMPDistributeParallelForDirective>(*S)); |
357 | 0 | break; |
358 | 0 | case Stmt::OMPDistributeParallelForSimdDirectiveClass: |
359 | 0 | EmitOMPDistributeParallelForSimdDirective( |
360 | 0 | cast<OMPDistributeParallelForSimdDirective>(*S)); |
361 | 0 | break; |
362 | 0 | case Stmt::OMPDistributeSimdDirectiveClass: |
363 | 0 | EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S)); |
364 | 0 | break; |
365 | 0 | case Stmt::OMPTargetParallelForSimdDirectiveClass: |
366 | 0 | EmitOMPTargetParallelForSimdDirective( |
367 | 0 | cast<OMPTargetParallelForSimdDirective>(*S)); |
368 | 0 | break; |
369 | 0 | case Stmt::OMPTargetSimdDirectiveClass: |
370 | 0 | EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S)); |
371 | 0 | break; |
372 | 0 | case Stmt::OMPTeamsDistributeDirectiveClass: |
373 | 0 | EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S)); |
374 | 0 | break; |
375 | 0 | case Stmt::OMPTeamsDistributeSimdDirectiveClass: |
376 | 0 | EmitOMPTeamsDistributeSimdDirective( |
377 | 0 | cast<OMPTeamsDistributeSimdDirective>(*S)); |
378 | 0 | break; |
379 | 0 | case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass: |
380 | 0 | EmitOMPTeamsDistributeParallelForSimdDirective( |
381 | 0 | cast<OMPTeamsDistributeParallelForSimdDirective>(*S)); |
382 | 0 | break; |
383 | 0 | case Stmt::OMPTeamsDistributeParallelForDirectiveClass: |
384 | 0 | EmitOMPTeamsDistributeParallelForDirective( |
385 | 0 | cast<OMPTeamsDistributeParallelForDirective>(*S)); |
386 | 0 | break; |
387 | 0 | case Stmt::OMPTargetTeamsDirectiveClass: |
388 | 0 | EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S)); |
389 | 0 | break; |
390 | 0 | case Stmt::OMPTargetTeamsDistributeDirectiveClass: |
391 | 0 | EmitOMPTargetTeamsDistributeDirective( |
392 | 0 | cast<OMPTargetTeamsDistributeDirective>(*S)); |
393 | 0 | break; |
394 | 0 | case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass: |
395 | 0 | EmitOMPTargetTeamsDistributeParallelForDirective( |
396 | 0 | cast<OMPTargetTeamsDistributeParallelForDirective>(*S)); |
397 | 0 | break; |
398 | 0 | case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass: |
399 | 0 | EmitOMPTargetTeamsDistributeParallelForSimdDirective( |
400 | 0 | cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S)); |
401 | 0 | break; |
402 | 0 | case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass: |
403 | 0 | EmitOMPTargetTeamsDistributeSimdDirective( |
404 | 0 | cast<OMPTargetTeamsDistributeSimdDirective>(*S)); |
405 | 0 | break; |
406 | 0 | case Stmt::OMPInteropDirectiveClass: |
407 | 0 | EmitOMPInteropDirective(cast<OMPInteropDirective>(*S)); |
408 | 0 | break; |
409 | 0 | case Stmt::OMPDispatchDirectiveClass: |
410 | 0 | CGM.ErrorUnsupported(S, "OpenMP dispatch directive"); |
411 | 0 | break; |
412 | 0 | case Stmt::OMPScopeDirectiveClass: |
413 | 0 | llvm_unreachable("scope not supported with FE outlining"); |
414 | 0 | case Stmt::OMPMaskedDirectiveClass: |
415 | 0 | EmitOMPMaskedDirective(cast<OMPMaskedDirective>(*S)); |
416 | 0 | break; |
417 | 0 | case Stmt::OMPGenericLoopDirectiveClass: |
418 | 0 | EmitOMPGenericLoopDirective(cast<OMPGenericLoopDirective>(*S)); |
419 | 0 | break; |
420 | 0 | case Stmt::OMPTeamsGenericLoopDirectiveClass: |
421 | 0 | EmitOMPTeamsGenericLoopDirective(cast<OMPTeamsGenericLoopDirective>(*S)); |
422 | 0 | break; |
423 | 0 | case Stmt::OMPTargetTeamsGenericLoopDirectiveClass: |
424 | 0 | EmitOMPTargetTeamsGenericLoopDirective( |
425 | 0 | cast<OMPTargetTeamsGenericLoopDirective>(*S)); |
426 | 0 | break; |
427 | 0 | case Stmt::OMPParallelGenericLoopDirectiveClass: |
428 | 0 | EmitOMPParallelGenericLoopDirective( |
429 | 0 | cast<OMPParallelGenericLoopDirective>(*S)); |
430 | 0 | break; |
431 | 0 | case Stmt::OMPTargetParallelGenericLoopDirectiveClass: |
432 | 0 | EmitOMPTargetParallelGenericLoopDirective( |
433 | 0 | cast<OMPTargetParallelGenericLoopDirective>(*S)); |
434 | 0 | break; |
435 | 0 | case Stmt::OMPParallelMaskedDirectiveClass: |
436 | 0 | EmitOMPParallelMaskedDirective(cast<OMPParallelMaskedDirective>(*S)); |
437 | 0 | break; |
438 | 0 | } |
439 | 0 | } |
440 | | |
441 | | bool CodeGenFunction::EmitSimpleStmt(const Stmt *S, |
442 | 0 | ArrayRef<const Attr *> Attrs) { |
443 | 0 | switch (S->getStmtClass()) { |
444 | 0 | default: |
445 | 0 | return false; |
446 | 0 | case Stmt::NullStmtClass: |
447 | 0 | break; |
448 | 0 | case Stmt::CompoundStmtClass: |
449 | 0 | EmitCompoundStmt(cast<CompoundStmt>(*S)); |
450 | 0 | break; |
451 | 0 | case Stmt::DeclStmtClass: |
452 | 0 | EmitDeclStmt(cast<DeclStmt>(*S)); |
453 | 0 | break; |
454 | 0 | case Stmt::LabelStmtClass: |
455 | 0 | EmitLabelStmt(cast<LabelStmt>(*S)); |
456 | 0 | break; |
457 | 0 | case Stmt::AttributedStmtClass: |
458 | 0 | EmitAttributedStmt(cast<AttributedStmt>(*S)); |
459 | 0 | break; |
460 | 0 | case Stmt::GotoStmtClass: |
461 | 0 | EmitGotoStmt(cast<GotoStmt>(*S)); |
462 | 0 | break; |
463 | 0 | case Stmt::BreakStmtClass: |
464 | 0 | EmitBreakStmt(cast<BreakStmt>(*S)); |
465 | 0 | break; |
466 | 0 | case Stmt::ContinueStmtClass: |
467 | 0 | EmitContinueStmt(cast<ContinueStmt>(*S)); |
468 | 0 | break; |
469 | 0 | case Stmt::DefaultStmtClass: |
470 | 0 | EmitDefaultStmt(cast<DefaultStmt>(*S), Attrs); |
471 | 0 | break; |
472 | 0 | case Stmt::CaseStmtClass: |
473 | 0 | EmitCaseStmt(cast<CaseStmt>(*S), Attrs); |
474 | 0 | break; |
475 | 0 | case Stmt::SEHLeaveStmtClass: |
476 | 0 | EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S)); |
477 | 0 | break; |
478 | 0 | } |
479 | 0 | return true; |
480 | 0 | } |
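 | | // For illustration: in a function such as
 | | //   void f(int n) { L: if (n) goto L; }
 | | // the compound statement, the label and the goto are all emitted through the
 | | // switch above, while the if statement returns false here and is handled by
 | | // EmitStmt's generic path instead.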
481 | | |
482 | | /// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true, |
483 | | /// this captures the expression result of the last sub-statement and returns it |
484 | | /// (for use by the statement expression extension). |
485 | | Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast, |
486 | 0 | AggValueSlot AggSlot) { |
487 | 0 | PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(), |
488 | 0 | "LLVM IR generation of compound statement ('{}')"); |
489 | | |
490 | | // Keep track of the current cleanup stack depth, including debug scopes. |
491 | 0 | LexicalScope Scope(*this, S.getSourceRange()); |
492 | |
493 | 0 | return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot); |
494 | 0 | } |
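 | | // For illustration: GetLast is true when this compound statement forms the
 | | // body of a GNU statement expression, e.g.
 | | //   int x = ({ g(); f() + 1; });
 | | // where the value of the trailing expression becomes the value of the whole
 | | // construct and is handed back via the aggregate slot or a temporary alloca.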
495 | | |
496 | | Address |
497 | | CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S, |
498 | | bool GetLast, |
499 | 0 | AggValueSlot AggSlot) { |
500 | |
501 | 0 | const Stmt *ExprResult = S.getStmtExprResult(); |
502 | 0 | assert((!GetLast || (GetLast && ExprResult)) && |
503 | 0 | "If GetLast is true then the CompoundStmt must have a StmtExprResult"); |
504 | | |
505 | 0 | Address RetAlloca = Address::invalid(); |
506 | |
507 | 0 | for (auto *CurStmt : S.body()) { |
508 | 0 | if (GetLast && ExprResult == CurStmt) { |
509 | | // We have to special case labels here. They are statements, but when put |
510 | | // at the end of a statement expression, they yield the value of their |
511 | | // subexpression. Handle this by walking through all labels we encounter, |
512 | | // emitting them before we evaluate the subexpr. |
513 | | // Similar issues arise for attributed statements. |
514 | 0 | while (!isa<Expr>(ExprResult)) { |
515 | 0 | if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) { |
516 | 0 | EmitLabel(LS->getDecl()); |
517 | 0 | ExprResult = LS->getSubStmt(); |
518 | 0 | } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) { |
519 | | // FIXME: Update this if we ever have attributes that affect the |
520 | | // semantics of an expression. |
521 | 0 | ExprResult = AS->getSubStmt(); |
522 | 0 | } else { |
523 | 0 | llvm_unreachable("unknown value statement"); |
524 | 0 | } |
525 | 0 | } |
526 | |
527 | 0 | EnsureInsertPoint(); |
528 | |
529 | 0 | const Expr *E = cast<Expr>(ExprResult); |
530 | 0 | QualType ExprTy = E->getType(); |
531 | 0 | if (hasAggregateEvaluationKind(ExprTy)) { |
532 | 0 | EmitAggExpr(E, AggSlot); |
533 | 0 | } else { |
534 | | // We can't return an RValue here because there might be cleanups at |
535 | | // the end of the StmtExpr. Because of that, we have to emit the result |
536 | | // here into a temporary alloca. |
537 | 0 | RetAlloca = CreateMemTemp(ExprTy); |
538 | 0 | EmitAnyExprToMem(E, RetAlloca, Qualifiers(), |
539 | 0 | /*IsInit*/ false); |
540 | 0 | } |
541 | 0 | } else { |
542 | 0 | EmitStmt(CurStmt); |
543 | 0 | } |
544 | 0 | } |
545 | |
546 | 0 | return RetAlloca; |
547 | 0 | } |
548 | | |
549 | 0 | void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) { |
550 | 0 | llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator()); |
551 | | |
552 | | // If there is a cleanup stack, then it isn't worth trying to
553 | | // simplify this block (we would need to remove it from the scope map |
554 | | // and cleanup entry). |
555 | 0 | if (!EHStack.empty()) |
556 | 0 | return; |
557 | | |
558 | | // Can only simplify direct branches. |
559 | 0 | if (!BI || !BI->isUnconditional()) |
560 | 0 | return; |
561 | | |
562 | | // Can only simplify empty blocks. |
563 | 0 | if (BI->getIterator() != BB->begin()) |
564 | 0 | return; |
565 | | |
566 | 0 | BB->replaceAllUsesWith(BI->getSuccessor(0)); |
567 | 0 | BI->eraseFromParent(); |
568 | 0 | BB->eraseFromParent(); |
569 | 0 | } |
570 | | |
571 | 0 | void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) { |
572 | 0 | llvm::BasicBlock *CurBB = Builder.GetInsertBlock(); |
573 | | |
574 | | // Fall out of the current block (if necessary). |
575 | 0 | EmitBranch(BB); |
576 | |
577 | 0 | if (IsFinished && BB->use_empty()) { |
578 | 0 | delete BB; |
579 | 0 | return; |
580 | 0 | } |
581 | | |
582 | | // Place the block after the current block, if possible, or else at |
583 | | // the end of the function. |
584 | 0 | if (CurBB && CurBB->getParent()) |
585 | 0 | CurFn->insert(std::next(CurBB->getIterator()), BB); |
586 | 0 | else |
587 | 0 | CurFn->insert(CurFn->end(), BB); |
588 | 0 | Builder.SetInsertPoint(BB); |
589 | 0 | } |
590 | | |
591 | 0 | void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) { |
592 | | // Emit a branch from the current block to the target one if this |
593 | | // was a real block. If this was just a fall-through block after a |
594 | | // terminator, don't emit it. |
595 | 0 | llvm::BasicBlock *CurBB = Builder.GetInsertBlock(); |
596 | |
597 | 0 | if (!CurBB || CurBB->getTerminator()) { |
598 | | // If there is no insert point or the previous block is already |
599 | | // terminated, don't touch it. |
600 | 0 | } else { |
601 | | // Otherwise, create a fall-through branch. |
602 | 0 | Builder.CreateBr(Target); |
603 | 0 | } |
604 | |
605 | 0 | Builder.ClearInsertionPoint(); |
606 | 0 | } |
607 | | |
608 | 0 | void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) { |
609 | 0 | bool inserted = false; |
610 | 0 | for (llvm::User *u : block->users()) { |
611 | 0 | if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) { |
612 | 0 | CurFn->insert(std::next(insn->getParent()->getIterator()), block); |
613 | 0 | inserted = true; |
614 | 0 | break; |
615 | 0 | } |
616 | 0 | } |
617 | |
618 | 0 | if (!inserted) |
619 | 0 | CurFn->insert(CurFn->end(), block); |
620 | |
621 | 0 | Builder.SetInsertPoint(block); |
622 | 0 | } |
623 | | |
624 | | CodeGenFunction::JumpDest |
625 | 0 | CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) { |
626 | 0 | JumpDest &Dest = LabelMap[D]; |
627 | 0 | if (Dest.isValid()) return Dest; |
628 | | |
629 | | // Create, but don't insert, the new block. |
630 | 0 | Dest = JumpDest(createBasicBlock(D->getName()), |
631 | 0 | EHScopeStack::stable_iterator::invalid(), |
632 | 0 | NextCleanupDestIndex++); |
633 | 0 | return Dest; |
634 | 0 | } |
635 | | |
636 | 0 | void CodeGenFunction::EmitLabel(const LabelDecl *D) { |
637 | | // Add this label to the current lexical scope if we're within any |
638 | | // normal cleanups. Jumps "in" to this label --- when permitted by |
639 | | // the language --- may need to be routed around such cleanups. |
640 | 0 | if (EHStack.hasNormalCleanups() && CurLexicalScope) |
641 | 0 | CurLexicalScope->addLabel(D); |
642 | |
643 | 0 | JumpDest &Dest = LabelMap[D]; |
644 | | |
645 | | // If we didn't need a forward reference to this label, just go |
646 | | // ahead and create a destination at the current scope. |
647 | 0 | if (!Dest.isValid()) { |
648 | 0 | Dest = getJumpDestInCurrentScope(D->getName()); |
649 | | |
650 | | // Otherwise, we need to give this label a target depth and remove |
651 | | // it from the branch-fixups list. |
652 | 0 | } else { |
653 | 0 | assert(!Dest.getScopeDepth().isValid() && "already emitted label!"); |
654 | 0 | Dest.setScopeDepth(EHStack.stable_begin()); |
655 | 0 | ResolveBranchFixups(Dest.getBlock()); |
656 | 0 | } |
657 | | |
658 | 0 | EmitBlock(Dest.getBlock()); |
659 | | |
660 | | // Emit debug info for labels. |
661 | 0 | if (CGDebugInfo *DI = getDebugInfo()) { |
662 | 0 | if (CGM.getCodeGenOpts().hasReducedDebugInfo()) { |
663 | 0 | DI->setLocation(D->getLocation()); |
664 | 0 | DI->EmitLabel(D, Builder); |
665 | 0 | } |
666 | 0 | } |
667 | |
668 | 0 | incrementProfileCounter(D->getStmt()); |
669 | 0 | } |
670 | | |
671 | | /// Change the cleanup scope of the labels in this lexical scope to |
672 | | /// match the scope of the enclosing context. |
673 | 0 | void CodeGenFunction::LexicalScope::rescopeLabels() { |
674 | 0 | assert(!Labels.empty()); |
675 | 0 | EHScopeStack::stable_iterator innermostScope |
676 | 0 | = CGF.EHStack.getInnermostNormalCleanup(); |
677 | | |
678 | | // Change the scope depth of all the labels. |
679 | 0 | for (SmallVectorImpl<const LabelDecl*>::const_iterator |
680 | 0 | i = Labels.begin(), e = Labels.end(); i != e; ++i) { |
681 | 0 | assert(CGF.LabelMap.count(*i)); |
682 | 0 | JumpDest &dest = CGF.LabelMap.find(*i)->second; |
683 | 0 | assert(dest.getScopeDepth().isValid()); |
684 | 0 | assert(innermostScope.encloses(dest.getScopeDepth())); |
685 | 0 | dest.setScopeDepth(innermostScope); |
686 | 0 | } |
687 | | |
688 | | // Reparent the labels if the new scope also has cleanups. |
689 | 0 | if (innermostScope != EHScopeStack::stable_end() && ParentScope) { |
690 | 0 | ParentScope->Labels.append(Labels.begin(), Labels.end()); |
691 | 0 | } |
692 | 0 | } |
693 | | |
694 | | |
695 | 0 | void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) { |
696 | 0 | EmitLabel(S.getDecl()); |
697 | | |
698 | | // IsEHa: when async EH is enabled, emit eha.scope.begin if this label is a side entry of a scope
699 | 0 | if (getLangOpts().EHAsynch && S.isSideEntry()) |
700 | 0 | EmitSehCppScopeBegin(); |
701 | |
702 | 0 | EmitStmt(S.getSubStmt()); |
703 | 0 | } |
704 | | |
705 | 0 | void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) { |
706 | 0 | bool nomerge = false; |
707 | 0 | bool noinline = false; |
708 | 0 | bool alwaysinline = false; |
709 | 0 | const CallExpr *musttail = nullptr; |
710 | |
711 | 0 | for (const auto *A : S.getAttrs()) { |
712 | 0 | switch (A->getKind()) { |
713 | 0 | default: |
714 | 0 | break; |
715 | 0 | case attr::NoMerge: |
716 | 0 | nomerge = true; |
717 | 0 | break; |
718 | 0 | case attr::NoInline: |
719 | 0 | noinline = true; |
720 | 0 | break; |
721 | 0 | case attr::AlwaysInline: |
722 | 0 | alwaysinline = true; |
723 | 0 | break; |
724 | 0 | case attr::MustTail: |
725 | 0 | const Stmt *Sub = S.getSubStmt(); |
726 | 0 | const ReturnStmt *R = cast<ReturnStmt>(Sub); |
727 | 0 | musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens()); |
728 | 0 | break; |
729 | 0 | } |
730 | 0 | } |
731 | 0 | SaveAndRestore save_nomerge(InNoMergeAttributedStmt, nomerge); |
732 | 0 | SaveAndRestore save_noinline(InNoInlineAttributedStmt, noinline); |
733 | 0 | SaveAndRestore save_alwaysinline(InAlwaysInlineAttributedStmt, alwaysinline); |
734 | 0 | SaveAndRestore save_musttail(MustTailCall, musttail); |
735 | 0 | EmitStmt(S.getSubStmt(), S.getAttrs()); |
736 | 0 | } |
737 | | |
738 | 0 | void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) { |
739 | | // If this code is reachable then emit a stop point (if generating |
740 | | // debug info). We have to do this ourselves because we are on the |
741 | | // "simple" statement path. |
742 | 0 | if (HaveInsertPoint()) |
743 | 0 | EmitStopPoint(&S); |
744 | |
745 | 0 | EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel())); |
746 | 0 | } |
747 | | |
748 | | |
749 | 0 | void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) { |
750 | 0 | if (const LabelDecl *Target = S.getConstantTarget()) { |
751 | 0 | EmitBranchThroughCleanup(getJumpDestForLabel(Target)); |
752 | 0 | return; |
753 | 0 | } |
754 | | |
755 | | // Ensure that we have an i8* for our PHI node. |
756 | 0 | llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()), |
757 | 0 | Int8PtrTy, "addr"); |
758 | 0 | llvm::BasicBlock *CurBB = Builder.GetInsertBlock(); |
759 | | |
760 | | // Get the basic block for the indirect goto. |
761 | 0 | llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock(); |
762 | | |
763 | | // The first instruction in the block has to be the PHI for the switch dest, |
764 | | // add an entry for this branch. |
765 | 0 | cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB); |
766 | |
767 | 0 | EmitBranch(IndGotoBB); |
768 | 0 | } |
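 | | // For illustration: this lowers GCC's computed goto, e.g.
 | | //   static void *targets[] = { &&L0, &&L1 };
 | | //   goto *targets[i];
 | | // Every such goto feeds its target address into the PHI node at the head of
 | | // the function's shared indirect-goto block obtained above.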
769 | | |
770 | 0 | void CodeGenFunction::EmitIfStmt(const IfStmt &S) { |
771 | | // The else branch of a consteval if statement is always the only branch that |
772 | | // can be runtime evaluated. |
773 | 0 | if (S.isConsteval()) { |
774 | 0 | const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : S.getElse(); |
775 | 0 | if (Executed) { |
776 | 0 | RunCleanupsScope ExecutedScope(*this); |
777 | 0 | EmitStmt(Executed); |
778 | 0 | } |
779 | 0 | return; |
780 | 0 | } |
781 | | |
782 | | // C99 6.8.4.1: The first substatement is executed if the expression compares |
783 | | // unequal to 0. The condition must be a scalar type. |
784 | 0 | LexicalScope ConditionScope(*this, S.getCond()->getSourceRange()); |
785 | |
786 | 0 | if (S.getInit()) |
787 | 0 | EmitStmt(S.getInit()); |
788 | |
789 | 0 | if (S.getConditionVariable()) |
790 | 0 | EmitDecl(*S.getConditionVariable()); |
791 | | |
792 | | // If the condition constant folds and can be elided, try to avoid emitting |
793 | | // the condition and the dead arm of the if/else. |
794 | 0 | bool CondConstant; |
795 | 0 | if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant, |
796 | 0 | S.isConstexpr())) { |
797 | | // Figure out which block (then or else) is executed. |
798 | 0 | const Stmt *Executed = S.getThen(); |
799 | 0 | const Stmt *Skipped = S.getElse(); |
800 | 0 | if (!CondConstant) // Condition false? |
801 | 0 | std::swap(Executed, Skipped); |
802 | | |
803 | | // If the skipped block has no labels in it, just emit the executed block. |
804 | | // This avoids emitting dead code and simplifies the CFG substantially. |
805 | 0 | if (S.isConstexpr() || !ContainsLabel(Skipped)) { |
806 | 0 | if (CondConstant) |
807 | 0 | incrementProfileCounter(&S); |
808 | 0 | if (Executed) { |
809 | 0 | RunCleanupsScope ExecutedScope(*this); |
810 | 0 | EmitStmt(Executed); |
811 | 0 | } |
812 | 0 | return; |
813 | 0 | } |
814 | 0 | } |
815 | | |
816 | | // Otherwise, the condition did not fold, or we couldn't elide it. Just emit |
817 | | // the conditional branch. |
818 | 0 | llvm::BasicBlock *ThenBlock = createBasicBlock("if.then"); |
819 | 0 | llvm::BasicBlock *ContBlock = createBasicBlock("if.end"); |
820 | 0 | llvm::BasicBlock *ElseBlock = ContBlock; |
821 | 0 | if (S.getElse()) |
822 | 0 | ElseBlock = createBasicBlock("if.else"); |
823 | | |
824 | | // Prefer the PGO based weights over the likelihood attribute. |
825 | | // When the build isn't optimized the metadata isn't used, so don't generate |
826 | | // it. |
827 | | // Also, differentiate between disabled PGO and a never executed branch with |
828 | | // PGO. Assuming PGO is in use: |
829 | | // - we want to ignore the [[likely]] attribute if the branch is never |
830 | | // executed, |
831 | | // - assuming the profile is poor, preserving the attribute may still be |
832 | | // beneficial. |
833 | | // As an approximation, preserve the attribute only if both the branch and the |
834 | | // parent context were not executed. |
835 | 0 | Stmt::Likelihood LH = Stmt::LH_None; |
836 | 0 | uint64_t ThenCount = getProfileCount(S.getThen()); |
837 | 0 | if (!ThenCount && !getCurrentProfileCount() && |
838 | 0 | CGM.getCodeGenOpts().OptimizationLevel) |
839 | 0 | LH = Stmt::getLikelihood(S.getThen(), S.getElse()); |
840 | | |
841 | | // When measuring MC/DC, always fully evaluate the condition up front using |
842 | | // EvaluateExprAsBool() so that the test vector bitmap can be updated prior to |
843 | | // executing the body of the if.then or if.else. This is useful for when |
844 | | // there is a 'return' within the body, but this is particularly beneficial |
845 | | // when one if-stmt is nested within another if-stmt so that all of the MC/DC |
846 | | // updates are kept linear and consistent. |
847 | 0 | if (!CGM.getCodeGenOpts().MCDCCoverage) |
848 | 0 | EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, ThenCount, LH); |
849 | 0 | else { |
850 | 0 | llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond()); |
851 | 0 | Builder.CreateCondBr(BoolCondVal, ThenBlock, ElseBlock); |
852 | 0 | } |
853 | | |
854 | | // Emit the 'then' code. |
855 | 0 | EmitBlock(ThenBlock); |
856 | 0 | incrementProfileCounter(&S); |
857 | 0 | { |
858 | 0 | RunCleanupsScope ThenScope(*this); |
859 | 0 | EmitStmt(S.getThen()); |
860 | 0 | } |
861 | 0 | EmitBranch(ContBlock); |
862 | | |
863 | | // Emit the 'else' code if present. |
864 | 0 | if (const Stmt *Else = S.getElse()) { |
865 | 0 | { |
866 | | // There is no need to emit line number for an unconditional branch. |
867 | 0 | auto NL = ApplyDebugLocation::CreateEmpty(*this); |
868 | 0 | EmitBlock(ElseBlock); |
869 | 0 | } |
870 | 0 | { |
871 | 0 | RunCleanupsScope ElseScope(*this); |
872 | 0 | EmitStmt(Else); |
873 | 0 | } |
874 | 0 | { |
875 | | // There is no need to emit line number for an unconditional branch. |
876 | 0 | auto NL = ApplyDebugLocation::CreateEmpty(*this); |
877 | 0 | EmitBranch(ContBlock); |
878 | 0 | } |
879 | 0 | } |
880 | | |
881 | | // Emit the continuation block for code after the if. |
882 | 0 | EmitBlock(ContBlock, true); |
883 | 0 | } |
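 | | // For illustration: for `if consteval { ... } else { ... }` only the else
 | | // branch can ever run at runtime, so the consteval path at the top of
 | | // EmitIfStmt emits that single arm and no condition or branch at all.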
884 | | |
885 | | void CodeGenFunction::EmitWhileStmt(const WhileStmt &S, |
886 | 0 | ArrayRef<const Attr *> WhileAttrs) { |
887 | | // Emit the header for the loop, which will also become |
888 | | // the continue target. |
889 | 0 | JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond"); |
890 | 0 | EmitBlock(LoopHeader.getBlock()); |
891 | | |
892 | | // Create an exit block for when the condition fails, which will |
893 | | // also become the break target. |
894 | 0 | JumpDest LoopExit = getJumpDestInCurrentScope("while.end"); |
895 | | |
896 | | // Store the blocks to use for break and continue. |
897 | 0 | BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader)); |
898 | | |
899 | | // C++ [stmt.while]p2: |
900 | | // When the condition of a while statement is a declaration, the |
901 | | // scope of the variable that is declared extends from its point |
902 | | // of declaration (3.3.2) to the end of the while statement. |
903 | | // [...] |
904 | | // The object created in a condition is destroyed and created |
905 | | // with each iteration of the loop. |
906 | 0 | RunCleanupsScope ConditionScope(*this); |
907 | |
908 | 0 | if (S.getConditionVariable()) |
909 | 0 | EmitDecl(*S.getConditionVariable()); |
910 | | |
911 | | // Evaluate the conditional in the while header. C99 6.8.5.1: The |
912 | | // evaluation of the controlling expression takes place before each |
913 | | // execution of the loop body. |
914 | 0 | llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond()); |
915 | | |
916 | | // while(1) is common, avoid extra exit blocks. Be sure |
917 | | // to correctly handle break/continue though. |
918 | 0 | llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal); |
919 | 0 | bool CondIsConstInt = C != nullptr; |
920 | 0 | bool EmitBoolCondBranch = !CondIsConstInt || !C->isOne(); |
921 | 0 | const SourceRange &R = S.getSourceRange(); |
922 | 0 | LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(), |
923 | 0 | WhileAttrs, SourceLocToDebugLoc(R.getBegin()), |
924 | 0 | SourceLocToDebugLoc(R.getEnd()), |
925 | 0 | checkIfLoopMustProgress(CondIsConstInt)); |
926 | | |
927 | | // As long as the condition is true, go to the loop body. |
928 | 0 | llvm::BasicBlock *LoopBody = createBasicBlock("while.body"); |
929 | 0 | if (EmitBoolCondBranch) { |
930 | 0 | llvm::BasicBlock *ExitBlock = LoopExit.getBlock(); |
931 | 0 | if (ConditionScope.requiresCleanups()) |
932 | 0 | ExitBlock = createBasicBlock("while.exit"); |
933 | 0 | llvm::MDNode *Weights = |
934 | 0 | createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())); |
935 | 0 | if (!Weights && CGM.getCodeGenOpts().OptimizationLevel) |
936 | 0 | BoolCondVal = emitCondLikelihoodViaExpectIntrinsic( |
937 | 0 | BoolCondVal, Stmt::getLikelihood(S.getBody())); |
938 | 0 | Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights); |
939 | |
940 | 0 | if (ExitBlock != LoopExit.getBlock()) { |
941 | 0 | EmitBlock(ExitBlock); |
942 | 0 | EmitBranchThroughCleanup(LoopExit); |
943 | 0 | } |
944 | 0 | } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) { |
945 | 0 | CGM.getDiags().Report(A->getLocation(), |
946 | 0 | diag::warn_attribute_has_no_effect_on_infinite_loop) |
947 | 0 | << A << A->getRange(); |
948 | 0 | CGM.getDiags().Report( |
949 | 0 | S.getWhileLoc(), |
950 | 0 | diag::note_attribute_has_no_effect_on_infinite_loop_here) |
951 | 0 | << SourceRange(S.getWhileLoc(), S.getRParenLoc()); |
952 | 0 | } |
953 | | |
954 | | // Emit the loop body. We have to emit this in a cleanup scope |
955 | | // because it might be a singleton DeclStmt. |
956 | 0 | { |
957 | 0 | RunCleanupsScope BodyScope(*this); |
958 | 0 | EmitBlock(LoopBody); |
959 | 0 | incrementProfileCounter(&S); |
960 | 0 | EmitStmt(S.getBody()); |
961 | 0 | } |
962 | |
963 | 0 | BreakContinueStack.pop_back(); |
964 | | |
965 | | // Immediately force cleanup. |
966 | 0 | ConditionScope.ForceCleanup(); |
967 | |
968 | 0 | EmitStopPoint(&S); |
969 | | // Branch to the loop header again. |
970 | 0 | EmitBranch(LoopHeader.getBlock()); |
971 | |
972 | 0 | LoopStack.pop(); |
973 | | |
974 | | // Emit the exit block. |
975 | 0 | EmitBlock(LoopExit.getBlock(), true); |
976 | | |
977 | | // If we skipped emitting the conditional branch, the LoopHeader block is
978 | | // typically just a forwarding branch; try to erase it.
979 | 0 | if (!EmitBoolCondBranch) |
980 | 0 | SimplifyForwardingBlocks(LoopHeader.getBlock()); |
981 | 0 | } |
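 | | // For illustration: for `while (1) { ... }` the condition folds to a
 | | // constant true, so EmitBoolCondBranch is false, no conditional branch is
 | | // emitted, and the now-trivial header block can be folded away by
 | | // SimplifyForwardingBlocks above.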
982 | | |
983 | | void CodeGenFunction::EmitDoStmt(const DoStmt &S, |
984 | 0 | ArrayRef<const Attr *> DoAttrs) { |
985 | 0 | JumpDest LoopExit = getJumpDestInCurrentScope("do.end"); |
986 | 0 | JumpDest LoopCond = getJumpDestInCurrentScope("do.cond"); |
987 | |
988 | 0 | uint64_t ParentCount = getCurrentProfileCount(); |
989 | | |
990 | | // Store the blocks to use for break and continue. |
991 | 0 | BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond)); |
992 | | |
993 | | // Emit the body of the loop. |
994 | 0 | llvm::BasicBlock *LoopBody = createBasicBlock("do.body"); |
995 | |
996 | 0 | EmitBlockWithFallThrough(LoopBody, &S); |
997 | 0 | { |
998 | 0 | RunCleanupsScope BodyScope(*this); |
999 | 0 | EmitStmt(S.getBody()); |
1000 | 0 | } |
1001 | |
1002 | 0 | EmitBlock(LoopCond.getBlock()); |
1003 | | |
1004 | | // C99 6.8.5.2: "The evaluation of the controlling expression takes place |
1005 | | // after each execution of the loop body." |
1006 | | |
1007 | | // Evaluate the conditional in the while header. |
1008 | | // C99 6.8.5p2/p4: The first substatement is executed if the expression |
1009 | | // compares unequal to 0. The condition must be a scalar type. |
1010 | 0 | llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond()); |
1011 | |
1012 | 0 | BreakContinueStack.pop_back(); |
1013 | | |
1014 | | // "do {} while (0)" is common in macros, avoid extra blocks. Be sure |
1015 | | // to correctly handle break/continue though. |
1016 | 0 | llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal); |
1017 | 0 | bool CondIsConstInt = C; |
1018 | 0 | bool EmitBoolCondBranch = !C || !C->isZero(); |
1019 | |
1020 | 0 | const SourceRange &R = S.getSourceRange(); |
1021 | 0 | LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs, |
1022 | 0 | SourceLocToDebugLoc(R.getBegin()), |
1023 | 0 | SourceLocToDebugLoc(R.getEnd()), |
1024 | 0 | checkIfLoopMustProgress(CondIsConstInt)); |
1025 | | |
1026 | | // As long as the condition is true, iterate the loop. |
1027 | 0 | if (EmitBoolCondBranch) { |
1028 | 0 | uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount; |
1029 | 0 | Builder.CreateCondBr( |
1030 | 0 | BoolCondVal, LoopBody, LoopExit.getBlock(), |
1031 | 0 | createProfileWeightsForLoop(S.getCond(), BackedgeCount)); |
1032 | 0 | } |
1033 | |
1034 | 0 | LoopStack.pop(); |
1035 | | |
1036 | | // Emit the exit block. |
1037 | 0 | EmitBlock(LoopExit.getBlock()); |
1038 | | |
1039 | | // If we skipped emitting the conditional branch, the DoCond block is
1040 | | // typically just a forwarding branch; try to erase it.
1041 | 0 | if (!EmitBoolCondBranch) |
1042 | 0 | SimplifyForwardingBlocks(LoopCond.getBlock()); |
1043 | 0 | } |
1044 | | |
1045 | | void CodeGenFunction::EmitForStmt(const ForStmt &S, |
1046 | 0 | ArrayRef<const Attr *> ForAttrs) { |
1047 | 0 | JumpDest LoopExit = getJumpDestInCurrentScope("for.end"); |
1048 | |
1049 | 0 | LexicalScope ForScope(*this, S.getSourceRange()); |
1050 | | |
1051 | | // Evaluate the first part before the loop. |
1052 | 0 | if (S.getInit()) |
1053 | 0 | EmitStmt(S.getInit()); |
1054 | | |
1055 | | // Start the loop with a block that tests the condition. |
1056 | | // If there's an increment, the continue scope will be overwritten |
1057 | | // later. |
1058 | 0 | JumpDest CondDest = getJumpDestInCurrentScope("for.cond"); |
1059 | 0 | llvm::BasicBlock *CondBlock = CondDest.getBlock(); |
1060 | 0 | EmitBlock(CondBlock); |
1061 | |
1062 | 0 | Expr::EvalResult Result; |
1063 | 0 | bool CondIsConstInt = |
1064 | 0 | !S.getCond() || S.getCond()->EvaluateAsInt(Result, getContext()); |
1065 | |
1066 | 0 | const SourceRange &R = S.getSourceRange(); |
1067 | 0 | LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs, |
1068 | 0 | SourceLocToDebugLoc(R.getBegin()), |
1069 | 0 | SourceLocToDebugLoc(R.getEnd()), |
1070 | 0 | checkIfLoopMustProgress(CondIsConstInt)); |
1071 | | |
1072 | | // Create a cleanup scope for the condition variable cleanups. |
1073 | 0 | LexicalScope ConditionScope(*this, S.getSourceRange()); |
1074 | | |
1075 | | // If the for loop doesn't have an increment we can just use the condition as |
1076 | | // the continue block. Otherwise, if there is no condition variable, we can |
1077 | | // form the continue block now. If there is a condition variable, we can't |
1078 | | // form the continue block until after we've emitted the condition, because |
1079 | | // the condition is in scope in the increment, but Sema's jump diagnostics |
1080 | | // ensure that there are no continues from the condition variable that jump |
1081 | | // to the loop increment. |
1082 | 0 | JumpDest Continue; |
1083 | 0 | if (!S.getInc()) |
1084 | 0 | Continue = CondDest; |
1085 | 0 | else if (!S.getConditionVariable()) |
1086 | 0 | Continue = getJumpDestInCurrentScope("for.inc"); |
1087 | 0 | BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); |
1088 | |
1089 | 0 | if (S.getCond()) { |
1090 | | // If the for statement has a condition scope, emit the local variable |
1091 | | // declaration. |
1092 | 0 | if (S.getConditionVariable()) { |
1093 | 0 | EmitDecl(*S.getConditionVariable()); |
1094 | | |
1095 | | // We have entered the condition variable's scope, so we're now able to |
1096 | | // jump to the continue block. |
1097 | 0 | Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest; |
1098 | 0 | BreakContinueStack.back().ContinueBlock = Continue; |
1099 | 0 | } |
1100 | |
1101 | 0 | llvm::BasicBlock *ExitBlock = LoopExit.getBlock(); |
1102 | | // If there are any cleanups between here and the loop-exit scope, |
1103 | | // create a block to stage a loop exit along. |
1104 | 0 | if (ForScope.requiresCleanups()) |
1105 | 0 | ExitBlock = createBasicBlock("for.cond.cleanup"); |
1106 | | |
1107 | | // As long as the condition is true, iterate the loop. |
1108 | 0 | llvm::BasicBlock *ForBody = createBasicBlock("for.body"); |
1109 | | |
1110 | | // C99 6.8.5p2/p4: The first substatement is executed if the expression |
1111 | | // compares unequal to 0. The condition must be a scalar type. |
1112 | 0 | llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond()); |
1113 | 0 | llvm::MDNode *Weights = |
1114 | 0 | createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())); |
1115 | 0 | if (!Weights && CGM.getCodeGenOpts().OptimizationLevel) |
1116 | 0 | BoolCondVal = emitCondLikelihoodViaExpectIntrinsic( |
1117 | 0 | BoolCondVal, Stmt::getLikelihood(S.getBody())); |
1118 | |
1119 | 0 | Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights); |
1120 | |
1121 | 0 | if (ExitBlock != LoopExit.getBlock()) { |
1122 | 0 | EmitBlock(ExitBlock); |
1123 | 0 | EmitBranchThroughCleanup(LoopExit); |
1124 | 0 | } |
1125 | |
1126 | 0 | EmitBlock(ForBody); |
1127 | 0 | } else { |
1128 | | // Treat it as a non-zero constant. Don't even create a new block for the |
1129 | | // body, just fall into it. |
1130 | 0 | } |
1131 | 0 | incrementProfileCounter(&S); |
1132 | |
1133 | 0 | { |
1134 | | // Create a separate cleanup scope for the body, in case it is not |
1135 | | // a compound statement. |
1136 | 0 | RunCleanupsScope BodyScope(*this); |
1137 | 0 | EmitStmt(S.getBody()); |
1138 | 0 | } |
1139 | | |
1140 | | // If there is an increment, emit it next. |
1141 | 0 | if (S.getInc()) { |
1142 | 0 | EmitBlock(Continue.getBlock()); |
1143 | 0 | EmitStmt(S.getInc()); |
1144 | 0 | } |
1145 | |
1146 | 0 | BreakContinueStack.pop_back(); |
1147 | |
1148 | 0 | ConditionScope.ForceCleanup(); |
1149 | |
1150 | 0 | EmitStopPoint(&S); |
1151 | 0 | EmitBranch(CondBlock); |
1152 | |
1153 | 0 | ForScope.ForceCleanup(); |
1154 | |
1155 | 0 | LoopStack.pop(); |
1156 | | |
1157 | | // Emit the fall-through block. |
1158 | 0 | EmitBlock(LoopExit.getBlock(), true); |
1159 | 0 | } |
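 | | // For illustration: `for (;;) { ... }` has no condition, so no for.body
 | | // block is created; the body is emitted directly into the for.cond block
 | | // and the branch at the end of the loop forms the back edge to it.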
1160 | | |
1161 | | void |
1162 | | CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S, |
1163 | 0 | ArrayRef<const Attr *> ForAttrs) { |
1164 | 0 | JumpDest LoopExit = getJumpDestInCurrentScope("for.end"); |
1165 | |
1166 | 0 | LexicalScope ForScope(*this, S.getSourceRange()); |
1167 | | |
1168 | | // Evaluate the first pieces before the loop. |
1169 | 0 | if (S.getInit()) |
1170 | 0 | EmitStmt(S.getInit()); |
1171 | 0 | EmitStmt(S.getRangeStmt()); |
1172 | 0 | EmitStmt(S.getBeginStmt()); |
1173 | 0 | EmitStmt(S.getEndStmt()); |
1174 | | |
1175 | | // Start the loop with a block that tests the condition. |
1176 | | // If there's an increment, the continue scope will be overwritten |
1177 | | // later. |
1178 | 0 | llvm::BasicBlock *CondBlock = createBasicBlock("for.cond"); |
1179 | 0 | EmitBlock(CondBlock); |
1180 | |
1181 | 0 | const SourceRange &R = S.getSourceRange(); |
1182 | 0 | LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs, |
1183 | 0 | SourceLocToDebugLoc(R.getBegin()), |
1184 | 0 | SourceLocToDebugLoc(R.getEnd())); |
1185 | | |
1186 | | // If there are any cleanups between here and the loop-exit scope, |
1187 | | // create a block to stage a loop exit along. |
1188 | 0 | llvm::BasicBlock *ExitBlock = LoopExit.getBlock(); |
1189 | 0 | if (ForScope.requiresCleanups()) |
1190 | 0 | ExitBlock = createBasicBlock("for.cond.cleanup"); |
1191 | | |
1192 | | // The loop body, consisting of the specified body and the loop variable. |
1193 | 0 | llvm::BasicBlock *ForBody = createBasicBlock("for.body"); |
1194 | | |
1195 | | // The body is executed if the expression, contextually converted |
1196 | | // to bool, is true. |
1197 | 0 | llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond()); |
1198 | 0 | llvm::MDNode *Weights = |
1199 | 0 | createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())); |
1200 | 0 | if (!Weights && CGM.getCodeGenOpts().OptimizationLevel) |
1201 | 0 | BoolCondVal = emitCondLikelihoodViaExpectIntrinsic( |
1202 | 0 | BoolCondVal, Stmt::getLikelihood(S.getBody())); |
1203 | 0 | Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights); |
1204 | |
1205 | 0 | if (ExitBlock != LoopExit.getBlock()) { |
1206 | 0 | EmitBlock(ExitBlock); |
1207 | 0 | EmitBranchThroughCleanup(LoopExit); |
1208 | 0 | } |
1209 | |
1210 | 0 | EmitBlock(ForBody); |
1211 | 0 | incrementProfileCounter(&S); |
1212 | | |
1213 | | // Create a block for the increment. In case of a 'continue', we jump there. |
1214 | 0 | JumpDest Continue = getJumpDestInCurrentScope("for.inc"); |
1215 | | |
1216 | | // Store the blocks to use for break and continue. |
1217 | 0 | BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); |
1218 | |
1219 | 0 | { |
1220 | | // Create a separate cleanup scope for the loop variable and body. |
1221 | 0 | LexicalScope BodyScope(*this, S.getSourceRange()); |
1222 | 0 | EmitStmt(S.getLoopVarStmt()); |
1223 | 0 | EmitStmt(S.getBody()); |
1224 | 0 | } |
1225 | |
1226 | 0 | EmitStopPoint(&S); |
1227 | | // If there is an increment, emit it next. |
1228 | 0 | EmitBlock(Continue.getBlock()); |
1229 | 0 | EmitStmt(S.getInc()); |
1230 | |
1231 | 0 | BreakContinueStack.pop_back(); |
1232 | |
1233 | 0 | EmitBranch(CondBlock); |
1234 | |
1235 | 0 | ForScope.ForceCleanup(); |
1236 | |
1237 | 0 | LoopStack.pop(); |
1238 | | |
1239 | | // Emit the fall-through block. |
1240 | 0 | EmitBlock(LoopExit.getBlock(), true); |
1241 | 0 | } |
1242 | | |
1243 | 0 | void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) { |
1244 | 0 | if (RV.isScalar()) { |
1245 | 0 | Builder.CreateStore(RV.getScalarVal(), ReturnValue); |
1246 | 0 | } else if (RV.isAggregate()) { |
1247 | 0 | LValue Dest = MakeAddrLValue(ReturnValue, Ty); |
1248 | 0 | LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty); |
1249 | 0 | EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue()); |
1250 | 0 | } else { |
1251 | 0 | EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty), |
1252 | 0 | /*init*/ true); |
1253 | 0 | } |
1254 | 0 | EmitBranchThroughCleanup(ReturnBlock); |
1255 | 0 | } |
1256 | | |
1257 | | namespace { |
1258 | | // RAII struct used to save and restore a return statement's result expression.
1259 | | struct SaveRetExprRAII { |
1260 | | SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF) |
1261 | 0 | : OldRetExpr(CGF.RetExpr), CGF(CGF) { |
1262 | 0 | CGF.RetExpr = RetExpr; |
1263 | 0 | } |
1264 | 0 | ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; } |
1265 | | const Expr *OldRetExpr; |
1266 | | CodeGenFunction &CGF; |
1267 | | }; |
1268 | | } // namespace |
1269 | | |
1270 | | /// If we have 'return f(...);', where both caller and callee are SwiftAsync, |
1271 | | /// codegen it as 'tail call ...; ret void;'. |
1272 | | static void makeTailCallIfSwiftAsync(const CallExpr *CE, CGBuilderTy &Builder, |
1273 | 0 | const CGFunctionInfo *CurFnInfo) { |
1274 | 0 | auto calleeQualType = CE->getCallee()->getType(); |
1275 | 0 | const FunctionType *calleeType = nullptr; |
1276 | 0 | if (calleeQualType->isFunctionPointerType() || |
1277 | 0 | calleeQualType->isFunctionReferenceType() || |
1278 | 0 | calleeQualType->isBlockPointerType() || |
1279 | 0 | calleeQualType->isMemberFunctionPointerType()) { |
1280 | 0 | calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>(); |
1281 | 0 | } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) { |
1282 | 0 | calleeType = ty; |
1283 | 0 | } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) { |
1284 | 0 | if (auto methodDecl = CMCE->getMethodDecl()) { |
1285 | | // getMethodDecl() doesn't handle member pointers at the moment. |
1286 | 0 | calleeType = methodDecl->getType()->castAs<FunctionType>(); |
1287 | 0 | } else { |
1288 | 0 | return; |
1289 | 0 | } |
1290 | 0 | } else { |
1291 | 0 | return; |
1292 | 0 | } |
1293 | 0 | if (calleeType->getCallConv() == CallingConv::CC_SwiftAsync && |
1294 | 0 | (CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync)) { |
1295 | 0 | auto CI = cast<llvm::CallInst>(&Builder.GetInsertBlock()->back()); |
1296 | 0 | CI->setTailCallKind(llvm::CallInst::TCK_MustTail); |
1297 | 0 | Builder.CreateRetVoid(); |
1298 | 0 | Builder.ClearInsertionPoint(); |
1299 | 0 | } |
1300 | 0 | } |
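A source-level sketch of the pattern makeTailCallIfSwiftAsync looks for (illustrative; caller and callee are invented names):

    __attribute__((swiftasynccall)) void callee(void *ctx);
    __attribute__((swiftasynccall)) void caller(void *ctx) {
      return callee(ctx);  // emitted as a musttail call followed by 'ret void'
    }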
1301 | | |
1302 | | /// EmitReturnStmt - Note that due to GCC extensions, this can have an operand |
1303 | | /// if the function returns void, or may be missing one if the function returns |
1304 | | /// non-void. Fun stuff :). |
1305 | 0 | void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) { |
1306 | 0 | if (requiresReturnValueCheck()) { |
1307 | 0 | llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc()); |
1308 | 0 | auto *SLocPtr = |
1309 | 0 | new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false, |
1310 | 0 | llvm::GlobalVariable::PrivateLinkage, SLoc); |
1311 | 0 | SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
1312 | 0 | CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr); |
1313 | 0 | assert(ReturnLocation.isValid() && "No valid return location"); |
1314 | 0 | Builder.CreateStore(SLocPtr, ReturnLocation); |
1315 | 0 | } |
1316 | | |
1317 | | // Returning from an outlined SEH helper is UB, and we already warn on it. |
1318 | 0 | if (IsOutlinedSEHHelper) { |
1319 | 0 | Builder.CreateUnreachable(); |
1320 | 0 | Builder.ClearInsertionPoint(); |
1321 | 0 | } |
1322 | | |
1323 | | // Emit the result value, even if unused, to evaluate the side effects. |
1324 | 0 | const Expr *RV = S.getRetValue(); |
1325 | | |
1326 | | // Record the result expression of the return statement. The recorded |
1327 | | // expression is used to determine whether a block capture's lifetime should |
1328 | | // end at the end of the full expression as opposed to the end of the scope |
1329 | | // enclosing the block expression. |
1330 | | // |
1331 | | // This permits a small, easily-implemented exception to our over-conservative |
1332 | | // rules about not jumping to statements following block literals with |
1333 | | // non-trivial cleanups. |
1334 | 0 | SaveRetExprRAII SaveRetExpr(RV, *this); |
1335 | |
|
1336 | 0 | RunCleanupsScope cleanupScope(*this); |
1337 | 0 | if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV)) |
1338 | 0 | RV = EWC->getSubExpr(); |
1339 | | // FIXME: Clean this up by using an LValue for ReturnTemp, |
1340 | | // EmitStoreThroughLValue, and EmitAnyExpr. |
1341 | | // Check if the NRVO candidate was not globalized in OpenMP mode. |
1342 | 0 | if (getLangOpts().ElideConstructors && S.getNRVOCandidate() && |
1343 | 0 | S.getNRVOCandidate()->isNRVOVariable() && |
1344 | 0 | (!getLangOpts().OpenMP || |
1345 | 0 | !CGM.getOpenMPRuntime() |
1346 | 0 | .getAddressOfLocalVariable(*this, S.getNRVOCandidate()) |
1347 | 0 | .isValid())) { |
1348 | | // Apply the named return value optimization for this return statement, |
1349 | | // which means doing nothing: the appropriate result has already been |
1350 | | // constructed into the NRVO variable. |
1351 | | |
1352 | | // If there is an NRVO flag for this variable, set it to 1 to indicate
1353 | | // that the cleanup code should not destroy the variable. |
1354 | 0 | if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()]) |
1355 | 0 | Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag); |
1356 | 0 | } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) { |
1357 | | // Make sure not to return anything, but evaluate the expression |
1358 | | // for side effects. |
1359 | 0 | if (RV) { |
1360 | 0 | EmitAnyExpr(RV); |
1361 | 0 | if (auto *CE = dyn_cast<CallExpr>(RV)) |
1362 | 0 | makeTailCallIfSwiftAsync(CE, Builder, CurFnInfo); |
1363 | 0 | } |
1364 | 0 | } else if (!RV) { |
1365 | | // Do nothing (return value is left uninitialized) |
1366 | 0 | } else if (FnRetTy->isReferenceType()) { |
1367 | | // If this function returns a reference, take the address of the expression |
1368 | | // rather than the value. |
1369 | 0 | RValue Result = EmitReferenceBindingToExpr(RV); |
1370 | 0 | Builder.CreateStore(Result.getScalarVal(), ReturnValue); |
1371 | 0 | } else { |
1372 | 0 | switch (getEvaluationKind(RV->getType())) { |
1373 | 0 | case TEK_Scalar: |
1374 | 0 | Builder.CreateStore(EmitScalarExpr(RV), ReturnValue); |
1375 | 0 | break; |
1376 | 0 | case TEK_Complex: |
1377 | 0 | EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()), |
1378 | 0 | /*isInit*/ true); |
1379 | 0 | break; |
1380 | 0 | case TEK_Aggregate: |
1381 | 0 | EmitAggExpr(RV, AggValueSlot::forAddr( |
1382 | 0 | ReturnValue, Qualifiers(), |
1383 | 0 | AggValueSlot::IsDestructed, |
1384 | 0 | AggValueSlot::DoesNotNeedGCBarriers, |
1385 | 0 | AggValueSlot::IsNotAliased, |
1386 | 0 | getOverlapForReturnValue())); |
1387 | 0 | break; |
1388 | 0 | } |
1389 | 0 | } |
1390 | | |
1391 | 0 | ++NumReturnExprs; |
1392 | 0 | if (!RV || RV->isEvaluatable(getContext())) |
1393 | 0 | ++NumSimpleReturnExprs; |
1394 | |
|
1395 | 0 | cleanupScope.ForceCleanup(); |
1396 | 0 | EmitBranchThroughCleanup(ReturnBlock); |
1397 | 0 | } |
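A few small examples of the return paths handled above (editorial sketch; Widget, make, and the bodies are invented):

    struct Widget { int v = 0; };
    Widget make() {
      Widget w;            // NRVO candidate: constructed directly in the return
      return w;            // slot, so the return statement itself emits no copy
    }
    int f()             { return 42; }     // TEK_Scalar: store to ReturnValue
    _Complex double g() { return 1.0; }    // TEK_Complex: EmitComplexExprIntoLValue
    Widget h()          { return make(); } // TEK_Aggregate: EmitAggExpr into the slot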
1398 | | |
1399 | 0 | void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) { |
1400 | | // As long as debug info is modeled with instructions, we have to ensure we |
1401 | | // have a place to insert here and write the stop point here. |
1402 | 0 | if (HaveInsertPoint()) |
1403 | 0 | EmitStopPoint(&S); |
1404 | |
|
1405 | 0 | for (const auto *I : S.decls()) |
1406 | 0 | EmitDecl(*I); |
1407 | 0 | } |
1408 | | |
1409 | 0 | void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) { |
1410 | 0 | assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!"); |
1411 | | |
1412 | | // If this code is reachable then emit a stop point (if generating |
1413 | | // debug info). We have to do this ourselves because we are on the |
1414 | | // "simple" statement path. |
1415 | 0 | if (HaveInsertPoint()) |
1416 | 0 | EmitStopPoint(&S); |
1417 | |
|
1418 | 0 | EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock); |
1419 | 0 | } |
1420 | | |
1421 | 0 | void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) { |
1422 | 0 | assert(!BreakContinueStack.empty() && "continue stmt not in a loop!"); |
1423 | | |
1424 | | // If this code is reachable then emit a stop point (if generating |
1425 | | // debug info). We have to do this ourselves because we are on the |
1426 | | // "simple" statement path. |
1427 | 0 | if (HaveInsertPoint()) |
1428 | 0 | EmitStopPoint(&S); |
1429 | |
|
1430 | 0 | EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock); |
1431 | 0 | } |
1432 | | |
1433 | | /// EmitCaseStmtRange - If the case statement range is not too big, then
1434 | | /// add multiple cases to the switch instruction, one for each value within
1435 | | /// the range. If the range is too big, then emit an "if" condition check.
1436 | | void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S, |
1437 | 0 | ArrayRef<const Attr *> Attrs) { |
1438 | 0 | assert(S.getRHS() && "Expected RHS value in CaseStmt"); |
1439 | | |
1440 | 0 | llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext()); |
1441 | 0 | llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext()); |
1442 | | |
1443 | | // Emit the code for this case. We do this first to make sure it is |
1444 | | // properly chained from our predecessor before generating the |
1445 | | // switch machinery to enter this block. |
1446 | 0 | llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb"); |
1447 | 0 | EmitBlockWithFallThrough(CaseDest, &S); |
1448 | 0 | EmitStmt(S.getSubStmt()); |
1449 | | |
1450 | | // If range is empty, do nothing. |
1451 | 0 | if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS)) |
1452 | 0 | return; |
1453 | | |
1454 | 0 | Stmt::Likelihood LH = Stmt::getLikelihood(Attrs); |
1455 | 0 | llvm::APInt Range = RHS - LHS; |
1456 | | // FIXME: parameters such as this should not be hardcoded. |
1457 | 0 | if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) { |
1458 | | // Range is small enough to add multiple switch instruction cases. |
1459 | 0 | uint64_t Total = getProfileCount(&S); |
1460 | 0 | unsigned NCases = Range.getZExtValue() + 1; |
1461 | | // We only have one region counter for the entire set of cases here, so we |
1462 | | // need to divide the weights evenly between the generated cases, ensuring |
1463 | | // that the total weight is preserved. E.g., a weight of 5 over three cases |
1464 | | // will be distributed as weights of 2, 2, and 1. |
1465 | 0 | uint64_t Weight = Total / NCases, Rem = Total % NCases; |
1466 | 0 | for (unsigned I = 0; I != NCases; ++I) { |
1467 | 0 | if (SwitchWeights) |
1468 | 0 | SwitchWeights->push_back(Weight + (Rem ? 1 : 0)); |
1469 | 0 | else if (SwitchLikelihood) |
1470 | 0 | SwitchLikelihood->push_back(LH); |
1471 | |
|
1472 | 0 | if (Rem) |
1473 | 0 | Rem--; |
1474 | 0 | SwitchInsn->addCase(Builder.getInt(LHS), CaseDest); |
1475 | 0 | ++LHS; |
1476 | 0 | } |
1477 | 0 | return; |
1478 | 0 | } |
1479 | | |
1480 | | // The range is too big. Emit "if" condition into a new block, |
1481 | | // making sure to save and restore the current insertion point. |
1482 | 0 | llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock(); |
1483 | | |
1484 | | // Push this test onto the chain of range checks (which terminates |
1485 | | // in the default basic block). The switch's default will be changed |
1486 | | // to the top of this chain after switch emission is complete. |
1487 | 0 | llvm::BasicBlock *FalseDest = CaseRangeBlock; |
1488 | 0 | CaseRangeBlock = createBasicBlock("sw.caserange"); |
1489 | |
|
1490 | 0 | CurFn->insert(CurFn->end(), CaseRangeBlock); |
1491 | 0 | Builder.SetInsertPoint(CaseRangeBlock); |
1492 | | |
1493 | | // Emit range check. |
1494 | 0 | llvm::Value *Diff = |
1495 | 0 | Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS)); |
1496 | 0 | llvm::Value *Cond = |
1497 | 0 | Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds"); |
1498 | |
|
1499 | 0 | llvm::MDNode *Weights = nullptr; |
1500 | 0 | if (SwitchWeights) { |
1501 | 0 | uint64_t ThisCount = getProfileCount(&S); |
1502 | 0 | uint64_t DefaultCount = (*SwitchWeights)[0]; |
1503 | 0 | Weights = createProfileWeights(ThisCount, DefaultCount); |
1504 | | |
1505 | | // Since we're chaining the switch default through each large case range, we |
1506 | | // need to update the weight for the default, i.e., the first case, to include
1507 | | // this case. |
1508 | 0 | (*SwitchWeights)[0] += ThisCount; |
1509 | 0 | } else if (SwitchLikelihood) |
1510 | 0 | Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH); |
1511 | |
|
1512 | 0 | Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights); |
1513 | | |
1514 | | // Restore the appropriate insertion point. |
1515 | 0 | if (RestoreBB) |
1516 | 0 | Builder.SetInsertPoint(RestoreBB); |
1517 | 0 | else |
1518 | 0 | Builder.ClearInsertionPoint(); |
1519 | 0 | } |
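An illustrative pair of GNU case ranges showing the two strategies above (n, small, and big are invented names; the 64-value threshold and the weight arithmetic come from the code):

    switch (n) {
    case 1 ... 3:                // range < 64: expanded into individual switch
      small(); break;            // cases 1, 2, 3 that all target one "sw.bb"
    case 10 ... 9999:            // range too large: emitted as a chained check,
      big(); break;              // roughly 'if ((n - 10) <= 9989) goto sw.bb'
    }
    // Weight splitting for the small range: with Total = 5 and NCases = 3,
    // Weight = 5 / 3 = 1 and Rem = 5 % 3 = 2, so the emitted per-case weights
    // are 2, 2, 1 and their sum is preserved.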
1520 | | |
1521 | | void CodeGenFunction::EmitCaseStmt(const CaseStmt &S, |
1522 | 0 | ArrayRef<const Attr *> Attrs) { |
1523 | | // If there is no enclosing switch instance that we're aware of, then this |
1524 | | // case statement and its block can be elided. This situation only happens |
1525 | | // when we've constant-folded the switch, are emitting the constant case, |
1526 | | // and part of the constant case includes another case statement. For |
1527 | | // instance: switch (4) { case 4: do { case 5: } while (1); } |
1528 | 0 | if (!SwitchInsn) { |
1529 | 0 | EmitStmt(S.getSubStmt()); |
1530 | 0 | return; |
1531 | 0 | } |
1532 | | |
1533 | | // Handle case ranges. |
1534 | 0 | if (S.getRHS()) { |
1535 | 0 | EmitCaseStmtRange(S, Attrs); |
1536 | 0 | return; |
1537 | 0 | } |
1538 | | |
1539 | 0 | llvm::ConstantInt *CaseVal = |
1540 | 0 | Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext())); |
1541 | | |
1542 | | // Emit debuginfo for the case value if it is an enum value. |
1543 | 0 | const ConstantExpr *CE; |
1544 | 0 | if (auto ICE = dyn_cast<ImplicitCastExpr>(S.getLHS())) |
1545 | 0 | CE = dyn_cast<ConstantExpr>(ICE->getSubExpr()); |
1546 | 0 | else |
1547 | 0 | CE = dyn_cast<ConstantExpr>(S.getLHS()); |
1548 | 0 | if (CE) { |
1549 | 0 | if (auto DE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) |
1550 | 0 | if (CGDebugInfo *Dbg = getDebugInfo()) |
1551 | 0 | if (CGM.getCodeGenOpts().hasReducedDebugInfo()) |
1552 | 0 | Dbg->EmitGlobalVariable(DE->getDecl(), |
1553 | 0 | APValue(llvm::APSInt(CaseVal->getValue()))); |
1554 | 0 | } |
1555 | |
|
1556 | 0 | if (SwitchLikelihood) |
1557 | 0 | SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs)); |
1558 | | |
1559 | | // If the body of the case is just a 'break', try not to emit an empty block.
1560 | | // If we're profiling or we're not optimizing, leave the block in for better |
1561 | | // debug and coverage analysis. |
1562 | 0 | if (!CGM.getCodeGenOpts().hasProfileClangInstr() && |
1563 | 0 | CGM.getCodeGenOpts().OptimizationLevel > 0 && |
1564 | 0 | isa<BreakStmt>(S.getSubStmt())) { |
1565 | 0 | JumpDest Block = BreakContinueStack.back().BreakBlock; |
1566 | | |
1567 | | // Only do this optimization if there are no cleanups that need emitting. |
1568 | 0 | if (isObviouslyBranchWithoutCleanups(Block)) { |
1569 | 0 | if (SwitchWeights) |
1570 | 0 | SwitchWeights->push_back(getProfileCount(&S)); |
1571 | 0 | SwitchInsn->addCase(CaseVal, Block.getBlock()); |
1572 | | |
1573 | | // If there was a fallthrough into this case, make sure to redirect it to |
1574 | | // the end of the switch as well. |
1575 | 0 | if (Builder.GetInsertBlock()) { |
1576 | 0 | Builder.CreateBr(Block.getBlock()); |
1577 | 0 | Builder.ClearInsertionPoint(); |
1578 | 0 | } |
1579 | 0 | return; |
1580 | 0 | } |
1581 | 0 | } |
1582 | | |
1583 | 0 | llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb"); |
1584 | 0 | EmitBlockWithFallThrough(CaseDest, &S); |
1585 | 0 | if (SwitchWeights) |
1586 | 0 | SwitchWeights->push_back(getProfileCount(&S)); |
1587 | 0 | SwitchInsn->addCase(CaseVal, CaseDest); |
1588 | | |
1589 | | // Recursively emitting the statement is acceptable, but is not wonderful for |
1590 | | // code where we have many case statements nested together, i.e.: |
1591 | | // case 1: |
1592 | | // case 2: |
1593 | | // case 3: etc. |
1594 | | // Handling this recursively will create a new block for each case statement |
1596 | | // that falls through to the next case, which is IR-intensive. It also causes
1596 | | // deep recursion which can run into stack depth limitations. Handle |
1597 | | // sequential non-range case statements specially. |
1598 | | // |
1599 | | // TODO When the next case has a likelihood attribute the code returns to the |
1600 | | // recursive algorithm. Maybe improve this case if it becomes common practice |
1601 | | // to use a lot of attributes. |
1602 | 0 | const CaseStmt *CurCase = &S; |
1603 | 0 | const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt()); |
1604 | | |
1605 | | // Otherwise, iteratively add consecutive cases to this switch stmt. |
1606 | 0 | while (NextCase && NextCase->getRHS() == nullptr) { |
1607 | 0 | CurCase = NextCase; |
1608 | 0 | llvm::ConstantInt *CaseVal = |
1609 | 0 | Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext())); |
1610 | |
|
1611 | 0 | if (SwitchWeights) |
1612 | 0 | SwitchWeights->push_back(getProfileCount(NextCase)); |
1613 | 0 | if (CGM.getCodeGenOpts().hasProfileClangInstr()) { |
1614 | 0 | CaseDest = createBasicBlock("sw.bb"); |
1615 | 0 | EmitBlockWithFallThrough(CaseDest, CurCase); |
1616 | 0 | } |
1617 | | // Since this loop is only executed when the CaseStmt has no attributes |
1618 | | // use a hard-coded value. |
1619 | 0 | if (SwitchLikelihood) |
1620 | 0 | SwitchLikelihood->push_back(Stmt::LH_None); |
1621 | |
|
1622 | 0 | SwitchInsn->addCase(CaseVal, CaseDest); |
1623 | 0 | NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt()); |
1624 | 0 | } |
1625 | | |
1626 | | // Generate a stop point for debug info if the case statement is |
1627 | | // followed by a default statement. A fallthrough case before a |
1628 | | // default case gets its own branch target. |
1629 | 0 | if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass) |
1630 | 0 | EmitStopPoint(CurCase); |
1631 | | |
1632 | | // Normal default recursion for non-cases. |
1633 | 0 | EmitStmt(CurCase->getSubStmt()); |
1634 | 0 | } |
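A minimal example of the sequential-case handling described above (k and work are invented names):

    switch (k) {
    case 1:                      // without profile instrumentation, all three
    case 2:                      // case values are added to the SwitchInst
    case 3:                      // pointing at the same "sw.bb" block, handled
      work(); break;             // iteratively instead of one recursion per label
    }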
1635 | | |
1636 | | void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S, |
1637 | 0 | ArrayRef<const Attr *> Attrs) { |
1638 | | // If there is no enclosing switch instance that we're aware of, then this |
1639 | | // default statement can be elided. This situation only happens when we've |
1640 | | // constant-folded the switch. |
1641 | 0 | if (!SwitchInsn) { |
1642 | 0 | EmitStmt(S.getSubStmt()); |
1643 | 0 | return; |
1644 | 0 | } |
1645 | | |
1646 | 0 | llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest(); |
1647 | 0 | assert(DefaultBlock->empty() && |
1648 | 0 | "EmitDefaultStmt: Default block already defined?"); |
1649 | | |
1650 | 0 | if (SwitchLikelihood) |
1651 | 0 | SwitchLikelihood->front() = Stmt::getLikelihood(Attrs); |
1652 | |
|
1653 | 0 | EmitBlockWithFallThrough(DefaultBlock, &S); |
1654 | |
|
1655 | 0 | EmitStmt(S.getSubStmt()); |
1656 | 0 | } |
1657 | | |
1658 | | /// CollectStatementsForCase - Given the body of a 'switch' statement and a |
1659 | | /// constant value that is being switched on, see if we can dead code eliminate |
1660 | | /// the body of the switch to a simple series of statements to emit. Basically, |
1661 | | /// on a switch (5) we want to find these statements: |
1662 | | /// case 5: |
1663 | | /// printf(...); <-- |
1664 | | /// ++i; <-- |
1665 | | /// break; |
1666 | | /// |
1667 | | /// and add them to the ResultStmts vector. If it is unsafe to do this |
1668 | | /// transformation (for example, one of the elided statements contains a label |
1669 | | /// that might be jumped to), return CSFC_Failure. If we handled it and 'S' |
1670 | | /// should include statements after it (e.g. the printf() line is a substmt of |
1671 | | /// the case) then return CSFC_FallThrough. If we handled it and found a break |
1672 | | /// statement, then return CSFC_Success. |
1673 | | /// |
1674 | | /// If Case is non-null, then we are looking for the specified case, checking |
1675 | | /// that nothing we jump over contains labels. If Case is null, then we found |
1676 | | /// the case and are looking for the break. |
1677 | | /// |
1678 | | /// If the recursive walk actually finds our Case, then we set FoundCase to |
1679 | | /// true. |
1680 | | /// |
1681 | | enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success }; |
1682 | | static CSFC_Result CollectStatementsForCase(const Stmt *S, |
1683 | | const SwitchCase *Case, |
1684 | | bool &FoundCase, |
1685 | 0 | SmallVectorImpl<const Stmt*> &ResultStmts) { |
1686 | | // If this is a null statement, just succeed. |
1687 | 0 | if (!S) |
1688 | 0 | return Case ? CSFC_Success : CSFC_FallThrough; |
1689 | | |
1690 | | // If this is the switchcase (case 4: or default) that we're looking for, then |
1691 | | // we're in business. Just add the substatement. |
1692 | 0 | if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) { |
1693 | 0 | if (S == Case) { |
1694 | 0 | FoundCase = true; |
1695 | 0 | return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase, |
1696 | 0 | ResultStmts); |
1697 | 0 | } |
1698 | | |
1699 | | // Otherwise, this is some other case or default statement, just ignore it. |
1700 | 0 | return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase, |
1701 | 0 | ResultStmts); |
1702 | 0 | } |
1703 | | |
1704 | | // If we are in the live part of the code and we found our break statement, |
1705 | | // return a success! |
1706 | 0 | if (!Case && isa<BreakStmt>(S)) |
1707 | 0 | return CSFC_Success; |
1708 | | |
1709 | | // If this is a compound statement, then it might contain the SwitchCase, the
1710 | | // break, or neither. |
1711 | 0 | if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) { |
1712 | | // Handle this as two cases: we might be looking for the SwitchCase (if so |
1713 | | // the skipped statements must be skippable) or we might already have it. |
1714 | 0 | CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end(); |
1715 | 0 | bool StartedInLiveCode = FoundCase; |
1716 | 0 | unsigned StartSize = ResultStmts.size(); |
1717 | | |
1718 | | // If we've not found the case yet, scan through looking for it. |
1719 | 0 | if (Case) { |
1720 | | // Keep track of whether we see a skipped declaration. The code could be |
1721 | | // using the declaration even if it is skipped, so we can't optimize out |
1722 | | // the decl if the kept statements might refer to it. |
1723 | 0 | bool HadSkippedDecl = false; |
1724 | | |
1725 | | // If we're looking for the case, just see if we can skip each of the |
1726 | | // substatements. |
1727 | 0 | for (; Case && I != E; ++I) { |
1728 | 0 | HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I); |
1729 | |
|
1730 | 0 | switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) { |
1731 | 0 | case CSFC_Failure: return CSFC_Failure; |
1732 | 0 | case CSFC_Success: |
1733 | | // A successful result means either 1) that the statement doesn't
1734 | | // have the case and is skippable, or 2) that it does contain the case value
1735 | | // and also contains the break to exit the switch. In the latter case,
1736 | | // we just verify the rest of the statements are elidable.
1737 | 0 | if (FoundCase) { |
1738 | | // If we found the case and skipped declarations, we can't do the |
1739 | | // optimization. |
1740 | 0 | if (HadSkippedDecl) |
1741 | 0 | return CSFC_Failure; |
1742 | | |
1743 | 0 | for (++I; I != E; ++I) |
1744 | 0 | if (CodeGenFunction::ContainsLabel(*I, true)) |
1745 | 0 | return CSFC_Failure; |
1746 | 0 | return CSFC_Success; |
1747 | 0 | } |
1748 | 0 | break; |
1749 | 0 | case CSFC_FallThrough: |
1750 | | // If we have a fallthrough condition, then we must have found the |
1751 | | // case and started to include statements. Consider the rest of the
1752 | | // statements in the compound statement as candidates for inclusion. |
1753 | 0 | assert(FoundCase && "Didn't find case but returned fallthrough?"); |
1754 | | // We recursively found Case, so we're not looking for it anymore. |
1755 | 0 | Case = nullptr; |
1756 | | |
1757 | | // If we found the case and skipped declarations, we can't do the |
1758 | | // optimization. |
1759 | 0 | if (HadSkippedDecl) |
1760 | 0 | return CSFC_Failure; |
1761 | 0 | break; |
1762 | 0 | } |
1763 | 0 | } |
1764 | | |
1765 | 0 | if (!FoundCase) |
1766 | 0 | return CSFC_Success; |
1767 | | |
1768 | 0 | assert(!HadSkippedDecl && "fallthrough after skipping decl"); |
1769 | 0 | } |
1770 | | |
1771 | | // If we have statements in our range, then we know that the statements are |
1772 | | // live and need to be added to the set of statements we're tracking. |
1773 | 0 | bool AnyDecls = false; |
1774 | 0 | for (; I != E; ++I) { |
1775 | 0 | AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I); |
1776 | |
|
1777 | 0 | switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) { |
1778 | 0 | case CSFC_Failure: return CSFC_Failure; |
1779 | 0 | case CSFC_FallThrough: |
1780 | | // A fallthrough result means that the statement was simple and just |
1781 | | // included in ResultStmt, keep adding them afterwards. |
1782 | 0 | break; |
1783 | 0 | case CSFC_Success: |
1784 | | // A successful result means that we found the break statement and |
1785 | | // stopped statement inclusion. We just ensure that any leftover stmts |
1786 | | // are skippable and return success ourselves. |
1787 | 0 | for (++I; I != E; ++I) |
1788 | 0 | if (CodeGenFunction::ContainsLabel(*I, true)) |
1789 | 0 | return CSFC_Failure; |
1790 | 0 | return CSFC_Success; |
1791 | 0 | } |
1792 | 0 | } |
1793 | | |
1794 | | // If we're about to fall out of a scope without hitting a 'break;', we |
1795 | | // can't perform the optimization if there were any decls in that scope |
1796 | | // (we'd lose their end-of-lifetime). |
1797 | 0 | if (AnyDecls) { |
1798 | | // If the entire compound statement was live, there's one more thing we |
1799 | | // can try before giving up: emit the whole thing as a single statement. |
1800 | | // We can do that unless the statement contains a 'break;'. |
1801 | | // FIXME: Such a break must be at the end of a construct within this one. |
1802 | | // We could emit this by just ignoring the BreakStmts entirely. |
1803 | 0 | if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) { |
1804 | 0 | ResultStmts.resize(StartSize); |
1805 | 0 | ResultStmts.push_back(S); |
1806 | 0 | } else { |
1807 | 0 | return CSFC_Failure; |
1808 | 0 | } |
1809 | 0 | } |
1810 | | |
1811 | 0 | return CSFC_FallThrough; |
1812 | 0 | } |
1813 | | |
1814 | | // Okay, this is some other statement that we don't handle explicitly, like a |
1815 | | // for statement or increment etc. If we are skipping over this statement, |
1816 | | // just verify it doesn't have labels, which would make it invalid to elide. |
1817 | 0 | if (Case) { |
1818 | 0 | if (CodeGenFunction::ContainsLabel(S, true)) |
1819 | 0 | return CSFC_Failure; |
1820 | 0 | return CSFC_Success; |
1821 | 0 | } |
1822 | | |
1823 | | // Otherwise, we want to include this statement. Everything is cool with that |
1824 | | // so long as it doesn't contain a break out of the switch we're in. |
1825 | 0 | if (CodeGenFunction::containsBreak(S)) return CSFC_Failure; |
1826 | | |
1827 | | // Otherwise, everything is great. Include the statement and tell the caller |
1828 | | // that we fall through and include the next statement as well. |
1829 | 0 | ResultStmts.push_back(S); |
1830 | 0 | return CSFC_FallThrough; |
1831 | 0 | } |
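Two small illustrations of what the walk accepts and rejects (a, b, c, and use are invented names):

    switch (5) {                 // condition folds to 5; only b(); c(); are
    case 1: a(); break;          // collected and emitted, the rest is elided
    case 5: b(); c(); break;
    }

    switch (5) {                 // rejected (CSFC_Failure): reaching 'case 5'
    case 1: { int tmp = 0;       // skips the declaration of 'tmp', which the
              case 5: use(tmp); }// live statements still refer to
      break;
    }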
1832 | | |
1833 | | /// FindCaseStatementsForValue - Find the case statement being jumped to and |
1834 | | /// then invoke CollectStatementsForCase to find the list of statements to emit |
1835 | | /// for a switch on constant. See the comment above CollectStatementsForCase |
1836 | | /// for more details. |
1837 | | static bool FindCaseStatementsForValue(const SwitchStmt &S, |
1838 | | const llvm::APSInt &ConstantCondValue, |
1839 | | SmallVectorImpl<const Stmt*> &ResultStmts, |
1840 | | ASTContext &C, |
1841 | 0 | const SwitchCase *&ResultCase) { |
1842 | | // First step, find the switch case that is being branched to. We can do this |
1843 | | // efficiently by scanning the SwitchCase list. |
1844 | 0 | const SwitchCase *Case = S.getSwitchCaseList(); |
1845 | 0 | const DefaultStmt *DefaultCase = nullptr; |
1846 | |
|
1847 | 0 | for (; Case; Case = Case->getNextSwitchCase()) { |
1848 | | // It's either a default or case. Just remember the default statement in |
1849 | | // case we're not jumping to any numbered cases. |
1850 | 0 | if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) { |
1851 | 0 | DefaultCase = DS; |
1852 | 0 | continue; |
1853 | 0 | } |
1854 | | |
1855 | | // Check to see if this case is the one we're looking for. |
1856 | 0 | const CaseStmt *CS = cast<CaseStmt>(Case); |
1857 | | // Don't handle case ranges yet. |
1858 | 0 | if (CS->getRHS()) return false; |
1859 | | |
1860 | | // If we found our case, remember it as 'case'. |
1861 | 0 | if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue) |
1862 | 0 | break; |
1863 | 0 | } |
1864 | | |
1865 | | // If we didn't find a matching case, we use a default if it exists, or we |
1866 | | // elide the whole switch body! |
1867 | 0 | if (!Case) { |
1868 | | // It is safe to elide the body of the switch if it doesn't contain labels |
1869 | | // etc. If it is safe, return successfully with an empty ResultStmts list. |
1870 | 0 | if (!DefaultCase) |
1871 | 0 | return !CodeGenFunction::ContainsLabel(&S); |
1872 | 0 | Case = DefaultCase; |
1873 | 0 | } |
1874 | | |
1875 | | // Ok, we know which case is being jumped to, try to collect all the |
1876 | | // statements that follow it. This can fail for a variety of reasons. Also, |
1877 | | // check to see that the recursive walk actually found our case statement. |
1878 | | // Insane cases like this can fail to find it in the recursive walk since we |
1879 | | // don't handle every stmt kind: |
1880 | | // switch (4) { |
1881 | | // while (1) { |
1882 | | // case 4: ... |
1883 | 0 | bool FoundCase = false; |
1884 | 0 | ResultCase = Case; |
1885 | 0 | return CollectStatementsForCase(S.getBody(), Case, FoundCase, |
1886 | 0 | ResultStmts) != CSFC_Failure && |
1887 | 0 | FoundCase; |
1888 | 0 | } |
1889 | | |
1890 | | static std::optional<SmallVector<uint64_t, 16>> |
1891 | 0 | getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) { |
1892 | | // Are there enough branches to weight them? |
1893 | 0 | if (Likelihoods.size() <= 1) |
1894 | 0 | return std::nullopt; |
1895 | | |
1896 | 0 | uint64_t NumUnlikely = 0; |
1897 | 0 | uint64_t NumNone = 0; |
1898 | 0 | uint64_t NumLikely = 0; |
1899 | 0 | for (const auto LH : Likelihoods) { |
1900 | 0 | switch (LH) { |
1901 | 0 | case Stmt::LH_Unlikely: |
1902 | 0 | ++NumUnlikely; |
1903 | 0 | break; |
1904 | 0 | case Stmt::LH_None: |
1905 | 0 | ++NumNone; |
1906 | 0 | break; |
1907 | 0 | case Stmt::LH_Likely: |
1908 | 0 | ++NumLikely; |
1909 | 0 | break; |
1910 | 0 | } |
1911 | 0 | } |
1912 | | |
1913 | | // Is there a likelihood attribute used? |
1914 | 0 | if (NumUnlikely == 0 && NumLikely == 0) |
1915 | 0 | return std::nullopt; |
1916 | | |
1917 | | // When multiple cases share the same code they can be combined during |
1918 | | // optimization. In that case the weights of the branch will be the sum of |
1919 | | // the individual weights. Make sure the combined sum of all neutral cases |
1920 | | // doesn't exceed the value of a single likely attribute. |
1921 | | // The additions both avoid divisions by 0 and make sure the weights of None |
1922 | | // don't exceed the weight of Likely. |
1923 | 0 | const uint64_t Likely = INT32_MAX / (NumLikely + 2); |
1924 | 0 | const uint64_t None = Likely / (NumNone + 1); |
1925 | 0 | const uint64_t Unlikely = 0; |
1926 | |
|
1927 | 0 | SmallVector<uint64_t, 16> Result; |
1928 | 0 | Result.reserve(Likelihoods.size()); |
1929 | 0 | for (const auto LH : Likelihoods) { |
1930 | 0 | switch (LH) { |
1931 | 0 | case Stmt::LH_Unlikely: |
1932 | 0 | Result.push_back(Unlikely); |
1933 | 0 | break; |
1934 | 0 | case Stmt::LH_None: |
1935 | 0 | Result.push_back(None); |
1936 | 0 | break; |
1937 | 0 | case Stmt::LH_Likely: |
1938 | 0 | Result.push_back(Likely); |
1939 | 0 | break; |
1940 | 0 | } |
1941 | 0 | } |
1942 | | |
1943 | 0 | return Result; |
1944 | 0 | } |
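A worked example of the weight computation above for the likelihood vector {LH_None, LH_Likely, LH_None}; the values follow directly from the formulas, using integer division:

    Likely   = INT32_MAX / (NumLikely + 2) = 2147483647 / 3 = 715827882
    None     = Likely / (NumNone + 1)      = 715827882 / 3  = 238609294
    Unlikely = 0
    returned weights: {238609294, 715827882, 238609294}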
1945 | | |
1946 | 0 | void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) { |
1947 | | // Handle nested switch statements. |
1948 | 0 | llvm::SwitchInst *SavedSwitchInsn = SwitchInsn; |
1949 | 0 | SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights; |
1950 | 0 | SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood; |
1951 | 0 | llvm::BasicBlock *SavedCRBlock = CaseRangeBlock; |
1952 | | |
1953 | | // See if we can constant fold the condition of the switch and therefore only |
1954 | | // emit the live case statement (if any) of the switch. |
1955 | 0 | llvm::APSInt ConstantCondValue; |
1956 | 0 | if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) { |
1957 | 0 | SmallVector<const Stmt*, 4> CaseStmts; |
1958 | 0 | const SwitchCase *Case = nullptr; |
1959 | 0 | if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts, |
1960 | 0 | getContext(), Case)) { |
1961 | 0 | if (Case) |
1962 | 0 | incrementProfileCounter(Case); |
1963 | 0 | RunCleanupsScope ExecutedScope(*this); |
1964 | |
|
1965 | 0 | if (S.getInit()) |
1966 | 0 | EmitStmt(S.getInit()); |
1967 | | |
1968 | | // Emit the condition variable if needed inside the entire cleanup scope |
1969 | | // used by this special case for constant folded switches. |
1970 | 0 | if (S.getConditionVariable()) |
1971 | 0 | EmitDecl(*S.getConditionVariable()); |
1972 | | |
1973 | | // At this point, we are no longer "within" a switch instance, so |
1974 | | // we can temporarily enforce this to ensure that any embedded case |
1975 | | // statements are not emitted. |
1976 | 0 | SwitchInsn = nullptr; |
1977 | | |
1978 | | // Okay, we can dead code eliminate everything except this case. Emit the |
1979 | | // specified series of statements and we're good. |
1980 | 0 | for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i) |
1981 | 0 | EmitStmt(CaseStmts[i]); |
1982 | 0 | incrementProfileCounter(&S); |
1983 | | |
1984 | | // Now we want to restore the saved switch instance so that nested |
1985 | | // switches continue to function properly.
1986 | 0 | SwitchInsn = SavedSwitchInsn; |
1987 | |
|
1988 | 0 | return; |
1989 | 0 | } |
1990 | 0 | } |
1991 | | |
1992 | 0 | JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog"); |
1993 | |
|
1994 | 0 | RunCleanupsScope ConditionScope(*this); |
1995 | |
|
1996 | 0 | if (S.getInit()) |
1997 | 0 | EmitStmt(S.getInit()); |
1998 | |
|
1999 | 0 | if (S.getConditionVariable()) |
2000 | 0 | EmitDecl(*S.getConditionVariable()); |
2001 | 0 | llvm::Value *CondV = EmitScalarExpr(S.getCond()); |
2002 | | |
2003 | | // Create basic block to hold stuff that comes after switch |
2004 | | // statement. We also need to create a default block now so that |
2005 | | // explicit case range tests can have a place to jump to on
2006 | | // failure. |
2007 | 0 | llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default"); |
2008 | 0 | SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock); |
2009 | 0 | if (PGO.haveRegionCounts()) { |
2010 | | // Walk the SwitchCase list to find how many there are. |
2011 | 0 | uint64_t DefaultCount = 0; |
2012 | 0 | unsigned NumCases = 0; |
2013 | 0 | for (const SwitchCase *Case = S.getSwitchCaseList(); |
2014 | 0 | Case; |
2015 | 0 | Case = Case->getNextSwitchCase()) { |
2016 | 0 | if (isa<DefaultStmt>(Case)) |
2017 | 0 | DefaultCount = getProfileCount(Case); |
2018 | 0 | NumCases += 1; |
2019 | 0 | } |
2020 | 0 | SwitchWeights = new SmallVector<uint64_t, 16>(); |
2021 | 0 | SwitchWeights->reserve(NumCases); |
2022 | | // The default needs to be first. We store the edge count, so we already |
2023 | | // know the right weight. |
2024 | 0 | SwitchWeights->push_back(DefaultCount); |
2025 | 0 | } else if (CGM.getCodeGenOpts().OptimizationLevel) { |
2026 | 0 | SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>(); |
2027 | | // Initialize the default case. |
2028 | 0 | SwitchLikelihood->push_back(Stmt::LH_None); |
2029 | 0 | } |
2030 | |
|
2031 | 0 | CaseRangeBlock = DefaultBlock; |
2032 | | |
2033 | | // Clear the insertion point to indicate we are in unreachable code. |
2034 | 0 | Builder.ClearInsertionPoint(); |
2035 | | |
2036 | | // All break statements jump to SwitchExit. If BreakContinueStack is non-empty,
2037 | | // then reuse the last ContinueBlock as the continue destination.
2038 | 0 | JumpDest OuterContinue; |
2039 | 0 | if (!BreakContinueStack.empty()) |
2040 | 0 | OuterContinue = BreakContinueStack.back().ContinueBlock; |
2041 | |
|
2042 | 0 | BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue)); |
2043 | | |
2044 | | // Emit switch body. |
2045 | 0 | EmitStmt(S.getBody()); |
2046 | |
|
2047 | 0 | BreakContinueStack.pop_back(); |
2048 | | |
2049 | | // Update the default block in case explicit case range tests have |
2050 | | // been chained on top. |
2051 | 0 | SwitchInsn->setDefaultDest(CaseRangeBlock); |
2052 | | |
2053 | | // If a default was never emitted: |
2054 | 0 | if (!DefaultBlock->getParent()) { |
2055 | | // If we have cleanups, emit the default block so that there's a |
2056 | | // place to jump through the cleanups from. |
2057 | 0 | if (ConditionScope.requiresCleanups()) { |
2058 | 0 | EmitBlock(DefaultBlock); |
2059 | | |
2060 | | // Otherwise, just forward the default block to the switch end. |
2061 | 0 | } else { |
2062 | 0 | DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock()); |
2063 | 0 | delete DefaultBlock; |
2064 | 0 | } |
2065 | 0 | } |
2066 | |
|
2067 | 0 | ConditionScope.ForceCleanup(); |
2068 | | |
2069 | | // Emit continuation. |
2070 | 0 | EmitBlock(SwitchExit.getBlock(), true); |
2071 | 0 | incrementProfileCounter(&S); |
2072 | | |
2073 | | // If the switch has a condition wrapped by __builtin_unpredictable, |
2074 | | // create metadata that specifies that the switch is unpredictable. |
2075 | | // Don't bother if not optimizing because that metadata would not be used. |
2076 | 0 | auto *Call = dyn_cast<CallExpr>(S.getCond()); |
2077 | 0 | if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) { |
2078 | 0 | auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl()); |
2079 | 0 | if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) { |
2080 | 0 | llvm::MDBuilder MDHelper(getLLVMContext()); |
2081 | 0 | SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable, |
2082 | 0 | MDHelper.createUnpredictable()); |
2083 | 0 | } |
2084 | 0 | } |
2085 | |
|
2086 | 0 | if (SwitchWeights) { |
2087 | 0 | assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() && |
2088 | 0 | "switch weights do not match switch cases"); |
2089 | | // If there's only one jump destination there's no sense weighting it. |
2090 | 0 | if (SwitchWeights->size() > 1) |
2091 | 0 | SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof, |
2092 | 0 | createProfileWeights(*SwitchWeights)); |
2093 | 0 | delete SwitchWeights; |
2094 | 0 | } else if (SwitchLikelihood) { |
2095 | 0 | assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() && |
2096 | 0 | "switch likelihoods do not match switch cases"); |
2097 | 0 | std::optional<SmallVector<uint64_t, 16>> LHW = |
2098 | 0 | getLikelihoodWeights(*SwitchLikelihood); |
2099 | 0 | if (LHW) { |
2100 | 0 | llvm::MDBuilder MDHelper(CGM.getLLVMContext()); |
2101 | 0 | SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof, |
2102 | 0 | createProfileWeights(*LHW)); |
2103 | 0 | } |
2104 | 0 | delete SwitchLikelihood; |
2105 | 0 | } |
2106 | 0 | SwitchInsn = SavedSwitchInsn; |
2107 | 0 | SwitchWeights = SavedSwitchWeights; |
2108 | 0 | SwitchLikelihood = SavedSwitchLikelihood; |
2109 | 0 | CaseRangeBlock = SavedCRBlock; |
2110 | 0 | } |
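A small source-level example of the __builtin_unpredictable handling at the end of EmitSwitchStmt (x, f, g, and h are invented names):

    switch (__builtin_unpredictable(x)) {   // when optimizing, the emitted
    case 0: f(); break;                     // 'switch' instruction is tagged
    case 1: g(); break;                     // with !unpredictable metadata
    default: h(); break;
    }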
2111 | | |
2112 | | static std::string |
2113 | | SimplifyConstraint(const char *Constraint, const TargetInfo &Target, |
2114 | 0 | SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=nullptr) { |
2115 | 0 | std::string Result; |
2116 | |
|
2117 | 0 | while (*Constraint) { |
2118 | 0 | switch (*Constraint) { |
2119 | 0 | default: |
2120 | 0 | Result += Target.convertConstraint(Constraint); |
2121 | 0 | break; |
2122 | | // Ignore these |
2123 | 0 | case '*': |
2124 | 0 | case '?': |
2125 | 0 | case '!': |
2126 | 0 | case '=': // Will see this and the following in multi-alternative constraints.
2127 | 0 | case '+': |
2128 | 0 | break; |
2129 | 0 | case '#': // Ignore the rest of the constraint alternative. |
2130 | 0 | while (Constraint[1] && Constraint[1] != ',') |
2131 | 0 | Constraint++; |
2132 | 0 | break; |
2133 | 0 | case '&': |
2134 | 0 | case '%': |
2135 | 0 | Result += *Constraint; |
2136 | 0 | while (Constraint[1] && Constraint[1] == *Constraint) |
2137 | 0 | Constraint++; |
2138 | 0 | break; |
2139 | 0 | case ',': |
2140 | 0 | Result += "|"; |
2141 | 0 | break; |
2142 | 0 | case 'g': |
2143 | 0 | Result += "imr"; |
2144 | 0 | break; |
2145 | 0 | case '[': { |
2146 | 0 | assert(OutCons && |
2147 | 0 | "Must pass output names to constraints with a symbolic name"); |
2148 | 0 | unsigned Index; |
2149 | 0 | bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index); |
2150 | 0 | assert(result && "Could not resolve symbolic name"); (void)result; |
2151 | 0 | Result += llvm::utostr(Index); |
2152 | 0 | break; |
2153 | 0 | } |
2154 | 0 | } |
2155 | | |
2156 | 0 | Constraint++; |
2157 | 0 | } |
2158 | | |
2159 | 0 | return Result; |
2160 | 0 | } |
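A few concrete rewrites performed by SimplifyConstraint, read off the switch above. The composite constraint strings are made-up examples, and letters outside the listed cases go through Target.convertConstraint, which is target-dependent:

    "g"            ->  "imr"     // general operand becomes imm/mem/reg
    "=&r"          ->  "&r"      // '=' dropped, early-clobber '&' kept once
    "r#ignored,m"  ->  "r|m"     // '#' discards the rest of the alternative,
                                 // ',' between alternatives becomes '|'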
2161 | | |
2162 | | /// AddVariableConstraints - Look at AsmExpr and if it is a variable declared |
2163 | | /// as using a particular register add that as a constraint that will be used |
2164 | | /// in this asm stmt. |
2165 | | static std::string |
2166 | | AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr, |
2167 | | const TargetInfo &Target, CodeGenModule &CGM, |
2168 | | const AsmStmt &Stmt, const bool EarlyClobber, |
2169 | 0 | std::string *GCCReg = nullptr) { |
2170 | 0 | const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr); |
2171 | 0 | if (!AsmDeclRef) |
2172 | 0 | return Constraint; |
2173 | 0 | const ValueDecl &Value = *AsmDeclRef->getDecl(); |
2174 | 0 | const VarDecl *Variable = dyn_cast<VarDecl>(&Value); |
2175 | 0 | if (!Variable) |
2176 | 0 | return Constraint; |
2177 | 0 | if (Variable->getStorageClass() != SC_Register) |
2178 | 0 | return Constraint; |
2179 | 0 | AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>(); |
2180 | 0 | if (!Attr) |
2181 | 0 | return Constraint; |
2182 | 0 | StringRef Register = Attr->getLabel(); |
2183 | 0 | assert(Target.isValidGCCRegisterName(Register)); |
2184 | | // We're using validateOutputConstraint here because we only care if |
2185 | | // this is a register constraint. |
2186 | 0 | TargetInfo::ConstraintInfo Info(Constraint, ""); |
2187 | 0 | if (Target.validateOutputConstraint(Info) && |
2188 | 0 | !Info.allowsRegister()) { |
2189 | 0 | CGM.ErrorUnsupported(&Stmt, "__asm__"); |
2190 | 0 | return Constraint; |
2191 | 0 | } |
2192 | | // Canonicalize the register here before returning it. |
2193 | 0 | Register = Target.getNormalizedGCCRegisterName(Register); |
2194 | 0 | if (GCCReg != nullptr) |
2195 | 0 | *GCCReg = Register.str(); |
2196 | 0 | return (EarlyClobber ? "&{" : "{") + Register.str() + "}"; |
2197 | 0 | } |
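A minimal example of the case AddVariableConstraints rewrites (x86-64 register name assumed; the asm body is elided):

    register unsigned long sp asm("rsp");
    asm("" : "=r"(sp));           // the "r" output constraint becomes "{rsp}",
                                  // or "&{rsp}" for an early-clobber operand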
2198 | | |
2199 | | std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue( |
2200 | | const TargetInfo::ConstraintInfo &Info, LValue InputValue, |
2201 | 0 | QualType InputType, std::string &ConstraintStr, SourceLocation Loc) { |
2202 | 0 | if (Info.allowsRegister() || !Info.allowsMemory()) { |
2203 | 0 | if (CodeGenFunction::hasScalarEvaluationKind(InputType)) |
2204 | 0 | return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr}; |
2205 | | |
2206 | 0 | llvm::Type *Ty = ConvertType(InputType); |
2207 | 0 | uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty); |
2208 | 0 | if ((Size <= 64 && llvm::isPowerOf2_64(Size)) || |
2209 | 0 | getTargetHooks().isScalarizableAsmOperand(*this, Ty)) { |
2210 | 0 | Ty = llvm::IntegerType::get(getLLVMContext(), Size); |
2211 | |
|
2212 | 0 | return { |
2213 | 0 | Builder.CreateLoad(InputValue.getAddress(*this).withElementType(Ty)), |
2214 | 0 | nullptr}; |
2215 | 0 | } |
2216 | 0 | } |
2217 | | |
2218 | 0 | Address Addr = InputValue.getAddress(*this); |
2219 | 0 | ConstraintStr += '*'; |
2220 | 0 | return {Addr.getPointer(), Addr.getElementType()}; |
2221 | 0 | } |
2222 | | |
2223 | | std::pair<llvm::Value *, llvm::Type *> |
2224 | | CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info, |
2225 | | const Expr *InputExpr, |
2226 | 0 | std::string &ConstraintStr) { |
2227 | | // If this can't be a register or memory, i.e., has to be a constant |
2228 | | // (immediate or symbolic), try to emit it as such. |
2229 | 0 | if (!Info.allowsRegister() && !Info.allowsMemory()) { |
2230 | 0 | if (Info.requiresImmediateConstant()) { |
2231 | 0 | Expr::EvalResult EVResult; |
2232 | 0 | InputExpr->EvaluateAsRValue(EVResult, getContext(), true); |
2233 | |
|
2234 | 0 | llvm::APSInt IntResult; |
2235 | 0 | if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(), |
2236 | 0 | getContext())) |
2237 | 0 | return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr}; |
2238 | 0 | } |
2239 | | |
2240 | 0 | Expr::EvalResult Result; |
2241 | 0 | if (InputExpr->EvaluateAsInt(Result, getContext())) |
2242 | 0 | return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()), |
2243 | 0 | nullptr}; |
2244 | 0 | } |
2245 | | |
2246 | 0 | if (Info.allowsRegister() || !Info.allowsMemory()) |
2247 | 0 | if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType())) |
2248 | 0 | return {EmitScalarExpr(InputExpr), nullptr}; |
2249 | 0 | if (InputExpr->getStmtClass() == Expr::CXXThisExprClass) |
2250 | 0 | return {EmitScalarExpr(InputExpr), nullptr}; |
2251 | 0 | InputExpr = InputExpr->IgnoreParenNoopCasts(getContext()); |
2252 | 0 | LValue Dest = EmitLValue(InputExpr); |
2253 | 0 | return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr, |
2254 | 0 | InputExpr->getExprLoc()); |
2255 | 0 | } |
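A small illustration of the immediate-constant path at the top of EmitAsmInput (the template string is an arbitrary example; '%c0' prints the bare constant):

    asm volatile(".long %c0" :: "i"(42));  // "i" allows neither register nor
                                           // memory, so 42 is folded at compile
                                           // time into a ConstantInt argument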
2256 | | |
2257 | | /// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline |
2258 | | /// asm call instruction. The !srcloc MDNode contains a list of constant |
2259 | | /// integers which are the source locations of the start of each line in the |
2260 | | /// asm. |
2261 | | static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str, |
2262 | 0 | CodeGenFunction &CGF) { |
2263 | 0 | SmallVector<llvm::Metadata *, 8> Locs; |
2264 | | // Add the location of the first line to the MDNode. |
2265 | 0 | Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get( |
2266 | 0 | CGF.Int64Ty, Str->getBeginLoc().getRawEncoding()))); |
2267 | 0 | StringRef StrVal = Str->getString(); |
2268 | 0 | if (!StrVal.empty()) { |
2269 | 0 | const SourceManager &SM = CGF.CGM.getContext().getSourceManager(); |
2270 | 0 | const LangOptions &LangOpts = CGF.CGM.getLangOpts(); |
2271 | 0 | unsigned StartToken = 0; |
2272 | 0 | unsigned ByteOffset = 0; |
2273 | | |
2274 | | // Add the location of the start of each subsequent line of the asm to the |
2275 | | // MDNode. |
2276 | 0 | for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) { |
2277 | 0 | if (StrVal[i] != '\n') continue; |
2278 | 0 | SourceLocation LineLoc = Str->getLocationOfByte( |
2279 | 0 | i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset); |
2280 | 0 | Locs.push_back(llvm::ConstantAsMetadata::get( |
2281 | 0 | llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding()))); |
2282 | 0 | } |
2283 | 0 | } |
2284 | |
|
2285 | 0 | return llvm::MDNode::get(CGF.getLLVMContext(), Locs); |
2286 | 0 | } |
2287 | | |
2288 | | static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect, |
2289 | | bool HasUnwindClobber, bool ReadOnly, |
2290 | | bool ReadNone, bool NoMerge, const AsmStmt &S, |
2291 | | const std::vector<llvm::Type *> &ResultRegTypes, |
2292 | | const std::vector<llvm::Type *> &ArgElemTypes, |
2293 | | CodeGenFunction &CGF, |
2294 | 0 | std::vector<llvm::Value *> &RegResults) { |
2295 | 0 | if (!HasUnwindClobber) |
2296 | 0 | Result.addFnAttr(llvm::Attribute::NoUnwind); |
2297 | |
|
2298 | 0 | if (NoMerge) |
2299 | 0 | Result.addFnAttr(llvm::Attribute::NoMerge); |
2300 | | // Attach readnone and readonly attributes. |
2301 | 0 | if (!HasSideEffect) { |
2302 | 0 | if (ReadNone) |
2303 | 0 | Result.setDoesNotAccessMemory(); |
2304 | 0 | else if (ReadOnly) |
2305 | 0 | Result.setOnlyReadsMemory(); |
2306 | 0 | } |
2307 | | |
2308 | | // Add elementtype attribute for indirect constraints. |
2309 | 0 | for (auto Pair : llvm::enumerate(ArgElemTypes)) { |
2310 | 0 | if (Pair.value()) { |
2311 | 0 | auto Attr = llvm::Attribute::get( |
2312 | 0 | CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value()); |
2313 | 0 | Result.addParamAttr(Pair.index(), Attr); |
2314 | 0 | } |
2315 | 0 | } |
2316 | | |
2317 | | // Slap the source location of the inline asm into a !srcloc metadata on the |
2318 | | // call. |
2319 | 0 | if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S)) |
2320 | 0 | Result.setMetadata("srcloc", |
2321 | 0 | getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF)); |
2322 | 0 | else { |
2323 | | // At least put the line number on MS inline asm blobs. |
2324 | 0 | llvm::Constant *Loc = |
2325 | 0 | llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding()); |
2326 | 0 | Result.setMetadata("srcloc", |
2327 | 0 | llvm::MDNode::get(CGF.getLLVMContext(), |
2328 | 0 | llvm::ConstantAsMetadata::get(Loc))); |
2329 | 0 | } |
2330 | |
|
2331 | 0 | if (CGF.getLangOpts().assumeFunctionsAreConvergent()) |
2332 | | // Conservatively, mark all inline asm blocks in CUDA or OpenCL as |
2333 | | // convergent (meaning, they may call an intrinsically convergent op, such |
2334 | | // as bar.sync, and so can't have certain optimizations applied around |
2335 | | // them). |
2336 | 0 | Result.addFnAttr(llvm::Attribute::Convergent); |
2337 | | // Extract all of the register value results from the asm. |
2338 | 0 | if (ResultRegTypes.size() == 1) { |
2339 | 0 | RegResults.push_back(&Result); |
2340 | 0 | } else { |
2341 | 0 | for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) { |
2342 | 0 | llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult"); |
2343 | 0 | RegResults.push_back(Tmp); |
2344 | 0 | } |
2345 | 0 | } |
2346 | 0 | } |
2347 | | |
2348 | | static void |
2349 | | EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S, |
2350 | | const llvm::ArrayRef<llvm::Value *> RegResults, |
2351 | | const llvm::ArrayRef<llvm::Type *> ResultRegTypes, |
2352 | | const llvm::ArrayRef<llvm::Type *> ResultTruncRegTypes, |
2353 | | const llvm::ArrayRef<LValue> ResultRegDests, |
2354 | | const llvm::ArrayRef<QualType> ResultRegQualTys, |
2355 | | const llvm::BitVector &ResultTypeRequiresCast, |
2356 | 0 | const llvm::BitVector &ResultRegIsFlagReg) { |
2357 | 0 | CGBuilderTy &Builder = CGF.Builder; |
2358 | 0 | CodeGenModule &CGM = CGF.CGM; |
2359 | 0 | llvm::LLVMContext &CTX = CGF.getLLVMContext(); |
2360 | |
|
2361 | 0 | assert(RegResults.size() == ResultRegTypes.size()); |
2362 | 0 | assert(RegResults.size() == ResultTruncRegTypes.size()); |
2363 | 0 | assert(RegResults.size() == ResultRegDests.size()); |
2364 | | // ResultRegDests can also be populated by addReturnRegisterOutputs() above,
2365 | | // in which case its size may grow. |
2366 | 0 | assert(ResultTypeRequiresCast.size() <= ResultRegDests.size()); |
2367 | 0 | assert(ResultRegIsFlagReg.size() <= ResultRegDests.size()); |
2368 | | |
2369 | 0 | for (unsigned i = 0, e = RegResults.size(); i != e; ++i) { |
2370 | 0 | llvm::Value *Tmp = RegResults[i]; |
2371 | 0 | llvm::Type *TruncTy = ResultTruncRegTypes[i]; |
2372 | |
|
2373 | 0 | if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) { |
2374 | | // Target must guarantee the Value `Tmp` here is lowered to a boolean |
2375 | | // value. |
2376 | 0 | llvm::Constant *Two = llvm::ConstantInt::get(Tmp->getType(), 2); |
2377 | 0 | llvm::Value *IsBooleanValue = |
2378 | 0 | Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, Two); |
2379 | 0 | llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume); |
2380 | 0 | Builder.CreateCall(FnAssume, IsBooleanValue); |
2381 | 0 | } |
2382 | | |
2383 | | // If the result type of the LLVM IR asm doesn't match the result type of |
2384 | | // the expression, do the conversion. |
2385 | 0 | if (ResultRegTypes[i] != TruncTy) { |
2386 | | |
2387 | | // Truncate the integer result to the right size; note that TruncTy can be
2388 | | // a pointer. |
2389 | 0 | if (TruncTy->isFloatingPointTy()) |
2390 | 0 | Tmp = Builder.CreateFPTrunc(Tmp, TruncTy); |
2391 | 0 | else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) { |
2392 | 0 | uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy); |
2393 | 0 | Tmp = Builder.CreateTrunc( |
2394 | 0 | Tmp, llvm::IntegerType::get(CTX, (unsigned)ResSize)); |
2395 | 0 | Tmp = Builder.CreateIntToPtr(Tmp, TruncTy); |
2396 | 0 | } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) { |
2397 | 0 | uint64_t TmpSize = |
2398 | 0 | CGM.getDataLayout().getTypeSizeInBits(Tmp->getType()); |
2399 | 0 | Tmp = Builder.CreatePtrToInt( |
2400 | 0 | Tmp, llvm::IntegerType::get(CTX, (unsigned)TmpSize)); |
2401 | 0 | Tmp = Builder.CreateTrunc(Tmp, TruncTy); |
2402 | 0 | } else if (Tmp->getType()->isIntegerTy() && TruncTy->isIntegerTy()) { |
2403 | 0 | Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy); |
2404 | 0 | } else if (Tmp->getType()->isVectorTy() || TruncTy->isVectorTy()) { |
2405 | 0 | Tmp = Builder.CreateBitCast(Tmp, TruncTy); |
2406 | 0 | } |
2407 | 0 | } |
2408 | |
|
2409 | 0 | LValue Dest = ResultRegDests[i]; |
2410 | | // ResultTypeRequiresCast elements correspond to the first |
2411 | | // ResultTypeRequiresCast.size() elements of RegResults. |
2412 | 0 | if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) { |
2413 | 0 | unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]); |
2414 | 0 | Address A = Dest.getAddress(CGF).withElementType(ResultRegTypes[i]); |
2415 | 0 | if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) { |
2416 | 0 | Builder.CreateStore(Tmp, A); |
2417 | 0 | continue; |
2418 | 0 | } |
2419 | | |
2420 | 0 | QualType Ty = |
2421 | 0 | CGF.getContext().getIntTypeForBitwidth(Size, /*Signed=*/false); |
2422 | 0 | if (Ty.isNull()) { |
2423 | 0 | const Expr *OutExpr = S.getOutputExpr(i); |
2424 | 0 | CGM.getDiags().Report(OutExpr->getExprLoc(), |
2425 | 0 | diag::err_store_value_to_reg); |
2426 | 0 | return; |
2427 | 0 | } |
2428 | 0 | Dest = CGF.MakeAddrLValue(A, Ty); |
2429 | 0 | } |
2430 | 0 | CGF.EmitStoreThroughLValue(RValue::get(Tmp), Dest); |
2431 | 0 | } |
2432 | 0 | } |
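A sketch of the flag-register output case above, using the x86 "=@cc<cond>" constraint (variable names invented):

    long a = 1, b = 2;
    int below;
    asm("cmpq %2, %1" : "=@ccb"(below) : "r"(a), "r"(b));
    // for a "{@cc...}" output the result is assumed (via llvm.assume) to be
    // 0 or 1 before it is truncated/extended and stored into 'below'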
2433 | | |
2434 | | static void EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF, |
2435 | 0 | const AsmStmt &S) { |
2436 | 0 | constexpr auto Name = "__ASM__hipstdpar_unsupported"; |
2437 | |
|
2438 | 0 | StringRef Asm; |
2439 | 0 | if (auto GCCAsm = dyn_cast<GCCAsmStmt>(&S)) |
2440 | 0 | Asm = GCCAsm->getAsmString()->getString(); |
2441 | |
|
2442 | 0 | auto &Ctx = CGF->CGM.getLLVMContext(); |
2443 | |
|
2444 | 0 | auto StrTy = llvm::ConstantDataArray::getString(Ctx, Asm); |
2445 | 0 | auto FnTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx), |
2446 | 0 | {StrTy->getType()}, false); |
2447 | 0 | auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy); |
2448 | |
|
2449 | 0 | CGF->Builder.CreateCall(UBF, {StrTy}); |
2450 | 0 | } |
2451 | | |
2452 | 0 | void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) { |
2453 | | // Pop all cleanup blocks at the end of the asm statement. |
2454 | 0 | CodeGenFunction::RunCleanupsScope Cleanups(*this); |
2455 | | |
2456 | | // Assemble the final asm string. |
2457 | 0 | std::string AsmString = S.generateAsmString(getContext()); |
2458 | | |
2459 | | // Get all the output and input constraints together. |
2460 | 0 | SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos; |
2461 | 0 | SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos; |
2462 | |
|
2463 | 0 | bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice; |
2464 | 0 | bool IsValidTargetAsm = true; |
2465 | 0 | for (unsigned i = 0, e = S.getNumOutputs(); i != e && IsValidTargetAsm; i++) { |
2466 | 0 | StringRef Name; |
2467 | 0 | if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S)) |
2468 | 0 | Name = GAS->getOutputName(i); |
2469 | 0 | TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name); |
2470 | 0 | bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid; |
2471 | 0 | if (IsHipStdPar && !IsValid) |
2472 | 0 | IsValidTargetAsm = false; |
2473 | 0 | else |
2474 | 0 | assert(IsValid && "Failed to parse output constraint"); |
2475 | 0 | OutputConstraintInfos.push_back(Info); |
2476 | 0 | } |
2477 | |
|
2478 | 0 | for (unsigned i = 0, e = S.getNumInputs(); i != e && IsValidTargetAsm; i++) { |
2479 | 0 | StringRef Name; |
2480 | 0 | if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S)) |
2481 | 0 | Name = GAS->getInputName(i); |
2482 | 0 | TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name); |
2483 | 0 | bool IsValid = |
2484 | 0 | getTarget().validateInputConstraint(OutputConstraintInfos, Info); |
2485 | 0 | if (IsHipStdPar && !IsValid) |
2486 | 0 | IsValidTargetAsm = false; |
2487 | 0 | else |
2488 | 0 | assert(IsValid && "Failed to parse input constraint"); |
2489 | 0 | InputConstraintInfos.push_back(Info); |
2490 | 0 | } |
2491 | |
|
2492 | 0 | if (!IsValidTargetAsm) |
2493 | 0 | return EmitHipStdParUnsupportedAsm(this, S); |
2494 | | |
2495 | 0 | std::string Constraints; |
2496 | |
|
2497 | 0 | std::vector<LValue> ResultRegDests; |
2498 | 0 | std::vector<QualType> ResultRegQualTys; |
2499 | 0 | std::vector<llvm::Type *> ResultRegTypes; |
2500 | 0 | std::vector<llvm::Type *> ResultTruncRegTypes; |
2501 | 0 | std::vector<llvm::Type *> ArgTypes; |
2502 | 0 | std::vector<llvm::Type *> ArgElemTypes; |
2503 | 0 | std::vector<llvm::Value*> Args; |
2504 | 0 | llvm::BitVector ResultTypeRequiresCast; |
2505 | 0 | llvm::BitVector ResultRegIsFlagReg; |
2506 | | |
2507 | | // Keep track of inout constraints. |
2508 | 0 | std::string InOutConstraints; |
2509 | 0 | std::vector<llvm::Value*> InOutArgs; |
2510 | 0 | std::vector<llvm::Type*> InOutArgTypes; |
2511 | 0 | std::vector<llvm::Type*> InOutArgElemTypes; |
2512 | | |
2513 | | // Keep track of out constraints for tied input operands.
2514 | 0 | std::vector<std::string> OutputConstraints; |
2515 | | |
2516 | | // Keep track of defined physregs. |
2517 | 0 | llvm::SmallSet<std::string, 8> PhysRegOutputs; |
2518 | | |
2519 | | // An inline asm can be marked readonly if it meets the following conditions: |
2520 | | // - it doesn't have any side effects
2521 | | // - it doesn't clobber memory |
2522 | | // - it doesn't return a value by-reference |
2523 | | // It can be marked readnone if it doesn't have any input memory constraints |
2524 | | // in addition to meeting the conditions listed above. |
2525 | 0 | bool ReadOnly = true, ReadNone = true; |
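// Editor's sketch, not part of CGStmt.cpp: under the rules above, an asm with
// only register outputs and no memory clobber, e.g.
//
//   unsigned lo, hi;
//   asm("rdtsc" : "=a"(lo), "=d"(hi));   // -> call { i32, i32 } asm ...
//
// keeps ReadOnly/ReadNone true and can end up marked readnone, whereas
// asm volatile("" ::: "memory") clears both flags in the clobber loop below.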
2526 | |
|
2527 | 0 | for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) { |
2528 | 0 | TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i]; |
2529 | | |
2530 | | // Simplify the output constraint. |
2531 | 0 | std::string OutputConstraint(S.getOutputConstraint(i)); |
2532 | 0 | OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1, |
2533 | 0 | getTarget(), &OutputConstraintInfos); |
2534 | |
|
2535 | 0 | const Expr *OutExpr = S.getOutputExpr(i); |
2536 | 0 | OutExpr = OutExpr->IgnoreParenNoopCasts(getContext()); |
2537 | |
|
2538 | 0 | std::string GCCReg; |
2539 | 0 | OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr, |
2540 | 0 | getTarget(), CGM, S, |
2541 | 0 | Info.earlyClobber(), |
2542 | 0 | &GCCReg); |
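// Editor's sketch, not part of CGStmt.cpp: AddVariableConstraints rewrites an
// output bound to a register variable into an explicit physreg constraint:
//
//   register int r asm("ebx");
//   asm("" : "=r"(r));        // constraint becomes "={ebx}", GCCReg == "ebx"
//
// which is what the duplicate-physreg check just below keys on.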
2543 | | // Give an error on multiple outputs to the same physreg.
2544 | 0 | if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second) |
2545 | 0 | CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg); |
2546 | |
|
2547 | 0 | OutputConstraints.push_back(OutputConstraint); |
2548 | 0 | LValue Dest = EmitLValue(OutExpr); |
2549 | 0 | if (!Constraints.empty()) |
2550 | 0 | Constraints += ','; |
2551 | | |
2552 | | // If this is a register output, then make the inline asm return it |
2553 | | // by-value. If this is a memory result, return the value by-reference. |
2554 | 0 | QualType QTy = OutExpr->getType(); |
2555 | 0 | const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) || |
2556 | 0 | hasAggregateEvaluationKind(QTy); |
2557 | 0 | if (!Info.allowsMemory() && IsScalarOrAggregate) { |
2558 | |
|
2559 | 0 | Constraints += "=" + OutputConstraint; |
2560 | 0 | ResultRegQualTys.push_back(QTy); |
2561 | 0 | ResultRegDests.push_back(Dest); |
2562 | |
|
2563 | 0 | bool IsFlagReg = llvm::StringRef(OutputConstraint).starts_with("{@cc"); |
2564 | 0 | ResultRegIsFlagReg.push_back(IsFlagReg); |
2565 | |
|
2566 | 0 | llvm::Type *Ty = ConvertTypeForMem(QTy); |
2567 | 0 | const bool RequiresCast = Info.allowsRegister() && |
2568 | 0 | (getTargetHooks().isScalarizableAsmOperand(*this, Ty) || |
2569 | 0 | Ty->isAggregateType()); |
2570 | |
|
2571 | 0 | ResultTruncRegTypes.push_back(Ty); |
2572 | 0 | ResultTypeRequiresCast.push_back(RequiresCast); |
2573 | |
|
2574 | 0 | if (RequiresCast) { |
2575 | 0 | unsigned Size = getContext().getTypeSize(QTy); |
2576 | 0 | Ty = llvm::IntegerType::get(getLLVMContext(), Size); |
2577 | 0 | } |
2578 | 0 | ResultRegTypes.push_back(Ty); |
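// Editor's sketch, not part of CGStmt.cpp: RequiresCast covers register
// outputs whose natural type cannot be an asm result directly, e.g. a small
// aggregate:
//
//   struct S { short a, b; } s;
//   asm("" : "=r"(s));
//
// Here ResultRegTypes gets i32 (the bit-size integer) while
// ResultTruncRegTypes keeps %struct.S, and EmitAsmStores casts the result
// back when storing it.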
2579 | | // If this output is tied to an input, and if the input is larger, then |
2580 | | // we need to set the actual result type of the inline asm node to be the |
2581 | | // same as the input type. |
2582 | 0 | if (Info.hasMatchingInput()) { |
2583 | 0 | unsigned InputNo; |
2584 | 0 | for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) { |
2585 | 0 | TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo]; |
2586 | 0 | if (Input.hasTiedOperand() && Input.getTiedOperand() == i) |
2587 | 0 | break; |
2588 | 0 | } |
2589 | 0 | assert(InputNo != S.getNumInputs() && "Didn't find matching input!"); |
2590 | | |
2591 | 0 | QualType InputTy = S.getInputExpr(InputNo)->getType(); |
2592 | 0 | QualType OutputType = OutExpr->getType(); |
2593 | |
|
2594 | 0 | uint64_t InputSize = getContext().getTypeSize(InputTy); |
2595 | 0 | if (getContext().getTypeSize(OutputType) < InputSize) { |
2596 | | // Form the asm to return the value as a larger integer or fp type. |
2597 | 0 | ResultRegTypes.back() = ConvertType(InputTy); |
2598 | 0 | } |
2599 | 0 | } |
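// Editor's sketch, not part of CGStmt.cpp: on an LP64 target,
//
//   int out; long in;
//   asm("" : "=r"(out) : "0"(in));
//
// takes this path: the asm's result type is widened to i64 to match the tied
// input, and the value is truncated back to i32 when stored into 'out'.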
2600 | 0 | if (llvm::Type* AdjTy = |
2601 | 0 | getTargetHooks().adjustInlineAsmType(*this, OutputConstraint, |
2602 | 0 | ResultRegTypes.back())) |
2603 | 0 | ResultRegTypes.back() = AdjTy; |
2604 | 0 | else { |
2605 | 0 | CGM.getDiags().Report(S.getAsmLoc(), |
2606 | 0 | diag::err_asm_invalid_type_in_input) |
2607 | 0 | << OutExpr->getType() << OutputConstraint; |
2608 | 0 | } |
2609 | | |
2610 | | // Update largest vector width for any vector types. |
2611 | 0 | if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back())) |
2612 | 0 | LargestVectorWidth = |
2613 | 0 | std::max((uint64_t)LargestVectorWidth, |
2614 | 0 | VT->getPrimitiveSizeInBits().getKnownMinValue()); |
2615 | 0 | } else { |
2616 | 0 | Address DestAddr = Dest.getAddress(*this); |
2617 | | // Matrix types in memory are represented by arrays, but accessed through |
2618 | | // vector pointers, with the alignment specified on the access operation. |
2619 | | // For inline assembly, update pointer arguments to use vector pointers. |
2620 | | // Otherwise there will be a mismatch if the matrix is also an
2621 | | // input argument, which is represented as a vector.
2622 | 0 | if (isa<MatrixType>(OutExpr->getType().getCanonicalType())) |
2623 | 0 | DestAddr = DestAddr.withElementType(ConvertType(OutExpr->getType())); |
2624 | |
|
2625 | 0 | ArgTypes.push_back(DestAddr.getType()); |
2626 | 0 | ArgElemTypes.push_back(DestAddr.getElementType()); |
2627 | 0 | Args.push_back(DestAddr.getPointer()); |
2628 | 0 | Constraints += "=*"; |
2629 | 0 | Constraints += OutputConstraint; |
2630 | 0 | ReadOnly = ReadNone = false; |
2631 | 0 | } |
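// Editor's sketch, not part of CGStmt.cpp: a memory-only output takes the
// branch above and is passed by reference, e.g. on x86
//
//   asm("movl $1, %0" : "=m"(x));
//   // -> call void asm "movl $$1, $0", "=*m,..."(ptr elementtype(i32) %x)
//
// with the "*" marking an indirect output and the element type recorded via
// ArgElemTypes.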
2632 | | |
2633 | 0 | if (Info.isReadWrite()) { |
2634 | 0 | InOutConstraints += ','; |
2635 | |
|
2636 | 0 | const Expr *InputExpr = S.getOutputExpr(i); |
2637 | 0 | llvm::Value *Arg; |
2638 | 0 | llvm::Type *ArgElemType; |
2639 | 0 | std::tie(Arg, ArgElemType) = EmitAsmInputLValue( |
2640 | 0 | Info, Dest, InputExpr->getType(), InOutConstraints, |
2641 | 0 | InputExpr->getExprLoc()); |
2642 | |
|
2643 | 0 | if (llvm::Type* AdjTy = |
2644 | 0 | getTargetHooks().adjustInlineAsmType(*this, OutputConstraint, |
2645 | 0 | Arg->getType())) |
2646 | 0 | Arg = Builder.CreateBitCast(Arg, AdjTy); |
2647 | | |
2648 | | // Update largest vector width for any vector types. |
2649 | 0 | if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType())) |
2650 | 0 | LargestVectorWidth = |
2651 | 0 | std::max((uint64_t)LargestVectorWidth, |
2652 | 0 | VT->getPrimitiveSizeInBits().getKnownMinValue()); |
2653 | | // Only tie earlyclobber physregs. |
2654 | 0 | if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber())) |
2655 | 0 | InOutConstraints += llvm::utostr(i); |
2656 | 0 | else |
2657 | 0 | InOutConstraints += OutputConstraint; |
2658 | |
|
2659 | 0 | InOutArgTypes.push_back(Arg->getType()); |
2660 | 0 | InOutArgElemTypes.push_back(ArgElemType); |
2661 | 0 | InOutArgs.push_back(Arg); |
2662 | 0 | } |
2663 | 0 | } |
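// Editor's sketch, not part of CGStmt.cpp: a read-write ("+") operand is split
// by the loop above into an output plus a hidden tied input, e.g.
//
//   asm("addl $1, %0" : "+r"(x));
//
// contributes "=r" to Constraints and "0" (the tied-operand number) to
// InOutConstraints, with x's current value appended after the explicit inputs
// further down.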
2664 | | |
2665 | | // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX) |
2666 | | // to the return value slot. Only do this when returning in registers. |
2667 | 0 | if (isa<MSAsmStmt>(&S)) { |
2668 | 0 | const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo(); |
2669 | 0 | if (RetAI.isDirect() || RetAI.isExtend()) { |
2670 | | // Make a fake lvalue for the return value slot. |
2671 | 0 | LValue ReturnSlot = MakeAddrLValueWithoutTBAA(ReturnValue, FnRetTy); |
2672 | 0 | CGM.getTargetCodeGenInfo().addReturnRegisterOutputs( |
2673 | 0 | *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes, |
2674 | 0 | ResultRegDests, AsmString, S.getNumOutputs()); |
2675 | 0 | SawAsmBlock = true; |
2676 | 0 | } |
2677 | 0 | } |
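// Editor's sketch, not part of CGStmt.cpp: this is what lets MSVC-style code
// such as
//
//   int f() { __asm mov eax, 42 }
//
// produce a return value: on x86 the target hook appends register-output
// constraints (e.g. "={eax}") and stores of the asm results into the return
// slot, matching the convention that EAX/EDX hold the result at the end of
// the blob.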
2678 | |
|
2679 | 0 | for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) { |
2680 | 0 | const Expr *InputExpr = S.getInputExpr(i); |
2681 | |
|
2682 | 0 | TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i]; |
2683 | |
|
2684 | 0 | if (Info.allowsMemory()) |
2685 | 0 | ReadNone = false; |
2686 | |
|
2687 | 0 | if (!Constraints.empty()) |
2688 | 0 | Constraints += ','; |
2689 | | |
2690 | | // Simplify the input constraint. |
2691 | 0 | std::string InputConstraint(S.getInputConstraint(i)); |
2692 | 0 | InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(), |
2693 | 0 | &OutputConstraintInfos); |
2694 | |
|
2695 | 0 | InputConstraint = AddVariableConstraints( |
2696 | 0 | InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()), |
2697 | 0 | getTarget(), CGM, S, false /* No EarlyClobber */); |
2698 | |
|
2699 | 0 | std::string ReplaceConstraint (InputConstraint); |
2700 | 0 | llvm::Value *Arg; |
2701 | 0 | llvm::Type *ArgElemType; |
2702 | 0 | std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints); |
2703 | | |
2704 | | // If this input argument is tied to a larger output result, extend the |
2705 | | // input to be the same size as the output. The LLVM backend wants to see |
2706 | | // the input and output of a matching constraint be the same size. Note |
2707 | | // that GCC does not define what the top bits are here. We use zext because |
2708 | | // that is usually cheaper, but LLVM IR should really get an anyext someday. |
2709 | 0 | if (Info.hasTiedOperand()) { |
2710 | 0 | unsigned Output = Info.getTiedOperand(); |
2711 | 0 | QualType OutputType = S.getOutputExpr(Output)->getType(); |
2712 | 0 | QualType InputTy = InputExpr->getType(); |
2713 | |
|
2714 | 0 | if (getContext().getTypeSize(OutputType) > |
2715 | 0 | getContext().getTypeSize(InputTy)) { |
2716 | | // Use ptrtoint as appropriate so that we can do our extension. |
2717 | 0 | if (isa<llvm::PointerType>(Arg->getType())) |
2718 | 0 | Arg = Builder.CreatePtrToInt(Arg, IntPtrTy); |
2719 | 0 | llvm::Type *OutputTy = ConvertType(OutputType); |
2720 | 0 | if (isa<llvm::IntegerType>(OutputTy)) |
2721 | 0 | Arg = Builder.CreateZExt(Arg, OutputTy); |
2722 | 0 | else if (isa<llvm::PointerType>(OutputTy)) |
2723 | 0 | Arg = Builder.CreateZExt(Arg, IntPtrTy); |
2724 | 0 | else if (OutputTy->isFloatingPointTy()) |
2725 | 0 | Arg = Builder.CreateFPExt(Arg, OutputTy); |
2726 | 0 | } |
2727 | | // Deal with the tied operands' constraint code in adjustInlineAsmType. |
2728 | 0 | ReplaceConstraint = OutputConstraints[Output]; |
2729 | 0 | } |
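// Editor's sketch, not part of CGStmt.cpp: this is the input-side half of the
// size matching done for outputs above; with
//
//   long out; int in;
//   asm("" : "=r"(out) : "0"(in));
//
// the i32 input is zero-extended to i64 (ptrtoint/fpext for the pointer and
// floating-point cases) so both ends of the tied constraint have the same
// LLVM type.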
2730 | 0 | if (llvm::Type* AdjTy = |
2731 | 0 | getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint, |
2732 | 0 | Arg->getType())) |
2733 | 0 | Arg = Builder.CreateBitCast(Arg, AdjTy); |
2734 | 0 | else |
2735 | 0 | CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input) |
2736 | 0 | << InputExpr->getType() << InputConstraint; |
2737 | | |
2738 | | // Update largest vector width for any vector types. |
2739 | 0 | if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType())) |
2740 | 0 | LargestVectorWidth = |
2741 | 0 | std::max((uint64_t)LargestVectorWidth, |
2742 | 0 | VT->getPrimitiveSizeInBits().getKnownMinValue()); |
2743 | |
|
2744 | 0 | ArgTypes.push_back(Arg->getType()); |
2745 | 0 | ArgElemTypes.push_back(ArgElemType); |
2746 | 0 | Args.push_back(Arg); |
2747 | 0 | Constraints += InputConstraint; |
2748 | 0 | } |
2749 | | |
2750 | | // Append the "input" part of inout constraints. |
2751 | 0 | for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) { |
2752 | 0 | ArgTypes.push_back(InOutArgTypes[i]); |
2753 | 0 | ArgElemTypes.push_back(InOutArgElemTypes[i]); |
2754 | 0 | Args.push_back(InOutArgs[i]); |
2755 | 0 | } |
2756 | 0 | Constraints += InOutConstraints; |
2757 | | |
2758 | | // Labels |
2759 | 0 | SmallVector<llvm::BasicBlock *, 16> Transfer; |
2760 | 0 | llvm::BasicBlock *Fallthrough = nullptr; |
2761 | 0 | bool IsGCCAsmGoto = false; |
2762 | 0 | if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) { |
2763 | 0 | IsGCCAsmGoto = GS->isAsmGoto(); |
2764 | 0 | if (IsGCCAsmGoto) { |
2765 | 0 | for (const auto *E : GS->labels()) { |
2766 | 0 | JumpDest Dest = getJumpDestForLabel(E->getLabel()); |
2767 | 0 | Transfer.push_back(Dest.getBlock()); |
2768 | 0 | if (!Constraints.empty()) |
2769 | 0 | Constraints += ','; |
2770 | 0 | Constraints += "!i"; |
2771 | 0 | } |
2772 | 0 | Fallthrough = createBasicBlock("asm.fallthrough"); |
2773 | 0 | } |
2774 | 0 | } |
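// Editor's sketch, not part of CGStmt.cpp: for an asm goto such as
//
//   asm goto("jmp %l0" :::: err);
//
// each label adds a "!i" constraint and its block becomes an indirect
// destination of the callbr emitted below, roughly
//
//   callbr void asm sideeffect "jmp ${0:l}", "!i,..."()
//           to label %asm.fallthrough [label %err]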
2775 | |
|
2776 | 0 | bool HasUnwindClobber = false; |
2777 | | |
2778 | | // Clobbers |
2779 | 0 | for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) { |
2780 | 0 | StringRef Clobber = S.getClobber(i); |
2781 | |
|
2782 | 0 | if (Clobber == "memory") |
2783 | 0 | ReadOnly = ReadNone = false; |
2784 | 0 | else if (Clobber == "unwind") { |
2785 | 0 | HasUnwindClobber = true; |
2786 | 0 | continue; |
2787 | 0 | } else if (Clobber != "cc") { |
2788 | 0 | Clobber = getTarget().getNormalizedGCCRegisterName(Clobber); |
2789 | 0 | if (CGM.getCodeGenOpts().StackClashProtector && |
2790 | 0 | getTarget().isSPRegName(Clobber)) { |
2791 | 0 | CGM.getDiags().Report(S.getAsmLoc(), |
2792 | 0 | diag::warn_stack_clash_protection_inline_asm); |
2793 | 0 | } |
2794 | 0 | } |
2795 | | |
2796 | 0 | if (isa<MSAsmStmt>(&S)) { |
2797 | 0 | if (Clobber == "eax" || Clobber == "edx") { |
2798 | 0 | if (Constraints.find("=&A") != std::string::npos) |
2799 | 0 | continue; |
2800 | 0 | std::string::size_type position1 = |
2801 | 0 | Constraints.find("={" + Clobber.str() + "}"); |
2802 | 0 | if (position1 != std::string::npos) { |
2803 | 0 | Constraints.insert(position1 + 1, "&"); |
2804 | 0 | continue; |
2805 | 0 | } |
2806 | 0 | std::string::size_type position2 = Constraints.find("=A"); |
2807 | 0 | if (position2 != std::string::npos) { |
2808 | 0 | Constraints.insert(position2 + 1, "&"); |
2809 | 0 | continue; |
2810 | 0 | } |
2811 | 0 | } |
2812 | 0 | } |
2813 | 0 | if (!Constraints.empty()) |
2814 | 0 | Constraints += ','; |
2815 | |
|
2816 | 0 | Constraints += "~{"; |
2817 | 0 | Constraints += Clobber; |
2818 | 0 | Constraints += '}'; |
2819 | 0 | } |
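// Editor's sketch, not part of CGStmt.cpp: each clobber ends up as a "~{...}"
// constraint, e.g. asm("" ::: "cc", "memory") contributes "~{cc},~{memory}"
// (with "memory" also clearing ReadOnly/ReadNone above); only "unwind" is
// skipped here, since it is conveyed through the InlineAsm's canThrow flag
// instead.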
2820 | |
|
2821 | 0 | assert(!(HasUnwindClobber && IsGCCAsmGoto) && |
2822 | 0 | "unwind clobber can't be used with asm goto"); |
2823 | | |
2824 | | // Add machine-specific clobbers.
2825 | 0 | std::string_view MachineClobbers = getTarget().getClobbers(); |
2826 | 0 | if (!MachineClobbers.empty()) { |
2827 | 0 | if (!Constraints.empty()) |
2828 | 0 | Constraints += ','; |
2829 | 0 | Constraints += MachineClobbers; |
2830 | 0 | } |
2831 | |
|
2832 | 0 | llvm::Type *ResultType; |
2833 | 0 | if (ResultRegTypes.empty()) |
2834 | 0 | ResultType = VoidTy; |
2835 | 0 | else if (ResultRegTypes.size() == 1) |
2836 | 0 | ResultType = ResultRegTypes[0]; |
2837 | 0 | else |
2838 | 0 | ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes); |
2839 | |
|
2840 | 0 | llvm::FunctionType *FTy = |
2841 | 0 | llvm::FunctionType::get(ResultType, ArgTypes, false); |
2842 | |
|
2843 | 0 | bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0; |
2844 | |
|
2845 | 0 | llvm::InlineAsm::AsmDialect GnuAsmDialect = |
2846 | 0 | CGM.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT |
2847 | 0 | ? llvm::InlineAsm::AD_ATT |
2848 | 0 | : llvm::InlineAsm::AD_Intel; |
2849 | 0 | llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ? |
2850 | 0 | llvm::InlineAsm::AD_Intel : GnuAsmDialect; |
2851 | |
|
2852 | 0 | llvm::InlineAsm *IA = llvm::InlineAsm::get( |
2853 | 0 | FTy, AsmString, Constraints, HasSideEffect, |
2854 | 0 | /* IsAlignStack */ false, AsmDialect, HasUnwindClobber); |
2855 | 0 | std::vector<llvm::Value*> RegResults; |
2856 | 0 | llvm::CallBrInst *CBR; |
2857 | 0 | llvm::DenseMap<llvm::BasicBlock *, SmallVector<llvm::Value *, 4>> |
2858 | 0 | CBRRegResults; |
2859 | 0 | if (IsGCCAsmGoto) { |
2860 | 0 | CBR = Builder.CreateCallBr(IA, Fallthrough, Transfer, Args); |
2861 | 0 | EmitBlock(Fallthrough); |
2862 | 0 | UpdateAsmCallInst(*CBR, HasSideEffect, false, ReadOnly, ReadNone, |
2863 | 0 | InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes, |
2864 | 0 | *this, RegResults); |
2865 | | // Because we are emitting code top to bottom, we don't have enough |
2866 | | // information at this point to know precisely whether we have a critical |
2867 | | // edge. If we have outputs, split all indirect destinations. |
2868 | 0 | if (!RegResults.empty()) { |
2869 | 0 | unsigned i = 0; |
2870 | 0 | for (llvm::BasicBlock *Dest : CBR->getIndirectDests()) { |
2871 | 0 | llvm::Twine SynthName = Dest->getName() + ".split"; |
2872 | 0 | llvm::BasicBlock *SynthBB = createBasicBlock(SynthName); |
2873 | 0 | llvm::IRBuilderBase::InsertPointGuard IPG(Builder); |
2874 | 0 | Builder.SetInsertPoint(SynthBB); |
2875 | |
|
2876 | 0 | if (ResultRegTypes.size() == 1) { |
2877 | 0 | CBRRegResults[SynthBB].push_back(CBR); |
2878 | 0 | } else { |
2879 | 0 | for (unsigned j = 0, e = ResultRegTypes.size(); j != e; ++j) { |
2880 | 0 | llvm::Value *Tmp = Builder.CreateExtractValue(CBR, j, "asmresult"); |
2881 | 0 | CBRRegResults[SynthBB].push_back(Tmp); |
2882 | 0 | } |
2883 | 0 | } |
2884 | |
|
2885 | 0 | EmitBranch(Dest); |
2886 | 0 | EmitBlock(SynthBB); |
2887 | 0 | CBR->setIndirectDest(i++, SynthBB); |
2888 | 0 | } |
2889 | 0 | } |
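// Editor's sketch, not part of CGStmt.cpp: with outputs on an asm goto, e.g.
//
//   asm goto("" : "=r"(x) : : : err);
//
// each indirect destination %err is redirected to a fresh %err.split block
// that receives the result stores (second EmitAsmStores call below) and then
// branches to the original %err, keeping the stores off a potentially
// critical edge.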
2890 | 0 | } else if (HasUnwindClobber) { |
2891 | 0 | llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, ""); |
2892 | 0 | UpdateAsmCallInst(*Result, HasSideEffect, true, ReadOnly, ReadNone, |
2893 | 0 | InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes, |
2894 | 0 | *this, RegResults); |
2895 | 0 | } else { |
2896 | 0 | llvm::CallInst *Result = |
2897 | 0 | Builder.CreateCall(IA, Args, getBundlesForFunclet(IA)); |
2898 | 0 | UpdateAsmCallInst(*Result, HasSideEffect, false, ReadOnly, ReadNone, |
2899 | 0 | InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes, |
2900 | 0 | *this, RegResults); |
2901 | 0 | } |
2902 | |
|
2903 | 0 | EmitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes, |
2904 | 0 | ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast, |
2905 | 0 | ResultRegIsFlagReg); |
2906 | | |
2907 | | // If this is an asm goto with outputs, repeat EmitAsmStores, but with a |
2908 | | // different insertion point; one for each indirect destination and with |
2909 | | // CBRRegResults rather than RegResults. |
2910 | 0 | if (IsGCCAsmGoto && !CBRRegResults.empty()) { |
2911 | 0 | for (llvm::BasicBlock *Succ : CBR->getIndirectDests()) { |
2912 | 0 | llvm::IRBuilderBase::InsertPointGuard IPG(Builder); |
2913 | 0 | Builder.SetInsertPoint(Succ, --(Succ->end())); |
2914 | 0 | EmitAsmStores(*this, S, CBRRegResults[Succ], ResultRegTypes, |
2915 | 0 | ResultTruncRegTypes, ResultRegDests, ResultRegQualTys, |
2916 | 0 | ResultTypeRequiresCast, ResultRegIsFlagReg); |
2917 | 0 | } |
2918 | 0 | } |
2919 | 0 | } |
2920 | | |
2921 | 0 | LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) { |
2922 | 0 | const RecordDecl *RD = S.getCapturedRecordDecl(); |
2923 | 0 | QualType RecordTy = getContext().getRecordType(RD); |
2924 | | |
2925 | | // Initialize the captured struct. |
2926 | 0 | LValue SlotLV = |
2927 | 0 | MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy); |
2928 | |
|
2929 | 0 | RecordDecl::field_iterator CurField = RD->field_begin(); |
2930 | 0 | for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(), |
2931 | 0 | E = S.capture_init_end(); |
2932 | 0 | I != E; ++I, ++CurField) { |
2933 | 0 | LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField); |
2934 | 0 | if (CurField->hasCapturedVLAType()) { |
2935 | 0 | EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV); |
2936 | 0 | } else { |
2937 | 0 | EmitInitializerForField(*CurField, LV, *I); |
2938 | 0 | } |
2939 | 0 | } |
2940 | |
|
2941 | 0 | return SlotLV; |
2942 | 0 | } |
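// Editor's sketch, not part of CGStmt.cpp: for a captured region, e.g.
//
//   int n = ...; double a[n];
//   #pragma omp parallel
//   { a[0] = n; }          // captures a, n, and the VLA bound of double[n]
//
// the captured record holds one field per captured entity plus one per
// captured VLA bound; InitCapturedStruct fills an "agg.captured" temporary
// with those values (VLA bounds via EmitLambdaVLACapture), and that
// temporary's address is what gets passed to the outlined helper below.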
2943 | | |
2944 | | /// Generate an outlined function for the body of a CapturedStmt, store any |
2945 | | /// captured variables into the captured struct, and call the outlined function. |
2946 | | llvm::Function * |
2947 | 0 | CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) { |
2948 | 0 | LValue CapStruct = InitCapturedStruct(S); |
2949 | | |
2950 | | // Emit the CapturedDecl |
2951 | 0 | CodeGenFunction CGF(CGM, true); |
2952 | 0 | CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K)); |
2953 | 0 | llvm::Function *F = CGF.GenerateCapturedStmtFunction(S); |
2954 | 0 | delete CGF.CapturedStmtInfo; |
2955 | | |
2956 | | // Emit call to the helper function. |
2957 | 0 | EmitCallOrInvoke(F, CapStruct.getPointer(*this)); |
2958 | |
|
2959 | 0 | return F; |
2960 | 0 | } |
2961 | | |
2962 | 0 | Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) { |
2963 | 0 | LValue CapStruct = InitCapturedStruct(S); |
2964 | 0 | return CapStruct.getAddress(*this); |
2965 | 0 | } |
2966 | | |
2967 | | /// Creates the outlined function for a CapturedStmt. |
2968 | | llvm::Function * |
2969 | 0 | CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) { |
2970 | 0 | assert(CapturedStmtInfo && |
2971 | 0 | "CapturedStmtInfo should be set when generating the captured function"); |
2972 | 0 | const CapturedDecl *CD = S.getCapturedDecl(); |
2973 | 0 | const RecordDecl *RD = S.getCapturedRecordDecl(); |
2974 | 0 | SourceLocation Loc = S.getBeginLoc(); |
2975 | 0 | assert(CD->hasBody() && "missing CapturedDecl body"); |
2976 | | |
2977 | | // Build the argument list. |
2978 | 0 | ASTContext &Ctx = CGM.getContext(); |
2979 | 0 | FunctionArgList Args; |
2980 | 0 | Args.append(CD->param_begin(), CD->param_end()); |
2981 | | |
2982 | | // Create the function declaration. |
2983 | 0 | const CGFunctionInfo &FuncInfo = |
2984 | 0 | CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args); |
2985 | 0 | llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo); |
2986 | |
|
2987 | 0 | llvm::Function *F = |
2988 | 0 | llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage, |
2989 | 0 | CapturedStmtInfo->getHelperName(), &CGM.getModule()); |
2990 | 0 | CGM.SetInternalFunctionAttributes(CD, F, FuncInfo); |
2991 | 0 | if (CD->isNothrow()) |
2992 | 0 | F->addFnAttr(llvm::Attribute::NoUnwind); |
2993 | | |
2994 | | // Generate the function. |
2995 | 0 | StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(), |
2996 | 0 | CD->getBody()->getBeginLoc()); |
2997 | | // Set the context parameter in CapturedStmtInfo. |
2998 | 0 | Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam()); |
2999 | 0 | CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr)); |
3000 | | |
3001 | | // Initialize variable-length arrays. |
3002 | 0 | LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(), |
3003 | 0 | Ctx.getTagDeclType(RD)); |
3004 | 0 | for (auto *FD : RD->fields()) { |
3005 | 0 | if (FD->hasCapturedVLAType()) { |
3006 | 0 | auto *ExprArg = |
3007 | 0 | EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc()) |
3008 | 0 | .getScalarVal(); |
3009 | 0 | auto VAT = FD->getCapturedVLAType(); |
3010 | 0 | VLASizeMap[VAT->getSizeExpr()] = ExprArg; |
3011 | 0 | } |
3012 | 0 | } |
3013 | | |
3014 | | // If 'this' is captured, load it into CXXThisValue. |
3015 | 0 | if (CapturedStmtInfo->isCXXThisExprCaptured()) { |
3016 | 0 | FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl(); |
3017 | 0 | LValue ThisLValue = EmitLValueForField(Base, FD); |
3018 | 0 | CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal(); |
3019 | 0 | } |
3020 | |
|
3021 | 0 | PGO.assignRegionCounters(GlobalDecl(CD), F); |
3022 | 0 | CapturedStmtInfo->EmitBody(*this, CD->getBody()); |
3023 | 0 | FinishFunction(CD->getBodyRBrace()); |
3024 | |
|
3025 | 0 | return F; |
3026 | 0 | } |
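// Editor's sketch, not part of CGStmt.cpp: for the default region kind the
// emitted helper looks roughly like
//
//   define internal void @__captured_stmt(ptr %__context) {
//     ; reload VLA bounds and (if captured) 'this' from %__context,
//     ; then emit the CapturedDecl body
//   }
//
// with internal linkage, nounwind when the CapturedDecl is nothrow, and PGO
// region counters assigned against the CapturedDecl rather than the enclosing
// function.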