/src/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
Line | Count | Source |
1 | | //===---- CGOpenMPRuntimeGPU.cpp - Interface to OpenMP GPU Runtimes ----===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | // |
9 | | // This provides a generalized class for OpenMP runtime code generation |
10 | | // specialized by GPU targets NVPTX and AMDGCN. |
11 | | // |
12 | | //===----------------------------------------------------------------------===// |
13 | | |
14 | | #include "CGOpenMPRuntimeGPU.h" |
15 | | #include "CodeGenFunction.h" |
16 | | #include "clang/AST/Attr.h" |
17 | | #include "clang/AST/DeclOpenMP.h" |
18 | | #include "clang/AST/OpenMPClause.h" |
19 | | #include "clang/AST/StmtOpenMP.h" |
20 | | #include "clang/AST/StmtVisitor.h" |
21 | | #include "clang/Basic/Cuda.h" |
22 | | #include "llvm/ADT/SmallPtrSet.h" |
23 | | #include "llvm/Frontend/OpenMP/OMPGridValues.h" |
24 | | #include "llvm/Support/MathExtras.h" |
25 | | |
26 | | using namespace clang; |
27 | | using namespace CodeGen; |
28 | | using namespace llvm::omp; |
29 | | |
30 | | namespace { |
31 | | /// Pre(post)-action for different OpenMP constructs specialized for NVPTX. |
32 | | class NVPTXActionTy final : public PrePostActionTy { |
33 | | llvm::FunctionCallee EnterCallee = nullptr; |
34 | | ArrayRef<llvm::Value *> EnterArgs; |
35 | | llvm::FunctionCallee ExitCallee = nullptr; |
36 | | ArrayRef<llvm::Value *> ExitArgs; |
37 | | bool Conditional = false; |
38 | | llvm::BasicBlock *ContBlock = nullptr; |
39 | | |
40 | | public: |
41 | | NVPTXActionTy(llvm::FunctionCallee EnterCallee, |
42 | | ArrayRef<llvm::Value *> EnterArgs, |
43 | | llvm::FunctionCallee ExitCallee, |
44 | | ArrayRef<llvm::Value *> ExitArgs, bool Conditional = false) |
45 | | : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee), |
46 | 0 | ExitArgs(ExitArgs), Conditional(Conditional) {} |
47 | 0 | void Enter(CodeGenFunction &CGF) override { |
48 | 0 | llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs); |
49 | 0 | if (Conditional) { |
50 | 0 | llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes); |
51 | 0 | auto *ThenBlock = CGF.createBasicBlock("omp_if.then"); |
52 | 0 | ContBlock = CGF.createBasicBlock("omp_if.end"); |
53 | 0 | // Generate the branch (If-stmt) |
54 | 0 | CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock); |
55 | 0 | CGF.EmitBlock(ThenBlock); |
56 | 0 | } |
57 | 0 | } |
58 | 0 | void Done(CodeGenFunction &CGF) { |
59 | 0 | // Emit the rest of blocks/branches |
60 | 0 | CGF.EmitBranch(ContBlock); |
61 | 0 | CGF.EmitBlock(ContBlock, true); |
62 | 0 | } |
63 | 0 | void Exit(CodeGenFunction &CGF) override { |
64 | 0 | CGF.EmitRuntimeCall(ExitCallee, ExitArgs); |
65 | 0 | } |
66 | | }; |
67 | | |
68 | | /// A class to track the execution mode when codegening directives within |
69 | | /// a target region. The appropriate mode (SPMD|NON-SPMD) is set on entry |
70 | | /// to the target region and used by containing directives such as 'parallel' |
71 | | /// to emit optimized code. |
72 | | class ExecutionRuntimeModesRAII { |
73 | | private: |
74 | | CGOpenMPRuntimeGPU::ExecutionMode SavedExecMode = |
75 | | CGOpenMPRuntimeGPU::EM_Unknown; |
76 | | CGOpenMPRuntimeGPU::ExecutionMode &ExecMode; |
77 | | |
78 | | public: |
79 | | ExecutionRuntimeModesRAII(CGOpenMPRuntimeGPU::ExecutionMode &ExecMode, |
80 | | CGOpenMPRuntimeGPU::ExecutionMode EntryMode) |
81 | 0 | : ExecMode(ExecMode) { |
82 | 0 | SavedExecMode = ExecMode; |
83 | 0 | ExecMode = EntryMode; |
84 | 0 | } |
85 | 0 | ~ExecutionRuntimeModesRAII() { ExecMode = SavedExecMode; } |
86 | | }; |
87 | | |
88 | 0 | static const ValueDecl *getPrivateItem(const Expr *RefExpr) { |
89 | 0 | RefExpr = RefExpr->IgnoreParens(); |
90 | 0 | if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(RefExpr)) { |
91 | 0 | const Expr *Base = ASE->getBase()->IgnoreParenImpCasts(); |
92 | 0 | while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base)) |
93 | 0 | Base = TempASE->getBase()->IgnoreParenImpCasts(); |
94 | 0 | RefExpr = Base; |
95 | 0 | } else if (auto *OASE = dyn_cast<OMPArraySectionExpr>(RefExpr)) { |
96 | 0 | const Expr *Base = OASE->getBase()->IgnoreParenImpCasts(); |
97 | 0 | while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base)) |
98 | 0 | Base = TempOASE->getBase()->IgnoreParenImpCasts(); |
99 | 0 | while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base)) |
100 | 0 | Base = TempASE->getBase()->IgnoreParenImpCasts(); |
101 | 0 | RefExpr = Base; |
102 | 0 | } |
103 | 0 | RefExpr = RefExpr->IgnoreParenImpCasts(); |
104 | 0 | if (const auto *DE = dyn_cast<DeclRefExpr>(RefExpr)) |
105 | 0 | return cast<ValueDecl>(DE->getDecl()->getCanonicalDecl()); |
106 | 0 | const auto *ME = cast<MemberExpr>(RefExpr); |
107 | 0 | return cast<ValueDecl>(ME->getMemberDecl()->getCanonicalDecl()); |
108 | 0 | } |
109 | | |
110 | | static RecordDecl *buildRecordForGlobalizedVars( |
111 | | ASTContext &C, ArrayRef<const ValueDecl *> EscapedDecls, |
112 | | ArrayRef<const ValueDecl *> EscapedDeclsForTeams, |
113 | | llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> |
114 | | &MappedDeclsFields, |
115 | 0 | int BufSize) { |
116 | 0 | using VarsDataTy = std::pair<CharUnits /*Align*/, const ValueDecl *>; |
117 | 0 | if (EscapedDecls.empty() && EscapedDeclsForTeams.empty()) |
118 | 0 | return nullptr; |
119 | 0 | SmallVector<VarsDataTy, 4> GlobalizedVars; |
120 | 0 | for (const ValueDecl *D : EscapedDecls) |
121 | 0 | GlobalizedVars.emplace_back(C.getDeclAlign(D), D); |
122 | 0 | for (const ValueDecl *D : EscapedDeclsForTeams) |
123 | 0 | GlobalizedVars.emplace_back(C.getDeclAlign(D), D); |
124 | | |
125 | | // Build struct _globalized_locals_ty { |
126 | | // /* globalized vars */[WarpSize] align (decl_align) |
127 | | // /* globalized vars */ for EscapedDeclsForTeams |
128 | | // }; |
129 | 0 | RecordDecl *GlobalizedRD = C.buildImplicitRecord("_globalized_locals_ty"); |
130 | 0 | GlobalizedRD->startDefinition(); |
131 | 0 | llvm::SmallPtrSet<const ValueDecl *, 16> SingleEscaped( |
132 | 0 | EscapedDeclsForTeams.begin(), EscapedDeclsForTeams.end()); |
133 | 0 | for (const auto &Pair : GlobalizedVars) { |
134 | 0 | const ValueDecl *VD = Pair.second; |
135 | 0 | QualType Type = VD->getType(); |
136 | 0 | if (Type->isLValueReferenceType()) |
137 | 0 | Type = C.getPointerType(Type.getNonReferenceType()); |
138 | 0 | else |
139 | 0 | Type = Type.getNonReferenceType(); |
140 | 0 | SourceLocation Loc = VD->getLocation(); |
141 | 0 | FieldDecl *Field; |
142 | 0 | if (SingleEscaped.count(VD)) { |
143 | 0 | Field = FieldDecl::Create( |
144 | 0 | C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type, |
145 | 0 | C.getTrivialTypeSourceInfo(Type, SourceLocation()), |
146 | 0 | /*BW=*/nullptr, /*Mutable=*/false, |
147 | 0 | /*InitStyle=*/ICIS_NoInit); |
148 | 0 | Field->setAccess(AS_public); |
149 | 0 | if (VD->hasAttrs()) { |
150 | 0 | for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()), |
151 | 0 | E(VD->getAttrs().end()); |
152 | 0 | I != E; ++I) |
153 | 0 | Field->addAttr(*I); |
154 | 0 | } |
155 | 0 | } else { |
156 | 0 | if (BufSize > 1) { |
157 | 0 | llvm::APInt ArraySize(32, BufSize); |
158 | 0 | Type = C.getConstantArrayType(Type, ArraySize, nullptr, |
159 | 0 | ArraySizeModifier::Normal, 0); |
160 | 0 | } |
161 | 0 | Field = FieldDecl::Create( |
162 | 0 | C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type, |
163 | 0 | C.getTrivialTypeSourceInfo(Type, SourceLocation()), |
164 | 0 | /*BW=*/nullptr, /*Mutable=*/false, |
165 | 0 | /*InitStyle=*/ICIS_NoInit); |
166 | 0 | Field->setAccess(AS_public); |
167 | 0 | llvm::APInt Align(32, Pair.first.getQuantity()); |
168 | 0 | Field->addAttr(AlignedAttr::CreateImplicit( |
169 | 0 | C, /*IsAlignmentExpr=*/true, |
170 | 0 | IntegerLiteral::Create(C, Align, |
171 | 0 | C.getIntTypeForBitwidth(32, /*Signed=*/0), |
172 | 0 | SourceLocation()), |
173 | 0 | {}, AlignedAttr::GNU_aligned)); |
174 | 0 | } |
175 | 0 | GlobalizedRD->addDecl(Field); |
176 | 0 | MappedDeclsFields.try_emplace(VD, Field); |
177 | 0 | } |
178 | 0 | GlobalizedRD->completeDefinition(); |
179 | 0 | return GlobalizedRD; |
180 | 0 | } |
181 | | |
182 | | /// Get the list of variables that can escape their declaration context. |
183 | | class CheckVarsEscapingDeclContext final |
184 | | : public ConstStmtVisitor<CheckVarsEscapingDeclContext> { |
185 | | CodeGenFunction &CGF; |
186 | | llvm::SetVector<const ValueDecl *> EscapedDecls; |
187 | | llvm::SetVector<const ValueDecl *> EscapedVariableLengthDecls; |
188 | | llvm::SetVector<const ValueDecl *> DelayedVariableLengthDecls; |
189 | | llvm::SmallPtrSet<const Decl *, 4> EscapedParameters; |
190 | | RecordDecl *GlobalizedRD = nullptr; |
191 | | llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields; |
192 | | bool AllEscaped = false; |
193 | | bool IsForCombinedParallelRegion = false; |
194 | | |
195 | 0 | void markAsEscaped(const ValueDecl *VD) { |
196 | | // Do not globalize declare target variables. |
197 | 0 | if (!isa<VarDecl>(VD) || |
198 | 0 | OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) |
199 | 0 | return; |
200 | 0 | VD = cast<ValueDecl>(VD->getCanonicalDecl()); |
201 | | // Use user-specified allocation. |
202 | 0 | if (VD->hasAttrs() && VD->hasAttr<OMPAllocateDeclAttr>()) |
203 | 0 | return; |
204 | | // Variables captured by value must be globalized. |
205 | 0 | bool IsCaptured = false; |
206 | 0 | if (auto *CSI = CGF.CapturedStmtInfo) { |
207 | 0 | if (const FieldDecl *FD = CSI->lookup(cast<VarDecl>(VD))) { |
208 | | // Check if we need to capture the variable that was already captured by |
209 | | // value in the outer region. |
210 | 0 | IsCaptured = true; |
211 | 0 | if (!IsForCombinedParallelRegion) { |
212 | 0 | if (!FD->hasAttrs()) |
213 | 0 | return; |
214 | 0 | const auto *Attr = FD->getAttr<OMPCaptureKindAttr>(); |
215 | 0 | if (!Attr) |
216 | 0 | return; |
217 | 0 | if (((Attr->getCaptureKind() != OMPC_map) && |
218 | 0 | !isOpenMPPrivate(Attr->getCaptureKind())) || |
219 | 0 | ((Attr->getCaptureKind() == OMPC_map) && |
220 | 0 | !FD->getType()->isAnyPointerType())) |
221 | 0 | return; |
222 | 0 | } |
223 | 0 | if (!FD->getType()->isReferenceType()) { |
224 | 0 | assert(!VD->getType()->isVariablyModifiedType() && |
225 | 0 | "Parameter captured by value with variably modified type"); |
226 | 0 | EscapedParameters.insert(VD); |
227 | 0 | } else if (!IsForCombinedParallelRegion) { |
228 | 0 | return; |
229 | 0 | } |
230 | 0 | } |
231 | 0 | } |
232 | 0 | if ((!CGF.CapturedStmtInfo || |
233 | 0 | (IsForCombinedParallelRegion && CGF.CapturedStmtInfo)) && |
234 | 0 | VD->getType()->isReferenceType()) |
235 | | // Do not globalize variables with reference type. |
236 | 0 | return; |
237 | 0 | if (VD->getType()->isVariablyModifiedType()) { |
238 | | // If not captured at the target region level then mark the escaped |
239 | | // variable as delayed. |
240 | 0 | if (IsCaptured) |
241 | 0 | EscapedVariableLengthDecls.insert(VD); |
242 | 0 | else |
243 | 0 | DelayedVariableLengthDecls.insert(VD); |
244 | 0 | } else |
245 | 0 | EscapedDecls.insert(VD); |
246 | 0 | } |
247 | | |
248 | 0 | void VisitValueDecl(const ValueDecl *VD) { |
249 | 0 | if (VD->getType()->isLValueReferenceType()) |
250 | 0 | markAsEscaped(VD); |
251 | 0 | if (const auto *VarD = dyn_cast<VarDecl>(VD)) { |
252 | 0 | if (!isa<ParmVarDecl>(VarD) && VarD->hasInit()) { |
253 | 0 | const bool SavedAllEscaped = AllEscaped; |
254 | 0 | AllEscaped = VD->getType()->isLValueReferenceType(); |
255 | 0 | Visit(VarD->getInit()); |
256 | 0 | AllEscaped = SavedAllEscaped; |
257 | 0 | } |
258 | 0 | } |
259 | 0 | } |
260 | | void VisitOpenMPCapturedStmt(const CapturedStmt *S, |
261 | | ArrayRef<OMPClause *> Clauses, |
262 | 0 | bool IsCombinedParallelRegion) { |
263 | 0 | if (!S) |
264 | 0 | return; |
265 | 0 | for (const CapturedStmt::Capture &C : S->captures()) { |
266 | 0 | if (C.capturesVariable() && !C.capturesVariableByCopy()) { |
267 | 0 | const ValueDecl *VD = C.getCapturedVar(); |
268 | 0 | bool SavedIsForCombinedParallelRegion = IsForCombinedParallelRegion; |
269 | 0 | if (IsCombinedParallelRegion) { |
270 | | // Check if the variable is privatized in the combined construct and |
271 | | // those private copies must be shared in the inner parallel |
272 | | // directive. |
273 | 0 | IsForCombinedParallelRegion = false; |
274 | 0 | for (const OMPClause *C : Clauses) { |
275 | 0 | if (!isOpenMPPrivate(C->getClauseKind()) || |
276 | 0 | C->getClauseKind() == OMPC_reduction || |
277 | 0 | C->getClauseKind() == OMPC_linear || |
278 | 0 | C->getClauseKind() == OMPC_private) |
279 | 0 | continue; |
280 | 0 | ArrayRef<const Expr *> Vars; |
281 | 0 | if (const auto *PC = dyn_cast<OMPFirstprivateClause>(C)) |
282 | 0 | Vars = PC->getVarRefs(); |
283 | 0 | else if (const auto *PC = dyn_cast<OMPLastprivateClause>(C)) |
284 | 0 | Vars = PC->getVarRefs(); |
285 | 0 | else |
286 | 0 | llvm_unreachable("Unexpected clause."); |
287 | 0 | for (const auto *E : Vars) { |
288 | 0 | const Decl *D = |
289 | 0 | cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl(); |
290 | 0 | if (D == VD->getCanonicalDecl()) { |
291 | 0 | IsForCombinedParallelRegion = true; |
292 | 0 | break; |
293 | 0 | } |
294 | 0 | } |
295 | 0 | if (IsForCombinedParallelRegion) |
296 | 0 | break; |
297 | 0 | } |
298 | 0 | } |
299 | 0 | markAsEscaped(VD); |
300 | 0 | if (isa<OMPCapturedExprDecl>(VD)) |
301 | 0 | VisitValueDecl(VD); |
302 | 0 | IsForCombinedParallelRegion = SavedIsForCombinedParallelRegion; |
303 | 0 | } |
304 | 0 | } |
305 | 0 | } |
306 | | |
307 | 0 | void buildRecordForGlobalizedVars(bool IsInTTDRegion) { |
308 | 0 | assert(!GlobalizedRD && |
309 | 0 | "Record for globalized variables is built already."); |
310 | 0 | ArrayRef<const ValueDecl *> EscapedDeclsForParallel, EscapedDeclsForTeams; |
311 | 0 | unsigned WarpSize = CGF.getTarget().getGridValue().GV_Warp_Size; |
312 | 0 | if (IsInTTDRegion) |
313 | 0 | EscapedDeclsForTeams = EscapedDecls.getArrayRef(); |
314 | 0 | else |
315 | 0 | EscapedDeclsForParallel = EscapedDecls.getArrayRef(); |
316 | 0 | GlobalizedRD = ::buildRecordForGlobalizedVars( |
317 | 0 | CGF.getContext(), EscapedDeclsForParallel, EscapedDeclsForTeams, |
318 | 0 | MappedDeclsFields, WarpSize); |
319 | 0 | } |
320 | | |
321 | | public: |
322 | | CheckVarsEscapingDeclContext(CodeGenFunction &CGF, |
323 | | ArrayRef<const ValueDecl *> TeamsReductions) |
324 | 0 | : CGF(CGF), EscapedDecls(TeamsReductions.begin(), TeamsReductions.end()) { |
325 | 0 | } |
326 | 0 | virtual ~CheckVarsEscapingDeclContext() = default; |
327 | 0 | void VisitDeclStmt(const DeclStmt *S) { |
328 | 0 | if (!S) |
329 | 0 | return; |
330 | 0 | for (const Decl *D : S->decls()) |
331 | 0 | if (const auto *VD = dyn_cast_or_null<ValueDecl>(D)) |
332 | 0 | VisitValueDecl(VD); |
333 | 0 | } |
334 | 0 | void VisitOMPExecutableDirective(const OMPExecutableDirective *D) { |
335 | 0 | if (!D) |
336 | 0 | return; |
337 | 0 | if (!D->hasAssociatedStmt()) |
338 | 0 | return; |
339 | 0 | if (const auto *S = |
340 | 0 | dyn_cast_or_null<CapturedStmt>(D->getAssociatedStmt())) { |
341 | | // Do not analyze directives that do not actually require capturing, |
342 | | // like `omp for` or `omp simd` directives. |
343 | 0 | llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions; |
344 | 0 | getOpenMPCaptureRegions(CaptureRegions, D->getDirectiveKind()); |
345 | 0 | if (CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown) { |
346 | 0 | VisitStmt(S->getCapturedStmt()); |
347 | 0 | return; |
348 | 0 | } |
349 | 0 | VisitOpenMPCapturedStmt( |
350 | 0 | S, D->clauses(), |
351 | 0 | CaptureRegions.back() == OMPD_parallel && |
352 | 0 | isOpenMPDistributeDirective(D->getDirectiveKind())); |
353 | 0 | } |
354 | 0 | } |
355 | 0 | void VisitCapturedStmt(const CapturedStmt *S) { |
356 | 0 | if (!S) |
357 | 0 | return; |
358 | 0 | for (const CapturedStmt::Capture &C : S->captures()) { |
359 | 0 | if (C.capturesVariable() && !C.capturesVariableByCopy()) { |
360 | 0 | const ValueDecl *VD = C.getCapturedVar(); |
361 | 0 | markAsEscaped(VD); |
362 | 0 | if (isa<OMPCapturedExprDecl>(VD)) |
363 | 0 | VisitValueDecl(VD); |
364 | 0 | } |
365 | 0 | } |
366 | 0 | } |
367 | 0 | void VisitLambdaExpr(const LambdaExpr *E) { |
368 | 0 | if (!E) |
369 | 0 | return; |
370 | 0 | for (const LambdaCapture &C : E->captures()) { |
371 | 0 | if (C.capturesVariable()) { |
372 | 0 | if (C.getCaptureKind() == LCK_ByRef) { |
373 | 0 | const ValueDecl *VD = C.getCapturedVar(); |
374 | 0 | markAsEscaped(VD); |
375 | 0 | if (E->isInitCapture(&C) || isa<OMPCapturedExprDecl>(VD)) |
376 | 0 | VisitValueDecl(VD); |
377 | 0 | } |
378 | 0 | } |
379 | 0 | } |
380 | 0 | } |
381 | 0 | void VisitBlockExpr(const BlockExpr *E) { |
382 | 0 | if (!E) |
383 | 0 | return; |
384 | 0 | for (const BlockDecl::Capture &C : E->getBlockDecl()->captures()) { |
385 | 0 | if (C.isByRef()) { |
386 | 0 | const VarDecl *VD = C.getVariable(); |
387 | 0 | markAsEscaped(VD); |
388 | 0 | if (isa<OMPCapturedExprDecl>(VD) || VD->isInitCapture()) |
389 | 0 | VisitValueDecl(VD); |
390 | 0 | } |
391 | 0 | } |
392 | 0 | } |
393 | 0 | void VisitCallExpr(const CallExpr *E) { |
394 | 0 | if (!E) |
395 | 0 | return; |
396 | 0 | for (const Expr *Arg : E->arguments()) { |
397 | 0 | if (!Arg) |
398 | 0 | continue; |
399 | 0 | if (Arg->isLValue()) { |
400 | 0 | const bool SavedAllEscaped = AllEscaped; |
401 | 0 | AllEscaped = true; |
402 | 0 | Visit(Arg); |
403 | 0 | AllEscaped = SavedAllEscaped; |
404 | 0 | } else { |
405 | 0 | Visit(Arg); |
406 | 0 | } |
407 | 0 | } |
408 | 0 | Visit(E->getCallee()); |
409 | 0 | } |
410 | 0 | void VisitDeclRefExpr(const DeclRefExpr *E) { |
411 | 0 | if (!E) |
412 | 0 | return; |
413 | 0 | const ValueDecl *VD = E->getDecl(); |
414 | 0 | if (AllEscaped) |
415 | 0 | markAsEscaped(VD); |
416 | 0 | if (isa<OMPCapturedExprDecl>(VD)) |
417 | 0 | VisitValueDecl(VD); |
418 | 0 | else if (VD->isInitCapture()) |
419 | 0 | VisitValueDecl(VD); |
420 | 0 | } |
421 | 0 | void VisitUnaryOperator(const UnaryOperator *E) { |
422 | 0 | if (!E) |
423 | 0 | return; |
424 | 0 | if (E->getOpcode() == UO_AddrOf) { |
425 | 0 | const bool SavedAllEscaped = AllEscaped; |
426 | 0 | AllEscaped = true; |
427 | 0 | Visit(E->getSubExpr()); |
428 | 0 | AllEscaped = SavedAllEscaped; |
429 | 0 | } else { |
430 | 0 | Visit(E->getSubExpr()); |
431 | 0 | } |
432 | 0 | } |
433 | 0 | void VisitImplicitCastExpr(const ImplicitCastExpr *E) { |
434 | 0 | if (!E) |
435 | 0 | return; |
436 | 0 | if (E->getCastKind() == CK_ArrayToPointerDecay) { |
437 | 0 | const bool SavedAllEscaped = AllEscaped; |
438 | 0 | AllEscaped = true; |
439 | 0 | Visit(E->getSubExpr()); |
440 | 0 | AllEscaped = SavedAllEscaped; |
441 | 0 | } else { |
442 | 0 | Visit(E->getSubExpr()); |
443 | 0 | } |
444 | 0 | } |
445 | 0 | void VisitExpr(const Expr *E) { |
446 | 0 | if (!E) |
447 | 0 | return; |
448 | 0 | bool SavedAllEscaped = AllEscaped; |
449 | 0 | if (!E->isLValue()) |
450 | 0 | AllEscaped = false; |
451 | 0 | for (const Stmt *Child : E->children()) |
452 | 0 | if (Child) |
453 | 0 | Visit(Child); |
454 | 0 | AllEscaped = SavedAllEscaped; |
455 | 0 | } |
456 | 0 | void VisitStmt(const Stmt *S) { |
457 | 0 | if (!S) |
458 | 0 | return; |
459 | 0 | for (const Stmt *Child : S->children()) |
460 | 0 | if (Child) |
461 | 0 | Visit(Child); |
462 | 0 | } |
463 | | |
464 | | /// Returns the record that handles all the escaped local variables and is |
465 | | /// used instead of their original storage. |
466 | 0 | const RecordDecl *getGlobalizedRecord(bool IsInTTDRegion) { |
467 | 0 | if (!GlobalizedRD) |
468 | 0 | buildRecordForGlobalizedVars(IsInTTDRegion); |
469 | 0 | return GlobalizedRD; |
470 | 0 | } |
471 | | |
472 | | /// Returns the field in the globalized record for the escaped variable. |
473 | 0 | const FieldDecl *getFieldForGlobalizedVar(const ValueDecl *VD) const { |
474 | 0 | assert(GlobalizedRD && |
475 | 0 | "Record for globalized variables must be generated already."); |
476 | 0 | return MappedDeclsFields.lookup(VD); |
477 | 0 | } |
478 | | |
479 | | /// Returns the list of the escaped local variables/parameters. |
480 | 0 | ArrayRef<const ValueDecl *> getEscapedDecls() const { |
481 | 0 | return EscapedDecls.getArrayRef(); |
482 | 0 | } |
483 | | |
484 | | /// Checks if the escaped local variable is actually a parameter passed by |
485 | | /// value. |
486 | 0 | const llvm::SmallPtrSetImpl<const Decl *> &getEscapedParameters() const { |
487 | 0 | return EscapedParameters; |
488 | 0 | } |
489 | | |
490 | | /// Returns the list of the escaped variables with the variably modified |
491 | | /// types. |
492 | 0 | ArrayRef<const ValueDecl *> getEscapedVariableLengthDecls() const { |
493 | 0 | return EscapedVariableLengthDecls.getArrayRef(); |
494 | 0 | } |
495 | | |
496 | | /// Returns the list of the delayed variables with the variably modified |
497 | | /// types. |
498 | 0 | ArrayRef<const ValueDecl *> getDelayedVariableLengthDecls() const { |
499 | 0 | return DelayedVariableLengthDecls.getArrayRef(); |
500 | 0 | } |
501 | | }; |
502 | | } // anonymous namespace |
503 | | |
504 | | /// Get the id of the warp in the block. |
505 | | /// We assume that the warp size is 32, which is always the case |
506 | | /// on the NVPTX device, to generate more efficient code. |
507 | 0 | static llvm::Value *getNVPTXWarpID(CodeGenFunction &CGF) { |
508 | 0 | CGBuilderTy &Bld = CGF.Builder; |
509 | 0 | unsigned LaneIDBits = |
510 | 0 | llvm::Log2_32(CGF.getTarget().getGridValue().GV_Warp_Size); |
511 | 0 | auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime()); |
512 | 0 | return Bld.CreateAShr(RT.getGPUThreadID(CGF), LaneIDBits, "nvptx_warp_id"); |
513 | 0 | } |
514 | | |
515 | | /// Get the id of the current lane in the Warp. |
516 | | /// We assume that the warp size is 32, which is always the case |
517 | | /// on the NVPTX device, to generate more efficient code. |
518 | 0 | static llvm::Value *getNVPTXLaneID(CodeGenFunction &CGF) { |
519 | 0 | CGBuilderTy &Bld = CGF.Builder; |
520 | 0 | unsigned LaneIDBits = |
521 | 0 | llvm::Log2_32(CGF.getTarget().getGridValue().GV_Warp_Size); |
522 | 0 | assert(LaneIDBits < 32 && "Invalid LaneIDBits size in NVPTX device."); |
523 | 0 | unsigned LaneIDMask = ~0u >> (32u - LaneIDBits); |
524 | 0 | auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime()); |
525 | 0 | return Bld.CreateAnd(RT.getGPUThreadID(CGF), Bld.getInt32(LaneIDMask), |
526 | 0 | "nvptx_lane_id"); |
527 | 0 | } |
528 | | |
529 | | CGOpenMPRuntimeGPU::ExecutionMode |
530 | 0 | CGOpenMPRuntimeGPU::getExecutionMode() const { |
531 | 0 | return CurrentExecutionMode; |
532 | 0 | } |
533 | | |
534 | | CGOpenMPRuntimeGPU::DataSharingMode |
535 | 0 | CGOpenMPRuntimeGPU::getDataSharingMode() const { |
536 | 0 | return CurrentDataSharingMode; |
537 | 0 | } |
538 | | |
539 | | /// Check for inner (nested) SPMD construct, if any |
540 | | static bool hasNestedSPMDDirective(ASTContext &Ctx, |
541 | 0 | const OMPExecutableDirective &D) { |
542 | 0 | const auto *CS = D.getInnermostCapturedStmt(); |
543 | 0 | const auto *Body = |
544 | 0 | CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true); |
545 | 0 | const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body); |
546 | |
547 | 0 | if (const auto *NestedDir = |
548 | 0 | dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) { |
549 | 0 | OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind(); |
550 | 0 | switch (D.getDirectiveKind()) { |
551 | 0 | case OMPD_target: |
552 | 0 | if (isOpenMPParallelDirective(DKind)) |
553 | 0 | return true; |
554 | 0 | if (DKind == OMPD_teams) { |
555 | 0 | Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers( |
556 | 0 | /*IgnoreCaptured=*/true); |
557 | 0 | if (!Body) |
558 | 0 | return false; |
559 | 0 | ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body); |
560 | 0 | if (const auto *NND = |
561 | 0 | dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) { |
562 | 0 | DKind = NND->getDirectiveKind(); |
563 | 0 | if (isOpenMPParallelDirective(DKind)) |
564 | 0 | return true; |
565 | 0 | } |
566 | 0 | } |
567 | 0 | return false; |
568 | 0 | case OMPD_target_teams: |
569 | 0 | return isOpenMPParallelDirective(DKind); |
570 | 0 | case OMPD_target_simd: |
571 | 0 | case OMPD_target_parallel: |
572 | 0 | case OMPD_target_parallel_for: |
573 | 0 | case OMPD_target_parallel_for_simd: |
574 | 0 | case OMPD_target_teams_distribute: |
575 | 0 | case OMPD_target_teams_distribute_simd: |
576 | 0 | case OMPD_target_teams_distribute_parallel_for: |
577 | 0 | case OMPD_target_teams_distribute_parallel_for_simd: |
578 | 0 | case OMPD_parallel: |
579 | 0 | case OMPD_for: |
580 | 0 | case OMPD_parallel_for: |
581 | 0 | case OMPD_parallel_master: |
582 | 0 | case OMPD_parallel_sections: |
583 | 0 | case OMPD_for_simd: |
584 | 0 | case OMPD_parallel_for_simd: |
585 | 0 | case OMPD_cancel: |
586 | 0 | case OMPD_cancellation_point: |
587 | 0 | case OMPD_ordered: |
588 | 0 | case OMPD_threadprivate: |
589 | 0 | case OMPD_allocate: |
590 | 0 | case OMPD_task: |
591 | 0 | case OMPD_simd: |
592 | 0 | case OMPD_sections: |
593 | 0 | case OMPD_section: |
594 | 0 | case OMPD_single: |
595 | 0 | case OMPD_master: |
596 | 0 | case OMPD_critical: |
597 | 0 | case OMPD_taskyield: |
598 | 0 | case OMPD_barrier: |
599 | 0 | case OMPD_taskwait: |
600 | 0 | case OMPD_taskgroup: |
601 | 0 | case OMPD_atomic: |
602 | 0 | case OMPD_flush: |
603 | 0 | case OMPD_depobj: |
604 | 0 | case OMPD_scan: |
605 | 0 | case OMPD_teams: |
606 | 0 | case OMPD_target_data: |
607 | 0 | case OMPD_target_exit_data: |
608 | 0 | case OMPD_target_enter_data: |
609 | 0 | case OMPD_distribute: |
610 | 0 | case OMPD_distribute_simd: |
611 | 0 | case OMPD_distribute_parallel_for: |
612 | 0 | case OMPD_distribute_parallel_for_simd: |
613 | 0 | case OMPD_teams_distribute: |
614 | 0 | case OMPD_teams_distribute_simd: |
615 | 0 | case OMPD_teams_distribute_parallel_for: |
616 | 0 | case OMPD_teams_distribute_parallel_for_simd: |
617 | 0 | case OMPD_target_update: |
618 | 0 | case OMPD_declare_simd: |
619 | 0 | case OMPD_declare_variant: |
620 | 0 | case OMPD_begin_declare_variant: |
621 | 0 | case OMPD_end_declare_variant: |
622 | 0 | case OMPD_declare_target: |
623 | 0 | case OMPD_end_declare_target: |
624 | 0 | case OMPD_declare_reduction: |
625 | 0 | case OMPD_declare_mapper: |
626 | 0 | case OMPD_taskloop: |
627 | 0 | case OMPD_taskloop_simd: |
628 | 0 | case OMPD_master_taskloop: |
629 | 0 | case OMPD_master_taskloop_simd: |
630 | 0 | case OMPD_parallel_master_taskloop: |
631 | 0 | case OMPD_parallel_master_taskloop_simd: |
632 | 0 | case OMPD_requires: |
633 | 0 | case OMPD_unknown: |
634 | 0 | default: |
635 | 0 | llvm_unreachable("Unexpected directive."); |
636 | 0 | } |
637 | 0 | } |
638 | | |
639 | 0 | return false; |
640 | 0 | } |
641 | | |
642 | | static bool supportsSPMDExecutionMode(ASTContext &Ctx, |
643 | 0 | const OMPExecutableDirective &D) { |
644 | 0 | OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind(); |
645 | 0 | switch (DirectiveKind) { |
646 | 0 | case OMPD_target: |
647 | 0 | case OMPD_target_teams: |
648 | 0 | return hasNestedSPMDDirective(Ctx, D); |
649 | 0 | case OMPD_target_teams_loop: |
650 | 0 | case OMPD_target_parallel_loop: |
651 | 0 | case OMPD_target_parallel: |
652 | 0 | case OMPD_target_parallel_for: |
653 | 0 | case OMPD_target_parallel_for_simd: |
654 | 0 | case OMPD_target_teams_distribute_parallel_for: |
655 | 0 | case OMPD_target_teams_distribute_parallel_for_simd: |
656 | 0 | case OMPD_target_simd: |
657 | 0 | case OMPD_target_teams_distribute_simd: |
658 | 0 | return true; |
659 | 0 | case OMPD_target_teams_distribute: |
660 | 0 | return false; |
661 | 0 | case OMPD_parallel: |
662 | 0 | case OMPD_for: |
663 | 0 | case OMPD_parallel_for: |
664 | 0 | case OMPD_parallel_master: |
665 | 0 | case OMPD_parallel_sections: |
666 | 0 | case OMPD_for_simd: |
667 | 0 | case OMPD_parallel_for_simd: |
668 | 0 | case OMPD_cancel: |
669 | 0 | case OMPD_cancellation_point: |
670 | 0 | case OMPD_ordered: |
671 | 0 | case OMPD_threadprivate: |
672 | 0 | case OMPD_allocate: |
673 | 0 | case OMPD_task: |
674 | 0 | case OMPD_simd: |
675 | 0 | case OMPD_sections: |
676 | 0 | case OMPD_section: |
677 | 0 | case OMPD_single: |
678 | 0 | case OMPD_master: |
679 | 0 | case OMPD_critical: |
680 | 0 | case OMPD_taskyield: |
681 | 0 | case OMPD_barrier: |
682 | 0 | case OMPD_taskwait: |
683 | 0 | case OMPD_taskgroup: |
684 | 0 | case OMPD_atomic: |
685 | 0 | case OMPD_flush: |
686 | 0 | case OMPD_depobj: |
687 | 0 | case OMPD_scan: |
688 | 0 | case OMPD_teams: |
689 | 0 | case OMPD_target_data: |
690 | 0 | case OMPD_target_exit_data: |
691 | 0 | case OMPD_target_enter_data: |
692 | 0 | case OMPD_distribute: |
693 | 0 | case OMPD_distribute_simd: |
694 | 0 | case OMPD_distribute_parallel_for: |
695 | 0 | case OMPD_distribute_parallel_for_simd: |
696 | 0 | case OMPD_teams_distribute: |
697 | 0 | case OMPD_teams_distribute_simd: |
698 | 0 | case OMPD_teams_distribute_parallel_for: |
699 | 0 | case OMPD_teams_distribute_parallel_for_simd: |
700 | 0 | case OMPD_target_update: |
701 | 0 | case OMPD_declare_simd: |
702 | 0 | case OMPD_declare_variant: |
703 | 0 | case OMPD_begin_declare_variant: |
704 | 0 | case OMPD_end_declare_variant: |
705 | 0 | case OMPD_declare_target: |
706 | 0 | case OMPD_end_declare_target: |
707 | 0 | case OMPD_declare_reduction: |
708 | 0 | case OMPD_declare_mapper: |
709 | 0 | case OMPD_taskloop: |
710 | 0 | case OMPD_taskloop_simd: |
711 | 0 | case OMPD_master_taskloop: |
712 | 0 | case OMPD_master_taskloop_simd: |
713 | 0 | case OMPD_parallel_master_taskloop: |
714 | 0 | case OMPD_parallel_master_taskloop_simd: |
715 | 0 | case OMPD_requires: |
716 | 0 | case OMPD_unknown: |
717 | 0 | default: |
718 | 0 | break; |
719 | 0 | } |
720 | 0 | llvm_unreachable( |
721 | 0 | "Unknown programming model for OpenMP directive on NVPTX target."); |
722 | 0 | } |
723 | | |
724 | | void CGOpenMPRuntimeGPU::emitNonSPMDKernel(const OMPExecutableDirective &D, |
725 | | StringRef ParentName, |
726 | | llvm::Function *&OutlinedFn, |
727 | | llvm::Constant *&OutlinedFnID, |
728 | | bool IsOffloadEntry, |
729 | 0 | const RegionCodeGenTy &CodeGen) { |
730 | 0 | ExecutionRuntimeModesRAII ModeRAII(CurrentExecutionMode, EM_NonSPMD); |
731 | 0 | EntryFunctionState EST; |
732 | 0 | WrapperFunctionsMap.clear(); |
733 | |
734 | 0 | [[maybe_unused]] bool IsBareKernel = D.getSingleClause<OMPXBareClause>(); |
735 | 0 | assert(!IsBareKernel && "bare kernel should not be at generic mode"); |
736 | | |
737 | | // Emit target region as a standalone region. |
738 | 0 | class NVPTXPrePostActionTy : public PrePostActionTy { |
739 | 0 | CGOpenMPRuntimeGPU::EntryFunctionState &EST; |
740 | 0 | const OMPExecutableDirective &D; |
741 | |
742 | 0 | public: |
743 | 0 | NVPTXPrePostActionTy(CGOpenMPRuntimeGPU::EntryFunctionState &EST, |
744 | 0 | const OMPExecutableDirective &D) |
745 | 0 | : EST(EST), D(D) {} |
746 | 0 | void Enter(CodeGenFunction &CGF) override { |
747 | 0 | auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime()); |
748 | 0 | RT.emitKernelInit(D, CGF, EST, /* IsSPMD */ false); |
749 | | // Skip target region initialization. |
750 | 0 | RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true); |
751 | 0 | } |
752 | 0 | void Exit(CodeGenFunction &CGF) override { |
753 | 0 | auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime()); |
754 | 0 | RT.clearLocThreadIdInsertPt(CGF); |
755 | 0 | RT.emitKernelDeinit(CGF, EST, /* IsSPMD */ false); |
756 | 0 | } |
757 | 0 | } Action(EST, D); |
758 | 0 | CodeGen.setAction(Action); |
759 | 0 | IsInTTDRegion = true; |
760 | 0 | emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID, |
761 | 0 | IsOffloadEntry, CodeGen); |
762 | 0 | IsInTTDRegion = false; |
763 | 0 | } |
764 | | |
765 | | void CGOpenMPRuntimeGPU::emitKernelInit(const OMPExecutableDirective &D, |
766 | | CodeGenFunction &CGF, |
767 | 0 | EntryFunctionState &EST, bool IsSPMD) { |
768 | 0 | int32_t MinThreadsVal = 1, MaxThreadsVal = -1, MinTeamsVal = 1, |
769 | 0 | MaxTeamsVal = -1; |
770 | 0 | computeMinAndMaxThreadsAndTeams(D, CGF, MinThreadsVal, MaxThreadsVal, |
771 | 0 | MinTeamsVal, MaxTeamsVal); |
772 | |
773 | 0 | CGBuilderTy &Bld = CGF.Builder; |
774 | 0 | Bld.restoreIP(OMPBuilder.createTargetInit( |
775 | 0 | Bld, IsSPMD, MinThreadsVal, MaxThreadsVal, MinTeamsVal, MaxTeamsVal)); |
776 | 0 | if (!IsSPMD) |
777 | 0 | emitGenericVarsProlog(CGF, EST.Loc); |
778 | 0 | } |
779 | | |
780 | | void CGOpenMPRuntimeGPU::emitKernelDeinit(CodeGenFunction &CGF, |
781 | | EntryFunctionState &EST, |
782 | 0 | bool IsSPMD) { |
783 | 0 | if (!IsSPMD) |
784 | 0 | emitGenericVarsEpilog(CGF); |
785 | | |
786 | | // This is temporary until we remove the fixed sized buffer. |
787 | 0 | ASTContext &C = CGM.getContext(); |
788 | 0 | RecordDecl *StaticRD = C.buildImplicitRecord( |
789 | 0 | "_openmp_teams_reduction_type_$_", RecordDecl::TagKind::Union); |
790 | 0 | StaticRD->startDefinition(); |
791 | 0 | for (const RecordDecl *TeamReductionRec : TeamsReductions) { |
792 | 0 | QualType RecTy = C.getRecordType(TeamReductionRec); |
793 | 0 | auto *Field = FieldDecl::Create( |
794 | 0 | C, StaticRD, SourceLocation(), SourceLocation(), nullptr, RecTy, |
795 | 0 | C.getTrivialTypeSourceInfo(RecTy, SourceLocation()), |
796 | 0 | /*BW=*/nullptr, /*Mutable=*/false, |
797 | 0 | /*InitStyle=*/ICIS_NoInit); |
798 | 0 | Field->setAccess(AS_public); |
799 | 0 | StaticRD->addDecl(Field); |
800 | 0 | } |
801 | 0 | StaticRD->completeDefinition(); |
802 | 0 | QualType StaticTy = C.getRecordType(StaticRD); |
803 | 0 | llvm::Type *LLVMReductionsBufferTy = |
804 | 0 | CGM.getTypes().ConvertTypeForMem(StaticTy); |
805 | 0 | const auto &DL = CGM.getModule().getDataLayout(); |
806 | 0 | uint64_t ReductionDataSize = |
807 | 0 | TeamsReductions.empty() |
808 | 0 | ? 0 |
809 | 0 | : DL.getTypeAllocSize(LLVMReductionsBufferTy).getFixedValue(); |
810 | 0 | CGBuilderTy &Bld = CGF.Builder; |
811 | 0 | OMPBuilder.createTargetDeinit(Bld, ReductionDataSize, |
812 | 0 | C.getLangOpts().OpenMPCUDAReductionBufNum); |
813 | 0 | TeamsReductions.clear(); |
814 | 0 | } |
815 | | |
816 | | void CGOpenMPRuntimeGPU::emitSPMDKernel(const OMPExecutableDirective &D, |
817 | | StringRef ParentName, |
818 | | llvm::Function *&OutlinedFn, |
819 | | llvm::Constant *&OutlinedFnID, |
820 | | bool IsOffloadEntry, |
821 | 0 | const RegionCodeGenTy &CodeGen) { |
822 | 0 | ExecutionRuntimeModesRAII ModeRAII(CurrentExecutionMode, EM_SPMD); |
823 | 0 | EntryFunctionState EST; |
824 | |
825 | 0 | bool IsBareKernel = D.getSingleClause<OMPXBareClause>(); |
826 | | |
827 | | // Emit target region as a standalone region. |
828 | 0 | class NVPTXPrePostActionTy : public PrePostActionTy { |
829 | 0 | CGOpenMPRuntimeGPU &RT; |
830 | 0 | CGOpenMPRuntimeGPU::EntryFunctionState &EST; |
831 | 0 | bool IsBareKernel; |
832 | 0 | DataSharingMode Mode; |
833 | 0 | const OMPExecutableDirective &D; |
834 | |
835 | 0 | public: |
836 | 0 | NVPTXPrePostActionTy(CGOpenMPRuntimeGPU &RT, |
837 | 0 | CGOpenMPRuntimeGPU::EntryFunctionState &EST, |
838 | 0 | bool IsBareKernel, const OMPExecutableDirective &D) |
839 | 0 | : RT(RT), EST(EST), IsBareKernel(IsBareKernel), |
840 | 0 | Mode(RT.CurrentDataSharingMode), D(D) {} |
841 | 0 | void Enter(CodeGenFunction &CGF) override { |
842 | 0 | if (IsBareKernel) { |
843 | 0 | RT.CurrentDataSharingMode = DataSharingMode::DS_CUDA; |
844 | 0 | return; |
845 | 0 | } |
846 | 0 | RT.emitKernelInit(D, CGF, EST, /* IsSPMD */ true); |
847 | | // Skip target region initialization. |
848 | 0 | RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true); |
849 | 0 | } |
850 | 0 | void Exit(CodeGenFunction &CGF) override { |
851 | 0 | if (IsBareKernel) { |
852 | 0 | RT.CurrentDataSharingMode = Mode; |
853 | 0 | return; |
854 | 0 | } |
855 | 0 | RT.clearLocThreadIdInsertPt(CGF); |
856 | 0 | RT.emitKernelDeinit(CGF, EST, /* IsSPMD */ true); |
857 | 0 | } |
858 | 0 | } Action(*this, EST, IsBareKernel, D); |
859 | 0 | CodeGen.setAction(Action); |
860 | 0 | IsInTTDRegion = true; |
861 | 0 | emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID, |
862 | 0 | IsOffloadEntry, CodeGen); |
863 | 0 | IsInTTDRegion = false; |
864 | 0 | } |
865 | | |
866 | | void CGOpenMPRuntimeGPU::emitTargetOutlinedFunction( |
867 | | const OMPExecutableDirective &D, StringRef ParentName, |
868 | | llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID, |
869 | 0 | bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) { |
870 | 0 | if (!IsOffloadEntry) // Nothing to do. |
871 | 0 | return; |
872 | | |
873 | 0 | assert(!ParentName.empty() && "Invalid target region parent name!"); |
874 | | |
875 | 0 | bool Mode = supportsSPMDExecutionMode(CGM.getContext(), D); |
876 | 0 | bool IsBareKernel = D.getSingleClause<OMPXBareClause>(); |
877 | 0 | if (Mode || IsBareKernel) |
878 | 0 | emitSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry, |
879 | 0 | CodeGen); |
880 | 0 | else |
881 | 0 | emitNonSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry, |
882 | 0 | CodeGen); |
883 | 0 | } |
884 | | |
885 | | CGOpenMPRuntimeGPU::CGOpenMPRuntimeGPU(CodeGenModule &CGM) |
886 | 0 | : CGOpenMPRuntime(CGM) { |
887 | 0 | llvm::OpenMPIRBuilderConfig Config( |
888 | 0 | CGM.getLangOpts().OpenMPIsTargetDevice, isGPU(), |
889 | 0 | CGM.getLangOpts().OpenMPOffloadMandatory, |
890 | 0 | /*HasRequiresReverseOffload*/ false, /*HasRequiresUnifiedAddress*/ false, |
891 | 0 | hasRequiresUnifiedSharedMemory(), /*HasRequiresDynamicAllocators*/ false); |
892 | 0 | OMPBuilder.setConfig(Config); |
893 | |
894 | 0 | if (!CGM.getLangOpts().OpenMPIsTargetDevice) |
895 | 0 | llvm_unreachable("OpenMP can only handle device code."); |
896 | |
897 | 0 | if (CGM.getLangOpts().OpenMPCUDAMode) |
898 | 0 | CurrentDataSharingMode = CGOpenMPRuntimeGPU::DS_CUDA; |
899 | |
900 | 0 | llvm::OpenMPIRBuilder &OMPBuilder = getOMPBuilder(); |
901 | 0 | if (CGM.getLangOpts().NoGPULib || CGM.getLangOpts().OMPHostIRFile.empty()) |
902 | 0 | return; |
903 | | |
904 | 0 | OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPTargetDebug, |
905 | 0 | "__omp_rtl_debug_kind"); |
906 | 0 | OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPTeamSubscription, |
907 | 0 | "__omp_rtl_assume_teams_oversubscription"); |
908 | 0 | OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPThreadSubscription, |
909 | 0 | "__omp_rtl_assume_threads_oversubscription"); |
910 | 0 | OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPNoThreadState, |
911 | 0 | "__omp_rtl_assume_no_thread_state"); |
912 | 0 | OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPNoNestedParallelism, |
913 | 0 | "__omp_rtl_assume_no_nested_parallelism"); |
914 | 0 | } |
915 | | |
916 | | void CGOpenMPRuntimeGPU::emitProcBindClause(CodeGenFunction &CGF, |
917 | | ProcBindKind ProcBind, |
918 | 0 | SourceLocation Loc) { |
919 | | // Nothing to do. |
920 | 0 | } |
921 | | |
922 | | void CGOpenMPRuntimeGPU::emitNumThreadsClause(CodeGenFunction &CGF, |
923 | | llvm::Value *NumThreads, |
924 | 0 | SourceLocation Loc) { |
925 | | // Nothing to do. |
926 | 0 | } |
927 | | |
928 | | void CGOpenMPRuntimeGPU::emitNumTeamsClause(CodeGenFunction &CGF, |
929 | | const Expr *NumTeams, |
930 | | const Expr *ThreadLimit, |
931 | 0 | SourceLocation Loc) {} |
932 | | |
933 | | llvm::Function *CGOpenMPRuntimeGPU::emitParallelOutlinedFunction( |
934 | | CodeGenFunction &CGF, const OMPExecutableDirective &D, |
935 | | const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, |
936 | 0 | const RegionCodeGenTy &CodeGen) { |
937 | | // Emit target region as a standalone region. |
938 | 0 | bool PrevIsInTTDRegion = IsInTTDRegion; |
939 | 0 | IsInTTDRegion = false; |
940 | 0 | auto *OutlinedFun = |
941 | 0 | cast<llvm::Function>(CGOpenMPRuntime::emitParallelOutlinedFunction( |
942 | 0 | CGF, D, ThreadIDVar, InnermostKind, CodeGen)); |
943 | 0 | IsInTTDRegion = PrevIsInTTDRegion; |
944 | 0 | if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD) { |
945 | 0 | llvm::Function *WrapperFun = |
946 | 0 | createParallelDataSharingWrapper(OutlinedFun, D); |
947 | 0 | WrapperFunctionsMap[OutlinedFun] = WrapperFun; |
948 | 0 | } |
949 | |
950 | 0 | return OutlinedFun; |
951 | 0 | } |
952 | | |
953 | | /// Get list of lastprivate variables from the teams distribute ... or |
954 | | /// teams {distribute ...} directives. |
955 | | static void |
956 | | getDistributeLastprivateVars(ASTContext &Ctx, const OMPExecutableDirective &D, |
957 | 0 | llvm::SmallVectorImpl<const ValueDecl *> &Vars) { |
958 | 0 | assert(isOpenMPTeamsDirective(D.getDirectiveKind()) && |
959 | 0 | "expected teams directive."); |
960 | 0 | const OMPExecutableDirective *Dir = &D; |
961 | 0 | if (!isOpenMPDistributeDirective(D.getDirectiveKind())) { |
962 | 0 | if (const Stmt *S = CGOpenMPRuntime::getSingleCompoundChild( |
963 | 0 | Ctx, |
964 | 0 | D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers( |
965 | 0 | /*IgnoreCaptured=*/true))) { |
966 | 0 | Dir = dyn_cast_or_null<OMPExecutableDirective>(S); |
967 | 0 | if (Dir && !isOpenMPDistributeDirective(Dir->getDirectiveKind())) |
968 | 0 | Dir = nullptr; |
969 | 0 | } |
970 | 0 | } |
971 | 0 | if (!Dir) |
972 | 0 | return; |
973 | 0 | for (const auto *C : Dir->getClausesOfKind<OMPLastprivateClause>()) { |
974 | 0 | for (const Expr *E : C->getVarRefs()) |
975 | 0 | Vars.push_back(getPrivateItem(E)); |
976 | 0 | } |
977 | 0 | } |
978 | | |
979 | | /// Get list of reduction variables from the teams ... directives. |
980 | | static void |
981 | | getTeamsReductionVars(ASTContext &Ctx, const OMPExecutableDirective &D, |
982 | 0 | llvm::SmallVectorImpl<const ValueDecl *> &Vars) { |
983 | 0 | assert(isOpenMPTeamsDirective(D.getDirectiveKind()) && |
984 | 0 | "expected teams directive."); |
985 | 0 | for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) { |
986 | 0 | for (const Expr *E : C->privates()) |
987 | 0 | Vars.push_back(getPrivateItem(E)); |
988 | 0 | } |
989 | 0 | } |
990 | | |
991 | | llvm::Function *CGOpenMPRuntimeGPU::emitTeamsOutlinedFunction( |
992 | | CodeGenFunction &CGF, const OMPExecutableDirective &D, |
993 | | const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, |
994 | 0 | const RegionCodeGenTy &CodeGen) { |
995 | 0 | SourceLocation Loc = D.getBeginLoc(); |
996 | |
997 | 0 | const RecordDecl *GlobalizedRD = nullptr; |
998 | 0 | llvm::SmallVector<const ValueDecl *, 4> LastPrivatesReductions; |
999 | 0 | llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields; |
1000 | 0 | unsigned WarpSize = CGM.getTarget().getGridValue().GV_Warp_Size; |
1001 | | // Globalize team reductions variable unconditionally in all modes. |
1002 | 0 | if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD) |
1003 | 0 | getTeamsReductionVars(CGM.getContext(), D, LastPrivatesReductions); |
1004 | 0 | if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD) { |
1005 | 0 | getDistributeLastprivateVars(CGM.getContext(), D, LastPrivatesReductions); |
1006 | 0 | if (!LastPrivatesReductions.empty()) { |
1007 | 0 | GlobalizedRD = ::buildRecordForGlobalizedVars( |
1008 | 0 | CGM.getContext(), std::nullopt, LastPrivatesReductions, |
1009 | 0 | MappedDeclsFields, WarpSize); |
1010 | 0 | } |
1011 | 0 | } else if (!LastPrivatesReductions.empty()) { |
1012 | 0 | assert(!TeamAndReductions.first && |
1013 | 0 | "Previous team declaration is not expected."); |
1014 | 0 | TeamAndReductions.first = D.getCapturedStmt(OMPD_teams)->getCapturedDecl(); |
1015 | 0 | std::swap(TeamAndReductions.second, LastPrivatesReductions); |
1016 | 0 | } |
1017 | | |
1018 | | // Emit target region as a standalone region. |
1019 | 0 | class NVPTXPrePostActionTy : public PrePostActionTy { |
1020 | 0 | SourceLocation &Loc; |
1021 | 0 | const RecordDecl *GlobalizedRD; |
1022 | 0 | llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> |
1023 | 0 | &MappedDeclsFields; |
1024 | |
1025 | 0 | public: |
1026 | 0 | NVPTXPrePostActionTy( |
1027 | 0 | SourceLocation &Loc, const RecordDecl *GlobalizedRD, |
1028 | 0 | llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> |
1029 | 0 | &MappedDeclsFields) |
1030 | 0 | : Loc(Loc), GlobalizedRD(GlobalizedRD), |
1031 | 0 | MappedDeclsFields(MappedDeclsFields) {} |
1032 | 0 | void Enter(CodeGenFunction &CGF) override { |
1033 | 0 | auto &Rt = |
1034 | 0 | static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime()); |
1035 | 0 | if (GlobalizedRD) { |
1036 | 0 | auto I = Rt.FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first; |
1037 | 0 | I->getSecond().MappedParams = |
1038 | 0 | std::make_unique<CodeGenFunction::OMPMapVars>(); |
1039 | 0 | DeclToAddrMapTy &Data = I->getSecond().LocalVarData; |
1040 | 0 | for (const auto &Pair : MappedDeclsFields) { |
1041 | 0 | assert(Pair.getFirst()->isCanonicalDecl() && |
1042 | 0 | "Expected canonical declaration"); |
1043 | 0 | Data.insert(std::make_pair(Pair.getFirst(), MappedVarData())); |
1044 | 0 | } |
1045 | 0 | } |
1046 | 0 | Rt.emitGenericVarsProlog(CGF, Loc); |
1047 | 0 | } |
1048 | 0 | void Exit(CodeGenFunction &CGF) override { |
1049 | 0 | static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime()) |
1050 | 0 | .emitGenericVarsEpilog(CGF); |
1051 | 0 | } |
1052 | 0 | } Action(Loc, GlobalizedRD, MappedDeclsFields); |
1053 | 0 | CodeGen.setAction(Action); |
1054 | 0 | llvm::Function *OutlinedFun = CGOpenMPRuntime::emitTeamsOutlinedFunction( |
1055 | 0 | CGF, D, ThreadIDVar, InnermostKind, CodeGen); |
1056 | |
1057 | 0 | return OutlinedFun; |
1058 | 0 | } |
1059 | | |
1060 | | void CGOpenMPRuntimeGPU::emitGenericVarsProlog(CodeGenFunction &CGF, |
1061 | 0 | SourceLocation Loc) { |
1062 | 0 | if (getDataSharingMode() != CGOpenMPRuntimeGPU::DS_Generic) |
1063 | 0 | return; |
1064 | | |
1065 | 0 | CGBuilderTy &Bld = CGF.Builder; |
1066 | |
1067 | 0 | const auto I = FunctionGlobalizedDecls.find(CGF.CurFn); |
1068 | 0 | if (I == FunctionGlobalizedDecls.end()) |
1069 | 0 | return; |
1070 | | |
1071 | 0 | for (auto &Rec : I->getSecond().LocalVarData) { |
1072 | 0 | const auto *VD = cast<VarDecl>(Rec.first); |
1073 | 0 | bool EscapedParam = I->getSecond().EscapedParameters.count(Rec.first); |
1074 | 0 | QualType VarTy = VD->getType(); |
1075 | | |
1076 | | // Get the local allocation of a firstprivate variable before sharing |
1077 | 0 | llvm::Value *ParValue; |
1078 | 0 | if (EscapedParam) { |
1079 | 0 | LValue ParLVal = |
1080 | 0 | CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType()); |
1081 | 0 | ParValue = CGF.EmitLoadOfScalar(ParLVal, Loc); |
1082 | 0 | } |
1083 | | |
1084 | | // Allocate space for the variable to be globalized |
1085 | 0 | llvm::Value *AllocArgs[] = {CGF.getTypeSize(VD->getType())}; |
1086 | 0 | llvm::CallBase *VoidPtr = |
1087 | 0 | CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( |
1088 | 0 | CGM.getModule(), OMPRTL___kmpc_alloc_shared), |
1089 | 0 | AllocArgs, VD->getName()); |
1090 | | // FIXME: We should use the variable's actual alignment as an argument. |
1091 | 0 | VoidPtr->addRetAttr(llvm::Attribute::get( |
1092 | 0 | CGM.getLLVMContext(), llvm::Attribute::Alignment, |
1093 | 0 | CGM.getContext().getTargetInfo().getNewAlign() / 8)); |
1094 | | |
1095 | | // Cast the void pointer and get the address of the globalized variable. |
1096 | 0 | llvm::PointerType *VarPtrTy = CGF.ConvertTypeForMem(VarTy)->getPointerTo(); |
1097 | 0 | llvm::Value *CastedVoidPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( |
1098 | 0 | VoidPtr, VarPtrTy, VD->getName() + "_on_stack"); |
1099 | 0 | LValue VarAddr = CGF.MakeNaturalAlignAddrLValue(CastedVoidPtr, VarTy); |
1100 | 0 | Rec.second.PrivateAddr = VarAddr.getAddress(CGF); |
1101 | 0 | Rec.second.GlobalizedVal = VoidPtr; |
1102 | | |
1103 | | // Assign the local allocation to the newly globalized location. |
1104 | 0 | if (EscapedParam) { |
1105 | 0 | CGF.EmitStoreOfScalar(ParValue, VarAddr); |
1106 | 0 | I->getSecond().MappedParams->setVarAddr(CGF, VD, VarAddr.getAddress(CGF)); |
1107 | 0 | } |
1108 | 0 | if (auto *DI = CGF.getDebugInfo()) |
1109 | 0 | VoidPtr->setDebugLoc(DI->SourceLocToDebugLoc(VD->getLocation())); |
1110 | 0 | } |
1111 | |
1112 | 0 | for (const auto *ValueD : I->getSecond().EscapedVariableLengthDecls) { |
1113 | 0 | const auto *VD = cast<VarDecl>(ValueD); |
1114 | 0 | std::pair<llvm::Value *, llvm::Value *> AddrSizePair = |
1115 | 0 | getKmpcAllocShared(CGF, VD); |
1116 | 0 | I->getSecond().EscapedVariableLengthDeclsAddrs.emplace_back(AddrSizePair); |
1117 | 0 | LValue Base = CGF.MakeAddrLValue(AddrSizePair.first, VD->getType(), |
1118 | 0 | CGM.getContext().getDeclAlign(VD), |
1119 | 0 | AlignmentSource::Decl); |
1120 | 0 | I->getSecond().MappedParams->setVarAddr(CGF, VD, Base.getAddress(CGF)); |
1121 | 0 | } |
1122 | 0 | I->getSecond().MappedParams->apply(CGF); |
1123 | 0 | } |
1124 | | |
1125 | | bool CGOpenMPRuntimeGPU::isDelayedVariableLengthDecl(CodeGenFunction &CGF, |
1126 | 0 | const VarDecl *VD) const { |
1127 | 0 | const auto I = FunctionGlobalizedDecls.find(CGF.CurFn); |
1128 | 0 | if (I == FunctionGlobalizedDecls.end()) |
1129 | 0 | return false; |
1130 | | |
1131 | | // Check whether the variable declaration is delayed: |
1132 | 0 | return llvm::is_contained(I->getSecond().DelayedVariableLengthDecls, VD); |
1133 | 0 | } |
1134 | | |
1135 | | std::pair<llvm::Value *, llvm::Value *> |
1136 | | CGOpenMPRuntimeGPU::getKmpcAllocShared(CodeGenFunction &CGF, |
1137 | 0 | const VarDecl *VD) { |
1138 | 0 | CGBuilderTy &Bld = CGF.Builder; |
1139 | | |
1140 | | // Compute size and alignment. |
1141 | 0 | llvm::Value *Size = CGF.getTypeSize(VD->getType()); |
1142 | 0 | CharUnits Align = CGM.getContext().getDeclAlign(VD); |
1143 | 0 | Size = Bld.CreateNUWAdd( |
1144 | 0 | Size, llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity() - 1)); |
1145 | 0 | llvm::Value *AlignVal = |
1146 | 0 | llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity()); |
1147 | 0 | Size = Bld.CreateUDiv(Size, AlignVal); |
1148 | 0 | Size = Bld.CreateNUWMul(Size, AlignVal); |
1149 | | |
1150 | | // Allocate space for this VLA object to be globalized. |
1151 | 0 | llvm::Value *AllocArgs[] = {Size}; |
1152 | 0 | llvm::CallBase *VoidPtr = |
1153 | 0 | CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( |
1154 | 0 | CGM.getModule(), OMPRTL___kmpc_alloc_shared), |
1155 | 0 | AllocArgs, VD->getName()); |
1156 | 0 | VoidPtr->addRetAttr(llvm::Attribute::get( |
1157 | 0 | CGM.getLLVMContext(), llvm::Attribute::Alignment, Align.getQuantity())); |
1158 | |
1159 | 0 | return std::make_pair(VoidPtr, Size); |
1160 | 0 | } |
1161 | | |
1162 | | void CGOpenMPRuntimeGPU::getKmpcFreeShared( |
1163 | | CodeGenFunction &CGF, |
1164 | 0 | const std::pair<llvm::Value *, llvm::Value *> &AddrSizePair) { |
1165 | | // Deallocate the memory for each globalized VLA object |
1166 | 0 | CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( |
1167 | 0 | CGM.getModule(), OMPRTL___kmpc_free_shared), |
1168 | 0 | {AddrSizePair.first, AddrSizePair.second}); |
1169 | 0 | } |
1170 | | |
1171 | 0 | void CGOpenMPRuntimeGPU::emitGenericVarsEpilog(CodeGenFunction &CGF) { |
1172 | 0 | if (getDataSharingMode() != CGOpenMPRuntimeGPU::DS_Generic) |
1173 | 0 | return; |
1174 | | |
1175 | 0 | const auto I = FunctionGlobalizedDecls.find(CGF.CurFn); |
1176 | 0 | if (I != FunctionGlobalizedDecls.end()) { |
1177 | | // Deallocate the memory for each globalized VLA object that was |
1178 | | // globalized in the prolog (i.e. emitGenericVarsProlog). |
1179 | 0 | for (const auto &AddrSizePair : |
1180 | 0 | llvm::reverse(I->getSecond().EscapedVariableLengthDeclsAddrs)) { |
1181 | 0 | CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( |
1182 | 0 | CGM.getModule(), OMPRTL___kmpc_free_shared), |
1183 | 0 | {AddrSizePair.first, AddrSizePair.second}); |
1184 | 0 | } |
1185 | | // Deallocate the memory for each globalized value |
1186 | 0 | for (auto &Rec : llvm::reverse(I->getSecond().LocalVarData)) { |
1187 | 0 | const auto *VD = cast<VarDecl>(Rec.first); |
1188 | 0 | I->getSecond().MappedParams->restore(CGF); |
1189 | |
1190 | 0 | llvm::Value *FreeArgs[] = {Rec.second.GlobalizedVal, |
1191 | 0 | CGF.getTypeSize(VD->getType())}; |
1192 | 0 | CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( |
1193 | 0 | CGM.getModule(), OMPRTL___kmpc_free_shared), |
1194 | 0 | FreeArgs); |
1195 | 0 | } |
1196 | 0 | } |
1197 | 0 | } |
1198 | | |
1199 | | void CGOpenMPRuntimeGPU::emitTeamsCall(CodeGenFunction &CGF, |
1200 | | const OMPExecutableDirective &D, |
1201 | | SourceLocation Loc, |
1202 | | llvm::Function *OutlinedFn, |
1203 | 0 | ArrayRef<llvm::Value *> CapturedVars) { |
1204 | 0 | if (!CGF.HaveInsertPoint()) |
1205 | 0 | return; |
1206 | | |
1207 | 0 | bool IsBareKernel = D.getSingleClause<OMPXBareClause>(); |
1208 | |
1209 | 0 | Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty, |
1210 | 0 | /*Name=*/".zero.addr"); |
1211 | 0 | CGF.Builder.CreateStore(CGF.Builder.getInt32(/*C*/ 0), ZeroAddr); |
1212 | 0 | llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs; |
1213 | | // We don't emit any thread id function call in a bare kernel, but because the |
1214 | | // outlined function has a pointer argument, we emit a nullptr here. |
1215 | 0 | if (IsBareKernel) |
1216 | 0 | OutlinedFnArgs.push_back(llvm::ConstantPointerNull::get(CGM.VoidPtrTy)); |
1217 | 0 | else |
1218 | 0 | OutlinedFnArgs.push_back(emitThreadIDAddress(CGF, Loc).getPointer()); |
1219 | 0 | OutlinedFnArgs.push_back(ZeroAddr.getPointer()); |
1220 | 0 | OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end()); |
1221 | 0 | emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs); |
1222 | 0 | } |
1223 | | |
1224 | | void CGOpenMPRuntimeGPU::emitParallelCall(CodeGenFunction &CGF, |
1225 | | SourceLocation Loc, |
1226 | | llvm::Function *OutlinedFn, |
1227 | | ArrayRef<llvm::Value *> CapturedVars, |
1228 | | const Expr *IfCond, |
1229 | 0 | llvm::Value *NumThreads) { |
1230 | 0 | if (!CGF.HaveInsertPoint()) |
1231 | 0 | return; |
1232 | | |
1233 | 0 | auto &&ParallelGen = [this, Loc, OutlinedFn, CapturedVars, IfCond, |
1234 | 0 | NumThreads](CodeGenFunction &CGF, |
1235 | 0 | PrePostActionTy &Action) { |
1236 | 0 | CGBuilderTy &Bld = CGF.Builder; |
1237 | 0 | llvm::Value *NumThreadsVal = NumThreads; |
1238 | 0 | llvm::Function *WFn = WrapperFunctionsMap[OutlinedFn]; |
1239 | 0 | llvm::Value *ID = llvm::ConstantPointerNull::get(CGM.Int8PtrTy); |
1240 | 0 | if (WFn) |
1241 | 0 | ID = Bld.CreateBitOrPointerCast(WFn, CGM.Int8PtrTy); |
1242 | 0 | llvm::Value *FnPtr = Bld.CreateBitOrPointerCast(OutlinedFn, CGM.Int8PtrTy); |
1243 | | |
1244 | | // Create a private scope that will globalize the arguments |
1245 | | // passed from the outside of the target region. |
1246 | | // TODO: Is that needed? |
1247 | 0 | CodeGenFunction::OMPPrivateScope PrivateArgScope(CGF); |
1248 | |
1249 | 0 | Address CapturedVarsAddrs = CGF.CreateDefaultAlignTempAlloca( |
1250 | 0 | llvm::ArrayType::get(CGM.VoidPtrTy, CapturedVars.size()), |
1251 | 0 | "captured_vars_addrs"); |
1252 | | // There's something to share. |
1253 | 0 | if (!CapturedVars.empty()) { |
1254 | | // Prepare for parallel region. Indicate the outlined function. |
1255 | 0 | ASTContext &Ctx = CGF.getContext(); |
1256 | 0 | unsigned Idx = 0; |
1257 | 0 | for (llvm::Value *V : CapturedVars) { |
1258 | 0 | Address Dst = Bld.CreateConstArrayGEP(CapturedVarsAddrs, Idx); |
1259 | 0 | llvm::Value *PtrV; |
1260 | 0 | if (V->getType()->isIntegerTy()) |
1261 | 0 | PtrV = Bld.CreateIntToPtr(V, CGF.VoidPtrTy); |
1262 | 0 | else |
1263 | 0 | PtrV = Bld.CreatePointerBitCastOrAddrSpaceCast(V, CGF.VoidPtrTy); |
1264 | 0 | CGF.EmitStoreOfScalar(PtrV, Dst, /*Volatile=*/false, |
1265 | 0 | Ctx.getPointerType(Ctx.VoidPtrTy)); |
1266 | 0 | ++Idx; |
1267 | 0 | } |
1268 | 0 | } |
1269 | |
1270 | 0 | llvm::Value *IfCondVal = nullptr; |
1271 | 0 | if (IfCond) |
1272 | 0 | IfCondVal = Bld.CreateIntCast(CGF.EvaluateExprAsBool(IfCond), CGF.Int32Ty, |
1273 | 0 | /* isSigned */ false); |
1274 | 0 | else |
1275 | 0 | IfCondVal = llvm::ConstantInt::get(CGF.Int32Ty, 1); |
1276 | |
|
1277 | 0 | if (!NumThreadsVal) |
1278 | 0 | NumThreadsVal = llvm::ConstantInt::get(CGF.Int32Ty, -1); |
1279 | 0 | else |
1280 | 0 | NumThreadsVal = Bld.CreateZExtOrTrunc(NumThreadsVal, CGF.Int32Ty), |
1281 | |
1282 | 0 | assert(IfCondVal && "Expected a value"); |
1283 | 0 | llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc); |
1284 | 0 | llvm::Value *Args[] = { |
1285 | 0 | RTLoc, |
1286 | 0 | getThreadID(CGF, Loc), |
1287 | 0 | IfCondVal, |
1288 | 0 | NumThreadsVal, |
1289 | 0 | llvm::ConstantInt::get(CGF.Int32Ty, -1), |
1290 | 0 | FnPtr, |
1291 | 0 | ID, |
1292 | 0 | Bld.CreateBitOrPointerCast(CapturedVarsAddrs.getPointer(), |
1293 | 0 | CGF.VoidPtrPtrTy), |
1294 | 0 | llvm::ConstantInt::get(CGM.SizeTy, CapturedVars.size())}; |
1295 | 0 | CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( |
1296 | 0 | CGM.getModule(), OMPRTL___kmpc_parallel_51), |
1297 | 0 | Args); |
1298 | 0 | }; |
1299 | |
|
1300 | 0 | RegionCodeGenTy RCG(ParallelGen); |
1301 | 0 | RCG(CGF); |
1302 | 0 | } |
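
The packing loop above stores every captured value into a uniform void* array before handing it to __kmpc_parallel_51: integers travel through the pointer-sized slot, addresses are stored directly. A rough host-side illustration of that convention (the two captured variables are made up for the example):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Illustration of how emitParallelCall packs captured values: integers are
    // placed directly in the pointer-sized slot (inttoptr), while addresses are
    // stored as plain void* (bitcast/addrspacecast).
    int main() {
      int ScalarCapture = 42;   // captured by value (integer)
      double SharedVar = 3.14;  // captured by reference (pointer)

      std::vector<void *> CapturedVarsAddrs;
      CapturedVarsAddrs.push_back(
          reinterpret_cast<void *>(static_cast<std::intptr_t>(ScalarCapture)));
      CapturedVarsAddrs.push_back(static_cast<void *>(&SharedVar));

      // The runtime entry later receives the array address and the element
      // count, mirroring the last two arguments built for __kmpc_parallel_51.
      std::printf("packed %zu captured vars\n", CapturedVarsAddrs.size());
    }
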
1303 | | |
1304 | 0 | void CGOpenMPRuntimeGPU::syncCTAThreads(CodeGenFunction &CGF) { |
1305 | | // Always emit simple barriers! |
1306 | 0 | if (!CGF.HaveInsertPoint()) |
1307 | 0 | return; |
1308 | | // Build call __kmpc_barrier_simple_spmd(nullptr, 0); |
1309 | | // This function does not use parameters, so we can emit just default values. |
1310 | 0 | llvm::Value *Args[] = { |
1311 | 0 | llvm::ConstantPointerNull::get( |
1312 | 0 | cast<llvm::PointerType>(getIdentTyPointerTy())), |
1313 | 0 | llvm::ConstantInt::get(CGF.Int32Ty, /*V=*/0, /*isSigned=*/true)}; |
1314 | 0 | CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( |
1315 | 0 | CGM.getModule(), OMPRTL___kmpc_barrier_simple_spmd), |
1316 | 0 | Args); |
1317 | 0 | } |
1318 | | |
1319 | | void CGOpenMPRuntimeGPU::emitBarrierCall(CodeGenFunction &CGF, |
1320 | | SourceLocation Loc, |
1321 | | OpenMPDirectiveKind Kind, bool, |
1322 | 0 | bool) { |
1323 | | // Always emit simple barriers! |
1324 | 0 | if (!CGF.HaveInsertPoint()) |
1325 | 0 | return; |
1326 | | // Build call __kmpc_cancel_barrier(loc, thread_id); |
1327 | 0 | unsigned Flags = getDefaultFlagsForBarriers(Kind); |
1328 | 0 | llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags), |
1329 | 0 | getThreadID(CGF, Loc)}; |
1330 | |
|
1331 | 0 | CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( |
1332 | 0 | CGM.getModule(), OMPRTL___kmpc_barrier), |
1333 | 0 | Args); |
1334 | 0 | } |
1335 | | |
1336 | | void CGOpenMPRuntimeGPU::emitCriticalRegion( |
1337 | | CodeGenFunction &CGF, StringRef CriticalName, |
1338 | | const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc, |
1339 | 0 | const Expr *Hint) { |
1340 | 0 | llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.critical.loop"); |
1341 | 0 | llvm::BasicBlock *TestBB = CGF.createBasicBlock("omp.critical.test"); |
1342 | 0 | llvm::BasicBlock *SyncBB = CGF.createBasicBlock("omp.critical.sync"); |
1343 | 0 | llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.critical.body"); |
1344 | 0 | llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.critical.exit"); |
1345 | |
|
1346 | 0 | auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime()); |
1347 | | |
1348 | | // Get the mask of active threads in the warp. |
1349 | 0 | llvm::Value *Mask = CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( |
1350 | 0 | CGM.getModule(), OMPRTL___kmpc_warp_active_thread_mask)); |
1351 | | // Fetch team-local id of the thread. |
1352 | 0 | llvm::Value *ThreadID = RT.getGPUThreadID(CGF); |
1353 | | |
1354 | | // Get the width of the team. |
1355 | 0 | llvm::Value *TeamWidth = RT.getGPUNumThreads(CGF); |
1356 | | |
1357 | | // Initialize the counter variable for the loop. |
1358 | 0 | QualType Int32Ty = |
1359 | 0 | CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/0); |
1360 | 0 | Address Counter = CGF.CreateMemTemp(Int32Ty, "critical_counter"); |
1361 | 0 | LValue CounterLVal = CGF.MakeAddrLValue(Counter, Int32Ty); |
1362 | 0 | CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.Int32Ty), CounterLVal, |
1363 | 0 | /*isInit=*/true); |
1364 | | |
1365 | | // Block checks if loop counter exceeds upper bound. |
1366 | 0 | CGF.EmitBlock(LoopBB); |
1367 | 0 | llvm::Value *CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc); |
1368 | 0 | llvm::Value *CmpLoopBound = CGF.Builder.CreateICmpSLT(CounterVal, TeamWidth); |
1369 | 0 | CGF.Builder.CreateCondBr(CmpLoopBound, TestBB, ExitBB); |
1370 | | |
1371 | | // Block tests which single thread should execute region, and which threads |
1372 | | // should go straight to synchronisation point. |
1373 | 0 | CGF.EmitBlock(TestBB); |
1374 | 0 | CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc); |
1375 | 0 | llvm::Value *CmpThreadToCounter = |
1376 | 0 | CGF.Builder.CreateICmpEQ(ThreadID, CounterVal); |
1377 | 0 | CGF.Builder.CreateCondBr(CmpThreadToCounter, BodyBB, SyncBB); |
1378 | | |
1379 | | // Block emits the body of the critical region. |
1380 | 0 | CGF.EmitBlock(BodyBB); |
1381 | | |
1382 | | // Output the critical statement. |
1383 | 0 | CGOpenMPRuntime::emitCriticalRegion(CGF, CriticalName, CriticalOpGen, Loc, |
1384 | 0 | Hint); |
1385 | | |
1386 | | // After the body surrounded by the critical region, the single executing |
1387 | | // thread will jump to the synchronisation point. |
1388 | | // Block waits for all threads in current team to finish then increments the |
1389 | | // counter variable and returns to the loop. |
1390 | 0 | CGF.EmitBlock(SyncBB); |
1391 | | // Reconverge active threads in the warp. |
1392 | 0 | (void)CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( |
1393 | 0 | CGM.getModule(), OMPRTL___kmpc_syncwarp), |
1394 | 0 | Mask); |
1395 | |
|
1396 | 0 | llvm::Value *IncCounterVal = |
1397 | 0 | CGF.Builder.CreateNSWAdd(CounterVal, CGF.Builder.getInt32(1)); |
1398 | 0 | CGF.EmitStoreOfScalar(IncCounterVal, CounterLVal); |
1399 | 0 | CGF.EmitBranch(LoopBB); |
1400 | | |
1401 | | // Block that is reached when all threads in the team complete the region. |
1402 | 0 | CGF.EmitBlock(ExitBB, /*IsFinished=*/true); |
1403 | 0 | } |
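
To summarize the control flow built above: each thread in the team loops a counter from 0 to the team width, enters the body only on the iteration where the counter equals its own id, and passes through the warp sync every iteration, which serializes the critical region. A sequential host-side sketch of the same idea (the team size is a made-up constant):

    #include <cstdio>

    // Host sketch, not the emitted IR: every "thread" takes one turn at the
    // critical body while the others spin through the counter loop.
    int main() {
      const int TeamWidth = 4; // hypothetical team size
      for (int ThreadID = 0; ThreadID < TeamWidth; ++ThreadID) {
        for (int Counter = 0; Counter < TeamWidth; ++Counter) {
          if (ThreadID == Counter)
            std::printf("thread %d executes the critical body\n", ThreadID);
          // __kmpc_syncwarp(Mask) would reconverge the warp here on the device.
        }
      }
    }
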
1404 | | |
1405 | | /// Cast value to the specified type. |
1406 | | static llvm::Value *castValueToType(CodeGenFunction &CGF, llvm::Value *Val, |
1407 | | QualType ValTy, QualType CastTy, |
1408 | 0 | SourceLocation Loc) { |
1409 | 0 | assert(!CGF.getContext().getTypeSizeInChars(CastTy).isZero() && |
1410 | 0 | "Cast type must sized."); |
1411 | 0 | assert(!CGF.getContext().getTypeSizeInChars(ValTy).isZero() && |
1412 | 0 | "Val type must sized."); |
1413 | 0 | llvm::Type *LLVMCastTy = CGF.ConvertTypeForMem(CastTy); |
1414 | 0 | if (ValTy == CastTy) |
1415 | 0 | return Val; |
1416 | 0 | if (CGF.getContext().getTypeSizeInChars(ValTy) == |
1417 | 0 | CGF.getContext().getTypeSizeInChars(CastTy)) |
1418 | 0 | return CGF.Builder.CreateBitCast(Val, LLVMCastTy); |
1419 | 0 | if (CastTy->isIntegerType() && ValTy->isIntegerType()) |
1420 | 0 | return CGF.Builder.CreateIntCast(Val, LLVMCastTy, |
1421 | 0 | CastTy->hasSignedIntegerRepresentation()); |
1422 | 0 | Address CastItem = CGF.CreateMemTemp(CastTy); |
1423 | 0 | Address ValCastItem = CastItem.withElementType(Val->getType()); |
1424 | 0 | CGF.EmitStoreOfScalar(Val, ValCastItem, /*Volatile=*/false, ValTy, |
1425 | 0 | LValueBaseInfo(AlignmentSource::Type), |
1426 | 0 | TBAAAccessInfo()); |
1427 | 0 | return CGF.EmitLoadOfScalar(CastItem, /*Volatile=*/false, CastTy, Loc, |
1428 | 0 | LValueBaseInfo(AlignmentSource::Type), |
1429 | 0 | TBAAAccessInfo()); |
1430 | 0 | } |
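
When the source and cast types are neither same-sized nor both integers, the helper falls back to storing the value into a temporary with its own type and reloading it with the cast type. On the host, memcpy-based type punning is the closest well-defined equivalent of that memory round trip (a sketch of the idea, not what the IR literally does):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      float Val = 1.0f;            // ValTy: a 4-byte float
      std::int64_t Widened = 0;    // CastTy: a wider integer temporary
      // Store with the value's own type, reload with the cast type.
      std::memcpy(&Widened, &Val, sizeof(Val));
      std::printf("reinterpreted bits: 0x%llx\n",
                  static_cast<unsigned long long>(Widened));
    }
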
1431 | | |
1432 | | /// This function creates calls to one of two shuffle functions to copy |
1433 | | /// variables between lanes in a warp. |
1434 | | static llvm::Value *createRuntimeShuffleFunction(CodeGenFunction &CGF, |
1435 | | llvm::Value *Elem, |
1436 | | QualType ElemType, |
1437 | | llvm::Value *Offset, |
1438 | 0 | SourceLocation Loc) { |
1439 | 0 | CodeGenModule &CGM = CGF.CGM; |
1440 | 0 | CGBuilderTy &Bld = CGF.Builder; |
1441 | 0 | CGOpenMPRuntimeGPU &RT = |
1442 | 0 | *(static_cast<CGOpenMPRuntimeGPU *>(&CGM.getOpenMPRuntime())); |
1443 | 0 | llvm::OpenMPIRBuilder &OMPBuilder = RT.getOMPBuilder(); |
1444 | |
|
1445 | 0 | CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType); |
1446 | 0 | assert(Size.getQuantity() <= 8 && |
1447 | 0 | "Unsupported bitwidth in shuffle instruction."); |
1448 | | |
1449 | 0 | RuntimeFunction ShuffleFn = Size.getQuantity() <= 4 |
1450 | 0 | ? OMPRTL___kmpc_shuffle_int32 |
1451 | 0 | : OMPRTL___kmpc_shuffle_int64; |
1452 | | |
1453 | | // Cast all types to 32- or 64-bit values before calling shuffle routines. |
1454 | 0 | QualType CastTy = CGF.getContext().getIntTypeForBitwidth( |
1455 | 0 | Size.getQuantity() <= 4 ? 32 : 64, /*Signed=*/1); |
1456 | 0 | llvm::Value *ElemCast = castValueToType(CGF, Elem, ElemType, CastTy, Loc); |
1457 | 0 | llvm::Value *WarpSize = |
1458 | 0 | Bld.CreateIntCast(RT.getGPUWarpSize(CGF), CGM.Int16Ty, /*isSigned=*/true); |
1459 | |
|
1460 | 0 | llvm::Value *ShuffledVal = CGF.EmitRuntimeCall( |
1461 | 0 | OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), ShuffleFn), |
1462 | 0 | {ElemCast, Offset, WarpSize}); |
1463 | |
|
1464 | 0 | return castValueToType(CGF, ShuffledVal, CastTy, ElemType, Loc); |
1465 | 0 | } |
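
The only decision made here is which runtime shuffle entry point to call, keyed off the element size; anything wider than eight bytes is chopped up by shuffleAndStore below. A tiny, purely illustrative sketch of that dispatch:

    #include <cstddef>
    #include <cstdio>

    // Elements of at most 4 bytes go through the 32-bit shuffle entry,
    // anything up to 8 bytes through the 64-bit one.
    const char *pickShuffleEntry(std::size_t ElemSize) {
      return ElemSize <= 4 ? "__kmpc_shuffle_int32" : "__kmpc_shuffle_int64";
    }

    int main() {
      std::printf("%s\n", pickShuffleEntry(sizeof(float)));  // 32-bit path
      std::printf("%s\n", pickShuffleEntry(sizeof(double))); // 64-bit path
    }
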
1466 | | |
1467 | | static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr, |
1468 | | Address DestAddr, QualType ElemType, |
1469 | 0 | llvm::Value *Offset, SourceLocation Loc) { |
1470 | 0 | CGBuilderTy &Bld = CGF.Builder; |
1471 | |
|
1472 | 0 | CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType); |
1473 | | // Create the loop over the big sized data. |
1474 | | // ptr = (void*)Elem; |
1475 | | // ptrEnd = (void*) Elem + 1; |
1476 | | // Step = 8; |
1477 | | // while (ptr + Step < ptrEnd) |
1478 | | // shuffle((int64_t)*ptr); |
1479 | | // Step = 4; |
1480 | | // while (ptr + Step < ptrEnd) |
1481 | | // shuffle((int32_t)*ptr); |
1482 | | // ... |
1483 | 0 | Address ElemPtr = DestAddr; |
1484 | 0 | Address Ptr = SrcAddr; |
1485 | 0 | Address PtrEnd = Bld.CreatePointerBitCastOrAddrSpaceCast( |
1486 | 0 | Bld.CreateConstGEP(SrcAddr, 1), CGF.VoidPtrTy, CGF.Int8Ty); |
1487 | 0 | for (int IntSize = 8; IntSize >= 1; IntSize /= 2) { |
1488 | 0 | if (Size < CharUnits::fromQuantity(IntSize)) |
1489 | 0 | continue; |
1490 | 0 | QualType IntType = CGF.getContext().getIntTypeForBitwidth( |
1491 | 0 | CGF.getContext().toBits(CharUnits::fromQuantity(IntSize)), |
1492 | 0 | /*Signed=*/1); |
1493 | 0 | llvm::Type *IntTy = CGF.ConvertTypeForMem(IntType); |
1494 | 0 | Ptr = Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr, IntTy->getPointerTo(), |
1495 | 0 | IntTy); |
1496 | 0 | ElemPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( |
1497 | 0 | ElemPtr, IntTy->getPointerTo(), IntTy); |
1498 | 0 | if (Size.getQuantity() / IntSize > 1) { |
1499 | 0 | llvm::BasicBlock *PreCondBB = CGF.createBasicBlock(".shuffle.pre_cond"); |
1500 | 0 | llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".shuffle.then"); |
1501 | 0 | llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".shuffle.exit"); |
1502 | 0 | llvm::BasicBlock *CurrentBB = Bld.GetInsertBlock(); |
1503 | 0 | CGF.EmitBlock(PreCondBB); |
1504 | 0 | llvm::PHINode *PhiSrc = |
1505 | 0 | Bld.CreatePHI(Ptr.getType(), /*NumReservedValues=*/2); |
1506 | 0 | PhiSrc->addIncoming(Ptr.getPointer(), CurrentBB); |
1507 | 0 | llvm::PHINode *PhiDest = |
1508 | 0 | Bld.CreatePHI(ElemPtr.getType(), /*NumReservedValues=*/2); |
1509 | 0 | PhiDest->addIncoming(ElemPtr.getPointer(), CurrentBB); |
1510 | 0 | Ptr = Address(PhiSrc, Ptr.getElementType(), Ptr.getAlignment()); |
1511 | 0 | ElemPtr = |
1512 | 0 | Address(PhiDest, ElemPtr.getElementType(), ElemPtr.getAlignment()); |
1513 | 0 | llvm::Value *PtrDiff = Bld.CreatePtrDiff( |
1514 | 0 | CGF.Int8Ty, PtrEnd.getPointer(), |
1515 | 0 | Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr.getPointer(), |
1516 | 0 | CGF.VoidPtrTy)); |
1517 | 0 | Bld.CreateCondBr(Bld.CreateICmpSGT(PtrDiff, Bld.getInt64(IntSize - 1)), |
1518 | 0 | ThenBB, ExitBB); |
1519 | 0 | CGF.EmitBlock(ThenBB); |
1520 | 0 | llvm::Value *Res = createRuntimeShuffleFunction( |
1521 | 0 | CGF, |
1522 | 0 | CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc, |
1523 | 0 | LValueBaseInfo(AlignmentSource::Type), |
1524 | 0 | TBAAAccessInfo()), |
1525 | 0 | IntType, Offset, Loc); |
1526 | 0 | CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType, |
1527 | 0 | LValueBaseInfo(AlignmentSource::Type), |
1528 | 0 | TBAAAccessInfo()); |
1529 | 0 | Address LocalPtr = Bld.CreateConstGEP(Ptr, 1); |
1530 | 0 | Address LocalElemPtr = Bld.CreateConstGEP(ElemPtr, 1); |
1531 | 0 | PhiSrc->addIncoming(LocalPtr.getPointer(), ThenBB); |
1532 | 0 | PhiDest->addIncoming(LocalElemPtr.getPointer(), ThenBB); |
1533 | 0 | CGF.EmitBranch(PreCondBB); |
1534 | 0 | CGF.EmitBlock(ExitBB); |
1535 | 0 | } else { |
1536 | 0 | llvm::Value *Res = createRuntimeShuffleFunction( |
1537 | 0 | CGF, |
1538 | 0 | CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc, |
1539 | 0 | LValueBaseInfo(AlignmentSource::Type), |
1540 | 0 | TBAAAccessInfo()), |
1541 | 0 | IntType, Offset, Loc); |
1542 | 0 | CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType, |
1543 | 0 | LValueBaseInfo(AlignmentSource::Type), |
1544 | 0 | TBAAAccessInfo()); |
1545 | 0 | Ptr = Bld.CreateConstGEP(Ptr, 1); |
1546 | 0 | ElemPtr = Bld.CreateConstGEP(ElemPtr, 1); |
1547 | 0 | } |
1548 | 0 | Size = Size % IntSize; |
1549 | 0 | } |
1550 | 0 | } |
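
The loop above walks the element in decreasing power-of-two chunk sizes so that every piece fits one of the two shuffle widths. The same chunking, shown as a plain host-side copy where memcpy stands in for the device shuffle of each chunk:

    #include <cstdio>
    #include <cstring>

    // Move a large element in the widest chunks that still fit: 8, 4, 2,
    // then 1 bytes; on the device each chunk is routed through the shuffle
    // helper instead of being copied directly.
    void copyInShuffleSizedChunks(const char *Src, char *Dst, std::size_t Size) {
      for (int IntSize = 8; IntSize >= 1; IntSize /= 2) {
        while (Size >= static_cast<std::size_t>(IntSize)) {
          std::memcpy(Dst, Src, IntSize);
          Src += IntSize;
          Dst += IntSize;
          Size -= IntSize;
        }
      }
    }

    int main() {
      char In[13] = "hello world!";
      char Out[13] = {};
      copyInShuffleSizedChunks(In, Out, sizeof(In));
      std::printf("%s\n", Out);
    }
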
1551 | | |
1552 | | namespace { |
1553 | | enum CopyAction : unsigned { |
1554 | | // RemoteLaneToThread: Copy over a Reduce list from a remote lane in |
1555 | | // the warp using shuffle instructions. |
1556 | | RemoteLaneToThread, |
1557 | | // ThreadCopy: Make a copy of a Reduce list on the thread's stack. |
1558 | | ThreadCopy, |
1559 | | }; |
1560 | | } // namespace |
1561 | | |
1562 | | struct CopyOptionsTy { |
1563 | | llvm::Value *RemoteLaneOffset; |
1564 | | llvm::Value *ScratchpadIndex; |
1565 | | llvm::Value *ScratchpadWidth; |
1566 | | }; |
1567 | | |
1568 | | /// Emit instructions to copy a Reduce list, which contains partially |
1569 | | /// aggregated values, in the specified direction. |
1570 | | static void emitReductionListCopy( |
1571 | | CopyAction Action, CodeGenFunction &CGF, QualType ReductionArrayTy, |
1572 | | ArrayRef<const Expr *> Privates, Address SrcBase, Address DestBase, |
1573 | 0 | CopyOptionsTy CopyOptions = {nullptr, nullptr, nullptr}) { |
1574 | |
|
1575 | 0 | CodeGenModule &CGM = CGF.CGM; |
1576 | 0 | ASTContext &C = CGM.getContext(); |
1577 | 0 | CGBuilderTy &Bld = CGF.Builder; |
1578 | |
|
1579 | 0 | llvm::Value *RemoteLaneOffset = CopyOptions.RemoteLaneOffset; |
1580 | | |
1581 | | // Iterates, element-by-element, through the source Reduce list and |
1582 | | // makes a copy. 
1583 | 0 | unsigned Idx = 0; |
1584 | 0 | for (const Expr *Private : Privates) { |
1585 | 0 | Address SrcElementAddr = Address::invalid(); |
1586 | 0 | Address DestElementAddr = Address::invalid(); |
1587 | 0 | Address DestElementPtrAddr = Address::invalid(); |
1588 | | // Should we shuffle in an element from a remote lane? |
1589 | 0 | bool ShuffleInElement = false; |
1590 | | // Set to true to update the pointer in the dest Reduce list to a |
1591 | | // newly created element. |
1592 | 0 | bool UpdateDestListPtr = false; |
1593 | 0 | QualType PrivatePtrType = C.getPointerType(Private->getType()); |
1594 | 0 | llvm::Type *PrivateLlvmPtrType = CGF.ConvertType(PrivatePtrType); |
1595 | |
|
1596 | 0 | switch (Action) { |
1597 | 0 | case RemoteLaneToThread: { |
1598 | | // Step 1.1: Get the address for the src element in the Reduce list. |
1599 | 0 | Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx); |
1600 | 0 | SrcElementAddr = CGF.EmitLoadOfPointer( |
1601 | 0 | SrcElementPtrAddr.withElementType(PrivateLlvmPtrType), |
1602 | 0 | PrivatePtrType->castAs<PointerType>()); |
1603 | | |
1604 | | // Step 1.2: Create a temporary to store the element in the destination |
1605 | | // Reduce list. |
1606 | 0 | DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx); |
1607 | 0 | DestElementAddr = |
1608 | 0 | CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element"); |
1609 | 0 | ShuffleInElement = true; |
1610 | 0 | UpdateDestListPtr = true; |
1611 | 0 | break; |
1612 | 0 | } |
1613 | 0 | case ThreadCopy: { |
1614 | | // Step 1.1: Get the address for the src element in the Reduce list. |
1615 | 0 | Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx); |
1616 | 0 | SrcElementAddr = CGF.EmitLoadOfPointer( |
1617 | 0 | SrcElementPtrAddr.withElementType(PrivateLlvmPtrType), |
1618 | 0 | PrivatePtrType->castAs<PointerType>()); |
1619 | | |
1620 | | // Step 1.2: Get the address for dest element. The destination |
1621 | | // element has already been created on the thread's stack. |
1622 | 0 | DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx); |
1623 | 0 | DestElementAddr = CGF.EmitLoadOfPointer( |
1624 | 0 | DestElementPtrAddr.withElementType(PrivateLlvmPtrType), |
1625 | 0 | PrivatePtrType->castAs<PointerType>()); |
1626 | 0 | break; |
1627 | 0 | } |
1628 | 0 | } |
1629 | | |
1630 | | // Regardless of src and dest of copy, we emit the load of src |
1631 | | // element as this is required in all directions |
1632 | 0 | SrcElementAddr = SrcElementAddr.withElementType( |
1633 | 0 | CGF.ConvertTypeForMem(Private->getType())); |
1634 | 0 | DestElementAddr = |
1635 | 0 | DestElementAddr.withElementType(SrcElementAddr.getElementType()); |
1636 | | |
1637 | | // Now that all active lanes have read the element in the |
1638 | | // Reduce list, shuffle over the value from the remote lane. |
1639 | 0 | if (ShuffleInElement) { |
1640 | 0 | shuffleAndStore(CGF, SrcElementAddr, DestElementAddr, Private->getType(), |
1641 | 0 | RemoteLaneOffset, Private->getExprLoc()); |
1642 | 0 | } else { |
1643 | 0 | switch (CGF.getEvaluationKind(Private->getType())) { |
1644 | 0 | case TEK_Scalar: { |
1645 | 0 | llvm::Value *Elem = CGF.EmitLoadOfScalar( |
1646 | 0 | SrcElementAddr, /*Volatile=*/false, Private->getType(), |
1647 | 0 | Private->getExprLoc(), LValueBaseInfo(AlignmentSource::Type), |
1648 | 0 | TBAAAccessInfo()); |
1649 | | // Store the source element value to the dest element address. |
1650 | 0 | CGF.EmitStoreOfScalar( |
1651 | 0 | Elem, DestElementAddr, /*Volatile=*/false, Private->getType(), |
1652 | 0 | LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo()); |
1653 | 0 | break; |
1654 | 0 | } |
1655 | 0 | case TEK_Complex: { |
1656 | 0 | CodeGenFunction::ComplexPairTy Elem = CGF.EmitLoadOfComplex( |
1657 | 0 | CGF.MakeAddrLValue(SrcElementAddr, Private->getType()), |
1658 | 0 | Private->getExprLoc()); |
1659 | 0 | CGF.EmitStoreOfComplex( |
1660 | 0 | Elem, CGF.MakeAddrLValue(DestElementAddr, Private->getType()), |
1661 | 0 | /*isInit=*/false); |
1662 | 0 | break; |
1663 | 0 | } |
1664 | 0 | case TEK_Aggregate: |
1665 | 0 | CGF.EmitAggregateCopy( |
1666 | 0 | CGF.MakeAddrLValue(DestElementAddr, Private->getType()), |
1667 | 0 | CGF.MakeAddrLValue(SrcElementAddr, Private->getType()), |
1668 | 0 | Private->getType(), AggValueSlot::DoesNotOverlap); |
1669 | 0 | break; |
1670 | 0 | } |
1671 | 0 | } |
1672 | | |
1673 | | // Step 3.1: Modify reference in dest Reduce list as needed. |
1674 | | // Modifying the reference in Reduce list to point to the newly |
1675 | | // created element. The element is live in the current function |
1676 | | // scope and that of functions it invokes (i.e., reduce_function). |
1677 | | // RemoteReduceData[i] = (void*)&RemoteElem |
1678 | 0 | if (UpdateDestListPtr) { |
1679 | 0 | CGF.EmitStoreOfScalar(Bld.CreatePointerBitCastOrAddrSpaceCast( |
1680 | 0 | DestElementAddr.getPointer(), CGF.VoidPtrTy), |
1681 | 0 | DestElementPtrAddr, /*Volatile=*/false, |
1682 | 0 | C.VoidPtrTy); |
1683 | 0 | } |
1684 | |
|
1685 | 0 | ++Idx; |
1686 | 0 | } |
1687 | 0 | } |
1688 | | |
1689 | | /// This function emits a helper that gathers Reduce lists from the first |
1690 | | /// lane of every active warp to lanes in the first warp. |
1691 | | /// |
1692 | | /// void inter_warp_copy_func(void* reduce_data, num_warps) |
1693 | | /// shared smem[warp_size]; |
1694 | | /// For all data entries D in reduce_data: |
1695 | | /// sync |
1696 | | /// If (I am the first lane in each warp) |
1697 | | /// Copy my local D to smem[warp_id] |
1698 | | /// sync |
1699 | | /// if (I am the first warp) |
1700 | | /// Copy smem[thread_id] to my local D |
1701 | | static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM, |
1702 | | ArrayRef<const Expr *> Privates, |
1703 | | QualType ReductionArrayTy, |
1704 | 0 | SourceLocation Loc) { |
1705 | 0 | ASTContext &C = CGM.getContext(); |
1706 | 0 | llvm::Module &M = CGM.getModule(); |
1707 | | |
1708 | | // ReduceList: thread local Reduce list. |
1709 | | // At the stage of the computation when this function is called, partially |
1710 | | // aggregated values reside in the first lane of every active warp. |
1711 | 0 | ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, |
1712 | 0 | C.VoidPtrTy, ImplicitParamKind::Other); |
1713 | | // NumWarps: number of warps active in the parallel region. This could |
1714 | | // be smaller than 32 (max warps in a CTA) for partial block reduction. |
1715 | 0 | ImplicitParamDecl NumWarpsArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, |
1716 | 0 | C.getIntTypeForBitwidth(32, /* Signed */ true), |
1717 | 0 | ImplicitParamKind::Other); |
1718 | 0 | FunctionArgList Args; |
1719 | 0 | Args.push_back(&ReduceListArg); |
1720 | 0 | Args.push_back(&NumWarpsArg); |
1721 | |
|
1722 | 0 | const CGFunctionInfo &CGFI = |
1723 | 0 | CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); |
1724 | 0 | auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI), |
1725 | 0 | llvm::GlobalValue::InternalLinkage, |
1726 | 0 | "_omp_reduction_inter_warp_copy_func", &M); |
1727 | 0 | CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI); |
1728 | 0 | Fn->setDoesNotRecurse(); |
1729 | 0 | CodeGenFunction CGF(CGM); |
1730 | 0 | CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc); |
1731 | |
|
1732 | 0 | CGBuilderTy &Bld = CGF.Builder; |
1733 | | |
1734 | | // This array is used as a medium to transfer, one reduce element at a time, |
1735 | | // the data from the first lane of every warp to lanes in the first warp |
1736 | | // in order to perform the final step of a reduction in a parallel region |
1737 | | // (reduction across warps). The array is placed in NVPTX __shared__ memory |
1738 | | // for reduced latency, as well as to have a distinct copy for concurrently |
1739 | | // executing target regions. The array is declared with weak linkage so 
1740 | | // as to be shared across compilation units. |
1741 | 0 | StringRef TransferMediumName = |
1742 | 0 | "__openmp_nvptx_data_transfer_temporary_storage"; |
1743 | 0 | llvm::GlobalVariable *TransferMedium = |
1744 | 0 | M.getGlobalVariable(TransferMediumName); |
1745 | 0 | unsigned WarpSize = CGF.getTarget().getGridValue().GV_Warp_Size; |
1746 | 0 | if (!TransferMedium) { |
1747 | 0 | auto *Ty = llvm::ArrayType::get(CGM.Int32Ty, WarpSize); |
1748 | 0 | unsigned SharedAddressSpace = C.getTargetAddressSpace(LangAS::cuda_shared); |
1749 | 0 | TransferMedium = new llvm::GlobalVariable( |
1750 | 0 | M, Ty, /*isConstant=*/false, llvm::GlobalVariable::WeakAnyLinkage, |
1751 | 0 | llvm::UndefValue::get(Ty), TransferMediumName, |
1752 | 0 | /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal, |
1753 | 0 | SharedAddressSpace); |
1754 | 0 | CGM.addCompilerUsedGlobal(TransferMedium); |
1755 | 0 | } |
1756 | |
|
1757 | 0 | auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime()); |
1758 | | // Get the CUDA thread id of the current OpenMP thread on the GPU. |
1759 | 0 | llvm::Value *ThreadID = RT.getGPUThreadID(CGF); |
1760 | | // nvptx_lane_id = nvptx_id % warpsize |
1761 | 0 | llvm::Value *LaneID = getNVPTXLaneID(CGF); |
1762 | | // nvptx_warp_id = nvptx_id / warpsize |
1763 | 0 | llvm::Value *WarpID = getNVPTXWarpID(CGF); |
1764 | |
|
1765 | 0 | Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg); |
1766 | 0 | llvm::Type *ElemTy = CGF.ConvertTypeForMem(ReductionArrayTy); |
1767 | 0 | Address LocalReduceList( |
1768 | 0 | Bld.CreatePointerBitCastOrAddrSpaceCast( |
1769 | 0 | CGF.EmitLoadOfScalar( |
1770 | 0 | AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc, |
1771 | 0 | LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo()), |
1772 | 0 | ElemTy->getPointerTo()), |
1773 | 0 | ElemTy, CGF.getPointerAlign()); |
1774 | |
|
1775 | 0 | unsigned Idx = 0; |
1776 | 0 | for (const Expr *Private : Privates) { |
1777 | | // |
1778 | | // Warp master copies reduce element to transfer medium in __shared__ |
1779 | | // memory. |
1780 | | // |
1781 | 0 | unsigned RealTySize = |
1782 | 0 | C.getTypeSizeInChars(Private->getType()) |
1783 | 0 | .alignTo(C.getTypeAlignInChars(Private->getType())) |
1784 | 0 | .getQuantity(); |
1785 | 0 | for (unsigned TySize = 4; TySize > 0 && RealTySize > 0; TySize /= 2) { 
1786 | 0 | unsigned NumIters = RealTySize / TySize; |
1787 | 0 | if (NumIters == 0) |
1788 | 0 | continue; |
1789 | 0 | QualType CType = C.getIntTypeForBitwidth( |
1790 | 0 | C.toBits(CharUnits::fromQuantity(TySize)), /*Signed=*/1); |
1791 | 0 | llvm::Type *CopyType = CGF.ConvertTypeForMem(CType); |
1792 | 0 | CharUnits Align = CharUnits::fromQuantity(TySize); |
1793 | 0 | llvm::Value *Cnt = nullptr; |
1794 | 0 | Address CntAddr = Address::invalid(); |
1795 | 0 | llvm::BasicBlock *PrecondBB = nullptr; |
1796 | 0 | llvm::BasicBlock *ExitBB = nullptr; |
1797 | 0 | if (NumIters > 1) { |
1798 | 0 | CntAddr = CGF.CreateMemTemp(C.IntTy, ".cnt.addr"); |
1799 | 0 | CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.IntTy), CntAddr, |
1800 | 0 | /*Volatile=*/false, C.IntTy); |
1801 | 0 | PrecondBB = CGF.createBasicBlock("precond"); |
1802 | 0 | ExitBB = CGF.createBasicBlock("exit"); |
1803 | 0 | llvm::BasicBlock *BodyBB = CGF.createBasicBlock("body"); |
1804 | | // There is no need to emit line number for unconditional branch. |
1805 | 0 | (void)ApplyDebugLocation::CreateEmpty(CGF); |
1806 | 0 | CGF.EmitBlock(PrecondBB); |
1807 | 0 | Cnt = CGF.EmitLoadOfScalar(CntAddr, /*Volatile=*/false, C.IntTy, Loc); |
1808 | 0 | llvm::Value *Cmp = |
1809 | 0 | Bld.CreateICmpULT(Cnt, llvm::ConstantInt::get(CGM.IntTy, NumIters)); |
1810 | 0 | Bld.CreateCondBr(Cmp, BodyBB, ExitBB); |
1811 | 0 | CGF.EmitBlock(BodyBB); |
1812 | 0 | } |
1813 | | // kmpc_barrier. |
1814 | 0 | CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown, |
1815 | 0 | /*EmitChecks=*/false, |
1816 | 0 | /*ForceSimpleCall=*/true); |
1817 | 0 | llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then"); |
1818 | 0 | llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else"); |
1819 | 0 | llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont"); |
1820 | | |
1821 | | // if (lane_id == 0) |
1822 | 0 | llvm::Value *IsWarpMaster = Bld.CreateIsNull(LaneID, "warp_master"); |
1823 | 0 | Bld.CreateCondBr(IsWarpMaster, ThenBB, ElseBB); |
1824 | 0 | CGF.EmitBlock(ThenBB); |
1825 | | |
1826 | | // Reduce element = LocalReduceList[i] |
1827 | 0 | Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx); |
1828 | 0 | llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar( |
1829 | 0 | ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation()); |
1830 | | // elemptr = ((CopyType*)(elemptrptr)) + I |
1831 | 0 | Address ElemPtr(ElemPtrPtr, CopyType, Align); |
1832 | 0 | if (NumIters > 1) |
1833 | 0 | ElemPtr = Bld.CreateGEP(ElemPtr, Cnt); |
1834 | | |
1835 | | // Get pointer to location in transfer medium. |
1836 | | // MediumPtr = &medium[warp_id] |
1837 | 0 | llvm::Value *MediumPtrVal = Bld.CreateInBoundsGEP( |
1838 | 0 | TransferMedium->getValueType(), TransferMedium, |
1839 | 0 | {llvm::Constant::getNullValue(CGM.Int64Ty), WarpID}); |
1840 | | // Casting to actual data type. |
1841 | | // MediumPtr = (CopyType*)MediumPtrAddr; |
1842 | 0 | Address MediumPtr(MediumPtrVal, CopyType, Align); |
1843 | | |
1844 | | // elem = *elemptr |
1845 | | //*MediumPtr = elem |
1846 | 0 | llvm::Value *Elem = CGF.EmitLoadOfScalar( |
1847 | 0 | ElemPtr, /*Volatile=*/false, CType, Loc, |
1848 | 0 | LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo()); |
1849 | | // Store the source element value to the dest element address. |
1850 | 0 | CGF.EmitStoreOfScalar(Elem, MediumPtr, /*Volatile=*/true, CType, |
1851 | 0 | LValueBaseInfo(AlignmentSource::Type), |
1852 | 0 | TBAAAccessInfo()); |
1853 | |
|
1854 | 0 | Bld.CreateBr(MergeBB); |
1855 | |
|
1856 | 0 | CGF.EmitBlock(ElseBB); |
1857 | 0 | Bld.CreateBr(MergeBB); |
1858 | |
|
1859 | 0 | CGF.EmitBlock(MergeBB); |
1860 | | |
1861 | | // kmpc_barrier. |
1862 | 0 | CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown, |
1863 | 0 | /*EmitChecks=*/false, |
1864 | 0 | /*ForceSimpleCall=*/true); |
1865 | | |
1866 | | // |
1867 | | // Warp 0 copies reduce element from transfer medium. |
1868 | | // |
1869 | 0 | llvm::BasicBlock *W0ThenBB = CGF.createBasicBlock("then"); |
1870 | 0 | llvm::BasicBlock *W0ElseBB = CGF.createBasicBlock("else"); |
1871 | 0 | llvm::BasicBlock *W0MergeBB = CGF.createBasicBlock("ifcont"); |
1872 | |
|
1873 | 0 | Address AddrNumWarpsArg = CGF.GetAddrOfLocalVar(&NumWarpsArg); |
1874 | 0 | llvm::Value *NumWarpsVal = CGF.EmitLoadOfScalar( |
1875 | 0 | AddrNumWarpsArg, /*Volatile=*/false, C.IntTy, Loc); |
1876 | | |
1877 | | // Up to 32 threads in warp 0 are active. |
1878 | 0 | llvm::Value *IsActiveThread = |
1879 | 0 | Bld.CreateICmpULT(ThreadID, NumWarpsVal, "is_active_thread"); |
1880 | 0 | Bld.CreateCondBr(IsActiveThread, W0ThenBB, W0ElseBB); |
1881 | |
|
1882 | 0 | CGF.EmitBlock(W0ThenBB); |
1883 | | |
1884 | | // SrcMediumPtr = &medium[tid] |
1885 | 0 | llvm::Value *SrcMediumPtrVal = Bld.CreateInBoundsGEP( |
1886 | 0 | TransferMedium->getValueType(), TransferMedium, |
1887 | 0 | {llvm::Constant::getNullValue(CGM.Int64Ty), ThreadID}); |
1888 | | // SrcMediumVal = *SrcMediumPtr; |
1889 | 0 | Address SrcMediumPtr(SrcMediumPtrVal, CopyType, Align); |
1890 | | |
1891 | | // TargetElemPtr = (CopyType*)(SrcDataAddr[i]) + I |
1892 | 0 | Address TargetElemPtrPtr = Bld.CreateConstArrayGEP(LocalReduceList, Idx); |
1893 | 0 | llvm::Value *TargetElemPtrVal = CGF.EmitLoadOfScalar( |
1894 | 0 | TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, Loc); |
1895 | 0 | Address TargetElemPtr(TargetElemPtrVal, CopyType, Align); |
1896 | 0 | if (NumIters > 1) |
1897 | 0 | TargetElemPtr = Bld.CreateGEP(TargetElemPtr, Cnt); |
1898 | | |
1899 | | // *TargetElemPtr = SrcMediumVal; |
1900 | 0 | llvm::Value *SrcMediumValue = |
1901 | 0 | CGF.EmitLoadOfScalar(SrcMediumPtr, /*Volatile=*/true, CType, Loc); |
1902 | 0 | CGF.EmitStoreOfScalar(SrcMediumValue, TargetElemPtr, /*Volatile=*/false, |
1903 | 0 | CType); |
1904 | 0 | Bld.CreateBr(W0MergeBB); |
1905 | |
|
1906 | 0 | CGF.EmitBlock(W0ElseBB); |
1907 | 0 | Bld.CreateBr(W0MergeBB); |
1908 | |
|
1909 | 0 | CGF.EmitBlock(W0MergeBB); |
1910 | |
|
1911 | 0 | if (NumIters > 1) { |
1912 | 0 | Cnt = Bld.CreateNSWAdd(Cnt, llvm::ConstantInt::get(CGM.IntTy, /*V=*/1)); |
1913 | 0 | CGF.EmitStoreOfScalar(Cnt, CntAddr, /*Volatile=*/false, C.IntTy); |
1914 | 0 | CGF.EmitBranch(PrecondBB); |
1915 | 0 | (void)ApplyDebugLocation::CreateEmpty(CGF); |
1916 | 0 | CGF.EmitBlock(ExitBB); |
1917 | 0 | } |
1918 | 0 | RealTySize %= TySize; |
1919 | 0 | } |
1920 | 0 | ++Idx; |
1921 | 0 | } |
1922 | |
|
1923 | 0 | CGF.FinishFunction(); |
1924 | 0 | return Fn; |
1925 | 0 | } |
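
Putting the generated blocks together, the helper performs a barrier-framed, two-phase exchange through the shared transfer medium. The sketch below replays those two phases sequentially on the host with made-up warp counts and values:

    #include <cstdio>

    int main() {
      const int NumWarps = 4;                       // hypothetical active warps
      int PartialPerWarp[NumWarps] = {1, 2, 3, 4};  // value held by each warp's lane 0
      int TransferMedium[NumWarps] = {};            // stands in for the __shared__ array

      // Phase 1: lane 0 of every warp publishes its value to the medium
      // (framed by __kmpc_barrier calls on the device).
      for (int WarpID = 0; WarpID < NumWarps; ++WarpID)
        TransferMedium[WarpID] = PartialPerWarp[WarpID];

      // Phase 2: threads 0..NumWarps-1 of warp 0 read medium[thread_id] back.
      for (int ThreadID = 0; ThreadID < NumWarps; ++ThreadID)
        std::printf("warp-0 thread %d receives %d\n", ThreadID,
                    TransferMedium[ThreadID]);
    }
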
1926 | | |
1927 | | /// Emit a helper that reduces data across two OpenMP threads (lanes) |
1928 | | /// in the same warp. It uses shuffle instructions to copy over data from |
1929 | | /// a remote lane's stack. The reduction algorithm performed is specified |
1930 | | /// by the fourth parameter. |
1931 | | /// |
1932 | | /// Algorithm Versions. |
1933 | | /// Full Warp Reduce (argument value 0): |
1934 | | /// This algorithm assumes that all 32 lanes are active and gathers |
1935 | | /// data from these 32 lanes, producing a single resultant value. |
1936 | | /// Contiguous Partial Warp Reduce (argument value 1): |
1937 | | /// This algorithm assumes that only a *contiguous* subset of lanes |
1938 | | /// are active. This happens for the last warp in a parallel region |
1939 | | /// when the user specified num_threads is not an integer multiple of |
1940 | | /// 32. This contiguous subset always starts with the zeroth lane. |
1941 | | /// Partial Warp Reduce (argument value 2): |
1942 | | /// This algorithm gathers data from any number of lanes at any position. |
1943 | | /// All reduced values are stored in the lowest possible lane. The set |
1944 | | /// of problems every algorithm addresses is a super set of those |
1945 | | /// addressable by algorithms with a lower version number. Overhead |
1946 | | /// increases as algorithm version increases. |
1947 | | /// |
1948 | | /// Terminology |
1949 | | /// Reduce element: |
1950 | | /// Reduce element refers to the individual data field with primitive |
1951 | | /// data types to be combined and reduced across threads. |
1952 | | /// Reduce list: |
1953 | | /// Reduce list refers to a collection of local, thread-private |
1954 | | /// reduce elements. |
1955 | | /// Remote Reduce list: |
1956 | | /// Remote Reduce list refers to a collection of remote (relative to |
1957 | | /// the current thread) reduce elements. |
1958 | | /// |
1959 | | /// We distinguish between three states of threads that are important to |
1960 | | /// the implementation of this function. |
1961 | | /// Alive threads: |
1962 | | /// Threads in a warp executing the SIMT instruction, as distinguished from |
1963 | | /// threads that are inactive due to divergent control flow. |
1964 | | /// Active threads: |
1965 | | /// The minimal set of threads that has to be alive upon entry to this |
1966 | | /// function. The computation is correct iff active threads are alive. |
1967 | | /// Some threads are alive but they are not active because they do not |
1968 | | /// contribute to the computation in any useful manner. Turning them off |
1969 | | /// may introduce control flow overheads without any tangible benefits. |
1970 | | /// Effective threads: |
1971 | | /// In order to comply with the argument requirements of the shuffle |
1972 | | /// function, we must keep all lanes holding data alive. But at most |
1973 | | /// half of them perform value aggregation; we refer to this half of |
1974 | | /// threads as effective. The other half is simply handing off their |
1975 | | /// data. |
1976 | | /// |
1977 | | /// Procedure |
1978 | | /// Value shuffle: |
1979 | | /// In this step active threads transfer data from higher lane positions |
1980 | | /// in the warp to lower lane positions, creating Remote Reduce list. |
1981 | | /// Value aggregation: |
1982 | | /// In this step, effective threads combine their thread local Reduce list |
1983 | | /// with Remote Reduce list and store the result in the thread local |
1984 | | /// Reduce list. |
1985 | | /// Value copy: |
1986 | | /// In this step, we deal with the assumption made by algorithm 2 |
1987 | | /// (i.e. contiguity assumption). When we have an odd number of lanes |
1988 | | /// active, say 2k+1, only k threads will be effective and therefore k |
1989 | | /// new values will be produced. However, the Reduce list owned by the |
1990 | | /// (2k+1)th thread is ignored in the value aggregation. Therefore |
1991 | | /// we copy the Reduce list from the (2k+1)th lane to (k+1)th lane so |
1992 | | /// that the contiguity assumption still holds. |
1993 | | static llvm::Function *emitShuffleAndReduceFunction( |
1994 | | CodeGenModule &CGM, ArrayRef<const Expr *> Privates, |
1995 | 0 | QualType ReductionArrayTy, llvm::Function *ReduceFn, SourceLocation Loc) { |
1996 | 0 | ASTContext &C = CGM.getContext(); |
1997 | | |
1998 | | // Thread local Reduce list used to host the values of data to be reduced. |
1999 | 0 | ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, |
2000 | 0 | C.VoidPtrTy, ImplicitParamKind::Other); |
2001 | | // Current lane id; could be logical. |
2002 | 0 | ImplicitParamDecl LaneIDArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.ShortTy, |
2003 | 0 | ImplicitParamKind::Other); |
2004 | | // Offset of the remote source lane relative to the current lane. |
2005 | 0 | ImplicitParamDecl RemoteLaneOffsetArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, |
2006 | 0 | C.ShortTy, ImplicitParamKind::Other); |
2007 | | // Algorithm version. This is expected to be known at compile time. |
2008 | 0 | ImplicitParamDecl AlgoVerArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, |
2009 | 0 | C.ShortTy, ImplicitParamKind::Other); |
2010 | 0 | FunctionArgList Args; |
2011 | 0 | Args.push_back(&ReduceListArg); |
2012 | 0 | Args.push_back(&LaneIDArg); |
2013 | 0 | Args.push_back(&RemoteLaneOffsetArg); |
2014 | 0 | Args.push_back(&AlgoVerArg); |
2015 | |
|
2016 | 0 | const CGFunctionInfo &CGFI = |
2017 | 0 | CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); |
2018 | 0 | auto *Fn = llvm::Function::Create( |
2019 | 0 | CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage, |
2020 | 0 | "_omp_reduction_shuffle_and_reduce_func", &CGM.getModule()); |
2021 | 0 | CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI); |
2022 | 0 | Fn->setDoesNotRecurse(); |
2023 | |
|
2024 | 0 | CodeGenFunction CGF(CGM); |
2025 | 0 | CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc); |
2026 | |
|
2027 | 0 | CGBuilderTy &Bld = CGF.Builder; |
2028 | |
|
2029 | 0 | Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg); |
2030 | 0 | llvm::Type *ElemTy = CGF.ConvertTypeForMem(ReductionArrayTy); |
2031 | 0 | Address LocalReduceList( |
2032 | 0 | Bld.CreatePointerBitCastOrAddrSpaceCast( |
2033 | 0 | CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false, |
2034 | 0 | C.VoidPtrTy, SourceLocation()), |
2035 | 0 | ElemTy->getPointerTo()), |
2036 | 0 | ElemTy, CGF.getPointerAlign()); |
2037 | |
|
2038 | 0 | Address AddrLaneIDArg = CGF.GetAddrOfLocalVar(&LaneIDArg); |
2039 | 0 | llvm::Value *LaneIDArgVal = CGF.EmitLoadOfScalar( |
2040 | 0 | AddrLaneIDArg, /*Volatile=*/false, C.ShortTy, SourceLocation()); |
2041 | |
|
2042 | 0 | Address AddrRemoteLaneOffsetArg = CGF.GetAddrOfLocalVar(&RemoteLaneOffsetArg); |
2043 | 0 | llvm::Value *RemoteLaneOffsetArgVal = CGF.EmitLoadOfScalar( |
2044 | 0 | AddrRemoteLaneOffsetArg, /*Volatile=*/false, C.ShortTy, SourceLocation()); |
2045 | |
|
2046 | 0 | Address AddrAlgoVerArg = CGF.GetAddrOfLocalVar(&AlgoVerArg); |
2047 | 0 | llvm::Value *AlgoVerArgVal = CGF.EmitLoadOfScalar( |
2048 | 0 | AddrAlgoVerArg, /*Volatile=*/false, C.ShortTy, SourceLocation()); |
2049 | | |
2050 | | // Create a local thread-private variable to host the Reduce list |
2051 | | // from a remote lane. |
2052 | 0 | Address RemoteReduceList = |
2053 | 0 | CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.remote_reduce_list"); |
2054 | | |
2055 | | // This loop iterates through the list of reduce elements and copies, |
2056 | | // element by element, from a remote lane in the warp to RemoteReduceList, |
2057 | | // hosted on the thread's stack. |
2058 | 0 | emitReductionListCopy(RemoteLaneToThread, CGF, ReductionArrayTy, Privates, |
2059 | 0 | LocalReduceList, RemoteReduceList, |
2060 | 0 | {/*RemoteLaneOffset=*/RemoteLaneOffsetArgVal, |
2061 | 0 | /*ScratchpadIndex=*/nullptr, |
2062 | 0 | /*ScratchpadWidth=*/nullptr}); |
2063 | | |
2064 | | // The actions to be performed on the Remote Reduce list are dependent 
2065 | | // on the algorithm version. |
2066 | | // |
2067 | | // if (AlgoVer==0) || (AlgoVer==1 && (LaneId < Offset)) || (AlgoVer==2 && |
2068 | | // LaneId % 2 == 0 && Offset > 0): |
2069 | | // do the reduction value aggregation |
2070 | | // |
2071 | | // The thread local variable Reduce list is mutated in place to host the |
2072 | | // reduced data, which is the aggregated value produced from local and |
2073 | | // remote lanes. |
2074 | | // |
2075 | | // Note that AlgoVer is expected to be a constant integer known at compile |
2076 | | // time. |
2077 | | // When AlgoVer==0, the first conjunction evaluates to true, making |
2078 | | // the entire predicate true during compile time. |
2079 | | // When AlgoVer==1, the second conjunction has only the second part to be |
2080 | | // evaluated during runtime. Other conjunctions evaluate to false 
2081 | | // during compile time. |
2082 | | // When AlgoVer==2, the third conjunction has only the second part to be |
2083 | | // evaluated during runtime. Other conjunctions evaluate to false 
2084 | | // during compile time. |
2085 | 0 | llvm::Value *CondAlgo0 = Bld.CreateIsNull(AlgoVerArgVal); |
2086 | |
|
2087 | 0 | llvm::Value *Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1)); |
2088 | 0 | llvm::Value *CondAlgo1 = Bld.CreateAnd( |
2089 | 0 | Algo1, Bld.CreateICmpULT(LaneIDArgVal, RemoteLaneOffsetArgVal)); |
2090 | |
|
2091 | 0 | llvm::Value *Algo2 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(2)); |
2092 | 0 | llvm::Value *CondAlgo2 = Bld.CreateAnd( |
2093 | 0 | Algo2, Bld.CreateIsNull(Bld.CreateAnd(LaneIDArgVal, Bld.getInt16(1)))); |
2094 | 0 | CondAlgo2 = Bld.CreateAnd( |
2095 | 0 | CondAlgo2, Bld.CreateICmpSGT(RemoteLaneOffsetArgVal, Bld.getInt16(0))); |
2096 | |
|
2097 | 0 | llvm::Value *CondReduce = Bld.CreateOr(CondAlgo0, CondAlgo1); |
2098 | 0 | CondReduce = Bld.CreateOr(CondReduce, CondAlgo2); |
2099 | |
|
2100 | 0 | llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then"); |
2101 | 0 | llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else"); |
2102 | 0 | llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont"); |
2103 | 0 | Bld.CreateCondBr(CondReduce, ThenBB, ElseBB); |
2104 | |
|
2105 | 0 | CGF.EmitBlock(ThenBB); |
2106 | | // reduce_function(LocalReduceList, RemoteReduceList) |
2107 | 0 | llvm::Value *LocalReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( |
2108 | 0 | LocalReduceList.getPointer(), CGF.VoidPtrTy); |
2109 | 0 | llvm::Value *RemoteReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( |
2110 | 0 | RemoteReduceList.getPointer(), CGF.VoidPtrTy); |
2111 | 0 | CGM.getOpenMPRuntime().emitOutlinedFunctionCall( |
2112 | 0 | CGF, Loc, ReduceFn, {LocalReduceListPtr, RemoteReduceListPtr}); |
2113 | 0 | Bld.CreateBr(MergeBB); |
2114 | |
|
2115 | 0 | CGF.EmitBlock(ElseBB); |
2116 | 0 | Bld.CreateBr(MergeBB); |
2117 | |
|
2118 | 0 | CGF.EmitBlock(MergeBB); |
2119 | | |
2120 | | // if (AlgoVer==1 && (LaneId >= Offset)) copy Remote Reduce list to local |
2121 | | // Reduce list. |
2122 | 0 | Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1)); |
2123 | 0 | llvm::Value *CondCopy = Bld.CreateAnd( |
2124 | 0 | Algo1, Bld.CreateICmpUGE(LaneIDArgVal, RemoteLaneOffsetArgVal)); |
2125 | |
|
2126 | 0 | llvm::BasicBlock *CpyThenBB = CGF.createBasicBlock("then"); |
2127 | 0 | llvm::BasicBlock *CpyElseBB = CGF.createBasicBlock("else"); |
2128 | 0 | llvm::BasicBlock *CpyMergeBB = CGF.createBasicBlock("ifcont"); |
2129 | 0 | Bld.CreateCondBr(CondCopy, CpyThenBB, CpyElseBB); |
2130 | |
|
2131 | 0 | CGF.EmitBlock(CpyThenBB); |
2132 | 0 | emitReductionListCopy(ThreadCopy, CGF, ReductionArrayTy, Privates, |
2133 | 0 | RemoteReduceList, LocalReduceList); |
2134 | 0 | Bld.CreateBr(CpyMergeBB); |
2135 | |
|
2136 | 0 | CGF.EmitBlock(CpyElseBB); |
2137 | 0 | Bld.CreateBr(CpyMergeBB); |
2138 | |
|
2139 | 0 | CGF.EmitBlock(CpyMergeBB); |
2140 | |
|
2141 | 0 | CGF.FinishFunction(); |
2142 | 0 | return Fn; |
2143 | 0 | } |
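
For the AlgoVer==0 case the helper is invoked with halving offsets (16, 8, 4, 2, 1 on a 32-lane warp), so every round folds a remote partial value into the local one until lane 0 owns the final result. A host simulation of that schedule with dummy data (the per-lane values and the '+' reduction are assumptions for illustration):

    #include <cstdio>

    int main() {
      const int WarpSize = 32;
      int Partial[WarpSize];
      for (int Lane = 0; Lane < WarpSize; ++Lane)
        Partial[Lane] = Lane + 1; // each lane contributes lane_id + 1

      // Each round: lane i folds in the value shuffled down from lane i+Offset.
      for (int Offset = WarpSize / 2; Offset > 0; Offset /= 2)
        for (int Lane = 0; Lane < WarpSize; ++Lane)
          if (Lane + Offset < WarpSize)
            Partial[Lane] += Partial[Lane + Offset];

      std::printf("lane 0 holds %d (expected %d)\n", Partial[0],
                  WarpSize * (WarpSize + 1) / 2);
    }
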
2144 | | |
2145 | | /// This function emits a helper that copies all the reduction variables from |
2146 | | /// the team into the provided global buffer for the reduction variables. |
2147 | | /// |
2148 | | /// void list_to_global_copy_func(void *buffer, int Idx, void *reduce_data) |
2149 | | /// For all data entries D in reduce_data: |
2150 | | /// Copy local D to buffer.D[Idx] |
2151 | | static llvm::Value *emitListToGlobalCopyFunction( |
2152 | | CodeGenModule &CGM, ArrayRef<const Expr *> Privates, |
2153 | | QualType ReductionArrayTy, SourceLocation Loc, |
2154 | | const RecordDecl *TeamReductionRec, |
2155 | | const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> |
2156 | 0 | &VarFieldMap) { |
2157 | 0 | ASTContext &C = CGM.getContext(); |
2158 | | |
2159 | | // Buffer: global reduction buffer. |
2160 | 0 | ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, |
2161 | 0 | C.VoidPtrTy, ImplicitParamKind::Other); |
2162 | | // Idx: index of the buffer. |
2163 | 0 | ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy, |
2164 | 0 | ImplicitParamKind::Other); |
2165 | | // ReduceList: thread local Reduce list. |
2166 | 0 | ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, |
2167 | 0 | C.VoidPtrTy, ImplicitParamKind::Other); |
2168 | 0 | FunctionArgList Args; |
2169 | 0 | Args.push_back(&BufferArg); |
2170 | 0 | Args.push_back(&IdxArg); |
2171 | 0 | Args.push_back(&ReduceListArg); |
2172 | |
|
2173 | 0 | const CGFunctionInfo &CGFI = |
2174 | 0 | CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); |
2175 | 0 | auto *Fn = llvm::Function::Create( |
2176 | 0 | CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage, |
2177 | 0 | "_omp_reduction_list_to_global_copy_func", &CGM.getModule()); |
2178 | 0 | CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI); |
2179 | 0 | Fn->setDoesNotRecurse(); |
2180 | 0 | CodeGenFunction CGF(CGM); |
2181 | 0 | CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc); |
2182 | |
|
2183 | 0 | CGBuilderTy &Bld = CGF.Builder; |
2184 | |
|
2185 | 0 | Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg); |
2186 | 0 | Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg); |
2187 | 0 | llvm::Type *ElemTy = CGF.ConvertTypeForMem(ReductionArrayTy); |
2188 | 0 | Address LocalReduceList( |
2189 | 0 | Bld.CreatePointerBitCastOrAddrSpaceCast( |
2190 | 0 | CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false, |
2191 | 0 | C.VoidPtrTy, Loc), |
2192 | 0 | ElemTy->getPointerTo()), |
2193 | 0 | ElemTy, CGF.getPointerAlign()); |
2194 | 0 | QualType StaticTy = C.getRecordType(TeamReductionRec); |
2195 | 0 | llvm::Type *LLVMReductionsBufferTy = |
2196 | 0 | CGM.getTypes().ConvertTypeForMem(StaticTy); |
2197 | 0 | llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( |
2198 | 0 | CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc), |
2199 | 0 | LLVMReductionsBufferTy->getPointerTo()); |
2200 | 0 | llvm::Value *Idxs[] = {CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg), |
2201 | 0 | /*Volatile=*/false, C.IntTy, |
2202 | 0 | Loc)}; |
2203 | 0 | unsigned Idx = 0; |
2204 | 0 | for (const Expr *Private : Privates) { |
2205 | | // Reduce element = LocalReduceList[i] |
2206 | 0 | Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx); |
2207 | 0 | llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar( |
2208 | 0 | ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation()); |
2209 | | // elemptr = ((CopyType*)(elemptrptr)) + I |
2210 | 0 | ElemTy = CGF.ConvertTypeForMem(Private->getType()); |
2211 | 0 | ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( |
2212 | 0 | ElemPtrPtr, ElemTy->getPointerTo()); |
2213 | 0 | Address ElemPtr = |
2214 | 0 | Address(ElemPtrPtr, ElemTy, C.getTypeAlignInChars(Private->getType())); |
2215 | 0 | const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl(); |
2216 | | // Global = Buffer.VD[Idx]; |
2217 | 0 | const FieldDecl *FD = VarFieldMap.lookup(VD); |
2218 | 0 | llvm::Value *BufferPtr = |
2219 | 0 | Bld.CreateInBoundsGEP(LLVMReductionsBufferTy, BufferArrPtr, Idxs); |
2220 | 0 | LValue GlobLVal = CGF.EmitLValueForField( |
2221 | 0 | CGF.MakeNaturalAlignAddrLValue(BufferPtr, StaticTy), FD); |
2222 | 0 | Address GlobAddr = GlobLVal.getAddress(CGF); |
2223 | 0 | GlobLVal.setAddress(Address(GlobAddr.getPointer(), |
2224 | 0 | CGF.ConvertTypeForMem(Private->getType()), |
2225 | 0 | GlobAddr.getAlignment())); |
2226 | 0 | switch (CGF.getEvaluationKind(Private->getType())) { |
2227 | 0 | case TEK_Scalar: { |
2228 | 0 | llvm::Value *V = CGF.EmitLoadOfScalar( |
2229 | 0 | ElemPtr, /*Volatile=*/false, Private->getType(), Loc, |
2230 | 0 | LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo()); |
2231 | 0 | CGF.EmitStoreOfScalar(V, GlobLVal); |
2232 | 0 | break; |
2233 | 0 | } |
2234 | 0 | case TEK_Complex: { |
2235 | 0 | CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex( |
2236 | 0 | CGF.MakeAddrLValue(ElemPtr, Private->getType()), Loc); |
2237 | 0 | CGF.EmitStoreOfComplex(V, GlobLVal, /*isInit=*/false); |
2238 | 0 | break; |
2239 | 0 | } |
2240 | 0 | case TEK_Aggregate: |
2241 | 0 | CGF.EmitAggregateCopy(GlobLVal, |
2242 | 0 | CGF.MakeAddrLValue(ElemPtr, Private->getType()), |
2243 | 0 | Private->getType(), AggValueSlot::DoesNotOverlap); |
2244 | 0 | break; |
2245 | 0 | } |
2246 | 0 | ++Idx; |
2247 | 0 | } |
2248 | | |
2249 | 0 | CGF.FinishFunction(); |
2250 | 0 | return Fn; |
2251 | 0 | } |
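
The net effect of this helper is an element-wise copy of the thread's reduce list into the row of the global team-reduction buffer selected by Idx. A sketch with a hypothetical two-variable reduction record (the real record layout is built elsewhere from VarFieldMap):

    #include <cstdio>

    // Hypothetical team-reduction record with one field per reduction variable;
    // the global buffer holds one such record per slot, indexed by Idx.
    struct ReductionRecord { int Sum; double Acc; };

    int main() {
      ReductionRecord Buffer[8] = {};               // global reduction buffer
      int LocalSum = 7;
      double LocalAcc = 2.5;
      void *ReduceList[2] = {&LocalSum, &LocalAcc}; // thread-local reduce list

      int Idx = 3;                                  // slot picked by the runtime
      Buffer[Idx].Sum = *static_cast<int *>(ReduceList[0]);
      Buffer[Idx].Acc = *static_cast<double *>(ReduceList[1]);
      std::printf("slot %d: Sum=%d Acc=%g\n", Idx, Buffer[Idx].Sum, Buffer[Idx].Acc);
    }
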
2252 | | |
2253 | | /// This function emits a helper that reduces all the reduction variables from |
2254 | | /// the team into the provided global buffer for the reduction variables. |
2255 | | /// |
2256 | | /// void list_to_global_reduce_func(void *buffer, int Idx, void *reduce_data) |
2257 | | /// void *GlobPtrs[]; |
2258 | | /// GlobPtrs[0] = (void*)&buffer.D0[Idx]; |
2259 | | /// ... |
2260 | | /// GlobPtrs[N] = (void*)&buffer.DN[Idx]; |
2261 | | /// reduce_function(GlobPtrs, reduce_data); |
2262 | | static llvm::Value *emitListToGlobalReduceFunction( |
2263 | | CodeGenModule &CGM, ArrayRef<const Expr *> Privates, |
2264 | | QualType ReductionArrayTy, SourceLocation Loc, |
2265 | | const RecordDecl *TeamReductionRec, |
2266 | | const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> |
2267 | | &VarFieldMap, |
2268 | 0 | llvm::Function *ReduceFn) { |
2269 | 0 | ASTContext &C = CGM.getContext(); |
2270 | | |
2271 | | // Buffer: global reduction buffer. |
2272 | 0 | ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, |
2273 | 0 | C.VoidPtrTy, ImplicitParamKind::Other); |
2274 | | // Idx: index of the buffer. |
2275 | 0 | ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy, |
2276 | 0 | ImplicitParamKind::Other); |
2277 | | // ReduceList: thread local Reduce list. |
2278 | 0 | ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, |
2279 | 0 | C.VoidPtrTy, ImplicitParamKind::Other); |
2280 | 0 | FunctionArgList Args; |
2281 | 0 | Args.push_back(&BufferArg); |
2282 | 0 | Args.push_back(&IdxArg); |
2283 | 0 | Args.push_back(&ReduceListArg); |
2284 | |
|
2285 | 0 | const CGFunctionInfo &CGFI = |
2286 | 0 | CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); |
2287 | 0 | auto *Fn = llvm::Function::Create( |
2288 | 0 | CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage, |
2289 | 0 | "_omp_reduction_list_to_global_reduce_func", &CGM.getModule()); |
2290 | 0 | CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI); |
2291 | 0 | Fn->setDoesNotRecurse(); |
2292 | 0 | CodeGenFunction CGF(CGM); |
2293 | 0 | CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc); |
2294 | |
|
2295 | 0 | CGBuilderTy &Bld = CGF.Builder; |
2296 | |
|
2297 | 0 | Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg); |
2298 | 0 | QualType StaticTy = C.getRecordType(TeamReductionRec); |
2299 | 0 | llvm::Type *LLVMReductionsBufferTy = |
2300 | 0 | CGM.getTypes().ConvertTypeForMem(StaticTy); |
2301 | 0 | llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( |
2302 | 0 | CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc), |
2303 | 0 | LLVMReductionsBufferTy->getPointerTo()); |
2304 | | |
2305 | | // 1. Build a list of reduction variables. |
2306 | | // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]}; |
2307 | 0 | Address ReductionList = |
2308 | 0 | CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list"); |
2309 | 0 | auto IPriv = Privates.begin(); |
2310 | 0 | llvm::Value *Idxs[] = {CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg), |
2311 | 0 | /*Volatile=*/false, C.IntTy, |
2312 | 0 | Loc)}; |
2313 | 0 | unsigned Idx = 0; |
2314 | 0 | for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) { |
2315 | 0 | Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx); |
2316 | | // Global = Buffer.VD[Idx]; |
2317 | 0 | const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl(); |
2318 | 0 | const FieldDecl *FD = VarFieldMap.lookup(VD); |
2319 | 0 | llvm::Value *BufferPtr = |
2320 | 0 | Bld.CreateInBoundsGEP(LLVMReductionsBufferTy, BufferArrPtr, Idxs); |
2321 | 0 | LValue GlobLVal = CGF.EmitLValueForField( |
2322 | 0 | CGF.MakeNaturalAlignAddrLValue(BufferPtr, StaticTy), FD); |
2323 | 0 | Address GlobAddr = GlobLVal.getAddress(CGF); |
2324 | 0 | CGF.EmitStoreOfScalar(GlobAddr.getPointer(), Elem, /*Volatile=*/false, |
2325 | 0 | C.VoidPtrTy); |
2326 | 0 | if ((*IPriv)->getType()->isVariablyModifiedType()) { |
2327 | | // Store array size. |
2328 | 0 | ++Idx; |
2329 | 0 | Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx); |
2330 | 0 | llvm::Value *Size = CGF.Builder.CreateIntCast( |
2331 | 0 | CGF.getVLASize( |
2332 | 0 | CGF.getContext().getAsVariableArrayType((*IPriv)->getType())) |
2333 | 0 | .NumElts, |
2334 | 0 | CGF.SizeTy, /*isSigned=*/false); |
2335 | 0 | CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy), |
2336 | 0 | Elem); |
2337 | 0 | } |
2338 | 0 | } |
2339 | | |
2340 | | // Call reduce_function(GlobalReduceList, ReduceList) |
2341 | 0 | llvm::Value *GlobalReduceList = ReductionList.getPointer(); |
2342 | 0 | Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg); |
2343 | 0 | llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar( |
2344 | 0 | AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc); |
2345 | 0 | CGM.getOpenMPRuntime().emitOutlinedFunctionCall( |
2346 | 0 | CGF, Loc, ReduceFn, {GlobalReduceList, ReducedPtr}); |
2347 | 0 | CGF.FinishFunction(); |
2348 | 0 | return Fn; |
2349 | 0 | } |
2350 | | |
2351 | | /// This function emits a helper that copies all the reduction variables from 
2352 | | /// the provided global buffer back into the team's reduction variables. 
2353 | | /// 
2354 | | /// void global_to_list_copy_func(void *buffer, int Idx, void *reduce_data) 
2355 | | /// For all data entries D in reduce_data: |
2356 | | /// Copy buffer.D[Idx] to local D; |
2357 | | static llvm::Value *emitGlobalToListCopyFunction( |
2358 | | CodeGenModule &CGM, ArrayRef<const Expr *> Privates, |
2359 | | QualType ReductionArrayTy, SourceLocation Loc, |
2360 | | const RecordDecl *TeamReductionRec, |
2361 | | const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> |
2362 | 0 | &VarFieldMap) { |
2363 | 0 | ASTContext &C = CGM.getContext(); |
2364 | | |
2365 | | // Buffer: global reduction buffer. |
2366 | 0 | ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, |
2367 | 0 | C.VoidPtrTy, ImplicitParamKind::Other); |
2368 | | // Idx: index of the buffer. |
2369 | 0 | ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy, |
2370 | 0 | ImplicitParamKind::Other); |
2371 | | // ReduceList: thread local Reduce list. |
2372 | 0 | ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, |
2373 | 0 | C.VoidPtrTy, ImplicitParamKind::Other); |
2374 | 0 | FunctionArgList Args; |
2375 | 0 | Args.push_back(&BufferArg); |
2376 | 0 | Args.push_back(&IdxArg); |
2377 | 0 | Args.push_back(&ReduceListArg); |
2378 | |
|
2379 | 0 | const CGFunctionInfo &CGFI = |
2380 | 0 | CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); |
2381 | 0 | auto *Fn = llvm::Function::Create( |
2382 | 0 | CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage, |
2383 | 0 | "_omp_reduction_global_to_list_copy_func", &CGM.getModule()); |
2384 | 0 | CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI); |
2385 | 0 | Fn->setDoesNotRecurse(); |
2386 | 0 | CodeGenFunction CGF(CGM); |
2387 | 0 | CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc); |
2388 | |
|
2389 | 0 | CGBuilderTy &Bld = CGF.Builder; |
2390 | |
|
2391 | 0 | Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg); |
2392 | 0 | Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg); |
2393 | 0 | llvm::Type *ElemTy = CGF.ConvertTypeForMem(ReductionArrayTy); |
2394 | 0 | Address LocalReduceList( |
2395 | 0 | Bld.CreatePointerBitCastOrAddrSpaceCast( |
2396 | 0 | CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false, |
2397 | 0 | C.VoidPtrTy, Loc), |
2398 | 0 | ElemTy->getPointerTo()), |
2399 | 0 | ElemTy, CGF.getPointerAlign()); |
2400 | 0 | QualType StaticTy = C.getRecordType(TeamReductionRec); |
2401 | 0 | llvm::Type *LLVMReductionsBufferTy = |
2402 | 0 | CGM.getTypes().ConvertTypeForMem(StaticTy); |
2403 | 0 | llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( |
2404 | 0 | CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc), |
2405 | 0 | LLVMReductionsBufferTy->getPointerTo()); |
2406 | |
2407 | 0 | llvm::Value *Idxs[] = {CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg), |
2408 | 0 | /*Volatile=*/false, C.IntTy, |
2409 | 0 | Loc)}; |
2410 | 0 | unsigned Idx = 0; |
2411 | 0 | for (const Expr *Private : Privates) { |
2412 | | // Reduce element = LocalReduceList[i] |
2413 | 0 | Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx); |
2414 | 0 | llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar( |
2415 | 0 | ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation()); |
2416 | | // elemptr = ((CopyType*)(elemptrptr)) + I |
2417 | 0 | ElemTy = CGF.ConvertTypeForMem(Private->getType()); |
2418 | 0 | ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( |
2419 | 0 | ElemPtrPtr, ElemTy->getPointerTo()); |
2420 | 0 | Address ElemPtr = |
2421 | 0 | Address(ElemPtrPtr, ElemTy, C.getTypeAlignInChars(Private->getType())); |
2422 | 0 | const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl(); |
2423 | | // Global = Buffer.VD[Idx]; |
2424 | 0 | const FieldDecl *FD = VarFieldMap.lookup(VD); |
2425 | 0 | llvm::Value *BufferPtr = |
2426 | 0 | Bld.CreateInBoundsGEP(LLVMReductionsBufferTy, BufferArrPtr, Idxs); |
2427 | 0 | LValue GlobLVal = CGF.EmitLValueForField( |
2428 | 0 | CGF.MakeNaturalAlignAddrLValue(BufferPtr, StaticTy), FD); |
2429 | 0 | Address GlobAddr = GlobLVal.getAddress(CGF); |
2430 | 0 | GlobLVal.setAddress(Address(GlobAddr.getPointer(), |
2431 | 0 | CGF.ConvertTypeForMem(Private->getType()), |
2432 | 0 | GlobAddr.getAlignment())); |
2433 | 0 | switch (CGF.getEvaluationKind(Private->getType())) { |
2434 | 0 | case TEK_Scalar: { |
2435 | 0 | llvm::Value *V = CGF.EmitLoadOfScalar(GlobLVal, Loc); |
2436 | 0 | CGF.EmitStoreOfScalar(V, ElemPtr, /*Volatile=*/false, Private->getType(), |
2437 | 0 | LValueBaseInfo(AlignmentSource::Type), |
2438 | 0 | TBAAAccessInfo()); |
2439 | 0 | break; |
2440 | 0 | } |
2441 | 0 | case TEK_Complex: { |
2442 | 0 | CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(GlobLVal, Loc); |
2443 | 0 | CGF.EmitStoreOfComplex(V, CGF.MakeAddrLValue(ElemPtr, Private->getType()), |
2444 | 0 | /*isInit=*/false); |
2445 | 0 | break; |
2446 | 0 | } |
2447 | 0 | case TEK_Aggregate: |
2448 | 0 | CGF.EmitAggregateCopy(CGF.MakeAddrLValue(ElemPtr, Private->getType()), |
2449 | 0 | GlobLVal, Private->getType(), |
2450 | 0 | AggValueSlot::DoesNotOverlap); |
2451 | 0 | break; |
2452 | 0 | } |
2453 | 0 | ++Idx; |
2454 | 0 | } |
2455 | | |
2456 | 0 | CGF.FinishFunction(); |
2457 | 0 | return Fn; |
2458 | 0 | } |
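For orientation, here is a rough C-level sketch of the helper emitted by the routine above, assuming a hypothetical reduction over one float ('foo') and one double ('bar'). The struct layout, field names, and symbol names are illustrative only; the real helper is emitted directly as LLVM IR:

  // Illustrative only: one record slot per team in the global buffer.
  struct team_reduction_rec { float foo; double bar; };

  void global_to_list_copy_func(void *buffer, int idx, void *reduce_data) {
    struct team_reduction_rec *buf = (struct team_reduction_rec *)buffer;
    void **list = (void **)reduce_data;        // thread-local reduce list
    *(float *)list[0]  = buf[idx].foo;         // Copy buffer.foo[Idx] to local foo
    *(double *)list[1] = buf[idx].bar;         // Copy buffer.bar[Idx] to local bar
  }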
2459 | | |
2460 | | /// This function emits a helper that reduces the reduction variables held in |
2461 | | /// the provided global buffer into the thread-local reduction list. |
2462 | | /// |
2463 | | /// void global_to_list_reduce_func(void *buffer, int Idx, void *reduce_data) |
2464 | | /// void *GlobPtrs[]; |
2465 | | /// GlobPtrs[0] = (void*)&buffer.D0[Idx]; |
2466 | | /// ... |
2467 | | /// GlobPtrs[N] = (void*)&buffer.DN[Idx]; |
2468 | | /// reduce_function(reduce_data, GlobPtrs); |
2469 | | static llvm::Value *emitGlobalToListReduceFunction( |
2470 | | CodeGenModule &CGM, ArrayRef<const Expr *> Privates, |
2471 | | QualType ReductionArrayTy, SourceLocation Loc, |
2472 | | const RecordDecl *TeamReductionRec, |
2473 | | const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> |
2474 | | &VarFieldMap, |
2475 | 0 | llvm::Function *ReduceFn) { |
2476 | 0 | ASTContext &C = CGM.getContext(); |
2477 | | |
2478 | | // Buffer: global reduction buffer. |
2479 | 0 | ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, |
2480 | 0 | C.VoidPtrTy, ImplicitParamKind::Other); |
2481 | | // Idx: index of the buffer. |
2482 | 0 | ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy, |
2483 | 0 | ImplicitParamKind::Other); |
2484 | | // ReduceList: thread local Reduce list. |
2485 | 0 | ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, |
2486 | 0 | C.VoidPtrTy, ImplicitParamKind::Other); |
2487 | 0 | FunctionArgList Args; |
2488 | 0 | Args.push_back(&BufferArg); |
2489 | 0 | Args.push_back(&IdxArg); |
2490 | 0 | Args.push_back(&ReduceListArg); |
2491 | |
2492 | 0 | const CGFunctionInfo &CGFI = |
2493 | 0 | CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); |
2494 | 0 | auto *Fn = llvm::Function::Create( |
2495 | 0 | CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage, |
2496 | 0 | "_omp_reduction_global_to_list_reduce_func", &CGM.getModule()); |
2497 | 0 | CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI); |
2498 | 0 | Fn->setDoesNotRecurse(); |
2499 | 0 | CodeGenFunction CGF(CGM); |
2500 | 0 | CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc); |
2501 | |
2502 | 0 | CGBuilderTy &Bld = CGF.Builder; |
2503 | |
2504 | 0 | Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg); |
2505 | 0 | QualType StaticTy = C.getRecordType(TeamReductionRec); |
2506 | 0 | llvm::Type *LLVMReductionsBufferTy = |
2507 | 0 | CGM.getTypes().ConvertTypeForMem(StaticTy); |
2508 | 0 | llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( |
2509 | 0 | CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc), |
2510 | 0 | LLVMReductionsBufferTy->getPointerTo()); |
2511 | | |
2512 | | // 1. Build a list of reduction variables. |
2513 | | // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]}; |
2514 | 0 | Address ReductionList = |
2515 | 0 | CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list"); |
2516 | 0 | auto IPriv = Privates.begin(); |
2517 | 0 | llvm::Value *Idxs[] = {CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg), |
2518 | 0 | /*Volatile=*/false, C.IntTy, |
2519 | 0 | Loc)}; |
2520 | 0 | unsigned Idx = 0; |
2521 | 0 | for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) { |
2522 | 0 | Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx); |
2523 | | // Global = Buffer.VD[Idx]; |
2524 | 0 | const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl(); |
2525 | 0 | const FieldDecl *FD = VarFieldMap.lookup(VD); |
2526 | 0 | llvm::Value *BufferPtr = |
2527 | 0 | Bld.CreateInBoundsGEP(LLVMReductionsBufferTy, BufferArrPtr, Idxs); |
2528 | 0 | LValue GlobLVal = CGF.EmitLValueForField( |
2529 | 0 | CGF.MakeNaturalAlignAddrLValue(BufferPtr, StaticTy), FD); |
2530 | 0 | Address GlobAddr = GlobLVal.getAddress(CGF); |
2531 | 0 | CGF.EmitStoreOfScalar(GlobAddr.getPointer(), Elem, /*Volatile=*/false, |
2532 | 0 | C.VoidPtrTy); |
2533 | 0 | if ((*IPriv)->getType()->isVariablyModifiedType()) { |
2534 | | // Store array size. |
2535 | 0 | ++Idx; |
2536 | 0 | Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx); |
2537 | 0 | llvm::Value *Size = CGF.Builder.CreateIntCast( |
2538 | 0 | CGF.getVLASize( |
2539 | 0 | CGF.getContext().getAsVariableArrayType((*IPriv)->getType())) |
2540 | 0 | .NumElts, |
2541 | 0 | CGF.SizeTy, /*isSigned=*/false); |
2542 | 0 | CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy), |
2543 | 0 | Elem); |
2544 | 0 | } |
2545 | 0 | } |
2546 | | |
2547 | | // Call reduce_function(ReduceList, GlobalReduceList) |
2548 | 0 | llvm::Value *GlobalReduceList = ReductionList.getPointer(); |
2549 | 0 | Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg); |
2550 | 0 | llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar( |
2551 | 0 | AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc); |
2552 | 0 | CGM.getOpenMPRuntime().emitOutlinedFunctionCall( |
2553 | 0 | CGF, Loc, ReduceFn, {ReducedPtr, GlobalReduceList}); |
2554 | 0 | CGF.FinishFunction(); |
2555 | 0 | return Fn; |
2556 | 0 | } |
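Continuing the same hypothetical float/double example, the reduce variant emitted above behaves roughly like the sketch below: it builds a list of pointers into the team's slot of the global buffer and hands it, together with the thread-local list, to the compiler-generated reduce function. All names are illustrative:

  void global_to_list_reduce_func(void *buffer, int idx, void *reduce_data) {
    struct team_reduction_rec *buf = (struct team_reduction_rec *)buffer;
    void *glob_ptrs[2];
    glob_ptrs[0] = &buf[idx].foo;
    glob_ptrs[1] = &buf[idx].bar;
    // reduce_function is the generated combiner; by convention it reduces the
    // second operand list into the first.
    reduce_function(reduce_data, (void *)glob_ptrs);
  }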
2557 | | |
2558 | | /// |
2559 | | /// Design of OpenMP reductions on the GPU |
2560 | | /// |
2561 | | /// Consider a typical OpenMP program with one or more reduction |
2562 | | /// clauses: |
2563 | | /// |
2564 | | /// float foo; |
2565 | | /// double bar; |
2566 | | /// #pragma omp target teams distribute parallel for \ |
2567 | | /// reduction(+:foo) reduction(*:bar) |
2568 | | /// for (int i = 0; i < N; i++) { |
2569 | | /// foo += A[i]; bar *= B[i]; |
2570 | | /// } |
2571 | | /// |
2572 | | /// where 'foo' and 'bar' are reduced across all OpenMP threads in |
2573 | | /// all teams. In our OpenMP implementation on the NVPTX device an |
2574 | | /// OpenMP team is mapped to a CUDA threadblock and OpenMP threads |
2575 | | /// within a team are mapped to CUDA threads within a threadblock. |
2576 | | /// Our goal is to efficiently aggregate values across all OpenMP |
2577 | | /// threads such that: |
2578 | | /// |
2579 | | /// - the compiler and runtime are logically concise, and |
2580 | | /// - the reduction is performed efficiently in a hierarchical |
2581 | | /// manner as follows: within OpenMP threads in the same warp, |
2582 | | /// across warps in a threadblock, and finally across teams on |
2583 | | /// the NVPTX device. |
2584 | | /// |
2585 | | /// Introduction to Decoupling |
2586 | | /// |
2587 | | /// We would like to decouple the compiler and the runtime so that the |
2588 | | /// latter is ignorant of the reduction variables (number, data types) |
2589 | | /// and the reduction operators. This allows a simpler interface |
2590 | | /// and implementation while still attaining good performance. |
2591 | | /// |
2592 | | /// Pseudocode for the aforementioned OpenMP program generated by the |
2593 | | /// compiler is as follows: |
2594 | | /// |
2595 | | /// 1. Create private copies of reduction variables on each OpenMP |
2596 | | /// thread: 'foo_private', 'bar_private' |
2597 | | /// 2. Each OpenMP thread reduces the chunk of 'A' and 'B' assigned |
2598 | | /// to it and writes the result in 'foo_private' and 'bar_private' |
2599 | | /// respectively. |
2600 | | /// 3. Call the OpenMP runtime on the GPU to reduce within a team |
2601 | | /// and store the result on the team master: |
2602 | | /// |
2603 | | /// __kmpc_nvptx_parallel_reduce_nowait_v2(..., |
2604 | | /// reduceData, shuffleReduceFn, interWarpCpyFn) |
2605 | | /// |
2606 | | /// where: |
2607 | | /// struct ReduceData { |
2608 | | ///     float *foo; |
2609 | | /// double *bar; |
2610 | | /// } reduceData |
2611 | | /// reduceData.foo = &foo_private |
2612 | | /// reduceData.bar = &bar_private |
2613 | | /// |
2614 | | /// 'shuffleReduceFn' and 'interWarpCpyFn' are pointers to two |
2615 | | /// auxiliary functions generated by the compiler that operate on |
2616 | | /// variables of type 'ReduceData'. They help the runtime perform the |
2617 | | /// algorithmic steps in a data-agnostic manner. |
2618 | | /// |
2619 | | /// 'shuffleReduceFn' is a pointer to a function that reduces data |
2620 | | /// of type 'ReduceData' across two OpenMP threads (lanes) in the |
2621 | | /// same warp. It takes the following arguments as input: |
2622 | | /// |
2623 | | /// a. variable of type 'ReduceData' on the calling lane, |
2624 | | /// b. its lane_id, |
2625 | | /// c. an offset relative to the current lane_id to generate a |
2626 | | /// remote_lane_id. The remote lane contains the second |
2627 | | /// variable of type 'ReduceData' that is to be reduced. |
2628 | | /// d. an algorithm version parameter determining which reduction |
2629 | | /// algorithm to use. |
2630 | | /// |
2631 | | /// 'shuffleReduceFn' retrieves data from the remote lane using |
2632 | | /// efficient GPU shuffle intrinsics and reduces, using the |
2633 | | /// algorithm specified by the 4th parameter, the two operands |
2634 | | /// element-wise. The result is written to the first operand. |
2635 | | /// |
2636 | | /// Different reduction algorithms are implemented in different |
2637 | | /// runtime functions, all calling 'shuffleReduceFn' to perform |
2638 | | /// the essential reduction step. Therefore, based on the 4th |
2639 | | /// parameter, this function behaves slightly differently to |
2640 | | /// cooperate with the runtime to ensure correctness under |
2641 | | /// different circumstances. |
2642 | | /// |
2643 | | /// 'InterWarpCpyFn' is a pointer to a function that transfers |
2644 | | /// reduced variables across warps. It tunnels, through CUDA |
2645 | | /// shared memory, the thread-private data of type 'ReduceData' |
2646 | | /// from lane 0 of each warp to a lane in the first warp. |
2647 | | /// 4. Call the OpenMP runtime on the GPU to reduce across teams. |
2648 | | /// The last team writes the global reduced value to memory. |
2649 | | /// |
2650 | | /// ret = __kmpc_nvptx_teams_reduce_nowait(..., |
2651 | | /// reduceData, shuffleReduceFn, interWarpCpyFn, |
2652 | | /// scratchpadCopyFn, loadAndReduceFn) |
2653 | | /// |
2654 | | /// 'scratchpadCopyFn' is a helper that stores reduced |
2655 | | /// data from the team master to a scratchpad array in |
2656 | | /// global memory. |
2657 | | /// |
2658 | | /// 'loadAndReduceFn' is a helper that loads data from |
2659 | | /// the scratchpad array and reduces it with the input |
2660 | | /// operand. |
2661 | | /// |
2662 | | /// These compiler generated functions hide address |
2663 | | /// calculation and alignment information from the runtime. |
2664 | | /// 5. if ret == 1: |
2665 | | /// The team master of the last team stores the reduced |
2666 | | /// result to the globals in memory. |
2667 | | /// foo += reduceData.foo; bar *= reduceData.bar |
2668 | | /// |
2669 | | /// |
2670 | | /// Warp Reduction Algorithms |
2671 | | /// |
2672 | | /// On the warp level, we have three algorithms implemented in the |
2673 | | /// OpenMP runtime depending on the number of active lanes: |
2674 | | /// |
2675 | | /// Full Warp Reduction |
2676 | | /// |
2677 | | /// The reduce algorithm within a warp where all lanes are active |
2678 | | /// is implemented in the runtime as follows: |
2679 | | /// |
2680 | | /// full_warp_reduce(void *reduce_data, |
2681 | | /// kmp_ShuffleReductFctPtr ShuffleReduceFn) { |
2682 | | /// for (int offset = WARPSIZE/2; offset > 0; offset /= 2) |
2683 | | /// ShuffleReduceFn(reduce_data, 0, offset, 0); |
2684 | | /// } |
2685 | | /// |
2686 | | /// The algorithm completes in log(2, WARPSIZE) steps. |
2687 | | /// |
2688 | | /// 'ShuffleReduceFn' is called here with lane_id set to 0 because the lane_id |
2689 | | /// is not used by this algorithm; this saves the instructions needed to read |
2690 | | /// it from the corresponding special registers. The 4th parameter, which |
2691 | | /// represents the version of the algorithm being used, is set to 0 to |
2692 | | /// signify full warp reduction. |
2693 | | /// |
2694 | | /// In this version, 'ShuffleReduceFn' behaves, per element, as follows: |
2695 | | /// |
2696 | | /// #reduce_elem refers to an element in the local lane's data structure |
2697 | | /// #remote_elem is retrieved from a remote lane |
2698 | | /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE); |
2699 | | /// reduce_elem = reduce_elem REDUCE_OP remote_elem; |
2700 | | /// |
2701 | | /// Contiguous Partial Warp Reduction |
2702 | | /// |
2703 | | /// This reduce algorithm is used within a warp where only the first |
2704 | | /// 'n' (n <= WARPSIZE) lanes are active. It is typically used when the |
2705 | | /// number of OpenMP threads in a parallel region is not a multiple of |
2706 | | /// WARPSIZE. The algorithm is implemented in the runtime as follows: |
2707 | | /// |
2708 | | /// void |
2709 | | /// contiguous_partial_reduce(void *reduce_data, |
2710 | | /// kmp_ShuffleReductFctPtr ShuffleReduceFn, |
2711 | | /// int size, int lane_id) { |
2712 | | /// int curr_size; |
2713 | | /// int offset; |
2714 | | /// curr_size = size; |
2715 | | ///     offset = curr_size/2; |
2716 | | /// while (offset>0) { |
2717 | | /// ShuffleReduceFn(reduce_data, lane_id, offset, 1); |
2718 | | /// curr_size = (curr_size+1)/2; |
2719 | | /// offset = curr_size/2; |
2720 | | /// } |
2721 | | /// } |
2722 | | /// |
2723 | | /// In this version, 'ShuffleReduceFn' behaves, per element, as follows: |
2724 | | /// |
2725 | | /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE); |
2726 | | /// if (lane_id < offset) |
2727 | | /// reduce_elem = reduce_elem REDUCE_OP remote_elem |
2728 | | /// else |
2729 | | /// reduce_elem = remote_elem |
2730 | | /// |
2731 | | /// This algorithm assumes that the data to be reduced are located in a |
2732 | | /// contiguous subset of lanes starting from the first. When there is |
2733 | | /// an odd number of active lanes, the data in the last lane is not |
2734 | | /// aggregated with any other lane's data but is instead copied over. |
2735 | | /// |
2736 | | /// Dispersed Partial Warp Reduction |
2737 | | /// |
2738 | | /// This algorithm is used within a warp when any discontiguous subset of |
2739 | | /// lanes is active. It is used to implement the reduction operation |
2740 | | /// across lanes in an OpenMP simd region or in a nested parallel region. |
2741 | | /// |
2742 | | /// void |
2743 | | /// dispersed_partial_reduce(void *reduce_data, |
2744 | | /// kmp_ShuffleReductFctPtr ShuffleReduceFn) { |
2745 | | /// int size, remote_id; |
2746 | | /// int logical_lane_id = number_of_active_lanes_before_me() * 2; |
2747 | | /// do { |
2748 | | /// remote_id = next_active_lane_id_right_after_me(); |
2749 | | /// # the above function returns 0 if no active lane |
2750 | | /// # is present right after the current lane. |
2751 | | /// size = number_of_active_lanes_in_this_warp(); |
2752 | | /// logical_lane_id /= 2; |
2753 | | /// ShuffleReduceFn(reduce_data, logical_lane_id, |
2754 | | /// remote_id-1-threadIdx.x, 2); |
2755 | | /// } while (logical_lane_id % 2 == 0 && size > 1); |
2756 | | /// } |
2757 | | /// |
2758 | | /// There is no assumption made about the initial state of the reduction. |
2759 | | /// Any number of lanes (>=1) could be active at any position. The reduction |
2760 | | /// result is returned in the first active lane. |
2761 | | /// |
2762 | | /// In this version, 'ShuffleReduceFn' behaves, per element, as follows: |
2763 | | /// |
2764 | | /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE); |
2765 | | /// if (lane_id % 2 == 0 && offset > 0) |
2766 | | /// reduce_elem = reduce_elem REDUCE_OP remote_elem |
2767 | | /// else |
2768 | | /// reduce_elem = remote_elem |
2769 | | /// |
2770 | | /// |
2771 | | /// Intra-Team Reduction |
2772 | | /// |
2773 | | /// This function, as implemented in the runtime call |
2774 | | /// '__kmpc_nvptx_parallel_reduce_nowait_v2', aggregates data across OpenMP |
2775 | | /// threads in a team. It first reduces within a warp using the |
2776 | | /// aforementioned algorithms. We then proceed to gather all such |
2777 | | /// reduced values at the first warp. |
2778 | | /// |
2779 | | /// The runtime makes use of the function 'InterWarpCpyFn', which copies |
2780 | | /// data from each of the "warp masters" (the zeroth lane of each warp, where |
2781 | | /// warp-reduced data is held) to the zeroth warp. This step reduces (in |
2782 | | /// a mathematical sense) the problem of reduction across warp masters in |
2783 | | /// a block to the problem of warp reduction. |
2784 | | /// |
2785 | | /// |
2786 | | /// Inter-Team Reduction |
2787 | | /// |
2788 | | /// Once a team has reduced its data to a single value, it is stored in |
2789 | | /// a global scratchpad array. Since each team has a distinct slot, this |
2790 | | /// can be done without locking. |
2791 | | /// |
2792 | | /// The last team to write to the scratchpad array proceeds to reduce the |
2793 | | /// scratchpad array. One or more workers in the last team use the helper |
2794 | | /// 'loadAndReduceDataFn' to load and reduce values from the array, i.e., |
2795 | | /// the k'th worker reduces every k'th element. |
2796 | | /// |
2797 | | /// Finally, a call is made to '__kmpc_nvptx_parallel_reduce_nowait_v2' to |
2798 | | /// reduce across workers and compute a globally reduced value. |
2799 | | /// |
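Before the codegen entry point below, a small standalone sketch may help fix the "Full Warp Reduction" scheme described above. It is not the runtime's actual code; it assumes a single float operand, a 32-lane warp, and the CUDA __shfl_down_sync intrinsic that the shuffle step maps to on NVPTX:

  __device__ float warp_reduce_sum(float val) {
    // log2(WARPSIZE) steps; each step folds in a lane 'offset' positions away.
    for (int offset = 32 / 2; offset > 0; offset /= 2) {
      float remote = __shfl_down_sync(0xffffffffu, val, offset);
      val += remote;            // reduce_elem = reduce_elem REDUCE_OP remote_elem
    }
    return val;                 // lane 0 now holds the warp-reduced value
  }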
2800 | | void CGOpenMPRuntimeGPU::emitReduction( |
2801 | | CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates, |
2802 | | ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, |
2803 | 0 | ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) { |
2804 | 0 | if (!CGF.HaveInsertPoint()) |
2805 | 0 | return; |
2806 | | |
2807 | 0 | bool ParallelReduction = isOpenMPParallelDirective(Options.ReductionKind); |
2808 | 0 | #ifndef NDEBUG |
2809 | 0 | bool TeamsReduction = isOpenMPTeamsDirective(Options.ReductionKind); |
2810 | 0 | #endif |
2811 | |
2812 | 0 | if (Options.SimpleReduction) { |
2813 | 0 | assert(!TeamsReduction && !ParallelReduction && |
2814 | 0 | "Invalid reduction selection in emitReduction."); |
2815 | 0 | CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs, |
2816 | 0 | ReductionOps, Options); |
2817 | 0 | return; |
2818 | 0 | } |
2819 | | |
2820 | 0 | assert((TeamsReduction || ParallelReduction) && |
2821 | 0 | "Invalid reduction selection in emitReduction."); |
2822 | | |
2823 | 0 | llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> VarFieldMap; |
2824 | 0 | llvm::SmallVector<const ValueDecl *, 4> PrivatesReductions(Privates.size()); |
2825 | 0 | int Cnt = 0; |
2826 | 0 | for (const Expr *DRE : Privates) { |
2827 | 0 | PrivatesReductions[Cnt] = cast<DeclRefExpr>(DRE)->getDecl(); |
2828 | 0 | ++Cnt; |
2829 | 0 | } |
2830 | |
2831 | 0 | ASTContext &C = CGM.getContext(); |
2832 | 0 | const RecordDecl *ReductionRec = ::buildRecordForGlobalizedVars( |
2833 | 0 | CGM.getContext(), PrivatesReductions, std::nullopt, VarFieldMap, 1); |
2834 | | |
2835 | | // Build res = __kmpc_reduce{_nowait}(<gtid>, <n>, sizeof(RedList), |
2836 | | // RedList, shuffle_reduce_func, interwarp_copy_func); |
2837 | | // or |
2838 | | // Build res = __kmpc_reduce_teams_nowait_simple(<loc>, <gtid>, <lck>); |
2839 | 0 | llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc); |
2840 | |
2841 | 0 | llvm::Value *Res; |
2842 | | // 1. Build a list of reduction variables. |
2843 | | // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]}; |
2844 | 0 | auto Size = RHSExprs.size(); |
2845 | 0 | for (const Expr *E : Privates) { |
2846 | 0 | if (E->getType()->isVariablyModifiedType()) |
2847 | | // Reserve place for array size. |
2848 | 0 | ++Size; |
2849 | 0 | } |
2850 | 0 | llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size); |
2851 | 0 | QualType ReductionArrayTy = C.getConstantArrayType( |
2852 | 0 | C.VoidPtrTy, ArraySize, nullptr, ArraySizeModifier::Normal, |
2853 | 0 | /*IndexTypeQuals=*/0); |
2854 | 0 | Address ReductionList = |
2855 | 0 | CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list"); |
2856 | 0 | auto IPriv = Privates.begin(); |
2857 | 0 | unsigned Idx = 0; |
2858 | 0 | for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) { |
2859 | 0 | Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx); |
2860 | 0 | CGF.Builder.CreateStore( |
2861 | 0 | CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( |
2862 | 0 | CGF.EmitLValue(RHSExprs[I]).getPointer(CGF), CGF.VoidPtrTy), |
2863 | 0 | Elem); |
2864 | 0 | if ((*IPriv)->getType()->isVariablyModifiedType()) { |
2865 | | // Store array size. |
2866 | 0 | ++Idx; |
2867 | 0 | Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx); |
2868 | 0 | llvm::Value *Size = CGF.Builder.CreateIntCast( |
2869 | 0 | CGF.getVLASize( |
2870 | 0 | CGF.getContext().getAsVariableArrayType((*IPriv)->getType())) |
2871 | 0 | .NumElts, |
2872 | 0 | CGF.SizeTy, /*isSigned=*/false); |
2873 | 0 | CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy), |
2874 | 0 | Elem); |
2875 | 0 | } |
2876 | 0 | } |
2877 | |
2878 | 0 | llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( |
2879 | 0 | ReductionList.getPointer(), CGF.VoidPtrTy); |
2880 | 0 | llvm::Function *ReductionFn = emitReductionFunction( |
2881 | 0 | CGF.CurFn->getName(), Loc, CGF.ConvertTypeForMem(ReductionArrayTy), |
2882 | 0 | Privates, LHSExprs, RHSExprs, ReductionOps); |
2883 | 0 | llvm::Value *ReductionDataSize = |
2884 | 0 | CGF.getTypeSize(C.getRecordType(ReductionRec)); |
2885 | 0 | ReductionDataSize = |
2886 | 0 | CGF.Builder.CreateSExtOrTrunc(ReductionDataSize, CGF.Int64Ty); |
2887 | 0 | llvm::Function *ShuffleAndReduceFn = emitShuffleAndReduceFunction( |
2888 | 0 | CGM, Privates, ReductionArrayTy, ReductionFn, Loc); |
2889 | 0 | llvm::Value *InterWarpCopyFn = |
2890 | 0 | emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy, Loc); |
2891 | |
2892 | 0 | if (ParallelReduction) { |
2893 | 0 | llvm::Value *Args[] = {RTLoc, ReductionDataSize, RL, ShuffleAndReduceFn, |
2894 | 0 | InterWarpCopyFn}; |
2895 | |
2896 | 0 | Res = CGF.EmitRuntimeCall( |
2897 | 0 | OMPBuilder.getOrCreateRuntimeFunction( |
2898 | 0 | CGM.getModule(), OMPRTL___kmpc_nvptx_parallel_reduce_nowait_v2), |
2899 | 0 | Args); |
2900 | 0 | } else { |
2901 | 0 | assert(TeamsReduction && "expected teams reduction."); |
2902 | 0 | TeamsReductions.push_back(ReductionRec); |
2903 | 0 | auto *KernelTeamsReductionPtr = CGF.EmitRuntimeCall( |
2904 | 0 | OMPBuilder.getOrCreateRuntimeFunction( |
2905 | 0 | CGM.getModule(), OMPRTL___kmpc_reduction_get_fixed_buffer), |
2906 | 0 | {}, "_openmp_teams_reductions_buffer_$_$ptr"); |
2907 | 0 | llvm::Value *GlobalToBufferCpyFn = ::emitListToGlobalCopyFunction( |
2908 | 0 | CGM, Privates, ReductionArrayTy, Loc, ReductionRec, VarFieldMap); |
2909 | 0 | llvm::Value *GlobalToBufferRedFn = ::emitListToGlobalReduceFunction( |
2910 | 0 | CGM, Privates, ReductionArrayTy, Loc, ReductionRec, VarFieldMap, |
2911 | 0 | ReductionFn); |
2912 | 0 | llvm::Value *BufferToGlobalCpyFn = ::emitGlobalToListCopyFunction( |
2913 | 0 | CGM, Privates, ReductionArrayTy, Loc, ReductionRec, VarFieldMap); |
2914 | 0 | llvm::Value *BufferToGlobalRedFn = ::emitGlobalToListReduceFunction( |
2915 | 0 | CGM, Privates, ReductionArrayTy, Loc, ReductionRec, VarFieldMap, |
2916 | 0 | ReductionFn); |
2917 | |
2918 | 0 | llvm::Value *Args[] = { |
2919 | 0 | RTLoc, |
2920 | 0 | KernelTeamsReductionPtr, |
2921 | 0 | CGF.Builder.getInt32(C.getLangOpts().OpenMPCUDAReductionBufNum), |
2922 | 0 | ReductionDataSize, |
2923 | 0 | RL, |
2924 | 0 | ShuffleAndReduceFn, |
2925 | 0 | InterWarpCopyFn, |
2926 | 0 | GlobalToBufferCpyFn, |
2927 | 0 | GlobalToBufferRedFn, |
2928 | 0 | BufferToGlobalCpyFn, |
2929 | 0 | BufferToGlobalRedFn}; |
2930 | |
2931 | 0 | Res = CGF.EmitRuntimeCall( |
2932 | 0 | OMPBuilder.getOrCreateRuntimeFunction( |
2933 | 0 | CGM.getModule(), OMPRTL___kmpc_nvptx_teams_reduce_nowait_v2), |
2934 | 0 | Args); |
2935 | 0 | } |
2936 | | |
2937 | | // 5. Build if (res == 1) |
2938 | 0 | llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.reduction.done"); |
2939 | 0 | llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.then"); |
2940 | 0 | llvm::Value *Cond = CGF.Builder.CreateICmpEQ( |
2941 | 0 | Res, llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1)); |
2942 | 0 | CGF.Builder.CreateCondBr(Cond, ThenBB, ExitBB); |
2943 | | |
2944 | | // 6. Build then branch: where we have reduced values in the master |
2945 | | // thread in each team. |
2946 | | // __kmpc_end_reduce{_nowait}(<gtid>); |
2947 | | // break; |
2948 | 0 | CGF.EmitBlock(ThenBB); |
2949 | | |
2950 | | // Add emission of __kmpc_end_reduce{_nowait}(<gtid>); |
2951 | 0 | auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps, |
2952 | 0 | this](CodeGenFunction &CGF, PrePostActionTy &Action) { |
2953 | 0 | auto IPriv = Privates.begin(); |
2954 | 0 | auto ILHS = LHSExprs.begin(); |
2955 | 0 | auto IRHS = RHSExprs.begin(); |
2956 | 0 | for (const Expr *E : ReductionOps) { |
2957 | 0 | emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS), |
2958 | 0 | cast<DeclRefExpr>(*IRHS)); |
2959 | 0 | ++IPriv; |
2960 | 0 | ++ILHS; |
2961 | 0 | ++IRHS; |
2962 | 0 | } |
2963 | 0 | }; |
2964 | 0 | RegionCodeGenTy RCG(CodeGen); |
2965 | 0 | RCG(CGF); |
2966 | | // There is no need to emit line number for unconditional branch. |
2967 | 0 | (void)ApplyDebugLocation::CreateEmpty(CGF); |
2968 | 0 | CGF.EmitBlock(ExitBB, /*IsFinished=*/true); |
2969 | 0 | } |
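Schematically, and in terms of the earlier float/double example, the parallel-reduction path of emitReduction produces code along these lines. This is pseudo-C over the emitted IR; the symbol names denote conceptual roles rather than the exact emitted names:

  void *red_list[2];
  red_list[0] = &foo_rhs;         // private copy bound to reduction(+:foo)
  red_list[1] = &bar_rhs;         // private copy bound to reduction(*:bar)

  int res = __kmpc_nvptx_parallel_reduce_nowait_v2(
      loc, /*reduce_data_size=*/16, red_list,
      shuffle_and_reduce_fn, inter_warp_copy_fn);

  if (res == 1) {                 // only the team master sees res == 1
    foo += foo_rhs;               // emitSingleReductionCombiner, one per clause
    bar *= bar_rhs;
  }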
2970 | | |
2971 | | const VarDecl * |
2972 | | CGOpenMPRuntimeGPU::translateParameter(const FieldDecl *FD, |
2973 | 0 | const VarDecl *NativeParam) const { |
2974 | 0 | if (!NativeParam->getType()->isReferenceType()) |
2975 | 0 | return NativeParam; |
2976 | 0 | QualType ArgType = NativeParam->getType(); |
2977 | 0 | QualifierCollector QC; |
2978 | 0 | const Type *NonQualTy = QC.strip(ArgType); |
2979 | 0 | QualType PointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType(); |
2980 | 0 | if (const auto *Attr = FD->getAttr<OMPCaptureKindAttr>()) { |
2981 | 0 | if (Attr->getCaptureKind() == OMPC_map) { |
2982 | 0 | PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy, |
2983 | 0 | LangAS::opencl_global); |
2984 | 0 | } |
2985 | 0 | } |
2986 | 0 | ArgType = CGM.getContext().getPointerType(PointeeTy); |
2987 | 0 | QC.addRestrict(); |
2988 | 0 | enum { NVPTX_local_addr = 5 }; |
2989 | 0 | QC.addAddressSpace(getLangASFromTargetAS(NVPTX_local_addr)); |
2990 | 0 | ArgType = QC.apply(CGM.getContext(), ArgType); |
2991 | 0 | if (isa<ImplicitParamDecl>(NativeParam)) |
2992 | 0 | return ImplicitParamDecl::Create( |
2993 | 0 | CGM.getContext(), /*DC=*/nullptr, NativeParam->getLocation(), |
2994 | 0 | NativeParam->getIdentifier(), ArgType, ImplicitParamKind::Other); |
2995 | 0 | return ParmVarDecl::Create( |
2996 | 0 | CGM.getContext(), |
2997 | 0 | const_cast<DeclContext *>(NativeParam->getDeclContext()), |
2998 | 0 | NativeParam->getBeginLoc(), NativeParam->getLocation(), |
2999 | 0 | NativeParam->getIdentifier(), ArgType, |
3000 | 0 | /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr); |
3001 | 0 | } |
3002 | | |
3003 | | Address |
3004 | | CGOpenMPRuntimeGPU::getParameterAddress(CodeGenFunction &CGF, |
3005 | | const VarDecl *NativeParam, |
3006 | 0 | const VarDecl *TargetParam) const { |
3007 | 0 | assert(NativeParam != TargetParam && |
3008 | 0 | NativeParam->getType()->isReferenceType() && |
3009 | 0 | "Native arg must not be the same as target arg."); |
3010 | 0 | Address LocalAddr = CGF.GetAddrOfLocalVar(TargetParam); |
3011 | 0 | QualType NativeParamType = NativeParam->getType(); |
3012 | 0 | QualifierCollector QC; |
3013 | 0 | const Type *NonQualTy = QC.strip(NativeParamType); |
3014 | 0 | QualType NativePointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType(); |
3015 | 0 | unsigned NativePointeeAddrSpace = |
3016 | 0 | CGF.getTypes().getTargetAddressSpace(NativePointeeTy); |
3017 | 0 | QualType TargetTy = TargetParam->getType(); |
3018 | 0 | llvm::Value *TargetAddr = CGF.EmitLoadOfScalar(LocalAddr, /*Volatile=*/false, |
3019 | 0 | TargetTy, SourceLocation()); |
3020 | | // Cast to native address space. |
3021 | 0 | TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( |
3022 | 0 | TargetAddr, |
3023 | 0 | llvm::PointerType::get(CGF.getLLVMContext(), NativePointeeAddrSpace)); |
3024 | 0 | Address NativeParamAddr = CGF.CreateMemTemp(NativeParamType); |
3025 | 0 | CGF.EmitStoreOfScalar(TargetAddr, NativeParamAddr, /*Volatile=*/false, |
3026 | 0 | NativeParamType); |
3027 | 0 | return NativeParamAddr; |
3028 | 0 | } |
3029 | | |
3030 | | void CGOpenMPRuntimeGPU::emitOutlinedFunctionCall( |
3031 | | CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn, |
3032 | 0 | ArrayRef<llvm::Value *> Args) const { |
3033 | 0 | SmallVector<llvm::Value *, 4> TargetArgs; |
3034 | 0 | TargetArgs.reserve(Args.size()); |
3035 | 0 | auto *FnType = OutlinedFn.getFunctionType(); |
3036 | 0 | for (unsigned I = 0, E = Args.size(); I < E; ++I) { |
3037 | 0 | if (FnType->isVarArg() && FnType->getNumParams() <= I) { |
3038 | 0 | TargetArgs.append(std::next(Args.begin(), I), Args.end()); |
3039 | 0 | break; |
3040 | 0 | } |
3041 | 0 | llvm::Type *TargetType = FnType->getParamType(I); |
3042 | 0 | llvm::Value *NativeArg = Args[I]; |
3043 | 0 | if (!TargetType->isPointerTy()) { |
3044 | 0 | TargetArgs.emplace_back(NativeArg); |
3045 | 0 | continue; |
3046 | 0 | } |
3047 | 0 | TargetArgs.emplace_back( |
3048 | 0 | CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(NativeArg, TargetType)); |
3049 | 0 | } |
3050 | 0 | CGOpenMPRuntime::emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, TargetArgs); |
3051 | 0 | } |
3052 | | |
3053 | | /// Emit function which wraps the outlined parallel region |
3054 | | /// and controls the arguments which are passed to this function. |
3055 | | /// The wrapper ensures that the outlined function is called |
3056 | | /// with the correct arguments when data is shared. |
3057 | | llvm::Function *CGOpenMPRuntimeGPU::createParallelDataSharingWrapper( |
3058 | 0 | llvm::Function *OutlinedParallelFn, const OMPExecutableDirective &D) { |
3059 | 0 | ASTContext &Ctx = CGM.getContext(); |
3060 | 0 | const auto &CS = *D.getCapturedStmt(OMPD_parallel); |
3061 | | |
3062 | | // Create a function that takes as argument the source thread. |
3063 | 0 | FunctionArgList WrapperArgs; |
3064 | 0 | QualType Int16QTy = |
3065 | 0 | Ctx.getIntTypeForBitwidth(/*DestWidth=*/16, /*Signed=*/false); |
3066 | 0 | QualType Int32QTy = |
3067 | 0 | Ctx.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false); |
3068 | 0 | ImplicitParamDecl ParallelLevelArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(), |
3069 | 0 | /*Id=*/nullptr, Int16QTy, |
3070 | 0 | ImplicitParamKind::Other); |
3071 | 0 | ImplicitParamDecl WrapperArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(), |
3072 | 0 | /*Id=*/nullptr, Int32QTy, |
3073 | 0 | ImplicitParamKind::Other); |
3074 | 0 | WrapperArgs.emplace_back(&ParallelLevelArg); |
3075 | 0 | WrapperArgs.emplace_back(&WrapperArg); |
3076 | |
3077 | 0 | const CGFunctionInfo &CGFI = |
3078 | 0 | CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, WrapperArgs); |
3079 | |
3080 | 0 | auto *Fn = llvm::Function::Create( |
3081 | 0 | CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage, |
3082 | 0 | Twine(OutlinedParallelFn->getName(), "_wrapper"), &CGM.getModule()); |
3083 | | |
3084 | | // Ensure we do not inline the function. This is trivially true for the ones |
3085 | | // passed to __kmpc_fork_call but the ones called in serialized regions |
3086 | | // could be inlined. This is not perfect, but it is closer to the invariant |
3087 | | // we want, namely, every data environment starts with a new function. |
3088 | | // TODO: We should pass the if condition to the runtime function and do the |
3089 | | // handling there. Much cleaner code. |
3090 | 0 | Fn->addFnAttr(llvm::Attribute::NoInline); |
3091 | |
3092 | 0 | CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI); |
3093 | 0 | Fn->setLinkage(llvm::GlobalValue::InternalLinkage); |
3094 | 0 | Fn->setDoesNotRecurse(); |
3095 | |
3096 | 0 | CodeGenFunction CGF(CGM, /*suppressNewContext=*/true); |
3097 | 0 | CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, Fn, CGFI, WrapperArgs, |
3098 | 0 | D.getBeginLoc(), D.getBeginLoc()); |
3099 | |
3100 | 0 | const auto *RD = CS.getCapturedRecordDecl(); |
3101 | 0 | auto CurField = RD->field_begin(); |
3102 | |
3103 | 0 | Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty, |
3104 | 0 | /*Name=*/".zero.addr"); |
3105 | 0 | CGF.Builder.CreateStore(CGF.Builder.getInt32(/*C*/ 0), ZeroAddr); |
3106 | | // Get the array of arguments. |
3107 | 0 | SmallVector<llvm::Value *, 8> Args; |
3108 | |
3109 | 0 | Args.emplace_back(CGF.GetAddrOfLocalVar(&WrapperArg).getPointer()); |
3110 | 0 | Args.emplace_back(ZeroAddr.getPointer()); |
3111 | |
3112 | 0 | CGBuilderTy &Bld = CGF.Builder; |
3113 | 0 | auto CI = CS.capture_begin(); |
3114 | | |
3115 | | // Use global memory for data sharing. |
3116 | | // Handle passing of global args to workers. |
3117 | 0 | Address GlobalArgs = |
3118 | 0 | CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "global_args"); |
3119 | 0 | llvm::Value *GlobalArgsPtr = GlobalArgs.getPointer(); |
3120 | 0 | llvm::Value *DataSharingArgs[] = {GlobalArgsPtr}; |
3121 | 0 | CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( |
3122 | 0 | CGM.getModule(), OMPRTL___kmpc_get_shared_variables), |
3123 | 0 | DataSharingArgs); |
3124 | | |
3125 | | // Retrieve the shared variables from the list of references returned |
3126 | | // by the runtime. Pass the variables to the outlined function. |
3127 | 0 | Address SharedArgListAddress = Address::invalid(); |
3128 | 0 | if (CS.capture_size() > 0 || |
3129 | 0 | isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) { |
3130 | 0 | SharedArgListAddress = CGF.EmitLoadOfPointer( |
3131 | 0 | GlobalArgs, CGF.getContext() |
3132 | 0 | .getPointerType(CGF.getContext().VoidPtrTy) |
3133 | 0 | .castAs<PointerType>()); |
3134 | 0 | } |
3135 | 0 | unsigned Idx = 0; |
3136 | 0 | if (isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) { |
3137 | 0 | Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx); |
3138 | 0 | Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast( |
3139 | 0 | Src, CGF.SizeTy->getPointerTo(), CGF.SizeTy); |
3140 | 0 | llvm::Value *LB = CGF.EmitLoadOfScalar( |
3141 | 0 | TypedAddress, |
3142 | 0 | /*Volatile=*/false, |
3143 | 0 | CGF.getContext().getPointerType(CGF.getContext().getSizeType()), |
3144 | 0 | cast<OMPLoopDirective>(D).getLowerBoundVariable()->getExprLoc()); |
3145 | 0 | Args.emplace_back(LB); |
3146 | 0 | ++Idx; |
3147 | 0 | Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx); |
3148 | 0 | TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast( |
3149 | 0 | Src, CGF.SizeTy->getPointerTo(), CGF.SizeTy); |
3150 | 0 | llvm::Value *UB = CGF.EmitLoadOfScalar( |
3151 | 0 | TypedAddress, |
3152 | 0 | /*Volatile=*/false, |
3153 | 0 | CGF.getContext().getPointerType(CGF.getContext().getSizeType()), |
3154 | 0 | cast<OMPLoopDirective>(D).getUpperBoundVariable()->getExprLoc()); |
3155 | 0 | Args.emplace_back(UB); |
3156 | 0 | ++Idx; |
3157 | 0 | } |
3158 | 0 | if (CS.capture_size() > 0) { |
3159 | 0 | ASTContext &CGFContext = CGF.getContext(); |
3160 | 0 | for (unsigned I = 0, E = CS.capture_size(); I < E; ++I, ++CI, ++CurField) { |
3161 | 0 | QualType ElemTy = CurField->getType(); |
3162 | 0 | Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, I + Idx); |
3163 | 0 | Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast( |
3164 | 0 | Src, CGF.ConvertTypeForMem(CGFContext.getPointerType(ElemTy)), |
3165 | 0 | CGF.ConvertTypeForMem(ElemTy)); |
3166 | 0 | llvm::Value *Arg = CGF.EmitLoadOfScalar(TypedAddress, |
3167 | 0 | /*Volatile=*/false, |
3168 | 0 | CGFContext.getPointerType(ElemTy), |
3169 | 0 | CI->getLocation()); |
3170 | 0 | if (CI->capturesVariableByCopy() && |
3171 | 0 | !CI->getCapturedVar()->getType()->isAnyPointerType()) { |
3172 | 0 | Arg = castValueToType(CGF, Arg, ElemTy, CGFContext.getUIntPtrType(), |
3173 | 0 | CI->getLocation()); |
3174 | 0 | } |
3175 | 0 | Args.emplace_back(Arg); |
3176 | 0 | } |
3177 | 0 | } |
3178 | |
3179 | 0 | emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedParallelFn, Args); |
3180 | 0 | CGF.FinishFunction(); |
3181 | 0 | return Fn; |
3182 | 0 | } |
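In rough C form, and assuming two captured variables of hypothetical types, the wrapper emitted above looks like the sketch below: it retrieves the shared-argument list published by the master thread and forwards one pointer per capture to the real outlined function. Names are illustrative:

  void outlined_parallel_fn_wrapper(short parallel_level, int thread_id) {
    int zero = 0;
    void **global_args;
    __kmpc_get_shared_variables(&global_args);     // addresses shared by the master
    int    *captured_a = (int *)global_args[0];    // hypothetical captures, in order
    double *captured_b = (double *)global_args[1];
    outlined_parallel_fn(&thread_id, &zero, captured_a, captured_b);
  }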
3183 | | |
3184 | | void CGOpenMPRuntimeGPU::emitFunctionProlog(CodeGenFunction &CGF, |
3185 | 0 | const Decl *D) { |
3186 | 0 | if (getDataSharingMode() != CGOpenMPRuntimeGPU::DS_Generic) |
3187 | 0 | return; |
3188 | | |
3189 | 0 | assert(D && "Expected function or captured|block decl."); |
3190 | 0 | assert(FunctionGlobalizedDecls.count(CGF.CurFn) == 0 && |
3191 | 0 | "Function is registered already."); |
3192 | 0 | assert((!TeamAndReductions.first || TeamAndReductions.first == D) && |
3193 | 0 | "Team is set but not processed."); |
3194 | 0 | const Stmt *Body = nullptr; |
3195 | 0 | bool NeedToDelayGlobalization = false; |
3196 | 0 | if (const auto *FD = dyn_cast<FunctionDecl>(D)) { |
3197 | 0 | Body = FD->getBody(); |
3198 | 0 | } else if (const auto *BD = dyn_cast<BlockDecl>(D)) { |
3199 | 0 | Body = BD->getBody(); |
3200 | 0 | } else if (const auto *CD = dyn_cast<CapturedDecl>(D)) { |
3201 | 0 | Body = CD->getBody(); |
3202 | 0 | NeedToDelayGlobalization = CGF.CapturedStmtInfo->getKind() == CR_OpenMP; |
3203 | 0 | if (NeedToDelayGlobalization && |
3204 | 0 | getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD) |
3205 | 0 | return; |
3206 | 0 | } |
3207 | 0 | if (!Body) |
3208 | 0 | return; |
3209 | 0 | CheckVarsEscapingDeclContext VarChecker(CGF, TeamAndReductions.second); |
3210 | 0 | VarChecker.Visit(Body); |
3211 | 0 | const RecordDecl *GlobalizedVarsRecord = |
3212 | 0 | VarChecker.getGlobalizedRecord(IsInTTDRegion); |
3213 | 0 | TeamAndReductions.first = nullptr; |
3214 | 0 | TeamAndReductions.second.clear(); |
3215 | 0 | ArrayRef<const ValueDecl *> EscapedVariableLengthDecls = |
3216 | 0 | VarChecker.getEscapedVariableLengthDecls(); |
3217 | 0 | ArrayRef<const ValueDecl *> DelayedVariableLengthDecls = |
3218 | 0 | VarChecker.getDelayedVariableLengthDecls(); |
3219 | 0 | if (!GlobalizedVarsRecord && EscapedVariableLengthDecls.empty() && |
3220 | 0 | DelayedVariableLengthDecls.empty()) |
3221 | 0 | return; |
3222 | 0 | auto I = FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first; |
3223 | 0 | I->getSecond().MappedParams = |
3224 | 0 | std::make_unique<CodeGenFunction::OMPMapVars>(); |
3225 | 0 | I->getSecond().EscapedParameters.insert( |
3226 | 0 | VarChecker.getEscapedParameters().begin(), |
3227 | 0 | VarChecker.getEscapedParameters().end()); |
3228 | 0 | I->getSecond().EscapedVariableLengthDecls.append( |
3229 | 0 | EscapedVariableLengthDecls.begin(), EscapedVariableLengthDecls.end()); |
3230 | 0 | I->getSecond().DelayedVariableLengthDecls.append( |
3231 | 0 | DelayedVariableLengthDecls.begin(), DelayedVariableLengthDecls.end()); |
3232 | 0 | DeclToAddrMapTy &Data = I->getSecond().LocalVarData; |
3233 | 0 | for (const ValueDecl *VD : VarChecker.getEscapedDecls()) { |
3234 | 0 | assert(VD->isCanonicalDecl() && "Expected canonical declaration"); |
3235 | 0 | Data.insert(std::make_pair(VD, MappedVarData())); |
3236 | 0 | } |
3237 | 0 | if (!NeedToDelayGlobalization) { |
3238 | 0 | emitGenericVarsProlog(CGF, D->getBeginLoc()); |
3239 | 0 | struct GlobalizationScope final : EHScopeStack::Cleanup { |
3240 | 0 | GlobalizationScope() = default; |
3241 | |
3242 | 0 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
3243 | 0 | static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime()) |
3244 | 0 | .emitGenericVarsEpilog(CGF); |
3245 | 0 | } |
3246 | 0 | }; |
3247 | 0 | CGF.EHStack.pushCleanup<GlobalizationScope>(NormalAndEHCleanup); |
3248 | 0 | } |
3249 | 0 | } |
3250 | | |
3251 | | Address CGOpenMPRuntimeGPU::getAddressOfLocalVariable(CodeGenFunction &CGF, |
3252 | 0 | const VarDecl *VD) { |
3253 | 0 | if (VD && VD->hasAttr<OMPAllocateDeclAttr>()) { |
3254 | 0 | const auto *A = VD->getAttr<OMPAllocateDeclAttr>(); |
3255 | 0 | auto AS = LangAS::Default; |
3256 | 0 | switch (A->getAllocatorType()) { |
3257 | | // Use the default allocator here as by default local vars are |
3258 | | // threadlocal. |
3259 | 0 | case OMPAllocateDeclAttr::OMPNullMemAlloc: |
3260 | 0 | case OMPAllocateDeclAttr::OMPDefaultMemAlloc: |
3261 | 0 | case OMPAllocateDeclAttr::OMPThreadMemAlloc: |
3262 | 0 | case OMPAllocateDeclAttr::OMPHighBWMemAlloc: |
3263 | 0 | case OMPAllocateDeclAttr::OMPLowLatMemAlloc: |
3264 | | // Follow the user decision - use default allocation. |
3265 | 0 | return Address::invalid(); |
3266 | 0 | case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc: |
3267 | | // TODO: implement support for user-defined allocators. |
3268 | 0 | return Address::invalid(); |
3269 | 0 | case OMPAllocateDeclAttr::OMPConstMemAlloc: |
3270 | 0 | AS = LangAS::cuda_constant; |
3271 | 0 | break; |
3272 | 0 | case OMPAllocateDeclAttr::OMPPTeamMemAlloc: |
3273 | 0 | AS = LangAS::cuda_shared; |
3274 | 0 | break; |
3275 | 0 | case OMPAllocateDeclAttr::OMPLargeCapMemAlloc: |
3276 | 0 | case OMPAllocateDeclAttr::OMPCGroupMemAlloc: |
3277 | 0 | break; |
3278 | 0 | } |
3279 | 0 | llvm::Type *VarTy = CGF.ConvertTypeForMem(VD->getType()); |
3280 | 0 | auto *GV = new llvm::GlobalVariable( |
3281 | 0 | CGM.getModule(), VarTy, /*isConstant=*/false, |
3282 | 0 | llvm::GlobalValue::InternalLinkage, llvm::PoisonValue::get(VarTy), |
3283 | 0 | VD->getName(), |
3284 | 0 | /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal, |
3285 | 0 | CGM.getContext().getTargetAddressSpace(AS)); |
3286 | 0 | CharUnits Align = CGM.getContext().getDeclAlign(VD); |
3287 | 0 | GV->setAlignment(Align.getAsAlign()); |
3288 | 0 | return Address( |
3289 | 0 | CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( |
3290 | 0 | GV, VarTy->getPointerTo(CGM.getContext().getTargetAddressSpace( |
3291 | 0 | VD->getType().getAddressSpace()))), |
3292 | 0 | VarTy, Align); |
3293 | 0 | } |
3294 | | |
3295 | 0 | if (getDataSharingMode() != CGOpenMPRuntimeGPU::DS_Generic) |
3296 | 0 | return Address::invalid(); |
3297 | | |
3298 | 0 | VD = VD->getCanonicalDecl(); |
3299 | 0 | auto I = FunctionGlobalizedDecls.find(CGF.CurFn); |
3300 | 0 | if (I == FunctionGlobalizedDecls.end()) |
3301 | 0 | return Address::invalid(); |
3302 | 0 | auto VDI = I->getSecond().LocalVarData.find(VD); |
3303 | 0 | if (VDI != I->getSecond().LocalVarData.end()) |
3304 | 0 | return VDI->second.PrivateAddr; |
3305 | 0 | if (VD->hasAttrs()) { |
3306 | 0 | for (specific_attr_iterator<OMPReferencedVarAttr> IT(VD->attr_begin()), |
3307 | 0 | E(VD->attr_end()); |
3308 | 0 | IT != E; ++IT) { |
3309 | 0 | auto VDI = I->getSecond().LocalVarData.find( |
3310 | 0 | cast<VarDecl>(cast<DeclRefExpr>(IT->getRef())->getDecl()) |
3311 | 0 | ->getCanonicalDecl()); |
3312 | 0 | if (VDI != I->getSecond().LocalVarData.end()) |
3313 | 0 | return VDI->second.PrivateAddr; |
3314 | 0 | } |
3315 | 0 | } |
3316 | | |
3317 | 0 | return Address::invalid(); |
3318 | 0 | } |
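For context, the allocator-to-address-space mapping in the switch above corresponds to OpenMP source of roughly the following shape. The variable names are made up, but omp_const_mem_alloc and omp_pteam_mem_alloc are the predefined allocators that are mapped here to constant and shared memory respectively:

  void device_routine() {
    int lookup[16];
  #pragma omp allocate(lookup) allocator(omp_const_mem_alloc)   // -> cuda_constant
    int scratch[32];
  #pragma omp allocate(scratch) allocator(omp_pteam_mem_alloc)  // -> cuda_shared
    // ... uses of lookup and scratch ...
  }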
3319 | | |
3320 | 0 | void CGOpenMPRuntimeGPU::functionFinished(CodeGenFunction &CGF) { |
3321 | 0 | FunctionGlobalizedDecls.erase(CGF.CurFn); |
3322 | 0 | CGOpenMPRuntime::functionFinished(CGF); |
3323 | 0 | } |
3324 | | |
3325 | | void CGOpenMPRuntimeGPU::getDefaultDistScheduleAndChunk( |
3326 | | CodeGenFunction &CGF, const OMPLoopDirective &S, |
3327 | | OpenMPDistScheduleClauseKind &ScheduleKind, |
3328 | 0 | llvm::Value *&Chunk) const { |
3329 | 0 | auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime()); |
3330 | 0 | if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD) { |
3331 | 0 | ScheduleKind = OMPC_DIST_SCHEDULE_static; |
3332 | 0 | Chunk = CGF.EmitScalarConversion( |
3333 | 0 | RT.getGPUNumThreads(CGF), |
3334 | 0 | CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0), |
3335 | 0 | S.getIterationVariable()->getType(), S.getBeginLoc()); |
3336 | 0 | return; |
3337 | 0 | } |
3338 | 0 | CGOpenMPRuntime::getDefaultDistScheduleAndChunk( |
3339 | 0 | CGF, S, ScheduleKind, Chunk); |
3340 | 0 | } |
3341 | | |
3342 | | void CGOpenMPRuntimeGPU::getDefaultScheduleAndChunk( |
3343 | | CodeGenFunction &CGF, const OMPLoopDirective &S, |
3344 | | OpenMPScheduleClauseKind &ScheduleKind, |
3345 | 0 | const Expr *&ChunkExpr) const { |
3346 | 0 | ScheduleKind = OMPC_SCHEDULE_static; |
3347 | | // Chunk size is 1 in this case. |
3348 | 0 | llvm::APInt ChunkSize(32, 1); |
3349 | 0 | ChunkExpr = IntegerLiteral::Create(CGF.getContext(), ChunkSize, |
3350 | 0 | CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0), |
3351 | 0 | SourceLocation()); |
3352 | 0 | } |
3353 | | |
3354 | | void CGOpenMPRuntimeGPU::adjustTargetSpecificDataForLambdas( |
3355 | 0 | CodeGenFunction &CGF, const OMPExecutableDirective &D) const { |
3356 | 0 | assert(isOpenMPTargetExecutionDirective(D.getDirectiveKind()) && |
3357 | 0 | " Expected target-based directive."); |
3358 | 0 | const CapturedStmt *CS = D.getCapturedStmt(OMPD_target); |
3359 | 0 | for (const CapturedStmt::Capture &C : CS->captures()) { |
3360 | | // Capture variables captured by reference in lambdas for target-based |
3361 | | // directives. |
3362 | 0 | if (!C.capturesVariable()) |
3363 | 0 | continue; |
3364 | 0 | const VarDecl *VD = C.getCapturedVar(); |
3365 | 0 | const auto *RD = VD->getType() |
3366 | 0 | .getCanonicalType() |
3367 | 0 | .getNonReferenceType() |
3368 | 0 | ->getAsCXXRecordDecl(); |
3369 | 0 | if (!RD || !RD->isLambda()) |
3370 | 0 | continue; |
3371 | 0 | Address VDAddr = CGF.GetAddrOfLocalVar(VD); |
3372 | 0 | LValue VDLVal; |
3373 | 0 | if (VD->getType().getCanonicalType()->isReferenceType()) |
3374 | 0 | VDLVal = CGF.EmitLoadOfReferenceLValue(VDAddr, VD->getType()); |
3375 | 0 | else |
3376 | 0 | VDLVal = CGF.MakeAddrLValue( |
3377 | 0 | VDAddr, VD->getType().getCanonicalType().getNonReferenceType()); |
3378 | 0 | llvm::DenseMap<const ValueDecl *, FieldDecl *> Captures; |
3379 | 0 | FieldDecl *ThisCapture = nullptr; |
3380 | 0 | RD->getCaptureFields(Captures, ThisCapture); |
3381 | 0 | if (ThisCapture && CGF.CapturedStmtInfo->isCXXThisExprCaptured()) { |
3382 | 0 | LValue ThisLVal = |
3383 | 0 | CGF.EmitLValueForFieldInitialization(VDLVal, ThisCapture); |
3384 | 0 | llvm::Value *CXXThis = CGF.LoadCXXThis(); |
3385 | 0 | CGF.EmitStoreOfScalar(CXXThis, ThisLVal); |
3386 | 0 | } |
3387 | 0 | for (const LambdaCapture &LC : RD->captures()) { |
3388 | 0 | if (LC.getCaptureKind() != LCK_ByRef) |
3389 | 0 | continue; |
3390 | 0 | const ValueDecl *VD = LC.getCapturedVar(); |
3391 | | // FIXME: For now VD is always a VarDecl because OpenMP does not support |
3392 | | // capturing structured bindings in lambdas yet. |
3393 | 0 | if (!CS->capturesVariable(cast<VarDecl>(VD))) |
3394 | 0 | continue; |
3395 | 0 | auto It = Captures.find(VD); |
3396 | 0 | assert(It != Captures.end() && "Found lambda capture without field."); |
3397 | 0 | LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second); |
3398 | 0 | Address VDAddr = CGF.GetAddrOfLocalVar(cast<VarDecl>(VD)); |
3399 | 0 | if (VD->getType().getCanonicalType()->isReferenceType()) |
3400 | 0 | VDAddr = CGF.EmitLoadOfReferenceLValue(VDAddr, |
3401 | 0 | VD->getType().getCanonicalType()) |
3402 | 0 | .getAddress(CGF); |
3403 | 0 | CGF.EmitStoreOfScalar(VDAddr.getPointer(), VarLVal); |
3404 | 0 | } |
3405 | 0 | } |
3406 | 0 | } |
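As a point of reference, the situation this routine patches up arises from source of roughly the following shape, where a lambda capturing locals by reference is invoked inside a target region and its captured references must be redirected to the device copies. This is a hedged sketch, not a test case from the repository:

  void run(int *a, int n) {
    int sum = 0;
    auto body = [&](int i) { sum += a[i]; };   // captures 'sum' and 'a' by reference
  #pragma omp target map(tofrom : sum) map(to : a[:n])
    for (int i = 0; i < n; ++i)
      body(i);                                 // captured references must point at
  }                                            // the device copies of 'sum' and 'a'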
3407 | | |
3408 | | bool CGOpenMPRuntimeGPU::hasAllocateAttributeForGlobalVar(const VarDecl *VD, |
3409 | 0 | LangAS &AS) { |
3410 | 0 | if (!VD || !VD->hasAttr<OMPAllocateDeclAttr>()) |
3411 | 0 | return false; |
3412 | 0 | const auto *A = VD->getAttr<OMPAllocateDeclAttr>(); |
3413 | 0 | switch(A->getAllocatorType()) { |
3414 | 0 | case OMPAllocateDeclAttr::OMPNullMemAlloc: |
3415 | 0 | case OMPAllocateDeclAttr::OMPDefaultMemAlloc: |
3416 | | // Not supported, fallback to the default mem space. |
3417 | 0 | case OMPAllocateDeclAttr::OMPThreadMemAlloc: |
3418 | 0 | case OMPAllocateDeclAttr::OMPLargeCapMemAlloc: |
3419 | 0 | case OMPAllocateDeclAttr::OMPCGroupMemAlloc: |
3420 | 0 | case OMPAllocateDeclAttr::OMPHighBWMemAlloc: |
3421 | 0 | case OMPAllocateDeclAttr::OMPLowLatMemAlloc: |
3422 | 0 | AS = LangAS::Default; |
3423 | 0 | return true; |
3424 | 0 | case OMPAllocateDeclAttr::OMPConstMemAlloc: |
3425 | 0 | AS = LangAS::cuda_constant; |
3426 | 0 | return true; |
3427 | 0 | case OMPAllocateDeclAttr::OMPPTeamMemAlloc: |
3428 | 0 | AS = LangAS::cuda_shared; |
3429 | 0 | return true; |
3430 | 0 | case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc: |
3431 | 0 | llvm_unreachable("Expected predefined allocator for the variables with the " |
3432 | 0 | "static storage."); |
3433 | 0 | } |
3434 | 0 | return false; |
3435 | 0 | } |
3436 | | |
3437 | | // Get current CudaArch and ignore any unknown values |
3438 | 0 | static CudaArch getCudaArch(CodeGenModule &CGM) { |
3439 | 0 | if (!CGM.getTarget().hasFeature("ptx")) |
3440 | 0 | return CudaArch::UNKNOWN; |
3441 | 0 | for (const auto &Feature : CGM.getTarget().getTargetOpts().FeatureMap) { |
3442 | 0 | if (Feature.getValue()) { |
3443 | 0 | CudaArch Arch = StringToCudaArch(Feature.getKey()); |
3444 | 0 | if (Arch != CudaArch::UNKNOWN) |
3445 | 0 | return Arch; |
3446 | 0 | } |
3447 | 0 | } |
3448 | 0 | return CudaArch::UNKNOWN; |
3449 | 0 | } |
3450 | | |
3451 | | /// Check that the target architecture supports unified addressing, which is |
3452 | | /// required by the OpenMP 'requires' clause "unified_shared_memory". |
3453 | | void CGOpenMPRuntimeGPU::processRequiresDirective( |
3454 | 0 | const OMPRequiresDecl *D) { |
3455 | 0 | for (const OMPClause *Clause : D->clauselists()) { |
3456 | 0 | if (Clause->getClauseKind() == OMPC_unified_shared_memory) { |
3457 | 0 | CudaArch Arch = getCudaArch(CGM); |
3458 | 0 | switch (Arch) { |
3459 | 0 | case CudaArch::SM_20: |
3460 | 0 | case CudaArch::SM_21: |
3461 | 0 | case CudaArch::SM_30: |
3462 | 0 | case CudaArch::SM_32: |
3463 | 0 | case CudaArch::SM_35: |
3464 | 0 | case CudaArch::SM_37: |
3465 | 0 | case CudaArch::SM_50: |
3466 | 0 | case CudaArch::SM_52: |
3467 | 0 | case CudaArch::SM_53: { |
3468 | 0 | SmallString<256> Buffer; |
3469 | 0 | llvm::raw_svector_ostream Out(Buffer); |
3470 | 0 | Out << "Target architecture " << CudaArchToString(Arch) |
3471 | 0 | << " does not support unified addressing"; |
3472 | 0 | CGM.Error(Clause->getBeginLoc(), Out.str()); |
3473 | 0 | return; |
3474 | 0 | } |
3475 | 0 | case CudaArch::SM_60: |
3476 | 0 | case CudaArch::SM_61: |
3477 | 0 | case CudaArch::SM_62: |
3478 | 0 | case CudaArch::SM_70: |
3479 | 0 | case CudaArch::SM_72: |
3480 | 0 | case CudaArch::SM_75: |
3481 | 0 | case CudaArch::SM_80: |
3482 | 0 | case CudaArch::SM_86: |
3483 | 0 | case CudaArch::SM_87: |
3484 | 0 | case CudaArch::SM_89: |
3485 | 0 | case CudaArch::SM_90: |
3486 | 0 | case CudaArch::SM_90a: |
3487 | 0 | case CudaArch::GFX600: |
3488 | 0 | case CudaArch::GFX601: |
3489 | 0 | case CudaArch::GFX602: |
3490 | 0 | case CudaArch::GFX700: |
3491 | 0 | case CudaArch::GFX701: |
3492 | 0 | case CudaArch::GFX702: |
3493 | 0 | case CudaArch::GFX703: |
3494 | 0 | case CudaArch::GFX704: |
3495 | 0 | case CudaArch::GFX705: |
3496 | 0 | case CudaArch::GFX801: |
3497 | 0 | case CudaArch::GFX802: |
3498 | 0 | case CudaArch::GFX803: |
3499 | 0 | case CudaArch::GFX805: |
3500 | 0 | case CudaArch::GFX810: |
3501 | 0 | case CudaArch::GFX900: |
3502 | 0 | case CudaArch::GFX902: |
3503 | 0 | case CudaArch::GFX904: |
3504 | 0 | case CudaArch::GFX906: |
3505 | 0 | case CudaArch::GFX908: |
3506 | 0 | case CudaArch::GFX909: |
3507 | 0 | case CudaArch::GFX90a: |
3508 | 0 | case CudaArch::GFX90c: |
3509 | 0 | case CudaArch::GFX940: |
3510 | 0 | case CudaArch::GFX941: |
3511 | 0 | case CudaArch::GFX942: |
3512 | 0 | case CudaArch::GFX1010: |
3513 | 0 | case CudaArch::GFX1011: |
3514 | 0 | case CudaArch::GFX1012: |
3515 | 0 | case CudaArch::GFX1013: |
3516 | 0 | case CudaArch::GFX1030: |
3517 | 0 | case CudaArch::GFX1031: |
3518 | 0 | case CudaArch::GFX1032: |
3519 | 0 | case CudaArch::GFX1033: |
3520 | 0 | case CudaArch::GFX1034: |
3521 | 0 | case CudaArch::GFX1035: |
3522 | 0 | case CudaArch::GFX1036: |
3523 | 0 | case CudaArch::GFX1100: |
3524 | 0 | case CudaArch::GFX1101: |
3525 | 0 | case CudaArch::GFX1102: |
3526 | 0 | case CudaArch::GFX1103: |
3527 | 0 | case CudaArch::GFX1150: |
3528 | 0 | case CudaArch::GFX1151: |
3529 | 0 | case CudaArch::GFX1200: |
3530 | 0 | case CudaArch::GFX1201: |
3531 | 0 | case CudaArch::Generic: |
3532 | 0 | case CudaArch::UNUSED: |
3533 | 0 | case CudaArch::UNKNOWN: |
3534 | 0 | break; |
3535 | 0 | case CudaArch::LAST: |
3536 | 0 | llvm_unreachable("Unexpected Cuda arch."); |
3537 | 0 | } |
3538 | 0 | } |
3539 | 0 | } |
3540 | 0 | CGOpenMPRuntime::processRequiresDirective(D); |
3541 | 0 | } |
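For reference, the clause being checked here comes from source like the following; on the pre-sm_60 architectures listed above it would trigger the "does not support unified addressing" diagnostic emitted by this routine (a minimal sketch, not a regression test from the tree):

  #pragma omp requires unified_shared_memory

  void saxpy(int n, float a, const float *x, float *y) {
  #pragma omp target teams distribute parallel for
    for (int i = 0; i < n; ++i)
      y[i] = a * x[i] + y[i];
  }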
3542 | | |
3543 | 0 | llvm::Value *CGOpenMPRuntimeGPU::getGPUNumThreads(CodeGenFunction &CGF) { |
3544 | 0 | CGBuilderTy &Bld = CGF.Builder; |
3545 | 0 | llvm::Module *M = &CGF.CGM.getModule(); |
3546 | 0 | const char *LocSize = "__kmpc_get_hardware_num_threads_in_block"; |
3547 | 0 | llvm::Function *F = M->getFunction(LocSize); |
3548 | 0 | if (!F) { |
3549 | 0 | F = llvm::Function::Create( |
3550 | 0 | llvm::FunctionType::get(CGF.Int32Ty, std::nullopt, false), |
3551 | 0 | llvm::GlobalVariable::ExternalLinkage, LocSize, &CGF.CGM.getModule()); |
3552 | 0 | } |
3553 | 0 | return Bld.CreateCall(F, std::nullopt, "nvptx_num_threads"); |
3554 | 0 | } |
3555 | | |
3556 | 0 | llvm::Value *CGOpenMPRuntimeGPU::getGPUThreadID(CodeGenFunction &CGF) { |
3557 | 0 | ArrayRef<llvm::Value *> Args{}; |
3558 | 0 | return CGF.EmitRuntimeCall( |
3559 | 0 | OMPBuilder.getOrCreateRuntimeFunction( |
3560 | 0 | CGM.getModule(), OMPRTL___kmpc_get_hardware_thread_id_in_block), |
3561 | 0 | Args); |
3562 | 0 | } |
3563 | | |
3564 | 0 | llvm::Value *CGOpenMPRuntimeGPU::getGPUWarpSize(CodeGenFunction &CGF) { |
3565 | 0 | ArrayRef<llvm::Value *> Args{}; |
3566 | 0 | return CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction( |
3567 | 0 | CGM.getModule(), OMPRTL___kmpc_get_warp_size), |
3568 | 0 | Args); |
3569 | 0 | } |