Coverage Report

Created: 2024-01-17 10:31

/src/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp

Line|Count|Source   (a blank count marks a line that is not instrumented)
   1|  |//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
   2|  |//
   3|  |// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
   4|  |// See https://llvm.org/LICENSE.txt for license information.
   5|  |// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
   6|  |//
   7|  |//===----------------------------------------------------------------------===//
   8|  |//
   9|  |// This coordinates the per-function state used while generating code.
  10|  |//
  11|  |//===----------------------------------------------------------------------===//
  12|  |
  13|  |#include "CodeGenFunction.h"
  14|  |#include "CGBlocks.h"
  15|  |#include "CGCUDARuntime.h"
  16|  |#include "CGCXXABI.h"
  17|  |#include "CGCleanup.h"
  18|  |#include "CGDebugInfo.h"
  19|  |#include "CGHLSLRuntime.h"
  20|  |#include "CGOpenMPRuntime.h"
  21|  |#include "CodeGenModule.h"
  22|  |#include "CodeGenPGO.h"
  23|  |#include "TargetInfo.h"
  24|  |#include "clang/AST/ASTContext.h"
  25|  |#include "clang/AST/ASTLambda.h"
  26|  |#include "clang/AST/Attr.h"
  27|  |#include "clang/AST/Decl.h"
  28|  |#include "clang/AST/DeclCXX.h"
  29|  |#include "clang/AST/Expr.h"
  30|  |#include "clang/AST/StmtCXX.h"
  31|  |#include "clang/AST/StmtObjC.h"
  32|  |#include "clang/Basic/Builtins.h"
  33|  |#include "clang/Basic/CodeGenOptions.h"
  34|  |#include "clang/Basic/TargetInfo.h"
  35|  |#include "clang/CodeGen/CGFunctionInfo.h"
  36|  |#include "clang/Frontend/FrontendDiagnostic.h"
  37|  |#include "llvm/ADT/ArrayRef.h"
  38|  |#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
  39|  |#include "llvm/IR/DataLayout.h"
  40|  |#include "llvm/IR/Dominators.h"
  41|  |#include "llvm/IR/FPEnv.h"
  42|  |#include "llvm/IR/IntrinsicInst.h"
  43|  |#include "llvm/IR/Intrinsics.h"
  44|  |#include "llvm/IR/MDBuilder.h"
  45|  |#include "llvm/IR/Operator.h"
  46|  |#include "llvm/Support/CRC.h"
  47|  |#include "llvm/Support/xxhash.h"
  48|  |#include "llvm/Transforms/Scalar/LowerExpectIntrinsic.h"
  49|  |#include "llvm/Transforms/Utils/PromoteMemToReg.h"
  50|  |#include <optional>
  51|  |
  52|  |using namespace clang;
  53|  |using namespace CodeGen;
  54|  |
  55|  |/// shouldEmitLifetimeMarkers - Decide whether we need to emit the lifetime
  56|  |/// markers.
  57|  |static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
  58| 0|                                      const LangOptions &LangOpts) {
  59| 0|  if (CGOpts.DisableLifetimeMarkers)
  60| 0|    return false;
  61|  |
  62|  |  // Sanitizers may use markers.
  63| 0|  if (CGOpts.SanitizeAddressUseAfterScope ||
  64| 0|      LangOpts.Sanitize.has(SanitizerKind::HWAddress) ||
  65| 0|      LangOpts.Sanitize.has(SanitizerKind::Memory))
  66| 0|    return true;
  67|  |
  68|  |  // For now, only in optimized builds.
  69| 0|  return CGOpts.OptimizationLevel != 0;
  70| 0|}
  71|  |
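
Note: the predicate above gates emission of llvm.lifetime.start/end markers. A
minimal sketch of its behavior, assuming default-constructed clang option
objects (illustrative only; the function itself is file-local):

    clang::CodeGenOptions CGOpts;   // OptimizationLevel == 0 by default
    clang::LangOptions LangOpts;
    // At -O0 with no sanitizers: no markers.
    //   shouldEmitLifetimeMarkers(CGOpts, LangOpts) -> false
    CGOpts.OptimizationLevel = 2;
    // Optimized builds emit markers so the backend can reuse stack slots.
    //   shouldEmitLifetimeMarkers(CGOpts, LangOpts) -> true
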
  72|  |CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
  73|  |    : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
  74|  |      Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
  75|  |              CGBuilderInserterTy(this)),
  76|  |      SanOpts(CGM.getLangOpts().Sanitize), CurFPFeatures(CGM.getLangOpts()),
  77|  |      DebugInfo(CGM.getModuleDebugInfo()), PGO(cgm),
  78|  |      ShouldEmitLifetimeMarkers(
  79| 0|          shouldEmitLifetimeMarkers(CGM.getCodeGenOpts(), CGM.getLangOpts())) {
  80| 0|  if (!suppressNewContext)
  81| 0|    CGM.getCXXABI().getMangleContext().startNewFunction();
  82| 0|  EHStack.setCGF(this);
  83|  |
  84| 0|  SetFastMathFlags(CurFPFeatures);
  85| 0|}
  86|  |
  87| 0|CodeGenFunction::~CodeGenFunction() {
  88| 0|  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");
  89|  |
  90| 0|  if (getLangOpts().OpenMP && CurFn)
  91| 0|    CGM.getOpenMPRuntime().functionFinished(*this);
  92|  |
  93|  |  // If we have an OpenMPIRBuilder we want to finalize functions (incl.
  94|  |  // outlining etc) at some point. Doing it once the function codegen is done
  95|  |  // seems to be a reasonable spot. We do it here, as opposed to the deletion
  96|  |  // time of the CodeGenModule, because we have to ensure the IR has not yet
  97|  |  // been "emitted" to the outside, thus, modifications are still sensible.
  98| 0|  if (CGM.getLangOpts().OpenMPIRBuilder && CurFn)
  99| 0|    CGM.getOpenMPRuntime().getOMPBuilder().finalize(CurFn);
 100| 0|}
 101|  |
 102|  |// Map the LangOption for exception behavior into
 103|  |// the corresponding enum in the IR.
 104|  |llvm::fp::ExceptionBehavior
 105| 0|clang::ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind) {
 106|  |
 107| 0|  switch (Kind) {
 108| 0|  case LangOptions::FPE_Ignore:  return llvm::fp::ebIgnore;
 109| 0|  case LangOptions::FPE_MayTrap: return llvm::fp::ebMayTrap;
 110| 0|  case LangOptions::FPE_Strict:  return llvm::fp::ebStrict;
 111| 0|  default:
 112| 0|    llvm_unreachable("Unsupported FP Exception Behavior");
 113| 0|  }
 114| 0|}
 115|  |
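Note: the switch above is a direct mapping from the frontend's FP exception
mode to the IR-level enum; schematically (the driver spellings here are an
assumption, not taken from this report):

    // -ffp-exception-behavior=ignore  : FPE_Ignore  -> llvm::fp::ebIgnore
    // -ffp-exception-behavior=maytrap : FPE_MayTrap -> llvm::fp::ebMayTrap
    // -ffp-exception-behavior=strict  : FPE_Strict  -> llvm::fp::ebStrict
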
 116| 0|void CodeGenFunction::SetFastMathFlags(FPOptions FPFeatures) {
 117| 0|  llvm::FastMathFlags FMF;
 118| 0|  FMF.setAllowReassoc(FPFeatures.getAllowFPReassociate());
 119| 0|  FMF.setNoNaNs(FPFeatures.getNoHonorNaNs());
 120| 0|  FMF.setNoInfs(FPFeatures.getNoHonorInfs());
 121| 0|  FMF.setNoSignedZeros(FPFeatures.getNoSignedZero());
 122| 0|  FMF.setAllowReciprocal(FPFeatures.getAllowReciprocal());
 123| 0|  FMF.setApproxFunc(FPFeatures.getAllowApproxFunc());
 124| 0|  FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
 125| 0|  Builder.setFastMathFlags(FMF);
 126| 0|}
 127|  |
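Note: a self-contained sketch of the flag set SetFastMathFlags builds, using
only the public llvm::FastMathFlags API (hypothetical helper, not part of this
file; the FMF.h header location is an assumption):

    #include "llvm/IR/FMF.h"

    // Build the flag set that a fully permissive FPOptions would imply.
    llvm::FastMathFlags fastMathAll() {
      llvm::FastMathFlags FMF;
      FMF.setAllowReassoc(true);    // allow reassociation
      FMF.setNoNaNs(true);          // assume no NaNs
      FMF.setNoInfs(true);          // assume no infinities
      FMF.setNoSignedZeros(true);   // ignore the sign of zero
      FMF.setAllowReciprocal(true); // allow x/y -> x * (1/y)
      FMF.setApproxFunc(true);      // allow approximate libm calls
      FMF.setAllowContract(true);   // allow fma contraction
      return FMF;                   // applied via Builder.setFastMathFlags(FMF)
    }
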
 128|  |CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
 129|  |                                                  const Expr *E)
 130| 0|    : CGF(CGF) {
 131| 0|  ConstructorHelper(E->getFPFeaturesInEffect(CGF.getLangOpts()));
 132| 0|}
 133|  |
 134|  |CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
 135|  |                                                  FPOptions FPFeatures)
 136| 0|    : CGF(CGF) {
 137| 0|  ConstructorHelper(FPFeatures);
 138| 0|}
 139|  |
 140| 0|void CodeGenFunction::CGFPOptionsRAII::ConstructorHelper(FPOptions FPFeatures) {
 141| 0|  OldFPFeatures = CGF.CurFPFeatures;
 142| 0|  CGF.CurFPFeatures = FPFeatures;
 143|  |
 144| 0|  OldExcept = CGF.Builder.getDefaultConstrainedExcept();
 145| 0|  OldRounding = CGF.Builder.getDefaultConstrainedRounding();
 146|  |
 147| 0|  if (OldFPFeatures == FPFeatures)
 148| 0|    return;
 149|  |
 150| 0|  FMFGuard.emplace(CGF.Builder);
 151|  |
 152| 0|  llvm::RoundingMode NewRoundingBehavior = FPFeatures.getRoundingMode();
 153| 0|  CGF.Builder.setDefaultConstrainedRounding(NewRoundingBehavior);
 154| 0|  auto NewExceptionBehavior =
 155| 0|      ToConstrainedExceptMD(static_cast<LangOptions::FPExceptionModeKind>(
 156| 0|          FPFeatures.getExceptionMode()));
 157| 0|  CGF.Builder.setDefaultConstrainedExcept(NewExceptionBehavior);
 158|  |
 159| 0|  CGF.SetFastMathFlags(FPFeatures);
 160|  |
 161| 0|  assert((CGF.CurFuncDecl == nullptr || CGF.Builder.getIsFPConstrained() ||
 162| 0|          isa<CXXConstructorDecl>(CGF.CurFuncDecl) ||
 163| 0|          isa<CXXDestructorDecl>(CGF.CurFuncDecl) ||
 164| 0|          (NewExceptionBehavior == llvm::fp::ebIgnore &&
 165| 0|           NewRoundingBehavior == llvm::RoundingMode::NearestTiesToEven)) &&
 166| 0|         "FPConstrained should be enabled on entire function");
 167|  |
 168| 0|  auto mergeFnAttrValue = [&](StringRef Name, bool Value) {
 169| 0|    auto OldValue =
 170| 0|        CGF.CurFn->getFnAttribute(Name).getValueAsBool();
 171| 0|    auto NewValue = OldValue & Value;
 172| 0|    if (OldValue != NewValue)
 173| 0|      CGF.CurFn->addFnAttr(Name, llvm::toStringRef(NewValue));
 174| 0|  };
 175| 0|  mergeFnAttrValue("no-infs-fp-math", FPFeatures.getNoHonorInfs());
 176| 0|  mergeFnAttrValue("no-nans-fp-math", FPFeatures.getNoHonorNaNs());
 177| 0|  mergeFnAttrValue("no-signed-zeros-fp-math", FPFeatures.getNoSignedZero());
 178| 0|  mergeFnAttrValue(
 179| 0|      "unsafe-fp-math",
 180| 0|      FPFeatures.getAllowFPReassociate() && FPFeatures.getAllowReciprocal() &&
 181| 0|          FPFeatures.getAllowApproxFunc() && FPFeatures.getNoSignedZero() &&
 182| 0|          FPFeatures.allowFPContractAcrossStatement());
 183| 0|}
 184|  |
 185| 0|CodeGenFunction::CGFPOptionsRAII::~CGFPOptionsRAII() {
 186| 0|  CGF.CurFPFeatures = OldFPFeatures;
 187| 0|  CGF.Builder.setDefaultConstrainedExcept(OldExcept);
 188| 0|  CGF.Builder.setDefaultConstrainedRounding(OldRounding);
 189| 0|}
 190|  |
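Note: CGFPOptionsRAII is a scope guard for the floating-point environment
during expression emission. A minimal usage sketch (hypothetical emitter; only
the guard itself comes from this file):

    void emitFPBinOp(clang::CodeGen::CodeGenFunction &CGF,
                     const clang::BinaryOperator *E) {
      // Applies E's pragma-adjusted FPOptions and the matching constrained-FP
      // defaults; the destructor restores the previous state on scope exit.
      clang::CodeGen::CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
      // ... emit the arithmetic for E under the new FP environment ...
    }
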
 191| 0|LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
 192| 0|  LValueBaseInfo BaseInfo;
 193| 0|  TBAAAccessInfo TBAAInfo;
 194| 0|  CharUnits Alignment = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
 195| 0|  Address Addr(V, ConvertTypeForMem(T), Alignment);
 196| 0|  return LValue::MakeAddr(Addr, T, getContext(), BaseInfo, TBAAInfo);
 197| 0|}
 198|  |
 199|  |/// Given a value of type T* that may not point to a complete object,
 200|  |/// construct an l-value with the natural pointee alignment of T.
 201|  |LValue
 202| 0|CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
 203| 0|  LValueBaseInfo BaseInfo;
 204| 0|  TBAAAccessInfo TBAAInfo;
 205| 0|  CharUnits Align = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
 206| 0|                                                /* forPointeeType= */ true);
 207| 0|  Address Addr(V, ConvertTypeForMem(T), Align);
 208| 0|  return MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
 209| 0|}
 210|  |
 211|  |
 212| 0|llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
 213| 0|  return CGM.getTypes().ConvertTypeForMem(T);
 214| 0|}
 215|  |
 216| 0|llvm::Type *CodeGenFunction::ConvertType(QualType T) {
 217| 0|  return CGM.getTypes().ConvertType(T);
 218| 0|}
 219|  |
 220| 0|TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
 221| 0|  type = type.getCanonicalType();
 222| 0|  while (true) {
 223| 0|    switch (type->getTypeClass()) {
 224| 0|#define TYPE(name, parent)
 225| 0|#define ABSTRACT_TYPE(name, parent)
 226| 0|#define NON_CANONICAL_TYPE(name, parent) case Type::name:
 227| 0|#define DEPENDENT_TYPE(name, parent) case Type::name:
 228| 0|#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
 229| 0|#include "clang/AST/TypeNodes.inc"
 230| 0|      llvm_unreachable("non-canonical or dependent type in IR-generation");
 231|  |
 232| 0|    case Type::Auto:
 233| 0|    case Type::DeducedTemplateSpecialization:
 234| 0|      llvm_unreachable("undeduced type in IR-generation");
 235|  |
 236|  |    // Various scalar types.
 237| 0|    case Type::Builtin:
 238| 0|    case Type::Pointer:
 239| 0|    case Type::BlockPointer:
 240| 0|    case Type::LValueReference:
 241| 0|    case Type::RValueReference:
 242| 0|    case Type::MemberPointer:
 243| 0|    case Type::Vector:
 244| 0|    case Type::ExtVector:
 245| 0|    case Type::ConstantMatrix:
 246| 0|    case Type::FunctionProto:
 247| 0|    case Type::FunctionNoProto:
 248| 0|    case Type::Enum:
 249| 0|    case Type::ObjCObjectPointer:
 250| 0|    case Type::Pipe:
 251| 0|    case Type::BitInt:
 252| 0|      return TEK_Scalar;
 253|  |
 254|  |    // Complexes.
 255| 0|    case Type::Complex:
 256| 0|      return TEK_Complex;
 257|  |
 258|  |    // Arrays, records, and Objective-C objects.
 259| 0|    case Type::ConstantArray:
 260| 0|    case Type::IncompleteArray:
 261| 0|    case Type::VariableArray:
 262| 0|    case Type::Record:
 263| 0|    case Type::ObjCObject:
 264| 0|    case Type::ObjCInterface:
 265| 0|      return TEK_Aggregate;
 266|  |
 267|  |    // We operate on atomic values according to their underlying type.
 268| 0|    case Type::Atomic:
 269| 0|      type = cast<AtomicType>(type)->getValueType();
 270| 0|      continue;
 271| 0|    }
 272| 0|    llvm_unreachable("unknown type kind!");
 273| 0|  }
 274| 0|}
 275|  |
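Note: representative classifications implied by the cases above:

    // int, float, int*, T&, enums, vectors   -> TEK_Scalar
    // _Complex double                        -> TEK_Complex
    // structs/classes, arrays, ObjC objects  -> TEK_Aggregate
    // _Atomic(int)                           -> re-dispatched on the value
    //                                           type, so int -> TEK_Scalar
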
 276| 0|llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
 277|  |  // For cleanliness, we try to avoid emitting the return block for
 278|  |  // simple cases.
 279| 0|  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
 280|  |
 281| 0|  if (CurBB) {
 282| 0|    assert(!CurBB->getTerminator() && "Unexpected terminated block.");
 283|  |
 284|  |    // We have a valid insert point, reuse it if it is empty or there are no
 285|  |    // explicit jumps to the return block.
 286| 0|    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
 287| 0|      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
 288| 0|      delete ReturnBlock.getBlock();
 289| 0|      ReturnBlock = JumpDest();
 290| 0|    } else
 291| 0|      EmitBlock(ReturnBlock.getBlock());
 292| 0|    return llvm::DebugLoc();
 293| 0|  }
 294|  |
 295|  |  // Otherwise, if the return block is the target of a single direct
 296|  |  // branch then we can just put the code in that block instead. This
 297|  |  // cleans up functions which started with a unified return block.
 298| 0|  if (ReturnBlock.getBlock()->hasOneUse()) {
 299| 0|    llvm::BranchInst *BI =
 300| 0|      dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
 301| 0|    if (BI && BI->isUnconditional() &&
 302| 0|        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
 303|  |      // Record/return the DebugLoc of the simple 'return' expression to be used
 304|  |      // later by the actual 'ret' instruction.
 305| 0|      llvm::DebugLoc Loc = BI->getDebugLoc();
 306| 0|      Builder.SetInsertPoint(BI->getParent());
 307| 0|      BI->eraseFromParent();
 308| 0|      delete ReturnBlock.getBlock();
 309| 0|      ReturnBlock = JumpDest();
 310| 0|      return Loc;
 311| 0|    }
 312| 0|  }
 313|  |
 314|  |  // FIXME: We are at an unreachable point, there is no reason to emit the block
 315|  |  // unless it has uses. However, we still need a place to put the debug
 316|  |  // region.end for now.
 317|  |
 318| 0|  EmitBlock(ReturnBlock.getBlock());
 319| 0|  return llvm::DebugLoc();
 320| 0|}
 321|  |
 322| 0|static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
 323| 0|  if (!BB) return;
 324| 0|  if (!BB->use_empty()) {
 325| 0|    CGF.CurFn->insert(CGF.CurFn->end(), BB);
 326| 0|    return;
 327| 0|  }
 328| 0|  delete BB;
 329| 0|}
 330|  |
 331| 0|void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
 332| 0|  assert(BreakContinueStack.empty() &&
 333| 0|         "mismatched push/pop in break/continue stack!");
 334|  |
 335| 0|  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
 336| 0|    && NumSimpleReturnExprs == NumReturnExprs
 337| 0|    && ReturnBlock.getBlock()->use_empty();
 338|  |  // Usually the return expression is evaluated before the cleanup
 339|  |  // code.  If the function contains only a simple return statement,
 340|  |  // such as a constant, the location before the cleanup code becomes
 341|  |  // the last useful breakpoint in the function, because the simple
 342|  |  // return expression will be evaluated after the cleanup code. To be
 343|  |  // safe, set the debug location for cleanup code to the location of
 344|  |  // the return statement.  Otherwise the cleanup code should be at the
 345|  |  // end of the function's lexical scope.
 346|  |  //
 347|  |  // If there are multiple branches to the return block, the branch
 348|  |  // instructions will get the location of the return statements and
 349|  |  // all will be fine.
 350| 0|  if (CGDebugInfo *DI = getDebugInfo()) {
 351| 0|    if (OnlySimpleReturnStmts)
 352| 0|      DI->EmitLocation(Builder, LastStopPoint);
 353| 0|    else
 354| 0|      DI->EmitLocation(Builder, EndLoc);
 355| 0|  }
 356|  |
 357|  |  // Pop any cleanups that might have been associated with the
 358|  |  // parameters.  Do this in whatever block we're currently in; it's
 359|  |  // important to do this before we enter the return block or return
 360|  |  // edges will be *really* confused.
 361| 0|  bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
 362| 0|  bool HasOnlyLifetimeMarkers =
 363| 0|      HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
 364| 0|  bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;
 365|  |
 366| 0|  std::optional<ApplyDebugLocation> OAL;
 367| 0|  if (HasCleanups) {
 368|  |    // Make sure the line table doesn't jump back into the body for
 369|  |    // the ret after it's been at EndLoc.
 370| 0|    if (CGDebugInfo *DI = getDebugInfo()) {
 371| 0|      if (OnlySimpleReturnStmts)
 372| 0|        DI->EmitLocation(Builder, EndLoc);
 373| 0|      else
 374|  |        // We may not have a valid end location. Try to apply it anyway, and
 375|  |        // fall back to an artificial location if needed.
 376| 0|        OAL = ApplyDebugLocation::CreateDefaultArtificial(*this, EndLoc);
 377| 0|    }
 378|  |
 379| 0|    PopCleanupBlocks(PrologueCleanupDepth);
 380| 0|  }
 381|  |
 382|  |  // Emit function epilog (to return).
 383| 0|  llvm::DebugLoc Loc = EmitReturnBlock();
 384|  |
 385| 0|  if (ShouldInstrumentFunction()) {
 386| 0|    if (CGM.getCodeGenOpts().InstrumentFunctions)
 387| 0|      CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
 388| 0|    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
 389| 0|      CurFn->addFnAttr("instrument-function-exit-inlined",
 390| 0|                       "__cyg_profile_func_exit");
 391| 0|  }
 392|  |
 393|  |  // Emit debug descriptor for function end.
 394| 0|  if (CGDebugInfo *DI = getDebugInfo())
 395| 0|    DI->EmitFunctionEnd(Builder, CurFn);
 396|  |
 397|  |  // Reset the debug location to that of the simple 'return' expression, if any,
 398|  |  // rather than that of the end of the function's scope '}'.
 399| 0|  ApplyDebugLocation AL(*this, Loc);
 400| 0|  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
 401| 0|  EmitEndEHSpec(CurCodeDecl);
 402|  |
 403| 0|  assert(EHStack.empty() &&
 404| 0|         "did not remove all scopes from cleanup stack!");
 405|  |
 406|  |  // If someone did an indirect goto, emit the indirect goto block at the end of
 407|  |  // the function.
 408| 0|  if (IndirectBranch) {
 409| 0|    EmitBlock(IndirectBranch->getParent());
 410| 0|    Builder.ClearInsertionPoint();
 411| 0|  }
 412|  |
 413|  |  // If some of our locals escaped, insert a call to llvm.localescape in the
 414|  |  // entry block.
 415| 0|  if (!EscapedLocals.empty()) {
 416|  |    // Invert the map from local to index into a simple vector. There should be
 417|  |    // no holes.
 418| 0|    SmallVector<llvm::Value *, 4> EscapeArgs;
 419| 0|    EscapeArgs.resize(EscapedLocals.size());
 420| 0|    for (auto &Pair : EscapedLocals)
 421| 0|      EscapeArgs[Pair.second] = Pair.first;
 422| 0|    llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
 423| 0|        &CGM.getModule(), llvm::Intrinsic::localescape);
 424| 0|    CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
 425| 0|  }
 426|  |
 427|  |  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
 428| 0|  llvm::Instruction *Ptr = AllocaInsertPt;
 429| 0|  AllocaInsertPt = nullptr;
 430| 0|  Ptr->eraseFromParent();
 431|  |
 432|  |  // PostAllocaInsertPt, if created, was lazily created when it was required,
 433|  |  // remove it now since it was just created for our own convenience.
 434| 0|  if (PostAllocaInsertPt) {
 435| 0|    llvm::Instruction *PostPtr = PostAllocaInsertPt;
 436| 0|    PostAllocaInsertPt = nullptr;
 437| 0|    PostPtr->eraseFromParent();
 438| 0|  }
 439|  |
 440|  |  // If someone took the address of a label but never did an indirect goto, we
 441|  |  // made a zero entry PHI node, which is illegal, zap it now.
 442| 0|  if (IndirectBranch) {
 443| 0|    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
 444| 0|    if (PN->getNumIncomingValues() == 0) {
 445| 0|      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
 446| 0|      PN->eraseFromParent();
 447| 0|    }
 448| 0|  }
 449|  |
 450| 0|  EmitIfUsed(*this, EHResumeBlock);
 451| 0|  EmitIfUsed(*this, TerminateLandingPad);
 452| 0|  EmitIfUsed(*this, TerminateHandler);
 453| 0|  EmitIfUsed(*this, UnreachableBlock);
 454|  |
 455| 0|  for (const auto &FuncletAndParent : TerminateFunclets)
 456| 0|    EmitIfUsed(*this, FuncletAndParent.second);
 457|  |
 458| 0|  if (CGM.getCodeGenOpts().EmitDeclMetadata)
 459| 0|    EmitDeclMetadata();
 460|  |
 461| 0|  for (const auto &R : DeferredReplacements) {
 462| 0|    if (llvm::Value *Old = R.first) {
 463| 0|      Old->replaceAllUsesWith(R.second);
 464| 0|      cast<llvm::Instruction>(Old)->eraseFromParent();
 465| 0|    }
 466| 0|  }
 467| 0|  DeferredReplacements.clear();
 468|  |
 469|  |  // Eliminate CleanupDestSlot alloca by replacing it with SSA values and
 470|  |  // PHIs if the current function is a coroutine. We don't do it for all
 471|  |  // functions as it may result in slight increase in numbers of instructions
 472|  |  // if compiled with no optimizations. We do it for coroutines as the lifetime
 473|  |  // of the CleanupDestSlot alloca makes correct coroutine frame building very
 474|  |  // difficult.
 475| 0|  if (NormalCleanupDest.isValid() && isCoroutine()) {
 476| 0|    llvm::DominatorTree DT(*CurFn);
 477| 0|    llvm::PromoteMemToReg(
 478| 0|        cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
 479| 0|    NormalCleanupDest = Address::invalid();
 480| 0|  }
 481|  |
 482|  |  // Scan function arguments for vector width.
 483| 0|  for (llvm::Argument &A : CurFn->args())
 484| 0|    if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
 485| 0|      LargestVectorWidth =
 486| 0|          std::max((uint64_t)LargestVectorWidth,
 487| 0|                   VT->getPrimitiveSizeInBits().getKnownMinValue());
 488|  |
 489|  |  // Update vector width based on return type.
 490| 0|  if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
 491| 0|    LargestVectorWidth =
 492| 0|        std::max((uint64_t)LargestVectorWidth,
 493| 0|                 VT->getPrimitiveSizeInBits().getKnownMinValue());
 494|  |
 495| 0|  if (CurFnInfo->getMaxVectorWidth() > LargestVectorWidth)
 496| 0|    LargestVectorWidth = CurFnInfo->getMaxVectorWidth();
 497|  |
 498|  |  // Add the min-legal-vector-width attribute. This contains the max width from:
 499|  |  // 1. min-vector-width attribute used in the source program.
 500|  |  // 2. Any builtins used that have a vector width specified.
 501|  |  // 3. Values passed in and out of inline assembly.
 502|  |  // 4. Width of vector arguments and return types for this function.
 503|  |  // 5. Width of vector arguments and return types for functions called by this
 504|  |  //    function.
 505| 0|  if (getContext().getTargetInfo().getTriple().isX86())
 506| 0|    CurFn->addFnAttr("min-legal-vector-width",
 507| 0|                     llvm::utostr(LargestVectorWidth));
 508|  |
 509|  |  // Add vscale_range attribute if appropriate.
 510| 0|  std::optional<std::pair<unsigned, unsigned>> VScaleRange =
 511| 0|      getContext().getTargetInfo().getVScaleRange(getLangOpts());
 512| 0|  if (VScaleRange) {
 513| 0|    CurFn->addFnAttr(llvm::Attribute::getWithVScaleRangeArgs(
 514| 0|        getLLVMContext(), VScaleRange->first, VScaleRange->second));
 515| 0|  }
 516|  |
 517|  |  // If we generated an unreachable return block, delete it now.
 518| 0|  if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
 519| 0|    Builder.ClearInsertionPoint();
 520| 0|    ReturnBlock.getBlock()->eraseFromParent();
 521| 0|  }
 522| 0|  if (ReturnValue.isValid()) {
 523| 0|    auto *RetAlloca = dyn_cast<llvm::AllocaInst>(ReturnValue.getPointer());
 524| 0|    if (RetAlloca && RetAlloca->use_empty()) {
 525| 0|      RetAlloca->eraseFromParent();
 526| 0|      ReturnValue = Address::invalid();
 527| 0|    }
 528| 0|  }
 529| 0|}
 530|  |
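Note: one observable effect of the vector-width bookkeeping in FinishFunction:
on an x86 target, a function with a 256-bit vector parameter should end up
carrying (schematic IR, stated as an assumption rather than taken from this
report):

    // define void @f(<8 x float> %v) #0 { ... }
    // attributes #0 = { "min-legal-vector-width"="256" }
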
 531|  |/// ShouldInstrumentFunction - Return true if the current function should be
 532|  |/// instrumented with __cyg_profile_func_* calls
 533| 0|bool CodeGenFunction::ShouldInstrumentFunction() {
 534| 0|  if (!CGM.getCodeGenOpts().InstrumentFunctions &&
 535| 0|      !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
 536| 0|      !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
 537| 0|    return false;
 538| 0|  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
 539| 0|    return false;
 540| 0|  return true;
 541| 0|}
 542|  |
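Note: ShouldInstrumentFunction guards attribute-based instrumentation rather
than direct call emission; with -finstrument-functions the definition carries
the entry/exit attributes visible in StartFunction and FinishFunction:

    // "instrument-function-entry"="__cyg_profile_func_enter"
    // "instrument-function-exit"="__cyg_profile_func_exit"
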
 543| 0|bool CodeGenFunction::ShouldSkipSanitizerInstrumentation() {
 544| 0|  if (!CurFuncDecl)
 545| 0|    return false;
 546| 0|  return CurFuncDecl->hasAttr<DisableSanitizerInstrumentationAttr>();
 547| 0|}
 548|  |
 549|  |/// ShouldXRayInstrument - Return true if the current function should be
 550|  |/// instrumented with XRay nop sleds.
 551| 0|bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
 552| 0|  return CGM.getCodeGenOpts().XRayInstrumentFunctions;
 553| 0|}
 554|  |
 555|  |/// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
 556|  |/// the __xray_customevent(...) builtin calls, when doing XRay instrumentation.
 557| 0|bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
 558| 0|  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
 559| 0|         (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
 560| 0|          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
 561| 0|              XRayInstrKind::Custom);
 562| 0|}
 563|  |
 564| 0|bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
 565| 0|  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
 566| 0|         (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
 567| 0|          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
 568| 0|              XRayInstrKind::Typed);
 569| 0|}
 570|  |
 571|  |llvm::ConstantInt *
 572| 0|CodeGenFunction::getUBSanFunctionTypeHash(QualType Ty) const {
 573|  |  // Remove any (C++17) exception specifications, to allow calling e.g. a
 574|  |  // noexcept function through a non-noexcept pointer.
 575| 0|  if (!Ty->isFunctionNoProtoType())
 576| 0|    Ty = getContext().getFunctionTypeWithExceptionSpec(Ty, EST_None);
 577| 0|  std::string Mangled;
 578| 0|  llvm::raw_string_ostream Out(Mangled);
 579| 0|  CGM.getCXXABI().getMangleContext().mangleCanonicalTypeName(Ty, Out, false);
 580| 0|  return llvm::ConstantInt::get(
 581| 0|      CGM.Int32Ty, static_cast<uint32_t>(llvm::xxh3_64bits(Mangled)));
 582| 0|}
 583|  |
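Note: the hash above in isolation; a runnable sketch against the same LLVM
xxhash API used by this file (hypothetical free function):

    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/xxhash.h"
    #include <cstdint>

    uint32_t hashOfMangledType(llvm::StringRef Mangled) {
      // Truncate the 64-bit xxh3 digest to 32 bits, matching the use above.
      return static_cast<uint32_t>(llvm::xxh3_64bits(Mangled));
    }
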
 584|  |void CodeGenFunction::EmitKernelMetadata(const FunctionDecl *FD,
 585| 0|                                         llvm::Function *Fn) {
 586| 0|  if (!FD->hasAttr<OpenCLKernelAttr>() && !FD->hasAttr<CUDAGlobalAttr>())
 587| 0|    return;
 588|  |
 589| 0|  llvm::LLVMContext &Context = getLLVMContext();
 590|  |
 591| 0|  CGM.GenKernelArgMetadata(Fn, FD, this);
 592|  |
 593| 0|  if (!getLangOpts().OpenCL)
 594| 0|    return;
 595|  |
 596| 0|  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
 597| 0|    QualType HintQTy = A->getTypeHint();
 598| 0|    const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
 599| 0|    bool IsSignedInteger =
 600| 0|        HintQTy->isSignedIntegerType() ||
 601| 0|        (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
 602| 0|    llvm::Metadata *AttrMDArgs[] = {
 603| 0|        llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
 604| 0|            CGM.getTypes().ConvertType(A->getTypeHint()))),
 605| 0|        llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
 606| 0|            llvm::IntegerType::get(Context, 32),
 607| 0|            llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
 608| 0|    Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
 609| 0|  }
 610|  |
 611| 0|  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
 612| 0|    llvm::Metadata *AttrMDArgs[] = {
 613| 0|        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
 614| 0|        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
 615| 0|        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
 616| 0|    Fn->setMetadata("work_group_size_hint", llvm::MDNode::get(Context, AttrMDArgs));
 617| 0|  }
 618|  |
 619| 0|  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
 620| 0|    llvm::Metadata *AttrMDArgs[] = {
 621| 0|        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
 622| 0|        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
 623| 0|        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
 624| 0|    Fn->setMetadata("reqd_work_group_size", llvm::MDNode::get(Context, AttrMDArgs));
 625| 0|  }
 626|  |
 627| 0|  if (const OpenCLIntelReqdSubGroupSizeAttr *A =
 628| 0|          FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
 629| 0|    llvm::Metadata *AttrMDArgs[] = {
 630| 0|        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
 631| 0|    Fn->setMetadata("intel_reqd_sub_group_size",
 632| 0|                    llvm::MDNode::get(Context, AttrMDArgs));
 633| 0|  }
 634| 0|}
 635|  |
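Note: the metadata above is driven by source-level kernel attributes; for an
OpenCL kernel written as (illustrative input, not from this report):

    // __attribute__((reqd_work_group_size(64, 1, 1)))
    // __kernel void k(__global int *p) { ... }

the emitted function is expected to carry !reqd_work_group_size metadata with
operands i32 64, i32 1, i32 1.
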
 636|  |/// Determine whether the function F ends with a return stmt.
 637| 0|static bool endsWithReturn(const Decl* F) {
 638| 0|  const Stmt *Body = nullptr;
 639| 0|  if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
 640| 0|    Body = FD->getBody();
 641| 0|  else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
 642| 0|    Body = OMD->getBody();
 643|  |
 644| 0|  if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
 645| 0|    auto LastStmt = CS->body_rbegin();
 646| 0|    if (LastStmt != CS->body_rend())
 647| 0|      return isa<ReturnStmt>(*LastStmt);
 648| 0|  }
 649| 0|  return false;
 650| 0|}
 651|  |
 652| 0|void CodeGenFunction::markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
 653| 0|  if (SanOpts.has(SanitizerKind::Thread)) {
 654| 0|    Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
 655| 0|    Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
 656| 0|  }
 657| 0|}
 658|  |
 659|  |/// Check if the return value of this function requires sanitization.
 660| 0|bool CodeGenFunction::requiresReturnValueCheck() const {
 661| 0|  return requiresReturnValueNullabilityCheck() ||
 662| 0|         (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) && CurCodeDecl &&
 663| 0|          CurCodeDecl->getAttr<ReturnsNonNullAttr>());
 664| 0|}
 665|  |
 666| 0|static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
 667| 0|  auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
 668| 0|  if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
 669| 0|      !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
 670| 0|      (MD->getNumParams() != 1 && MD->getNumParams() != 2))
 671| 0|    return false;
 672|  |
 673| 0|  if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType())
 674| 0|    return false;
 675|  |
 676| 0|  if (MD->getNumParams() == 2) {
 677| 0|    auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
 678| 0|    if (!PT || !PT->isVoidPointerType() ||
 679| 0|        !PT->getPointeeType().isConstQualified())
 680| 0|      return false;
 681| 0|  }
 682|  |
 683| 0|  return true;
 684| 0|}
 685|  |
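Note: the member-function shapes matchesStlAllocatorFn accepts, e.g. on a
std::allocator-like class (the namespace is deliberately not checked, per the
comment at the use site later in this file):

    // T *allocate(size_t n);
    // T *allocate(size_t n, const void *hint);
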
 686| 0|bool CodeGenFunction::isInAllocaArgument(CGCXXABI &ABI, QualType Ty) {
 687| 0|  const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
 688| 0|  return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
 689| 0|}
 690|  |
 691| 0|bool CodeGenFunction::hasInAllocaArg(const CXXMethodDecl *MD) {
 692| 0|  return getTarget().getTriple().getArch() == llvm::Triple::x86 &&
 693| 0|         getTarget().getCXXABI().isMicrosoft() &&
 694| 0|         llvm::any_of(MD->parameters(), [&](ParmVarDecl *P) {
 695| 0|           return isInAllocaArgument(CGM.getCXXABI(), P->getType());
 696| 0|         });
 697| 0|}
 698|  |
 699|  |/// Return the UBSan prologue signature for \p FD if one is available.
 700|  |static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
 701| 0|                                            const FunctionDecl *FD) {
 702| 0|  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
 703| 0|    if (!MD->isStatic())
 704| 0|      return nullptr;
 705| 0|  return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
 706| 0|}
 707|  |
 708|  |void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
 709|  |                                    llvm::Function *Fn,
 710|  |                                    const CGFunctionInfo &FnInfo,
 711|  |                                    const FunctionArgList &Args,
 712|  |                                    SourceLocation Loc,
 713| 0|                                    SourceLocation StartLoc) {
 714| 0|  assert(!CurFn &&
 715| 0|         "Do not use a CodeGenFunction object for more than one function");
 716|  |
 717| 0|  const Decl *D = GD.getDecl();
 718|  |
 719| 0|  DidCallStackSave = false;
 720| 0|  CurCodeDecl = D;
 721| 0|  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
 722| 0|  if (FD && FD->usesSEHTry())
 723| 0|    CurSEHParent = GD;
 724| 0|  CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
 725| 0|  FnRetTy = RetTy;
 726| 0|  CurFn = Fn;
 727| 0|  CurFnInfo = &FnInfo;
 728| 0|  assert(CurFn->isDeclaration() && "Function already has body?");
 729|  |
 730|  |  // If this function is ignored for any of the enabled sanitizers,
 731|  |  // disable the sanitizer for the function.
 732| 0|  do {
 733| 0|#define SANITIZER(NAME, ID)                                                    \
 734| 0|  if (SanOpts.empty())                                                         \
 735| 0|    break;                                                                     \
 736| 0|  if (SanOpts.has(SanitizerKind::ID))                                          \
 737| 0|    if (CGM.isInNoSanitizeList(SanitizerKind::ID, Fn, Loc))                    \
 738| 0|      SanOpts.set(SanitizerKind::ID, false);
 739|  |
 740| 0|#include "clang/Basic/Sanitizers.def"
 741| 0|#undef SANITIZER
 742| 0|  } while (false);
 743|  |
 744| 0|  if (D) {
 745| 0|    const bool SanitizeBounds = SanOpts.hasOneOf(SanitizerKind::Bounds);
 746| 0|    SanitizerMask no_sanitize_mask;
 747| 0|    bool NoSanitizeCoverage = false;
 748|  |
 749| 0|    for (auto *Attr : D->specific_attrs<NoSanitizeAttr>()) {
 750| 0|      no_sanitize_mask |= Attr->getMask();
 751|  |      // SanitizeCoverage is not handled by SanOpts.
 752| 0|      if (Attr->hasCoverage())
 753| 0|        NoSanitizeCoverage = true;
 754| 0|    }
 755|  |
 756|  |    // Apply the no_sanitize* attributes to SanOpts.
 757| 0|    SanOpts.Mask &= ~no_sanitize_mask;
 758| 0|    if (no_sanitize_mask & SanitizerKind::Address)
 759| 0|      SanOpts.set(SanitizerKind::KernelAddress, false);
 760| 0|    if (no_sanitize_mask & SanitizerKind::KernelAddress)
 761| 0|      SanOpts.set(SanitizerKind::Address, false);
 762| 0|    if (no_sanitize_mask & SanitizerKind::HWAddress)
 763| 0|      SanOpts.set(SanitizerKind::KernelHWAddress, false);
 764| 0|    if (no_sanitize_mask & SanitizerKind::KernelHWAddress)
 765| 0|      SanOpts.set(SanitizerKind::HWAddress, false);
 766|  |
 767| 0|    if (SanitizeBounds && !SanOpts.hasOneOf(SanitizerKind::Bounds))
 768| 0|      Fn->addFnAttr(llvm::Attribute::NoSanitizeBounds);
 769|  |
 770| 0|    if (NoSanitizeCoverage && CGM.getCodeGenOpts().hasSanitizeCoverage())
 771| 0|      Fn->addFnAttr(llvm::Attribute::NoSanitizeCoverage);
 772|  |
 773|  |    // Some passes need the non-negated no_sanitize attribute. Pass them on.
 774| 0|    if (CGM.getCodeGenOpts().hasSanitizeBinaryMetadata()) {
 775| 0|      if (no_sanitize_mask & SanitizerKind::Thread)
 776| 0|        Fn->addFnAttr("no_sanitize_thread");
 777| 0|    }
 778| 0|  }
 779|  |
 780| 0|  if (ShouldSkipSanitizerInstrumentation()) {
 781| 0|    CurFn->addFnAttr(llvm::Attribute::DisableSanitizerInstrumentation);
 782| 0|  } else {
 783|  |    // Apply sanitizer attributes to the function.
 784| 0|    if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
 785| 0|      Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
 786| 0|    if (SanOpts.hasOneOf(SanitizerKind::HWAddress |
 787| 0|                         SanitizerKind::KernelHWAddress))
 788| 0|      Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
 789| 0|    if (SanOpts.has(SanitizerKind::MemtagStack))
 790| 0|      Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
 791| 0|    if (SanOpts.has(SanitizerKind::Thread))
 792| 0|      Fn->addFnAttr(llvm::Attribute::SanitizeThread);
 793| 0|    if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
 794| 0|      Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
 795| 0|  }
 796| 0|  if (SanOpts.has(SanitizerKind::SafeStack))
 797| 0|    Fn->addFnAttr(llvm::Attribute::SafeStack);
 798| 0|  if (SanOpts.has(SanitizerKind::ShadowCallStack))
 799| 0|    Fn->addFnAttr(llvm::Attribute::ShadowCallStack);
 800|  |
 801|  |  // Apply fuzzing attribute to the function.
 802| 0|  if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
 803| 0|    Fn->addFnAttr(llvm::Attribute::OptForFuzzing);
 804|  |
 805|  |  // Ignore TSan memory accesses from within ObjC/ObjC++ dealloc, initialize,
 806|  |  // .cxx_destruct, __destroy_helper_block_ and all of their callees at run time.
 807| 0|  if (SanOpts.has(SanitizerKind::Thread)) {
 808| 0|    if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
 809| 0|      IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
 810| 0|      if (OMD->getMethodFamily() == OMF_dealloc ||
 811| 0|          OMD->getMethodFamily() == OMF_initialize ||
 812| 0|          (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) {
 813| 0|        markAsIgnoreThreadCheckingAtRuntime(Fn);
 814| 0|      }
 815| 0|    }
 816| 0|  }
 817|  |
 818|  |  // Ignore unrelated casts in STL allocate() since the allocator must cast
 819|  |  // from void* to T* before object initialization completes. Don't match on the
 820|  |  // namespace because not all allocators are in std::
 821| 0|  if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
 822| 0|    if (matchesStlAllocatorFn(D, getContext()))
 823| 0|      SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
 824| 0|  }
 825|  |
 826|  |  // Ignore null checks in coroutine functions since the coroutines passes
 827|  |  // are not aware of how to move the extra UBSan instructions across the split
 828|  |  // coroutine boundaries.
 829| 0|  if (D && SanOpts.has(SanitizerKind::Null))
 830| 0|    if (FD && FD->getBody() &&
 831| 0|        FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass)
 832| 0|      SanOpts.Mask &= ~SanitizerKind::Null;
 833|  |
 834|  |  // Apply xray attributes to the function (as a string, for now)
 835| 0|  bool AlwaysXRayAttr = false;
 836| 0|  if (const auto *XRayAttr = D ? D->getAttr<XRayInstrumentAttr>() : nullptr) {
 837| 0|    if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
 838| 0|            XRayInstrKind::FunctionEntry) ||
 839| 0|        CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
 840| 0|            XRayInstrKind::FunctionExit)) {
 841| 0|      if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction()) {
 842| 0|        Fn->addFnAttr("function-instrument", "xray-always");
 843| 0|        AlwaysXRayAttr = true;
 844| 0|      }
 845| 0|      if (XRayAttr->neverXRayInstrument())
 846| 0|        Fn->addFnAttr("function-instrument", "xray-never");
 847| 0|      if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
 848| 0|        if (ShouldXRayInstrumentFunction())
 849| 0|          Fn->addFnAttr("xray-log-args",
 850| 0|                        llvm::utostr(LogArgs->getArgumentCount()));
 851| 0|    }
 852| 0|  } else {
 853| 0|    if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc))
 854| 0|      Fn->addFnAttr(
 855| 0|          "xray-instruction-threshold",
 856| 0|          llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
 857| 0|  }
 858|  |
 859| 0|  if (ShouldXRayInstrumentFunction()) {
 860| 0|    if (CGM.getCodeGenOpts().XRayIgnoreLoops)
 861| 0|      Fn->addFnAttr("xray-ignore-loops");
 862|  |
 863| 0|    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
 864| 0|            XRayInstrKind::FunctionExit))
 865| 0|      Fn->addFnAttr("xray-skip-exit");
 866|  |
 867| 0|    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
 868| 0|            XRayInstrKind::FunctionEntry))
 869| 0|      Fn->addFnAttr("xray-skip-entry");
 870|  |
 871| 0|    auto FuncGroups = CGM.getCodeGenOpts().XRayTotalFunctionGroups;
 872| 0|    if (FuncGroups > 1) {
 873| 0|      auto FuncName = llvm::ArrayRef<uint8_t>(CurFn->getName().bytes_begin(),
 874| 0|                                              CurFn->getName().bytes_end());
 875| 0|      auto Group = crc32(FuncName) % FuncGroups;
 876| 0|      if (Group != CGM.getCodeGenOpts().XRaySelectedFunctionGroup &&
 877| 0|          !AlwaysXRayAttr)
 878| 0|        Fn->addFnAttr("function-instrument", "xray-never");
 879| 0|    }
 880| 0|  }
 881|  |
 882| 0|  if (CGM.getCodeGenOpts().getProfileInstr() != CodeGenOptions::ProfileNone) {
 883| 0|    switch (CGM.isFunctionBlockedFromProfileInstr(Fn, Loc)) {
 884| 0|    case ProfileList::Skip:
 885| 0|      Fn->addFnAttr(llvm::Attribute::SkipProfile);
 886| 0|      break;
 887| 0|    case ProfileList::Forbid:
 888| 0|      Fn->addFnAttr(llvm::Attribute::NoProfile);
 889| 0|      break;
 890| 0|    case ProfileList::Allow:
 891| 0|      break;
 892| 0|    }
 893| 0|  }
 894|  |
 895| 0|  unsigned Count, Offset;
 896| 0|  if (const auto *Attr =
 897| 0|          D ? D->getAttr<PatchableFunctionEntryAttr>() : nullptr) {
 898| 0|    Count = Attr->getCount();
 899| 0|    Offset = Attr->getOffset();
 900| 0|  } else {
 901| 0|    Count = CGM.getCodeGenOpts().PatchableFunctionEntryCount;
 902| 0|    Offset = CGM.getCodeGenOpts().PatchableFunctionEntryOffset;
 903| 0|  }
 904| 0|  if (Count && Offset <= Count) {
 905| 0|    Fn->addFnAttr("patchable-function-entry", std::to_string(Count - Offset));
 906| 0|    if (Offset)
 907| 0|      Fn->addFnAttr("patchable-function-prefix", std::to_string(Offset));
 908| 0|  }
 909|  |  // Instruct that functions for COFF/CodeView targets should start with a
 910|  |  // patchable instruction, but only on x86/x64. Don't forward this to ARM/ARM64
 911|  |  // backends as they don't need it -- instructions on these architectures are
 912|  |  // always atomically patchable at runtime.
 913| 0|  if (CGM.getCodeGenOpts().HotPatch &&
 914| 0|      getContext().getTargetInfo().getTriple().isX86() &&
 915| 0|      getContext().getTargetInfo().getTriple().getEnvironment() !=
 916| 0|          llvm::Triple::CODE16)
 917| 0|    Fn->addFnAttr("patchable-function", "prologue-short-redirect");
 918|  |
 919|  |  // Add no-jump-tables value.
 920| 0|  if (CGM.getCodeGenOpts().NoUseJumpTables)
 921| 0|    Fn->addFnAttr("no-jump-tables", "true");
 922|  |
 923|  |  // Add no-inline-line-tables value.
 924| 0|  if (CGM.getCodeGenOpts().NoInlineLineTables)
 925| 0|    Fn->addFnAttr("no-inline-line-tables");
 926|  |
 927|  |  // Add profile-sample-accurate value.
 928| 0|  if (CGM.getCodeGenOpts().ProfileSampleAccurate)
 929| 0|    Fn->addFnAttr("profile-sample-accurate");
 930|  |
 931| 0|  if (!CGM.getCodeGenOpts().SampleProfileFile.empty())
 932| 0|    Fn->addFnAttr("use-sample-profile");
 933|  |
 934| 0|  if (D && D->hasAttr<CFICanonicalJumpTableAttr>())
 935| 0|    Fn->addFnAttr("cfi-canonical-jump-table");
 936|  |
 937| 0|  if (D && D->hasAttr<NoProfileFunctionAttr>())
 938| 0|    Fn->addFnAttr(llvm::Attribute::NoProfile);
 939|  |
 940| 0|  if (D) {
 941|  |    // Function attributes take precedence over command line flags.
 942| 0|    if (auto *A = D->getAttr<FunctionReturnThunksAttr>()) {
 943| 0|      switch (A->getThunkType()) {
 944| 0|      case FunctionReturnThunksAttr::Kind::Keep:
 945| 0|        break;
 946| 0|      case FunctionReturnThunksAttr::Kind::Extern:
 947| 0|        Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
 948| 0|        break;
 949| 0|      }
 950| 0|    } else if (CGM.getCodeGenOpts().FunctionReturnThunks)
 951| 0|      Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
 952| 0|  }
 953|  |
 954| 0|  if (FD && (getLangOpts().OpenCL ||
 955| 0|             (getLangOpts().HIP && getLangOpts().CUDAIsDevice))) {
 956|  |    // Add metadata for a kernel function.
 957| 0|    EmitKernelMetadata(FD, Fn);
 958| 0|  }
 959|  |
 960|  |  // If we are checking function types, emit a function type signature as
 961|  |  // prologue data.
 962| 0|  if (FD && SanOpts.has(SanitizerKind::Function)) {
 963| 0|    if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
 964| 0|      llvm::LLVMContext &Ctx = Fn->getContext();
 965| 0|      llvm::MDBuilder MDB(Ctx);
 966| 0|      Fn->setMetadata(
 967| 0|          llvm::LLVMContext::MD_func_sanitize,
 968| 0|          MDB.createRTTIPointerPrologue(
 969| 0|              PrologueSig, getUBSanFunctionTypeHash(FD->getType())));
 970| 0|    }
 971| 0|  }
 972|  |
 973|  |  // If we're checking nullability, we need to know whether we can check the
 974|  |  // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
 975| 0|  if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
 976| 0|    auto Nullability = FnRetTy->getNullability();
 977| 0|    if (Nullability && *Nullability == NullabilityKind::NonNull) {
 978| 0|      if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
 979| 0|            CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
 980| 0|        RetValNullabilityPrecondition =
 981| 0|            llvm::ConstantInt::getTrue(getLLVMContext());
 982| 0|    }
 983| 0|  }
 984|  |
 985|  |  // If we're in C++ mode and the function name is "main", it is guaranteed
 986|  |  // to be norecurse by the standard (3.6.1.3 "The function main shall not be
 987|  |  // used within a program").
 988|  |  //
 989|  |  // OpenCL C 2.0 v2.2-11 s6.9.i:
 990|  |  //     Recursion is not supported.
 991|  |  //
 992|  |  // SYCL v1.2.1 s3.10:
 993|  |  //     kernels cannot include RTTI information, exception classes,
 994|  |  //     recursive code, virtual functions or make use of C++ libraries that
 995|  |  //     are not compiled for the device.
 996| 0|  if (FD && ((getLangOpts().CPlusPlus && FD->isMain()) ||
 997| 0|             getLangOpts().OpenCL || getLangOpts().SYCLIsDevice ||
 998| 0|             (getLangOpts().CUDA && FD->hasAttr<CUDAGlobalAttr>())))
 999| 0|    Fn->addFnAttr(llvm::Attribute::NoRecurse);
1000|  |
1001| 0|  llvm::RoundingMode RM = getLangOpts().getDefaultRoundingMode();
1002| 0|  llvm::fp::ExceptionBehavior FPExceptionBehavior =
1003| 0|      ToConstrainedExceptMD(getLangOpts().getDefaultExceptionMode());
1004| 0|  Builder.setDefaultConstrainedRounding(RM);
1005| 0|  Builder.setDefaultConstrainedExcept(FPExceptionBehavior);
1006| 0|  if ((FD && (FD->UsesFPIntrin() || FD->hasAttr<StrictFPAttr>())) ||
1007| 0|      (!FD && (FPExceptionBehavior != llvm::fp::ebIgnore ||
1008| 0|               RM != llvm::RoundingMode::NearestTiesToEven))) {
1009| 0|    Builder.setIsFPConstrained(true);
1010| 0|    Fn->addFnAttr(llvm::Attribute::StrictFP);
1011| 0|  }
1012|  |
1013|  |  // If a custom alignment is used, force realigning to this alignment on
1014|  |  // any main function which certainly will need it.
1015| 0|  if (FD && ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
1016| 0|             CGM.getCodeGenOpts().StackAlignment))
1017| 0|    Fn->addFnAttr("stackrealign");
1018|  |
1019|  |  // "main" doesn't need to zero out call-used registers.
1020| 0|  if (FD && FD->isMain())
1021| 0|    Fn->removeFnAttr("zero-call-used-regs");
1022|  |
1023| 0|  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
1024|  |
1025|  |  // Create a marker to make it easy to insert allocas into the entryblock
1026|  |  // later.  Don't create this with the builder, because we don't want it
1027|  |  // folded.
1028| 0|  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
1029| 0|  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB);
1030|  |
1031| 0|  ReturnBlock = getJumpDestInCurrentScope("return");
1032|  |
1033| 0|  Builder.SetInsertPoint(EntryBB);
1034|  |
1035|  |  // If we're checking the return value, allocate space for a pointer to a
1036|  |  // precise source location of the checked return statement.
1037| 0|  if (requiresReturnValueCheck()) {
1038| 0|    ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
1039| 0|    Builder.CreateStore(llvm::ConstantPointerNull::get(Int8PtrTy),
1040| 0|                        ReturnLocation);
1041| 0|  }
1042|  |
1043|  |  // Emit subprogram debug descriptor.
1044| 0|  if (CGDebugInfo *DI = getDebugInfo()) {
1045|  |    // Reconstruct the type from the argument list so that implicit parameters,
1046|  |    // such as 'this' and 'vtt', show up in the debug info. Preserve the calling
1047|  |    // convention.
1048| 0|    DI->emitFunctionStart(GD, Loc, StartLoc,
1049| 0|                          DI->getFunctionType(FD, RetTy, Args), CurFn,
1050| 0|                          CurFuncIsThunk);
1051| 0|  }
1052|  |
1053| 0|  if (ShouldInstrumentFunction()) {
1054| 0|    if (CGM.getCodeGenOpts().InstrumentFunctions)
1055| 0|      CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
1056| 0|    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
1057| 0|      CurFn->addFnAttr("instrument-function-entry-inlined",
1058| 0|                       "__cyg_profile_func_enter");
1059| 0|    if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
1060| 0|      CurFn->addFnAttr("instrument-function-entry-inlined",
1061| 0|                       "__cyg_profile_func_enter_bare");
1062| 0|  }
1063|  |
1064|  |  // Since emitting the mcount call here impacts optimizations such as function
1065|  |  // inlining, we just add an attribute to insert a mcount call in backend.
1066|  |  // The attribute "counting-function" is set to mcount function name which is
1067|  |  // architecture dependent.
1068| 0|  if (CGM.getCodeGenOpts().InstrumentForProfiling) {
1069|  |    // Calls to fentry/mcount should not be generated if function has
1070|  |    // the no_instrument_function attribute.
1071| 0|    if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
1072| 0|      if (CGM.getCodeGenOpts().CallFEntry)
1073| 0|        Fn->addFnAttr("fentry-call", "true");
1074| 0|      else {
1075| 0|        Fn->addFnAttr("instrument-function-entry-inlined",
1076| 0|                      getTarget().getMCountName());
1077| 0|      }
1078| 0|      if (CGM.getCodeGenOpts().MNopMCount) {
1079| 0|        if (!CGM.getCodeGenOpts().CallFEntry)
1080| 0|          CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
1081| 0|            << "-mnop-mcount" << "-mfentry";
1082| 0|        Fn->addFnAttr("mnop-mcount");
1083| 0|      }
1084|  |
1085| 0|      if (CGM.getCodeGenOpts().RecordMCount) {
1086| 0|        if (!CGM.getCodeGenOpts().CallFEntry)
1087| 0|          CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
1088| 0|            << "-mrecord-mcount" << "-mfentry";
1089| 0|        Fn->addFnAttr("mrecord-mcount");
1090| 0|      }
1091| 0|    }
1092| 0|  }
1093|  |
1094| 0|  if (CGM.getCodeGenOpts().PackedStack) {
1095| 0|    if (getContext().getTargetInfo().getTriple().getArch() !=
1096| 0|        llvm::Triple::systemz)
1097| 0|      CGM.getDiags().Report(diag::err_opt_not_valid_on_target)
1098| 0|        << "-mpacked-stack";
1099| 0|    Fn->addFnAttr("packed-stack");
1100| 0|  }
1101|  |
1102| 0|  if (CGM.getCodeGenOpts().WarnStackSize != UINT_MAX &&
1103| 0|      !CGM.getDiags().isIgnored(diag::warn_fe_backend_frame_larger_than, Loc))
1104| 0|    Fn->addFnAttr("warn-stack-size",
1105| 0|                  std::to_string(CGM.getCodeGenOpts().WarnStackSize));
1106|  |
1107| 0|  if (RetTy->isVoidType()) {
1108|  |    // Void type; nothing to return.
1109| 0|    ReturnValue = Address::invalid();
1110|  |
1111|  |    // Count the implicit return.
1112| 0|    if (!endsWithReturn(D))
1113| 0|      ++NumReturnExprs;
1114| 0|  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
1115|  |    // Indirect return; emit returned value directly into sret slot.
1116|  |    // This reduces code size, and affects correctness in C++.
1117| 0|    auto AI = CurFn->arg_begin();
1118| 0|    if (CurFnInfo->getReturnInfo().isSRetAfterThis())
1119| 0|      ++AI;
1120| 0|    ReturnValue =
1121| 0|        Address(&*AI, ConvertType(RetTy),
1122| 0|                CurFnInfo->getReturnInfo().getIndirectAlign(), KnownNonNull);
1123| 0|    if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
1124| 0|      ReturnValuePointer = CreateDefaultAlignTempAlloca(
1125| 0|          ReturnValue.getPointer()->getType(), "result.ptr");
1126| 0|      Builder.CreateStore(ReturnValue.getPointer(), ReturnValuePointer);
1127| 0|    }
1128| 0|  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
1129| 0|             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
1130|  |    // Load the sret pointer from the argument struct and return into that.
1131| 0|    unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
1132| 0|    llvm::Function::arg_iterator EI = CurFn->arg_end();
1133| 0|    --EI;
1134| 0|    llvm::Value *Addr = Builder.CreateStructGEP(
1135| 0|        CurFnInfo->getArgStruct(), &*EI, Idx);
1136| 0|    llvm::Type *Ty =
1137| 0|        cast<llvm::GetElementPtrInst>(Addr)->getResultElementType();
1138| 0|    ReturnValuePointer = Address(Addr, Ty, getPointerAlign());
1139| 0|    Addr = Builder.CreateAlignedLoad(Ty, Addr, getPointerAlign(), "agg.result");
1140| 0|    ReturnValue = Address(Addr, ConvertType(RetTy),
1141| 0|                          CGM.getNaturalTypeAlignment(RetTy), KnownNonNull);
1142| 0|  } else {
1143| 0|    ReturnValue = CreateIRTemp(RetTy, "retval");
1144|  |
1145|  |    // Tell the epilog emitter to autorelease the result.  We do this
1146|  |    // now so that various specialized functions can suppress it
1147|  |    // during their IR-generation.
1148| 0|    if (getLangOpts().ObjCAutoRefCount &&
1149| 0|        !CurFnInfo->isReturnsRetained() &&
1150| 0|        RetTy->isObjCRetainableType())
1151| 0|      AutoreleaseResult = true;
1152| 0|  }
1153|  |
1154| 0|  EmitStartEHSpec(CurCodeDecl);
1155|  |
1156| 0|  PrologueCleanupDepth = EHStack.stable_begin();
1157|  |
1158|  |  // Emit OpenMP specific initialization of the device functions.
1159| 0|  if (getLangOpts().OpenMP && CurCodeDecl)
1160| 0|    CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);
1161|  |
1162|  |  // Handle emitting HLSL entry functions.
1163| 0|  if (D && D->hasAttr<HLSLShaderAttr>())
1164| 0|    CGM.getHLSLRuntime().emitEntryFunction(FD, Fn);
1165|  |
1166| 0|  EmitFunctionProlog(*CurFnInfo, CurFn, Args);
1167|  |
1168| 0|  if (const CXXMethodDecl *MD = dyn_cast_if_present<CXXMethodDecl>(D);
1169| 0|      MD && !MD->isStatic()) {
1170| 0|    bool IsInLambda =
1171| 0|        MD->getParent()->isLambda() && MD->getOverloadedOperator() == OO_Call;
1172| 0|    if (MD->isImplicitObjectMemberFunction())
1173| 0|      CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
1174| 0|    if (IsInLambda) {
1175|  |      // We're in a lambda; figure out the captures.
1176| 0|      MD->getParent()->getCaptureFields(LambdaCaptureFields,
1177| 0|                                        LambdaThisCaptureField);
1178| 0|      if (LambdaThisCaptureField) {
1179|  |        // If the lambda captures the object referred to by '*this' - either by
1180|  |        // value or by reference, make sure CXXThisValue points to the correct
1181|  |        // object.
1182|  |
1183|  |        // Get the lvalue for the field (which is a copy of the enclosing object
1184|  |        // or contains the address of the enclosing object).
1185| 0|        LValue ThisFieldLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
1186| 0|        if (!LambdaThisCaptureField->getType()->isPointerType()) {
1187|  |          // If the enclosing object was captured by value, just use its address.
1188| 0|          CXXThisValue = ThisFieldLValue.getAddress(*this).getPointer();
1189| 0|        } else {
1190|  |          // Load the lvalue pointed to by the field, since '*this' was captured
1191|  |          // by reference.
1192| 0|          CXXThisValue =
1193| 0|              EmitLoadOfLValue(ThisFieldLValue, SourceLocation()).getScalarVal();
1194| 0|        }
1195| 0|      }
1196| 0|      for (auto *FD : MD->getParent()->fields()) {
1197| 0|        if (FD->hasCapturedVLAType()) {
1198| 0|          auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
1199| 0|                                           SourceLocation()).getScalarVal();
1200| 0|          auto VAT = FD->getCapturedVLAType();
1201| 0|          VLASizeMap[VAT->getSizeExpr()] = ExprArg;
1202| 0|        }
1203| 0|      }
1204| 0|    } else if (MD->isImplicitObjectMemberFunction()) {
1205|  |      // Not in a lambda; just use 'this' from the method.
1206|  |      // FIXME: Should we generate a new load for each use of 'this'?  The
1207|  |      // fast register allocator would be happier...
1208| 0|      CXXThisValue = CXXABIThisValue;
1209| 0|    }
1210|  |
1211|  |    // Check the 'this' pointer once per function, if it's available.
1212| 0|    if (CXXABIThisValue) {
1213| 0|      SanitizerSet SkippedChecks;
1214| 0|      SkippedChecks.set(SanitizerKind::ObjectSize, true);
1215| 0|      QualType ThisTy = MD->getThisType();
1216|  |
1217|  |      // If this is the call operator of a lambda with no captures, it
1218|  |      // may have a static invoker function, which may call this operator with
1219|  |      // a null 'this' pointer.
1220| 0|      if (isLambdaCallOperator(MD) && MD->getParent()->isCapturelessLambda())
1221| 0|        SkippedChecks.set(SanitizerKind::Null, true);
1222|  |
1223| 0|      EmitTypeCheck(
1224| 0|          isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall : TCK_MemberCall,
1225| 0|          Loc, CXXABIThisValue, ThisTy, CXXABIThisAlignment, SkippedChecks);
1226| 0|    }
1227| 0|  }
1228|  |
1229|  |  // If any of the arguments have a variably modified type, make sure to
1230|  |  // emit the type size, but only if the function is not naked. Naked functions
1231|  |  // have no prolog to run this evaluation.
1232| 0|  if (!FD || !FD->hasAttr<NakedAttr>()) {
1233| 0|    for (const VarDecl *VD : Args) {
1234|  |      // Dig out the type as written from ParmVarDecls; it's unclear whether
1235|  |      // the standard (C99 6.9.1p10) requires this, but we're following the
1236|  |      // precedent set by gcc.
1237| 0|      QualType Ty;
1238| 0|      if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
1239| 0|        Ty = PVD->getOriginalType();
1240| 0|      else
1241| 0|        Ty = VD->getType();
1242|  |
1243| 0|      if (Ty->isVariablyModifiedType())
1244| 0|        EmitVariablyModifiedType(Ty);
1245| 0|    }
1246| 0|  }
1247|  |  // Emit a location at the end of the prologue.
1248| 0|  if (CGDebugInfo *DI = getDebugInfo())
1249| 0|    DI->EmitLocation(Builder, StartLoc);
1250|  |  // TODO: Do we need to handle this in two places like we do with
1251|  |  // target-features/target-cpu?
1252| 0|  if (CurFuncDecl)
1253| 0|    if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
1254| 0|      LargestVectorWidth = VecWidth->getVectorWidth();
1255| 0|}
1256|  |
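Note: the patchable-entry handling in StartFunction maps
__attribute__((patchable_function_entry(N, M))) (or the equivalent global
flags) onto function attributes, schematically:

    // "patchable-function-entry"="<N - M>"
    // "patchable-function-prefix"="<M>"   (only added when M != 0)
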
1257
0
void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
1258
0
  incrementProfileCounter(Body);
1259
0
  maybeCreateMCDCCondBitmap();
1260
0
  if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
1261
0
    EmitCompoundStmtWithoutScope(*S);
1262
0
  else
1263
0
    EmitStmt(Body);
1264
0
}
1265
1266
/// When instrumenting to collect profile data, the counts for some blocks
1267
/// such as switch cases need to not include the fall-through counts, so
1268
/// emit a branch around the instrumentation code. When not instrumenting,
1269
/// this just calls EmitBlock().
1270
void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
1271
0
                                               const Stmt *S) {
1272
0
  llvm::BasicBlock *SkipCountBB = nullptr;
1273
0
  if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr()) {
1274
    // When instrumenting for profiling, the fallthrough to certain
1275
    // statements needs to skip over the instrumentation code so that we
1276
    // get an accurate count.
1277
0
    SkipCountBB = createBasicBlock("skipcount");
1278
0
    EmitBranch(SkipCountBB);
1279
0
  }
1280
0
  EmitBlock(BB);
1281
0
  uint64_t CurrentCount = getCurrentProfileCount();
1282
0
  incrementProfileCounter(S);
1283
0
  setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
1284
0
  if (SkipCountBB)
1285
0
    EmitBlock(SkipCountBB);
1286
0
}
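// A hedged sketch of the fall-through situation handled above (the function
// name below is illustrative, not from this file):
int fallthrough_counts(int n, int acc) {
  switch (n) {
  case 0:
    acc += 1;
    // Falls through: without the skip-count branch, executions arriving
    // from 'case 0' would also bump the 'case 1' counter and distort the
    // per-case profile counts.
  case 1:
    acc += 2;
    break;
  }
  return acc;
}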
1287
1288
/// Tries to mark the given function nounwind based on the
1289
/// absence of any throwing instructions within it.  We believe this is
1290
/// lightweight enough to do at -O0.
1291
0
static void TryMarkNoThrow(llvm::Function *F) {
1292
  // LLVM treats 'nounwind' on a function as part of the type, so we
1293
  // can't do this on functions that can be replaced (interposed) at link time.
1294
0
  if (F->isInterposable()) return;
1295
1296
0
  for (llvm::BasicBlock &BB : *F)
1297
0
    for (llvm::Instruction &I : BB)
1298
0
      if (I.mayThrow())
1299
0
        return;
1300
1301
0
  F->setDoesNotThrow();
1302
0
}
1303
1304
QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
1305
0
                                               FunctionArgList &Args) {
1306
0
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1307
0
  QualType ResTy = FD->getReturnType();
1308
1309
0
  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
1310
0
  if (MD && MD->isImplicitObjectMemberFunction()) {
1311
0
    if (CGM.getCXXABI().HasThisReturn(GD))
1312
0
      ResTy = MD->getThisType();
1313
0
    else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
1314
0
      ResTy = CGM.getContext().VoidPtrTy;
1315
0
    CGM.getCXXABI().buildThisParam(*this, Args);
1316
0
  }
1317
1318
  // The base version of an inheriting constructor whose constructed base is a
1319
  // virtual base is not passed any arguments (because it doesn't actually call
1320
  // the inherited constructor).
1321
0
  bool PassedParams = true;
1322
0
  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
1323
0
    if (auto Inherited = CD->getInheritedConstructor())
1324
0
      PassedParams =
1325
0
          getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());
1326
1327
0
  if (PassedParams) {
1328
0
    for (auto *Param : FD->parameters()) {
1329
0
      Args.push_back(Param);
1330
0
      if (!Param->hasAttr<PassObjectSizeAttr>())
1331
0
        continue;
1332
1333
0
      auto *Implicit = ImplicitParamDecl::Create(
1334
0
          getContext(), Param->getDeclContext(), Param->getLocation(),
1335
0
          /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamKind::Other);
1336
0
      SizeArguments[Param] = Implicit;
1337
0
      Args.push_back(Implicit);
1338
0
    }
1339
0
  }
1340
1341
0
  if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
1342
0
    CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);
1343
1344
0
  return ResTy;
1345
0
}
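// A minimal sketch of the implicit size argument appended above, using
// Clang's documented pass_object_size attribute ('bounded_fill' and 'demo'
// are illustrative names, not part of this file):
#define SIZED __attribute__((pass_object_size(0)))

static int bounded_fill(char *buf SIZED, char value) {
  // Inside the callee, __builtin_object_size reads the hidden size_t
  // argument the caller passed alongside 'buf'.
  unsigned long n = __builtin_object_size(buf, 0);
  for (unsigned long i = 0; i < n; ++i)
    buf[i] = value;
  return (int)n;
}

int demo(void) {
  char storage[16];
  return bounded_fill(storage, 'x'); // hidden size argument == 16
}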
1346
1347
void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
1348
0
                                   const CGFunctionInfo &FnInfo) {
1349
0
  assert(Fn && "generating code for null Function");
1350
0
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1351
0
  CurGD = GD;
1352
1353
0
  FunctionArgList Args;
1354
0
  QualType ResTy = BuildFunctionArgList(GD, Args);
1355
1356
0
  if (FD->isInlineBuiltinDeclaration()) {
1357
    // When generating code for a builtin with an inline declaration, use a
1358
    // mangled name to hold the actual body, while keeping an external
1359
    // definition in case the function pointer is referenced somewhere.
1360
0
    std::string FDInlineName = (Fn->getName() + ".inline").str();
1361
0
    llvm::Module *M = Fn->getParent();
1362
0
    llvm::Function *Clone = M->getFunction(FDInlineName);
1363
0
    if (!Clone) {
1364
0
      Clone = llvm::Function::Create(Fn->getFunctionType(),
1365
0
                                     llvm::GlobalValue::InternalLinkage,
1366
0
                                     Fn->getAddressSpace(), FDInlineName, M);
1367
0
      Clone->addFnAttr(llvm::Attribute::AlwaysInline);
1368
0
    }
1369
0
    Fn->setLinkage(llvm::GlobalValue::ExternalLinkage);
1370
0
    Fn = Clone;
1371
0
  } else {
1372
    // Detect the unusual situation where an inline version is shadowed by a
1373
    // non-inline version. In that case we should pick the external one
1374
    // everywhere. That's GCC behavior too. Unfortunately, I cannot find a way
1375
    // to detect that situation before we reach codegen, so do some late
1376
    // replacement.
1377
0
    for (const FunctionDecl *PD = FD->getPreviousDecl(); PD;
1378
0
         PD = PD->getPreviousDecl()) {
1379
0
      if (LLVM_UNLIKELY(PD->isInlineBuiltinDeclaration())) {
1380
0
        std::string FDInlineName = (Fn->getName() + ".inline").str();
1381
0
        llvm::Module *M = Fn->getParent();
1382
0
        if (llvm::Function *Clone = M->getFunction(FDInlineName)) {
1383
0
          Clone->replaceAllUsesWith(Fn);
1384
0
          Clone->eraseFromParent();
1385
0
        }
1386
0
        break;
1387
0
      }
1388
0
    }
1389
0
  }
1390
1391
  // Check if we should generate debug info for this function.
1392
0
  if (FD->hasAttr<NoDebugAttr>()) {
1393
    // Clear non-distinct debug info that was possibly attached to the function
1394
    // due to an earlier declaration without the nodebug attribute
1395
0
    Fn->setSubprogram(nullptr);
1396
    // Disable debug info indefinitely for this function
1397
0
    DebugInfo = nullptr;
1398
0
  }
1399
1400
  // The function might not have a body if we're generating thunks for a
1401
  // function declaration.
1402
0
  SourceRange BodyRange;
1403
0
  if (Stmt *Body = FD->getBody())
1404
0
    BodyRange = Body->getSourceRange();
1405
0
  else
1406
0
    BodyRange = FD->getLocation();
1407
0
  CurEHLocation = BodyRange.getEnd();
1408
1409
  // Use the location of the start of the function to determine where
1410
  // the function definition is located. By default use the location
1411
  // of the declaration as the location for the subprogram. A function
1412
  // may lack a declaration in the source code if it is created by code
1413
  // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
1414
0
  SourceLocation Loc = FD->getLocation();
1415
1416
  // If this is a function specialization then use the pattern body
1417
  // as the location for the function.
1418
0
  if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
1419
0
    if (SpecDecl->hasBody(SpecDecl))
1420
0
      Loc = SpecDecl->getLocation();
1421
1422
0
  Stmt *Body = FD->getBody();
1423
1424
0
  if (Body) {
1425
    // Coroutines always emit lifetime markers.
1426
0
    if (isa<CoroutineBodyStmt>(Body))
1427
0
      ShouldEmitLifetimeMarkers = true;
1428
1429
    // Initialize helper which will detect jumps which can cause invalid
1430
    // lifetime markers.
1431
0
    if (ShouldEmitLifetimeMarkers)
1432
0
      Bypasses.Init(Body);
1433
0
  }
1434
1435
  // Emit the standard function prologue.
1436
0
  StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());
1437
1438
  // Save parameters for coroutine function.
1439
0
  if (Body && isa_and_nonnull<CoroutineBodyStmt>(Body))
1440
0
    llvm::append_range(FnArgs, FD->parameters());
1441
1442
  // Ensure that the function adheres to the forward progress guarantee, which
1443
  // is required by certain optimizations.
1444
0
  if (checkIfFunctionMustProgress())
1445
0
    CurFn->addFnAttr(llvm::Attribute::MustProgress);
1446
1447
  // Generate the body of the function.
1448
0
  PGO.assignRegionCounters(GD, CurFn);
1449
0
  if (isa<CXXDestructorDecl>(FD))
1450
0
    EmitDestructorBody(Args);
1451
0
  else if (isa<CXXConstructorDecl>(FD))
1452
0
    EmitConstructorBody(Args);
1453
0
  else if (getLangOpts().CUDA &&
1454
0
           !getLangOpts().CUDAIsDevice &&
1455
0
           FD->hasAttr<CUDAGlobalAttr>())
1456
0
    CGM.getCUDARuntime().emitDeviceStub(*this, Args);
1457
0
  else if (isa<CXXMethodDecl>(FD) &&
1458
0
           cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
1459
    // The lambda static invoker function is special, because it forwards or
1460
    // clones the body of the function call operator (but is actually static).
1461
0
    EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
1462
0
  } else if (isa<CXXMethodDecl>(FD) &&
1463
0
             isLambdaCallOperator(cast<CXXMethodDecl>(FD)) &&
1464
0
             !FnInfo.isDelegateCall() &&
1465
0
             cast<CXXMethodDecl>(FD)->getParent()->getLambdaStaticInvoker() &&
1466
0
             hasInAllocaArg(cast<CXXMethodDecl>(FD))) {
1467
    // If emitting a lambda with static invoker on X86 Windows, change
1468
    // the call operator body.
1469
    // Make sure that this is a call operator with an inalloca arg and check
1470
    // for delegate call to make sure this is the original call op and not the
1471
    // new forwarding function for the static invoker.
1472
0
    EmitLambdaInAllocaCallOpBody(cast<CXXMethodDecl>(FD));
1473
0
  } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
1474
0
             (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
1475
0
              cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
1476
    // Implicit copy-assignment gets the same special treatment as implicit
1477
    // copy-constructors.
1478
0
    emitImplicitAssignmentOperatorBody(Args);
1479
0
  } else if (Body) {
1480
0
    EmitFunctionBody(Body);
1481
0
  } else
1482
0
    llvm_unreachable("no definition for emitted function");
1483
1484
  // C++11 [stmt.return]p2:
1485
  //   Flowing off the end of a function [...] results in undefined behavior in
1486
  //   a value-returning function.
1487
  // C11 6.9.1p12:
1488
  //   If the '}' that terminates a function is reached, and the value of the
1489
  //   function call is used by the caller, the behavior is undefined.
1490
0
  if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
1491
0
      !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
1492
0
    bool ShouldEmitUnreachable =
1493
0
        CGM.getCodeGenOpts().StrictReturn ||
1494
0
        !CGM.MayDropFunctionReturn(FD->getASTContext(), FD->getReturnType());
1495
0
    if (SanOpts.has(SanitizerKind::Return)) {
1496
0
      SanitizerScope SanScope(this);
1497
0
      llvm::Value *IsFalse = Builder.getFalse();
1498
0
      EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
1499
0
                SanitizerHandler::MissingReturn,
1500
0
                EmitCheckSourceLocation(FD->getLocation()), std::nullopt);
1501
0
    } else if (ShouldEmitUnreachable) {
1502
0
      if (CGM.getCodeGenOpts().OptimizationLevel == 0)
1503
0
        EmitTrapCall(llvm::Intrinsic::trap);
1504
0
    }
1505
0
    if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {
1506
0
      Builder.CreateUnreachable();
1507
0
      Builder.ClearInsertionPoint();
1508
0
    }
1509
0
  }
1510
1511
  // Emit the standard function epilogue.
1512
0
  FinishFunction(BodyRange.getEnd());
1513
1514
  // If we haven't marked the function nothrow through other means, do
1515
  // a quick pass now to see if we can.
1516
0
  if (!CurFn->doesNotThrow())
1517
0
    TryMarkNoThrow(CurFn);
1518
0
}
1519
1520
/// ContainsLabel - Return true if the statement contains a label in it.  If
1521
/// this statement is not executed normally, the absence of a label means
1522
/// that we can just remove the code.
1523
0
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
1524
  // Null statement, not a label!
1525
0
  if (!S) return false;
1526
1527
  // If this is a label, we have to emit the code; consider something like:
1528
  // if (0) {  ...  foo:  bar(); }  goto foo;
1529
  //
1530
  // TODO: If anyone cared, we could track __label__'s, since we know that you
1531
  // can't jump to one from outside their declared region.
1532
0
  if (isa<LabelStmt>(S))
1533
0
    return true;
1534
1535
  // If this is a case/default statement, and we haven't seen a switch, we have
1536
  // to emit the code.
1537
0
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
1538
0
    return true;
1539
1540
  // If this is a switch statement, we want to ignore cases below it.
1541
0
  if (isa<SwitchStmt>(S))
1542
0
    IgnoreCaseStmts = true;
1543
1544
  // Scan subexpressions for verboten labels.
1545
0
  for (const Stmt *SubStmt : S->children())
1546
0
    if (ContainsLabel(SubStmt, IgnoreCaseStmts))
1547
0
      return true;
1548
1549
0
  return false;
1550
0
}
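// A hedged sketch of the case described above ('must_emit_dead_block' is an
// illustrative name): the 'if (0)' body is never entered normally, but the
// label inside it is a goto target, so the block cannot be dropped.
int must_emit_dead_block(int n) {
  if (0) {
  foo:
    return n + 1;
  }
  if (n > 0)
    goto foo; // jumps into the otherwise-dead block
  return n;
}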
1551
1552
/// containsBreak - Return true if the statement contains a break out of it.
1553
/// If the statement (recursively) contains a switch or loop with a break
1554
/// inside of it, this is fine.
1555
0
bool CodeGenFunction::containsBreak(const Stmt *S) {
1556
  // Null statement, not a break!
1557
0
  if (!S) return false;
1558
1559
  // If this is a switch or loop that defines its own break scope, then we can
1560
  // include it and anything inside of it.
1561
0
  if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
1562
0
      isa<ForStmt>(S))
1563
0
    return false;
1564
1565
0
  if (isa<BreakStmt>(S))
1566
0
    return true;
1567
1568
  // Scan subexpressions for verboten breaks.
1569
0
  for (const Stmt *SubStmt : S->children())
1570
0
    if (containsBreak(SubStmt))
1571
0
      return true;
1572
1573
0
  return false;
1574
0
}
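// An illustrative sketch of the break-scope rule above: the inner 'break'
// belongs to the for-loop and never escapes it, so containsBreak is false
// for the loop statement; the final 'break' escapes to the enclosing switch.
void break_scopes(int n) {
  switch (n) {
  case 0:
    for (int i = 0; i < n; ++i)
      if (i == 3)
        break; // contained by the loop's own break scope
    break;     // this one breaks out of the statement being scanned
  default:
    break;
  }
}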
1575
1576
0
bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) {
1577
0
  if (!S) return false;
1578
1579
  // Some statement kinds add a scope and thus never add a decl to the current
1580
  // scope. Note, this list is longer than the list of statements that might
1581
  // have an unscoped decl nested within them, but this way is conservatively
1582
  // correct even if more statement kinds are added.
1583
0
  if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
1584
0
      isa<DoStmt>(S) || isa<ForStmt>(S) || isa<CompoundStmt>(S) ||
1585
0
      isa<CXXForRangeStmt>(S) || isa<CXXTryStmt>(S) ||
1586
0
      isa<ObjCForCollectionStmt>(S) || isa<ObjCAtTryStmt>(S))
1587
0
    return false;
1588
1589
0
  if (isa<DeclStmt>(S))
1590
0
    return true;
1591
1592
0
  for (const Stmt *SubStmt : S->children())
1593
0
    if (mightAddDeclToScope(SubStmt))
1594
0
      return true;
1595
1596
0
  return false;
1597
0
}
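// A short sketch of the distinction drawn above (illustrative names): the
// bare DeclStmt under 'case 0' adds 'i' to the switch's scope, while the
// compound statement under 'case 1' opens its own scope and leaks nothing.
void decl_scopes(int n) {
  switch (n) {
  case 0:;
    int i; // DeclStmt: adds a declaration to the current scope
    i = n;
    break;
  case 1: {
    int j = n; // scoped by the compound statement; never escapes
    (void)j;
    break;
  }
  }
}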
1598
1599
/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1600
/// to a constant, or if it does but contains a label, return false.  If it
1601
/// constant folds return true and set the boolean result in Result.
1602
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1603
                                                   bool &ResultBool,
1604
0
                                                   bool AllowLabels) {
1605
  // If MC/DC is enabled, disable folding so that we can instrument all
1606
  // conditions to yield complete test vectors. We still keep track of
1607
  // folded conditions during region mapping and visualization.
1608
0
  if (!AllowLabels && CGM.getCodeGenOpts().hasProfileClangInstr() &&
1609
0
      CGM.getCodeGenOpts().MCDCCoverage)
1610
0
    return false;
1611
1612
0
  llvm::APSInt ResultInt;
1613
0
  if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels))
1614
0
    return false;
1615
1616
0
  ResultBool = ResultInt.getBoolValue();
1617
0
  return true;
1618
0
}
1619
1620
/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1621
/// to a constant, or if it does but contains a label, return false.  If it
1622
/// constant folds return true and set the folded value.
1623
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1624
                                                   llvm::APSInt &ResultInt,
1625
0
                                                   bool AllowLabels) {
1626
  // FIXME: Rename and handle conversion of other evaluatable things
1627
  // to bool.
1628
0
  Expr::EvalResult Result;
1629
0
  if (!Cond->EvaluateAsInt(Result, getContext()))
1630
0
    return false;  // Not foldable, not integer or not fully evaluatable.
1631
1632
0
  llvm::APSInt Int = Result.Val.getInt();
1633
0
  if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
1634
0
    return false;  // Contains a label.
1635
1636
0
  ResultInt = Int;
1637
0
  return true;
1638
0
}
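// For intuition, a hedged example of a condition that folds: the branch
// below is decided entirely at compile time ('fold_example' is made up),
// so no runtime test needs to be emitted.
int fold_example(int x) {
  if (sizeof(int) == 4)
    return x + 1;
  return x - 1;
}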
1639
1640
/// Strip parentheses and simplistic logical-NOT operators.
1641
0
const Expr *CodeGenFunction::stripCond(const Expr *C) {
1642
0
  while (const UnaryOperator *Op = dyn_cast<UnaryOperator>(C->IgnoreParens())) {
1643
0
    if (Op->getOpcode() != UO_LNot)
1644
0
      break;
1645
0
    C = Op->getSubExpr();
1646
0
  }
1647
0
  return C->IgnoreParens();
1648
0
}
1649
1650
/// Determine whether the given condition is an instrumentable condition
1651
/// (i.e. no "&&" or "||").
1652
0
bool CodeGenFunction::isInstrumentedCondition(const Expr *C) {
1653
0
  const BinaryOperator *BOp = dyn_cast<BinaryOperator>(stripCond(C));
1654
0
  return (!BOp || !BOp->isLogicalOp());
1655
0
}
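// In MC/DC terms, a sketch of what counts as an instrumentable condition:
// the three leaf comparisons below are instrumented individually, while the
// '&&' and '||' that combine them are not, per the check above.
int leaf_conditions(int a, int b, int c) {
  if (a > 0 && (b != 0 || c < 10))
    return 1;
  return 0;
}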
1656
1657
/// EmitBranchToCounterBlock - Emit a conditional branch to a new block that
1658
/// increments a profile counter based on the semantics of the given logical
1659
/// operator opcode.  This is used to instrument branch condition coverage for
1660
/// logical operators.
1661
void CodeGenFunction::EmitBranchToCounterBlock(
1662
    const Expr *Cond, BinaryOperator::Opcode LOp, llvm::BasicBlock *TrueBlock,
1663
    llvm::BasicBlock *FalseBlock, uint64_t TrueCount /* = 0 */,
1664
0
    Stmt::Likelihood LH /* =None */, const Expr *CntrIdx /* = nullptr */) {
1665
  // If not instrumenting, just emit a branch.
1666
0
  bool InstrumentRegions = CGM.getCodeGenOpts().hasProfileClangInstr();
1667
0
  if (!InstrumentRegions || !isInstrumentedCondition(Cond))
1668
0
    return EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount, LH);
1669
1670
0
  llvm::BasicBlock *ThenBlock = nullptr;
1671
0
  llvm::BasicBlock *ElseBlock = nullptr;
1672
0
  llvm::BasicBlock *NextBlock = nullptr;
1673
1674
  // Create the block we'll use to increment the appropriate counter.
1675
0
  llvm::BasicBlock *CounterIncrBlock = createBasicBlock("lop.rhscnt");
1676
1677
  // Set block pointers according to Logical-AND (BO_LAnd) semantics. This
1678
  // means we need to evaluate the condition and increment the counter on TRUE:
1679
  //
1680
  // if (Cond)
1681
  //   goto CounterIncrBlock;
1682
  // else
1683
  //   goto FalseBlock;
1684
  //
1685
  // CounterIncrBlock:
1686
  //   Counter++;
1687
  //   goto TrueBlock;
1688
1689
0
  if (LOp == BO_LAnd) {
1690
0
    ThenBlock = CounterIncrBlock;
1691
0
    ElseBlock = FalseBlock;
1692
0
    NextBlock = TrueBlock;
1693
0
  }
1694
1695
  // Set block pointers according to Logical-OR (BO_LOr) semantics. This means
1696
  // we need to evaluate the condition and increment the counter on FALSE:
1697
  //
1698
  // if (Cond)
1699
  //   goto TrueBlock;
1700
  // else
1701
  //   goto CounterIncrBlock;
1702
  //
1703
  // CounterIncrBlock:
1704
  //   Counter++;
1705
  //   goto FalseBlock;
1706
1707
0
  else if (LOp == BO_LOr) {
1708
0
    ThenBlock = TrueBlock;
1709
0
    ElseBlock = CounterIncrBlock;
1710
0
    NextBlock = FalseBlock;
1711
0
  } else {
1712
0
    llvm_unreachable("Expected Opcode must be that of a Logical Operator");
1713
0
  }
1714
1715
  // Emit Branch based on condition.
1716
0
  EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, TrueCount, LH);
1717
1718
  // Emit the block containing the counter increment(s).
1719
0
  EmitBlock(CounterIncrBlock);
1720
1721
  // Increment corresponding counter; if index not provided, use Cond as index.
1722
0
  incrementProfileCounter(CntrIdx ? CntrIdx : Cond);
1723
1724
  // Go to the next block.
1725
0
  EmitBranch(NextBlock);
1726
0
}
1727
1728
/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
1729
/// statement) to the specified blocks.  Based on the condition, this might try
1730
/// to simplify the codegen of the conditional based on the branch.
1731
/// \param LH The value of the likelihood attribute on the True branch.
1732
/// \param ConditionalOp Used by MC/DC code coverage to track the result of the
1733
/// ConditionalOperator (ternary) through a recursive call for the operator's
1734
/// LHS and RHS nodes.
1735
void CodeGenFunction::EmitBranchOnBoolExpr(
1736
    const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock,
1737
0
    uint64_t TrueCount, Stmt::Likelihood LH, const Expr *ConditionalOp) {
1738
0
  Cond = Cond->IgnoreParens();
1739
1740
0
  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
1741
    // Handle X && Y in a condition.
1742
0
    if (CondBOp->getOpcode() == BO_LAnd) {
1743
0
      MCDCLogOpStack.push_back(CondBOp);
1744
1745
      // If we have "1 && X", simplify the code.  "0 && X" would have constant
1746
      // folded if the case was simple enough.
1747
0
      bool ConstantBool = false;
1748
0
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1749
0
          ConstantBool) {
1750
        // br(1 && X) -> br(X).
1751
0
        incrementProfileCounter(CondBOp);
1752
0
        EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock,
1753
0
                                 FalseBlock, TrueCount, LH);
1754
0
        MCDCLogOpStack.pop_back();
1755
0
        return;
1756
0
      }
1757
1758
      // If we have "X && 1", simplify the code to use an uncond branch.
1759
      // "X && 0" would have been constant folded to 0.
1760
0
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1761
0
          ConstantBool) {
1762
        // br(X && 1) -> br(X).
1763
0
        EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LAnd, TrueBlock,
1764
0
                                 FalseBlock, TrueCount, LH, CondBOp);
1765
0
        MCDCLogOpStack.pop_back();
1766
0
        return;
1767
0
      }
1768
1769
      // Emit the LHS as a conditional.  If the LHS conditional is false, we
1770
      // want to jump to the FalseBlock.
1771
0
      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
1772
      // The counter tells us how often we evaluate RHS, and all of TrueCount
1773
      // can be propagated to that branch.
1774
0
      uint64_t RHSCount = getProfileCount(CondBOp->getRHS());
1775
1776
0
      ConditionalEvaluation eval(*this);
1777
0
      {
1778
0
        ApplyDebugLocation DL(*this, Cond);
1779
        // Propagate the likelihood attribute like __builtin_expect
1780
        // __builtin_expect(X && Y, 1) -> X and Y are likely
1781
        // __builtin_expect(X && Y, 0) -> only Y is unlikely
1782
0
        EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount,
1783
0
                             LH == Stmt::LH_Unlikely ? Stmt::LH_None : LH);
1784
0
        EmitBlock(LHSTrue);
1785
0
      }
1786
1787
0
      incrementProfileCounter(CondBOp);
1788
0
      setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1789
1790
      // Any temporaries created here are conditional.
1791
0
      eval.begin(*this);
1792
0
      EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock,
1793
0
                               FalseBlock, TrueCount, LH);
1794
0
      eval.end(*this);
1795
0
      MCDCLogOpStack.pop_back();
1796
0
      return;
1797
0
    }
1798
1799
0
    if (CondBOp->getOpcode() == BO_LOr) {
1800
0
      MCDCLogOpStack.push_back(CondBOp);
1801
1802
      // If we have "0 || X", simplify the code.  "1 || X" would have constant
1803
      // folded if the case was simple enough.
1804
0
      bool ConstantBool = false;
1805
0
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1806
0
          !ConstantBool) {
1807
        // br(0 || X) -> br(X).
1808
0
        incrementProfileCounter(CondBOp);
1809
0
        EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock,
1810
0
                                 FalseBlock, TrueCount, LH);
1811
0
        MCDCLogOpStack.pop_back();
1812
0
        return;
1813
0
      }
1814
1815
      // If we have "X || 0", simplify the code to use an uncond branch.
1816
      // "X || 1" would have been constant folded to 1.
1817
0
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1818
0
          !ConstantBool) {
1819
        // br(X || 0) -> br(X).
1820
0
        EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LOr, TrueBlock,
1821
0
                                 FalseBlock, TrueCount, LH, CondBOp);
1822
0
        MCDCLogOpStack.pop_back();
1823
0
        return;
1824
0
      }
1825
      // Emit the LHS as a conditional.  If the LHS conditional is true, we
1826
      // want to jump to the TrueBlock.
1827
0
      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
1828
      // We have the count for entry to the RHS and for the whole expression
1829
      // being true, so we can divvy up the true count between the short circuit and
1830
      // the RHS.
1831
0
      uint64_t LHSCount =
1832
0
          getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
1833
0
      uint64_t RHSCount = TrueCount - LHSCount;
1834
1835
0
      ConditionalEvaluation eval(*this);
1836
0
      {
1837
        // Propagate the likelihood attribute like __builtin_expect
1838
        // __builtin_expect(X || Y, 1) -> only Y is likely
1839
        // __builtin_expect(X || Y, 0) -> both X and Y are unlikely
1840
0
        ApplyDebugLocation DL(*this, Cond);
1841
0
        EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount,
1842
0
                             LH == Stmt::LH_Likely ? Stmt::LH_None : LH);
1843
0
        EmitBlock(LHSFalse);
1844
0
      }
1845
1846
0
      incrementProfileCounter(CondBOp);
1847
0
      setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1848
1849
      // Any temporaries created here are conditional.
1850
0
      eval.begin(*this);
1851
0
      EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock, FalseBlock,
1852
0
                               RHSCount, LH);
1853
1854
0
      eval.end(*this);
1855
0
      MCDCLogOpStack.pop_back();
1856
0
      return;
1857
0
    }
1858
0
  }
1859
1860
0
  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
1861
    // br(!x, t, f) -> br(x, f, t)
1862
    // Avoid doing this optimization when instrumenting a condition for MC/DC.
1863
    // LNot is taken as part of the condition for simplicity, and changing its
1864
    // sense negatively impacts test vector tracking.
1865
0
    bool MCDCCondition = CGM.getCodeGenOpts().hasProfileClangInstr() &&
1866
0
                         CGM.getCodeGenOpts().MCDCCoverage &&
1867
0
                         isInstrumentedCondition(Cond);
1868
0
    if (CondUOp->getOpcode() == UO_LNot && !MCDCCondition) {
1869
      // Negate the count.
1870
0
      uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
1871
      // The values of the enum are chosen to make this negation possible.
1872
0
      LH = static_cast<Stmt::Likelihood>(-LH);
1873
      // Negate the condition and swap the destination blocks.
1874
0
      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
1875
0
                                  FalseCount, LH);
1876
0
    }
1877
0
  }
1878
1879
0
  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
1880
    // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
1881
0
    llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
1882
0
    llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
1883
1884
    // The ConditionalOperator itself has no likelihood information for its
1885
    // true and false branches. This matches the behavior of __builtin_expect.
1886
0
    ConditionalEvaluation cond(*this);
1887
0
    EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
1888
0
                         getProfileCount(CondOp), Stmt::LH_None);
1889
1890
    // When computing PGO branch weights, we only know the overall count for
1891
    // the true block. This code is essentially doing tail duplication of the
1892
    // naive code-gen, introducing new edges for which counts are not
1893
    // available. Divide the counts proportionally between the LHS and RHS of
1894
    // the conditional operator.
1895
0
    uint64_t LHSScaledTrueCount = 0;
1896
0
    if (TrueCount) {
1897
0
      double LHSRatio =
1898
0
          getProfileCount(CondOp) / (double)getCurrentProfileCount();
1899
0
      LHSScaledTrueCount = TrueCount * LHSRatio;
1900
0
    }
1901
1902
0
    cond.begin(*this);
1903
0
    EmitBlock(LHSBlock);
1904
0
    incrementProfileCounter(CondOp);
1905
0
    {
1906
0
      ApplyDebugLocation DL(*this, Cond);
1907
0
      EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
1908
0
                           LHSScaledTrueCount, LH, CondOp);
1909
0
    }
1910
0
    cond.end(*this);
1911
1912
0
    cond.begin(*this);
1913
0
    EmitBlock(RHSBlock);
1914
0
    EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
1915
0
                         TrueCount - LHSScaledTrueCount, LH, CondOp);
1916
0
    cond.end(*this);
1917
1918
0
    return;
1919
0
  }
1920
1921
0
  if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
1922
    // Conditional operator handling can give us a throw expression as a
1923
    // condition for a case like:
1924
    //   br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f))
1925
    // Fold this to:
1926
    //   br(c, throw x, br(y, t, f))
1927
0
    EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
1928
0
    return;
1929
0
  }
1930
1931
  // Emit the code with the fully general case.
1932
0
  llvm::Value *CondV;
1933
0
  {
1934
0
    ApplyDebugLocation DL(*this, Cond);
1935
0
    CondV = EvaluateExprAsBool(Cond);
1936
0
  }
1937
1938
  // If not at the top of the logical operator nest, update MCDC temp with the
1939
  // boolean result of the evaluated condition.
1940
0
  if (!MCDCLogOpStack.empty()) {
1941
0
    const Expr *MCDCBaseExpr = Cond;
1942
    // When a nested ConditionalOperator (ternary) is encountered in a boolean
1943
    // expression, MC/DC tracks the result of the ternary, and this is tied to
1944
    // the ConditionalOperator expression and not the ternary's LHS or RHS. If
1945
    // this is the case, the ConditionalOperator expression is passed through
1946
    // the ConditionalOp parameter and then used as the MCDC base expression.
1947
0
    if (ConditionalOp)
1948
0
      MCDCBaseExpr = ConditionalOp;
1949
1950
0
    maybeUpdateMCDCCondBitmap(MCDCBaseExpr, CondV);
1951
0
  }
1952
1953
0
  llvm::MDNode *Weights = nullptr;
1954
0
  llvm::MDNode *Unpredictable = nullptr;
1955
1956
  // If the branch has a condition wrapped by __builtin_unpredictable,
1957
  // create metadata that specifies that the branch is unpredictable.
1958
  // Don't bother if not optimizing because that metadata would not be used.
1959
0
  auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
1960
0
  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
1961
0
    auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
1962
0
    if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
1963
0
      llvm::MDBuilder MDHelper(getLLVMContext());
1964
0
      Unpredictable = MDHelper.createUnpredictable();
1965
0
    }
1966
0
  }
1967
1968
  // If there is likelihood knowledge for the cond, lower it.
1969
  // Note that if not optimizing, this won't emit anything.
1970
0
  llvm::Value *NewCondV = emitCondLikelihoodViaExpectIntrinsic(CondV, LH);
1971
0
  if (CondV != NewCondV)
1972
0
    CondV = NewCondV;
1973
0
  else {
1974
    // Otherwise, lower profile counts. Note that we do this even at -O0.
1975
0
    uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
1976
0
    Weights = createProfileWeights(TrueCount, CurrentCount - TrueCount);
1977
0
  }
1978
1979
0
  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable);
1980
0
}
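// A compact sketch of the short-circuit lowering above, using the block
// labels this function creates. For 'x && y', the LHS branches either to
// "land.lhs.true" (where the RHS then decides between the true and false
// destinations) or straight to the false destination, so the RHS is never
// evaluated when the LHS is false.
int short_circuit(int x, int y) {
  if (x && y)
    return 1;
  return 0;
}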
1981
1982
/// ErrorUnsupported - Print out an error that codegen doesn't support the
1983
/// specified stmt yet.
1984
0
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
1985
0
  CGM.ErrorUnsupported(S, Type);
1986
0
}
1987
1988
/// emitNonZeroVLAInit - Emit the "zero" initialization of a
1989
/// variable-length array whose elements have a non-zero bit-pattern.
1990
///
1991
/// \param baseType the inner-most element type of the array
1992
/// \param src - a char* pointing to the bit-pattern for a single
1993
/// base element of the array
1994
/// \param sizeInChars - the total size of the VLA, in chars
1995
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
1996
                               Address dest, Address src,
1997
0
                               llvm::Value *sizeInChars) {
1998
0
  CGBuilderTy &Builder = CGF.Builder;
1999
2000
0
  CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
2001
0
  llvm::Value *baseSizeInChars
2002
0
    = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());
2003
2004
0
  Address begin = dest.withElementType(CGF.Int8Ty);
2005
0
  llvm::Value *end = Builder.CreateInBoundsGEP(
2006
0
      begin.getElementType(), begin.getPointer(), sizeInChars, "vla.end");
2007
2008
0
  llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
2009
0
  llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
2010
0
  llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");
2011
2012
  // Make a loop over the VLA.  C99 guarantees that the VLA element
2013
  // count must be nonzero.
2014
0
  CGF.EmitBlock(loopBB);
2015
2016
0
  llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
2017
0
  cur->addIncoming(begin.getPointer(), originBB);
2018
2019
0
  CharUnits curAlign =
2020
0
    dest.getAlignment().alignmentOfArrayElement(baseSize);
2021
2022
  // memcpy the individual element bit-pattern.
2023
0
  Builder.CreateMemCpy(Address(cur, CGF.Int8Ty, curAlign), src, baseSizeInChars,
2024
0
                       /*volatile*/ false);
2025
2026
  // Go to the next element.
2027
0
  llvm::Value *next =
2028
0
    Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");
2029
2030
  // Leave if that's the end of the VLA.
2031
0
  llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
2032
0
  Builder.CreateCondBr(done, contBB, loopBB);
2033
0
  cur->addIncoming(next, loopBB);
2034
2035
0
  CGF.EmitBlock(contBB);
2036
0
}
2037
2038
void
2039
0
CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
2040
  // Ignore empty classes in C++.
2041
0
  if (getLangOpts().CPlusPlus) {
2042
0
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
2043
0
      if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
2044
0
        return;
2045
0
    }
2046
0
  }
2047
2048
0
  if (DestPtr.getElementType() != Int8Ty)
2049
0
    DestPtr = DestPtr.withElementType(Int8Ty);
2050
2051
  // Get size and alignment info for this aggregate.
2052
0
  CharUnits size = getContext().getTypeSizeInChars(Ty);
2053
2054
0
  llvm::Value *SizeVal;
2055
0
  const VariableArrayType *vla;
2056
2057
  // Don't bother emitting a zero-byte memset.
2058
0
  if (size.isZero()) {
2059
    // But note that getTypeInfo returns 0 for a VLA.
2060
0
    if (const VariableArrayType *vlaType =
2061
0
          dyn_cast_or_null<VariableArrayType>(
2062
0
                                          getContext().getAsArrayType(Ty))) {
2063
0
      auto VlaSize = getVLASize(vlaType);
2064
0
      SizeVal = VlaSize.NumElts;
2065
0
      CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
2066
0
      if (!eltSize.isOne())
2067
0
        SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
2068
0
      vla = vlaType;
2069
0
    } else {
2070
0
      return;
2071
0
    }
2072
0
  } else {
2073
0
    SizeVal = CGM.getSize(size);
2074
0
    vla = nullptr;
2075
0
  }
2076
2077
  // If the type contains a pointer to a data member, we can't memset it to zero.
2078
  // Instead, create a null constant and copy it to the destination.
2079
  // TODO: there are other patterns besides zero that we can usefully memset,
2080
  // like -1, which happens to be the pattern used by member-pointers.
2081
0
  if (!CGM.getTypes().isZeroInitializable(Ty)) {
2082
    // For a VLA, emit a single element, then splat that over the VLA.
2083
0
    if (vla) Ty = getContext().getBaseElementType(vla);
2084
2085
0
    llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
2086
2087
0
    llvm::GlobalVariable *NullVariable =
2088
0
      new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
2089
0
                               /*isConstant=*/true,
2090
0
                               llvm::GlobalVariable::PrivateLinkage,
2091
0
                               NullConstant, Twine());
2092
0
    CharUnits NullAlign = DestPtr.getAlignment();
2093
0
    NullVariable->setAlignment(NullAlign.getAsAlign());
2094
0
    Address SrcPtr(NullVariable, Builder.getInt8Ty(), NullAlign);
2095
2096
0
    if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
2097
2098
    // Get and call the appropriate llvm.memcpy overload.
2099
0
    Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
2100
0
    return;
2101
0
  }
2102
2103
  // Otherwise, just memset the whole thing to zero.  This is legal
2104
  // because in LLVM, all default initializers (other than the ones we just
2105
  // handled above) are guaranteed to have a bit pattern of all zeros.
2106
0
  Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
2107
0
}
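// A hedged illustration of the zero-initializable test above. Under the
// Itanium C++ ABI a null pointer-to-data-member is stored as -1, so the
// "null" value of the second struct is a non-zero bit pattern and cannot
// come from a memset ('zero_init_demo' is an illustrative name).
struct Widget { int a; };

struct PlainData { int x; float y; };     // all-zero null: memset suffices

struct WithMemberPtr { int Widget::*f; }; // null is the bit pattern -1

void zero_init_demo() {
  PlainData p{};     // may be lowered as a memset of zeros
  WithMemberPtr w{}; // copied from a private global holding the null constant
  (void)p;
  (void)w;
}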
2108
2109
0
llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
2110
  // Make sure that there is a block for the indirect goto.
2111
0
  if (!IndirectBranch)
2112
0
    GetIndirectGotoBlock();
2113
2114
0
  llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();
2115
2116
  // Make sure the indirect branch includes all of the address-taken blocks.
2117
0
  IndirectBranch->addDestination(BB);
2118
0
  return llvm::BlockAddress::get(CurFn, BB);
2119
0
}
2120
2121
0
llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
2122
  // If we already made the indirect branch for indirect goto, return its block.
2123
0
  if (IndirectBranch) return IndirectBranch->getParent();
2124
2125
0
  CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));
2126
2127
  // Create the PHI node that indirect gotos will add entries to.
2128
0
  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
2129
0
                                              "indirect.goto.dest");
2130
2131
  // Create the indirect branch instruction.
2132
0
  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
2133
0
  return IndirectBranch->getParent();
2134
0
}
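// These two helpers back the GNU address-of-label and computed-goto
// extensions; a minimal sketch ('dispatch' is an illustrative name):
int dispatch(int n) {
  // '&&even' / '&&odd' go through GetAddrOfLabel; 'goto *...' branches via
  // the shared "indirectgoto" block, whose PHI fans out to every
  // address-taken label registered on the indirect branch.
  static void *targets[] = { &&even, &&odd };
  goto *targets[n & 1];
even:
  return 0;
odd:
  return 1;
}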
2135
2136
/// Computes the length of an array in elements, as well as the base
2137
/// element type and a properly-typed first element pointer.
2138
llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
2139
                                              QualType &baseType,
2140
0
                                              Address &addr) {
2141
0
  const ArrayType *arrayType = origArrayType;
2142
2143
  // If it's a VLA, we have to load the stored size.  Note that
2144
  // this is the size of the VLA in elements, not its size in bytes.
2145
0
  llvm::Value *numVLAElements = nullptr;
2146
0
  if (isa<VariableArrayType>(arrayType)) {
2147
0
    numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).NumElts;
2148
2149
    // Walk into all VLAs.  This doesn't require changes to addr,
2150
    // which has type T* where T is the first non-VLA element type.
2151
0
    do {
2152
0
      QualType elementType = arrayType->getElementType();
2153
0
      arrayType = getContext().getAsArrayType(elementType);
2154
2155
      // If we only have VLA components, 'addr' requires no adjustment.
2156
0
      if (!arrayType) {
2157
0
        baseType = elementType;
2158
0
        return numVLAElements;
2159
0
      }
2160
0
    } while (isa<VariableArrayType>(arrayType));
2161
2162
    // We get out here only if we find a constant array type
2163
    // inside the VLA.
2164
0
  }
2165
2166
  // We have some number of constant-length arrays, so addr should
2167
  // have LLVM type [M x [N x [...]]]*.  Build a GEP that walks
2168
  // down to the first element of addr.
2169
0
  SmallVector<llvm::Value*, 8> gepIndices;
2170
2171
  // GEP down to the array type.
2172
0
  llvm::ConstantInt *zero = Builder.getInt32(0);
2173
0
  gepIndices.push_back(zero);
2174
2175
0
  uint64_t countFromCLAs = 1;
2176
0
  QualType eltType;
2177
2178
0
  llvm::ArrayType *llvmArrayType =
2179
0
    dyn_cast<llvm::ArrayType>(addr.getElementType());
2180
0
  while (llvmArrayType) {
2181
0
    assert(isa<ConstantArrayType>(arrayType));
2182
0
    assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
2183
0
             == llvmArrayType->getNumElements());
2184
2185
0
    gepIndices.push_back(zero);
2186
0
    countFromCLAs *= llvmArrayType->getNumElements();
2187
0
    eltType = arrayType->getElementType();
2188
2189
0
    llvmArrayType =
2190
0
      dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
2191
0
    arrayType = getContext().getAsArrayType(arrayType->getElementType());
2192
0
    assert((!llvmArrayType || arrayType) &&
2193
0
           "LLVM and Clang types are out-of-synch");
2194
0
  }
2195
2196
0
  if (arrayType) {
2197
    // From this point onwards, the Clang array type has been emitted
2198
    // as some other type (probably a packed struct). Compute the array
2199
    // size, and just emit the 'begin' expression as a bitcast.
2200
0
    while (arrayType) {
2201
0
      countFromCLAs *=
2202
0
          cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
2203
0
      eltType = arrayType->getElementType();
2204
0
      arrayType = getContext().getAsArrayType(eltType);
2205
0
    }
2206
2207
0
    llvm::Type *baseType = ConvertType(eltType);
2208
0
    addr = addr.withElementType(baseType);
2209
0
  } else {
2210
    // Create the actual GEP.
2211
0
    addr = Address(Builder.CreateInBoundsGEP(
2212
0
        addr.getElementType(), addr.getPointer(), gepIndices, "array.begin"),
2213
0
        ConvertTypeForMem(eltType),
2214
0
        addr.getAlignment());
2215
0
  }
2216
2217
0
  baseType = eltType;
2218
2219
0
  llvm::Value *numElements
2220
0
    = llvm::ConstantInt::get(SizeTy, countFromCLAs);
2221
2222
  // If we had any VLA dimensions, factor them in.
2223
0
  if (numVLAElements)
2224
0
    numElements = Builder.CreateNUWMul(numVLAElements, numElements);
2225
2226
0
  return numElements;
2227
0
}
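// A short sketch of the constant-array walk above ('first_elem' is made up):
// for a value of type 'int[3][4]', the GEP built here is effectively
// 'getelementptr [3 x [4 x i32]], ptr %a, 0, 0, 0', and the returned length
// is 12 elements with base type 'int'.
int first_elem(int (*a)[3][4]) {
  return (*a)[0][0];
}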
2228
2229
0
CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) {
2230
0
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
2231
0
  assert(vla && "type was not a variable array type!");
2232
0
  return getVLASize(vla);
2233
0
}
2234
2235
CodeGenFunction::VlaSizePair
2236
0
CodeGenFunction::getVLASize(const VariableArrayType *type) {
2237
  // The number of elements so far; always size_t.
2238
0
  llvm::Value *numElements = nullptr;
2239
2240
0
  QualType elementType;
2241
0
  do {
2242
0
    elementType = type->getElementType();
2243
0
    llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
2244
0
    assert(vlaSize && "no size for VLA!");
2245
0
    assert(vlaSize->getType() == SizeTy);
2246
2247
0
    if (!numElements) {
2248
0
      numElements = vlaSize;
2249
0
    } else {
2250
      // It's undefined behavior if this wraps around, so mark it that way.
2251
      // FIXME: Teach -fsanitize=undefined to trap this.
2252
0
      numElements = Builder.CreateNUWMul(numElements, vlaSize);
2253
0
    }
2254
0
  } while ((type = getContext().getAsVariableArrayType(elementType)));
2255
2256
0
  return { numElements, elementType };
2257
0
}
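// A C-flavored sketch of the NUW multiplication chain above (Clang also
// accepts VLAs in C++ as an extension): for a two-dimensional VLA, the
// recorded per-dimension sizes are multiplied into one element count.
unsigned long vla_elements(unsigned long n, unsigned long m) {
  int a[n][m];                    // getVLASize(a's type) => { n * m, int }
  return sizeof(a) / sizeof(int); // evaluates to n * m at run time
}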
2258
2259
CodeGenFunction::VlaSizePair
2260
0
CodeGenFunction::getVLAElements1D(QualType type) {
2261
0
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
2262
0
  assert(vla && "type was not a variable array type!");
2263
0
  return getVLAElements1D(vla);
2264
0
}
2265
2266
CodeGenFunction::VlaSizePair
2267
0
CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) {
2268
0
  llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
2269
0
  assert(VlaSize && "no size for VLA!");
2270
0
  assert(VlaSize->getType() == SizeTy);
2271
0
  return { VlaSize, Vla->getElementType() };
2272
0
}
2273
2274
0
void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
2275
0
  assert(type->isVariablyModifiedType() &&
2276
0
         "Must pass variably modified type to EmitVLASizes!");
2277
2278
0
  EnsureInsertPoint();
2279
2280
  // We're going to walk down into the type and look for VLA
2281
  // expressions.
2282
0
  do {
2283
0
    assert(type->isVariablyModifiedType());
2284
2285
0
    const Type *ty = type.getTypePtr();
2286
0
    switch (ty->getTypeClass()) {
2287
2288
0
#define TYPE(Class, Base)
2289
0
#define ABSTRACT_TYPE(Class, Base)
2290
0
#define NON_CANONICAL_TYPE(Class, Base)
2291
0
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
2292
0
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
2293
0
#include "clang/AST/TypeNodes.inc"
2294
0
      llvm_unreachable("unexpected dependent type!");
2295
2296
    // These types are never variably-modified.
2297
0
    case Type::Builtin:
2298
0
    case Type::Complex:
2299
0
    case Type::Vector:
2300
0
    case Type::ExtVector:
2301
0
    case Type::ConstantMatrix:
2302
0
    case Type::Record:
2303
0
    case Type::Enum:
2304
0
    case Type::Using:
2305
0
    case Type::TemplateSpecialization:
2306
0
    case Type::ObjCTypeParam:
2307
0
    case Type::ObjCObject:
2308
0
    case Type::ObjCInterface:
2309
0
    case Type::ObjCObjectPointer:
2310
0
    case Type::BitInt:
2311
0
      llvm_unreachable("type class is never variably-modified!");
2312
2313
0
    case Type::Elaborated:
2314
0
      type = cast<ElaboratedType>(ty)->getNamedType();
2315
0
      break;
2316
2317
0
    case Type::Adjusted:
2318
0
      type = cast<AdjustedType>(ty)->getAdjustedType();
2319
0
      break;
2320
2321
0
    case Type::Decayed:
2322
0
      type = cast<DecayedType>(ty)->getPointeeType();
2323
0
      break;
2324
2325
0
    case Type::Pointer:
2326
0
      type = cast<PointerType>(ty)->getPointeeType();
2327
0
      break;
2328
2329
0
    case Type::BlockPointer:
2330
0
      type = cast<BlockPointerType>(ty)->getPointeeType();
2331
0
      break;
2332
2333
0
    case Type::LValueReference:
2334
0
    case Type::RValueReference:
2335
0
      type = cast<ReferenceType>(ty)->getPointeeType();
2336
0
      break;
2337
2338
0
    case Type::MemberPointer:
2339
0
      type = cast<MemberPointerType>(ty)->getPointeeType();
2340
0
      break;
2341
2342
0
    case Type::ConstantArray:
2343
0
    case Type::IncompleteArray:
2344
      // Losing element qualification here is fine.
2345
0
      type = cast<ArrayType>(ty)->getElementType();
2346
0
      break;
2347
2348
0
    case Type::VariableArray: {
2349
      // Losing element qualification here is fine.
2350
0
      const VariableArrayType *vat = cast<VariableArrayType>(ty);
2351
2352
      // Unknown size indication requires no size computation.
2353
      // Otherwise, evaluate and record it.
2354
0
      if (const Expr *sizeExpr = vat->getSizeExpr()) {
2355
        // It's possible that we might have emitted this already,
2356
        // e.g. with a typedef and a pointer to it.
2357
0
        llvm::Value *&entry = VLASizeMap[sizeExpr];
2358
0
        if (!entry) {
2359
0
          llvm::Value *size = EmitScalarExpr(sizeExpr);
2360
2361
          // C11 6.7.6.2p5:
2362
          //   If the size is an expression that is not an integer constant
2363
          //   expression [...] each time it is evaluated it shall have a value
2364
          //   greater than zero.
2365
0
          if (SanOpts.has(SanitizerKind::VLABound)) {
2366
0
            SanitizerScope SanScope(this);
2367
0
            llvm::Value *Zero = llvm::Constant::getNullValue(size->getType());
2368
0
            clang::QualType SEType = sizeExpr->getType();
2369
0
            llvm::Value *CheckCondition =
2370
0
                SEType->isSignedIntegerType()
2371
0
                    ? Builder.CreateICmpSGT(size, Zero)
2372
0
                    : Builder.CreateICmpUGT(size, Zero);
2373
0
            llvm::Constant *StaticArgs[] = {
2374
0
                EmitCheckSourceLocation(sizeExpr->getBeginLoc()),
2375
0
                EmitCheckTypeDescriptor(SEType)};
2376
0
            EmitCheck(std::make_pair(CheckCondition, SanitizerKind::VLABound),
2377
0
                      SanitizerHandler::VLABoundNotPositive, StaticArgs, size);
2378
0
          }
2379
2380
          // Always zexting here would be wrong if it weren't
2381
          // undefined behavior to have a negative bound.
2382
          // FIXME: What about when size's type is larger than size_t?
2383
0
          entry = Builder.CreateIntCast(size, SizeTy, /*signed*/ false);
2384
0
        }
2385
0
      }
2386
0
      type = vat->getElementType();
2387
0
      break;
2388
0
    }
2389
2390
0
    case Type::FunctionProto:
2391
0
    case Type::FunctionNoProto:
2392
0
      type = cast<FunctionType>(ty)->getReturnType();
2393
0
      break;
2394
2395
0
    case Type::Paren:
2396
0
    case Type::TypeOf:
2397
0
    case Type::UnaryTransform:
2398
0
    case Type::Attributed:
2399
0
    case Type::BTFTagAttributed:
2400
0
    case Type::SubstTemplateTypeParm:
2401
0
    case Type::MacroQualified:
2402
      // Keep walking after single level desugaring.
2403
0
      type = type.getSingleStepDesugaredType(getContext());
2404
0
      break;
2405
2406
0
    case Type::Typedef:
2407
0
    case Type::Decltype:
2408
0
    case Type::Auto:
2409
0
    case Type::DeducedTemplateSpecialization:
2410
      // Stop walking: nothing to do.
2411
0
      return;
2412
2413
0
    case Type::TypeOfExpr:
2414
      // Stop walking: emit typeof expression.
2415
0
      EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
2416
0
      return;
2417
2418
0
    case Type::Atomic:
2419
0
      type = cast<AtomicType>(ty)->getValueType();
2420
0
      break;
2421
2422
0
    case Type::Pipe:
2423
0
      type = cast<PipeType>(ty)->getElementType();
2424
0
      break;
2425
0
    }
2426
0
  } while (type->isVariablyModifiedType());
2427
0
}
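// A hedged sketch of why this walk runs in the prologue, using the C VLA
// parameter syntax: each size expression below is evaluated once, cached in
// VLASizeMap, and every later use of the parameter's type reuses it.
unsigned long row_bytes(int n, int rows[][2 * n]) {
  return sizeof(rows[0]); // reuses the recorded value of '2 * n'
}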
2428
2429
0
Address CodeGenFunction::EmitVAListRef(const Expr* E) {
2430
0
  if (getContext().getBuiltinVaListType()->isArrayType())
2431
0
    return EmitPointerWithAlignment(E);
2432
0
  return EmitLValue(E).getAddress(*this);
2433
0
}
2434
2435
0
Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
2436
0
  return EmitLValue(E).getAddress(*this);
2437
0
}
2438
2439
void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
2440
0
                                              const APValue &Init) {
2441
0
  assert(Init.hasValue() && "Invalid DeclRefExpr initializer!");
2442
0
  if (CGDebugInfo *Dbg = getDebugInfo())
2443
0
    if (CGM.getCodeGenOpts().hasReducedDebugInfo())
2444
0
      Dbg->EmitGlobalVariable(E->getDecl(), Init);
2445
0
}
2446
2447
CodeGenFunction::PeepholeProtection
2448
0
CodeGenFunction::protectFromPeepholes(RValue rvalue) {
2449
  // At the moment, the only aggressive peephole we do in IR gen
2450
  // is trunc(zext) folding, but if we add more, we can easily
2451
  // extend this protection.
2452
2453
0
  if (!rvalue.isScalar()) return PeepholeProtection();
2454
0
  llvm::Value *value = rvalue.getScalarVal();
2455
0
  if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();
2456
2457
  // Just make an extra bitcast.
2458
0
  assert(HaveInsertPoint());
2459
0
  llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
2460
0
                                                  Builder.GetInsertBlock());
2461
2462
0
  PeepholeProtection protection;
2463
0
  protection.Inst = inst;
2464
0
  return protection;
2465
0
}
2466
2467
0
void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
2468
0
  if (!protection.Inst) return;
2469
2470
  // In theory, we could try to duplicate the peepholes now, but it isn't
  // worth the effort.
2471
0
  protection.Inst->eraseFromParent();
2472
0
}
2473
2474
void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
2475
                                              QualType Ty, SourceLocation Loc,
2476
                                              SourceLocation AssumptionLoc,
2477
                                              llvm::Value *Alignment,
2478
0
                                              llvm::Value *OffsetValue) {
2479
0
  if (Alignment->getType() != IntPtrTy)
2480
0
    Alignment =
2481
0
        Builder.CreateIntCast(Alignment, IntPtrTy, false, "casted.align");
2482
0
  if (OffsetValue && OffsetValue->getType() != IntPtrTy)
2483
0
    OffsetValue =
2484
0
        Builder.CreateIntCast(OffsetValue, IntPtrTy, true, "casted.offset");
2485
0
  llvm::Value *TheCheck = nullptr;
2486
0
  if (SanOpts.has(SanitizerKind::Alignment)) {
2487
0
    llvm::Value *PtrIntValue =
2488
0
        Builder.CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");
2489
2490
0
    if (OffsetValue) {
2491
0
      bool IsOffsetZero = false;
2492
0
      if (const auto *CI = dyn_cast<llvm::ConstantInt>(OffsetValue))
2493
0
        IsOffsetZero = CI->isZero();
2494
2495
0
      if (!IsOffsetZero)
2496
0
        PtrIntValue = Builder.CreateSub(PtrIntValue, OffsetValue, "offsetptr");
2497
0
    }
2498
2499
0
    llvm::Value *Zero = llvm::ConstantInt::get(IntPtrTy, 0);
2500
0
    llvm::Value *Mask =
2501
0
        Builder.CreateSub(Alignment, llvm::ConstantInt::get(IntPtrTy, 1));
2502
0
    llvm::Value *MaskedPtr = Builder.CreateAnd(PtrIntValue, Mask, "maskedptr");
2503
0
    TheCheck = Builder.CreateICmpEQ(MaskedPtr, Zero, "maskcond");
2504
0
  }
2505
0
  llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
2506
0
      CGM.getDataLayout(), PtrValue, Alignment, OffsetValue);
2507
2508
0
  if (!SanOpts.has(SanitizerKind::Alignment))
2509
0
    return;
2510
0
  emitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2511
0
                               OffsetValue, TheCheck, Assumption);
2512
0
}
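// A sketch of how this path is commonly reached and of the check it builds
// ('sum4' is an illustrative name): with -fsanitize=alignment the emitted
// test is effectively
//   (((uintptr_t)p - offset) & (alignment - 1)) == 0
// before the llvm.assume is attached.
float sum4(const float *p) {
  const float *a = (const float *)__builtin_assume_aligned(p, 16);
  return a[0] + a[1] + a[2] + a[3];
}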
2513
2514
void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
2515
                                              const Expr *E,
2516
                                              SourceLocation AssumptionLoc,
2517
                                              llvm::Value *Alignment,
2518
0
                                              llvm::Value *OffsetValue) {
2519
0
  QualType Ty = E->getType();
2520
0
  SourceLocation Loc = E->getExprLoc();
2521
2522
0
  emitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2523
0
                          OffsetValue);
2524
0
}
2525
2526
llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
2527
                                                 llvm::Value *AnnotatedVal,
2528
                                                 StringRef AnnotationStr,
2529
                                                 SourceLocation Location,
2530
0
                                                 const AnnotateAttr *Attr) {
2531
0
  SmallVector<llvm::Value *, 5> Args = {
2532
0
      AnnotatedVal,
2533
0
      CGM.EmitAnnotationString(AnnotationStr),
2534
0
      CGM.EmitAnnotationUnit(Location),
2535
0
      CGM.EmitAnnotationLineNo(Location),
2536
0
  };
2537
0
  if (Attr)
2538
0
    Args.push_back(CGM.EmitAnnotationArgs(Attr));
2539
0
  return Builder.CreateCall(AnnotationFn, Args);
2540
0
}
2541
2542
0
void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
2543
0
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2544
0
  for (const auto *I : D->specific_attrs<AnnotateAttr>())
2545
0
    EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation,
2546
0
                                        {V->getType(), CGM.ConstGlobalsPtrTy}),
2547
0
                       V, I->getAnnotation(), D->getLocation(), I);
2548
0
}
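
For illustration (assumed example, not from this file): an annotated local variable is what drives EmitVarAnnotations; each AnnotateAttr becomes one llvm.var.annotation call whose string, translation-unit, and line operands come from the CGM.EmitAnnotation* helpers used above.

// Hypothetical source that triggers this path.
int annotated_local() {
  __attribute__((annotate("my_tag"))) int x = 42;
  return x;
}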
2549
2550
Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
2551
0
                                              Address Addr) {
2552
0
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2553
0
  llvm::Value *V = Addr.getPointer();
2554
0
  llvm::Type *VTy = V->getType();
2555
0
  auto *PTy = dyn_cast<llvm::PointerType>(VTy);
2556
0
  unsigned AS = PTy ? PTy->getAddressSpace() : 0;
2557
0
  llvm::PointerType *IntrinTy =
2558
0
      llvm::PointerType::get(CGM.getLLVMContext(), AS);
2559
0
  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
2560
0
                                       {IntrinTy, CGM.ConstGlobalsPtrTy});
2561
2562
0
  for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
2563
    // FIXME: Always emit the cast inst so we can differentiate between
2564
    // annotation on the first field of a struct and annotation on the struct
2565
    // itself.
2566
0
    if (VTy != IntrinTy)
2567
0
      V = Builder.CreateBitCast(V, IntrinTy);
2568
0
    V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation(), I);
2569
0
    V = Builder.CreateBitCast(V, VTy);
2570
0
  }
2571
2572
0
  return Address(V, Addr.getElementType(), Addr.getAlignment());
2573
0
}
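
A hedged sketch of the field case: annotating a struct member routes each address computation for that member through llvm.ptr.annotation, with the bitcast pair above restoring the original pointer type afterwards.

// Assumed example, not part of the coverage listing.
struct Counter {
  __attribute__((annotate("field_tag"))) int value;
};

int readCounter(Counter &c) { return c.value; }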
2574
2575
0
CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }
2576
2577
CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
2578
0
    : CGF(CGF) {
2579
0
  assert(!CGF->IsSanitizerScope);
2580
0
  CGF->IsSanitizerScope = true;
2581
0
}
2582
2583
0
CodeGenFunction::SanitizerScope::~SanitizerScope() {
2584
0
  CGF->IsSanitizerScope = false;
2585
0
}
2586
2587
void CodeGenFunction::InsertHelper(llvm::Instruction *I,
2588
                                   const llvm::Twine &Name,
2589
                                   llvm::BasicBlock *BB,
2590
0
                                   llvm::BasicBlock::iterator InsertPt) const {
2591
0
  LoopStack.InsertHelper(I);
2592
0
  if (IsSanitizerScope)
2593
0
    I->setNoSanitizeMetadata();
2594
0
}
2595
2596
void CGBuilderInserter::InsertHelper(
2597
    llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
2598
0
    llvm::BasicBlock::iterator InsertPt) const {
2599
0
  llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
2600
0
  if (CGF)
2601
0
    CGF->InsertHelper(I, Name, BB, InsertPt);
2602
0
}
2603
2604
// Emits an error if we don't have a valid set of target features for the
2605
// called function.
2606
void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
2607
0
                                          const FunctionDecl *TargetDecl) {
2608
0
  return checkTargetFeatures(E->getBeginLoc(), TargetDecl);
2609
0
}
2610
2611
// Emits an error if we don't have a valid set of target features for the
2612
// called function.
2613
void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
2614
0
                                          const FunctionDecl *TargetDecl) {
2615
  // Early exit if this is an indirect call.
2616
0
  if (!TargetDecl)
2617
0
    return;
2618
2619
  // Get the current enclosing function if it exists. If it doesn't
2620
  // we can't check the target features anyhow.
2621
0
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
2622
0
  if (!FD)
2623
0
    return;
2624
2625
  // Grab the required features for the call. For a builtin this is listed in
2626
  // the .td file with the default CPU; for an always_inline function this is
2627
  // any listed CPU and any listed features.
2628
0
  unsigned BuiltinID = TargetDecl->getBuiltinID();
2629
0
  std::string MissingFeature;
2630
0
  llvm::StringMap<bool> CallerFeatureMap;
2631
0
  CGM.getContext().getFunctionFeatureMap(CallerFeatureMap, FD);
2632
  // When compiling in HipStdPar mode we have to be conservative in rejecting
2633
  // target-specific features in the FE and defer the possible error to the
2634
  // AcceleratorCodeSelection pass, which emits an error if and only if an
2635
  // unsupported target builtin is referenced by an accelerator executable function.
2636
0
  bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
2637
0
  if (BuiltinID) {
2638
0
    StringRef FeatureList(CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID));
2639
0
    if (!Builtin::evaluateRequiredTargetFeatures(
2640
0
        FeatureList, CallerFeatureMap) && !IsHipStdPar) {
2641
0
      CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
2642
0
          << TargetDecl->getDeclName()
2643
0
          << FeatureList;
2644
0
    }
2645
0
  } else if (!TargetDecl->isMultiVersion() &&
2646
0
             TargetDecl->hasAttr<TargetAttr>()) {
2647
    // Get the required features for the callee.
2648
2649
0
    const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
2650
0
    ParsedTargetAttr ParsedAttr =
2651
0
        CGM.getContext().filterFunctionTargetAttrs(TD);
2652
2653
0
    SmallVector<StringRef, 1> ReqFeatures;
2654
0
    llvm::StringMap<bool> CalleeFeatureMap;
2655
0
    CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
2656
2657
0
    for (const auto &F : ParsedAttr.Features) {
2658
0
      if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
2659
0
        ReqFeatures.push_back(StringRef(F).substr(1));
2660
0
    }
2661
2662
0
    for (const auto &F : CalleeFeatureMap) {
2663
      // Only positive features are "required".
2664
0
      if (F.getValue())
2665
0
        ReqFeatures.push_back(F.getKey());
2666
0
    }
2667
0
    if (!llvm::all_of(ReqFeatures, [&](StringRef Feature) {
2668
0
      if (!CallerFeatureMap.lookup(Feature)) {
2669
0
        MissingFeature = Feature.str();
2670
0
        return false;
2671
0
      }
2672
0
      return true;
2673
0
    }) && !IsHipStdPar)
2674
0
      CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
2675
0
          << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
2676
0
  } else if (!FD->isMultiVersion() && FD->hasAttr<TargetAttr>()) {
2677
0
    llvm::StringMap<bool> CalleeFeatureMap;
2678
0
    CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
2679
2680
0
    for (const auto &F : CalleeFeatureMap) {
2681
0
      if (F.getValue() && (!CallerFeatureMap.lookup(F.getKey()) ||
2682
0
                           !CallerFeatureMap.find(F.getKey())->getValue()) &&
2683
0
          !IsHipStdPar)
2684
0
        CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
2685
0
            << FD->getDeclName() << TargetDecl->getDeclName() << F.getKey();
2686
0
    }
2687
0
  }
2688
0
}
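
An assumed x86-64 example of the builtin branch above: the AVX2 intrinsic below requires the "avx2" feature, so without the target attribute (or -mavx2 on the command line) the caller's feature map fails evaluateRequiredTargetFeatures and err_builtin_needs_feature is reported.

#include <immintrin.h>

// The caller's feature map includes avx2, so checkTargetFeatures is satisfied.
__attribute__((target("avx2")))
__m256i add8(__m256i a, __m256i b) {
  return _mm256_add_epi32(a, b);
}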
2689
2690
0
void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
2691
0
  if (!CGM.getCodeGenOpts().SanitizeStats)
2692
0
    return;
2693
2694
0
  llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
2695
0
  IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
2696
0
  CGM.getSanStats().create(IRB, SSK);
2697
0
}
2698
2699
void CodeGenFunction::EmitKCFIOperandBundle(
2700
0
    const CGCallee &Callee, SmallVectorImpl<llvm::OperandBundleDef> &Bundles) {
2701
0
  const FunctionProtoType *FP =
2702
0
      Callee.getAbstractInfo().getCalleeFunctionProtoType();
2703
0
  if (FP)
2704
0
    Bundles.emplace_back("kcfi", CGM.CreateKCFITypeId(FP->desugar()));
2705
0
}
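
A hedged sketch, not from this file: under -fsanitize=kcfi every indirect call site gets a "kcfi" operand bundle whose type id CGM.CreateKCFITypeId derives from the callee's prototype, letting the backend check it against the callee's preamble hash.

// Assumed example: the indirect call below is where the bundle is attached.
int dispatch(int (*fp)(int), int v) {
  return fp(v);
}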
2706
2707
llvm::Value *CodeGenFunction::FormAArch64ResolverCondition(
2708
0
    const MultiVersionResolverOption &RO) {
2709
0
  llvm::SmallVector<StringRef, 8> CondFeatures;
2710
0
  for (const StringRef &Feature : RO.Conditions.Features) {
2711
    // Form the condition for features that are not yet enabled in the target.
2712
0
    if (!getContext().getTargetInfo().hasFeature(Feature))
2713
0
      CondFeatures.push_back(Feature);
2714
0
  }
2715
0
  if (!CondFeatures.empty()) {
2716
0
    return EmitAArch64CpuSupports(CondFeatures);
2717
0
  }
2718
0
  return nullptr;
2719
0
}
2720
2721
llvm::Value *CodeGenFunction::FormX86ResolverCondition(
2722
0
    const MultiVersionResolverOption &RO) {
2723
0
  llvm::Value *Condition = nullptr;
2724
2725
0
  if (!RO.Conditions.Architecture.empty()) {
2726
0
    StringRef Arch = RO.Conditions.Architecture;
2727
    // If arch= specifies an x86-64 micro-architecture level, test the feature
2728
    // with __builtin_cpu_supports; otherwise use __builtin_cpu_is.
2729
0
    if (Arch.starts_with("x86-64"))
2730
0
      Condition = EmitX86CpuSupports({Arch});
2731
0
    else
2732
0
      Condition = EmitX86CpuIs(Arch);
2733
0
  }
2734
2735
0
  if (!RO.Conditions.Features.empty()) {
2736
0
    llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Conditions.Features);
2737
0
    Condition =
2738
0
        Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond;
2739
0
  }
2740
0
  return Condition;
2741
0
}
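
An assumed example of the arch= handling above: "x86-64-v3" names a micro-architecture level, so the resolver condition uses __builtin_cpu_supports, while a concrete CPU name such as "znver3" would go through __builtin_cpu_is instead.

// Hypothetical multiversioned function (x86 target assumed).
__attribute__((target_clones("arch=x86-64-v3", "default")))
int square(int x) { return x * x; }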
2742
2743
static void CreateMultiVersionResolverReturn(CodeGenModule &CGM,
2744
                                             llvm::Function *Resolver,
2745
                                             CGBuilderTy &Builder,
2746
                                             llvm::Function *FuncToReturn,
2747
0
                                             bool SupportsIFunc) {
2748
0
  if (SupportsIFunc) {
2749
0
    Builder.CreateRet(FuncToReturn);
2750
0
    return;
2751
0
  }
2752
2753
0
  llvm::SmallVector<llvm::Value *, 10> Args(
2754
0
      llvm::make_pointer_range(Resolver->args()));
2755
2756
0
  llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
2757
0
  Result->setTailCallKind(llvm::CallInst::TCK_MustTail);
2758
2759
0
  if (Resolver->getReturnType()->isVoidTy())
2760
0
    Builder.CreateRetVoid();
2761
0
  else
2762
0
    Builder.CreateRet(Result);
2763
0
}
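
A hedged standalone analogue of the non-IFUNC branch above: the resolver body forwards its own arguments to the selected clone, which CreateMultiVersionResolverReturn emits as a musttail call. The impl_* names below are hypothetical.

static int impl_avx2(int x) { return x << 1; }   // assumed fast variant
static int impl_default(int x) { return x + x; } // assumed fallback

int forwardingResolver(int x) {
  // Real codegen selects once inside the resolver; this only mirrors the
  // forwarding shape on a target that supports __builtin_cpu_supports.
  return __builtin_cpu_supports("avx2") ? impl_avx2(x) : impl_default(x);
}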
2764
2765
void CodeGenFunction::EmitMultiVersionResolver(
2766
0
    llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
2767
2768
0
  llvm::Triple::ArchType ArchType =
2769
0
      getContext().getTargetInfo().getTriple().getArch();
2770
2771
0
  switch (ArchType) {
2772
0
  case llvm::Triple::x86:
2773
0
  case llvm::Triple::x86_64:
2774
0
    EmitX86MultiVersionResolver(Resolver, Options);
2775
0
    return;
2776
0
  case llvm::Triple::aarch64:
2777
0
    EmitAArch64MultiVersionResolver(Resolver, Options);
2778
0
    return;
2779
2780
0
  default:
2781
0
    assert(false && "Only implemented for x86 and AArch64 targets");
2782
0
  }
2783
0
}
2784
2785
void CodeGenFunction::EmitAArch64MultiVersionResolver(
2786
0
    llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
2787
0
  assert(!Options.empty() && "No multiversion resolver options found");
2788
0
  assert(Options.back().Conditions.Features.size() == 0 &&
2789
0
         "Default case must be last");
2790
0
  bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
2791
0
  assert(SupportsIFunc &&
2792
0
         "Multiversion resolver requires target IFUNC support");
2793
0
  bool AArch64CpuInitialized = false;
2794
0
  llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
2795
2796
0
  for (const MultiVersionResolverOption &RO : Options) {
2797
0
    Builder.SetInsertPoint(CurBlock);
2798
0
    llvm::Value *Condition = FormAArch64ResolverCondition(RO);
2799
2800
    // The 'default' or 'all features enabled' case.
2801
0
    if (!Condition) {
2802
0
      CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
2803
0
                                       SupportsIFunc);
2804
0
      return;
2805
0
    }
2806
2807
0
    if (!AArch64CpuInitialized) {
2808
0
      Builder.SetInsertPoint(CurBlock, CurBlock->begin());
2809
0
      EmitAArch64CpuInit();
2810
0
      AArch64CpuInitialized = true;
2811
0
      Builder.SetInsertPoint(CurBlock);
2812
0
    }
2813
2814
0
    llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
2815
0
    CGBuilderTy RetBuilder(*this, RetBlock);
2816
0
    CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
2817
0
                                     SupportsIFunc);
2818
0
    CurBlock = createBasicBlock("resolver_else", Resolver);
2819
0
    Builder.CreateCondBr(Condition, RetBlock, CurBlock);
2820
0
  }
2821
2822
  // If no default, emit an unreachable.
2823
0
  Builder.SetInsertPoint(CurBlock);
2824
0
  llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
2825
0
  TrapCall->setDoesNotReturn();
2826
0
  TrapCall->setDoesNotThrow();
2827
0
  Builder.CreateUnreachable();
2828
0
  Builder.ClearInsertionPoint();
2829
0
}
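
An assumed AArch64 function-multiversioning example: each non-default variant becomes a resolver_return block guarded by the condition formed above, the CPU-init call is hoisted to the entry block exactly once, and the default variant terminates the chain.

// Hypothetical source (AArch64 target assumed).
__attribute__((target_version("sve2"))) int kernel(int x) { return x + 1; }
__attribute__((target_version("default"))) int kernel(int x) { return x; }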
2830
2831
void CodeGenFunction::EmitX86MultiVersionResolver(
2832
0
    llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
2833
2834
0
  bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
2835
2836
  // Main function's basic block.
2837
0
  llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
2838
0
  Builder.SetInsertPoint(CurBlock);
2839
0
  EmitX86CpuInit();
2840
2841
0
  for (const MultiVersionResolverOption &RO : Options) {
2842
0
    Builder.SetInsertPoint(CurBlock);
2843
0
    llvm::Value *Condition = FormX86ResolverCondition(RO);
2844
2845
    // The 'default' or 'generic' case.
2846
0
    if (!Condition) {
2847
0
      assert(&RO == Options.end() - 1 &&
2848
0
             "Default or Generic case must be last");
2849
0
      CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
2850
0
                                       SupportsIFunc);
2851
0
      return;
2852
0
    }
2853
2854
0
    llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
2855
0
    CGBuilderTy RetBuilder(*this, RetBlock);
2856
0
    CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
2857
0
                                     SupportsIFunc);
2858
0
    CurBlock = createBasicBlock("resolver_else", Resolver);
2859
0
    Builder.CreateCondBr(Condition, RetBlock, CurBlock);
2860
0
  }
2861
2862
  // If no generic/default, emit an unreachable.
2863
0
  Builder.SetInsertPoint(CurBlock);
2864
0
  llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
2865
0
  TrapCall->setDoesNotReturn();
2866
0
  TrapCall->setDoesNotThrow();
2867
0
  Builder.CreateUnreachable();
2868
0
  Builder.ClearInsertionPoint();
2869
0
}
2870
2871
// Loc - where the diagnostic will point, where in the source code this
2872
//  alignment has failed.
2873
// SecondaryLoc - if present (will be present if sufficiently different from
2874
//  Loc), the diagnostic will additionally emit a "Note:" pointing to this location.
2875
//  It should be the location where, for example, the
2876
//  __attribute__((assume_aligned)) was written.
2877
void CodeGenFunction::emitAlignmentAssumptionCheck(
2878
    llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
2879
    SourceLocation SecondaryLoc, llvm::Value *Alignment,
2880
    llvm::Value *OffsetValue, llvm::Value *TheCheck,
2881
0
    llvm::Instruction *Assumption) {
2882
0
  assert(Assumption && isa<llvm::CallInst>(Assumption) &&
2883
0
         cast<llvm::CallInst>(Assumption)->getCalledOperand() ==
2884
0
             llvm::Intrinsic::getDeclaration(
2885
0
                 Builder.GetInsertBlock()->getParent()->getParent(),
2886
0
                 llvm::Intrinsic::assume) &&
2887
0
         "Assumption should be a call to llvm.assume().");
2888
0
  assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
2889
0
         "Assumption should be the last instruction of the basic block, "
2890
0
         "since the basic block is still being generated.");
2891
2892
0
  if (!SanOpts.has(SanitizerKind::Alignment))
2893
0
    return;
2894
2895
  // Don't check pointers to volatile data. The behavior here is implementation-
2896
  // defined.
2897
0
  if (Ty->getPointeeType().isVolatileQualified())
2898
0
    return;
2899
2900
  // We need to temporarily remove the assumption so we can insert the
2901
  // sanitizer check before it; otherwise the check will be dropped by optimizations.
2902
0
  Assumption->removeFromParent();
2903
2904
0
  {
2905
0
    SanitizerScope SanScope(this);
2906
2907
0
    if (!OffsetValue)
2908
0
      OffsetValue = Builder.getInt1(false); // no offset.
2909
2910
0
    llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
2911
0
                                    EmitCheckSourceLocation(SecondaryLoc),
2912
0
                                    EmitCheckTypeDescriptor(Ty)};
2913
0
    llvm::Value *DynamicData[] = {EmitCheckValue(Ptr),
2914
0
                                  EmitCheckValue(Alignment),
2915
0
                                  EmitCheckValue(OffsetValue)};
2916
0
    EmitCheck({std::make_pair(TheCheck, SanitizerKind::Alignment)},
2917
0
              SanitizerHandler::AlignmentAssumption, StaticData, DynamicData);
2918
0
  }
2919
2920
  // We are now in the (new, empty) "cont" basic block.
2921
  // Reintroduce the assumption.
2922
0
  Builder.Insert(Assumption);
2923
  // FIXME: Assumption still has its original basic block as its Parent.
2924
0
}
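
A hedged sketch of what the check above guards at runtime: asserting 16-byte alignment on a pointer that is off by one makes maskcond false, so with -fsanitize=alignment the AlignmentAssumption handler fires before the llvm.assume is re-inserted.

// Assumed example; undefined behavior without the sanitizer, diagnosed with it.
int readChecked(char *base) {
  int *p = reinterpret_cast<int *>(base + 1); // deliberately misaligned
  int *q = static_cast<int *>(__builtin_assume_aligned(p, 16));
  return *q;
}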
2925
2926
0
llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
2927
0
  if (CGDebugInfo *DI = getDebugInfo())
2928
0
    return DI->SourceLocToDebugLoc(Location);
2929
2930
0
  return llvm::DebugLoc();
2931
0
}
2932
2933
llvm::Value *
2934
CodeGenFunction::emitCondLikelihoodViaExpectIntrinsic(llvm::Value *Cond,
2935
0
                                                      Stmt::Likelihood LH) {
2936
0
  switch (LH) {
2937
0
  case Stmt::LH_None:
2938
0
    return Cond;
2939
0
  case Stmt::LH_Likely:
2940
0
  case Stmt::LH_Unlikely:
2941
    // Don't generate llvm.expect at -O0, as the backend won't use it for
2942
    // anything.
2943
0
    if (CGM.getCodeGenOpts().OptimizationLevel == 0)
2944
0
      return Cond;
2945
0
    llvm::Type *CondTy = Cond->getType();
2946
0
    assert(CondTy->isIntegerTy(1) && "expecting condition to be a boolean");
2947
0
    llvm::Function *FnExpect =
2948
0
        CGM.getIntrinsic(llvm::Intrinsic::expect, CondTy);
2949
0
    llvm::Value *ExpectedValueOfCond =
2950
0
        llvm::ConstantInt::getBool(CondTy, LH == Stmt::LH_Likely);
2951
0
    return Builder.CreateCall(FnExpect, {Cond, ExpectedValueOfCond},
2952
0
                              Cond->getName() + ".expval");
2953
0
  }
2954
0
  llvm_unreachable("Unknown Likelihood");
2955
0
}
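
An assumed illustration: at -O1 and above, the C++20 [[likely]] attribute below flows through emitCondLikelihoodViaExpectIntrinsic and the branch condition is wrapped in llvm.expect(cond, true); at -O0 the early return above hands back Cond unchanged.

// Hypothetical source (compile with -std=c++20 or later).
int clampPositive(int x) {
  if (x > 0) [[likely]]
    return x;
  return 0;
}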
2956
2957
llvm::Value *CodeGenFunction::emitBoolVecConversion(llvm::Value *SrcVec,
2958
                                                    unsigned NumElementsDst,
2959
0
                                                    const llvm::Twine &Name) {
2960
0
  auto *SrcTy = cast<llvm::FixedVectorType>(SrcVec->getType());
2961
0
  unsigned NumElementsSrc = SrcTy->getNumElements();
2962
0
  if (NumElementsSrc == NumElementsDst)
2963
0
    return SrcVec;
2964
2965
0
  std::vector<int> ShuffleMask(NumElementsDst, -1);
2966
0
  for (unsigned MaskIdx = 0;
2967
0
       MaskIdx < std::min<>(NumElementsDst, NumElementsSrc); ++MaskIdx)
2968
0
    ShuffleMask[MaskIdx] = MaskIdx;
2969
2970
0
  return Builder.CreateShuffleVector(SrcVec, ShuffleMask, Name);
2971
0
}
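
A hedged standalone restatement of the mask construction above: lanes below min(src, dst) map one-to-one, and any extra destination lanes stay -1, i.e. undefined after the shuffle.

#include <algorithm>
#include <vector>

// Assumed helper name; mirrors the loop in emitBoolVecConversion.
std::vector<int> makeBoolVecShuffleMask(unsigned NumElementsSrc,
                                        unsigned NumElementsDst) {
  std::vector<int> Mask(NumElementsDst, -1);
  for (unsigned I = 0; I < std::min(NumElementsSrc, NumElementsDst); ++I)
    Mask[I] = static_cast<int>(I);
  return Mask;
}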