Coverage Report

Created: 2024-01-17 10:31

/src/llvm-project/llvm/lib/IR/Instructions.cpp
Line
Count
Source
1
//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
//
9
// This file implements all of the non-inline methods for the LLVM instruction
10
// classes.
11
//
12
//===----------------------------------------------------------------------===//
13
14
#include "llvm/IR/Instructions.h"
15
#include "LLVMContextImpl.h"
16
#include "llvm/ADT/SmallBitVector.h"
17
#include "llvm/ADT/SmallVector.h"
18
#include "llvm/ADT/Twine.h"
19
#include "llvm/IR/Attributes.h"
20
#include "llvm/IR/BasicBlock.h"
21
#include "llvm/IR/Constant.h"
22
#include "llvm/IR/Constants.h"
23
#include "llvm/IR/DataLayout.h"
24
#include "llvm/IR/DerivedTypes.h"
25
#include "llvm/IR/Function.h"
26
#include "llvm/IR/InstrTypes.h"
27
#include "llvm/IR/Instruction.h"
28
#include "llvm/IR/Intrinsics.h"
29
#include "llvm/IR/LLVMContext.h"
30
#include "llvm/IR/MDBuilder.h"
31
#include "llvm/IR/Metadata.h"
32
#include "llvm/IR/Module.h"
33
#include "llvm/IR/Operator.h"
34
#include "llvm/IR/ProfDataUtils.h"
35
#include "llvm/IR/Type.h"
36
#include "llvm/IR/Value.h"
37
#include "llvm/Support/AtomicOrdering.h"
38
#include "llvm/Support/Casting.h"
39
#include "llvm/Support/ErrorHandling.h"
40
#include "llvm/Support/MathExtras.h"
41
#include "llvm/Support/ModRef.h"
42
#include "llvm/Support/TypeSize.h"
43
#include <algorithm>
44
#include <cassert>
45
#include <cstdint>
46
#include <optional>
47
#include <vector>
48
49
using namespace llvm;
50
51
static cl::opt<bool> DisableI2pP2iOpt(
52
    "disable-i2p-p2i-opt", cl::init(false),
53
    cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));
54
55
//===----------------------------------------------------------------------===//
56
//                            AllocaInst Class
57
//===----------------------------------------------------------------------===//
58
59
std::optional<TypeSize>
60
0
AllocaInst::getAllocationSize(const DataLayout &DL) const {
61
0
  TypeSize Size = DL.getTypeAllocSize(getAllocatedType());
62
0
  if (isArrayAllocation()) {
63
0
    auto *C = dyn_cast<ConstantInt>(getArraySize());
64
0
    if (!C)
65
0
      return std::nullopt;
66
0
    assert(!Size.isScalable() && "Array elements cannot have a scalable size");
67
0
    Size *= C->getZExtValue();
68
0
  }
69
0
  return Size;
70
0
}
71
72
std::optional<TypeSize>
73
0
AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const {
74
0
  std::optional<TypeSize> Size = getAllocationSize(DL);
75
0
  if (Size)
76
0
    return *Size * 8;
77
0
  return std::nullopt;
78
0
}
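A minimal caller-side sketch of how the two size queries above might be consumed, assuming an existing AllocaInst and its module's DataLayout; the helper name getFixedAllocaBytes is hypothetical.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/TypeSize.h"
#include <cstdint>
#include <optional>

// Returns the constant byte size of a fixed-size alloca, or std::nullopt for
// dynamically sized or scalable allocations.
static std::optional<uint64_t> getFixedAllocaBytes(const llvm::AllocaInst &AI,
                                                   const llvm::DataLayout &DL) {
  std::optional<llvm::TypeSize> Size = AI.getAllocationSize(DL);
  if (!Size || Size->isScalable())
    return std::nullopt;
  return Size->getFixedValue();
}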
79
80
//===----------------------------------------------------------------------===//
81
//                              SelectInst Class
82
//===----------------------------------------------------------------------===//
83
84
/// areInvalidOperands - Return a string if the specified operands are invalid
85
/// for a select operation, otherwise return null.
86
243k
const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
87
243k
  if (Op1->getType() != Op2->getType())
88
1
    return "both values to select must have same type";
89
90
243k
  if (Op1->getType()->isTokenTy())
91
0
    return "select values cannot have token type";
92
93
243k
  if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
94
    // Vector select.
95
72.2k
    if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
96
0
      return "vector select condition element type must be i1";
97
72.2k
    VectorType *ET = dyn_cast<VectorType>(Op1->getType());
98
72.2k
    if (!ET)
99
0
      return "selected values for vector select must be vectors";
100
72.2k
    if (ET->getElementCount() != VT->getElementCount())
101
0
      return "vector select requires selected vectors to have "
102
0
                   "the same vector length as select condition";
103
171k
  } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
104
1
    return "select condition must be i1 or <n x i1>";
105
1
  }
106
243k
  return nullptr;
107
243k
}
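A hedged sketch of using areInvalidOperands as a pre-flight check instead of relying on the assertion inside SelectInst::Create; the wrapper createSelectChecked is hypothetical.

#include "llvm/IR/Instructions.h"
#include "llvm/Support/raw_ostream.h"

// Report a human-readable reason and bail out instead of asserting.
static llvm::Value *createSelectChecked(llvm::Value *Cond, llvm::Value *TrueV,
                                        llvm::Value *FalseV,
                                        llvm::Instruction *InsertBefore) {
  if (const char *Err =
          llvm::SelectInst::areInvalidOperands(Cond, TrueV, FalseV)) {
    llvm::errs() << "invalid select operands: " << Err << "\n";
    return nullptr;
  }
  return llvm::SelectInst::Create(Cond, TrueV, FalseV, "sel", InsertBefore);
}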
108
109
//===----------------------------------------------------------------------===//
110
//                               PHINode Class
111
//===----------------------------------------------------------------------===//
112
113
PHINode::PHINode(const PHINode &PN)
114
    : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),
115
21.7k
      ReservedSpace(PN.getNumOperands()) {
116
21.7k
  allocHungoffUses(PN.getNumOperands());
117
21.7k
  std::copy(PN.op_begin(), PN.op_end(), op_begin());
118
21.7k
  copyIncomingBlocks(make_range(PN.block_begin(), PN.block_end()));
119
21.7k
  SubclassOptionalData = PN.SubclassOptionalData;
120
21.7k
}
121
122
// removeIncomingValue - Remove an incoming value.  This is useful if a
123
// predecessor basic block is deleted.
124
96.3k
Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
125
96.3k
  Value *Removed = getIncomingValue(Idx);
126
127
  // Move everything after this operand down.
128
  //
129
  // FIXME: we could just swap with the end of the list, then erase.  However,
130
  // clients might not expect this to happen.  The code as it is thrashes the
131
  // use/def lists, which is kinda lame.
132
96.3k
  std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
133
96.3k
  copyIncomingBlocks(drop_begin(blocks(), Idx + 1), Idx);
134
135
  // Nuke the last value.
136
96.3k
  Op<-1>().set(nullptr);
137
96.3k
  setNumHungOffUseOperands(getNumOperands() - 1);
138
139
  // If the PHI node is dead, because it has zero entries, nuke it now.
140
96.3k
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
141
    // If anyone is using this PHI, make them use a dummy value instead...
142
117
    replaceAllUsesWith(PoisonValue::get(getType()));
143
117
    eraseFromParent();
144
117
  }
145
96.3k
  return Removed;
146
96.3k
}
147
148
void PHINode::removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
149
75.9k
                                    bool DeletePHIIfEmpty) {
150
75.9k
  SmallDenseSet<unsigned> RemoveIndices;
151
327k
  for (unsigned Idx = 0; Idx < getNumIncomingValues(); ++Idx)
152
251k
    if (Predicate(Idx))
153
127k
      RemoveIndices.insert(Idx);
154
155
75.9k
  if (RemoveIndices.empty())
156
5
    return;
157
158
  // Remove operands.
159
251k
  auto NewOpEnd = remove_if(operands(), [&](Use &U) {
160
251k
    return RemoveIndices.contains(U.getOperandNo());
161
251k
  });
162
75.8k
  for (Use &U : make_range(NewOpEnd, op_end()))
163
127k
    U.set(nullptr);
164
165
  // Remove incoming blocks.
166
75.8k
  (void)std::remove_if(const_cast<block_iterator>(block_begin()),
167
251k
                 const_cast<block_iterator>(block_end()), [&](BasicBlock *&BB) {
168
251k
                   return RemoveIndices.contains(&BB - block_begin());
169
251k
                 });
170
171
75.8k
  setNumHungOffUseOperands(getNumOperands() - RemoveIndices.size());
172
173
  // If the PHI node is dead, because it has zero entries, nuke it now.
174
75.8k
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
175
    // If anyone is using this PHI, make them use a dummy value instead...
176
0
    replaceAllUsesWith(PoisonValue::get(getType()));
177
0
    eraseFromParent();
178
0
  }
179
75.8k
}
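A small usage sketch for removeIncomingValueIf, assuming DeadPred is a predecessor block that is about to be unlinked from the CFG; removeEdgesFrom is a hypothetical helper.

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"

// Drop every incoming entry that flows in from DeadPred, deleting the PHI
// entirely if that leaves it with no operands.
static void removeEdgesFrom(llvm::PHINode &PN, llvm::BasicBlock *DeadPred) {
  PN.removeIncomingValueIf(
      [&](unsigned Idx) { return PN.getIncomingBlock(Idx) == DeadPred; },
      /*DeletePHIIfEmpty=*/true);
}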
180
181
/// growOperands - This grows the operand list in response
182
/// to a push_back style of operation.  This grows the number of ops by 1.5
183
/// times.
184
///
185
11.9k
void PHINode::growOperands() {
186
11.9k
  unsigned e = getNumOperands();
187
11.9k
  unsigned NumOps = e + e / 2;
188
11.9k
  if (NumOps < 2) NumOps = 2;      // 2 op PHI nodes are VERY common.
189
190
11.9k
  ReservedSpace = NumOps;
191
11.9k
  growHungoffUses(ReservedSpace, /* IsPhi */ true);
192
11.9k
}
193
194
/// hasConstantValue - If the specified PHI node always merges together the same
195
/// value, return the value, otherwise return null.
196
356k
Value *PHINode::hasConstantValue() const {
197
  // Exploit the fact that phi nodes always have at least one entry.
198
356k
  Value *ConstantValue = getIncomingValue(0);
199
408k
  for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
200
285k
    if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
201
270k
      if (ConstantValue != this)
202
234k
        return nullptr; // Incoming values not all the same.
203
       // The case where the first value is this PHI.
204
36.7k
      ConstantValue = getIncomingValue(i);
205
36.7k
    }
206
122k
  if (ConstantValue == this)
207
28
    return UndefValue::get(getType());
208
122k
  return ConstantValue;
209
122k
}
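A sketch of the typical consumer of hasConstantValue, assuming the caller has separately verified that the returned value dominates the PHI; foldTrivialPHI is a hypothetical name.

#include "llvm/IR/Instructions.h"

// Replace a PHI whose incoming values (ignoring self-references) are all the
// same with that single value, then erase it.
static bool foldTrivialPHI(llvm::PHINode &PN) {
  llvm::Value *V = PN.hasConstantValue();
  if (!V)
    return false;
  PN.replaceAllUsesWith(V);
  PN.eraseFromParent();
  return true;
}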
210
211
/// hasConstantOrUndefValue - Whether the specified PHI node always merges
212
/// together the same value, assuming that undefs result in the same value as
213
/// non-undefs.
214
/// Unlike \ref hasConstantValue, this does not return a value because the
215
/// unique non-undef incoming value need not dominate the PHI node.
216
0
bool PHINode::hasConstantOrUndefValue() const {
217
0
  Value *ConstantValue = nullptr;
218
0
  for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
219
0
    Value *Incoming = getIncomingValue(i);
220
0
    if (Incoming != this && !isa<UndefValue>(Incoming)) {
221
0
      if (ConstantValue && ConstantValue != Incoming)
222
0
        return false;
223
0
      ConstantValue = Incoming;
224
0
    }
225
0
  }
226
0
  return true;
227
0
}
228
229
//===----------------------------------------------------------------------===//
230
//                       LandingPadInst Implementation
231
//===----------------------------------------------------------------------===//
232
233
LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
234
                               const Twine &NameStr, Instruction *InsertBefore)
235
2.91k
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
236
2.91k
  init(NumReservedValues, NameStr);
237
2.91k
}
238
239
LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
240
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
241
0
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
242
0
  init(NumReservedValues, NameStr);
243
0
}
244
245
LandingPadInst::LandingPadInst(const LandingPadInst &LP)
246
    : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
247
                  LP.getNumOperands()),
248
0
      ReservedSpace(LP.getNumOperands()) {
249
0
  allocHungoffUses(LP.getNumOperands());
250
0
  Use *OL = getOperandList();
251
0
  const Use *InOL = LP.getOperandList();
252
0
  for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
253
0
    OL[I] = InOL[I];
254
255
0
  setCleanup(LP.isCleanup());
256
0
}
257
258
LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
259
                                       const Twine &NameStr,
260
2.91k
                                       Instruction *InsertBefore) {
261
2.91k
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
262
2.91k
}
263
264
LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
265
                                       const Twine &NameStr,
266
0
                                       BasicBlock *InsertAtEnd) {
267
0
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertAtEnd);
268
0
}
269
270
2.91k
void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
271
2.91k
  ReservedSpace = NumReservedValues;
272
2.91k
  setNumHungOffUseOperands(0);
273
2.91k
  allocHungoffUses(ReservedSpace);
274
2.91k
  setName(NameStr);
275
2.91k
  setCleanup(false);
276
2.91k
}
277
278
/// growOperands - This grows the operand list in response to a
279
/// push_back style of operation. This grows the number of ops by 2 times.
280
2.07k
void LandingPadInst::growOperands(unsigned Size) {
281
2.07k
  unsigned e = getNumOperands();
282
2.07k
  if (ReservedSpace >= e + Size) return;
283
0
  ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
284
0
  growHungoffUses(ReservedSpace);
285
0
}
286
287
2.07k
void LandingPadInst::addClause(Constant *Val) {
288
2.07k
  unsigned OpNo = getNumOperands();
289
2.07k
  growOperands(1);
290
2.07k
  assert(OpNo < ReservedSpace && "Growing didn't work!");
291
0
  setNumHungOffUseOperands(getNumOperands() + 1);
292
2.07k
  getOperandList()[OpNo] = Val;
293
2.07k
}
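A short sketch of the clause-building path above, assuming an Itanium C++ personality where a null type-info clause means catch-all; makeCatchAll and its parameters are illustrative.

#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"

// Build a landingpad with a single catch clause; TypeInfo may be a null
// constant to express "catch everything" under the C++ personality.
static llvm::LandingPadInst *makeCatchAll(llvm::Type *LPadTy,
                                          llvm::Constant *TypeInfo,
                                          llvm::Instruction *InsertBefore) {
  llvm::LandingPadInst *LP = llvm::LandingPadInst::Create(
      LPadTy, /*NumReservedClauses=*/1, "lpad", InsertBefore);
  LP->addClause(TypeInfo);
  return LP;
}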
294
295
//===----------------------------------------------------------------------===//
296
//                        CallBase Implementation
297
//===----------------------------------------------------------------------===//
298
299
CallBase *CallBase::Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
300
0
                           Instruction *InsertPt) {
301
0
  switch (CB->getOpcode()) {
302
0
  case Instruction::Call:
303
0
    return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
304
0
  case Instruction::Invoke:
305
0
    return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
306
0
  case Instruction::CallBr:
307
0
    return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
308
0
  default:
309
0
    llvm_unreachable("Unknown CallBase sub-class!");
310
0
  }
311
0
}
312
313
CallBase *CallBase::Create(CallBase *CI, OperandBundleDef OpB,
314
0
                           Instruction *InsertPt) {
315
0
  SmallVector<OperandBundleDef, 2> OpDefs;
316
0
  for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
317
0
    auto ChildOB = CI->getOperandBundleAt(i);
318
0
    if (ChildOB.getTagName() != OpB.getTag())
319
0
      OpDefs.emplace_back(ChildOB);
320
0
  }
321
0
  OpDefs.emplace_back(OpB);
322
0
  return CallBase::Create(CI, OpDefs, InsertPt);
323
0
}
324
325
326
75.3k
Function *CallBase::getCaller() { return getParent()->getParent(); }
327
328
345
unsigned CallBase::getNumSubclassExtraOperandsDynamic() const {
329
345
  assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
330
0
  return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
331
345
}
332
333
241k
bool CallBase::isIndirectCall() const {
334
241k
  const Value *V = getCalledOperand();
335
241k
  if (isa<Function>(V) || isa<Constant>(V))
336
238k
    return false;
337
2.97k
  return !isInlineAsm();
338
241k
}
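An illustrative sketch that tallies indirect call sites in a function using isIndirectCall; countIndirectCalls is a hypothetical helper.

#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/Support/Casting.h"

// Count call sites whose callee is neither a Function, another constant,
// nor inline asm.
static unsigned countIndirectCalls(llvm::Function &F) {
  unsigned N = 0;
  for (llvm::Instruction &I : llvm::instructions(F))
    if (auto *CB = llvm::dyn_cast<llvm::CallBase>(&I))
      if (CB->isIndirectCall())
        ++N;
  return N;
}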
339
340
/// Tests if this call site must be tail call optimized. Only a CallInst can
341
/// be tail call optimized.
342
151k
bool CallBase::isMustTailCall() const {
343
151k
  if (auto *CI = dyn_cast<CallInst>(this))
344
151k
    return CI->isMustTailCall();
345
461
  return false;
346
151k
}
347
348
/// Tests if this call site is marked as a tail call.
349
1.93k
bool CallBase::isTailCall() const {
350
1.93k
  if (auto *CI = dyn_cast<CallInst>(this))
351
1.92k
    return CI->isTailCall();
352
8
  return false;
353
1.93k
}
354
355
2.25M
Intrinsic::ID CallBase::getIntrinsicID() const {
356
2.25M
  if (auto *F = getCalledFunction())
357
2.22M
    return F->getIntrinsicID();
358
33.0k
  return Intrinsic::not_intrinsic;
359
2.25M
}
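A minimal sketch of intrinsic-ID based dispatch built on getIntrinsicID; isLifetimeMarker is a hypothetical helper.

#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Intrinsics.h"

// Recognize lifetime markers without string-matching the callee name.
static bool isLifetimeMarker(const llvm::CallBase &CB) {
  switch (CB.getIntrinsicID()) {
  case llvm::Intrinsic::lifetime_start:
  case llvm::Intrinsic::lifetime_end:
    return true;
  default:
    return false;
  }
}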
360
361
2.54k
FPClassTest CallBase::getRetNoFPClass() const {
362
2.54k
  FPClassTest Mask = Attrs.getRetNoFPClass();
363
364
2.54k
  if (const Function *F = getCalledFunction())
365
2.42k
    Mask |= F->getAttributes().getRetNoFPClass();
366
2.54k
  return Mask;
367
2.54k
}
368
369
0
FPClassTest CallBase::getParamNoFPClass(unsigned i) const {
370
0
  FPClassTest Mask = Attrs.getParamNoFPClass(i);
371
372
0
  if (const Function *F = getCalledFunction())
373
0
    Mask |= F->getAttributes().getParamNoFPClass(i);
374
0
  return Mask;
375
0
}
376
377
911
bool CallBase::isReturnNonNull() const {
378
911
  if (hasRetAttr(Attribute::NonNull))
379
147
    return true;
380
381
764
  if (getRetDereferenceableBytes() > 0 &&
382
764
      !NullPointerIsDefined(getCaller(), getType()->getPointerAddressSpace()))
383
0
    return true;
384
385
764
  return false;
386
764
}
387
388
1.51M
Value *CallBase::getArgOperandWithAttribute(Attribute::AttrKind Kind) const {
389
1.51M
  unsigned Index;
390
391
1.51M
  if (Attrs.hasAttrSomewhere(Kind, &Index))
392
0
    return getArgOperand(Index - AttributeList::FirstArgIndex);
393
1.51M
  if (const Function *F = getCalledFunction())
394
1.50M
    if (F->getAttributes().hasAttrSomewhere(Kind, &Index))
395
1.71k
      return getArgOperand(Index - AttributeList::FirstArgIndex);
396
397
1.51M
  return nullptr;
398
1.51M
}
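A one-line sketch of a common query built on getArgOperandWithAttribute; getSRetArgument is a hypothetical name.

#include "llvm/IR/Attributes.h"
#include "llvm/IR/InstrTypes.h"

// Returns the argument marked sret at the call site or on the callee,
// or nullptr if there is none.
static llvm::Value *getSRetArgument(const llvm::CallBase &CB) {
  return CB.getArgOperandWithAttribute(llvm::Attribute::StructRet);
}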
399
400
/// Determine whether the argument or parameter has the given attribute.
401
5.02M
bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
402
5.02M
  assert(ArgNo < arg_size() && "Param index out of bounds!");
403
404
5.02M
  if (Attrs.hasParamAttr(ArgNo, Kind))
405
17.0k
    return true;
406
407
5.00M
  const Function *F = getCalledFunction();
408
5.00M
  if (!F)
409
62.8k
    return false;
410
411
4.94M
  if (!F->getAttributes().hasParamAttr(ArgNo, Kind))
412
4.74M
    return false;
413
414
  // Take into account mod/ref by operand bundles.
415
206k
  switch (Kind) {
416
181
  case Attribute::ReadNone:
417
181
    return !hasReadingOperandBundles() && !hasClobberingOperandBundles();
418
520
  case Attribute::ReadOnly:
419
520
    return !hasClobberingOperandBundles();
420
588
  case Attribute::WriteOnly:
421
588
    return !hasReadingOperandBundles();
422
204k
  default:
423
204k
    return true;
424
206k
  }
425
206k
}
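A trivial wrapper sketch over paramHasAttr, assuming ArgNo is in range as the assertion above requires; isByValArgument is hypothetical.

#include "llvm/IR/Attributes.h"
#include "llvm/IR/InstrTypes.h"

// True if the ArgNo-th argument is passed by value, whether the attribute
// sits on the call site or on the called function.
static bool isByValArgument(const llvm::CallBase &CB, unsigned ArgNo) {
  return CB.paramHasAttr(ArgNo, llvm::Attribute::ByVal);
}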
426
427
3.76M
bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
428
3.76M
  Value *V = getCalledOperand();
429
3.76M
  if (auto *CE = dyn_cast<ConstantExpr>(V))
430
130
    if (CE->getOpcode() == BitCast)
431
0
      V = CE->getOperand(0);
432
433
3.76M
  if (auto *F = dyn_cast<Function>(V))
434
3.72M
    return F->getAttributes().hasFnAttr(Kind);
435
436
48.1k
  return false;
437
3.76M
}
438
439
6.00k
bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
440
6.00k
  Value *V = getCalledOperand();
441
6.00k
  if (auto *CE = dyn_cast<ConstantExpr>(V))
442
0
    if (CE->getOpcode() == BitCast)
443
0
      V = CE->getOperand(0);
444
445
6.00k
  if (auto *F = dyn_cast<Function>(V))
446
5.99k
    return F->getAttributes().hasFnAttr(Kind);
447
448
13
  return false;
449
6.00k
}
450
451
template <typename AK>
452
205k
Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
453
205k
  if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
454
    // getMemoryEffects() correctly combines memory effects from the call-site,
455
    // operand bundles and function.
456
195k
    assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");
457
195k
  }
458
459
0
  Value *V = getCalledOperand();
460
205k
  if (auto *CE = dyn_cast<ConstantExpr>(V))
461
25
    if (CE->getOpcode() == BitCast)
462
0
      V = CE->getOperand(0);
463
464
205k
  if (auto *F = dyn_cast<Function>(V))
465
202k
    return F->getAttributes().getFnAttr(Kind);
466
467
3.34k
  return Attribute();
468
205k
}
llvm::Attribute llvm::CallBase::getFnAttrOnCalledFunction<llvm::Attribute::AttrKind>(llvm::Attribute::AttrKind) const
Line
Count
Source
452
195k
Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
453
195k
  if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
454
    // getMemoryEffects() correctly combines memory effects from the call-site,
455
    // operand bundles and function.
456
195k
    assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");
457
195k
  }
458
459
0
  Value *V = getCalledOperand();
460
195k
  if (auto *CE = dyn_cast<ConstantExpr>(V))
461
25
    if (CE->getOpcode() == BitCast)
462
0
      V = CE->getOperand(0);
463
464
195k
  if (auto *F = dyn_cast<Function>(V))
465
192k
    return F->getAttributes().getFnAttr(Kind);
466
467
3.34k
  return Attribute();
468
195k
}
llvm::Attribute llvm::CallBase::getFnAttrOnCalledFunction<llvm::StringRef>(llvm::StringRef) const
Line
Count
Source
452
9.64k
Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
453
9.64k
  if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
454
    // getMemoryEffects() correctly combines memory effects from the call-site,
455
    // operand bundles and function.
456
9.64k
    assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");
457
9.64k
  }
458
459
9.64k
  Value *V = getCalledOperand();
460
9.64k
  if (auto *CE = dyn_cast<ConstantExpr>(V))
461
0
    if (CE->getOpcode() == BitCast)
462
0
      V = CE->getOperand(0);
463
464
9.64k
  if (auto *F = dyn_cast<Function>(V))
465
9.64k
    return F->getAttributes().getFnAttr(Kind);
466
467
0
  return Attribute();
468
9.64k
}
469
470
template Attribute
471
CallBase::getFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
472
template Attribute CallBase::getFnAttrOnCalledFunction(StringRef Kind) const;
473
474
void CallBase::getOperandBundlesAsDefs(
475
134k
    SmallVectorImpl<OperandBundleDef> &Defs) const {
476
135k
  for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
477
977
    Defs.emplace_back(getOperandBundleAt(i));
478
134k
}
479
480
CallBase::op_iterator
481
CallBase::populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
482
375k
                                     const unsigned BeginIndex) {
483
375k
  auto It = op_begin() + BeginIndex;
484
375k
  for (auto &B : Bundles)
485
44.2k
    It = std::copy(B.input_begin(), B.input_end(), It);
486
487
375k
  auto *ContextImpl = getContext().pImpl;
488
375k
  auto BI = Bundles.begin();
489
375k
  unsigned CurrentIndex = BeginIndex;
490
491
375k
  for (auto &BOI : bundle_op_infos()) {
492
44.2k
    assert(BI != Bundles.end() && "Incorrect allocation?");
493
494
0
    BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
495
44.2k
    BOI.Begin = CurrentIndex;
496
44.2k
    BOI.End = CurrentIndex + BI->input_size();
497
44.2k
    CurrentIndex = BOI.End;
498
44.2k
    BI++;
499
44.2k
  }
500
501
375k
  assert(BI == Bundles.end() && "Incorrect allocation?");
502
503
0
  return It;
504
375k
}
505
506
85
CallBase::BundleOpInfo &CallBase::getBundleOpInfoForOperand(unsigned OpIdx) {
507
  /// When there aren't many bundles, we do a simple linear search.
508
  /// Otherwise, fall back to a binary search that uses the fact that bundles
509
  /// usually have a similar number of arguments to converge faster.
510
85
  if (bundle_op_info_end() - bundle_op_info_begin() < 8) {
511
85
    for (auto &BOI : bundle_op_infos())
512
99
      if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
513
85
        return BOI;
514
515
0
    llvm_unreachable("Did not find operand bundle for operand!");
516
0
  }
517
518
0
  assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
519
0
  assert(bundle_op_info_end() - bundle_op_info_begin() > 0 &&
520
0
         OpIdx < std::prev(bundle_op_info_end())->End &&
521
0
         "The Idx isn't in the operand bundle");
522
523
  /// We need fractional precision below, and to avoid floating point numbers
524
  /// we use an integral value multiplied by this constant.
525
0
  constexpr unsigned NumberScaling = 1024;
526
527
0
  bundle_op_iterator Begin = bundle_op_info_begin();
528
0
  bundle_op_iterator End = bundle_op_info_end();
529
0
  bundle_op_iterator Current = Begin;
530
531
0
  while (Begin != End) {
532
0
    unsigned ScaledOperandPerBundle =
533
0
        NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
534
0
    Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
535
0
                       ScaledOperandPerBundle);
536
0
    if (Current >= End)
537
0
      Current = std::prev(End);
538
0
    assert(Current < End && Current >= Begin &&
539
0
           "the operand bundle doesn't cover every value in the range");
540
0
    if (OpIdx >= Current->Begin && OpIdx < Current->End)
541
0
      break;
542
0
    if (OpIdx >= Current->End)
543
0
      Begin = Current + 1;
544
0
    else
545
0
      End = Current;
546
0
  }
547
548
0
  assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
549
0
         "the operand bundle doesn't cover every value in the range");
550
0
  return *Current;
551
85
}
552
553
CallBase *CallBase::addOperandBundle(CallBase *CB, uint32_t ID,
554
                                     OperandBundleDef OB,
555
0
                                     Instruction *InsertPt) {
556
0
  if (CB->getOperandBundle(ID))
557
0
    return CB;
558
559
0
  SmallVector<OperandBundleDef, 1> Bundles;
560
0
  CB->getOperandBundlesAsDefs(Bundles);
561
0
  Bundles.push_back(OB);
562
0
  return Create(CB, Bundles, InsertPt);
563
0
}
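A hedged sketch of attaching a "deopt" bundle with addOperandBundle; because a fresh instruction is returned when the bundle is added, the caller must replace and erase the old call. attachDeoptBundle is a hypothetical helper.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/LLVMContext.h"
#include <vector>

static llvm::CallBase *attachDeoptBundle(llvm::CallBase *CB,
                                         llvm::ArrayRef<llvm::Value *> State) {
  std::vector<llvm::Value *> Inputs(State.begin(), State.end());
  llvm::OperandBundleDef OB("deopt", std::move(Inputs));
  // No-op (returns CB) if a deopt bundle is already present.
  llvm::CallBase *NewCB =
      llvm::CallBase::addOperandBundle(CB, llvm::LLVMContext::OB_deopt, OB, CB);
  if (NewCB != CB) {
    CB->replaceAllUsesWith(NewCB);
    CB->eraseFromParent();
  }
  return NewCB;
}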
564
565
CallBase *CallBase::removeOperandBundle(CallBase *CB, uint32_t ID,
566
0
                                        Instruction *InsertPt) {
567
0
  SmallVector<OperandBundleDef, 1> Bundles;
568
0
  bool CreateNew = false;
569
570
0
  for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
571
0
    auto Bundle = CB->getOperandBundleAt(I);
572
0
    if (Bundle.getTagID() == ID) {
573
0
      CreateNew = true;
574
0
      continue;
575
0
    }
576
0
    Bundles.emplace_back(Bundle);
577
0
  }
578
579
0
  return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
580
0
}
581
582
57.0k
bool CallBase::hasReadingOperandBundles() const {
583
  // Implementation note: this is a conservative implementation of operand
584
  // bundle semantics, where *any* non-assume operand bundle (other than
585
  // ptrauth and kcfi) forces a callsite to be at least readonly.
586
57.0k
  return hasOperandBundlesOtherThan(
587
57.0k
             {LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi}) &&
588
57.0k
         getIntrinsicID() != Intrinsic::assume;
589
57.0k
}
590
591
57.0k
bool CallBase::hasClobberingOperandBundles() const {
592
57.0k
  return hasOperandBundlesOtherThan(
593
57.0k
             {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
594
57.0k
              LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi}) &&
595
57.0k
         getIntrinsicID() != Intrinsic::assume;
596
57.0k
}
597
598
236k
MemoryEffects CallBase::getMemoryEffects() const {
599
236k
  MemoryEffects ME = getAttributes().getMemoryEffects();
600
236k
  if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
601
231k
    MemoryEffects FnME = Fn->getMemoryEffects();
602
231k
    if (hasOperandBundles()) {
603
      // TODO: Add a method to get memory effects for operand bundles instead.
604
5.26k
      if (hasReadingOperandBundles())
605
4.94k
        FnME |= MemoryEffects::readOnly();
606
5.26k
      if (hasClobberingOperandBundles())
607
1.43k
        FnME |= MemoryEffects::writeOnly();
608
5.26k
    }
609
231k
    ME &= FnME;
610
231k
  }
611
236k
  return ME;
612
236k
}
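A small consumer-side sketch of the combined effects computed above; callMayWriteMemory is a hypothetical name.

#include "llvm/IR/InstrTypes.h"
#include "llvm/Support/ModRef.h"

// Conservatively decide whether a call may modify memory, folding in the
// callee, the call-site attributes and any operand bundles.
static bool callMayWriteMemory(const llvm::CallBase &CB) {
  return !CB.getMemoryEffects().onlyReadsMemory();
}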
613
53
void CallBase::setMemoryEffects(MemoryEffects ME) {
614
53
  addFnAttr(Attribute::getWithMemoryEffects(getContext(), ME));
615
53
}
616
617
/// Determine if the function does not access memory.
618
34.8k
bool CallBase::doesNotAccessMemory() const {
619
34.8k
  return getMemoryEffects().doesNotAccessMemory();
620
34.8k
}
621
53
void CallBase::setDoesNotAccessMemory() {
622
53
  setMemoryEffects(MemoryEffects::none());
623
53
}
624
625
/// Determine if the function does not access or only reads memory.
626
147k
bool CallBase::onlyReadsMemory() const {
627
147k
  return getMemoryEffects().onlyReadsMemory();
628
147k
}
629
0
void CallBase::setOnlyReadsMemory() {
630
0
  setMemoryEffects(getMemoryEffects() & MemoryEffects::readOnly());
631
0
}
632
633
/// Determine if the function does not access or only writes memory.
634
53.8k
bool CallBase::onlyWritesMemory() const {
635
53.8k
  return getMemoryEffects().onlyWritesMemory();
636
53.8k
}
637
0
void CallBase::setOnlyWritesMemory() {
638
0
  setMemoryEffects(getMemoryEffects() & MemoryEffects::writeOnly());
639
0
}
640
641
/// Determine if the call can access memory only using pointers based
642
/// on its arguments.
643
496
bool CallBase::onlyAccessesArgMemory() const {
644
496
  return getMemoryEffects().onlyAccessesArgPointees();
645
496
}
646
0
void CallBase::setOnlyAccessesArgMemory() {
647
0
  setMemoryEffects(getMemoryEffects() & MemoryEffects::argMemOnly());
648
0
}
649
650
/// Determine if the function may only access memory that is
651
///  inaccessible from the IR.
652
208
bool CallBase::onlyAccessesInaccessibleMemory() const {
653
208
  return getMemoryEffects().onlyAccessesInaccessibleMem();
654
208
}
655
0
void CallBase::setOnlyAccessesInaccessibleMemory() {
656
0
  setMemoryEffects(getMemoryEffects() & MemoryEffects::inaccessibleMemOnly());
657
0
}
658
659
/// Determine if the function may only access memory that is
660
///  either inaccessible from the IR or pointed to by its arguments.
661
0
bool CallBase::onlyAccessesInaccessibleMemOrArgMem() const {
662
0
  return getMemoryEffects().onlyAccessesInaccessibleOrArgMem();
663
0
}
664
0
void CallBase::setOnlyAccessesInaccessibleMemOrArgMem() {
665
0
  setMemoryEffects(getMemoryEffects() &
666
0
                   MemoryEffects::inaccessibleOrArgMemOnly());
667
0
}
668
669
//===----------------------------------------------------------------------===//
670
//                        CallInst Implementation
671
//===----------------------------------------------------------------------===//
672
673
void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
674
370k
                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
675
370k
  this->FTy = FTy;
676
370k
  assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
677
370k
         "NumOperands not set up?");
678
679
0
#ifndef NDEBUG
680
0
  assert((Args.size() == FTy->getNumParams() ||
681
370k
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
682
370k
         "Calling a function with bad signature!");
683
684
1.14M
  for (unsigned i = 0; i != Args.size(); ++i)
685
774k
    assert((i >= FTy->getNumParams() ||
686
370k
            FTy->getParamType(i) == Args[i]->getType()) &&
687
370k
           "Calling a function with a bad signature!");
688
370k
#endif
689
690
  // Set operands in order of their index to match use-list-order
691
  // prediction.
692
370k
  llvm::copy(Args, op_begin());
693
370k
  setCalledOperand(Func);
694
695
370k
  auto It = populateBundleOperandInfos(Bundles, Args.size());
696
370k
  (void)It;
697
370k
  assert(It + 1 == op_end() && "Should add up!");
698
699
0
  setName(NameStr);
700
370k
}
701
702
0
void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
703
0
  this->FTy = FTy;
704
0
  assert(getNumOperands() == 1 && "NumOperands not set up?");
705
0
  setCalledOperand(Func);
706
707
0
  assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");
708
709
0
  setName(NameStr);
710
0
}
711
712
CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
713
                   Instruction *InsertBefore)
714
    : CallBase(Ty->getReturnType(), Instruction::Call,
715
0
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
716
0
  init(Ty, Func, Name);
717
0
}
718
719
CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
720
                   BasicBlock *InsertAtEnd)
721
    : CallBase(Ty->getReturnType(), Instruction::Call,
722
0
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertAtEnd) {
723
0
  init(Ty, Func, Name);
724
0
}
725
726
CallInst::CallInst(const CallInst &CI)
727
    : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call,
728
               OperandTraits<CallBase>::op_end(this) - CI.getNumOperands(),
729
3.45k
               CI.getNumOperands()) {
730
3.45k
  setTailCallKind(CI.getTailCallKind());
731
3.45k
  setCallingConv(CI.getCallingConv());
732
733
3.45k
  std::copy(CI.op_begin(), CI.op_end(), op_begin());
734
3.45k
  std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
735
3.45k
            bundle_op_info_begin());
736
3.45k
  SubclassOptionalData = CI.SubclassOptionalData;
737
3.45k
}
738
739
CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
740
18
                           Instruction *InsertPt) {
741
18
  std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());
742
743
18
  auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
744
18
                                 Args, OpB, CI->getName(), InsertPt);
745
18
  NewCI->setTailCallKind(CI->getTailCallKind());
746
18
  NewCI->setCallingConv(CI->getCallingConv());
747
18
  NewCI->SubclassOptionalData = CI->SubclassOptionalData;
748
18
  NewCI->setAttributes(CI->getAttributes());
749
18
  NewCI->setDebugLoc(CI->getDebugLoc());
750
18
  return NewCI;
751
18
}
752
753
// Update the profile weight for a call instruction by scaling it using the
754
// ratio of S/T. The meaning of "branch_weights" metadata for a call
755
// instruction is transferred to represent the call count.
756
0
void CallInst::updateProfWeight(uint64_t S, uint64_t T) {
757
0
  auto *ProfileData = getMetadata(LLVMContext::MD_prof);
758
0
  if (ProfileData == nullptr)
759
0
    return;
760
761
0
  auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(0));
762
0
  if (!ProfDataName || (!ProfDataName->getString().equals("branch_weights") &&
763
0
                        !ProfDataName->getString().equals("VP")))
764
0
    return;
765
766
0
  if (T == 0) {
767
0
    LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
768
0
                         "div by 0. Ignoring. Likely the function "
769
0
                      << getParent()->getParent()->getName()
770
0
                      << " has 0 entry count, and contains call instructions "
771
0
                         "with non-zero prof info.");
772
0
    return;
773
0
  }
774
775
0
  MDBuilder MDB(getContext());
776
0
  SmallVector<Metadata *, 3> Vals;
777
0
  Vals.push_back(ProfileData->getOperand(0));
778
0
  APInt APS(128, S), APT(128, T);
779
0
  if (ProfDataName->getString().equals("branch_weights") &&
780
0
      ProfileData->getNumOperands() > 0) {
781
    // Using APInt::div may be expensive, but most cases should fit 64 bits.
782
0
    APInt Val(128, mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(1))
783
0
                       ->getValue()
784
0
                       .getZExtValue());
785
0
    Val *= APS;
786
0
    Vals.push_back(MDB.createConstant(
787
0
        ConstantInt::get(Type::getInt32Ty(getContext()),
788
0
                         Val.udiv(APT).getLimitedValue(UINT32_MAX))));
789
0
  } else if (ProfDataName->getString().equals("VP"))
790
0
    for (unsigned i = 1; i < ProfileData->getNumOperands(); i += 2) {
791
      // The first value is the key of the value profile, which will not change.
792
0
      Vals.push_back(ProfileData->getOperand(i));
793
0
      uint64_t Count =
794
0
          mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i + 1))
795
0
              ->getValue()
796
0
              .getZExtValue();
797
      // Don't scale the magic number.
798
0
      if (Count == NOMORE_ICP_MAGICNUM) {
799
0
        Vals.push_back(ProfileData->getOperand(i + 1));
800
0
        continue;
801
0
      }
802
      // Using APInt::div may be expensive, but most cases should fit 64 bits.
803
0
      APInt Val(128, Count);
804
0
      Val *= APS;
805
0
      Vals.push_back(MDB.createConstant(
806
0
          ConstantInt::get(Type::getInt64Ty(getContext()),
807
0
                           Val.udiv(APT).getLimitedValue())));
808
0
    }
809
0
  setMetadata(LLVMContext::MD_prof, MDNode::get(getContext(), Vals));
810
0
}
811
812
//===----------------------------------------------------------------------===//
813
//                        InvokeInst Implementation
814
//===----------------------------------------------------------------------===//
815
816
void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
817
                      BasicBlock *IfException, ArrayRef<Value *> Args,
818
                      ArrayRef<OperandBundleDef> Bundles,
819
5.16k
                      const Twine &NameStr) {
820
5.16k
  this->FTy = FTy;
821
822
5.16k
  assert((int)getNumOperands() ==
823
5.16k
             ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
824
5.16k
         "NumOperands not set up?");
825
826
0
#ifndef NDEBUG
827
0
  assert(((Args.size() == FTy->getNumParams()) ||
828
5.16k
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
829
5.16k
         "Invoking a function with bad signature");
830
831
8.12k
  for (unsigned i = 0, e = Args.size(); i != e; i++)
832
2.96k
    assert((i >= FTy->getNumParams() ||
833
5.16k
            FTy->getParamType(i) == Args[i]->getType()) &&
834
5.16k
           "Invoking a function with a bad signature!");
835
5.16k
#endif
836
837
  // Set operands in order of their index to match use-list-order
838
  // prediction.
839
5.16k
  llvm::copy(Args, op_begin());
840
5.16k
  setNormalDest(IfNormal);
841
5.16k
  setUnwindDest(IfException);
842
5.16k
  setCalledOperand(Fn);
843
844
5.16k
  auto It = populateBundleOperandInfos(Bundles, Args.size());
845
5.16k
  (void)It;
846
5.16k
  assert(It + 3 == op_end() && "Should add up!");
847
848
0
  setName(NameStr);
849
5.16k
}
850
851
InvokeInst::InvokeInst(const InvokeInst &II)
852
    : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke,
853
               OperandTraits<CallBase>::op_end(this) - II.getNumOperands(),
854
0
               II.getNumOperands()) {
855
0
  setCallingConv(II.getCallingConv());
856
0
  std::copy(II.op_begin(), II.op_end(), op_begin());
857
0
  std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
858
0
            bundle_op_info_begin());
859
0
  SubclassOptionalData = II.SubclassOptionalData;
860
0
}
861
862
InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
863
0
                               Instruction *InsertPt) {
864
0
  std::vector<Value *> Args(II->arg_begin(), II->arg_end());
865
866
0
  auto *NewII = InvokeInst::Create(
867
0
      II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
868
0
      II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
869
0
  NewII->setCallingConv(II->getCallingConv());
870
0
  NewII->SubclassOptionalData = II->SubclassOptionalData;
871
0
  NewII->setAttributes(II->getAttributes());
872
0
  NewII->setDebugLoc(II->getDebugLoc());
873
0
  return NewII;
874
0
}
875
876
0
LandingPadInst *InvokeInst::getLandingPadInst() const {
877
0
  return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
878
0
}
879
880
//===----------------------------------------------------------------------===//
881
//                        CallBrInst Implementation
882
//===----------------------------------------------------------------------===//
883
884
void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
885
                      ArrayRef<BasicBlock *> IndirectDests,
886
                      ArrayRef<Value *> Args,
887
                      ArrayRef<OperandBundleDef> Bundles,
888
104
                      const Twine &NameStr) {
889
104
  this->FTy = FTy;
890
891
104
  assert((int)getNumOperands() ==
892
104
             ComputeNumOperands(Args.size(), IndirectDests.size(),
893
104
                                CountBundleInputs(Bundles)) &&
894
104
         "NumOperands not set up?");
895
896
0
#ifndef NDEBUG
897
0
  assert(((Args.size() == FTy->getNumParams()) ||
898
104
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
899
104
         "Calling a function with bad signature");
900
901
106
  for (unsigned i = 0, e = Args.size(); i != e; i++)
902
2
    assert((i >= FTy->getNumParams() ||
903
104
            FTy->getParamType(i) == Args[i]->getType()) &&
904
104
           "Calling a function with a bad signature!");
905
104
#endif
906
907
  // Set operands in order of their index to match use-list-order
908
  // prediction.
909
104
  std::copy(Args.begin(), Args.end(), op_begin());
910
104
  NumIndirectDests = IndirectDests.size();
911
104
  setDefaultDest(Fallthrough);
912
289
  for (unsigned i = 0; i != NumIndirectDests; ++i)
913
185
    setIndirectDest(i, IndirectDests[i]);
914
104
  setCalledOperand(Fn);
915
916
104
  auto It = populateBundleOperandInfos(Bundles, Args.size());
917
104
  (void)It;
918
104
  assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");
919
920
0
  setName(NameStr);
921
104
}
922
923
CallBrInst::CallBrInst(const CallBrInst &CBI)
924
    : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
925
               OperandTraits<CallBase>::op_end(this) - CBI.getNumOperands(),
926
0
               CBI.getNumOperands()) {
927
0
  setCallingConv(CBI.getCallingConv());
928
0
  std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
929
0
  std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
930
0
            bundle_op_info_begin());
931
0
  SubclassOptionalData = CBI.SubclassOptionalData;
932
0
  NumIndirectDests = CBI.NumIndirectDests;
933
0
}
934
935
CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
936
0
                               Instruction *InsertPt) {
937
0
  std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
938
939
0
  auto *NewCBI = CallBrInst::Create(
940
0
      CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
941
0
      CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
942
0
  NewCBI->setCallingConv(CBI->getCallingConv());
943
0
  NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
944
0
  NewCBI->setAttributes(CBI->getAttributes());
945
0
  NewCBI->setDebugLoc(CBI->getDebugLoc());
946
0
  NewCBI->NumIndirectDests = CBI->NumIndirectDests;
947
0
  return NewCBI;
948
0
}
949
950
//===----------------------------------------------------------------------===//
951
//                        ReturnInst Implementation
952
//===----------------------------------------------------------------------===//
953
954
ReturnInst::ReturnInst(const ReturnInst &RI)
955
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
956
                  OperandTraits<ReturnInst>::op_end(this) - RI.getNumOperands(),
957
3.72k
                  RI.getNumOperands()) {
958
3.72k
  if (RI.getNumOperands())
959
1.35k
    Op<0>() = RI.Op<0>();
960
3.72k
  SubclassOptionalData = RI.SubclassOptionalData;
961
3.72k
}
962
963
ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, Instruction *InsertBefore)
964
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
965
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
966
967k
                  InsertBefore) {
967
967k
  if (retVal)
968
803k
    Op<0>() = retVal;
969
967k
}
970
971
ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
972
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
973
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
974
0
                  InsertAtEnd) {
975
0
  if (retVal)
976
0
    Op<0>() = retVal;
977
0
}
978
979
ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
980
    : Instruction(Type::getVoidTy(Context), Instruction::Ret,
981
0
                  OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {}
982
983
//===----------------------------------------------------------------------===//
984
//                        ResumeInst Implementation
985
//===----------------------------------------------------------------------===//
986
987
ResumeInst::ResumeInst(const ResumeInst &RI)
988
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
989
1
                  OperandTraits<ResumeInst>::op_begin(this), 1) {
990
1
  Op<0>() = RI.Op<0>();
991
1
}
992
993
ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
994
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
995
1.90k
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
996
1.90k
  Op<0>() = Exn;
997
1.90k
}
998
999
ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
1000
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
1001
0
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
1002
0
  Op<0>() = Exn;
1003
0
}
1004
1005
//===----------------------------------------------------------------------===//
1006
//                        CleanupReturnInst Implementation
1007
//===----------------------------------------------------------------------===//
1008
1009
CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
1010
    : Instruction(CRI.getType(), Instruction::CleanupRet,
1011
                  OperandTraits<CleanupReturnInst>::op_end(this) -
1012
                      CRI.getNumOperands(),
1013
8
                  CRI.getNumOperands()) {
1014
8
  setSubclassData<Instruction::OpaqueField>(
1015
8
      CRI.getSubclassData<Instruction::OpaqueField>());
1016
8
  Op<0>() = CRI.Op<0>();
1017
8
  if (CRI.hasUnwindDest())
1018
2
    Op<1>() = CRI.Op<1>();
1019
8
}
1020
1021
1.31k
void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
1022
1.31k
  if (UnwindBB)
1023
909
    setSubclassData<UnwindDestField>(true);
1024
1025
1.31k
  Op<0>() = CleanupPad;
1026
1.31k
  if (UnwindBB)
1027
909
    Op<1>() = UnwindBB;
1028
1.31k
}
1029
1030
CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
1031
                                     unsigned Values, Instruction *InsertBefore)
1032
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
1033
                  Instruction::CleanupRet,
1034
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
1035
1.31k
                  Values, InsertBefore) {
1036
1.31k
  init(CleanupPad, UnwindBB);
1037
1.31k
}
1038
1039
CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
1040
                                     unsigned Values, BasicBlock *InsertAtEnd)
1041
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
1042
                  Instruction::CleanupRet,
1043
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
1044
0
                  Values, InsertAtEnd) {
1045
0
  init(CleanupPad, UnwindBB);
1046
0
}
1047
1048
//===----------------------------------------------------------------------===//
1049
//                        CatchReturnInst Implementation
1050
//===----------------------------------------------------------------------===//
1051
848
void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
1052
848
  Op<0>() = CatchPad;
1053
848
  Op<1>() = BB;
1054
848
}
1055
1056
CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
1057
    : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
1058
0
                  OperandTraits<CatchReturnInst>::op_begin(this), 2) {
1059
0
  Op<0>() = CRI.Op<0>();
1060
0
  Op<1>() = CRI.Op<1>();
1061
0
}
1062
1063
CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
1064
                                 Instruction *InsertBefore)
1065
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
1066
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
1067
848
                  InsertBefore) {
1068
848
  init(CatchPad, BB);
1069
848
}
1070
1071
CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
1072
                                 BasicBlock *InsertAtEnd)
1073
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
1074
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
1075
0
                  InsertAtEnd) {
1076
0
  init(CatchPad, BB);
1077
0
}
1078
1079
//===----------------------------------------------------------------------===//
1080
//                       CatchSwitchInst Implementation
1081
//===----------------------------------------------------------------------===//
1082
1083
CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
1084
                                 unsigned NumReservedValues,
1085
                                 const Twine &NameStr,
1086
                                 Instruction *InsertBefore)
1087
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
1088
1.03k
                  InsertBefore) {
1089
1.03k
  if (UnwindDest)
1090
452
    ++NumReservedValues;
1091
1.03k
  init(ParentPad, UnwindDest, NumReservedValues + 1);
1092
1.03k
  setName(NameStr);
1093
1.03k
}
1094
1095
CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
1096
                                 unsigned NumReservedValues,
1097
                                 const Twine &NameStr, BasicBlock *InsertAtEnd)
1098
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
1099
0
                  InsertAtEnd) {
1100
0
  if (UnwindDest)
1101
0
    ++NumReservedValues;
1102
0
  init(ParentPad, UnwindDest, NumReservedValues + 1);
1103
0
  setName(NameStr);
1104
0
}
1105
1106
CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
1107
    : Instruction(CSI.getType(), Instruction::CatchSwitch, nullptr,
1108
0
                  CSI.getNumOperands()) {
1109
0
  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
1110
0
  setNumHungOffUseOperands(ReservedSpace);
1111
0
  Use *OL = getOperandList();
1112
0
  const Use *InOL = CSI.getOperandList();
1113
0
  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
1114
0
    OL[I] = InOL[I];
1115
0
}
1116
1117
void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
1118
1.03k
                           unsigned NumReservedValues) {
1119
1.03k
  assert(ParentPad && NumReservedValues);
1120
1121
0
  ReservedSpace = NumReservedValues;
1122
1.03k
  setNumHungOffUseOperands(UnwindDest ? 2 : 1);
1123
1.03k
  allocHungoffUses(ReservedSpace);
1124
1125
1.03k
  Op<0>() = ParentPad;
1126
1.03k
  if (UnwindDest) {
1127
452
    setSubclassData<UnwindDestField>(true);
1128
452
    setUnwindDest(UnwindDest);
1129
452
  }
1130
1.03k
}
1131
1132
/// growOperands - This grows the operand list in response to a
1133
/// push_back style of operation. This grows the number of ops by 2 times.
1134
1.32k
void CatchSwitchInst::growOperands(unsigned Size) {
1135
1.32k
  unsigned NumOperands = getNumOperands();
1136
1.32k
  assert(NumOperands >= 1);
1137
1.32k
  if (ReservedSpace >= NumOperands + Size)
1138
1.32k
    return;
1139
0
  ReservedSpace = (NumOperands + Size / 2) * 2;
1140
0
  growHungoffUses(ReservedSpace);
1141
0
}
1142
1143
1.32k
void CatchSwitchInst::addHandler(BasicBlock *Handler) {
1144
1.32k
  unsigned OpNo = getNumOperands();
1145
1.32k
  growOperands(1);
1146
1.32k
  assert(OpNo < ReservedSpace && "Growing didn't work!");
1147
0
  setNumHungOffUseOperands(getNumOperands() + 1);
1148
1.32k
  getOperandList()[OpNo] = Handler;
1149
1.32k
}
1150
1151
51
void CatchSwitchInst::removeHandler(handler_iterator HI) {
1152
  // Move all subsequent handlers up one.
1153
51
  Use *EndDst = op_end() - 1;
1154
73
  for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
1155
22
    *CurDst = *(CurDst + 1);
1156
  // Null out the last handler use.
1157
51
  *EndDst = nullptr;
1158
1159
51
  setNumHungOffUseOperands(getNumOperands() - 1);
1160
51
}
1161
1162
//===----------------------------------------------------------------------===//
1163
//                        FuncletPadInst Implementation
1164
//===----------------------------------------------------------------------===//
1165
void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
1166
2.67k
                          const Twine &NameStr) {
1167
2.67k
  assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
1168
0
  llvm::copy(Args, op_begin());
1169
2.67k
  setParentPad(ParentPad);
1170
2.67k
  setName(NameStr);
1171
2.67k
}
1172
1173
FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
1174
    : Instruction(FPI.getType(), FPI.getOpcode(),
1175
                  OperandTraits<FuncletPadInst>::op_end(this) -
1176
                      FPI.getNumOperands(),
1177
0
                  FPI.getNumOperands()) {
1178
0
  std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
1179
0
  setParentPad(FPI.getParentPad());
1180
0
}
1181
1182
FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
1183
                               ArrayRef<Value *> Args, unsigned Values,
1184
                               const Twine &NameStr, Instruction *InsertBefore)
1185
    : Instruction(ParentPad->getType(), Op,
1186
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
1187
2.67k
                  InsertBefore) {
1188
2.67k
  init(ParentPad, Args, NameStr);
1189
2.67k
}
1190
1191
FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
1192
                               ArrayRef<Value *> Args, unsigned Values,
1193
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
1194
    : Instruction(ParentPad->getType(), Op,
1195
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
1196
0
                  InsertAtEnd) {
1197
0
  init(ParentPad, Args, NameStr);
1198
0
}
1199
1200
//===----------------------------------------------------------------------===//
1201
//                      UnreachableInst Implementation
1202
//===----------------------------------------------------------------------===//
1203
1204
UnreachableInst::UnreachableInst(LLVMContext &Context,
1205
                                 Instruction *InsertBefore)
1206
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
1207
71.8k
                  0, InsertBefore) {}
1208
UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
1209
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
1210
270k
                  0, InsertAtEnd) {}
1211
1212
//===----------------------------------------------------------------------===//
1213
//                        BranchInst Implementation
1214
//===----------------------------------------------------------------------===//
1215
1216
1.16M
void BranchInst::AssertOK() {
1217
1.16M
  if (isConditional())
1218
1.16M
    assert(getCondition()->getType()->isIntegerTy(1) &&
1219
1.16M
           "May only branch on boolean predicates!");
1220
1.16M
}
1221
1222
BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
1223
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
1224
                  OperandTraits<BranchInst>::op_end(this) - 1, 1,
1225
476k
                  InsertBefore) {
1226
476k
  assert(IfTrue && "Branch destination may not be null!");
1227
0
  Op<-1>() = IfTrue;
1228
476k
}
1229
1230
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
1231
                       Instruction *InsertBefore)
1232
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
1233
                  OperandTraits<BranchInst>::op_end(this) - 3, 3,
1234
1.16M
                  InsertBefore) {
1235
  // Assign in order of operand index to make use-list order predictable.
1236
1.16M
  Op<-3>() = Cond;
1237
1.16M
  Op<-2>() = IfFalse;
1238
1.16M
  Op<-1>() = IfTrue;
1239
1.16M
#ifndef NDEBUG
1240
1.16M
  AssertOK();
1241
1.16M
#endif
1242
1.16M
}
1243
1244
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
1245
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
1246
686k
                  OperandTraits<BranchInst>::op_end(this) - 1, 1, InsertAtEnd) {
1247
686k
  assert(IfTrue && "Branch destination may not be null!");
1248
0
  Op<-1>() = IfTrue;
1249
686k
}
1250
1251
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
1252
                       BasicBlock *InsertAtEnd)
1253
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
1254
0
                  OperandTraits<BranchInst>::op_end(this) - 3, 3, InsertAtEnd) {
1255
  // Assign in order of operand index to make use-list order predictable.
1256
0
  Op<-3>() = Cond;
1257
0
  Op<-2>() = IfFalse;
1258
0
  Op<-1>() = IfTrue;
1259
0
#ifndef NDEBUG
1260
0
  AssertOK();
1261
0
#endif
1262
0
}
1263
1264
BranchInst::BranchInst(const BranchInst &BI)
1265
    : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
1266
                  OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
1267
52.9k
                  BI.getNumOperands()) {
1268
  // Assign in order of operand index to make use-list order predictable.
1269
52.9k
  if (BI.getNumOperands() != 1) {
1270
31.2k
    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
1271
0
    Op<-3>() = BI.Op<-3>();
1272
31.2k
    Op<-2>() = BI.Op<-2>();
1273
31.2k
  }
1274
0
  Op<-1>() = BI.Op<-1>();
1275
52.9k
  SubclassOptionalData = BI.SubclassOptionalData;
1276
52.9k
}
1277
1278
6.08k
void BranchInst::swapSuccessors() {
1279
6.08k
  assert(isConditional() &&
1280
6.08k
         "Cannot swap successors of an unconditional branch");
1281
0
  Op<-1>().swap(Op<-2>());
1282
1283
  // Update profile metadata if it is present and matches our structural
1284
  // expectations.
1285
6.08k
  swapProfMetadata();
1286
6.08k
}
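A minimal, hedged usage sketch (not part of the coverage data; ThenBB, ElseBB, Cond, and InsertPt are assumed to already exist): branches are normally created through the static BranchInst::Create helpers, which forward to the constructors above.
  // Conditional branch: 3 operands (Cond, IfFalse, IfTrue), checked by AssertOK() in debug builds.
  BranchInst *CondBr = BranchInst::Create(ThenBB, ElseBB, Cond, InsertPt);
  // Swapping successors also swaps any branch-weight profile metadata.
  CondBr->swapSuccessors();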
1287
1288
//===----------------------------------------------------------------------===//
1289
//                        AllocaInst Implementation
1290
//===----------------------------------------------------------------------===//
1291
1292
735k
static Value *getAISize(LLVMContext &Context, Value *Amt) {
1293
735k
  if (!Amt)
1294
23.6k
    Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
1295
712k
  else {
1296
712k
    assert(!isa<BasicBlock>(Amt) &&
1297
712k
           "Passed basic block into allocation size parameter! Use other ctor");
1298
0
    assert(Amt->getType()->isIntegerTy() &&
1299
712k
           "Allocation array size is not an integer!");
1300
712k
  }
1301
0
  return Amt;
1302
735k
}
1303
1304
0
static Align computeAllocaDefaultAlign(Type *Ty, BasicBlock *BB) {
1305
0
  assert(BB && "Insertion BB cannot be null when alignment not provided!");
1306
0
  assert(BB->getParent() &&
1307
0
         "BB must be in a Function when alignment not provided!");
1308
0
  const DataLayout &DL = BB->getModule()->getDataLayout();
1309
0
  return DL.getPrefTypeAlign(Ty);
1310
0
}
1311
1312
0
static Align computeAllocaDefaultAlign(Type *Ty, Instruction *I) {
1313
0
  assert(I && "Insertion position cannot be null when alignment not provided!");
1314
0
  return computeAllocaDefaultAlign(Ty, I->getParent());
1315
0
}
1316
1317
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
1318
                       Instruction *InsertBefore)
1319
0
  : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}
1320
1321
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
1322
                       BasicBlock *InsertAtEnd)
1323
0
  : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertAtEnd) {}
1324
1325
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1326
                       const Twine &Name, Instruction *InsertBefore)
1327
    : AllocaInst(Ty, AddrSpace, ArraySize,
1328
                 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
1329
0
                 InsertBefore) {}
1330
1331
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1332
                       const Twine &Name, BasicBlock *InsertAtEnd)
1333
    : AllocaInst(Ty, AddrSpace, ArraySize,
1334
                 computeAllocaDefaultAlign(Ty, InsertAtEnd), Name,
1335
0
                 InsertAtEnd) {}
1336
1337
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1338
                       Align Align, const Twine &Name,
1339
                       Instruction *InsertBefore)
1340
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
1341
                       getAISize(Ty->getContext(), ArraySize), InsertBefore),
1342
735k
      AllocatedType(Ty) {
1343
735k
  setAlignment(Align);
1344
735k
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
1345
0
  setName(Name);
1346
735k
}
1347
1348
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1349
                       Align Align, const Twine &Name, BasicBlock *InsertAtEnd)
1350
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
1351
                       getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
1352
0
      AllocatedType(Ty) {
1353
0
  setAlignment(Align);
1354
0
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
1355
0
  setName(Name);
1356
0
}
1357
1358
1359
11.1M
bool AllocaInst::isArrayAllocation() const {
1360
11.1M
  if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
1361
9.47M
    return !CI->isOne();
1362
1.65M
  return true;
1363
11.1M
}
1364
1365
/// isStaticAlloca - Return true if this alloca is in the entry block of the
1366
/// function and is a constant size.  If so, the code generator will fold it
1367
/// into the prolog/epilog code, so it is basically free.
1368
700k
bool AllocaInst::isStaticAlloca() const {
1369
  // Must be constant size.
1370
700k
  if (!isa<ConstantInt>(getArraySize())) return false;
1371
1372
  // Must be in the entry block.
1373
671k
  const BasicBlock *Parent = getParent();
1374
671k
  return Parent->isEntryBlock() && !isUsedWithInAlloca();
1375
700k
}
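A hedged sketch of the allocation predicates above (Ctx and an entry-block insertion point EntryInsertPt are assumed, both hypothetical):
  Type *I64 = Type::getInt64Ty(Ctx);
  // ArraySize == nullptr: getAISize() substitutes a constant 1, so this is not an array allocation.
  AllocaInst *Slot = new AllocaInst(I64, /*AddrSpace=*/0, /*ArraySize=*/nullptr,
                                    Align(8), "slot", EntryInsertPt);
  bool IsArray  = Slot->isArrayAllocation();  // false: the size operand is the constant 1
  bool IsStatic = Slot->isStaticAlloca();     // true when the insertion point is in the entry block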
1376
1377
//===----------------------------------------------------------------------===//
1378
//                           LoadInst Implementation
1379
//===----------------------------------------------------------------------===//
1380
1381
1.69M
void LoadInst::AssertOK() {
1382
1.69M
  assert(getOperand(0)->getType()->isPointerTy() &&
1383
1.69M
         "Ptr must have pointer type.");
1384
1.69M
}
1385
1386
332k
static Align computeLoadStoreDefaultAlign(Type *Ty, BasicBlock *BB) {
1387
332k
  assert(BB && "Insertion BB cannot be null when alignment not provided!");
1388
0
  assert(BB->getParent() &&
1389
332k
         "BB must be in a Function when alignment not provided!");
1390
0
  const DataLayout &DL = BB->getModule()->getDataLayout();
1391
332k
  return DL.getABITypeAlign(Ty);
1392
332k
}
1393
1394
332k
static Align computeLoadStoreDefaultAlign(Type *Ty, Instruction *I) {
1395
332k
  assert(I && "Insertion position cannot be null when alignment not provided!");
1396
0
  return computeLoadStoreDefaultAlign(Ty, I->getParent());
1397
332k
}
1398
1399
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
1400
                   Instruction *InsertBef)
1401
85.8k
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}
1402
1403
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
1404
                   BasicBlock *InsertAE)
1405
0
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertAE) {}
1406
1407
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1408
                   Instruction *InsertBef)
1409
    : LoadInst(Ty, Ptr, Name, isVolatile,
1410
85.8k
               computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}
1411
1412
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1413
                   BasicBlock *InsertAE)
1414
    : LoadInst(Ty, Ptr, Name, isVolatile,
1415
0
               computeLoadStoreDefaultAlign(Ty, InsertAE), InsertAE) {}
1416
1417
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1418
                   Align Align, Instruction *InsertBef)
1419
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
1420
1.67M
               SyncScope::System, InsertBef) {}
1421
1422
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1423
                   Align Align, BasicBlock *InsertAE)
1424
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
1425
0
               SyncScope::System, InsertAE) {}
1426
1427
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1428
                   Align Align, AtomicOrdering Order, SyncScope::ID SSID,
1429
                   Instruction *InsertBef)
1430
1.69M
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
1431
1.69M
  setVolatile(isVolatile);
1432
1.69M
  setAlignment(Align);
1433
1.69M
  setAtomic(Order, SSID);
1434
1.69M
  AssertOK();
1435
1.69M
  setName(Name);
1436
1.69M
}
1437
1438
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1439
                   Align Align, AtomicOrdering Order, SyncScope::ID SSID,
1440
                   BasicBlock *InsertAE)
1441
0
    : UnaryInstruction(Ty, Load, Ptr, InsertAE) {
1442
0
  setVolatile(isVolatile);
1443
0
  setAlignment(Align);
1444
0
  setAtomic(Order, SSID);
1445
0
  AssertOK();
1446
0
  setName(Name);
1447
0
}
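A hedged sketch contrasting the two construction paths (Ctx, Ptr, and InsertPt are assumed): omitting the alignment routes through computeLoadStoreDefaultAlign(), which queries DL.getABITypeAlign() for the insertion point's module.
  Type *I32 = Type::getInt32Ty(Ctx);
  // Default alignment and ordering (non-volatile, non-atomic).
  LoadInst *Plain = new LoadInst(I32, Ptr, "ld", InsertPt);
  // Fully explicit: volatile, 4-byte aligned, non-atomic, system sync scope.
  LoadInst *Explicit = new LoadInst(I32, Ptr, "ld.vol", /*isVolatile=*/true, Align(4),
                                    AtomicOrdering::NotAtomic, SyncScope::System, InsertPt);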
1448
1449
//===----------------------------------------------------------------------===//
1450
//                           StoreInst Implementation
1451
//===----------------------------------------------------------------------===//
1452
1453
4.05M
void StoreInst::AssertOK() {
1454
4.05M
  assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
1455
0
  assert(getOperand(1)->getType()->isPointerTy() &&
1456
4.05M
         "Ptr must have pointer type!");
1457
4.05M
}
1458
1459
StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
1460
30
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}
1461
1462
StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
1463
0
    : StoreInst(val, addr, /*isVolatile=*/false, InsertAtEnd) {}
1464
1465
StoreInst::StoreInst(Value *val, Value *addr, BasicBlock::iterator InsertBefore)
1466
247k
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}
1467
1468
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1469
                     Instruction *InsertBefore)
1470
    : StoreInst(val, addr, isVolatile,
1471
                computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
1472
30
                InsertBefore) {}
1473
1474
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1475
                     BasicBlock *InsertAtEnd)
1476
    : StoreInst(val, addr, isVolatile,
1477
                computeLoadStoreDefaultAlign(val->getType(), InsertAtEnd),
1478
0
                InsertAtEnd) {}
1479
1480
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1481
                     BasicBlock::iterator InsertBefore)
1482
    : StoreInst(val, addr, isVolatile,
1483
                computeLoadStoreDefaultAlign(val->getType(), &*InsertBefore),
1484
247k
                InsertBefore) {}
1485
1486
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
1487
                     Instruction *InsertBefore)
1488
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
1489
3.75M
                SyncScope::System, InsertBefore) {}
1490
1491
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
1492
                     BasicBlock *InsertAtEnd)
1493
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
1494
0
                SyncScope::System, InsertAtEnd) {}
1495
1496
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
1497
                     BasicBlock::iterator InsertBefore)
1498
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
1499
247k
                SyncScope::System, InsertBefore) {}
1500
1501
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
1502
                     AtomicOrdering Order, SyncScope::ID SSID,
1503
                     Instruction *InsertBefore)
1504
    : Instruction(Type::getVoidTy(val->getContext()), Store,
1505
                  OperandTraits<StoreInst>::op_begin(this),
1506
3.80M
                  OperandTraits<StoreInst>::operands(this), InsertBefore) {
1507
3.80M
  Op<0>() = val;
1508
3.80M
  Op<1>() = addr;
1509
3.80M
  setVolatile(isVolatile);
1510
3.80M
  setAlignment(Align);
1511
3.80M
  setAtomic(Order, SSID);
1512
3.80M
  AssertOK();
1513
3.80M
}
1514
1515
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
1516
                     AtomicOrdering Order, SyncScope::ID SSID,
1517
                     BasicBlock *InsertAtEnd)
1518
    : Instruction(Type::getVoidTy(val->getContext()), Store,
1519
                  OperandTraits<StoreInst>::op_begin(this),
1520
0
                  OperandTraits<StoreInst>::operands(this), InsertAtEnd) {
1521
0
  Op<0>() = val;
1522
0
  Op<1>() = addr;
1523
0
  setVolatile(isVolatile);
1524
0
  setAlignment(Align);
1525
0
  setAtomic(Order, SSID);
1526
0
  AssertOK();
1527
0
}
1528
1529
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
1530
                     AtomicOrdering Order, SyncScope::ID SSID,
1531
                     BasicBlock::iterator InsertBefore)
1532
    : Instruction(Type::getVoidTy(val->getContext()), Store,
1533
                  OperandTraits<StoreInst>::op_begin(this),
1534
247k
                  OperandTraits<StoreInst>::operands(this)) {
1535
247k
  Op<0>() = val;
1536
247k
  Op<1>() = addr;
1537
247k
  setVolatile(isVolatile);
1538
247k
  setAlignment(Align);
1539
247k
  setAtomic(Order, SSID);
1540
247k
  insertBefore(*InsertBefore->getParent(), InsertBefore);
1541
247k
  AssertOK();
1542
247k
}
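The same pattern applies to stores; a brief hedged sketch (Val, Ptr, and InsertPt assumed), where the default alignment is derived from the stored value's type:
  // Default alignment comes from computeLoadStoreDefaultAlign(Val->getType(), InsertPt).
  StoreInst *PlainSt = new StoreInst(Val, Ptr, /*isVolatile=*/false, InsertPt);
  // Explicit form with a caller-chosen alignment.
  StoreInst *AlignedSt = new StoreInst(Val, Ptr, /*isVolatile=*/false, Align(16), InsertPt);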
1543
1544
//===----------------------------------------------------------------------===//
1545
//                       AtomicCmpXchgInst Implementation
1546
//===----------------------------------------------------------------------===//
1547
1548
void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
1549
                             Align Alignment, AtomicOrdering SuccessOrdering,
1550
                             AtomicOrdering FailureOrdering,
1551
578
                             SyncScope::ID SSID) {
1552
578
  Op<0>() = Ptr;
1553
578
  Op<1>() = Cmp;
1554
578
  Op<2>() = NewVal;
1555
578
  setSuccessOrdering(SuccessOrdering);
1556
578
  setFailureOrdering(FailureOrdering);
1557
578
  setSyncScopeID(SSID);
1558
578
  setAlignment(Alignment);
1559
1560
578
  assert(getOperand(0) && getOperand(1) && getOperand(2) &&
1561
578
         "All operands must be non-null!");
1562
0
  assert(getOperand(0)->getType()->isPointerTy() &&
1563
578
         "Ptr must have pointer type!");
1564
0
  assert(getOperand(1)->getType() == getOperand(2)->getType() &&
1565
578
         "Cmp type and NewVal type must be same!");
1566
578
}
1567
1568
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
1569
                                     Align Alignment,
1570
                                     AtomicOrdering SuccessOrdering,
1571
                                     AtomicOrdering FailureOrdering,
1572
                                     SyncScope::ID SSID,
1573
                                     Instruction *InsertBefore)
1574
    : Instruction(
1575
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
1576
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
1577
578
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
1578
578
  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
1579
578
}
1580
1581
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
1582
                                     Align Alignment,
1583
                                     AtomicOrdering SuccessOrdering,
1584
                                     AtomicOrdering FailureOrdering,
1585
                                     SyncScope::ID SSID,
1586
                                     BasicBlock *InsertAtEnd)
1587
    : Instruction(
1588
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
1589
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
1590
0
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
1591
0
  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
1592
0
}
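A hedged construction sketch (Ptr, Expected, Desired, and InsertPt assumed); the instruction's type is the {value, i1} struct built by the constructor above.
  auto *CAS = new AtomicCmpXchgInst(Ptr, Expected, Desired, Align(4),
                                    /*SuccessOrdering=*/AtomicOrdering::SequentiallyConsistent,
                                    /*FailureOrdering=*/AtomicOrdering::Monotonic,
                                    SyncScope::System, InsertPt);
  // Result: element 0 is the loaded value, element 1 is the i1 success flag.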
1593
1594
//===----------------------------------------------------------------------===//
1595
//                       AtomicRMWInst Implementation
1596
//===----------------------------------------------------------------------===//
1597
1598
void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
1599
                         Align Alignment, AtomicOrdering Ordering,
1600
2.12k
                         SyncScope::ID SSID) {
1601
2.12k
  assert(Ordering != AtomicOrdering::NotAtomic &&
1602
2.12k
         "atomicrmw instructions can only be atomic.");
1603
0
  assert(Ordering != AtomicOrdering::Unordered &&
1604
2.12k
         "atomicrmw instructions cannot be unordered.");
1605
0
  Op<0>() = Ptr;
1606
2.12k
  Op<1>() = Val;
1607
2.12k
  setOperation(Operation);
1608
2.12k
  setOrdering(Ordering);
1609
2.12k
  setSyncScopeID(SSID);
1610
2.12k
  setAlignment(Alignment);
1611
1612
2.12k
  assert(getOperand(0) && getOperand(1) &&
1613
2.12k
         "All operands must be non-null!");
1614
0
  assert(getOperand(0)->getType()->isPointerTy() &&
1615
2.12k
         "Ptr must have pointer type!");
1616
0
  assert(Ordering != AtomicOrdering::NotAtomic &&
1617
2.12k
         "AtomicRMW instructions must be atomic!");
1618
2.12k
}
1619
1620
AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
1621
                             Align Alignment, AtomicOrdering Ordering,
1622
                             SyncScope::ID SSID, Instruction *InsertBefore)
1623
    : Instruction(Val->getType(), AtomicRMW,
1624
                  OperandTraits<AtomicRMWInst>::op_begin(this),
1625
2.12k
                  OperandTraits<AtomicRMWInst>::operands(this), InsertBefore) {
1626
2.12k
  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
1627
2.12k
}
1628
1629
AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
1630
                             Align Alignment, AtomicOrdering Ordering,
1631
                             SyncScope::ID SSID, BasicBlock *InsertAtEnd)
1632
    : Instruction(Val->getType(), AtomicRMW,
1633
                  OperandTraits<AtomicRMWInst>::op_begin(this),
1634
0
                  OperandTraits<AtomicRMWInst>::operands(this), InsertAtEnd) {
1635
0
  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
1636
0
}
1637
1638
0
StringRef AtomicRMWInst::getOperationName(BinOp Op) {
1639
0
  switch (Op) {
1640
0
  case AtomicRMWInst::Xchg:
1641
0
    return "xchg";
1642
0
  case AtomicRMWInst::Add:
1643
0
    return "add";
1644
0
  case AtomicRMWInst::Sub:
1645
0
    return "sub";
1646
0
  case AtomicRMWInst::And:
1647
0
    return "and";
1648
0
  case AtomicRMWInst::Nand:
1649
0
    return "nand";
1650
0
  case AtomicRMWInst::Or:
1651
0
    return "or";
1652
0
  case AtomicRMWInst::Xor:
1653
0
    return "xor";
1654
0
  case AtomicRMWInst::Max:
1655
0
    return "max";
1656
0
  case AtomicRMWInst::Min:
1657
0
    return "min";
1658
0
  case AtomicRMWInst::UMax:
1659
0
    return "umax";
1660
0
  case AtomicRMWInst::UMin:
1661
0
    return "umin";
1662
0
  case AtomicRMWInst::FAdd:
1663
0
    return "fadd";
1664
0
  case AtomicRMWInst::FSub:
1665
0
    return "fsub";
1666
0
  case AtomicRMWInst::FMax:
1667
0
    return "fmax";
1668
0
  case AtomicRMWInst::FMin:
1669
0
    return "fmin";
1670
0
  case AtomicRMWInst::UIncWrap:
1671
0
    return "uinc_wrap";
1672
0
  case AtomicRMWInst::UDecWrap:
1673
0
    return "udec_wrap";
1674
0
  case AtomicRMWInst::BAD_BINOP:
1675
0
    return "<invalid operation>";
1676
0
  }
1677
1678
0
  llvm_unreachable("invalid atomicrmw operation");
1679
0
}
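A hedged sketch tying the constructor to getOperationName() (Ptr, Val, and InsertPt assumed, with Val of a type legal for atomicrmw):
  auto *RMW = new AtomicRMWInst(AtomicRMWInst::Add, Ptr, Val, Align(4),
                                AtomicOrdering::SequentiallyConsistent,
                                SyncScope::System, InsertPt);
  StringRef OpName = AtomicRMWInst::getOperationName(RMW->getOperation()); // "add"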
1680
1681
//===----------------------------------------------------------------------===//
1682
//                       FenceInst Implementation
1683
//===----------------------------------------------------------------------===//
1684
1685
FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
1686
                     SyncScope::ID SSID,
1687
                     Instruction *InsertBefore)
1688
319
  : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
1689
319
  setOrdering(Ordering);
1690
319
  setSyncScopeID(SSID);
1691
319
}
1692
1693
FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
1694
                     SyncScope::ID SSID,
1695
                     BasicBlock *InsertAtEnd)
1696
0
  : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
1697
0
  setOrdering(Ordering);
1698
0
  setSyncScopeID(SSID);
1699
0
}
1700
1701
//===----------------------------------------------------------------------===//
1702
//                       GetElementPtrInst Implementation
1703
//===----------------------------------------------------------------------===//
1704
1705
void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
1706
1.34M
                             const Twine &Name) {
1707
1.34M
  assert(getNumOperands() == 1 + IdxList.size() &&
1708
1.34M
         "NumOperands not initialized?");
1709
0
  Op<0>() = Ptr;
1710
1.34M
  llvm::copy(IdxList, op_begin() + 1);
1711
1.34M
  setName(Name);
1712
1.34M
}
1713
1714
GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
1715
    : Instruction(GEPI.getType(), GetElementPtr,
1716
                  OperandTraits<GetElementPtrInst>::op_end(this) -
1717
                      GEPI.getNumOperands(),
1718
                  GEPI.getNumOperands()),
1719
      SourceElementType(GEPI.SourceElementType),
1720
66.2k
      ResultElementType(GEPI.ResultElementType) {
1721
66.2k
  std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
1722
66.2k
  SubclassOptionalData = GEPI.SubclassOptionalData;
1723
66.2k
}
1724
1725
1.33M
Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, Value *Idx) {
1726
1.33M
  if (auto *Struct = dyn_cast<StructType>(Ty)) {
1727
96.1k
    if (!Struct->indexValid(Idx))
1728
0
      return nullptr;
1729
96.1k
    return Struct->getTypeAtIndex(Idx);
1730
96.1k
  }
1731
1.23M
  if (!Idx->getType()->isIntOrIntVectorTy())
1732
1
    return nullptr;
1733
1.23M
  if (auto *Array = dyn_cast<ArrayType>(Ty))
1734
1.23M
    return Array->getElementType();
1735
1.98k
  if (auto *Vector = dyn_cast<VectorType>(Ty))
1736
1.98k
    return Vector->getElementType();
1737
0
  return nullptr;
1738
1.98k
}
1739
1740
16.4k
Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, uint64_t Idx) {
1741
16.4k
  if (auto *Struct = dyn_cast<StructType>(Ty)) {
1742
1.17k
    if (Idx >= Struct->getNumElements())
1743
0
      return nullptr;
1744
1.17k
    return Struct->getElementType(Idx);
1745
1.17k
  }
1746
15.2k
  if (auto *Array = dyn_cast<ArrayType>(Ty))
1747
10.3k
    return Array->getElementType();
1748
4.89k
  if (auto *Vector = dyn_cast<VectorType>(Ty))
1749
150
    return Vector->getElementType();
1750
4.74k
  return nullptr;
1751
4.89k
}
1752
1753
template <typename IndexTy>
1754
4.76M
static Type *getIndexedTypeInternal(Type *Ty, ArrayRef<IndexTy> IdxList) {
1755
4.76M
  if (IdxList.empty())
1756
389
    return Ty;
1757
4.76M
  for (IndexTy V : IdxList.slice(1)) {
1758
1.33M
    Ty = GetElementPtrInst::getTypeAtIndex(Ty, V);
1759
1.33M
    if (!Ty)
1760
1
      return Ty;
1761
1.33M
  }
1762
4.76M
  return Ty;
1763
4.76M
}
Instructions.cpp:llvm::Type* getIndexedTypeInternal<llvm::Value*>(llvm::Type*, llvm::ArrayRef<llvm::Value*>)
Line
Count
Source
1754
4.57M
static Type *getIndexedTypeInternal(Type *Ty, ArrayRef<IndexTy> IdxList) {
1755
4.57M
  if (IdxList.empty())
1756
389
    return Ty;
1757
4.57M
  for (IndexTy V : IdxList.slice(1)) {
1758
1.23M
    Ty = GetElementPtrInst::getTypeAtIndex(Ty, V);
1759
1.23M
    if (!Ty)
1760
1
      return Ty;
1761
1.23M
  }
1762
4.57M
  return Ty;
1763
4.57M
}
Instructions.cpp:llvm::Type* getIndexedTypeInternal<llvm::Constant*>(llvm::Type*, llvm::ArrayRef<llvm::Constant*>)
Line
Count
Source
1754
188k
static Type *getIndexedTypeInternal(Type *Ty, ArrayRef<IndexTy> IdxList) {
1755
188k
  if (IdxList.empty())
1756
0
    return Ty;
1757
188k
  for (IndexTy V : IdxList.slice(1)) {
1758
96.9k
    Ty = GetElementPtrInst::getTypeAtIndex(Ty, V);
1759
96.9k
    if (!Ty)
1760
0
      return Ty;
1761
96.9k
  }
1762
188k
  return Ty;
1763
188k
}
Unexecuted instantiation: Instructions.cpp:llvm::Type* getIndexedTypeInternal<unsigned long>(llvm::Type*, llvm::ArrayRef<unsigned long>)
1764
1765
4.57M
Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<Value *> IdxList) {
1766
4.57M
  return getIndexedTypeInternal(Ty, IdxList);
1767
4.57M
}
1768
1769
Type *GetElementPtrInst::getIndexedType(Type *Ty,
1770
188k
                                        ArrayRef<Constant *> IdxList) {
1771
188k
  return getIndexedTypeInternal(Ty, IdxList);
1772
188k
}
1773
1774
0
Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList) {
1775
0
  return getIndexedTypeInternal(Ty, IdxList);
1776
0
}
1777
1778
/// hasAllZeroIndices - Return true if all of the indices of this GEP are
1779
/// zeros.  If so, the result pointer and the first operand have the same
1780
/// value, just potentially different types.
1781
4.94M
bool GetElementPtrInst::hasAllZeroIndices() const {
1782
9.52M
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1783
9.51M
    if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) {
1784
9.23M
      if (!CI->isZero()) return false;
1785
9.23M
    } else {
1786
283k
      return false;
1787
283k
    }
1788
9.51M
  }
1789
5.74k
  return true;
1790
4.94M
}
1791
1792
/// hasAllConstantIndices - Return true if all of the indices of this GEP are
1793
/// constant integers.  If so, the result pointer and the first operand have
1794
/// a constant offset between them.
1795
721k
bool GetElementPtrInst::hasAllConstantIndices() const {
1796
1.50M
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1797
1.10M
    if (!isa<ConstantInt>(getOperand(i)))
1798
315k
      return false;
1799
1.10M
  }
1800
406k
  return true;
1801
721k
}
1802
1803
574k
void GetElementPtrInst::setIsInBounds(bool B) {
1804
574k
  cast<GEPOperator>(this)->setIsInBounds(B);
1805
574k
}
1806
1807
1.65M
bool GetElementPtrInst::isInBounds() const {
1808
1.65M
  return cast<GEPOperator>(this)->isInBounds();
1809
1.65M
}
1810
1811
bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL,
1812
191k
                                                 APInt &Offset) const {
1813
  // Delegate to the generic GEPOperator implementation.
1814
191k
  return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
1815
191k
}
1816
1817
bool GetElementPtrInst::collectOffset(
1818
    const DataLayout &DL, unsigned BitWidth,
1819
    MapVector<Value *, APInt> &VariableOffsets,
1820
148k
    APInt &ConstantOffset) const {
1821
  // Delegate to the generic GEPOperator implementation.
1822
148k
  return cast<GEPOperator>(this)->collectOffset(DL, BitWidth, VariableOffsets,
1823
148k
                                                ConstantOffset);
1824
148k
}
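A hedged sketch of the offset queries above, for an existing GetElementPtrInst *GEP and a DataLayout DL (hypothetical names):
  // A GEP whose indices are all constant integers is a fixed byte offset from its base.
  if (GEP->hasAllConstantIndices()) {
    APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
    bool Folded = GEP->accumulateConstantOffset(DL, Offset); // expected to succeed here
  }
  // All-zero indices: the result aliases the base pointer exactly, possibly at a different type.
  bool SameAddr = GEP->hasAllZeroIndices();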
1825
1826
//===----------------------------------------------------------------------===//
1827
//                           ExtractElementInst Implementation
1828
//===----------------------------------------------------------------------===//
1829
1830
ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
1831
                                       const Twine &Name,
1832
                                       Instruction *InsertBef)
1833
  : Instruction(cast<VectorType>(Val->getType())->getElementType(),
1834
                ExtractElement,
1835
                OperandTraits<ExtractElementInst>::op_begin(this),
1836
109k
                2, InsertBef) {
1837
109k
  assert(isValidOperands(Val, Index) &&
1838
109k
         "Invalid extractelement instruction operands!");
1839
0
  Op<0>() = Val;
1840
109k
  Op<1>() = Index;
1841
109k
  setName(Name);
1842
109k
}
1843
1844
ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
1845
                                       const Twine &Name,
1846
                                       BasicBlock *InsertAE)
1847
  : Instruction(cast<VectorType>(Val->getType())->getElementType(),
1848
                ExtractElement,
1849
                OperandTraits<ExtractElementInst>::op_begin(this),
1850
0
                2, InsertAE) {
1851
0
  assert(isValidOperands(Val, Index) &&
1852
0
         "Invalid extractelement instruction operands!");
1853
1854
0
  Op<0>() = Val;
1855
0
  Op<1>() = Index;
1856
0
  setName(Name);
1857
0
}
1858
1859
268k
bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
1860
268k
  if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
1861
0
    return false;
1862
268k
  return true;
1863
268k
}
1864
1865
//===----------------------------------------------------------------------===//
1866
//                           InsertElementInst Implementation
1867
//===----------------------------------------------------------------------===//
1868
1869
InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
1870
                                     const Twine &Name,
1871
                                     Instruction *InsertBef)
1872
  : Instruction(Vec->getType(), InsertElement,
1873
                OperandTraits<InsertElementInst>::op_begin(this),
1874
154k
                3, InsertBef) {
1875
154k
  assert(isValidOperands(Vec, Elt, Index) &&
1876
154k
         "Invalid insertelement instruction operands!");
1877
0
  Op<0>() = Vec;
1878
154k
  Op<1>() = Elt;
1879
154k
  Op<2>() = Index;
1880
154k
  setName(Name);
1881
154k
}
1882
1883
InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
1884
                                     const Twine &Name,
1885
                                     BasicBlock *InsertAE)
1886
  : Instruction(Vec->getType(), InsertElement,
1887
                OperandTraits<InsertElementInst>::op_begin(this),
1888
3.35k
                3, InsertAE) {
1889
3.35k
  assert(isValidOperands(Vec, Elt, Index) &&
1890
3.35k
         "Invalid insertelement instruction operands!");
1891
1892
0
  Op<0>() = Vec;
1893
3.35k
  Op<1>() = Elt;
1894
3.35k
  Op<2>() = Index;
1895
3.35k
  setName(Name);
1896
3.35k
}
1897
1898
bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,
1899
391k
                                        const Value *Index) {
1900
391k
  if (!Vec->getType()->isVectorTy())
1901
0
    return false;   // First operand of insertelement must be vector type.
1902
1903
391k
  if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
1904
0
    return false; // Second operand of insertelement must match the vector element type.
1905
1906
391k
  if (!Index->getType()->isIntegerTy())
1907
0
    return false;  // Third operand of insertelement must be an integer.
1908
391k
  return true;
1909
391k
}
1910
1911
//===----------------------------------------------------------------------===//
1912
//                      ShuffleVectorInst Implementation
1913
//===----------------------------------------------------------------------===//
1914
1915
1.00k
static Value *createPlaceholderForShuffleVector(Value *V) {
1916
1.00k
  assert(V && "Cannot create placeholder of nullptr V");
1917
0
  return PoisonValue::get(V->getType());
1918
1.00k
}
1919
1920
ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *Mask, const Twine &Name,
1921
                                     Instruction *InsertBefore)
1922
    : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
1923
0
                        InsertBefore) {}
1924
1925
ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *Mask, const Twine &Name,
1926
                                     BasicBlock *InsertAtEnd)
1927
    : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
1928
0
                        InsertAtEnd) {}
1929
1930
ShuffleVectorInst::ShuffleVectorInst(Value *V1, ArrayRef<int> Mask,
1931
                                     const Twine &Name,
1932
                                     Instruction *InsertBefore)
1933
    : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
1934
1.00k
                        InsertBefore) {}
1935
1936
ShuffleVectorInst::ShuffleVectorInst(Value *V1, ArrayRef<int> Mask,
1937
                                     const Twine &Name, BasicBlock *InsertAtEnd)
1938
    : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
1939
0
                        InsertAtEnd) {}
1940
1941
ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
1942
                                     const Twine &Name,
1943
                                     Instruction *InsertBefore)
1944
    : Instruction(
1945
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
1946
                          cast<VectorType>(Mask->getType())->getElementCount()),
1947
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
1948
78.8k
          OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
1949
78.8k
  assert(isValidOperands(V1, V2, Mask) &&
1950
78.8k
         "Invalid shuffle vector instruction operands!");
1951
1952
0
  Op<0>() = V1;
1953
78.8k
  Op<1>() = V2;
1954
78.8k
  SmallVector<int, 16> MaskArr;
1955
78.8k
  getShuffleMask(cast<Constant>(Mask), MaskArr);
1956
78.8k
  setShuffleMask(MaskArr);
1957
78.8k
  setName(Name);
1958
78.8k
}
1959
1960
ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
1961
                                     const Twine &Name, BasicBlock *InsertAtEnd)
1962
    : Instruction(
1963
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
1964
                          cast<VectorType>(Mask->getType())->getElementCount()),
1965
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
1966
0
          OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
1967
0
  assert(isValidOperands(V1, V2, Mask) &&
1968
0
         "Invalid shuffle vector instruction operands!");
1969
1970
0
  Op<0>() = V1;
1971
0
  Op<1>() = V2;
1972
0
  SmallVector<int, 16> MaskArr;
1973
0
  getShuffleMask(cast<Constant>(Mask), MaskArr);
1974
0
  setShuffleMask(MaskArr);
1975
0
  setName(Name);
1976
0
}
1977
1978
ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
1979
                                     const Twine &Name,
1980
                                     Instruction *InsertBefore)
1981
    : Instruction(
1982
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
1983
                          Mask.size(), isa<ScalableVectorType>(V1->getType())),
1984
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
1985
9.85k
          OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
1986
9.85k
  assert(isValidOperands(V1, V2, Mask) &&
1987
9.85k
         "Invalid shuffle vector instruction operands!");
1988
0
  Op<0>() = V1;
1989
9.85k
  Op<1>() = V2;
1990
9.85k
  setShuffleMask(Mask);
1991
9.85k
  setName(Name);
1992
9.85k
}
1993
1994
ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
1995
                                     const Twine &Name, BasicBlock *InsertAtEnd)
1996
    : Instruction(
1997
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
1998
                          Mask.size(), isa<ScalableVectorType>(V1->getType())),
1999
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2000
0
          OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
2001
0
  assert(isValidOperands(V1, V2, Mask) &&
2002
0
         "Invalid shuffle vector instruction operands!");
2003
2004
0
  Op<0>() = V1;
2005
0
  Op<1>() = V2;
2006
0
  setShuffleMask(Mask);
2007
0
  setName(Name);
2008
0
}
2009
2010
748
void ShuffleVectorInst::commute() {
2011
748
  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2012
748
  int NumMaskElts = ShuffleMask.size();
2013
748
  SmallVector<int, 16> NewMask(NumMaskElts);
2014
7.33k
  for (int i = 0; i != NumMaskElts; ++i) {
2015
6.58k
    int MaskElt = getMaskValue(i);
2016
6.58k
    if (MaskElt == PoisonMaskElem) {
2017
2.36k
      NewMask[i] = PoisonMaskElem;
2018
2.36k
      continue;
2019
2.36k
    }
2020
4.22k
    assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
2021
4.22k
    MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
2022
4.22k
    NewMask[i] = MaskElt;
2023
4.22k
  }
2024
748
  setShuffleMask(NewMask);
2025
748
  Op<0>().swap(Op<1>());
2026
748
}
2027
2028
bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
2029
169k
                                        ArrayRef<int> Mask) {
2030
  // V1 and V2 must be vectors of the same type.
2031
169k
  if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())
2032
0
    return false;
2033
2034
  // Make sure the mask elements make sense.
2035
169k
  int V1Size =
2036
169k
      cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
2037
169k
  for (int Elem : Mask)
2038
1.10M
    if (Elem != PoisonMaskElem && Elem >= V1Size * 2)
2039
0
      return false;
2040
2041
169k
  if (isa<ScalableVectorType>(V1->getType()))
2042
8.36k
    if ((Mask[0] != 0 && Mask[0] != PoisonMaskElem) || !all_equal(Mask))
2043
0
      return false;
2044
2045
169k
  return true;
2046
169k
}
2047
2048
bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
2049
78.8k
                                        const Value *Mask) {
2050
  // V1 and V2 must be vectors of the same type.
2051
78.8k
  if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
2052
0
    return false;
2053
2054
  // Mask must be a vector of i32 and must be the same kind of vector as the
2055
  // input vectors.
2056
78.8k
  auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
2057
78.8k
  if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
2058
78.8k
      isa<ScalableVectorType>(MaskTy) != isa<ScalableVectorType>(V1->getType()))
2059
0
    return false;
2060
2061
  // Check to see if Mask is valid.
2062
78.8k
  if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
2063
33.2k
    return true;
2064
2065
45.5k
  if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
2066
13.1k
    unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
2067
55.4k
    for (Value *Op : MV->operands()) {
2068
55.4k
      if (auto *CI = dyn_cast<ConstantInt>(Op)) {
2069
33.4k
        if (CI->uge(V1Size*2))
2070
0
          return false;
2071
33.4k
      } else if (!isa<UndefValue>(Op)) {
2072
0
        return false;
2073
0
      }
2074
55.4k
    }
2075
13.1k
    return true;
2076
13.1k
  }
2077
2078
32.4k
  if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
2079
32.4k
    unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
2080
32.4k
    for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
2081
167k
         i != e; ++i)
2082
135k
      if (CDS->getElementAsInteger(i) >= V1Size*2)
2083
0
        return false;
2084
32.4k
    return true;
2085
32.4k
  }
2086
2087
0
  return false;
2088
32.4k
}
2089
2090
void ShuffleVectorInst::getShuffleMask(const Constant *Mask,
2091
81.7k
                                       SmallVectorImpl<int> &Result) {
2092
81.7k
  ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();
2093
2094
81.7k
  if (isa<ConstantAggregateZero>(Mask)) {
2095
8.57k
    Result.resize(EC.getKnownMinValue(), 0);
2096
8.57k
    return;
2097
8.57k
  }
2098
2099
73.1k
  Result.reserve(EC.getKnownMinValue());
2100
2101
73.1k
  if (EC.isScalable()) {
2102
632
    assert((isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) &&
2103
632
           "Scalable vector shuffle mask must be undef or zeroinitializer");
2104
632
    int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
2105
3.64k
    for (unsigned I = 0; I < EC.getKnownMinValue(); ++I)
2106
3.01k
      Result.emplace_back(MaskVal);
2107
632
    return;
2108
632
  }
2109
2110
72.5k
  unsigned NumElts = EC.getKnownMinValue();
2111
2112
72.5k
  if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
2113
167k
    for (unsigned i = 0; i != NumElts; ++i)
2114
135k
      Result.push_back(CDS->getElementAsInteger(i));
2115
32.4k
    return;
2116
32.4k
  }
2117
394k
  for (unsigned i = 0; i != NumElts; ++i) {
2118
354k
    Constant *C = Mask->getAggregateElement(i);
2119
354k
    Result.push_back(isa<UndefValue>(C) ? -1 :
2120
354k
                     cast<ConstantInt>(C)->getZExtValue());
2121
354k
  }
2122
40.0k
}
2123
2124
90.3k
void ShuffleVectorInst::setShuffleMask(ArrayRef<int> Mask) {
2125
90.3k
  ShuffleMask.assign(Mask.begin(), Mask.end());
2126
90.3k
  ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType());
2127
90.3k
}
2128
2129
Constant *ShuffleVectorInst::convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2130
93.2k
                                                          Type *ResultTy) {
2131
93.2k
  Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
2132
93.2k
  if (isa<ScalableVectorType>(ResultTy)) {
2133
3.96k
    assert(all_equal(Mask) && "Unexpected shuffle");
2134
0
    Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
2135
3.96k
    if (Mask[0] == 0)
2136
3.30k
      return Constant::getNullValue(VecTy);
2137
657
    return UndefValue::get(VecTy);
2138
3.96k
  }
2139
89.2k
  SmallVector<Constant *, 16> MaskConst;
2140
608k
  for (int Elem : Mask) {
2141
608k
    if (Elem == PoisonMaskElem)
2142
341k
      MaskConst.push_back(PoisonValue::get(Int32Ty));
2143
266k
    else
2144
266k
      MaskConst.push_back(ConstantInt::get(Int32Ty, Elem));
2145
608k
  }
2146
89.2k
  return ConstantVector::get(MaskConst);
2147
93.2k
}
2148
2149
23.6k
static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
2150
23.6k
  assert(!Mask.empty() && "Shuffle mask must contain elements");
2151
0
  bool UsesLHS = false;
2152
23.6k
  bool UsesRHS = false;
2153
104k
  for (int I : Mask) {
2154
104k
    if (I == -1)
2155
14.1k
      continue;
2156
90.0k
    assert(I >= 0 && I < (NumOpElts * 2) &&
2157
90.0k
           "Out-of-bounds shuffle mask element");
2158
0
    UsesLHS |= (I < NumOpElts);
2159
90.0k
    UsesRHS |= (I >= NumOpElts);
2160
90.0k
    if (UsesLHS && UsesRHS)
2161
12.3k
      return false;
2162
90.0k
  }
2163
  // Allow for degenerate case: completely undef mask means neither source is used.
2164
11.3k
  return UsesLHS || UsesRHS;
2165
23.6k
}
2166
2167
20.0k
bool ShuffleVectorInst::isSingleSourceMask(ArrayRef<int> Mask, int NumSrcElts) {
2168
  // We don't have vector operand size information, so assume operands are the
2169
  // same size as the mask.
2170
20.0k
  return isSingleSourceMaskImpl(Mask, NumSrcElts);
2171
20.0k
}
2172
2173
3.63k
static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
2174
3.63k
  if (!isSingleSourceMaskImpl(Mask, NumOpElts))
2175
165
    return false;
2176
11.3k
  for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
2177
8.87k
    if (Mask[i] == -1)
2178
1.31k
      continue;
2179
7.56k
    if (Mask[i] != i && Mask[i] != (NumOpElts + i))
2180
1.03k
      return false;
2181
7.56k
  }
2182
2.43k
  return true;
2183
3.47k
}
2184
2185
109
bool ShuffleVectorInst::isIdentityMask(ArrayRef<int> Mask, int NumSrcElts) {
2186
109
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2187
0
    return false;
2188
  // We don't have vector operand size information, so assume operands are the
2189
  // same size as the mask.
2190
109
  return isIdentityMaskImpl(Mask, NumSrcElts);
2191
109
}
2192
2193
217
bool ShuffleVectorInst::isReverseMask(ArrayRef<int> Mask, int NumSrcElts) {
2194
217
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2195
0
    return false;
2196
217
  if (!isSingleSourceMask(Mask, NumSrcElts))
2197
89
    return false;
2198
2199
  // The number of elements in the mask must be at least 2.
2200
128
  if (NumSrcElts < 2)
2201
0
    return false;
2202
2203
294
  for (int I = 0, E = Mask.size(); I < E; ++I) {
2204
256
    if (Mask[I] == -1)
2205
27
      continue;
2206
229
    if (Mask[I] != (NumSrcElts - 1 - I) &&
2207
229
        Mask[I] != (NumSrcElts + NumSrcElts - 1 - I))
2208
90
      return false;
2209
229
  }
2210
38
  return true;
2211
128
}
2212
2213
556
bool ShuffleVectorInst::isZeroEltSplatMask(ArrayRef<int> Mask, int NumSrcElts) {
2214
556
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2215
0
    return false;
2216
556
  if (!isSingleSourceMask(Mask, NumSrcElts))
2217
229
    return false;
2218
1.50k
  for (int I = 0, E = Mask.size(); I < E; ++I) {
2219
1.28k
    if (Mask[I] == -1)
2220
448
      continue;
2221
839
    if (Mask[I] != 0 && Mask[I] != NumSrcElts)
2222
112
      return false;
2223
839
  }
2224
215
  return true;
2225
327
}
2226
2227
19.0k
bool ShuffleVectorInst::isSelectMask(ArrayRef<int> Mask, int NumSrcElts) {
2228
19.0k
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2229
0
    return false;
2230
  // Select is differentiated from identity. It requires using both sources.
2231
19.0k
  if (isSingleSourceMask(Mask, NumSrcElts))
2232
6.93k
    return false;
2233
68.1k
  for (int I = 0, E = Mask.size(); I < E; ++I) {
2234
57.5k
    if (Mask[I] == -1)
2235
3.75k
      continue;
2236
53.8k
    if (Mask[I] != I && Mask[I] != (NumSrcElts + I))
2237
1.56k
      return false;
2238
53.8k
  }
2239
10.5k
  return true;
2240
12.1k
}
2241
2242
4
bool ShuffleVectorInst::isTransposeMask(ArrayRef<int> Mask, int NumSrcElts) {
2243
  // Example masks that will return true:
2244
  // v1 = <a, b, c, d>
2245
  // v2 = <e, f, g, h>
2246
  // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
2247
  // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>
2248
2249
4
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2250
0
    return false;
2251
  // 1. The number of elements in the mask must be a power-of-2 and at least 2.
2252
4
  int Sz = Mask.size();
2253
4
  if (Sz < 2 || !isPowerOf2_32(Sz))
2254
0
    return false;
2255
2256
  // 2. The first element of the mask must be either a 0 or a 1.
2257
4
  if (Mask[0] != 0 && Mask[0] != 1)
2258
0
    return false;
2259
2260
  // 3. The difference between the first 2 elements must be equal to the
2261
  // number of elements in the mask.
2262
4
  if ((Mask[1] - Mask[0]) != NumSrcElts)
2263
4
    return false;
2264
2265
  // 4. The difference between consecutive even-numbered elements, and between
2266
  // consecutive odd-numbered elements, must be equal to 2.
2267
0
  for (int I = 2; I < Sz; ++I) {
2268
0
    int MaskEltVal = Mask[I];
2269
0
    if (MaskEltVal == -1)
2270
0
      return false;
2271
0
    int MaskEltPrevVal = Mask[I - 2];
2272
0
    if (MaskEltVal - MaskEltPrevVal != 2)
2273
0
      return false;
2274
0
  }
2275
0
  return true;
2276
0
}
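Hedged worked examples (an illustrative fragment, not part of the report) of how these static classifiers judge a few concrete masks over two 4-element sources:
  assert(ShuffleVectorInst::isIdentityMask({0, 1, 2, 3}, 4));     // first source, unchanged
  assert(ShuffleVectorInst::isReverseMask({3, 2, 1, 0}, 4));      // single source, reversed
  assert(ShuffleVectorInst::isZeroEltSplatMask({0, 0, 0, 0}, 4)); // broadcast of element 0
  assert(ShuffleVectorInst::isSelectMask({0, 5, 2, 7}, 4));       // per-lane pick from both sources
  assert(ShuffleVectorInst::isTransposeMask({0, 4, 2, 6}, 4));    // the trn1 pattern shown above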
2277
2278
bool ShuffleVectorInst::isSpliceMask(ArrayRef<int> Mask, int NumSrcElts,
2279
0
                                     int &Index) {
2280
0
  if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2281
0
    return false;
2282
  // Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2283
0
  int StartIndex = -1;
2284
0
  for (int I = 0, E = Mask.size(); I != E; ++I) {
2285
0
    int MaskEltVal = Mask[I];
2286
0
    if (MaskEltVal == -1)
2287
0
      continue;
2288
2289
0
    if (StartIndex == -1) {
2290
      // Don't support a StartIndex that begins in the second input, or if the
2291
      // first non-undef index would access below the StartIndex.
2292
0
      if (MaskEltVal < I || NumSrcElts <= (MaskEltVal - I))
2293
0
        return false;
2294
2295
0
      StartIndex = MaskEltVal - I;
2296
0
      continue;
2297
0
    }
2298
2299
    // Splice is sequential starting from StartIndex.
2300
0
    if (MaskEltVal != (StartIndex + I))
2301
0
      return false;
2302
0
  }
2303
2304
0
  if (StartIndex == -1)
2305
0
    return false;
2306
2307
  // NOTE: This accepts StartIndex == 0 (COPY).
2308
0
  Index = StartIndex;
2309
0
  return true;
2310
0
}
2311
2312
bool ShuffleVectorInst::isExtractSubvectorMask(ArrayRef<int> Mask,
2313
0
                                               int NumSrcElts, int &Index) {
2314
  // Must extract from a single source.
2315
0
  if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
2316
0
    return false;
2317
2318
  // Must be smaller (else this is an Identity shuffle).
2319
0
  if (NumSrcElts <= (int)Mask.size())
2320
0
    return false;
2321
2322
  // Find start of extraction, accounting that we may start with an UNDEF.
2323
0
  int SubIndex = -1;
2324
0
  for (int i = 0, e = Mask.size(); i != e; ++i) {
2325
0
    int M = Mask[i];
2326
0
    if (M < 0)
2327
0
      continue;
2328
0
    int Offset = (M % NumSrcElts) - i;
2329
0
    if (0 <= SubIndex && SubIndex != Offset)
2330
0
      return false;
2331
0
    SubIndex = Offset;
2332
0
  }
2333
2334
0
  if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
2335
0
    Index = SubIndex;
2336
0
    return true;
2337
0
  }
2338
0
  return false;
2339
0
}
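A hedged worked example: with an 8-element source, the mask {2, 3} reads two consecutive elements starting at index 2, so it is recognized as an extract-subvector at offset 2.
  int Index = -1;
  bool IsExtract =
      ShuffleVectorInst::isExtractSubvectorMask({2, 3}, /*NumSrcElts=*/8, Index);
  // IsExtract == true, Index == 2.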
2340
2341
bool ShuffleVectorInst::isInsertSubvectorMask(ArrayRef<int> Mask,
2342
                                              int NumSrcElts, int &NumSubElts,
2343
0
                                              int &Index) {
2344
0
  int NumMaskElts = Mask.size();
2345
2346
  // Don't try to match if we're shuffling to a smaller size.
2347
0
  if (NumMaskElts < NumSrcElts)
2348
0
    return false;
2349
2350
  // TODO: We don't recognize self-insertion/widening.
2351
0
  if (isSingleSourceMaskImpl(Mask, NumSrcElts))
2352
0
    return false;
2353
2354
  // Determine which mask elements are attributed to which source.
2355
0
  APInt UndefElts = APInt::getZero(NumMaskElts);
2356
0
  APInt Src0Elts = APInt::getZero(NumMaskElts);
2357
0
  APInt Src1Elts = APInt::getZero(NumMaskElts);
2358
0
  bool Src0Identity = true;
2359
0
  bool Src1Identity = true;
2360
2361
0
  for (int i = 0; i != NumMaskElts; ++i) {
2362
0
    int M = Mask[i];
2363
0
    if (M < 0) {
2364
0
      UndefElts.setBit(i);
2365
0
      continue;
2366
0
    }
2367
0
    if (M < NumSrcElts) {
2368
0
      Src0Elts.setBit(i);
2369
0
      Src0Identity &= (M == i);
2370
0
      continue;
2371
0
    }
2372
0
    Src1Elts.setBit(i);
2373
0
    Src1Identity &= (M == (i + NumSrcElts));
2374
0
  }
2375
0
  assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
2376
0
         "unknown shuffle elements");
2377
0
  assert(!Src0Elts.isZero() && !Src1Elts.isZero() &&
2378
0
         "2-source shuffle not found");
2379
2380
  // Determine lo/hi span ranges.
2381
  // TODO: How should we handle undefs at the start of subvector insertions?
2382
0
  int Src0Lo = Src0Elts.countr_zero();
2383
0
  int Src1Lo = Src1Elts.countr_zero();
2384
0
  int Src0Hi = NumMaskElts - Src0Elts.countl_zero();
2385
0
  int Src1Hi = NumMaskElts - Src1Elts.countl_zero();
2386
2387
  // If src0 is in place, see if the src1 elements is inplace within its own
2388
  // span.
2389
0
  if (Src0Identity) {
2390
0
    int NumSub1Elts = Src1Hi - Src1Lo;
2391
0
    ArrayRef<int> Sub1Mask = Mask.slice(Src1Lo, NumSub1Elts);
2392
0
    if (isIdentityMaskImpl(Sub1Mask, NumSrcElts)) {
2393
0
      NumSubElts = NumSub1Elts;
2394
0
      Index = Src1Lo;
2395
0
      return true;
2396
0
    }
2397
0
  }
2398
2399
  // If src1 is in place, see if the src0 elements are in place within their own
2400
  // span.
2401
0
  if (Src1Identity) {
2402
0
    int NumSub0Elts = Src0Hi - Src0Lo;
2403
0
    ArrayRef<int> Sub0Mask = Mask.slice(Src0Lo, NumSub0Elts);
2404
0
    if (isIdentityMaskImpl(Sub0Mask, NumSrcElts)) {
2405
0
      NumSubElts = NumSub0Elts;
2406
0
      Index = Src0Lo;
2407
0
      return true;
2408
0
    }
2409
0
  }
2410
2411
0
  return false;
2412
0
}
2413
2414
846
bool ShuffleVectorInst::isIdentityWithPadding() const {
2415
  // FIXME: Not currently possible to express a shuffle mask for a scalable
2416
  // vector for this case.
2417
846
  if (isa<ScalableVectorType>(getType()))
2418
14
    return false;
2419
2420
832
  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2421
832
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2422
832
  if (NumMaskElts <= NumOpElts)
2423
495
    return false;
2424
2425
  // The first part of the mask must choose elements from exactly 1 source op.
2426
337
  ArrayRef<int> Mask = getShuffleMask();
2427
337
  if (!isIdentityMaskImpl(Mask, NumOpElts))
2428
47
    return false;
2429
2430
  // All extending must be with undef elements.
2431
968
  for (int i = NumOpElts; i < NumMaskElts; ++i)
2432
678
    if (Mask[i] != -1)
2433
0
      return false;
2434
2435
290
  return true;
2436
290
}
2437
2438
24.0k
bool ShuffleVectorInst::isIdentityWithExtract() const {
2439
  // FIXME: Not currently possible to express a shuffle mask for a scalable
2440
  // vector for this case.
2441
24.0k
  if (isa<ScalableVectorType>(getType()))
2442
14
    return false;
2443
2444
24.0k
  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2445
24.0k
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2446
24.0k
  if (NumMaskElts >= NumOpElts)
2447
20.9k
    return false;
2448
2449
3.14k
  return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
2450
24.0k
}
2451
2452
442
bool ShuffleVectorInst::isConcat() const {
2453
  // Vector concatenation is differentiated from identity with padding.
2454
442
  if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()))
2455
97
    return false;
2456
2457
  // FIXME: Not currently possible to express a shuffle mask for a scalable
2458
  // vector for this case.
2459
345
  if (isa<ScalableVectorType>(getType()))
2460
0
    return false;
2461
2462
345
  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2463
345
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2464
345
  if (NumMaskElts != NumOpElts * 2)
2465
304
    return false;
2466
2467
  // Use the mask length rather than the operands' vector lengths here. We
2468
  // already know that the shuffle returns a vector twice as long as the inputs,
2469
  // and neither of the inputs are undef vectors. If the mask picks consecutive
2470
  // elements from both inputs, then this is a concatenation of the inputs.
2471
41
  return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
2472
345
}
2473
2474
static bool isReplicationMaskWithParams(ArrayRef<int> Mask,
2475
0
                                        int ReplicationFactor, int VF) {
2476
0
  assert(Mask.size() == (unsigned)ReplicationFactor * VF &&
2477
0
         "Unexpected mask size.");
2478
2479
0
  for (int CurrElt : seq(VF)) {
2480
0
    ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
2481
0
    assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&
2482
0
           "Run out of mask?");
2483
0
    Mask = Mask.drop_front(ReplicationFactor);
2484
0
    if (!all_of(CurrSubMask, [CurrElt](int MaskElt) {
2485
0
          return MaskElt == PoisonMaskElem || MaskElt == CurrElt;
2486
0
        }))
2487
0
      return false;
2488
0
  }
2489
0
  assert(Mask.empty() && "Did not consume the whole mask?");
2490
2491
0
  return true;
2492
0
}
2493
2494
bool ShuffleVectorInst::isReplicationMask(ArrayRef<int> Mask,
2495
0
                                          int &ReplicationFactor, int &VF) {
2496
  // undef-less case is trivial.
2497
0
  if (!llvm::is_contained(Mask, PoisonMaskElem)) {
2498
0
    ReplicationFactor =
2499
0
        Mask.take_while([](int MaskElt) { return MaskElt == 0; }).size();
2500
0
    if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
2501
0
      return false;
2502
0
    VF = Mask.size() / ReplicationFactor;
2503
0
    return isReplicationMaskWithParams(Mask, ReplicationFactor, VF);
2504
0
  }
2505
2506
  // However, if the mask contains undefs, we have to enumerate possible tuples
2507
  // and pick one. There are bounds on replication factor: [1, mask size]
2508
  // (where RF=1 is an identity shuffle, RF=mask size is a broadcast shuffle)
2509
  // Additionally, mask size is a replication factor multiplied by vector size,
2510
  // which further significantly reduces the search space.
2511
2512
  // Before doing that, let's perform basic correctness checking first.
2513
0
  int Largest = -1;
2514
0
  for (int MaskElt : Mask) {
2515
0
    if (MaskElt == PoisonMaskElem)
2516
0
      continue;
2517
    // Elements must be in non-decreasing order.
2518
0
    if (MaskElt < Largest)
2519
0
      return false;
2520
0
    Largest = std::max(Largest, MaskElt);
2521
0
  }
2522
2523
  // Prefer larger replication factor if all else equal.
2524
0
  for (int PossibleReplicationFactor :
2525
0
       reverse(seq_inclusive<unsigned>(1, Mask.size()))) {
2526
0
    if (Mask.size() % PossibleReplicationFactor != 0)
2527
0
      continue;
2528
0
    int PossibleVF = Mask.size() / PossibleReplicationFactor;
2529
0
    if (!isReplicationMaskWithParams(Mask, PossibleReplicationFactor,
2530
0
                                     PossibleVF))
2531
0
      continue;
2532
0
    ReplicationFactor = PossibleReplicationFactor;
2533
0
    VF = PossibleVF;
2534
0
    return true;
2535
0
  }
2536
2537
0
  return false;
2538
0
}
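A hedged worked example: the mask below repeats each element of a 3-element source twice, in order, so it is recognized as a replication mask with ReplicationFactor == 2 and VF == 3.
  int ReplicationFactor = 0, VF = 0;
  bool IsRep = ShuffleVectorInst::isReplicationMask({0, 0, 1, 1, 2, 2},
                                                    ReplicationFactor, VF);
  // IsRep == true, ReplicationFactor == 2, VF == 3.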
2539
2540
bool ShuffleVectorInst::isReplicationMask(int &ReplicationFactor,
2541
0
                                          int &VF) const {
2542
  // Not possible to express a shuffle mask for a scalable vector for this
2543
  // case.
2544
0
  if (isa<ScalableVectorType>(getType()))
2545
0
    return false;
2546
2547
0
  VF = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2548
0
  if (ShuffleMask.size() % VF != 0)
2549
0
    return false;
2550
0
  ReplicationFactor = ShuffleMask.size() / VF;
2551
2552
0
  return isReplicationMaskWithParams(ShuffleMask, ReplicationFactor, VF);
2553
0
}
2554
2555
0
bool ShuffleVectorInst::isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF) {
2556
0
  if (VF <= 0 || Mask.size() < static_cast<unsigned>(VF) ||
2557
0
      Mask.size() % VF != 0)
2558
0
    return false;
2559
0
  for (unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
2560
0
    ArrayRef<int> SubMask = Mask.slice(K, VF);
2561
0
    if (all_of(SubMask, [](int Idx) { return Idx == PoisonMaskElem; }))
2562
0
      continue;
2563
0
    SmallBitVector Used(VF, false);
2564
0
    for (int Idx : SubMask) {
2565
0
      if (Idx != PoisonMaskElem && Idx < VF)
2566
0
        Used.set(Idx);
2567
0
    }
2568
0
    if (!Used.all())
2569
0
      return false;
2570
0
  }
2571
0
  return true;
2572
0
}
2573
2574
/// Return true if this shuffle mask is a one-use-single-source mask for the
/// given VF.
2575
0
bool ShuffleVectorInst::isOneUseSingleSourceMask(int VF) const {
2576
  // Not possible to express a shuffle mask for a scalable vector for this
2577
  // case.
2578
0
  if (isa<ScalableVectorType>(getType()))
2579
0
    return false;
2580
0
  if (!isSingleSourceMask(ShuffleMask, VF))
2581
0
    return false;
2582
2583
0
  return isOneUseSingleSourceMask(ShuffleMask, VF);
2584
0
}
2585
2586
324
bool ShuffleVectorInst::isInterleave(unsigned Factor) {
2587
324
  FixedVectorType *OpTy = dyn_cast<FixedVectorType>(getOperand(0)->getType());
2588
  // shuffle_vector can only interleave fixed length vectors - for scalable
2589
  // vectors, see the @llvm.experimental.vector.interleave2 intrinsic
2590
324
  if (!OpTy)
2591
0
    return false;
2592
324
  unsigned OpNumElts = OpTy->getNumElements();
2593
2594
324
  return isInterleaveMask(ShuffleMask, Factor, OpNumElts * 2);
2595
324
}
2596
2597
bool ShuffleVectorInst::isInterleaveMask(
2598
    ArrayRef<int> Mask, unsigned Factor, unsigned NumInputElts,
2599
324
    SmallVectorImpl<unsigned> &StartIndexes) {
2600
324
  unsigned NumElts = Mask.size();
2601
324
  if (NumElts % Factor)
2602
3
    return false;
2603
2604
321
  unsigned LaneLen = NumElts / Factor;
2605
321
  if (!isPowerOf2_32(LaneLen))
2606
2
    return false;
2607
2608
319
  StartIndexes.resize(Factor);
2609
2610
  // Check whether each element matches the general interleaved rule.
2611
  // Ignore undef elements, as long as the defined elements match the rule.
2612
  // Outer loop processes all factors (x, y, z in the above example)
2613
319
  unsigned I = 0, J;
2614
953
  for (; I < Factor; I++) {
2615
637
    unsigned SavedLaneValue;
2616
637
    unsigned SavedNoUndefs = 0;
2617
2618
    // Inner loop processes consecutive accesses (x, x+1... in the example)
2619
1.35k
    for (J = 0; J < LaneLen - 1; J++) {
2620
      // Lane computes x's position in the Mask
2621
721
      unsigned Lane = J * Factor + I;
2622
721
      unsigned NextLane = Lane + Factor;
2623
721
      int LaneValue = Mask[Lane];
2624
721
      int NextLaneValue = Mask[NextLane];
2625
2626
      // If both are defined, values must be sequential
2627
721
      if (LaneValue >= 0 && NextLaneValue >= 0 &&
2628
721
          LaneValue + 1 != NextLaneValue)
2629
3
        break;
2630
2631
      // If the next value is undef, save the current one as reference
2632
718
      if (LaneValue >= 0 && NextLaneValue < 0) {
2633
0
        SavedLaneValue = LaneValue;
2634
0
        SavedNoUndefs = 1;
2635
0
      }
2636
2637
      // Undefs are allowed, but defined elements must still be consecutive:
2638
      // i.e.: x,..., undef,..., x + 2,..., undef,..., undef,..., x + 5, ....
2639
      // Verify this by storing the last non-undef followed by an undef
2640
      // Check that the following non-undef mask elements are incremented by the
2641
      // corresponding distance.
2642
718
      if (SavedNoUndefs > 0 && LaneValue < 0) {
2643
0
        SavedNoUndefs++;
2644
0
        if (NextLaneValue >= 0 &&
2645
0
            SavedLaneValue + SavedNoUndefs != (unsigned)NextLaneValue)
2646
0
          break;
2647
0
      }
2648
718
    }
2649
2650
637
    if (J < LaneLen - 1)
2651
3
      return false;
2652
2653
634
    int StartMask = 0;
2654
634
    if (Mask[I] >= 0) {
2655
      // StartMask defined by the first value in lane (J=0)
2656
4
      StartMask = Mask[I];
2657
630
    } else if (Mask[(LaneLen - 1) * Factor + I] >= 0) {
2658
      // StartMask defined by the last value in lane
2659
0
      StartMask = Mask[(LaneLen - 1) * Factor + I] - J;
2660
630
    } else if (SavedNoUndefs > 0) {
2661
      // StartMask defined by some non-zero value in the j loop
2662
0
      StartMask = SavedLaneValue - (LaneLen - 1 - SavedNoUndefs);
2663
0
    }
2664
    // else StartMask remains set to 0, i.e. all elements are undefs
2665
2666
634
    if (StartMask < 0)
2667
0
      return false;
2668
    // We must stay within the vectors; this case can happen with undefs.
2669
634
    if (StartMask + LaneLen > NumInputElts)
2670
0
      return false;
2671
2672
634
    StartIndexes[I] = StartMask;
2673
634
  }
2674
2675
316
  return true;
2676
319
}
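
As a concrete instance of the rule checked above (a sketch; the wrapper name is illustrative): with Factor = 2 and two 4-element operands, the mask <0,4,1,5,2,6,3,7> picks lanes alternately from both inputs, and the recovered start indexes are 0 and 4.

// Sketch only: drives the isInterleaveMask overload defined above.
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instructions.h"
#include <cassert>

static void interleaveMaskExample() {
  // Two 4-element inputs interleaved pairwise: x0,y0,x1,y1,...
  int Mask[] = {0, 4, 1, 5, 2, 6, 3, 7};
  llvm::SmallVector<unsigned, 2> StartIndexes;
  bool IsInterleave = llvm::ShuffleVectorInst::isInterleaveMask(
      Mask, /*Factor=*/2, /*NumInputElts=*/8, StartIndexes);
  assert(IsInterleave && StartIndexes[0] == 0 && StartIndexes[1] == 4);
  (void)IsInterleave;
}
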
2677
2678
/// Try to lower a vector shuffle as a bit rotation.
2679
///
2680
/// Look for a repeated rotation pattern in each sub group.
2681
/// Returns an element-wise left bit rotation amount or -1 if failed.
2682
691
static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
2683
691
  int NumElts = Mask.size();
2684
691
  assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");
2685
2686
0
  int RotateAmt = -1;
2687
853
  for (int i = 0; i != NumElts; i += NumSubElts) {
2688
1.91k
    for (int j = 0; j != NumSubElts; ++j) {
2689
1.75k
      int M = Mask[i + j];
2690
1.75k
      if (M < 0)
2691
419
        continue;
2692
1.33k
      if (M < i || M >= i + NumSubElts)
2693
312
        return -1;
2694
1.02k
      int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
2695
1.02k
      if (0 <= RotateAmt && Offset != RotateAmt)
2696
371
        return -1;
2697
654
      RotateAmt = Offset;
2698
654
    }
2699
845
  }
2700
8
  return RotateAmt;
2701
691
}
2702
2703
bool ShuffleVectorInst::isBitRotateMask(
2704
    ArrayRef<int> Mask, unsigned EltSizeInBits, unsigned MinSubElts,
2705
294
    unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt) {
2706
977
  for (NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
2707
691
    int EltRotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
2708
691
    if (EltRotateAmt < 0)
2709
683
      continue;
2710
8
    RotateAmt = EltRotateAmt * EltSizeInBits;
2711
8
    return true;
2712
691
  }
2713
2714
286
  return false;
2715
294
}
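
For illustration (a hedged sketch; the wrapper name is illustrative): swapping adjacent i8 lanes with the mask <1,0,3,2> is recognized as a rotation of each 2-element (16-bit) group by 8 bits.

// Sketch only: interprets a byte shuffle as an element-wise bit rotation.
#include "llvm/IR/Instructions.h"
#include <cassert>

static void bitRotateMaskExample() {
  // Swapping adjacent i8 elements is a rotate-by-8 of each i16 group.
  int Mask[] = {1, 0, 3, 2};
  unsigned NumSubElts = 0, RotateAmt = 0;
  bool IsRotate = llvm::ShuffleVectorInst::isBitRotateMask(
      Mask, /*EltSizeInBits=*/8, /*MinSubElts=*/2, /*MaxSubElts=*/2,
      NumSubElts, RotateAmt);
  assert(IsRotate && NumSubElts == 2 && RotateAmt == 8);
  (void)IsRotate;
}
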
2716
2717
//===----------------------------------------------------------------------===//
2718
//                             InsertValueInst Class
2719
//===----------------------------------------------------------------------===//
2720
2721
void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2722
176k
                           const Twine &Name) {
2723
176k
  assert(getNumOperands() == 2 && "NumOperands not initialized?");
2724
2725
  // There's no fundamental reason why we require at least one index
2726
  // (other than weirdness with &*IdxBegin being invalid; see
2727
  // getelementptr's init routine for example). But there's no
2728
  // present need to support it.
2729
0
  assert(!Idxs.empty() && "InsertValueInst must have at least one index");
2730
2731
0
  assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) ==
2732
176k
         Val->getType() && "Inserted value must match indexed type!");
2733
0
  Op<0>() = Agg;
2734
176k
  Op<1>() = Val;
2735
2736
176k
  Indices.append(Idxs.begin(), Idxs.end());
2737
176k
  setName(Name);
2738
176k
}
2739
2740
InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
2741
  : Instruction(IVI.getType(), InsertValue,
2742
                OperandTraits<InsertValueInst>::op_begin(this), 2),
2743
1.17k
    Indices(IVI.Indices) {
2744
1.17k
  Op<0>() = IVI.getOperand(0);
2745
1.17k
  Op<1>() = IVI.getOperand(1);
2746
1.17k
  SubclassOptionalData = IVI.SubclassOptionalData;
2747
1.17k
}
2748
2749
//===----------------------------------------------------------------------===//
2750
//                             ExtractValueInst Class
2751
//===----------------------------------------------------------------------===//
2752
2753
183k
void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
2754
183k
  assert(getNumOperands() == 1 && "NumOperands not initialized?");
2755
2756
  // There's no fundamental reason why we require at least one index.
2757
  // But there's no present need to support it.
2758
0
  assert(!Idxs.empty() && "ExtractValueInst must have at least one index");
2759
2760
0
  Indices.append(Idxs.begin(), Idxs.end());
2761
183k
  setName(Name);
2762
183k
}
2763
2764
ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
2765
  : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)),
2766
947
    Indices(EVI.Indices) {
2767
947
  SubclassOptionalData = EVI.SubclassOptionalData;
2768
947
}
2769
2770
// getIndexedType - Returns the type of the element that would be extracted
2771
// with an extractvalue instruction with the specified parameters.
2772
//
2773
// A null type is returned if the indices are invalid for the specified
2774
// aggregate type.
2775
//
2776
Type *ExtractValueInst::getIndexedType(Type *Agg,
2777
448k
                                       ArrayRef<unsigned> Idxs) {
2778
455k
  for (unsigned Index : Idxs) {
2779
    // We can't use CompositeType::indexValid(Index) here.
2780
    // indexValid() always returns true for arrays because getelementptr allows
2781
    // out-of-bounds indices. Since we don't allow those for extractvalue and
2782
    // insertvalue we need to check array indexing manually.
2783
    // Since the only other types we can index into are struct types it's just
2784
    // as easy to check those manually as well.
2785
455k
    if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
2786
352k
      if (Index >= AT->getNumElements())
2787
0
        return nullptr;
2788
352k
      Agg = AT->getElementType();
2789
352k
    } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
2790
102k
      if (Index >= ST->getNumElements())
2791
0
        return nullptr;
2792
102k
      Agg = ST->getElementType(Index);
2793
102k
    } else {
2794
      // Not a valid type to index into.
2795
534
      return nullptr;
2796
534
    }
2797
455k
  }
2798
447k
  return const_cast<Type*>(Agg);
2799
448k
}
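
A small sketch of the traversal above (names are illustrative): indexing the aggregate { [4 x i32], float } with indices {0, 2} walks into the array field and yields i32.

// Sketch only: resolves nested aggregate indices with getIndexedType.
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include <cassert>

static void indexedTypeExample() {
  llvm::LLVMContext Ctx;
  llvm::Type *I32 = llvm::Type::getInt32Ty(Ctx);
  // %agg = type { [4 x i32], float }
  llvm::Type *Agg = llvm::StructType::get(llvm::ArrayType::get(I32, 4),
                                          llvm::Type::getFloatTy(Ctx));
  unsigned Idxs[] = {0, 2};
  // Indexing field 0, then array element 2, yields i32.
  assert(llvm::ExtractValueInst::getIndexedType(Agg, Idxs) == I32);
  (void)Agg;
}
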
2800
2801
//===----------------------------------------------------------------------===//
2802
//                             UnaryOperator Class
2803
//===----------------------------------------------------------------------===//
2804
2805
UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
2806
                             Type *Ty, const Twine &Name,
2807
                             Instruction *InsertBefore)
2808
10.0k
  : UnaryInstruction(Ty, iType, S, InsertBefore) {
2809
10.0k
  Op<0>() = S;
2810
10.0k
  setName(Name);
2811
10.0k
  AssertOK();
2812
10.0k
}
2813
2814
UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
2815
                             Type *Ty, const Twine &Name,
2816
                             BasicBlock *InsertAtEnd)
2817
0
  : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
2818
0
  Op<0>() = S;
2819
0
  setName(Name);
2820
0
  AssertOK();
2821
0
}
2822
2823
UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
2824
                                     const Twine &Name,
2825
10.0k
                                     Instruction *InsertBefore) {
2826
10.0k
  return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
2827
10.0k
}
2828
2829
UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
2830
                                     const Twine &Name,
2831
27
                                     BasicBlock *InsertAtEnd) {
2832
27
  UnaryOperator *Res = Create(Op, S, Name);
2833
27
  Res->insertInto(InsertAtEnd, InsertAtEnd->end());
2834
27
  return Res;
2835
27
}
2836
2837
10.0k
void UnaryOperator::AssertOK() {
2838
10.0k
  Value *LHS = getOperand(0);
2839
10.0k
  (void)LHS; // Silence warnings.
2840
10.0k
#ifndef NDEBUG
2841
10.0k
  switch (getOpcode()) {
2842
10.0k
  case FNeg:
2843
10.0k
    assert(getType() == LHS->getType() &&
2844
10.0k
           "Unary operation should return same type as operand!");
2845
0
    assert(getType()->isFPOrFPVectorTy() &&
2846
10.0k
           "Tried to create a floating-point operation on a "
2847
10.0k
           "non-floating-point type!");
2848
0
    break;
2849
0
  default: llvm_unreachable("Invalid opcode provided");
2850
10.0k
  }
2851
10.0k
#endif
2852
10.0k
}
2853
2854
//===----------------------------------------------------------------------===//
2855
//                             BinaryOperator Class
2856
//===----------------------------------------------------------------------===//
2857
2858
BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
2859
                               Type *Ty, const Twine &Name,
2860
                               Instruction *InsertBefore)
2861
  : Instruction(Ty, iType,
2862
                OperandTraits<BinaryOperator>::op_begin(this),
2863
                OperandTraits<BinaryOperator>::operands(this),
2864
3.79M
                InsertBefore) {
2865
3.79M
  Op<0>() = S1;
2866
3.79M
  Op<1>() = S2;
2867
3.79M
  setName(Name);
2868
3.79M
  AssertOK();
2869
3.79M
}
2870
2871
BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
2872
                               Type *Ty, const Twine &Name,
2873
                               BasicBlock *InsertAtEnd)
2874
  : Instruction(Ty, iType,
2875
                OperandTraits<BinaryOperator>::op_begin(this),
2876
                OperandTraits<BinaryOperator>::operands(this),
2877
0
                InsertAtEnd) {
2878
0
  Op<0>() = S1;
2879
0
  Op<1>() = S2;
2880
0
  setName(Name);
2881
0
  AssertOK();
2882
0
}
2883
2884
3.79M
void BinaryOperator::AssertOK() {
2885
3.79M
  Value *LHS = getOperand(0), *RHS = getOperand(1);
2886
3.79M
  (void)LHS; (void)RHS; // Silence warnings.
2887
3.79M
  assert(LHS->getType() == RHS->getType() &&
2888
3.79M
         "Binary operator operand types must match!");
2889
0
#ifndef NDEBUG
2890
0
  switch (getOpcode()) {
2891
1.00M
  case Add: case Sub:
2892
1.42M
  case Mul:
2893
1.42M
    assert(getType() == LHS->getType() &&
2894
1.42M
           "Arithmetic operation should return same type as operands!");
2895
0
    assert(getType()->isIntOrIntVectorTy() &&
2896
1.42M
           "Tried to create an integer operation on a non-integer type!");
2897
0
    break;
2898
106k
  case FAdd: case FSub:
2899
161k
  case FMul:
2900
161k
    assert(getType() == LHS->getType() &&
2901
161k
           "Arithmetic operation should return same type as operands!");
2902
0
    assert(getType()->isFPOrFPVectorTy() &&
2903
161k
           "Tried to create a floating-point operation on a "
2904
161k
           "non-floating-point type!");
2905
0
    break;
2906
165k
  case UDiv:
2907
329k
  case SDiv:
2908
329k
    assert(getType() == LHS->getType() &&
2909
329k
           "Arithmetic operation should return same type as operands!");
2910
0
    assert(getType()->isIntOrIntVectorTy() &&
2911
329k
           "Incorrect operand type (not integer) for S/UDIV");
2912
0
    break;
2913
47.3k
  case FDiv:
2914
47.3k
    assert(getType() == LHS->getType() &&
2915
47.3k
           "Arithmetic operation should return same type as operands!");
2916
0
    assert(getType()->isFPOrFPVectorTy() &&
2917
47.3k
           "Incorrect operand type (not floating point) for FDIV");
2918
0
    break;
2919
180k
  case URem:
2920
340k
  case SRem:
2921
340k
    assert(getType() == LHS->getType() &&
2922
340k
           "Arithmetic operation should return same type as operands!");
2923
0
    assert(getType()->isIntOrIntVectorTy() &&
2924
340k
           "Incorrect operand type (not integer) for S/UREM");
2925
0
    break;
2926
39.4k
  case FRem:
2927
39.4k
    assert(getType() == LHS->getType() &&
2928
39.4k
           "Arithmetic operation should return same type as operands!");
2929
0
    assert(getType()->isFPOrFPVectorTy() &&
2930
39.4k
           "Incorrect operand type (not floating point) for FREM");
2931
0
    break;
2932
272k
  case Shl:
2933
485k
  case LShr:
2934
655k
  case AShr:
2935
655k
    assert(getType() == LHS->getType() &&
2936
655k
           "Shift operation should return same type as operands!");
2937
0
    assert(getType()->isIntOrIntVectorTy() &&
2938
655k
           "Tried to create a shift operation on a non-integral type!");
2939
0
    break;
2940
599k
  case And: case Or:
2941
797k
  case Xor:
2942
797k
    assert(getType() == LHS->getType() &&
2943
797k
           "Logical operation should return same type as operands!");
2944
0
    assert(getType()->isIntOrIntVectorTy() &&
2945
797k
           "Tried to create a logical operation on a non-integral type!");
2946
0
    break;
2947
0
  default: llvm_unreachable("Invalid opcode provided");
2948
3.79M
  }
2949
3.79M
#endif
2950
3.79M
}
2951
2952
BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
2953
                                       const Twine &Name,
2954
3.78M
                                       Instruction *InsertBefore) {
2955
3.78M
  assert(S1->getType() == S2->getType() &&
2956
3.78M
         "Cannot create binary operator with two operands of differing type!");
2957
0
  return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
2958
3.78M
}
2959
2960
BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
2961
                                       const Twine &Name,
2962
9.21k
                                       BasicBlock *InsertAtEnd) {
2963
9.21k
  BinaryOperator *Res = Create(Op, S1, S2, Name);
2964
9.21k
  Res->insertInto(InsertAtEnd, InsertAtEnd->end());
2965
9.21k
  return Res;
2966
9.21k
}
2967
2968
BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
2969
9.96k
                                          Instruction *InsertBefore) {
2970
9.96k
  Value *Zero = ConstantInt::get(Op->getType(), 0);
2971
9.96k
  return new BinaryOperator(Instruction::Sub,
2972
9.96k
                            Zero, Op,
2973
9.96k
                            Op->getType(), Name, InsertBefore);
2974
9.96k
}
2975
2976
BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
2977
0
                                          BasicBlock *InsertAtEnd) {
2978
0
  Value *Zero = ConstantInt::get(Op->getType(), 0);
2979
0
  return new BinaryOperator(Instruction::Sub,
2980
0
                            Zero, Op,
2981
0
                            Op->getType(), Name, InsertAtEnd);
2982
0
}
2983
2984
BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
2985
278
                                             Instruction *InsertBefore) {
2986
278
  Value *Zero = ConstantInt::get(Op->getType(), 0);
2987
278
  return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertBefore);
2988
278
}
2989
2990
BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
2991
0
                                             BasicBlock *InsertAtEnd) {
2992
0
  Value *Zero = ConstantInt::get(Op->getType(), 0);
2993
0
  return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertAtEnd);
2994
0
}
2995
2996
BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
2997
0
                                             Instruction *InsertBefore) {
2998
0
  Value *Zero = ConstantInt::get(Op->getType(), 0);
2999
0
  return BinaryOperator::CreateNUWSub(Zero, Op, Name, InsertBefore);
3000
0
}
3001
3002
BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
3003
0
                                             BasicBlock *InsertAtEnd) {
3004
0
  Value *Zero = ConstantInt::get(Op->getType(), 0);
3005
0
  return BinaryOperator::CreateNUWSub(Zero, Op, Name, InsertAtEnd);
3006
0
}
3007
3008
BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
3009
2.93k
                                          Instruction *InsertBefore) {
3010
2.93k
  Constant *C = Constant::getAllOnesValue(Op->getType());
3011
2.93k
  return new BinaryOperator(Instruction::Xor, Op, C,
3012
2.93k
                            Op->getType(), Name, InsertBefore);
3013
2.93k
}
3014
3015
BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
3016
0
                                          BasicBlock *InsertAtEnd) {
3017
0
  Constant *AllOnes = Constant::getAllOnesValue(Op->getType());
3018
0
  return new BinaryOperator(Instruction::Xor, Op, AllOnes,
3019
0
                            Op->getType(), Name, InsertAtEnd);
3020
0
}
3021
3022
// Exchange the two operands to this instruction. This function is safe to
3023
// use on any binary instruction and does not modify the semantics of the
3024
// instruction. If the instruction is not commutative (e.g., sub), the operands
3025
// are not exchanged and true is returned.
3026
70.9k
bool BinaryOperator::swapOperands() {
3027
70.9k
  if (!isCommutative())
3028
0
    return true; // Can't commute operands
3029
70.9k
  Op<0>().swap(Op<1>());
3030
70.9k
  return false;
3031
70.9k
}
3032
3033
//===----------------------------------------------------------------------===//
3034
//                             FPMathOperator Class
3035
//===----------------------------------------------------------------------===//
3036
3037
0
float FPMathOperator::getFPAccuracy() const {
3038
0
  const MDNode *MD =
3039
0
      cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
3040
0
  if (!MD)
3041
0
    return 0.0;
3042
0
  ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0));
3043
0
  return Accuracy->getValueAPF().convertToFloat();
3044
0
}
3045
3046
//===----------------------------------------------------------------------===//
3047
//                                CastInst Class
3048
//===----------------------------------------------------------------------===//
3049
3050
// Just determine if this cast only deals with integral->integral conversion.
3051
0
bool CastInst::isIntegerCast() const {
3052
0
  switch (getOpcode()) {
3053
0
    default: return false;
3054
0
    case Instruction::ZExt:
3055
0
    case Instruction::SExt:
3056
0
    case Instruction::Trunc:
3057
0
      return true;
3058
0
    case Instruction::BitCast:
3059
0
      return getOperand(0)->getType()->isIntegerTy() &&
3060
0
        getType()->isIntegerTy();
3061
0
  }
3062
0
}
3063
3064
/// This function determines if the CastInst does not require any bits to be
3065
/// changed in order to effect the cast. Essentially, it identifies cases where
3066
/// no code gen is necessary for the cast, hence the name no-op cast.  For
3067
/// example, the following are all no-op casts:
3068
/// # bitcast i32* %x to i8*
3069
/// # bitcast <2 x i32> %x to <4 x i16>
3070
/// # ptrtoint i32* %x to i32     ; on 32-bit platforms only
3071
/// Determine if the described cast is a no-op.
3072
bool CastInst::isNoopCast(Instruction::CastOps Opcode,
3073
                          Type *SrcTy,
3074
                          Type *DestTy,
3075
5.21k
                          const DataLayout &DL) {
3076
5.21k
  assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
3077
0
  switch (Opcode) {
3078
0
    default: llvm_unreachable("Invalid CastOp");
3079
149
    case Instruction::Trunc:
3080
500
    case Instruction::ZExt:
3081
4.95k
    case Instruction::SExt:
3082
4.95k
    case Instruction::FPTrunc:
3083
5.01k
    case Instruction::FPExt:
3084
5.02k
    case Instruction::UIToFP:
3085
5.02k
    case Instruction::SIToFP:
3086
5.02k
    case Instruction::FPToUI:
3087
5.02k
    case Instruction::FPToSI:
3088
5.02k
    case Instruction::AddrSpaceCast:
3089
      // TODO: Target informations may give a more accurate answer here.
3090
5.02k
      return false;
3091
117
    case Instruction::BitCast:
3092
117
      return true;  // BitCast never modifies bits.
3093
31
    case Instruction::PtrToInt:
3094
31
      return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
3095
31
             DestTy->getScalarSizeInBits();
3096
36
    case Instruction::IntToPtr:
3097
36
      return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
3098
36
             SrcTy->getScalarSizeInBits();
3099
5.21k
  }
3100
5.21k
}
3101
3102
5.21k
bool CastInst::isNoopCast(const DataLayout &DL) const {
3103
5.21k
  return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
3104
5.21k
}
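
A minimal sketch of the static query above, assuming a 64-bit pointer layout (the data-layout string and wrapper name below are only illustrative): a ptrtoint from a default-address-space pointer to i64 changes no bits, so it is reported as a no-op.

// Sketch only: a ptrtoint to the pointer-sized integer is a no-op cast.
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include <cassert>

static void noopCastExample() {
  llvm::LLVMContext Ctx;
  llvm::DataLayout DL("e-p:64:64"); // illustrative 64-bit pointer layout
  llvm::Type *PtrTy = llvm::PointerType::getUnqual(Ctx);
  bool IsNoop = llvm::CastInst::isNoopCast(
      llvm::Instruction::PtrToInt, PtrTy, llvm::Type::getInt64Ty(Ctx), DL);
  assert(IsNoop && "ptrtoint ptr -> i64 changes no bits under this layout");
  (void)IsNoop;
}
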
3105
3106
/// This function determines if a pair of casts can be eliminated and what
3107
/// opcode should be used in the elimination. This assumes that there are two
3108
/// instructions like this:
3109
/// *  %F = firstOpcode SrcTy %x to MidTy
3110
/// *  %S = secondOpcode MidTy %F to DstTy
3111
/// The function returns a resultOpcode so these two casts can be replaced with:
3112
/// *  %Replacement = resultOpcode SrcTy %x to DstTy
3113
/// If no such cast is permitted, the function returns 0.
3114
unsigned CastInst::isEliminableCastPair(
3115
  Instruction::CastOps firstOp, Instruction::CastOps secondOp,
3116
  Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
3117
12.0k
  Type *DstIntPtrTy) {
3118
  // Define the 144 possibilities for these two cast instructions. The values
3119
  // in this matrix determine what to do in a given situation and select the
3120
  // case in the switch below.  The rows correspond to firstOp, the columns
3121
  // correspond to secondOp.  In looking at the table below, keep in mind
3122
  // the following cast properties:
3123
  //
3124
  //          Size Compare       Source               Destination
3125
  // Operator  Src ? Size   Type       Sign         Type       Sign
3126
  // -------- ------------ -------------------   ---------------------
3127
  // TRUNC         >       Integer      Any        Integral     Any
3128
  // ZEXT          <       Integral   Unsigned     Integer      Any
3129
  // SEXT          <       Integral    Signed      Integer      Any
3130
  // FPTOUI       n/a      FloatPt      n/a        Integral   Unsigned
3131
  // FPTOSI       n/a      FloatPt      n/a        Integral    Signed
3132
  // UITOFP       n/a      Integral   Unsigned     FloatPt      n/a
3133
  // SITOFP       n/a      Integral    Signed      FloatPt      n/a
3134
  // FPTRUNC       >       FloatPt      n/a        FloatPt      n/a
3135
  // FPEXT         <       FloatPt      n/a        FloatPt      n/a
3136
  // PTRTOINT     n/a      Pointer      n/a        Integral   Unsigned
3137
  // INTTOPTR     n/a      Integral   Unsigned     Pointer      n/a
3138
  // BITCAST       =       FirstClass   n/a       FirstClass    n/a
3139
  // ADDRSPCST    n/a      Pointer      n/a        Pointer      n/a
3140
  //
3141
  // NOTE: some transforms are safe, but we consider them to be non-profitable.
3142
  // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
3143
  // into "fptoui double to i64", but this loses information about the range
3144
  // of the produced value (we no longer know the top-part is all zeros).
3145
  // Further this conversion is often much more expensive for typical hardware,
3146
  // and causes issues when building libgcc.  We disallow fptosi+sext for the
3147
  // same reason.
3148
12.0k
  const unsigned numCastOps =
3149
12.0k
    Instruction::CastOpsEnd - Instruction::CastOpsBegin;
3150
12.0k
  static const uint8_t CastResults[numCastOps][numCastOps] = {
3151
    // T        F  F  U  S  F  F  P  I  B  A  -+
3152
    // R  Z  S  P  P  I  I  T  P  2  N  T  S   |
3153
    // U  E  E  2  2  2  2  R  E  I  T  C  C   +- secondOp
3154
    // N  X  X  U  S  F  F  N  X  N  2  V  V   |
3155
    // C  T  T  I  I  P  P  C  T  T  P  T  T  -+
3156
12.0k
    {  1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc         -+
3157
12.0k
    {  8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt           |
3158
12.0k
    {  8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt           |
3159
12.0k
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI         |
3160
12.0k
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI         |
3161
12.0k
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP         +- firstOp
3162
12.0k
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP         |
3163
12.0k
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc        |
3164
12.0k
    { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt          |
3165
12.0k
    {  1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt       |
3166
12.0k
    { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr       |
3167
12.0k
    {  5, 5, 5, 6, 6, 5, 5, 6, 6,16, 5, 1,14}, // BitCast        |
3168
12.0k
    {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
3169
12.0k
  };
3170
3171
  // TODO: This logic could be encoded into the table above and handled in the
3172
  // switch below.
3173
  // If either of the casts is a bitcast from scalar to vector, disallow the
3174
  // merging. However, any pair of bitcasts is allowed.
3175
12.0k
  bool IsFirstBitcast  = (firstOp == Instruction::BitCast);
3176
12.0k
  bool IsSecondBitcast = (secondOp == Instruction::BitCast);
3177
12.0k
  bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;
3178
3179
  // Check if any of the casts convert scalars <-> vectors.
3180
12.0k
  if ((IsFirstBitcast  && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
3181
12.0k
      (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
3182
500
    if (!AreBothBitcasts)
3183
288
      return 0;
3184
3185
11.8k
  int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
3186
11.8k
                            [secondOp-Instruction::CastOpsBegin];
3187
11.8k
  switch (ElimCase) {
3188
5.39k
    case 0:
3189
      // Categorically disallowed.
3190
5.39k
      return 0;
3191
1.40k
    case 1:
3192
      // Allowed, use first cast's opcode.
3193
1.40k
      return firstOp;
3194
374
    case 2:
3195
      // Allowed, use second cast's opcode.
3196
374
      return secondOp;
3197
446
    case 3:
3198
      // No-op cast in second op implies firstOp as long as the DestTy
3199
      // is integer and we are not converting between a vector and a
3200
      // non-vector type.
3201
446
      if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
3202
0
        return firstOp;
3203
446
      return 0;
3204
491
    case 4:
3205
      // No-op cast in second op implies firstOp as long as the DestTy
3206
      // matches MidTy.
3207
491
      if (DstTy == MidTy)
3208
0
        return firstOp;
3209
491
      return 0;
3210
303
    case 5:
3211
      // No-op cast in first op implies secondOp as long as the SrcTy
3212
      // is an integer.
3213
303
      if (SrcTy->isIntegerTy())
3214
0
        return secondOp;
3215
303
      return 0;
3216
11
    case 6:
3217
      // No-op cast in first op implies secondOp as long as the SrcTy
3218
      // is a floating point.
3219
11
      if (SrcTy->isFloatingPointTy())
3220
0
        return secondOp;
3221
11
      return 0;
3222
228
    case 7: {
3223
      // Bail out if the inttoptr/ptrtoint optimization has been disabled.
3224
228
      if (DisableI2pP2iOpt)
3225
0
        return 0;
3226
3227
      // Cannot simplify if address spaces are different!
3228
228
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
3229
29
        return 0;
3230
3231
199
      unsigned MidSize = MidTy->getScalarSizeInBits();
3232
      // We can still fold this without knowing the actual sizes as long as we
3233
      // know that the intermediate pointer is the largest possible
3234
      // pointer size.
3235
      // FIXME: Is this always true?
3236
199
      if (MidSize == 64)
3237
88
        return Instruction::BitCast;
3238
3239
      // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
3240
111
      if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
3241
25
        return 0;
3242
86
      unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
3243
86
      if (MidSize >= PtrSize)
3244
39
        return Instruction::BitCast;
3245
47
      return 0;
3246
86
    }
3247
2.05k
    case 8: {
3248
      // ext, trunc -> bitcast,    if the SrcTy and DstTy are the same
3249
      // ext, trunc -> ext,        if sizeof(SrcTy) < sizeof(DstTy)
3250
      // ext, trunc -> trunc,      if sizeof(SrcTy) > sizeof(DstTy)
3251
2.05k
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
3252
2.05k
      unsigned DstSize = DstTy->getScalarSizeInBits();
3253
2.05k
      if (SrcTy == DstTy)
3254
1.87k
        return Instruction::BitCast;
3255
180
      if (SrcSize < DstSize)
3256
144
        return firstOp;
3257
36
      if (SrcSize > DstSize)
3258
36
        return secondOp;
3259
0
      return 0;
3260
36
    }
3261
208
    case 9:
3262
      // zext, sext -> zext, because sext can't sign extend after zext
3263
208
      return Instruction::ZExt;
3264
755
    case 11: {
3265
      // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
3266
755
      if (!MidIntPtrTy)
3267
0
        return 0;
3268
755
      unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
3269
755
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
3270
755
      unsigned DstSize = DstTy->getScalarSizeInBits();
3271
755
      if (SrcSize <= PtrSize && SrcSize == DstSize)
3272
639
        return Instruction::BitCast;
3273
116
      return 0;
3274
755
    }
3275
123
    case 12:
3276
      // addrspacecast, addrspacecast -> bitcast,       if SrcAS == DstAS
3277
      // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
3278
123
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
3279
24
        return Instruction::AddrSpaceCast;
3280
99
      return Instruction::BitCast;
3281
0
    case 13:
3282
      // FIXME: this state can be merged with (1), but the following assert
3283
      // is useful to check the correctness of the sequence due to semantic
3284
      // change of bitcast.
3285
0
      assert(
3286
0
        SrcTy->isPtrOrPtrVectorTy() &&
3287
0
        MidTy->isPtrOrPtrVectorTy() &&
3288
0
        DstTy->isPtrOrPtrVectorTy() &&
3289
0
        SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
3290
0
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
3291
0
        "Illegal addrspacecast, bitcast sequence!");
3292
      // Allowed, use first cast's opcode
3293
0
      return firstOp;
3294
0
    case 14:
3295
      // bitcast, addrspacecast -> addrspacecast
3296
0
      return Instruction::AddrSpaceCast;
3297
0
    case 15:
3298
      // FIXME: this state can be merged with (1), but the following assert
3299
      // is useful to check the correctness of the sequence due to semantic
3300
      // change of bitcast.
3301
0
      assert(
3302
0
        SrcTy->isIntOrIntVectorTy() &&
3303
0
        MidTy->isPtrOrPtrVectorTy() &&
3304
0
        DstTy->isPtrOrPtrVectorTy() &&
3305
0
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
3306
0
        "Illegal inttoptr, bitcast sequence!");
3307
      // Allowed, use first cast's opcode
3308
0
      return firstOp;
3309
0
    case 16:
3310
      // FIXME: this state can be merged with (2), but the following assert
3311
      // is useful to check the correctness of the sequence due to semantic
3312
      // change of bitcast.
3313
0
      assert(
3314
0
        SrcTy->isPtrOrPtrVectorTy() &&
3315
0
        MidTy->isPtrOrPtrVectorTy() &&
3316
0
        DstTy->isIntOrIntVectorTy() &&
3317
0
        SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
3318
0
        "Illegal bitcast, ptrtoint sequence!");
3319
      // Allowed, use second cast's opcode
3320
0
      return secondOp;
3321
10
    case 17:
3322
      // (sitofp (zext x)) -> (uitofp x)
3323
10
      return Instruction::UIToFP;
3324
0
    case 99:
3325
      // Cast combination can't happen (error in input). This is for all cases
3326
      // where the MidTy is not the same for the two cast instructions.
3327
0
      llvm_unreachable("Invalid Cast Combination");
3328
0
    default:
3329
0
      llvm_unreachable("Error in CastResults table!!!");
3330
11.8k
  }
3331
11.8k
}
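
To make the table above concrete (a sketch; the wrapper name is illustrative): the ZExt row / SExt column holds 9, so a zext i8 -> i16 followed by a sext i16 -> i32 collapses to a single zext, and the pointer-size arguments are irrelevant for that entry.

// Sketch only: the ZExt/SExt entry (case 9) of the table above in action.
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
#include <cassert>

static void eliminableCastPairExample() {
  llvm::LLVMContext Ctx;
  llvm::Type *I8 = llvm::Type::getInt8Ty(Ctx);
  llvm::Type *I16 = llvm::Type::getInt16Ty(Ctx);
  llvm::Type *I32 = llvm::Type::getInt32Ty(Ctx);
  // zext i8 -> i16 followed by sext i16 -> i32 folds to zext i8 -> i32.
  unsigned Folded = llvm::CastInst::isEliminableCastPair(
      llvm::Instruction::ZExt, llvm::Instruction::SExt, I8, I16, I32,
      /*SrcIntPtrTy=*/nullptr, /*MidIntPtrTy=*/nullptr, /*DstIntPtrTy=*/nullptr);
  assert(Folded == llvm::Instruction::ZExt);
  (void)Folded;
}
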
3332
3333
CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
3334
375k
  const Twine &Name, Instruction *InsertBefore) {
3335
375k
  assert(castIsValid(op, S, Ty) && "Invalid cast!");
3336
  // Construct and return the appropriate CastInst subclass
3337
0
  switch (op) {
3338
133k
  case Trunc:         return new TruncInst         (S, Ty, Name, InsertBefore);
3339
28.0k
  case ZExt:          return new ZExtInst          (S, Ty, Name, InsertBefore);
3340
88.1k
  case SExt:          return new SExtInst          (S, Ty, Name, InsertBefore);
3341
8.08k
  case FPTrunc:       return new FPTruncInst       (S, Ty, Name, InsertBefore);
3342
17.2k
  case FPExt:         return new FPExtInst         (S, Ty, Name, InsertBefore);
3343
3.59k
  case UIToFP:        return new UIToFPInst        (S, Ty, Name, InsertBefore);
3344
15.9k
  case SIToFP:        return new SIToFPInst        (S, Ty, Name, InsertBefore);
3345
1.34k
  case FPToUI:        return new FPToUIInst        (S, Ty, Name, InsertBefore);
3346
2.91k
  case FPToSI:        return new FPToSIInst        (S, Ty, Name, InsertBefore);
3347
8.40k
  case PtrToInt:      return new PtrToIntInst      (S, Ty, Name, InsertBefore);
3348
2.98k
  case IntToPtr:      return new IntToPtrInst      (S, Ty, Name, InsertBefore);
3349
62.7k
  case BitCast:       return new BitCastInst       (S, Ty, Name, InsertBefore);
3350
1.57k
  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore);
3351
0
  default: llvm_unreachable("Invalid opcode provided");
3352
375k
  }
3353
375k
}
3354
3355
CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
3356
526
  const Twine &Name, BasicBlock *InsertAtEnd) {
3357
526
  assert(castIsValid(op, S, Ty) && "Invalid cast!");
3358
  // Construct and return the appropriate CastInst subclass
3359
0
  switch (op) {
3360
0
  case Trunc:         return new TruncInst         (S, Ty, Name, InsertAtEnd);
3361
494
  case ZExt:          return new ZExtInst          (S, Ty, Name, InsertAtEnd);
3362
0
  case SExt:          return new SExtInst          (S, Ty, Name, InsertAtEnd);
3363
0
  case FPTrunc:       return new FPTruncInst       (S, Ty, Name, InsertAtEnd);
3364
0
  case FPExt:         return new FPExtInst         (S, Ty, Name, InsertAtEnd);
3365
26
  case UIToFP:        return new UIToFPInst        (S, Ty, Name, InsertAtEnd);
3366
6
  case SIToFP:        return new SIToFPInst        (S, Ty, Name, InsertAtEnd);
3367
0
  case FPToUI:        return new FPToUIInst        (S, Ty, Name, InsertAtEnd);
3368
0
  case FPToSI:        return new FPToSIInst        (S, Ty, Name, InsertAtEnd);
3369
0
  case PtrToInt:      return new PtrToIntInst      (S, Ty, Name, InsertAtEnd);
3370
0
  case IntToPtr:      return new IntToPtrInst      (S, Ty, Name, InsertAtEnd);
3371
0
  case BitCast:       return new BitCastInst       (S, Ty, Name, InsertAtEnd);
3372
0
  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertAtEnd);
3373
0
  default: llvm_unreachable("Invalid opcode provided");
3374
526
  }
3375
526
}
3376
3377
CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
3378
                                        const Twine &Name,
3379
1.01k
                                        Instruction *InsertBefore) {
3380
1.01k
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3381
0
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3382
1.01k
  return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
3383
1.01k
}
3384
3385
CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
3386
                                        const Twine &Name,
3387
0
                                        BasicBlock *InsertAtEnd) {
3388
0
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3389
0
    return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3390
0
  return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd);
3391
0
}
3392
3393
CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
3394
                                        const Twine &Name,
3395
0
                                        Instruction *InsertBefore) {
3396
0
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3397
0
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3398
0
  return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
3399
0
}
3400
3401
CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
3402
                                        const Twine &Name,
3403
0
                                        BasicBlock *InsertAtEnd) {
3404
0
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3405
0
    return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3406
0
  return Create(Instruction::SExt, S, Ty, Name, InsertAtEnd);
3407
0
}
3408
3409
CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
3410
                                         const Twine &Name,
3411
34
                                         Instruction *InsertBefore) {
3412
34
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3413
0
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3414
34
  return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
3415
34
}
3416
3417
CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
3418
                                         const Twine &Name,
3419
0
                                         BasicBlock *InsertAtEnd) {
3420
0
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3421
0
    return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3422
0
  return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd);
3423
0
}
3424
3425
CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
3426
                                      const Twine &Name,
3427
0
                                      BasicBlock *InsertAtEnd) {
3428
0
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3429
0
  assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3430
0
         "Invalid cast");
3431
0
  assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3432
0
  assert((!Ty->isVectorTy() ||
3433
0
          cast<VectorType>(Ty)->getElementCount() ==
3434
0
              cast<VectorType>(S->getType())->getElementCount()) &&
3435
0
         "Invalid cast");
3436
3437
0
  if (Ty->isIntOrIntVectorTy())
3438
0
    return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd);
3439
3440
0
  return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd);
3441
0
}
3442
3443
/// Create a BitCast or a PtrToInt cast instruction
3444
CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
3445
                                      const Twine &Name,
3446
9
                                      Instruction *InsertBefore) {
3447
9
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3448
0
  assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3449
9
         "Invalid cast");
3450
0
  assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3451
0
  assert((!Ty->isVectorTy() ||
3452
9
          cast<VectorType>(Ty)->getElementCount() ==
3453
9
              cast<VectorType>(S->getType())->getElementCount()) &&
3454
9
         "Invalid cast");
3455
3456
9
  if (Ty->isIntOrIntVectorTy())
3457
9
    return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3458
3459
0
  return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
3460
9
}
3461
3462
CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
3463
  Value *S, Type *Ty,
3464
  const Twine &Name,
3465
0
  BasicBlock *InsertAtEnd) {
3466
0
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3467
0
  assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3468
3469
0
  if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
3470
0
    return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd);
3471
3472
0
  return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3473
0
}
3474
3475
CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
3476
  Value *S, Type *Ty,
3477
  const Twine &Name,
3478
484
  Instruction *InsertBefore) {
3479
484
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3480
0
  assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3481
3482
484
  if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
3483
484
    return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
3484
3485
0
  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3486
484
}
3487
3488
CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty,
3489
                                           const Twine &Name,
3490
409
                                           Instruction *InsertBefore) {
3491
409
  if (S->getType()->isPointerTy() && Ty->isIntegerTy())
3492
47
    return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3493
362
  if (S->getType()->isIntegerTy() && Ty->isPointerTy())
3494
307
    return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
3495
3496
55
  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3497
362
}
3498
3499
CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
3500
                                      bool isSigned, const Twine &Name,
3501
30.6k
                                      Instruction *InsertBefore) {
3502
30.6k
  assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3503
30.6k
         "Invalid integer cast");
3504
0
  unsigned SrcBits = C->getType()->getScalarSizeInBits();
3505
30.6k
  unsigned DstBits = Ty->getScalarSizeInBits();
3506
30.6k
  Instruction::CastOps opcode =
3507
30.6k
    (SrcBits == DstBits ? Instruction::BitCast :
3508
30.6k
     (SrcBits > DstBits ? Instruction::Trunc :
3509
30.6k
      (isSigned ? Instruction::SExt : Instruction::ZExt)));
3510
30.6k
  return Create(opcode, C, Ty, Name, InsertBefore);
3511
30.6k
}
3512
3513
CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
3514
                                      bool isSigned, const Twine &Name,
3515
0
                                      BasicBlock *InsertAtEnd) {
3516
0
  assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3517
0
         "Invalid cast");
3518
0
  unsigned SrcBits = C->getType()->getScalarSizeInBits();
3519
0
  unsigned DstBits = Ty->getScalarSizeInBits();
3520
0
  Instruction::CastOps opcode =
3521
0
    (SrcBits == DstBits ? Instruction::BitCast :
3522
0
     (SrcBits > DstBits ? Instruction::Trunc :
3523
0
      (isSigned ? Instruction::SExt : Instruction::ZExt)));
3524
0
  return Create(opcode, C, Ty, Name, InsertAtEnd);
3525
0
}
3526
3527
CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
3528
                                 const Twine &Name,
3529
41
                                 Instruction *InsertBefore) {
3530
41
  assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3531
41
         "Invalid cast");
3532
0
  unsigned SrcBits = C->getType()->getScalarSizeInBits();
3533
41
  unsigned DstBits = Ty->getScalarSizeInBits();
3534
41
  Instruction::CastOps opcode =
3535
41
    (SrcBits == DstBits ? Instruction::BitCast :
3536
41
     (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3537
41
  return Create(opcode, C, Ty, Name, InsertBefore);
3538
41
}
3539
3540
CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
3541
                                 const Twine &Name,
3542
0
                                 BasicBlock *InsertAtEnd) {
3543
0
  assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3544
0
         "Invalid cast");
3545
0
  unsigned SrcBits = C->getType()->getScalarSizeInBits();
3546
0
  unsigned DstBits = Ty->getScalarSizeInBits();
3547
0
  Instruction::CastOps opcode =
3548
0
    (SrcBits == DstBits ? Instruction::BitCast :
3549
0
     (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3550
0
  return Create(opcode, C, Ty, Name, InsertAtEnd);
3551
0
}
3552
3553
13.0k
bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
3554
13.0k
  if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
3555
93
    return false;
3556
3557
12.9k
  if (SrcTy == DestTy)
3558
7.17k
    return true;
3559
3560
5.82k
  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
3561
170
    if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
3562
78
      if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3563
        // An element by element cast. Valid if casting the elements is valid.
3564
62
        SrcTy = SrcVecTy->getElementType();
3565
62
        DestTy = DestVecTy->getElementType();
3566
62
      }
3567
78
    }
3568
170
  }
3569
3570
5.82k
  if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
3571
70
    if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
3572
16
      return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
3573
16
    }
3574
70
  }
3575
3576
5.80k
  TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
3577
5.80k
  TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3578
3579
  // Could still have vectors of pointers if the number of elements doesn't
3580
  // match
3581
5.80k
  if (SrcBits.getKnownMinValue() == 0 || DestBits.getKnownMinValue() == 0)
3582
184
    return false;
3583
3584
5.62k
  if (SrcBits != DestBits)
3585
5.24k
    return false;
3586
3587
379
  if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy())
3588
0
    return false;
3589
3590
379
  return true;
3591
379
}
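
A short sketch of the width rule above (illustrative wrapper name): <4 x i16> and i64 both occupy 64 bits and are therefore bitcast-compatible, while a 32-bit destination is rejected.

// Sketch only: equal-width first-class types are bitcast-compatible.
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include <cassert>

static void bitCastableExample() {
  llvm::LLVMContext Ctx;
  llvm::Type *V4I16 =
      llvm::FixedVectorType::get(llvm::Type::getInt16Ty(Ctx), 4);
  llvm::Type *I64 = llvm::Type::getInt64Ty(Ctx);
  // Both sides occupy 64 bits, so the bit pattern can be reinterpreted.
  assert(llvm::CastInst::isBitCastable(V4I16, I64));
  assert(!llvm::CastInst::isBitCastable(V4I16, llvm::Type::getInt32Ty(Ctx)));
  (void)V4I16;
  (void)I64;
}
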
3592
3593
bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy,
3594
13.3k
                                          const DataLayout &DL) {
3595
  // ptrtoint and inttoptr are not allowed on non-integral pointers
3596
13.3k
  if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
3597
279
    if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
3598
119
      return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3599
119
              !DL.isNonIntegralPointerType(PtrTy));
3600
13.2k
  if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
3601
316
    if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
3602
126
      return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3603
126
              !DL.isNonIntegralPointerType(PtrTy));
3604
3605
13.0k
  return isBitCastable(SrcTy, DestTy);
3606
13.2k
}
3607
3608
// Provide a way to get a "cast" where the cast opcode is inferred from the
3609
// types and size of the operand. This, basically, is a parallel of the
3610
// logic in the castIsValid function below.  This axiom should hold:
3611
//   castIsValid( getCastOpcode(Val, Ty), Val, Ty)
3612
// should not assert in castIsValid. In other words, this produces a "correct"
3613
// casting opcode for the arguments passed to it.
3614
Instruction::CastOps
3615
CastInst::getCastOpcode(
3616
312k
  const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
3617
312k
  Type *SrcTy = Src->getType();
3618
3619
312k
  assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
3620
312k
         "Only first class types are castable!");
3621
3622
312k
  if (SrcTy == DestTy)
3623
266k
    return BitCast;
3624
3625
  // FIXME: Check address space sizes here
3626
45.7k
  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
3627
253
    if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
3628
253
      if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3629
        // An element by element cast.  Find the appropriate opcode based on the
3630
        // element types.
3631
253
        SrcTy = SrcVecTy->getElementType();
3632
253
        DestTy = DestVecTy->getElementType();
3633
253
      }
3634
3635
  // Get the bit sizes, we'll need these
3636
45.7k
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
3637
45.7k
  unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3638
3639
  // Run through the possibilities ...
3640
45.7k
  if (DestTy->isIntegerTy()) {                      // Casting to integral
3641
45.6k
    if (SrcTy->isIntegerTy()) {                     // Casting from integral
3642
45.6k
      if (DestBits < SrcBits)
3643
6.61k
        return Trunc;                               // int -> smaller int
3644
39.0k
      else if (DestBits > SrcBits) {                // its an extension
3645
39.0k
        if (SrcIsSigned)
3646
39.0k
          return SExt;                              // signed -> SEXT
3647
3
        else
3648
3
          return ZExt;                              // unsigned -> ZEXT
3649
39.0k
      } else {
3650
0
        return BitCast;                             // Same size, No-op cast
3651
0
      }
3652
45.6k
    } else if (SrcTy->isFloatingPointTy()) {        // Casting from floating pt
3653
0
      if (DestIsSigned)
3654
0
        return FPToSI;                              // FP -> sint
3655
0
      else
3656
0
        return FPToUI;                              // FP -> uint
3657
0
    } else if (SrcTy->isVectorTy()) {
3658
0
      assert(DestBits == SrcBits &&
3659
0
             "Casting vector to integer of different width");
3660
0
      return BitCast;                             // Same size, no-op cast
3661
0
    } else {
3662
0
      assert(SrcTy->isPointerTy() &&
3663
0
             "Casting from a value that is not first-class type");
3664
0
      return PtrToInt;                              // ptr -> int
3665
0
    }
3666
45.6k
  } else if (DestTy->isFloatingPointTy()) {         // Casting to floating pt
3667
0
    if (SrcTy->isIntegerTy()) {                     // Casting from integral
3668
0
      if (SrcIsSigned)
3669
0
        return SIToFP;                              // sint -> FP
3670
0
      else
3671
0
        return UIToFP;                              // uint -> FP
3672
0
    } else if (SrcTy->isFloatingPointTy()) {        // Casting from floating pt
3673
0
      if (DestBits < SrcBits) {
3674
0
        return FPTrunc;                             // FP -> smaller FP
3675
0
      } else if (DestBits > SrcBits) {
3676
0
        return FPExt;                               // FP -> larger FP
3677
0
      } else  {
3678
0
        return BitCast;                             // same size, no-op cast
3679
0
      }
3680
0
    } else if (SrcTy->isVectorTy()) {
3681
0
      assert(DestBits == SrcBits &&
3682
0
             "Casting vector to floating point of different width");
3683
0
      return BitCast;                             // same size, no-op cast
3684
0
    }
3685
0
    llvm_unreachable("Casting pointer or non-first class to float");
3686
28
  } else if (DestTy->isVectorTy()) {
3687
0
    assert(DestBits == SrcBits &&
3688
0
           "Illegal cast to vector (wrong type or size)");
3689
0
    return BitCast;
3690
28
  } else if (DestTy->isPointerTy()) {
3691
28
    if (SrcTy->isPointerTy()) {
3692
0
      if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
3693
0
        return AddrSpaceCast;
3694
0
      return BitCast;                               // ptr -> ptr
3695
28
    } else if (SrcTy->isIntegerTy()) {
3696
28
      return IntToPtr;                              // int -> ptr
3697
28
    }
3698
0
    llvm_unreachable("Casting pointer to other than pointer or int");
3699
0
  } else if (DestTy->isX86_MMXTy()) {
3700
0
    if (SrcTy->isVectorTy()) {
3701
0
      assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");
3702
0
      return BitCast;                               // 64-bit vector to MMX
3703
0
    }
3704
0
    llvm_unreachable("Illegal cast to X86_MMX");
3705
0
  }
3706
0
  llvm_unreachable("Casting to type that is not first-class");
3707
0
}
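
A minimal sketch of the decision tree above (illustrative wrapper name): widening an i8 value to i32 with SrcIsSigned set selects SExt.

// Sketch only: getCastOpcode picks SExt for a signed widening to i32.
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
#include <cassert>

static void castOpcodeExample() {
  llvm::LLVMContext Ctx;
  llvm::Constant *C = llvm::ConstantInt::get(llvm::Type::getInt8Ty(Ctx), 1);
  llvm::Instruction::CastOps Op = llvm::CastInst::getCastOpcode(
      C, /*SrcIsSigned=*/true, llvm::Type::getInt32Ty(Ctx),
      /*DestIsSigned=*/true);
  assert(Op == llvm::Instruction::SExt);
  (void)Op;
}
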
3708
3709
//===----------------------------------------------------------------------===//
3710
//                    CastInst SubClass Constructors
3711
//===----------------------------------------------------------------------===//
3712
3713
/// Check that the construction parameters for a CastInst are correct. This
3714
/// could be broken out into the separate constructors but it is useful to have
3715
/// it in one place and to eliminate the redundant code for getting the sizes
3716
/// of the types involved.
3717
bool
3718
1.24M
CastInst::castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy) {
3719
1.24M
  if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
3720
1.24M
      SrcTy->isAggregateType() || DstTy->isAggregateType())
3721
368
    return false;
3722
3723
  // Get the size of the types in bits, and whether we are dealing
3724
  // with vector types; we'll need this later.
3725
1.24M
  bool SrcIsVec = isa<VectorType>(SrcTy);
3726
1.24M
  bool DstIsVec = isa<VectorType>(DstTy);
3727
1.24M
  unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
3728
1.24M
  unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();
3729
3730
  // If these are vector types, get the lengths of the vectors (using zero for
3731
  // scalar types means that checking that vector lengths match also checks that
3732
  // scalars are not being converted to vectors or vectors to scalars).
3733
1.24M
  ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
3734
1.24M
                                : ElementCount::getFixed(0);
3735
1.24M
  ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
3736
1.24M
                                : ElementCount::getFixed(0);
3737
3738
  // Switch on the opcode provided
3739
1.24M
  switch (op) {
3740
0
  default: return false; // This is an input error
3741
365k
  case Instruction::Trunc:
3742
365k
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3743
365k
           SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3744
146k
  case Instruction::ZExt:
3745
146k
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3746
146k
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3747
197k
  case Instruction::SExt:
3748
197k
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3749
197k
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3750
23.8k
  case Instruction::FPTrunc:
3751
23.8k
    return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3752
23.8k
           SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3753
51.6k
  case Instruction::FPExt:
3754
51.6k
    return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3755
51.6k
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3756
10.7k
  case Instruction::UIToFP:
3757
57.2k
  case Instruction::SIToFP:
3758
57.2k
    return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
3759
57.2k
           SrcEC == DstEC;
3760
4.00k
  case Instruction::FPToUI:
3761
12.6k
  case Instruction::FPToSI:
3762
12.6k
    return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
3763
12.6k
           SrcEC == DstEC;
3764
34.4k
  case Instruction::PtrToInt:
3765
34.4k
    if (SrcEC != DstEC)
3766
5
      return false;
3767
34.4k
    return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
3768
18.6k
  case Instruction::IntToPtr:
3769
18.6k
    if (SrcEC != DstEC)
3770
4
      return false;
3771
18.6k
    return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
3772
311k
  case Instruction::BitCast: {
3773
311k
    PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3774
311k
    PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3775
3776
    // BitCast implies a no-op cast of type only. No bits change.
3777
    // However, you can't cast pointers to anything but pointers.
3778
311k
    if (!SrcPtrTy != !DstPtrTy)
3779
29
      return false;
3780
3781
    // For non-pointer cases, the cast is okay if the source and destination bit
3782
    // widths are identical.
3783
311k
    if (!SrcPtrTy)
3784
234k
      return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
3785
3786
    // If both are pointers then the address spaces must match.
3787
76.8k
    if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
3788
4
      return false;
3789
3790
    // A vector of pointers must have the same number of elements.
3791
76.8k
    if (SrcIsVec && DstIsVec)
3792
35
      return SrcEC == DstEC;
3793
76.8k
    if (SrcIsVec)
3794
25
      return SrcEC == ElementCount::getFixed(1);
3795
76.8k
    if (DstIsVec)
3796
0
      return DstEC == ElementCount::getFixed(1);
3797
3798
76.8k
    return true;
3799
76.8k
  }
3800
21.0k
  case Instruction::AddrSpaceCast: {
3801
21.0k
    PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3802
21.0k
    if (!SrcPtrTy)
3803
13
      return false;
3804
3805
21.0k
    PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3806
21.0k
    if (!DstPtrTy)
3807
3
      return false;
3808
3809
21.0k
    if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
3810
2
      return false;
3811
3812
21.0k
    return SrcEC == DstEC;
3813
21.0k
  }
3814
1.24M
  }
3815
1.24M
}
3816
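To illustrate the rules encoded above, a short sketch that probes castIsValid with a few type pairs; it assumes LLVM headers and libraries are on hand and is not part of the measured source.

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/LLVMContext.h"
#include <cassert>
using namespace llvm;

int main() {
  LLVMContext Ctx;
  Type *I64 = Type::getInt64Ty(Ctx), *I32 = Type::getInt32Ty(Ctx);
  // Trunc must go to a strictly narrower integer type.
  assert(CastInst::castIsValid(Instruction::Trunc, I64, I32));
  assert(!CastInst::castIsValid(Instruction::Trunc, I32, I64));
  // A scalar cannot be truncated to a vector: element counts must agree.
  Type *V4I32 = FixedVectorType::get(I32, 4);
  assert(!CastInst::castIsValid(Instruction::Trunc, I64, V4I32));
  return 0;
}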
3817
TruncInst::TruncInst(
3818
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3819
135k
) : CastInst(Ty, Trunc, S, Name, InsertBefore) {
3820
135k
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3821
135k
}
3822
3823
TruncInst::TruncInst(
3824
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3825
0
) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) {
3826
0
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3827
0
}
3828
3829
ZExtInst::ZExtInst(
3830
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3831
92.7k
)  : CastInst(Ty, ZExt, S, Name, InsertBefore) {
3832
92.7k
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
3833
92.7k
}
3834
3835
ZExtInst::ZExtInst(
3836
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3837
494
)  : CastInst(Ty, ZExt, S, Name, InsertAtEnd) {
3838
494
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
3839
494
}
3840
SExtInst::SExtInst(
3841
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3842
89.6k
) : CastInst(Ty, SExt, S, Name, InsertBefore) {
3843
89.6k
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
3844
89.6k
}
3845
3846
SExtInst::SExtInst(
3847
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3848
0
)  : CastInst(Ty, SExt, S, Name, InsertAtEnd) {
3849
0
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
3850
0
}
3851
3852
FPTruncInst::FPTruncInst(
3853
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3854
8.09k
) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
3855
8.09k
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3856
8.09k
}
3857
3858
FPTruncInst::FPTruncInst(
3859
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3860
0
) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) {
3861
0
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3862
0
}
3863
3864
FPExtInst::FPExtInst(
3865
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3866
17.3k
) : CastInst(Ty, FPExt, S, Name, InsertBefore) {
3867
17.3k
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3868
17.3k
}
3869
3870
FPExtInst::FPExtInst(
3871
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3872
0
) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) {
3873
0
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3874
0
}
3875
3876
UIToFPInst::UIToFPInst(
3877
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3878
3.59k
) : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
3879
3.59k
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3880
3.59k
}
3881
3882
UIToFPInst::UIToFPInst(
3883
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3884
26
) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) {
3885
26
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3886
26
}
3887
3888
SIToFPInst::SIToFPInst(
3889
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3890
15.9k
) : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
3891
15.9k
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
3892
15.9k
}
3893
3894
SIToFPInst::SIToFPInst(
3895
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3896
6
) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) {
3897
6
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
3898
6
}
3899
3900
FPToUIInst::FPToUIInst(
3901
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3902
1.34k
) : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
3903
1.34k
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
3904
1.34k
}
3905
3906
FPToUIInst::FPToUIInst(
3907
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3908
0
) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) {
3909
0
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
3910
0
}
3911
3912
FPToSIInst::FPToSIInst(
3913
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3914
2.93k
) : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
3915
2.93k
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
3916
2.93k
}
3917
3918
FPToSIInst::FPToSIInst(
3919
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3920
0
) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) {
3921
0
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
3922
0
}
3923
3924
PtrToIntInst::PtrToIntInst(
3925
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3926
8.66k
) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
3927
8.66k
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
3928
8.66k
}
3929
3930
PtrToIntInst::PtrToIntInst(
3931
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3932
0
) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) {
3933
0
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
3934
0
}
3935
3936
IntToPtrInst::IntToPtrInst(
3937
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3938
3.18k
) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
3939
3.18k
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
3940
3.18k
}
3941
3942
IntToPtrInst::IntToPtrInst(
3943
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3944
0
) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) {
3945
0
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
3946
0
}
3947
3948
BitCastInst::BitCastInst(
3949
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3950
70.5k
) : CastInst(Ty, BitCast, S, Name, InsertBefore) {
3951
70.5k
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
3952
70.5k
}
3953
3954
BitCastInst::BitCastInst(
3955
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3956
0
) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) {
3957
0
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
3958
0
}
3959
3960
AddrSpaceCastInst::AddrSpaceCastInst(
3961
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3962
3.34k
) : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
3963
3.34k
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
3964
3.34k
}
3965
3966
AddrSpaceCastInst::AddrSpaceCastInst(
3967
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3968
0
) : CastInst(Ty, AddrSpaceCast, S, Name, InsertAtEnd) {
3969
0
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
3970
0
}
3971
3972
//===----------------------------------------------------------------------===//
3973
//                               CmpInst Classes
3974
//===----------------------------------------------------------------------===//
3975
3976
CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
3977
                 Value *RHS, const Twine &Name, Instruction *InsertBefore,
3978
                 Instruction *FlagsSource)
3979
  : Instruction(ty, op,
3980
                OperandTraits<CmpInst>::op_begin(this),
3981
                OperandTraits<CmpInst>::operands(this),
3982
1.91M
                InsertBefore) {
3983
1.91M
  Op<0>() = LHS;
3984
1.91M
  Op<1>() = RHS;
3985
1.91M
  setPredicate((Predicate)predicate);
3986
1.91M
  setName(Name);
3987
1.91M
  if (FlagsSource)
3988
280
    copyIRFlags(FlagsSource);
3989
1.91M
}
3990
3991
CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
3992
                 Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd)
3993
  : Instruction(ty, op,
3994
                OperandTraits<CmpInst>::op_begin(this),
3995
                OperandTraits<CmpInst>::operands(this),
3996
236
                InsertAtEnd) {
3997
236
  Op<0>() = LHS;
3998
236
  Op<1>() = RHS;
3999
236
  setPredicate((Predicate)predicate);
4000
236
  setName(Name);
4001
236
}
4002
4003
CmpInst *
4004
CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
4005
32.8k
                const Twine &Name, Instruction *InsertBefore) {
4006
32.8k
  if (Op == Instruction::ICmp) {
4007
24.0k
    if (InsertBefore)
4008
0
      return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
4009
0
                          S1, S2, Name);
4010
24.0k
    else
4011
24.0k
      return new ICmpInst(CmpInst::Predicate(predicate),
4012
24.0k
                          S1, S2, Name);
4013
24.0k
  }
4014
4015
8.78k
  if (InsertBefore)
4016
0
    return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
4017
0
                        S1, S2, Name);
4018
8.78k
  else
4019
8.78k
    return new FCmpInst(CmpInst::Predicate(predicate),
4020
8.78k
                        S1, S2, Name);
4021
8.78k
}
4022
4023
CmpInst *
4024
CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
4025
236
                const Twine &Name, BasicBlock *InsertAtEnd) {
4026
236
  if (Op == Instruction::ICmp) {
4027
230
    return new ICmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
4028
230
                        S1, S2, Name);
4029
230
  }
4030
6
  return new FCmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
4031
6
                      S1, S2, Name);
4032
236
}
4033
4034
1.39k
void CmpInst::swapOperands() {
4035
1.39k
  if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
4036
1.39k
    IC->swapOperands();
4037
0
  else
4038
0
    cast<FCmpInst>(this)->swapOperands();
4039
1.39k
}
4040
4041
0
bool CmpInst::isCommutative() const {
4042
0
  if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
4043
0
    return IC->isCommutative();
4044
0
  return cast<FCmpInst>(this)->isCommutative();
4045
0
}
4046
4047
898k
bool CmpInst::isEquality(Predicate P) {
4048
898k
  if (ICmpInst::isIntPredicate(P))
4049
887k
    return ICmpInst::isEquality(P);
4050
10.9k
  if (FCmpInst::isFPPredicate(P))
4051
10.9k
    return FCmpInst::isEquality(P);
4052
0
  llvm_unreachable("Unsupported predicate kind");
4053
0
}
4054
4055
6.35M
CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) {
4056
6.35M
  switch (pred) {
4057
0
    default: llvm_unreachable("Unknown cmp predicate!");
4058
417k
    case ICMP_EQ: return ICMP_NE;
4059
638k
    case ICMP_NE: return ICMP_EQ;
4060
332k
    case ICMP_UGT: return ICMP_ULE;
4061
1.21M
    case ICMP_ULT: return ICMP_UGE;
4062
446k
    case ICMP_UGE: return ICMP_ULT;
4063
576k
    case ICMP_ULE: return ICMP_UGT;
4064
705k
    case ICMP_SGT: return ICMP_SLE;
4065
846k
    case ICMP_SLT: return ICMP_SGE;
4066
742k
    case ICMP_SGE: return ICMP_SLT;
4067
416k
    case ICMP_SLE: return ICMP_SGT;
4068
4069
429
    case FCMP_OEQ: return FCMP_UNE;
4070
625
    case FCMP_ONE: return FCMP_UEQ;
4071
1.56k
    case FCMP_OGT: return FCMP_ULE;
4072
1.51k
    case FCMP_OLT: return FCMP_UGE;
4073
938
    case FCMP_OGE: return FCMP_ULT;
4074
1.17k
    case FCMP_OLE: return FCMP_UGT;
4075
1.31k
    case FCMP_UEQ: return FCMP_ONE;
4076
1.31k
    case FCMP_UNE: return FCMP_OEQ;
4077
1.83k
    case FCMP_UGT: return FCMP_OLE;
4078
2.82k
    case FCMP_ULT: return FCMP_OGE;
4079
885
    case FCMP_UGE: return FCMP_OLT;
4080
970
    case FCMP_ULE: return FCMP_OGT;
4081
165
    case FCMP_ORD: return FCMP_UNO;
4082
227
    case FCMP_UNO: return FCMP_ORD;
4083
6
    case FCMP_TRUE: return FCMP_FALSE;
4084
8
    case FCMP_FALSE: return FCMP_TRUE;
4085
6.35M
  }
4086
6.35M
}
4087
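The mapping above can be checked directly; a tiny sketch, illustrative only and assuming an LLVM build environment.

#include "llvm/IR/InstrTypes.h"
#include <cassert>
using namespace llvm;

int main() {
  // The inverse predicate answers "when is the comparison false?".
  assert(CmpInst::getInversePredicate(CmpInst::ICMP_SLT) == CmpInst::ICMP_SGE);
  // Ordered FP predicates invert to their unordered complements, so NaN
  // operands flip the result from false to true.
  assert(CmpInst::getInversePredicate(CmpInst::FCMP_OLT) == CmpInst::FCMP_UGE);
  return 0;
}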
4088
1.01k
StringRef CmpInst::getPredicateName(Predicate Pred) {
4089
1.01k
  switch (Pred) {
4090
0
  default:                   return "unknown";
4091
0
  case FCmpInst::FCMP_FALSE: return "false";
4092
12
  case FCmpInst::FCMP_OEQ:   return "oeq";
4093
20
  case FCmpInst::FCMP_OGT:   return "ogt";
4094
2
  case FCmpInst::FCMP_OGE:   return "oge";
4095
3
  case FCmpInst::FCMP_OLT:   return "olt";
4096
56
  case FCmpInst::FCMP_OLE:   return "ole";
4097
1
  case FCmpInst::FCMP_ONE:   return "one";
4098
2
  case FCmpInst::FCMP_ORD:   return "ord";
4099
40
  case FCmpInst::FCMP_UNO:   return "uno";
4100
7
  case FCmpInst::FCMP_UEQ:   return "ueq";
4101
7
  case FCmpInst::FCMP_UGT:   return "ugt";
4102
9
  case FCmpInst::FCMP_UGE:   return "uge";
4103
2
  case FCmpInst::FCMP_ULT:   return "ult";
4104
11
  case FCmpInst::FCMP_ULE:   return "ule";
4105
30
  case FCmpInst::FCMP_UNE:   return "une";
4106
56
  case FCmpInst::FCMP_TRUE:  return "true";
4107
100
  case ICmpInst::ICMP_EQ:    return "eq";
4108
78
  case ICmpInst::ICMP_NE:    return "ne";
4109
54
  case ICmpInst::ICMP_SGT:   return "sgt";
4110
156
  case ICmpInst::ICMP_SGE:   return "sge";
4111
70
  case ICmpInst::ICMP_SLT:   return "slt";
4112
38
  case ICmpInst::ICMP_SLE:   return "sle";
4113
23
  case ICmpInst::ICMP_UGT:   return "ugt";
4114
148
  case ICmpInst::ICMP_UGE:   return "uge";
4115
41
  case ICmpInst::ICMP_ULT:   return "ult";
4116
53
  case ICmpInst::ICMP_ULE:   return "ule";
4117
1.01k
  }
4118
1.01k
}
4119
4120
1.01k
raw_ostream &llvm::operator<<(raw_ostream &OS, CmpInst::Predicate Pred) {
4121
1.01k
  OS << CmpInst::getPredicateName(Pred);
4122
1.01k
  return OS;
4123
1.01k
}
4124
4125
13.1k
ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {
4126
13.1k
  switch (pred) {
4127
0
    default: llvm_unreachable("Unknown icmp predicate!");
4128
309
    case ICMP_EQ: case ICMP_NE:
4129
313
    case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
4130
313
       return pred;
4131
3.01k
    case ICMP_UGT: return ICMP_SGT;
4132
9.07k
    case ICMP_ULT: return ICMP_SLT;
4133
335
    case ICMP_UGE: return ICMP_SGE;
4134
414
    case ICMP_ULE: return ICMP_SLE;
4135
13.1k
  }
4136
13.1k
}
4137
4138
39.0k
ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) {
4139
39.0k
  switch (pred) {
4140
0
    default: llvm_unreachable("Unknown icmp predicate!");
4141
15.6k
    case ICMP_EQ: case ICMP_NE:
4142
27.7k
    case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
4143
27.7k
       return pred;
4144
3.10k
    case ICMP_SGT: return ICMP_UGT;
4145
3.51k
    case ICMP_SLT: return ICMP_ULT;
4146
2.44k
    case ICMP_SGE: return ICMP_UGE;
4147
2.28k
    case ICMP_SLE: return ICMP_ULE;
4148
39.0k
  }
4149
39.0k
}
4150
4151
2.39M
CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) {
4152
2.39M
  switch (pred) {
4153
0
    default: llvm_unreachable("Unknown cmp predicate!");
4154
494k
    case ICMP_EQ: case ICMP_NE:
4155
494k
      return pred;
4156
293k
    case ICMP_SGT: return ICMP_SLT;
4157
314k
    case ICMP_SLT: return ICMP_SGT;
4158
111k
    case ICMP_SGE: return ICMP_SLE;
4159
124k
    case ICMP_SLE: return ICMP_SGE;
4160
196k
    case ICMP_UGT: return ICMP_ULT;
4161
507k
    case ICMP_ULT: return ICMP_UGT;
4162
117k
    case ICMP_UGE: return ICMP_ULE;
4163
93.1k
    case ICMP_ULE: return ICMP_UGE;
4164
4165
673
    case FCMP_FALSE: case FCMP_TRUE:
4166
20.4k
    case FCMP_OEQ: case FCMP_ONE:
4167
38.3k
    case FCMP_UEQ: case FCMP_UNE:
4168
64.7k
    case FCMP_ORD: case FCMP_UNO:
4169
64.7k
      return pred;
4170
8.58k
    case FCMP_OGT: return FCMP_OLT;
4171
8.91k
    case FCMP_OLT: return FCMP_OGT;
4172
12.1k
    case FCMP_OGE: return FCMP_OLE;
4173
11.4k
    case FCMP_OLE: return FCMP_OGE;
4174
9.68k
    case FCMP_UGT: return FCMP_ULT;
4175
9.09k
    case FCMP_ULT: return FCMP_UGT;
4176
8.72k
    case FCMP_UGE: return FCMP_ULE;
4177
6.28k
    case FCMP_ULE: return FCMP_UGE;
4178
2.39M
  }
4179
2.39M
}
4180
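Swapping is easy to confuse with inverting; a brief sketch contrasting the two helpers, illustrative and assuming LLVM headers are on the include path.

#include "llvm/IR/InstrTypes.h"
#include <cassert>
using namespace llvm;

int main() {
  // Swapping operands mirrors the relation: (a <s b) is the same as (b >s a).
  assert(CmpInst::getSwappedPredicate(CmpInst::ICMP_SLT) == CmpInst::ICMP_SGT);
  // The inverse negates the result instead.
  assert(CmpInst::getInversePredicate(CmpInst::ICMP_SLT) == CmpInst::ICMP_SGE);
  // Equality and (un)orderedness are symmetric, so they are their own swap.
  assert(CmpInst::getSwappedPredicate(CmpInst::FCMP_UNO) == CmpInst::FCMP_UNO);
  return 0;
}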
4181
6.29k
bool CmpInst::isNonStrictPredicate(Predicate pred) {
4182
6.29k
  switch (pred) {
4183
1.36k
  case ICMP_SGE:
4184
3.04k
  case ICMP_SLE:
4185
4.76k
  case ICMP_UGE:
4186
6.29k
  case ICMP_ULE:
4187
6.29k
  case FCMP_OGE:
4188
6.29k
  case FCMP_OLE:
4189
6.29k
  case FCMP_UGE:
4190
6.29k
  case FCMP_ULE:
4191
6.29k
    return true;
4192
0
  default:
4193
0
    return false;
4194
6.29k
  }
4195
6.29k
}
4196
4197
10.7k
bool CmpInst::isStrictPredicate(Predicate pred) {
4198
10.7k
  switch (pred) {
4199
556
  case ICMP_SGT:
4200
2.07k
  case ICMP_SLT:
4201
2.20k
  case ICMP_UGT:
4202
4.44k
  case ICMP_ULT:
4203
4.44k
  case FCMP_OGT:
4204
4.44k
  case FCMP_OLT:
4205
4.44k
  case FCMP_UGT:
4206
4.44k
  case FCMP_ULT:
4207
4.44k
    return true;
4208
6.29k
  default:
4209
6.29k
    return false;
4210
10.7k
  }
4211
10.7k
}
4212
4213
8.62k
CmpInst::Predicate CmpInst::getStrictPredicate(Predicate pred) {
4214
8.62k
  switch (pred) {
4215
1.38k
  case ICMP_SGE:
4216
1.38k
    return ICMP_SGT;
4217
1.69k
  case ICMP_SLE:
4218
1.69k
    return ICMP_SLT;
4219
1.75k
  case ICMP_UGE:
4220
1.75k
    return ICMP_UGT;
4221
1.56k
  case ICMP_ULE:
4222
1.56k
    return ICMP_ULT;
4223
0
  case FCMP_OGE:
4224
0
    return FCMP_OGT;
4225
0
  case FCMP_OLE:
4226
0
    return FCMP_OLT;
4227
0
  case FCMP_UGE:
4228
0
    return FCMP_UGT;
4229
0
  case FCMP_ULE:
4230
0
    return FCMP_ULT;
4231
2.22k
  default:
4232
2.22k
    return pred;
4233
8.62k
  }
4234
8.62k
}
4235
4236
202k
CmpInst::Predicate CmpInst::getNonStrictPredicate(Predicate pred) {
4237
202k
  switch (pred) {
4238
32.6k
  case ICMP_SGT:
4239
32.6k
    return ICMP_SGE;
4240
35.8k
  case ICMP_SLT:
4241
35.8k
    return ICMP_SLE;
4242
23.6k
  case ICMP_UGT:
4243
23.6k
    return ICMP_UGE;
4244
45.9k
  case ICMP_ULT:
4245
45.9k
    return ICMP_ULE;
4246
0
  case FCMP_OGT:
4247
0
    return FCMP_OGE;
4248
0
  case FCMP_OLT:
4249
0
    return FCMP_OLE;
4250
0
  case FCMP_UGT:
4251
0
    return FCMP_UGE;
4252
0
  case FCMP_ULT:
4253
0
    return FCMP_ULE;
4254
64.4k
  default:
4255
64.4k
    return pred;
4256
202k
  }
4257
202k
}
4258
4259
10.7k
CmpInst::Predicate CmpInst::getFlippedStrictnessPredicate(Predicate pred) {
4260
10.7k
  assert(CmpInst::isRelational(pred) && "Call only with relational predicate!");
4261
4262
10.7k
  if (isStrictPredicate(pred))
4263
4.44k
    return getNonStrictPredicate(pred);
4264
6.29k
  if (isNonStrictPredicate(pred))
4265
6.29k
    return getStrictPredicate(pred);
4266
4267
0
  llvm_unreachable("Unknown predicate!");
4268
0
}
4269
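A small sketch of the strictness helpers above, again illustrative and assuming an LLVM development setup.

#include "llvm/IR/InstrTypes.h"
#include <cassert>
using namespace llvm;

int main() {
  // "<" and "<=" differ only in strictness; these helpers map between them.
  assert(CmpInst::getNonStrictPredicate(CmpInst::ICMP_SLT) == CmpInst::ICMP_SLE);
  assert(CmpInst::getStrictPredicate(CmpInst::ICMP_SLE) == CmpInst::ICMP_SLT);
  // getFlippedStrictnessPredicate goes either way; equality predicates have
  // no strict/non-strict counterpart and would trip its assertion.
  assert(CmpInst::getFlippedStrictnessPredicate(CmpInst::ICMP_UGE) ==
         CmpInst::ICMP_UGT);
  return 0;
}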
4270
167k
CmpInst::Predicate CmpInst::getSignedPredicate(Predicate pred) {
4271
167k
  assert(CmpInst::isUnsigned(pred) && "Call only with unsigned predicates!");
4272
4273
0
  switch (pred) {
4274
0
  default:
4275
0
    llvm_unreachable("Unknown predicate!");
4276
128k
  case CmpInst::ICMP_ULT:
4277
128k
    return CmpInst::ICMP_SLT;
4278
2.28k
  case CmpInst::ICMP_ULE:
4279
2.28k
    return CmpInst::ICMP_SLE;
4280
33.2k
  case CmpInst::ICMP_UGT:
4281
33.2k
    return CmpInst::ICMP_SGT;
4282
3.88k
  case CmpInst::ICMP_UGE:
4283
3.88k
    return CmpInst::ICMP_SGE;
4284
167k
  }
4285
167k
}
4286
4287
143k
CmpInst::Predicate CmpInst::getUnsignedPredicate(Predicate pred) {
4288
143k
  assert(CmpInst::isSigned(pred) && "Call only with signed predicates!");
4289
4290
0
  switch (pred) {
4291
0
  default:
4292
0
    llvm_unreachable("Unknown predicate!");
4293
47.4k
  case CmpInst::ICMP_SLT:
4294
47.4k
    return CmpInst::ICMP_ULT;
4295
3.03k
  case CmpInst::ICMP_SLE:
4296
3.03k
    return CmpInst::ICMP_ULE;
4297
90.4k
  case CmpInst::ICMP_SGT:
4298
90.4k
    return CmpInst::ICMP_UGT;
4299
2.90k
  case CmpInst::ICMP_SGE:
4300
2.90k
    return CmpInst::ICMP_UGE;
4301
143k
  }
4302
143k
}
4303
4304
1.92M
bool CmpInst::isUnsigned(Predicate predicate) {
4305
1.92M
  switch (predicate) {
4306
717k
    default: return false;
4307
900k
    case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT:
4308
1.21M
    case ICmpInst::ICMP_UGE: return true;
4309
1.92M
  }
4310
1.92M
}
4311
4312
3.49M
bool CmpInst::isSigned(Predicate predicate) {
4313
3.49M
  switch (predicate) {
4314
1.93M
    default: return false;
4315
1.23M
    case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT:
4316
1.56M
    case ICmpInst::ICMP_SGE: return true;
4317
3.49M
  }
4318
3.49M
}
4319
4320
bool ICmpInst::compare(const APInt &LHS, const APInt &RHS,
4321
392k
                       ICmpInst::Predicate Pred) {
4322
392k
  assert(ICmpInst::isIntPredicate(Pred) && "Only for integer predicates!");
4323
0
  switch (Pred) {
4324
63.1k
  case ICmpInst::Predicate::ICMP_EQ:
4325
63.1k
    return LHS.eq(RHS);
4326
56.2k
  case ICmpInst::Predicate::ICMP_NE:
4327
56.2k
    return LHS.ne(RHS);
4328
46.3k
  case ICmpInst::Predicate::ICMP_UGT:
4329
46.3k
    return LHS.ugt(RHS);
4330
18.5k
  case ICmpInst::Predicate::ICMP_UGE:
4331
18.5k
    return LHS.uge(RHS);
4332
28.6k
  case ICmpInst::Predicate::ICMP_ULT:
4333
28.6k
    return LHS.ult(RHS);
4334
40.2k
  case ICmpInst::Predicate::ICMP_ULE:
4335
40.2k
    return LHS.ule(RHS);
4336
34.2k
  case ICmpInst::Predicate::ICMP_SGT:
4337
34.2k
    return LHS.sgt(RHS);
4338
41.0k
  case ICmpInst::Predicate::ICMP_SGE:
4339
41.0k
    return LHS.sge(RHS);
4340
35.8k
  case ICmpInst::Predicate::ICMP_SLT:
4341
35.8k
    return LHS.slt(RHS);
4342
27.8k
  case ICmpInst::Predicate::ICMP_SLE:
4343
27.8k
    return LHS.sle(RHS);
4344
0
  default:
4345
0
    llvm_unreachable("Unexpected non-integer predicate.");
4346
392k
  };
4347
0
}
4348
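ICmpInst::compare evaluates a predicate on constant APInt values, which makes the signed/unsigned distinction easy to demonstrate; the values below are illustrative and the sketch assumes LLVM headers and libraries.

#include "llvm/ADT/APInt.h"
#include "llvm/IR/Instructions.h"
#include <cassert>
using namespace llvm;

int main() {
  // -1 as a 32-bit pattern is the largest unsigned value, so the signed and
  // unsigned orderings disagree.
  APInt MinusOne(32, -1, /*isSigned=*/true);
  APInt One(32, 1);
  assert(ICmpInst::compare(MinusOne, One, ICmpInst::ICMP_SLT));
  assert(!ICmpInst::compare(MinusOne, One, ICmpInst::ICMP_ULT));
  return 0;
}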
4349
bool FCmpInst::compare(const APFloat &LHS, const APFloat &RHS,
4350
76.2k
                       FCmpInst::Predicate Pred) {
4351
76.2k
  APFloat::cmpResult R = LHS.compare(RHS);
4352
76.2k
  switch (Pred) {
4353
0
  default:
4354
0
    llvm_unreachable("Invalid FCmp Predicate");
4355
0
  case FCmpInst::FCMP_FALSE:
4356
0
    return false;
4357
0
  case FCmpInst::FCMP_TRUE:
4358
0
    return true;
4359
6.14k
  case FCmpInst::FCMP_UNO:
4360
6.14k
    return R == APFloat::cmpUnordered;
4361
2.83k
  case FCmpInst::FCMP_ORD:
4362
2.83k
    return R != APFloat::cmpUnordered;
4363
5.71k
  case FCmpInst::FCMP_UEQ:
4364
5.71k
    return R == APFloat::cmpUnordered || R == APFloat::cmpEqual;
4365
6.46k
  case FCmpInst::FCMP_OEQ:
4366
6.46k
    return R == APFloat::cmpEqual;
4367
5.32k
  case FCmpInst::FCMP_UNE:
4368
5.32k
    return R != APFloat::cmpEqual;
4369
8.08k
  case FCmpInst::FCMP_ONE:
4370
8.08k
    return R == APFloat::cmpLessThan || R == APFloat::cmpGreaterThan;
4371
4.51k
  case FCmpInst::FCMP_ULT:
4372
4.51k
    return R == APFloat::cmpUnordered || R == APFloat::cmpLessThan;
4373
5.94k
  case FCmpInst::FCMP_OLT:
4374
5.94k
    return R == APFloat::cmpLessThan;
4375
5.39k
  case FCmpInst::FCMP_UGT:
4376
5.39k
    return R == APFloat::cmpUnordered || R == APFloat::cmpGreaterThan;
4377
6.29k
  case FCmpInst::FCMP_OGT:
4378
6.29k
    return R == APFloat::cmpGreaterThan;
4379
4.45k
  case FCmpInst::FCMP_ULE:
4380
4.45k
    return R != APFloat::cmpGreaterThan;
4381
6.12k
  case FCmpInst::FCMP_OLE:
4382
6.12k
    return R == APFloat::cmpLessThan || R == APFloat::cmpEqual;
4383
2.70k
  case FCmpInst::FCMP_UGE:
4384
2.70k
    return R != APFloat::cmpLessThan;
4385
6.30k
  case FCmpInst::FCMP_OGE:
4386
6.30k
    return R == APFloat::cmpGreaterThan || R == APFloat::cmpEqual;
4387
76.2k
  }
4388
76.2k
}
4389
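Its floating-point counterpart makes the ordered/unordered split visible once a NaN is involved; a brief sketch, illustrative and assuming an LLVM build environment.

#include "llvm/ADT/APFloat.h"
#include "llvm/IR/Instructions.h"
#include <cassert>
using namespace llvm;

int main() {
  APFloat One(1.0), NaN = APFloat::getNaN(APFloat::IEEEdouble());
  // Any comparison against NaN is unordered: ordered predicates are false,
  // unordered predicates are true.
  assert(!FCmpInst::compare(One, NaN, FCmpInst::FCMP_OLT));
  assert(FCmpInst::compare(One, NaN, FCmpInst::FCMP_ULT));
  // With ordinary values the O/U variants agree.
  assert(FCmpInst::compare(One, APFloat(2.0), FCmpInst::FCMP_OLT));
  return 0;
}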
4390
311k
CmpInst::Predicate CmpInst::getFlippedSignednessPredicate(Predicate pred) {
4391
311k
  assert(CmpInst::isRelational(pred) &&
4392
311k
         "Call only with non-equality predicates!");
4393
4394
311k
  if (isSigned(pred))
4395
143k
    return getUnsignedPredicate(pred);
4396
167k
  if (isUnsigned(pred))
4397
167k
    return getSignedPredicate(pred);
4398
4399
0
  llvm_unreachable("Unknown predicate!");
4400
0
}
4401
4402
5.49k
bool CmpInst::isOrdered(Predicate predicate) {
4403
5.49k
  switch (predicate) {
4404
2.30k
    default: return false;
4405
688
    case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT:
4406
2.54k
    case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE:
4407
3.18k
    case FCmpInst::FCMP_ORD: return true;
4408
5.49k
  }
4409
5.49k
}
4410
4411
6.08k
bool CmpInst::isUnordered(Predicate predicate) {
4412
6.08k
  switch (predicate) {
4413
3.33k
    default: return false;
4414
1.04k
    case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT:
4415
2.33k
    case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE:
4416
2.74k
    case FCmpInst::FCMP_UNO: return true;
4417
6.08k
  }
4418
6.08k
}
4419
4420
329k
bool CmpInst::isTrueWhenEqual(Predicate predicate) {
4421
329k
  switch(predicate) {
4422
241k
    default: return false;
4423
85.8k
    case ICMP_EQ:   case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
4424
87.9k
    case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
4425
329k
  }
4426
329k
}
4427
4428
139k
bool CmpInst::isFalseWhenEqual(Predicate predicate) {
4429
139k
  switch(predicate) {
4430
46.9k
  case ICMP_NE:    case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
4431
48.7k
  case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
4432
90.4k
  default: return false;
4433
139k
  }
4434
139k
}
4435
4436
25.3k
bool CmpInst::isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2) {
4437
  // If the predicates match, then we know the first condition implies the
4438
  // second is true.
4439
25.3k
  if (Pred1 == Pred2)
4440
5.15k
    return true;
4441
4442
20.2k
  switch (Pred1) {
4443
12.0k
  default:
4444
12.0k
    break;
4445
12.0k
  case ICMP_EQ:
4446
    // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
4447
646
    return Pred2 == ICMP_UGE || Pred2 == ICMP_ULE || Pred2 == ICMP_SGE ||
4448
646
           Pred2 == ICMP_SLE;
4449
488
  case ICMP_UGT: // A >u B implies A != B and A >=u B are true.
4450
488
    return Pred2 == ICMP_NE || Pred2 == ICMP_UGE;
4451
1.79k
  case ICMP_ULT: // A <u B implies A != B and A <=u B are true.
4452
1.79k
    return Pred2 == ICMP_NE || Pred2 == ICMP_ULE;
4453
1.11k
  case ICMP_SGT: // A >s B implies A != B and A >=s B are true.
4454
1.11k
    return Pred2 == ICMP_NE || Pred2 == ICMP_SGE;
4455
4.12k
  case ICMP_SLT: // A <s B implies A != B and A <=s B are true.
4456
4.12k
    return Pred2 == ICMP_NE || Pred2 == ICMP_SLE;
4457
20.2k
  }
4458
12.0k
  return false;
4459
20.2k
}
4460
4461
11.1k
bool CmpInst::isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2) {
4462
11.1k
  return isImpliedTrueByMatchingCmp(Pred1, getInversePredicate(Pred2));
4463
11.1k
}
4464
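Both helpers assume the two comparisons share the same operands; a compact sketch, illustrative and assuming LLVM headers are available.

#include "llvm/IR/InstrTypes.h"
#include <cassert>
using namespace llvm;

int main() {
  // With identical operands, (a <s b) being true implies (a <=s b) is true...
  assert(CmpInst::isImpliedTrueByMatchingCmp(CmpInst::ICMP_SLT,
                                             CmpInst::ICMP_SLE));
  // ...and implies (a == b) is false, via the inverse-predicate helper.
  assert(CmpInst::isImpliedFalseByMatchingCmp(CmpInst::ICMP_SLT,
                                              CmpInst::ICMP_EQ));
  return 0;
}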
4465
//===----------------------------------------------------------------------===//
4466
//                        SwitchInst Implementation
4467
//===----------------------------------------------------------------------===//
4468
4469
20.2k
void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
4470
20.2k
  assert(Value && Default && NumReserved);
4471
0
  ReservedSpace = NumReserved;
4472
20.2k
  setNumHungOffUseOperands(2);
4473
20.2k
  allocHungoffUses(ReservedSpace);
4474
4475
20.2k
  Op<0>() = Value;
4476
20.2k
  Op<1>() = Default;
4477
20.2k
}
4478
4479
/// SwitchInst ctor - Create a new switch instruction, specifying a value to
4480
/// switch on and a default destination.  The number of additional cases can
4481
/// be specified here to make memory allocation more efficient.  This
4482
/// constructor can also autoinsert before another instruction.
4483
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
4484
                       Instruction *InsertBefore)
4485
    : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
4486
19.8k
                  nullptr, 0, InsertBefore) {
4487
19.8k
  init(Value, Default, 2+NumCases*2);
4488
19.8k
}
4489
4490
/// SwitchInst ctor - Create a new switch instruction, specifying a value to
4491
/// switch on and a default destination.  The number of additional cases can
4492
/// be specified here to make memory allocation more efficient.  This
4493
/// constructor also autoinserts at the end of the specified BasicBlock.
4494
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
4495
                       BasicBlock *InsertAtEnd)
4496
    : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
4497
315
                  nullptr, 0, InsertAtEnd) {
4498
315
  init(Value, Default, 2+NumCases*2);
4499
315
}
4500
4501
SwitchInst::SwitchInst(const SwitchInst &SI)
4502
9
    : Instruction(SI.getType(), Instruction::Switch, nullptr, 0) {
4503
9
  init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
4504
9
  setNumHungOffUseOperands(SI.getNumOperands());
4505
9
  Use *OL = getOperandList();
4506
9
  const Use *InOL = SI.getOperandList();
4507
26
  for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
4508
17
    OL[i] = InOL[i];
4509
17
    OL[i+1] = InOL[i+1];
4510
17
  }
4511
9
  SubclassOptionalData = SI.SubclassOptionalData;
4512
9
}
4513
4514
/// addCase - Add an entry to the switch instruction...
4515
///
4516
118k
void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) {
4517
118k
  unsigned NewCaseIdx = getNumCases();
4518
118k
  unsigned OpNo = getNumOperands();
4519
118k
  if (OpNo+2 > ReservedSpace)
4520
403
    growOperands();  // Get more space!
4521
  // Initialize some new operands.
4522
118k
  assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
4523
0
  setNumHungOffUseOperands(OpNo+2);
4524
118k
  CaseHandle Case(this, NewCaseIdx);
4525
118k
  Case.setValue(OnVal);
4526
118k
  Case.setSuccessor(Dest);
4527
118k
}
4528
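Putting init and addCase together, a stand-alone sketch that builds a one-case switch; the module, function, and block names are illustrative rather than taken from the file above, and an LLVM development setup is assumed.

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);
  Type *I32 = Type::getInt32Ty(Ctx);
  Function *F = Function::Create(
      FunctionType::get(Type::getVoidTy(Ctx), {I32}, false),
      Function::ExternalLinkage, "f", &M);
  BasicBlock *Entry = BasicBlock::Create(Ctx, "entry", F);
  BasicBlock *Def = BasicBlock::Create(Ctx, "default", F);
  BasicBlock *OnOne = BasicBlock::Create(Ctx, "on.one", F);
  // Reserving one case up front sizes the hung-off operand list so that
  // addCase does not need to call growOperands().
  SwitchInst *SI = SwitchInst::Create(F->getArg(0), Def, /*NumCases=*/1, Entry);
  SI->addCase(ConstantInt::get(cast<IntegerType>(I32), 1), OnOne);
  return 0;
}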
4529
/// removeCase - This method removes the specified case and its successor
4530
/// from the switch instruction.
4531
19.7k
SwitchInst::CaseIt SwitchInst::removeCase(CaseIt I) {
4532
19.7k
  unsigned idx = I->getCaseIndex();
4533
4534
19.7k
  assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");
4535
4536
0
  unsigned NumOps = getNumOperands();
4537
19.7k
  Use *OL = getOperandList();
4538
4539
  // Overwrite this case with the end of the list.
4540
19.7k
  if (2 + (idx + 1) * 2 != NumOps) {
4541
15.2k
    OL[2 + idx * 2] = OL[NumOps - 2];
4542
15.2k
    OL[2 + idx * 2 + 1] = OL[NumOps - 1];
4543
15.2k
  }
4544
4545
  // Nuke the last value.
4546
19.7k
  OL[NumOps-2].set(nullptr);
4547
19.7k
  OL[NumOps-2+1].set(nullptr);
4548
19.7k
  setNumHungOffUseOperands(NumOps-2);
4549
4550
19.7k
  return CaseIt(this, idx);
4551
19.7k
}
4552
4553
/// growOperands - grow operands - This grows the operand list in response
4554
/// to a push_back style of operation.  This grows the reserved space to three times the current operand count.
4555
///
4556
403
void SwitchInst::growOperands() {
4557
403
  unsigned e = getNumOperands();
4558
403
  unsigned NumOps = e*3;
4559
4560
403
  ReservedSpace = NumOps;
4561
403
  growHungoffUses(ReservedSpace);
4562
403
}
4563
4564
257
MDNode *SwitchInstProfUpdateWrapper::buildProfBranchWeightsMD() {
4565
257
  assert(Changed && "called only if metadata has changed");
4566
4567
257
  if (!Weights)
4568
0
    return nullptr;
4569
4570
257
  assert(SI.getNumSuccessors() == Weights->size() &&
4571
257
         "num of prof branch_weights must accord with num of successors");
4572
4573
257
  bool AllZeroes = all_of(*Weights, [](uint32_t W) { return W == 0; });
4574
4575
257
  if (AllZeroes || Weights->size() < 2)
4576
62
    return nullptr;
4577
4578
195
  return MDBuilder(SI.getParent()->getContext()).createBranchWeights(*Weights);
4579
257
}
4580
4581
7.84k
void SwitchInstProfUpdateWrapper::init() {
4582
7.84k
  MDNode *ProfileData = getBranchWeightMDNode(SI);
4583
7.84k
  if (!ProfileData)
4584
7.19k
    return;
4585
4586
647
  if (ProfileData->getNumOperands() != SI.getNumSuccessors() + 1) {
4587
0
    llvm_unreachable("number of prof branch_weights metadata operands does "
4588
0
                     "not correspond to number of succesors");
4589
0
  }
4590
4591
647
  SmallVector<uint32_t, 8> Weights;
4592
647
  if (!extractBranchWeights(ProfileData, Weights))
4593
0
    return;
4594
647
  this->Weights = std::move(Weights);
4595
647
}
4596
4597
SwitchInst::CaseIt
4598
9.73k
SwitchInstProfUpdateWrapper::removeCase(SwitchInst::CaseIt I) {
4599
9.73k
  if (Weights) {
4600
325
    assert(SI.getNumSuccessors() == Weights->size() &&
4601
325
           "num of prof branch_weights must accord with num of successors");
4602
0
    Changed = true;
4603
    // Copy the last case to the place of the removed one and shrink.
4604
    // This is tightly coupled with the way SwitchInst::removeCase() removes
4605
    // the cases in SwitchInst::removeCase(CaseIt).
4606
325
    (*Weights)[I->getCaseIndex() + 1] = Weights->back();
4607
325
    Weights->pop_back();
4608
325
  }
4609
0
  return SI.removeCase(I);
4610
9.73k
}
4611
4612
void SwitchInstProfUpdateWrapper::addCase(
4613
    ConstantInt *OnVal, BasicBlock *Dest,
4614
1.19k
    SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
4615
1.19k
  SI.addCase(OnVal, Dest);
4616
4617
1.19k
  if (!Weights && W && *W) {
4618
0
    Changed = true;
4619
0
    Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
4620
0
    (*Weights)[SI.getNumSuccessors() - 1] = *W;
4621
1.19k
  } else if (Weights) {
4622
9
    Changed = true;
4623
9
    Weights->push_back(W.value_or(0));
4624
9
  }
4625
1.19k
  if (Weights)
4626
9
    assert(SI.getNumSuccessors() == Weights->size() &&
4627
1.19k
           "num of prof branch_weights must accord with num of successors");
4628
1.19k
}
4629
4630
Instruction::InstListType::iterator
4631
135
SwitchInstProfUpdateWrapper::eraseFromParent() {
4632
  // Instruction is erased. Mark as unchanged to not touch it in the destructor.
4633
135
  Changed = false;
4634
135
  if (Weights)
4635
0
    Weights->resize(0);
4636
135
  return SI.eraseFromParent();
4637
135
}
4638
4639
SwitchInstProfUpdateWrapper::CaseWeightOpt
4640
1.31k
SwitchInstProfUpdateWrapper::getSuccessorWeight(unsigned idx) {
4641
1.31k
  if (!Weights)
4642
1.30k
    return std::nullopt;
4643
9
  return (*Weights)[idx];
4644
1.31k
}
4645
4646
void SwitchInstProfUpdateWrapper::setSuccessorWeight(
4647
406
    unsigned idx, SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
4648
406
  if (!W)
4649
372
    return;
4650
4651
34
  if (!Weights && *W)
4652
0
    Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
4653
4654
34
  if (Weights) {
4655
9
    auto &OldW = (*Weights)[idx];
4656
9
    if (*W != OldW) {
4657
9
      Changed = true;
4658
9
      OldW = *W;
4659
9
    }
4660
9
  }
4661
34
}
4662
4663
SwitchInstProfUpdateWrapper::CaseWeightOpt
4664
SwitchInstProfUpdateWrapper::getSuccessorWeight(const SwitchInst &SI,
4665
483
                                                unsigned idx) {
4666
483
  if (MDNode *ProfileData = getBranchWeightMDNode(SI))
4667
0
    if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
4668
0
      return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1))
4669
0
          ->getValue()
4670
0
          .getZExtValue();
4671
4672
483
  return std::nullopt;
4673
483
}
4674
4675
//===----------------------------------------------------------------------===//
4676
//                        IndirectBrInst Implementation
4677
//===----------------------------------------------------------------------===//
4678
4679
5.44k
void IndirectBrInst::init(Value *Address, unsigned NumDests) {
4680
5.44k
  assert(Address && Address->getType()->isPointerTy() &&
4681
5.44k
         "Address of indirectbr must be a pointer");
4682
0
  ReservedSpace = 1+NumDests;
4683
5.44k
  setNumHungOffUseOperands(1);
4684
5.44k
  allocHungoffUses(ReservedSpace);
4685
4686
5.44k
  Op<0>() = Address;
4687
5.44k
}
4688
4689
4690
/// growOperands - grow operands - This grows the operand list in response
4691
/// to a push_back style of operation.  This grows the reserved space to twice the current operand count.
4692
///
4693
0
void IndirectBrInst::growOperands() {
4694
0
  unsigned e = getNumOperands();
4695
0
  unsigned NumOps = e*2;
4696
4697
0
  ReservedSpace = NumOps;
4698
0
  growHungoffUses(ReservedSpace);
4699
0
}
4700
4701
IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
4702
                               Instruction *InsertBefore)
4703
    : Instruction(Type::getVoidTy(Address->getContext()),
4704
5.44k
                  Instruction::IndirectBr, nullptr, 0, InsertBefore) {
4705
5.44k
  init(Address, NumCases);
4706
5.44k
}
4707
4708
IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
4709
                               BasicBlock *InsertAtEnd)
4710
    : Instruction(Type::getVoidTy(Address->getContext()),
4711
0
                  Instruction::IndirectBr, nullptr, 0, InsertAtEnd) {
4712
0
  init(Address, NumCases);
4713
0
}
4714
4715
IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
4716
    : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
4717
0
                  nullptr, IBI.getNumOperands()) {
4718
0
  allocHungoffUses(IBI.getNumOperands());
4719
0
  Use *OL = getOperandList();
4720
0
  const Use *InOL = IBI.getOperandList();
4721
0
  for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
4722
0
    OL[i] = InOL[i];
4723
0
  SubclassOptionalData = IBI.SubclassOptionalData;
4724
0
}
4725
4726
/// addDestination - Add a destination.
4727
///
4728
17.1k
void IndirectBrInst::addDestination(BasicBlock *DestBB) {
4729
17.1k
  unsigned OpNo = getNumOperands();
4730
17.1k
  if (OpNo+1 > ReservedSpace)
4731
0
    growOperands();  // Get more space!
4732
  // Initialize some new operands.
4733
17.1k
  assert(OpNo < ReservedSpace && "Growing didn't work!");
4734
0
  setNumHungOffUseOperands(OpNo+1);
4735
17.1k
  getOperandList()[OpNo] = DestBB;
4736
17.1k
}
4737
4738
/// removeDestination - This method removes the specified successor from the
4739
/// indirectbr instruction.
4740
3.47k
void IndirectBrInst::removeDestination(unsigned idx) {
4741
3.47k
  assert(idx < getNumOperands()-1 && "Successor index out of range!");
4742
4743
0
  unsigned NumOps = getNumOperands();
4744
3.47k
  Use *OL = getOperandList();
4745
4746
  // Replace this value with the last one.
4747
3.47k
  OL[idx+1] = OL[NumOps-1];
4748
4749
  // Nuke the last value.
4750
3.47k
  OL[NumOps-1].set(nullptr);
4751
3.47k
  setNumHungOffUseOperands(NumOps-1);
4752
3.47k
}
4753
4754
//===----------------------------------------------------------------------===//
4755
//                            FreezeInst Implementation
4756
//===----------------------------------------------------------------------===//
4757
4758
FreezeInst::FreezeInst(Value *S,
4759
                       const Twine &Name, Instruction *InsertBefore)
4760
21.0k
    : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
4761
21.0k
  setName(Name);
4762
21.0k
}
4763
4764
FreezeInst::FreezeInst(Value *S,
4765
                       const Twine &Name, BasicBlock *InsertAtEnd)
4766
0
    : UnaryInstruction(S->getType(), Freeze, S, InsertAtEnd) {
4767
0
  setName(Name);
4768
0
}
4769
4770
//===----------------------------------------------------------------------===//
4771
//                           cloneImpl() implementations
4772
//===----------------------------------------------------------------------===//
4773
4774
// Define these methods here so vtables don't get emitted into every translation
4775
// unit that uses these classes.
4776
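These overrides are reached through Instruction::clone(); a small sketch of that round trip, illustrative and assuming LLVM headers and libraries, with the detached copy left to the caller to insert or delete.

#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include <cassert>
using namespace llvm;

int main() {
  LLVMContext Ctx;
  Constant *C = ConstantInt::get(Type::getInt32Ty(Ctx), 7);
  ICmpInst *Cmp = new ICmpInst(ICmpInst::ICMP_EQ, C, C, "cmp");
  // clone() dispatches to the per-class cloneImpl() and returns a copy with
  // no parent block.
  Instruction *Copy = Cmp->clone();
  assert(Copy->getOpcode() == Instruction::ICmp && !Copy->getParent());
  Copy->deleteValue();
  Cmp->deleteValue();
  return 0;
}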
4777
66.2k
GetElementPtrInst *GetElementPtrInst::cloneImpl() const {
4778
66.2k
  return new (getNumOperands()) GetElementPtrInst(*this);
4779
66.2k
}
4780
4781
0
UnaryOperator *UnaryOperator::cloneImpl() const {
4782
0
  return Create(getOpcode(), Op<0>());
4783
0
}
4784
4785
83.4k
BinaryOperator *BinaryOperator::cloneImpl() const {
4786
83.4k
  return Create(getOpcode(), Op<0>(), Op<1>());
4787
83.4k
}
4788
4789
9.83k
FCmpInst *FCmpInst::cloneImpl() const {
4790
9.83k
  return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
4791
9.83k
}
4792
4793
46.6k
ICmpInst *ICmpInst::cloneImpl() const {
4794
46.6k
  return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
4795
46.6k
}
4796
4797
947
ExtractValueInst *ExtractValueInst::cloneImpl() const {
4798
947
  return new ExtractValueInst(*this);
4799
947
}
4800
4801
1.17k
InsertValueInst *InsertValueInst::cloneImpl() const {
4802
1.17k
  return new InsertValueInst(*this);
4803
1.17k
}
4804
4805
11.2k
AllocaInst *AllocaInst::cloneImpl() const {
4806
11.2k
  AllocaInst *Result = new AllocaInst(getAllocatedType(), getAddressSpace(),
4807
11.2k
                                      getOperand(0), getAlign());
4808
11.2k
  Result->setUsedWithInAlloca(isUsedWithInAlloca());
4809
11.2k
  Result->setSwiftError(isSwiftError());
4810
11.2k
  return Result;
4811
11.2k
}
4812
4813
18.5k
LoadInst *LoadInst::cloneImpl() const {
4814
18.5k
  return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
4815
18.5k
                      getAlign(), getOrdering(), getSyncScopeID());
4816
18.5k
}
4817
4818
54.9k
StoreInst *StoreInst::cloneImpl() const {
4819
54.9k
  return new StoreInst(getOperand(0), getOperand(1), isVolatile(), getAlign(),
4820
54.9k
                       getOrdering(), getSyncScopeID());
4821
54.9k
}
4822
4823
0
AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
4824
0
  AtomicCmpXchgInst *Result = new AtomicCmpXchgInst(
4825
0
      getOperand(0), getOperand(1), getOperand(2), getAlign(),
4826
0
      getSuccessOrdering(), getFailureOrdering(), getSyncScopeID());
4827
0
  Result->setVolatile(isVolatile());
4828
0
  Result->setWeak(isWeak());
4829
0
  return Result;
4830
0
}
4831
4832
0
AtomicRMWInst *AtomicRMWInst::cloneImpl() const {
4833
0
  AtomicRMWInst *Result =
4834
0
      new AtomicRMWInst(getOperation(), getOperand(0), getOperand(1),
4835
0
                        getAlign(), getOrdering(), getSyncScopeID());
4836
0
  Result->setVolatile(isVolatile());
4837
0
  return Result;
4838
0
}
4839
4840
0
FenceInst *FenceInst::cloneImpl() const {
4841
0
  return new FenceInst(getContext(), getOrdering(), getSyncScopeID());
4842
0
}
4843
4844
572
TruncInst *TruncInst::cloneImpl() const {
4845
572
  return new TruncInst(getOperand(0), getType());
4846
572
}
4847
4848
187
ZExtInst *ZExtInst::cloneImpl() const {
4849
187
  return new ZExtInst(getOperand(0), getType());
4850
187
}
4851
4852
997
SExtInst *SExtInst::cloneImpl() const {
4853
997
  return new SExtInst(getOperand(0), getType());
4854
997
}
4855
4856
3
FPTruncInst *FPTruncInst::cloneImpl() const {
4857
3
  return new FPTruncInst(getOperand(0), getType());
4858
3
}
4859
4860
5
FPExtInst *FPExtInst::cloneImpl() const {
4861
5
  return new FPExtInst(getOperand(0), getType());
4862
5
}
4863
4864
0
UIToFPInst *UIToFPInst::cloneImpl() const {
4865
0
  return new UIToFPInst(getOperand(0), getType());
4866
0
}
4867
4868
15
SIToFPInst *SIToFPInst::cloneImpl() const {
4869
15
  return new SIToFPInst(getOperand(0), getType());
4870
15
}
4871
4872
0
FPToUIInst *FPToUIInst::cloneImpl() const {
4873
0
  return new FPToUIInst(getOperand(0), getType());
4874
0
}
4875
4876
20
FPToSIInst *FPToSIInst::cloneImpl() const {
4877
20
  return new FPToSIInst(getOperand(0), getType());
4878
20
}
4879
4880
258
PtrToIntInst *PtrToIntInst::cloneImpl() const {
4881
258
  return new PtrToIntInst(getOperand(0), getType());
4882
258
}
4883
4884
19
IntToPtrInst *IntToPtrInst::cloneImpl() const {
4885
19
  return new IntToPtrInst(getOperand(0), getType());
4886
19
}
4887
4888
325
BitCastInst *BitCastInst::cloneImpl() const {
4889
325
  return new BitCastInst(getOperand(0), getType());
4890
325
}
4891
4892
1
AddrSpaceCastInst *AddrSpaceCastInst::cloneImpl() const {
4893
1
  return new AddrSpaceCastInst(getOperand(0), getType());
4894
1
}
4895
4896
3.45k
CallInst *CallInst::cloneImpl() const {
4897
3.45k
  if (hasOperandBundles()) {
4898
20
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
4899
20
    return new(getNumOperands(), DescriptorBytes) CallInst(*this);
4900
20
  }
4901
3.43k
  return  new(getNumOperands()) CallInst(*this);
4902
3.45k
}
4903
4904
670
SelectInst *SelectInst::cloneImpl() const {
4905
670
  return SelectInst::Create(getOperand(0), getOperand(1), getOperand(2));
4906
670
}
4907
4908
0
VAArgInst *VAArgInst::cloneImpl() const {
4909
0
  return new VAArgInst(getOperand(0), getType());
4910
0
}
4911
4912
519
ExtractElementInst *ExtractElementInst::cloneImpl() const {
4913
519
  return ExtractElementInst::Create(getOperand(0), getOperand(1));
4914
519
}
4915
4916
892
InsertElementInst *InsertElementInst::cloneImpl() const {
4917
892
  return InsertElementInst::Create(getOperand(0), getOperand(1), getOperand(2));
4918
892
}
4919
4920
376
ShuffleVectorInst *ShuffleVectorInst::cloneImpl() const {
4921
376
  return new ShuffleVectorInst(getOperand(0), getOperand(1), getShuffleMask());
4922
376
}
4923
4924
21.7k
PHINode *PHINode::cloneImpl() const { return new PHINode(*this); }
4925
4926
0
LandingPadInst *LandingPadInst::cloneImpl() const {
4927
0
  return new LandingPadInst(*this);
4928
0
}
4929
4930
3.72k
ReturnInst *ReturnInst::cloneImpl() const {
4931
3.72k
  return new(getNumOperands()) ReturnInst(*this);
4932
3.72k
}
4933
4934
52.9k
BranchInst *BranchInst::cloneImpl() const {
4935
52.9k
  return new(getNumOperands()) BranchInst(*this);
4936
52.9k
}
4937
4938
9
SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }
4939
4940
0
IndirectBrInst *IndirectBrInst::cloneImpl() const {
4941
0
  return new IndirectBrInst(*this);
4942
0
}
4943
4944
0
InvokeInst *InvokeInst::cloneImpl() const {
4945
0
  if (hasOperandBundles()) {
4946
0
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
4947
0
    return new(getNumOperands(), DescriptorBytes) InvokeInst(*this);
4948
0
  }
4949
0
  return new(getNumOperands()) InvokeInst(*this);
4950
0
}
4951
4952
0
CallBrInst *CallBrInst::cloneImpl() const {
4953
0
  if (hasOperandBundles()) {
4954
0
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
4955
0
    return new (getNumOperands(), DescriptorBytes) CallBrInst(*this);
4956
0
  }
4957
0
  return new (getNumOperands()) CallBrInst(*this);
4958
0
}
4959
4960
1
ResumeInst *ResumeInst::cloneImpl() const { return new (1) ResumeInst(*this); }
4961
4962
8
CleanupReturnInst *CleanupReturnInst::cloneImpl() const {
4963
8
  return new (getNumOperands()) CleanupReturnInst(*this);
4964
8
}
4965
4966
0
CatchReturnInst *CatchReturnInst::cloneImpl() const {
4967
0
  return new (getNumOperands()) CatchReturnInst(*this);
4968
0
}
4969
4970
0
CatchSwitchInst *CatchSwitchInst::cloneImpl() const {
4971
0
  return new CatchSwitchInst(*this);
4972
0
}
4973
4974
0
FuncletPadInst *FuncletPadInst::cloneImpl() const {
4975
0
  return new (getNumOperands()) FuncletPadInst(*this);
4976
0
}
4977
4978
112
UnreachableInst *UnreachableInst::cloneImpl() const {
4979
112
  LLVMContext &Context = getContext();
4980
112
  return new UnreachableInst(Context);
4981
112
}
4982
4983
59
FreezeInst *FreezeInst::cloneImpl() const {
4984
59
  return new FreezeInst(getOperand(0));
4985
59
}