Coverage Report

Created: 2024-01-17 10:31

/src/llvm-project/llvm/lib/Analysis/LazyValueInfo.cpp
Columns: Line, Count, Source
1
//===- LazyValueInfo.cpp - Value constraint analysis ------------*- C++ -*-===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
//
9
// This file defines the interface for lazy computation of value constraint
10
// information.
11
//
12
//===----------------------------------------------------------------------===//
13
14
#include "llvm/Analysis/LazyValueInfo.h"
15
#include "llvm/ADT/DenseSet.h"
16
#include "llvm/ADT/STLExtras.h"
17
#include "llvm/Analysis/AssumptionCache.h"
18
#include "llvm/Analysis/ConstantFolding.h"
19
#include "llvm/Analysis/InstructionSimplify.h"
20
#include "llvm/Analysis/TargetLibraryInfo.h"
21
#include "llvm/Analysis/ValueLattice.h"
22
#include "llvm/Analysis/ValueTracking.h"
23
#include "llvm/IR/AssemblyAnnotationWriter.h"
24
#include "llvm/IR/CFG.h"
25
#include "llvm/IR/ConstantRange.h"
26
#include "llvm/IR/Constants.h"
27
#include "llvm/IR/DataLayout.h"
28
#include "llvm/IR/Dominators.h"
29
#include "llvm/IR/InstrTypes.h"
30
#include "llvm/IR/Instructions.h"
31
#include "llvm/IR/IntrinsicInst.h"
32
#include "llvm/IR/Intrinsics.h"
33
#include "llvm/IR/LLVMContext.h"
34
#include "llvm/IR/PatternMatch.h"
35
#include "llvm/IR/ValueHandle.h"
36
#include "llvm/InitializePasses.h"
37
#include "llvm/Support/Debug.h"
38
#include "llvm/Support/FormattedStream.h"
39
#include "llvm/Support/KnownBits.h"
40
#include "llvm/Support/raw_ostream.h"
41
#include <optional>
42
using namespace llvm;
43
using namespace PatternMatch;
44
45
#define DEBUG_TYPE "lazy-value-info"
46
47
// This is the number of worklist items we will process to try to discover an
48
// answer for a given value.
49
static const unsigned MaxProcessedPerValue = 500;
50
51
char LazyValueInfoWrapperPass::ID = 0;
52
0
LazyValueInfoWrapperPass::LazyValueInfoWrapperPass() : FunctionPass(ID) {
53
0
  initializeLazyValueInfoWrapperPassPass(*PassRegistry::getPassRegistry());
54
0
}
55
0
INITIALIZE_PASS_BEGIN(LazyValueInfoWrapperPass, "lazy-value-info",
56
0
                "Lazy Value Information Analysis", false, true)
57
0
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
58
0
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
59
0
INITIALIZE_PASS_END(LazyValueInfoWrapperPass, "lazy-value-info",
60
                "Lazy Value Information Analysis", false, true)
61
62
namespace llvm {
63
0
  FunctionPass *createLazyValueInfoPass() { return new LazyValueInfoWrapperPass(); }
64
}
65
66
AnalysisKey LazyValueAnalysis::Key;
67
68
/// Returns true if this lattice value represents at most one possible value.
69
/// This is as precise as any lattice value can get while still representing
70
/// reachable code.
71
0
static bool hasSingleValue(const ValueLatticeElement &Val) {
72
0
  if (Val.isConstantRange() &&
73
0
      Val.getConstantRange().isSingleElement())
74
    // Integer constants are single element ranges
75
0
    return true;
76
0
  if (Val.isConstant())
77
    // Non integer constants
78
0
    return true;
79
0
  return false;
80
0
}
81
82
/// Combine two sets of facts about the same value into a single set of
83
/// facts.  Note that this method is not suitable for merging facts along
84
/// different paths in a CFG; that's what the mergeIn function is for.  This
85
/// is for merging facts gathered about the same value at the same location
86
/// through two independent means.
87
/// Notes:
88
/// * This method does not promise to return the most precise possible lattice
89
///   value implied by A and B.  It is allowed to return any lattice element
90
///   which is at least as strong as *either* A or B (unless our facts
91
///   conflict, see below).
92
/// * Due to unreachable code, the intersection of two lattice values could be
93
///   contradictory.  If this happens, we return some valid lattice value so as
94
///   not confuse the rest of LVI.  Ideally, we'd always return Undefined, but
95
///   we do not make this guarantee.  TODO: This would be a useful enhancement.
96
static ValueLatticeElement intersect(const ValueLatticeElement &A,
97
0
                                     const ValueLatticeElement &B) {
98
  // Undefined is the strongest state.  It means the value is known to be along
99
  // an unreachable path.
100
0
  if (A.isUnknown())
101
0
    return A;
102
0
  if (B.isUnknown())
103
0
    return B;
104
105
  // If we gave up for one, but got a useable fact from the other, use it.
106
0
  if (A.isOverdefined())
107
0
    return B;
108
0
  if (B.isOverdefined())
109
0
    return A;
110
111
  // Can't get any more precise than constants.
112
0
  if (hasSingleValue(A))
113
0
    return A;
114
0
  if (hasSingleValue(B))
115
0
    return B;
116
117
  // Could be either constant range or not constant here.
118
0
  if (!A.isConstantRange() || !B.isConstantRange()) {
119
    // TODO: Arbitrary choice, could be improved
120
0
    return A;
121
0
  }
122
123
  // Intersect two constant ranges
124
0
  ConstantRange Range =
125
0
      A.getConstantRange().intersectWith(B.getConstantRange());
126
  // Note: An empty range is implicitly converted to unknown or undef depending
127
  // on MayIncludeUndef internally.
128
0
  return ValueLatticeElement::getRange(
129
0
      std::move(Range), /*MayIncludeUndef=*/A.isConstantRangeIncludingUndef() ||
130
0
                            B.isConstantRangeIncludingUndef());
131
0
}
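// --- Illustrative sketch (not part of LazyValueInfo.cpp) --------------------
// A hedged example of the constant-range case handled by intersect() above:
// when both facts are ranges, the result keeps only the values allowed by
// both.  The helper name exampleIntersectRanges is hypothetical; it relies
// only on the ConstantRange and ValueLatticeElement APIs included above.
static ValueLatticeElement exampleIntersectRanges() {
  // Fact A: the value lies in [0, 10).  Fact B: it lies in [5, 100).
  ConstantRange A(APInt(32, 0), APInt(32, 10));
  ConstantRange B(APInt(32, 5), APInt(32, 100));
  // intersect() above reduces this pair to the range intersection [5, 10).
  return ValueLatticeElement::getRange(A.intersectWith(B));
}
// -----------------------------------------------------------------------------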
132
133
//===----------------------------------------------------------------------===//
134
//                          LazyValueInfoCache Decl
135
//===----------------------------------------------------------------------===//
136
137
namespace {
138
  /// A callback value handle updates the cache when values are erased.
139
  class LazyValueInfoCache;
140
  struct LVIValueHandle final : public CallbackVH {
141
    LazyValueInfoCache *Parent;
142
143
    LVIValueHandle(Value *V, LazyValueInfoCache *P = nullptr)
144
0
      : CallbackVH(V), Parent(P) { }
145
146
    void deleted() override;
147
0
    void allUsesReplacedWith(Value *V) override {
148
0
      deleted();
149
0
    }
150
  };
151
} // end anonymous namespace
152
153
namespace {
154
  using NonNullPointerSet = SmallDenseSet<AssertingVH<Value>, 2>;
155
156
  /// This is the cache kept by LazyValueInfo which
157
  /// maintains information about queries across the clients' queries.
158
  class LazyValueInfoCache {
159
    /// This is all of the cached information for one basic block. It contains
160
    /// the per-value lattice elements, as well as a separate set for
161
    /// overdefined values to reduce memory usage. Additionally pointers
162
    /// dereferenced in the block are cached for nullability queries.
163
    struct BlockCacheEntry {
164
      SmallDenseMap<AssertingVH<Value>, ValueLatticeElement, 4> LatticeElements;
165
      SmallDenseSet<AssertingVH<Value>, 4> OverDefined;
166
      // std::nullopt indicates that the nonnull pointers for this basic block
167
      // have not been computed yet.
168
      std::optional<NonNullPointerSet> NonNullPointers;
169
    };
170
171
    /// Cached information per basic block.
172
    DenseMap<PoisoningVH<BasicBlock>, std::unique_ptr<BlockCacheEntry>>
173
        BlockCache;
174
    /// Set of value handles used to erase values from the cache on deletion.
175
    DenseSet<LVIValueHandle, DenseMapInfo<Value *>> ValueHandles;
176
177
0
    const BlockCacheEntry *getBlockEntry(BasicBlock *BB) const {
178
0
      auto It = BlockCache.find_as(BB);
179
0
      if (It == BlockCache.end())
180
0
        return nullptr;
181
0
      return It->second.get();
182
0
    }
183
184
0
    BlockCacheEntry *getOrCreateBlockEntry(BasicBlock *BB) {
185
0
      auto It = BlockCache.find_as(BB);
186
0
      if (It == BlockCache.end())
187
0
        It = BlockCache.insert({ BB, std::make_unique<BlockCacheEntry>() })
188
0
                       .first;
189
190
0
      return It->second.get();
191
0
    }
192
193
0
    void addValueHandle(Value *Val) {
194
0
      auto HandleIt = ValueHandles.find_as(Val);
195
0
      if (HandleIt == ValueHandles.end())
196
0
        ValueHandles.insert({ Val, this });
197
0
    }
198
199
  public:
200
    void insertResult(Value *Val, BasicBlock *BB,
201
0
                      const ValueLatticeElement &Result) {
202
0
      BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);
203
204
      // Insert over-defined values into their own cache to reduce memory
205
      // overhead.
206
0
      if (Result.isOverdefined())
207
0
        Entry->OverDefined.insert(Val);
208
0
      else
209
0
        Entry->LatticeElements.insert({ Val, Result });
210
211
0
      addValueHandle(Val);
212
0
    }
213
214
    std::optional<ValueLatticeElement>
215
0
    getCachedValueInfo(Value *V, BasicBlock *BB) const {
216
0
      const BlockCacheEntry *Entry = getBlockEntry(BB);
217
0
      if (!Entry)
218
0
        return std::nullopt;
219
220
0
      if (Entry->OverDefined.count(V))
221
0
        return ValueLatticeElement::getOverdefined();
222
223
0
      auto LatticeIt = Entry->LatticeElements.find_as(V);
224
0
      if (LatticeIt == Entry->LatticeElements.end())
225
0
        return std::nullopt;
226
227
0
      return LatticeIt->second;
228
0
    }
229
230
    bool isNonNullAtEndOfBlock(
231
        Value *V, BasicBlock *BB,
232
0
        function_ref<NonNullPointerSet(BasicBlock *)> InitFn) {
233
0
      BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);
234
0
      if (!Entry->NonNullPointers) {
235
0
        Entry->NonNullPointers = InitFn(BB);
236
0
        for (Value *V : *Entry->NonNullPointers)
237
0
          addValueHandle(V);
238
0
      }
239
240
0
      return Entry->NonNullPointers->count(V);
241
0
    }
242
243
    /// clear - Empty the cache.
244
0
    void clear() {
245
0
      BlockCache.clear();
246
0
      ValueHandles.clear();
247
0
    }
248
249
    /// Inform the cache that a given value has been deleted.
250
    void eraseValue(Value *V);
251
252
    /// This is part of the update interface to inform the cache
253
    /// that a block has been deleted.
254
    void eraseBlock(BasicBlock *BB);
255
256
    /// Updates the cache to remove any influence an overdefined value in
257
    /// OldSucc might have (unless also overdefined in NewSucc).  This just
258
    /// flushes elements from the cache and does not add any.
259
    void threadEdgeImpl(BasicBlock *OldSucc, BasicBlock *NewSucc);
260
  };
261
}
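// --- Illustrative sketch (not part of LazyValueInfo.cpp) --------------------
// A hedged example of the cache contract described above: overdefined results
// go into the separate per-block OverDefined set, yet getCachedValueInfo()
// reports them the same way as ordinary lattice elements.  The helper name
// exampleCacheRoundTrip is hypothetical.
static void exampleCacheRoundTrip(Value *V, BasicBlock *BB) {
  LazyValueInfoCache Cache;
  Cache.insertResult(V, BB, ValueLatticeElement::getOverdefined());
  // The query does not care which internal container held the result.
  assert(Cache.getCachedValueInfo(V, BB)->isOverdefined());
}
// -----------------------------------------------------------------------------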
262
263
0
void LazyValueInfoCache::eraseValue(Value *V) {
264
0
  for (auto &Pair : BlockCache) {
265
0
    Pair.second->LatticeElements.erase(V);
266
0
    Pair.second->OverDefined.erase(V);
267
0
    if (Pair.second->NonNullPointers)
268
0
      Pair.second->NonNullPointers->erase(V);
269
0
  }
270
271
0
  auto HandleIt = ValueHandles.find_as(V);
272
0
  if (HandleIt != ValueHandles.end())
273
0
    ValueHandles.erase(HandleIt);
274
0
}
275
276
0
void LVIValueHandle::deleted() {
277
  // This erasure deallocates *this, so it MUST happen after we're done
278
  // using any and all members of *this.
279
0
  Parent->eraseValue(*this);
280
0
}
281
282
0
void LazyValueInfoCache::eraseBlock(BasicBlock *BB) {
283
0
  BlockCache.erase(BB);
284
0
}
285
286
void LazyValueInfoCache::threadEdgeImpl(BasicBlock *OldSucc,
287
0
                                        BasicBlock *NewSucc) {
288
  // When an edge in the graph has been threaded, values that we could not
289
  // determine a value for before (i.e. were marked overdefined) may be
290
  // possible to solve now. We do NOT try to proactively update these values.
291
  // Instead, we clear their entries from the cache, and allow lazy updating to
292
  // recompute them when needed.
293
294
  // The updating process is fairly simple: we need to drop cached info
295
  // for all values that were marked overdefined in OldSucc, and for those same
296
  // values in any successor of OldSucc (except NewSucc) in which they were
297
  // also marked overdefined.
298
0
  std::vector<BasicBlock*> worklist;
299
0
  worklist.push_back(OldSucc);
300
301
0
  const BlockCacheEntry *Entry = getBlockEntry(OldSucc);
302
0
  if (!Entry || Entry->OverDefined.empty())
303
0
    return; // Nothing to process here.
304
0
  SmallVector<Value *, 4> ValsToClear(Entry->OverDefined.begin(),
305
0
                                      Entry->OverDefined.end());
306
307
  // Use a worklist to perform a depth-first search of OldSucc's successors.
308
  // NOTE: We do not need a visited list since any blocks we have already
309
  // visited will have had their overdefined markers cleared already, and we
310
  // thus won't loop to their successors.
311
0
  while (!worklist.empty()) {
312
0
    BasicBlock *ToUpdate = worklist.back();
313
0
    worklist.pop_back();
314
315
    // Skip blocks only accessible through NewSucc.
316
0
    if (ToUpdate == NewSucc) continue;
317
318
    // If a value was marked overdefined in OldSucc, and is here too...
319
0
    auto OI = BlockCache.find_as(ToUpdate);
320
0
    if (OI == BlockCache.end() || OI->second->OverDefined.empty())
321
0
      continue;
322
0
    auto &ValueSet = OI->second->OverDefined;
323
324
0
    bool changed = false;
325
0
    for (Value *V : ValsToClear) {
326
0
      if (!ValueSet.erase(V))
327
0
        continue;
328
329
      // If we removed anything, then we potentially need to update
330
      // blocks successors too.
331
0
      changed = true;
332
0
    }
333
334
0
    if (!changed) continue;
335
336
0
    llvm::append_range(worklist, successors(ToUpdate));
337
0
  }
338
0
}
339
340
namespace llvm {
341
namespace {
342
/// An assembly annotator class to print LazyValueCache information in
343
/// comments.
344
class LazyValueInfoAnnotatedWriter : public AssemblyAnnotationWriter {
345
  LazyValueInfoImpl *LVIImpl;
346
  // While analyzing which blocks we can solve values for, we need the dominator
347
  // information.
348
  DominatorTree &DT;
349
350
public:
351
  LazyValueInfoAnnotatedWriter(LazyValueInfoImpl *L, DominatorTree &DTree)
352
0
      : LVIImpl(L), DT(DTree) {}
353
354
  void emitBasicBlockStartAnnot(const BasicBlock *BB,
355
                                formatted_raw_ostream &OS) override;
356
357
  void emitInstructionAnnot(const Instruction *I,
358
                            formatted_raw_ostream &OS) override;
359
};
360
} // namespace
361
// The actual implementation of the lazy analysis and update.  Note that the
362
// inheritance from LazyValueInfoCache is intended to be temporary while
363
// splitting the code and then transitioning to a has-a relationship.
364
class LazyValueInfoImpl {
365
366
  /// Cached results from previous queries
367
  LazyValueInfoCache TheCache;
368
369
  /// This stack holds the state of the value solver during a query.
370
  /// It basically emulates the callstack of the naive
371
  /// recursive value lookup process.
372
  SmallVector<std::pair<BasicBlock*, Value*>, 8> BlockValueStack;
373
374
  /// Keeps track of which block-value pairs are in BlockValueStack.
375
  DenseSet<std::pair<BasicBlock*, Value*> > BlockValueSet;
376
377
  /// Push BV onto BlockValueStack unless it's already in there.
378
  /// Returns true on success.
379
0
  bool pushBlockValue(const std::pair<BasicBlock *, Value *> &BV) {
380
0
    if (!BlockValueSet.insert(BV).second)
381
0
      return false;  // It's already in the stack.
382
383
0
    LLVM_DEBUG(dbgs() << "PUSH: " << *BV.second << " in "
384
0
                      << BV.first->getName() << "\n");
385
0
    BlockValueStack.push_back(BV);
386
0
    return true;
387
0
  }
388
389
  AssumptionCache *AC;  ///< A pointer to the cache of @llvm.assume calls.
390
  const DataLayout &DL; ///< A mandatory DataLayout
391
392
  /// Declaration of the llvm.experimental.guard() intrinsic,
393
  /// if it exists in the module.
394
  Function *GuardDecl;
395
396
  std::optional<ValueLatticeElement> getBlockValue(Value *Val, BasicBlock *BB,
397
                                                   Instruction *CxtI);
398
  std::optional<ValueLatticeElement> getEdgeValue(Value *V, BasicBlock *F,
399
                                                  BasicBlock *T,
400
                                                  Instruction *CxtI = nullptr);
401
402
  // These methods process one work item and may add more. A false value
403
  // returned means that the work item was not completely processed and must
404
  // be revisited after going through the new items.
405
  bool solveBlockValue(Value *Val, BasicBlock *BB);
406
  std::optional<ValueLatticeElement> solveBlockValueImpl(Value *Val,
407
                                                         BasicBlock *BB);
408
  std::optional<ValueLatticeElement> solveBlockValueNonLocal(Value *Val,
409
                                                             BasicBlock *BB);
410
  std::optional<ValueLatticeElement> solveBlockValuePHINode(PHINode *PN,
411
                                                            BasicBlock *BB);
412
  std::optional<ValueLatticeElement> solveBlockValueSelect(SelectInst *S,
413
                                                           BasicBlock *BB);
414
  std::optional<ConstantRange> getRangeFor(Value *V, Instruction *CxtI,
415
                                           BasicBlock *BB);
416
  std::optional<ValueLatticeElement> solveBlockValueBinaryOpImpl(
417
      Instruction *I, BasicBlock *BB,
418
      std::function<ConstantRange(const ConstantRange &, const ConstantRange &)>
419
          OpFn);
420
  std::optional<ValueLatticeElement>
421
  solveBlockValueBinaryOp(BinaryOperator *BBI, BasicBlock *BB);
422
  std::optional<ValueLatticeElement> solveBlockValueCast(CastInst *CI,
423
                                                         BasicBlock *BB);
424
  std::optional<ValueLatticeElement>
425
  solveBlockValueOverflowIntrinsic(WithOverflowInst *WO, BasicBlock *BB);
426
  std::optional<ValueLatticeElement> solveBlockValueIntrinsic(IntrinsicInst *II,
427
                                                              BasicBlock *BB);
428
  std::optional<ValueLatticeElement>
429
  solveBlockValueExtractValue(ExtractValueInst *EVI, BasicBlock *BB);
430
  bool isNonNullAtEndOfBlock(Value *Val, BasicBlock *BB);
431
  void intersectAssumeOrGuardBlockValueConstantRange(Value *Val,
432
                                                     ValueLatticeElement &BBLV,
433
                                                     Instruction *BBI);
434
435
  void solve();
436
437
  // For the following methods, if UseBlockValue is true, the function may
438
  // push additional values to the worklist and return nullopt. If
439
  // UseBlockValue is false, it will never return nullopt.
440
441
  std::optional<ValueLatticeElement>
442
  getValueFromSimpleICmpCondition(CmpInst::Predicate Pred, Value *RHS,
443
                                  const APInt &Offset, Instruction *CxtI,
444
                                  bool UseBlockValue);
445
446
  std::optional<ValueLatticeElement>
447
  getValueFromICmpCondition(Value *Val, ICmpInst *ICI, bool isTrueDest,
448
                            bool UseBlockValue);
449
450
  std::optional<ValueLatticeElement>
451
  getValueFromCondition(Value *Val, Value *Cond, bool IsTrueDest,
452
                        bool UseBlockValue, unsigned Depth = 0);
453
454
  std::optional<ValueLatticeElement> getEdgeValueLocal(Value *Val,
455
                                                       BasicBlock *BBFrom,
456
                                                       BasicBlock *BBTo,
457
                                                       bool UseBlockValue);
458
459
public:
460
  /// This is the query interface to determine the lattice value for the
461
  /// specified Value* at the context instruction (if specified) or at the
462
  /// start of the block.
463
  ValueLatticeElement getValueInBlock(Value *V, BasicBlock *BB,
464
                                      Instruction *CxtI = nullptr);
465
466
  /// This is the query interface to determine the lattice value for the
467
  /// specified Value* at the specified instruction using only information
468
  /// from assumes/guards and range metadata. Unlike getValueInBlock(), no
469
  /// recursive query is performed.
470
  ValueLatticeElement getValueAt(Value *V, Instruction *CxtI);
471
472
  /// This is the query interface to determine the lattice
473
  /// value for the specified Value* that is true on the specified edge.
474
  ValueLatticeElement getValueOnEdge(Value *V, BasicBlock *FromBB,
475
                                     BasicBlock *ToBB,
476
                                     Instruction *CxtI = nullptr);
477
478
  ValueLatticeElement getValueAtUse(const Use &U);
479
480
  /// Completely flush all previously computed values.
481
0
  void clear() {
482
0
    TheCache.clear();
483
0
  }
484
485
  /// Printing the LazyValueInfo Analysis.
486
0
  void printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
487
0
    LazyValueInfoAnnotatedWriter Writer(this, DTree);
488
0
    F.print(OS, &Writer);
489
0
  }
490
491
  /// This is part of the update interface to remove information related to this
492
  /// value from the cache.
493
0
  void forgetValue(Value *V) { TheCache.eraseValue(V); }
494
495
  /// This is part of the update interface to inform the cache
496
  /// that a block has been deleted.
497
0
  void eraseBlock(BasicBlock *BB) {
498
0
    TheCache.eraseBlock(BB);
499
0
  }
500
501
  /// This is the update interface to inform the cache that an edge from
502
  /// PredBB to OldSucc has been threaded to be from PredBB to NewSucc.
503
  void threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc, BasicBlock *NewSucc);
504
505
  LazyValueInfoImpl(AssumptionCache *AC, const DataLayout &DL,
506
                    Function *GuardDecl)
507
0
      : AC(AC), DL(DL), GuardDecl(GuardDecl) {}
508
};
509
} // namespace llvm
510
511
0
void LazyValueInfoImpl::solve() {
512
0
  SmallVector<std::pair<BasicBlock *, Value *>, 8> StartingStack(
513
0
      BlockValueStack.begin(), BlockValueStack.end());
514
515
0
  unsigned processedCount = 0;
516
0
  while (!BlockValueStack.empty()) {
517
0
    processedCount++;
518
    // Abort if we have to process too many values to get a result for this one.
519
    // Because of the design of the overdefined cache currently being per-block
520
    // to avoid naming-related issues (i.e. it wants to try to give different
521
    // results for the same name in different blocks), overdefined results don't
522
    // get cached globally, which in turn means we will often try to rediscover
523
    // the same overdefined result again and again.  Once something like
524
    // PredicateInfo is used in LVI or CVP, we should be able to make the
525
    // overdefined cache global, and remove this throttle.
526
0
    if (processedCount > MaxProcessedPerValue) {
527
0
      LLVM_DEBUG(
528
0
          dbgs() << "Giving up on stack because we are getting too deep\n");
529
      // Fill in the original values
530
0
      while (!StartingStack.empty()) {
531
0
        std::pair<BasicBlock *, Value *> &e = StartingStack.back();
532
0
        TheCache.insertResult(e.second, e.first,
533
0
                              ValueLatticeElement::getOverdefined());
534
0
        StartingStack.pop_back();
535
0
      }
536
0
      BlockValueSet.clear();
537
0
      BlockValueStack.clear();
538
0
      return;
539
0
    }
540
0
    std::pair<BasicBlock *, Value *> e = BlockValueStack.back();
541
0
    assert(BlockValueSet.count(e) && "Stack value should be in BlockValueSet!");
542
0
    unsigned StackSize = BlockValueStack.size();
543
0
    (void) StackSize;
544
545
0
    if (solveBlockValue(e.second, e.first)) {
546
      // The work item was completely processed.
547
0
      assert(BlockValueStack.size() == StackSize &&
548
0
             BlockValueStack.back() == e && "Nothing should have been pushed!");
549
0
#ifndef NDEBUG
550
0
      std::optional<ValueLatticeElement> BBLV =
551
0
          TheCache.getCachedValueInfo(e.second, e.first);
552
0
      assert(BBLV && "Result should be in cache!");
553
0
      LLVM_DEBUG(
554
0
          dbgs() << "POP " << *e.second << " in " << e.first->getName() << " = "
555
0
                 << *BBLV << "\n");
556
0
#endif
557
558
0
      BlockValueStack.pop_back();
559
0
      BlockValueSet.erase(e);
560
0
    } else {
561
      // More work needs to be done before revisiting.
562
0
      assert(BlockValueStack.size() == StackSize + 1 &&
563
0
             "Exactly one element should have been pushed!");
564
0
    }
565
0
  }
566
0
}
567
568
std::optional<ValueLatticeElement>
569
LazyValueInfoImpl::getBlockValue(Value *Val, BasicBlock *BB,
570
0
                                 Instruction *CxtI) {
571
  // If already a constant, there is nothing to compute.
572
0
  if (Constant *VC = dyn_cast<Constant>(Val))
573
0
    return ValueLatticeElement::get(VC);
574
575
0
  if (std::optional<ValueLatticeElement> OptLatticeVal =
576
0
          TheCache.getCachedValueInfo(Val, BB)) {
577
0
    intersectAssumeOrGuardBlockValueConstantRange(Val, *OptLatticeVal, CxtI);
578
0
    return OptLatticeVal;
579
0
  }
580
581
  // We have hit a cycle, assume overdefined.
582
0
  if (!pushBlockValue({ BB, Val }))
583
0
    return ValueLatticeElement::getOverdefined();
584
585
  // Yet to be resolved.
586
0
  return std::nullopt;
587
0
}
588
589
0
static ValueLatticeElement getFromRangeMetadata(Instruction *BBI) {
590
0
  switch (BBI->getOpcode()) {
591
0
  default: break;
592
0
  case Instruction::Load:
593
0
  case Instruction::Call:
594
0
  case Instruction::Invoke:
595
0
    if (MDNode *Ranges = BBI->getMetadata(LLVMContext::MD_range))
596
0
      if (isa<IntegerType>(BBI->getType())) {
597
0
        return ValueLatticeElement::getRange(
598
0
            getConstantRangeFromMetadata(*Ranges));
599
0
      }
600
0
    break;
601
0
  };
602
  // Nothing known - will be intersected with other facts
603
0
  return ValueLatticeElement::getOverdefined();
604
0
}
605
606
0
bool LazyValueInfoImpl::solveBlockValue(Value *Val, BasicBlock *BB) {
607
0
  assert(!isa<Constant>(Val) && "Value should not be constant");
608
0
  assert(!TheCache.getCachedValueInfo(Val, BB) &&
609
0
         "Value should not be in cache");
610
611
  // Hold off inserting this value into the Cache in case we have to return
612
  // false and come back later.
613
0
  std::optional<ValueLatticeElement> Res = solveBlockValueImpl(Val, BB);
614
0
  if (!Res)
615
    // Work pushed, will revisit
616
0
    return false;
617
618
0
  TheCache.insertResult(Val, BB, *Res);
619
0
  return true;
620
0
}
621
622
std::optional<ValueLatticeElement>
623
0
LazyValueInfoImpl::solveBlockValueImpl(Value *Val, BasicBlock *BB) {
624
0
  Instruction *BBI = dyn_cast<Instruction>(Val);
625
0
  if (!BBI || BBI->getParent() != BB)
626
0
    return solveBlockValueNonLocal(Val, BB);
627
628
0
  if (PHINode *PN = dyn_cast<PHINode>(BBI))
629
0
    return solveBlockValuePHINode(PN, BB);
630
631
0
  if (auto *SI = dyn_cast<SelectInst>(BBI))
632
0
    return solveBlockValueSelect(SI, BB);
633
634
  // If this value is a nonnull pointer, record its range and bail out.  Note
635
  // that for all other pointer typed values, we terminate the search at the
636
  // definition.  We could easily extend this to look through geps, bitcasts,
637
  // and the like to prove non-nullness, but it's not clear that's worth it
638
  // compile time wise.  The context-insensitive value walk done inside
639
  // isKnownNonZero gets most of the profitable cases at much less expense.
640
  // This does mean that we have a sensitivity to where the defining
641
  // instruction is placed, even if it could legally be hoisted much higher.
642
  // That is unfortunate.
643
0
  PointerType *PT = dyn_cast<PointerType>(BBI->getType());
644
0
  if (PT && isKnownNonZero(BBI, DL))
645
0
    return ValueLatticeElement::getNot(ConstantPointerNull::get(PT));
646
647
0
  if (BBI->getType()->isIntegerTy()) {
648
0
    if (auto *CI = dyn_cast<CastInst>(BBI))
649
0
      return solveBlockValueCast(CI, BB);
650
651
0
    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(BBI))
652
0
      return solveBlockValueBinaryOp(BO, BB);
653
654
0
    if (auto *EVI = dyn_cast<ExtractValueInst>(BBI))
655
0
      return solveBlockValueExtractValue(EVI, BB);
656
657
0
    if (auto *II = dyn_cast<IntrinsicInst>(BBI))
658
0
      return solveBlockValueIntrinsic(II, BB);
659
0
  }
660
661
0
  LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
662
0
                    << "' - unknown inst def found.\n");
663
0
  return getFromRangeMetadata(BBI);
664
0
}
665
666
0
static void AddNonNullPointer(Value *Ptr, NonNullPointerSet &PtrSet) {
667
  // TODO: Use NullPointerIsDefined instead.
668
0
  if (Ptr->getType()->getPointerAddressSpace() == 0)
669
0
    PtrSet.insert(getUnderlyingObject(Ptr));
670
0
}
671
672
static void AddNonNullPointersByInstruction(
673
0
    Instruction *I, NonNullPointerSet &PtrSet) {
674
0
  if (LoadInst *L = dyn_cast<LoadInst>(I)) {
675
0
    AddNonNullPointer(L->getPointerOperand(), PtrSet);
676
0
  } else if (StoreInst *S = dyn_cast<StoreInst>(I)) {
677
0
    AddNonNullPointer(S->getPointerOperand(), PtrSet);
678
0
  } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
679
0
    if (MI->isVolatile()) return;
680
681
    // FIXME: check whether it has a valuerange that excludes zero?
682
0
    ConstantInt *Len = dyn_cast<ConstantInt>(MI->getLength());
683
0
    if (!Len || Len->isZero()) return;
684
685
0
    AddNonNullPointer(MI->getRawDest(), PtrSet);
686
0
    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
687
0
      AddNonNullPointer(MTI->getRawSource(), PtrSet);
688
0
  }
689
0
}
690
691
0
bool LazyValueInfoImpl::isNonNullAtEndOfBlock(Value *Val, BasicBlock *BB) {
692
0
  if (NullPointerIsDefined(BB->getParent(),
693
0
                           Val->getType()->getPointerAddressSpace()))
694
0
    return false;
695
696
0
  Val = Val->stripInBoundsOffsets();
697
0
  return TheCache.isNonNullAtEndOfBlock(Val, BB, [](BasicBlock *BB) {
698
0
    NonNullPointerSet NonNullPointers;
699
0
    for (Instruction &I : *BB)
700
0
      AddNonNullPointersByInstruction(&I, NonNullPointers);
701
0
    return NonNullPointers;
702
0
  });
703
0
}
704
705
std::optional<ValueLatticeElement>
706
0
LazyValueInfoImpl::solveBlockValueNonLocal(Value *Val, BasicBlock *BB) {
707
0
  ValueLatticeElement Result;  // Start Undefined.
708
709
  // If this is the entry block, we must be asking about an argument.  The
710
  // value is overdefined.
711
0
  if (BB->isEntryBlock()) {
712
0
    assert(isa<Argument>(Val) && "Unknown live-in to the entry block");
713
0
    return ValueLatticeElement::getOverdefined();
714
0
  }
715
716
  // Loop over all of our predecessors, merging what we know from them into
717
  // result.  If we encounter an unexplored predecessor, we eagerly explore it
718
  // in a depth first manner.  In practice, this has the effect of discovering
719
  // paths we can't analyze eagerly without spending compile times analyzing
720
  // other paths.  This heuristic benefits from the fact that predecessors are
721
  // frequently arranged such that dominating ones come first and we quickly
722
  // find a path to function entry.  TODO: We should consider explicitly
723
  // canonicalizing to make this true rather than relying on this happy
724
  // accident.
725
0
  for (BasicBlock *Pred : predecessors(BB)) {
726
0
    std::optional<ValueLatticeElement> EdgeResult = getEdgeValue(Val, Pred, BB);
727
0
    if (!EdgeResult)
728
      // Explore that input, then return here
729
0
      return std::nullopt;
730
731
0
    Result.mergeIn(*EdgeResult);
732
733
    // If we hit overdefined, exit early.  The BlockVals entry is already set
734
    // to overdefined.
735
0
    if (Result.isOverdefined()) {
736
0
      LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
737
0
                        << "' - overdefined because of pred '"
738
0
                        << Pred->getName() << "' (non local).\n");
739
0
      return Result;
740
0
    }
741
0
  }
742
743
  // Return the merged value, which is more precise than 'overdefined'.
744
0
  assert(!Result.isOverdefined());
745
0
  return Result;
746
0
}
747
748
std::optional<ValueLatticeElement>
749
0
LazyValueInfoImpl::solveBlockValuePHINode(PHINode *PN, BasicBlock *BB) {
750
0
  ValueLatticeElement Result;  // Start Undefined.
751
752
  // Loop over all of our predecessors, merging what we know from them into
753
  // result.  See the comment about the chosen traversal order in
754
  // solveBlockValueNonLocal; the same reasoning applies here.
755
0
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
756
0
    BasicBlock *PhiBB = PN->getIncomingBlock(i);
757
0
    Value *PhiVal = PN->getIncomingValue(i);
758
    // Note that we can provide PN as the context value to getEdgeValue, even
759
    // though the results will be cached, because PN is the value being used as
760
    // the cache key in the caller.
761
0
    std::optional<ValueLatticeElement> EdgeResult =
762
0
        getEdgeValue(PhiVal, PhiBB, BB, PN);
763
0
    if (!EdgeResult)
764
      // Explore that input, then return here
765
0
      return std::nullopt;
766
767
0
    Result.mergeIn(*EdgeResult);
768
769
    // If we hit overdefined, exit early.  The BlockVals entry is already set
770
    // to overdefined.
771
0
    if (Result.isOverdefined()) {
772
0
      LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
773
0
                        << "' - overdefined because of pred (local).\n");
774
775
0
      return Result;
776
0
    }
777
0
  }
778
779
  // Return the merged value, which is more precise than 'overdefined'.
780
0
  assert(!Result.isOverdefined() && "Possible PHI in entry block?");
781
0
  return Result;
782
0
}
783
784
// If we can determine a constraint on the value given conditions assumed by
785
// the program, intersect those constraints with BBLV
786
void LazyValueInfoImpl::intersectAssumeOrGuardBlockValueConstantRange(
787
0
    Value *Val, ValueLatticeElement &BBLV, Instruction *BBI) {
788
0
  BBI = BBI ? BBI : dyn_cast<Instruction>(Val);
789
0
  if (!BBI)
790
0
    return;
791
792
0
  BasicBlock *BB = BBI->getParent();
793
0
  for (auto &AssumeVH : AC->assumptionsFor(Val)) {
794
0
    if (!AssumeVH)
795
0
      continue;
796
797
    // Only check assumes in the block of the context instruction. Other
798
    // assumes will have already been taken into account when the value was
799
    // propagated from predecessor blocks.
800
0
    auto *I = cast<CallInst>(AssumeVH);
801
0
    if (I->getParent() != BB || !isValidAssumeForContext(I, BBI))
802
0
      continue;
803
804
0
    BBLV = intersect(BBLV, *getValueFromCondition(Val, I->getArgOperand(0),
805
0
                                                  /*IsTrueDest*/ true,
806
0
                                                  /*UseBlockValue*/ false));
807
0
  }
808
809
  // If guards are not used in the module, don't spend time looking for them
810
0
  if (GuardDecl && !GuardDecl->use_empty() &&
811
0
      BBI->getIterator() != BB->begin()) {
812
0
    for (Instruction &I :
813
0
         make_range(std::next(BBI->getIterator().getReverse()), BB->rend())) {
814
0
      Value *Cond = nullptr;
815
0
      if (match(&I, m_Intrinsic<Intrinsic::experimental_guard>(m_Value(Cond))))
816
0
        BBLV = intersect(BBLV,
817
0
                         *getValueFromCondition(Val, Cond, /*IsTrueDest*/ true,
818
0
                                                /*UseBlockValue*/ false));
819
0
    }
820
0
  }
821
822
0
  if (BBLV.isOverdefined()) {
823
    // Check whether we're checking at the terminator, and the pointer has
824
    // been dereferenced in this block.
825
0
    PointerType *PTy = dyn_cast<PointerType>(Val->getType());
826
0
    if (PTy && BB->getTerminator() == BBI &&
827
0
        isNonNullAtEndOfBlock(Val, BB))
828
0
      BBLV = ValueLatticeElement::getNot(ConstantPointerNull::get(PTy));
829
0
  }
830
0
}
831
832
static ConstantRange toConstantRange(const ValueLatticeElement &Val,
833
0
                                     Type *Ty, bool UndefAllowed = false) {
834
0
  assert(Ty->isIntOrIntVectorTy() && "Must be integer type");
835
0
  if (Val.isConstantRange(UndefAllowed))
836
0
    return Val.getConstantRange();
837
0
  unsigned BW = Ty->getScalarSizeInBits();
838
0
  if (Val.isUnknown())
839
0
    return ConstantRange::getEmpty(BW);
840
0
  return ConstantRange::getFull(BW);
841
0
}
842
843
std::optional<ValueLatticeElement>
844
0
LazyValueInfoImpl::solveBlockValueSelect(SelectInst *SI, BasicBlock *BB) {
845
  // Recurse on our inputs if needed
846
0
  std::optional<ValueLatticeElement> OptTrueVal =
847
0
      getBlockValue(SI->getTrueValue(), BB, SI);
848
0
  if (!OptTrueVal)
849
0
    return std::nullopt;
850
0
  ValueLatticeElement &TrueVal = *OptTrueVal;
851
852
0
  std::optional<ValueLatticeElement> OptFalseVal =
853
0
      getBlockValue(SI->getFalseValue(), BB, SI);
854
0
  if (!OptFalseVal)
855
0
    return std::nullopt;
856
0
  ValueLatticeElement &FalseVal = *OptFalseVal;
857
858
0
  if (TrueVal.isConstantRange() || FalseVal.isConstantRange()) {
859
0
    const ConstantRange &TrueCR = toConstantRange(TrueVal, SI->getType());
860
0
    const ConstantRange &FalseCR = toConstantRange(FalseVal, SI->getType());
861
0
    Value *LHS = nullptr;
862
0
    Value *RHS = nullptr;
863
0
    SelectPatternResult SPR = matchSelectPattern(SI, LHS, RHS);
864
    // Is this a min specifically of our two inputs?  (Avoid the risk of
865
    // ValueTracking getting smarter looking back past our immediate inputs.)
866
0
    if (SelectPatternResult::isMinOrMax(SPR.Flavor) &&
867
0
        ((LHS == SI->getTrueValue() && RHS == SI->getFalseValue()) ||
868
0
         (RHS == SI->getTrueValue() && LHS == SI->getFalseValue()))) {
869
0
      ConstantRange ResultCR = [&]() {
870
0
        switch (SPR.Flavor) {
871
0
        default:
872
0
          llvm_unreachable("unexpected minmax type!");
873
0
        case SPF_SMIN:                   /// Signed minimum
874
0
          return TrueCR.smin(FalseCR);
875
0
        case SPF_UMIN:                   /// Unsigned minimum
876
0
          return TrueCR.umin(FalseCR);
877
0
        case SPF_SMAX:                   /// Signed maximum
878
0
          return TrueCR.smax(FalseCR);
879
0
        case SPF_UMAX:                   /// Unsigned maximum
880
0
          return TrueCR.umax(FalseCR);
881
0
        };
882
0
      }();
883
0
      return ValueLatticeElement::getRange(
884
0
          ResultCR, TrueVal.isConstantRangeIncludingUndef() ||
885
0
                        FalseVal.isConstantRangeIncludingUndef());
886
0
    }
887
888
0
    if (SPR.Flavor == SPF_ABS) {
889
0
      if (LHS == SI->getTrueValue())
890
0
        return ValueLatticeElement::getRange(
891
0
            TrueCR.abs(), TrueVal.isConstantRangeIncludingUndef());
892
0
      if (LHS == SI->getFalseValue())
893
0
        return ValueLatticeElement::getRange(
894
0
            FalseCR.abs(), FalseVal.isConstantRangeIncludingUndef());
895
0
    }
896
897
0
    if (SPR.Flavor == SPF_NABS) {
898
0
      ConstantRange Zero(APInt::getZero(TrueCR.getBitWidth()));
899
0
      if (LHS == SI->getTrueValue())
900
0
        return ValueLatticeElement::getRange(
901
0
            Zero.sub(TrueCR.abs()), FalseVal.isConstantRangeIncludingUndef());
902
0
      if (LHS == SI->getFalseValue())
903
0
        return ValueLatticeElement::getRange(
904
0
            Zero.sub(FalseCR.abs()), FalseVal.isConstantRangeIncludingUndef());
905
0
    }
906
0
  }
907
908
  // Can we constrain the facts about the true and false values by using the
909
  // condition itself?  This shows up with idioms such as select(a > 5, a, 5).
910
  // TODO: We could potentially refine an overdefined true value above.
911
0
  Value *Cond = SI->getCondition();
912
  // If the value is undef, a different value may be chosen in
913
  // the select condition.
914
0
  if (isGuaranteedNotToBeUndef(Cond, AC)) {
915
0
    TrueVal =
916
0
        intersect(TrueVal, *getValueFromCondition(SI->getTrueValue(), Cond,
917
0
                                                  /*IsTrueDest*/ true,
918
0
                                                  /*UseBlockValue*/ false));
919
0
    FalseVal =
920
0
        intersect(FalseVal, *getValueFromCondition(SI->getFalseValue(), Cond,
921
0
                                                   /*IsTrueDest*/ false,
922
0
                                                   /*UseBlockValue*/ false));
923
0
  }
924
925
0
  ValueLatticeElement Result = TrueVal;
926
0
  Result.mergeIn(FalseVal);
927
0
  return Result;
928
0
}
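// --- Illustrative sketch (not part of LazyValueInfo.cpp) --------------------
// A hedged example of the min/max handling above: once a select is matched as
// a signed minimum of its two inputs, the result range is just smin() of the
// two input ranges.  The helper name exampleSelectSMin is hypothetical.
static ValueLatticeElement exampleSelectSMin() {
  // True arm known to lie in [0, 10); false arm known to lie in [5, 20).
  ConstantRange TrueCR(APInt(32, 0), APInt(32, 10));
  ConstantRange FalseCR(APInt(32, 5), APInt(32, 20));
  // A signed-min select can be no larger than either arm, so this is [0, 10).
  return ValueLatticeElement::getRange(TrueCR.smin(FalseCR));
}
// -----------------------------------------------------------------------------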
929
930
std::optional<ConstantRange>
931
0
LazyValueInfoImpl::getRangeFor(Value *V, Instruction *CxtI, BasicBlock *BB) {
932
0
  std::optional<ValueLatticeElement> OptVal = getBlockValue(V, BB, CxtI);
933
0
  if (!OptVal)
934
0
    return std::nullopt;
935
0
  return toConstantRange(*OptVal, V->getType());
936
0
}
937
938
std::optional<ValueLatticeElement>
939
0
LazyValueInfoImpl::solveBlockValueCast(CastInst *CI, BasicBlock *BB) {
940
  // Filter out casts we don't know how to reason about before attempting to
941
  // recurse on our operand.  This can cut a long search short if we know we're
942
  // not going to be able to get any useful information anyway.
943
0
  switch (CI->getOpcode()) {
944
0
  case Instruction::Trunc:
945
0
  case Instruction::SExt:
946
0
  case Instruction::ZExt:
947
0
    break;
948
0
  default:
949
    // Unhandled instructions are overdefined.
950
0
    LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
951
0
                      << "' - overdefined (unknown cast).\n");
952
0
    return ValueLatticeElement::getOverdefined();
953
0
  }
954
955
  // Figure out the range of the LHS.  If that fails, we still apply the
956
  // transfer rule on the full set since we may be able to locally infer
957
  // interesting facts.
958
0
  std::optional<ConstantRange> LHSRes = getRangeFor(CI->getOperand(0), CI, BB);
959
0
  if (!LHSRes)
960
    // More work to do before applying this transfer rule.
961
0
    return std::nullopt;
962
0
  const ConstantRange &LHSRange = *LHSRes;
963
964
0
  const unsigned ResultBitWidth = CI->getType()->getIntegerBitWidth();
965
966
  // NOTE: We're currently limited by the set of operations that ConstantRange
967
  // can evaluate symbolically.  Enhancing that set will allow us to analyze
968
  // more definitions.
969
0
  return ValueLatticeElement::getRange(LHSRange.castOp(CI->getOpcode(),
970
0
                                                       ResultBitWidth));
971
0
}
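// --- Illustrative sketch (not part of LazyValueInfo.cpp) --------------------
// A hedged example of the castOp() transfer rule used above.  The helper name
// exampleCastRange is hypothetical; it relies only on the ConstantRange API
// included above.
static ValueLatticeElement exampleCastRange() {
  // Suppose an i8 operand is known to lie in [100, 120).
  ConstantRange Op(APInt(8, 100), APInt(8, 120));
  // "zext i8 %op to i32" keeps the same numeric bounds at the wider width,
  // so the result range is [100, 120) as an i32 range.
  return ValueLatticeElement::getRange(Op.castOp(Instruction::ZExt, 32));
}
// -----------------------------------------------------------------------------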
972
973
std::optional<ValueLatticeElement>
974
LazyValueInfoImpl::solveBlockValueBinaryOpImpl(
975
    Instruction *I, BasicBlock *BB,
976
    std::function<ConstantRange(const ConstantRange &, const ConstantRange &)>
977
0
        OpFn) {
978
  // Figure out the ranges of the operands.  If that fails, use a
979
  // conservative range, but apply the transfer rule anyway.  This
980
  // lets us pick up facts from expressions like "and i32 (call i32
981
  // @foo()), 32"
982
0
  std::optional<ConstantRange> LHSRes = getRangeFor(I->getOperand(0), I, BB);
983
0
  if (!LHSRes)
984
0
    return std::nullopt;
985
986
0
  std::optional<ConstantRange> RHSRes = getRangeFor(I->getOperand(1), I, BB);
987
0
  if (!RHSRes)
988
0
    return std::nullopt;
989
990
0
  const ConstantRange &LHSRange = *LHSRes;
991
0
  const ConstantRange &RHSRange = *RHSRes;
992
0
  return ValueLatticeElement::getRange(OpFn(LHSRange, RHSRange));
993
0
}
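// --- Illustrative sketch (not part of LazyValueInfo.cpp) --------------------
// A hedged example of the point made in the comment above: even if one
// operand is completely unknown, "and i32 (call i32 @foo()), 32" still yields
// a useful range.  The helper name exampleUnknownAndConstant is hypothetical.
static ValueLatticeElement exampleUnknownAndConstant() {
  ConstantRange Unknown = ConstantRange::getFull(32); // nothing known about %x
  ConstantRange MaskCR(APInt(32, 32));                // the constant mask 32
  // "%x & 32" can only be 0 or 32; the range transfer conservatively returns
  // a range covering those values, e.g. [0, 33).
  return ValueLatticeElement::getRange(
      Unknown.binaryOp(Instruction::And, MaskCR));
}
// -----------------------------------------------------------------------------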
994
995
std::optional<ValueLatticeElement>
996
0
LazyValueInfoImpl::solveBlockValueBinaryOp(BinaryOperator *BO, BasicBlock *BB) {
997
0
  assert(BO->getOperand(0)->getType()->isSized() &&
998
0
         "all operands to binary operators are sized");
999
0
  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(BO)) {
1000
0
    unsigned NoWrapKind = 0;
1001
0
    if (OBO->hasNoUnsignedWrap())
1002
0
      NoWrapKind |= OverflowingBinaryOperator::NoUnsignedWrap;
1003
0
    if (OBO->hasNoSignedWrap())
1004
0
      NoWrapKind |= OverflowingBinaryOperator::NoSignedWrap;
1005
1006
0
    return solveBlockValueBinaryOpImpl(
1007
0
        BO, BB,
1008
0
        [BO, NoWrapKind](const ConstantRange &CR1, const ConstantRange &CR2) {
1009
0
          return CR1.overflowingBinaryOp(BO->getOpcode(), CR2, NoWrapKind);
1010
0
        });
1011
0
  }
1012
1013
0
  return solveBlockValueBinaryOpImpl(
1014
0
      BO, BB, [BO](const ConstantRange &CR1, const ConstantRange &CR2) {
1015
0
        return CR1.binaryOp(BO->getOpcode(), CR2);
1016
0
      });
1017
0
}
1018
1019
std::optional<ValueLatticeElement>
1020
LazyValueInfoImpl::solveBlockValueOverflowIntrinsic(WithOverflowInst *WO,
1021
0
                                                    BasicBlock *BB) {
1022
0
  return solveBlockValueBinaryOpImpl(
1023
0
      WO, BB, [WO](const ConstantRange &CR1, const ConstantRange &CR2) {
1024
0
        return CR1.binaryOp(WO->getBinaryOp(), CR2);
1025
0
      });
1026
0
}
1027
1028
std::optional<ValueLatticeElement>
1029
0
LazyValueInfoImpl::solveBlockValueIntrinsic(IntrinsicInst *II, BasicBlock *BB) {
1030
0
  ValueLatticeElement MetadataVal = getFromRangeMetadata(II);
1031
0
  if (!ConstantRange::isIntrinsicSupported(II->getIntrinsicID())) {
1032
0
    LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
1033
0
                      << "' - unknown intrinsic.\n");
1034
0
    return MetadataVal;
1035
0
  }
1036
1037
0
  SmallVector<ConstantRange, 2> OpRanges;
1038
0
  for (Value *Op : II->args()) {
1039
0
    std::optional<ConstantRange> Range = getRangeFor(Op, II, BB);
1040
0
    if (!Range)
1041
0
      return std::nullopt;
1042
0
    OpRanges.push_back(*Range);
1043
0
  }
1044
1045
0
  return intersect(ValueLatticeElement::getRange(ConstantRange::intrinsic(
1046
0
                       II->getIntrinsicID(), OpRanges)),
1047
0
                   MetadataVal);
1048
0
}
1049
1050
std::optional<ValueLatticeElement>
1051
LazyValueInfoImpl::solveBlockValueExtractValue(ExtractValueInst *EVI,
1052
0
                                               BasicBlock *BB) {
1053
0
  if (auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()))
1054
0
    if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 0)
1055
0
      return solveBlockValueOverflowIntrinsic(WO, BB);
1056
1057
  // Handle extractvalue of insertvalue to allow further simplification
1058
  // based on replaced with.overflow intrinsics.
1059
0
  if (Value *V = simplifyExtractValueInst(
1060
0
          EVI->getAggregateOperand(), EVI->getIndices(),
1061
0
          EVI->getModule()->getDataLayout()))
1062
0
    return getBlockValue(V, BB, EVI);
1063
1064
0
  LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
1065
0
                    << "' - overdefined (unknown extractvalue).\n");
1066
0
  return ValueLatticeElement::getOverdefined();
1067
0
}
1068
1069
static bool matchICmpOperand(APInt &Offset, Value *LHS, Value *Val,
1070
0
                             ICmpInst::Predicate Pred) {
1071
0
  if (LHS == Val)
1072
0
    return true;
1073
1074
  // Handle range checking idiom produced by InstCombine. We will subtract the
1075
  // offset from the allowed range for RHS in this case.
1076
0
  const APInt *C;
1077
0
  if (match(LHS, m_Add(m_Specific(Val), m_APInt(C)))) {
1078
0
    Offset = *C;
1079
0
    return true;
1080
0
  }
1081
1082
  // Handle the symmetric case. This appears in saturation patterns like
1083
  // (x == 16) ? 16 : (x + 1).
1084
0
  if (match(Val, m_Add(m_Specific(LHS), m_APInt(C)))) {
1085
0
    Offset = -*C;
1086
0
    return true;
1087
0
  }
1088
1089
  // If (x | y) < C, then (x < C) && (y < C).
1090
0
  if (match(LHS, m_c_Or(m_Specific(Val), m_Value())) &&
1091
0
      (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE))
1092
0
    return true;
1093
1094
  // If (x & y) > C, then (x > C) && (y > C).
1095
0
  if (match(LHS, m_c_And(m_Specific(Val), m_Value())) &&
1096
0
      (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE))
1097
0
    return true;
1098
1099
0
  return false;
1100
0
}
1101
1102
/// Get value range for a "(Val + Offset) Pred RHS" condition.
1103
std::optional<ValueLatticeElement>
1104
LazyValueInfoImpl::getValueFromSimpleICmpCondition(CmpInst::Predicate Pred,
1105
                                                   Value *RHS,
1106
                                                   const APInt &Offset,
1107
                                                   Instruction *CxtI,
1108
0
                                                   bool UseBlockValue) {
1109
0
  ConstantRange RHSRange(RHS->getType()->getIntegerBitWidth(),
1110
0
                         /*isFullSet=*/true);
1111
0
  if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
1112
0
    RHSRange = ConstantRange(CI->getValue());
1113
0
  } else if (UseBlockValue) {
1114
0
    std::optional<ValueLatticeElement> R =
1115
0
        getBlockValue(RHS, CxtI->getParent(), CxtI);
1116
0
    if (!R)
1117
0
      return std::nullopt;
1118
0
    RHSRange = toConstantRange(*R, RHS->getType());
1119
0
  } else if (Instruction *I = dyn_cast<Instruction>(RHS)) {
1120
0
    if (auto *Ranges = I->getMetadata(LLVMContext::MD_range))
1121
0
      RHSRange = getConstantRangeFromMetadata(*Ranges);
1122
0
  }
1123
1124
0
  ConstantRange TrueValues =
1125
0
      ConstantRange::makeAllowedICmpRegion(Pred, RHSRange);
1126
0
  return ValueLatticeElement::getRange(TrueValues.subtract(Offset));
1127
0
}
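// --- Illustrative sketch (not part of LazyValueInfo.cpp) --------------------
// A hedged, worked example of the "(Val + Offset) Pred RHS" rule above for
// the InstCombine range-check idiom "(Val + 4) u< 10".  The helper name
// exampleSimpleICmpRange is hypothetical.
static ValueLatticeElement exampleSimpleICmpRange() {
  // The allowed region for the left-hand side of "u< 10" is [0, 10).
  ConstantRange LHSRegion = ConstantRange::makeAllowedICmpRegion(
      ICmpInst::ICMP_ULT, ConstantRange(APInt(32, 10)));
  // Undo the "+ 4" offset: Val itself must lie in the wrapping range [-4, 6).
  return ValueLatticeElement::getRange(LHSRegion.subtract(APInt(32, 4)));
}
// -----------------------------------------------------------------------------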
1128
1129
static std::optional<ConstantRange>
1130
getRangeViaSLT(CmpInst::Predicate Pred, APInt RHS,
1131
0
               function_ref<std::optional<ConstantRange>(const APInt &)> Fn) {
1132
0
  bool Invert = false;
1133
0
  if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
1134
0
    Pred = ICmpInst::getInversePredicate(Pred);
1135
0
    Invert = true;
1136
0
  }
1137
0
  if (Pred == ICmpInst::ICMP_SLE) {
1138
0
    Pred = ICmpInst::ICMP_SLT;
1139
0
    if (RHS.isMaxSignedValue())
1140
0
      return std::nullopt; // Could also return full/empty here, if we wanted.
1141
0
    ++RHS;
1142
0
  }
1143
0
  assert(Pred == ICmpInst::ICMP_SLT && "Must be signed predicate");
1144
0
  if (auto CR = Fn(RHS))
1145
0
    return Invert ? CR->inverse() : CR;
1146
0
  return std::nullopt;
1147
0
}
1148
1149
std::optional<ValueLatticeElement> LazyValueInfoImpl::getValueFromICmpCondition(
1150
0
    Value *Val, ICmpInst *ICI, bool isTrueDest, bool UseBlockValue) {
1151
0
  Value *LHS = ICI->getOperand(0);
1152
0
  Value *RHS = ICI->getOperand(1);
1153
1154
  // Get the predicate that must hold along the considered edge.
1155
0
  CmpInst::Predicate EdgePred =
1156
0
      isTrueDest ? ICI->getPredicate() : ICI->getInversePredicate();
1157
1158
0
  if (isa<Constant>(RHS)) {
1159
0
    if (ICI->isEquality() && LHS == Val) {
1160
0
      if (EdgePred == ICmpInst::ICMP_EQ)
1161
0
        return ValueLatticeElement::get(cast<Constant>(RHS));
1162
0
      else if (!isa<UndefValue>(RHS))
1163
0
        return ValueLatticeElement::getNot(cast<Constant>(RHS));
1164
0
    }
1165
0
  }
1166
1167
0
  Type *Ty = Val->getType();
1168
0
  if (!Ty->isIntegerTy())
1169
0
    return ValueLatticeElement::getOverdefined();
1170
1171
0
  unsigned BitWidth = Ty->getScalarSizeInBits();
1172
0
  APInt Offset(BitWidth, 0);
1173
0
  if (matchICmpOperand(Offset, LHS, Val, EdgePred))
1174
0
    return getValueFromSimpleICmpCondition(EdgePred, RHS, Offset, ICI,
1175
0
                                           UseBlockValue);
1176
1177
0
  CmpInst::Predicate SwappedPred = CmpInst::getSwappedPredicate(EdgePred);
1178
0
  if (matchICmpOperand(Offset, RHS, Val, SwappedPred))
1179
0
    return getValueFromSimpleICmpCondition(SwappedPred, LHS, Offset, ICI,
1180
0
                                           UseBlockValue);
1181
1182
0
  const APInt *Mask, *C;
1183
0
  if (match(LHS, m_And(m_Specific(Val), m_APInt(Mask))) &&
1184
0
      match(RHS, m_APInt(C))) {
1185
    // If (Val & Mask) == C then all the masked bits are known and we can
1186
    // compute a value range based on that.
1187
0
    if (EdgePred == ICmpInst::ICMP_EQ) {
1188
0
      KnownBits Known;
1189
0
      Known.Zero = ~*C & *Mask;
1190
0
      Known.One = *C & *Mask;
1191
0
      return ValueLatticeElement::getRange(
1192
0
          ConstantRange::fromKnownBits(Known, /*IsSigned*/ false));
1193
0
    }
1194
    // If (Val & Mask) != 0 then the value must be larger than the lowest set
1195
    // bit of Mask.
1196
0
    if (EdgePred == ICmpInst::ICMP_NE && !Mask->isZero() && C->isZero()) {
1197
0
      return ValueLatticeElement::getRange(ConstantRange::getNonEmpty(
1198
0
          APInt::getOneBitSet(BitWidth, Mask->countr_zero()),
1199
0
          APInt::getZero(BitWidth)));
1200
0
    }
1201
0
  }
1202
1203
  // If (X urem Modulus) >= C, then X >= C.
1204
  // If trunc X >= C, then X >= C.
1205
  // TODO: An upper bound could be computed as well.
1206
0
  if (match(LHS, m_CombineOr(m_URem(m_Specific(Val), m_Value()),
1207
0
                             m_Trunc(m_Specific(Val)))) &&
1208
0
      match(RHS, m_APInt(C))) {
1209
    // Use the icmp region so we don't have to deal with different predicates.
1210
0
    ConstantRange CR = ConstantRange::makeExactICmpRegion(EdgePred, *C);
1211
0
    if (!CR.isEmptySet())
1212
0
      return ValueLatticeElement::getRange(ConstantRange::getNonEmpty(
1213
0
          CR.getUnsignedMin().zext(BitWidth), APInt(BitWidth, 0)));
1214
0
  }
1215
1216
  // Recognize:
1217
  // icmp slt (ashr X, ShAmtC), C --> icmp slt X, C << ShAmtC
1218
  // Preconditions: (C << ShAmtC) >> ShAmtC == C
1219
0
  const APInt *ShAmtC;
1220
0
  if (CmpInst::isSigned(EdgePred) &&
1221
0
      match(LHS, m_AShr(m_Specific(Val), m_APInt(ShAmtC))) &&
1222
0
      match(RHS, m_APInt(C))) {
1223
0
    auto CR = getRangeViaSLT(
1224
0
        EdgePred, *C, [&](const APInt &RHS) -> std::optional<ConstantRange> {
1225
0
          APInt New = RHS << *ShAmtC;
1226
0
          if ((New.ashr(*ShAmtC)) != RHS)
1227
0
            return std::nullopt;
1228
0
          return ConstantRange::getNonEmpty(
1229
0
              APInt::getSignedMinValue(New.getBitWidth()), New);
1230
0
        });
1231
0
    if (CR)
1232
0
      return ValueLatticeElement::getRange(*CR);
1233
0
  }
1234
1235
0
  return ValueLatticeElement::getOverdefined();
1236
0
}
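// --- Illustrative sketch (not part of LazyValueInfo.cpp) --------------------
// A hedged, worked example of the "(Val & Mask) == C" case above: the masked
// bits become known and fromKnownBits() turns them into a range.  The helper
// name exampleMaskedEqualityRange is hypothetical.
static ValueLatticeElement exampleMaskedEqualityRange() {
  // Condition: (Val & 0xF0) == 0x30 on an i8 value.
  APInt Mask(8, 0xF0), C(8, 0x30);
  KnownBits Known(8);
  Known.Zero = ~C & Mask; // bits 7 and 6 are known to be zero
  Known.One = C & Mask;   // bits 5 and 4 are known to be one
  // Only the low four bits stay unknown, so Val lies in [0x30, 0x40).
  return ValueLatticeElement::getRange(
      ConstantRange::fromKnownBits(Known, /*IsSigned=*/false));
}
// -----------------------------------------------------------------------------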
1237
1238
// Handle conditions of the form
1239
// extractvalue(op.with.overflow(%x, C), 1).
1240
static ValueLatticeElement getValueFromOverflowCondition(
1241
0
    Value *Val, WithOverflowInst *WO, bool IsTrueDest) {
1242
  // TODO: This only works with a constant RHS for now. We could also compute
1243
  // the range of the RHS, but this doesn't fit into the current structure of
1244
  // the edge value calculation.
1245
0
  const APInt *C;
1246
0
  if (WO->getLHS() != Val || !match(WO->getRHS(), m_APInt(C)))
1247
0
    return ValueLatticeElement::getOverdefined();
1248
1249
  // Calculate the possible values of %x for which no overflow occurs.
1250
0
  ConstantRange NWR = ConstantRange::makeExactNoWrapRegion(
1251
0
      WO->getBinaryOp(), *C, WO->getNoWrapKind());
1252
1253
  // If overflow is false, %x is constrained to NWR. If overflow is true, %x is
1254
  // constrained to its inverse (all values that might cause overflow).
1255
0
  if (IsTrueDest)
1256
0
    NWR = NWR.inverse();
1257
0
  return ValueLatticeElement::getRange(NWR);
1258
0
}
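// --- Illustrative sketch (not part of LazyValueInfo.cpp) --------------------
// A hedged, worked example of the overflow-flag reasoning above for
// "uadd.with.overflow(i8 %x, 100)".  The helper name exampleOverflowRegion is
// hypothetical.
static ValueLatticeElement exampleOverflowRegion(bool OverflowBitIsTrue) {
  // %x values for which "%x + 100" does NOT wrap as unsigned i8: [0, 156).
  ConstantRange NWR = ConstantRange::makeExactNoWrapRegion(
      Instruction::Add, APInt(8, 100),
      OverflowingBinaryOperator::NoUnsignedWrap);
  // On the edge where the overflow bit is true, %x must lie outside that
  // region, i.e. it is one of 156..255.
  if (OverflowBitIsTrue)
    NWR = NWR.inverse();
  return ValueLatticeElement::getRange(NWR);
}
// -----------------------------------------------------------------------------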
1259
1260
std::optional<ValueLatticeElement>
1261
LazyValueInfoImpl::getValueFromCondition(Value *Val, Value *Cond,
1262
                                         bool IsTrueDest, bool UseBlockValue,
1263
0
                                         unsigned Depth) {
1264
0
  if (ICmpInst *ICI = dyn_cast<ICmpInst>(Cond))
1265
0
    return getValueFromICmpCondition(Val, ICI, IsTrueDest, UseBlockValue);
1266
1267
0
  if (auto *EVI = dyn_cast<ExtractValueInst>(Cond))
1268
0
    if (auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()))
1269
0
      if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 1)
1270
0
        return getValueFromOverflowCondition(Val, WO, IsTrueDest);
1271
1272
0
  if (++Depth == MaxAnalysisRecursionDepth)
1273
0
    return ValueLatticeElement::getOverdefined();
1274
1275
0
  Value *N;
1276
0
  if (match(Cond, m_Not(m_Value(N))))
1277
0
    return getValueFromCondition(Val, N, !IsTrueDest, UseBlockValue, Depth);
1278
1279
0
  Value *L, *R;
1280
0
  bool IsAnd;
1281
0
  if (match(Cond, m_LogicalAnd(m_Value(L), m_Value(R))))
1282
0
    IsAnd = true;
1283
0
  else if (match(Cond, m_LogicalOr(m_Value(L), m_Value(R))))
1284
0
    IsAnd = false;
1285
0
  else
1286
0
    return ValueLatticeElement::getOverdefined();
1287
1288
0
  std::optional<ValueLatticeElement> LV =
1289
0
      getValueFromCondition(Val, L, IsTrueDest, UseBlockValue, Depth);
1290
0
  if (!LV)
1291
0
    return std::nullopt;
1292
0
  std::optional<ValueLatticeElement> RV =
1293
0
      getValueFromCondition(Val, R, IsTrueDest, UseBlockValue, Depth);
1294
0
  if (!RV)
1295
0
    return std::nullopt;
1296
1297
  // if (L && R) -> intersect L and R
1298
  // if (!(L || R)) -> intersect !L and !R
1299
  // if (L || R) -> union L and R
1300
  // if (!(L && R)) -> union !L and !R
1301
0
  if (IsTrueDest ^ IsAnd) {
1302
0
    LV->mergeIn(*RV);
1303
0
    return *LV;
1304
0
  }
1305
1306
0
  return intersect(*LV, *RV);
1307
0
}
1308
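A worked instance of the merge rule above (standalone sketch; the helper name is ours): on the true edge of "x sgt 5 && x slt 10" the two facts are intersected, while the false edge of the same condition would union the negated facts instead.

#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

static ConstantRange rangeOnTrueEdgeOfAnd() {
  // Facts implied by each operand of the condition for a 32-bit x.
  ConstantRange GT5 =
      ConstantRange::makeExactICmpRegion(CmpInst::ICMP_SGT, APInt(32, 5));
  ConstantRange LT10 =
      ConstantRange::makeExactICmpRegion(CmpInst::ICMP_SLT, APInt(32, 10));
  // True edge of the 'and': intersect, giving [6, 10).  The false edge would
  // instead union the complements, i.e. everything outside [6, 10).
  return GT5.intersectWith(LT10);
}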
1309
// Return true if Usr has Op as an operand, otherwise false.
1310
0
static bool usesOperand(User *Usr, Value *Op) {
1311
0
  return is_contained(Usr->operands(), Op);
1312
0
}
1313
1314
// Return true if the instruction type of Val is supported by
1315
// constantFoldUser(). Currently CastInst, BinaryOperator and FreezeInst only.
1316
// Call this before calling constantFoldUser() to find out if it's even worth
1317
// attempting to call it.
1318
0
static bool isOperationFoldable(User *Usr) {
1319
0
  return isa<CastInst>(Usr) || isa<BinaryOperator>(Usr) || isa<FreezeInst>(Usr);
1320
0
}
1321
1322
// Check if Usr can be simplified to an integer constant when the value of one
1323
// of its operands Op is an integer constant OpConstVal. If so, return it as a
1324
// lattice value range with a single element or otherwise return an overdefined
1325
// lattice value.
1326
static ValueLatticeElement constantFoldUser(User *Usr, Value *Op,
1327
                                            const APInt &OpConstVal,
1328
0
                                            const DataLayout &DL) {
1329
0
  assert(isOperationFoldable(Usr) && "Precondition");
1330
0
  Constant* OpConst = Constant::getIntegerValue(Op->getType(), OpConstVal);
1331
  // Check if Usr can be simplified to a constant.
1332
0
  if (auto *CI = dyn_cast<CastInst>(Usr)) {
1333
0
    assert(CI->getOperand(0) == Op && "Operand 0 isn't Op");
1334
0
    if (auto *C = dyn_cast_or_null<ConstantInt>(
1335
0
            simplifyCastInst(CI->getOpcode(), OpConst,
1336
0
                             CI->getDestTy(), DL))) {
1337
0
      return ValueLatticeElement::getRange(ConstantRange(C->getValue()));
1338
0
    }
1339
0
  } else if (auto *BO = dyn_cast<BinaryOperator>(Usr)) {
1340
0
    bool Op0Match = BO->getOperand(0) == Op;
1341
0
    bool Op1Match = BO->getOperand(1) == Op;
1342
0
    assert((Op0Match || Op1Match) &&
1343
0
           "Operand 0 nor Operand 1 isn't a match");
1344
0
    Value *LHS = Op0Match ? OpConst : BO->getOperand(0);
1345
0
    Value *RHS = Op1Match ? OpConst : BO->getOperand(1);
1346
0
    if (auto *C = dyn_cast_or_null<ConstantInt>(
1347
0
            simplifyBinOp(BO->getOpcode(), LHS, RHS, DL))) {
1348
0
      return ValueLatticeElement::getRange(ConstantRange(C->getValue()));
1349
0
    }
1350
0
  } else if (isa<FreezeInst>(Usr)) {
1351
0
    assert(cast<FreezeInst>(Usr)->getOperand(0) == Op && "Operand 0 isn't Op");
1352
0
    return ValueLatticeElement::getRange(ConstantRange(OpConstVal));
1353
0
  }
1354
0
  return ValueLatticeElement::getOverdefined();
1355
0
}
1356
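To make the folding step concrete, here is a hypothetical standalone sketch (it is not how this file is structured, and it goes through ConstantFoldBinaryOpOperands rather than the simplify* calls used above): once one operand of "add i8 %Op, 1" is pinned to 93, the folder collapses the whole instruction to the single value 94.

#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
#include <optional>
using namespace llvm;

// Fold "add i8 %Op, 1" under the assumption %Op == 93; the folder returns the
// single value 94, which constantFoldUser would wrap in a one-element range.
static std::optional<APInt> foldAddWithKnownOp(LLVMContext &Ctx,
                                               const DataLayout &DL) {
  Constant *Op = ConstantInt::get(Type::getInt8Ty(Ctx), 93);
  Constant *One = ConstantInt::get(Type::getInt8Ty(Ctx), 1);
  if (auto *CI = dyn_cast_or_null<ConstantInt>(
          ConstantFoldBinaryOpOperands(Instruction::Add, Op, One, DL)))
    return CI->getValue();
  return std::nullopt;
}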
1357
/// Compute the value of Val on the edge BBFrom -> BBTo.
1358
std::optional<ValueLatticeElement>
1359
LazyValueInfoImpl::getEdgeValueLocal(Value *Val, BasicBlock *BBFrom,
1360
0
                                     BasicBlock *BBTo, bool UseBlockValue) {
1361
  // TODO: Handle more complex conditionals. If (v == 0 || v2 < 1) is false, we
1362
  // know that v != 0.
1363
0
  if (BranchInst *BI = dyn_cast<BranchInst>(BBFrom->getTerminator())) {
1364
    // If this is a conditional branch and only one successor goes to BBTo, then
1365
    // we may be able to infer something from the condition.
1366
0
    if (BI->isConditional() &&
1367
0
        BI->getSuccessor(0) != BI->getSuccessor(1)) {
1368
0
      bool isTrueDest = BI->getSuccessor(0) == BBTo;
1369
0
      assert(BI->getSuccessor(!isTrueDest) == BBTo &&
1370
0
             "BBTo isn't a successor of BBFrom");
1371
0
      Value *Condition = BI->getCondition();
1372
1373
      // If V is the condition of the branch itself, then we know exactly what
1374
      // it is.
1375
0
      if (Condition == Val)
1376
0
        return ValueLatticeElement::get(ConstantInt::get(
1377
0
                              Type::getInt1Ty(Val->getContext()), isTrueDest));
1378
1379
      // If the condition of the branch is an equality comparison, we may be
1380
      // able to infer the value.
1381
0
      std::optional<ValueLatticeElement> Result =
1382
0
          getValueFromCondition(Val, Condition, isTrueDest, UseBlockValue);
1383
0
      if (!Result)
1384
0
        return std::nullopt;
1385
1386
0
      if (!Result->isOverdefined())
1387
0
        return Result;
1388
1389
0
      if (User *Usr = dyn_cast<User>(Val)) {
1390
0
        assert(Result->isOverdefined() && "Result isn't overdefined");
1391
        // Check with isOperationFoldable() first to avoid linearly iterating
1392
        // over the operands unnecessarily which can be expensive for
1393
        // instructions with many operands.
1394
0
        if (isa<IntegerType>(Usr->getType()) && isOperationFoldable(Usr)) {
1395
0
          const DataLayout &DL = BBTo->getModule()->getDataLayout();
1396
0
          if (usesOperand(Usr, Condition)) {
1397
            // If Val has Condition as an operand and Val can be folded into a
1398
            // constant with either Condition == true or Condition == false,
1399
            // propagate the constant.
1400
            // eg.
1401
            //   ; %Val is true on the edge to %then.
1402
            //   %Val = and i1 %Condition, true.
1403
            //   br %Condition, label %then, label %else
1404
0
            APInt ConditionVal(1, isTrueDest ? 1 : 0);
1405
0
            Result = constantFoldUser(Usr, Condition, ConditionVal, DL);
1406
0
          } else {
1407
            // If one of Val's operand has an inferred value, we may be able to
1408
            // infer the value of Val.
1409
            // eg.
1410
            //    ; %Val is 94 on the edge to %then.
1411
            //    %Val = add i8 %Op, 1
1412
            //    %Condition = icmp eq i8 %Op, 93
1413
            //    br i1 %Condition, label %then, label %else
1414
0
            for (unsigned i = 0; i < Usr->getNumOperands(); ++i) {
1415
0
              Value *Op = Usr->getOperand(i);
1416
0
              ValueLatticeElement OpLatticeVal = *getValueFromCondition(
1417
0
                  Op, Condition, isTrueDest, /*UseBlockValue*/ false);
1418
0
              if (std::optional<APInt> OpConst =
1419
0
                      OpLatticeVal.asConstantInteger()) {
1420
0
                Result = constantFoldUser(Usr, Op, *OpConst, DL);
1421
0
                break;
1422
0
              }
1423
0
            }
1424
0
          }
1425
0
        }
1426
0
      }
1427
0
      if (!Result->isOverdefined())
1428
0
        return Result;
1429
0
    }
1430
0
  }
1431
1432
  // If the edge was formed by a switch on the value, then we may know exactly
1433
  // what it is.
1434
0
  if (SwitchInst *SI = dyn_cast<SwitchInst>(BBFrom->getTerminator())) {
1435
0
    Value *Condition = SI->getCondition();
1436
0
    if (!isa<IntegerType>(Val->getType()))
1437
0
      return ValueLatticeElement::getOverdefined();
1438
0
    bool ValUsesConditionAndMayBeFoldable = false;
1439
0
    if (Condition != Val) {
1440
      // Check if Val has Condition as an operand.
1441
0
      if (User *Usr = dyn_cast<User>(Val))
1442
0
        ValUsesConditionAndMayBeFoldable = isOperationFoldable(Usr) &&
1443
0
            usesOperand(Usr, Condition);
1444
0
      if (!ValUsesConditionAndMayBeFoldable)
1445
0
        return ValueLatticeElement::getOverdefined();
1446
0
    }
1447
0
    assert((Condition == Val || ValUsesConditionAndMayBeFoldable) &&
1448
0
           "Condition != Val nor Val doesn't use Condition");
1449
1450
0
    bool DefaultCase = SI->getDefaultDest() == BBTo;
1451
0
    unsigned BitWidth = Val->getType()->getIntegerBitWidth();
1452
0
    ConstantRange EdgesVals(BitWidth, DefaultCase/*isFullSet*/);
1453
1454
0
    for (auto Case : SI->cases()) {
1455
0
      APInt CaseValue = Case.getCaseValue()->getValue();
1456
0
      ConstantRange EdgeVal(CaseValue);
1457
0
      if (ValUsesConditionAndMayBeFoldable) {
1458
0
        User *Usr = cast<User>(Val);
1459
0
        const DataLayout &DL = BBTo->getModule()->getDataLayout();
1460
0
        ValueLatticeElement EdgeLatticeVal =
1461
0
            constantFoldUser(Usr, Condition, CaseValue, DL);
1462
0
        if (EdgeLatticeVal.isOverdefined())
1463
0
          return ValueLatticeElement::getOverdefined();
1464
0
        EdgeVal = EdgeLatticeVal.getConstantRange();
1465
0
      }
1466
0
      if (DefaultCase) {
1467
        // It is possible that the default destination is the destination of
1468
        // some cases. We cannot perform difference for those cases.
1469
        // We know Condition != CaseValue in BBTo.  In some cases we can use
1470
        // this to infer that Val (= f(Condition)) is != f(CaseValue).  For now, we
1471
        // only do this when f is identity (i.e. Val == Condition), but we
1472
        // should be able to do this for any injective f.
1473
0
        if (Case.getCaseSuccessor() != BBTo && Condition == Val)
1474
0
          EdgesVals = EdgesVals.difference(EdgeVal);
1475
0
      } else if (Case.getCaseSuccessor() == BBTo)
1476
0
        EdgesVals = EdgesVals.unionWith(EdgeVal);
1477
0
    }
1478
0
    return ValueLatticeElement::getRange(std::move(EdgesVals));
1479
0
  }
1480
0
  return ValueLatticeElement::getOverdefined();
1481
0
}
1482
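For the switch handling above, a small worked example (standalone sketch with made-up values): "switch i8 %x" with cases 1 and 2 branching to %bb and everything else to %default constrains %x to [1, 3) on the edge to %bb, and to the full set minus {1, 2} on the edge to %default.

#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
using namespace llvm;

static void switchEdgeRanges(ConstantRange &ToCaseBB, ConstantRange &ToDefault) {
  // Edge to a shared case successor: union of the matching case values.
  ToCaseBB = ConstantRange(APInt(8, 1)).unionWith(ConstantRange(APInt(8, 2)));
  // Edge to the default destination: start from the full set and subtract each
  // case value whose successor is not the default block.
  ToDefault = ConstantRange(/*BitWidth=*/8, /*isFullSet=*/true)
                  .difference(ConstantRange(APInt(8, 1)))
                  .difference(ConstantRange(APInt(8, 2)));
}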
1483
/// Compute the value of Val on the edge BBFrom -> BBTo or the value at
1484
/// the basic block if the edge does not constrain Val.
1485
std::optional<ValueLatticeElement>
1486
LazyValueInfoImpl::getEdgeValue(Value *Val, BasicBlock *BBFrom,
1487
0
                                BasicBlock *BBTo, Instruction *CxtI) {
1488
  // If already a constant, there is nothing to compute.
1489
0
  if (Constant *VC = dyn_cast<Constant>(Val))
1490
0
    return ValueLatticeElement::get(VC);
1491
1492
0
  std::optional<ValueLatticeElement> LocalResult =
1493
0
      getEdgeValueLocal(Val, BBFrom, BBTo, /*UseBlockValue*/ true);
1494
0
  if (!LocalResult)
1495
0
    return std::nullopt;
1496
1497
0
  if (hasSingleValue(*LocalResult))
1498
    // Can't get any more precise here
1499
0
    return LocalResult;
1500
1501
0
  std::optional<ValueLatticeElement> OptInBlock =
1502
0
      getBlockValue(Val, BBFrom, BBFrom->getTerminator());
1503
0
  if (!OptInBlock)
1504
0
    return std::nullopt;
1505
0
  ValueLatticeElement &InBlock = *OptInBlock;
1506
1507
  // We can use the context instruction (generically the ultimate instruction
1508
  // the calling pass is trying to simplify) here, even though the result of
1509
  // this function is generally cached when called from the solve* functions
1510
  // (and that cached result might be used with queries using a different
1511
  // context instruction), because when this function is called from the solve*
1512
  // functions, the context instruction is not provided. When called from
1513
  // LazyValueInfoImpl::getValueOnEdge, the context instruction is provided,
1514
  // but then the result is not cached.
1515
0
  intersectAssumeOrGuardBlockValueConstantRange(Val, InBlock, CxtI);
1516
1517
0
  return intersect(*LocalResult, InBlock);
1518
0
}
1519
1520
ValueLatticeElement LazyValueInfoImpl::getValueInBlock(Value *V, BasicBlock *BB,
1521
0
                                                       Instruction *CxtI) {
1522
0
  LLVM_DEBUG(dbgs() << "LVI Getting block end value " << *V << " at '"
1523
0
                    << BB->getName() << "'\n");
1524
1525
0
  assert(BlockValueStack.empty() && BlockValueSet.empty());
1526
0
  std::optional<ValueLatticeElement> OptResult = getBlockValue(V, BB, CxtI);
1527
0
  if (!OptResult) {
1528
0
    solve();
1529
0
    OptResult = getBlockValue(V, BB, CxtI);
1530
0
    assert(OptResult && "Value not available after solving");
1531
0
  }
1532
1533
0
  ValueLatticeElement Result = *OptResult;
1534
0
  LLVM_DEBUG(dbgs() << "  Result = " << Result << "\n");
1535
0
  return Result;
1536
0
}
1537
1538
0
ValueLatticeElement LazyValueInfoImpl::getValueAt(Value *V, Instruction *CxtI) {
1539
0
  LLVM_DEBUG(dbgs() << "LVI Getting value " << *V << " at '" << CxtI->getName()
1540
0
                    << "'\n");
1541
1542
0
  if (auto *C = dyn_cast<Constant>(V))
1543
0
    return ValueLatticeElement::get(C);
1544
1545
0
  ValueLatticeElement Result = ValueLatticeElement::getOverdefined();
1546
0
  if (auto *I = dyn_cast<Instruction>(V))
1547
0
    Result = getFromRangeMetadata(I);
1548
0
  intersectAssumeOrGuardBlockValueConstantRange(V, Result, CxtI);
1549
1550
0
  LLVM_DEBUG(dbgs() << "  Result = " << Result << "\n");
1551
0
  return Result;
1552
0
}
1553
1554
ValueLatticeElement LazyValueInfoImpl::
1555
getValueOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB,
1556
0
               Instruction *CxtI) {
1557
0
  LLVM_DEBUG(dbgs() << "LVI Getting edge value " << *V << " from '"
1558
0
                    << FromBB->getName() << "' to '" << ToBB->getName()
1559
0
                    << "'\n");
1560
1561
0
  std::optional<ValueLatticeElement> Result =
1562
0
      getEdgeValue(V, FromBB, ToBB, CxtI);
1563
0
  while (!Result) {
1564
    // As the worklist only explicitly tracks block values (but not edge values)
1565
    // we may have to call solve() multiple times, as the edge value calculation
1566
    // may request additional block values.
1567
0
    solve();
1568
0
    Result = getEdgeValue(V, FromBB, ToBB, CxtI);
1569
0
  }
1570
1571
0
  LLVM_DEBUG(dbgs() << "  Result = " << *Result << "\n");
1572
0
  return *Result;
1573
0
}
1574
1575
0
ValueLatticeElement LazyValueInfoImpl::getValueAtUse(const Use &U) {
1576
0
  Value *V = U.get();
1577
0
  auto *CxtI = cast<Instruction>(U.getUser());
1578
0
  ValueLatticeElement VL = getValueInBlock(V, CxtI->getParent(), CxtI);
1579
1580
  // Check whether the only (possibly transitive) use of the value is in a
1581
  // position where V can be constrained by a select or branch condition.
1582
0
  const Use *CurrU = &U;
1583
  // TODO: Increase limit?
1584
0
  const unsigned MaxUsesToInspect = 3;
1585
0
  for (unsigned I = 0; I < MaxUsesToInspect; ++I) {
1586
0
    std::optional<ValueLatticeElement> CondVal;
1587
0
    auto *CurrI = cast<Instruction>(CurrU->getUser());
1588
0
    if (auto *SI = dyn_cast<SelectInst>(CurrI)) {
1589
      // If the value is undef, a different value may be chosen in
1590
      // the select condition and at use.
1591
0
      if (!isGuaranteedNotToBeUndef(SI->getCondition(), AC))
1592
0
        break;
1593
0
      if (CurrU->getOperandNo() == 1)
1594
0
        CondVal =
1595
0
            *getValueFromCondition(V, SI->getCondition(), /*IsTrueDest*/ true,
1596
0
                                   /*UseBlockValue*/ false);
1597
0
      else if (CurrU->getOperandNo() == 2)
1598
0
        CondVal =
1599
0
            *getValueFromCondition(V, SI->getCondition(), /*IsTrueDest*/ false,
1600
0
                                   /*UseBlockValue*/ false);
1601
0
    } else if (auto *PHI = dyn_cast<PHINode>(CurrI)) {
1602
      // TODO: Use non-local query?
1603
0
      CondVal = *getEdgeValueLocal(V, PHI->getIncomingBlock(*CurrU),
1604
0
                                   PHI->getParent(), /*UseBlockValue*/ false);
1605
0
    }
1606
0
    if (CondVal)
1607
0
      VL = intersect(VL, *CondVal);
1608
1609
    // Only follow one-use chain, to allow direct intersection of conditions.
1610
    // If there are multiple uses, we would have to intersect with the union of
1611
    // all conditions at different uses.
1612
    // Stop walking if we hit a non-speculatable instruction. Even if the
1613
    // result is only used under a specific condition, executing the
1614
    // instruction itself may cause side effects or UB already.
1615
    // This also disallows looking through phi nodes: If the phi node is part
1616
    // of a cycle, we might end up reasoning about values from different cycle
1617
    // iterations (PR60629).
1618
0
    if (!CurrI->hasOneUse() || !isSafeToSpeculativelyExecute(CurrI))
1619
0
      break;
1620
0
    CurrU = &*CurrI->use_begin();
1621
0
  }
1622
0
  return VL;
1623
0
}
1624
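The use-walk above can be condensed into the following standalone sketch (the helper name is ours): keep following the value through users that have exactly one use and are safe to speculate, so that each select or phi condition met along the way can be intersected directly with the block value.

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Follow U through single-use, speculatable users for at most MaxSteps hops
// and return the last use reached, mirroring the loop in getValueAtUse().
static const Use *walkSingleUseChain(const Use &U, unsigned MaxSteps = 3) {
  const Use *Curr = &U;
  for (unsigned I = 0; I < MaxSteps; ++I) {
    auto *UserI = cast<Instruction>(Curr->getUser());
    if (!UserI->hasOneUse() || !isSafeToSpeculativelyExecute(UserI))
      break;
    Curr = &*UserI->use_begin();
  }
  return Curr;
}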
1625
void LazyValueInfoImpl::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
1626
0
                                   BasicBlock *NewSucc) {
1627
0
  TheCache.threadEdgeImpl(OldSucc, NewSucc);
1628
0
}
1629
1630
//===----------------------------------------------------------------------===//
1631
//                            LazyValueInfo Impl
1632
//===----------------------------------------------------------------------===//
1633
1634
0
bool LazyValueInfoWrapperPass::runOnFunction(Function &F) {
1635
0
  Info.AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1636
1637
0
  if (auto *Impl = Info.getImpl())
1638
0
    Impl->clear();
1639
1640
  // Fully lazy.
1641
0
  return false;
1642
0
}
1643
1644
0
void LazyValueInfoWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1645
0
  AU.setPreservesAll();
1646
0
  AU.addRequired<AssumptionCacheTracker>();
1647
0
  AU.addRequired<TargetLibraryInfoWrapperPass>();
1648
0
}
1649
1650
0
LazyValueInfo &LazyValueInfoWrapperPass::getLVI() { return Info; }
1651
1652
/// This lazily constructs the LazyValueInfoImpl.
1653
0
LazyValueInfoImpl &LazyValueInfo::getOrCreateImpl(const Module *M) {
1654
0
  if (!PImpl) {
1655
0
    assert(M && "getCache() called with a null Module");
1656
0
    const DataLayout &DL = M->getDataLayout();
1657
0
    Function *GuardDecl =
1658
0
        M->getFunction(Intrinsic::getName(Intrinsic::experimental_guard));
1659
0
    PImpl = new LazyValueInfoImpl(AC, DL, GuardDecl);
1660
0
  }
1661
0
  return *static_cast<LazyValueInfoImpl *>(PImpl);
1662
0
}
1663
1664
0
LazyValueInfoImpl *LazyValueInfo::getImpl() {
1665
0
  if (!PImpl)
1666
0
    return nullptr;
1667
0
  return static_cast<LazyValueInfoImpl *>(PImpl);
1668
0
}
1669
1670
0
LazyValueInfo::~LazyValueInfo() { releaseMemory(); }
1671
1672
0
void LazyValueInfo::releaseMemory() {
1673
  // If the cache was allocated, free it.
1674
0
  if (auto *Impl = getImpl()) {
1675
0
    delete &*Impl;
1676
0
    PImpl = nullptr;
1677
0
  }
1678
0
}
1679
1680
bool LazyValueInfo::invalidate(Function &F, const PreservedAnalyses &PA,
1681
0
                               FunctionAnalysisManager::Invalidator &Inv) {
1682
  // We need to invalidate if we have either failed to preserve this analysis's
1683
  // result directly or if any of its dependencies have been invalidated.
1684
0
  auto PAC = PA.getChecker<LazyValueAnalysis>();
1685
0
  if (!(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()))
1686
0
    return true;
1687
1688
0
  return false;
1689
0
}
1690
1691
0
void LazyValueInfoWrapperPass::releaseMemory() { Info.releaseMemory(); }
1692
1693
LazyValueInfo LazyValueAnalysis::run(Function &F,
1694
0
                                     FunctionAnalysisManager &FAM) {
1695
0
  auto &AC = FAM.getResult<AssumptionAnalysis>(F);
1696
1697
0
  return LazyValueInfo(&AC, &F.getParent()->getDataLayout());
1698
0
}
1699
1700
/// Returns true if we can statically tell that this value will never be a
1701
/// "useful" constant.  In practice, this means we've got something like an
1702
/// alloca or a malloc call for which a comparison against a constant can
1703
/// only be guarding dead code.  Note that we are potentially giving up some
1704
/// precision in dead code (a constant result) in favour of avoiding an
1705
/// expensive search for an easily answered common query.
1706
0
static bool isKnownNonConstant(Value *V) {
1707
0
  V = V->stripPointerCasts();
1708
  // The return value of an alloca cannot be a Constant.
1709
0
  if (isa<AllocaInst>(V))
1710
0
    return true;
1711
0
  return false;
1712
0
}
1713
1714
0
Constant *LazyValueInfo::getConstant(Value *V, Instruction *CxtI) {
1715
  // Bail out early if V is known not to be a Constant.
1716
0
  if (isKnownNonConstant(V))
1717
0
    return nullptr;
1718
1719
0
  BasicBlock *BB = CxtI->getParent();
1720
0
  ValueLatticeElement Result =
1721
0
      getOrCreateImpl(BB->getModule()).getValueInBlock(V, BB, CxtI);
1722
1723
0
  if (Result.isConstant())
1724
0
    return Result.getConstant();
1725
0
  if (Result.isConstantRange()) {
1726
0
    const ConstantRange &CR = Result.getConstantRange();
1727
0
    if (const APInt *SingleVal = CR.getSingleElement())
1728
0
      return ConstantInt::get(V->getContext(), *SingleVal);
1729
0
  }
1730
0
  return nullptr;
1731
0
}
1732
1733
ConstantRange LazyValueInfo::getConstantRange(Value *V, Instruction *CxtI,
1734
0
                                              bool UndefAllowed) {
1735
0
  assert(V->getType()->isIntegerTy());
1736
0
  BasicBlock *BB = CxtI->getParent();
1737
0
  ValueLatticeElement Result =
1738
0
      getOrCreateImpl(BB->getModule()).getValueInBlock(V, BB, CxtI);
1739
0
  return toConstantRange(Result, V->getType(), UndefAllowed);
1740
0
}
1741
1742
ConstantRange LazyValueInfo::getConstantRangeAtUse(const Use &U,
1743
0
                                                   bool UndefAllowed) {
1744
0
  auto *Inst = cast<Instruction>(U.getUser());
1745
0
  ValueLatticeElement Result =
1746
0
      getOrCreateImpl(Inst->getModule()).getValueAtUse(U);
1747
0
  return toConstantRange(Result, U->getType(), UndefAllowed);
1748
0
}
1749
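A hypothetical new-pass-manager client of this API (the pass name, the iteration, and the printing are ours, purely for illustration): fetch the analysis result and query the range of each integer operand at its use site.

#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Prints the LVI-derived range of every integer operand at its use.
struct RangeDumpPass : PassInfoMixin<RangeDumpPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
    LazyValueInfo &LVI = AM.getResult<LazyValueAnalysis>(F);
    for (Instruction &I : instructions(F))
      for (Use &U : I.operands())
        if (U->getType()->isIntegerTy())
          errs() << *U.get() << " used by " << I << ": "
                 << LVI.getConstantRangeAtUse(U, /*UndefAllowed=*/false) << "\n";
    return PreservedAnalyses::all();
  }
};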
1750
/// Determine whether the specified value is known to be a
1751
/// constant on the specified edge. Return null if not.
1752
Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
1753
                                           BasicBlock *ToBB,
1754
0
                                           Instruction *CxtI) {
1755
0
  Module *M = FromBB->getModule();
1756
0
  ValueLatticeElement Result =
1757
0
      getOrCreateImpl(M).getValueOnEdge(V, FromBB, ToBB, CxtI);
1758
1759
0
  if (Result.isConstant())
1760
0
    return Result.getConstant();
1761
0
  if (Result.isConstantRange()) {
1762
0
    const ConstantRange &CR = Result.getConstantRange();
1763
0
    if (const APInt *SingleVal = CR.getSingleElement())
1764
0
      return ConstantInt::get(V->getContext(), *SingleVal);
1765
0
  }
1766
0
  return nullptr;
1767
0
}
1768
1769
ConstantRange LazyValueInfo::getConstantRangeOnEdge(Value *V,
1770
                                                    BasicBlock *FromBB,
1771
                                                    BasicBlock *ToBB,
1772
0
                                                    Instruction *CxtI) {
1773
0
  Module *M = FromBB->getModule();
1774
0
  ValueLatticeElement Result =
1775
0
      getOrCreateImpl(M).getValueOnEdge(V, FromBB, ToBB, CxtI);
1776
  // TODO: Should undef be allowed here?
1777
0
  return toConstantRange(Result, V->getType(), /*UndefAllowed*/ true);
1778
0
}
1779
1780
static LazyValueInfo::Tristate
1781
getPredicateResult(unsigned Pred, Constant *C, const ValueLatticeElement &Val,
1782
0
                   const DataLayout &DL) {
1783
  // If we know the value is a constant, evaluate the conditional.
1784
0
  Constant *Res = nullptr;
1785
0
  if (Val.isConstant()) {
1786
0
    Res = ConstantFoldCompareInstOperands(Pred, Val.getConstant(), C, DL);
1787
0
    if (ConstantInt *ResCI = dyn_cast_or_null<ConstantInt>(Res))
1788
0
      return ResCI->isZero() ? LazyValueInfo::False : LazyValueInfo::True;
1789
0
    return LazyValueInfo::Unknown;
1790
0
  }
1791
1792
0
  if (Val.isConstantRange()) {
1793
0
    ConstantInt *CI = dyn_cast<ConstantInt>(C);
1794
0
    if (!CI) return LazyValueInfo::Unknown;
1795
1796
0
    const ConstantRange &CR = Val.getConstantRange();
1797
0
    if (Pred == ICmpInst::ICMP_EQ) {
1798
0
      if (!CR.contains(CI->getValue()))
1799
0
        return LazyValueInfo::False;
1800
1801
0
      if (CR.isSingleElement())
1802
0
        return LazyValueInfo::True;
1803
0
    } else if (Pred == ICmpInst::ICMP_NE) {
1804
0
      if (!CR.contains(CI->getValue()))
1805
0
        return LazyValueInfo::True;
1806
1807
0
      if (CR.isSingleElement())
1808
0
        return LazyValueInfo::False;
1809
0
    } else {
1810
      // Handle more complex predicates.
1811
0
      ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(
1812
0
          (ICmpInst::Predicate)Pred, CI->getValue());
1813
0
      if (TrueValues.contains(CR))
1814
0
        return LazyValueInfo::True;
1815
0
      if (TrueValues.inverse().contains(CR))
1816
0
        return LazyValueInfo::False;
1817
0
    }
1818
0
    return LazyValueInfo::Unknown;
1819
0
  }
1820
1821
0
  if (Val.isNotConstant()) {
1822
    // If this is an equality comparison, we can try to fold it knowing that
1823
    // "V != C1".
1824
0
    if (Pred == ICmpInst::ICMP_EQ) {
1825
      // !C1 == C -> false iff C1 == C.
1826
0
      Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
1827
0
                                            Val.getNotConstant(), C, DL);
1828
0
      if (Res && Res->isNullValue())
1829
0
        return LazyValueInfo::False;
1830
0
    } else if (Pred == ICmpInst::ICMP_NE) {
1831
      // !C1 != C -> true iff C1 == C.
1832
0
      Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
1833
0
                                            Val.getNotConstant(), C, DL);
1834
0
      if (Res && Res->isNullValue())
1835
0
        return LazyValueInfo::True;
1836
0
    }
1837
0
    return LazyValueInfo::Unknown;
1838
0
  }
1839
1840
0
  return LazyValueInfo::Unknown;
1841
0
}
1842
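A worked instance of the range case above (standalone sketch; the helper name and return convention are ours): if the lattice value for %x is the range [0, 10), then "icmp ult %x, 20" has true-region [0, 20), which contains the whole range, so the predicate is known true; for "icmp ugt %x, 30" the inverse region contains it, so the predicate is known false.

#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// Returns 1 if Pred(x, C) holds for every x in CR, -1 if it holds for none,
// and 0 if the range contains both satisfying and non-satisfying values.
static int predicateOverRange(CmpInst::Predicate Pred, const ConstantRange &CR,
                              const APInt &C) {
  ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(Pred, C);
  if (TrueValues.contains(CR))
    return 1;
  if (TrueValues.inverse().contains(CR))
    return -1;
  return 0;
}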
1843
/// Determine whether the specified value comparison with a constant is known to
1844
/// be true or false on the specified CFG edge. Pred is a CmpInst predicate.
1845
LazyValueInfo::Tristate
1846
LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
1847
                                  BasicBlock *FromBB, BasicBlock *ToBB,
1848
0
                                  Instruction *CxtI) {
1849
0
  Module *M = FromBB->getModule();
1850
0
  ValueLatticeElement Result =
1851
0
      getOrCreateImpl(M).getValueOnEdge(V, FromBB, ToBB, CxtI);
1852
1853
0
  return getPredicateResult(Pred, C, Result, M->getDataLayout());
1854
0
}
1855
1856
LazyValueInfo::Tristate
1857
LazyValueInfo::getPredicateAt(unsigned Pred, Value *V, Constant *C,
1858
0
                              Instruction *CxtI, bool UseBlockValue) {
1859
  // Is or is not NonNull are common predicates being queried. If
1860
  // isKnownNonZero can tell us the result of the predicate, we can
1861
  // return it quickly. But this is only a fastpath, and falling
1862
  // through would still be correct.
1863
0
  Module *M = CxtI->getModule();
1864
0
  const DataLayout &DL = M->getDataLayout();
1865
0
  if (V->getType()->isPointerTy() && C->isNullValue() &&
1866
0
      isKnownNonZero(V->stripPointerCastsSameRepresentation(), DL)) {
1867
0
    if (Pred == ICmpInst::ICMP_EQ)
1868
0
      return LazyValueInfo::False;
1869
0
    else if (Pred == ICmpInst::ICMP_NE)
1870
0
      return LazyValueInfo::True;
1871
0
  }
1872
1873
0
  auto &Impl = getOrCreateImpl(M);
1874
0
  ValueLatticeElement Result =
1875
0
      UseBlockValue ? Impl.getValueInBlock(V, CxtI->getParent(), CxtI)
1876
0
                    : Impl.getValueAt(V, CxtI);
1877
0
  Tristate Ret = getPredicateResult(Pred, C, Result, DL);
1878
0
  if (Ret != Unknown)
1879
0
    return Ret;
1880
1881
  // Note: The following bit of code is somewhat distinct from the rest of LVI;
1882
  // LVI as a whole tries to compute a lattice value which is conservatively
1883
  // correct at a given location.  In this case, we have a predicate which we
1884
  // weren't able to prove about the merged result, and we're pushing that
1885
  // predicate back along each incoming edge to see if we can prove it
1886
  // separately for each input.  As a motivating example, consider:
1887
  // bb1:
1888
  //   %v1 = ... ; constantrange<1, 5>
1889
  //   br label %merge
1890
  // bb2:
1891
  //   %v2 = ... ; constantrange<10, 20>
1892
  //   br label %merge
1893
  // merge:
1894
  //   %phi = phi [%v1, %v2] ; constantrange<1,20>
1895
  //   %pred = icmp eq i32 %phi, 8
1896
  // We can't tell from the lattice value for '%phi' that '%pred' is false
1897
  // along each path, but by checking the predicate over each input separately,
1898
  // we can.
1899
  // We limit the search to one step backwards from the current BB and value.
1900
  // We could consider extending this to search further backwards through the
1901
  // CFG and/or value graph, but there are non-obvious compile time vs quality
1902
  // tradeoffs.
1903
0
  BasicBlock *BB = CxtI->getParent();
1904
1905
  // Function entry or an unreachable block.  Bail to avoid confusing
1906
  // analysis below.
1907
0
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
1908
0
  if (PI == PE)
1909
0
    return Unknown;
1910
1911
  // If V is a PHI node in the same block as the context, we need to ask
1912
  // questions about the predicate as applied to the incoming value along
1913
  // each edge. This is useful for eliminating cases where the predicate is
1914
  // known along all incoming edges.
1915
0
  if (auto *PHI = dyn_cast<PHINode>(V))
1916
0
    if (PHI->getParent() == BB) {
1917
0
      Tristate Baseline = Unknown;
1918
0
      for (unsigned i = 0, e = PHI->getNumIncomingValues(); i < e; i++) {
1919
0
        Value *Incoming = PHI->getIncomingValue(i);
1920
0
        BasicBlock *PredBB = PHI->getIncomingBlock(i);
1921
        // Note that PredBB may be BB itself.
1922
0
        Tristate Result =
1923
0
            getPredicateOnEdge(Pred, Incoming, C, PredBB, BB, CxtI);
1924
1925
        // Keep going as long as we've seen a consistent known result for
1926
        // all inputs.
1927
0
        Baseline = (i == 0) ? Result /* First iteration */
1928
0
                            : (Baseline == Result ? Baseline
1929
0
                                                  : Unknown); /* All others */
1930
0
        if (Baseline == Unknown)
1931
0
          break;
1932
0
      }
1933
0
      if (Baseline != Unknown)
1934
0
        return Baseline;
1935
0
    }
1936
1937
  // For a comparison where V is outside this block, it's possible
1938
  // that we've branched on it before. Look to see if the value is known
1939
  // on all incoming edges.
1940
0
  if (!isa<Instruction>(V) || cast<Instruction>(V)->getParent() != BB) {
1941
    // For each predecessor edge, determine if the comparison is true or false
1942
    // on that edge. If they're all true or all false, we can conclude
1943
    // the value of the comparison in this block.
1944
0
    Tristate Baseline = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
1945
0
    if (Baseline != Unknown) {
1946
      // Check that all remaining incoming values match the first one.
1947
0
      while (++PI != PE) {
1948
0
        Tristate Ret = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
1949
0
        if (Ret != Baseline)
1950
0
          break;
1951
0
      }
1952
      // If we terminated early, then one of the values didn't match.
1953
0
      if (PI == PE) {
1954
0
        return Baseline;
1955
0
      }
1956
0
    }
1957
0
  }
1958
1959
0
  return Unknown;
1960
0
}
1961
1962
LazyValueInfo::Tristate LazyValueInfo::getPredicateAt(unsigned P, Value *LHS,
1963
                                                      Value *RHS,
1964
                                                      Instruction *CxtI,
1965
0
                                                      bool UseBlockValue) {
1966
0
  CmpInst::Predicate Pred = (CmpInst::Predicate)P;
1967
1968
0
  if (auto *C = dyn_cast<Constant>(RHS))
1969
0
    return getPredicateAt(P, LHS, C, CxtI, UseBlockValue);
1970
0
  if (auto *C = dyn_cast<Constant>(LHS))
1971
0
    return getPredicateAt(CmpInst::getSwappedPredicate(Pred), RHS, C, CxtI,
1972
0
                          UseBlockValue);
1973
1974
  // Got two non-Constant values. Try to determine the comparison results based
1975
  // on the block values of the two operands, e.g. because they have
1976
  // non-overlapping ranges.
1977
0
  if (UseBlockValue) {
1978
0
    Module *M = CxtI->getModule();
1979
0
    ValueLatticeElement L =
1980
0
        getOrCreateImpl(M).getValueInBlock(LHS, CxtI->getParent(), CxtI);
1981
0
    if (L.isOverdefined())
1982
0
      return LazyValueInfo::Unknown;
1983
1984
0
    ValueLatticeElement R =
1985
0
        getOrCreateImpl(M).getValueInBlock(RHS, CxtI->getParent(), CxtI);
1986
0
    Type *Ty = CmpInst::makeCmpResultType(LHS->getType());
1987
0
    if (Constant *Res = L.getCompare((CmpInst::Predicate)P, Ty, R,
1988
0
                                     M->getDataLayout())) {
1989
0
      if (Res->isNullValue())
1990
0
        return LazyValueInfo::False;
1991
0
      if (Res->isOneValue())
1992
0
        return LazyValueInfo::True;
1993
0
    }
1994
0
  }
1995
0
  return LazyValueInfo::Unknown;
1996
0
}
1997
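For the two-operand case above, non-overlapping block values are the typical win. A standalone sketch of the underlying range reasoning (the values are made up; the real path goes through ValueLatticeElement::getCompare as shown above): [0, 5) compares unsigned-less-than [10, 20) for every pair of elements, so the predicate folds to true.

#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

static bool disjointRangesFoldCompare() {
  ConstantRange L(APInt(32, 0), APInt(32, 5));    // block value of LHS: [0, 5)
  ConstantRange R(APInt(32, 10), APInt(32, 20));  // block value of RHS: [10, 20)
  // True iff "icmp ult L, R" holds for every pair drawn from the two ranges.
  return L.icmp(CmpInst::ICMP_ULT, R);
}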
1998
void LazyValueInfo::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
1999
0
                               BasicBlock *NewSucc) {
2000
0
  if (auto *Impl = getImpl())
2001
0
    Impl->threadEdge(PredBB, OldSucc, NewSucc);
2002
0
}
2003
2004
0
void LazyValueInfo::forgetValue(Value *V) {
2005
0
  if (auto *Impl = getImpl())
2006
0
    Impl->forgetValue(V);
2007
0
}
2008
2009
0
void LazyValueInfo::eraseBlock(BasicBlock *BB) {
2010
0
  if (auto *Impl = getImpl())
2011
0
    Impl->eraseBlock(BB);
2012
0
}
2013
2014
0
void LazyValueInfo::clear() {
2015
0
  if (auto *Impl = getImpl())
2016
0
    Impl->clear();
2017
0
}
2018
2019
0
void LazyValueInfo::printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
2020
0
  if (auto *Impl = getImpl())
2021
0
    Impl->printLVI(F, DTree, OS);
2022
0
}
2023
2024
// Print the LVI for the function arguments at the start of each basic block.
2025
void LazyValueInfoAnnotatedWriter::emitBasicBlockStartAnnot(
2026
0
    const BasicBlock *BB, formatted_raw_ostream &OS) {
2027
  // Check whether there are lattice values defined for arguments of the function.
2028
0
  auto *F = BB->getParent();
2029
0
  for (const auto &Arg : F->args()) {
2030
0
    ValueLatticeElement Result = LVIImpl->getValueInBlock(
2031
0
        const_cast<Argument *>(&Arg), const_cast<BasicBlock *>(BB));
2032
0
    if (Result.isUnknown())
2033
0
      continue;
2034
0
    OS << "; LatticeVal for: '" << Arg << "' is: " << Result << "\n";
2035
0
  }
2036
0
}
2037
2038
// This function prints the LVI analysis for the instruction I at the beginning
2039
// of various basic blocks. It relies on calculated values that are stored in
2040
// the LazyValueInfoCache, and in the absence of cached values, recalculates the
2041
// LazyValueInfo for `I`, and prints that info.
2042
void LazyValueInfoAnnotatedWriter::emitInstructionAnnot(
2043
0
    const Instruction *I, formatted_raw_ostream &OS) {
2044
2045
0
  auto *ParentBB = I->getParent();
2046
0
  SmallPtrSet<const BasicBlock*, 16> BlocksContainingLVI;
2047
  // We can generate (solve) LVI values only for blocks that are dominated by
2048
  // I's parent. However, to avoid generating LVI for all such dominated blocks,
2049
  // which would contain redundant/uninteresting information, we print LVI for
2050
  // blocks that may use this LVI information (such as immediate successor
2051
  // blocks, and blocks that contain uses of `I`).
2052
0
  auto printResult = [&](const BasicBlock *BB) {
2053
0
    if (!BlocksContainingLVI.insert(BB).second)
2054
0
      return;
2055
0
    ValueLatticeElement Result = LVIImpl->getValueInBlock(
2056
0
        const_cast<Instruction *>(I), const_cast<BasicBlock *>(BB));
2057
0
      OS << "; LatticeVal for: '" << *I << "' in BB: '";
2058
0
      BB->printAsOperand(OS, false);
2059
0
      OS << "' is: " << Result << "\n";
2060
0
  };
2061
2062
0
  printResult(ParentBB);
2063
  // Print the LVI analysis results for the immediate successor blocks that
2064
  // are dominated by `ParentBB`.
2065
0
  for (const auto *BBSucc : successors(ParentBB))
2066
0
    if (DT.dominates(ParentBB, BBSucc))
2067
0
      printResult(BBSucc);
2068
2069
  // Print LVI in blocks where `I` is used.
2070
0
  for (const auto *U : I->users())
2071
0
    if (auto *UseI = dyn_cast<Instruction>(U))
2072
0
      if (!isa<PHINode>(UseI) || DT.dominates(ParentBB, UseI->getParent()))
2073
0
        printResult(UseI->getParent());
2074
2075
0
}
2076
2077
PreservedAnalyses LazyValueInfoPrinterPass::run(Function &F,
2078
0
                                                FunctionAnalysisManager &AM) {
2079
0
  OS << "LVI for function '" << F.getName() << "':\n";
2080
0
  auto &LVI = AM.getResult<LazyValueAnalysis>(F);
2081
0
  auto &DTree = AM.getResult<DominatorTreeAnalysis>(F);
2082
0
  LVI.printLVI(F, DTree, OS);
2083
0
  return PreservedAnalyses::all();
2084
0
}