/src/llvm-project/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
Line | Count | Source |
1 | | //===- AggressiveInstCombine.cpp ------------------------------------------===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | // |
9 | | // This file implements the aggressive expression pattern combiner classes. |
10 | | // Currently, it handles expression patterns for: |
11 | | // * Truncate instruction |
12 | | // |
13 | | //===----------------------------------------------------------------------===// |
14 | | |
15 | | #include "llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h" |
16 | | #include "AggressiveInstCombineInternal.h" |
17 | | #include "llvm/ADT/Statistic.h" |
18 | | #include "llvm/Analysis/AliasAnalysis.h" |
19 | | #include "llvm/Analysis/AssumptionCache.h" |
20 | | #include "llvm/Analysis/BasicAliasAnalysis.h" |
21 | | #include "llvm/Analysis/ConstantFolding.h" |
22 | | #include "llvm/Analysis/GlobalsModRef.h" |
23 | | #include "llvm/Analysis/TargetLibraryInfo.h" |
24 | | #include "llvm/Analysis/TargetTransformInfo.h" |
25 | | #include "llvm/Analysis/ValueTracking.h" |
26 | | #include "llvm/IR/DataLayout.h" |
27 | | #include "llvm/IR/Dominators.h" |
28 | | #include "llvm/IR/Function.h" |
29 | | #include "llvm/IR/IRBuilder.h" |
30 | | #include "llvm/IR/PatternMatch.h" |
31 | | #include "llvm/Transforms/Utils/BuildLibCalls.h" |
32 | | #include "llvm/Transforms/Utils/Local.h" |
33 | | |
34 | | using namespace llvm; |
35 | | using namespace PatternMatch; |
36 | | |
37 | | #define DEBUG_TYPE "aggressive-instcombine" |
38 | | |
39 | | STATISTIC(NumAnyOrAllBitsSet, "Number of any/all-bits-set patterns folded"); |
40 | | STATISTIC(NumGuardedRotates, |
41 | | "Number of guarded rotates transformed into funnel shifts"); |
42 | | STATISTIC(NumGuardedFunnelShifts, |
43 | | "Number of guarded funnel shifts transformed into funnel shifts"); |
44 | | STATISTIC(NumPopCountRecognized, "Number of popcount idioms recognized"); |
45 | | |
46 | | static cl::opt<unsigned> MaxInstrsToScan( |
47 | | "aggressive-instcombine-max-scan-instrs", cl::init(64), cl::Hidden, |
48 | | cl::desc("Max number of instructions to scan for aggressive instcombine.")); |
49 | | |
50 | | /// Match a pattern for a bitwise funnel/rotate operation that partially guards |
51 | | /// against undefined behavior by branching around the funnel-shift/rotation |
52 | | /// when the shift amount is 0. |
53 | 0 | static bool foldGuardedFunnelShift(Instruction &I, const DominatorTree &DT) { |
54 | 0 | if (I.getOpcode() != Instruction::PHI || I.getNumOperands() != 2) |
55 | 0 | return false; |
56 | | |
57 | | // As with the one-use checks below, this is not strictly necessary, but we |
58 | | // are being cautious to avoid potential perf regressions on targets that |
59 | | // do not actually have a funnel/rotate instruction (where the funnel shift |
60 | | // would be expanded back into math/shift/logic ops). |
61 | 0 | if (!isPowerOf2_32(I.getType()->getScalarSizeInBits())) |
62 | 0 | return false; |
63 | | |
64 | | // Match V to funnel shift left/right and capture the source operands and |
65 | | // shift amount. |
66 | 0 | auto matchFunnelShift = [](Value *V, Value *&ShVal0, Value *&ShVal1, |
67 | 0 | Value *&ShAmt) { |
68 | 0 | unsigned Width = V->getType()->getScalarSizeInBits(); |
69 | | |
70 | | // fshl(ShVal0, ShVal1, ShAmt) |
71 | | // == (ShVal0 << ShAmt) | (ShVal1 >> (Width - ShAmt)) |
72 | 0 | if (match(V, m_OneUse(m_c_Or( |
73 | 0 | m_Shl(m_Value(ShVal0), m_Value(ShAmt)), |
74 | 0 | m_LShr(m_Value(ShVal1), |
75 | 0 | m_Sub(m_SpecificInt(Width), m_Deferred(ShAmt))))))) { |
76 | 0 | return Intrinsic::fshl; |
77 | 0 | } |
78 | | |
79 | | // fshr(ShVal0, ShVal1, ShAmt) |
80 | | // == (ShVal0 >> ShAmt) | (ShVal1 << (Width - ShAmt)) |
81 | 0 | if (match(V, |
82 | 0 | m_OneUse(m_c_Or(m_Shl(m_Value(ShVal0), m_Sub(m_SpecificInt(Width), |
83 | 0 | m_Value(ShAmt))), |
84 | 0 | m_LShr(m_Value(ShVal1), m_Deferred(ShAmt)))))) { |
85 | 0 | return Intrinsic::fshr; |
86 | 0 | } |
87 | | |
88 | 0 | return Intrinsic::not_intrinsic; |
89 | 0 | }; |
90 | | |
91 | | // One phi operand must be a funnel/rotate operation, and the other phi |
92 | | // operand must be the source value of that funnel/rotate operation: |
93 | | // phi [ rotate(RotSrc, ShAmt), FunnelBB ], [ RotSrc, GuardBB ] |
94 | | // phi [ fshl(ShVal0, ShVal1, ShAmt), FunnelBB ], [ ShVal0, GuardBB ] |
95 | | // phi [ fshr(ShVal0, ShVal1, ShAmt), FunnelBB ], [ ShVal1, GuardBB ] |
96 | 0 | PHINode &Phi = cast<PHINode>(I); |
97 | 0 | unsigned FunnelOp = 0, GuardOp = 1; |
98 | 0 | Value *P0 = Phi.getOperand(0), *P1 = Phi.getOperand(1); |
99 | 0 | Value *ShVal0, *ShVal1, *ShAmt; |
100 | 0 | Intrinsic::ID IID = matchFunnelShift(P0, ShVal0, ShVal1, ShAmt); |
101 | 0 | if (IID == Intrinsic::not_intrinsic || |
102 | 0 | (IID == Intrinsic::fshl && ShVal0 != P1) || |
103 | 0 | (IID == Intrinsic::fshr && ShVal1 != P1)) { |
104 | 0 | IID = matchFunnelShift(P1, ShVal0, ShVal1, ShAmt); |
105 | 0 | if (IID == Intrinsic::not_intrinsic || |
106 | 0 | (IID == Intrinsic::fshl && ShVal0 != P0) || |
107 | 0 | (IID == Intrinsic::fshr && ShVal1 != P0)) |
108 | 0 | return false; |
109 | 0 | assert((IID == Intrinsic::fshl || IID == Intrinsic::fshr) && |
110 | 0 | "Pattern must match funnel shift left or right"); |
111 | 0 | std::swap(FunnelOp, GuardOp); |
112 | 0 | } |
113 | | |
114 | | // The incoming block with our source operand must be the "guard" block. |
115 | | // That must contain a cmp+branch to avoid the funnel/rotate when the shift |
116 | | // amount is equal to 0. The other incoming block is the block with the |
117 | | // funnel/rotate. |
118 | 0 | BasicBlock *GuardBB = Phi.getIncomingBlock(GuardOp); |
119 | 0 | BasicBlock *FunnelBB = Phi.getIncomingBlock(FunnelOp); |
120 | 0 | Instruction *TermI = GuardBB->getTerminator(); |
121 | | |
122 | | // Ensure that the shift values dominate each block. |
123 | 0 | if (!DT.dominates(ShVal0, TermI) || !DT.dominates(ShVal1, TermI)) |
124 | 0 | return false; |
125 | | |
126 | 0 | ICmpInst::Predicate Pred; |
127 | 0 | BasicBlock *PhiBB = Phi.getParent(); |
128 | 0 | if (!match(TermI, m_Br(m_ICmp(Pred, m_Specific(ShAmt), m_ZeroInt()), |
129 | 0 | m_SpecificBB(PhiBB), m_SpecificBB(FunnelBB)))) |
130 | 0 | return false; |
131 | | |
132 | 0 | if (Pred != CmpInst::ICMP_EQ) |
133 | 0 | return false; |
134 | | |
135 | 0 | IRBuilder<> Builder(PhiBB, PhiBB->getFirstInsertionPt()); |
136 | |
137 | 0 | if (ShVal0 == ShVal1) |
138 | 0 | ++NumGuardedRotates; |
139 | 0 | else |
140 | 0 | ++NumGuardedFunnelShifts; |
141 | | |
142 | | // If this is not a rotate then the select was blocking poison from the |
143 | | // 'shift-by-zero' non-TVal, but a funnel shift won't - so freeze it. |
144 | 0 | bool IsFshl = IID == Intrinsic::fshl; |
145 | 0 | if (ShVal0 != ShVal1) { |
146 | 0 | if (IsFshl && !llvm::isGuaranteedNotToBePoison(ShVal1)) |
147 | 0 | ShVal1 = Builder.CreateFreeze(ShVal1); |
148 | 0 | else if (!IsFshl && !llvm::isGuaranteedNotToBePoison(ShVal0)) |
149 | 0 | ShVal0 = Builder.CreateFreeze(ShVal0); |
150 | 0 | } |
151 | | |
152 | | // We matched a variation of this IR pattern: |
153 | | // GuardBB: |
154 | | // %cmp = icmp eq i32 %ShAmt, 0 |
155 | | // br i1 %cmp, label %PhiBB, label %FunnelBB |
156 | | // FunnelBB: |
157 | | // %sub = sub i32 32, %ShAmt |
158 | | // %shr = lshr i32 %ShVal1, %sub |
159 | | // %shl = shl i32 %ShVal0, %ShAmt |
160 | | // %fsh = or i32 %shr, %shl |
161 | | // br label %PhiBB |
162 | | // PhiBB: |
163 | | // %cond = phi i32 [ %fsh, %FunnelBB ], [ %ShVal0, %GuardBB ] |
164 | | // --> |
165 | | // llvm.fshl.i32(i32 %ShVal0, i32 %ShVal1, i32 %ShAmt) |
166 | 0 | Function *F = Intrinsic::getDeclaration(Phi.getModule(), IID, Phi.getType()); |
167 | 0 | Phi.replaceAllUsesWith(Builder.CreateCall(F, {ShVal0, ShVal1, ShAmt})); |
168 | 0 | return true; |
169 | 0 | } |
170 | | |
171 | | /// This is used by foldAnyOrAllBitsSet() to capture a source value (Root) and |
172 | | /// the bit indexes (Mask) needed by a masked compare. If we're matching a chain |
173 | | /// of 'and' ops, then we also need to capture the fact that we saw an |
174 | | /// "and X, 1", so that's an extra return value for that case. |
175 | | struct MaskOps { |
176 | | Value *Root = nullptr; |
177 | | APInt Mask; |
178 | | bool MatchAndChain; |
179 | | bool FoundAnd1 = false; |
180 | | |
181 | | MaskOps(unsigned BitWidth, bool MatchAnds) |
182 | 0 | : Mask(APInt::getZero(BitWidth)), MatchAndChain(MatchAnds) {} |
183 | | }; |
184 | | |
185 | | /// This is a recursive helper for foldAnyOrAllBitsSet() that walks through a |
186 | | /// chain of 'and' or 'or' instructions looking for shift ops of a common source |
187 | | /// value. Examples: |
188 | | /// or (or (or X, (X >> 3)), (X >> 5)), (X >> 8) |
189 | | /// returns { X, 0x129 } |
190 | | /// and (and (X >> 1), 1), (X >> 4) |
191 | | /// returns { X, 0x12 } |
192 | 0 | static bool matchAndOrChain(Value *V, MaskOps &MOps) { |
193 | 0 | Value *Op0, *Op1; |
194 | 0 | if (MOps.MatchAndChain) { |
195 | | // Recurse through a chain of 'and' operands. This requires an extra check |
196 | | // vs. the 'or' matcher: we must find an "and X, 1" instruction somewhere |
197 | | // in the chain to know that all of the high bits are cleared. |
198 | 0 | if (match(V, m_And(m_Value(Op0), m_One()))) { |
199 | 0 | MOps.FoundAnd1 = true; |
200 | 0 | return matchAndOrChain(Op0, MOps); |
201 | 0 | } |
202 | 0 | if (match(V, m_And(m_Value(Op0), m_Value(Op1)))) |
203 | 0 | return matchAndOrChain(Op0, MOps) && matchAndOrChain(Op1, MOps); |
204 | 0 | } else { |
205 | | // Recurse through a chain of 'or' operands. |
206 | 0 | if (match(V, m_Or(m_Value(Op0), m_Value(Op1)))) |
207 | 0 | return matchAndOrChain(Op0, MOps) && matchAndOrChain(Op1, MOps); |
208 | 0 | } |
209 | | |
210 | | // We need a shift-right or a bare value representing a compare of bit 0 of |
211 | | // the original source operand. |
212 | 0 | Value *Candidate; |
213 | 0 | const APInt *BitIndex = nullptr; |
214 | 0 | if (!match(V, m_LShr(m_Value(Candidate), m_APInt(BitIndex)))) |
215 | 0 | Candidate = V; |
216 | | |
217 | | // Initialize result source operand. |
218 | 0 | if (!MOps.Root) |
219 | 0 | MOps.Root = Candidate; |
220 | | |
221 | | // The shift constant is out-of-range? This code hasn't been simplified. |
222 | 0 | if (BitIndex && BitIndex->uge(MOps.Mask.getBitWidth())) |
223 | 0 | return false; |
224 | | |
225 | | // Fill in the mask bit derived from the shift constant. |
226 | 0 | MOps.Mask.setBit(BitIndex ? BitIndex->getZExtValue() : 0); |
227 | 0 | return MOps.Root == Candidate; |
228 | 0 | } |
229 | | |
230 | | /// Match patterns that correspond to "any-bits-set" and "all-bits-set". |
231 | | /// These will include a chain of 'or' or 'and'-shifted bits from a |
232 | | /// common source value: |
233 | | /// and (or (lshr X, C), ...), 1 --> (X & CMask) != 0 |
234 | | /// and (and (lshr X, C), ...), 1 --> (X & CMask) == CMask |
235 | | /// Note: "any-bits-clear" and "all-bits-clear" are variations of these patterns |
236 | | /// that differ only with a final 'not' of the result. We expect that final |
237 | | /// 'not' to be folded with the compare that we create here (invert predicate). |
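 | | /// A minimal worked example of the 'any-bits-set' form (hypothetical i32 IR): |
 | | ///   %s3 = lshr i32 %x, 3 |
 | | ///   %o0 = or i32 %x, %s3 |
 | | ///   %s5 = lshr i32 %x, 5 |
 | | ///   %o1 = or i32 %o0, %s5 |
 | | ///   %r  = and i32 %o1, 1 |
 | | /// is rewritten (sketch) to: |
 | | ///   %m = and i32 %x, 41        ; mask 0b101001 covers bits 0, 3 and 5 |
 | | ///   %c = icmp ne i32 %m, 0 |
 | | ///   %r = zext i1 %c to i32 |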
238 | 0 | static bool foldAnyOrAllBitsSet(Instruction &I) { |
239 | | // The 'any-bits-set' ('or' chain) pattern is simpler to match because the |
240 | | // final "and X, 1" instruction must be the final op in the sequence. |
241 | 0 | bool MatchAllBitsSet; |
242 | 0 | if (match(&I, m_c_And(m_OneUse(m_And(m_Value(), m_Value())), m_Value()))) |
243 | 0 | MatchAllBitsSet = true; |
244 | 0 | else if (match(&I, m_And(m_OneUse(m_Or(m_Value(), m_Value())), m_One()))) |
245 | 0 | MatchAllBitsSet = false; |
246 | 0 | else |
247 | 0 | return false; |
248 | | |
249 | 0 | MaskOps MOps(I.getType()->getScalarSizeInBits(), MatchAllBitsSet); |
250 | 0 | if (MatchAllBitsSet) { |
251 | 0 | if (!matchAndOrChain(cast<BinaryOperator>(&I), MOps) || !MOps.FoundAnd1) |
252 | 0 | return false; |
253 | 0 | } else { |
254 | 0 | if (!matchAndOrChain(cast<BinaryOperator>(&I)->getOperand(0), MOps)) |
255 | 0 | return false; |
256 | 0 | } |
257 | | |
258 | | // The pattern was found. Create a masked compare that replaces all of the |
259 | | // shift and logic ops. |
260 | 0 | IRBuilder<> Builder(&I); |
261 | 0 | Constant *Mask = ConstantInt::get(I.getType(), MOps.Mask); |
262 | 0 | Value *And = Builder.CreateAnd(MOps.Root, Mask); |
263 | 0 | Value *Cmp = MatchAllBitsSet ? Builder.CreateICmpEQ(And, Mask) |
264 | 0 | : Builder.CreateIsNotNull(And); |
265 | 0 | Value *Zext = Builder.CreateZExt(Cmp, I.getType()); |
266 | 0 | I.replaceAllUsesWith(Zext); |
267 | 0 | ++NumAnyOrAllBitsSet; |
268 | 0 | return true; |
269 | 0 | } |
270 | | |
271 | | // Try to recognize the function below as a popcount intrinsic. |
272 | | // This is the "best" algorithm from |
273 | | // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel |
274 | | // Also used in TargetLowering::expandCTPOP(). |
275 | | // |
276 | | // int popcount(unsigned int i) { |
277 | | // i = i - ((i >> 1) & 0x55555555); |
278 | | // i = (i & 0x33333333) + ((i >> 2) & 0x33333333); |
279 | | // i = ((i + (i >> 4)) & 0x0F0F0F0F); |
280 | | // return (i * 0x01010101) >> 24; |
281 | | // } |
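 | | // For example, when the chain above is matched on an i32 value %i, the final |
 | | // lshr is replaced with a single call (sketch): |
 | | //   %pop = call i32 @llvm.ctpop.i32(i32 %i) |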
282 | 0 | static bool tryToRecognizePopCount(Instruction &I) { |
283 | 0 | if (I.getOpcode() != Instruction::LShr) |
284 | 0 | return false; |
285 | | |
286 | 0 | Type *Ty = I.getType(); |
287 | 0 | if (!Ty->isIntOrIntVectorTy()) |
288 | 0 | return false; |
289 | | |
290 | 0 | unsigned Len = Ty->getScalarSizeInBits(); |
291 | | // FIXME: fix Len == 8 and other irregular type lengths. |
292 | 0 | if (!(Len <= 128 && Len > 8 && Len % 8 == 0)) |
293 | 0 | return false; |
294 | | |
295 | 0 | APInt Mask55 = APInt::getSplat(Len, APInt(8, 0x55)); |
296 | 0 | APInt Mask33 = APInt::getSplat(Len, APInt(8, 0x33)); |
297 | 0 | APInt Mask0F = APInt::getSplat(Len, APInt(8, 0x0F)); |
298 | 0 | APInt Mask01 = APInt::getSplat(Len, APInt(8, 0x01)); |
299 | 0 | APInt MaskShift = APInt(Len, Len - 8); |
300 | |
301 | 0 | Value *Op0 = I.getOperand(0); |
302 | 0 | Value *Op1 = I.getOperand(1); |
303 | 0 | Value *MulOp0; |
304 | | // Matching "(i * 0x01010101...) >> 24". |
305 | 0 | if ((match(Op0, m_Mul(m_Value(MulOp0), m_SpecificInt(Mask01)))) && |
306 | 0 | match(Op1, m_SpecificInt(MaskShift))) { |
307 | 0 | Value *ShiftOp0; |
308 | | // Matching "((i + (i >> 4)) & 0x0F0F0F0F...)". |
309 | 0 | if (match(MulOp0, m_And(m_c_Add(m_LShr(m_Value(ShiftOp0), m_SpecificInt(4)), |
310 | 0 | m_Deferred(ShiftOp0)), |
311 | 0 | m_SpecificInt(Mask0F)))) { |
312 | 0 | Value *AndOp0; |
313 | | // Matching "(i & 0x33333333...) + ((i >> 2) & 0x33333333...)". |
314 | 0 | if (match(ShiftOp0, |
315 | 0 | m_c_Add(m_And(m_Value(AndOp0), m_SpecificInt(Mask33)), |
316 | 0 | m_And(m_LShr(m_Deferred(AndOp0), m_SpecificInt(2)), |
317 | 0 | m_SpecificInt(Mask33))))) { |
318 | 0 | Value *Root, *SubOp1; |
319 | | // Matching "i - ((i >> 1) & 0x55555555...)". |
320 | 0 | if (match(AndOp0, m_Sub(m_Value(Root), m_Value(SubOp1))) && |
321 | 0 | match(SubOp1, m_And(m_LShr(m_Specific(Root), m_SpecificInt(1)), |
322 | 0 | m_SpecificInt(Mask55)))) { |
323 | 0 | LLVM_DEBUG(dbgs() << "Recognized popcount intrinsic\n"); |
324 | 0 | IRBuilder<> Builder(&I); |
325 | 0 | Function *Func = Intrinsic::getDeclaration( |
326 | 0 | I.getModule(), Intrinsic::ctpop, I.getType()); |
327 | 0 | I.replaceAllUsesWith(Builder.CreateCall(Func, {Root})); |
328 | 0 | ++NumPopCountRecognized; |
329 | 0 | return true; |
330 | 0 | } |
331 | 0 | } |
332 | 0 | } |
333 | 0 | } |
334 | | |
335 | 0 | return false; |
336 | 0 | } |
337 | | |
338 | | /// Fold smin(smax(fptosi(x), C1), C2) to llvm.fptosi.sat(x), provided C1 and |
339 | | /// C2 saturate the value of the fp conversion. The transform is not reversible |
340 | | /// as the fptosi.sat is more defined than the input - all values produce a |
341 | | /// valid result for the fptosi.sat, whereas some out-of-range inputs produce |
342 | | /// poison in the original conversion. The reversed pattern may |
343 | | /// use fmax and fmin instead. As we cannot directly reverse the transform, and |
344 | | /// it is not always profitable, we make it conditional on the cost being |
345 | | /// reported as lower by TTI. |
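 | | /// For example, assuming TTI reports the saturating conversion as cheaper, a |
 | | /// float-to-i32 conversion clamped to [-32768, 32767]: |
 | | ///   smax(smin(fptosi float %x to i32, 32767), -32768) |
 | | /// becomes (sketch): |
 | | ///   %sat = call i16 @llvm.fptosi.sat.i16.f32(float %x) |
 | | ///   %res = sext i16 %sat to i32 |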
346 | 0 | static bool tryToFPToSat(Instruction &I, TargetTransformInfo &TTI) { |
347 | 0 | // Look for smin(smax(fptosi(x), C1), C2), converting to fptosi_sat. |
348 | 0 | Value *In; |
349 | 0 | const APInt *MinC, *MaxC; |
350 | 0 | if (!match(&I, m_SMax(m_OneUse(m_SMin(m_OneUse(m_FPToSI(m_Value(In))), |
351 | 0 | m_APInt(MinC))), |
352 | 0 | m_APInt(MaxC))) && |
353 | 0 | !match(&I, m_SMin(m_OneUse(m_SMax(m_OneUse(m_FPToSI(m_Value(In))), |
354 | 0 | m_APInt(MaxC))), |
355 | 0 | m_APInt(MinC)))) |
356 | 0 | return false; |
357 | | |
358 | | // Check that the constants clamp a saturate. |
359 | 0 | if (!(*MinC + 1).isPowerOf2() || -*MaxC != *MinC + 1) |
360 | 0 | return false; |
361 | | |
362 | 0 | Type *IntTy = I.getType(); |
363 | 0 | Type *FpTy = In->getType(); |
364 | 0 | Type *SatTy = |
365 | 0 | IntegerType::get(IntTy->getContext(), (*MinC + 1).exactLogBase2() + 1); |
366 | 0 | if (auto *VecTy = dyn_cast<VectorType>(IntTy)) |
367 | 0 | SatTy = VectorType::get(SatTy, VecTy->getElementCount()); |
368 | | |
369 | | // Get the cost of the intrinsic, and check that against the cost of |
370 | | // fptosi+smin+smax |
371 | 0 | InstructionCost SatCost = TTI.getIntrinsicInstrCost( |
372 | 0 | IntrinsicCostAttributes(Intrinsic::fptosi_sat, SatTy, {In}, {FpTy}), |
373 | 0 | TTI::TCK_RecipThroughput); |
374 | 0 | SatCost += TTI.getCastInstrCost(Instruction::SExt, IntTy, SatTy, |
375 | 0 | TTI::CastContextHint::None, |
376 | 0 | TTI::TCK_RecipThroughput); |
377 | |
378 | 0 | InstructionCost MinMaxCost = TTI.getCastInstrCost( |
379 | 0 | Instruction::FPToSI, IntTy, FpTy, TTI::CastContextHint::None, |
380 | 0 | TTI::TCK_RecipThroughput); |
381 | 0 | MinMaxCost += TTI.getIntrinsicInstrCost( |
382 | 0 | IntrinsicCostAttributes(Intrinsic::smin, IntTy, {IntTy}), |
383 | 0 | TTI::TCK_RecipThroughput); |
384 | 0 | MinMaxCost += TTI.getIntrinsicInstrCost( |
385 | 0 | IntrinsicCostAttributes(Intrinsic::smax, IntTy, {IntTy}), |
386 | 0 | TTI::TCK_RecipThroughput); |
387 | |
388 | 0 | if (SatCost >= MinMaxCost) |
389 | 0 | return false; |
390 | | |
391 | 0 | IRBuilder<> Builder(&I); |
392 | 0 | Function *Fn = Intrinsic::getDeclaration(I.getModule(), Intrinsic::fptosi_sat, |
393 | 0 | {SatTy, FpTy}); |
394 | 0 | Value *Sat = Builder.CreateCall(Fn, In); |
395 | 0 | I.replaceAllUsesWith(Builder.CreateSExt(Sat, IntTy)); |
396 | 0 | return true; |
397 | 0 | } |
398 | | |
399 | | /// Try to replace a mathlib call to sqrt with the LLVM intrinsic. This avoids |
400 | | /// pessimistic codegen that has to account for setting errno and can enable |
401 | | /// vectorization. |
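 | | /// For example (sketch), given that a NaN result can be ruled out: |
 | | ///   %r = call nnan double @sqrt(double %x) |
 | | /// becomes: |
 | | ///   %r = call nnan double @llvm.sqrt.f64(double %x) |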
402 | | static bool foldSqrt(Instruction &I, TargetTransformInfo &TTI, |
403 | | TargetLibraryInfo &TLI, AssumptionCache &AC, |
404 | 0 | DominatorTree &DT) { |
405 | | // Match a call to sqrt mathlib function. |
406 | 0 | auto *Call = dyn_cast<CallInst>(&I); |
407 | 0 | if (!Call) |
408 | 0 | return false; |
409 | | |
410 | 0 | Module *M = Call->getModule(); |
411 | 0 | LibFunc Func; |
412 | 0 | if (!TLI.getLibFunc(*Call, Func) || !isLibFuncEmittable(M, &TLI, Func)) |
413 | 0 | return false; |
414 | | |
415 | 0 | if (Func != LibFunc_sqrt && Func != LibFunc_sqrtf && Func != LibFunc_sqrtl) |
416 | 0 | return false; |
417 | | |
418 | | // If (1) this is a sqrt libcall, (2) we can assume that NAN is not created |
419 | | // (because NNAN or the operand arg must not be less than -0.0) and (3) we |
420 | | // would not end up lowering to a libcall anyway (which could change the value |
421 | | // of errno), then: |
422 | | // (1) errno won't be set. |
423 | | // (2) it is safe to convert this to an intrinsic call. |
424 | 0 | Type *Ty = Call->getType(); |
425 | 0 | Value *Arg = Call->getArgOperand(0); |
426 | 0 | if (TTI.haveFastSqrt(Ty) && |
427 | 0 | (Call->hasNoNaNs() || |
428 | 0 | cannotBeOrderedLessThanZero(Arg, M->getDataLayout(), &TLI, 0, &AC, &I, |
429 | 0 | &DT))) { |
430 | 0 | IRBuilder<> Builder(&I); |
431 | 0 | IRBuilderBase::FastMathFlagGuard Guard(Builder); |
432 | 0 | Builder.setFastMathFlags(Call->getFastMathFlags()); |
433 | |
434 | 0 | Function *Sqrt = Intrinsic::getDeclaration(M, Intrinsic::sqrt, Ty); |
435 | 0 | Value *NewSqrt = Builder.CreateCall(Sqrt, Arg, "sqrt"); |
436 | 0 | I.replaceAllUsesWith(NewSqrt); |
437 | | |
438 | | // Explicitly erase the old call because a call with side effects is not |
439 | | // trivially dead. |
440 | 0 | I.eraseFromParent(); |
441 | 0 | return true; |
442 | 0 | } |
443 | | |
444 | 0 | return false; |
445 | 0 | } |
446 | | |
447 | | // Check if this array of constants represents a cttz table. |
448 | | // Iterate over the elements from \p Table by trying to find/match all |
449 | | // the numbers from 0 to \p InputBits that should represent cttz results. |
450 | | static bool isCTTZTable(const ConstantDataArray &Table, uint64_t Mul, |
451 | 0 | uint64_t Shift, uint64_t InputBits) { |
452 | 0 | unsigned Length = Table.getNumElements(); |
453 | 0 | if (Length < InputBits || Length > InputBits * 2) |
454 | 0 | return false; |
455 | | |
456 | 0 | APInt Mask = APInt::getBitsSetFrom(InputBits, Shift); |
457 | 0 | unsigned Matched = 0; |
458 | |
459 | 0 | for (unsigned i = 0; i < Length; i++) { |
460 | 0 | uint64_t Element = Table.getElementAsInteger(i); |
461 | 0 | if (Element >= InputBits) |
462 | 0 | continue; |
463 | | |
464 | | // Check if \p Element matches a concrete answer. It could fail for some |
465 | | // elements that are never accessed, so we keep iterating over each element |
466 | | // from the table. The number of matched elements should be equal to the |
467 | | // number of potential right answers, which is exactly \p InputBits. |
468 | 0 | if ((((Mul << Element) & Mask.getZExtValue()) >> Shift) == i) |
469 | 0 | Matched++; |
470 | 0 | } |
471 | |
472 | 0 | return Matched == InputBits; |
473 | 0 | } |
474 | | |
475 | | // Try to recognize table-based ctz implementation. |
476 | | // E.g., an example in C (for more cases please see the llvm/tests): |
477 | | // int f(unsigned x) { |
478 | | // static const char table[32] = |
479 | | // {0, 1, 28, 2, 29, 14, 24, 3, 30, |
480 | | // 22, 20, 15, 25, 17, 4, 8, 31, 27, |
481 | | // 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9}; |
482 | | // return table[((unsigned)((x & -x) * 0x077CB531U)) >> 27]; |
483 | | // } |
484 | | // this can be lowered to `cttz` instruction. |
485 | | // There is also a special case when the element is 0. |
486 | | // |
487 | | // Here are some examples of LLVM IR for a 64-bit target: |
488 | | // |
489 | | // CASE 1: |
490 | | // %sub = sub i32 0, %x |
491 | | // %and = and i32 %sub, %x |
492 | | // %mul = mul i32 %and, 125613361 |
493 | | // %shr = lshr i32 %mul, 27 |
494 | | // %idxprom = zext i32 %shr to i64 |
495 | | // %arrayidx = getelementptr inbounds [32 x i8], [32 x i8]* @ctz1.table, i64 0, |
496 | | // i64 %idxprom |
497 | | // %0 = load i8, i8* %arrayidx, align 1, !tbaa !8 |
498 | | // |
499 | | // CASE 2: |
500 | | // %sub = sub i32 0, %x |
501 | | // %and = and i32 %sub, %x |
502 | | // %mul = mul i32 %and, 72416175 |
503 | | // %shr = lshr i32 %mul, 26 |
504 | | // %idxprom = zext i32 %shr to i64 |
505 | | // %arrayidx = getelementptr inbounds [64 x i16], [64 x i16]* @ctz2.table, |
506 | | // i64 0, i64 %idxprom |
507 | | // %0 = load i16, i16* %arrayidx, align 2, !tbaa !8 |
508 | | // |
509 | | // CASE 3: |
510 | | // %sub = sub i32 0, %x |
511 | | // %and = and i32 %sub, %x |
512 | | // %mul = mul i32 %and, 81224991 |
513 | | // %shr = lshr i32 %mul, 27 |
514 | | // %idxprom = zext i32 %shr to i64 |
515 | | // %arrayidx = getelementptr inbounds [32 x i32], [32 x i32]* @ctz3.table, |
516 | | // i64 0, i64 %idxprom |
517 | | // %0 = load i32, i32* %arrayidx, align 4, !tbaa !8 |
518 | | // |
519 | | // CASE 4: |
520 | | // %sub = sub i64 0, %x |
521 | | // %and = and i64 %sub, %x |
522 | | // %mul = mul i64 %and, 283881067100198605 |
523 | | // %shr = lshr i64 %mul, 58 |
524 | | // %arrayidx = getelementptr inbounds [64 x i8], [64 x i8]* @table, i64 0, |
525 | | // i64 %shr |
526 | | // %0 = load i8, i8* %arrayidx, align 1, !tbaa !8 |
527 | | // |
528 | | // All this can be lowered to @llvm.cttz.i32/64 intrinsic. |
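 | | // For CASE 1 above, the load from the table is replaced with (sketch): |
 | | //   %ct  = call i32 @llvm.cttz.i32(i32 %x, i1 true) |
 | | //   %isz = icmp eq i32 %x, 0 |
 | | //   %sel = select i1 %isz, i32 0, i32 %ct |
 | | //   %res = trunc i32 %sel to i8 |
 | | // (the select/trunc shape depends on the value of table[0] and the load type). |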
529 | 0 | static bool tryToRecognizeTableBasedCttz(Instruction &I) { |
530 | 0 | LoadInst *LI = dyn_cast<LoadInst>(&I); |
531 | 0 | if (!LI) |
532 | 0 | return false; |
533 | | |
534 | 0 | Type *AccessType = LI->getType(); |
535 | 0 | if (!AccessType->isIntegerTy()) |
536 | 0 | return false; |
537 | | |
538 | 0 | GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getPointerOperand()); |
539 | 0 | if (!GEP || !GEP->isInBounds() || GEP->getNumIndices() != 2) |
540 | 0 | return false; |
541 | | |
542 | 0 | if (!GEP->getSourceElementType()->isArrayTy()) |
543 | 0 | return false; |
544 | | |
545 | 0 | uint64_t ArraySize = GEP->getSourceElementType()->getArrayNumElements(); |
546 | 0 | if (ArraySize != 32 && ArraySize != 64) |
547 | 0 | return false; |
548 | | |
549 | 0 | GlobalVariable *GVTable = dyn_cast<GlobalVariable>(GEP->getPointerOperand()); |
550 | 0 | if (!GVTable || !GVTable->hasInitializer() || !GVTable->isConstant()) |
551 | 0 | return false; |
552 | | |
553 | 0 | ConstantDataArray *ConstData = |
554 | 0 | dyn_cast<ConstantDataArray>(GVTable->getInitializer()); |
555 | 0 | if (!ConstData) |
556 | 0 | return false; |
557 | | |
558 | 0 | if (!match(GEP->idx_begin()->get(), m_ZeroInt())) |
559 | 0 | return false; |
560 | | |
561 | 0 | Value *Idx2 = std::next(GEP->idx_begin())->get(); |
562 | 0 | Value *X1; |
563 | 0 | uint64_t MulConst, ShiftConst; |
564 | | // FIXME: 64-bit targets have `i64` type for the GEP index, so this match will |
565 | | // probably fail for other (e.g. 32-bit) targets. |
566 | 0 | if (!match(Idx2, m_ZExtOrSelf( |
567 | 0 | m_LShr(m_Mul(m_c_And(m_Neg(m_Value(X1)), m_Deferred(X1)), |
568 | 0 | m_ConstantInt(MulConst)), |
569 | 0 | m_ConstantInt(ShiftConst))))) |
570 | 0 | return false; |
571 | | |
572 | 0 | unsigned InputBits = X1->getType()->getScalarSizeInBits(); |
573 | 0 | if (InputBits != 32 && InputBits != 64) |
574 | 0 | return false; |
575 | | |
576 | | // Shift should extract top 5..7 bits. |
577 | 0 | if (InputBits - Log2_32(InputBits) != ShiftConst && |
578 | 0 | InputBits - Log2_32(InputBits) - 1 != ShiftConst) |
579 | 0 | return false; |
580 | | |
581 | 0 | if (!isCTTZTable(*ConstData, MulConst, ShiftConst, InputBits)) |
582 | 0 | return false; |
583 | | |
584 | 0 | auto ZeroTableElem = ConstData->getElementAsInteger(0); |
585 | 0 | bool DefinedForZero = ZeroTableElem == InputBits; |
586 | |
587 | 0 | IRBuilder<> B(LI); |
588 | 0 | ConstantInt *BoolConst = B.getInt1(!DefinedForZero); |
589 | 0 | Type *XType = X1->getType(); |
590 | 0 | auto Cttz = B.CreateIntrinsic(Intrinsic::cttz, {XType}, {X1, BoolConst}); |
591 | 0 | Value *ZExtOrTrunc = nullptr; |
592 | |
593 | 0 | if (DefinedForZero) { |
594 | 0 | ZExtOrTrunc = B.CreateZExtOrTrunc(Cttz, AccessType); |
595 | 0 | } else { |
596 | | // If the value in elem 0 isn't the same as InputBits, we still want to |
597 | | // produce the value from the table. |
598 | 0 | auto Cmp = B.CreateICmpEQ(X1, ConstantInt::get(XType, 0)); |
599 | 0 | auto Select = |
600 | 0 | B.CreateSelect(Cmp, ConstantInt::get(XType, ZeroTableElem), Cttz); |
601 | | |
602 | | // NOTE: If table[0] is 0 but cttz(0) is defined by the target, |
603 | | // it should be handled as `cttz(x) & (typeSize - 1)`. |
604 | |
605 | 0 | ZExtOrTrunc = B.CreateZExtOrTrunc(Select, AccessType); |
606 | 0 | } |
607 | |
608 | 0 | LI->replaceAllUsesWith(ZExtOrTrunc); |
609 | |
610 | 0 | return true; |
611 | 0 | } |
612 | | |
613 | | /// This is used by foldLoadsRecursive() to capture a root load node, which is |
614 | | /// of the form or(load, load), and to recursively build the wide load. It also |
615 | | /// captures the shift amount, zero-extend type, and load size. |
616 | | struct LoadOps { |
617 | | LoadInst *Root = nullptr; |
618 | | LoadInst *RootInsert = nullptr; |
619 | | bool FoundRoot = false; |
620 | | uint64_t LoadSize = 0; |
621 | | const APInt *Shift = nullptr; |
622 | | Type *ZextType; |
623 | | AAMDNodes AATags; |
624 | | }; |
625 | | |
626 | | // Identify and merge consecutive loads recursively. The loads are of the form |
627 | | // (ZExt(L1) << shift1) | (ZExt(L2) << shift2) -> ZExt(L3) << shift1 |
628 | | // (ZExt(L1) << shift1) | ZExt(L2) -> ZExt(L3) |
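 | | // A little-endian sketch of the simplest case, two adjacent i8 loads merged |
 | | // into one i16 load (hypothetical IR): |
 | | //   %p1 = getelementptr inbounds i8, ptr %p, i64 1 |
 | | //   %b0 = load i8, ptr %p |
 | | //   %b1 = load i8, ptr %p1 |
 | | //   %z0 = zext i8 %b0 to i16 |
 | | //   %z1 = zext i8 %b1 to i16 |
 | | //   %hi = shl i16 %z1, 8 |
 | | //   %r  = or i16 %hi, %z0 |
 | | // becomes (subject to the TTI and aliasing checks below): |
 | | //   %r = load i16, ptr %p |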
629 | | static bool foldLoadsRecursive(Value *V, LoadOps &LOps, const DataLayout &DL, |
630 | 0 | AliasAnalysis &AA) { |
631 | 0 | const APInt *ShAmt2 = nullptr; |
632 | 0 | Value *X; |
633 | 0 | Instruction *L1, *L2; |
634 | | |
635 | | // Go to the last node with loads. |
636 | 0 | if (match(V, m_OneUse(m_c_Or( |
637 | 0 | m_Value(X), |
638 | 0 | m_OneUse(m_Shl(m_OneUse(m_ZExt(m_OneUse(m_Instruction(L2)))), |
639 | 0 | m_APInt(ShAmt2)))))) || |
640 | 0 | match(V, m_OneUse(m_Or(m_Value(X), |
641 | 0 | m_OneUse(m_ZExt(m_OneUse(m_Instruction(L2)))))))) { |
642 | 0 | if (!foldLoadsRecursive(X, LOps, DL, AA) && LOps.FoundRoot) |
643 | | // Avoid Partial chain merge. |
644 | 0 | return false; |
645 | 0 | } else |
646 | 0 | return false; |
647 | | |
648 | | // Check if the pattern has loads |
649 | 0 | LoadInst *LI1 = LOps.Root; |
650 | 0 | const APInt *ShAmt1 = LOps.Shift; |
651 | 0 | if (LOps.FoundRoot == false && |
652 | 0 | (match(X, m_OneUse(m_ZExt(m_Instruction(L1)))) || |
653 | 0 | match(X, m_OneUse(m_Shl(m_OneUse(m_ZExt(m_OneUse(m_Instruction(L1)))), |
654 | 0 | m_APInt(ShAmt1)))))) { |
655 | 0 | LI1 = dyn_cast<LoadInst>(L1); |
656 | 0 | } |
657 | 0 | LoadInst *LI2 = dyn_cast<LoadInst>(L2); |
658 | | |
659 | | // Check that the loads are distinct, simple (non-atomic, non-volatile), and in the same address space. |
660 | 0 | if (LI1 == LI2 || !LI1 || !LI2 || !LI1->isSimple() || !LI2->isSimple() || |
661 | 0 | LI1->getPointerAddressSpace() != LI2->getPointerAddressSpace()) |
662 | 0 | return false; |
663 | | |
664 | | // Check if Loads come from same BB. |
665 | 0 | if (LI1->getParent() != LI2->getParent()) |
666 | 0 | return false; |
667 | | |
668 | | // Determine the endianness from the data layout. |
669 | 0 | bool IsBigEndian = DL.isBigEndian(); |
670 | | |
671 | | // Check if loads are consecutive and same size. |
672 | 0 | Value *Load1Ptr = LI1->getPointerOperand(); |
673 | 0 | APInt Offset1(DL.getIndexTypeSizeInBits(Load1Ptr->getType()), 0); |
674 | 0 | Load1Ptr = |
675 | 0 | Load1Ptr->stripAndAccumulateConstantOffsets(DL, Offset1, |
676 | 0 | /* AllowNonInbounds */ true); |
677 | |
678 | 0 | Value *Load2Ptr = LI2->getPointerOperand(); |
679 | 0 | APInt Offset2(DL.getIndexTypeSizeInBits(Load2Ptr->getType()), 0); |
680 | 0 | Load2Ptr = |
681 | 0 | Load2Ptr->stripAndAccumulateConstantOffsets(DL, Offset2, |
682 | 0 | /* AllowNonInbounds */ true); |
683 | | |
684 | | // Verify that both loads have the same base pointer and the same load size. |
685 | 0 | uint64_t LoadSize1 = LI1->getType()->getPrimitiveSizeInBits(); |
686 | 0 | uint64_t LoadSize2 = LI2->getType()->getPrimitiveSizeInBits(); |
687 | 0 | if (Load1Ptr != Load2Ptr || LoadSize1 != LoadSize2) |
688 | 0 | return false; |
689 | | |
690 | | // Only support load sizes that are at least 8 bits and a power of 2. |
691 | 0 | if (LoadSize1 < 8 || !isPowerOf2_64(LoadSize1)) |
692 | 0 | return false; |
693 | | |
694 | | // Use alias analysis to check for stores between the loads. |
695 | 0 | LoadInst *Start = LOps.FoundRoot ? LOps.RootInsert : LI1, *End = LI2; |
696 | 0 | MemoryLocation Loc; |
697 | 0 | if (!Start->comesBefore(End)) { |
698 | 0 | std::swap(Start, End); |
699 | 0 | Loc = MemoryLocation::get(End); |
700 | 0 | if (LOps.FoundRoot) |
701 | 0 | Loc = Loc.getWithNewSize(LOps.LoadSize); |
702 | 0 | } else |
703 | 0 | Loc = MemoryLocation::get(End); |
704 | 0 | unsigned NumScanned = 0; |
705 | 0 | for (Instruction &Inst : |
706 | 0 | make_range(Start->getIterator(), End->getIterator())) { |
707 | 0 | if (Inst.mayWriteToMemory() && isModSet(AA.getModRefInfo(&Inst, Loc))) |
708 | 0 | return false; |
709 | | |
710 | | // Ignore debug info so that's not counted against MaxInstrsToScan. |
711 | | // Otherwise debug info could affect codegen. |
712 | 0 | if (!isa<DbgInfoIntrinsic>(Inst) && ++NumScanned > MaxInstrsToScan) |
713 | 0 | return false; |
714 | 0 | } |
715 | | |
716 | | // Make sure the load with the lower offset is LI1. |
717 | 0 | bool Reverse = false; |
718 | 0 | if (Offset2.slt(Offset1)) { |
719 | 0 | std::swap(LI1, LI2); |
720 | 0 | std::swap(ShAmt1, ShAmt2); |
721 | 0 | std::swap(Offset1, Offset2); |
722 | 0 | std::swap(Load1Ptr, Load2Ptr); |
723 | 0 | std::swap(LoadSize1, LoadSize2); |
724 | 0 | Reverse = true; |
725 | 0 | } |
726 | | |
727 | | // On big-endian targets, swap the shifts. |
728 | 0 | if (IsBigEndian) |
729 | 0 | std::swap(ShAmt1, ShAmt2); |
730 | | |
731 | | // Find the shift values. |
732 | 0 | uint64_t Shift1 = 0, Shift2 = 0; |
733 | 0 | if (ShAmt1) |
734 | 0 | Shift1 = ShAmt1->getZExtValue(); |
735 | 0 | if (ShAmt2) |
736 | 0 | Shift2 = ShAmt2->getZExtValue(); |
737 | | |
738 | | // First load is always LI1. This is where we put the new load. |
739 | | // Use the merged load size available from LI1 for forward loads. |
740 | 0 | if (LOps.FoundRoot) { |
741 | 0 | if (!Reverse) |
742 | 0 | LoadSize1 = LOps.LoadSize; |
743 | 0 | else |
744 | 0 | LoadSize2 = LOps.LoadSize; |
745 | 0 | } |
746 | | |
747 | | // Verify that the shift amounts and load offsets line up, i.e. that the |
748 | | // loads are consecutive. |
749 | 0 | uint64_t ShiftDiff = IsBigEndian ? LoadSize2 : LoadSize1; |
750 | 0 | uint64_t PrevSize = |
751 | 0 | DL.getTypeStoreSize(IntegerType::get(LI1->getContext(), LoadSize1)); |
752 | 0 | if ((Shift2 - Shift1) != ShiftDiff || (Offset2 - Offset1) != PrevSize) |
753 | 0 | return false; |
754 | | |
755 | | // Update LOps |
756 | 0 | AAMDNodes AATags1 = LOps.AATags; |
757 | 0 | AAMDNodes AATags2 = LI2->getAAMetadata(); |
758 | 0 | if (LOps.FoundRoot == false) { |
759 | 0 | LOps.FoundRoot = true; |
760 | 0 | AATags1 = LI1->getAAMetadata(); |
761 | 0 | } |
762 | 0 | LOps.LoadSize = LoadSize1 + LoadSize2; |
763 | 0 | LOps.RootInsert = Start; |
764 | | |
765 | | // Concatenate the AATags of the Merged Loads. |
766 | 0 | LOps.AATags = AATags1.concat(AATags2); |
767 | |
768 | 0 | LOps.Root = LI1; |
769 | 0 | LOps.Shift = ShAmt1; |
770 | 0 | LOps.ZextType = X->getType(); |
771 | 0 | return true; |
772 | 0 | } |
773 | | |
774 | | // For a given BB instruction, evaluate all loads in the chain that form a |
775 | | // pattern which suggests that the loads can be combined. The one and only use |
776 | | // of the loads is to form a wider load. |
777 | | static bool foldConsecutiveLoads(Instruction &I, const DataLayout &DL, |
778 | | TargetTransformInfo &TTI, AliasAnalysis &AA, |
779 | 0 | const DominatorTree &DT) { |
780 | | // Only consider load chains of scalar values. |
781 | 0 | if (isa<VectorType>(I.getType())) |
782 | 0 | return false; |
783 | | |
784 | 0 | LoadOps LOps; |
785 | 0 | if (!foldLoadsRecursive(&I, LOps, DL, AA) || !LOps.FoundRoot) |
786 | 0 | return false; |
787 | | |
788 | 0 | IRBuilder<> Builder(&I); |
789 | 0 | LoadInst *NewLoad = nullptr, *LI1 = LOps.Root; |
790 | |
791 | 0 | IntegerType *WiderType = IntegerType::get(I.getContext(), LOps.LoadSize); |
792 | | // TTI-based check for whether we want to proceed with the wider load. |
793 | 0 | bool Allowed = TTI.isTypeLegal(WiderType); |
794 | 0 | if (!Allowed) |
795 | 0 | return false; |
796 | | |
797 | 0 | unsigned AS = LI1->getPointerAddressSpace(); |
798 | 0 | unsigned Fast = 0; |
799 | 0 | Allowed = TTI.allowsMisalignedMemoryAccesses(I.getContext(), LOps.LoadSize, |
800 | 0 | AS, LI1->getAlign(), &Fast); |
801 | 0 | if (!Allowed || !Fast) |
802 | 0 | return false; |
803 | | |
804 | | // Get the Index and Ptr for the new GEP. |
805 | 0 | Value *Load1Ptr = LI1->getPointerOperand(); |
806 | 0 | Builder.SetInsertPoint(LOps.RootInsert); |
807 | 0 | if (!DT.dominates(Load1Ptr, LOps.RootInsert)) { |
808 | 0 | APInt Offset1(DL.getIndexTypeSizeInBits(Load1Ptr->getType()), 0); |
809 | 0 | Load1Ptr = Load1Ptr->stripAndAccumulateConstantOffsets( |
810 | 0 | DL, Offset1, /* AllowNonInbounds */ true); |
811 | 0 | Load1Ptr = Builder.CreatePtrAdd(Load1Ptr, |
812 | 0 | Builder.getInt32(Offset1.getZExtValue())); |
813 | 0 | } |
814 | | // Generate wider load. |
815 | 0 | NewLoad = Builder.CreateAlignedLoad(WiderType, Load1Ptr, LI1->getAlign(), |
816 | 0 | LI1->isVolatile(), ""); |
817 | 0 | NewLoad->takeName(LI1); |
818 | | // Set the New Load AATags Metadata. |
819 | 0 | if (LOps.AATags) |
820 | 0 | NewLoad->setAAMetadata(LOps.AATags); |
821 | |
822 | 0 | Value *NewOp = NewLoad; |
823 | | // Check if zero extend needed. |
824 | 0 | if (LOps.ZextType) |
825 | 0 | NewOp = Builder.CreateZExt(NewOp, LOps.ZextType); |
826 | | |
827 | | // Check if a shift is needed. We need to shift by the shift amount of |
828 | | // load1 if it is non-zero. |
829 | 0 | if (LOps.Shift) |
830 | 0 | NewOp = Builder.CreateShl(NewOp, ConstantInt::get(I.getContext(), *LOps.Shift)); |
831 | 0 | I.replaceAllUsesWith(NewOp); |
832 | |
833 | 0 | return true; |
834 | 0 | } |
835 | | |
836 | | // Calculate the GEP stride and the accumulated constant ModOffset. Return |
837 | | // the stride and ModOffset. |
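 | | // For example (sketch): a chain of two inbounds GEPs from a global with |
 | | // variable index scales of 12 and 8 bytes plus a constant byte offset of 5 |
 | | // yields Stride = gcd(12, 8) = 4 and ModOffset = 5 srem 4 = 1. |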
838 | | static std::pair<APInt, APInt> |
839 | 0 | getStrideAndModOffsetOfGEP(Value *PtrOp, const DataLayout &DL) { |
840 | 0 | unsigned BW = DL.getIndexTypeSizeInBits(PtrOp->getType()); |
841 | 0 | std::optional<APInt> Stride; |
842 | 0 | APInt ModOffset(BW, 0); |
843 | | // Return a minimum GEP stride, the greatest common divisor of consecutive GEP |
844 | | // index scales (cf. Bézout's identity). |
845 | 0 | while (auto *GEP = dyn_cast<GEPOperator>(PtrOp)) { |
846 | 0 | MapVector<Value *, APInt> VarOffsets; |
847 | 0 | if (!GEP->collectOffset(DL, BW, VarOffsets, ModOffset)) |
848 | 0 | break; |
849 | | |
850 | 0 | for (auto [V, Scale] : VarOffsets) { |
851 | | // Only keep a power of two factor for non-inbounds |
852 | 0 | if (!GEP->isInBounds()) |
853 | 0 | Scale = APInt::getOneBitSet(Scale.getBitWidth(), Scale.countr_zero()); |
854 | |
855 | 0 | if (!Stride) |
856 | 0 | Stride = Scale; |
857 | 0 | else |
858 | 0 | Stride = APIntOps::GreatestCommonDivisor(*Stride, Scale); |
859 | 0 | } |
860 | |
861 | 0 | PtrOp = GEP->getPointerOperand(); |
862 | 0 | } |
863 | | |
864 | | // Check whether pointer arrives back at Global Variable via at least one GEP. |
865 | | // Even if it doesn't, we can check by alignment. |
866 | 0 | if (!isa<GlobalVariable>(PtrOp) || !Stride) |
867 | 0 | return {APInt(BW, 1), APInt(BW, 0)}; |
868 | | |
869 | | // To account for signed GEP indices, reduce the offset to the remainder of |
870 | | // division by the minimum GEP stride. |
871 | 0 | ModOffset = ModOffset.srem(*Stride); |
872 | 0 | if (ModOffset.isNegative()) |
873 | 0 | ModOffset += *Stride; |
874 | |
875 | 0 | return {*Stride, ModOffset}; |
876 | 0 | } |
877 | | |
878 | | /// If C is a constant patterned array and all valid loaded results for the |
879 | | /// given alignment are equal to a single constant, fold the load to that constant. |
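 | | /// Worked example (sketch): with a constant such as |
 | | ///   @g = constant [8 x i8] c"\01\02\01\02\01\02\01\02", align 2 |
 | | /// an i16 load with align 2 can only see offsets 0, 2, 4 or 6 of @g, all of |
 | | /// which read the same bytes, so the load folds to i16 0x0201 (little-endian). |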
880 | 0 | static bool foldPatternedLoads(Instruction &I, const DataLayout &DL) { |
881 | 0 | auto *LI = dyn_cast<LoadInst>(&I); |
882 | 0 | if (!LI || LI->isVolatile()) |
883 | 0 | return false; |
884 | | |
885 | | // We can only fold the load if it is from a constant global with definitive |
886 | | // initializer. Skip expensive logic if this is not the case. |
887 | 0 | auto *PtrOp = LI->getPointerOperand(); |
888 | 0 | auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(PtrOp)); |
889 | 0 | if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer()) |
890 | 0 | return false; |
891 | | |
892 | | // Bail for large initializers in excess of 4K to avoid too many scans. |
893 | 0 | Constant *C = GV->getInitializer(); |
894 | 0 | uint64_t GVSize = DL.getTypeAllocSize(C->getType()); |
895 | 0 | if (!GVSize || 4096 < GVSize) |
896 | 0 | return false; |
897 | | |
898 | 0 | Type *LoadTy = LI->getType(); |
899 | 0 | unsigned BW = DL.getIndexTypeSizeInBits(PtrOp->getType()); |
900 | 0 | auto [Stride, ConstOffset] = getStrideAndModOffsetOfGEP(PtrOp, DL); |
901 | | |
902 | | // Any possible offset is a multiple of the GEP stride, and any valid offset |
903 | | // is a multiple of the load alignment, so checking only multiples of the |
904 | | // larger of the two is sufficient to establish that the results are equal. |
905 | 0 | if (auto LA = LI->getAlign(); |
906 | 0 | LA <= GV->getAlign().valueOrOne() && Stride.getZExtValue() < LA.value()) { |
907 | 0 | ConstOffset = APInt(BW, 0); |
908 | 0 | Stride = APInt(BW, LA.value()); |
909 | 0 | } |
910 | |
911 | 0 | Constant *Ca = ConstantFoldLoadFromConst(C, LoadTy, ConstOffset, DL); |
912 | 0 | if (!Ca) |
913 | 0 | return false; |
914 | | |
915 | 0 | unsigned E = GVSize - DL.getTypeStoreSize(LoadTy); |
916 | 0 | for (; ConstOffset.getZExtValue() <= E; ConstOffset += Stride) |
917 | 0 | if (Ca != ConstantFoldLoadFromConst(C, LoadTy, ConstOffset, DL)) |
918 | 0 | return false; |
919 | | |
920 | 0 | I.replaceAllUsesWith(Ca); |
921 | |
922 | 0 | return true; |
923 | 0 | } |
924 | | |
925 | | /// This is the entry point for folds that could be implemented in regular |
926 | | /// InstCombine, but they are separated because they are not expected to |
927 | | /// occur frequently and/or have more than a constant-length pattern match. |
928 | | static bool foldUnusualPatterns(Function &F, DominatorTree &DT, |
929 | | TargetTransformInfo &TTI, |
930 | | TargetLibraryInfo &TLI, AliasAnalysis &AA, |
931 | 0 | AssumptionCache &AC) { |
932 | 0 | bool MadeChange = false; |
933 | 0 | for (BasicBlock &BB : F) { |
934 | | // Ignore unreachable basic blocks. |
935 | 0 | if (!DT.isReachableFromEntry(&BB)) |
936 | 0 | continue; |
937 | | |
938 | 0 | const DataLayout &DL = F.getParent()->getDataLayout(); |
939 | | |
940 | | // Walk the block backwards for efficiency. We're matching a chain of |
941 | | // use->defs, so we're more likely to succeed by starting from the bottom. |
942 | | // Also, we want to avoid matching partial patterns. |
943 | | // TODO: It would be more efficient if we removed dead instructions |
944 | | // iteratively in this loop rather than waiting until the end. |
945 | 0 | for (Instruction &I : make_early_inc_range(llvm::reverse(BB))) { |
946 | 0 | MadeChange |= foldAnyOrAllBitsSet(I); |
947 | 0 | MadeChange |= foldGuardedFunnelShift(I, DT); |
948 | 0 | MadeChange |= tryToRecognizePopCount(I); |
949 | 0 | MadeChange |= tryToFPToSat(I, TTI); |
950 | 0 | MadeChange |= tryToRecognizeTableBasedCttz(I); |
951 | 0 | MadeChange |= foldConsecutiveLoads(I, DL, TTI, AA, DT); |
952 | 0 | MadeChange |= foldPatternedLoads(I, DL); |
953 | | // NOTE: This function may erase the instruction `I`, so it needs to be |
954 | | // called at the end of this sequence; calling it earlier would let later |
955 | | // folds operate on an erased instruction. |
956 | 0 | MadeChange |= foldSqrt(I, TTI, TLI, AC, DT); |
957 | 0 | } |
958 | 0 | } |
959 | | |
960 | | // We're done with transforms, so remove dead instructions. |
961 | 0 | if (MadeChange) |
962 | 0 | for (BasicBlock &BB : F) |
963 | 0 | SimplifyInstructionsInBlock(&BB); |
964 | |
|
965 | 0 | return MadeChange; |
966 | 0 | } |
967 | | |
968 | | /// This is the entry point for all transforms. Pass manager differences are |
969 | | /// handled in the callers of this function. |
970 | | static bool runImpl(Function &F, AssumptionCache &AC, TargetTransformInfo &TTI, |
971 | | TargetLibraryInfo &TLI, DominatorTree &DT, |
972 | 0 | AliasAnalysis &AA) { |
973 | 0 | bool MadeChange = false; |
974 | 0 | const DataLayout &DL = F.getParent()->getDataLayout(); |
975 | 0 | TruncInstCombine TIC(AC, TLI, DL, DT); |
976 | 0 | MadeChange |= TIC.run(F); |
977 | 0 | MadeChange |= foldUnusualPatterns(F, DT, TTI, TLI, AA, AC); |
978 | 0 | return MadeChange; |
979 | 0 | } |
980 | | |
981 | | PreservedAnalyses AggressiveInstCombinePass::run(Function &F, |
982 | 0 | FunctionAnalysisManager &AM) { |
983 | 0 | auto &AC = AM.getResult<AssumptionAnalysis>(F); |
984 | 0 | auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); |
985 | 0 | auto &DT = AM.getResult<DominatorTreeAnalysis>(F); |
986 | 0 | auto &TTI = AM.getResult<TargetIRAnalysis>(F); |
987 | 0 | auto &AA = AM.getResult<AAManager>(F); |
988 | 0 | if (!runImpl(F, AC, TTI, TLI, DT, AA)) { |
989 | | // No changes, all analyses are preserved. |
990 | 0 | return PreservedAnalyses::all(); |
991 | 0 | } |
992 | | // Mark all the analyses that instcombine updates as preserved. |
993 | 0 | PreservedAnalyses PA; |
994 | 0 | PA.preserveSet<CFGAnalyses>(); |
995 | 0 | return PA; |
996 | 0 | } |