SeparateConstOffsetFromGEP.cpp revision dce4a407a24b04eebc6a376f8e62b41aaa7b071f
//===-- SeparateConstOffsetFromGEP.cpp - ------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Loop unrolling may create many similar GEPs for array accesses.
// e.g., a 2-level loop
//
//   float a[32][32]; // global variable
//
//   for (int i = 0; i < 2; ++i) {
//     for (int j = 0; j < 2; ++j) {
//       ...
//       ... = a[x + i][y + j];
//       ...
//     }
//   }
//
// will probably be unrolled to:
//
//   gep %a, 0, %x, %y; load
//   gep %a, 0, %x, %y + 1; load
//   gep %a, 0, %x + 1, %y; load
//   gep %a, 0, %x + 1, %y + 1; load
//
// LLVM's GVN does not use partial redundancy elimination yet, and is thus
// unable to reuse (gep %a, 0, %x, %y). As a result, this misoptimization
// incurs a significant slowdown on targets with limited addressing modes. For
// instance, because the PTX target does not support the reg+reg addressing
// mode, the NVPTX backend emits PTX code that literally computes the pointer
// address of each GEP, wasting tons of registers. It emits the following PTX
// for the first load and similar PTX for the other loads.
//
//   mov.u32         %r1, %x;
//   mov.u32         %r2, %y;
//   mul.wide.u32    %rl2, %r1, 128;
//   mov.u64         %rl3, a;
//   add.s64         %rl4, %rl3, %rl2;
//   mul.wide.u32    %rl5, %r2, 4;
//   add.s64         %rl6, %rl4, %rl5;
//   ld.global.f32   %f1, [%rl6];
//
// To reduce register pressure, the optimization implemented in this file
// merges the common part of a group of GEPs, so that we can compute each
// pointer address by adding a simple offset to the common part, saving many
// registers.
//
// It works by splitting each GEP into a variadic base and a constant offset.
// The variadic base can be computed once and reused by multiple GEPs, and the
// constant offsets can be nicely folded into the reg+immediate addressing mode
// (supported by most targets) without using any extra register.
//
// For instance, we transform the four GEPs and four loads in the above example
// into:
//
//   base = gep a, 0, x, y
//   load base
//   load base + 1 * sizeof(float)
//   load base + 32 * sizeof(float)
//   load base + 33 * sizeof(float)
//
// Given the transformed IR, a backend that supports the reg+immediate
// addressing mode can easily fold the pointer arithmetic into the loads. For
// example, the NVPTX backend can easily fold the pointer arithmetic into the
// ld.global.f32 instructions, and the resultant PTX uses far fewer registers.
//
//   mov.u32         %r1, %tid.x;
//   mov.u32         %r2, %tid.y;
//   mul.wide.u32    %rl2, %r1, 128;
//   mov.u64         %rl3, a;
//   add.s64         %rl4, %rl3, %rl2;
//   mul.wide.u32    %rl5, %r2, 4;
//   add.s64         %rl6, %rl4, %rl5;
//   ld.global.f32   %f1, [%rl6];     // so far the same as unoptimized PTX
//   ld.global.f32   %f2, [%rl6+4];   // much better
//   ld.global.f32   %f3, [%rl6+128]; // much better
//   ld.global.f32   %f4, [%rl6+132]; // much better
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"

using namespace llvm;

static cl::opt<bool> DisableSeparateConstOffsetFromGEP(
    "disable-separate-const-offset-from-gep", cl::init(false),
    cl::desc("Do not separate the constant offset from a GEP instruction"),
    cl::Hidden);

namespace {

/// \brief A helper class for separating a constant offset from a GEP index.
///
/// In real programs, a GEP index may be more complicated than a simple
/// addition of something and a constant integer that can be trivially split.
/// For example, to split ((a << 3) | 5) + b, we need to search deeper for the
/// constant offset, so that we can separate the index into (a << 3) + b and 5.
///
/// Therefore, this class looks into the expression that computes a given GEP
/// index, and tries to find a constant integer that can be hoisted to the
/// outermost level of the expression as an addition. Not every constant in an
/// expression can jump out, e.g., we cannot transform (b * (a + 5)) into
/// (b * a + 5); nor can we transform (3 * (a + 5)) into (3 * a + 5). However,
/// in the latter case, -instcombine has probably already optimized
/// (3 * (a + 5)) into (3 * a + 15), from which the 15 can be extracted.
class ConstantOffsetExtractor {
 public:
  /// Extracts a constant offset from the given GEP index. It returns the
  /// numeric value of the extracted constant offset (0 if it fails), and sets
  /// a new index representing the remainder (equal to the original index minus
  /// the constant offset).
  /// \p Idx    The given GEP index
  /// \p NewIdx The new index that replaces the original (output parameter)
  /// \p DL     The data layout of the module
  /// \p IP     Calculating the new index requires new instructions. IP
  ///           indicates where to insert them (typically right before the
  ///           GEP).
  static int64_t Extract(Value *Idx, Value *&NewIdx, const DataLayout *DL,
                         Instruction *IP);
  /// Looks for a constant offset without extracting it. The meaning of the
  /// arguments and the return value is the same as in Extract.
  static int64_t Find(Value *Idx, const DataLayout *DL);

 private:
  ConstantOffsetExtractor(const DataLayout *Layout, Instruction *InsertionPt)
      : DL(Layout), IP(InsertionPt) {}
  /// Searches the expression that computes V for a constant offset. If the
  /// search is successful, updates UserChain as a path from V to the constant
  /// offset.
  int64_t find(Value *V);
  /// A helper function to look into both operands of a binary operator U.
  /// \p IsSub Whether U is a sub operator. If so, we need to negate the
  /// constant offset at some point.
  int64_t findInEitherOperand(User *U, bool IsSub);
  /// After finding the constant offset and how it is reached from the GEP
  /// index, we build a new index which is a clone of the old one except that
  /// the constant offset is removed. For example, given (a + (b + 5)) and
  /// knowing the constant offset is 5, this function returns (a + b).
  ///
  /// We cannot simply change the constant to zero because the expression that
  /// computes the index or its intermediate result may be used by others.
  Value *rebuildWithoutConstantOffset();
  // A helper function for rebuildWithoutConstantOffset that rebuilds the
  // direct user (U) of the constant offset (C).
  Value *rebuildLeafWithoutConstantOffset(User *U, Value *C);
  /// Returns a clone of U with the first occurrence of From replaced by To.
  Value *cloneAndReplace(User *U, Value *From, Value *To);

  /// Returns true if LHS and RHS have no bits in common, i.e.,
  /// (LHS & RHS) == 0.
  bool NoCommonBits(Value *LHS, Value *RHS) const;
  /// Computes which bits are known to be one or zero.
  /// \p KnownOne  Mask of all bits that are known to be one.
  /// \p KnownZero Mask of all bits that are known to be zero.
  void ComputeKnownBits(Value *V, APInt &KnownOne, APInt &KnownZero) const;
  /// Finds the first use of Used in U. Returns -1 if not found.
  static unsigned FindFirstUse(User *U, Value *Used);
  /// Returns whether OPC (sext or zext) can be distributed to the operands of
  /// BO. e.g., sext can be distributed to the operands of an "add nsw" because
  /// sext (add nsw a, b) == add nsw (sext a), (sext b).
  static bool Distributable(unsigned OPC, BinaryOperator *BO);

  /// The path from the constant offset to the old GEP index. For example, if
  /// the GEP index is "a * b + (c + 5)", then after running find, UserChain[0]
  /// will be the constant 5, UserChain[1] will be the subexpression "c + 5",
  /// and UserChain[2] will be the entire expression "a * b + (c + 5)".
  ///
  /// This path helps rebuildWithoutConstantOffset rebuild the new GEP index.
  SmallVector<User *, 8> UserChain;
  /// The data layout of the module. Used in ComputeKnownBits.
  const DataLayout *DL;
  Instruction *IP;  /// Insertion position of cloned instructions.
};

/// \brief A pass that tries to split every GEP in the function into a variadic
/// base and a constant offset. It is a FunctionPass because searching for the
/// constant offset may inspect other basic blocks.
class SeparateConstOffsetFromGEP : public FunctionPass {
 public:
  static char ID;
  SeparateConstOffsetFromGEP() : FunctionPass(ID) {
    initializeSeparateConstOffsetFromGEPPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DataLayoutPass>();
    AU.addRequired<TargetTransformInfo>();
  }
  bool runOnFunction(Function &F) override;

 private:
  /// Tries to split the given GEP into a variadic base and a constant offset,
  /// and returns true if the splitting succeeds.
  bool splitGEP(GetElementPtrInst *GEP);
  /// Finds the constant offset within each index and accumulates them. This
  /// function only inspects the GEP without changing it. The output
  /// NeedsExtraction indicates whether we can extract a non-zero constant
  /// offset from any index.
  int64_t accumulateByteOffset(GetElementPtrInst *GEP, const DataLayout *DL,
                               bool &NeedsExtraction);
};
}  // anonymous namespace

char SeparateConstOffsetFromGEP::ID = 0;
INITIALIZE_PASS_BEGIN(
    SeparateConstOffsetFromGEP, "separate-const-offset-from-gep",
    "Split GEPs to a variadic base and a constant offset for better CSE", false,
    false)
INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
INITIALIZE_PASS_DEPENDENCY(DataLayoutPass)
INITIALIZE_PASS_END(
    SeparateConstOffsetFromGEP, "separate-const-offset-from-gep",
    "Split GEPs to a variadic base and a constant offset for better CSE", false,
    false)

FunctionPass *llvm::createSeparateConstOffsetFromGEPPass() {
  return new SeparateConstOffsetFromGEP();
}

bool ConstantOffsetExtractor::Distributable(unsigned OPC, BinaryOperator *BO) {
  assert(OPC == Instruction::SExt || OPC == Instruction::ZExt);

  // sext (add/sub nsw A, B) == add/sub nsw (sext A), (sext B)
  // zext (add/sub nuw A, B) == add/sub nuw (zext A), (zext B)
  if (BO->getOpcode() == Instruction::Add ||
      BO->getOpcode() == Instruction::Sub) {
    return (OPC == Instruction::SExt && BO->hasNoSignedWrap()) ||
           (OPC == Instruction::ZExt && BO->hasNoUnsignedWrap());
  }

  // sext/zext (and/or/xor A, B) == and/or/xor (sext/zext A), (sext/zext B)
  // -instcombine also leverages this invariant to do the reverse
  // transformation to reduce integer casts.
  return BO->getOpcode() == Instruction::And ||
         BO->getOpcode() == Instruction::Or ||
         BO->getOpcode() == Instruction::Xor;
}

int64_t ConstantOffsetExtractor::findInEitherOperand(User *U, bool IsSub) {
  assert(U->getNumOperands() == 2);
  int64_t ConstantOffset = find(U->getOperand(0));
  // If we found a constant offset in the left operand, stop and return it.
  // This shortcut might cause us to miss opportunities to combine the
  // constant offsets in both operands, e.g., (a + 4) + (b + 5) => (a + b) + 9.
  // However, such cases are probably already handled by -instcombine, given
  // that this pass runs after the standard optimizations.
  if (ConstantOffset != 0) return ConstantOffset;
  ConstantOffset = find(U->getOperand(1));
  // If U is a sub operator, negate the constant offset found in the right
  // operand.
  return IsSub ? -ConstantOffset : ConstantOffset;
}

int64_t ConstantOffsetExtractor::find(Value *V) {
  // TODO(jingyue): We can even trace into integer/pointer casts, such as
  // inttoptr, ptrtoint, bitcast, and addrspacecast. We choose to handle only
  // integers because it gives good enough results for our benchmarks.
  assert(V->getType()->isIntegerTy());

  User *U = dyn_cast<User>(V);
  // We cannot do much with Values that are not a User, such as BasicBlock and
  // MDNode.
  if (U == nullptr) return 0;

  int64_t ConstantOffset = 0;
  if (ConstantInt *CI = dyn_cast<ConstantInt>(U)) {
    // Hooray, we found it!
    ConstantOffset = CI->getSExtValue();
  } else if (Operator *O = dyn_cast<Operator>(U)) {
    // The GEP index may be more complicated than a simple addition of a
    // variable and a constant. Therefore, we trace into subexpressions for
    // more hoisting opportunities.
    switch (O->getOpcode()) {
    case Instruction::Add: {
      ConstantOffset = findInEitherOperand(U, false);
      break;
    }
    case Instruction::Sub: {
      ConstantOffset = findInEitherOperand(U, true);
      break;
    }
    case Instruction::Or: {
      // If LHS and RHS don't have common bits, (LHS | RHS) is equivalent to
      // (LHS + RHS).
      if (NoCommonBits(U->getOperand(0), U->getOperand(1)))
        ConstantOffset = findInEitherOperand(U, false);
      break;
    }
    case Instruction::SExt:
    case Instruction::ZExt: {
      // We trace into a sext/zext if the operator can be distributed to its
      // operand. e.g., we can trace into "sext (add nsw a, 5)" and extract the
      // constant 5, because
      //   sext (add nsw a, 5) == add nsw (sext a), 5
      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0))) {
        if (Distributable(O->getOpcode(), BO))
          ConstantOffset = find(U->getOperand(0));
      }
      break;
    }
    }
  }
  // If we found a non-zero constant offset, add it to the path for the future
  // transformation (rebuildWithoutConstantOffset). Zero is a valid constant
  // offset, but doesn't help this optimization.
  if (ConstantOffset != 0)
    UserChain.push_back(U);
  return ConstantOffset;
}

unsigned ConstantOffsetExtractor::FindFirstUse(User *U, Value *Used) {
  for (unsigned I = 0, E = U->getNumOperands(); I < E; ++I) {
    if (U->getOperand(I) == Used)
      return I;
  }
  return -1;
}

Value *ConstantOffsetExtractor::cloneAndReplace(User *U, Value *From,
                                                Value *To) {
  // Find in U the first use of From. It is safe to ignore later occurrences
  // of From, because findInEitherOperand similarly stops searching the right
  // operand when the first operand has a non-zero constant offset.
  unsigned OpNo = FindFirstUse(U, From);
  assert(OpNo != (unsigned)-1 && "UserChain wasn't built correctly");

  // ConstantOffsetExtractor::find only follows Operators (i.e., Instructions
  // and ConstantExprs). Therefore, U is either an Instruction or a
  // ConstantExpr.
  if (Instruction *I = dyn_cast<Instruction>(U)) {
    Instruction *Clone = I->clone();
    Clone->setOperand(OpNo, To);
    Clone->insertBefore(IP);
    return Clone;
  }
  // cast<Constant>(To) is safe because a ConstantExpr only uses Constants.
  return cast<ConstantExpr>(U)
      ->getWithOperandReplaced(OpNo, cast<Constant>(To));
}

Value *ConstantOffsetExtractor::rebuildLeafWithoutConstantOffset(User *U,
                                                                 Value *C) {
  assert(U->getNumOperands() <= 2 &&
         "We didn't trace into any operator with more than 2 operands");
  // If U's only operand is the constant offset, removing the constant offset
  // leaves U as a null value.
  if (U->getNumOperands() == 1)
    return Constant::getNullValue(U->getType());

  // U->getNumOperands() == 2
  unsigned OpNo = FindFirstUse(U, C); // U->getOperand(OpNo) == C
  assert(OpNo < 2 && "UserChain wasn't built correctly");
  Value *TheOther = U->getOperand(1 - OpNo); // The other operand of U
  // If U = C - X, removing C makes U = -X; otherwise U will simply be X.
  if (!isa<SubOperator>(U) || OpNo == 1)
    return TheOther;
  if (isa<ConstantExpr>(U))
    return ConstantExpr::getNeg(cast<Constant>(TheOther));
  return BinaryOperator::CreateNeg(TheOther, "", IP);
}

Value *ConstantOffsetExtractor::rebuildWithoutConstantOffset() {
  assert(UserChain.size() > 0 && "you at least found a constant, right?");
  // Start with the constant and go up through UserChain, each time building a
  // clone of the subexpression but with the constant removed.
  // e.g., to build a clone of (a + (b + (c + 5))) but with the 5 removed, we
  // first build c, then (b + c), and finally (a + (b + c)).
  //
  // Fast path: if the GEP index is a constant, simply return 0.
  if (UserChain.size() == 1)
    return ConstantInt::get(UserChain[0]->getType(), 0);

  Value *Remainder =
      rebuildLeafWithoutConstantOffset(UserChain[1], UserChain[0]);
  for (size_t I = 2; I < UserChain.size(); ++I)
    Remainder = cloneAndReplace(UserChain[I], UserChain[I - 1], Remainder);
  return Remainder;
}

int64_t ConstantOffsetExtractor::Extract(Value *Idx, Value *&NewIdx,
                                         const DataLayout *DL,
                                         Instruction *IP) {
  ConstantOffsetExtractor Extractor(DL, IP);
  // Find a non-zero constant offset first.
  int64_t ConstantOffset = Extractor.find(Idx);
  if (ConstantOffset == 0)
    return 0;
  // Then rebuild a new index with the constant removed.
  NewIdx = Extractor.rebuildWithoutConstantOffset();
  return ConstantOffset;
}

int64_t ConstantOffsetExtractor::Find(Value *Idx, const DataLayout *DL) {
  return ConstantOffsetExtractor(DL, nullptr).find(Idx);
}

void ConstantOffsetExtractor::ComputeKnownBits(Value *V, APInt &KnownOne,
                                               APInt &KnownZero) const {
  IntegerType *IT = cast<IntegerType>(V->getType());
  KnownOne = APInt(IT->getBitWidth(), 0);
  KnownZero = APInt(IT->getBitWidth(), 0);
  llvm::computeKnownBits(V, KnownZero, KnownOne, DL, 0);
}

bool ConstantOffsetExtractor::NoCommonBits(Value *LHS, Value *RHS) const {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  APInt LHSKnownOne, LHSKnownZero, RHSKnownOne, RHSKnownZero;
  ComputeKnownBits(LHS, LHSKnownOne, LHSKnownZero);
  ComputeKnownBits(RHS, RHSKnownOne, RHSKnownZero);
  return (LHSKnownZero | RHSKnownZero).isAllOnesValue();
}

int64_t SeparateConstOffsetFromGEP::accumulateByteOffset(
    GetElementPtrInst *GEP, const DataLayout *DL, bool &NeedsExtraction) {
  NeedsExtraction = false;
  int64_t AccumulativeByteOffset = 0;
  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
    if (isa<SequentialType>(*GTI)) {
      // Try to find a constant offset in this GEP index.
      int64_t ConstantOffset =
          ConstantOffsetExtractor::Find(GEP->getOperand(I), DL);
      if (ConstantOffset != 0) {
        NeedsExtraction = true;
        // A GEP may have multiple indices. We accumulate the extracted
        // constant offset into a byte offset, and later offset the remainder
        // of the original GEP with this byte offset.
        AccumulativeByteOffset +=
            ConstantOffset * DL->getTypeAllocSize(GTI.getIndexedType());
      }
    }
  }
  return AccumulativeByteOffset;
}

bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
  // Skip vector GEPs.
  if (GEP->getType()->isVectorTy())
    return false;

  // The backend can already nicely handle the case where all indices are
  // constant.
  if (GEP->hasAllConstantIndices())
    return false;

  bool Changed = false;

  // Shortcut integer casts. Eliminating these explicit casts can make
  // subsequent optimizations more obvious: ConstantOffsetExtractor needn't
  // trace into these casts.
  if (GEP->isInBounds()) {
    // Doing this to inbounds GEPs is safe because their indices are guaranteed
    // to be non-negative and in bounds.
    gep_type_iterator GTI = gep_type_begin(*GEP);
    for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
      if (isa<SequentialType>(*GTI)) {
        if (Operator *O = dyn_cast<Operator>(GEP->getOperand(I))) {
          if (O->getOpcode() == Instruction::SExt ||
              O->getOpcode() == Instruction::ZExt) {
            GEP->setOperand(I, O->getOperand(0));
            Changed = true;
          }
        }
      }
    }
  }

  const DataLayout *DL = &getAnalysis<DataLayoutPass>().getDataLayout();
  bool NeedsExtraction;
  int64_t AccumulativeByteOffset =
      accumulateByteOffset(GEP, DL, NeedsExtraction);

  if (!NeedsExtraction)
    return Changed;
  // Before really splitting the GEP, check whether the backend supports the
  // addressing mode we are about to produce. If not, this splitting probably
  // won't be beneficial.
  TargetTransformInfo &TTI = getAnalysis<TargetTransformInfo>();
  if (!TTI.isLegalAddressingMode(GEP->getType()->getElementType(),
                                 /*BaseGV=*/nullptr, AccumulativeByteOffset,
                                 /*HasBaseReg=*/true, /*Scale=*/0)) {
    return Changed;
  }

  // Remove the constant offset in each GEP index. The resultant GEP computes
  // the variadic base.
  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
    if (isa<SequentialType>(*GTI)) {
      Value *NewIdx = nullptr;
      // Try to extract a constant offset from this GEP index.
      int64_t ConstantOffset =
          ConstantOffsetExtractor::Extract(GEP->getOperand(I), NewIdx, DL, GEP);
      if (ConstantOffset != 0) {
        assert(NewIdx != nullptr &&
               "ConstantOffset != 0 implies NewIdx is set");
        GEP->setOperand(I, NewIdx);
        // Clear the inbounds attribute because the new index may be off-bound.
        // e.g.,
        //
        //   b = add i64 a, 5
        //   addr = gep inbounds float* p, i64 b
        //
        // is transformed to:
        //
        //   addr2 = gep float* p, i64 a
        //   addr = gep float* addr2, i64 5
        //
        // If a is -4, although the old index b is in bounds, the new index a
        // is off-bound. http://llvm.org/docs/LangRef.html#id181 says "if the
        // inbounds keyword is not present, the offsets are added to the base
        // address with silently-wrapping two's complement arithmetic".
        // Therefore, the final code will be semantically equivalent.
        //
        // TODO(jingyue): do some range analysis to keep as many inbounds as
        // possible. GEPs with inbounds are more friendly to alias analysis.
        GEP->setIsInBounds(false);
        Changed = true;
      }
    }
  }

  // Offset the base with the accumulative byte offset.
  //
  //   %gep                        ; the base
  //   ... %gep ...
  //
  // => add the offset
  //
  //   %gep2                       ; clone of %gep
  //   %new.gep = gep %gep2, <offset / sizeof(*%gep)>
  //   %gep                        ; will be removed
  //   ... %gep ...
  //
  // => replace all uses of %gep with %new.gep and remove %gep
  //
  //   %gep2                       ; clone of %gep
  //   %new.gep = gep %gep2, <offset / sizeof(*%gep)>
  //   ... %new.gep ...
  //
  // If AccumulativeByteOffset is not a multiple of sizeof(*%gep), we emit an
  // uglygep (http://llvm.org/docs/GetElementPtr.html#what-s-an-uglygep):
  // bitcast %gep2 to i8*, add the offset, and bitcast the result back to the
  // type of %gep.
  //
  //   %gep2                       ; clone of %gep
  //   %0 = bitcast %gep2 to i8*
  //   %uglygep = gep %0, <offset>
  //   %new.gep = bitcast %uglygep to <type of %gep>
  //   ... %new.gep ...
  Instruction *NewGEP = GEP->clone();
  NewGEP->insertBefore(GEP);

  Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
  uint64_t ElementTypeSizeOfGEP =
      DL->getTypeAllocSize(GEP->getType()->getElementType());
  if (AccumulativeByteOffset % ElementTypeSizeOfGEP == 0) {
    // Very likely. As long as %gep is naturally aligned, the byte offset we
    // extracted should be a multiple of sizeof(*%gep).
    // Per the C standard, signed / unsigned is computed as unsigned, so we
    // cast ElementTypeSizeOfGEP to signed first.
    int64_t Index =
        AccumulativeByteOffset / static_cast<int64_t>(ElementTypeSizeOfGEP);
    NewGEP = GetElementPtrInst::Create(
        NewGEP, ConstantInt::get(IntPtrTy, Index, true), GEP->getName(), GEP);
  } else {
    // Unlikely but possible. For example,
    //
    //   #pragma pack(1)
    //   struct S {
    //     int a[3];
    //     int64 b[8];
    //   };
    //   #pragma pack()
    //
    // Suppose the gep before extraction is &s[i + 1].b[j + 3]. After
    // extraction, it becomes &s[i].b[j] and AccumulativeByteOffset is
    // sizeof(S) + 3 * sizeof(int64) = 100, which is not a multiple of
    // sizeof(int64).
    //
    // Emit an uglygep in this case.
    Type *I8PtrTy = Type::getInt8PtrTy(GEP->getContext(),
                                       GEP->getPointerAddressSpace());
    NewGEP = new BitCastInst(NewGEP, I8PtrTy, "", GEP);
    NewGEP = GetElementPtrInst::Create(
        NewGEP, ConstantInt::get(IntPtrTy, AccumulativeByteOffset, true),
        "uglygep", GEP);
    if (GEP->getType() != I8PtrTy)
      NewGEP = new BitCastInst(NewGEP, GEP->getType(), GEP->getName(), GEP);
  }

  GEP->replaceAllUsesWith(NewGEP);
  GEP->eraseFromParent();

  return true;
}

bool SeparateConstOffsetFromGEP::runOnFunction(Function &F) {
  if (DisableSeparateConstOffsetFromGEP)
    return false;

  bool Changed = false;
  for (Function::iterator B = F.begin(), BE = F.end(); B != BE; ++B) {
    for (BasicBlock::iterator I = B->begin(), IE = B->end(); I != IE; ) {
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I++)) {
        Changed |= splitGEP(GEP);
      }
      // No need to split GEP ConstantExprs because all their indices are
      // already constant.
    }
  }
  return Changed;
}
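// A minimal sketch of how a target might schedule this pass with the legacy
// pass manager. MyTargetPassConfig is a hypothetical illustration; the exact
// hook depends on the target's TargetMachine setup:
//
//   void MyTargetPassConfig::addIRPasses() {
//     TargetPassConfig::addIRPasses();
//     // Split GEPs so that the constant offsets can later be folded into the
//     // reg+immediate addressing mode; a subsequent CSE/GVN run can then
//     // reuse the common variadic bases.
//     addPass(createSeparateConstOffsetFromGEPPass());
//   }
//
// The pass can also be exercised on its own, e.g.:
//
//   opt -separate-const-offset-from-gep -S input.ll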