ScalarReplAggregates.cpp revision 800de31776356910eb877e71df9f32b0a6215324
//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation implements the well known scalar replacement of
// aggregates transformation.  This xform breaks up alloca instructions of
// aggregate type (structure or array) into individual alloca instructions for
// each member (if possible).  Then, if possible, it transforms the individual
// alloca instructions into nice clean scalar SSA form.
//
// This combines a simple SRoA algorithm with the Mem2Reg algorithm because
// the two often interact, especially for C++ programs.  As such, iterating
// between SRoA, then Mem2Reg until we run out of things to promote works well.
//
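// For example (an illustrative sketch, not a testcase from the tree), given:
//
//   %p = alloca { i32, float }
//   %f = getelementptr { i32, float }* %p, i32 0, i32 1
//   store float 1.0, float* %f
//
// SROA replaces %p with one alloca per member, after which mem2reg can
// promote each piece into an SSA register:
//
//   %p.0 = alloca i32
//   %p.1 = alloca float
//   store float 1.0, float* %p.1
//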
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "scalarrepl"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
using namespace llvm;

STATISTIC(NumReplaced,  "Number of allocas broken up");
STATISTIC(NumPromoted,  "Number of allocas promoted");
STATISTIC(NumConverted, "Number of aggregates converted to scalar");
STATISTIC(NumGlobals,   "Number of allocas copied from constant global");

namespace {
  struct VISIBILITY_HIDDEN SROA : public FunctionPass {
    static char ID; // Pass identification, replacement for typeid
    explicit SROA(signed T = -1) : FunctionPass((intptr_t)&ID) {
      if (T == -1)
        SRThreshold = 128;
      else
        SRThreshold = T;
    }

    bool runOnFunction(Function &F);

    bool performScalarRepl(Function &F);
    bool performPromotion(Function &F);

    // getAnalysisUsage - This pass does not require any passes, but we know it
    // will not alter the CFG, so say so.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      AU.addRequired<DominanceFrontier>();
      AU.addRequired<TargetData>();
      AU.setPreservesCFG();
    }

  private:
    /// AllocaInfo - When analyzing uses of an alloca instruction, this
    /// captures information about the uses.  All these fields are initialized
    /// to false and set to true when something is learned.
    struct AllocaInfo {
      /// isUnsafe - This is set to true if the alloca cannot be SROA'd.
      bool isUnsafe : 1;

      /// needsCanon - This is set to true if there is some use of the alloca
      /// that requires canonicalization.
      bool needsCanon : 1;

      /// isMemCpySrc - This is true if this aggregate is memcpy'd from.
      bool isMemCpySrc : 1;

      /// isMemCpyDst - This is true if this aggregate is memcpy'd into.
      bool isMemCpyDst : 1;

      AllocaInfo()
        : isUnsafe(false), needsCanon(false),
          isMemCpySrc(false), isMemCpyDst(false) {}
    };

    unsigned SRThreshold;

    void MarkUnsafe(AllocaInfo &I) { I.isUnsafe = true; }

    int isSafeAllocaToScalarRepl(AllocationInst *AI);

    void isSafeUseOfAllocation(Instruction *User, AllocationInst *AI,
                               AllocaInfo &Info);
    void isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI,
                          AllocaInfo &Info);
    void isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
                                        unsigned OpNo, AllocaInfo &Info);
    void isSafeUseOfBitCastedAllocation(BitCastInst *User, AllocationInst *AI,
                                        AllocaInfo &Info);

    void DoScalarReplacement(AllocationInst *AI,
                             std::vector<AllocationInst*> &WorkList);
    void CanonicalizeAllocaUsers(AllocationInst *AI);
    AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocationInst *Base);

    void RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
                                    SmallVector<AllocaInst*, 32> &NewElts);

    const Type *CanConvertToScalar(Value *V, bool &IsNotTrivial);
    void ConvertToScalar(AllocationInst *AI, const Type *Ty);
    void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset);
    Value *ConvertUsesOfLoadToScalar(LoadInst *LI, AllocaInst *NewAI,
                                     unsigned Offset);
    Value *ConvertUsesOfStoreToScalar(StoreInst *SI, AllocaInst *NewAI,
                                      unsigned Offset);
    static Instruction *isOnlyCopiedFromConstantGlobal(AllocationInst *AI);
  };

  char SROA::ID = 0;
  RegisterPass<SROA> X("scalarrepl", "Scalar Replacement of Aggregates");
}

// Public interface to the ScalarReplAggregates pass
FunctionPass *llvm::createScalarReplAggregatesPass(signed int Threshold) {
  return new SROA(Threshold);
}


bool SROA::runOnFunction(Function &F) {
  bool Changed = performPromotion(F);
  while (1) {
    bool LocalChange = performScalarRepl(F);
    if (!LocalChange) break;   // No need to repromote if no scalarrepl
    Changed = true;
    LocalChange = performPromotion(F);
    if (!LocalChange) break;   // No need to re-scalarrepl if no promotion
  }

  return Changed;
}


bool SROA::performPromotion(Function &F) {
  std::vector<AllocaInst*> Allocas;
  DominatorTree &DT = getAnalysis<DominatorTree>();
  DominanceFrontier &DF = getAnalysis<DominanceFrontier>();

  BasicBlock &BB = F.getEntryBlock();  // Get the entry node for the function

  bool Changed = false;

  while (1) {
    Allocas.clear();

    // Find allocas that are safe to promote, by looking at all instructions
    // in the entry node.
    for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(I))  // Is it an alloca?
        if (isAllocaPromotable(AI))
          Allocas.push_back(AI);

    if (Allocas.empty()) break;

    PromoteMemToReg(Allocas, DT, DF);
    NumPromoted += Allocas.size();
    Changed = true;
  }

  return Changed;
}

// performScalarRepl - This algorithm is a simple worklist driven algorithm,
// which runs on all of the malloc/alloca instructions in the function,
// removing them if they are only used by getelementptr instructions.
//
bool SROA::performScalarRepl(Function &F) {
  std::vector<AllocationInst*> WorkList;

  // Scan the entry basic block, adding any alloca's and mallocs to the
  // worklist.
  BasicBlock &BB = F.getEntryBlock();
  for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
    if (AllocationInst *A = dyn_cast<AllocationInst>(I))
      WorkList.push_back(A);

  const TargetData &TD = getAnalysis<TargetData>();

  // Process the worklist
  bool Changed = false;
  while (!WorkList.empty()) {
    AllocationInst *AI = WorkList.back();
    WorkList.pop_back();

    // Handle dead allocas trivially.  These can be formed by SROA'ing arrays
    // with unused elements.
    if (AI->use_empty()) {
      AI->eraseFromParent();
      continue;
    }

    // Check to see if we can turn this aggregate value (potentially with
    // casts) into a simple scalar value that can be mem2reg'd into a register.
    bool IsNotTrivial = false;
    if (const Type *ActualType = CanConvertToScalar(AI, IsNotTrivial))
      if (IsNotTrivial && ActualType != Type::VoidTy) {
        ConvertToScalar(AI, ActualType);
        Changed = true;
        continue;
      }

    // Check to see if we can perform the core SROA transformation.  We cannot
    // transform the allocation instruction if it is an array allocation
    // (allocations OF arrays are ok though), and an allocation of a scalar
    // value cannot be decomposed at all.
    if (!AI->isArrayAllocation() &&
        (isa<StructType>(AI->getAllocatedType()) ||
         isa<ArrayType>(AI->getAllocatedType())) &&
        AI->getAllocatedType()->isSized() &&
        TD.getABITypeSize(AI->getAllocatedType()) < SRThreshold) {
      // Check that all of the users of the allocation are capable of being
      // transformed.
      switch (isSafeAllocaToScalarRepl(AI)) {
      default: assert(0 && "Unexpected value!");
      case 0:  // Not safe to scalar replace.
        break;
      case 1:  // Safe, but requires cleanup/canonicalizations first
        CanonicalizeAllocaUsers(AI);
        // FALL THROUGH.
      case 3:  // Safe to scalar replace.
        DoScalarReplacement(AI, WorkList);
        Changed = true;
        continue;
      }
    }

    // Check to see if this allocation is only modified by a memcpy/memmove
    // from a constant global.  If this is the case, we can change all users
    // to use the constant global instead.  This is commonly produced by the
    // CFE by constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }"
    // if 'A' is only subsequently read.
    if (Instruction *TheCopy = isOnlyCopiedFromConstantGlobal(AI)) {
      DOUT << "Found alloca equal to global: " << *AI;
      DOUT << "  memcpy = " << *TheCopy;
      Constant *TheSrc = cast<Constant>(TheCopy->getOperand(2));
      AI->replaceAllUsesWith(ConstantExpr::getBitCast(TheSrc, AI->getType()));
      TheCopy->eraseFromParent();  // Don't mutate the global.
      AI->eraseFromParent();
      ++NumGlobals;
      Changed = true;
      continue;
    }

    // Otherwise, couldn't process this.
  }

  return Changed;
}

/// DoScalarReplacement - This alloca satisfied the isSafeAllocaToScalarRepl
/// predicate; do SROA now.
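///
/// As an illustrative sketch (names invented here, not from a testcase):
/// %A = alloca [2 x i32] becomes %A.0 = alloca i32 and %A.1 = alloca i32,
/// and each "getelementptr [2 x i32]* %A, i32 0, i32 <c>" user is rewritten
/// to address %A.<c> directly.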
void SROA::DoScalarReplacement(AllocationInst *AI,
                               std::vector<AllocationInst*> &WorkList) {
  DOUT << "Found inst to SROA: " << *AI;
  SmallVector<AllocaInst*, 32> ElementAllocas;
  if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
    ElementAllocas.reserve(ST->getNumContainedTypes());
    for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
                                      AI->getAlignment(),
                                      AI->getName() + "." + utostr(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  } else {
    const ArrayType *AT = cast<ArrayType>(AI->getAllocatedType());
    ElementAllocas.reserve(AT->getNumElements());
    const Type *ElTy = AT->getElementType();
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(),
                                      AI->getName() + "." + utostr(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  }

  // Now that we have created the alloca instructions that we want to use,
  // expand the getelementptr instructions to use them.
  //
  while (!AI->use_empty()) {
    Instruction *User = cast<Instruction>(AI->use_back());
    if (BitCastInst *BCInst = dyn_cast<BitCastInst>(User)) {
      RewriteBitCastUserOfAlloca(BCInst, AI, ElementAllocas);
      BCInst->eraseFromParent();
      continue;
    }

    GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User);
    // We now know that the GEP is of the form: GEP <ptr>, 0, <cst>
    unsigned Idx =
       (unsigned)cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();

    assert(Idx < ElementAllocas.size() && "Index out of range?");
    AllocaInst *AllocaToUse = ElementAllocas[Idx];

    Value *RepValue;
    if (GEPI->getNumOperands() == 3) {
      // Do not insert a new getelementptr instruction with zero indices, only
      // to have it optimized out later.
      RepValue = AllocaToUse;
    } else {
      // We are indexing deeply into the structure, so we still need a
      // getelementptr instruction to finish the indexing.  This may be
      // expanded itself once the worklist is rerun.
      //
      SmallVector<Value*, 8> NewArgs;
      NewArgs.push_back(Constant::getNullValue(Type::Int32Ty));
      NewArgs.append(GEPI->op_begin()+3, GEPI->op_end());
      RepValue = new GetElementPtrInst(AllocaToUse, NewArgs.begin(),
                                       NewArgs.end(), "", GEPI);
      RepValue->takeName(GEPI);
    }

    // If this GEP is to the start of the aggregate, check for memcpys.
    if (Idx == 0) {
      bool IsStartOfAggregateGEP = true;
      for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i) {
        if (!isa<ConstantInt>(GEPI->getOperand(i))) {
          IsStartOfAggregateGEP = false;
          break;
        }
        if (!cast<ConstantInt>(GEPI->getOperand(i))->isZero()) {
          IsStartOfAggregateGEP = false;
          break;
        }
      }

      if (IsStartOfAggregateGEP)
        RewriteBitCastUserOfAlloca(GEPI, AI, ElementAllocas);
    }


    // Move all of the users over to the new GEP.
    GEPI->replaceAllUsesWith(RepValue);
    // Delete the old GEP
    GEPI->eraseFromParent();
  }

  // Finally, delete the Alloca instruction
  AI->eraseFromParent();
  NumReplaced++;
}


/// isSafeElementUse - Check to see if this use is an allowed use for a
/// getelementptr instruction of an array aggregate allocation.  isFirstElt
/// indicates whether Ptr is known to point to the start of the aggregate.
///
void SROA::isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI,
                            AllocaInfo &Info) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I) {
    Instruction *User = cast<Instruction>(*I);
    switch (User->getOpcode()) {
    case Instruction::Load:  break;
    case Instruction::Store:
      // Store is ok if storing INTO the pointer, not storing the pointer
      if (User->getOperand(0) == Ptr) return MarkUnsafe(Info);
      break;
    case Instruction::GetElementPtr: {
      GetElementPtrInst *GEP = cast<GetElementPtrInst>(User);
      bool AreAllZeroIndices = isFirstElt;
      if (GEP->getNumOperands() > 1) {
        if (!isa<ConstantInt>(GEP->getOperand(1)) ||
            !cast<ConstantInt>(GEP->getOperand(1))->isZero())
          // Using pointer arithmetic to navigate the array.
          return MarkUnsafe(Info);

        if (AreAllZeroIndices) {
          for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i) {
            if (!isa<ConstantInt>(GEP->getOperand(i)) ||
                !cast<ConstantInt>(GEP->getOperand(i))->isZero()) {
              AreAllZeroIndices = false;
              break;
            }
          }
        }
      }
      isSafeElementUse(GEP, AreAllZeroIndices, AI, Info);
      if (Info.isUnsafe) return;
      break;
    }
    case Instruction::BitCast:
      if (isFirstElt) {
        isSafeUseOfBitCastedAllocation(cast<BitCastInst>(User), AI, Info);
        if (Info.isUnsafe) return;
        break;
      }
      DOUT << "  Transformation preventing inst: " << *User;
      return MarkUnsafe(Info);
    case Instruction::Call:
      if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
        if (isFirstElt) {
          isSafeMemIntrinsicOnAllocation(MI, AI, I.getOperandNo(), Info);
          if (Info.isUnsafe) return;
          break;
        }
      }
      DOUT << "  Transformation preventing inst: " << *User;
      return MarkUnsafe(Info);
    default:
      DOUT << "  Transformation preventing inst: " << *User;
      return MarkUnsafe(Info);
    }
  }
  return;  // All users look ok :)
}

/// AllUsersAreLoads - Return true if all users of this value are loads.
static bool AllUsersAreLoads(Value *Ptr) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I)
    if (cast<Instruction>(*I)->getOpcode() != Instruction::Load)
      return false;
  return true;
}

/// isSafeUseOfAllocation - Check to see if this user is an allowed use for an
/// aggregate allocation.
///
void SROA::isSafeUseOfAllocation(Instruction *User, AllocationInst *AI,
                                 AllocaInfo &Info) {
  if (BitCastInst *C = dyn_cast<BitCastInst>(User))
    return isSafeUseOfBitCastedAllocation(C, AI, Info);

  GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User);
  if (GEPI == 0)
    return MarkUnsafe(Info);

  gep_type_iterator I = gep_type_begin(GEPI), E = gep_type_end(GEPI);

  // The GEP is not safe to transform if not of the form "GEP <ptr>, 0, <cst>".
  if (I == E ||
      I.getOperand() != Constant::getNullValue(I.getOperand()->getType())) {
    return MarkUnsafe(Info);
  }

  ++I;
  if (I == E) return MarkUnsafe(Info);  // ran out of GEP indices??

  bool IsAllZeroIndices = true;

  // If this is a use of an array allocation, do a bit more checking for
  // sanity.
  if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
    uint64_t NumElements = AT->getNumElements();

    if (ConstantInt *Idx = dyn_cast<ConstantInt>(I.getOperand())) {
      IsAllZeroIndices &= Idx->isZero();

      // Check to make sure that index falls within the array.  If not,
      // something funny is going on, so we won't do the optimization.
      //
      if (Idx->getZExtValue() >= NumElements)
        return MarkUnsafe(Info);

      // We cannot scalar repl this level of the array unless any array
      // sub-indices are in-range constants.  In particular, consider:
      // A[0][i].  We cannot know that the user isn't doing invalid things like
      // allowing i to index an out-of-range subscript that accesses A[1].
      //
      // Scalar replacing *just* the outer index of the array is probably not
      // going to be a win anyway, so just give up.
      for (++I; I != E && (isa<ArrayType>(*I) || isa<VectorType>(*I)); ++I) {
        uint64_t NumElements;
        if (const ArrayType *SubArrayTy = dyn_cast<ArrayType>(*I))
          NumElements = SubArrayTy->getNumElements();
        else
          NumElements = cast<VectorType>(*I)->getNumElements();

        ConstantInt *IdxVal = dyn_cast<ConstantInt>(I.getOperand());
        if (!IdxVal) return MarkUnsafe(Info);
        if (IdxVal->getZExtValue() >= NumElements)
          return MarkUnsafe(Info);
        IsAllZeroIndices &= IdxVal->isZero();
      }

    } else {
      IsAllZeroIndices = false;

      // If this is an array index and the index is not constant, we cannot
      // promote... that is unless the array has exactly one or two elements
      // in it, in which case we CAN promote it, but we have to canonicalize
      // this out if this is the only problem.
      if ((NumElements == 1 || NumElements == 2) &&
          AllUsersAreLoads(GEPI)) {
        Info.needsCanon = true;
        return;  // Canonicalization required!
      }
      return MarkUnsafe(Info);
    }
  }

  // If there are any non-simple uses of this getelementptr, make sure to
  // reject them.
  return isSafeElementUse(GEPI, IsAllZeroIndices, AI, Info);
}

/// isSafeMemIntrinsicOnAllocation - Return true if the specified memory
/// intrinsic can be promoted by SROA.  At this point, we know that the operand
/// of the memintrinsic is a pointer to the beginning of the allocation.
void SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
                                          unsigned OpNo, AllocaInfo &Info) {
  // If not constant length, give up.
  ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
  if (!Length) return MarkUnsafe(Info);

  // If not the whole aggregate, give up.
  const TargetData &TD = getAnalysis<TargetData>();
  if (Length->getZExtValue() !=
      TD.getABITypeSize(AI->getType()->getElementType()))
    return MarkUnsafe(Info);

  // We only know about memcpy/memset/memmove.
  if (!isa<MemCpyInst>(MI) && !isa<MemSetInst>(MI) && !isa<MemMoveInst>(MI))
    return MarkUnsafe(Info);

  // Otherwise, we can transform it.  Determine whether this is a memcpy/set
  // into or out of the aggregate.
  if (OpNo == 1)
    Info.isMemCpyDst = true;
  else {
    assert(OpNo == 2);
    Info.isMemCpySrc = true;
  }
}

/// isSafeUseOfBitCastedAllocation - Return true if all users of this bitcast
/// are safe to transform.
void SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocationInst *AI,
                                          AllocaInfo &Info) {
  for (Value::use_iterator UI = BC->use_begin(), E = BC->use_end();
       UI != E; ++UI) {
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(UI)) {
      isSafeUseOfBitCastedAllocation(BCU, AI, Info);
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(UI)) {
      isSafeMemIntrinsicOnAllocation(MI, AI, UI.getOperandNo(), Info);
    } else {
      return MarkUnsafe(Info);
    }
    if (Info.isUnsafe) return;
  }
}

/// RewriteBitCastUserOfAlloca - BCInst (transitively) bitcasts AI, or indexes
/// to its first element.  Transform users of the cast to use the new values
/// instead.
void SROA::RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
                                      SmallVector<AllocaInst*, 32> &NewElts) {
  Constant *Zero = Constant::getNullValue(Type::Int32Ty);
  const TargetData &TD = getAnalysis<TargetData>();

  Value::use_iterator UI = BCInst->use_begin(), UE = BCInst->use_end();
  while (UI != UE) {
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(*UI)) {
      RewriteBitCastUserOfAlloca(BCU, AI, NewElts);
      ++UI;
      BCU->eraseFromParent();
      continue;
    }

    // Otherwise, must be memcpy/memmove/memset of the entire aggregate.
    // Split into one per element.
    MemIntrinsic *MI = dyn_cast<MemIntrinsic>(*UI);

    // If it's not a mem intrinsic, it must be some other user of a gep of the
    // first pointer.  Just leave these alone.
    if (!MI) {
      ++UI;
      continue;
    }

    // If this is a memcpy/memmove, construct the other pointer as the
    // appropriate type.
    Value *OtherPtr = 0;
    if (MemCpyInst *MCI = dyn_cast<MemCpyInst>(MI)) {
      if (BCInst == MCI->getRawDest())
        OtherPtr = MCI->getRawSource();
      else {
        assert(BCInst == MCI->getRawSource());
        OtherPtr = MCI->getRawDest();
      }
    } else if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (BCInst == MMI->getRawDest())
        OtherPtr = MMI->getRawSource();
      else {
        assert(BCInst == MMI->getRawSource());
        OtherPtr = MMI->getRawDest();
      }
    }

    // If there is an other pointer, we want to convert it to the same pointer
    // type as AI has, so we can GEP through it.
    if (OtherPtr) {
      // It is likely that OtherPtr is a bitcast, if so, remove it.
      if (BitCastInst *BC = dyn_cast<BitCastInst>(OtherPtr))
        OtherPtr = BC->getOperand(0);
      if (ConstantExpr *BCE = dyn_cast<ConstantExpr>(OtherPtr))
        if (BCE->getOpcode() == Instruction::BitCast)
          OtherPtr = BCE->getOperand(0);

      // If the pointer is not the right type, insert a bitcast to the right
      // type.
      if (OtherPtr->getType() != AI->getType())
        OtherPtr = new BitCastInst(OtherPtr, AI->getType(),
                                   OtherPtr->getName(), MI);
    }

    // Process each element of the aggregate.
    Value *TheFn = MI->getOperand(0);
    const Type *BytePtrTy = MI->getRawDest()->getType();
    bool SROADest = MI->getRawDest() == BCInst;

    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // If this is a memcpy/memmove, emit a GEP of the other element address.
      Value *OtherElt = 0;
      if (OtherPtr) {
        Value *Idx[2];
        Idx[0] = Zero;
        Idx[1] = ConstantInt::get(Type::Int32Ty, i);
        OtherElt = new GetElementPtrInst(OtherPtr, Idx, Idx + 2,
                                         OtherPtr->getNameStr()+"."+utostr(i),
                                         MI);
      }

      Value *EltPtr = NewElts[i];
      const Type *EltTy =
        cast<PointerType>(EltPtr->getType())->getElementType();

      // If we got down to a scalar, insert a load or store as appropriate.
      if (EltTy->isFirstClassType()) {
        if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
          Value *Elt = new LoadInst(SROADest ? OtherElt : EltPtr, "tmp",
                                    MI);
          new StoreInst(Elt, SROADest ? EltPtr : OtherElt, MI);
          continue;
        } else {
          assert(isa<MemSetInst>(MI));

          // If the stored element is zero (common case), just store a null
          // constant.
          Constant *StoreVal;
          if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getOperand(2))) {
            if (CI->isZero()) {
              StoreVal = Constant::getNullValue(EltTy);  // 0.0, null, 0, <0,0>
            } else {
              // If EltTy is a vector type, get the element type.
              const Type *ValTy = EltTy;
              if (const VectorType *VTy = dyn_cast<VectorType>(ValTy))
                ValTy = VTy->getElementType();

              // Construct an integer with the right value.
              unsigned EltSize = TD.getTypeSizeInBits(ValTy);
              APInt OneVal(EltSize, CI->getZExtValue());
              APInt TotalVal(OneVal);
              // Set each byte.
              for (unsigned i = 0; 8*i < EltSize; ++i) {
                TotalVal = TotalVal.shl(8);
                TotalVal |= OneVal;
              }

              // Convert the integer value to the appropriate type.
              StoreVal = ConstantInt::get(TotalVal);
              if (isa<PointerType>(ValTy))
                StoreVal = ConstantExpr::getIntToPtr(StoreVal, ValTy);
              else if (ValTy->isFloatingPoint())
                StoreVal = ConstantExpr::getBitCast(StoreVal, ValTy);
              assert(StoreVal->getType() == ValTy && "Type mismatch!");

              // If the requested value was a vector constant, create it.
              if (EltTy != ValTy) {
                unsigned NumElts = cast<VectorType>(EltTy)->getNumElements();
                SmallVector<Constant*, 16> Elts(NumElts, StoreVal);
                StoreVal = ConstantVector::get(&Elts[0], NumElts);
              }
            }
            new StoreInst(StoreVal, EltPtr, MI);
            continue;
          }
          // Otherwise, if we're storing a byte variable, use a memset call
          // for this element.
        }
      }

      // Cast the element pointer to BytePtrTy.
      if (EltPtr->getType() != BytePtrTy)
        EltPtr = new BitCastInst(EltPtr, BytePtrTy, EltPtr->getNameStr(), MI);

      // Cast the other pointer (if we have one) to BytePtrTy.
      if (OtherElt && OtherElt->getType() != BytePtrTy)
        OtherElt = new BitCastInst(OtherElt, BytePtrTy, OtherElt->getNameStr(),
                                   MI);

      unsigned EltSize = TD.getABITypeSize(EltTy);

      // Finally, insert the meminst for this element.
      if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
        Value *Ops[] = {
          SROADest ? EltPtr : OtherElt,  // Dest ptr
          SROADest ? OtherElt : EltPtr,  // Src ptr
          ConstantInt::get(MI->getOperand(3)->getType(), EltSize),  // Size
          Zero  // Align
        };
        new CallInst(TheFn, Ops, Ops + 4, "", MI);
      } else {
        assert(isa<MemSetInst>(MI));
        Value *Ops[] = {
          EltPtr, MI->getOperand(2),  // Dest, Value,
          ConstantInt::get(MI->getOperand(3)->getType(), EltSize),  // Size
          Zero  // Align
        };
        new CallInst(TheFn, Ops, Ops + 4, "", MI);
      }
    }

    // Finally, MI is now dead, as we've modified its actions to occur on all
    // of the elements of the aggregate.
    ++UI;
    MI->eraseFromParent();
  }
}

/// HasPadding - Return true if the specified type has any structure or
/// alignment padding, false otherwise.
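/// For example (illustrative, assuming a target where i32 is 4-byte aligned):
/// { i8, i32 } has three bytes of padding between the fields, and { i32, i8 }
/// has three bytes of tail padding, so both report true; { i32, i32 } reports
/// false.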
static bool HasPadding(const Type *Ty, const TargetData &TD,
                       bool inPacked = false) {
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TD.getStructLayout(STy);
    unsigned PrevFieldBitOffset = 0;
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned FieldBitOffset = SL->getElementOffsetInBits(i);

      // Padding in sub-elements?
      if (HasPadding(STy->getElementType(i), TD, STy->isPacked()))
        return true;

      // Check to see if there is any padding between this element and the
      // previous one.
      if (i) {
        unsigned PrevFieldEnd =
          PrevFieldBitOffset+TD.getTypeSizeInBits(STy->getElementType(i-1));
        if (PrevFieldEnd < FieldBitOffset)
          return true;
      }

      PrevFieldBitOffset = FieldBitOffset;
    }

    // Check for tail padding.
    if (unsigned EltCount = STy->getNumElements()) {
      unsigned PrevFieldEnd = PrevFieldBitOffset +
        TD.getTypeSizeInBits(STy->getElementType(EltCount-1));
      if (PrevFieldEnd < SL->getSizeInBits())
        return true;
    }

  } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    return HasPadding(ATy->getElementType(), TD, false);
  } else if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    return HasPadding(VTy->getElementType(), TD, false);
  }
  return inPacked ?
    false : TD.getTypeSizeInBits(Ty) != TD.getABITypeSizeInBits(Ty);
}

/// isSafeAllocaToScalarRepl - Check to see if the specified allocation of an
/// aggregate can be broken down into elements.  Return 0 if not, 3 if safe,
/// or 1 if safe after canonicalization has been performed.
///
int SROA::isSafeAllocaToScalarRepl(AllocationInst *AI) {
  // Loop over the use list of the alloca.  We can only transform it if all of
  // the users are safe to transform.
  AllocaInfo Info;

  for (Value::use_iterator I = AI->use_begin(), E = AI->use_end();
       I != E; ++I) {
    isSafeUseOfAllocation(cast<Instruction>(*I), AI, Info);
    if (Info.isUnsafe) {
      DOUT << "Cannot transform: " << *AI << "  due to user: " << **I;
      return 0;
    }
  }

  // Okay, we know all the users are promotable.  If the aggregate is a memcpy
  // source and destination, we have to be careful.  In particular, the memcpy
  // could be moving around elements that live in structure padding of the LLVM
  // types, but may actually be used.  In these cases, we refuse to promote the
  // struct.
  if (Info.isMemCpySrc && Info.isMemCpyDst &&
      HasPadding(AI->getType()->getElementType(), getAnalysis<TargetData>()))
    return 0;

  // If we require cleanup, return 1, otherwise return 3.
  return Info.needsCanon ? 1 : 3;
}

/// CanonicalizeAllocaUsers - If SROA reported that it can promote the
/// specified allocation, but only if cleaned up, perform the cleanups
/// required.
void SROA::CanonicalizeAllocaUsers(AllocationInst *AI) {
  // At this point, we know that the end result will be SROA'd and promoted, so
  // we can insert ugly code if required so long as sroa+mem2reg will clean it
  // up.
  for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
       UI != E; ) {
    GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(*UI++);
    if (!GEPI) continue;
    gep_type_iterator I = gep_type_begin(GEPI);
    ++I;

    if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
      uint64_t NumElements = AT->getNumElements();

      if (!isa<ConstantInt>(I.getOperand())) {
        if (NumElements == 1) {
          GEPI->setOperand(2, Constant::getNullValue(Type::Int32Ty));
        } else {
          assert(NumElements == 2 && "Unhandled case!");
          // All users of the GEP must be loads.  At each use of the GEP,
          // insert two loads of the appropriate indexed GEP and select
          // between them.
          Value *IsOne = new ICmpInst(ICmpInst::ICMP_NE, I.getOperand(),
                              Constant::getNullValue(I.getOperand()->getType()),
                              "isone", GEPI);
          // Insert the new GEP instructions, which are properly indexed.
          SmallVector<Value*, 8> Indices(GEPI->op_begin()+1, GEPI->op_end());
          Indices[1] = Constant::getNullValue(Type::Int32Ty);
          Value *ZeroIdx = new GetElementPtrInst(GEPI->getOperand(0),
                                                 Indices.begin(),
                                                 Indices.end(),
                                                 GEPI->getName()+".0", GEPI);
          Indices[1] = ConstantInt::get(Type::Int32Ty, 1);
          Value *OneIdx = new GetElementPtrInst(GEPI->getOperand(0),
                                                Indices.begin(),
                                                Indices.end(),
                                                GEPI->getName()+".1", GEPI);
          // Replace all loads of the variable index GEP with loads from both
          // indexes and a select.
          while (!GEPI->use_empty()) {
            LoadInst *LI = cast<LoadInst>(GEPI->use_back());
            Value *Zero = new LoadInst(ZeroIdx, LI->getName()+".0", LI);
            Value *One  = new LoadInst(OneIdx , LI->getName()+".1", LI);
            Value *R = new SelectInst(IsOne, One, Zero, LI->getName(), LI);
            LI->replaceAllUsesWith(R);
            LI->eraseFromParent();
          }
          GEPI->eraseFromParent();
        }
      }
    }
  }
}

/// MergeInType - Add the 'In' type to the accumulated type so far.  If the
/// types are incompatible, return true, otherwise update Accum and return
/// false.
///
/// There are three cases we handle here:
///   1) An effectively-integer union, where the pieces are stored into as
///      smaller integers (common with byte swap and other idioms).
///   2) A union of vector types of the same size and potentially its elements.
///      Here we turn element accesses into insert/extract element operations.
///   3) A union of scalar types, such as int/float or int/pointer.  Here we
///      merge together into integers, allowing the xform to work with #1 as
///      well.
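///
/// For instance (hypothetical inputs, not from a testcase): merging i8 into
/// an accumulated i32 keeps i32 (case 1); merging float into an accumulated
/// <4 x float> keeps the vector (case 2); merging float into an accumulated
/// i32 stays i32, since the float is re-expressed as a 32-bit integer
/// (case 3).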
static bool MergeInType(const Type *In, const Type *&Accum,
                        const TargetData &TD) {
  // If this is our first type, just use it.
  const VectorType *PTy;
  if (Accum == Type::VoidTy || In == Accum) {
    Accum = In;
  } else if (In == Type::VoidTy) {
    // Noop.
  } else if (In->isInteger() && Accum->isInteger()) {   // integer union.
    // Otherwise pick whichever type is larger.
    if (cast<IntegerType>(In)->getBitWidth() >
        cast<IntegerType>(Accum)->getBitWidth())
      Accum = In;
  } else if (isa<PointerType>(In) && isa<PointerType>(Accum)) {
    // Pointer unions just stay as one of the pointers.
  } else if (isa<VectorType>(In) || isa<VectorType>(Accum)) {
    if ((PTy = dyn_cast<VectorType>(Accum)) &&
        PTy->getElementType() == In) {
      // Accum is a vector, and we are accessing an element: ok.
    } else if ((PTy = dyn_cast<VectorType>(In)) &&
               PTy->getElementType() == Accum) {
      // In is a vector, and accum is an element: ok, remember In.
      Accum = In;
    } else if ((PTy = dyn_cast<VectorType>(In)) && isa<VectorType>(Accum) &&
               PTy->getBitWidth() == cast<VectorType>(Accum)->getBitWidth()) {
      // Two vectors of the same size: keep Accum.
    } else {
      // Cannot insert a short into a <4 x int> or handle
      // <2 x int> -> <4 x int>
      return true;
    }
  } else {
    // Pointer/FP/Integer unions merge together as integers.
    switch (Accum->getTypeID()) {
    case Type::PointerTyID:   Accum = TD.getIntPtrType(); break;
    case Type::FloatTyID:     Accum = Type::Int32Ty; break;
    case Type::DoubleTyID:    Accum = Type::Int64Ty; break;
    case Type::X86_FP80TyID:  return true;
    case Type::FP128TyID:     return true;
    case Type::PPC_FP128TyID: return true;
    default:
      assert(Accum->isInteger() && "Unknown FP type!");
      break;
    }

    switch (In->getTypeID()) {
    case Type::PointerTyID:   In = TD.getIntPtrType(); break;
    case Type::FloatTyID:     In = Type::Int32Ty; break;
    case Type::DoubleTyID:    In = Type::Int64Ty; break;
    case Type::X86_FP80TyID:  return true;
    case Type::FP128TyID:     return true;
    case Type::PPC_FP128TyID: return true;
    default:
      assert(In->isInteger() && "Unknown FP type!");
      break;
    }
    return MergeInType(In, Accum, TD);
  }
  return false;
}

/// getUIntAtLeastAsBigAs - Return an unsigned integer type that is at least
/// as big as the specified bit width.  If there is no suitable type, this
/// returns null.
const Type *getUIntAtLeastAsBigAs(unsigned NumBits) {
  if (NumBits > 64) return 0;
  if (NumBits > 32) return Type::Int64Ty;
  if (NumBits > 16) return Type::Int32Ty;
  if (NumBits > 8) return Type::Int16Ty;
  return Type::Int8Ty;
}

/// CanConvertToScalar - V is a pointer.  If we can convert the pointee to a
/// single scalar integer type, return that type.  Further, if the use is not
/// a completely trivial use that mem2reg could promote, set IsNotTrivial.  If
/// there are no uses of this pointer, return Type::VoidTy to differentiate
/// from failure.
///
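/// As an illustrative sketch (invented IR, not from a testcase): for
///   %u = alloca float
/// where one user stores floats directly and another bitcasts %u to i32* and
/// loads, this returns i32 (case 3 of MergeInType) and sets IsNotTrivial, so
/// ConvertToScalar can rewrite both access paths onto a single i32 alloca.
///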
const Type *SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial) {
  const Type *UsedType = Type::VoidTy; // No uses, no forced type.
  const TargetData &TD = getAnalysis<TargetData>();
  const PointerType *PTy = cast<PointerType>(V->getType());

  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      if (MergeInType(LI->getType(), UsedType, TD))
        return 0;

    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // Storing the pointer, not into the value?
      if (SI->getOperand(0) == V) return 0;

      // NOTE: We could handle storing of FP imms into integers here!

      if (MergeInType(SI->getOperand(0)->getType(), UsedType, TD))
        return 0;
    } else if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
      IsNotTrivial = true;
      const Type *SubTy = CanConvertToScalar(CI, IsNotTrivial);
      if (!SubTy || MergeInType(SubTy, UsedType, TD)) return 0;
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // Check to see if this is stepping over an element: GEP Ptr, int C
      if (GEP->getNumOperands() == 2 && isa<ConstantInt>(GEP->getOperand(1))) {
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getZExtValue();
        unsigned ElSize = TD.getABITypeSize(PTy->getElementType());
        unsigned BitOffset = Idx*ElSize*8;
        if (BitOffset > 64 || !isPowerOf2_32(ElSize)) return 0;

        IsNotTrivial = true;
        const Type *SubElt = CanConvertToScalar(GEP, IsNotTrivial);
        if (SubElt == 0) return 0;
        if (SubElt != Type::VoidTy && SubElt->isInteger()) {
          const Type *NewTy =
            getUIntAtLeastAsBigAs(TD.getABITypeSizeInBits(SubElt)+BitOffset);
          if (NewTy == 0 || MergeInType(NewTy, UsedType, TD)) return 0;
          continue;
        }
      } else if (GEP->getNumOperands() == 3 &&
                 isa<ConstantInt>(GEP->getOperand(1)) &&
                 isa<ConstantInt>(GEP->getOperand(2)) &&
                 cast<ConstantInt>(GEP->getOperand(1))->isZero()) {
        // We are stepping into an element, e.g. a structure or an array:
        //   GEP Ptr, int 0, uint C
        const Type *AggTy = PTy->getElementType();
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();

        if (const ArrayType *ATy = dyn_cast<ArrayType>(AggTy)) {
          if (Idx >= ATy->getNumElements()) return 0;  // Out of range.
        } else if (const VectorType *VectorTy = dyn_cast<VectorType>(AggTy)) {
          // Getting an element of the vector.
          if (Idx >= VectorTy->getNumElements()) return 0;  // Out of range.

          // Merge in the vector type.
          if (MergeInType(VectorTy, UsedType, TD)) return 0;

          const Type *SubTy = CanConvertToScalar(GEP, IsNotTrivial);
          if (SubTy == 0) return 0;

          if (SubTy != Type::VoidTy && MergeInType(SubTy, UsedType, TD))
            return 0;

          // We'll need to change this to an insert/extract element operation.
          IsNotTrivial = true;
          continue;  // Everything looks ok

        } else if (isa<StructType>(AggTy)) {
          // Structs are always ok.
        } else {
          return 0;
        }
        const Type *NTy = getUIntAtLeastAsBigAs(TD.getABITypeSizeInBits(AggTy));
        if (NTy == 0 || MergeInType(NTy, UsedType, TD)) return 0;
        const Type *SubTy = CanConvertToScalar(GEP, IsNotTrivial);
        if (SubTy == 0) return 0;
        if (SubTy != Type::VoidTy && MergeInType(SubTy, UsedType, TD))
          return 0;
        continue;  // Everything looks ok
      }
      return 0;
    } else {
      // Cannot handle this!
      return 0;
    }
  }

  return UsedType;
}

/// ConvertToScalar - The specified alloca passes the CanConvertToScalar
/// predicate and is non-trivial.  Convert it to something that can be
/// trivially promoted into a register by mem2reg.
void SROA::ConvertToScalar(AllocationInst *AI, const Type *ActualTy) {
  DOUT << "CONVERT TO SCALAR: " << *AI << "  TYPE = "
       << *ActualTy << "\n";
  ++NumConverted;

  BasicBlock *EntryBlock = AI->getParent();
  assert(EntryBlock == &EntryBlock->getParent()->getEntryBlock() &&
         "Not in the entry block!");
  EntryBlock->getInstList().remove(AI);  // Take the alloca out of the program.

  // Create and insert the alloca.
  AllocaInst *NewAI = new AllocaInst(ActualTy, 0, AI->getName(),
                                     EntryBlock->begin());
  ConvertUsesToScalar(AI, NewAI, 0);
  delete AI;
}


/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new alloca
/// directly.  This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.  By the end of this, there should be no uses of Ptr.
void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset) {
  while (!Ptr->use_empty()) {
    Instruction *User = cast<Instruction>(Ptr->use_back());

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      Value *NV = ConvertUsesOfLoadToScalar(LI, NewAI, Offset);
      LI->replaceAllUsesWith(NV);
      LI->eraseFromParent();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      assert(SI->getOperand(0) != Ptr && "Consistency error!");

      Value *SV = ConvertUsesOfStoreToScalar(SI, NewAI, Offset);
      new StoreInst(SV, NewAI, SI);
      SI->eraseFromParent();

    } else if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
      ConvertUsesToScalar(CI, NewAI, Offset);
      CI->eraseFromParent();
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      const PointerType *AggPtrTy =
        cast<PointerType>(GEP->getOperand(0)->getType());
      const TargetData &TD = getAnalysis<TargetData>();
      unsigned AggSizeInBits =
        TD.getABITypeSizeInBits(AggPtrTy->getElementType());

      // Check to see if this is stepping over an element: GEP Ptr, int C
      unsigned NewOffset = Offset;
      if (GEP->getNumOperands() == 2) {
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getZExtValue();
        unsigned BitOffset = Idx*AggSizeInBits;

        NewOffset += BitOffset;
      } else if (GEP->getNumOperands() == 3) {
        // We know that operand #2 is zero.
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
        const Type *AggTy = AggPtrTy->getElementType();
        if (const SequentialType *SeqTy = dyn_cast<SequentialType>(AggTy)) {
          unsigned ElSizeBits =
            TD.getABITypeSizeInBits(SeqTy->getElementType());

          NewOffset += ElSizeBits*Idx;
        } else if (const StructType *STy = dyn_cast<StructType>(AggTy)) {
          unsigned EltBitOffset =
            TD.getStructLayout(STy)->getElementOffsetInBits(Idx);

          NewOffset += EltBitOffset;
        } else {
          assert(0 && "Unsupported operation!");
          abort();
        }
      } else {
        assert(0 && "Unsupported operation!");
        abort();
      }
      ConvertUsesToScalar(GEP, NewAI, NewOffset);
      GEP->eraseFromParent();
    } else {
      assert(0 && "Unsupported operation!");
      abort();
    }
  }
}

/// ConvertUsesOfLoadToScalar - Convert all of the users of the specified load
/// to use the new alloca directly, returning the value that should replace
/// the load.  This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.  By the end of this, there should be no uses of Ptr.
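///
/// For example (an illustrative sketch on a little-endian target): loading an
/// i8 at bit offset 16 of an i32 alloca becomes a load of the whole i32, an
/// lshr by 16, and a trunc to i8.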
Value *SROA::ConvertUsesOfLoadToScalar(LoadInst *LI, AllocaInst *NewAI,
                                       unsigned Offset) {
  // The load is a bit extract from NewAI shifted right by Offset bits.
  Value *NV = new LoadInst(NewAI, LI->getName(), LI);

  if (NV->getType() == LI->getType() && Offset == 0) {
    // We win, no conversion needed.
    return NV;
  }

  if (const VectorType *PTy = dyn_cast<VectorType>(NV->getType())) {
    // If the result alloca is a vector type, this is either an element
    // access or a bitcast to another vector type.
    if (isa<VectorType>(LI->getType())) {
      NV = new BitCastInst(NV, LI->getType(), LI->getName(), LI);
    } else {
      // Must be an element access.
      const TargetData &TD = getAnalysis<TargetData>();
      unsigned Elt = Offset/TD.getABITypeSizeInBits(PTy->getElementType());
      NV = new ExtractElementInst(NV, ConstantInt::get(Type::Int32Ty, Elt),
                                  "tmp", LI);
    }
  } else if (isa<PointerType>(NV->getType())) {
    assert(isa<PointerType>(LI->getType()));
    // Must be ptr->ptr cast.  Anything else would result in NV being
    // an integer.
    NV = new BitCastInst(NV, LI->getType(), LI->getName(), LI);
  } else {
    const IntegerType *NTy = cast<IntegerType>(NV->getType());

    // If this is a big-endian system and the load is narrower than the
    // full alloca type, we need to do a shift to get the right bits.
    int ShAmt = 0;
    const TargetData &TD = getAnalysis<TargetData>();
    if (TD.isBigEndian()) {
      // On big-endian machines, the lowest bit is stored at the bit offset
      // from the pointer given by getTypeStoreSizeInBits.  This matters for
      // integers with a bitwidth that is not a multiple of 8.
      ShAmt = TD.getTypeStoreSizeInBits(NTy) -
              TD.getTypeStoreSizeInBits(LI->getType()) - Offset;
    } else {
      ShAmt = Offset;
    }

    // Note: we support negative shift amounts (emitted as shl), which the
    // shift instructions themselves do not define.  We do this to support
    // (f.e.) loads off the end of a structure where only some bits are used.
    if (ShAmt > 0 && (unsigned)ShAmt < NTy->getBitWidth())
      NV = BinaryOperator::createLShr(NV,
                                      ConstantInt::get(NV->getType(), ShAmt),
                                      LI->getName(), LI);
    else if (ShAmt < 0 && (unsigned)-ShAmt < NTy->getBitWidth())
      NV = BinaryOperator::createShl(NV,
                                     ConstantInt::get(NV->getType(), -ShAmt),
                                     LI->getName(), LI);

    // Finally, unconditionally truncate the integer to the right width.
    unsigned LIBitWidth = TD.getTypeSizeInBits(LI->getType());
    if (LIBitWidth < NTy->getBitWidth())
      NV = new TruncInst(NV, IntegerType::get(LIBitWidth),
                         LI->getName(), LI);

    // If the result is an integer, this is a trunc or bitcast.
    if (isa<IntegerType>(LI->getType())) {
      assert(NV->getType() == LI->getType() && "Truncate wasn't enough?");
    } else if (LI->getType()->isFloatingPoint()) {
      // Just do a bitcast, we know the sizes match up.
      NV = new BitCastInst(NV, LI->getType(), LI->getName(), LI);
    } else {
      // Otherwise must be a pointer.
      NV = new IntToPtrInst(NV, LI->getType(), LI->getName(), LI);
    }
  }
  return NV;
}


/// ConvertUsesOfStoreToScalar - Convert the specified store to a load+store
/// pair of the new alloca directly, returning the value that should be stored
/// to the alloca.  This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.  By the end of this, there should be no uses of Ptr.
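///
/// For example (an illustrative sketch on a little-endian target): storing an
/// i8 at bit offset 16 of an i32 alloca loads the old i32, zext/shl's the i8
/// into position, masks the destination byte out of the old value with 'and',
/// and 'or's the new bits in; the caller then stores the result back.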
Value *SROA::ConvertUsesOfStoreToScalar(StoreInst *SI, AllocaInst *NewAI,
                                        unsigned Offset) {

  // Convert the stored type to the actual type, shift it left to insert, then
  // 'or' into place.
  Value *SV = SI->getOperand(0);
  const Type *AllocaType = NewAI->getType()->getElementType();
  if (SV->getType() == AllocaType && Offset == 0) {
    // All is well.
  } else if (const VectorType *PTy = dyn_cast<VectorType>(AllocaType)) {
    Value *Old = new LoadInst(NewAI, NewAI->getName()+".in", SI);

    // If the result alloca is a vector type, this is either an element
    // access or a bitcast to another vector type.
    if (isa<VectorType>(SV->getType())) {
      SV = new BitCastInst(SV, AllocaType, SV->getName(), SI);
    } else {
      // Must be an element insertion.
      const TargetData &TD = getAnalysis<TargetData>();
      unsigned Elt = Offset/TD.getABITypeSizeInBits(PTy->getElementType());
      SV = new InsertElementInst(Old, SV,
                                 ConstantInt::get(Type::Int32Ty, Elt),
                                 "tmp", SI);
    }
  } else if (isa<PointerType>(AllocaType)) {
    // If the alloca type is a pointer, then all the elements must be
    // pointers.
    if (SV->getType() != AllocaType)
      SV = new BitCastInst(SV, AllocaType, SV->getName(), SI);
  } else {
    Value *Old = new LoadInst(NewAI, NewAI->getName()+".in", SI);

    // If SV is a float, convert it to the appropriate integer type.
    // If it is a pointer, do the same, and also handle ptr->ptr casts
    // here.
    const TargetData &TD = getAnalysis<TargetData>();
    unsigned SrcWidth = TD.getTypeSizeInBits(SV->getType());
    unsigned DestWidth = TD.getTypeSizeInBits(AllocaType);
    unsigned SrcStoreWidth = TD.getTypeStoreSizeInBits(SV->getType());
    unsigned DestStoreWidth = TD.getTypeStoreSizeInBits(AllocaType);
    if (SV->getType()->isFloatingPoint())
      SV = new BitCastInst(SV, IntegerType::get(SrcWidth),
                           SV->getName(), SI);
    else if (isa<PointerType>(SV->getType()))
      SV = new PtrToIntInst(SV, TD.getIntPtrType(), SV->getName(), SI);

    // Always zero extend the value if needed.
    if (SV->getType() != AllocaType)
      SV = new ZExtInst(SV, AllocaType, SV->getName(), SI);

    // If this is a big-endian system and the store is narrower than the
    // full alloca type, we need to do a shift to get the right bits.
    int ShAmt = 0;
    if (TD.isBigEndian()) {
      // On big-endian machines, the lowest bit is stored at the bit offset
      // from the pointer given by getTypeStoreSizeInBits.  This matters for
      // integers with a bitwidth that is not a multiple of 8.
      ShAmt = DestStoreWidth - SrcStoreWidth - Offset;
    } else {
      ShAmt = Offset;
    }

    // Note: we support negative shift amounts (emitted as lshr), which the
    // shift instructions themselves do not define.  We do this to support
    // (f.e.) stores off the end of a structure where only some bits in the
    // structure are set.
    APInt Mask(APInt::getLowBitsSet(DestWidth, SrcWidth));
    if (ShAmt > 0 && (unsigned)ShAmt < DestWidth) {
      SV = BinaryOperator::createShl(SV,
                                     ConstantInt::get(SV->getType(), ShAmt),
                                     SV->getName(), SI);
      Mask <<= ShAmt;
    } else if (ShAmt < 0 && (unsigned)-ShAmt < DestWidth) {
      SV = BinaryOperator::createLShr(SV,
                                      ConstantInt::get(SV->getType(), -ShAmt),
                                      SV->getName(), SI);
      Mask = Mask.lshr(-ShAmt);
    }

    // Mask out the bits we are about to insert from the old value, and or
    // in the new bits.
    if (SrcWidth != DestWidth) {
      assert(DestWidth > SrcWidth);
      Old = BinaryOperator::createAnd(Old, ConstantInt::get(~Mask),
                                      Old->getName()+".mask", SI);
      SV = BinaryOperator::createOr(Old, SV, SV->getName()+".ins", SI);
    }
  }
  return SV;
}



/// PointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable.  This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool PointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return PointsToConstantGlobal(CE->getOperand(0));
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses.  If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with isOffset) but otherwise
/// traverse the uses.  If we see a memcpy/memmove that targets an un-offset
/// pointer to the alloca, and if the source pointer is a pointer to a constant
/// global, we can optimize this.
static bool isOnlyCopiedFromConstantGlobal(Value *V, Instruction *&TheCopy,
                                           bool isOffset) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    if (isa<LoadInst>(*UI)) {
      // Ignore loads, they are always ok.
      continue;
    }
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(*UI)) {
      // If uses of the bitcast are ok, we are ok.
      if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, isOffset))
        return false;
      continue;
    }
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(*UI)) {
      // If the GEP has all zero indices, it doesn't offset the pointer.  If it
      // doesn't, it does.
      if (!isOnlyCopiedFromConstantGlobal(GEP, TheCopy,
                                          isOffset ||
                                          !GEP->hasAllZeroIndices()))
        return false;
      continue;
    }

    // If this isn't our memcpy/memmove, reject it as something we can't
    // handle.
    if (!isa<MemCpyInst>(*UI) && !isa<MemMoveInst>(*UI))
      return false;

    // If we already have seen a copy, reject the second one.
    if (TheCopy) return false;

    // If the pointer has been offset from the start of the alloca, we can't
    // safely handle this.
    if (isOffset) return false;

    // If the memintrinsic isn't using the alloca as the dest, reject it.
    if (UI.getOperandNo() != 1) return false;

    MemIntrinsic *MI = cast<MemIntrinsic>(*UI);

    // If the source of the memcpy/move is not a constant global, reject it.
    if (!PointsToConstantGlobal(MI->getOperand(2)))
      return false;

    // Otherwise, the transform is safe.  Remember the copy instruction.
    TheCopy = MI;
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - Return the copy instruction if the
/// specified alloca is only modified by a copy from a constant global, and
/// null otherwise.  If we can prove this, we can replace any uses of the
/// alloca with uses of the global directly.
Instruction *SROA::isOnlyCopiedFromConstantGlobal(AllocationInst *AI) {
  Instruction *TheCopy = 0;
  if (::isOnlyCopiedFromConstantGlobal(AI, TheCopy, false))
    return TheCopy;
  return 0;
}
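
// For example, this pass can be exercised on its own with:
//   opt -scalarrepl -stats input.bc -o output.bc
// or created programmatically via createScalarReplAggregatesPass(Threshold).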