ScalarReplAggregates.cpp revision c0bc547c99bd97088e950b3074d917091abe3f51
//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation implements the well known scalar replacement of
// aggregates transformation.  This xform breaks up alloca instructions of
// aggregate type (structure or array) into individual alloca instructions for
// each member (if possible).  Then, if possible, it transforms the individual
// alloca instructions into nice clean scalar SSA form.
//
// This combines a simple SRoA algorithm with the Mem2Reg algorithm because
// they often interact, especially for C++ programs.  As such, iterating
// between SRoA, then Mem2Reg until we run out of things to promote works well.
//
//===----------------------------------------------------------------------===//
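//
// For illustration only (a hand-written sketch of the intended effect, not
// verbatim output of this pass): an aggregate alloca such as
//
//   %p = alloca { i32, float }
//
// is first split into one alloca per member,
//
//   %p.0 = alloca i32
//   %p.1 = alloca float
//
// and each piece is then promoted into an SSA register by mem2reg.
//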

#define DEBUG_TYPE "scalarrepl"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
using namespace llvm;

STATISTIC(NumReplaced,  "Number of allocas broken up");
STATISTIC(NumPromoted,  "Number of allocas promoted");
STATISTIC(NumConverted, "Number of aggregates converted to scalar");
STATISTIC(NumGlobals,   "Number of allocas copied from constant global");

namespace {
  struct VISIBILITY_HIDDEN SROA : public FunctionPass {
    static char ID; // Pass identification, replacement for typeid
    explicit SROA(signed T = -1) : FunctionPass(&ID) {
      if (T == -1)
        SRThreshold = 128;
      else
        SRThreshold = T;
    }

    bool runOnFunction(Function &F);

    bool performScalarRepl(Function &F);
    bool performPromotion(Function &F);

    // getAnalysisUsage - This pass does not require any passes, but we know it
    // will not alter the CFG, so say so.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      AU.addRequired<DominanceFrontier>();
      AU.addRequired<TargetData>();
      AU.setPreservesCFG();
    }

  private:
    /// AllocaInfo - When analyzing uses of an alloca instruction, this captures
    /// information about the uses.  All these fields are initialized to false
    /// and set to true when something is learned.
    struct AllocaInfo {
      /// isUnsafe - This is set to true if the alloca cannot be SROA'd.
      bool isUnsafe : 1;

      /// needsCanon - This is set to true if there is some use of the alloca
      /// that requires canonicalization.
      bool needsCanon : 1;

      /// isMemCpySrc - This is true if this aggregate is memcpy'd from.
      bool isMemCpySrc : 1;

      /// isMemCpyDst - This is true if this aggregate is memcpy'd into.
      bool isMemCpyDst : 1;

      AllocaInfo()
        : isUnsafe(false), needsCanon(false),
          isMemCpySrc(false), isMemCpyDst(false) {}
    };

    unsigned SRThreshold;

    void MarkUnsafe(AllocaInfo &I) { I.isUnsafe = true; }

    int isSafeAllocaToScalarRepl(AllocationInst *AI);

    void isSafeUseOfAllocation(Instruction *User, AllocationInst *AI,
                               AllocaInfo &Info);
    void isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI,
                          AllocaInfo &Info);
    void isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
                                        unsigned OpNo, AllocaInfo &Info);
    void isSafeUseOfBitCastedAllocation(BitCastInst *User, AllocationInst *AI,
                                        AllocaInfo &Info);

    void DoScalarReplacement(AllocationInst *AI,
                             std::vector<AllocationInst*> &WorkList);
    void CanonicalizeAllocaUsers(AllocationInst *AI);
    AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocationInst *Base);

    void RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
                                    SmallVector<AllocaInst*, 32> &NewElts);

    const Type *CanConvertToScalar(Value *V, bool &IsNotTrivial);
    void ConvertToScalar(AllocationInst *AI, const Type *Ty);
    void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset);
    Value *ConvertUsesOfLoadToScalar(LoadInst *LI, AllocaInst *NewAI,
                                     unsigned Offset);
    Value *ConvertUsesOfStoreToScalar(StoreInst *SI, AllocaInst *NewAI,
                                      unsigned Offset);
    static Instruction *isOnlyCopiedFromConstantGlobal(AllocationInst *AI);
  };
}

char SROA::ID = 0;
static RegisterPass<SROA> X("scalarrepl", "Scalar Replacement of Aggregates");

// Public interface to the ScalarReplAggregates pass
FunctionPass *llvm::createScalarReplAggregatesPass(signed int Threshold) {
  return new SROA(Threshold);
}


bool SROA::runOnFunction(Function &F) {
  bool Changed = performPromotion(F);
  while (1) {
    bool LocalChange = performScalarRepl(F);
    if (!LocalChange) break;  // No need to repromote if no scalarrepl
    Changed = true;
    LocalChange = performPromotion(F);
    if (!LocalChange) break;  // No need to re-scalarrepl if no promotion
  }

  return Changed;
}


bool SROA::performPromotion(Function &F) {
  std::vector<AllocaInst*> Allocas;
  DominatorTree &DT = getAnalysis<DominatorTree>();
  DominanceFrontier &DF = getAnalysis<DominanceFrontier>();

  BasicBlock &BB = F.getEntryBlock();  // Get the entry node for the function

  bool Changed = false;

  while (1) {
    Allocas.clear();

    // Find allocas that are safe to promote, by looking at all instructions in
    // the entry node
    for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(I))       // Is it an alloca?
        if (isAllocaPromotable(AI))
          Allocas.push_back(AI);

    if (Allocas.empty()) break;

    PromoteMemToReg(Allocas, DT, DF);
    NumPromoted += Allocas.size();
    Changed = true;
  }

  return Changed;
}
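
// For illustration only: after SROA splits an aggregate, mem2reg can promote
// each trivially-used piece, turning a sketch like
//
//   %x = alloca i32
//   store i32 %v, i32* %x
//   %y = load i32* %x
//
// into a direct use of %v with no memory traffic at all.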

/// getNumSAElements - Return the number of elements in the specified struct or
/// array.
static uint64_t getNumSAElements(const Type *T) {
  if (const StructType *ST = dyn_cast<StructType>(T))
    return ST->getNumElements();
  return cast<ArrayType>(T)->getNumElements();
}

// performScalarRepl - This algorithm is a simple worklist driven algorithm,
// which runs on all of the malloc/alloca instructions in the function, removing
// them if they are only used by getelementptr instructions.
//
bool SROA::performScalarRepl(Function &F) {
  std::vector<AllocationInst*> WorkList;

  // Scan the entry basic block, adding any alloca's and mallocs to the worklist
  BasicBlock &BB = F.getEntryBlock();
  for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
    if (AllocationInst *A = dyn_cast<AllocationInst>(I))
      WorkList.push_back(A);

  const TargetData &TD = getAnalysis<TargetData>();

  // Process the worklist
  bool Changed = false;
  while (!WorkList.empty()) {
    AllocationInst *AI = WorkList.back();
    WorkList.pop_back();

    // Handle dead allocas trivially.  These can be formed by SROA'ing arrays
    // with unused elements.
    if (AI->use_empty()) {
      AI->eraseFromParent();
      continue;
    }

    // If we can turn this aggregate value (potentially with casts) into a
    // simple scalar value that can be mem2reg'd into a register value.
    bool IsNotTrivial = false;
    if (const Type *ActualType = CanConvertToScalar(AI, IsNotTrivial))
      if (IsNotTrivial && ActualType != Type::VoidTy) {
        ConvertToScalar(AI, ActualType);
        Changed = true;
        continue;
      }

    // Check to see if we can perform the core SROA transformation.  We cannot
    // transform the allocation instruction if it is an array allocation
    // (allocations OF arrays are ok though), and an allocation of a scalar
    // value cannot be decomposed at all.
    if (!AI->isArrayAllocation() &&
        (isa<StructType>(AI->getAllocatedType()) ||
         isa<ArrayType>(AI->getAllocatedType())) &&
        AI->getAllocatedType()->isSized() &&
        // Do not promote any struct whose size is larger than the threshold
        // (128 bytes by default).
        TD.getABITypeSize(AI->getAllocatedType()) < SRThreshold &&
        // Do not promote any struct into more than SRThreshold/4 (32 by
        // default) separate vars.
        getNumSAElements(AI->getAllocatedType()) < SRThreshold/4) {
      // Check that all of the users of the allocation are capable of being
      // transformed.
      switch (isSafeAllocaToScalarRepl(AI)) {
      default: assert(0 && "Unexpected value!");
      case 0:  // Not safe to scalar replace.
        break;
      case 1:  // Safe, but requires cleanup/canonicalizations first
        CanonicalizeAllocaUsers(AI);
        // FALL THROUGH.
      case 3:  // Safe to scalar replace.
        DoScalarReplacement(AI, WorkList);
        Changed = true;
        continue;
      }
    }

    // Check to see if this allocation is only modified by a memcpy/memmove from
    // a constant global.  If this is the case, we can change all users to use
    // the constant global instead.  This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
    // is only subsequently read.
    if (Instruction *TheCopy = isOnlyCopiedFromConstantGlobal(AI)) {
      DOUT << "Found alloca equal to global: " << *AI;
      DOUT << "  memcpy = " << *TheCopy;
      Constant *TheSrc = cast<Constant>(TheCopy->getOperand(2));
      AI->replaceAllUsesWith(ConstantExpr::getBitCast(TheSrc, AI->getType()));
      TheCopy->eraseFromParent();  // Don't mutate the global.
      AI->eraseFromParent();
      ++NumGlobals;
      Changed = true;
      continue;
    }

    // Otherwise, couldn't process this.
  }

  return Changed;
}
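
// For illustration only (a sketch; names and the exact intrinsic signature
// are assumptions for this era of the IR): given
//
//   @C = internal constant [3 x i32] [ i32 1, i32 2, i32 3 ]
//   ...
//   %A = alloca [3 x i32]
//   %A.i8 = bitcast [3 x i32]* %A to i8*
//   call void @llvm.memcpy.i32(i8* %A.i8,
//                              i8* bitcast ([3 x i32]* @C to i8*),
//                              i32 12, i32 4)
//
// if %A is never written again, every use of %A can be rewritten to use @C
// directly and both the alloca and the memcpy go away.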

/// DoScalarReplacement - This alloca satisfied the isSafeAllocaToScalarRepl
/// predicate; do SROA now.
void SROA::DoScalarReplacement(AllocationInst *AI,
                               std::vector<AllocationInst*> &WorkList) {
  DOUT << "Found inst to SROA: " << *AI;
  SmallVector<AllocaInst*, 32> ElementAllocas;
  if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
    ElementAllocas.reserve(ST->getNumContainedTypes());
    for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
                                      AI->getAlignment(),
                                      AI->getName() + "." + utostr(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  } else {
    const ArrayType *AT = cast<ArrayType>(AI->getAllocatedType());
    ElementAllocas.reserve(AT->getNumElements());
    const Type *ElTy = AT->getElementType();
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(),
                                      AI->getName() + "." + utostr(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  }

  // Now that we have created the alloca instructions that we want to use,
  // expand the getelementptr instructions to use them.
  //
  while (!AI->use_empty()) {
    Instruction *User = cast<Instruction>(AI->use_back());
    if (BitCastInst *BCInst = dyn_cast<BitCastInst>(User)) {
      RewriteBitCastUserOfAlloca(BCInst, AI, ElementAllocas);
      BCInst->eraseFromParent();
      continue;
    }

    // Replace:
    //   %res = load { i32, i32 }* %alloc
    // with:
    //   %load.0 = load i32* %alloc.0
    //   %insert.0 = insertvalue { i32, i32 } undef, i32 %load.0, 0
    //   %load.1 = load i32* %alloc.1
    //   %insert = insertvalue { i32, i32 } %insert.0, i32 %load.1, 1
    // (Also works for arrays instead of structs)
    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      Value *Insert = UndefValue::get(LI->getType());
      for (unsigned i = 0, e = ElementAllocas.size(); i != e; ++i) {
        Value *Load = new LoadInst(ElementAllocas[i], "load", LI);
        Insert = InsertValueInst::Create(Insert, Load, i, "insert", LI);
      }
      LI->replaceAllUsesWith(Insert);
      LI->eraseFromParent();
      continue;
    }

    // Replace:
    //   store { i32, i32 } %val, { i32, i32 }* %alloc
    // with:
    //   %val.0 = extractvalue { i32, i32 } %val, 0
    //   store i32 %val.0, i32* %alloc.0
    //   %val.1 = extractvalue { i32, i32 } %val, 1
    //   store i32 %val.1, i32* %alloc.1
    // (Also works for arrays instead of structs)
    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      Value *Val = SI->getOperand(0);
      for (unsigned i = 0, e = ElementAllocas.size(); i != e; ++i) {
        Value *Extract = ExtractValueInst::Create(Val, i, Val->getName(), SI);
        new StoreInst(Extract, ElementAllocas[i], SI);
      }
      SI->eraseFromParent();
      continue;
    }

    GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User);
    // We now know that the GEP is of the form: GEP <ptr>, 0, <cst>
    unsigned Idx =
      (unsigned)cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();

    assert(Idx < ElementAllocas.size() && "Index out of range?");
    AllocaInst *AllocaToUse = ElementAllocas[Idx];

    Value *RepValue;
    if (GEPI->getNumOperands() == 3) {
      // Do not insert a new getelementptr instruction with zero indices, only
      // to have it optimized out later.
      RepValue = AllocaToUse;
    } else {
      // We are indexing deeply into the structure, so we still need a
      // getelementptr instruction to finish the indexing.  This may be
      // expanded itself once the worklist is rerun.
      //
      SmallVector<Value*, 8> NewArgs;
      NewArgs.push_back(Constant::getNullValue(Type::Int32Ty));
      NewArgs.append(GEPI->op_begin()+3, GEPI->op_end());
      RepValue = GetElementPtrInst::Create(AllocaToUse, NewArgs.begin(),
                                           NewArgs.end(), "", GEPI);
      RepValue->takeName(GEPI);
    }

    // If this GEP is to the start of the aggregate, check for memcpys.
    if (Idx == 0) {
      bool IsStartOfAggregateGEP = true;
      for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i) {
        if (!isa<ConstantInt>(GEPI->getOperand(i))) {
          IsStartOfAggregateGEP = false;
          break;
        }
        if (!cast<ConstantInt>(GEPI->getOperand(i))->isZero()) {
          IsStartOfAggregateGEP = false;
          break;
        }
      }

      if (IsStartOfAggregateGEP)
        RewriteBitCastUserOfAlloca(GEPI, AI, ElementAllocas);
    }


    // Move all of the users over to the new GEP.
    GEPI->replaceAllUsesWith(RepValue);
    // Delete the old GEP
    GEPI->eraseFromParent();
  }

  // Finally, delete the Alloca instruction
  AI->eraseFromParent();
  NumReplaced++;
}
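
// For illustration only (a sketch): a GEP that indexes deeper than the
// outermost aggregate, e.g.
//
//   %q = getelementptr { i32, [4 x i8] }* %p, i32 0, i32 1, i32 2
//
// is rewritten against the element alloca, leaving a residual GEP
//
//   %q = getelementptr [4 x i8]* %p.1, i32 0, i32 2
//
// which a later worklist iteration can break up further.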

/// isSafeElementUse - Check to see if this use is an allowed use for a
/// getelementptr instruction of an array aggregate allocation.  isFirstElt
/// indicates whether Ptr is known to point to the start of the aggregate.
///
void SROA::isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI,
                            AllocaInfo &Info) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I) {
    Instruction *User = cast<Instruction>(*I);
    switch (User->getOpcode()) {
    case Instruction::Load:  break;
    case Instruction::Store:
      // Store is ok if storing INTO the pointer, not storing the pointer
      if (User->getOperand(0) == Ptr) return MarkUnsafe(Info);
      break;
    case Instruction::GetElementPtr: {
      GetElementPtrInst *GEP = cast<GetElementPtrInst>(User);
      bool AreAllZeroIndices = isFirstElt;
      if (GEP->getNumOperands() > 1) {
        if (!isa<ConstantInt>(GEP->getOperand(1)) ||
            !cast<ConstantInt>(GEP->getOperand(1))->isZero())
          // Using pointer arithmetic to navigate the array.
          return MarkUnsafe(Info);

        if (AreAllZeroIndices) {
          for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i) {
            if (!isa<ConstantInt>(GEP->getOperand(i)) ||
                !cast<ConstantInt>(GEP->getOperand(i))->isZero()) {
              AreAllZeroIndices = false;
              break;
            }
          }
        }
      }
      isSafeElementUse(GEP, AreAllZeroIndices, AI, Info);
      if (Info.isUnsafe) return;
      break;
    }
    case Instruction::BitCast:
      if (isFirstElt) {
        isSafeUseOfBitCastedAllocation(cast<BitCastInst>(User), AI, Info);
        if (Info.isUnsafe) return;
        break;
      }
      DOUT << "  Transformation preventing inst: " << *User;
      return MarkUnsafe(Info);
    case Instruction::Call:
      if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
        if (isFirstElt) {
          isSafeMemIntrinsicOnAllocation(MI, AI, I.getOperandNo(), Info);
          if (Info.isUnsafe) return;
          break;
        }
      }
      DOUT << "  Transformation preventing inst: " << *User;
      return MarkUnsafe(Info);
    default:
      DOUT << "  Transformation preventing inst: " << *User;
      return MarkUnsafe(Info);
    }
  }
  return;  // All users look ok :)
}
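
// For illustration only: a use like "store i32 0, i32* %elt" writes through
// the element pointer and is fine, while "store i32* %elt, i32** %p" escapes
// the pointer itself and forces the whole alloca to be marked unsafe.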

/// AllUsersAreLoads - Return true if all users of this value are loads.
static bool AllUsersAreLoads(Value *Ptr) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I)
    if (cast<Instruction>(*I)->getOpcode() != Instruction::Load)
      return false;
  return true;
}

/// isSafeUseOfAllocation - Check to see if this user is an allowed use for an
/// aggregate allocation.
///
void SROA::isSafeUseOfAllocation(Instruction *User, AllocationInst *AI,
                                 AllocaInfo &Info) {
  if (BitCastInst *C = dyn_cast<BitCastInst>(User))
    return isSafeUseOfBitCastedAllocation(C, AI, Info);

  if (isa<LoadInst>(User))
    return; // Loads (returning a first class aggregate) are always rewritable

  if (isa<StoreInst>(User) && User->getOperand(0) != AI)
    return; // Store is ok if storing INTO the pointer, not storing the pointer

  GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User);
  if (GEPI == 0)
    return MarkUnsafe(Info);

  gep_type_iterator I = gep_type_begin(GEPI), E = gep_type_end(GEPI);

  // The GEP is not safe to transform if not of the form "GEP <ptr>, 0, <cst>".
  if (I == E ||
      I.getOperand() != Constant::getNullValue(I.getOperand()->getType())) {
    return MarkUnsafe(Info);
  }

  ++I;
  if (I == E) return MarkUnsafe(Info);  // ran out of GEP indices??

  bool IsAllZeroIndices = true;

  // If the first index is a non-constant index into an array, see if we can
  // handle it as a special case.
  if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
    if (!isa<ConstantInt>(I.getOperand())) {
      IsAllZeroIndices = 0;
      uint64_t NumElements = AT->getNumElements();

      // If this is an array index and the index is not constant, we cannot
      // promote... that is unless the array has exactly one or two elements in
      // it, in which case we CAN promote it, but we have to canonicalize this
      // out if this is the only problem.
      if ((NumElements == 1 || NumElements == 2) &&
          AllUsersAreLoads(GEPI)) {
        Info.needsCanon = true;
        return;  // Canonicalization required!
      }
      return MarkUnsafe(Info);
    }
  }

  // Walk through the GEP type indices, checking the types that this indexes
  // into.
  for (; I != E; ++I) {
    // Ignore struct elements, no extra checking needed for these.
    if (isa<StructType>(*I))
      continue;

    ConstantInt *IdxVal = dyn_cast<ConstantInt>(I.getOperand());
    if (!IdxVal) return MarkUnsafe(Info);

    // Are all indices still zero?
    IsAllZeroIndices &= IdxVal->isZero();

    if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
      // This GEP indexes an array.  Verify that this is an in-range constant
      // integer.  Specifically, consider A[0][i].  We cannot know that the user
      // isn't doing invalid things like allowing i to index an out-of-range
      // subscript that accesses A[1].  Because of this, we have to reject SROA
      // of any accesses into structs where any of the components are variables.
      if (IdxVal->getZExtValue() >= AT->getNumElements())
        return MarkUnsafe(Info);
    } else if (const VectorType *VT = dyn_cast<VectorType>(*I)) {
      if (IdxVal->getZExtValue() >= VT->getNumElements())
        return MarkUnsafe(Info);
    }
  }

  // If there are any non-simple uses of this getelementptr, make sure to reject
  // them.
  return isSafeElementUse(GEPI, IsAllZeroIndices, AI, Info);
}
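
// For illustration only: on "%A = alloca [4 x i32]", a GEP of the form
//
//   getelementptr [4 x i32]* %A, i32 0, i32 2    ; safe: "GEP <ptr>, 0, <cst>"
//
// names a fixed element, whereas
//
//   getelementptr [4 x i32]* %A, i32 1           ; unsafe: pointer arithmetic
//   getelementptr [4 x i32]* %A, i32 0, i32 %i   ; unsafe unless canonicalized
//
// either steps past the allocation or uses a variable index.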

/// isSafeMemIntrinsicOnAllocation - Check whether the specified memory
/// intrinsic can be promoted by SROA.  At this point, we know that the operand
/// of the memintrinsic is a pointer to the beginning of the allocation.
void SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
                                          unsigned OpNo, AllocaInfo &Info) {
  // If not constant length, give up.
  ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
  if (!Length) return MarkUnsafe(Info);

  // If not the whole aggregate, give up.
  const TargetData &TD = getAnalysis<TargetData>();
  if (Length->getZExtValue() !=
      TD.getABITypeSize(AI->getType()->getElementType()))
    return MarkUnsafe(Info);

  // We only know about memcpy/memset/memmove.
  if (!isa<MemCpyInst>(MI) && !isa<MemSetInst>(MI) && !isa<MemMoveInst>(MI))
    return MarkUnsafe(Info);

  // Otherwise, we can transform it.  Determine whether this is a memcpy/set
  // into or out of the aggregate.
  if (OpNo == 1)
    Info.isMemCpyDst = true;
  else {
    assert(OpNo == 2);
    Info.isMemCpySrc = true;
  }
}

/// isSafeUseOfBitCastedAllocation - Check whether all users of this bitcast
/// are safe to transform.
void SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocationInst *AI,
                                          AllocaInfo &Info) {
  for (Value::use_iterator UI = BC->use_begin(), E = BC->use_end();
       UI != E; ++UI) {
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(UI)) {
      isSafeUseOfBitCastedAllocation(BCU, AI, Info);
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(UI)) {
      isSafeMemIntrinsicOnAllocation(MI, AI, UI.getOperandNo(), Info);
    } else {
      return MarkUnsafe(Info);
    }
    if (Info.isUnsafe) return;
  }
}
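
// For illustration only: a whole-aggregate copy such as
//
//   %c = bitcast { i32, i32 }* %A to i8*
//   call void @llvm.memcpy.i32(i8* %c, i8* %src, i32 8, i32 4)
//
// covers the full 8-byte size and is accepted; a partial copy (length 4 here)
// or a non-constant length would mark the alloca unsafe.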

/// RewriteBitCastUserOfAlloca - BCInst (transitively) bitcasts AI, or indexes
/// to its first element.  Transform users of the cast to use the new values
/// instead.
void SROA::RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
                                      SmallVector<AllocaInst*, 32> &NewElts) {
  Constant *Zero = Constant::getNullValue(Type::Int32Ty);
  const TargetData &TD = getAnalysis<TargetData>();

  Value::use_iterator UI = BCInst->use_begin(), UE = BCInst->use_end();
  while (UI != UE) {
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(*UI)) {
      RewriteBitCastUserOfAlloca(BCU, AI, NewElts);
      ++UI;
      BCU->eraseFromParent();
      continue;
    }

    // Otherwise, must be memcpy/memmove/memset of the entire aggregate.  Split
    // into one per element.
    MemIntrinsic *MI = dyn_cast<MemIntrinsic>(*UI);

    // If it's not a mem intrinsic, it must be some other user of a gep of the
    // first pointer.  Just leave these alone.
    if (!MI) {
      ++UI;
      continue;
    }

    // If this is a memcpy/memmove, construct the other pointer as the
    // appropriate type.
    Value *OtherPtr = 0;
    if (MemCpyInst *MCI = dyn_cast<MemCpyInst>(MI)) {
      if (BCInst == MCI->getRawDest())
        OtherPtr = MCI->getRawSource();
      else {
        assert(BCInst == MCI->getRawSource());
        OtherPtr = MCI->getRawDest();
      }
    } else if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (BCInst == MMI->getRawDest())
        OtherPtr = MMI->getRawSource();
      else {
        assert(BCInst == MMI->getRawSource());
        OtherPtr = MMI->getRawDest();
      }
    }

    // If there is an other pointer, we want to convert it to the same pointer
    // type as AI has, so we can GEP through it.
    if (OtherPtr) {
      // It is likely that OtherPtr is a bitcast, if so, remove it.
      if (BitCastInst *BC = dyn_cast<BitCastInst>(OtherPtr))
        OtherPtr = BC->getOperand(0);
      // All zero GEPs are effectively casts
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(OtherPtr))
        if (GEP->hasAllZeroIndices())
          OtherPtr = GEP->getOperand(0);

      if (ConstantExpr *BCE = dyn_cast<ConstantExpr>(OtherPtr))
        if (BCE->getOpcode() == Instruction::BitCast)
          OtherPtr = BCE->getOperand(0);

      // If the pointer is not the right type, insert a bitcast to the right
      // type.
      if (OtherPtr->getType() != AI->getType())
        OtherPtr = new BitCastInst(OtherPtr, AI->getType(), OtherPtr->getName(),
                                   MI);
    }

    // Process each element of the aggregate.
    Value *TheFn = MI->getOperand(0);
    const Type *BytePtrTy = MI->getRawDest()->getType();
    bool SROADest = MI->getRawDest() == BCInst;

    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // If this is a memcpy/memmove, emit a GEP of the other element address.
      Value *OtherElt = 0;
      if (OtherPtr) {
        Value *Idx[2] = { Zero, ConstantInt::get(Type::Int32Ty, i) };
        OtherElt = GetElementPtrInst::Create(OtherPtr, Idx, Idx + 2,
                                             OtherPtr->getNameStr()+"."+utostr(i),
                                             MI);
      }

      Value *EltPtr = NewElts[i];
      const Type *EltTy = cast<PointerType>(EltPtr->getType())->getElementType();

      // If we got down to a scalar, insert a load or store as appropriate.
      if (EltTy->isSingleValueType()) {
        if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
          Value *Elt = new LoadInst(SROADest ? OtherElt : EltPtr, "tmp",
                                    MI);
          new StoreInst(Elt, SROADest ? EltPtr : OtherElt, MI);
          continue;
        } else {
          assert(isa<MemSetInst>(MI));

          // If the stored element is zero (common case), just store a null
          // constant.
          Constant *StoreVal;
          if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getOperand(2))) {
            if (CI->isZero()) {
              StoreVal = Constant::getNullValue(EltTy);  // 0.0, null, 0, <0,0>
            } else {
              // If EltTy is a vector type, get the element type.
              const Type *ValTy = EltTy;
              if (const VectorType *VTy = dyn_cast<VectorType>(ValTy))
                ValTy = VTy->getElementType();

              // Construct an integer with the right value.
              unsigned EltSize = TD.getTypeSizeInBits(ValTy);
              APInt OneVal(EltSize, CI->getZExtValue());
              APInt TotalVal(OneVal);
              // Set each byte.
              for (unsigned i = 0; 8*i < EltSize; ++i) {
                TotalVal = TotalVal.shl(8);
                TotalVal |= OneVal;
              }

              // Convert the integer value to the appropriate type.
              StoreVal = ConstantInt::get(TotalVal);
              if (isa<PointerType>(ValTy))
                StoreVal = ConstantExpr::getIntToPtr(StoreVal, ValTy);
              else if (ValTy->isFloatingPoint())
                StoreVal = ConstantExpr::getBitCast(StoreVal, ValTy);
              assert(StoreVal->getType() == ValTy && "Type mismatch!");

              // If the requested value was a vector constant, create it.
              if (EltTy != ValTy) {
                unsigned NumElts = cast<VectorType>(EltTy)->getNumElements();
                SmallVector<Constant*, 16> Elts(NumElts, StoreVal);
                StoreVal = ConstantVector::get(&Elts[0], NumElts);
              }
            }
            new StoreInst(StoreVal, EltPtr, MI);
            continue;
          }
          // Otherwise, if we're storing a byte variable, use a memset call for
          // this element.
        }
      }

      // Cast the element pointer to BytePtrTy.
      if (EltPtr->getType() != BytePtrTy)
        EltPtr = new BitCastInst(EltPtr, BytePtrTy, EltPtr->getNameStr(), MI);

      // Cast the other pointer (if we have one) to BytePtrTy.
      if (OtherElt && OtherElt->getType() != BytePtrTy)
        OtherElt = new BitCastInst(OtherElt, BytePtrTy, OtherElt->getNameStr(),
                                   MI);

      unsigned EltSize = TD.getABITypeSize(EltTy);

      // Finally, insert the meminst for this element.
      if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
        Value *Ops[] = {
          SROADest ? EltPtr : OtherElt,  // Dest ptr
          SROADest ? OtherElt : EltPtr,  // Src ptr
          ConstantInt::get(MI->getOperand(3)->getType(), EltSize),  // Size
          Zero  // Align
        };
        CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
      } else {
        assert(isa<MemSetInst>(MI));
        Value *Ops[] = {
          EltPtr, MI->getOperand(2),  // Dest, Value,
          ConstantInt::get(MI->getOperand(3)->getType(), EltSize),  // Size
          Zero  // Align
        };
        CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
      }
    }

    // Finally, MI is now dead, as we've modified its actions to occur on all of
    // the elements of the aggregate.
    ++UI;
    MI->eraseFromParent();
  }
}

/// HasPadding - Return true if the specified type has any structure or
/// alignment padding, false otherwise.
static bool HasPadding(const Type *Ty, const TargetData &TD) {
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TD.getStructLayout(STy);
    unsigned PrevFieldBitOffset = 0;
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned FieldBitOffset = SL->getElementOffsetInBits(i);

      // Padding in sub-elements?
      if (HasPadding(STy->getElementType(i), TD))
        return true;

      // Check to see if there is any padding between this element and the
      // previous one.
      if (i) {
        unsigned PrevFieldEnd =
          PrevFieldBitOffset+TD.getTypeSizeInBits(STy->getElementType(i-1));
        if (PrevFieldEnd < FieldBitOffset)
          return true;
      }

      PrevFieldBitOffset = FieldBitOffset;
    }

    // Check for tail padding.
    if (unsigned EltCount = STy->getNumElements()) {
      unsigned PrevFieldEnd = PrevFieldBitOffset +
        TD.getTypeSizeInBits(STy->getElementType(EltCount-1));
      if (PrevFieldEnd < SL->getSizeInBits())
        return true;
    }

  } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    return HasPadding(ATy->getElementType(), TD);
  } else if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    return HasPadding(VTy->getElementType(), TD);
  }
  return TD.getTypeSizeInBits(Ty) != TD.getABITypeSizeInBits(Ty);
}
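
// For illustration only (exact layout is target-dependent): on a typical
// 32-bit target, { i8, i32 } occupies 8 bytes with 3 bytes of padding after
// the i8, so HasPadding returns true; { i32, i32 } is fully packed and it
// returns false.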

/// isSafeAllocaToScalarRepl - Check to see if the specified allocation of an
/// aggregate can be broken down into elements.  Return 0 if not, 3 if safe,
/// or 1 if safe after canonicalization has been performed.
///
int SROA::isSafeAllocaToScalarRepl(AllocationInst *AI) {
  // Loop over the use list of the alloca.  We can only transform it if all of
  // the users are safe to transform.
  AllocaInfo Info;

  for (Value::use_iterator I = AI->use_begin(), E = AI->use_end();
       I != E; ++I) {
    isSafeUseOfAllocation(cast<Instruction>(*I), AI, Info);
    if (Info.isUnsafe) {
      DOUT << "Cannot transform: " << *AI << "  due to user: " << **I;
      return 0;
    }
  }

  // Okay, we know all the users are promotable.  If the aggregate is a memcpy
  // source and destination, we have to be careful.  In particular, the memcpy
  // could be moving around elements that live in structure padding of the LLVM
  // types, but may actually be used.  In these cases, we refuse to promote the
  // struct.
  if (Info.isMemCpySrc && Info.isMemCpyDst &&
      HasPadding(AI->getType()->getElementType(), getAnalysis<TargetData>()))
    return 0;

  // If we require cleanup, return 1, otherwise return 3.
  return Info.needsCanon ? 1 : 3;
}

/// CanonicalizeAllocaUsers - If SROA reported that it can promote the specified
/// allocation, but only if cleaned up, perform the cleanups required.
void SROA::CanonicalizeAllocaUsers(AllocationInst *AI) {
  // At this point, we know that the end result will be SROA'd and promoted, so
  // we can insert ugly code if required so long as sroa+mem2reg will clean it
  // up.
  for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
       UI != E; ) {
    GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(*UI++);
    if (!GEPI) continue;
    gep_type_iterator I = gep_type_begin(GEPI);
    ++I;

    if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
      uint64_t NumElements = AT->getNumElements();

      if (!isa<ConstantInt>(I.getOperand())) {
        if (NumElements == 1) {
          GEPI->setOperand(2, Constant::getNullValue(Type::Int32Ty));
        } else {
          assert(NumElements == 2 && "Unhandled case!");
          // All users of the GEP must be loads.  At each use of the GEP, insert
          // two loads of the appropriate indexed GEP and select between them.
          Value *IsOne = new ICmpInst(ICmpInst::ICMP_NE, I.getOperand(),
                              Constant::getNullValue(I.getOperand()->getType()),
                                      "isone", GEPI);
          // Insert the new GEP instructions, which are properly indexed.
          SmallVector<Value*, 8> Indices(GEPI->op_begin()+1, GEPI->op_end());
          Indices[1] = Constant::getNullValue(Type::Int32Ty);
          Value *ZeroIdx = GetElementPtrInst::Create(GEPI->getOperand(0),
                                                     Indices.begin(),
                                                     Indices.end(),
                                                     GEPI->getName()+".0", GEPI);
          Indices[1] = ConstantInt::get(Type::Int32Ty, 1);
          Value *OneIdx = GetElementPtrInst::Create(GEPI->getOperand(0),
                                                    Indices.begin(),
                                                    Indices.end(),
                                                    GEPI->getName()+".1", GEPI);
          // Replace all loads of the variable index GEP with loads from both
          // indexes and a select.
          while (!GEPI->use_empty()) {
            LoadInst *LI = cast<LoadInst>(GEPI->use_back());
            Value *Zero = new LoadInst(ZeroIdx, LI->getName()+".0", LI);
            Value *One  = new LoadInst(OneIdx , LI->getName()+".1", LI);
            Value *R = SelectInst::Create(IsOne, One, Zero, LI->getName(), LI);
            LI->replaceAllUsesWith(R);
            LI->eraseFromParent();
          }
          GEPI->eraseFromParent();
        }
      }
    }
  }
}
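
// For illustration only: for a two-element array, a variable-index load like
//
//   %p = getelementptr [2 x i32]* %A, i32 0, i32 %i
//   %v = load i32* %p
//
// is rewritten (sketch) to load both fixed elements and select between them:
//
//   %isone = icmp ne i32 %i, 0
//   %p.0 = getelementptr [2 x i32]* %A, i32 0, i32 0
//   %p.1 = getelementptr [2 x i32]* %A, i32 0, i32 1
//   %v.0 = load i32* %p.0
//   %v.1 = load i32* %p.1
//   %v = select i1 %isone, i32 %v.1, i32 %v.0
//
// which SROA and mem2reg can then finish off.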

/// MergeInType - Add the 'In' type to the accumulated type so far.  If the
/// types are incompatible, return true, otherwise update Accum and return
/// false.
///
/// There are three cases we handle here:
///   1) An effectively-integer union, where the pieces are stored into as
///      smaller integers (common with byte swap and other idioms).
///   2) A union of vector types of the same size and potentially its elements.
///      Here we turn element accesses into insert/extract element operations.
///   3) A union of scalar types, such as int/float or int/pointer.  Here we
///      merge together into integers, allowing the xform to work with #1 as
///      well.
static bool MergeInType(const Type *In, const Type *&Accum,
                        const TargetData &TD) {
  // If this is our first type, just use it.
  const VectorType *PTy;
  if (Accum == Type::VoidTy || In == Accum) {
    Accum = In;
  } else if (In == Type::VoidTy) {
    // Noop.
  } else if (In->isInteger() && Accum->isInteger()) {   // integer union.
    // Otherwise pick whichever type is larger.
    if (cast<IntegerType>(In)->getBitWidth() >
        cast<IntegerType>(Accum)->getBitWidth())
      Accum = In;
  } else if (isa<PointerType>(In) && isa<PointerType>(Accum)) {
    // Pointer unions just stay as one of the pointers.
  } else if (isa<VectorType>(In) || isa<VectorType>(Accum)) {
    if ((PTy = dyn_cast<VectorType>(Accum)) &&
        PTy->getElementType() == In) {
      // Accum is a vector, and we are accessing an element: ok.
    } else if ((PTy = dyn_cast<VectorType>(In)) &&
               PTy->getElementType() == Accum) {
      // In is a vector, and accum is an element: ok, remember In.
      Accum = In;
    } else if ((PTy = dyn_cast<VectorType>(In)) && isa<VectorType>(Accum) &&
               PTy->getBitWidth() == cast<VectorType>(Accum)->getBitWidth()) {
      // Two vectors of the same size: keep Accum.
    } else {
      // Cannot insert a short into a <4 x int> or handle
      // <2 x int> -> <4 x int>
      return true;
    }
  } else {
    // Pointer/FP/Integer unions merge together as integers.
    switch (Accum->getTypeID()) {
    case Type::PointerTyID:   Accum = TD.getIntPtrType(); break;
    case Type::FloatTyID:     Accum = Type::Int32Ty; break;
    case Type::DoubleTyID:    Accum = Type::Int64Ty; break;
    case Type::X86_FP80TyID:  return true;
    case Type::FP128TyID:     return true;
    case Type::PPC_FP128TyID: return true;
    default:
      assert(Accum->isInteger() && "Unknown FP type!");
      break;
    }

    switch (In->getTypeID()) {
    case Type::PointerTyID:   In = TD.getIntPtrType(); break;
    case Type::FloatTyID:     In = Type::Int32Ty; break;
    case Type::DoubleTyID:    In = Type::Int64Ty; break;
    case Type::X86_FP80TyID:  return true;
    case Type::FP128TyID:     return true;
    case Type::PPC_FP128TyID: return true;
    default:
      assert(In->isInteger() && "Unknown FP type!");
      break;
    }
    return MergeInType(In, Accum, TD);
  }
  return false;
}

/// getUIntAtLeastAsBigAs - Return an unsigned integer type that is at least
/// as big as the specified bit width.  If there is no suitable type, this
/// returns null.
const Type *getUIntAtLeastAsBigAs(unsigned NumBits) {
  if (NumBits > 64) return 0;
  if (NumBits > 32) return Type::Int64Ty;
  if (NumBits > 16) return Type::Int32Ty;
  if (NumBits > 8) return Type::Int16Ty;
  return Type::Int8Ty;
}
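
// For illustration only, how MergeInType accumulates:
//   i8 merged with i32          -> i32 (integer union keeps the wider type)
//   float merged with i32       -> i32 (FP is retyped as a same-width integer)
//   <4 x float> merged w/ float -> <4 x float> (element access of a vector)
//   <4 x float> w/ <4 x i32>    -> same-size vectors, Accum is kept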

/// CanConvertToScalar - V is a pointer.  If we can convert the pointee to a
/// single scalar integer type, return that type.  Further, if the use is not
/// a completely trivial use that mem2reg could promote, set IsNotTrivial.  If
/// there are no uses of this pointer, return Type::VoidTy to differentiate from
/// failure.
///
const Type *SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial) {
  const Type *UsedType = Type::VoidTy; // No uses, no forced type.
  const TargetData &TD = getAnalysis<TargetData>();
  const PointerType *PTy = cast<PointerType>(V->getType());

  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // FIXME: Loads of a first class aggregate value could be converted to a
      // series of loads and insertvalues
      if (!LI->getType()->isSingleValueType())
        return 0;

      if (MergeInType(LI->getType(), UsedType, TD))
        return 0;

    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // Storing the pointer, not into the value?
      if (SI->getOperand(0) == V) return 0;

      // FIXME: Stores of a first class aggregate value could be converted to a
      // series of extractvalues and stores
      if (!SI->getOperand(0)->getType()->isSingleValueType())
        return 0;

      // NOTE: We could handle storing of FP imms into integers here!

      if (MergeInType(SI->getOperand(0)->getType(), UsedType, TD))
        return 0;
    } else if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
      IsNotTrivial = true;
      const Type *SubTy = CanConvertToScalar(CI, IsNotTrivial);
      if (!SubTy || MergeInType(SubTy, UsedType, TD)) return 0;
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // Check to see if this is stepping over an element: GEP Ptr, int C
      if (GEP->getNumOperands() == 2 && isa<ConstantInt>(GEP->getOperand(1))) {
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getZExtValue();
        unsigned ElSize = TD.getABITypeSize(PTy->getElementType());
        unsigned BitOffset = Idx*ElSize*8;
        if (BitOffset > 64 || !isPowerOf2_32(ElSize)) return 0;

        IsNotTrivial = true;
        const Type *SubElt = CanConvertToScalar(GEP, IsNotTrivial);
        if (SubElt == 0) return 0;
        if (SubElt != Type::VoidTy && SubElt->isInteger()) {
          const Type *NewTy =
            getUIntAtLeastAsBigAs(TD.getABITypeSizeInBits(SubElt)+BitOffset);
          if (NewTy == 0 || MergeInType(NewTy, UsedType, TD)) return 0;
          continue;
        }
      } else if (GEP->getNumOperands() == 3 &&
                 isa<ConstantInt>(GEP->getOperand(1)) &&
                 isa<ConstantInt>(GEP->getOperand(2)) &&
                 cast<ConstantInt>(GEP->getOperand(1))->isZero()) {
        // We are stepping into an element, e.g. a structure or an array:
        //   GEP Ptr, int 0, uint C
        const Type *AggTy = PTy->getElementType();
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();

        if (const ArrayType *ATy = dyn_cast<ArrayType>(AggTy)) {
          if (Idx >= ATy->getNumElements()) return 0;  // Out of range.
        } else if (const VectorType *VectorTy = dyn_cast<VectorType>(AggTy)) {
          // Getting an element of the vector.
          if (Idx >= VectorTy->getNumElements()) return 0;  // Out of range.

          // Merge in the vector type.
          if (MergeInType(VectorTy, UsedType, TD)) return 0;

          const Type *SubTy = CanConvertToScalar(GEP, IsNotTrivial);
          if (SubTy == 0) return 0;

          if (SubTy != Type::VoidTy && MergeInType(SubTy, UsedType, TD))
            return 0;

          // We'll need to change this to an insert/extract element operation.
          IsNotTrivial = true;
          continue;  // Everything looks ok

        } else if (isa<StructType>(AggTy)) {
          // Structs are always ok.
        } else {
          return 0;
        }
        const Type *NTy = getUIntAtLeastAsBigAs(TD.getABITypeSizeInBits(AggTy));
        if (NTy == 0 || MergeInType(NTy, UsedType, TD)) return 0;
        const Type *SubTy = CanConvertToScalar(GEP, IsNotTrivial);
        if (SubTy == 0) return 0;
        if (SubTy != Type::VoidTy && MergeInType(SubTy, UsedType, TD))
          return 0;
        continue;  // Everything looks ok
      }
      return 0;
    } else {
      // Cannot handle this!
      return 0;
    }
  }

  return UsedType;
}
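
// For illustration only: an alloca used as a union of i32 and float,
//
//   %u = alloca i32
//   %uf = bitcast i32* %u to float*
//   store float %f, float* %uf
//   %x = load i32* %u
//
// merges to a single i32 scalar (case #3 above), with IsNotTrivial set by the
// bitcast so ConvertToScalar knows it must rewrite the uses.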

/// ConvertToScalar - The specified alloca passes the CanConvertToScalar
/// predicate and is non-trivial.  Convert it to something that can be trivially
/// promoted into a register by mem2reg.
void SROA::ConvertToScalar(AllocationInst *AI, const Type *ActualTy) {
  DOUT << "CONVERT TO SCALAR: " << *AI << "  TYPE = "
       << *ActualTy << "\n";
  ++NumConverted;

  BasicBlock *EntryBlock = AI->getParent();
  assert(EntryBlock == &EntryBlock->getParent()->getEntryBlock() &&
         "Not in the entry block!");
  EntryBlock->getInstList().remove(AI);  // Take the alloca out of the program.

  // Create and insert the alloca.
  AllocaInst *NewAI = new AllocaInst(ActualTy, 0, AI->getName(),
                                     EntryBlock->begin());
  ConvertUsesToScalar(AI, NewAI, 0);
  delete AI;
}


/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new alloca
/// directly.  This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.  By the end of this, there should be no uses of Ptr.
void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset) {
  while (!Ptr->use_empty()) {
    Instruction *User = cast<Instruction>(Ptr->use_back());

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      Value *NV = ConvertUsesOfLoadToScalar(LI, NewAI, Offset);
      LI->replaceAllUsesWith(NV);
      LI->eraseFromParent();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      assert(SI->getOperand(0) != Ptr && "Consistency error!");

      Value *SV = ConvertUsesOfStoreToScalar(SI, NewAI, Offset);
      new StoreInst(SV, NewAI, SI);
      SI->eraseFromParent();

    } else if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
      ConvertUsesToScalar(CI, NewAI, Offset);
      CI->eraseFromParent();
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      const PointerType *AggPtrTy =
        cast<PointerType>(GEP->getOperand(0)->getType());
      const TargetData &TD = getAnalysis<TargetData>();
      unsigned AggSizeInBits =
        TD.getABITypeSizeInBits(AggPtrTy->getElementType());

      // Check to see if this is stepping over an element: GEP Ptr, int C
      unsigned NewOffset = Offset;
      if (GEP->getNumOperands() == 2) {
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getZExtValue();
        unsigned BitOffset = Idx*AggSizeInBits;

        NewOffset += BitOffset;
      } else if (GEP->getNumOperands() == 3) {
        // We know that operand #2 is zero.
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
        const Type *AggTy = AggPtrTy->getElementType();
        if (const SequentialType *SeqTy = dyn_cast<SequentialType>(AggTy)) {
          unsigned ElSizeBits =
            TD.getABITypeSizeInBits(SeqTy->getElementType());

          NewOffset += ElSizeBits*Idx;
        } else if (const StructType *STy = dyn_cast<StructType>(AggTy)) {
          unsigned EltBitOffset =
            TD.getStructLayout(STy)->getElementOffsetInBits(Idx);

          NewOffset += EltBitOffset;
        } else {
          assert(0 && "Unsupported operation!");
          abort();
        }
      } else {
        assert(0 && "Unsupported operation!");
        abort();
      }
      ConvertUsesToScalar(GEP, NewAI, NewOffset);
      GEP->eraseFromParent();
    } else {
      assert(0 && "Unsupported operation!");
      abort();
    }
  }
}
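
// For illustration only: with NewAI of type i64 (two merged i32 fields) and a
// use chain through
//
//   %p = getelementptr { i32, i32 }* %old, i32 0, i32 1
//
// the struct layout gives element 1 a 32-bit offset, so loads and stores
// through %p are rewritten as accesses to bits 32..63 of the single i64.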

/// ConvertUsesOfLoadToScalar - Convert all of the users of the specified load
/// to use the new alloca directly, returning the value that should replace the
/// load.  This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.  By the end of this, there should be no uses of Ptr.
Value *SROA::ConvertUsesOfLoadToScalar(LoadInst *LI, AllocaInst *NewAI,
                                       unsigned Offset) {
  // The load is a bit extract from NewAI shifted right by Offset bits.
  Value *NV = new LoadInst(NewAI, LI->getName(), LI);

  if (NV->getType() == LI->getType() && Offset == 0) {
    // We win, no conversion needed.
    return NV;
  }

  // If the result type of the 'union' is a pointer, then this must be ptr->ptr
  // cast.  Anything else would result in NV being an integer.
  if (isa<PointerType>(NV->getType())) {
    assert(isa<PointerType>(LI->getType()));
    return new BitCastInst(NV, LI->getType(), LI->getName(), LI);
  }

  if (const VectorType *VTy = dyn_cast<VectorType>(NV->getType())) {
    // If the result alloca is a vector type, this is either an element
    // access or a bitcast to another vector type.
    if (isa<VectorType>(LI->getType()))
      return new BitCastInst(NV, LI->getType(), LI->getName(), LI);

    // Otherwise it must be an element access.
    const TargetData &TD = getAnalysis<TargetData>();
    unsigned Elt = 0;
    if (Offset) {
      unsigned EltSize = TD.getABITypeSizeInBits(VTy->getElementType());
      Elt = Offset/EltSize;
      Offset -= EltSize*Elt;
    }
    NV = new ExtractElementInst(NV, ConstantInt::get(Type::Int32Ty, Elt),
                                "tmp", LI);

    // If we're done, return this element.
    if (NV->getType() == LI->getType() && Offset == 0)
      return NV;
  }

  const IntegerType *NTy = cast<IntegerType>(NV->getType());

  // If this is a big-endian system and the load is narrower than the
  // full alloca type, we need to do a shift to get the right bits.
  int ShAmt = 0;
  const TargetData &TD = getAnalysis<TargetData>();
  if (TD.isBigEndian()) {
    // On big-endian machines, the lowest bit is stored at the bit offset
    // from the pointer given by getTypeStoreSizeInBits.  This matters for
    // integers with a bitwidth that is not a multiple of 8.
    ShAmt = TD.getTypeStoreSizeInBits(NTy) -
            TD.getTypeStoreSizeInBits(LI->getType()) - Offset;
  } else {
    ShAmt = Offset;
  }

  // Note: we support negative bitwidths (with shl) which are not defined.
  // We do this to support (f.e.) loads off the end of a structure where
  // only some bits are used.
  if (ShAmt > 0 && (unsigned)ShAmt < NTy->getBitWidth())
    NV = BinaryOperator::CreateLShr(NV,
                                    ConstantInt::get(NV->getType(), ShAmt),
                                    LI->getName(), LI);
  else if (ShAmt < 0 && (unsigned)-ShAmt < NTy->getBitWidth())
    NV = BinaryOperator::CreateShl(NV,
                                   ConstantInt::get(NV->getType(), -ShAmt),
                                   LI->getName(), LI);

  // Finally, unconditionally truncate the integer to the right width.
  unsigned LIBitWidth = TD.getTypeSizeInBits(LI->getType());
  if (LIBitWidth < NTy->getBitWidth())
    NV = new TruncInst(NV, IntegerType::get(LIBitWidth),
                       LI->getName(), LI);

  // If the result is an integer, this is a trunc or bitcast.
  if (isa<IntegerType>(LI->getType())) {
    // Should be done.
  } else if (LI->getType()->isFloatingPoint()) {
    // Just do a bitcast, we know the sizes match up.
    NV = new BitCastInst(NV, LI->getType(), LI->getName(), LI);
  } else {
    // Otherwise must be a pointer.
    NV = new IntToPtrInst(NV, LI->getType(), LI->getName(), LI);
  }
  assert(NV->getType() == LI->getType() && "Didn't convert right?");
  return NV;
}
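
// For illustration only: loading the second byte of a union stored in an i32
// (Offset = 8, little-endian) becomes
//
//   %all = load i32* %NewAI
//   %sh  = lshr i32 %all, 8
//   %b   = trunc i32 %sh to i8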

/// ConvertUsesOfStoreToScalar - Convert the specified store to a load+store
/// pair of the new alloca directly, returning the value that should be stored
/// to the alloca.  This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.  By the end of this, there should be no uses of Ptr.
Value *SROA::ConvertUsesOfStoreToScalar(StoreInst *SI, AllocaInst *NewAI,
                                        unsigned Offset) {

  // Convert the stored type to the actual type, shift it left to insert
  // then 'or' into place.
  Value *SV = SI->getOperand(0);
  const Type *AllocaType = NewAI->getType()->getElementType();
  if (SV->getType() == AllocaType && Offset == 0) {
    // All is well.
  } else if (const VectorType *PTy = dyn_cast<VectorType>(AllocaType)) {
    Value *Old = new LoadInst(NewAI, NewAI->getName()+".in", SI);

    // If the result alloca is a vector type, this is either an element
    // access or a bitcast to another vector type.
    if (isa<VectorType>(SV->getType())) {
      SV = new BitCastInst(SV, AllocaType, SV->getName(), SI);
    } else {
      // Must be an element insertion.
      const TargetData &TD = getAnalysis<TargetData>();
      unsigned Elt = Offset/TD.getABITypeSizeInBits(PTy->getElementType());
      SV = InsertElementInst::Create(Old, SV,
                                     ConstantInt::get(Type::Int32Ty, Elt),
                                     "tmp", SI);
    }
  } else if (isa<PointerType>(AllocaType)) {
    // If the alloca type is a pointer, then all the elements must be
    // pointers.
    if (SV->getType() != AllocaType)
      SV = new BitCastInst(SV, AllocaType, SV->getName(), SI);
  } else {
    Value *Old = new LoadInst(NewAI, NewAI->getName()+".in", SI);

    // If SV is a float, convert it to the appropriate integer type.
    // If it is a pointer, do the same, and also handle ptr->ptr casts
    // here.
    const TargetData &TD = getAnalysis<TargetData>();
    unsigned SrcWidth = TD.getTypeSizeInBits(SV->getType());
    unsigned DestWidth = TD.getTypeSizeInBits(AllocaType);
    unsigned SrcStoreWidth = TD.getTypeStoreSizeInBits(SV->getType());
    unsigned DestStoreWidth = TD.getTypeStoreSizeInBits(AllocaType);
    if (SV->getType()->isFloatingPoint())
      SV = new BitCastInst(SV, IntegerType::get(SrcWidth),
                           SV->getName(), SI);
    else if (isa<PointerType>(SV->getType()))
      SV = new PtrToIntInst(SV, TD.getIntPtrType(), SV->getName(), SI);

    // Always zero extend the value if needed.
    if (SV->getType() != AllocaType)
      SV = new ZExtInst(SV, AllocaType, SV->getName(), SI);

    // If this is a big-endian system and the store is narrower than the
    // full alloca type, we need to do a shift to get the right bits.
    int ShAmt = 0;
    if (TD.isBigEndian()) {
      // On big-endian machines, the lowest bit is stored at the bit offset
      // from the pointer given by getTypeStoreSizeInBits.  This matters for
      // integers with a bitwidth that is not a multiple of 8.
      ShAmt = DestStoreWidth - SrcStoreWidth - Offset;
    } else {
      ShAmt = Offset;
    }

    // Note: we support negative bitwidths (with shr) which are not defined.
    // We do this to support (f.e.) stores off the end of a structure where
    // only some bits in the structure are set.
    APInt Mask(APInt::getLowBitsSet(DestWidth, SrcWidth));
    if (ShAmt > 0 && (unsigned)ShAmt < DestWidth) {
      SV = BinaryOperator::CreateShl(SV,
                                     ConstantInt::get(SV->getType(), ShAmt),
                                     SV->getName(), SI);
      Mask <<= ShAmt;
    } else if (ShAmt < 0 && (unsigned)-ShAmt < DestWidth) {
      SV = BinaryOperator::CreateLShr(SV,
                                      ConstantInt::get(SV->getType(), -ShAmt),
                                      SV->getName(), SI);
      Mask = Mask.lshr(-ShAmt);
    }

    // Mask out the bits we are about to insert from the old value, and or
    // in the new bits.
    if (SrcWidth != DestWidth) {
      assert(DestWidth > SrcWidth);
      Old = BinaryOperator::CreateAnd(Old, ConstantInt::get(~Mask),
                                      Old->getName()+".mask", SI);
      SV = BinaryOperator::CreateOr(Old, SV, SV->getName()+".ins", SI);
    }
  }
  return SV;
}
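
// For illustration only: storing an i8 into bits 8..15 of an i32 union
// (little-endian, Offset = 8) becomes, roughly,
//
//   %old  = load i32* %NewAI
//   %ext  = zext i8 %b to i32
//   %sh   = shl i32 %ext, 8
//   %mask = and i32 %old, -65281    ; clear bits 8..15 (~0x0000FF00)
//   %ins  = or i32 %mask, %sh
//   store i32 %ins, i32* %NewAI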


/// PointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable.  This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool PointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return PointsToConstantGlobal(CE->getOperand(0));
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses.  If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with isOffset) but otherwise traverse
/// the uses.  If we see a memcpy/memmove that targets an unoffseted pointer to
/// the alloca, and if the source pointer is a pointer to a constant global, we
/// can optimize this.
static bool isOnlyCopiedFromConstantGlobal(Value *V, Instruction *&TheCopy,
                                           bool isOffset) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    if (isa<LoadInst>(*UI)) {
      // Ignore loads, they are always ok.
      continue;
    }
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(*UI)) {
      // If uses of the bitcast are ok, we are ok.
      if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, isOffset))
        return false;
      continue;
    }
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(*UI)) {
      // If the GEP has all zero indices, it doesn't offset the pointer.  If it
      // doesn't, it does.
      if (!isOnlyCopiedFromConstantGlobal(GEP, TheCopy,
                                          isOffset || !GEP->hasAllZeroIndices()))
        return false;
      continue;
    }

    // If this isn't our memcpy/memmove, reject it as something we can't
    // handle.
    if (!isa<MemCpyInst>(*UI) && !isa<MemMoveInst>(*UI))
      return false;

    // If we already have seen a copy, reject the second one.
    if (TheCopy) return false;

    // If the pointer has been offset from the start of the alloca, we can't
    // safely handle this.
    if (isOffset) return false;

    // If the memintrinsic isn't using the alloca as the dest, reject it.
    if (UI.getOperandNo() != 1) return false;

    MemIntrinsic *MI = cast<MemIntrinsic>(*UI);

    // If the source of the memcpy/move is not a constant global, reject it.
    if (!PointsToConstantGlobal(MI->getOperand(2)))
      return false;

    // Otherwise, the transform is safe.  Remember the copy instruction.
    TheCopy = MI;
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - Return the memcpy/memmove instruction if
/// the specified alloca is only modified by a copy from a constant global, or
/// null otherwise.  If we can prove this, we can replace any uses of the
/// alloca with uses of the global directly.
Instruction *SROA::isOnlyCopiedFromConstantGlobal(AllocationInst *AI) {
  Instruction *TheCopy = 0;
  if (::isOnlyCopiedFromConstantGlobal(AI, TheCopy, false))
    return TheCopy;
  return 0;
}