ScalarReplAggregates.cpp revision 39a1c04323a5993d6b2993e615ec44c16e19aeea
1//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates --------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file was developed by the LLVM research group and is distributed under 6// the University of Illinois Open Source License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This transformation implements the well known scalar replacement of 11// aggregates transformation. This xform breaks up alloca instructions of 12// aggregate type (structure or array) into individual alloca instructions for 13// each member (if possible). Then, if possible, it transforms the individual 14// alloca instructions into nice clean scalar SSA form. 15// 16// This combines a simple SRoA algorithm with the Mem2Reg algorithm because 17// often interact, especially for C++ programs. As such, iterating between 18// SRoA, then Mem2Reg until we run out of things to promote works well. 19// 20//===----------------------------------------------------------------------===// 21 22#define DEBUG_TYPE "scalarrepl" 23#include "llvm/Transforms/Scalar.h" 24#include "llvm/Constants.h" 25#include "llvm/DerivedTypes.h" 26#include "llvm/Function.h" 27#include "llvm/GlobalVariable.h" 28#include "llvm/Instructions.h" 29#include "llvm/IntrinsicInst.h" 30#include "llvm/Pass.h" 31#include "llvm/Analysis/Dominators.h" 32#include "llvm/Target/TargetData.h" 33#include "llvm/Transforms/Utils/PromoteMemToReg.h" 34#include "llvm/Support/Debug.h" 35#include "llvm/Support/GetElementPtrTypeIterator.h" 36#include "llvm/Support/MathExtras.h" 37#include "llvm/Support/Compiler.h" 38#include "llvm/ADT/SmallVector.h" 39#include "llvm/ADT/Statistic.h" 40#include "llvm/ADT/StringExtras.h" 41using namespace llvm; 42 43STATISTIC(NumReplaced, "Number of allocas broken up"); 44STATISTIC(NumPromoted, "Number of allocas promoted"); 45STATISTIC(NumConverted, "Number of aggregates converted to scalar"); 46STATISTIC(NumGlobals, "Number of allocas copied from constant global"); 47 48namespace { 49 struct VISIBILITY_HIDDEN SROA : public FunctionPass { 50 static char ID; // Pass identification, replacement for typeid 51 SROA() : FunctionPass((intptr_t)&ID) {} 52 53 bool runOnFunction(Function &F); 54 55 bool performScalarRepl(Function &F); 56 bool performPromotion(Function &F); 57 58 // getAnalysisUsage - This pass does not require any passes, but we know it 59 // will not alter the CFG, so say so. 60 virtual void getAnalysisUsage(AnalysisUsage &AU) const { 61 AU.addRequired<ETForest>(); 62 AU.addRequired<DominanceFrontier>(); 63 AU.addRequired<TargetData>(); 64 AU.setPreservesCFG(); 65 } 66 67 private: 68 /// AllocaInfo - When analyzing uses of an alloca instruction, this captures 69 /// information about the uses. All these fields are initialized to false 70 /// and set to true when something is learned. 71 struct AllocaInfo { 72 /// isUnsafe - This is set to true if the alloca cannot be SROA'd. 73 bool isUnsafe : 1; 74 75 /// needsCanon - This is set to true if there is some use of the alloca 76 /// that requires canonicalization. 77 bool needsCanon : 1; 78 79 /// isMemCpySrc - This is true if this aggregate is memcpy'd from. 80 bool isMemCpySrc : 1; 81 82 /// isMemCpyDst - This is true if this aggregate is memcpy'd info. 83 bool isMemCpyDst : 1; 84 85 AllocaInfo() 86 : isUnsafe(false), needsCanon(false), 87 isMemCpySrc(false), isMemCpyDst(false) {} 88 }; 89 90 void MarkUnsafe(AllocaInfo &I) { I.isUnsafe = true; } 91 92 int isSafeAllocaToScalarRepl(AllocationInst *AI); 93 94 void isSafeUseOfAllocation(Instruction *User, AllocationInst *AI, 95 AllocaInfo &Info); 96 void isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI, 97 AllocaInfo &Info); 98 void isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI, 99 unsigned OpNo, AllocaInfo &Info); 100 void isSafeUseOfBitCastedAllocation(BitCastInst *User, AllocationInst *AI, 101 AllocaInfo &Info); 102 103 void DoScalarReplacement(AllocationInst *AI, 104 std::vector<AllocationInst*> &WorkList); 105 void CanonicalizeAllocaUsers(AllocationInst *AI); 106 AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocationInst *Base); 107 108 void RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI, 109 SmallVector<AllocaInst*, 32> &NewElts); 110 111 const Type *CanConvertToScalar(Value *V, bool &IsNotTrivial); 112 void ConvertToScalar(AllocationInst *AI, const Type *Ty); 113 void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset); 114 static Instruction *isOnlyCopiedFromConstantGlobal(AllocationInst *AI); 115 }; 116 117 char SROA::ID = 0; 118 RegisterPass<SROA> X("scalarrepl", "Scalar Replacement of Aggregates"); 119} 120 121// Public interface to the ScalarReplAggregates pass 122FunctionPass *llvm::createScalarReplAggregatesPass() { return new SROA(); } 123 124 125bool SROA::runOnFunction(Function &F) { 126 bool Changed = performPromotion(F); 127 while (1) { 128 bool LocalChange = performScalarRepl(F); 129 if (!LocalChange) break; // No need to repromote if no scalarrepl 130 Changed = true; 131 LocalChange = performPromotion(F); 132 if (!LocalChange) break; // No need to re-scalarrepl if no promotion 133 } 134 135 return Changed; 136} 137 138 139bool SROA::performPromotion(Function &F) { 140 std::vector<AllocaInst*> Allocas; 141 ETForest &ET = getAnalysis<ETForest>(); 142 DominanceFrontier &DF = getAnalysis<DominanceFrontier>(); 143 144 BasicBlock &BB = F.getEntryBlock(); // Get the entry node for the function 145 146 bool Changed = false; 147 148 while (1) { 149 Allocas.clear(); 150 151 // Find allocas that are safe to promote, by looking at all instructions in 152 // the entry node 153 for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I) 154 if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) // Is it an alloca? 155 if (isAllocaPromotable(AI)) 156 Allocas.push_back(AI); 157 158 if (Allocas.empty()) break; 159 160 PromoteMemToReg(Allocas, ET, DF); 161 NumPromoted += Allocas.size(); 162 Changed = true; 163 } 164 165 return Changed; 166} 167 168// performScalarRepl - This algorithm is a simple worklist driven algorithm, 169// which runs on all of the malloc/alloca instructions in the function, removing 170// them if they are only used by getelementptr instructions. 171// 172bool SROA::performScalarRepl(Function &F) { 173 std::vector<AllocationInst*> WorkList; 174 175 // Scan the entry basic block, adding any alloca's and mallocs to the worklist 176 BasicBlock &BB = F.getEntryBlock(); 177 for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I) 178 if (AllocationInst *A = dyn_cast<AllocationInst>(I)) 179 WorkList.push_back(A); 180 181 const TargetData &TD = getAnalysis<TargetData>(); 182 183 // Process the worklist 184 bool Changed = false; 185 while (!WorkList.empty()) { 186 AllocationInst *AI = WorkList.back(); 187 WorkList.pop_back(); 188 189 // Handle dead allocas trivially. These can be formed by SROA'ing arrays 190 // with unused elements. 191 if (AI->use_empty()) { 192 AI->eraseFromParent(); 193 continue; 194 } 195 196 // If we can turn this aggregate value (potentially with casts) into a 197 // simple scalar value that can be mem2reg'd into a register value. 198 bool IsNotTrivial = false; 199 if (const Type *ActualType = CanConvertToScalar(AI, IsNotTrivial)) 200 if (IsNotTrivial && ActualType != Type::VoidTy) { 201 ConvertToScalar(AI, ActualType); 202 Changed = true; 203 continue; 204 } 205 206 // Check to see if we can perform the core SROA transformation. We cannot 207 // transform the allocation instruction if it is an array allocation 208 // (allocations OF arrays are ok though), and an allocation of a scalar 209 // value cannot be decomposed at all. 210 if (!AI->isArrayAllocation() && 211 (isa<StructType>(AI->getAllocatedType()) || 212 isa<ArrayType>(AI->getAllocatedType())) && 213 AI->getAllocatedType()->isSized() && 214 TD.getTypeSize(AI->getAllocatedType()) < 128) { 215 // Check that all of the users of the allocation are capable of being 216 // transformed. 217 switch (isSafeAllocaToScalarRepl(AI)) { 218 default: assert(0 && "Unexpected value!"); 219 case 0: // Not safe to scalar replace. 220 break; 221 case 1: // Safe, but requires cleanup/canonicalizations first 222 CanonicalizeAllocaUsers(AI); 223 // FALL THROUGH. 224 case 3: // Safe to scalar replace. 225 DoScalarReplacement(AI, WorkList); 226 Changed = true; 227 continue; 228 } 229 } 230 231 // Check to see if this allocation is only modified by a memcpy/memmove from 232 // a constant global. If this is the case, we can change all users to use 233 // the constant global instead. This is commonly produced by the CFE by 234 // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A' 235 // is only subsequently read. 236 if (Instruction *TheCopy = isOnlyCopiedFromConstantGlobal(AI)) { 237 DOUT << "Found alloca equal to global: " << *AI; 238 DOUT << " memcpy = " << *TheCopy; 239 Constant *TheSrc = cast<Constant>(TheCopy->getOperand(2)); 240 AI->replaceAllUsesWith(ConstantExpr::getBitCast(TheSrc, AI->getType())); 241 TheCopy->eraseFromParent(); // Don't mutate the global. 242 AI->eraseFromParent(); 243 ++NumGlobals; 244 Changed = true; 245 continue; 246 } 247 248 // Otherwise, couldn't process this. 249 } 250 251 return Changed; 252} 253 254/// DoScalarReplacement - This alloca satisfied the isSafeAllocaToScalarRepl 255/// predicate, do SROA now. 256void SROA::DoScalarReplacement(AllocationInst *AI, 257 std::vector<AllocationInst*> &WorkList) { 258 DOUT << "Found inst to SROA: " << *AI; 259 SmallVector<AllocaInst*, 32> ElementAllocas; 260 if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) { 261 ElementAllocas.reserve(ST->getNumContainedTypes()); 262 for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) { 263 AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0, 264 AI->getAlignment(), 265 AI->getName() + "." + utostr(i), AI); 266 ElementAllocas.push_back(NA); 267 WorkList.push_back(NA); // Add to worklist for recursive processing 268 } 269 } else { 270 const ArrayType *AT = cast<ArrayType>(AI->getAllocatedType()); 271 ElementAllocas.reserve(AT->getNumElements()); 272 const Type *ElTy = AT->getElementType(); 273 for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) { 274 AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(), 275 AI->getName() + "." + utostr(i), AI); 276 ElementAllocas.push_back(NA); 277 WorkList.push_back(NA); // Add to worklist for recursive processing 278 } 279 } 280 281 // Now that we have created the alloca instructions that we want to use, 282 // expand the getelementptr instructions to use them. 283 // 284 while (!AI->use_empty()) { 285 Instruction *User = cast<Instruction>(AI->use_back()); 286 if (BitCastInst *BCInst = dyn_cast<BitCastInst>(User)) { 287 RewriteBitCastUserOfAlloca(BCInst, AI, ElementAllocas); 288 BCInst->eraseFromParent(); 289 continue; 290 } 291 292 GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User); 293 // We now know that the GEP is of the form: GEP <ptr>, 0, <cst> 294 unsigned Idx = 295 (unsigned)cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue(); 296 297 assert(Idx < ElementAllocas.size() && "Index out of range?"); 298 AllocaInst *AllocaToUse = ElementAllocas[Idx]; 299 300 Value *RepValue; 301 if (GEPI->getNumOperands() == 3) { 302 // Do not insert a new getelementptr instruction with zero indices, only 303 // to have it optimized out later. 304 RepValue = AllocaToUse; 305 } else { 306 // We are indexing deeply into the structure, so we still need a 307 // getelement ptr instruction to finish the indexing. This may be 308 // expanded itself once the worklist is rerun. 309 // 310 SmallVector<Value*, 8> NewArgs; 311 NewArgs.push_back(Constant::getNullValue(Type::Int32Ty)); 312 NewArgs.append(GEPI->op_begin()+3, GEPI->op_end()); 313 RepValue = new GetElementPtrInst(AllocaToUse, &NewArgs[0], 314 NewArgs.size(), "", GEPI); 315 RepValue->takeName(GEPI); 316 } 317 318 // If this GEP is to the start of the aggregate, check for memcpys. 319 if (Idx == 0) { 320 bool IsStartOfAggregateGEP = true; 321 for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i) { 322 if (!isa<ConstantInt>(GEPI->getOperand(i))) { 323 IsStartOfAggregateGEP = false; 324 break; 325 } 326 if (!cast<ConstantInt>(GEPI->getOperand(i))->isZero()) { 327 IsStartOfAggregateGEP = false; 328 break; 329 } 330 } 331 332 if (IsStartOfAggregateGEP) 333 RewriteBitCastUserOfAlloca(GEPI, AI, ElementAllocas); 334 } 335 336 337 // Move all of the users over to the new GEP. 338 GEPI->replaceAllUsesWith(RepValue); 339 // Delete the old GEP 340 GEPI->eraseFromParent(); 341 } 342 343 // Finally, delete the Alloca instruction 344 AI->eraseFromParent(); 345 NumReplaced++; 346} 347 348 349/// isSafeElementUse - Check to see if this use is an allowed use for a 350/// getelementptr instruction of an array aggregate allocation. isFirstElt 351/// indicates whether Ptr is known to the start of the aggregate. 352/// 353void SROA::isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI, 354 AllocaInfo &Info) { 355 for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end(); 356 I != E; ++I) { 357 Instruction *User = cast<Instruction>(*I); 358 switch (User->getOpcode()) { 359 case Instruction::Load: break; 360 case Instruction::Store: 361 // Store is ok if storing INTO the pointer, not storing the pointer 362 if (User->getOperand(0) == Ptr) return MarkUnsafe(Info); 363 break; 364 case Instruction::GetElementPtr: { 365 GetElementPtrInst *GEP = cast<GetElementPtrInst>(User); 366 bool AreAllZeroIndices = isFirstElt; 367 if (GEP->getNumOperands() > 1) { 368 if (!isa<ConstantInt>(GEP->getOperand(1)) || 369 !cast<ConstantInt>(GEP->getOperand(1))->isZero()) 370 // Using pointer arithmetic to navigate the array. 371 return MarkUnsafe(Info); 372 373 if (AreAllZeroIndices) { 374 for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i) { 375 if (!isa<ConstantInt>(GEP->getOperand(i)) || 376 !cast<ConstantInt>(GEP->getOperand(i))->isZero()) { 377 AreAllZeroIndices = false; 378 break; 379 } 380 } 381 } 382 } 383 isSafeElementUse(GEP, AreAllZeroIndices, AI, Info); 384 if (Info.isUnsafe) return; 385 break; 386 } 387 case Instruction::BitCast: 388 if (isFirstElt) { 389 isSafeUseOfBitCastedAllocation(cast<BitCastInst>(User), AI, Info); 390 if (Info.isUnsafe) return; 391 break; 392 } 393 DOUT << " Transformation preventing inst: " << *User; 394 return MarkUnsafe(Info); 395 case Instruction::Call: 396 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) { 397 if (isFirstElt) { 398 isSafeMemIntrinsicOnAllocation(MI, AI, I.getOperandNo(), Info); 399 if (Info.isUnsafe) return; 400 break; 401 } 402 } 403 DOUT << " Transformation preventing inst: " << *User; 404 return MarkUnsafe(Info); 405 default: 406 DOUT << " Transformation preventing inst: " << *User; 407 return MarkUnsafe(Info); 408 } 409 } 410 return; // All users look ok :) 411} 412 413/// AllUsersAreLoads - Return true if all users of this value are loads. 414static bool AllUsersAreLoads(Value *Ptr) { 415 for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end(); 416 I != E; ++I) 417 if (cast<Instruction>(*I)->getOpcode() != Instruction::Load) 418 return false; 419 return true; 420} 421 422/// isSafeUseOfAllocation - Check to see if this user is an allowed use for an 423/// aggregate allocation. 424/// 425void SROA::isSafeUseOfAllocation(Instruction *User, AllocationInst *AI, 426 AllocaInfo &Info) { 427 if (BitCastInst *C = dyn_cast<BitCastInst>(User)) 428 return isSafeUseOfBitCastedAllocation(C, AI, Info); 429 430 GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User); 431 if (GEPI == 0) 432 return MarkUnsafe(Info); 433 434 gep_type_iterator I = gep_type_begin(GEPI), E = gep_type_end(GEPI); 435 436 // The GEP is not safe to transform if not of the form "GEP <ptr>, 0, <cst>". 437 if (I == E || 438 I.getOperand() != Constant::getNullValue(I.getOperand()->getType())) { 439 return MarkUnsafe(Info); 440 } 441 442 ++I; 443 if (I == E) return MarkUnsafe(Info); // ran out of GEP indices?? 444 445 bool IsAllZeroIndices = true; 446 447 // If this is a use of an array allocation, do a bit more checking for sanity. 448 if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) { 449 uint64_t NumElements = AT->getNumElements(); 450 451 if (ConstantInt *Idx = dyn_cast<ConstantInt>(I.getOperand())) { 452 IsAllZeroIndices &= Idx->isZero(); 453 454 // Check to make sure that index falls within the array. If not, 455 // something funny is going on, so we won't do the optimization. 456 // 457 if (Idx->getZExtValue() >= NumElements) 458 return MarkUnsafe(Info); 459 460 // We cannot scalar repl this level of the array unless any array 461 // sub-indices are in-range constants. In particular, consider: 462 // A[0][i]. We cannot know that the user isn't doing invalid things like 463 // allowing i to index an out-of-range subscript that accesses A[1]. 464 // 465 // Scalar replacing *just* the outer index of the array is probably not 466 // going to be a win anyway, so just give up. 467 for (++I; I != E && (isa<ArrayType>(*I) || isa<VectorType>(*I)); ++I) { 468 uint64_t NumElements; 469 if (const ArrayType *SubArrayTy = dyn_cast<ArrayType>(*I)) 470 NumElements = SubArrayTy->getNumElements(); 471 else 472 NumElements = cast<VectorType>(*I)->getNumElements(); 473 474 ConstantInt *IdxVal = dyn_cast<ConstantInt>(I.getOperand()); 475 if (!IdxVal) return MarkUnsafe(Info); 476 if (IdxVal->getZExtValue() >= NumElements) 477 return MarkUnsafe(Info); 478 IsAllZeroIndices &= IdxVal->isZero(); 479 } 480 481 } else { 482 IsAllZeroIndices = 0; 483 484 // If this is an array index and the index is not constant, we cannot 485 // promote... that is unless the array has exactly one or two elements in 486 // it, in which case we CAN promote it, but we have to canonicalize this 487 // out if this is the only problem. 488 if ((NumElements == 1 || NumElements == 2) && 489 AllUsersAreLoads(GEPI)) { 490 Info.needsCanon = true; 491 return; // Canonicalization required! 492 } 493 return MarkUnsafe(Info); 494 } 495 } 496 497 // If there are any non-simple uses of this getelementptr, make sure to reject 498 // them. 499 return isSafeElementUse(GEPI, IsAllZeroIndices, AI, Info); 500} 501 502/// isSafeMemIntrinsicOnAllocation - Return true if the specified memory 503/// intrinsic can be promoted by SROA. At this point, we know that the operand 504/// of the memintrinsic is a pointer to the beginning of the allocation. 505void SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI, 506 unsigned OpNo, AllocaInfo &Info) { 507 // If not constant length, give up. 508 ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength()); 509 if (!Length) return MarkUnsafe(Info); 510 511 // If not the whole aggregate, give up. 512 const TargetData &TD = getAnalysis<TargetData>(); 513 if (Length->getZExtValue() != TD.getTypeSize(AI->getType()->getElementType())) 514 return MarkUnsafe(Info); 515 516 // We only know about memcpy/memset/memmove. 517 if (!isa<MemCpyInst>(MI) && !isa<MemSetInst>(MI) && !isa<MemMoveInst>(MI)) 518 return MarkUnsafe(Info); 519 520 // Otherwise, we can transform it. Determine whether this is a memcpy/set 521 // into or out of the aggregate. 522 if (OpNo == 1) 523 Info.isMemCpyDst = true; 524 else { 525 assert(OpNo == 2); 526 Info.isMemCpySrc = true; 527 } 528} 529 530/// isSafeUseOfBitCastedAllocation - Return true if all users of this bitcast 531/// are 532void SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocationInst *AI, 533 AllocaInfo &Info) { 534 for (Value::use_iterator UI = BC->use_begin(), E = BC->use_end(); 535 UI != E; ++UI) { 536 if (BitCastInst *BCU = dyn_cast<BitCastInst>(UI)) { 537 isSafeUseOfBitCastedAllocation(BCU, AI, Info); 538 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(UI)) { 539 isSafeMemIntrinsicOnAllocation(MI, AI, UI.getOperandNo(), Info); 540 } else { 541 return MarkUnsafe(Info); 542 } 543 if (Info.isUnsafe) return; 544 } 545} 546 547/// RewriteBitCastUserOfAlloca - BCInst (transitively) bitcasts AI, or indexes 548/// to its first element. Transform users of the cast to use the new values 549/// instead. 550void SROA::RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI, 551 SmallVector<AllocaInst*, 32> &NewElts) { 552 Constant *Zero = Constant::getNullValue(Type::Int32Ty); 553 const TargetData &TD = getAnalysis<TargetData>(); 554 555 Value::use_iterator UI = BCInst->use_begin(), UE = BCInst->use_end(); 556 while (UI != UE) { 557 if (BitCastInst *BCU = dyn_cast<BitCastInst>(*UI)) { 558 RewriteBitCastUserOfAlloca(BCU, AI, NewElts); 559 ++UI; 560 BCU->eraseFromParent(); 561 continue; 562 } 563 564 // Otherwise, must be memcpy/memmove/memset of the entire aggregate. Split 565 // into one per element. 566 MemIntrinsic *MI = dyn_cast<MemIntrinsic>(*UI); 567 568 // If it's not a mem intrinsic, it must be some other user of a gep of the 569 // first pointer. Just leave these alone. 570 if (!MI) { 571 ++UI; 572 continue; 573 } 574 575 // If this is a memcpy/memmove, construct the other pointer as the 576 // appropriate type. 577 Value *OtherPtr = 0; 578 if (MemCpyInst *MCI = dyn_cast<MemCpyInst>(MI)) { 579 if (BCInst == MCI->getRawDest()) 580 OtherPtr = MCI->getRawSource(); 581 else { 582 assert(BCInst == MCI->getRawSource()); 583 OtherPtr = MCI->getRawDest(); 584 } 585 } else if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) { 586 if (BCInst == MMI->getRawDest()) 587 OtherPtr = MMI->getRawSource(); 588 else { 589 assert(BCInst == MMI->getRawSource()); 590 OtherPtr = MMI->getRawDest(); 591 } 592 } 593 594 // If there is an other pointer, we want to convert it to the same pointer 595 // type as AI has, so we can GEP through it. 596 if (OtherPtr) { 597 // It is likely that OtherPtr is a bitcast, if so, remove it. 598 if (BitCastInst *BC = dyn_cast<BitCastInst>(OtherPtr)) 599 OtherPtr = BC->getOperand(0); 600 if (ConstantExpr *BCE = dyn_cast<ConstantExpr>(OtherPtr)) 601 if (BCE->getOpcode() == Instruction::BitCast) 602 OtherPtr = BCE->getOperand(0); 603 604 // If the pointer is not the right type, insert a bitcast to the right 605 // type. 606 if (OtherPtr->getType() != AI->getType()) 607 OtherPtr = new BitCastInst(OtherPtr, AI->getType(), OtherPtr->getName(), 608 MI); 609 } 610 611 // Process each element of the aggregate. 612 Value *TheFn = MI->getOperand(0); 613 const Type *BytePtrTy = MI->getRawDest()->getType(); 614 bool SROADest = MI->getRawDest() == BCInst; 615 616 for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { 617 // If this is a memcpy/memmove, emit a GEP of the other element address. 618 Value *OtherElt = 0; 619 if (OtherPtr) { 620 OtherElt = new GetElementPtrInst(OtherPtr, Zero, 621 ConstantInt::get(Type::Int32Ty, i), 622 OtherPtr->getNameStr()+"."+utostr(i), 623 MI); 624 } 625 626 Value *EltPtr = NewElts[i]; 627 const Type *EltTy =cast<PointerType>(EltPtr->getType())->getElementType(); 628 629 // If we got down to a scalar, insert a load or store as appropriate. 630 if (EltTy->isFirstClassType()) { 631 if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) { 632 Value *Elt = new LoadInst(SROADest ? OtherElt : EltPtr, "tmp", 633 MI); 634 new StoreInst(Elt, SROADest ? EltPtr : OtherElt, MI); 635 continue; 636 } else { 637 assert(isa<MemSetInst>(MI)); 638 639 // If the stored element is zero (common case), just store a null 640 // constant. 641 Constant *StoreVal; 642 if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getOperand(2))) { 643 if (CI->isZero()) { 644 StoreVal = Constant::getNullValue(EltTy); // 0.0, null, 0, <0,0> 645 } else { 646 // If EltTy is a packed type, get the element type. 647 const Type *ValTy = EltTy; 648 if (const VectorType *VTy = dyn_cast<VectorType>(ValTy)) 649 ValTy = VTy->getElementType(); 650 651 // Construct an integer with the right value. 652 unsigned EltSize = TD.getTypeSize(ValTy); 653 APInt OneVal(EltSize*8, CI->getZExtValue()); 654 APInt TotalVal(OneVal); 655 // Set each byte. 656 for (unsigned i = 0; i != EltSize-1; ++i) { 657 TotalVal = TotalVal.shl(8); 658 TotalVal |= OneVal; 659 } 660 661 // Convert the integer value to the appropriate type. 662 StoreVal = ConstantInt::get(TotalVal); 663 if (isa<PointerType>(ValTy)) 664 StoreVal = ConstantExpr::getIntToPtr(StoreVal, ValTy); 665 else if (ValTy->isFloatingPoint()) 666 StoreVal = ConstantExpr::getBitCast(StoreVal, ValTy); 667 assert(StoreVal->getType() == ValTy && "Type mismatch!"); 668 669 // If the requested value was a vector constant, create it. 670 if (EltTy != ValTy) { 671 unsigned NumElts = cast<VectorType>(ValTy)->getNumElements(); 672 SmallVector<Constant*, 16> Elts(NumElts, StoreVal); 673 StoreVal = ConstantVector::get(&Elts[0], NumElts); 674 } 675 } 676 new StoreInst(StoreVal, EltPtr, MI); 677 continue; 678 } 679 // Otherwise, if we're storing a byte variable, use a memset call for 680 // this element. 681 } 682 } 683 684 // Cast the element pointer to BytePtrTy. 685 if (EltPtr->getType() != BytePtrTy) 686 EltPtr = new BitCastInst(EltPtr, BytePtrTy, EltPtr->getNameStr(), MI); 687 688 // Cast the other pointer (if we have one) to BytePtrTy. 689 if (OtherElt && OtherElt->getType() != BytePtrTy) 690 OtherElt = new BitCastInst(OtherElt, BytePtrTy,OtherElt->getNameStr(), 691 MI); 692 693 unsigned EltSize = TD.getTypeSize(EltTy); 694 695 // Finally, insert the meminst for this element. 696 if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) { 697 Value *Ops[] = { 698 SROADest ? EltPtr : OtherElt, // Dest ptr 699 SROADest ? OtherElt : EltPtr, // Src ptr 700 ConstantInt::get(MI->getOperand(3)->getType(), EltSize), // Size 701 Zero // Align 702 }; 703 new CallInst(TheFn, Ops, 4, "", MI); 704 } else { 705 assert(isa<MemSetInst>(MI)); 706 Value *Ops[] = { 707 EltPtr, MI->getOperand(2), // Dest, Value, 708 ConstantInt::get(MI->getOperand(3)->getType(), EltSize), // Size 709 Zero // Align 710 }; 711 new CallInst(TheFn, Ops, 4, "", MI); 712 } 713 } 714 715 // Finally, MI is now dead, as we've modified its actions to occur on all of 716 // the elements of the aggregate. 717 ++UI; 718 MI->eraseFromParent(); 719 } 720} 721 722/// HasStructPadding - Return true if the specified type has any structure 723/// padding, false otherwise. 724static bool HasStructPadding(const Type *Ty, const TargetData &TD) { 725 if (const StructType *STy = dyn_cast<StructType>(Ty)) { 726 const StructLayout *SL = TD.getStructLayout(STy); 727 unsigned PrevFieldBitOffset = 0; 728 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 729 unsigned FieldBitOffset = SL->getElementOffset(i)*8; 730 731 // Padding in sub-elements? 732 if (HasStructPadding(STy->getElementType(i), TD)) 733 return true; 734 735 // Check to see if there is any padding between this element and the 736 // previous one. 737 if (i) { 738 unsigned PrevFieldEnd = 739 PrevFieldBitOffset+TD.getTypeSizeInBits(STy->getElementType(i-1)); 740 if (PrevFieldEnd < FieldBitOffset) 741 return true; 742 } 743 744 PrevFieldBitOffset = FieldBitOffset; 745 } 746 747 // Check for tail padding. 748 if (unsigned EltCount = STy->getNumElements()) { 749 unsigned PrevFieldEnd = PrevFieldBitOffset + 750 TD.getTypeSizeInBits(STy->getElementType(EltCount-1)); 751 if (PrevFieldEnd < SL->getSizeInBytes()*8) 752 return true; 753 } 754 755 } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) { 756 return HasStructPadding(ATy->getElementType(), TD); 757 } 758 return false; 759} 760 761/// isSafeStructAllocaToScalarRepl - Check to see if the specified allocation of 762/// an aggregate can be broken down into elements. Return 0 if not, 3 if safe, 763/// or 1 if safe after canonicalization has been performed. 764/// 765int SROA::isSafeAllocaToScalarRepl(AllocationInst *AI) { 766 // Loop over the use list of the alloca. We can only transform it if all of 767 // the users are safe to transform. 768 AllocaInfo Info; 769 770 for (Value::use_iterator I = AI->use_begin(), E = AI->use_end(); 771 I != E; ++I) { 772 isSafeUseOfAllocation(cast<Instruction>(*I), AI, Info); 773 if (Info.isUnsafe) { 774 DOUT << "Cannot transform: " << *AI << " due to user: " << **I; 775 return 0; 776 } 777 } 778 779 // Okay, we know all the users are promotable. If the aggregate is a memcpy 780 // source and destination, we have to be careful. In particular, the memcpy 781 // could be moving around elements that live in structure padding of the LLVM 782 // types, but may actually be used. In these cases, we refuse to promote the 783 // struct. 784 if (Info.isMemCpySrc && Info.isMemCpyDst && 785 HasStructPadding(AI->getType()->getElementType(), 786 getAnalysis<TargetData>())) 787 return 0; 788 789 // If we require cleanup, return 1, otherwise return 3. 790 return Info.needsCanon ? 1 : 3; 791} 792 793/// CanonicalizeAllocaUsers - If SROA reported that it can promote the specified 794/// allocation, but only if cleaned up, perform the cleanups required. 795void SROA::CanonicalizeAllocaUsers(AllocationInst *AI) { 796 // At this point, we know that the end result will be SROA'd and promoted, so 797 // we can insert ugly code if required so long as sroa+mem2reg will clean it 798 // up. 799 for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end(); 800 UI != E; ) { 801 GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(*UI++); 802 if (!GEPI) continue; 803 gep_type_iterator I = gep_type_begin(GEPI); 804 ++I; 805 806 if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) { 807 uint64_t NumElements = AT->getNumElements(); 808 809 if (!isa<ConstantInt>(I.getOperand())) { 810 if (NumElements == 1) { 811 GEPI->setOperand(2, Constant::getNullValue(Type::Int32Ty)); 812 } else { 813 assert(NumElements == 2 && "Unhandled case!"); 814 // All users of the GEP must be loads. At each use of the GEP, insert 815 // two loads of the appropriate indexed GEP and select between them. 816 Value *IsOne = new ICmpInst(ICmpInst::ICMP_NE, I.getOperand(), 817 Constant::getNullValue(I.getOperand()->getType()), 818 "isone", GEPI); 819 // Insert the new GEP instructions, which are properly indexed. 820 SmallVector<Value*, 8> Indices(GEPI->op_begin()+1, GEPI->op_end()); 821 Indices[1] = Constant::getNullValue(Type::Int32Ty); 822 Value *ZeroIdx = new GetElementPtrInst(GEPI->getOperand(0), 823 &Indices[0], Indices.size(), 824 GEPI->getName()+".0", GEPI); 825 Indices[1] = ConstantInt::get(Type::Int32Ty, 1); 826 Value *OneIdx = new GetElementPtrInst(GEPI->getOperand(0), 827 &Indices[0], Indices.size(), 828 GEPI->getName()+".1", GEPI); 829 // Replace all loads of the variable index GEP with loads from both 830 // indexes and a select. 831 while (!GEPI->use_empty()) { 832 LoadInst *LI = cast<LoadInst>(GEPI->use_back()); 833 Value *Zero = new LoadInst(ZeroIdx, LI->getName()+".0", LI); 834 Value *One = new LoadInst(OneIdx , LI->getName()+".1", LI); 835 Value *R = new SelectInst(IsOne, One, Zero, LI->getName(), LI); 836 LI->replaceAllUsesWith(R); 837 LI->eraseFromParent(); 838 } 839 GEPI->eraseFromParent(); 840 } 841 } 842 } 843 } 844} 845 846/// MergeInType - Add the 'In' type to the accumulated type so far. If the 847/// types are incompatible, return true, otherwise update Accum and return 848/// false. 849/// 850/// There are three cases we handle here: 851/// 1) An effectively-integer union, where the pieces are stored into as 852/// smaller integers (common with byte swap and other idioms). 853/// 2) A union of vector types of the same size and potentially its elements. 854/// Here we turn element accesses into insert/extract element operations. 855/// 3) A union of scalar types, such as int/float or int/pointer. Here we 856/// merge together into integers, allowing the xform to work with #1 as 857/// well. 858static bool MergeInType(const Type *In, const Type *&Accum, 859 const TargetData &TD) { 860 // If this is our first type, just use it. 861 const VectorType *PTy; 862 if (Accum == Type::VoidTy || In == Accum) { 863 Accum = In; 864 } else if (In == Type::VoidTy) { 865 // Noop. 866 } else if (In->isInteger() && Accum->isInteger()) { // integer union. 867 // Otherwise pick whichever type is larger. 868 if (cast<IntegerType>(In)->getBitWidth() > 869 cast<IntegerType>(Accum)->getBitWidth()) 870 Accum = In; 871 } else if (isa<PointerType>(In) && isa<PointerType>(Accum)) { 872 // Pointer unions just stay as one of the pointers. 873 } else if (isa<VectorType>(In) || isa<VectorType>(Accum)) { 874 if ((PTy = dyn_cast<VectorType>(Accum)) && 875 PTy->getElementType() == In) { 876 // Accum is a vector, and we are accessing an element: ok. 877 } else if ((PTy = dyn_cast<VectorType>(In)) && 878 PTy->getElementType() == Accum) { 879 // In is a vector, and accum is an element: ok, remember In. 880 Accum = In; 881 } else if ((PTy = dyn_cast<VectorType>(In)) && isa<VectorType>(Accum) && 882 PTy->getBitWidth() == cast<VectorType>(Accum)->getBitWidth()) { 883 // Two vectors of the same size: keep Accum. 884 } else { 885 // Cannot insert an short into a <4 x int> or handle 886 // <2 x int> -> <4 x int> 887 return true; 888 } 889 } else { 890 // Pointer/FP/Integer unions merge together as integers. 891 switch (Accum->getTypeID()) { 892 case Type::PointerTyID: Accum = TD.getIntPtrType(); break; 893 case Type::FloatTyID: Accum = Type::Int32Ty; break; 894 case Type::DoubleTyID: Accum = Type::Int64Ty; break; 895 default: 896 assert(Accum->isInteger() && "Unknown FP type!"); 897 break; 898 } 899 900 switch (In->getTypeID()) { 901 case Type::PointerTyID: In = TD.getIntPtrType(); break; 902 case Type::FloatTyID: In = Type::Int32Ty; break; 903 case Type::DoubleTyID: In = Type::Int64Ty; break; 904 default: 905 assert(In->isInteger() && "Unknown FP type!"); 906 break; 907 } 908 return MergeInType(In, Accum, TD); 909 } 910 return false; 911} 912 913/// getUIntAtLeastAsBitAs - Return an unsigned integer type that is at least 914/// as big as the specified type. If there is no suitable type, this returns 915/// null. 916const Type *getUIntAtLeastAsBitAs(unsigned NumBits) { 917 if (NumBits > 64) return 0; 918 if (NumBits > 32) return Type::Int64Ty; 919 if (NumBits > 16) return Type::Int32Ty; 920 if (NumBits > 8) return Type::Int16Ty; 921 return Type::Int8Ty; 922} 923 924/// CanConvertToScalar - V is a pointer. If we can convert the pointee to a 925/// single scalar integer type, return that type. Further, if the use is not 926/// a completely trivial use that mem2reg could promote, set IsNotTrivial. If 927/// there are no uses of this pointer, return Type::VoidTy to differentiate from 928/// failure. 929/// 930const Type *SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial) { 931 const Type *UsedType = Type::VoidTy; // No uses, no forced type. 932 const TargetData &TD = getAnalysis<TargetData>(); 933 const PointerType *PTy = cast<PointerType>(V->getType()); 934 935 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) { 936 Instruction *User = cast<Instruction>(*UI); 937 938 if (LoadInst *LI = dyn_cast<LoadInst>(User)) { 939 if (MergeInType(LI->getType(), UsedType, TD)) 940 return 0; 941 942 } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) { 943 // Storing the pointer, not into the value? 944 if (SI->getOperand(0) == V) return 0; 945 946 // NOTE: We could handle storing of FP imms into integers here! 947 948 if (MergeInType(SI->getOperand(0)->getType(), UsedType, TD)) 949 return 0; 950 } else if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) { 951 IsNotTrivial = true; 952 const Type *SubTy = CanConvertToScalar(CI, IsNotTrivial); 953 if (!SubTy || MergeInType(SubTy, UsedType, TD)) return 0; 954 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) { 955 // Check to see if this is stepping over an element: GEP Ptr, int C 956 if (GEP->getNumOperands() == 2 && isa<ConstantInt>(GEP->getOperand(1))) { 957 unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getZExtValue(); 958 unsigned ElSize = TD.getTypeSize(PTy->getElementType()); 959 unsigned BitOffset = Idx*ElSize*8; 960 if (BitOffset > 64 || !isPowerOf2_32(ElSize)) return 0; 961 962 IsNotTrivial = true; 963 const Type *SubElt = CanConvertToScalar(GEP, IsNotTrivial); 964 if (SubElt == 0) return 0; 965 if (SubElt != Type::VoidTy && SubElt->isInteger()) { 966 const Type *NewTy = 967 getUIntAtLeastAsBitAs(TD.getTypeSize(SubElt)*8+BitOffset); 968 if (NewTy == 0 || MergeInType(NewTy, UsedType, TD)) return 0; 969 continue; 970 } 971 } else if (GEP->getNumOperands() == 3 && 972 isa<ConstantInt>(GEP->getOperand(1)) && 973 isa<ConstantInt>(GEP->getOperand(2)) && 974 cast<ConstantInt>(GEP->getOperand(1))->isZero()) { 975 // We are stepping into an element, e.g. a structure or an array: 976 // GEP Ptr, int 0, uint C 977 const Type *AggTy = PTy->getElementType(); 978 unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue(); 979 980 if (const ArrayType *ATy = dyn_cast<ArrayType>(AggTy)) { 981 if (Idx >= ATy->getNumElements()) return 0; // Out of range. 982 } else if (const VectorType *VectorTy = dyn_cast<VectorType>(AggTy)) { 983 // Getting an element of the packed vector. 984 if (Idx >= VectorTy->getNumElements()) return 0; // Out of range. 985 986 // Merge in the vector type. 987 if (MergeInType(VectorTy, UsedType, TD)) return 0; 988 989 const Type *SubTy = CanConvertToScalar(GEP, IsNotTrivial); 990 if (SubTy == 0) return 0; 991 992 if (SubTy != Type::VoidTy && MergeInType(SubTy, UsedType, TD)) 993 return 0; 994 995 // We'll need to change this to an insert/extract element operation. 996 IsNotTrivial = true; 997 continue; // Everything looks ok 998 999 } else if (isa<StructType>(AggTy)) { 1000 // Structs are always ok. 1001 } else { 1002 return 0; 1003 } 1004 const Type *NTy = getUIntAtLeastAsBitAs(TD.getTypeSize(AggTy)*8); 1005 if (NTy == 0 || MergeInType(NTy, UsedType, TD)) return 0; 1006 const Type *SubTy = CanConvertToScalar(GEP, IsNotTrivial); 1007 if (SubTy == 0) return 0; 1008 if (SubTy != Type::VoidTy && MergeInType(SubTy, UsedType, TD)) 1009 return 0; 1010 continue; // Everything looks ok 1011 } 1012 return 0; 1013 } else { 1014 // Cannot handle this! 1015 return 0; 1016 } 1017 } 1018 1019 return UsedType; 1020} 1021 1022/// ConvertToScalar - The specified alloca passes the CanConvertToScalar 1023/// predicate and is non-trivial. Convert it to something that can be trivially 1024/// promoted into a register by mem2reg. 1025void SROA::ConvertToScalar(AllocationInst *AI, const Type *ActualTy) { 1026 DOUT << "CONVERT TO SCALAR: " << *AI << " TYPE = " 1027 << *ActualTy << "\n"; 1028 ++NumConverted; 1029 1030 BasicBlock *EntryBlock = AI->getParent(); 1031 assert(EntryBlock == &EntryBlock->getParent()->getEntryBlock() && 1032 "Not in the entry block!"); 1033 EntryBlock->getInstList().remove(AI); // Take the alloca out of the program. 1034 1035 // Create and insert the alloca. 1036 AllocaInst *NewAI = new AllocaInst(ActualTy, 0, AI->getName(), 1037 EntryBlock->begin()); 1038 ConvertUsesToScalar(AI, NewAI, 0); 1039 delete AI; 1040} 1041 1042 1043/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new alloca 1044/// directly. This happens when we are converting an "integer union" to a 1045/// single integer scalar, or when we are converting a "vector union" to a 1046/// vector with insert/extractelement instructions. 1047/// 1048/// Offset is an offset from the original alloca, in bits that need to be 1049/// shifted to the right. By the end of this, there should be no uses of Ptr. 1050void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset) { 1051 const TargetData &TD = getAnalysis<TargetData>(); 1052 while (!Ptr->use_empty()) { 1053 Instruction *User = cast<Instruction>(Ptr->use_back()); 1054 1055 if (LoadInst *LI = dyn_cast<LoadInst>(User)) { 1056 // The load is a bit extract from NewAI shifted right by Offset bits. 1057 Value *NV = new LoadInst(NewAI, LI->getName(), LI); 1058 if (NV->getType() == LI->getType()) { 1059 // We win, no conversion needed. 1060 } else if (const VectorType *PTy = dyn_cast<VectorType>(NV->getType())) { 1061 // If the result alloca is a vector type, this is either an element 1062 // access or a bitcast to another vector type. 1063 if (isa<VectorType>(LI->getType())) { 1064 NV = new BitCastInst(NV, LI->getType(), LI->getName(), LI); 1065 } else { 1066 // Must be an element access. 1067 unsigned Elt = Offset/(TD.getTypeSize(PTy->getElementType())*8); 1068 NV = new ExtractElementInst( 1069 NV, ConstantInt::get(Type::Int32Ty, Elt), "tmp", LI); 1070 } 1071 } else if (isa<PointerType>(NV->getType())) { 1072 assert(isa<PointerType>(LI->getType())); 1073 // Must be ptr->ptr cast. Anything else would result in NV being 1074 // an integer. 1075 NV = new BitCastInst(NV, LI->getType(), LI->getName(), LI); 1076 } else { 1077 const IntegerType *NTy = cast<IntegerType>(NV->getType()); 1078 unsigned LIBitWidth = TD.getTypeSizeInBits(LI->getType()); 1079 1080 // If this is a big-endian system and the load is narrower than the 1081 // full alloca type, we need to do a shift to get the right bits. 1082 int ShAmt = 0; 1083 if (TD.isBigEndian()) { 1084 ShAmt = NTy->getBitWidth()-LIBitWidth-Offset; 1085 } else { 1086 ShAmt = Offset; 1087 } 1088 1089 // Note: we support negative bitwidths (with shl) which are not defined. 1090 // We do this to support (f.e.) loads off the end of a structure where 1091 // only some bits are used. 1092 if (ShAmt > 0 && (unsigned)ShAmt < NTy->getBitWidth()) 1093 NV = BinaryOperator::createLShr(NV, 1094 ConstantInt::get(NV->getType(),ShAmt), 1095 LI->getName(), LI); 1096 else if (ShAmt < 0 && (unsigned)-ShAmt < NTy->getBitWidth()) 1097 NV = BinaryOperator::createShl(NV, 1098 ConstantInt::get(NV->getType(),-ShAmt), 1099 LI->getName(), LI); 1100 1101 // Finally, unconditionally truncate the integer to the right width. 1102 if (LIBitWidth < NTy->getBitWidth()) 1103 NV = new TruncInst(NV, IntegerType::get(LIBitWidth), 1104 LI->getName(), LI); 1105 1106 // If the result is an integer, this is a trunc or bitcast. 1107 if (isa<IntegerType>(LI->getType())) { 1108 assert(NV->getType() == LI->getType() && "Truncate wasn't enough?"); 1109 } else if (LI->getType()->isFloatingPoint()) { 1110 // Just do a bitcast, we know the sizes match up. 1111 NV = new BitCastInst(NV, LI->getType(), LI->getName(), LI); 1112 } else { 1113 // Otherwise must be a pointer. 1114 NV = new IntToPtrInst(NV, LI->getType(), LI->getName(), LI); 1115 } 1116 } 1117 LI->replaceAllUsesWith(NV); 1118 LI->eraseFromParent(); 1119 } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) { 1120 assert(SI->getOperand(0) != Ptr && "Consistency error!"); 1121 1122 // Convert the stored type to the actual type, shift it left to insert 1123 // then 'or' into place. 1124 Value *SV = SI->getOperand(0); 1125 const Type *AllocaType = NewAI->getType()->getElementType(); 1126 if (SV->getType() == AllocaType) { 1127 // All is well. 1128 } else if (const VectorType *PTy = dyn_cast<VectorType>(AllocaType)) { 1129 Value *Old = new LoadInst(NewAI, NewAI->getName()+".in", SI); 1130 1131 // If the result alloca is a vector type, this is either an element 1132 // access or a bitcast to another vector type. 1133 if (isa<VectorType>(SV->getType())) { 1134 SV = new BitCastInst(SV, AllocaType, SV->getName(), SI); 1135 } else { 1136 // Must be an element insertion. 1137 unsigned Elt = Offset/(TD.getTypeSize(PTy->getElementType())*8); 1138 SV = new InsertElementInst(Old, SV, 1139 ConstantInt::get(Type::Int32Ty, Elt), 1140 "tmp", SI); 1141 } 1142 } else if (isa<PointerType>(AllocaType)) { 1143 // If the alloca type is a pointer, then all the elements must be 1144 // pointers. 1145 if (SV->getType() != AllocaType) 1146 SV = new BitCastInst(SV, AllocaType, SV->getName(), SI); 1147 } else { 1148 Value *Old = new LoadInst(NewAI, NewAI->getName()+".in", SI); 1149 1150 // If SV is a float, convert it to the appropriate integer type. 1151 // If it is a pointer, do the same, and also handle ptr->ptr casts 1152 // here. 1153 unsigned SrcWidth = TD.getTypeSizeInBits(SV->getType()); 1154 unsigned DestWidth = AllocaType->getPrimitiveSizeInBits(); 1155 if (SV->getType()->isFloatingPoint()) 1156 SV = new BitCastInst(SV, IntegerType::get(SrcWidth), 1157 SV->getName(), SI); 1158 else if (isa<PointerType>(SV->getType())) 1159 SV = new PtrToIntInst(SV, TD.getIntPtrType(), SV->getName(), SI); 1160 1161 // Always zero extend the value if needed. 1162 if (SV->getType() != AllocaType) 1163 SV = new ZExtInst(SV, AllocaType, SV->getName(), SI); 1164 1165 // If this is a big-endian system and the store is narrower than the 1166 // full alloca type, we need to do a shift to get the right bits. 1167 int ShAmt = 0; 1168 if (TD.isBigEndian()) { 1169 ShAmt = DestWidth-SrcWidth-Offset; 1170 } else { 1171 ShAmt = Offset; 1172 } 1173 1174 // Note: we support negative bitwidths (with shr) which are not defined. 1175 // We do this to support (f.e.) stores off the end of a structure where 1176 // only some bits in the structure are set. 1177 APInt Mask(APInt::getLowBitsSet(DestWidth, SrcWidth)); 1178 if (ShAmt > 0 && (unsigned)ShAmt < DestWidth) { 1179 SV = BinaryOperator::createShl(SV, 1180 ConstantInt::get(SV->getType(), ShAmt), 1181 SV->getName(), SI); 1182 Mask <<= ShAmt; 1183 } else if (ShAmt < 0 && (unsigned)-ShAmt < DestWidth) { 1184 SV = BinaryOperator::createLShr(SV, 1185 ConstantInt::get(SV->getType(),-ShAmt), 1186 SV->getName(), SI); 1187 Mask = Mask.lshr(ShAmt); 1188 } 1189 1190 // Mask out the bits we are about to insert from the old value, and or 1191 // in the new bits. 1192 if (SrcWidth != DestWidth) { 1193 assert(DestWidth > SrcWidth); 1194 Old = BinaryOperator::createAnd(Old, ConstantInt::get(~Mask), 1195 Old->getName()+".mask", SI); 1196 SV = BinaryOperator::createOr(Old, SV, SV->getName()+".ins", SI); 1197 } 1198 } 1199 new StoreInst(SV, NewAI, SI); 1200 SI->eraseFromParent(); 1201 1202 } else if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) { 1203 ConvertUsesToScalar(CI, NewAI, Offset); 1204 CI->eraseFromParent(); 1205 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) { 1206 const PointerType *AggPtrTy = 1207 cast<PointerType>(GEP->getOperand(0)->getType()); 1208 const TargetData &TD = getAnalysis<TargetData>(); 1209 unsigned AggSizeInBits = TD.getTypeSize(AggPtrTy->getElementType())*8; 1210 1211 // Check to see if this is stepping over an element: GEP Ptr, int C 1212 unsigned NewOffset = Offset; 1213 if (GEP->getNumOperands() == 2) { 1214 unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getZExtValue(); 1215 unsigned BitOffset = Idx*AggSizeInBits; 1216 1217 NewOffset += BitOffset; 1218 } else if (GEP->getNumOperands() == 3) { 1219 // We know that operand #2 is zero. 1220 unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue(); 1221 const Type *AggTy = AggPtrTy->getElementType(); 1222 if (const SequentialType *SeqTy = dyn_cast<SequentialType>(AggTy)) { 1223 unsigned ElSizeBits = TD.getTypeSize(SeqTy->getElementType())*8; 1224 1225 NewOffset += ElSizeBits*Idx; 1226 } else if (const StructType *STy = dyn_cast<StructType>(AggTy)) { 1227 unsigned EltBitOffset = 1228 TD.getStructLayout(STy)->getElementOffset(Idx)*8; 1229 1230 NewOffset += EltBitOffset; 1231 } else { 1232 assert(0 && "Unsupported operation!"); 1233 abort(); 1234 } 1235 } else { 1236 assert(0 && "Unsupported operation!"); 1237 abort(); 1238 } 1239 ConvertUsesToScalar(GEP, NewAI, NewOffset); 1240 GEP->eraseFromParent(); 1241 } else { 1242 assert(0 && "Unsupported operation!"); 1243 abort(); 1244 } 1245 } 1246} 1247 1248 1249/// PointsToConstantGlobal - Return true if V (possibly indirectly) points to 1250/// some part of a constant global variable. This intentionally only accepts 1251/// constant expressions because we don't can't rewrite arbitrary instructions. 1252static bool PointsToConstantGlobal(Value *V) { 1253 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) 1254 return GV->isConstant(); 1255 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) 1256 if (CE->getOpcode() == Instruction::BitCast || 1257 CE->getOpcode() == Instruction::GetElementPtr) 1258 return PointsToConstantGlobal(CE->getOperand(0)); 1259 return false; 1260} 1261 1262/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived) 1263/// pointer to an alloca. Ignore any reads of the pointer, return false if we 1264/// see any stores or other unknown uses. If we see pointer arithmetic, keep 1265/// track of whether it moves the pointer (with isOffset) but otherwise traverse 1266/// the uses. If we see a memcpy/memmove that targets an unoffseted pointer to 1267/// the alloca, and if the source pointer is a pointer to a constant global, we 1268/// can optimize this. 1269static bool isOnlyCopiedFromConstantGlobal(Value *V, Instruction *&TheCopy, 1270 bool isOffset) { 1271 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) { 1272 if (isa<LoadInst>(*UI)) { 1273 // Ignore loads, they are always ok. 1274 continue; 1275 } 1276 if (BitCastInst *BCI = dyn_cast<BitCastInst>(*UI)) { 1277 // If uses of the bitcast are ok, we are ok. 1278 if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, isOffset)) 1279 return false; 1280 continue; 1281 } 1282 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(*UI)) { 1283 // If the GEP has all zero indices, it doesn't offset the pointer. If it 1284 // doesn't, it does. 1285 if (!isOnlyCopiedFromConstantGlobal(GEP, TheCopy, 1286 isOffset || !GEP->hasAllZeroIndices())) 1287 return false; 1288 continue; 1289 } 1290 1291 // If this is isn't our memcpy/memmove, reject it as something we can't 1292 // handle. 1293 if (!isa<MemCpyInst>(*UI) && !isa<MemMoveInst>(*UI)) 1294 return false; 1295 1296 // If we already have seen a copy, reject the second one. 1297 if (TheCopy) return false; 1298 1299 // If the pointer has been offset from the start of the alloca, we can't 1300 // safely handle this. 1301 if (isOffset) return false; 1302 1303 // If the memintrinsic isn't using the alloca as the dest, reject it. 1304 if (UI.getOperandNo() != 1) return false; 1305 1306 MemIntrinsic *MI = cast<MemIntrinsic>(*UI); 1307 1308 // If the source of the memcpy/move is not a constant global, reject it. 1309 if (!PointsToConstantGlobal(MI->getOperand(2))) 1310 return false; 1311 1312 // Otherwise, the transform is safe. Remember the copy instruction. 1313 TheCopy = MI; 1314 } 1315 return true; 1316} 1317 1318/// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is only 1319/// modified by a copy from a constant global. If we can prove this, we can 1320/// replace any uses of the alloca with uses of the global directly. 1321Instruction *SROA::isOnlyCopiedFromConstantGlobal(AllocationInst *AI) { 1322 Instruction *TheCopy = 0; 1323 if (::isOnlyCopiedFromConstantGlobal(AI, TheCopy, false)) 1324 return TheCopy; 1325 return 0; 1326} 1327