ScalarReplAggregates.cpp revision 15256cb14ef340235122750a8d7a6b18baf62ef0
1//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates --------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file was developed by the LLVM research group and is distributed under 6// the University of Illinois Open Source License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This transformation implements the well known scalar replacement of 11// aggregates transformation. This xform breaks up alloca instructions of 12// aggregate type (structure or array) into individual alloca instructions for 13// each member (if possible). Then, if possible, it transforms the individual 14// alloca instructions into nice clean scalar SSA form. 15// 16// This combines a simple SRoA algorithm with the Mem2Reg algorithm because 17// often interact, especially for C++ programs. As such, iterating between 18// SRoA, then Mem2Reg until we run out of things to promote works well. 19// 20//===----------------------------------------------------------------------===// 21 22#include "llvm/Transforms/Scalar.h" 23#include "llvm/Constants.h" 24#include "llvm/DerivedTypes.h" 25#include "llvm/Function.h" 26#include "llvm/Pass.h" 27#include "llvm/Instructions.h" 28#include "llvm/Analysis/Dominators.h" 29#include "llvm/Target/TargetData.h" 30#include "llvm/Transforms/Utils/PromoteMemToReg.h" 31#include "llvm/Support/GetElementPtrTypeIterator.h" 32#include "llvm/Support/MathExtras.h" 33#include "llvm/Support/Debug.h" 34#include "llvm/ADT/Statistic.h" 35#include "llvm/ADT/StringExtras.h" 36#include <iostream> 37using namespace llvm; 38 39namespace { 40 Statistic<> NumReplaced("scalarrepl", "Number of allocas broken up"); 41 Statistic<> NumPromoted("scalarrepl", "Number of allocas promoted"); 42 Statistic<> NumConverted("scalarrepl", 43 "Number of aggregates converted to scalar"); 44 45 struct SROA : public FunctionPass { 46 bool runOnFunction(Function &F); 47 48 bool performScalarRepl(Function &F); 49 bool performPromotion(Function &F); 50 51 // getAnalysisUsage - This pass does not require any passes, but we know it 52 // will not alter the CFG, so say so. 53 virtual void getAnalysisUsage(AnalysisUsage &AU) const { 54 AU.addRequired<DominatorTree>(); 55 AU.addRequired<DominanceFrontier>(); 56 AU.addRequired<TargetData>(); 57 AU.setPreservesCFG(); 58 } 59 60 private: 61 int isSafeElementUse(Value *Ptr); 62 int isSafeUseOfAllocation(Instruction *User); 63 int isSafeAllocaToScalarRepl(AllocationInst *AI); 64 void CanonicalizeAllocaUsers(AllocationInst *AI); 65 AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocationInst *Base); 66 67 const Type *CanConvertToScalar(Value *V, bool &IsNotTrivial); 68 void ConvertToScalar(AllocationInst *AI, const Type *Ty); 69 void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset); 70 }; 71 72 RegisterOpt<SROA> X("scalarrepl", "Scalar Replacement of Aggregates"); 73} 74 75// Public interface to the ScalarReplAggregates pass 76FunctionPass *llvm::createScalarReplAggregatesPass() { return new SROA(); } 77 78 79bool SROA::runOnFunction(Function &F) { 80 bool Changed = performPromotion(F); 81 while (1) { 82 bool LocalChange = performScalarRepl(F); 83 if (!LocalChange) break; // No need to repromote if no scalarrepl 84 Changed = true; 85 LocalChange = performPromotion(F); 86 if (!LocalChange) break; // No need to re-scalarrepl if no promotion 87 } 88 89 return Changed; 90} 91 92 93bool SROA::performPromotion(Function &F) { 94 std::vector<AllocaInst*> Allocas; 95 const TargetData &TD = getAnalysis<TargetData>(); 96 DominatorTree &DT = getAnalysis<DominatorTree>(); 97 DominanceFrontier &DF = getAnalysis<DominanceFrontier>(); 98 99 BasicBlock &BB = F.getEntryBlock(); // Get the entry node for the function 100 101 bool Changed = false; 102 103 while (1) { 104 Allocas.clear(); 105 106 // Find allocas that are safe to promote, by looking at all instructions in 107 // the entry node 108 for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I) 109 if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) // Is it an alloca? 110 if (isAllocaPromotable(AI, TD)) 111 Allocas.push_back(AI); 112 113 if (Allocas.empty()) break; 114 115 PromoteMemToReg(Allocas, DT, DF, TD); 116 NumPromoted += Allocas.size(); 117 Changed = true; 118 } 119 120 return Changed; 121} 122 123// performScalarRepl - This algorithm is a simple worklist driven algorithm, 124// which runs on all of the malloc/alloca instructions in the function, removing 125// them if they are only used by getelementptr instructions. 126// 127bool SROA::performScalarRepl(Function &F) { 128 std::vector<AllocationInst*> WorkList; 129 130 // Scan the entry basic block, adding any alloca's and mallocs to the worklist 131 BasicBlock &BB = F.getEntryBlock(); 132 for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I) 133 if (AllocationInst *A = dyn_cast<AllocationInst>(I)) 134 WorkList.push_back(A); 135 136 // Process the worklist 137 bool Changed = false; 138 while (!WorkList.empty()) { 139 AllocationInst *AI = WorkList.back(); 140 WorkList.pop_back(); 141 142 // If we can turn this aggregate value (potentially with casts) into a 143 // simple scalar value that can be mem2reg'd into a register value. 144 bool IsNotTrivial = false; 145 if (const Type *ActualType = CanConvertToScalar(AI, IsNotTrivial)) 146 if (IsNotTrivial) { 147 ConvertToScalar(AI, ActualType); 148 Changed = true; 149 continue; 150 } 151 152 // We cannot transform the allocation instruction if it is an array 153 // allocation (allocations OF arrays are ok though), and an allocation of a 154 // scalar value cannot be decomposed at all. 155 // 156 if (AI->isArrayAllocation() || 157 (!isa<StructType>(AI->getAllocatedType()) && 158 !isa<ArrayType>(AI->getAllocatedType()))) continue; 159 160 // Check that all of the users of the allocation are capable of being 161 // transformed. 162 switch (isSafeAllocaToScalarRepl(AI)) { 163 default: assert(0 && "Unexpected value!"); 164 case 0: // Not safe to scalar replace. 165 continue; 166 case 1: // Safe, but requires cleanup/canonicalizations first 167 CanonicalizeAllocaUsers(AI); 168 case 3: // Safe to scalar replace. 169 break; 170 } 171 172 DEBUG(std::cerr << "Found inst to xform: " << *AI); 173 Changed = true; 174 175 std::vector<AllocaInst*> ElementAllocas; 176 if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) { 177 ElementAllocas.reserve(ST->getNumContainedTypes()); 178 for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) { 179 AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0, 180 AI->getAlignment(), 181 AI->getName() + "." + utostr(i), AI); 182 ElementAllocas.push_back(NA); 183 WorkList.push_back(NA); // Add to worklist for recursive processing 184 } 185 } else { 186 const ArrayType *AT = cast<ArrayType>(AI->getAllocatedType()); 187 ElementAllocas.reserve(AT->getNumElements()); 188 const Type *ElTy = AT->getElementType(); 189 for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) { 190 AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(), 191 AI->getName() + "." + utostr(i), AI); 192 ElementAllocas.push_back(NA); 193 WorkList.push_back(NA); // Add to worklist for recursive processing 194 } 195 } 196 197 // Now that we have created the alloca instructions that we want to use, 198 // expand the getelementptr instructions to use them. 199 // 200 while (!AI->use_empty()) { 201 Instruction *User = cast<Instruction>(AI->use_back()); 202 GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User); 203 // We now know that the GEP is of the form: GEP <ptr>, 0, <cst> 204 unsigned Idx = 205 (unsigned)cast<ConstantInt>(GEPI->getOperand(2))->getRawValue(); 206 207 assert(Idx < ElementAllocas.size() && "Index out of range?"); 208 AllocaInst *AllocaToUse = ElementAllocas[Idx]; 209 210 Value *RepValue; 211 if (GEPI->getNumOperands() == 3) { 212 // Do not insert a new getelementptr instruction with zero indices, only 213 // to have it optimized out later. 214 RepValue = AllocaToUse; 215 } else { 216 // We are indexing deeply into the structure, so we still need a 217 // getelement ptr instruction to finish the indexing. This may be 218 // expanded itself once the worklist is rerun. 219 // 220 std::string OldName = GEPI->getName(); // Steal the old name. 221 std::vector<Value*> NewArgs; 222 NewArgs.push_back(Constant::getNullValue(Type::IntTy)); 223 NewArgs.insert(NewArgs.end(), GEPI->op_begin()+3, GEPI->op_end()); 224 GEPI->setName(""); 225 RepValue = new GetElementPtrInst(AllocaToUse, NewArgs, OldName, GEPI); 226 } 227 228 // Move all of the users over to the new GEP. 229 GEPI->replaceAllUsesWith(RepValue); 230 // Delete the old GEP 231 GEPI->eraseFromParent(); 232 } 233 234 // Finally, delete the Alloca instruction 235 AI->getParent()->getInstList().erase(AI); 236 NumReplaced++; 237 } 238 239 return Changed; 240} 241 242 243/// isSafeElementUse - Check to see if this use is an allowed use for a 244/// getelementptr instruction of an array aggregate allocation. 245/// 246int SROA::isSafeElementUse(Value *Ptr) { 247 for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end(); 248 I != E; ++I) { 249 Instruction *User = cast<Instruction>(*I); 250 switch (User->getOpcode()) { 251 case Instruction::Load: break; 252 case Instruction::Store: 253 // Store is ok if storing INTO the pointer, not storing the pointer 254 if (User->getOperand(0) == Ptr) return 0; 255 break; 256 case Instruction::GetElementPtr: { 257 GetElementPtrInst *GEP = cast<GetElementPtrInst>(User); 258 if (GEP->getNumOperands() > 1) { 259 if (!isa<Constant>(GEP->getOperand(1)) || 260 !cast<Constant>(GEP->getOperand(1))->isNullValue()) 261 return 0; // Using pointer arithmetic to navigate the array... 262 } 263 if (!isSafeElementUse(GEP)) return 0; 264 break; 265 } 266 default: 267 DEBUG(std::cerr << " Transformation preventing inst: " << *User); 268 return 0; 269 } 270 } 271 return 3; // All users look ok :) 272} 273 274/// AllUsersAreLoads - Return true if all users of this value are loads. 275static bool AllUsersAreLoads(Value *Ptr) { 276 for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end(); 277 I != E; ++I) 278 if (cast<Instruction>(*I)->getOpcode() != Instruction::Load) 279 return false; 280 return true; 281} 282 283/// isSafeUseOfAllocation - Check to see if this user is an allowed use for an 284/// aggregate allocation. 285/// 286int SROA::isSafeUseOfAllocation(Instruction *User) { 287 if (!isa<GetElementPtrInst>(User)) return 0; 288 289 GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User); 290 gep_type_iterator I = gep_type_begin(GEPI), E = gep_type_end(GEPI); 291 292 // The GEP is safe to transform if it is of the form GEP <ptr>, 0, <cst> 293 if (I == E || 294 I.getOperand() != Constant::getNullValue(I.getOperand()->getType())) 295 return 0; 296 297 ++I; 298 if (I == E) return 0; // ran out of GEP indices?? 299 300 // If this is a use of an array allocation, do a bit more checking for sanity. 301 if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) { 302 uint64_t NumElements = AT->getNumElements(); 303 304 if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand())) { 305 // Check to make sure that index falls within the array. If not, 306 // something funny is going on, so we won't do the optimization. 307 // 308 if (cast<ConstantInt>(GEPI->getOperand(2))->getRawValue() >= NumElements) 309 return 0; 310 311 } else { 312 // If this is an array index and the index is not constant, we cannot 313 // promote... that is unless the array has exactly one or two elements in 314 // it, in which case we CAN promote it, but we have to canonicalize this 315 // out if this is the only problem. 316 if (NumElements == 1 || NumElements == 2) 317 return AllUsersAreLoads(GEPI) ? 1 : 0; // Canonicalization required! 318 return 0; 319 } 320 } 321 322 // If there are any non-simple uses of this getelementptr, make sure to reject 323 // them. 324 return isSafeElementUse(GEPI); 325} 326 327/// isSafeStructAllocaToScalarRepl - Check to see if the specified allocation of 328/// an aggregate can be broken down into elements. Return 0 if not, 3 if safe, 329/// or 1 if safe after canonicalization has been performed. 330/// 331int SROA::isSafeAllocaToScalarRepl(AllocationInst *AI) { 332 // Loop over the use list of the alloca. We can only transform it if all of 333 // the users are safe to transform. 334 // 335 int isSafe = 3; 336 for (Value::use_iterator I = AI->use_begin(), E = AI->use_end(); 337 I != E; ++I) { 338 isSafe &= isSafeUseOfAllocation(cast<Instruction>(*I)); 339 if (isSafe == 0) { 340 DEBUG(std::cerr << "Cannot transform: " << *AI << " due to user: " 341 << **I); 342 return 0; 343 } 344 } 345 // If we require cleanup, isSafe is now 1, otherwise it is 3. 346 return isSafe; 347} 348 349/// CanonicalizeAllocaUsers - If SROA reported that it can promote the specified 350/// allocation, but only if cleaned up, perform the cleanups required. 351void SROA::CanonicalizeAllocaUsers(AllocationInst *AI) { 352 // At this point, we know that the end result will be SROA'd and promoted, so 353 // we can insert ugly code if required so long as sroa+mem2reg will clean it 354 // up. 355 for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end(); 356 UI != E; ) { 357 GetElementPtrInst *GEPI = cast<GetElementPtrInst>(*UI++); 358 gep_type_iterator I = gep_type_begin(GEPI); 359 ++I; 360 361 if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) { 362 uint64_t NumElements = AT->getNumElements(); 363 364 if (!isa<ConstantInt>(I.getOperand())) { 365 if (NumElements == 1) { 366 GEPI->setOperand(2, Constant::getNullValue(Type::IntTy)); 367 } else { 368 assert(NumElements == 2 && "Unhandled case!"); 369 // All users of the GEP must be loads. At each use of the GEP, insert 370 // two loads of the appropriate indexed GEP and select between them. 371 Value *IsOne = BinaryOperator::createSetNE(I.getOperand(), 372 Constant::getNullValue(I.getOperand()->getType()), 373 "isone", GEPI); 374 // Insert the new GEP instructions, which are properly indexed. 375 std::vector<Value*> Indices(GEPI->op_begin()+1, GEPI->op_end()); 376 Indices[1] = Constant::getNullValue(Type::IntTy); 377 Value *ZeroIdx = new GetElementPtrInst(GEPI->getOperand(0), Indices, 378 GEPI->getName()+".0", GEPI); 379 Indices[1] = ConstantInt::get(Type::IntTy, 1); 380 Value *OneIdx = new GetElementPtrInst(GEPI->getOperand(0), Indices, 381 GEPI->getName()+".1", GEPI); 382 // Replace all loads of the variable index GEP with loads from both 383 // indexes and a select. 384 while (!GEPI->use_empty()) { 385 LoadInst *LI = cast<LoadInst>(GEPI->use_back()); 386 Value *Zero = new LoadInst(ZeroIdx, LI->getName()+".0", LI); 387 Value *One = new LoadInst(OneIdx , LI->getName()+".1", LI); 388 Value *R = new SelectInst(IsOne, One, Zero, LI->getName(), LI); 389 LI->replaceAllUsesWith(R); 390 LI->eraseFromParent(); 391 } 392 GEPI->eraseFromParent(); 393 } 394 } 395 } 396 } 397} 398 399/// MergeInType - Add the 'In' type to the accumulated type so far. If the 400/// types are incompatible, return true, otherwise update Accum and return 401/// false. 402static bool MergeInType(const Type *In, const Type *&Accum) { 403 if (!In->isIntegral()) return true; 404 405 // If this is our first type, just use it. 406 if (Accum == Type::VoidTy) { 407 Accum = In; 408 } else { 409 // Otherwise pick whichever type is larger. 410 if (In->getTypeID() > Accum->getTypeID()) 411 Accum = In; 412 } 413 return false; 414} 415 416/// getUIntAtLeastAsBitAs - Return an unsigned integer type that is at least 417/// as big as the specified type. If there is no suitable type, this returns 418/// null. 419const Type *getUIntAtLeastAsBitAs(unsigned NumBits) { 420 if (NumBits > 64) return 0; 421 if (NumBits > 32) return Type::ULongTy; 422 if (NumBits > 16) return Type::UIntTy; 423 if (NumBits > 8) return Type::UShortTy; 424 return Type::UByteTy; 425} 426 427/// CanConvertToScalar - V is a pointer. If we can convert the pointee to a 428/// single scalar integer type, return that type. Further, if the use is not 429/// a completely trivial use that mem2reg could promote, set IsNotTrivial. If 430/// there are no uses of this pointer, return Type::VoidTy to differentiate from 431/// failure. 432/// 433const Type *SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial) { 434 const Type *UsedType = Type::VoidTy; // No uses, no forced type. 435 const TargetData &TD = getAnalysis<TargetData>(); 436 const PointerType *PTy = cast<PointerType>(V->getType()); 437 438 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) { 439 Instruction *User = cast<Instruction>(*UI); 440 441 if (LoadInst *LI = dyn_cast<LoadInst>(User)) { 442 if (MergeInType(LI->getType(), UsedType)) 443 return 0; 444 445 } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) { 446 // Storing the pointer, not the into the value? 447 if (SI->getOperand(0) == V) return 0; 448 449 // NOTE: We could handle storing of FP imms here! 450 451 if (MergeInType(SI->getOperand(0)->getType(), UsedType)) 452 return 0; 453 } else if (CastInst *CI = dyn_cast<CastInst>(User)) { 454 if (!isa<PointerType>(CI->getType())) return 0; 455 IsNotTrivial = true; 456 const Type *SubTy = CanConvertToScalar(CI, IsNotTrivial); 457 if (!SubTy || MergeInType(SubTy, UsedType)) return 0; 458 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) { 459 // Check to see if this is stepping over an element: GEP Ptr, int C 460 if (GEP->getNumOperands() == 2 && isa<ConstantInt>(GEP->getOperand(1))) { 461 unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getRawValue(); 462 unsigned ElSize = TD.getTypeSize(PTy->getElementType()); 463 unsigned BitOffset = Idx*ElSize*8; 464 if (BitOffset > 64 || !isPowerOf2_32(ElSize)) return 0; 465 466 IsNotTrivial = true; 467 const Type *SubElt = CanConvertToScalar(GEP, IsNotTrivial); 468 if (SubElt == 0) return 0; 469 if (SubElt != Type::VoidTy) { 470 const Type *NewTy = 471 getUIntAtLeastAsBitAs(SubElt->getPrimitiveSizeInBits()+BitOffset); 472 if (NewTy == 0 || MergeInType(NewTy, UsedType)) return 0; 473 continue; 474 } 475 } else if (GEP->getNumOperands() == 3 && 476 isa<ConstantInt>(GEP->getOperand(1)) && 477 isa<ConstantInt>(GEP->getOperand(2)) && 478 cast<Constant>(GEP->getOperand(1))->isNullValue()) { 479 // We are stepping into an element, e.g. a structure or an array: 480 // GEP Ptr, int 0, uint C 481 const Type *AggTy = PTy->getElementType(); 482 unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getRawValue(); 483 484 if (const ArrayType *ATy = dyn_cast<ArrayType>(AggTy)) { 485 if (Idx >= ATy->getNumElements()) return 0; // Out of range. 486 } else if (const PackedType *PTy = dyn_cast<PackedType>(AggTy)) { 487 if (Idx >= PTy->getNumElements()) return 0; // Out of range. 488 } else if (isa<StructType>(AggTy)) { 489 // Structs are always ok. 490 } else { 491 return 0; 492 } 493 const Type *NTy = getUIntAtLeastAsBitAs(TD.getTypeSize(AggTy)*8); 494 if (NTy == 0 || MergeInType(NTy, UsedType)) return 0; 495 const Type *SubTy = CanConvertToScalar(GEP, IsNotTrivial); 496 if (SubTy == 0) return 0; 497 if (SubTy != Type::VoidTy && MergeInType(SubTy, UsedType)) 498 return 0; 499 continue; // Everything looks ok 500 } 501 return 0; 502 } else { 503 // Cannot handle this! 504 return 0; 505 } 506 } 507 508 return UsedType; 509} 510 511/// ConvertToScalar - The specified alloca passes the CanConvertToScalar 512/// predicate and is non-trivial. Convert it to something that can be trivially 513/// promoted into a register by mem2reg. 514void SROA::ConvertToScalar(AllocationInst *AI, const Type *ActualTy) { 515 DEBUG(std::cerr << "CONVERT TO SCALAR: " << *AI << " TYPE = " 516 << *ActualTy << "\n"); 517 ++NumConverted; 518 519 BasicBlock *EntryBlock = AI->getParent(); 520 assert(EntryBlock == &EntryBlock->getParent()->front() && 521 "Not in the entry block!"); 522 EntryBlock->getInstList().remove(AI); // Take the alloca out of the program. 523 524 // Create and insert the alloca. 525 AllocaInst *NewAI = new AllocaInst(ActualTy->getUnsignedVersion(), 0, 526 AI->getName(), EntryBlock->begin()); 527 ConvertUsesToScalar(AI, NewAI, 0); 528 delete AI; 529} 530 531 532/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new alloca 533/// directly. Offset is an offset from the original alloca, in bits that need 534/// to be shifted to the right. By the end of this, there should be no uses of 535/// Ptr. 536void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset) { 537 while (!Ptr->use_empty()) { 538 Instruction *User = cast<Instruction>(Ptr->use_back()); 539 540 if (LoadInst *LI = dyn_cast<LoadInst>(User)) { 541 // The load is a bit extract from NewAI shifted right by Offset bits. 542 Value *NV = new LoadInst(NewAI, LI->getName(), LI); 543 if (Offset && Offset < NV->getType()->getPrimitiveSizeInBits()) 544 NV = new ShiftInst(Instruction::Shr, NV, 545 ConstantUInt::get(Type::UByteTy, Offset), 546 LI->getName(), LI); 547 if (NV->getType() != LI->getType()) 548 NV = new CastInst(NV, LI->getType(), LI->getName(), LI); 549 LI->replaceAllUsesWith(NV); 550 LI->eraseFromParent(); 551 } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) { 552 assert(SI->getOperand(0) != Ptr && "Consistency error!"); 553 554 // Convert the stored type to the actual type, shift it left to insert 555 // then 'or' into place. 556 Value *SV = SI->getOperand(0); 557 if (SV->getType() != NewAI->getType()->getElementType() || Offset != 0) { 558 Value *Old = new LoadInst(NewAI, NewAI->getName()+".in", SI); 559 // If SV is signed, convert it to unsigned, so that the next cast zero 560 // extends the value. 561 if (SV->getType()->isSigned()) 562 SV = new CastInst(SV, SV->getType()->getUnsignedVersion(), 563 SV->getName(), SI); 564 SV = new CastInst(SV, Old->getType(), SV->getName(), SI); 565 if (Offset && Offset < SV->getType()->getPrimitiveSizeInBits()) 566 SV = new ShiftInst(Instruction::Shl, SV, 567 ConstantUInt::get(Type::UByteTy, Offset), 568 SV->getName()+".adj", SI); 569 // Mask out the bits we are about to insert from the old value. 570 unsigned TotalBits = SV->getType()->getPrimitiveSizeInBits(); 571 unsigned InsertBits = 572 SI->getOperand(0)->getType()->getPrimitiveSizeInBits(); 573 if (TotalBits != InsertBits) { 574 assert(TotalBits > InsertBits); 575 uint64_t Mask = ~(((1ULL << InsertBits)-1) << Offset); 576 if (TotalBits != 64) 577 Mask = Mask & ((1ULL << TotalBits)-1); 578 Old = BinaryOperator::createAnd(Old, 579 ConstantUInt::get(Old->getType(), Mask), 580 Old->getName()+".mask", SI); 581 SV = BinaryOperator::createOr(Old, SV, SV->getName()+".ins", SI); 582 } 583 } 584 new StoreInst(SV, NewAI, SI); 585 SI->eraseFromParent(); 586 587 } else if (CastInst *CI = dyn_cast<CastInst>(User)) { 588 unsigned NewOff = Offset; 589 const TargetData &TD = getAnalysis<TargetData>(); 590 if (TD.isBigEndian()) { 591 // Adjust the pointer. For example, storing 16-bits into a 32-bit 592 // alloca with just a cast makes it modify the top 16-bits. 593 const Type *SrcTy = cast<PointerType>(Ptr->getType())->getElementType(); 594 const Type *DstTy = cast<PointerType>(CI->getType())->getElementType(); 595 int PtrDiffBits = TD.getTypeSize(SrcTy)*8-TD.getTypeSize(DstTy)*8; 596 NewOff += PtrDiffBits; 597 } 598 ConvertUsesToScalar(CI, NewAI, NewOff); 599 CI->eraseFromParent(); 600 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) { 601 const PointerType *AggPtrTy = 602 cast<PointerType>(GEP->getOperand(0)->getType()); 603 const TargetData &TD = getAnalysis<TargetData>(); 604 unsigned AggSizeInBits = TD.getTypeSize(AggPtrTy->getElementType())*8; 605 606 // Check to see if this is stepping over an element: GEP Ptr, int C 607 unsigned NewOffset = Offset; 608 if (GEP->getNumOperands() == 2) { 609 unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getRawValue(); 610 unsigned BitOffset = Idx*AggSizeInBits; 611 612 if (TD.isLittleEndian()) 613 NewOffset += BitOffset; 614 else 615 NewOffset -= BitOffset; 616 617 } else if (GEP->getNumOperands() == 3) { 618 // We know that operand #2 is zero. 619 unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getRawValue(); 620 const Type *AggTy = AggPtrTy->getElementType(); 621 if (const SequentialType *SeqTy = dyn_cast<SequentialType>(AggTy)) { 622 unsigned ElSizeBits = TD.getTypeSize(SeqTy->getElementType())*8; 623 624 if (TD.isLittleEndian()) 625 NewOffset += ElSizeBits*Idx; 626 else 627 NewOffset += AggSizeInBits-ElSizeBits*(Idx+1); 628 } else if (const StructType *STy = dyn_cast<StructType>(AggTy)) { 629 unsigned EltBitOffset = TD.getStructLayout(STy)->MemberOffsets[Idx]*8; 630 631 if (TD.isLittleEndian()) 632 NewOffset += EltBitOffset; 633 else { 634 const PointerType *ElPtrTy = cast<PointerType>(GEP->getType()); 635 unsigned ElSizeBits = TD.getTypeSize(ElPtrTy->getElementType())*8; 636 NewOffset += AggSizeInBits-(EltBitOffset+ElSizeBits); 637 } 638 639 } else { 640 assert(0 && "Unsupported operation!"); 641 abort(); 642 } 643 } else { 644 assert(0 && "Unsupported operation!"); 645 abort(); 646 } 647 ConvertUsesToScalar(GEP, NewAI, NewOffset); 648 GEP->eraseFromParent(); 649 } else { 650 assert(0 && "Unsupported operation!"); 651 abort(); 652 } 653 } 654} 655