AutoUpgrade.cpp revision f2937ac4eddb5ced78a1d73206de020c6d9e440f
//===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the auto-upgrade helper functions
//
//===----------------------------------------------------------------------===//

#include "llvm/AutoUpgrade.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/IRBuilder.h"
#include <cstring>
using namespace llvm;


// UpgradeIntrinsicFunction1 - Inspect F's name and, if it is a deprecated
// intrinsic, upgrade it.  Returns true when an upgrade is needed.  On a true
// return, NewFn is one of:
//   - F itself, when renaming the function in place was sufficient;
//   - a freshly declared function, when the type also changed (call sites
//     must then be rewritten via UpgradeIntrinsicCall);
//   - null, when each call site must instead be expanded to plain IR
//     (shuffles, multiplies, etc.) by UpgradeIntrinsicCall.
static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
  assert(F && "Illegal to upgrade a non-existent Function.");

  // Get the Function's name.
  const std::string& Name = F->getName();

  // Convenience
  const FunctionType *FTy = F->getFunctionType();

  // Quickly eliminate it, if it's not a candidate.  Every name handled below
  // begins with "llvm." and is longer than 8 characters.
  if (Name.length() <= 8 || Name[0] != 'l' || Name[1] != 'l' ||
      Name[2] != 'v' || Name[3] != 'm' || Name[4] != '.')
    return false;

  Module *M = F->getParent();
  // Dispatch on the first character after the "llvm." prefix.
  switch (Name[5]) {
  default: break;
  case 'a':
    // This upgrades the llvm.atomic.lcs, llvm.atomic.las, llvm.atomic.lss,
    // and atomics with default address spaces to their new names
    // (e.g. llvm.atomic.add.i32 => llvm.atomic.add.i32.p0i32)
    if (Name.compare(5,7,"atomic.",7) == 0) {
      if (Name.compare(12,3,"lcs",3) == 0) {
        // "lcs" (load-compare-swap) became "cmp.swap"; also append the ".p0"
        // pointer qualifier, reusing the element type that follows the last
        // '.' (delim points at the '.' before that type name).
        std::string::size_type delim = Name.find('.',12);
        F->setName("llvm.atomic.cmp.swap" + Name.substr(delim) +
                   ".p0" + Name.substr(delim+1));
        NewFn = F;
        return true;
      }
      else if (Name.compare(12,3,"las",3) == 0) {
        // "las" (load-add-store) became "load.add".
        std::string::size_type delim = Name.find('.',12);
        F->setName("llvm.atomic.load.add"+Name.substr(delim)
                   + ".p0" + Name.substr(delim+1));
        NewFn = F;
        return true;
      }
      else if (Name.compare(12,3,"lss",3) == 0) {
        // "lss" (load-sub-store) became "load.sub".
        std::string::size_type delim = Name.find('.',12);
        F->setName("llvm.atomic.load.sub"+Name.substr(delim)
                   + ".p0" + Name.substr(delim+1));
        NewFn = F;
        return true;
      }
      else if (Name.rfind(".p") == std::string::npos) {
        // We don't have an address space qualifier so this has to be upgraded
        // to the new name. Copy the type name at the end of the intrinsic
        // and add to it
        std::string::size_type delim = Name.find_last_of('.');
        assert(delim != std::string::npos && "can not find type");
        F->setName(Name + ".p0" + Name.substr(delim+1));
        NewFn = F;
        return true;
      }
    }
    break;
  case 'b':
    // This upgrades the name of the llvm.bswap intrinsic function to only use
    // a single type name for overloading. We only care about the old format
    // 'llvm.bswap.i*.i*', so check for 'bswap.' and then for there being
    // a '.' after 'bswap.'
    if (Name.compare(5,6,"bswap.",6) == 0) {
      std::string::size_type delim = Name.find('.',11);

      if (delim != std::string::npos) {
        // Construct the new name as 'llvm.bswap' + '.i*'
        F->setName(Name.substr(0,10)+Name.substr(delim));
        NewFn = F;
        return true;
      }
    }
    break;

  case 'c':
    // We only want to fix the 'llvm.ct*' intrinsics which do not have the
    // correct return type, so we check for the name, and then check if the
    // return type does not match the parameter type.
    if ( (Name.compare(5,5,"ctpop",5) == 0 ||
          Name.compare(5,4,"ctlz",4) == 0 ||
          Name.compare(5,4,"cttz",4) == 0) &&
        FTy->getReturnType() != FTy->getParamType(0)) {
      // We first need to change the name of the old (bad) intrinsic, because
      // its type is incorrect, but we cannot overload that name. We
      // arbitrarily unique it here allowing us to construct a correctly named
      // and typed function below.
      F->setName("");

      // Now construct the new intrinsic with the correct name and type. We
      // leave the old function around in order to query its type, whatever it
      // may be, and correctly convert up to the new type.
      NewFn = cast<Function>(M->getOrInsertFunction(Name,
                                                    FTy->getParamType(0),
                                                    FTy->getParamType(0),
                                                    (Type *)0));
      return true;
    }
    break;

  case 'e':
    // The old llvm.eh.selector.i32 is equivalent to the new llvm.eh.selector.
    if (Name.compare("llvm.eh.selector.i32") == 0) {
      F->setName("llvm.eh.selector");
      NewFn = F;
      return true;
    }
    // The old llvm.eh.typeid.for.i32 is equivalent to llvm.eh.typeid.for.
    if (Name.compare("llvm.eh.typeid.for.i32") == 0) {
      F->setName("llvm.eh.typeid.for");
      NewFn = F;
      return true;
    }
    // Convert the old llvm.eh.selector.i64 to a call to llvm.eh.selector.
    // The return type changed (i64 -> i32), so a simple rename is not enough;
    // call sites are rewritten with a cast by UpgradeIntrinsicCall.
    if (Name.compare("llvm.eh.selector.i64") == 0) {
      NewFn = Intrinsic::getDeclaration(M, Intrinsic::eh_selector);
      return true;
    }
    // Convert the old llvm.eh.typeid.for.i64 to a call to llvm.eh.typeid.for.
    if (Name.compare("llvm.eh.typeid.for.i64") == 0) {
      NewFn = Intrinsic::getDeclaration(M, Intrinsic::eh_typeid_for);
      return true;
    }
    break;

  case 'm': {
    // This upgrades the llvm.memcpy, llvm.memmove, and llvm.memset to the
    // new format that allows overloading the pointer for different address
    // space (e.g., llvm.memcpy.i16 => llvm.memcpy.p0i8.p0i8.i16)
    const char* NewFnName = NULL;
    if (Name.compare(5,8,"memcpy.i",8) == 0) {
      if (Name[13] == '8')
        NewFnName = "llvm.memcpy.p0i8.p0i8.i8";
      else if (Name.compare(13,2,"16") == 0)
        NewFnName = "llvm.memcpy.p0i8.p0i8.i16";
      else if (Name.compare(13,2,"32") == 0)
        NewFnName = "llvm.memcpy.p0i8.p0i8.i32";
      else if (Name.compare(13,2,"64") == 0)
        NewFnName = "llvm.memcpy.p0i8.p0i8.i64";
    } else if (Name.compare(5,9,"memmove.i",9) == 0) {
      if (Name[14] == '8')
        NewFnName = "llvm.memmove.p0i8.p0i8.i8";
      else if (Name.compare(14,2,"16") == 0)
        NewFnName = "llvm.memmove.p0i8.p0i8.i16";
      else if (Name.compare(14,2,"32") == 0)
        NewFnName = "llvm.memmove.p0i8.p0i8.i32";
      else if (Name.compare(14,2,"64") == 0)
        NewFnName = "llvm.memmove.p0i8.p0i8.i64";
    }
    else if (Name.compare(5,8,"memset.i",8) == 0) {
      if (Name[13] == '8')
        NewFnName = "llvm.memset.p0i8.i8";
      else if (Name.compare(13,2,"16") == 0)
        NewFnName = "llvm.memset.p0i8.i16";
      else if (Name.compare(13,2,"32") == 0)
        NewFnName = "llvm.memset.p0i8.i32";
      else if (Name.compare(13,2,"64") == 0)
        NewFnName = "llvm.memset.p0i8.i64";
    }
    if (NewFnName) {
      // Declare the replacement with the old signature plus a trailing i1
      // (the new isVolatile flag); UpgradeIntrinsicCall appends the extra
      // argument at each call site.
      const FunctionType *FTy = F->getFunctionType();
      NewFn = cast<Function>(M->getOrInsertFunction(NewFnName,
                                            FTy->getReturnType(),
                                            FTy->getParamType(0),
                                            FTy->getParamType(1),
                                            FTy->getParamType(2),
                                            FTy->getParamType(3),
                                            Type::getInt1Ty(F->getContext()),
                                            (Type *)0));
      return true;
    }
    break;
  }
  case 'p':
    // This upgrades the llvm.part.select overloaded intrinsic names to only
    // use one type specifier in the name. We only care about the old format
    // 'llvm.part.select.i*.i*', and solve as above with bswap.
    if (Name.compare(5,12,"part.select.",12) == 0) {
      std::string::size_type delim = Name.find('.',17);

      if (delim != std::string::npos) {
        // Construct a new name as 'llvm.part.select' + '.i*'
        F->setName(Name.substr(0,16)+Name.substr(delim));
        NewFn = F;
        return true;
      }
      break;
    }

    // This upgrades the llvm.part.set intrinsics similarly as above, however
    // we care about 'llvm.part.set.i*.i*.i*', but only the first two types
    // must match. There is an additional type specifier after these two
    // matching types that we must retain when upgrading.  Thus, we require
    // finding 2 periods, not just one, after the intrinsic name.
    if (Name.compare(5,9,"part.set.",9) == 0) {
      std::string::size_type delim = Name.find('.',14);

      if (delim != std::string::npos &&
          Name.find('.',delim+1) != std::string::npos) {
        // Construct a new name as 'llvm.part.set' + '.i*.i*'
        F->setName(Name.substr(0,13)+Name.substr(delim));
        NewFn = F;
        return true;
      }
      break;
    }

    break;
  case 'x':
    // This fixes all MMX shift intrinsic instructions to take a
    // v1i64 instead of a v2i32 as the second parameter.
    // The trailing Name[17] != 'i' check excludes the "...i" immediate-shift
    // variants (e.g. pslli), which are not upgraded here.
    if (Name.compare(5,10,"x86.mmx.ps",10) == 0 &&
        (Name.compare(13,4,"psll", 4) == 0 ||
         Name.compare(13,4,"psra", 4) == 0 ||
         Name.compare(13,4,"psrl", 4) == 0) && Name[17] != 'i') {

      const llvm::Type *VT =
        VectorType::get(IntegerType::get(FTy->getContext(), 64), 1);

      // We don't have to do anything if the parameter already has
      // the correct type.
      if (FTy->getParamType(1) == VT)
        break;

      //  We first need to change the name of the old (bad) intrinsic, because
      //  its type is incorrect, but we cannot overload that name. We
      //  arbitrarily unique it here allowing us to construct a correctly named
      //  and typed function below.
      F->setName("");

      assert(FTy->getNumParams() == 2 && "MMX shift intrinsics take 2 args!");

      //  Now construct the new intrinsic with the correct name and type. We
      //  leave the old function around in order to query its type, whatever it
      //  may be, and correctly convert up to the new type.
      NewFn = cast<Function>(M->getOrInsertFunction(Name,
                                                    FTy->getReturnType(),
                                                    FTy->getParamType(0),
                                                    VT,
                                                    (Type *)0));
      return true;
    } else if (Name.compare(5,17,"x86.sse2.loadh.pd",17) == 0 ||
               Name.compare(5,17,"x86.sse2.loadl.pd",17) == 0 ||
               Name.compare(5,16,"x86.sse2.movl.dq",16) == 0 ||
               Name.compare(5,15,"x86.sse2.movs.d",15) == 0 ||
               Name.compare(5,16,"x86.sse2.shuf.pd",16) == 0 ||
               Name.compare(5,18,"x86.sse2.unpckh.pd",18) == 0 ||
               Name.compare(5,18,"x86.sse2.unpckl.pd",18) == 0 ||
               Name.compare(5,20,"x86.sse2.punpckh.qdq",20) == 0 ||
               Name.compare(5,20,"x86.sse2.punpckl.qdq",20) == 0) {
      // Calls to these intrinsics are transformed into ShuffleVector's.
      NewFn = 0;
      return true;
    } else if (Name.compare(5, 16, "x86.sse41.pmulld", 16) == 0) {
      // Calls to these intrinsics are transformed into vector multiplies.
      NewFn = 0;
      return true;
    } else if (Name.compare(5, 18, "x86.ssse3.palign.r", 18) == 0 ||
               Name.compare(5, 22, "x86.ssse3.palign.r.128", 22) == 0) {
      // Calls to these intrinsics are transformed into vector shuffles, shifts,
      // or 0.
      NewFn = 0;
      return true;
    }

    break;
  }

  //  This may not belong here. This function is effectively being overloaded
  //  to both detect an intrinsic which needs upgrading, and to provide the
  //  upgraded form of the intrinsic. We should perhaps have two separate
  //  functions for this.
  return false;
}

// UpgradeIntrinsicFunction - Public wrapper around UpgradeIntrinsicFunction1
// that additionally refreshes the (possibly renamed/replaced) intrinsic's
// attributes from the current intrinsic tables.
bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
  NewFn = 0;
  bool Upgraded = UpgradeIntrinsicFunction1(F, NewFn);

  // Upgrade intrinsic attributes.  This does not change the function.
  if (NewFn)
    F = NewFn;
  if (unsigned id = F->getIntrinsicID())
    F->setAttributes(Intrinsic::getAttributes((Intrinsic::ID)id));
  return Upgraded;
}

// UpgradeIntrinsicCall - Upgrade a call to an old intrinsic to be a call the 
// upgraded intrinsic. All argument and return casting must be provided in 
// order to seamlessly integrate with existing context.
//
// NewFn is the replacement produced by UpgradeIntrinsicFunction; when it is
// null the call is expanded inline (shuffles/multiplies/shifts) and CI is
// erased, otherwise a new call to NewFn is built with the necessary casts.
void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
  Function *F = CI->getCalledFunction();
  LLVMContext &C = CI->getContext();

  assert(F && "CallInst has no function associated with it.");

  if (!NewFn) {
    // No replacement declaration: lower the old intrinsic directly to IR.
    bool isLoadH = false, isLoadL = false, isMovL = false;
    bool isMovSD = false, isShufPD = false;
    bool isUnpckhPD = false, isUnpcklPD = false;
    bool isPunpckhQPD = false, isPunpcklQPD = false;
    if (F->getName() == "llvm.x86.sse2.loadh.pd")
      isLoadH = true;
    else if (F->getName() == "llvm.x86.sse2.loadl.pd")
      isLoadL = true;
    else if (F->getName() == "llvm.x86.sse2.movl.dq")
      isMovL = true;
    else if (F->getName() == "llvm.x86.sse2.movs.d")
      isMovSD = true;
    else if (F->getName() == "llvm.x86.sse2.shuf.pd")
      isShufPD = true;
    else if (F->getName() == "llvm.x86.sse2.unpckh.pd")
      isUnpckhPD = true;
    else if (F->getName() == "llvm.x86.sse2.unpckl.pd")
      isUnpcklPD = true;
    else if (F->getName() == "llvm.x86.sse2.punpckh.qdq")
      isPunpckhQPD = true;
    else if (F->getName() == "llvm.x86.sse2.punpckl.qdq")
      isPunpcklQPD = true;

    if (isLoadH || isLoadL || isMovL || isMovSD || isShufPD ||
        isUnpckhPD || isUnpcklPD || isPunpckhQPD || isPunpcklQPD) {
      // All of these lower to a single shufflevector (plus, for the loads,
      // a scalar load inserted into an undef vector).
      std::vector<Constant*> Idxs;
      Value *Op0 = CI->getArgOperand(0);
      ShuffleVectorInst *SI = NULL;
      if (isLoadH || isLoadL) {
        Value *Op1 = UndefValue::get(Op0->getType());
        Value *Addr = new BitCastInst(CI->getArgOperand(1), 
                                  Type::getDoublePtrTy(C),
                                      "upgraded.", CI);
        Value *Load = new LoadInst(Addr, "upgraded.", false, 8, CI);
        Value *Idx = ConstantInt::get(Type::getInt32Ty(C), 0);
        Op1 = InsertElementInst::Create(Op1, Load, Idx, "upgraded.", CI);

        if (isLoadH) {
          // loadh: keep the low element, replace the high one with the load.
          Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
          Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
        } else {
          // loadl: replace the low element, keep the high one.
          Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
          Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 1));
        }
        Value *Mask = ConstantVector::get(Idxs);
        SI = new ShuffleVectorInst(Op0, Op1, Mask, "upgraded.", CI);
      } else if (isMovL) {
        // movl.dq: low half from the operand, high half zeroed.
        Constant *Zero = ConstantInt::get(Type::getInt32Ty(C), 0);
        Idxs.push_back(Zero);
        Idxs.push_back(Zero);
        Idxs.push_back(Zero);
        Idxs.push_back(Zero);
        Value *ZeroV = ConstantVector::get(Idxs);

        Idxs.clear(); 
        Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 4));
        Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 5));
        Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
        Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 3));
        Value *Mask = ConstantVector::get(Idxs);
        SI = new ShuffleVectorInst(ZeroV, Op0, Mask, "upgraded.", CI);
      } else if (isMovSD ||
                 isUnpckhPD || isUnpcklPD || isPunpckhQPD || isPunpcklQPD) {
        Value *Op1 = CI->getArgOperand(1);
        if (isMovSD) {
          // movs.d: low element from Op1, high element from Op0.
          Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
          Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 1));
        } else if (isUnpckhPD || isPunpckhQPD) {
          // unpckh: interleave the high elements of both operands.
          Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 1));
          Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 3));
        } else {
          // unpckl: interleave the low elements of both operands.
          Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
          Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
        }
        Value *Mask = ConstantVector::get(Idxs);
        SI = new ShuffleVectorInst(Op0, Op1, Mask, "upgraded.", CI);
      } else if (isShufPD) {
        // shuf.pd: the immediate's low two bits select one element from each
        // operand (bit 0 -> element from Op0, bit 1 -> element from Op1).
        Value *Op1 = CI->getArgOperand(1);
        unsigned MaskVal = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
        Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), MaskVal & 1));
        Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C),
                                        ((MaskVal >> 1) & 1)+2));
        Value *Mask = ConstantVector::get(Idxs);
        SI = new ShuffleVectorInst(Op0, Op1, Mask, "upgraded.", CI);
      }

      assert(SI && "Unexpected!");

      // Handle any uses of the old CallInst.
      if (!CI->use_empty())
        //  Replace all uses of the old call with the new cast which has the 
        //  correct type.
        CI->replaceAllUsesWith(SI);
      
      //  Clean up the old call now that it has been completely upgraded.
      CI->eraseFromParent();
    } else if (F->getName() == "llvm.x86.sse41.pmulld") {
      // Upgrade this set of intrinsics into vector multiplies.
      Instruction *Mul = BinaryOperator::CreateMul(CI->getArgOperand(0),
                                                   CI->getArgOperand(1),
                                                   CI->getName(),
                                                   CI);
      // Fix up all the uses with our new multiply.
      if (!CI->use_empty())
        CI->replaceAllUsesWith(Mul);
        
      // Remove upgraded multiply.
      CI->eraseFromParent();
    } else if (F->getName() == "llvm.x86.ssse3.palign.r") {
      // 64-bit (MMX) palignr: concatenates the two 8-byte operands and
      // extracts an 8-byte window starting shiftVal bytes in.
      Value *Op1 = CI->getOperand(1);
      Value *Op2 = CI->getOperand(2);
      Value *Op3 = CI->getOperand(3);
      unsigned shiftVal = cast<ConstantInt>(Op3)->getZExtValue();
      Value *Rep;
      IRBuilder<> Builder(C);
      Builder.SetInsertPoint(CI->getParent(), CI);

      // If palignr is shifting the pair of input vectors less than 9 bytes,
      // emit a shuffle instruction.
      if (shiftVal <= 8) {
        const Type *IntTy = Type::getInt32Ty(C);
        const Type *EltTy = Type::getInt8Ty(C);
        const Type *VecTy = VectorType::get(EltTy, 8);
        
        Op2 = Builder.CreateBitCast(Op2, VecTy);
        Op1 = Builder.CreateBitCast(Op1, VecTy);

        llvm::SmallVector<llvm::Constant*, 8> Indices;
        for (unsigned i = 0; i != 8; ++i)
          Indices.push_back(ConstantInt::get(IntTy, shiftVal + i));

        Value *SV = ConstantVector::get(Indices.begin(), Indices.size());
        Rep = Builder.CreateShuffleVector(Op2, Op1, SV, "palignr");
        Rep = Builder.CreateBitCast(Rep, F->getReturnType());
      }

      // If palignr is shifting the pair of input vectors more than 8 but less
      // than 16 bytes, emit a logical right shift of the destination.
      else if (shiftVal < 16) {
        // MMX has these as 1 x i64 vectors for some odd optimization reasons.
        const Type *EltTy = Type::getInt64Ty(C);
        const Type *VecTy = VectorType::get(EltTy, 1);

        Op1 = Builder.CreateBitCast(Op1, VecTy, "cast");
        Op2 = ConstantInt::get(VecTy, (shiftVal-8) * 8);

        // Shift the whole 64-bit value right via llvm.x86.mmx.psrl.q
        // (the shift amount is in bits, hence the * 8).
        Function *I =
          Intrinsic::getDeclaration(F->getParent(), Intrinsic::x86_mmx_psrl_q);
        Rep = Builder.CreateCall2(I, Op1, Op2, "palignr");
      }

      // If palignr is shifting the pair of vectors 16 bytes or more,
      // the entire window is past both operands, so emit zero.
      else {
        Rep = Constant::getNullValue(F->getReturnType());
      }

      // Replace any uses with our new instruction.
      if (!CI->use_empty())
        CI->replaceAllUsesWith(Rep);

      // Remove upgraded instruction.
      CI->eraseFromParent();

    } else if (F->getName() == "llvm.x86.ssse3.palign.r.128") {
      // 128-bit (SSE) palignr: same scheme as above with 16-byte operands.
      Value *Op1 = CI->getOperand(1);
      Value *Op2 = CI->getOperand(2);
      Value *Op3 = CI->getOperand(3);
      unsigned shiftVal = cast<ConstantInt>(Op3)->getZExtValue();
      Value *Rep;
      IRBuilder<> Builder(C);
      Builder.SetInsertPoint(CI->getParent(), CI);

      // If palignr is shifting the pair of input vectors less than 17 bytes,
      // emit a shuffle instruction.
      if (shiftVal <= 16) {
        const Type *IntTy = Type::getInt32Ty(C);
        const Type *EltTy = Type::getInt8Ty(C);
        const Type *VecTy = VectorType::get(EltTy, 16);
        
        Op2 = Builder.CreateBitCast(Op2, VecTy);
        Op1 = Builder.CreateBitCast(Op1, VecTy);

        llvm::SmallVector<llvm::Constant*, 16> Indices;
        for (unsigned i = 0; i != 16; ++i)
          Indices.push_back(ConstantInt::get(IntTy, shiftVal + i));

        Value *SV = ConstantVector::get(Indices.begin(), Indices.size());
        Rep = Builder.CreateShuffleVector(Op2, Op1, SV, "palignr");
        Rep = Builder.CreateBitCast(Rep, F->getReturnType());
      }

      // If palignr is shifting the pair of input vectors more than 16 but less
      // than 32 bytes, emit a logical right shift of the destination.
      else if (shiftVal < 32) {
        const Type *EltTy = Type::getInt64Ty(C);
        const Type *VecTy = VectorType::get(EltTy, 2);
        const Type *IntTy = Type::getInt32Ty(C);

        Op1 = Builder.CreateBitCast(Op1, VecTy, "cast");
        Op2 = ConstantInt::get(IntTy, (shiftVal-16) * 8);

        // Byte-shift the 128-bit value right via llvm.x86.sse2.psrl.dq
        // (takes the shift amount in bits as an i32, hence the * 8).
        Function *I =
          Intrinsic::getDeclaration(F->getParent(), Intrinsic::x86_sse2_psrl_dq);
        Rep = Builder.CreateCall2(I, Op1, Op2, "palignr");
      }

      // If palignr is shifting the pair of vectors 32 bytes or more, emit zero.
      else {
        Rep = Constant::getNullValue(F->getReturnType());
      }

      // Replace any uses with our new instruction.
      if (!CI->use_empty())
        CI->replaceAllUsesWith(Rep);

      // Remove upgraded instruction.
      CI->eraseFromParent();

    } else {
      llvm_unreachable("Unknown function for CallInst upgrade.");
    }
    return;
  }

  switch (NewFn->getIntrinsicID()) {
  default:  llvm_unreachable("Unknown function for CallInst upgrade.");
  case Intrinsic::x86_mmx_psll_d:
  case Intrinsic::x86_mmx_psll_q:
  case Intrinsic::x86_mmx_psll_w:
  case Intrinsic::x86_mmx_psra_d:
  case Intrinsic::x86_mmx_psra_w:
  case Intrinsic::x86_mmx_psrl_d:
  case Intrinsic::x86_mmx_psrl_q:
  case Intrinsic::x86_mmx_psrl_w: {
    // The second operand's type changed (v2i32 -> v1i64); rebuild the call
    // with a bitcast of the shift-amount operand.
    Value *Operands[2];
    
    Operands[0] = CI->getArgOperand(0);
    
    // Cast the second parameter to the correct type.
    BitCastInst *BC = new BitCastInst(CI->getArgOperand(1), 
                                      NewFn->getFunctionType()->getParamType(1),
                                      "upgraded.", CI);
    Operands[1] = BC;
    
    //  Construct a new CallInst
    CallInst *NewCI = CallInst::Create(NewFn, Operands, Operands+2, 
                                       "upgraded."+CI->getName(), CI);
    NewCI->setTailCall(CI->isTailCall());
    NewCI->setCallingConv(CI->getCallingConv());
    
    //  Handle any uses of the old CallInst.
    if (!CI->use_empty())
      //  Replace all uses of the old call with the new cast which has the 
      //  correct type.
      CI->replaceAllUsesWith(NewCI);
    
    //  Clean up the old call now that it has been completely upgraded.
    CI->eraseFromParent();
    break;
  }        
  case Intrinsic::ctlz:
  case Intrinsic::ctpop:
  case Intrinsic::cttz: {
    //  Build a small vector of the 1..(N-1) operands, which are the 
    //  parameters.
    SmallVector<Value*, 8> Operands(CI->op_begin()+1, CI->op_end());

    //  Construct a new CallInst
    CallInst *NewCI = CallInst::Create(NewFn, Operands.begin(), Operands.end(),
                                       "upgraded."+CI->getName(), CI);
    NewCI->setTailCall(CI->isTailCall());
    NewCI->setCallingConv(CI->getCallingConv());

    //  Handle any uses of the old CallInst.
    if (!CI->use_empty()) {
      //  Check for sign extend parameter attributes on the return values.
      bool SrcSExt = NewFn->getAttributes().paramHasAttr(0, Attribute::SExt);
      bool DestSExt = F->getAttributes().paramHasAttr(0, Attribute::SExt);
      
      //  Construct an appropriate cast from the new return type to the old.
      CastInst *RetCast = CastInst::Create(
                            CastInst::getCastOpcode(NewCI, SrcSExt,
                                                    F->getReturnType(),
                                                    DestSExt),
                            NewCI, F->getReturnType(),
                            NewCI->getName(), CI);
      NewCI->moveBefore(RetCast);

      //  Replace all uses of the old call with the new cast which has the 
      //  correct type.
      CI->replaceAllUsesWith(RetCast);
    }

    //  Clean up the old call now that it has been completely upgraded.
    CI->eraseFromParent();
  }
  break;
  case Intrinsic::eh_selector:
  case Intrinsic::eh_typeid_for: {
    //  Only the return type changed.
    SmallVector<Value*, 8> Operands(CI->op_begin() + 1, CI->op_end());
    CallInst *NewCI = CallInst::Create(NewFn, Operands.begin(), Operands.end(),
                                       "upgraded." + CI->getName(), CI);
    NewCI->setTailCall(CI->isTailCall());
    NewCI->setCallingConv(CI->getCallingConv());

    //  Handle any uses of the old CallInst.
    if (!CI->use_empty()) {
      //  Construct an appropriate cast from the new return type to the old.
      CastInst *RetCast =
        CastInst::Create(CastInst::getCastOpcode(NewCI, true,
                                                 F->getReturnType(), true),
                         NewCI, F->getReturnType(), NewCI->getName(), CI);
      CI->replaceAllUsesWith(RetCast);
    }
    CI->eraseFromParent();
  }
  break;
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset: {
    // Add isVolatile: the new forms take a trailing i1, which is set to
    // false here to preserve the old (non-volatile) semantics.
    const llvm::Type *I1Ty = llvm::Type::getInt1Ty(CI->getContext());
    Value *Operands[5] = { CI->getArgOperand(0), CI->getArgOperand(1),
                           CI->getArgOperand(2), CI->getArgOperand(3),
                           llvm::ConstantInt::get(I1Ty, 0) };
    CallInst *NewCI = CallInst::Create(NewFn, Operands, Operands+5,
                                       CI->getName(), CI);
    NewCI->setTailCall(CI->isTailCall());
    NewCI->setCallingConv(CI->getCallingConv());
    //  Handle any uses of the old CallInst.
    if (!CI->use_empty())
      //  Replace all uses of the old call with the new cast which has the 
      //  correct type.
      CI->replaceAllUsesWith(NewCI);
    
    //  Clean up the old call now that it has been completely upgraded.
    CI->eraseFromParent();
    break;
  }
  }
}

// This tests each Function to determine if it needs upgrading. When we find 
// one we are interested in, we then upgrade all calls to reflect the new 
// function.
void llvm::UpgradeCallsToIntrinsic(Function* F) {
  assert(F && "Illegal attempt to upgrade a non-existent intrinsic.");

  // Upgrade the function and check if it is a totally new function.
  Function* NewFn;
  if (UpgradeIntrinsicFunction(F, NewFn)) {
    if (NewFn != F) {
      // Replace all uses to the old function with the new one if necessary.
      // Note: the iterator is advanced before UpgradeIntrinsicCall erases
      // the call, so erasing the use is safe.
      for (Value::use_iterator UI = F->use_begin(), UE = F->use_end();
           UI != UE; ) {
        if (CallInst* CI = dyn_cast<CallInst>(*UI++))
          UpgradeIntrinsicCall(CI, NewFn);
      }
      // Remove old function, no longer used, from the module.
      F->eraseFromParent();
    }
  }
}

/// This function strips all debug info intrinsics, except for llvm.dbg.declare.
/// If an llvm.dbg.declare intrinsic is invalid, then this function simply
/// strips that use.
void llvm::CheckDebugInfoIntrinsics(Module *M) {


  if (Function *FuncStart = M->getFunction("llvm.dbg.func.start")) {
    // Erase every call, then the (now unused) declaration itself.
    while (!FuncStart->use_empty()) {
      CallInst *CI = cast<CallInst>(FuncStart->use_back());
      CI->eraseFromParent();
    }
    FuncStart->eraseFromParent();
  }
  
  if (Function *StopPoint = M->getFunction("llvm.dbg.stoppoint")) {
    while (!StopPoint->use_empty()) {
      CallInst *CI = cast<CallInst>(StopPoint->use_back());
      CI->eraseFromParent();
    }
    StopPoint->eraseFromParent();
  }

  if (Function *RegionStart = M->getFunction("llvm.dbg.region.start")) {
    while (!RegionStart->use_empty()) {
      CallInst *CI = cast<CallInst>(RegionStart->use_back());
      CI->eraseFromParent();
    }
    RegionStart->eraseFromParent();
  }

  if (Function *RegionEnd = M->getFunction("llvm.dbg.region.end")) {
    while (!RegionEnd->use_empty()) {
      CallInst *CI = cast<CallInst>(RegionEnd->use_back());
      CI->eraseFromParent();
    }
    RegionEnd->eraseFromParent();
  }
  
  if (Function *Declare = M->getFunction("llvm.dbg.declare")) {
    if (!Declare->use_empty()) {
      // Sample one use: if its first two operands are not metadata the
      // declares are malformed, so drop all of them and the declaration.
      DbgDeclareInst *DDI = cast<DbgDeclareInst>(Declare->use_back());
      if (!isa<MDNode>(DDI->getArgOperand(0)) ||
          !isa<MDNode>(DDI->getArgOperand(1))) {
        while (!Declare->use_empty()) {
          CallInst *CI = cast<CallInst>(Declare->use_back());
          CI->eraseFromParent();
        }
        Declare->eraseFromParent();
      }
    }
  }
}