//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86TargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

#define DEBUG_TYPE "x86tti"

// Declare the pass initialization routine locally as target-specific passes
// don't have a target-wide initialization entry point, and so we rely on the
// pass constructor initialization.
namespace llvm {
void initializeX86TTIPass(PassRegistry &);
}

namespace {

class X86TTI final : public ImmutablePass, public TargetTransformInfo {
  const X86Subtarget *ST;
  const X86TargetLowering *TLI;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted and/or extracted from vectors.
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;

public:
  X86TTI() : ImmutablePass(ID), ST(nullptr), TLI(nullptr) {
    llvm_unreachable("This pass cannot be directly constructed");
  }

  X86TTI(const X86TargetMachine *TM)
      : ImmutablePass(ID), ST(TM->getSubtargetImpl()),
        TLI(TM->getTargetLowering()) {
    initializeX86TTIPass(*PassRegistry::getPassRegistry());
  }

  void initializePass() override {
    pushTTIStack(this);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    TargetTransformInfo::getAnalysisUsage(AU);
  }

  /// Pass identification.
  static char ID;

  /// Provide necessary pointer adjustments for the two base classes.
  void *getAdjustedAnalysisPointer(const void *ID) override {
    if (ID == &TargetTransformInfo::ID)
      return (TargetTransformInfo*)this;
    return this;
  }

  /// \name Scalar TTI Implementations
  /// @{
  PopcntSupportKind getPopcntSupport(unsigned TyWidth) const override;

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(bool Vector) const override;
  unsigned getRegisterBitWidth(bool Vector) const override;
  unsigned getMaximumUnrollFactor() const override;
  unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind,
                                  OperandValueKind) const override;
  unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
                          int Index, Type *SubTp) const override;
  unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
                            Type *Src) const override;
  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                              Type *CondTy) const override;
  unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
                              unsigned Index) const override;
  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace) const override;

  unsigned getAddressComputationCost(Type *PtrTy,
                                     bool IsComplex) const override;

  unsigned getReductionCost(unsigned Opcode, Type *Ty,
                            bool IsPairwiseForm) const override;

  unsigned getIntImmCost(int64_t) const;

  unsigned getIntImmCost(const APInt &Imm, Type *Ty) const override;

  unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                         Type *Ty) const override;
  unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                         Type *Ty) const override;

  /// @}
};

} // end anonymous namespace

INITIALIZE_AG_PASS(X86TTI, TargetTransformInfo, "x86tti",
                   "X86 Target Transform Info", true, true, false)
char X86TTI::ID = 0;

ImmutablePass *
llvm::createX86TargetTransformInfoPass(const X86TargetMachine *TM) {
  return new X86TTI(TM);
}

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

X86TTI::PopcntSupportKind X86TTI::getPopcntSupport(unsigned TyWidth) const {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? PSK_FastHardware : PSK_Software;
}

unsigned X86TTI::getNumberOfRegisters(bool Vector) const {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}

unsigned X86TTI::getRegisterBitWidth(bool Vector) const {
  if (Vector) {
    if (ST->hasAVX512()) return 512;
    if (ST->hasAVX()) return 256;
    if (ST->hasSSE1()) return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;
  return 32;
}

unsigned X86TTI::getMaximumUnrollFactor() const {
  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}

unsigned X86TTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                        OperandValueKind Op1Info,
                                        OperandValueKind Op2Info) const {
  // Legalize the type.
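  // Note: getTypeLegalizationCost returns a pair. The first element is the
  // split factor (how many operations of the legalized type the IR type
  // expands to) and the second is the MVT the type legalizes to. The table
  // costs below are per legalized part, so most lookups scale by LT.first.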
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry<MVT::SimpleValueType>
  AVX2UniformConstCostTable[] = {
    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2UniformConstCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2UniformConstCostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTable[] = {
    // Shifts on v4i64/v8i32 are legal on AVX2, even though we declare them
    // as custom so that we can detect the cases where the shift amount is a
    // scalar.
    { ISD::SHL,  MVT::v4i32,      1 },
    { ISD::SRL,  MVT::v4i32,      1 },
    { ISD::SRA,  MVT::v4i32,      1 },
    { ISD::SHL,  MVT::v8i32,      1 },
    { ISD::SRL,  MVT::v8i32,      1 },
    { ISD::SRA,  MVT::v8i32,      1 },
    { ISD::SHL,  MVT::v2i64,      1 },
    { ISD::SRL,  MVT::v2i64,      1 },
    { ISD::SHL,  MVT::v4i64,      1 },
    { ISD::SRL,  MVT::v4i64,      1 },

    { ISD::SHL,  MVT::v32i8,     42 }, // cmpeqb sequence.
    { ISD::SHL,  MVT::v16i16, 16*10 }, // Scalarized.

    { ISD::SRL,  MVT::v32i8,  32*10 }, // Scalarized.
    { ISD::SRL,  MVT::v16i16,  8*10 }, // Scalarized.

    { ISD::SRA,  MVT::v32i8,  32*10 }, // Scalarized.
    { ISD::SRA,  MVT::v16i16, 16*10 }, // Scalarized.
    { ISD::SRA,  MVT::v4i64,   4*10 }, // Scalarized.

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV, MVT::v32i8,  32*20 },
    { ISD::SDIV, MVT::v16i16, 16*20 },
    { ISD::SDIV, MVT::v8i32,   8*20 },
    { ISD::SDIV, MVT::v4i64,   4*20 },
    { ISD::UDIV, MVT::v32i8,  32*20 },
    { ISD::UDIV, MVT::v16i16, 16*20 },
    { ISD::UDIV, MVT::v8i32,   8*20 },
    { ISD::UDIV, MVT::v4i64,   4*20 },
  };

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return LT.first;

    int Idx = CostTableLookup(AVX2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2CostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType>
  SSE2UniformConstCostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // Constant splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i8,  1 }, // psllw.
    { ISD::SHL,  MVT::v8i16,  1 }, // psllw.
    { ISD::SHL,  MVT::v4i32,  1 }, // pslld.
    { ISD::SHL,  MVT::v2i64,  1 }, // psllq.

    { ISD::SRL,  MVT::v16i8,  1 }, // psrlw.
    { ISD::SRL,  MVT::v8i16,  1 }, // psrlw.
    { ISD::SRL,  MVT::v4i32,  1 }, // psrld.
    { ISD::SRL,  MVT::v2i64,  1 }, // psrlq.

    { ISD::SRA,  MVT::v16i8,  4 }, // psrlw, pand, pxor, psubb.
    { ISD::SRA,  MVT::v8i16,  1 }, // psraw.
    { ISD::SRA,  MVT::v4i32,  1 }, // psrad.

    { ISD::SDIV, MVT::v8i16,  6 }, // pmulhw sequence
    { ISD::UDIV, MVT::v8i16,  6 }, // pmulhuw sequence
    { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence
    { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2()) {
    // pmuldq sequence.
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;

    int Idx = CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2UniformConstCostTable[Idx].Cost;
  }

  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    EVT VT = LT.second;
    if ((VT == MVT::v8i16 && ST->hasSSE2()) ||
        (VT == MVT::v4i32 && ST->hasSSE41()))
      // Vector shift left by non-uniform constant can be lowered
      // into a vector multiply (pmullw/pmulld).
      return LT.first;
    if (VT == MVT::v4i32 && ST->hasSSE2())
      // A vector shift left by non-uniform constant is converted
      // into a vector multiply; the new multiply is eventually
      // lowered into a sequence of shuffles and 2 x pmuludq.
      ISD = ISD::MUL;
  }

  static const CostTblEntry<MVT::SimpleValueType> SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // For some cases where the shift amount is a scalar we would be able
    // to generate better code. Unfortunately, when this is the case the value
    // (the splat) will get hoisted out of the loop, thereby making it invisible
    // to ISel. The cost model must return worst case assumptions because it is
    // used for vectorization and we don't want to make vectorized code worse
    // than scalar code.
    { ISD::SHL,  MVT::v16i8,    30 }, // cmpeqb sequence.
    { ISD::SHL,  MVT::v8i16,  8*10 }, // Scalarized.
    { ISD::SHL,  MVT::v4i32,   2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,  2*10 }, // Scalarized.
    { ISD::SHL,  MVT::v4i64,  4*10 }, // Scalarized.

    { ISD::SRL,  MVT::v16i8, 16*10 }, // Scalarized.
    { ISD::SRL,  MVT::v8i16,  8*10 }, // Scalarized.
    { ISD::SRL,  MVT::v4i32,  4*10 }, // Scalarized.
    { ISD::SRL,  MVT::v2i64,  2*10 }, // Scalarized.

    { ISD::SRA,  MVT::v16i8, 16*10 }, // Scalarized.
    { ISD::SRA,  MVT::v8i16,  8*10 }, // Scalarized.
    { ISD::SRA,  MVT::v4i32,  4*10 }, // Scalarized.
    { ISD::SRA,  MVT::v2i64,  2*10 }, // Scalarized.

    // It is not a good idea to vectorize division. We have to scalarize it and
    // in the process we will often end up having to spill regular registers.
    // The overhead of division is going to dominate most kernels anyway, so
    // try hard to prevent vectorization of division; it is generally a bad
    // idea. Assume somewhat arbitrarily that we have to be able to hide "20
    // cycles" for each lane.
    { ISD::SDIV, MVT::v16i8, 16*20 },
    { ISD::SDIV, MVT::v8i16,  8*20 },
    { ISD::SDIV, MVT::v4i32,  4*20 },
    { ISD::SDIV, MVT::v2i64,  2*20 },
    { ISD::UDIV, MVT::v16i8, 16*20 },
    { ISD::UDIV, MVT::v8i16,  8*20 },
    { ISD::UDIV, MVT::v4i32,  4*20 },
    { ISD::UDIV, MVT::v2i64,  2*20 },
  };

  if (ST->hasSSE2()) {
    int Idx = CostTableLookup(SSE2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2CostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops.
    // We can issue two half-sized operations and we only need to extract the
    // upper YMM half. Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,  MVT::v16i16,  4 },
    { ISD::MUL,  MVT::v8i32,   4 },
    { ISD::SUB,  MVT::v8i32,   4 },
    { ISD::ADD,  MVT::v8i32,   4 },
    { ISD::SUB,  MVT::v4i64,   4 },
    { ISD::ADD,  MVT::v4i64,   4 },
    // A v4i64 multiply is custom lowered as two split v2i64 vectors that are
    // in turn lowered as a series of long multiplies (3), shifts (4) and
    // adds (2). Because we believe v4i64 to be a legal type, we must also
    // include the split factor of two in the cost table. Therefore, the cost
    // here is 18 instead of 9.
    { ISD::MUL,  MVT::v4i64,  18 },
  };

  // Look for AVX1 lowering tricks.
  if (ST->hasAVX() && !ST->hasAVX2()) {
    EVT VT = LT.second;

    // v16i16 and v8i32 shifts by non-uniform constants are lowered into a
    // sequence of extract + two vector multiply + insert.
    if (ISD == ISD::SHL && (VT == MVT::v8i32 || VT == MVT::v16i16) &&
        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue)
      ISD = ISD::MUL;

    int Idx = CostTableLookup(AVX1CostTable, ISD, VT);
    if (Idx != -1)
      return LT.first * AVX1CostTable[Idx].Cost;
  }

  // Custom lowering of vectors.
  static const CostTblEntry<MVT::SimpleValueType> CustomLowered[] = {
    // A v2i64/v4i64 multiply is custom lowered as a series of long
    // multiplies (3), shifts (4) and adds (2).
    { ISD::MUL, MVT::v2i64, 9 },
    { ISD::MUL, MVT::v4i64, 9 },
  };
  int Idx = CostTableLookup(CustomLowered, ISD, LT.second);
  if (Idx != -1)
    return LT.first * CustomLowered[Idx].Cost;

  // Special lowering of v4i32 mul on sse2, sse3: Lower v4i32 mul as 2x shuffle,
  // 2x pmuludq, 2x shuffle.
  if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() &&
      !ST->hasSSE41())
    return LT.first * 6;

  // Fallback to the default implementation.
  return TargetTransformInfo::getArithmeticInstrCost(Opcode, Ty, Op1Info,
                                                     Op2Info);
}

unsigned X86TTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
                                Type *SubTp) const {
  // We only estimate the cost of reverse and alternate shuffles.
  if (Kind != SK_Reverse && Kind != SK_Alternate)
    return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);

  if (Kind == SK_Reverse) {
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);
    unsigned Cost = 1;
    if (LT.second.getSizeInBits() > 128)
      Cost = 3; // Extract + insert + copy.

    // Multiply by the number of parts.
    return Cost * LT.first;
  }

  if (Kind == SK_Alternate) {
    // 64-bit packed float vectors (v2f32) are widened to type v4f32.
    // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);

    // The backend knows how to generate a single VEX.256 version of
    // instruction VPBLENDW if the target supports AVX2.
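    // For example, an alternating v16i16 mask such as <0,17,2,19,...,14,31>
    // picks the same even/odd pattern in both 128-bit lanes, so a single
    // vpblendw ymm with an immediate blend mask suffices; hence a cost of 1
    // per legalized part.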
    if (ST->hasAVX2() && LT.second == MVT::v16i16)
      return LT.first;

    static const CostTblEntry<MVT::SimpleValueType> AVXAltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v4i64, 1},  // vblendpd
      {ISD::VECTOR_SHUFFLE, MVT::v4f64, 1},  // vblendpd

      {ISD::VECTOR_SHUFFLE, MVT::v8i32, 1},  // vblendps
      {ISD::VECTOR_SHUFFLE, MVT::v8f32, 1},  // vblendps

      // This shuffle is custom lowered into a sequence of:
      //   2x vextractf128, 2x vpblendw, 1x vinsertf128
      {ISD::VECTOR_SHUFFLE, MVT::v16i16, 5},

      // This shuffle is custom lowered into a long sequence of:
      //   2x vextractf128, 4x vpshufb, 2x vpor, 1x vinsertf128
      {ISD::VECTOR_SHUFFLE, MVT::v32i8, 9}
    };

    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVXAltShuffleTbl, ISD::VECTOR_SHUFFLE,
                                LT.second);
      if (Idx != -1)
        return LT.first * AVXAltShuffleTbl[Idx].Cost;
    }

    static const CostTblEntry<MVT::SimpleValueType> SSE41AltShuffleTbl[] = {
      // These are lowered into movsd.
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},

      // Packed float vectors with four elements are lowered into BLENDI dag
      // nodes. A v4i32/v4f32 BLENDI generates a single 'blendps'/'blendpd'.
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},

      // This shuffle generates a single pshufw.
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},

      // There is no instruction that matches a v16i8 alternate shuffle.
      // The backend will expand it into the sequence 'pshufb + pshufb + or'.
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}
    };

    if (ST->hasSSE41()) {
      int Idx = CostTableLookup(SSE41AltShuffleTbl, ISD::VECTOR_SHUFFLE,
                                LT.second);
      if (Idx != -1)
        return LT.first * SSE41AltShuffleTbl[Idx].Cost;
    }

    static const CostTblEntry<MVT::SimpleValueType> SSSE3AltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},  // movsd
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},  // movsd

      // SSE3 doesn't have 'blendps'. The following shuffles are expanded into
      // the sequence 'shufps + pshufd'.
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},

      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 3},  // pshufb + pshufb + or
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}   // pshufb + pshufb + or
    };

    if (ST->hasSSSE3()) {
      int Idx = CostTableLookup(SSSE3AltShuffleTbl, ISD::VECTOR_SHUFFLE,
                                LT.second);
      if (Idx != -1)
        return LT.first * SSSE3AltShuffleTbl[Idx].Cost;
    }

    static const CostTblEntry<MVT::SimpleValueType> SSEAltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},  // movsd
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},  // movsd

      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},  // shufps + pshufd
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},  // shufps + pshufd

      // This is expanded into a long sequence of four extract + four insert.
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 8},  // 4 x pextrw + 4 pinsrw.

      // 8 x (pinsrw + pextrw + and + movb + movzb + or)
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 48}
    };

    // Fall-back (SSE3 and SSE2).
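    // Note that the v16i8 entry above works out to 8 x 6 = 48 operations; a
    // cost that high effectively steers the vectorizers away from byte-wide
    // alternate shuffles on plain SSE2.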
    int Idx = CostTableLookup(SSEAltShuffleTbl, ISD::VECTOR_SHUFFLE,
                              LT.second);
    if (Idx != -1)
      return LT.first * SSEAltShuffleTbl[Idx].Cost;
    return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);
  }

  return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);
}

unsigned X86TTI::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  std::pair<unsigned, MVT> LTSrc = TLI->getTypeLegalizationCost(Src);
  std::pair<unsigned, MVT> LTDest = TLI->getTypeLegalizationCost(Dst);

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  SSE2ConvTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels and making sure that, when we take
    // legalization into account, the throughput will be overestimated.
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64,  2*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32,  4*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16,  8*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64,  2*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32,  4*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16,  8*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    // There are faster sequences for float conversions.
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8,  8 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8,  8 },
  };

  if (ST->hasSSE2() && !ST->hasAVX()) {
    int Idx =
        ConvertCostTableLookup(SSE2ConvTbl, ISD, LTDest.second, LTSrc.second);
    if (Idx != -1)
      return LTSrc.first * SSE2ConvTbl[Idx].Cost;
  }

  EVT SrcTy = TLI->getValueType(Src);
  EVT DstTy = TLI->getValueType(Dst);

  // The function getSimpleVT only handles simple value types.
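  // Types with no MVT equivalent (e.g. unusual element counts such as
  // <5 x i32>) are not simple; defer those to the target-independent
  // implementation rather than consulting the tables below.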
  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVX2ConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  4 },
  };

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  4 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  4 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  4 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  5 },
    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 4 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  9 },

    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   8 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   8 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  1 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   6 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   5 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  9 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  6 },
    // The generic code to compute the scalar overhead is currently broken.
    // Work around this limitation by estimating the scalarization overhead
    // here. We have roughly 10 instructions per scalar element.
    // Multiply that by the vector width.
    // FIXME: remove that when PR19268 is fixed.
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  2*10 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  4*10 },

    { ISD::FP_TO_SINT,  MVT::v8i8,   MVT::v8f32,  7 },
    { ISD::FP_TO_SINT,  MVT::v4i8,   MVT::v4f32,  1 },
    // This node is expanded into scalarized operations but BasicTTI is overly
    // optimistic estimating its cost. It computes 3 per element (one
    // vector-extract, one scalar conversion and one vector-insert). The
    // problem is that the inserts form a read-modify-write chain, so latency
    // should be factored in too. We inflate the cost per element by 1 to
    // account for this.
    { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32,  8*4 },
    { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f64,  4*4 },
  };

  if (ST->hasAVX2()) {
    int Idx = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                     DstTy.getSimpleVT(), SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVX2ConversionTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = ConvertCostTableLookup(AVXConversionTbl, ISD, DstTy.getSimpleVT(),
                                     SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVXConversionTbl[Idx].Cost;
  }

  return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
}

unsigned X86TTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                    Type *CondTy) const {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTbl[] = {
    { ISD::SETCC, MVT::v2f64, 1 },
    { ISD::SETCC, MVT::v4f32, 1 },
    { ISD::SETCC, MVT::v2i64, 1 },
    { ISD::SETCC, MVT::v4i32, 1 },
    { ISD::SETCC, MVT::v8i16, 1 },
    { ISD::SETCC, MVT::v16i8, 1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTbl[] = {
    { ISD::SETCC, MVT::v4f64,  1 },
    { ISD::SETCC, MVT::v8f32,  1 },
    // AVX1 does not support 8-wide integer compare.
    { ISD::SETCC, MVT::v4i64,  4 },
    { ISD::SETCC, MVT::v8i32,  4 },
    { ISD::SETCC, MVT::v16i16, 4 },
    { ISD::SETCC, MVT::v32i8,  4 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTbl[] = {
    { ISD::SETCC, MVT::v4i64,  1 },
    { ISD::SETCC, MVT::v8i32,  1 },
    { ISD::SETCC, MVT::v16i16, 1 },
    { ISD::SETCC, MVT::v32i8,  1 },
  };

  if (ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX2CostTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = CostTableLookup(AVX1CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX1CostTbl[Idx].Cost;
  }

  if (ST->hasSSE42()) {
    int Idx = CostTableLookup(SSE42CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * SSE42CostTbl[Idx].Cost;
  }

  return TargetTransformInfo::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}

unsigned X86TTI::getVectorInstrCost(unsigned Opcode, Type *Val,
                                    unsigned Index) const {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // Floating point scalars are already located in index #0.
    if (Val->getScalarType()->isFloatingPointTy() && Index == 0)
      return 0;
  }

  return TargetTransformInfo::getVectorInstrCost(Opcode, Val, Index);
}

unsigned X86TTI::getScalarizationOverhead(Type *Ty, bool Insert,
                                          bool Extract) const {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  unsigned Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += TopTTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += TopTTI->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}

unsigned X86TTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                 unsigned AddressSpace) const {
  // Handle non-power-of-two vectors such as <3 x float>.
  if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
    unsigned NumElem = VTy->getVectorNumElements();

    // Handle a few common cases:
    // <3 x float>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
      // Cost = 64 bit store + extract + 32 bit store.
      return 3;

    // <3 x double>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
      // Cost = 128 bit store + unpack + 64 bit store.
      return 3;

    // Assume that all other non-power-of-two numbers are scalarized.
    if (!isPowerOf2_32(NumElem)) {
      unsigned Cost =
          TargetTransformInfo::getMemoryOpCost(Opcode, VTy->getScalarType(),
                                               Alignment, AddressSpace);
      unsigned SplitCost =
          getScalarizationOverhead(Src, Opcode == Instruction::Load,
                                   Opcode == Instruction::Store);
      return NumElem * Cost + SplitCost;
    }
  }

  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  // Each load/store unit costs 1.
  unsigned Cost = LT.first * 1;

  // On Sandybridge 256-bit load/stores are double pumped
  // (but not on Haswell).
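  // hasAVX2() is used below as a proxy for Haswell and later, whose
  // load/store ports are a full 256 bits wide. On AVX1-only Sandybridge a
  // 256-bit access is internally split into two 128-bit operations (the
  // "double pumping" above), hence the doubling.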
  if (LT.second.getSizeInBits() > 128 && !ST->hasAVX2())
    Cost *= 2;

  return Cost;
}

unsigned X86TTI::getAddressComputationCost(Type *Ty, bool IsComplex) const {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where
  // the computation can more often be merged into the index mode. The
  // resulting extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  return TargetTransformInfo::getAddressComputationCost(Ty, IsComplex);
}

unsigned X86TTI::getReductionCost(unsigned Opcode, Type *ValTy,
                                  bool IsPairwise) const {

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use that measurement as the cost.

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD,  MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,  MVT::v8i16, 5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblPairWise[] = {
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::FADD, MVT::v4f64, 5 },
    { ISD::FADD, MVT::v8f32, 7 },
    { ISD::ADD,  MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,  MVT::v4i64, 5 }, // The data reported by the IACA tool is "4.8".
    { ISD::ADD,  MVT::v8i16, 5 },
    { ISD::ADD,  MVT::v8i32, 5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD,  MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
    { ISD::ADD,  MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v4f32, 3 },
    { ISD::FADD, MVT::v4f64, 3 },
    { ISD::FADD, MVT::v8f32, 4 },
    { ISD::ADD,  MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "2.8".
    { ISD::ADD,  MVT::v4i64, 3 },
    { ISD::ADD,  MVT::v8i16, 4 },
    { ISD::ADD,  MVT::v8i32, 5 },
  };

  if (IsPairwise) {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblPairWise[Idx].Cost;
    }
  } else {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblNoPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblNoPairWise[Idx].Cost;
    }
  }

  return TargetTransformInfo::getReductionCost(Opcode, ValTy, IsPairwise);
}

/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
unsigned X86TTI::getIntImmCost(int64_t Val) const {
  if (Val == 0)
    return TCC_Free;

  if (isInt<32>(Val))
    return TCC_Basic;

  return 2 * TCC_Basic;
}

unsigned X86TTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Never hoist constants larger than 128 bits, because this might lead to
  // incorrect code generation or assertions in codegen.
  // FIXME: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
  if (BitSize > 128)
    return TCC_Free;

  if (Imm == 0)
    return TCC_Free;

  // Sign-extend all constants to a multiple of 64 bits.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  unsigned Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1U, Cost);
}

unsigned X86TTI::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                               Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default: return TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
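    // Returning a non-free cost for operand 0 nudges constant hoisting to
    // keep one shared base address in a register instead of rematerializing
    // it at every GEP.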
    if (Idx == 0)
      return 2 * TCC_Basic;
    return TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    unsigned NumConstants = (BitSize + 63) / 64;
    unsigned Cost = X86TTI::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TCC_Basic)
               ? static_cast<unsigned>(TCC_Free)
               : Cost;
  }

  return X86TTI::getIntImmCost(Imm, Ty);
}

unsigned X86TTI::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                               const APInt &Imm, Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TCC_Free;

  switch (IID) {
  default: return TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TCC_Free;
    break;
  }
  return X86TTI::getIntImmCost(Imm, Ty);
}