X86TargetTransformInfo.cpp revision fc6434a73d053c3e1d9c79034a267ae1434483ad
//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "x86tti"
#include "X86.h"
#include "X86TargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/CostTable.h"
using namespace llvm;

// Declare the pass initialization routine locally as target-specific passes
// don't have a target-wide initialization entry point, and so we rely on the
// pass constructor initialization.
namespace llvm {
void initializeX86TTIPass(PassRegistry &);
}

namespace {

class X86TTI : public ImmutablePass, public TargetTransformInfo {
  const X86Subtarget *ST;
  const X86TargetLowering *TLI;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted and/or extracted from vectors.
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;

public:
  X86TTI() : ImmutablePass(ID), ST(0), TLI(0) {
    llvm_unreachable("This pass cannot be directly constructed");
  }

  X86TTI(const X86TargetMachine *TM)
      : ImmutablePass(ID), ST(TM->getSubtargetImpl()),
        TLI(TM->getTargetLowering()) {
    initializeX86TTIPass(*PassRegistry::getPassRegistry());
  }

  virtual void initializePass() {
    pushTTIStack(this);
  }

  virtual void finalizePass() {
    popTTIStack();
  }

  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    TargetTransformInfo::getAnalysisUsage(AU);
  }

  /// Pass identification.
  static char ID;

  /// Provide necessary pointer adjustments for the two base classes.
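  /// The pass manager holds a void* to this pass when resolving the
  /// TargetTransformInfo analysis group; because X86TTI multiply inherits
  /// from ImmutablePass and TargetTransformInfo, the TargetTransformInfo
  /// subobject need not start at the same address, so the pointer must be
  /// adjusted before it is handed out.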
  virtual void *getAdjustedAnalysisPointer(const void *ID) {
    if (ID == &TargetTransformInfo::ID)
      return (TargetTransformInfo*)this;
    return this;
  }

  /// \name Scalar TTI Implementations
  /// @{
  virtual PopcntSupportKind getPopcntSupport(unsigned TyWidth) const;

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  virtual unsigned getNumberOfRegisters(bool Vector) const;
  virtual unsigned getRegisterBitWidth(bool Vector) const;
  virtual unsigned getMaximumUnrollFactor() const;
  virtual unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                          OperandValueKind,
                                          OperandValueKind) const;
  virtual unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
                                  int Index, Type *SubTp) const;
  virtual unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
                                    Type *Src) const;
  virtual unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                      Type *CondTy) const;
  virtual unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
                                      unsigned Index) const;
  virtual unsigned getMemoryOpCost(unsigned Opcode, Type *Src,
                                   unsigned Alignment,
                                   unsigned AddressSpace) const;

  virtual unsigned getAddressComputationCost(Type *PtrTy, bool IsComplex) const;

  /// @}
};

} // end anonymous namespace

INITIALIZE_AG_PASS(X86TTI, TargetTransformInfo, "x86tti",
                   "X86 Target Transform Info", true, true, false)
char X86TTI::ID = 0;

ImmutablePass *
llvm::createX86TargetTransformInfoPass(const X86TargetMachine *TM) {
  return new X86TTI(TM);
}

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

X86TTI::PopcntSupportKind X86TTI::getPopcntSupport(unsigned TyWidth) const {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  // instructions is inefficient. Once the problem is fixed, we should call
  // ST->hasSSE3() instead of ST->hasSSE41().
  return ST->hasSSE41() ? PSK_FastHardware : PSK_Software;
}

unsigned X86TTI::getNumberOfRegisters(bool Vector) const {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit())
    return 16;
  return 8;
}

unsigned X86TTI::getRegisterBitWidth(bool Vector) const {
  if (Vector) {
    if (ST->hasAVX()) return 256;
    if (ST->hasSSE1()) return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;
  return 32;
}

unsigned X86TTI::getMaximumUnrollFactor() const {
  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}

unsigned X86TTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                        OperandValueKind Op1Info,
                                        OperandValueKind Op2Info) const {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTable[] = {
    // Shifts on v4i64/v8i32 on AVX2 are legal even though we declare them as
    // custom, in order to detect the cases where the shift amount is a scalar.
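    // Each entry below is { ISD opcode, legalized MVT, cost }: CostTableLookup
    // matches on the first two fields, and the caller scales Cost by the
    // legalization split factor (LT.first).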
    { ISD::SHL,  MVT::v4i32,   1 },
    { ISD::SRL,  MVT::v4i32,   1 },
    { ISD::SRA,  MVT::v4i32,   1 },
    { ISD::SHL,  MVT::v8i32,   1 },
    { ISD::SRL,  MVT::v8i32,   1 },
    { ISD::SRA,  MVT::v8i32,   1 },
    { ISD::SHL,  MVT::v2i64,   1 },
    { ISD::SRL,  MVT::v2i64,   1 },
    { ISD::SHL,  MVT::v4i64,   1 },
    { ISD::SRL,  MVT::v4i64,   1 },

    { ISD::SHL,  MVT::v32i8,   42 },     // cmpeqb sequence.
    { ISD::SHL,  MVT::v16i16,  16*10 },  // Scalarized.

    { ISD::SRL,  MVT::v32i8,   32*10 },  // Scalarized.
    { ISD::SRL,  MVT::v16i16,  8*10 },   // Scalarized.

    { ISD::SRA,  MVT::v32i8,   32*10 },  // Scalarized.
    { ISD::SRA,  MVT::v16i16,  16*10 },  // Scalarized.
    { ISD::SRA,  MVT::v4i64,   4*10 },   // Scalarized.

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV,  MVT::v32i8,   32*20 },
    { ISD::SDIV,  MVT::v16i16,  16*20 },
    { ISD::SDIV,  MVT::v8i32,   8*20 },
    { ISD::SDIV,  MVT::v4i64,   4*20 },
    { ISD::UDIV,  MVT::v32i8,   32*20 },
    { ISD::UDIV,  MVT::v16i16,  16*20 },
    { ISD::UDIV,  MVT::v8i32,   8*20 },
    { ISD::UDIV,  MVT::v4i64,   4*20 },
  };

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2CostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType>
  SSE2UniformConstCostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // Constant splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i8,  1 }, // psllw.
    { ISD::SHL,  MVT::v8i16,  1 }, // psllw.
    { ISD::SHL,  MVT::v4i32,  1 }, // pslld.
    { ISD::SHL,  MVT::v2i64,  1 }, // psllq.

    { ISD::SRL,  MVT::v16i8,  1 }, // psrlw.
    { ISD::SRL,  MVT::v8i16,  1 }, // psrlw.
    { ISD::SRL,  MVT::v4i32,  1 }, // psrld.
    { ISD::SRL,  MVT::v2i64,  1 }, // psrlq.

    { ISD::SRA,  MVT::v16i8,  4 }, // psrlw, pand, pxor, psubb.
    { ISD::SRA,  MVT::v8i16,  1 }, // psraw.
    { ISD::SRA,  MVT::v4i32,  1 }, // psrad.
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2()) {
    int Idx = CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2UniformConstCostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // For some cases, where the shift amount is a scalar, we would be able to
    // generate better code. Unfortunately, when this is the case the value
    // (the splat) will get hoisted out of the loop, thereby making it
    // invisible to ISel. The cost model must return worst case assumptions
    // because it is used for vectorization and we don't want to make
    // vectorized code worse than scalar code.
    { ISD::SHL,  MVT::v16i8,  30 },    // cmpeqb sequence.
    { ISD::SHL,  MVT::v8i16,  8*10 },  // Scalarized.
    { ISD::SHL,  MVT::v4i32,  2*5 },   // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,  2*10 },  // Scalarized.

    { ISD::SRL,  MVT::v16i8,  16*10 }, // Scalarized.
    { ISD::SRL,  MVT::v8i16,  8*10 },  // Scalarized.
    { ISD::SRL,  MVT::v4i32,  4*10 },  // Scalarized.
    { ISD::SRL,  MVT::v2i64,  2*10 },  // Scalarized.

    { ISD::SRA,  MVT::v16i8,  16*10 }, // Scalarized.
    { ISD::SRA,  MVT::v8i16,  8*10 },  // Scalarized.
    { ISD::SRA,  MVT::v4i32,  4*10 },  // Scalarized.
    { ISD::SRA,  MVT::v2i64,  2*10 },  // Scalarized.
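    // Note that v2i64 SRA is priced as scalarized above: SSE/AVX provide no
    // packed 64-bit arithmetic right shift (there is no "psraq").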

    // It is not a good idea to vectorize division. We have to scalarize it and
    // in the process we will often end up having to spill regular registers.
    // The overhead of division is going to dominate most kernels anyway, so
    // try hard to prevent vectorization of division - it is generally a bad
    // idea. Assume somewhat arbitrarily that we have to be able to hide "20
    // cycles" for each lane.
    { ISD::SDIV,  MVT::v16i8,  16*20 },
    { ISD::SDIV,  MVT::v8i16,  8*20 },
    { ISD::SDIV,  MVT::v4i32,  4*20 },
    { ISD::SDIV,  MVT::v2i64,  2*20 },
    { ISD::UDIV,  MVT::v16i8,  16*20 },
    { ISD::UDIV,  MVT::v8i16,  8*20 },
    { ISD::UDIV,  MVT::v4i32,  4*20 },
    { ISD::UDIV,  MVT::v2i64,  2*20 },
  };

  if (ST->hasSSE2()) {
    int Idx = CostTableLookup(SSE2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2CostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,  MVT::v8i32,  4 },
    { ISD::SUB,  MVT::v8i32,  4 },
    { ISD::ADD,  MVT::v8i32,  4 },
    { ISD::SUB,  MVT::v4i64,  4 },
    { ISD::ADD,  MVT::v4i64,  4 },
    // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
    // are lowered as a series of long multiplies(3), shifts(4) and adds(2).
    // Because we believe v4i64 to be a legal type, we must also include the
    // split factor of two in the cost table. Therefore, the cost here is 18
    // instead of 9.
    { ISD::MUL,  MVT::v4i64,  18 },
  };

  // Look for AVX1 lowering tricks.
  if (ST->hasAVX() && !ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX1CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX1CostTable[Idx].Cost;
  }

  // Custom lowering of vectors.
  static const CostTblEntry<MVT::SimpleValueType> CustomLowered[] = {
    // A v2i64/v4i64 multiply is custom lowered as a series of long
    // multiplies(3), shifts(4) and adds(2).
    { ISD::MUL,  MVT::v2i64,  9 },
    { ISD::MUL,  MVT::v4i64,  9 },
  };
  int Idx = CostTableLookup(CustomLowered, ISD, LT.second);
  if (Idx != -1)
    return LT.first * CustomLowered[Idx].Cost;

  // Special lowering of v4i32 mul on SSE2/SSE3 (i.e. before SSE4.1): lower
  // v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
  if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() &&
      !ST->hasSSE41())
    return 6;

  // Fallback to the default implementation.
  return TargetTransformInfo::getArithmeticInstrCost(Opcode, Ty, Op1Info,
                                                     Op2Info);
}

unsigned X86TTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
                                Type *SubTp) const {
  // We only estimate the cost of reverse shuffles.
  if (Kind != SK_Reverse)
    return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);
  unsigned Cost = 1;
  if (LT.second.getSizeInBits() > 128)
    Cost = 3; // Extract + insert + copy.

  // Multiply by the number of parts.
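  // For example, reversing <8 x float> on an SSE-only target legalizes to two
  // v4f32 halves (LT.first == 2) at cost 1 each, giving a total cost of 2.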
  return Cost * LT.first;
}

unsigned X86TTI::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  std::pair<unsigned, MVT> LTSrc = TLI->getTypeLegalizationCost(Src);
  std::pair<unsigned, MVT> LTDest = TLI->getTypeLegalizationCost(Dst);

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  SSE2ConvTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels and making sure when we take
    // legalization into account the throughput will be overestimated.
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64,  2*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32,  4*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16,  8*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8,  16*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64,  2*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32,  4*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16,  8*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8,  16*10 },
    // There are faster sequences for float conversions.
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64,  15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32,  15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16,  15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8,  8 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64,  15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32,  15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16,  15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8,  8 },
  };

  if (ST->hasSSE2() && !ST->hasAVX()) {
    int Idx =
        ConvertCostTableLookup(SSE2ConvTbl, ISD, LTDest.second, LTSrc.second);
    if (Idx != -1)
      return LTSrc.first * SSE2ConvTbl[Idx].Cost;
  }

  EVT SrcTy = TLI->getValueType(Src);
  EVT DstTy = TLI->getValueType(Dst);

  // The function getSimpleVT only handles simple value types.
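  // For example, an odd-sized vector such as <5 x i32> has no simple MVT, so
  // it falls through to the target-independent estimate below.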
  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
    { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64, 1 },
    { ISD::TRUNCATE,    MVT::v8i16, MVT::v8i32, 1 },

    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i1,  8 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i8,  8 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i16, 5 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i8,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i16, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i8,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i16, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i32, 1 },

    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i1,  6 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i8,  5 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i16, 5 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 9 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i1,  7 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i8,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32, 6 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i1,  7 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i8,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i32, 6 },

    { ISD::FP_TO_SINT,  MVT::v8i8,  MVT::v8f32, 1 },
    { ISD::FP_TO_SINT,  MVT::v4i8,  MVT::v4f32, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1,  6 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1,  9 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1,  8 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8,  6 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 6 },
    { ISD::TRUNCATE,    MVT::v8i32, MVT::v8i64, 3 },
  };

  if (ST->hasAVX()) {
    int Idx = ConvertCostTableLookup(AVXConversionTbl, ISD, DstTy.getSimpleVT(),
                                     SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVXConversionTbl[Idx].Cost;
  }

  return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
}

unsigned X86TTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                    Type *CondTy) const {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTbl[] = {
    { ISD::SETCC,  MVT::v2f64,  1 },
    { ISD::SETCC,  MVT::v4f32,  1 },
    { ISD::SETCC,  MVT::v2i64,  1 },
    { ISD::SETCC,  MVT::v4i32,  1 },
    { ISD::SETCC,  MVT::v8i16,  1 },
    { ISD::SETCC,  MVT::v16i8,  1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTbl[] = {
    { ISD::SETCC,  MVT::v4f64,  1 },
    { ISD::SETCC,  MVT::v8f32,  1 },
    // AVX1 does not support 8-wide integer compare.
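    // Each 256-bit integer compare is split into two 128-bit compares plus an
    // extract and an insert to recombine the halves, hence the cost of 4
    // (two ops + 1 extract + 1 insert).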
    { ISD::SETCC,  MVT::v4i64,   4 },
    { ISD::SETCC,  MVT::v8i32,   4 },
    { ISD::SETCC,  MVT::v16i16,  4 },
    { ISD::SETCC,  MVT::v32i8,   4 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTbl[] = {
    { ISD::SETCC,  MVT::v4i64,   1 },
    { ISD::SETCC,  MVT::v8i32,   1 },
    { ISD::SETCC,  MVT::v16i16,  1 },
    { ISD::SETCC,  MVT::v32i8,   1 },
  };

  if (ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX2CostTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = CostTableLookup(AVX1CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX1CostTbl[Idx].Cost;
  }

  if (ST->hasSSE42()) {
    int Idx = CostTableLookup(SSE42CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * SSE42CostTbl[Idx].Cost;
  }

  return TargetTransformInfo::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}

unsigned X86TTI::getVectorInstrCost(unsigned Opcode, Type *Val,
                                    unsigned Index) const {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // Floating point scalars are already located in index #0.
    if (Val->getScalarType()->isFloatingPointTy() && Index == 0)
      return 0;
  }

  return TargetTransformInfo::getVectorInstrCost(Opcode, Val, Index);
}

unsigned X86TTI::getScalarizationOverhead(Type *Ty, bool Insert,
                                          bool Extract) const {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  unsigned Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += TopTTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += TopTTI->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}

unsigned X86TTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                 unsigned AddressSpace) const {
  // Handle non-power-of-two vectors such as <3 x float>.
  if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
    unsigned NumElem = VTy->getVectorNumElements();

    // Handle a few common cases:
    // <3 x float>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
      // Cost = 64 bit store + extract + 32 bit store.
      return 3;

    // <3 x double>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
      // Cost = 128 bit store + unpack + 64 bit store.
      return 3;

    // Assume that all other non-power-of-two numbers are scalarized.
    if (!isPowerOf2_32(NumElem)) {
      unsigned Cost = TargetTransformInfo::getMemoryOpCost(Opcode,
                                                           VTy->getScalarType(),
                                                           Alignment,
                                                           AddressSpace);
      unsigned SplitCost = getScalarizationOverhead(Src,
                                                    Opcode == Instruction::Load,
                                                    Opcode == Instruction::Store);
      return NumElem * Cost + SplitCost;
    }
  }

  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  // Each load/store unit costs 1.
  unsigned Cost = LT.first * 1;

  // On Sandybridge 256-bit load/stores are double pumped
  // (but not on Haswell).
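  // Sandybridge's load/store data paths are only 128 bits wide, so a 256-bit
  // access occupies the port for two cycles; we model that by doubling the
  // cost. Haswell widens the ports to 256 bits, which is why AVX2 is used as
  // the proxy check below.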
  if (LT.second.getSizeInBits() > 128 && !ST->hasAVX2())
    Cost *= 2;

  return Cost;
}

unsigned X86TTI::getAddressComputationCost(Type *Ty, bool IsComplex) const {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where the
  // computation can more often be folded into the addressing mode. The
  // resulting extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  return TargetTransformInfo::getAddressComputationCost(Ty, IsComplex);
}