X86TargetTransformInfo.cpp revision e6dc376eece3e48d7316b788846dac90181d2ffe
//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "x86tti"
#include "X86.h"
#include "X86TargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/CostTable.h"
using namespace llvm;

// Declare the pass initialization routine locally as target-specific passes
// don't have a target-wide initialization entry point, and so we rely on the
// pass constructor initialization.
namespace llvm {
void initializeX86TTIPass(PassRegistry &);
}

namespace {

class X86TTI : public ImmutablePass, public TargetTransformInfo {
  const X86Subtarget *ST;
  const X86TargetLowering *TLI;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted and/or extracted from vectors.
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;

public:
  X86TTI() : ImmutablePass(ID), ST(0), TLI(0) {
    llvm_unreachable("This pass cannot be directly constructed");
  }

  X86TTI(const X86TargetMachine *TM)
      : ImmutablePass(ID), ST(TM->getSubtargetImpl()),
        TLI(TM->getTargetLowering()) {
    initializeX86TTIPass(*PassRegistry::getPassRegistry());
  }

  virtual void initializePass() {
    pushTTIStack(this);
  }

  virtual void finalizePass() {
    popTTIStack();
  }

  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    TargetTransformInfo::getAnalysisUsage(AU);
  }

  /// Pass identification.
  static char ID;

  /// Provide necessary pointer adjustments for the two base classes.
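  /// Because X86TTI inherits from both ImmutablePass and TargetTransformInfo,
  /// the two base-class subobjects live at different offsets within the
  /// object; a query against the TargetTransformInfo analysis group needs the
  /// address of the TTI subobject rather than that of the pass, which the
  /// cast below provides.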
  virtual void *getAdjustedAnalysisPointer(const void *ID) {
    if (ID == &TargetTransformInfo::ID)
      return (TargetTransformInfo*)this;
    return this;
  }

  /// \name Scalar TTI Implementations
  /// @{
  virtual PopcntSupportKind getPopcntSupport(unsigned TyWidth) const;

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  virtual unsigned getNumberOfRegisters(bool Vector) const;
  virtual unsigned getRegisterBitWidth(bool Vector) const;
  virtual unsigned getMaximumUnrollFactor() const;
  virtual unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                          OperandValueKind,
                                          OperandValueKind) const;
  virtual unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
                                  int Index, Type *SubTp) const;
  virtual unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
                                    Type *Src) const;
  virtual unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                      Type *CondTy) const;
  virtual unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
                                      unsigned Index) const;
  virtual unsigned getMemoryOpCost(unsigned Opcode, Type *Src,
                                   unsigned Alignment,
                                   unsigned AddressSpace) const;

  /// @}
};

} // end anonymous namespace

INITIALIZE_AG_PASS(X86TTI, TargetTransformInfo, "x86tti",
                   "X86 Target Transform Info", true, true, false)
char X86TTI::ID = 0;

ImmutablePass *
llvm::createX86TargetTransformInfoPass(const X86TargetMachine *TM) {
  return new X86TTI(TM);
}


//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

X86TTI::PopcntSupportKind X86TTI::getPopcntSupport(unsigned TyWidth) const {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  // instructions is inefficient. Once the problem is fixed, we should call
  // ST->hasSSE3() instead of ST->hasSSE41().
  return ST->hasSSE41() ? PSK_FastHardware : PSK_Software;
}

unsigned X86TTI::getNumberOfRegisters(bool Vector) const {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit())
    return 16;
  return 8;
}

unsigned X86TTI::getRegisterBitWidth(bool Vector) const {
  if (Vector) {
    if (ST->hasAVX()) return 256;
    if (ST->hasSSE1()) return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;
  return 32;
}

unsigned X86TTI::getMaximumUnrollFactor() const {
  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}

unsigned X86TTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                        OperandValueKind Op1Info,
                                        OperandValueKind Op2Info) const {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry<MVT> AVX2CostTable[] = {
    // Shifts on v4i64/v8i32 are legal on AVX2 even though we declare them as
    // custom; we do this so that we can detect the cases where the shift
    // amount is a scalar.
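    // Each entry below is the cost per legalized operation; the lookup code
    // further down multiplies it by LT.first, the number of parts the
    // original type is split into during legalization.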
    { ISD::SHL,  MVT::v4i32,      1 },
    { ISD::SRL,  MVT::v4i32,      1 },
    { ISD::SRA,  MVT::v4i32,      1 },
    { ISD::SHL,  MVT::v8i32,      1 },
    { ISD::SRL,  MVT::v8i32,      1 },
    { ISD::SRA,  MVT::v8i32,      1 },
    { ISD::SHL,  MVT::v2i64,      1 },
    { ISD::SRL,  MVT::v2i64,      1 },
    { ISD::SHL,  MVT::v4i64,      1 },
    { ISD::SRL,  MVT::v4i64,      1 },

    { ISD::SHL,  MVT::v32i8,     42 }, // cmpeqb sequence.
    { ISD::SHL,  MVT::v16i16, 16*10 }, // Scalarized.

    { ISD::SRL,  MVT::v32i8,  32*10 }, // Scalarized.
    { ISD::SRL,  MVT::v16i16,  8*10 }, // Scalarized.

    { ISD::SRA,  MVT::v32i8,  32*10 }, // Scalarized.
    { ISD::SRA,  MVT::v16i16, 16*10 }, // Scalarized.
    { ISD::SRA,  MVT::v4i64,   4*10 }, // Scalarized.

    // Vectorizing division is a bad idea. See the SSE2 table for more
    // comments.
    { ISD::SDIV, MVT::v32i8,  32*20 },
    { ISD::SDIV, MVT::v16i16, 16*20 },
    { ISD::SDIV, MVT::v8i32,   8*20 },
    { ISD::SDIV, MVT::v4i64,   4*20 },
    { ISD::UDIV, MVT::v32i8,  32*20 },
    { ISD::UDIV, MVT::v16i16, 16*20 },
    { ISD::UDIV, MVT::v8i32,   8*20 },
    { ISD::UDIV, MVT::v4i64,   4*20 },
  };

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    int Idx = CostTableLookup<MVT>(AVX2CostTable, array_lengthof(AVX2CostTable),
                                   ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2CostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT> SSE2UniformConstCostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // Constant splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i8,  1 }, // psllw.
    { ISD::SHL,  MVT::v8i16,  1 }, // psllw.
    { ISD::SHL,  MVT::v4i32,  1 }, // pslld.
    { ISD::SHL,  MVT::v2i64,  1 }, // psllq.

    { ISD::SRL,  MVT::v16i8,  1 }, // psrlw.
    { ISD::SRL,  MVT::v8i16,  1 }, // psrlw.
    { ISD::SRL,  MVT::v4i32,  1 }, // psrld.
    { ISD::SRL,  MVT::v2i64,  1 }, // psrlq.

    { ISD::SRA,  MVT::v16i8,  4 }, // psrlw, pand, pxor, psubb.
    { ISD::SRA,  MVT::v8i16,  1 }, // psraw.
    { ISD::SRA,  MVT::v4i32,  1 }, // psrad.
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2()) {
    int Idx = CostTableLookup<MVT>(SSE2UniformConstCostTable,
                                   array_lengthof(SSE2UniformConstCostTable),
                                   ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2UniformConstCostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT> SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // In some cases, where the shift amount is a scalar, we would be able to
    // generate better code. Unfortunately, when this is the case the value
    // (the splat) will get hoisted out of the loop, thereby making it
    // invisible to ISel. The cost model must return worst-case assumptions
    // because it is used for vectorization and we don't want to make
    // vectorized code worse than scalar code.
    { ISD::SHL,  MVT::v16i8,     30 }, // cmpeqb sequence.
    { ISD::SHL,  MVT::v8i16,   8*10 }, // Scalarized.
    { ISD::SHL,  MVT::v4i32,    2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,   2*10 }, // Scalarized.

    { ISD::SRL,  MVT::v16i8,  16*10 }, // Scalarized.
    { ISD::SRL,  MVT::v8i16,   8*10 }, // Scalarized.
    { ISD::SRL,  MVT::v4i32,   4*10 }, // Scalarized.
    { ISD::SRL,  MVT::v2i64,   2*10 }, // Scalarized.

    { ISD::SRA,  MVT::v16i8,  16*10 }, // Scalarized.
    { ISD::SRA,  MVT::v8i16,   8*10 }, // Scalarized.
    { ISD::SRA,  MVT::v4i32,   4*10 }, // Scalarized.
    { ISD::SRA,  MVT::v2i64,   2*10 }, // Scalarized.

    // It is not a good idea to vectorize division. We have to scalarize it
    // and in the process we will often end up having to spill regular
    // registers. The overhead of division is going to dominate most kernels
    // anyway, so try hard to prevent vectorization of division - it is
    // generally a bad idea. Assume somewhat arbitrarily that we have to be
    // able to hide "20 cycles" for each lane.
    { ISD::SDIV, MVT::v16i8,  16*20 },
    { ISD::SDIV, MVT::v8i16,   8*20 },
    { ISD::SDIV, MVT::v4i32,   4*20 },
    { ISD::SDIV, MVT::v2i64,   2*20 },
    { ISD::UDIV, MVT::v16i8,  16*20 },
    { ISD::UDIV, MVT::v8i16,   8*20 },
    { ISD::UDIV, MVT::v4i32,   4*20 },
    { ISD::UDIV, MVT::v2i64,   2*20 },
  };

  if (ST->hasSSE2()) {
    int Idx = CostTableLookup<MVT>(SSE2CostTable, array_lengthof(SSE2CostTable),
                                   ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2CostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT> AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,  MVT::v8i32,  4 },
    { ISD::SUB,  MVT::v8i32,  4 },
    { ISD::ADD,  MVT::v8i32,  4 },
    { ISD::SUB,  MVT::v4i64,  4 },
    { ISD::ADD,  MVT::v4i64,  4 },
    // A v4i64 multiply is custom lowered as two split v2i64 vectors that are
    // then lowered as a series of long multiplies (3), shifts (4) and
    // adds (2). Because we believe v4i64 to be a legal type, we must also
    // include the split factor of two in the cost table. Therefore, the cost
    // here is 18 instead of 9.
    { ISD::MUL,  MVT::v4i64, 18 },
  };

  // Look for AVX1 lowering tricks.
  if (ST->hasAVX() && !ST->hasAVX2()) {
    int Idx = CostTableLookup<MVT>(AVX1CostTable, array_lengthof(AVX1CostTable),
                                   ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX1CostTable[Idx].Cost;
  }

  // Custom lowering of vectors.
  static const CostTblEntry<MVT> CustomLowered[] = {
    // A v2i64/v4i64 multiply is custom lowered as a series of long
    // multiplies (3), shifts (4) and adds (2).
    { ISD::MUL,  MVT::v2i64,  9 },
    { ISD::MUL,  MVT::v4i64,  9 },
  };
  int Idx = CostTableLookup<MVT>(CustomLowered, array_lengthof(CustomLowered),
                                 ISD, LT.second);
  if (Idx != -1)
    return LT.first * CustomLowered[Idx].Cost;

  // Special lowering of v4i32 mul on SSE2/SSE3: Lower v4i32 mul as 2x shuffle,
  // 2x pmuludq, 2x shuffle.
  if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() &&
      !ST->hasSSE41())
    return 6;

  // Fall back to the default implementation.
  return TargetTransformInfo::getArithmeticInstrCost(Opcode, Ty, Op1Info,
                                                     Op2Info);
}

unsigned X86TTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
                                Type *SubTp) const {
  // We only estimate the cost of reverse shuffles.
  if (Kind != SK_Reverse)
    return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);
  unsigned Cost = 1;
  if (LT.second.getSizeInBits() > 128)
    Cost = 3; // Extract + insert + copy.

  // Multiply by the number of parts.
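  // For example, reversing <16 x float> on an SSE-only subtarget legalizes to
  // four v4f32 parts (LT.first == 4) at cost 1 each, for a total of 4; on AVX
  // it legalizes to two v8f32 halves, each wider than 128 bits and therefore
  // costing 3, for a total of 6.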
  return Cost * LT.first;
}

unsigned X86TTI::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  std::pair<unsigned, MVT> LTSrc = TLI->getTypeLegalizationCost(Src);
  std::pair<unsigned, MVT> LTDest = TLI->getTypeLegalizationCost(Dst);

  static const TypeConversionCostTblEntry<MVT> SSE2ConvTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels, and making sure that, once
    // legalization is taken into account, the throughput is overestimated.
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64,  2*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32,  4*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16,  8*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64,  2*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32,  4*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16,  8*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    // There are faster sequences for float conversions.
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8,  8 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8,  8 },
  };

  if (ST->hasSSE2() && !ST->hasAVX()) {
    int Idx = ConvertCostTableLookup<MVT>(SSE2ConvTbl,
                                          array_lengthof(SSE2ConvTbl),
                                          ISD, LTDest.second, LTSrc.second);
    if (Idx != -1)
      return LTSrc.first * SSE2ConvTbl[Idx].Cost;
  }

  EVT SrcTy = TLI->getValueType(Src);
  EVT DstTy = TLI->getValueType(Dst);

  // The function getSimpleVT only handles simple value types.
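  // (A non-power-of-two vector such as <5 x i32> has no simple MVT, so such
  // casts fall through to the target-independent estimate below.)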
  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);

  static const TypeConversionCostTblEntry<MVT> AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
    { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64, 1 },
    { ISD::TRUNCATE,    MVT::v8i16, MVT::v8i32, 1 },

    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i1,  8 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i8,  8 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i16, 5 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i8,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i16, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i8,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i16, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i32, 1 },

    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i1,  6 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i8,  5 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i16, 5 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 9 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i1,  7 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i8,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32, 6 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i1,  7 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i8,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i32, 6 },

    { ISD::FP_TO_SINT,  MVT::v8i8,  MVT::v8f32, 1 },
    { ISD::FP_TO_SINT,  MVT::v4i8,  MVT::v4f32, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1,  6 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1,  9 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1,  8 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8,  6 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 6 },
    { ISD::TRUNCATE,    MVT::v8i32, MVT::v8i64, 3 },
  };

  if (ST->hasAVX()) {
    int Idx = ConvertCostTableLookup<MVT>(AVXConversionTbl,
                                          array_lengthof(AVXConversionTbl),
                                          ISD, DstTy.getSimpleVT(),
                                          SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVXConversionTbl[Idx].Cost;
  }

  return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
}

unsigned X86TTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                    Type *CondTy) const {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry<MVT> SSE42CostTbl[] = {
    { ISD::SETCC, MVT::v2f64, 1 },
    { ISD::SETCC, MVT::v4f32, 1 },
    { ISD::SETCC, MVT::v2i64, 1 },
    { ISD::SETCC, MVT::v4i32, 1 },
    { ISD::SETCC, MVT::v8i16, 1 },
    { ISD::SETCC, MVT::v16i8, 1 },
  };

  static const CostTblEntry<MVT> AVX1CostTbl[] = {
    { ISD::SETCC, MVT::v4f64,  1 },
    { ISD::SETCC, MVT::v8f32,  1 },
    // AVX1 does not support 8-wide integer compare.
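    // A 256-bit integer compare is split into two 128-bit compares plus an
    // extract and an insert, hence a cost of 4 - the same "two ops + 1
    // extract + 1 insert" accounting used in the AVX1 arithmetic table above.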
    { ISD::SETCC, MVT::v4i64,  4 },
    { ISD::SETCC, MVT::v8i32,  4 },
    { ISD::SETCC, MVT::v16i16, 4 },
    { ISD::SETCC, MVT::v32i8,  4 },
  };

  static const CostTblEntry<MVT> AVX2CostTbl[] = {
    { ISD::SETCC, MVT::v4i64,  1 },
    { ISD::SETCC, MVT::v8i32,  1 },
    { ISD::SETCC, MVT::v16i16, 1 },
    { ISD::SETCC, MVT::v32i8,  1 },
  };

  if (ST->hasAVX2()) {
    int Idx = CostTableLookup<MVT>(AVX2CostTbl, array_lengthof(AVX2CostTbl),
                                   ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX2CostTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = CostTableLookup<MVT>(AVX1CostTbl, array_lengthof(AVX1CostTbl),
                                   ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX1CostTbl[Idx].Cost;
  }

  if (ST->hasSSE42()) {
    int Idx = CostTableLookup<MVT>(SSE42CostTbl, array_lengthof(SSE42CostTbl),
                                   ISD, MTy);
    if (Idx != -1)
      return LT.first * SSE42CostTbl[Idx].Cost;
  }

  return TargetTransformInfo::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}

unsigned X86TTI::getVectorInstrCost(unsigned Opcode, Type *Val,
                                    unsigned Index) const {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // Floating point scalars are already located at index #0.
    if (Val->getScalarType()->isFloatingPointTy() && Index == 0)
      return 0;
  }

  return TargetTransformInfo::getVectorInstrCost(Opcode, Val, Index);
}

unsigned X86TTI::getScalarizationOverhead(Type *Ty, bool Insert,
                                          bool Extract) const {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  unsigned Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += TopTTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += TopTTI->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}

unsigned X86TTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                 unsigned AddressSpace) const {
  // Handle non-power-of-two vectors such as <3 x float>.
  if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
    unsigned NumElem = VTy->getVectorNumElements();

    // Handle a few common cases:
    // <3 x float>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
      // Cost = 64 bit store + extract + 32 bit store.
      return 3;

    // <3 x double>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
      // Cost = 128 bit store + unpack + 64 bit store.
      return 3;

    // Assume that all other non-power-of-two numbers are scalarized.
    if (!isPowerOf2_32(NumElem)) {
      unsigned Cost = TargetTransformInfo::getMemoryOpCost(Opcode,
                                                           VTy->getScalarType(),
                                                           Alignment,
                                                           AddressSpace);
      unsigned SplitCost = getScalarizationOverhead(Src,
                                                    Opcode == Instruction::Load,
                                                    Opcode == Instruction::Store);
      return NumElem * Cost + SplitCost;
    }
  }

  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  // Each load/store unit costs 1.
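  // For example, a <16 x float> load on an SSE subtarget legalizes to four
  // v4f32 loads, so LT.first == 4 and the base cost is 4.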
  unsigned Cost = LT.first * 1;

  // On Sandybridge 256-bit load/stores are double pumped
  // (but not on Haswell).
  if (LT.second.getSizeInBits() > 128 && !ST->hasAVX2())
    Cost *= 2;

  return Cost;
}
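// Usage sketch (illustrative, not part of this file): a client pass obtains
// the TargetTransformInfo analysis group, which resolves to this pass when
// the target is X86 (via the pushTTIStack call in initializePass above), and
// queries costs through it. The surrounding pass and the Ctx context below
// are hypothetical.
//
//   const TargetTransformInfo &TTI = getAnalysis<TargetTransformInfo>();
//   Type *VecTy = VectorType::get(Type::getInt32Ty(Ctx), 4);
//   unsigned MulCost = TTI.getArithmeticInstrCost(Instruction::Mul, VecTy);
//
// On an SSE2-only subtarget this query reaches the "v4i32 mul as 2x shuffle,
// 2x pmuludq, 2x shuffle" special case above and returns 6.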