X86ISelLowering.cpp revision c735c1c2aed2cbaeb61296f4269535b5d13d8b0a
//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "x86-isel"
#include "X86ISelLowering.h"
#include "Utils/X86ShuffleDecode.h"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/VariadicFunction.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include <bitset>
#include <cctype>
using namespace llvm;

STATISTIC(NumTailCalls, "Number of tail calls");

// Forward declarations.
static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
                       SDValue V2);

/// Generate a DAG to grab 128 bits from a vector > 128 bits.  This
/// sets things up to match to an AVX VEXTRACTF128 instruction or a
/// simple subregister reference.  Idx is an index in the 128 bits we
/// want.  It need not be aligned to a 128-bit boundary.  That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  EVT VT = Vec.getValueType();
  assert(VT.is256BitVector() && "Unexpected vector size!");
  EVT ElVT = VT.getVectorElementType();
  unsigned Factor = VT.getSizeInBits() / 128;
  EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
                                  VT.getVectorNumElements() / Factor);

  // Extract from UNDEF is UNDEF.
  if (Vec.getOpcode() == ISD::UNDEF)
    return DAG.getUNDEF(ResultVT);

  // Extract the relevant 128 bits.  Generate an EXTRACT_SUBVECTOR
  // we can match to VEXTRACTF128.
  unsigned ElemsPerChunk = 128 / ElVT.getSizeInBits();

  // This is the index of the first element of the 128-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / 128)
                               * ElemsPerChunk);
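  // Editor's note (illustrative example, not in the original source): for a
  // v8i32 source with IdxVal = 5, (5 * 32) / 128 = 1 selects the upper
  // 128-bit chunk, and 1 * ElemsPerChunk (4) = 4, the index of that chunk's
  // first element.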
  // If the input is a buildvector, just emit a smaller one.
  if (Vec.getOpcode() == ISD::BUILD_VECTOR)
    return DAG.getNode(ISD::BUILD_VECTOR, dl, ResultVT,
                       Vec->op_begin() + NormalizedIdxVal, ElemsPerChunk);

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  SDValue Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec,
                               VecIdx);

  return Result;
}

/// Generate a DAG to put 128 bits into a vector > 128 bits.  This
/// sets things up to match to an AVX VINSERTF128 instruction or a
/// simple superregister reference.  Idx is an index in the 128 bits
/// we want.  It need not be aligned to a 128-bit boundary.  That makes
/// lowering INSERT_VECTOR_ELT operations easier.
static SDValue Insert128BitVector(SDValue Result, SDValue Vec,
                                  unsigned IdxVal, SelectionDAG &DAG,
                                  SDLoc dl) {
  // Inserting UNDEF is a no-op; just return Result.
  if (Vec.getOpcode() == ISD::UNDEF)
    return Result;

  EVT VT = Vec.getValueType();
  assert(VT.is128BitVector() && "Unexpected vector size!");

  EVT ElVT = VT.getVectorElementType();
  EVT ResultVT = Result.getValueType();

  // Insert the relevant 128 bits.
  unsigned ElemsPerChunk = 128 / ElVT.getSizeInBits();

  // This is the index of the first element of the 128-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / 128)
                               * ElemsPerChunk);

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec,
                     VecIdx);
}

/// Concat two 128-bit vectors into a 256-bit vector using VINSERTF128
/// instructions. This is used because creating CONCAT_VECTOR nodes of
/// BUILD_VECTORS returns a larger BUILD_VECTOR while we're trying to lower
/// large BUILD_VECTORS.
static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert128BitVector(V, V2, NumElems / 2, DAG, dl);
}

static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  bool is64Bit = Subtarget->is64Bit();

  if (Subtarget->isTargetEnvMacho()) {
    if (is64Bit)
      return new X86_64MachoTargetObjectFile();
    return new TargetLoweringObjectFileMachO();
  }

  if (Subtarget->isTargetLinux())
    return new X86LinuxTargetObjectFile();
  if (Subtarget->isTargetELF())
    return new TargetLoweringObjectFileELF();
  if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho())
    return new TargetLoweringObjectFileCOFF();
  llvm_unreachable("unknown subtarget type");
}

X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
  : TargetLowering(TM, createTLOF(TM)) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  TD = getDataLayout();

  resetOperationActions();
}

void X86TargetLowering::resetOperationActions() {
  const TargetMachine &TM = getTargetMachine();
  static bool FirstTimeThrough = true;

  // If none of the target options have changed, then we don't need to reset
  // the operation actions.
  if (!FirstTimeThrough && TO == TM.Options) return;

  if (!FirstTimeThrough) {
    // Reinitialize the actions.
    initActions();
  }
  FirstTimeThrough = false;

  TO = TM.Options;

  // Set up the TargetLowering object.
  static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };

  // X86 is weird; it always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);
  // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // For 64-bit, since we have so many registers, use the ILP scheduler; for
  // 32-bit code, use the register-pressure-specific scheduling.
  // For Atom, always use ILP scheduling.
  if (Subtarget->isAtom())
    setSchedulingPreference(Sched::ILP);
  else if (Subtarget->is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
  const X86RegisterInfo *RegInfo =
    static_cast<const X86RegisterInfo*>(TM.getRegisterInfo());
  setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());

  // Bypass expensive divides on Atom when compiling with O2.
  if (Subtarget->hasSlowDivide() && TM.getOptLevel() >= CodeGenOpt::Default) {
    addBypassSlowDiv(32, 8);
    if (Subtarget->is64Bit())
      addBypassSlowDiv(64, 16);
  }

  if (Subtarget->isTargetWindows() && !Subtarget->isTargetCygMing()) {
    // Set up the Windows compiler runtime calls.
    setLibcallName(RTLIB::SDIV_I64, "_alldiv");
    setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
    setLibcallName(RTLIB::SREM_I64, "_allrem");
    setLibcallName(RTLIB::UREM_I64, "_aullrem");
    setLibcallName(RTLIB::MUL_I64, "_allmul");
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);

    // The _ftol2 runtime function has an unusual calling conv, which
    // is modeled by a special pseudo-instruction.
    setLibcallName(RTLIB::FPTOUINT_F64_I64, 0);
    setLibcallName(RTLIB::FPTOUINT_F32_I64, 0);
    setLibcallName(RTLIB::FPTOUINT_F64_I32, 0);
    setLibcallName(RTLIB::FPTOUINT_F32_I32, 0);
  }

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetMingw()) {
    // The MS runtime is weird: it exports _setjmp, but plain longjmp
    // (no underscore)!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, &X86::GR8RegClass);
  addRegisterClass(MVT::i16, &X86::GR16RegClass);
  addRegisterClass(MVT::i32, &X86::GR32RegClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, &X86::GR64RegClass);

  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8, Expand);
  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  // SETOEQ and SETUNE require checking two conditions.
  setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);
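  // Editor's note (background, not in the original source): ucomiss/ucomisd
  // report unordered operands through PF, so ordered-equal is ZF==1 && PF==0
  // and unordered-not-equal is ZF==0 || PF==1; neither maps to a single x86
  // condition code, hence the Expand above.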
  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  } else if (!TM.Options.UseSoftFloat) {
    // We have an algorithm for SSE2->double, and we turn this into a
    // 64-bit FILD followed by conditional FADD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);

  if (!TM.Options.UseSoftFloat) {
    // SSE has no i16-to-fp conversion, only i32.
    if (X86ScalarSSEf32) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
      // f32 and f64 cases are Legal, the f80 case is not.
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Promote);
  }

  // In 32-bit mode these are custom lowered.  In 64-bit mode f32 and f64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    // f32 and f64 cases are Legal, the f80 case is not.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else if (!TM.Options.UseSoftFloat) {
    // Since AVX is a superset of SSE3, only check for SSE here.
    if (Subtarget->hasSSE1() && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the
      // legalizer.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64; without
      // SSE, we're stuck with a fistpll.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  }

  if (isTargetFTOL()) {
    // Use the _ftol2 runtime function, which has a pseudo-instruction
    // to handle its weird calling convention.
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  }

  // TODO: when we have SSE, these could be more efficient by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::BITCAST, MVT::f64, Expand);
      // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    }
  }

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // Add/Sub overflow ops with MVT::Glue are lowered to EFLAGS dependences.
    setOperationAction(ISD::ADDC, VT, Custom);
    setOperationAction(ISD::ADDE, VT, Custom);
    setOperationAction(ISD::SUBC, VT, Custom);
    setOperationAction(ISD::SUBE, VT, Custom);
  }
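  // Editor's note (illustrative, not in the original source): because SDIV
  // and SREM both expand to the two-result divide, "x / y" and "x % y" in
  // the same block CSE into a single idiv, with the quotient landing in
  // AL/AX/EAX/RAX and the remainder in AH/DX/EDX/RDX.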
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f80, Expand);
  setOperationAction(ISD::BR_CC, MVT::i8, Expand);
  setOperationAction(ISD::BR_CC, MVT::i16, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::FP_ROUND_INREG, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);
  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // Promote the i8 variants and force them on up to i32 which has a shorter
  // encoding.
  setOperationAction(ISD::CTTZ, MVT::i8, Promote);
  AddPromotedToType(ISD::CTTZ, MVT::i8, MVT::i32);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i8, Promote);
  AddPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8, MVT::i32);
  if (Subtarget->hasBMI()) {
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTTZ, MVT::i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::i32, Custom);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ, MVT::i64, Custom);
  }

  if (Subtarget->hasLZCNT()) {
    // When promoting the i8 variants, force them to i32 for a shorter
    // encoding.
    setOperationAction(ISD::CTLZ, MVT::i8, Promote);
    AddPromotedToType(ISD::CTLZ, MVT::i8, MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Promote);
    AddPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8, MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTLZ, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ, MVT::i32, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::CTLZ, MVT::i64, Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
    }
  }

  if (Subtarget->hasPOPCNT()) {
    setOperationAction(ISD::CTPOP, MVT::i8, Promote);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i8, Expand);
    setOperationAction(ISD::CTPOP, MVT::i16, Expand);
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
  setOperationAction(ISD::BSWAP, MVT::i16, Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT, MVT::i8, Custom);
  setOperationAction(ISD::SELECT, MVT::i16, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::f80, Custom);
  setOperationAction(ISD::SETCC, MVT::i8, Custom);
  setOperationAction(ISD::SETCC, MVT::i16, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::SETCC, MVT::f80, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT, MVT::i64, Custom);
    setOperationAction(ISD::SETCC, MVT::i64, Custom);
  }
  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
  // NOTE: EH_SJLJ_SETJMP/_LONGJMP are NOT intended to support SjLj exception
  // handling; they are a light-weight setjmp/longjmp replacement to support
  // continuations, user-level threading, and so on.  As a result, no other
  // SjLj exception interfaces are implemented; please don't build your own
  // exception handling on top of them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // Darwin ABI issue.
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::ExternalSymbol, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
    setOperationAction(ISD::JumpTable, MVT::i64, Custom);
    setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64, Custom);
    setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86).
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  }

  if (Subtarget->hasSSE1())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // Expand certain atomics.
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }

  if (!Subtarget->is64Bit()) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i64, Custom);
  }

  if (Subtarget->hasCmpxchg16b()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i128, Custom);
  }

  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() &&
      !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing()) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }

  if (Subtarget->is64Bit()) {
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  } else {
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  }

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ?
                       MVT::i64 : MVT::i32, Custom);
  else if (TM.Options.EnableSegmentedStacks)
    setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ?
                       MVT::i64 : MVT::i32, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ?
                       MVT::i64 : MVT::i32, Expand);

  if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::FR64RegClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
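    // Editor's note (not in the original source): SSE has no fabs/fneg
    // instructions, so the Custom lowering above emits ANDPS/ANDPD with a
    // constant-pool mask that clears the sign bit for FABS, and XORPS/XORPD
    // with a sign-bit mask for FNEG.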
    // Lower this to FGETSIGNx86 plus an AND.
    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    addLegalFPImmediate(APFloat(+0.0));  // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
  } else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    }
  } else if (!TM.Options.UseSoftFloat) {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);
    addRegisterClass(MVT::f32, &X86::RFP32RegClass);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);
    setOperationAction(ISD::UNDEF, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FSIN, MVT::f32, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f32, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
    }
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // We don't support FMA.
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);

  // Long double always uses X87.
  if (!TM.Options.UseSoftFloat) {
    addRegisterClass(MVT::f80, &X86::RFP80RegClass);
    setOperationAction(ISD::UNDEF, MVT::f80, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    {
      APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended);
      addLegalFPImmediate(TmpFlt);  // FLD0
      TmpFlt.changeSign();
      addLegalFPImmediate(TmpFlt);  // FLD0/FCHS

      bool ignored;
      APFloat TmpFlt2(+1.0);
      TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
                      &ignored);
      addLegalFPImmediate(TmpFlt2);  // FLD1
      TmpFlt2.changeSign();
      addLegalFPImmediate(TmpFlt2);  // FLD1/FCHS
    }

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f80, Expand);
      setOperationAction(ISD::FCOS, MVT::f80, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
    }

    setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
    setOperationAction(ISD::FCEIL, MVT::f80, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
    setOperationAction(ISD::FRINT, MVT::f80, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
    setOperationAction(ISD::FMA, MVT::f80, Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);

  setOperationAction(ISD::FLOG, MVT::f80, Expand);
  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
  setOperationAction(ISD::FEXP, MVT::f80, Expand);
  setOperationAction(ISD::FEXP2, MVT::f80, Expand);
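  // Editor's note (not in the original source): x87 can materialize only a
  // handful of constants directly (FLDZ/FLD1, negated via FCHS), which is
  // why exactly +-0.0 and +-1.0 are legal f80 immediates above; the expanded
  // f80 rounding and exponential ops typically end up as libm calls such as
  // floorl and expl.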
  // First set operation action for all vector types to either promote
  // (for widening) or expand (for scalarization). Then we will selectively
  // turn on ones that can be effectively codegen'd.
  for (int i = MVT::FIRST_VECTOR_VALUETYPE;
       i <= MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT VT = (MVT::SimpleValueType)i;
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::LOAD, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::INSERT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FSINCOS, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FPOWI, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
    setOperationAction(ISD::TRUNCATE, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
    setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
    setOperationAction(ISD::ANY_EXTEND, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    for (int InnerVT = MVT::FIRST_VECTOR_VALUETYPE;
         InnerVT <= MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
      setTruncStoreAction(VT, (MVT::SimpleValueType)InnerVT, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, Expand);
  }

  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
  // with -msoft-float, disable use of MMX as well.
  if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) {
    addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
    // No operations on x86mmx supported, everything uses intrinsics.
  }

  // MMX-sized vectors (other than x86mmx) are expected to be expanded
  // into smaller operations.
  setOperationAction(ISD::MULHS, MVT::v8i8, Expand);
  setOperationAction(ISD::MULHS, MVT::v4i16, Expand);
  setOperationAction(ISD::MULHS, MVT::v2i32, Expand);
  setOperationAction(ISD::MULHS, MVT::v1i64, Expand);
  setOperationAction(ISD::AND, MVT::v8i8, Expand);
  setOperationAction(ISD::AND, MVT::v4i16, Expand);
  setOperationAction(ISD::AND, MVT::v2i32, Expand);
  setOperationAction(ISD::AND, MVT::v1i64, Expand);
  setOperationAction(ISD::OR, MVT::v8i8, Expand);
  setOperationAction(ISD::OR, MVT::v4i16, Expand);
  setOperationAction(ISD::OR, MVT::v2i32, Expand);
  setOperationAction(ISD::OR, MVT::v1i64, Expand);
  setOperationAction(ISD::XOR, MVT::v8i8, Expand);
  setOperationAction(ISD::XOR, MVT::v4i16, Expand);
  setOperationAction(ISD::XOR, MVT::v2i32, Expand);
  setOperationAction(ISD::XOR, MVT::v1i64, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Expand);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand);
  setOperationAction(ISD::SELECT, MVT::v8i8, Expand);
  setOperationAction(ISD::SELECT, MVT::v4i16, Expand);
  setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
  setOperationAction(ISD::SELECT, MVT::v1i64, Expand);
  setOperationAction(ISD::BITCAST, MVT::v8i8, Expand);
  setOperationAction(ISD::BITCAST, MVT::v4i16, Expand);
  setOperationAction(ISD::BITCAST, MVT::v2i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::v1i64, Expand);

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, &X86::VR128RegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
    setOperationAction(ISD::FABS, MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
  }
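  // Editor's note (not in the original source): with bare SSE1, v4f32 is the
  // only 128-bit vector type that gets a register class, mapping FADD/FSUB/
  // FMUL/FDIV/FSQRT onto addps, subps, mulps, divps and sqrtps; every other
  // vector type waits for SSE2 below.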
  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, &X86::VR128RegClass);

    // FIXME: Unfortunately -soft-float and -no-implicit-float mean XMM
    // registers cannot be used even for integer operations.
    addRegisterClass(MVT::v16i8, &X86::VR128RegClass);
    addRegisterClass(MVT::v8i16, &X86::VR128RegClass);
    addRegisterClass(MVT::v4i32, &X86::VR128RegClass);
    addRegisterClass(MVT::v2i64, &X86::VR128RegClass);

    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::ADD, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
    setOperationAction(ISD::FABS, MVT::v2f64, Custom);

    setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i32, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;
      // Do not attempt to custom lower non-power-of-2 vectors.
      if (!isPowerOf2_32(VT.getVectorNumElements()))
        continue;
      // Do not attempt to custom lower non-128-bit vectors.
      if (!VT.is128BitVector())
        continue;
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }

    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // Do not attempt to promote non-128-bit vectors.
      if (!VT.is128BitVector())
        continue;

      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v2i64);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v2i64);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v2i64);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v2i64);
    }
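    // Editor's note (not in the original source): promoting the bitwise ops
    // and loads to v2i64 works because these operations are bit-parallel;
    // one set of PAND/POR/PXOR/MOVDQA patterns then covers all four 128-bit
    // integer vector types.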
    setTruncStoreAction(MVT::f64, MVT::f32, Expand);

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);

    setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    // As there is no 64-bit GPR available, we need to build a special custom
    // sequence to convert from v2i32 to v2f32.
    if (!Subtarget->is64Bit())
      setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);

    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);

    setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, Legal);
  }

  if (Subtarget->hasSSE41()) {
    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FRINT, MVT::f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);

    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);
    setOperationAction(ISD::VSELECT, MVT::v2i64, Legal);
    setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
    setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);
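    // Editor's note (not in the original source): VSELECT becomes Legal here
    // because SSE4.1 adds the variable blend instructions blendvps, blendvpd
    // and pblendvb; the rounding ops above map onto roundss/roundsd and
    // roundps/roundpd, whose immediate selects the rounding mode.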
    // i8 and i16 vectors are custom, because the source register and
    // memory operand types are not the same width.  f32 vectors are
    // custom since the immediate controlling the insert encodes additional
    // information.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);

    // FIXME: these should be Legal, but that's only for the case where
    // the index is constant.  For now custom expand to deal with that.
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
  }

  if (Subtarget->hasSSE2()) {
    setOperationAction(ISD::SRL, MVT::v8i16, Custom);
    setOperationAction(ISD::SRL, MVT::v16i8, Custom);

    setOperationAction(ISD::SHL, MVT::v8i16, Custom);
    setOperationAction(ISD::SHL, MVT::v16i8, Custom);

    setOperationAction(ISD::SRA, MVT::v8i16, Custom);
    setOperationAction(ISD::SRA, MVT::v16i8, Custom);

    // In the customized shift lowering, the legal cases in AVX2 will be
    // recognized.
    setOperationAction(ISD::SRL, MVT::v2i64, Custom);
    setOperationAction(ISD::SRL, MVT::v4i32, Custom);

    setOperationAction(ISD::SHL, MVT::v2i64, Custom);
    setOperationAction(ISD::SHL, MVT::v4i32, Custom);

    setOperationAction(ISD::SRA, MVT::v4i32, Custom);

    setOperationAction(ISD::SDIV, MVT::v8i16, Custom);
    setOperationAction(ISD::SDIV, MVT::v4i32, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasFp256()) {
    addRegisterClass(MVT::v32i8, &X86::VR256RegClass);
    addRegisterClass(MVT::v16i16, &X86::VR256RegClass);
    addRegisterClass(MVT::v8i32, &X86::VR256RegClass);
    addRegisterClass(MVT::v8f32, &X86::VR256RegClass);
    addRegisterClass(MVT::v4i64, &X86::VR256RegClass);
    addRegisterClass(MVT::v4f64, &X86::VR256RegClass);

    setOperationAction(ISD::LOAD, MVT::v8f32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i64, Legal);

    setOperationAction(ISD::FADD, MVT::v8f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v8f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v8f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v8f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v8f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v8f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v8f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v8f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v8f32, Custom);
    setOperationAction(ISD::FABS, MVT::v8f32, Custom);

    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f64, Custom);
    setOperationAction(ISD::FABS, MVT::v4f64, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);

    setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, Legal);

    setOperationAction(ISD::SRL, MVT::v16i16, Custom);
    setOperationAction(ISD::SRL, MVT::v32i8, Custom);

    setOperationAction(ISD::SHL, MVT::v16i16, Custom);
    setOperationAction(ISD::SHL, MVT::v32i8, Custom);

    setOperationAction(ISD::SRA, MVT::v16i16, Custom);
    setOperationAction(ISD::SRA, MVT::v32i8, Custom);

    setOperationAction(ISD::SDIV, MVT::v16i16, Custom);

    setOperationAction(ISD::SETCC, MVT::v32i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i32, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i64, Custom);

    setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
    setOperationAction(ISD::SELECT, MVT::v8f32, Custom);

    setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);
    setOperationAction(ISD::VSELECT, MVT::v4i64, Legal);
    setOperationAction(ISD::VSELECT, MVT::v8i32, Legal);
    setOperationAction(ISD::VSELECT, MVT::v8f32, Legal);

    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v8i32, Custom);

    if (Subtarget->hasFMA() || Subtarget->hasFMA4()) {
      setOperationAction(ISD::FMA, MVT::v8f32, Legal);
      setOperationAction(ISD::FMA, MVT::v4f64, Legal);
      setOperationAction(ISD::FMA, MVT::v4f32, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::f32, Legal);
      setOperationAction(ISD::FMA, MVT::f64, Legal);
    }

    if (Subtarget->hasInt256()) {
      setOperationAction(ISD::ADD, MVT::v4i64, Legal);
      setOperationAction(ISD::ADD, MVT::v8i32, Legal);
      setOperationAction(ISD::ADD, MVT::v16i16, Legal);
      setOperationAction(ISD::ADD, MVT::v32i8, Legal);

      setOperationAction(ISD::SUB, MVT::v4i64, Legal);
      setOperationAction(ISD::SUB, MVT::v8i32, Legal);
      setOperationAction(ISD::SUB, MVT::v16i16, Legal);
      setOperationAction(ISD::SUB, MVT::v32i8, Legal);

      setOperationAction(ISD::MUL, MVT::v4i64, Custom);
      setOperationAction(ISD::MUL, MVT::v8i32, Legal);
      setOperationAction(ISD::MUL, MVT::v16i16, Legal);
      // Don't lower v32i8 because there is no 128-bit byte mul.

      setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);

      setOperationAction(ISD::SDIV, MVT::v8i32, Custom);
    } else {
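      // Editor's note (not in the original source): without AVX2 there are
      // no 256-bit integer ALU instructions, so the ops below are custom-
      // lowered by splitting each operation into two 128-bit halves.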
      setOperationAction(ISD::ADD, MVT::v4i64, Custom);
      setOperationAction(ISD::ADD, MVT::v8i32, Custom);
      setOperationAction(ISD::ADD, MVT::v16i16, Custom);
      setOperationAction(ISD::ADD, MVT::v32i8, Custom);

      setOperationAction(ISD::SUB, MVT::v4i64, Custom);
      setOperationAction(ISD::SUB, MVT::v8i32, Custom);
      setOperationAction(ISD::SUB, MVT::v16i16, Custom);
      setOperationAction(ISD::SUB, MVT::v32i8, Custom);

      setOperationAction(ISD::MUL, MVT::v4i64, Custom);
      setOperationAction(ISD::MUL, MVT::v8i32, Custom);
      setOperationAction(ISD::MUL, MVT::v16i16, Custom);
      // Don't lower v32i8 because there is no 128-bit byte mul.
    }

    // In the customized shift lowering, the legal cases in AVX2 will be
    // recognized.
    setOperationAction(ISD::SRL, MVT::v4i64, Custom);
    setOperationAction(ISD::SRL, MVT::v8i32, Custom);

    setOperationAction(ISD::SHL, MVT::v4i64, Custom);
    setOperationAction(ISD::SHL, MVT::v8i32, Custom);

    setOperationAction(ISD::SRA, MVT::v8i32, Custom);

    // Custom lower several nodes for 256-bit types.
    for (int i = MVT::FIRST_VECTOR_VALUETYPE;
         i <= MVT::LAST_VECTOR_VALUETYPE; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // Extract subvector is special because the value type
      // (result) is 128-bit but the source is 256-bit wide.
      if (VT.is128BitVector())
        setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

      // Do not attempt to custom lower other non-256-bit vectors.
      if (!VT.is256BitVector())
        continue;

      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
      setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
    }

    // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
    for (int i = MVT::v32i8; i != MVT::v4i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // Do not attempt to promote non-256-bit vectors.
      if (!VT.is256BitVector())
        continue;

      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v4i64);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v4i64);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v4i64);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v4i64);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v4i64);
    }
  }

  // SIGN_EXTEND_INREGs are evaluated by the extend type. Handle the expansion
  // of this type with custom code.
  for (int VT = MVT::FIRST_VECTOR_VALUETYPE;
       VT != MVT::LAST_VECTOR_VALUETYPE; VT++) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, (MVT::SimpleValueType)VT,
                       Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
  // handle type legalization for these operations here.
  //
  // FIXME: We really should do custom legalization for addition and
  // subtraction on x86-32 once PR3203 is fixed.  We really can't do much
  // better than generic legalization for 64-bit multiplication-with-overflow,
  // though.
  for (unsigned i = 0, e = 3+Subtarget->is64Bit(); i != e; ++i) {
    // Add/Sub/Mul with overflow operations are custom lowered.
    MVT VT = IntVTs[i];
    setOperationAction(ISD::SADDO, VT, Custom);
    setOperationAction(ISD::UADDO, VT, Custom);
    setOperationAction(ISD::SSUBO, VT, Custom);
    setOperationAction(ISD::USUBO, VT, Custom);
    setOperationAction(ISD::SMULO, VT, Custom);
    setOperationAction(ISD::UMULO, VT, Custom);
  }
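  // Editor's note (illustrative, not in the original source): with this
  // Custom lowering, llvm.sadd.with.overflow.i32 becomes a single flag-
  // setting add whose overflow bit is read with SETO/JO, instead of the
  // generic sign-comparison expansion.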
1359}
1360
1361EVT X86TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
1362  if (!VT.isVector()) return MVT::i8;
1363  return VT.changeVectorElementTypeToInteger();
1364}
1365
1366/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
1367/// the desired ByVal argument alignment.
1368static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
1369  if (MaxAlign == 16)
1370    return;
1371  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1372    if (VTy->getBitWidth() == 128)
1373      MaxAlign = 16;
1374  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1375    unsigned EltAlign = 0;
1376    getMaxByValAlign(ATy->getElementType(), EltAlign);
1377    if (EltAlign > MaxAlign)
1378      MaxAlign = EltAlign;
1379  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1380    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1381      unsigned EltAlign = 0;
1382      getMaxByValAlign(STy->getElementType(i), EltAlign);
1383      if (EltAlign > MaxAlign)
1384        MaxAlign = EltAlign;
1385      if (MaxAlign == 16)
1386        break;
1387    }
1388  }
1389}
1390
1391/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
1392/// function arguments in the caller parameter area. For X86, aggregates
1393/// that contain SSE vectors are placed at 16-byte boundaries while the rest
1394/// are at 4-byte boundaries.
1395unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const {
1396  if (Subtarget->is64Bit()) {
1397    // Max of 8 and alignment of type.
1398    unsigned TyAlign = TD->getABITypeAlignment(Ty);
1399    if (TyAlign > 8)
1400      return TyAlign;
1401    return 8;
1402  }
1403
1404  unsigned Align = 4;
1405  if (Subtarget->hasSSE1())
1406    getMaxByValAlign(Ty, Align);
1407  return Align;
1408}
1409
1410/// getOptimalMemOpType - Returns the target specific optimal type for load
1411/// and store operations as a result of memset, memcpy, and memmove
1412/// lowering. If DstAlign is zero, the destination alignment can satisfy any
1413/// constraint. Similarly, if SrcAlign is zero there is no need to check it
1414/// against the alignment requirement,
1415/// probably because the source does not need to be loaded. If 'IsMemset' is
1416/// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
1417/// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
1418/// source is constant so it does not need to be loaded.
1419/// It returns EVT::Other if the type should be determined using generic
1420/// target-independent logic.
1421EVT
1422X86TargetLowering::getOptimalMemOpType(uint64_t Size,
1423                                       unsigned DstAlign, unsigned SrcAlign,
1424                                       bool IsMemset, bool ZeroMemset,
1425                                       bool MemcpyStrSrc,
1426                                       MachineFunction &MF) const {
1427  const Function *F = MF.getFunction();
1428  if ((!IsMemset || ZeroMemset) &&
1429      !F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
1430                                       Attribute::NoImplicitFloat)) {
1431    if (Size >= 16 &&
1432        (Subtarget->isUnalignedMemAccessFast() ||
1433         ((DstAlign == 0 || DstAlign >= 16) &&
1434          (SrcAlign == 0 || SrcAlign >= 16)))) {
1435      if (Size >= 32) {
1436        if (Subtarget->hasInt256())
1437          return MVT::v8i32;
1438        if (Subtarget->hasFp256())
1439          return MVT::v8f32;
1440      }
1441      if (Subtarget->hasSSE2())
1442        return MVT::v4i32;
1443      if (Subtarget->hasSSE1())
1444        return MVT::v4f32;
1445    } else if (!MemcpyStrSrc && Size >= 8 &&
1446               !Subtarget->is64Bit() &&
1447               Subtarget->hasSSE2()) {
1448      // Do not use f64 to lower memcpy if source is string constant. It's
1449      // better to use i32 to avoid the loads.
1450      return MVT::f64;
1451    }
1452  }
1453  if (Subtarget->is64Bit() && Size >= 8)
1454    return MVT::i64;
1455  return MVT::i32;
1456}
1457
1458bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
1459  if (VT == MVT::f32)
1460    return X86ScalarSSEf32;
1461  else if (VT == MVT::f64)
1462    return X86ScalarSSEf64;
1463  return true;
1464}
1465
1466bool
1467X86TargetLowering::allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const {
1468  if (Fast)
1469    *Fast = Subtarget->isUnalignedMemAccessFast();
1470  return true;
1471}
1472
1473/// getJumpTableEncoding - Return the entry encoding for a jump table in the
1474/// current function. The returned value is a member of the
1475/// MachineJumpTableInfo::JTEntryKind enum.
1476unsigned X86TargetLowering::getJumpTableEncoding() const {
1477  // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
1478  // symbol.
1479  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1480      Subtarget->isPICStyleGOT())
1481    return MachineJumpTableInfo::EK_Custom32;
1482
1483  // Otherwise, use the normal jump table encoding heuristics.
1484  return TargetLowering::getJumpTableEncoding();
1485}
1486
1487const MCExpr *
1488X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
1489                                             const MachineBasicBlock *MBB,
1490                                             unsigned uid, MCContext &Ctx) const {
1491  assert(getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1492         Subtarget->isPICStyleGOT());
1493  // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
1494  // entries.
1495  return MCSymbolRefExpr::Create(MBB->getSymbol(),
1496                                 MCSymbolRefExpr::VK_GOTOFF, Ctx);
1497}
1498
1499/// getPICJumpTableRelocBase - Returns the relocation base for the given PIC
1500/// jumptable.
1501SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
1502                                                    SelectionDAG &DAG) const {
1503  if (!Subtarget->is64Bit())
1504    // This doesn't have SDLoc associated with it, but is not really the
1505    // same as a Register.
1506    return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy());
1507  return Table;
1508}
1509
1510/// getPICJumpTableRelocBaseExpr - This returns the relocation base for the
1511/// given PIC jumptable, the same as getPICJumpTableRelocBase, but as an
1512/// MCExpr.
1513const MCExpr *X86TargetLowering::
1514getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
1515                             MCContext &Ctx) const {
1516  // X86-64 uses RIP relative addressing based on the jump table label.
1517  if (Subtarget->isPICStyleRIPRel())
1518    return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
1519
1520  // Otherwise, the reference is relative to the PIC base.
1521  return MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx);
1522}
1523
1524// FIXME: Why is this routine here? Move to RegInfo!
1525std::pair<const TargetRegisterClass*, uint8_t>
1526X86TargetLowering::findRepresentativeClass(MVT VT) const {
1527  const TargetRegisterClass *RRC = 0;
1528  uint8_t Cost = 1;
1529  switch (VT.SimpleTy) {
1530  default:
1531    return TargetLowering::findRepresentativeClass(VT);
1532  case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
1533    RRC = Subtarget->is64Bit() ?
1534 (const TargetRegisterClass*)&X86::GR64RegClass : 1535 (const TargetRegisterClass*)&X86::GR32RegClass; 1536 break; 1537 case MVT::x86mmx: 1538 RRC = &X86::VR64RegClass; 1539 break; 1540 case MVT::f32: case MVT::f64: 1541 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64: 1542 case MVT::v4f32: case MVT::v2f64: 1543 case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32: 1544 case MVT::v4f64: 1545 RRC = &X86::VR128RegClass; 1546 break; 1547 } 1548 return std::make_pair(RRC, Cost); 1549} 1550 1551bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace, 1552 unsigned &Offset) const { 1553 if (!Subtarget->isTargetLinux()) 1554 return false; 1555 1556 if (Subtarget->is64Bit()) { 1557 // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs: 1558 Offset = 0x28; 1559 if (getTargetMachine().getCodeModel() == CodeModel::Kernel) 1560 AddressSpace = 256; 1561 else 1562 AddressSpace = 257; 1563 } else { 1564 // %gs:0x14 on i386 1565 Offset = 0x14; 1566 AddressSpace = 256; 1567 } 1568 return true; 1569} 1570 1571//===----------------------------------------------------------------------===// 1572// Return Value Calling Convention Implementation 1573//===----------------------------------------------------------------------===// 1574 1575#include "X86GenCallingConv.inc" 1576 1577bool 1578X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv, 1579 MachineFunction &MF, bool isVarArg, 1580 const SmallVectorImpl<ISD::OutputArg> &Outs, 1581 LLVMContext &Context) const { 1582 SmallVector<CCValAssign, 16> RVLocs; 1583 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 1584 RVLocs, Context); 1585 return CCInfo.CheckReturn(Outs, RetCC_X86); 1586} 1587 1588SDValue 1589X86TargetLowering::LowerReturn(SDValue Chain, 1590 CallingConv::ID CallConv, bool isVarArg, 1591 const SmallVectorImpl<ISD::OutputArg> &Outs, 1592 const SmallVectorImpl<SDValue> &OutVals, 1593 SDLoc dl, SelectionDAG &DAG) const { 1594 MachineFunction &MF = DAG.getMachineFunction(); 1595 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1596 1597 SmallVector<CCValAssign, 16> RVLocs; 1598 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 1599 RVLocs, *DAG.getContext()); 1600 CCInfo.AnalyzeReturn(Outs, RetCC_X86); 1601 1602 SDValue Flag; 1603 SmallVector<SDValue, 6> RetOps; 1604 RetOps.push_back(Chain); // Operand #0 = Chain (updated below) 1605 // Operand #1 = Bytes To Pop 1606 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), 1607 MVT::i16)); 1608 1609 // Copy the result values into the output registers. 1610 for (unsigned i = 0; i != RVLocs.size(); ++i) { 1611 CCValAssign &VA = RVLocs[i]; 1612 assert(VA.isRegLoc() && "Can only return in registers!"); 1613 SDValue ValToCopy = OutVals[i]; 1614 EVT ValVT = ValToCopy.getValueType(); 1615 1616 // Promote values to the appropriate types 1617 if (VA.getLocInfo() == CCValAssign::SExt) 1618 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy); 1619 else if (VA.getLocInfo() == CCValAssign::ZExt) 1620 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy); 1621 else if (VA.getLocInfo() == CCValAssign::AExt) 1622 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy); 1623 else if (VA.getLocInfo() == CCValAssign::BCvt) 1624 ValToCopy = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), ValToCopy); 1625 1626 // If this is x86-64, and we disabled SSE, we can't return FP values, 1627 // or SSE or MMX vectors. 
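    // (On x86-64 the SysV ABI returns float/double in %xmm0, so with SSE
    // disabled there is simply no legal register to return them in; we
    // report a fatal error below rather than miscompile.)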
1628 if ((ValVT == MVT::f32 || ValVT == MVT::f64 || 1629 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) && 1630 (Subtarget->is64Bit() && !Subtarget->hasSSE1())) { 1631 report_fatal_error("SSE register return with SSE disabled"); 1632 } 1633 // Likewise we can't return F64 values with SSE1 only. gcc does so, but 1634 // llvm-gcc has never done it right and no one has noticed, so this 1635 // should be OK for now. 1636 if (ValVT == MVT::f64 && 1637 (Subtarget->is64Bit() && !Subtarget->hasSSE2())) 1638 report_fatal_error("SSE2 register return with SSE2 disabled"); 1639 1640 // Returns in ST0/ST1 are handled specially: these are pushed as operands to 1641 // the RET instruction and handled by the FP Stackifier. 1642 if (VA.getLocReg() == X86::ST0 || 1643 VA.getLocReg() == X86::ST1) { 1644 // If this is a copy from an xmm register to ST(0), use an FPExtend to 1645 // change the value to the FP stack register class. 1646 if (isScalarFPTypeInSSEReg(VA.getValVT())) 1647 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy); 1648 RetOps.push_back(ValToCopy); 1649 // Don't emit a copytoreg. 1650 continue; 1651 } 1652 1653 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64 1654 // which is returned in RAX / RDX. 1655 if (Subtarget->is64Bit()) { 1656 if (ValVT == MVT::x86mmx) { 1657 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) { 1658 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy); 1659 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, 1660 ValToCopy); 1661 // If we don't have SSE2 available, convert to v4f32 so the generated 1662 // register is legal. 1663 if (!Subtarget->hasSSE2()) 1664 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,ValToCopy); 1665 } 1666 } 1667 } 1668 1669 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag); 1670 Flag = Chain.getValue(1); 1671 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 1672 } 1673 1674 // The x86-64 ABIs require that for returning structs by value we copy 1675 // the sret argument into %rax/%eax (depending on ABI) for the return. 1676 // Win32 requires us to put the sret argument to %eax as well. 1677 // We saved the argument into a virtual register in the entry block, 1678 // so now we copy the value out and into %rax/%eax. 1679 if (DAG.getMachineFunction().getFunction()->hasStructRetAttr() && 1680 (Subtarget->is64Bit() || Subtarget->isTargetWindows())) { 1681 MachineFunction &MF = DAG.getMachineFunction(); 1682 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1683 unsigned Reg = FuncInfo->getSRetReturnReg(); 1684 assert(Reg && 1685 "SRetReturnReg should have been set in LowerFormalArguments()."); 1686 SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy()); 1687 1688 unsigned RetValReg 1689 = (Subtarget->is64Bit() && !Subtarget->isTarget64BitILP32()) ? 1690 X86::RAX : X86::EAX; 1691 Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag); 1692 Flag = Chain.getValue(1); 1693 1694 // RAX/EAX now acts like a return value. 1695 RetOps.push_back(DAG.getRegister(RetValReg, getPointerTy())); 1696 } 1697 1698 RetOps[0] = Chain; // Update chain. 1699 1700 // Add the flag if we have it. 
1701 if (Flag.getNode()) 1702 RetOps.push_back(Flag); 1703 1704 return DAG.getNode(X86ISD::RET_FLAG, dl, 1705 MVT::Other, &RetOps[0], RetOps.size()); 1706} 1707 1708bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { 1709 if (N->getNumValues() != 1) 1710 return false; 1711 if (!N->hasNUsesOfValue(1, 0)) 1712 return false; 1713 1714 SDValue TCChain = Chain; 1715 SDNode *Copy = *N->use_begin(); 1716 if (Copy->getOpcode() == ISD::CopyToReg) { 1717 // If the copy has a glue operand, we conservatively assume it isn't safe to 1718 // perform a tail call. 1719 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue) 1720 return false; 1721 TCChain = Copy->getOperand(0); 1722 } else if (Copy->getOpcode() != ISD::FP_EXTEND) 1723 return false; 1724 1725 bool HasRet = false; 1726 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); 1727 UI != UE; ++UI) { 1728 if (UI->getOpcode() != X86ISD::RET_FLAG) 1729 return false; 1730 HasRet = true; 1731 } 1732 1733 if (!HasRet) 1734 return false; 1735 1736 Chain = TCChain; 1737 return true; 1738} 1739 1740MVT 1741X86TargetLowering::getTypeForExtArgOrReturn(MVT VT, 1742 ISD::NodeType ExtendKind) const { 1743 MVT ReturnMVT; 1744 // TODO: Is this also valid on 32-bit? 1745 if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND) 1746 ReturnMVT = MVT::i8; 1747 else 1748 ReturnMVT = MVT::i32; 1749 1750 MVT MinVT = getRegisterType(ReturnMVT); 1751 return VT.bitsLT(MinVT) ? MinVT : VT; 1752} 1753 1754/// LowerCallResult - Lower the result values of a call into the 1755/// appropriate copies out of appropriate physical registers. 1756/// 1757SDValue 1758X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 1759 CallingConv::ID CallConv, bool isVarArg, 1760 const SmallVectorImpl<ISD::InputArg> &Ins, 1761 SDLoc dl, SelectionDAG &DAG, 1762 SmallVectorImpl<SDValue> &InVals) const { 1763 1764 // Assign locations to each value returned by this call. 1765 SmallVector<CCValAssign, 16> RVLocs; 1766 bool Is64Bit = Subtarget->is64Bit(); 1767 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 1768 getTargetMachine(), RVLocs, *DAG.getContext()); 1769 CCInfo.AnalyzeCallResult(Ins, RetCC_X86); 1770 1771 // Copy all of the result registers out of their specified physreg. 1772 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 1773 CCValAssign &VA = RVLocs[i]; 1774 EVT CopyVT = VA.getValVT(); 1775 1776 // If this is x86-64, and we disabled SSE, we can't return FP values 1777 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) && 1778 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) { 1779 report_fatal_error("SSE register return with SSE disabled"); 1780 } 1781 1782 SDValue Val; 1783 1784 // If this is a call to a function that returns an fp value on the floating 1785 // point stack, we must guarantee the value is popped from the stack, so 1786 // a CopyFromReg is not good enough - the copy instruction may be eliminated 1787 // if the return value is not used. We use the FpPOP_RETVAL instruction 1788 // instead. 1789 if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) { 1790 // If we prefer to use the value in xmm registers, copy it out as f80 and 1791 // use a truncate to move it from fp stack reg to xmm reg. 
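      // For example, a float returned in ST(0) is popped as f80 here and then
      // rounded back to f32 by the FP_ROUND below, after which it can live in
      // an XMM register.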
1792      if (isScalarFPTypeInSSEReg(VA.getValVT())) CopyVT = MVT::f80;
1793      SDValue Ops[] = { Chain, InFlag };
1794      Chain = SDValue(DAG.getMachineNode(X86::FpPOP_RETVAL, dl, CopyVT,
1795                                         MVT::Other, MVT::Glue, Ops), 1);
1796      Val = Chain.getValue(0);
1797
1798      // Round the f80 to the right size, which also moves it to the appropriate
1799      // xmm register.
1800      if (CopyVT != VA.getValVT())
1801        Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
1802                          // This truncation won't change the value.
1803                          DAG.getIntPtrConstant(1));
1804    } else {
1805      Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
1806                                 CopyVT, InFlag).getValue(1);
1807      Val = Chain.getValue(0);
1808    }
1809    InFlag = Chain.getValue(2);
1810    InVals.push_back(Val);
1811  }
1812
1813  return Chain;
1814}
1815
1816//===----------------------------------------------------------------------===//
1817//                C & StdCall & Fast Calling Convention implementation
1818//===----------------------------------------------------------------------===//
1819//  The StdCall calling convention is the standard for many Windows API
1820//  routines. It differs from the C calling convention only slightly: the
1821//  callee, not the caller, cleans up the stack, and symbols are decorated
1822//  in some fancy way :) It doesn't support any vector arguments.
1823//  For info on fast calling convention see Fast Calling Convention (tail call)
1824//  implementation LowerX86_32FastCCCallTo.
1825
1826/// CallIsStructReturn - Determines whether a call uses struct return
1827/// semantics.
1828enum StructReturnType {
1829  NotStructReturn,
1830  RegStructReturn,
1831  StackStructReturn
1832};
1833static StructReturnType
1834callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) {
1835  if (Outs.empty())
1836    return NotStructReturn;
1837
1838  const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
1839  if (!Flags.isSRet())
1840    return NotStructReturn;
1841  if (Flags.isInReg())
1842    return RegStructReturn;
1843  return StackStructReturn;
1844}
1845
1846/// ArgsAreStructReturn - Determines whether a function uses struct
1847/// return semantics.
1848static StructReturnType
1849argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
1850  if (Ins.empty())
1851    return NotStructReturn;
1852
1853  const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
1854  if (!Flags.isSRet())
1855    return NotStructReturn;
1856  if (Flags.isInReg())
1857    return RegStructReturn;
1858  return StackStructReturn;
1859}
1860
1861/// CreateCopyOfByValArgument - Make a copy of an aggregate at the address
1862/// specified by "Src" to address "Dst" with size and alignment information
1863/// specified by the specific parameter attribute. The copy will be passed as
1864/// a byval function parameter.
1865static SDValue
1866CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
1867                          ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
1868                          SDLoc dl) {
1869  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
1870
1871  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
1872                       /*isVolatile*/false, /*AlwaysInline=*/true,
1873                       MachinePointerInfo(), MachinePointerInfo());
1874}
1875
1876/// IsTailCallConvention - Return true if the calling convention is one that
1877/// supports tail call optimization.
1878static bool IsTailCallConvention(CallingConv::ID CC) {
1879  return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
1880          CC == CallingConv::HiPE);
1881}
1882
1883/// \brief Return true if the calling convention is a C calling convention.
1884static bool IsCCallConvention(CallingConv::ID CC) {
1885  return (CC == CallingConv::C || CC == CallingConv::X86_64_Win64 ||
1886          CC == CallingConv::X86_64_SysV);
1887}
1888
1889bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
1890  if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls)
1891    return false;
1892
1893  CallSite CS(CI);
1894  CallingConv::ID CalleeCC = CS.getCallingConv();
1895  if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
1896    return false;
1897
1898  return true;
1899}
1900
1901/// FuncIsMadeTailCallSafe - Return true if the function is being made into
1902/// a tailcall target by changing its ABI.
1903static bool FuncIsMadeTailCallSafe(CallingConv::ID CC,
1904                                   bool GuaranteedTailCallOpt) {
1905  return GuaranteedTailCallOpt && IsTailCallConvention(CC);
1906}
1907
1908SDValue
1909X86TargetLowering::LowerMemArgument(SDValue Chain,
1910                                    CallingConv::ID CallConv,
1911                                    const SmallVectorImpl<ISD::InputArg> &Ins,
1912                                    SDLoc dl, SelectionDAG &DAG,
1913                                    const CCValAssign &VA,
1914                                    MachineFrameInfo *MFI,
1915                                    unsigned i) const {
1916  // Create the nodes corresponding to a load from this parameter slot.
1917  ISD::ArgFlagsTy Flags = Ins[i].Flags;
1918  bool AlwaysUseMutable = FuncIsMadeTailCallSafe(CallConv,
1919                              getTargetMachine().Options.GuaranteedTailCallOpt);
1920  bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
1921  EVT ValVT;
1922
1923  // If the value is passed by pointer, we have the address of the value rather
1924  // than the value itself.
1925  if (VA.getLocInfo() == CCValAssign::Indirect)
1926    ValVT = VA.getLocVT();
1927  else
1928    ValVT = VA.getValVT();
1929
1930  // FIXME: For now, all byval parameter objects are marked mutable. This can be
1931  // changed with more analysis.
1932  // In case of tail call optimization, mark all arguments mutable, since they
1933  // could be overwritten by the lowering of arguments in case of a tail call.
1934  if (Flags.isByVal()) {
1935    unsigned Bytes = Flags.getByValSize();
1936    if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
1937    int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable);
1938    return DAG.getFrameIndex(FI, getPointerTy());
1939  } else {
1940    int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
1941                                    VA.getLocMemOffset(), isImmutable);
1942    SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
1943    return DAG.getLoad(ValVT, dl, Chain, FIN,
1944                       MachinePointerInfo::getFixedStack(FI),
1945                       false, false, false, 0);
1946  }
1947}
1948
1949SDValue
1950X86TargetLowering::LowerFormalArguments(SDValue Chain,
1951                                        CallingConv::ID CallConv,
1952                                        bool isVarArg,
1953                                      const SmallVectorImpl<ISD::InputArg> &Ins,
1954                                        SDLoc dl,
1955                                        SelectionDAG &DAG,
1956                                        SmallVectorImpl<SDValue> &InVals)
1957                                          const {
1958  MachineFunction &MF = DAG.getMachineFunction();
1959  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
1960
1961  const Function* Fn = MF.getFunction();
1962  if (Fn->hasExternalLinkage() &&
1963      Subtarget->isTargetCygMing() &&
1964      Fn->getName() == "main")
1965    FuncInfo->setForceFramePointer(true);
1966
1967  MachineFrameInfo *MFI = MF.getFrameInfo();
1968  bool Is64Bit = Subtarget->is64Bit();
1969  bool IsWindows = Subtarget->isTargetWindows();
1970  bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
1971
1972  assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
1973         "Var args not supported with calling convention fastcc, ghc or hipe");
1974
1975  // Assign locations to all of the incoming arguments.
1976 SmallVector<CCValAssign, 16> ArgLocs; 1977 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 1978 ArgLocs, *DAG.getContext()); 1979 1980 // Allocate shadow area for Win64 1981 if (IsWin64) 1982 CCInfo.AllocateStack(32, 8); 1983 1984 CCInfo.AnalyzeFormalArguments(Ins, CC_X86); 1985 1986 unsigned LastVal = ~0U; 1987 SDValue ArgValue; 1988 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1989 CCValAssign &VA = ArgLocs[i]; 1990 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later 1991 // places. 1992 assert(VA.getValNo() != LastVal && 1993 "Don't support value assigned to multiple locs yet"); 1994 (void)LastVal; 1995 LastVal = VA.getValNo(); 1996 1997 if (VA.isRegLoc()) { 1998 EVT RegVT = VA.getLocVT(); 1999 const TargetRegisterClass *RC; 2000 if (RegVT == MVT::i32) 2001 RC = &X86::GR32RegClass; 2002 else if (Is64Bit && RegVT == MVT::i64) 2003 RC = &X86::GR64RegClass; 2004 else if (RegVT == MVT::f32) 2005 RC = &X86::FR32RegClass; 2006 else if (RegVT == MVT::f64) 2007 RC = &X86::FR64RegClass; 2008 else if (RegVT.is256BitVector()) 2009 RC = &X86::VR256RegClass; 2010 else if (RegVT.is128BitVector()) 2011 RC = &X86::VR128RegClass; 2012 else if (RegVT == MVT::x86mmx) 2013 RC = &X86::VR64RegClass; 2014 else 2015 llvm_unreachable("Unknown argument type!"); 2016 2017 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2018 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 2019 2020 // If this is an 8 or 16-bit value, it is really passed promoted to 32 2021 // bits. Insert an assert[sz]ext to capture this, then truncate to the 2022 // right size. 2023 if (VA.getLocInfo() == CCValAssign::SExt) 2024 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, 2025 DAG.getValueType(VA.getValVT())); 2026 else if (VA.getLocInfo() == CCValAssign::ZExt) 2027 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, 2028 DAG.getValueType(VA.getValVT())); 2029 else if (VA.getLocInfo() == CCValAssign::BCvt) 2030 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue); 2031 2032 if (VA.isExtInLoc()) { 2033 // Handle MMX values passed in XMM regs. 2034 if (RegVT.isVector()) 2035 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue); 2036 else 2037 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 2038 } 2039 } else { 2040 assert(VA.isMemLoc()); 2041 ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i); 2042 } 2043 2044 // If value is passed via pointer - do a load. 2045 if (VA.getLocInfo() == CCValAssign::Indirect) 2046 ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, 2047 MachinePointerInfo(), false, false, false, 0); 2048 2049 InVals.push_back(ArgValue); 2050 } 2051 2052 // The x86-64 ABIs require that for returning structs by value we copy 2053 // the sret argument into %rax/%eax (depending on ABI) for the return. 2054 // Win32 requires us to put the sret argument to %eax as well. 2055 // Save the argument into a virtual register so that we can access it 2056 // from the return points. 
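  // For example, given
  //   define void @f(%struct.S* sret %agg.result, ...)
  // the x86-64 ABI requires the incoming sret pointer to be handed back to
  // the caller in %rax when @f returns.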
2057 if (MF.getFunction()->hasStructRetAttr() && 2058 (Subtarget->is64Bit() || Subtarget->isTargetWindows())) { 2059 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 2060 unsigned Reg = FuncInfo->getSRetReturnReg(); 2061 if (!Reg) { 2062 MVT PtrTy = getPointerTy(); 2063 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy)); 2064 FuncInfo->setSRetReturnReg(Reg); 2065 } 2066 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]); 2067 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain); 2068 } 2069 2070 unsigned StackSize = CCInfo.getNextStackOffset(); 2071 // Align stack specially for tail calls. 2072 if (FuncIsMadeTailCallSafe(CallConv, 2073 MF.getTarget().Options.GuaranteedTailCallOpt)) 2074 StackSize = GetAlignedArgumentStackSize(StackSize, DAG); 2075 2076 // If the function takes variable number of arguments, make a frame index for 2077 // the start of the first vararg value... for expansion of llvm.va_start. 2078 if (isVarArg) { 2079 if (Is64Bit || (CallConv != CallingConv::X86_FastCall && 2080 CallConv != CallingConv::X86_ThisCall)) { 2081 FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize,true)); 2082 } 2083 if (Is64Bit) { 2084 unsigned TotalNumIntRegs = 0, TotalNumXMMRegs = 0; 2085 2086 // FIXME: We should really autogenerate these arrays 2087 static const uint16_t GPR64ArgRegsWin64[] = { 2088 X86::RCX, X86::RDX, X86::R8, X86::R9 2089 }; 2090 static const uint16_t GPR64ArgRegs64Bit[] = { 2091 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9 2092 }; 2093 static const uint16_t XMMArgRegs64Bit[] = { 2094 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 2095 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 2096 }; 2097 const uint16_t *GPR64ArgRegs; 2098 unsigned NumXMMRegs = 0; 2099 2100 if (IsWin64) { 2101 // The XMM registers which might contain var arg parameters are shadowed 2102 // in their paired GPR. So we only need to save the GPR to their home 2103 // slots. 2104 TotalNumIntRegs = 4; 2105 GPR64ArgRegs = GPR64ArgRegsWin64; 2106 } else { 2107 TotalNumIntRegs = 6; TotalNumXMMRegs = 8; 2108 GPR64ArgRegs = GPR64ArgRegs64Bit; 2109 2110 NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs64Bit, 2111 TotalNumXMMRegs); 2112 } 2113 unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs, 2114 TotalNumIntRegs); 2115 2116 bool NoImplicitFloatOps = Fn->getAttributes(). 2117 hasAttribute(AttributeSet::FunctionIndex, Attribute::NoImplicitFloat); 2118 assert(!(NumXMMRegs && !Subtarget->hasSSE1()) && 2119 "SSE register cannot be used when SSE is disabled!"); 2120 assert(!(NumXMMRegs && MF.getTarget().Options.UseSoftFloat && 2121 NoImplicitFloatOps) && 2122 "SSE register cannot be used when SSE is disabled!"); 2123 if (MF.getTarget().Options.UseSoftFloat || NoImplicitFloatOps || 2124 !Subtarget->hasSSE1()) 2125 // Kernel mode asks for SSE to be disabled, so don't push them 2126 // on the stack. 2127 TotalNumXMMRegs = 0; 2128 2129 if (IsWin64) { 2130 const TargetFrameLowering &TFI = *getTargetMachine().getFrameLowering(); 2131 // Get to the caller-allocated home save location. Add 8 to account 2132 // for the return address. 2133 int HomeOffset = TFI.getOffsetOfLocalArea() + 8; 2134 FuncInfo->setRegSaveFrameIndex( 2135 MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false)); 2136 // Fixup to set vararg frame on shadow area (4 x i64). 
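        // (With the return address at [RSP] on entry, the four home slots sit
        // at RSP+8 .. RSP+32, so the first unallocated home slot is exactly
        // where the on-stack varargs would begin.)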
2137        if (NumIntRegs < 4)
2138          FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
2139      } else {
2140        // For X86-64, if there are vararg parameters that are passed via
2141        // registers, then we must store them to their spots on the stack so
2142        // they may be loaded by dereferencing the result of va_next.
2143        FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
2144        FuncInfo->setVarArgsFPOffset(TotalNumIntRegs * 8 + NumXMMRegs * 16);
2145        FuncInfo->setRegSaveFrameIndex(
2146          MFI->CreateStackObject(TotalNumIntRegs * 8 + TotalNumXMMRegs * 16, 16,
2147                                 false));
2148      }
2149
2150      // Store the integer parameter registers.
2151      SmallVector<SDValue, 8> MemOps;
2152      SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
2153                                        getPointerTy());
2154      unsigned Offset = FuncInfo->getVarArgsGPOffset();
2155      for (; NumIntRegs != TotalNumIntRegs; ++NumIntRegs) {
2156        SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN,
2157                                  DAG.getIntPtrConstant(Offset));
2158        unsigned VReg = MF.addLiveIn(GPR64ArgRegs[NumIntRegs],
2159                                     &X86::GR64RegClass);
2160        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
2161        SDValue Store =
2162          DAG.getStore(Val.getValue(1), dl, Val, FIN,
2163                       MachinePointerInfo::getFixedStack(
2164                         FuncInfo->getRegSaveFrameIndex(), Offset),
2165                       false, false, 0);
2166        MemOps.push_back(Store);
2167        Offset += 8;
2168      }
2169
2170      if (TotalNumXMMRegs != 0 && NumXMMRegs != TotalNumXMMRegs) {
2171        // Now store the XMM (fp + vector) parameter registers.
2172        SmallVector<SDValue, 11> SaveXMMOps;
2173        SaveXMMOps.push_back(Chain);
2174
2175        unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2176        SDValue ALVal = DAG.getCopyFromReg(DAG.getEntryNode(), dl, AL, MVT::i8);
2177        SaveXMMOps.push_back(ALVal);
2178
2179        SaveXMMOps.push_back(DAG.getIntPtrConstant(
2180                               FuncInfo->getRegSaveFrameIndex()));
2181        SaveXMMOps.push_back(DAG.getIntPtrConstant(
2182                               FuncInfo->getVarArgsFPOffset()));
2183
2184        for (; NumXMMRegs != TotalNumXMMRegs; ++NumXMMRegs) {
2185          unsigned VReg = MF.addLiveIn(XMMArgRegs64Bit[NumXMMRegs],
2186                                       &X86::VR128RegClass);
2187          SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::v4f32);
2188          SaveXMMOps.push_back(Val);
2189        }
2190        MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
2191                                     MVT::Other,
2192                                     &SaveXMMOps[0], SaveXMMOps.size()));
2193      }
2194
2195      if (!MemOps.empty())
2196        Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
2197                            &MemOps[0], MemOps.size());
2198    }
2199  }
2200
2201  // Some CCs need callee pop.
2202  if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
2203                       MF.getTarget().Options.GuaranteedTailCallOpt)) {
2204    FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
2205  } else {
2206    FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
2207    // If this is an sret function, the return should pop the hidden pointer.
2208    if (!Is64Bit && !IsTailCallConvention(CallConv) && !IsWindows &&
2209        argsAreStructReturn(Ins) == StackStructReturn)
2210      FuncInfo->setBytesToPopOnReturn(4);
2211  }
2212
2213  if (!Is64Bit) {
2214    // RegSaveFrameIndex is X86-64 only.
2215    FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
2216    if (CallConv == CallingConv::X86_FastCall ||
2217        CallConv == CallingConv::X86_ThisCall)
2218      // fastcc functions can't have varargs.
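      // (0xAAAAAAA is just an easy-to-spot sentinel; nothing should ever read
      // these indices on this path.)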
2219 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA); 2220 } 2221 2222 FuncInfo->setArgumentStackSize(StackSize); 2223 2224 return Chain; 2225} 2226 2227SDValue 2228X86TargetLowering::LowerMemOpCallTo(SDValue Chain, 2229 SDValue StackPtr, SDValue Arg, 2230 SDLoc dl, SelectionDAG &DAG, 2231 const CCValAssign &VA, 2232 ISD::ArgFlagsTy Flags) const { 2233 unsigned LocMemOffset = VA.getLocMemOffset(); 2234 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 2235 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 2236 if (Flags.isByVal()) 2237 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl); 2238 2239 return DAG.getStore(Chain, dl, Arg, PtrOff, 2240 MachinePointerInfo::getStack(LocMemOffset), 2241 false, false, 0); 2242} 2243 2244/// EmitTailCallLoadRetAddr - Emit a load of return address if tail call 2245/// optimization is performed and it is required. 2246SDValue 2247X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG, 2248 SDValue &OutRetAddr, SDValue Chain, 2249 bool IsTailCall, bool Is64Bit, 2250 int FPDiff, SDLoc dl) const { 2251 // Adjust the Return address stack slot. 2252 EVT VT = getPointerTy(); 2253 OutRetAddr = getReturnAddressFrameIndex(DAG); 2254 2255 // Load the "old" Return address. 2256 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(), 2257 false, false, false, 0); 2258 return SDValue(OutRetAddr.getNode(), 1); 2259} 2260 2261/// EmitTailCallStoreRetAddr - Emit a store of the return address if tail call 2262/// optimization is performed and it is required (FPDiff!=0). 2263static SDValue 2264EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF, 2265 SDValue Chain, SDValue RetAddrFrIdx, EVT PtrVT, 2266 unsigned SlotSize, int FPDiff, SDLoc dl) { 2267 // Store the return address to the appropriate stack slot. 2268 if (!FPDiff) return Chain; 2269 // Calculate the new stack slot for the return address. 2270 int NewReturnAddrFI = 2271 MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize, false); 2272 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT); 2273 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx, 2274 MachinePointerInfo::getFixedStack(NewReturnAddrFI), 2275 false, false, 0); 2276 return Chain; 2277} 2278 2279SDValue 2280X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, 2281 SmallVectorImpl<SDValue> &InVals) const { 2282 SelectionDAG &DAG = CLI.DAG; 2283 SDLoc &dl = CLI.DL; 2284 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 2285 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 2286 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 2287 SDValue Chain = CLI.Chain; 2288 SDValue Callee = CLI.Callee; 2289 CallingConv::ID CallConv = CLI.CallConv; 2290 bool &isTailCall = CLI.IsTailCall; 2291 bool isVarArg = CLI.IsVarArg; 2292 2293 MachineFunction &MF = DAG.getMachineFunction(); 2294 bool Is64Bit = Subtarget->is64Bit(); 2295 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv); 2296 bool IsWindows = Subtarget->isTargetWindows(); 2297 StructReturnType SR = callIsStructReturn(Outs); 2298 bool IsSibcall = false; 2299 2300 if (MF.getTarget().Options.DisableTailCalls) 2301 isTailCall = false; 2302 2303 if (isTailCall) { 2304 // Check if it's really possible to do a tail call. 
2305    isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
2306                    isVarArg, SR != NotStructReturn,
2307                    MF.getFunction()->hasStructRetAttr(), CLI.RetTy,
2308                    Outs, OutVals, Ins, DAG);
2309
2310    // Sibcalls are automatically detected tailcalls which do not require
2311    // ABI changes.
2312    if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall)
2313      IsSibcall = true;
2314
2315    if (isTailCall)
2316      ++NumTailCalls;
2317  }
2318
2319  assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2320         "Var args not supported with calling convention fastcc, ghc or hipe");
2321
2322  // Analyze operands of the call, assigning locations to each operand.
2323  SmallVector<CCValAssign, 16> ArgLocs;
2324  CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(),
2325                 ArgLocs, *DAG.getContext());
2326
2327  // Allocate shadow area for Win64
2328  if (IsWin64)
2329    CCInfo.AllocateStack(32, 8);
2330
2331  CCInfo.AnalyzeCallOperands(Outs, CC_X86);
2332
2333  // Get a count of how many bytes are to be pushed on the stack.
2334  unsigned NumBytes = CCInfo.getNextStackOffset();
2335  if (IsSibcall)
2336    // This is a sibcall. The memory operands are available in the caller's
2337    // own stack.
2338    NumBytes = 0;
2339  else if (getTargetMachine().Options.GuaranteedTailCallOpt &&
2340           IsTailCallConvention(CallConv))
2341    NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
2342
2343  int FPDiff = 0;
2344  if (isTailCall && !IsSibcall) {
2345    // Lower arguments at fp - stackoffset + fpdiff.
2346    X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
2347    unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
2348
2349    FPDiff = NumBytesCallerPushed - NumBytes;
2350
2351    // Set the delta of movement of the returnaddr stackslot.
2352    // But only set if delta is greater than previous delta.
2353    if (FPDiff < X86Info->getTCReturnAddrDelta())
2354      X86Info->setTCReturnAddrDelta(FPDiff);
2355  }
2356
2357  if (!IsSibcall)
2358    Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
2359                                 dl);
2360
2361  SDValue RetAddrFrIdx;
2362  // Load return address for tail calls.
2363  if (isTailCall && FPDiff)
2364    Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
2365                                    Is64Bit, FPDiff, dl);
2366
2367  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2368  SmallVector<SDValue, 8> MemOpChains;
2369  SDValue StackPtr;
2370
2371  // Walk the register/memloc assignments, inserting copies/loads. In the case
2372  // of tail call optimization, arguments are handled later.
2373  const X86RegisterInfo *RegInfo =
2374    static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo());
2375  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2376    CCValAssign &VA = ArgLocs[i];
2377    EVT RegVT = VA.getLocVT();
2378    SDValue Arg = OutVals[i];
2379    ISD::ArgFlagsTy Flags = Outs[i].Flags;
2380    bool isByVal = Flags.isByVal();
2381
2382    // Promote the value if needed.
2383    switch (VA.getLocInfo()) {
2384    default: llvm_unreachable("Unknown loc info!");
2385    case CCValAssign::Full: break;
2386    case CCValAssign::SExt:
2387      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
2388      break;
2389    case CCValAssign::ZExt:
2390      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
2391      break;
2392    case CCValAssign::AExt:
2393      if (RegVT.is128BitVector()) {
2394        // Special case: passing MMX values in XMM registers.
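        // Roughly: bitcast the x86mmx value to i64, place it in lane 0 of a
        // v2i64 via SCALAR_TO_VECTOR, then select it with a MOVQ-style
        // shuffle (getMOVL) before the copy into the XMM register.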
2395        Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
2396        Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
2397        Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
2398      } else
2399        Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
2400      break;
2401    case CCValAssign::BCvt:
2402      Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg);
2403      break;
2404    case CCValAssign::Indirect: {
2405      // Store the argument.
2406      SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
2407      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
2408      Chain = DAG.getStore(Chain, dl, Arg, SpillSlot,
2409                           MachinePointerInfo::getFixedStack(FI),
2410                           false, false, 0);
2411      Arg = SpillSlot;
2412      break;
2413    }
2414    }
2415
2416    if (VA.isRegLoc()) {
2417      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2418      if (isVarArg && IsWin64) {
2419        // Win64 ABI requires argument XMM reg to be copied to the corresponding
2420        // shadow reg if callee is a varargs function.
2421        unsigned ShadowReg = 0;
2422        switch (VA.getLocReg()) {
2423        case X86::XMM0: ShadowReg = X86::RCX; break;
2424        case X86::XMM1: ShadowReg = X86::RDX; break;
2425        case X86::XMM2: ShadowReg = X86::R8; break;
2426        case X86::XMM3: ShadowReg = X86::R9; break;
2427        }
2428        if (ShadowReg)
2429          RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
2430      }
2431    } else if (!IsSibcall && (!isTailCall || isByVal)) {
2432      assert(VA.isMemLoc());
2433      if (StackPtr.getNode() == 0)
2434        StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
2435                                      getPointerTy());
2436      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
2437                                             dl, DAG, VA, Flags));
2438    }
2439  }
2440
2441  if (!MemOpChains.empty())
2442    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
2443                        &MemOpChains[0], MemOpChains.size());
2444
2445  if (Subtarget->isPICStyleGOT()) {
2446    // ELF / PIC requires GOT in the EBX register before function calls via PLT
2447    // GOT pointer.
2448    if (!isTailCall) {
2449      RegsToPass.push_back(std::make_pair(unsigned(X86::EBX),
2450               DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy())));
2451    } else {
2452      // If we are tail calling and generating PIC/GOT style code load the
2453      // address of the callee into ECX. The value in ecx is used as target of
2454      // the tail jump. This is done to circumvent the ebx/callee-saved problem
2455      // for tail calls on PIC/GOT architectures. Normally we would just put the
2456      // address of GOT into ebx and then call target@PLT. But for tail calls
2457      // ebx would be restored (since ebx is callee saved) before jumping to the
2458      // target@PLT.
2459
2460      // Note: The actual moving to ECX is done further down.
2461      GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
2462      if (G && !G->getGlobal()->hasHiddenVisibility() &&
2463          !G->getGlobal()->hasProtectedVisibility())
2464        Callee = LowerGlobalAddress(Callee, DAG);
2465      else if (isa<ExternalSymbolSDNode>(Callee))
2466        Callee = LowerExternalSymbol(Callee, DAG);
2467    }
2468  }
2469
2470  if (Is64Bit && isVarArg && !IsWin64) {
2471    // From AMD64 ABI document:
2472    // For calls that may call functions that use varargs or stdargs
2473    // (prototype-less calls or calls to functions containing ellipsis (...) in
2474    // the declaration) %al is used as hidden argument to specify the number
2475    // of SSE registers used. The contents of %al do not need to match exactly
2476    // the number of registers, but must be an upper bound on the number of SSE
2477    // registers used and be in the range 0 - 8 inclusive.
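    // For example, for printf("%f %f\n", x, y) both values travel in XMM
    // registers, so the code below ends up emitting 'movb $2, %al' right
    // before the call instruction.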
2478 2479 // Count the number of XMM registers allocated. 2480 static const uint16_t XMMArgRegs[] = { 2481 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 2482 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 2483 }; 2484 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8); 2485 assert((Subtarget->hasSSE1() || !NumXMMRegs) 2486 && "SSE registers cannot be used when SSE is disabled"); 2487 2488 RegsToPass.push_back(std::make_pair(unsigned(X86::AL), 2489 DAG.getConstant(NumXMMRegs, MVT::i8))); 2490 } 2491 2492 // For tail calls lower the arguments to the 'real' stack slot. 2493 if (isTailCall) { 2494 // Force all the incoming stack arguments to be loaded from the stack 2495 // before any new outgoing arguments are stored to the stack, because the 2496 // outgoing stack slots may alias the incoming argument stack slots, and 2497 // the alias isn't otherwise explicit. This is slightly more conservative 2498 // than necessary, because it means that each store effectively depends 2499 // on every argument instead of just those arguments it would clobber. 2500 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain); 2501 2502 SmallVector<SDValue, 8> MemOpChains2; 2503 SDValue FIN; 2504 int FI = 0; 2505 if (getTargetMachine().Options.GuaranteedTailCallOpt) { 2506 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2507 CCValAssign &VA = ArgLocs[i]; 2508 if (VA.isRegLoc()) 2509 continue; 2510 assert(VA.isMemLoc()); 2511 SDValue Arg = OutVals[i]; 2512 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2513 // Create frame index. 2514 int32_t Offset = VA.getLocMemOffset()+FPDiff; 2515 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8; 2516 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true); 2517 FIN = DAG.getFrameIndex(FI, getPointerTy()); 2518 2519 if (Flags.isByVal()) { 2520 // Copy relative to framepointer. 2521 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset()); 2522 if (StackPtr.getNode() == 0) 2523 StackPtr = DAG.getCopyFromReg(Chain, dl, 2524 RegInfo->getStackRegister(), 2525 getPointerTy()); 2526 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source); 2527 2528 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN, 2529 ArgChain, 2530 Flags, DAG, dl)); 2531 } else { 2532 // Store relative to framepointer. 2533 MemOpChains2.push_back( 2534 DAG.getStore(ArgChain, dl, Arg, FIN, 2535 MachinePointerInfo::getFixedStack(FI), 2536 false, false, 0)); 2537 } 2538 } 2539 } 2540 2541 if (!MemOpChains2.empty()) 2542 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2543 &MemOpChains2[0], MemOpChains2.size()); 2544 2545 // Store the return address to the appropriate stack slot. 2546 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx, 2547 getPointerTy(), RegInfo->getSlotSize(), 2548 FPDiff, dl); 2549 } 2550 2551 // Build a sequence of copy-to-reg nodes chained together with token chain 2552 // and flag operands which copy the outgoing args into registers. 2553 SDValue InFlag; 2554 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 2555 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 2556 RegsToPass[i].second, InFlag); 2557 InFlag = Chain.getValue(1); 2558 } 2559 2560 if (getTargetMachine().getCodeModel() == CodeModel::Large) { 2561 assert(Is64Bit && "Large code model is only legal in 64-bit mode."); 2562 // In the 64-bit large code model, we have to make all calls 2563 // through a register, since the call instruction's 32-bit 2564 // pc-relative offset may not be large enough to hold the whole 2565 // address. 
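    // e.g. instead of 'callq foo' the emitted sequence is roughly:
    //   movabsq $foo, %rax
    //   callq   *%rax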
2566  } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
2567    // If the callee is a GlobalAddress node (quite common, every direct call
2568    // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack
2569    // it.
2570
2571    // We should use extra load for direct calls to dllimported functions in
2572    // non-JIT mode.
2573    const GlobalValue *GV = G->getGlobal();
2574    if (!GV->hasDLLImportLinkage()) {
2575      unsigned char OpFlags = 0;
2576      bool ExtraLoad = false;
2577      unsigned WrapperKind = ISD::DELETED_NODE;
2578
2579      // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
2580      // external symbols must go through the PLT in PIC mode. If the symbol
2581      // has hidden or protected visibility, or if it is static or local, then
2582      // we don't need to use the PLT - we can directly call it.
2583      if (Subtarget->isTargetELF() &&
2584          getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
2585          GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
2586        OpFlags = X86II::MO_PLT;
2587      } else if (Subtarget->isPICStyleStubAny() &&
2588                 (GV->isDeclaration() || GV->isWeakForLinker()) &&
2589                 (!Subtarget->getTargetTriple().isMacOSX() ||
2590                  Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
2591        // PC-relative references to external symbols should go through $stub,
2592        // unless we're building with the leopard linker or later, which
2593        // automatically synthesizes these stubs.
2594        OpFlags = X86II::MO_DARWIN_STUB;
2595      } else if (Subtarget->isPICStyleRIPRel() &&
2596                 isa<Function>(GV) &&
2597                 cast<Function>(GV)->getAttributes().
2598                   hasAttribute(AttributeSet::FunctionIndex,
2599                                Attribute::NonLazyBind)) {
2600        // If the function is marked as non-lazy, generate an indirect call
2601        // which loads from the GOT directly. This avoids runtime overhead
2602        // at the cost of eager binding (and one extra byte of encoding).
2603        OpFlags = X86II::MO_GOTPCREL;
2604        WrapperKind = X86ISD::WrapperRIP;
2605        ExtraLoad = true;
2606      }
2607
2608      Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(),
2609                                          G->getOffset(), OpFlags);
2610
2611      // Add a wrapper if needed.
2612      if (WrapperKind != ISD::DELETED_NODE)
2613        Callee = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Callee);
2614      // Add extra indirection if needed.
2615      if (ExtraLoad)
2616        Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee,
2617                             MachinePointerInfo::getGOT(),
2618                             false, false, false, 0);
2619    }
2620  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
2621    unsigned char OpFlags = 0;
2622
2623    // On ELF targets, in either X86-64 or X86-32 mode, direct calls to
2624    // external symbols should go through the PLT.
2625    if (Subtarget->isTargetELF() &&
2626        getTargetMachine().getRelocationModel() == Reloc::PIC_) {
2627      OpFlags = X86II::MO_PLT;
2628    } else if (Subtarget->isPICStyleStubAny() &&
2629               (!Subtarget->getTargetTriple().isMacOSX() ||
2630                Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
2631      // PC-relative references to external symbols should go through $stub,
2632      // unless we're building with the leopard linker or later, which
2633      // automatically synthesizes these stubs.
2634      OpFlags = X86II::MO_DARWIN_STUB;
2635    }
2636
2637    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(),
2638                                         OpFlags);
2639  }
2640
2641  // Returns a chain & a flag for retval copy to use.
2642  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2643  SmallVector<SDValue, 8> Ops;
2644
2645  if (!IsSibcall && isTailCall) {
2646    Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
2647                               DAG.getIntPtrConstant(0, true), InFlag, dl);
2648    InFlag = Chain.getValue(1);
2649  }
2650
2651  Ops.push_back(Chain);
2652  Ops.push_back(Callee);
2653
2654  if (isTailCall)
2655    Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));
2656
2657  // Add argument registers to the end of the list so that they are known live
2658  // into the call.
2659  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
2660    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
2661                                  RegsToPass[i].second.getValueType()));
2662
2663  // Add a register mask operand representing the call-preserved registers.
2664  const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
2665  const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
2666  assert(Mask && "Missing call preserved mask for calling convention");
2667  Ops.push_back(DAG.getRegisterMask(Mask));
2668
2669  if (InFlag.getNode())
2670    Ops.push_back(InFlag);
2671
2672  if (isTailCall) {
2673    // We used to do:
2674    //// If this is the first return lowered for this function, add the regs
2675    //// to the liveout set for the function.
2676    // This isn't right, although it's probably harmless on x86; liveouts
2677    // should be computed from returns not tail calls. Consider a void
2678    // function making a tail call to a function returning int.
2679    return DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size());
2680  }
2681
2682  Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, &Ops[0], Ops.size());
2683  InFlag = Chain.getValue(1);
2684
2685  // Create the CALLSEQ_END node.
2686  unsigned NumBytesForCalleeToPush;
2687  if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
2688                       getTargetMachine().Options.GuaranteedTailCallOpt))
2689    NumBytesForCalleeToPush = NumBytes;    // Callee pops everything
2690  else if (!Is64Bit && !IsTailCallConvention(CallConv) && !IsWindows &&
2691           SR == StackStructReturn)
2692    // If this is a call to a struct-return function, the callee
2693    // pops the hidden struct pointer, so we have to push it back.
2694    // This is common for Darwin/X86, Linux & Mingw32 targets.
2695    // For MSVC Win32 targets, the caller pops the hidden struct pointer.
2696    NumBytesForCalleeToPush = 4;
2697  else
2698    NumBytesForCalleeToPush = 0;  // Callee pops nothing.
2699
2700  // Returns a flag for retval copy to use.
2701  if (!IsSibcall) {
2702    Chain = DAG.getCALLSEQ_END(Chain,
2703                               DAG.getIntPtrConstant(NumBytes, true),
2704                               DAG.getIntPtrConstant(NumBytesForCalleeToPush,
2705                                                     true),
2706                               InFlag, dl);
2707    InFlag = Chain.getValue(1);
2708  }
2709
2710  // Handle result values, copying them out of physregs into vregs that we
2711  // return.
2712  return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
2713                         Ins, dl, DAG, InVals);
2714}
2715
2716//===----------------------------------------------------------------------===//
2717//                Fast Calling Convention (tail call) implementation
2718//===----------------------------------------------------------------------===//
2719
2720//  Like stdcall, the callee cleans up the arguments, except that ECX is
2721//  reserved for storing the address of the tail-called function. Only two
2722//  registers are free for argument passing (inreg).
Tail call optimization is performed
2723//  provided:
2724//   * tailcallopt is enabled
2725//   * caller/callee are fastcc
2726// On X86_64 architecture with GOT-style position independent code only local
2727// (within module) calls are supported at the moment.
2728// To keep the stack aligned according to the platform ABI, the function
2729// GetAlignedArgumentStackSize ensures that the argument delta is always a
2730// multiple of the stack alignment. (Dynamic linkers need this - Darwin's dyld for example)
2731// If a tail-called function (callee) has more arguments than the caller, the
2732// caller needs to make sure that there is room to move the RETADDR to. This is
2733// achieved by reserving an area the size of the argument delta right after the
2734// original RETADDR, but before the saved framepointer or the spilled registers
2735// e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4)
2736// stack layout:
2737//    arg1
2738//    arg2
2739//    RETADDR
2740//    [ new RETADDR
2741//      move area ]
2742//    (possible EBP)
2743//    ESI
2744//    EDI
2745//    local1 ..
2746
2747/// GetAlignedArgumentStackSize - Align the stack size, e.g. to 16n + 12 for
2748/// a 16-byte alignment requirement (leaving room for the RETADDR slot).
2749unsigned
2750X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
2751                                               SelectionDAG& DAG) const {
2752  MachineFunction &MF = DAG.getMachineFunction();
2753  const TargetMachine &TM = MF.getTarget();
2754  const X86RegisterInfo *RegInfo =
2755    static_cast<const X86RegisterInfo*>(TM.getRegisterInfo());
2756  const TargetFrameLowering &TFI = *TM.getFrameLowering();
2757  unsigned StackAlignment = TFI.getStackAlignment();
2758  uint64_t AlignMask = StackAlignment - 1;
2759  int64_t Offset = StackSize;
2760  unsigned SlotSize = RegInfo->getSlotSize();
2761  if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
2762    // Number smaller than 12 so just add the difference.
2763    Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
2764  } else {
2765    // Mask out lower bits, add stackalignment once plus the 12 bytes.
2766    Offset = ((~AlignMask) & Offset) + StackAlignment +
2767      (StackAlignment-SlotSize);
2768  }
2769  return Offset;
2770}
2771
2772/// MatchingStackOffset - Return true if the given stack call argument is
2773/// already available in the same position (relatively) of the caller's
2774/// incoming argument stack.
2775static
2776bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
2777                         MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
2778                         const X86InstrInfo *TII) {
2779  unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
2780  int FI = INT_MAX;
2781  if (Arg.getOpcode() == ISD::CopyFromReg) {
2782    unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
2783    if (!TargetRegisterInfo::isVirtualRegister(VR))
2784      return false;
2785    MachineInstr *Def = MRI->getVRegDef(VR);
2786    if (!Def)
2787      return false;
2788    if (!Flags.isByVal()) {
2789      if (!TII->isLoadFromStackSlot(Def, FI))
2790        return false;
2791    } else {
2792      unsigned Opcode = Def->getOpcode();
2793      if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r) &&
2794          Def->getOperand(1).isFI()) {
2795        FI = Def->getOperand(1).getIndex();
2796        Bytes = Flags.getByValSize();
2797      } else
2798        return false;
2799    }
2800  } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
2801    if (Flags.isByVal())
2802      // ByVal argument is passed in as a pointer but it's now being
2803      // dereferenced. e.g.
2804 // define @foo(%struct.X* %A) { 2805 // tail call @bar(%struct.X* byval %A) 2806 // } 2807 return false; 2808 SDValue Ptr = Ld->getBasePtr(); 2809 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr); 2810 if (!FINode) 2811 return false; 2812 FI = FINode->getIndex(); 2813 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) { 2814 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg); 2815 FI = FINode->getIndex(); 2816 Bytes = Flags.getByValSize(); 2817 } else 2818 return false; 2819 2820 assert(FI != INT_MAX); 2821 if (!MFI->isFixedObjectIndex(FI)) 2822 return false; 2823 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI); 2824} 2825 2826/// IsEligibleForTailCallOptimization - Check whether the call is eligible 2827/// for tail call optimization. Targets which want to do tail call 2828/// optimization should implement this function. 2829bool 2830X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 2831 CallingConv::ID CalleeCC, 2832 bool isVarArg, 2833 bool isCalleeStructRet, 2834 bool isCallerStructRet, 2835 Type *RetTy, 2836 const SmallVectorImpl<ISD::OutputArg> &Outs, 2837 const SmallVectorImpl<SDValue> &OutVals, 2838 const SmallVectorImpl<ISD::InputArg> &Ins, 2839 SelectionDAG &DAG) const { 2840 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC)) 2841 return false; 2842 2843 // If -tailcallopt is specified, make fastcc functions tail-callable. 2844 const MachineFunction &MF = DAG.getMachineFunction(); 2845 const Function *CallerF = MF.getFunction(); 2846 2847 // If the function return type is x86_fp80 and the callee return type is not, 2848 // then the FP_EXTEND of the call result is not a nop. It's not safe to 2849 // perform a tailcall optimization here. 2850 if (CallerF->getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty()) 2851 return false; 2852 2853 CallingConv::ID CallerCC = CallerF->getCallingConv(); 2854 bool CCMatch = CallerCC == CalleeCC; 2855 bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC); 2856 bool IsCallerWin64 = Subtarget->isCallingConvWin64(CallerCC); 2857 2858 if (getTargetMachine().Options.GuaranteedTailCallOpt) { 2859 if (IsTailCallConvention(CalleeCC) && CCMatch) 2860 return true; 2861 return false; 2862 } 2863 2864 // Look for obvious safe cases to perform tail call optimization that do not 2865 // require ABI changes. This is what gcc calls sibcall. 2866 2867 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to 2868 // emit a special epilogue. 2869 const X86RegisterInfo *RegInfo = 2870 static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo()); 2871 if (RegInfo->needsStackRealignment(MF)) 2872 return false; 2873 2874 // Also avoid sibcall optimization if either caller or callee uses struct 2875 // return semantics. 2876 if (isCalleeStructRet || isCallerStructRet) 2877 return false; 2878 2879 // An stdcall caller is expected to clean up its arguments; the callee 2880 // isn't going to do that. 2881 if (!CCMatch && CallerCC == CallingConv::X86_StdCall) 2882 return false; 2883 2884 // Do not sibcall optimize vararg calls unless all arguments are passed via 2885 // registers. 2886 if (isVarArg && !Outs.empty()) { 2887 2888 // Optimizing for varargs on Win64 is unlikely to be safe without 2889 // additional testing. 
2890 if (IsCalleeWin64 || IsCallerWin64) 2891 return false; 2892 2893 SmallVector<CCValAssign, 16> ArgLocs; 2894 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), 2895 getTargetMachine(), ArgLocs, *DAG.getContext()); 2896 2897 CCInfo.AnalyzeCallOperands(Outs, CC_X86); 2898 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) 2899 if (!ArgLocs[i].isRegLoc()) 2900 return false; 2901 } 2902 2903 // If the call result is in ST0 / ST1, it needs to be popped off the x87 2904 // stack. Therefore, if it's not used by the call it is not safe to optimize 2905 // this into a sibcall. 2906 bool Unused = false; 2907 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { 2908 if (!Ins[i].Used) { 2909 Unused = true; 2910 break; 2911 } 2912 } 2913 if (Unused) { 2914 SmallVector<CCValAssign, 16> RVLocs; 2915 CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), 2916 getTargetMachine(), RVLocs, *DAG.getContext()); 2917 CCInfo.AnalyzeCallResult(Ins, RetCC_X86); 2918 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 2919 CCValAssign &VA = RVLocs[i]; 2920 if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) 2921 return false; 2922 } 2923 } 2924 2925 // If the calling conventions do not match, then we'd better make sure the 2926 // results are returned in the same way as what the caller expects. 2927 if (!CCMatch) { 2928 SmallVector<CCValAssign, 16> RVLocs1; 2929 CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), 2930 getTargetMachine(), RVLocs1, *DAG.getContext()); 2931 CCInfo1.AnalyzeCallResult(Ins, RetCC_X86); 2932 2933 SmallVector<CCValAssign, 16> RVLocs2; 2934 CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), 2935 getTargetMachine(), RVLocs2, *DAG.getContext()); 2936 CCInfo2.AnalyzeCallResult(Ins, RetCC_X86); 2937 2938 if (RVLocs1.size() != RVLocs2.size()) 2939 return false; 2940 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) { 2941 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc()) 2942 return false; 2943 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo()) 2944 return false; 2945 if (RVLocs1[i].isRegLoc()) { 2946 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg()) 2947 return false; 2948 } else { 2949 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset()) 2950 return false; 2951 } 2952 } 2953 } 2954 2955 // If the callee takes no arguments then go on to check the results of the 2956 // call. 2957 if (!Outs.empty()) { 2958 // Check if stack adjustment is needed. For now, do not do this if any 2959 // argument is passed on the stack. 2960 SmallVector<CCValAssign, 16> ArgLocs; 2961 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), 2962 getTargetMachine(), ArgLocs, *DAG.getContext()); 2963 2964 // Allocate shadow area for Win64 2965 if (IsCalleeWin64) 2966 CCInfo.AllocateStack(32, 8); 2967 2968 CCInfo.AnalyzeCallOperands(Outs, CC_X86); 2969 if (CCInfo.getNextStackOffset()) { 2970 MachineFunction &MF = DAG.getMachineFunction(); 2971 if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn()) 2972 return false; 2973 2974 // Check if the arguments are already laid out in the right way as 2975 // the caller's fixed stack objects. 
2976 MachineFrameInfo *MFI = MF.getFrameInfo(); 2977 const MachineRegisterInfo *MRI = &MF.getRegInfo(); 2978 const X86InstrInfo *TII = 2979 ((const X86TargetMachine&)getTargetMachine()).getInstrInfo(); 2980 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2981 CCValAssign &VA = ArgLocs[i]; 2982 SDValue Arg = OutVals[i]; 2983 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2984 if (VA.getLocInfo() == CCValAssign::Indirect) 2985 return false; 2986 if (!VA.isRegLoc()) { 2987 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, 2988 MFI, MRI, TII)) 2989 return false; 2990 } 2991 } 2992 } 2993 2994 // If the tailcall address may be in a register, then make sure it's 2995 // possible to register allocate for it. In 32-bit, the call address can 2996 // only target EAX, EDX, or ECX since the tail call must be scheduled after 2997 // callee-saved registers are restored. These happen to be the same 2998 // registers used to pass 'inreg' arguments so watch out for those. 2999 if (!Subtarget->is64Bit() && 3000 ((!isa<GlobalAddressSDNode>(Callee) && 3001 !isa<ExternalSymbolSDNode>(Callee)) || 3002 getTargetMachine().getRelocationModel() == Reloc::PIC_)) { 3003 unsigned NumInRegs = 0; 3004 // In PIC we need an extra register to formulate the address computation 3005 // for the callee. 3006 unsigned MaxInRegs = 3007 (getTargetMachine().getRelocationModel() == Reloc::PIC_) ? 2 : 3; 3008 3009 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 3010 CCValAssign &VA = ArgLocs[i]; 3011 if (!VA.isRegLoc()) 3012 continue; 3013 unsigned Reg = VA.getLocReg(); 3014 switch (Reg) { 3015 default: break; 3016 case X86::EAX: case X86::EDX: case X86::ECX: 3017 if (++NumInRegs == MaxInRegs) 3018 return false; 3019 break; 3020 } 3021 } 3022 } 3023 } 3024 3025 return true; 3026} 3027 3028FastISel * 3029X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo, 3030 const TargetLibraryInfo *libInfo) const { 3031 return X86::createFastISel(funcInfo, libInfo); 3032} 3033 3034//===----------------------------------------------------------------------===// 3035// Other Lowering Hooks 3036//===----------------------------------------------------------------------===// 3037 3038static bool MayFoldLoad(SDValue Op) { 3039 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode()); 3040} 3041 3042static bool MayFoldIntoStore(SDValue Op) { 3043 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin()); 3044} 3045 3046static bool isTargetShuffle(unsigned Opcode) { 3047 switch(Opcode) { 3048 default: return false; 3049 case X86ISD::PSHUFD: 3050 case X86ISD::PSHUFHW: 3051 case X86ISD::PSHUFLW: 3052 case X86ISD::SHUFP: 3053 case X86ISD::PALIGNR: 3054 case X86ISD::MOVLHPS: 3055 case X86ISD::MOVLHPD: 3056 case X86ISD::MOVHLPS: 3057 case X86ISD::MOVLPS: 3058 case X86ISD::MOVLPD: 3059 case X86ISD::MOVSHDUP: 3060 case X86ISD::MOVSLDUP: 3061 case X86ISD::MOVDDUP: 3062 case X86ISD::MOVSS: 3063 case X86ISD::MOVSD: 3064 case X86ISD::UNPCKL: 3065 case X86ISD::UNPCKH: 3066 case X86ISD::VPERMILP: 3067 case X86ISD::VPERM2X128: 3068 case X86ISD::VPERMI: 3069 return true; 3070 } 3071} 3072 3073static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT, 3074 SDValue V1, SelectionDAG &DAG) { 3075 switch(Opc) { 3076 default: llvm_unreachable("Unknown x86 shuffle node"); 3077 case X86ISD::MOVSHDUP: 3078 case X86ISD::MOVSLDUP: 3079 case X86ISD::MOVDDUP: 3080 return DAG.getNode(Opc, dl, VT, V1); 3081 } 3082} 3083 3084static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT, 3085 SDValue V1, unsigned TargetMask, 
3086                                     SelectionDAG &DAG) {
3087   switch(Opc) {
3088   default: llvm_unreachable("Unknown x86 shuffle node");
3089   case X86ISD::PSHUFD:
3090   case X86ISD::PSHUFHW:
3091   case X86ISD::PSHUFLW:
3092   case X86ISD::VPERMILP:
3093   case X86ISD::VPERMI:
3094     return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8));
3095   }
3096 }
3097
3098 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3099                                     SDValue V1, SDValue V2, unsigned TargetMask,
3100                                     SelectionDAG &DAG) {
3101   switch(Opc) {
3102   default: llvm_unreachable("Unknown x86 shuffle node");
3103   case X86ISD::PALIGNR:
3104   case X86ISD::SHUFP:
3105   case X86ISD::VPERM2X128:
3106     return DAG.getNode(Opc, dl, VT, V1, V2,
3107                        DAG.getConstant(TargetMask, MVT::i8));
3108   }
3109 }
3110
3111 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3112                                     SDValue V1, SDValue V2, SelectionDAG &DAG) {
3113   switch(Opc) {
3114   default: llvm_unreachable("Unknown x86 shuffle node");
3115   case X86ISD::MOVLHPS:
3116   case X86ISD::MOVLHPD:
3117   case X86ISD::MOVHLPS:
3118   case X86ISD::MOVLPS:
3119   case X86ISD::MOVLPD:
3120   case X86ISD::MOVSS:
3121   case X86ISD::MOVSD:
3122   case X86ISD::UNPCKL:
3123   case X86ISD::UNPCKH:
3124     return DAG.getNode(Opc, dl, VT, V1, V2);
3125   }
3126 }
3127
3128 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
3129   MachineFunction &MF = DAG.getMachineFunction();
3130   const X86RegisterInfo *RegInfo =
3131     static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo());
3132   X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
3133   int ReturnAddrIndex = FuncInfo->getRAIndex();
3134
3135   if (ReturnAddrIndex == 0) {
3136     // Set up a frame object for the return address.
3137     unsigned SlotSize = RegInfo->getSlotSize();
3138     ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize, -SlotSize,
3139                                                            false);
3140     FuncInfo->setRAIndex(ReturnAddrIndex);
3141   }
3142
3143   return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
3144 }
3145
3146 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
3147                                        bool hasSymbolicDisplacement) {
3148   // The offset should fit into a 32-bit immediate field.
3149   if (!isInt<32>(Offset))
3150     return false;
3151
3152   // If we don't have a symbolic displacement, we don't have any extra
3153   // restrictions.
3154   if (!hasSymbolicDisplacement)
3155     return true;
3156
3157   // FIXME: Some tweaks might be needed for medium code model.
3158   if (M != CodeModel::Small && M != CodeModel::Kernel)
3159     return false;
3160
3161   // For the small code model we assume the last object ends 16MB before the
3162   // 31-bit boundary. We may also accept fairly large negative constants, since
3163   // all objects sit in the positive half of the address space.
3164   if (M == CodeModel::Small && Offset < 16*1024*1024)
3165     return true;
3166
3167   // For the kernel code model all objects reside in the negative half of the
3168   // 32-bit address space. Negative offsets may escape that range, so they are
3169   // rejected, while fairly large positive ones are fine.
3170   if (M == CodeModel::Kernel && Offset > 0)
3171     return true;
3172
3173   return false;
3174 }
3175
3176 /// isCalleePop - Determines whether the callee is required to pop its
3177 /// own arguments. Callee pop is necessary to support tail calls.
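/// For example, a 32-bit X86_StdCall function taking two ints returns with
/// `ret 8`, popping its own 8 bytes of arguments, so this returns true for
/// it; under the default C calling convention the caller pops, so it
/// returns false.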
3178 bool X86::isCalleePop(CallingConv::ID CallingConv,
3179                       bool is64Bit, bool IsVarArg, bool TailCallOpt) {
3180   if (IsVarArg)
3181     return false;
3182
3183   switch (CallingConv) {
3184   default:
3185     return false;
3186   case CallingConv::X86_StdCall:
3187     return !is64Bit;
3188   case CallingConv::X86_FastCall:
3189     return !is64Bit;
3190   case CallingConv::X86_ThisCall:
3191     return !is64Bit;
3192   case CallingConv::Fast:
3193     return TailCallOpt;
3194   case CallingConv::GHC:
3195     return TailCallOpt;
3196   case CallingConv::HiPE:
3197     return TailCallOpt;
3198   }
3199 }
3200
3201 /// TranslateX86CC - Do a one-to-one translation of an ISD::CondCode to the
3202 /// X86-specific condition code, returning the condition code and the LHS/RHS
3203 /// of the comparison to make.
3204 static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
3205                                SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) {
3206   if (!isFP) {
3207     if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
3208       if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
3209         // X > -1   -> X == 0, jump !sign.
3210         RHS = DAG.getConstant(0, RHS.getValueType());
3211         return X86::COND_NS;
3212       }
3213       if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
3214         // X < 0   -> X == 0, jump on sign.
3215         return X86::COND_S;
3216       }
3217       if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
3218         // X < 1   -> X <= 0
3219         RHS = DAG.getConstant(0, RHS.getValueType());
3220         return X86::COND_LE;
3221       }
3222     }
3223
3224     switch (SetCCOpcode) {
3225     default: llvm_unreachable("Invalid integer condition!");
3226     case ISD::SETEQ:  return X86::COND_E;
3227     case ISD::SETGT:  return X86::COND_G;
3228     case ISD::SETGE:  return X86::COND_GE;
3229     case ISD::SETLT:  return X86::COND_L;
3230     case ISD::SETLE:  return X86::COND_LE;
3231     case ISD::SETNE:  return X86::COND_NE;
3232     case ISD::SETULT: return X86::COND_B;
3233     case ISD::SETUGT: return X86::COND_A;
3234     case ISD::SETULE: return X86::COND_BE;
3235     case ISD::SETUGE: return X86::COND_AE;
3236     }
3237   }
3238
3239   // First determine whether it is required or profitable to flip the operands.
3240
3241   // If LHS is a foldable load, but RHS is not, flip the condition.
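  // For example, (setlt (load %p), %x) becomes (setgt %x, (load %p)); the
  // load then sits on the RHS, where it can fold into the CMP instruction's
  // memory operand.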
3242   if (ISD::isNON_EXTLoad(LHS.getNode()) &&
3243       !ISD::isNON_EXTLoad(RHS.getNode())) {
3244     SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
3245     std::swap(LHS, RHS);
3246   }
3247
3248   switch (SetCCOpcode) {
3249   default: break;
3250   case ISD::SETOLT:
3251   case ISD::SETOLE:
3252   case ISD::SETUGT:
3253   case ISD::SETUGE:
3254     std::swap(LHS, RHS);
3255     break;
3256   }
3257
3258   // On a floating point condition, the flags are set as follows:
3259   //  ZF  PF  CF   op
3260   //   0 | 0 | 0 | X > Y
3261   //   0 | 0 | 1 | X < Y
3262   //   1 | 0 | 0 | X == Y
3263   //   1 | 1 | 1 | unordered
3264   switch (SetCCOpcode) {
3265   default: llvm_unreachable("Condcode should be pre-legalized away");
3266   case ISD::SETUEQ:
3267   case ISD::SETEQ:   return X86::COND_E;
3268   case ISD::SETOLT:  // flipped
3269   case ISD::SETOGT:
3270   case ISD::SETGT:   return X86::COND_A;
3271   case ISD::SETOLE:  // flipped
3272   case ISD::SETOGE:
3273   case ISD::SETGE:   return X86::COND_AE;
3274   case ISD::SETUGT:  // flipped
3275   case ISD::SETULT:
3276   case ISD::SETLT:   return X86::COND_B;
3277   case ISD::SETUGE:  // flipped
3278   case ISD::SETULE:
3279   case ISD::SETLE:   return X86::COND_BE;
3280   case ISD::SETONE:
3281   case ISD::SETNE:   return X86::COND_NE;
3282   case ISD::SETUO:   return X86::COND_P;
3283   case ISD::SETO:    return X86::COND_NP;
3284   case ISD::SETOEQ:
3285   case ISD::SETUNE:  return X86::COND_INVALID;
3286   }
3287 }
3288
3289 /// hasFPCMov - Is there a floating-point cmov for the specific X86 condition
3290 /// code? The current x86 ISA includes the following FP cmov instructions:
3291 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
3292 static bool hasFPCMov(unsigned X86CC) {
3293   switch (X86CC) {
3294   default:
3295     return false;
3296   case X86::COND_B:
3297   case X86::COND_BE:
3298   case X86::COND_E:
3299   case X86::COND_P:
3300   case X86::COND_A:
3301   case X86::COND_AE:
3302   case X86::COND_NE:
3303   case X86::COND_NP:
3304     return true;
3305   }
3306 }
3307
3308 /// isFPImmLegal - Returns true if the target can instruction select the
3309 /// specified FP immediate natively. If false, the legalizer will
3310 /// materialize the FP immediate as a load from a constant pool.
3311 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
3312   for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
3313     if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
3314       return true;
3315   }
3316   return false;
3317 }
3318
3319 /// isUndefOrInRange - Return true if Val is undef or if its value falls within
3320 /// the specified range [Low, Hi).
3321 static bool isUndefOrInRange(int Val, int Low, int Hi) {
3322   return (Val < 0) || (Val >= Low && Val < Hi);
3323 }
3324
3325 /// isUndefOrEqual - Val is either less than zero (undef) or equal to the
3326 /// specified value.
3327 static bool isUndefOrEqual(int Val, int CmpVal) {
3328   return (Val < 0 || Val == CmpVal);
3329 }
3330
3331 /// isSequentialOrUndefInRange - Return true if every element in Mask, starting
3332 /// at position Pos and ending at Pos+Size, is undef or matches the
3333 /// corresponding value of the sequential range [Low, Low+Size).
3334 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
3335                                        unsigned Pos, unsigned Size, int Low) {
3336   for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
3337     if (!isUndefOrEqual(Mask[i], Low))
3338       return false;
3339   return true;
3340 }
3341
3342 /// isPSHUFDMask - Return true if the node specifies a shuffle of elements that
3343 /// is suitable for input to PSHUFD or PSHUFW. That is, it doesn't reference
3344 /// the second operand.
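/// For example, on v4i32 the mask <2, 1, 0, 3> (with -1 allowed for undef
/// elements) is a valid PSHUFD mask, while <0, 4, 1, 5> is not, since
/// elements 4-7 would reference the second operand.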
3345static bool isPSHUFDMask(ArrayRef<int> Mask, EVT VT) { 3346 if (VT == MVT::v4f32 || VT == MVT::v4i32 ) 3347 return (Mask[0] < 4 && Mask[1] < 4 && Mask[2] < 4 && Mask[3] < 4); 3348 if (VT == MVT::v2f64 || VT == MVT::v2i64) 3349 return (Mask[0] < 2 && Mask[1] < 2); 3350 return false; 3351} 3352 3353/// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that 3354/// is suitable for input to PSHUFHW. 3355static bool isPSHUFHWMask(ArrayRef<int> Mask, EVT VT, bool HasInt256) { 3356 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16)) 3357 return false; 3358 3359 // Lower quadword copied in order or undef. 3360 if (!isSequentialOrUndefInRange(Mask, 0, 4, 0)) 3361 return false; 3362 3363 // Upper quadword shuffled. 3364 for (unsigned i = 4; i != 8; ++i) 3365 if (!isUndefOrInRange(Mask[i], 4, 8)) 3366 return false; 3367 3368 if (VT == MVT::v16i16) { 3369 // Lower quadword copied in order or undef. 3370 if (!isSequentialOrUndefInRange(Mask, 8, 4, 8)) 3371 return false; 3372 3373 // Upper quadword shuffled. 3374 for (unsigned i = 12; i != 16; ++i) 3375 if (!isUndefOrInRange(Mask[i], 12, 16)) 3376 return false; 3377 } 3378 3379 return true; 3380} 3381 3382/// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that 3383/// is suitable for input to PSHUFLW. 3384static bool isPSHUFLWMask(ArrayRef<int> Mask, EVT VT, bool HasInt256) { 3385 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16)) 3386 return false; 3387 3388 // Upper quadword copied in order. 3389 if (!isSequentialOrUndefInRange(Mask, 4, 4, 4)) 3390 return false; 3391 3392 // Lower quadword shuffled. 3393 for (unsigned i = 0; i != 4; ++i) 3394 if (!isUndefOrInRange(Mask[i], 0, 4)) 3395 return false; 3396 3397 if (VT == MVT::v16i16) { 3398 // Upper quadword copied in order. 3399 if (!isSequentialOrUndefInRange(Mask, 12, 4, 12)) 3400 return false; 3401 3402 // Lower quadword shuffled. 3403 for (unsigned i = 8; i != 12; ++i) 3404 if (!isUndefOrInRange(Mask[i], 8, 12)) 3405 return false; 3406 } 3407 3408 return true; 3409} 3410 3411/// isPALIGNRMask - Return true if the node specifies a shuffle of elements that 3412/// is suitable for input to PALIGNR. 3413static bool isPALIGNRMask(ArrayRef<int> Mask, EVT VT, 3414 const X86Subtarget *Subtarget) { 3415 if ((VT.is128BitVector() && !Subtarget->hasSSSE3()) || 3416 (VT.is256BitVector() && !Subtarget->hasInt256())) 3417 return false; 3418 3419 unsigned NumElts = VT.getVectorNumElements(); 3420 unsigned NumLanes = VT.getSizeInBits()/128; 3421 unsigned NumLaneElts = NumElts/NumLanes; 3422 3423 // Do not handle 64-bit element shuffles with palignr. 3424 if (NumLaneElts == 2) 3425 return false; 3426 3427 for (unsigned l = 0; l != NumElts; l+=NumLaneElts) { 3428 unsigned i; 3429 for (i = 0; i != NumLaneElts; ++i) { 3430 if (Mask[i+l] >= 0) 3431 break; 3432 } 3433 3434 // Lane is all undef, go to next lane 3435 if (i == NumLaneElts) 3436 continue; 3437 3438 int Start = Mask[i+l]; 3439 3440 // Make sure its in this lane in one of the sources 3441 if (!isUndefOrInRange(Start, l, l+NumLaneElts) && 3442 !isUndefOrInRange(Start, l+NumElts, l+NumElts+NumLaneElts)) 3443 return false; 3444 3445 // If not lane 0, then we must match lane 0 3446 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Start, Mask[i]+l)) 3447 return false; 3448 3449 // Correct second source to be contiguous with first source 3450 if (Start >= (int)NumElts) 3451 Start -= NumElts - NumLaneElts; 3452 3453 // Make sure we're shifting in the right direction. 
3454     if (Start <= (int)(i+l))
3455       return false;
3456
3457     Start -= i;
3458
3459     // Check the rest of the elements to see if they are consecutive.
3460     for (++i; i != NumLaneElts; ++i) {
3461       int Idx = Mask[i+l];
3462
3463       // Make sure it's in this lane.
3464       if (!isUndefOrInRange(Idx, l, l+NumLaneElts) &&
3465           !isUndefOrInRange(Idx, l+NumElts, l+NumElts+NumLaneElts))
3466         return false;
3467
3468       // If not lane 0, then we must match lane 0.
3469       if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Idx, Mask[i]+l))
3470         return false;
3471
3472       if (Idx >= (int)NumElts)
3473         Idx -= NumElts - NumLaneElts;
3474
3475       if (!isUndefOrEqual(Idx, Start+i))
3476         return false;
3477
3478     }
3479   }
3480
3481   return true;
3482 }
3483
3484 /// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
3485 /// the two vector operands have swapped position.
3486 static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask,
3487                                      unsigned NumElems) {
3488   for (unsigned i = 0; i != NumElems; ++i) {
3489     int idx = Mask[i];
3490     if (idx < 0)
3491       continue;
3492     else if (idx < (int)NumElems)
3493       Mask[i] = idx + NumElems;
3494     else
3495       Mask[i] = idx - NumElems;
3496   }
3497 }
3498
3499 /// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
3500 /// specifies a shuffle of elements that is suitable for input to 128/256-bit
3501 /// SHUFPS and SHUFPD. If Commuted is true, it checks whether the sources are
3502 /// in the reverse order of what x86 shuffles want.
3503 static bool isSHUFPMask(ArrayRef<int> Mask, EVT VT, bool HasFp256,
3504                         bool Commuted = false) {
3505   if (!HasFp256 && VT.is256BitVector())
3506     return false;
3507
3508   unsigned NumElems = VT.getVectorNumElements();
3509   unsigned NumLanes = VT.getSizeInBits()/128;
3510   unsigned NumLaneElems = NumElems/NumLanes;
3511
3512   if (NumLaneElems != 2 && NumLaneElems != 4)
3513     return false;
3514
3515   // VSHUFPSY divides the resulting vector into 4 chunks.
3516   // The sources are also split into 4 chunks, and each destination
3517   // chunk must come from a different source chunk.
3518   //
3519   // SRC1 =>   X7    X6    X5    X4    X3    X2    X1    X0
3520   // SRC2 =>   Y7    Y6    Y5    Y4    Y3    Y2    Y1    Y0
3521   //
3522   // DST  =>  Y7..Y4,   Y7..Y4,   X7..X4,   X7..X4,
3523   //          Y3..Y0,   Y3..Y0,   X3..X0,   X3..X0
3524   //
3525   // VSHUFPDY divides the resulting vector into 4 chunks.
3526   // The sources are also split into 4 chunks, and each destination
3527   // chunk must come from a different source chunk.
3528   //
3529   // SRC1 =>  X3  X2  X1  X0
3530   // SRC2 =>  Y3  Y2  Y1  Y0
3531   //
3532   // DST  =>  Y3..Y2,  X3..X2,  Y1..Y0,  X1..X0
3533   //
3534   unsigned HalfLaneElems = NumLaneElems/2;
3535   for (unsigned l = 0; l != NumElems; l += NumLaneElems) {
3536     for (unsigned i = 0; i != NumLaneElems; ++i) {
3537       int Idx = Mask[i+l];
3538       unsigned RngStart = l + ((Commuted == (i<HalfLaneElems)) ? NumElems : 0);
3539       if (!isUndefOrInRange(Idx, RngStart, RngStart+NumLaneElems))
3540         return false;
3541       // For VSHUFPSY, the mask of the second half must be the same as the
3542       // first but with the appropriate offsets. This works in the same way as
3543       // VPERMILPS works with masks.
3544       if (NumElems != 8 || l == 0 || Mask[i] < 0)
3545         continue;
3546       if (!isUndefOrEqual(Idx, Mask[i]+l))
3547         return false;
3548     }
3549   }
3550
3551   return true;
3552 }
3553
3554 /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
3555 /// specifies a shuffle of elements that is suitable for input to MOVHLPS.
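/// For example, on v4f32 the mask <6, 7, 2, 3> matches: the low half of the
/// result is the high half of V2 (elements 6 and 7) and the high half is the
/// high half of V1 (elements 2 and 3), which is exactly what MOVHLPS does.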
3556static bool isMOVHLPSMask(ArrayRef<int> Mask, EVT VT) { 3557 if (!VT.is128BitVector()) 3558 return false; 3559 3560 unsigned NumElems = VT.getVectorNumElements(); 3561 3562 if (NumElems != 4) 3563 return false; 3564 3565 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3 3566 return isUndefOrEqual(Mask[0], 6) && 3567 isUndefOrEqual(Mask[1], 7) && 3568 isUndefOrEqual(Mask[2], 2) && 3569 isUndefOrEqual(Mask[3], 3); 3570} 3571 3572/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form 3573/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef, 3574/// <2, 3, 2, 3> 3575static bool isMOVHLPS_v_undef_Mask(ArrayRef<int> Mask, EVT VT) { 3576 if (!VT.is128BitVector()) 3577 return false; 3578 3579 unsigned NumElems = VT.getVectorNumElements(); 3580 3581 if (NumElems != 4) 3582 return false; 3583 3584 return isUndefOrEqual(Mask[0], 2) && 3585 isUndefOrEqual(Mask[1], 3) && 3586 isUndefOrEqual(Mask[2], 2) && 3587 isUndefOrEqual(Mask[3], 3); 3588} 3589 3590/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand 3591/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}. 3592static bool isMOVLPMask(ArrayRef<int> Mask, EVT VT) { 3593 if (!VT.is128BitVector()) 3594 return false; 3595 3596 unsigned NumElems = VT.getVectorNumElements(); 3597 3598 if (NumElems != 2 && NumElems != 4) 3599 return false; 3600 3601 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 3602 if (!isUndefOrEqual(Mask[i], i + NumElems)) 3603 return false; 3604 3605 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i) 3606 if (!isUndefOrEqual(Mask[i], i)) 3607 return false; 3608 3609 return true; 3610} 3611 3612/// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand 3613/// specifies a shuffle of elements that is suitable for input to MOVLHPS. 3614static bool isMOVLHPSMask(ArrayRef<int> Mask, EVT VT) { 3615 if (!VT.is128BitVector()) 3616 return false; 3617 3618 unsigned NumElems = VT.getVectorNumElements(); 3619 3620 if (NumElems != 2 && NumElems != 4) 3621 return false; 3622 3623 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 3624 if (!isUndefOrEqual(Mask[i], i)) 3625 return false; 3626 3627 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 3628 if (!isUndefOrEqual(Mask[i + e], i + NumElems)) 3629 return false; 3630 3631 return true; 3632} 3633 3634// 3635// Some special combinations that can be optimized. 3636// 3637static 3638SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp, 3639 SelectionDAG &DAG) { 3640 MVT VT = SVOp->getValueType(0).getSimpleVT(); 3641 SDLoc dl(SVOp); 3642 3643 if (VT != MVT::v8i32 && VT != MVT::v8f32) 3644 return SDValue(); 3645 3646 ArrayRef<int> Mask = SVOp->getMask(); 3647 3648 // These are the special masks that may be optimized. 3649 static const int MaskToOptimizeEven[] = {0, 8, 2, 10, 4, 12, 6, 14}; 3650 static const int MaskToOptimizeOdd[] = {1, 9, 3, 11, 5, 13, 7, 15}; 3651 bool MatchEvenMask = true; 3652 bool MatchOddMask = true; 3653 for (int i=0; i<8; ++i) { 3654 if (!isUndefOrEqual(Mask[i], MaskToOptimizeEven[i])) 3655 MatchEvenMask = false; 3656 if (!isUndefOrEqual(Mask[i], MaskToOptimizeOdd[i])) 3657 MatchOddMask = false; 3658 } 3659 3660 if (!MatchEvenMask && !MatchOddMask) 3661 return SDValue(); 3662 3663 SDValue UndefNode = DAG.getNode(ISD::UNDEF, dl, VT); 3664 3665 SDValue Op0 = SVOp->getOperand(0); 3666 SDValue Op1 = SVOp->getOperand(1); 3667 3668 if (MatchEvenMask) { 3669 // Shift the second operand right to 32 bits. 
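    // For example, the even mask <0, 8, 2, 10, 4, 12, 6, 14> is rewritten as
    // a one-element shift of Op1 followed by the blend <0, 9, 2, 11, 4, 13,
    // 6, 15>, taking even elements from Op0 and odd ones from the shifted Op1.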
3670 static const int ShiftRightMask[] = {-1, 0, -1, 2, -1, 4, -1, 6 }; 3671 Op1 = DAG.getVectorShuffle(VT, dl, Op1, UndefNode, ShiftRightMask); 3672 } else { 3673 // Shift the first operand left to 32 bits. 3674 static const int ShiftLeftMask[] = {1, -1, 3, -1, 5, -1, 7, -1 }; 3675 Op0 = DAG.getVectorShuffle(VT, dl, Op0, UndefNode, ShiftLeftMask); 3676 } 3677 static const int BlendMask[] = {0, 9, 2, 11, 4, 13, 6, 15}; 3678 return DAG.getVectorShuffle(VT, dl, Op0, Op1, BlendMask); 3679} 3680 3681/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand 3682/// specifies a shuffle of elements that is suitable for input to UNPCKL. 3683static bool isUNPCKLMask(ArrayRef<int> Mask, EVT VT, 3684 bool HasInt256, bool V2IsSplat = false) { 3685 unsigned NumElts = VT.getVectorNumElements(); 3686 3687 assert((VT.is128BitVector() || VT.is256BitVector()) && 3688 "Unsupported vector type for unpckh"); 3689 3690 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 && 3691 (!HasInt256 || (NumElts != 16 && NumElts != 32))) 3692 return false; 3693 3694 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 3695 // independently on 128-bit lanes. 3696 unsigned NumLanes = VT.getSizeInBits()/128; 3697 unsigned NumLaneElts = NumElts/NumLanes; 3698 3699 for (unsigned l = 0; l != NumLanes; ++l) { 3700 for (unsigned i = l*NumLaneElts, j = l*NumLaneElts; 3701 i != (l+1)*NumLaneElts; 3702 i += 2, ++j) { 3703 int BitI = Mask[i]; 3704 int BitI1 = Mask[i+1]; 3705 if (!isUndefOrEqual(BitI, j)) 3706 return false; 3707 if (V2IsSplat) { 3708 if (!isUndefOrEqual(BitI1, NumElts)) 3709 return false; 3710 } else { 3711 if (!isUndefOrEqual(BitI1, j + NumElts)) 3712 return false; 3713 } 3714 } 3715 } 3716 3717 return true; 3718} 3719 3720/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand 3721/// specifies a shuffle of elements that is suitable for input to UNPCKH. 3722static bool isUNPCKHMask(ArrayRef<int> Mask, EVT VT, 3723 bool HasInt256, bool V2IsSplat = false) { 3724 unsigned NumElts = VT.getVectorNumElements(); 3725 3726 assert((VT.is128BitVector() || VT.is256BitVector()) && 3727 "Unsupported vector type for unpckh"); 3728 3729 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 && 3730 (!HasInt256 || (NumElts != 16 && NumElts != 32))) 3731 return false; 3732 3733 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 3734 // independently on 128-bit lanes. 3735 unsigned NumLanes = VT.getSizeInBits()/128; 3736 unsigned NumLaneElts = NumElts/NumLanes; 3737 3738 for (unsigned l = 0; l != NumLanes; ++l) { 3739 for (unsigned i = l*NumLaneElts, j = (l*NumLaneElts)+NumLaneElts/2; 3740 i != (l+1)*NumLaneElts; i += 2, ++j) { 3741 int BitI = Mask[i]; 3742 int BitI1 = Mask[i+1]; 3743 if (!isUndefOrEqual(BitI, j)) 3744 return false; 3745 if (V2IsSplat) { 3746 if (isUndefOrEqual(BitI1, NumElts)) 3747 return false; 3748 } else { 3749 if (!isUndefOrEqual(BitI1, j+NumElts)) 3750 return false; 3751 } 3752 } 3753 } 3754 return true; 3755} 3756 3757/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form 3758/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. 
vector_shuffle v, undef, 3759/// <0, 0, 1, 1> 3760static bool isUNPCKL_v_undef_Mask(ArrayRef<int> Mask, EVT VT, bool HasInt256) { 3761 unsigned NumElts = VT.getVectorNumElements(); 3762 bool Is256BitVec = VT.is256BitVector(); 3763 3764 assert((VT.is128BitVector() || VT.is256BitVector()) && 3765 "Unsupported vector type for unpckh"); 3766 3767 if (Is256BitVec && NumElts != 4 && NumElts != 8 && 3768 (!HasInt256 || (NumElts != 16 && NumElts != 32))) 3769 return false; 3770 3771 // For 256-bit i64/f64, use MOVDDUPY instead, so reject the matching pattern 3772 // FIXME: Need a better way to get rid of this, there's no latency difference 3773 // between UNPCKLPD and MOVDDUP, the later should always be checked first and 3774 // the former later. We should also remove the "_undef" special mask. 3775 if (NumElts == 4 && Is256BitVec) 3776 return false; 3777 3778 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 3779 // independently on 128-bit lanes. 3780 unsigned NumLanes = VT.getSizeInBits()/128; 3781 unsigned NumLaneElts = NumElts/NumLanes; 3782 3783 for (unsigned l = 0; l != NumLanes; ++l) { 3784 for (unsigned i = l*NumLaneElts, j = l*NumLaneElts; 3785 i != (l+1)*NumLaneElts; 3786 i += 2, ++j) { 3787 int BitI = Mask[i]; 3788 int BitI1 = Mask[i+1]; 3789 3790 if (!isUndefOrEqual(BitI, j)) 3791 return false; 3792 if (!isUndefOrEqual(BitI1, j)) 3793 return false; 3794 } 3795 } 3796 3797 return true; 3798} 3799 3800/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form 3801/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef, 3802/// <2, 2, 3, 3> 3803static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, EVT VT, bool HasInt256) { 3804 unsigned NumElts = VT.getVectorNumElements(); 3805 3806 assert((VT.is128BitVector() || VT.is256BitVector()) && 3807 "Unsupported vector type for unpckh"); 3808 3809 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 && 3810 (!HasInt256 || (NumElts != 16 && NumElts != 32))) 3811 return false; 3812 3813 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 3814 // independently on 128-bit lanes. 3815 unsigned NumLanes = VT.getSizeInBits()/128; 3816 unsigned NumLaneElts = NumElts/NumLanes; 3817 3818 for (unsigned l = 0; l != NumLanes; ++l) { 3819 for (unsigned i = l*NumLaneElts, j = (l*NumLaneElts)+NumLaneElts/2; 3820 i != (l+1)*NumLaneElts; i += 2, ++j) { 3821 int BitI = Mask[i]; 3822 int BitI1 = Mask[i+1]; 3823 if (!isUndefOrEqual(BitI, j)) 3824 return false; 3825 if (!isUndefOrEqual(BitI1, j)) 3826 return false; 3827 } 3828 } 3829 return true; 3830} 3831 3832/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand 3833/// specifies a shuffle of elements that is suitable for input to MOVSS, 3834/// MOVSD, and MOVD, i.e. setting the lowest element. 3835static bool isMOVLMask(ArrayRef<int> Mask, EVT VT) { 3836 if (VT.getVectorElementType().getSizeInBits() < 32) 3837 return false; 3838 if (!VT.is128BitVector()) 3839 return false; 3840 3841 unsigned NumElts = VT.getVectorNumElements(); 3842 3843 if (!isUndefOrEqual(Mask[0], NumElts)) 3844 return false; 3845 3846 for (unsigned i = 1; i != NumElts; ++i) 3847 if (!isUndefOrEqual(Mask[i], i)) 3848 return false; 3849 3850 return true; 3851} 3852 3853/// isVPERM2X128Mask - Match 256-bit shuffles where the elements are considered 3854/// as permutations between 128-bit chunks or halves. 
As an example, consider the
3855 /// shuffle below:
3856 ///   vector_shuffle <4, 5, 6, 7, 12, 13, 14, 15>
3857 /// The first half comes from the second half of V1 and the second half from
3858 /// the second half of V2.
3859 static bool isVPERM2X128Mask(ArrayRef<int> Mask, EVT VT, bool HasFp256) {
3860   if (!HasFp256 || !VT.is256BitVector())
3861     return false;
3862
3863   // The shuffle result is divided into half A and half B. In total the two
3864   // sources have 4 halves, namely: C, D, E, F. The final values of A and
3865   // B must come from C, D, E or F.
3866   unsigned HalfSize = VT.getVectorNumElements()/2;
3867   bool MatchA = false, MatchB = false;
3868
3869   // Check if A comes from one of C, D, E, F.
3870   for (unsigned Half = 0; Half != 4; ++Half) {
3871     if (isSequentialOrUndefInRange(Mask, 0, HalfSize, Half*HalfSize)) {
3872       MatchA = true;
3873       break;
3874     }
3875   }
3876
3877   // Check if B comes from one of C, D, E, F.
3878   for (unsigned Half = 0; Half != 4; ++Half) {
3879     if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, Half*HalfSize)) {
3880       MatchB = true;
3881       break;
3882     }
3883   }
3884
3885   return MatchA && MatchB;
3886 }
3887
3888 /// getShuffleVPERM2X128Immediate - Return the appropriate immediate to shuffle
3889 /// the specified VECTOR_SHUFFLE mask with VPERM2F128/VPERM2I128 instructions.
3890 static unsigned getShuffleVPERM2X128Immediate(ShuffleVectorSDNode *SVOp) {
3891   MVT VT = SVOp->getValueType(0).getSimpleVT();
3892
3893   unsigned HalfSize = VT.getVectorNumElements()/2;
3894
3895   unsigned FstHalf = 0, SndHalf = 0;
3896   for (unsigned i = 0; i < HalfSize; ++i) {
3897     if (SVOp->getMaskElt(i) > 0) {
3898       FstHalf = SVOp->getMaskElt(i)/HalfSize;
3899       break;
3900     }
3901   }
3902   for (unsigned i = HalfSize; i < HalfSize*2; ++i) {
3903     if (SVOp->getMaskElt(i) > 0) {
3904       SndHalf = SVOp->getMaskElt(i)/HalfSize;
3905       break;
3906     }
3907   }
3908
3909   return (FstHalf | (SndHalf << 4));
3910 }
3911
3912 /// isVPERMILPMask - Return true if the specified VECTOR_SHUFFLE operand
3913 /// specifies a shuffle of elements that is suitable for input to VPERMILPD*.
3914 /// Note that VPERMIL mask matching differs depending on whether the underlying
3915 /// type is 32- or 64-bit. For VPERMILPS the high half of the mask should point
3916 /// to the same elements as the low half, but within the upper half of the source.
3917 /// For VPERMILPD the two lanes can be shuffled independently of each other,
3918 /// with the same restriction that lanes can't be crossed. Also handles PSHUFDY.
3919 static bool isVPERMILPMask(ArrayRef<int> Mask, EVT VT, bool HasFp256) {
3920   if (!HasFp256)
3921     return false;
3922
3923   unsigned NumElts = VT.getVectorNumElements();
3924   // Only match 256-bit with 32/64-bit types.
3925   if (!VT.is256BitVector() || (NumElts != 4 && NumElts != 8))
3926     return false;
3927
3928   unsigned NumLanes = VT.getSizeInBits()/128;
3929   unsigned LaneSize = NumElts/NumLanes;
3930   for (unsigned l = 0; l != NumElts; l += LaneSize) {
3931     for (unsigned i = 0; i != LaneSize; ++i) {
3932       if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
3933         return false;
3934       if (NumElts != 8 || l == 0)
3935         continue;
3936       // VPERMILPS handling
3937       if (Mask[i] < 0)
3938         continue;
3939       if (!isUndefOrEqual(Mask[i+l], Mask[i]+l))
3940         return false;
3941     }
3942   }
3943
3944   return true;
3945 }
3946
3947 /// isCommutedMOVLMask - Returns true if the shuffle mask is the reverse of what
3948 /// x86 movss wants. X86 movss requires the lowest element to be the lowest
3949 /// element of vector 2, with the other elements coming from vector 1 in order.
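/// For example, on v4i32 the mask <0, 5, 6, 7> is a commuted MOVL mask:
/// element 0 comes from vector 1 and elements 5-7 come from vector 2 in
/// order, i.e. the operands are swapped relative to what MOVSS matches.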
3950static bool isCommutedMOVLMask(ArrayRef<int> Mask, EVT VT, 3951 bool V2IsSplat = false, bool V2IsUndef = false) { 3952 if (!VT.is128BitVector()) 3953 return false; 3954 3955 unsigned NumOps = VT.getVectorNumElements(); 3956 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16) 3957 return false; 3958 3959 if (!isUndefOrEqual(Mask[0], 0)) 3960 return false; 3961 3962 for (unsigned i = 1; i != NumOps; ++i) 3963 if (!(isUndefOrEqual(Mask[i], i+NumOps) || 3964 (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) || 3965 (V2IsSplat && isUndefOrEqual(Mask[i], NumOps)))) 3966 return false; 3967 3968 return true; 3969} 3970 3971/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand 3972/// specifies a shuffle of elements that is suitable for input to MOVSHDUP. 3973/// Masks to match: <1, 1, 3, 3> or <1, 1, 3, 3, 5, 5, 7, 7> 3974static bool isMOVSHDUPMask(ArrayRef<int> Mask, EVT VT, 3975 const X86Subtarget *Subtarget) { 3976 if (!Subtarget->hasSSE3()) 3977 return false; 3978 3979 unsigned NumElems = VT.getVectorNumElements(); 3980 3981 if ((VT.is128BitVector() && NumElems != 4) || 3982 (VT.is256BitVector() && NumElems != 8)) 3983 return false; 3984 3985 // "i+1" is the value the indexed mask element must have 3986 for (unsigned i = 0; i != NumElems; i += 2) 3987 if (!isUndefOrEqual(Mask[i], i+1) || 3988 !isUndefOrEqual(Mask[i+1], i+1)) 3989 return false; 3990 3991 return true; 3992} 3993 3994/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand 3995/// specifies a shuffle of elements that is suitable for input to MOVSLDUP. 3996/// Masks to match: <0, 0, 2, 2> or <0, 0, 2, 2, 4, 4, 6, 6> 3997static bool isMOVSLDUPMask(ArrayRef<int> Mask, EVT VT, 3998 const X86Subtarget *Subtarget) { 3999 if (!Subtarget->hasSSE3()) 4000 return false; 4001 4002 unsigned NumElems = VT.getVectorNumElements(); 4003 4004 if ((VT.is128BitVector() && NumElems != 4) || 4005 (VT.is256BitVector() && NumElems != 8)) 4006 return false; 4007 4008 // "i" is the value the indexed mask element must have 4009 for (unsigned i = 0; i != NumElems; i += 2) 4010 if (!isUndefOrEqual(Mask[i], i) || 4011 !isUndefOrEqual(Mask[i+1], i)) 4012 return false; 4013 4014 return true; 4015} 4016 4017/// isMOVDDUPYMask - Return true if the specified VECTOR_SHUFFLE operand 4018/// specifies a shuffle of elements that is suitable for input to 256-bit 4019/// version of MOVDDUP. 4020static bool isMOVDDUPYMask(ArrayRef<int> Mask, EVT VT, bool HasFp256) { 4021 if (!HasFp256 || !VT.is256BitVector()) 4022 return false; 4023 4024 unsigned NumElts = VT.getVectorNumElements(); 4025 if (NumElts != 4) 4026 return false; 4027 4028 for (unsigned i = 0; i != NumElts/2; ++i) 4029 if (!isUndefOrEqual(Mask[i], 0)) 4030 return false; 4031 for (unsigned i = NumElts/2; i != NumElts; ++i) 4032 if (!isUndefOrEqual(Mask[i], NumElts/2)) 4033 return false; 4034 return true; 4035} 4036 4037/// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand 4038/// specifies a shuffle of elements that is suitable for input to 128-bit 4039/// version of MOVDDUP. 
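/// For example, on v2f64 the mask <0, 0> matches: both result elements
/// duplicate the low element of the source, which is exactly what MOVDDUP
/// computes.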
4040static bool isMOVDDUPMask(ArrayRef<int> Mask, EVT VT) { 4041 if (!VT.is128BitVector()) 4042 return false; 4043 4044 unsigned e = VT.getVectorNumElements() / 2; 4045 for (unsigned i = 0; i != e; ++i) 4046 if (!isUndefOrEqual(Mask[i], i)) 4047 return false; 4048 for (unsigned i = 0; i != e; ++i) 4049 if (!isUndefOrEqual(Mask[e+i], i)) 4050 return false; 4051 return true; 4052} 4053 4054/// isVEXTRACTF128Index - Return true if the specified 4055/// EXTRACT_SUBVECTOR operand specifies a vector extract that is 4056/// suitable for input to VEXTRACTF128. 4057bool X86::isVEXTRACTF128Index(SDNode *N) { 4058 if (!isa<ConstantSDNode>(N->getOperand(1).getNode())) 4059 return false; 4060 4061 // The index should be aligned on a 128-bit boundary. 4062 uint64_t Index = 4063 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue(); 4064 4065 MVT VT = N->getValueType(0).getSimpleVT(); 4066 unsigned ElSize = VT.getVectorElementType().getSizeInBits(); 4067 bool Result = (Index * ElSize) % 128 == 0; 4068 4069 return Result; 4070} 4071 4072/// isVINSERTF128Index - Return true if the specified INSERT_SUBVECTOR 4073/// operand specifies a subvector insert that is suitable for input to 4074/// VINSERTF128. 4075bool X86::isVINSERTF128Index(SDNode *N) { 4076 if (!isa<ConstantSDNode>(N->getOperand(2).getNode())) 4077 return false; 4078 4079 // The index should be aligned on a 128-bit boundary. 4080 uint64_t Index = 4081 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue(); 4082 4083 MVT VT = N->getValueType(0).getSimpleVT(); 4084 unsigned ElSize = VT.getVectorElementType().getSizeInBits(); 4085 bool Result = (Index * ElSize) % 128 == 0; 4086 4087 return Result; 4088} 4089 4090/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle 4091/// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions. 4092/// Handles 128-bit and 256-bit. 4093static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) { 4094 MVT VT = N->getValueType(0).getSimpleVT(); 4095 4096 assert((VT.is128BitVector() || VT.is256BitVector()) && 4097 "Unsupported vector type for PSHUF/SHUFP"); 4098 4099 // Handle 128 and 256-bit vector lengths. AVX defines PSHUF/SHUFP to operate 4100 // independently on 128-bit lanes. 4101 unsigned NumElts = VT.getVectorNumElements(); 4102 unsigned NumLanes = VT.getSizeInBits()/128; 4103 unsigned NumLaneElts = NumElts/NumLanes; 4104 4105 assert((NumLaneElts == 2 || NumLaneElts == 4) && 4106 "Only supports 2 or 4 elements per lane"); 4107 4108 unsigned Shift = (NumLaneElts == 4) ? 1 : 0; 4109 unsigned Mask = 0; 4110 for (unsigned i = 0; i != NumElts; ++i) { 4111 int Elt = N->getMaskElt(i); 4112 if (Elt < 0) continue; 4113 Elt &= NumLaneElts - 1; 4114 unsigned ShAmt = (i << Shift) % 8; 4115 Mask |= Elt << ShAmt; 4116 } 4117 4118 return Mask; 4119} 4120 4121/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle 4122/// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction. 4123static unsigned getShufflePSHUFHWImmediate(ShuffleVectorSDNode *N) { 4124 MVT VT = N->getValueType(0).getSimpleVT(); 4125 4126 assert((VT == MVT::v8i16 || VT == MVT::v16i16) && 4127 "Unsupported vector type for PSHUFHW"); 4128 4129 unsigned NumElts = VT.getVectorNumElements(); 4130 4131 unsigned Mask = 0; 4132 for (unsigned l = 0; l != NumElts; l += 8) { 4133 // 8 nodes per lane, but we only care about the last 4. 4134 for (unsigned i = 0; i < 4; ++i) { 4135 int Elt = N->getMaskElt(l+i+4); 4136 if (Elt < 0) continue; 4137 Elt &= 0x3; // only 2-bits. 
4138 Mask |= Elt << (i * 2); 4139 } 4140 } 4141 4142 return Mask; 4143} 4144 4145/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle 4146/// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction. 4147static unsigned getShufflePSHUFLWImmediate(ShuffleVectorSDNode *N) { 4148 MVT VT = N->getValueType(0).getSimpleVT(); 4149 4150 assert((VT == MVT::v8i16 || VT == MVT::v16i16) && 4151 "Unsupported vector type for PSHUFHW"); 4152 4153 unsigned NumElts = VT.getVectorNumElements(); 4154 4155 unsigned Mask = 0; 4156 for (unsigned l = 0; l != NumElts; l += 8) { 4157 // 8 nodes per lane, but we only care about the first 4. 4158 for (unsigned i = 0; i < 4; ++i) { 4159 int Elt = N->getMaskElt(l+i); 4160 if (Elt < 0) continue; 4161 Elt &= 0x3; // only 2-bits 4162 Mask |= Elt << (i * 2); 4163 } 4164 } 4165 4166 return Mask; 4167} 4168 4169/// getShufflePALIGNRImmediate - Return the appropriate immediate to shuffle 4170/// the specified VECTOR_SHUFFLE mask with the PALIGNR instruction. 4171static unsigned getShufflePALIGNRImmediate(ShuffleVectorSDNode *SVOp) { 4172 MVT VT = SVOp->getValueType(0).getSimpleVT(); 4173 unsigned EltSize = VT.getVectorElementType().getSizeInBits() >> 3; 4174 4175 unsigned NumElts = VT.getVectorNumElements(); 4176 unsigned NumLanes = VT.getSizeInBits()/128; 4177 unsigned NumLaneElts = NumElts/NumLanes; 4178 4179 int Val = 0; 4180 unsigned i; 4181 for (i = 0; i != NumElts; ++i) { 4182 Val = SVOp->getMaskElt(i); 4183 if (Val >= 0) 4184 break; 4185 } 4186 if (Val >= (int)NumElts) 4187 Val -= NumElts - NumLaneElts; 4188 4189 assert(Val - i > 0 && "PALIGNR imm should be positive"); 4190 return (Val - i) * EltSize; 4191} 4192 4193/// getExtractVEXTRACTF128Immediate - Return the appropriate immediate 4194/// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128 4195/// instructions. 4196unsigned X86::getExtractVEXTRACTF128Immediate(SDNode *N) { 4197 if (!isa<ConstantSDNode>(N->getOperand(1).getNode())) 4198 llvm_unreachable("Illegal extract subvector for VEXTRACTF128"); 4199 4200 uint64_t Index = 4201 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue(); 4202 4203 MVT VecVT = N->getOperand(0).getValueType().getSimpleVT(); 4204 MVT ElVT = VecVT.getVectorElementType(); 4205 4206 unsigned NumElemsPerChunk = 128 / ElVT.getSizeInBits(); 4207 return Index / NumElemsPerChunk; 4208} 4209 4210/// getInsertVINSERTF128Immediate - Return the appropriate immediate 4211/// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128 4212/// instructions. 4213unsigned X86::getInsertVINSERTF128Immediate(SDNode *N) { 4214 if (!isa<ConstantSDNode>(N->getOperand(2).getNode())) 4215 llvm_unreachable("Illegal insert subvector for VINSERTF128"); 4216 4217 uint64_t Index = 4218 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue(); 4219 4220 MVT VecVT = N->getValueType(0).getSimpleVT(); 4221 MVT ElVT = VecVT.getVectorElementType(); 4222 4223 unsigned NumElemsPerChunk = 128 / ElVT.getSizeInBits(); 4224 return Index / NumElemsPerChunk; 4225} 4226 4227/// getShuffleCLImmediate - Return the appropriate immediate to shuffle 4228/// the specified VECTOR_SHUFFLE mask with VPERMQ and VPERMPD instructions. 4229/// Handles 256-bit. 
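/// For example, the v4i64 mask <3, 0, 2, 1> encodes as 3 | (0 << 2) |
/// (2 << 4) | (1 << 6) = 0x63, the 2-bits-per-element immediate form that
/// VPERMQ/VPERMPD expect.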
4230static unsigned getShuffleCLImmediate(ShuffleVectorSDNode *N) { 4231 MVT VT = N->getValueType(0).getSimpleVT(); 4232 4233 unsigned NumElts = VT.getVectorNumElements(); 4234 4235 assert((VT.is256BitVector() && NumElts == 4) && 4236 "Unsupported vector type for VPERMQ/VPERMPD"); 4237 4238 unsigned Mask = 0; 4239 for (unsigned i = 0; i != NumElts; ++i) { 4240 int Elt = N->getMaskElt(i); 4241 if (Elt < 0) 4242 continue; 4243 Mask |= Elt << (i*2); 4244 } 4245 4246 return Mask; 4247} 4248/// isZeroNode - Returns true if Elt is a constant zero or a floating point 4249/// constant +0.0. 4250bool X86::isZeroNode(SDValue Elt) { 4251 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Elt)) 4252 return CN->isNullValue(); 4253 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Elt)) 4254 return CFP->getValueAPF().isPosZero(); 4255 return false; 4256} 4257 4258/// CommuteVectorShuffle - Swap vector_shuffle operands as well as values in 4259/// their permute mask. 4260static SDValue CommuteVectorShuffle(ShuffleVectorSDNode *SVOp, 4261 SelectionDAG &DAG) { 4262 MVT VT = SVOp->getValueType(0).getSimpleVT(); 4263 unsigned NumElems = VT.getVectorNumElements(); 4264 SmallVector<int, 8> MaskVec; 4265 4266 for (unsigned i = 0; i != NumElems; ++i) { 4267 int Idx = SVOp->getMaskElt(i); 4268 if (Idx >= 0) { 4269 if (Idx < (int)NumElems) 4270 Idx += NumElems; 4271 else 4272 Idx -= NumElems; 4273 } 4274 MaskVec.push_back(Idx); 4275 } 4276 return DAG.getVectorShuffle(VT, SDLoc(SVOp), SVOp->getOperand(1), 4277 SVOp->getOperand(0), &MaskVec[0]); 4278} 4279 4280/// ShouldXformToMOVHLPS - Return true if the node should be transformed to 4281/// match movhlps. The lower half elements should come from upper half of 4282/// V1 (and in order), and the upper half elements should come from the upper 4283/// half of V2 (and in order). 4284static bool ShouldXformToMOVHLPS(ArrayRef<int> Mask, EVT VT) { 4285 if (!VT.is128BitVector()) 4286 return false; 4287 if (VT.getVectorNumElements() != 4) 4288 return false; 4289 for (unsigned i = 0, e = 2; i != e; ++i) 4290 if (!isUndefOrEqual(Mask[i], i+2)) 4291 return false; 4292 for (unsigned i = 2; i != 4; ++i) 4293 if (!isUndefOrEqual(Mask[i], i+4)) 4294 return false; 4295 return true; 4296} 4297 4298/// isScalarLoadToVector - Returns true if the node is a scalar load that 4299/// is promoted to a vector. It also returns the LoadSDNode by reference if 4300/// required. 4301static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = NULL) { 4302 if (N->getOpcode() != ISD::SCALAR_TO_VECTOR) 4303 return false; 4304 N = N->getOperand(0).getNode(); 4305 if (!ISD::isNON_EXTLoad(N)) 4306 return false; 4307 if (LD) 4308 *LD = cast<LoadSDNode>(N); 4309 return true; 4310} 4311 4312// Test whether the given value is a vector value which will be legalized 4313// into a load. 4314static bool WillBeConstantPoolLoad(SDNode *N) { 4315 if (N->getOpcode() != ISD::BUILD_VECTOR) 4316 return false; 4317 4318 // Check for any non-constant elements. 4319 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) 4320 switch (N->getOperand(i).getNode()->getOpcode()) { 4321 case ISD::UNDEF: 4322 case ISD::ConstantFP: 4323 case ISD::Constant: 4324 break; 4325 default: 4326 return false; 4327 } 4328 4329 // Vectors of all-zeros and all-ones are materialized with special 4330 // instructions rather than being loaded. 
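  // (All-zeros is typically emitted as a register XORed with itself and
  // all-ones as a PCMPEQ of a register with itself, so neither requires a
  // memory access.)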
4331   return !ISD::isBuildVectorAllZeros(N) &&
4332          !ISD::isBuildVectorAllOnes(N);
4333 }
4334
4335 /// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
4336 /// match movlp{s|d}. The lower half elements should come from the lower half of
4337 /// V1 (and in order), and the upper half elements should come from the upper
4338 /// half of V2 (and in order). And since V1 will become the source of the
4339 /// MOVLP, it must be either a vector load or a scalar load to vector.
4340 static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2,
4341                                ArrayRef<int> Mask, EVT VT) {
4342   if (!VT.is128BitVector())
4343     return false;
4344
4345   if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
4346     return false;
4347   // If V2 is a vector load, don't do this transformation. We will try to use
4348   // a load-folding shufps op instead.
4349   if (ISD::isNON_EXTLoad(V2) || WillBeConstantPoolLoad(V2))
4350     return false;
4351
4352   unsigned NumElems = VT.getVectorNumElements();
4353
4354   if (NumElems != 2 && NumElems != 4)
4355     return false;
4356   for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4357     if (!isUndefOrEqual(Mask[i], i))
4358       return false;
4359   for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
4360     if (!isUndefOrEqual(Mask[i], i+NumElems))
4361       return false;
4362   return true;
4363 }
4364
4365 /// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are
4366 /// all the same.
4367 static bool isSplatVector(SDNode *N) {
4368   if (N->getOpcode() != ISD::BUILD_VECTOR)
4369     return false;
4370
4371   SDValue SplatValue = N->getOperand(0);
4372   for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
4373     if (N->getOperand(i) != SplatValue)
4374       return false;
4375   return true;
4376 }
4377
4378 /// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
4379 /// to a zero vector.
4380 /// FIXME: move to dag combiner / method on ShuffleVectorSDNode
4381 static bool isZeroShuffle(ShuffleVectorSDNode *N) {
4382   SDValue V1 = N->getOperand(0);
4383   SDValue V2 = N->getOperand(1);
4384   unsigned NumElems = N->getValueType(0).getVectorNumElements();
4385   for (unsigned i = 0; i != NumElems; ++i) {
4386     int Idx = N->getMaskElt(i);
4387     if (Idx >= (int)NumElems) {
4388       unsigned Opc = V2.getOpcode();
4389       if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode()))
4390         continue;
4391       if (Opc != ISD::BUILD_VECTOR ||
4392           !X86::isZeroNode(V2.getOperand(Idx-NumElems)))
4393         return false;
4394     } else if (Idx >= 0) {
4395       unsigned Opc = V1.getOpcode();
4396       if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode()))
4397         continue;
4398       if (Opc != ISD::BUILD_VECTOR ||
4399           !X86::isZeroNode(V1.getOperand(Idx)))
4400         return false;
4401     }
4402   }
4403   return true;
4404 }
4405
4406 /// getZeroVector - Returns a vector of specified type with all zero elements.
4407 ///
4408 static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget,
4409                              SelectionDAG &DAG, SDLoc dl) {
4410   assert(VT.isVector() && "Expected a vector type");
4411
4412   // Always build SSE zero vectors as <4 x i32> bitcasted
4413   // to their dest type. This ensures they get CSE'd.
4414   SDValue Vec;
4415   if (VT.is128BitVector()) {  // SSE
4416     if (Subtarget->hasSSE2()) {  // SSE2
4417       SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
4418       Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
4419     } else { // SSE1
4420       SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32);
4421       Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
4422     }
4423   } else if (VT.is256BitVector()) { // AVX
4424     if (Subtarget->hasInt256()) { // AVX2
4425       SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
4426       SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
4427       Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops,
4428                         array_lengthof(Ops));
4429     } else {
4430       // 256-bit logic and arithmetic instructions in AVX are all
4431       // floating-point, with no support for integer ops. Emit fp zeroed vectors.
4432       SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32);
4433       SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
4434       Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops,
4435                         array_lengthof(Ops));
4436     }
4437   } else
4438     llvm_unreachable("Unexpected vector type");
4439
4440   return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
4441 }
4442
4443 /// getOnesVector - Returns a vector of specified type with all bits set.
4444 /// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with
4445 /// no AVX2 support, use two <4 x i32> inserted in an <8 x i32> appropriately.
4446 /// Then bitcast to their original type, ensuring they get CSE'd.
4447 static SDValue getOnesVector(MVT VT, bool HasInt256, SelectionDAG &DAG,
4448                              SDLoc dl) {
4449   assert(VT.isVector() && "Expected a vector type");
4450
4451   SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32);
4452   SDValue Vec;
4453   if (VT.is256BitVector()) {
4454     if (HasInt256) { // AVX2
4455       SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
4456       Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops,
4457                         array_lengthof(Ops));
4458     } else { // AVX
4459       Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
4460       Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl);
4461     }
4462   } else if (VT.is128BitVector()) {
4463     Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
4464   } else
4465     llvm_unreachable("Unexpected vector type");
4466
4467   return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
4468 }
4469
4470 /// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
4471 /// that point to V2 point to its first element.
4472 static void NormalizeMask(SmallVectorImpl<int> &Mask, unsigned NumElems) {
4473   for (unsigned i = 0; i != NumElems; ++i) {
4474     if (Mask[i] > (int)NumElems) {
4475       Mask[i] = NumElems;
4476     }
4477   }
4478 }
4479
4480 /// getMOVLMask - Returns a vector_shuffle mask for a movs{s|d}, movd
4481 /// operation of specified width.
4482 static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
4483                        SDValue V2) {
4484   unsigned NumElems = VT.getVectorNumElements();
4485   SmallVector<int, 8> Mask;
4486   Mask.push_back(NumElems);
4487   for (unsigned i = 1; i != NumElems; ++i)
4488     Mask.push_back(i);
4489   return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
4490 }
4491
4492 /// getUnpackl - Returns a vector_shuffle node for an unpackl operation.
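/// For example, on v4i32 this builds the mask <0, 4, 1, 5>, the interleave
/// of the low halves of V1 and V2 that PUNPCKLDQ performs.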
4493static SDValue getUnpackl(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
4494 SDValue V2) {
4495 unsigned NumElems = VT.getVectorNumElements();
4496 SmallVector<int, 8> Mask;
4497 for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
4498 Mask.push_back(i);
4499 Mask.push_back(i + NumElems);
4500 }
4501 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
4502}
4503
4504/// getUnpackh - Returns a vector_shuffle node for an unpackh operation.
4505static SDValue getUnpackh(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
4506 SDValue V2) {
4507 unsigned NumElems = VT.getVectorNumElements();
4508 SmallVector<int, 8> Mask;
4509 for (unsigned i = 0, Half = NumElems/2; i != Half; ++i) {
4510 Mask.push_back(i + Half);
4511 Mask.push_back(i + NumElems + Half);
4512 }
4513 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
4514}
4515
4516// PromoteSplati8i16 - All i16 and i8 vector types can't be used directly by
4517// a generic shuffle instruction because the target has no such instructions.
4518// Generate shuffles which repeat i16 and i8 several times until they can be
4519// represented by v4f32 and then be manipulated by target supported shuffles.
4520static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) {
4521 EVT VT = V.getValueType();
4522 int NumElems = VT.getVectorNumElements();
4523 SDLoc dl(V);
4524
4525 while (NumElems > 4) {
4526 if (EltNo < NumElems/2) {
4527 V = getUnpackl(DAG, dl, VT, V, V);
4528 } else {
4529 V = getUnpackh(DAG, dl, VT, V, V);
4530 EltNo -= NumElems/2;
4531 }
4532 NumElems >>= 1;
4533 }
4534 return V;
4535}
4536
4537/// getLegalSplat - Generate a legal splat with supported x86 shuffles
4538static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) {
4539 EVT VT = V.getValueType();
4540 SDLoc dl(V);
4541
4542 if (VT.is128BitVector()) {
4543 V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V);
4544 int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo };
4545 V = DAG.getVectorShuffle(MVT::v4f32, dl, V, DAG.getUNDEF(MVT::v4f32),
4546 &SplatMask[0]);
4547 } else if (VT.is256BitVector()) {
4548 // To use VPERMILPS to splat scalars, the second half of indices must
4549 // refer to the higher part, which is a duplication of the lower one,
4550 // because VPERMILPS can only handle in-lane permutations.
4551 int SplatMask[8] = { EltNo, EltNo, EltNo, EltNo,
4552 EltNo+4, EltNo+4, EltNo+4, EltNo+4 };
4553
4554 V = DAG.getNode(ISD::BITCAST, dl, MVT::v8f32, V);
4555 V = DAG.getVectorShuffle(MVT::v8f32, dl, V, DAG.getUNDEF(MVT::v8f32),
4556 &SplatMask[0]);
4557 } else
4558 llvm_unreachable("Vector size not supported");
4559
4560 return DAG.getNode(ISD::BITCAST, dl, VT, V);
4561}
4562
4563/// PromoteSplat - Splat is promoted to target supported vector shuffles.
4564static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
4565 EVT SrcVT = SV->getValueType(0);
4566 SDValue V1 = SV->getOperand(0);
4567 SDLoc dl(SV);
4568
4569 int EltNo = SV->getSplatIndex();
4570 int NumElems = SrcVT.getVectorNumElements();
4571 bool Is256BitVec = SrcVT.is256BitVector();
4572
4573 assert(((SrcVT.is128BitVector() && NumElems > 4) || Is256BitVec) &&
4574 "Unknown how to promote splat for type");
4575
4576 // Extract the 128-bit part containing the splat element and update
4577 // the splat element index when it refers to the higher register.
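 // For example, splatting element 6 of a v8i32 extracts the upper 128-bit
 // half and then splats element 2 of that half.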
4578 if (Is256BitVec) {
4579 V1 = Extract128BitVector(V1, EltNo, DAG, dl);
4580 if (EltNo >= NumElems/2)
4581 EltNo -= NumElems/2;
4582 }
4583
4584 // All i16 and i8 vector types can't be used directly by a generic shuffle
4585 // instruction because the target has no such instruction. Generate shuffles
4586 // which repeat i16 and i8 several times until they fit in i32, and then can
4587 // be manipulated by target supported shuffles.
4588 EVT EltVT = SrcVT.getVectorElementType();
4589 if (EltVT == MVT::i8 || EltVT == MVT::i16)
4590 V1 = PromoteSplati8i16(V1, DAG, EltNo);
4591
4592 // Recreate the 256-bit vector and place the same 128-bit vector
4593 // into the low and high part. This is necessary because we want
4594 // to use VPERM* to shuffle the vectors.
4595 if (Is256BitVec) {
4596 V1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT, V1, V1);
4597 }
4598
4599 return getLegalSplat(DAG, V1, EltNo);
4600}
4601
4602/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
4603/// vector and a zero or undef vector. This produces a shuffle where the low
4604/// element of V2 is swizzled into the zero/undef vector, landing at element
4605/// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
4606static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
4607 bool IsZero,
4608 const X86Subtarget *Subtarget,
4609 SelectionDAG &DAG) {
4610 EVT VT = V2.getValueType();
4611 SDValue V1 = IsZero
4612 ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
4613 unsigned NumElems = VT.getVectorNumElements();
4614 SmallVector<int, 16> MaskVec;
4615 for (unsigned i = 0; i != NumElems; ++i)
4616 // If this is the insertion idx, put the low elt of V2 here.
4617 MaskVec.push_back(i == Idx ? NumElems : i);
4618 return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, &MaskVec[0]);
4619}
4620
4621/// getTargetShuffleMask - Calculates the shuffle mask corresponding to the
4622/// target specific opcode. Returns true if the Mask could be calculated.
4623/// Sets IsUnary to true if the shuffle only uses one source.
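/// For example, (X86ISD::UNPCKL v4i32 V1, V2) decodes to the mask
/// <0, 4, 1, 5>; IsUnary stays false because both sources are used.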
4624static bool getTargetShuffleMask(SDNode *N, MVT VT,
4625 SmallVectorImpl<int> &Mask, bool &IsUnary) {
4626 unsigned NumElems = VT.getVectorNumElements();
4627 SDValue ImmN;
4628
4629 IsUnary = false;
4630 switch(N->getOpcode()) {
4631 case X86ISD::SHUFP:
4632 ImmN = N->getOperand(N->getNumOperands()-1);
4633 DecodeSHUFPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4634 break;
4635 case X86ISD::UNPCKH:
4636 DecodeUNPCKHMask(VT, Mask);
4637 break;
4638 case X86ISD::UNPCKL:
4639 DecodeUNPCKLMask(VT, Mask);
4640 break;
4641 case X86ISD::MOVHLPS:
4642 DecodeMOVHLPSMask(NumElems, Mask);
4643 break;
4644 case X86ISD::MOVLHPS:
4645 DecodeMOVLHPSMask(NumElems, Mask);
4646 break;
4647 case X86ISD::PALIGNR:
4648 ImmN = N->getOperand(N->getNumOperands()-1);
4649 DecodePALIGNRMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4650 break;
4651 case X86ISD::PSHUFD:
4652 case X86ISD::VPERMILP:
4653 ImmN = N->getOperand(N->getNumOperands()-1);
4654 DecodePSHUFMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4655 IsUnary = true;
4656 break;
4657 case X86ISD::PSHUFHW:
4658 ImmN = N->getOperand(N->getNumOperands()-1);
4659 DecodePSHUFHWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4660 IsUnary = true;
4661 break;
4662 case X86ISD::PSHUFLW:
4663 ImmN = N->getOperand(N->getNumOperands()-1);
4664 DecodePSHUFLWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4665 IsUnary = true;
4666 break;
4667 case X86ISD::VPERMI:
4668 ImmN = N->getOperand(N->getNumOperands()-1);
4669 DecodeVPERMMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4670 IsUnary = true;
4671 break;
4672 case X86ISD::MOVSS:
4673 case X86ISD::MOVSD: {
4674 // Index 0 always comes from the first element of the second source;
4675 // this is why MOVSS and MOVSD are used in the first place. The other
4676 // elements come from the remaining positions of the first source vector.
4677 Mask.push_back(NumElems);
4678 for (unsigned i = 1; i != NumElems; ++i) {
4679 Mask.push_back(i);
4680 }
4681 break;
4682 }
4683 case X86ISD::VPERM2X128:
4684 ImmN = N->getOperand(N->getNumOperands()-1);
4685 DecodeVPERM2X128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4686 if (Mask.empty()) return false;
4687 break;
4688 case X86ISD::MOVDDUP:
4689 case X86ISD::MOVLHPD:
4690 case X86ISD::MOVLPD:
4691 case X86ISD::MOVLPS:
4692 case X86ISD::MOVSHDUP:
4693 case X86ISD::MOVSLDUP:
4694 // Not yet implemented
4695 return false;
4696 default: llvm_unreachable("unknown target shuffle node");
4697 }
4698
4699 return true;
4700}
4701
4702/// getShuffleScalarElt - Returns the scalar element that will make up the ith
4703/// element of the result of the vector shuffle.
4704static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
4705 unsigned Depth) {
4706 if (Depth == 6)
4707 return SDValue(); // Limit search depth.
4708
4709 SDValue V = SDValue(N, 0);
4710 EVT VT = V.getValueType();
4711 unsigned Opcode = V.getOpcode();
4712
4713 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
4714 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
4715 int Elt = SV->getMaskElt(Index);
4716
4717 if (Elt < 0)
4718 return DAG.getUNDEF(VT.getVectorElementType());
4719
4720 unsigned NumElems = VT.getVectorNumElements();
4721 SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
4722 : SV->getOperand(1);
4723 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
4724 }
4725
4726 // Recurse into target specific vector shuffles to find scalars.
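 // For example, asking for element 2 of (PSHUFD V, 0x1B), whose decoded mask
 // is <3, 2, 1, 0>, recurses into V asking for its element 1.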
4727 if (isTargetShuffle(Opcode)) {
4728 MVT ShufVT = V.getValueType().getSimpleVT();
4729 unsigned NumElems = ShufVT.getVectorNumElements();
4730 SmallVector<int, 16> ShuffleMask;
4731 bool IsUnary;
4732
4733 if (!getTargetShuffleMask(N, ShufVT, ShuffleMask, IsUnary))
4734 return SDValue();
4735
4736 int Elt = ShuffleMask[Index];
4737 if (Elt < 0)
4738 return DAG.getUNDEF(ShufVT.getVectorElementType());
4739
4740 SDValue NewV = (Elt < (int)NumElems) ? N->getOperand(0)
4741 : N->getOperand(1);
4742 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
4743 Depth+1);
4744 }
4745
4746 // Actual nodes that may contain scalar elements
4747 if (Opcode == ISD::BITCAST) {
4748 V = V.getOperand(0);
4749 EVT SrcVT = V.getValueType();
4750 unsigned NumElems = VT.getVectorNumElements();
4751
4752 if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
4753 return SDValue();
4754 }
4755
4756 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
4757 return (Index == 0) ? V.getOperand(0)
4758 : DAG.getUNDEF(VT.getVectorElementType());
4759
4760 if (V.getOpcode() == ISD::BUILD_VECTOR)
4761 return V.getOperand(Index);
4762
4763 return SDValue();
4764}
4765
4766/// getNumOfConsecutiveZeros - Return the number of elements of a vector
4767/// shuffle operation that come consecutively from a zero. The
4768/// search can start in two different directions, from left or right.
4769/// We count undefs as zeros until PreferredNum is reached.
4770static unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp,
4771 unsigned NumElems, bool ZerosFromLeft,
4772 SelectionDAG &DAG,
4773 unsigned PreferredNum = -1U) {
4774 unsigned NumZeros = 0;
4775 for (unsigned i = 0; i != NumElems; ++i) {
4776 unsigned Index = ZerosFromLeft ? i : NumElems - i - 1;
4777 SDValue Elt = getShuffleScalarElt(SVOp, Index, DAG, 0);
4778 if (!Elt.getNode())
4779 break;
4780
4781 if (X86::isZeroNode(Elt))
4782 ++NumZeros;
4783 else if (Elt.getOpcode() == ISD::UNDEF) // Undef as zero up to PreferredNum.
4784 NumZeros = std::min(NumZeros + 1, PreferredNum);
4785 else
4786 break;
4787 }
4788
4789 return NumZeros;
4790}
4791
4792/// isShuffleMaskConsecutive - Check if the shuffle mask indices [MaskI, MaskE)
4793/// correspond consecutively to elements from one of the vector operands,
4794/// starting from its index OpIdx. Also sets OpNum to the source vector operand.
4795static
4796bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp,
4797 unsigned MaskI, unsigned MaskE, unsigned OpIdx,
4798 unsigned NumElems, unsigned &OpNum) {
4799 bool SeenV1 = false;
4800 bool SeenV2 = false;
4801
4802 for (unsigned i = MaskI; i != MaskE; ++i, ++OpIdx) {
4803 int Idx = SVOp->getMaskElt(i);
4804 // Ignore undef indices
4805 if (Idx < 0)
4806 continue;
4807
4808 if (Idx < (int)NumElems)
4809 SeenV1 = true;
4810 else
4811 SeenV2 = true;
4812
4813 // Only accept consecutive elements from the same vector
4814 if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2))
4815 return false;
4816 }
4817
4818 OpNum = SeenV1 ? 0 : 1;
4819 return true;
4820}
4821
4822/// isVectorShiftRight - Returns true if the shuffle can be implemented as a
4823/// logical right shift of a vector.
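/// For example, on v4i32 the shuffle (vector_shuffle V1, Zero <1, 2, 3, 4>)
/// is a logical right shift of V1 by one element (PSRLDQ by 4 bytes).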
4824static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
4825 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
4826 unsigned NumElems = SVOp->getValueType(0).getVectorNumElements();
4827 unsigned NumZeros = getNumOfConsecutiveZeros(
4828 SVOp, NumElems, false /* check zeros from right */, DAG,
4829 SVOp->getMaskElt(0));
4830 unsigned OpSrc;
4831
4832 if (!NumZeros)
4833 return false;
4834
4835 // Considering the elements in the mask that are not consecutive zeros,
4836 // check if they consecutively come from only one of the source vectors.
4837 //
4838 // V1 = {X, A, B, C} 0
4839 // \ \ \ /
4840 // vector_shuffle V1, V2 <1, 2, 3, X>
4841 //
4842 if (!isShuffleMaskConsecutive(SVOp,
4843 0, // Mask Start Index
4844 NumElems-NumZeros, // Mask End Index (exclusive)
4845 NumZeros, // Where to start looking in the src vector
4846 NumElems, // Number of elements in vector
4847 OpSrc)) // Which source operand ?
4848 return false;
4849
4850 isLeft = false;
4851 ShAmt = NumZeros;
4852 ShVal = SVOp->getOperand(OpSrc);
4853 return true;
4854}
4855
4856/// isVectorShiftLeft - Returns true if the shuffle can be implemented as a
4857/// logical left shift of a vector.
4858static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
4859 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
4860 unsigned NumElems = SVOp->getValueType(0).getVectorNumElements();
4861 unsigned NumZeros = getNumOfConsecutiveZeros(
4862 SVOp, NumElems, true /* check zeros from left */, DAG,
4863 NumElems - SVOp->getMaskElt(NumElems - 1) - 1);
4864 unsigned OpSrc;
4865
4866 if (!NumZeros)
4867 return false;
4868
4869 // Considering the elements in the mask that are not consecutive zeros,
4870 // check if they consecutively come from only one of the source vectors.
4871 //
4872 // 0 { A, B, X, X } = V2
4873 // / \ / /
4874 // vector_shuffle V1, V2 <X, X, 4, 5>
4875 //
4876 if (!isShuffleMaskConsecutive(SVOp,
4877 NumZeros, // Mask Start Index
4878 NumElems, // Mask End Index (exclusive)
4879 0, // Where to start looking in the src vector
4880 NumElems, // Number of elements in vector
4881 OpSrc)) // Which source operand ?
4882 return false;
4883
4884 isLeft = true;
4885 ShAmt = NumZeros;
4886 ShVal = SVOp->getOperand(OpSrc);
4887 return true;
4888}
4889
4890/// isVectorShift - Returns true if the shuffle can be implemented as a
4891/// logical left or right shift of a vector.
4892static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
4893 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
4894 // Although the logic below supports any bitwidth size, there are no
4895 // shift instructions which handle more than 128-bit vectors.
4896 if (!SVOp->getValueType(0).is128BitVector())
4897 return false;
4898
4899 if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) ||
4900 isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt))
4901 return true;
4902
4903 return false;
4904}
4905
4906/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
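/// Adjacent non-zero bytes are paired into i16 elements (low byte | high
/// byte << 8), inserted into a v8i16, and the result is bitcast to v16i8.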
4907/// 4908static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros, 4909 unsigned NumNonZero, unsigned NumZero, 4910 SelectionDAG &DAG, 4911 const X86Subtarget* Subtarget, 4912 const TargetLowering &TLI) { 4913 if (NumNonZero > 8) 4914 return SDValue(); 4915 4916 SDLoc dl(Op); 4917 SDValue V(0, 0); 4918 bool First = true; 4919 for (unsigned i = 0; i < 16; ++i) { 4920 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0; 4921 if (ThisIsNonZero && First) { 4922 if (NumZero) 4923 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl); 4924 else 4925 V = DAG.getUNDEF(MVT::v8i16); 4926 First = false; 4927 } 4928 4929 if ((i & 1) != 0) { 4930 SDValue ThisElt(0, 0), LastElt(0, 0); 4931 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0; 4932 if (LastIsNonZero) { 4933 LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl, 4934 MVT::i16, Op.getOperand(i-1)); 4935 } 4936 if (ThisIsNonZero) { 4937 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i)); 4938 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16, 4939 ThisElt, DAG.getConstant(8, MVT::i8)); 4940 if (LastIsNonZero) 4941 ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt); 4942 } else 4943 ThisElt = LastElt; 4944 4945 if (ThisElt.getNode()) 4946 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt, 4947 DAG.getIntPtrConstant(i/2)); 4948 } 4949 } 4950 4951 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V); 4952} 4953 4954/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16. 4955/// 4956static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros, 4957 unsigned NumNonZero, unsigned NumZero, 4958 SelectionDAG &DAG, 4959 const X86Subtarget* Subtarget, 4960 const TargetLowering &TLI) { 4961 if (NumNonZero > 4) 4962 return SDValue(); 4963 4964 SDLoc dl(Op); 4965 SDValue V(0, 0); 4966 bool First = true; 4967 for (unsigned i = 0; i < 8; ++i) { 4968 bool isNonZero = (NonZeros & (1 << i)) != 0; 4969 if (isNonZero) { 4970 if (First) { 4971 if (NumZero) 4972 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl); 4973 else 4974 V = DAG.getUNDEF(MVT::v8i16); 4975 First = false; 4976 } 4977 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, 4978 MVT::v8i16, V, Op.getOperand(i), 4979 DAG.getIntPtrConstant(i)); 4980 } 4981 } 4982 4983 return V; 4984} 4985 4986/// getVShift - Return a vector logical shift node. 4987/// 4988static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, 4989 unsigned NumBits, SelectionDAG &DAG, 4990 const TargetLowering &TLI, SDLoc dl) { 4991 assert(VT.is128BitVector() && "Unknown type for VShift"); 4992 EVT ShVT = MVT::v2i64; 4993 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ; 4994 SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp); 4995 return DAG.getNode(ISD::BITCAST, dl, VT, 4996 DAG.getNode(Opc, dl, ShVT, SrcOp, 4997 DAG.getConstant(NumBits, 4998 TLI.getScalarShiftAmountTy(SrcOp.getValueType())))); 4999} 5000 5001SDValue 5002X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, SDLoc dl, 5003 SelectionDAG &DAG) const { 5004 5005 // Check if the scalar load can be widened into a vector load. And if 5006 // the address is "base + cst" see if the cst can be "absorbed" into 5007 // the shuffle mask. 
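 // For example, an f32 load from (FrameIndex + 8) can be widened to a v4f32
 // load of the whole 16-byte slot and splatted with the mask <2, 2, 2, 2>.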
5008 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
5009 SDValue Ptr = LD->getBasePtr();
5010 if (!ISD::isNormalLoad(LD) || LD->isVolatile())
5011 return SDValue();
5012 EVT PVT = LD->getValueType(0);
5013 if (PVT != MVT::i32 && PVT != MVT::f32)
5014 return SDValue();
5015
5016 int FI = -1;
5017 int64_t Offset = 0;
5018 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
5019 FI = FINode->getIndex();
5020 Offset = 0;
5021 } else if (DAG.isBaseWithConstantOffset(Ptr) &&
5022 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
5023 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
5024 Offset = Ptr.getConstantOperandVal(1);
5025 Ptr = Ptr.getOperand(0);
5026 } else {
5027 return SDValue();
5028 }
5029
5030 // FIXME: 256-bit vector instructions don't require a strict alignment,
5031 // improve this code to support it better.
5032 unsigned RequiredAlign = VT.getSizeInBits()/8;
5033 SDValue Chain = LD->getChain();
5034 // Make sure the stack object alignment is at least 16 or 32.
5035 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
5036 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
5037 if (MFI->isFixedObjectIndex(FI)) {
5038 // Can't change the alignment. FIXME: It's possible to compute
5039 // the exact stack offset and reference FI + adjusted offset instead,
5040 // if someone *really* cares about this. That's the way to implement it.
5041 return SDValue();
5042 } else {
5043 MFI->setObjectAlignment(FI, RequiredAlign);
5044 }
5045 }
5046
5047 // (Offset % 16 or 32) must be a multiple of 4. The address is then
5048 // Ptr + (Offset & ~(RequiredAlign-1)).
5049 if (Offset < 0)
5050 return SDValue();
5051 if ((Offset % RequiredAlign) & 3)
5052 return SDValue();
5053 int64_t StartOffset = Offset & ~(RequiredAlign-1);
5054 if (StartOffset)
5055 Ptr = DAG.getNode(ISD::ADD, SDLoc(Ptr), Ptr.getValueType(),
5056 Ptr, DAG.getConstant(StartOffset, Ptr.getValueType()));
5057
5058 int EltNo = (Offset - StartOffset) >> 2;
5059 unsigned NumElems = VT.getVectorNumElements();
5060
5061 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
5062 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
5063 LD->getPointerInfo().getWithOffset(StartOffset),
5064 false, false, false, 0);
5065
5066 SmallVector<int, 8> Mask;
5067 for (unsigned i = 0; i != NumElems; ++i)
5068 Mask.push_back(EltNo);
5069
5070 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]);
5071 }
5072
5073 return SDValue();
5074}
5075
5076/// EltsFromConsecutiveLoads - Given the initializing elements 'Elts' of a
5077/// vector of type 'VT', see if the elements can be replaced by a single large
5078/// load which has the same value as a build_vector whose operands are 'elts'.
5079///
5080/// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a
5081///
5082/// FIXME: we'd also like to handle the case where the last elements are zero
5083/// rather than undef via VZEXT_LOAD, but we do not detect that case today.
5084/// There's even a handy isZeroNode for that purpose.
5085static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts,
5086 SDLoc &DL, SelectionDAG &DAG) {
5087 EVT EltVT = VT.getVectorElementType();
5088 unsigned NumElems = Elts.size();
5089
5090 LoadSDNode *LDBase = NULL;
5091 unsigned LastLoadedElt = -1U;
5092
5093 // For each element in the initializer, see if we've found a load or an undef.
5094 // If we don't find an initial load element, or later load elements are
5095 // non-consecutive, bail out.
5096 for (unsigned i = 0; i < NumElems; ++i) {
5097 SDValue Elt = Elts[i];
5098
5099 if (!Elt.getNode() ||
5100 (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode())))
5101 return SDValue();
5102 if (!LDBase) {
5103 if (Elt.getNode()->getOpcode() == ISD::UNDEF)
5104 return SDValue();
5105 LDBase = cast<LoadSDNode>(Elt.getNode());
5106 LastLoadedElt = i;
5107 continue;
5108 }
5109 if (Elt.getOpcode() == ISD::UNDEF)
5110 continue;
5111
5112 LoadSDNode *LD = cast<LoadSDNode>(Elt);
5113 if (!DAG.isConsecutiveLoad(LD, LDBase, EltVT.getSizeInBits()/8, i))
5114 return SDValue();
5115 LastLoadedElt = i;
5116 }
5117
5118 // If we have found an entire vector of loads and undefs, then return a large
5119 // load of the entire vector width starting at the base pointer. If we found
5120 // consecutive loads for the low half, generate a vzext_load node.
5121 if (LastLoadedElt == NumElems - 1) {
5122 SDValue NewLd = SDValue();
5123 if (DAG.InferPtrAlignment(LDBase->getBasePtr()) >= 16)
5124 NewLd = DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
5125 LDBase->getPointerInfo(),
5126 LDBase->isVolatile(), LDBase->isNonTemporal(),
5127 LDBase->isInvariant(), 0);
 else
5128 NewLd = DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
5129 LDBase->getPointerInfo(),
5130 LDBase->isVolatile(), LDBase->isNonTemporal(),
5131 LDBase->isInvariant(), LDBase->getAlignment());
5132
5133 if (LDBase->hasAnyUseOfValue(1)) {
5134 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
5135 SDValue(LDBase, 1),
5136 SDValue(NewLd.getNode(), 1));
5137 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
5138 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
5139 SDValue(NewLd.getNode(), 1));
5140 }
5141
5142 return NewLd;
5143 }
5144 if (NumElems == 4 && LastLoadedElt == 1 &&
5145 DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) {
5146 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
5147 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
5148 SDValue ResNode =
5149 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops,
5150 array_lengthof(Ops), MVT::i64,
5151 LDBase->getPointerInfo(),
5152 LDBase->getAlignment(),
5153 false/*isVolatile*/, true/*ReadMem*/,
5154 false/*WriteMem*/);
5155
5156 // Make sure the newly-created LOAD is in the same position as LDBase in
5157 // terms of dependency. We create a TokenFactor for LDBase and ResNode, and
5158 // update uses of LDBase's output chain to use the TokenFactor.
5159 if (LDBase->hasAnyUseOfValue(1)) {
5160 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
5161 SDValue(LDBase, 1), SDValue(ResNode.getNode(), 1));
5162 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
5163 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
5164 SDValue(ResNode.getNode(), 1));
5165 }
5166
5167 return DAG.getNode(ISD::BITCAST, DL, VT, ResNode);
5168 }
5169 return SDValue();
5170}
5171
5172/// LowerVectorBroadcast - Attempt to use the vbroadcast instruction
5173/// to generate a splat value for the following cases:
5174/// 1. A splat BUILD_VECTOR which uses a single scalar load, or a constant.
5175/// 2. A splat shuffle which uses a scalar_to_vector node which comes from
5176/// a scalar load, or a constant.
5177/// The VBROADCAST node is returned when a pattern is found,
5178/// or SDValue() otherwise.
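/// For example, (v8f32 build_vector (f32 load %p) x 8) becomes
/// (v8f32 VBROADCAST %p), i.e. a single vbroadcastss from memory.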
5179SDValue
5180X86TargetLowering::LowerVectorBroadcast(SDValue Op, SelectionDAG &DAG) const {
5181 if (!Subtarget->hasFp256())
5182 return SDValue();
5183
5184 MVT VT = Op.getValueType().getSimpleVT();
5185 SDLoc dl(Op);
5186
5187 assert((VT.is128BitVector() || VT.is256BitVector()) &&
5188 "Unsupported vector type for broadcast.");
5189
5190 SDValue Ld;
5191 bool ConstSplatVal;
5192
5193 switch (Op.getOpcode()) {
5194 default:
5195 // Unknown pattern found.
5196 return SDValue();
5197
5198 case ISD::BUILD_VECTOR: {
5199 // The BUILD_VECTOR node must be a splat.
5200 if (!isSplatVector(Op.getNode()))
5201 return SDValue();
5202
5203 Ld = Op.getOperand(0);
5204 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
5205 Ld.getOpcode() == ISD::ConstantFP);
5206
5207 // The suspected load node has several users. Make sure that all
5208 // of its users are from the BUILD_VECTOR node.
5209 // Constants may have multiple users.
5210 if (!ConstSplatVal && !Ld->hasNUsesOfValue(VT.getVectorNumElements(), 0))
5211 return SDValue();
5212 break;
5213 }
5214
5215 case ISD::VECTOR_SHUFFLE: {
5216 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
5217
5218 // Shuffles must have a splat mask where the first element is
5219 // broadcasted.
5220 if ((!SVOp->isSplat()) || SVOp->getMaskElt(0) != 0)
5221 return SDValue();
5222
5223 SDValue Sc = Op.getOperand(0);
5224 if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR &&
5225 Sc.getOpcode() != ISD::BUILD_VECTOR) {
5226
5227 if (!Subtarget->hasInt256())
5228 return SDValue();
5229
5230 // Use the register form of the broadcast instruction available on AVX2.
5231 if (VT.is256BitVector())
5232 Sc = Extract128BitVector(Sc, 0, DAG, dl);
5233 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc);
5234 }
5235
5236 Ld = Sc.getOperand(0);
5237 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
5238 Ld.getOpcode() == ISD::ConstantFP);
5239
5240 // The scalar_to_vector node and the suspected
5241 // load node must have exactly one user.
5242 // Constants may have multiple users.
5243 if (!ConstSplatVal && (!Sc.hasOneUse() || !Ld.hasOneUse()))
5244 return SDValue();
5245 break;
5246 }
5247 }
5248
5249 bool Is256 = VT.is256BitVector();
5250
5251 // Handle broadcasting a single constant scalar from the constant pool
5252 // into a vector. On Sandybridge it is still better to load a constant vector
5253 // from the constant pool and not to broadcast it from a scalar.
5254 if (ConstSplatVal && Subtarget->hasInt256()) {
5255 EVT CVT = Ld.getValueType();
5256 assert(!CVT.isVector() && "Must not broadcast a vector type");
5257 unsigned ScalarSize = CVT.getSizeInBits();
5258
5259 if (ScalarSize == 32 || (Is256 && ScalarSize == 64)) {
5260 const Constant *C = 0;
5261 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
5262 C = CI->getConstantIntValue();
5263 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
5264 C = CF->getConstantFPValue();
5265
5266 assert(C && "Invalid constant type");
5267
5268 SDValue CP = DAG.getConstantPool(C, getPointerTy());
5269 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
5270 Ld = DAG.getLoad(CVT, dl, DAG.getEntryNode(), CP,
5271 MachinePointerInfo::getConstantPool(),
5272 false, false, false, Alignment);
5273
5274 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
5275 }
5276 }
5277
5278 bool IsLoad = ISD::isNormalLoad(Ld.getNode());
5279 unsigned ScalarSize = Ld.getValueType().getSizeInBits();
5280
5281 // Handle AVX2 in-register broadcasts.
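 // For example, AVX2's vpbroadcastd can splat element 0 of an xmm register
 // into every lane of a ymm register without a trip through memory.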
5282 if (!IsLoad && Subtarget->hasInt256() &&
5283 (ScalarSize == 32 || (Is256 && ScalarSize == 64)))
5284 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
5285
5286 // The scalar source must be a normal load.
5287 if (!IsLoad)
5288 return SDValue();
5289
5290 if (ScalarSize == 32 || (Is256 && ScalarSize == 64))
5291 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
5292
5293 // The integer check is needed for the 64-bit into 128-bit case, so it
5294 // doesn't match double, since there is no vbroadcastsd xmm instruction.
5295 if (Subtarget->hasInt256() && Ld.getValueType().isInteger()) {
5296 if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
5297 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
5298 }
5299
5300 // Unsupported broadcast.
5301 return SDValue();
5302}
5303
5304SDValue
5305X86TargetLowering::buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) const {
5306 EVT VT = Op.getValueType();
5307
5308 // Skip if insert_vec_elt is not supported.
5309 if (!isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
5310 return SDValue();
5311
5312 SDLoc DL(Op);
5313 unsigned NumElems = Op.getNumOperands();
5314
5315 SDValue VecIn1;
5316 SDValue VecIn2;
5317 SmallVector<unsigned, 4> InsertIndices;
5318 SmallVector<int, 8> Mask(NumElems, -1);
5319
5320 for (unsigned i = 0; i != NumElems; ++i) {
5321 unsigned Opc = Op.getOperand(i).getOpcode();
5322
5323 if (Opc == ISD::UNDEF)
5324 continue;
5325
5326 if (Opc != ISD::EXTRACT_VECTOR_ELT) {
5327 // Quit if more than 1 element needs inserting.
5328 if (InsertIndices.size() > 1)
5329 return SDValue();
5330
5331 InsertIndices.push_back(i);
5332 continue;
5333 }
5334
5335 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
5336 SDValue ExtIdx = Op.getOperand(i).getOperand(1);
5337
5338 // Quit if extracted from vector of different type.
5339 if (ExtractedFromVec.getValueType() != VT)
5340 return SDValue();
5341
5342 // Quit if non-constant index.
5343 if (!isa<ConstantSDNode>(ExtIdx))
5344 return SDValue();
5345
5346 if (VecIn1.getNode() == 0)
5347 VecIn1 = ExtractedFromVec;
5348 else if (VecIn1 != ExtractedFromVec) {
5349 if (VecIn2.getNode() == 0)
5350 VecIn2 = ExtractedFromVec;
5351 else if (VecIn2 != ExtractedFromVec)
5352 // Quit if more than 2 vectors to shuffle.
5353 return SDValue();
5354 }
5355
5356 unsigned Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
5357
5358 if (ExtractedFromVec == VecIn1)
5359 Mask[i] = Idx;
5360 else if (ExtractedFromVec == VecIn2)
5361 Mask[i] = Idx + NumElems;
5362 }
5363
5364 if (VecIn1.getNode() == 0)
5365 return SDValue();
5366
5367 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
5368 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, &Mask[0]);
5369 for (unsigned i = 0, e = InsertIndices.size(); i != e; ++i) {
5370 unsigned Idx = InsertIndices[i];
5371 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
5372 DAG.getIntPtrConstant(Idx));
5373 }
5374
5375 return NV;
5376}
5377
5378SDValue
5379X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
5380 SDLoc dl(Op);
5381
5382 MVT VT = Op.getValueType().getSimpleVT();
5383 MVT ExtVT = VT.getVectorElementType();
5384 unsigned NumElems = Op.getNumOperands();
5385
5386 // Vectors containing all zeros can be matched by pxor and xorps later
5387 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
5388 // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd
5389 // and 2) ensure that i64 scalars are eliminated on x86-32 hosts.
5390 if (VT == MVT::v4i32 || VT == MVT::v8i32) 5391 return Op; 5392 5393 return getZeroVector(VT, Subtarget, DAG, dl); 5394 } 5395 5396 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width 5397 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use 5398 // vpcmpeqd on 256-bit vectors. 5399 if (Subtarget->hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) { 5400 if (VT == MVT::v4i32 || (VT == MVT::v8i32 && Subtarget->hasInt256())) 5401 return Op; 5402 5403 return getOnesVector(VT, Subtarget->hasInt256(), DAG, dl); 5404 } 5405 5406 SDValue Broadcast = LowerVectorBroadcast(Op, DAG); 5407 if (Broadcast.getNode()) 5408 return Broadcast; 5409 5410 unsigned EVTBits = ExtVT.getSizeInBits(); 5411 5412 unsigned NumZero = 0; 5413 unsigned NumNonZero = 0; 5414 unsigned NonZeros = 0; 5415 bool IsAllConstants = true; 5416 SmallSet<SDValue, 8> Values; 5417 for (unsigned i = 0; i < NumElems; ++i) { 5418 SDValue Elt = Op.getOperand(i); 5419 if (Elt.getOpcode() == ISD::UNDEF) 5420 continue; 5421 Values.insert(Elt); 5422 if (Elt.getOpcode() != ISD::Constant && 5423 Elt.getOpcode() != ISD::ConstantFP) 5424 IsAllConstants = false; 5425 if (X86::isZeroNode(Elt)) 5426 NumZero++; 5427 else { 5428 NonZeros |= (1 << i); 5429 NumNonZero++; 5430 } 5431 } 5432 5433 // All undef vector. Return an UNDEF. All zero vectors were handled above. 5434 if (NumNonZero == 0) 5435 return DAG.getUNDEF(VT); 5436 5437 // Special case for single non-zero, non-undef, element. 5438 if (NumNonZero == 1) { 5439 unsigned Idx = countTrailingZeros(NonZeros); 5440 SDValue Item = Op.getOperand(Idx); 5441 5442 // If this is an insertion of an i64 value on x86-32, and if the top bits of 5443 // the value are obviously zero, truncate the value to i32 and do the 5444 // insertion that way. Only do this if the value is non-constant or if the 5445 // value is a constant being inserted into element 0. It is cheaper to do 5446 // a constant pool load than it is to do a movd + shuffle. 5447 if (ExtVT == MVT::i64 && !Subtarget->is64Bit() && 5448 (!IsAllConstants || Idx == 0)) { 5449 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) { 5450 // Handle SSE only. 5451 assert(VT == MVT::v2i64 && "Expected an SSE value type!"); 5452 EVT VecVT = MVT::v4i32; 5453 unsigned VecElts = 4; 5454 5455 // Truncate the value (which may itself be a constant) to i32, and 5456 // convert it to a vector with movd (S2V+shuffle to zero extend). 5457 Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item); 5458 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item); 5459 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG); 5460 5461 // Now we have our 32-bit value zero extended in the low element of 5462 // a vector. If Idx != 0, swizzle it into place. 5463 if (Idx != 0) { 5464 SmallVector<int, 4> Mask; 5465 Mask.push_back(Idx); 5466 for (unsigned i = 1; i != VecElts; ++i) 5467 Mask.push_back(i); 5468 Item = DAG.getVectorShuffle(VecVT, dl, Item, DAG.getUNDEF(VecVT), 5469 &Mask[0]); 5470 } 5471 return DAG.getNode(ISD::BITCAST, dl, VT, Item); 5472 } 5473 } 5474 5475 // If we have a constant or non-constant insertion into the low element of 5476 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into 5477 // the rest of the elements. This will be matched as movd/movq/movss/movsd 5478 // depending on what the source datatype is. 
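 // For example, (v4f32 build_vector %x, 0, 0, 0) becomes SCALAR_TO_VECTOR
 // plus a shuffle against a zero vector, which matches as a single movss.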
5479 if (Idx == 0) {
5480 if (NumZero == 0)
5481 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
5482
5483 if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 ||
5484 (ExtVT == MVT::i64 && Subtarget->is64Bit())) {
5485 if (VT.is256BitVector()) {
5486 SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl);
5487 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec,
5488 Item, DAG.getIntPtrConstant(0));
5489 }
5490 assert(VT.is128BitVector() && "Expected an SSE value type!");
5491 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
5492 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
5493 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
5494 }
5495
5496 if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
5497 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
5498 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
5499 if (VT.is256BitVector()) {
5500 SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl);
5501 Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl);
5502 } else {
5503 assert(VT.is128BitVector() && "Expected an SSE value type!");
5504 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
5505 }
5506 return DAG.getNode(ISD::BITCAST, dl, VT, Item);
5507 }
5508 }
5509
5510 // Is it a vector logical left shift?
5511 if (NumElems == 2 && Idx == 1 &&
5512 X86::isZeroNode(Op.getOperand(0)) &&
5513 !X86::isZeroNode(Op.getOperand(1))) {
5514 unsigned NumBits = VT.getSizeInBits();
5515 return getVShift(true, VT,
5516 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
5517 VT, Op.getOperand(1)),
5518 NumBits/2, DAG, *this, dl);
5519 }
5520
5521 if (IsAllConstants) // Otherwise, it's better to do a constpool load.
5522 return SDValue();
5523
5524 // Otherwise, if this is a vector with i32 or f32 elements, and the element
5525 // is a non-constant being inserted into an element other than the low one,
5526 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
5527 // movd/movss) to move this into the low element, then shuffle it into
5528 // place.
5529 if (EVTBits == 32) {
5530 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
5531
5532 // Turn it into a shuffle of zero and zero-extended scalar to vector.
5533 Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, Subtarget, DAG);
5534 SmallVector<int, 8> MaskVec;
5535 for (unsigned i = 0; i != NumElems; ++i)
5536 MaskVec.push_back(i == Idx ? 0 : 1);
5537 return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]);
5538 }
5539 }
5540
5541 // Splat is obviously ok. Let legalizer expand it to a shuffle.
5542 if (Values.size() == 1) {
5543 if (EVTBits == 32) {
5544 // Instead of a shuffle like this:
5545 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
5546 // Check if it's possible to issue this instead.
5547 // shuffle (vload ptr), undef, <1, 1, 1, 1>
5548 unsigned Idx = countTrailingZeros(NonZeros);
5549 SDValue Item = Op.getOperand(Idx);
5550 if (Op.getNode()->isOnlyUserOf(Item.getNode()))
5551 return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
5552 }
5553 return SDValue();
5554 }
5555
5556 // A vector full of immediates; various special cases are already
5557 // handled, so this is best done with a single constant-pool load.
5558 if (IsAllConstants)
5559 return SDValue();
5560
5561 // For AVX-length vectors, build the individual 128-bit pieces and use
5562 // shuffles to put them in place.
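 // For example, a v8i32 build_vector is split into two v4i32 halves that are
 // recombined with VINSERTF128 nodes.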
5563 if (VT.is256BitVector()) { 5564 SmallVector<SDValue, 32> V; 5565 for (unsigned i = 0; i != NumElems; ++i) 5566 V.push_back(Op.getOperand(i)); 5567 5568 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2); 5569 5570 // Build both the lower and upper subvector. 5571 SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT, &V[0], NumElems/2); 5572 SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT, &V[NumElems / 2], 5573 NumElems/2); 5574 5575 // Recreate the wider vector with the lower and upper part. 5576 return Concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl); 5577 } 5578 5579 // Let legalizer expand 2-wide build_vectors. 5580 if (EVTBits == 64) { 5581 if (NumNonZero == 1) { 5582 // One half is zero or undef. 5583 unsigned Idx = countTrailingZeros(NonZeros); 5584 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, 5585 Op.getOperand(Idx)); 5586 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG); 5587 } 5588 return SDValue(); 5589 } 5590 5591 // If element VT is < 32 bits, convert it to inserts into a zero vector. 5592 if (EVTBits == 8 && NumElems == 16) { 5593 SDValue V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG, 5594 Subtarget, *this); 5595 if (V.getNode()) return V; 5596 } 5597 5598 if (EVTBits == 16 && NumElems == 8) { 5599 SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG, 5600 Subtarget, *this); 5601 if (V.getNode()) return V; 5602 } 5603 5604 // If element VT is == 32 bits, turn it into a number of shuffles. 5605 SmallVector<SDValue, 8> V(NumElems); 5606 if (NumElems == 4 && NumZero > 0) { 5607 for (unsigned i = 0; i < 4; ++i) { 5608 bool isZero = !(NonZeros & (1 << i)); 5609 if (isZero) 5610 V[i] = getZeroVector(VT, Subtarget, DAG, dl); 5611 else 5612 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i)); 5613 } 5614 5615 for (unsigned i = 0; i < 2; ++i) { 5616 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) { 5617 default: break; 5618 case 0: 5619 V[i] = V[i*2]; // Must be a zero vector. 5620 break; 5621 case 1: 5622 V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]); 5623 break; 5624 case 2: 5625 V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]); 5626 break; 5627 case 3: 5628 V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]); 5629 break; 5630 } 5631 } 5632 5633 bool Reverse1 = (NonZeros & 0x3) == 2; 5634 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2; 5635 int MaskVec[] = { 5636 Reverse1 ? 1 : 0, 5637 Reverse1 ? 0 : 1, 5638 static_cast<int>(Reverse2 ? NumElems+1 : NumElems), 5639 static_cast<int>(Reverse2 ? NumElems : NumElems+1) 5640 }; 5641 return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]); 5642 } 5643 5644 if (Values.size() > 1 && VT.is128BitVector()) { 5645 // Check for a build vector of consecutive loads. 5646 for (unsigned i = 0; i < NumElems; ++i) 5647 V[i] = Op.getOperand(i); 5648 5649 // Check for elements which are consecutive loads. 5650 SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG); 5651 if (LD.getNode()) 5652 return LD; 5653 5654 // Check for a build vector from mostly shuffle plus few inserting. 5655 SDValue Sh = buildFromShuffleMostly(Op, DAG); 5656 if (Sh.getNode()) 5657 return Sh; 5658 5659 // For SSE 4.1, use insertps to put the high elements into the low element. 
5660 if (getSubtarget()->hasSSE41()) { 5661 SDValue Result; 5662 if (Op.getOperand(0).getOpcode() != ISD::UNDEF) 5663 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0)); 5664 else 5665 Result = DAG.getUNDEF(VT); 5666 5667 for (unsigned i = 1; i < NumElems; ++i) { 5668 if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue; 5669 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result, 5670 Op.getOperand(i), DAG.getIntPtrConstant(i)); 5671 } 5672 return Result; 5673 } 5674 5675 // Otherwise, expand into a number of unpckl*, start by extending each of 5676 // our (non-undef) elements to the full vector width with the element in the 5677 // bottom slot of the vector (which generates no code for SSE). 5678 for (unsigned i = 0; i < NumElems; ++i) { 5679 if (Op.getOperand(i).getOpcode() != ISD::UNDEF) 5680 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i)); 5681 else 5682 V[i] = DAG.getUNDEF(VT); 5683 } 5684 5685 // Next, we iteratively mix elements, e.g. for v4f32: 5686 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0> 5687 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1> 5688 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0> 5689 unsigned EltStride = NumElems >> 1; 5690 while (EltStride != 0) { 5691 for (unsigned i = 0; i < EltStride; ++i) { 5692 // If V[i+EltStride] is undef and this is the first round of mixing, 5693 // then it is safe to just drop this shuffle: V[i] is already in the 5694 // right place, the one element (since it's the first round) being 5695 // inserted as undef can be dropped. This isn't safe for successive 5696 // rounds because they will permute elements within both vectors. 5697 if (V[i+EltStride].getOpcode() == ISD::UNDEF && 5698 EltStride == NumElems/2) 5699 continue; 5700 5701 V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]); 5702 } 5703 EltStride >>= 1; 5704 } 5705 return V[0]; 5706 } 5707 return SDValue(); 5708} 5709 5710// LowerAVXCONCAT_VECTORS - 256-bit AVX can use the vinsertf128 instruction 5711// to create 256-bit vectors from two other 128-bit ones. 5712static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 5713 SDLoc dl(Op); 5714 MVT ResVT = Op.getValueType().getSimpleVT(); 5715 5716 assert(ResVT.is256BitVector() && "Value type must be 256-bit wide"); 5717 5718 SDValue V1 = Op.getOperand(0); 5719 SDValue V2 = Op.getOperand(1); 5720 unsigned NumElems = ResVT.getVectorNumElements(); 5721 5722 return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl); 5723} 5724 5725static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 5726 assert(Op.getNumOperands() == 2); 5727 5728 // 256-bit AVX can use the vinsertf128 instruction to create 256-bit vectors 5729 // from two other 128-bit ones. 5730 return LowerAVXCONCAT_VECTORS(Op, DAG); 5731} 5732 5733// Try to lower a shuffle node into a simple blend instruction. 5734static SDValue 5735LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp, 5736 const X86Subtarget *Subtarget, SelectionDAG &DAG) { 5737 SDValue V1 = SVOp->getOperand(0); 5738 SDValue V2 = SVOp->getOperand(1); 5739 SDLoc dl(SVOp); 5740 MVT VT = SVOp->getValueType(0).getSimpleVT(); 5741 MVT EltVT = VT.getVectorElementType(); 5742 unsigned NumElems = VT.getVectorNumElements(); 5743 5744 if (!Subtarget->hasSSE41() || EltVT == MVT::i8) 5745 return SDValue(); 5746 if (!Subtarget->hasInt256() && VT == MVT::v16i16) 5747 return SDValue(); 5748 5749 // Check the mask for BLEND and build the value. 5750 unsigned MaskValue = 0; 5751 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise. 
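 // For example, a v8f32 shuffle with mask <0, 9, 2, 11, 4, 13, 6, 15> takes
 // the odd elements from V2, giving MaskValue = 0xAA (a vblendps $0xAA).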
5752 unsigned NumLanes = (NumElems-1)/8 + 1;
5753 unsigned NumElemsInLane = NumElems / NumLanes;
5754
5755 // Blend for v16i16 should be symmetric for both lanes.
5756 for (unsigned i = 0; i < NumElemsInLane; ++i) {
5757
5758 int SndLaneEltIdx = (NumLanes == 2) ?
5759 SVOp->getMaskElt(i + NumElemsInLane) : -1;
5760 int EltIdx = SVOp->getMaskElt(i);
5761
5762 if ((EltIdx < 0 || EltIdx == (int)i) &&
5763 (SndLaneEltIdx < 0 || SndLaneEltIdx == (int)(i + NumElemsInLane)))
5764 continue;
5765
5766 if (((unsigned)EltIdx == (i + NumElems)) &&
5767 (SndLaneEltIdx < 0 ||
5768 (unsigned)SndLaneEltIdx == i + NumElems + NumElemsInLane))
5769 MaskValue |= (1<<i);
5770 else
5771 return SDValue();
5772 }
5773
5774 // Convert i32 vectors to floating point if it is not AVX2.
5775 // AVX2 introduced VPBLENDD instruction for 128 and 256-bit vectors.
5776 MVT BlendVT = VT;
5777 if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
5778 BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
5779 NumElems);
5780 V1 = DAG.getNode(ISD::BITCAST, dl, BlendVT, V1);
5781 V2 = DAG.getNode(ISD::BITCAST, dl, BlendVT, V2);
5782 }
5783
5784 SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, V1, V2,
5785 DAG.getConstant(MaskValue, MVT::i32));
5786 return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
5787}
5788
5789// v8i16 shuffles - Prefer shuffles in the following order:
5790// 1. [all] pshuflw, pshufhw, optional move
5791// 2. [ssse3] 1 x pshufb
5792// 3. [ssse3] 2 x pshufb + 1 x por
5793// 4. [all] mov + pshuflw + pshufhw + N x (pextrw + pinsrw)
5794static SDValue
5795LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget,
5796 SelectionDAG &DAG) {
5797 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
5798 SDValue V1 = SVOp->getOperand(0);
5799 SDValue V2 = SVOp->getOperand(1);
5800 SDLoc dl(SVOp);
5801 SmallVector<int, 8> MaskVals;
5802
5803 // Determine if more than 1 of the words in each of the low and high quadwords
5804 // of the result come from the same quadword of one of the two inputs. Undef
5805 // mask values count as coming from any quadword, for better codegen.
5806 unsigned LoQuad[] = { 0, 0, 0, 0 };
5807 unsigned HiQuad[] = { 0, 0, 0, 0 };
5808 std::bitset<4> InputQuads;
5809 for (unsigned i = 0; i < 8; ++i) {
5810 unsigned *Quad = i < 4 ? LoQuad : HiQuad;
5811 int EltIdx = SVOp->getMaskElt(i);
5812 MaskVals.push_back(EltIdx);
5813 if (EltIdx < 0) {
5814 ++Quad[0];
5815 ++Quad[1];
5816 ++Quad[2];
5817 ++Quad[3];
5818 continue;
5819 }
5820 ++Quad[EltIdx / 4];
5821 InputQuads.set(EltIdx / 4);
5822 }
5823
5824 int BestLoQuad = -1;
5825 unsigned MaxQuad = 1;
5826 for (unsigned i = 0; i < 4; ++i) {
5827 if (LoQuad[i] > MaxQuad) {
5828 BestLoQuad = i;
5829 MaxQuad = LoQuad[i];
5830 }
5831 }
5832
5833 int BestHiQuad = -1;
5834 MaxQuad = 1;
5835 for (unsigned i = 0; i < 4; ++i) {
5836 if (HiQuad[i] > MaxQuad) {
5837 BestHiQuad = i;
5838 MaxQuad = HiQuad[i];
5839 }
5840 }
5841
5842 // For SSSE3, if all 8 words of the result come from only 1 quadword of each
5843 // of the two input vectors, shuffle them into one input vector so only a
5844 // single pshufb instruction is necessary. If there are more than 2 input
5845 // quads, disable the next transformation since it does not help SSSE3.
5846 bool V1Used = InputQuads[0] || InputQuads[1];
5847 bool V2Used = InputQuads[2] || InputQuads[3];
5848 if (Subtarget->hasSSSE3()) {
5849 if (InputQuads.count() == 2 && V1Used && V2Used) {
5850 BestLoQuad = InputQuads[0] ? 0 : 1;
5851 BestHiQuad = InputQuads[2] ?
2 : 3; 5852 } 5853 if (InputQuads.count() > 2) { 5854 BestLoQuad = -1; 5855 BestHiQuad = -1; 5856 } 5857 } 5858 5859 // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update 5860 // the shuffle mask. If a quad is scored as -1, that means that it contains 5861 // words from all 4 input quadwords. 5862 SDValue NewV; 5863 if (BestLoQuad >= 0 || BestHiQuad >= 0) { 5864 int MaskV[] = { 5865 BestLoQuad < 0 ? 0 : BestLoQuad, 5866 BestHiQuad < 0 ? 1 : BestHiQuad 5867 }; 5868 NewV = DAG.getVectorShuffle(MVT::v2i64, dl, 5869 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1), 5870 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]); 5871 NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV); 5872 5873 // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the 5874 // source words for the shuffle, to aid later transformations. 5875 bool AllWordsInNewV = true; 5876 bool InOrder[2] = { true, true }; 5877 for (unsigned i = 0; i != 8; ++i) { 5878 int idx = MaskVals[i]; 5879 if (idx != (int)i) 5880 InOrder[i/4] = false; 5881 if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad) 5882 continue; 5883 AllWordsInNewV = false; 5884 break; 5885 } 5886 5887 bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV; 5888 if (AllWordsInNewV) { 5889 for (int i = 0; i != 8; ++i) { 5890 int idx = MaskVals[i]; 5891 if (idx < 0) 5892 continue; 5893 idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4; 5894 if ((idx != i) && idx < 4) 5895 pshufhw = false; 5896 if ((idx != i) && idx > 3) 5897 pshuflw = false; 5898 } 5899 V1 = NewV; 5900 V2Used = false; 5901 BestLoQuad = 0; 5902 BestHiQuad = 1; 5903 } 5904 5905 // If we've eliminated the use of V2, and the new mask is a pshuflw or 5906 // pshufhw, that's as cheap as it gets. Return the new shuffle. 5907 if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) { 5908 unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW; 5909 unsigned TargetMask = 0; 5910 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, 5911 DAG.getUNDEF(MVT::v8i16), &MaskVals[0]); 5912 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode()); 5913 TargetMask = pshufhw ? getShufflePSHUFHWImmediate(SVOp): 5914 getShufflePSHUFLWImmediate(SVOp); 5915 V1 = NewV.getOperand(0); 5916 return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG); 5917 } 5918 } 5919 5920 // Promote splats to a larger type which usually leads to more efficient code. 5921 // FIXME: Is this true if pshufb is available? 5922 if (SVOp->isSplat()) 5923 return PromoteSplat(SVOp, DAG); 5924 5925 // If we have SSSE3, and all words of the result are from 1 input vector, 5926 // case 2 is generated, otherwise case 3 is generated. If no SSSE3 5927 // is present, fall back to case 4. 5928 if (Subtarget->hasSSSE3()) { 5929 SmallVector<SDValue,16> pshufbMask; 5930 5931 // If we have elements from both input vectors, set the high bit of the 5932 // shuffle mask element to zero out elements that come from V2 in the V1 5933 // mask, and elements that come from V1 in the V2 mask, so that the two 5934 // results can be OR'd together. 5935 bool TwoInputs = V1Used && V2Used; 5936 for (unsigned i = 0; i != 8; ++i) { 5937 int EltIdx = MaskVals[i] * 2; 5938 int Idx0 = (TwoInputs && (EltIdx >= 16)) ? 0x80 : EltIdx; 5939 int Idx1 = (TwoInputs && (EltIdx >= 16)) ? 
0x80 : EltIdx+1; 5940 pshufbMask.push_back(DAG.getConstant(Idx0, MVT::i8)); 5941 pshufbMask.push_back(DAG.getConstant(Idx1, MVT::i8)); 5942 } 5943 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V1); 5944 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1, 5945 DAG.getNode(ISD::BUILD_VECTOR, dl, 5946 MVT::v16i8, &pshufbMask[0], 16)); 5947 if (!TwoInputs) 5948 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 5949 5950 // Calculate the shuffle mask for the second input, shuffle it, and 5951 // OR it with the first shuffled input. 5952 pshufbMask.clear(); 5953 for (unsigned i = 0; i != 8; ++i) { 5954 int EltIdx = MaskVals[i] * 2; 5955 int Idx0 = (EltIdx < 16) ? 0x80 : EltIdx - 16; 5956 int Idx1 = (EltIdx < 16) ? 0x80 : EltIdx - 15; 5957 pshufbMask.push_back(DAG.getConstant(Idx0, MVT::i8)); 5958 pshufbMask.push_back(DAG.getConstant(Idx1, MVT::i8)); 5959 } 5960 V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V2); 5961 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2, 5962 DAG.getNode(ISD::BUILD_VECTOR, dl, 5963 MVT::v16i8, &pshufbMask[0], 16)); 5964 V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2); 5965 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 5966 } 5967 5968 // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order, 5969 // and update MaskVals with new element order. 5970 std::bitset<8> InOrder; 5971 if (BestLoQuad >= 0) { 5972 int MaskV[] = { -1, -1, -1, -1, 4, 5, 6, 7 }; 5973 for (int i = 0; i != 4; ++i) { 5974 int idx = MaskVals[i]; 5975 if (idx < 0) { 5976 InOrder.set(i); 5977 } else if ((idx / 4) == BestLoQuad) { 5978 MaskV[i] = idx & 3; 5979 InOrder.set(i); 5980 } 5981 } 5982 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16), 5983 &MaskV[0]); 5984 5985 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3()) { 5986 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode()); 5987 NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16, 5988 NewV.getOperand(0), 5989 getShufflePSHUFLWImmediate(SVOp), DAG); 5990 } 5991 } 5992 5993 // If BestHi >= 0, generate a pshufhw to put the high elements in order, 5994 // and update MaskVals with the new element order. 5995 if (BestHiQuad >= 0) { 5996 int MaskV[] = { 0, 1, 2, 3, -1, -1, -1, -1 }; 5997 for (unsigned i = 4; i != 8; ++i) { 5998 int idx = MaskVals[i]; 5999 if (idx < 0) { 6000 InOrder.set(i); 6001 } else if ((idx / 4) == BestHiQuad) { 6002 MaskV[i] = (idx & 3) + 4; 6003 InOrder.set(i); 6004 } 6005 } 6006 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16), 6007 &MaskV[0]); 6008 6009 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3()) { 6010 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode()); 6011 NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16, 6012 NewV.getOperand(0), 6013 getShufflePSHUFHWImmediate(SVOp), DAG); 6014 } 6015 } 6016 6017 // In case BestHi & BestLo were both -1, which means each quadword has a word 6018 // from each of the four input quadwords, calculate the InOrder bitvector now 6019 // before falling through to the insert/extract cleanup. 6020 if (BestLoQuad == -1 && BestHiQuad == -1) { 6021 NewV = V1; 6022 for (int i = 0; i != 8; ++i) 6023 if (MaskVals[i] < 0 || MaskVals[i] == i) 6024 InOrder.set(i); 6025 } 6026 6027 // The other elements are put in the right place using pextrw and pinsrw. 
6028 for (unsigned i = 0; i != 8; ++i) {
6029 if (InOrder[i])
6030 continue;
6031 int EltIdx = MaskVals[i];
6032 if (EltIdx < 0)
6033 continue;
6034 SDValue ExtOp = (EltIdx < 8) ?
6035 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1,
6036 DAG.getIntPtrConstant(EltIdx)) :
6037 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2,
6038 DAG.getIntPtrConstant(EltIdx - 8));
6039 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp,
6040 DAG.getIntPtrConstant(i));
6041 }
6042 return NewV;
6043}
6044
6045// v16i8 shuffles - Prefer shuffles in the following order:
6046// 1. [ssse3] 1 x pshufb
6047// 2. [ssse3] 2 x pshufb + 1 x por
6048// 3. [all] v8i16 shuffle + N x pextrw + rotate + pinsrw
6049static
6050SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
6051 SelectionDAG &DAG,
6052 const X86TargetLowering &TLI) {
6053 SDValue V1 = SVOp->getOperand(0);
6054 SDValue V2 = SVOp->getOperand(1);
6055 SDLoc dl(SVOp);
6056 ArrayRef<int> MaskVals = SVOp->getMask();
6057
6058 // Promote splats to a larger type which usually leads to more efficient code.
6059 // FIXME: Is this true if pshufb is available?
6060 if (SVOp->isSplat())
6061 return PromoteSplat(SVOp, DAG);
6062
6063 // If we have SSSE3, case 1 is generated when all result bytes come from
6064 // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is
6065 // present, fall back to case 3.
6066
6067 // If SSSE3, use 1 pshufb instruction per vector with elements in the result.
6068 if (TLI.getSubtarget()->hasSSSE3()) {
6069 SmallVector<SDValue,16> pshufbMask;
6070
6071 // If all result elements are from one input vector, then only translate
6072 // undef mask values to 0x80 (zero out result) in the pshufb mask.
6073 //
6074 // Otherwise, we have elements from both input vectors, and must zero out
6075 // elements that come from V2 in the first mask, and V1 in the second mask
6076 // so that we can OR them together.
6077 for (unsigned i = 0; i != 16; ++i) {
6078 int EltIdx = MaskVals[i];
6079 if (EltIdx < 0 || EltIdx >= 16)
6080 EltIdx = 0x80;
6081 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
6082 }
6083 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
6084 DAG.getNode(ISD::BUILD_VECTOR, dl,
6085 MVT::v16i8, &pshufbMask[0], 16));
6086
6087 // As PSHUFB will zero elements with negative indices, it's safe to ignore
6088 // the 2nd operand if it's undefined or zero.
6089 if (V2.getOpcode() == ISD::UNDEF ||
6090 ISD::isBuildVectorAllZeros(V2.getNode()))
6091 return V1;
6092
6093 // Calculate the shuffle mask for the second input, shuffle it, and
6094 // OR it with the first shuffled input.
6095 pshufbMask.clear();
6096 for (unsigned i = 0; i != 16; ++i) {
6097 int EltIdx = MaskVals[i];
6098 EltIdx = (EltIdx < 16) ? 0x80 : EltIdx - 16;
6099 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
6100 }
6101 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
6102 DAG.getNode(ISD::BUILD_VECTOR, dl,
6103 MVT::v16i8, &pshufbMask[0], 16));
6104 return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
6105 }
6106
6107 // No SSSE3 - Calculate in-place words and then fix all out-of-place words
6108 // with 0-16 extracts & inserts. Worst case is 16 bytes out of order from
6109 // the 16 different words that comprise the two doublequadword input vectors.
  V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
  V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
  SDValue NewV = V1;
  for (int i = 0; i != 8; ++i) {
    int Elt0 = MaskVals[i*2];
    int Elt1 = MaskVals[i*2+1];

    // This word of the result is all undef, skip it.
    if (Elt0 < 0 && Elt1 < 0)
      continue;

    // This word of the result is already in the correct place, skip it.
    if ((Elt0 == i*2) && (Elt1 == i*2+1))
      continue;

    SDValue Elt0Src = Elt0 < 16 ? V1 : V2;
    SDValue Elt1Src = Elt1 < 16 ? V1 : V2;
    SDValue InsElt;

    // If Elt0 and Elt1 are defined, are consecutive, and can be loaded
    // together using a single extract, extract the word and insert it.
    if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) {
      InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
                           DAG.getIntPtrConstant(Elt1 / 2));
      NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
                         DAG.getIntPtrConstant(i));
      continue;
    }

    // If Elt1 is defined, extract it from the appropriate source. If the
    // source byte is not also odd, shift the extracted word left 8 bits;
    // otherwise clear the bottom 8 bits if we need to do an OR.
    if (Elt1 >= 0) {
      InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
                           DAG.getIntPtrConstant(Elt1 / 2));
      if ((Elt1 & 1) == 0)
        InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt,
                             DAG.getConstant(8,
                                 TLI.getShiftAmountTy(InsElt.getValueType())));
      else if (Elt0 >= 0)
        InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt,
                             DAG.getConstant(0xFF00, MVT::i16));
    }
    // If Elt0 is defined, extract it from the appropriate source. If the
    // source byte is not also even, shift the extracted word right 8 bits. If
    // Elt1 was also defined, OR the extracted values together before
    // inserting them in the result.
    if (Elt0 >= 0) {
      SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
                                    Elt0Src, DAG.getIntPtrConstant(Elt0 / 2));
      if ((Elt0 & 1) != 0)
        InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0,
                              DAG.getConstant(8,
                                TLI.getShiftAmountTy(InsElt0.getValueType())));
      else if (Elt1 >= 0)
        InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0,
                              DAG.getConstant(0x00FF, MVT::i16));
      InsElt = Elt1 >= 0 ? DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0)
                         : InsElt0;
    }
    NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
                       DAG.getIntPtrConstant(i));
  }
  return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV);
}

// v32i8 shuffles - Translate to VPSHUFB if possible.
static
SDValue LowerVECTOR_SHUFFLEv32i8(ShuffleVectorSDNode *SVOp,
                                 const X86Subtarget *Subtarget,
                                 SelectionDAG &DAG) {
  MVT VT = SVOp->getValueType(0).getSimpleVT();
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  SDLoc dl(SVOp);
  SmallVector<int, 32> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());

  bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
  bool V1IsAllZero = ISD::isBuildVectorAllZeros(V1.getNode());
  bool V2IsAllZero = ISD::isBuildVectorAllZeros(V2.getNode());

  // VPSHUFB may be generated if
  // (1) one of the input vectors is undef or zeroinitializer.
  // The mask value 0x80 puts 0 in the corresponding slot of the vector.
  // And (2) the mask indices don't cross the 128-bit lane boundary.
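  // For example (illustrative): a mask element of 21 at result position 20
  // stays inside the high 128-bit lane and becomes in-lane index
  // 21 & 0xf == 5, whereas a mask element of 17 at result position 3 would
  // have to cross from the high lane into the low lane, so no VPSHUFB is
  // emitted.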
  if (VT != MVT::v32i8 || !Subtarget->hasInt256() ||
      (!V2IsUndef && !V2IsAllZero && !V1IsAllZero))
    return SDValue();

  if (V1IsAllZero && !V2IsAllZero) {
    CommuteVectorShuffleMask(MaskVals, 32);
    V1 = V2;
  }
  SmallVector<SDValue, 32> pshufbMask;
  for (unsigned i = 0; i != 32; i++) {
    int EltIdx = MaskVals[i];
    if (EltIdx < 0 || EltIdx >= 32)
      EltIdx = 0x80;
    else {
      if ((EltIdx >= 16 && i < 16) || (EltIdx < 16 && i >= 16))
        // Crossing lanes is not allowed.
        return SDValue();
      EltIdx &= 0xf;
    }
    pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
  }
  return DAG.getNode(X86ISD::PSHUFB, dl, MVT::v32i8, V1,
                     DAG.getNode(ISD::BUILD_VECTOR, dl,
                                 MVT::v32i8, &pshufbMask[0], 32));
}

/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
/// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be
/// done when every pair / quad of shuffle mask elements points to elements in
/// the right sequence, e.g.
/// vector_shuffle X, Y, <2, 3, | 10, 11, | 0, 1, | 14, 15>
static
SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
                                 SelectionDAG &DAG) {
  MVT VT = SVOp->getValueType(0).getSimpleVT();
  SDLoc dl(SVOp);
  unsigned NumElems = VT.getVectorNumElements();
  MVT NewVT;
  unsigned Scale;
  switch (VT.SimpleTy) {
  default: llvm_unreachable("Unexpected!");
  case MVT::v4f32: NewVT = MVT::v2f64; Scale = 2; break;
  case MVT::v4i32: NewVT = MVT::v2i64; Scale = 2; break;
  case MVT::v8i16: NewVT = MVT::v4i32; Scale = 2; break;
  case MVT::v16i8: NewVT = MVT::v4i32; Scale = 4; break;
  case MVT::v16i16: NewVT = MVT::v8i32; Scale = 2; break;
  case MVT::v32i8: NewVT = MVT::v8i32; Scale = 4; break;
  }

  SmallVector<int, 8> MaskVec;
  for (unsigned i = 0; i != NumElems; i += Scale) {
    int StartIdx = -1;
    for (unsigned j = 0; j != Scale; ++j) {
      int EltIdx = SVOp->getMaskElt(i+j);
      if (EltIdx < 0)
        continue;
      if (StartIdx < 0)
        StartIdx = (EltIdx / Scale);
      if (EltIdx != (int)(StartIdx*Scale + j))
        return SDValue();
    }
    MaskVec.push_back(StartIdx);
  }

  SDValue V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(0));
  SDValue V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(1));
  return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]);
}

/// getVZextMovL - Return a zero-extending vector move low node.
///
static SDValue getVZextMovL(MVT VT, EVT OpVT,
                            SDValue SrcOp, SelectionDAG &DAG,
                            const X86Subtarget *Subtarget, SDLoc dl) {
  if (VT == MVT::v2f64 || VT == MVT::v4f32) {
    LoadSDNode *LD = NULL;
    if (!isScalarLoadToVector(SrcOp.getNode(), &LD))
      LD = dyn_cast<LoadSDNode>(SrcOp);
    if (!LD) {
      // movssrr and movsdrr do not clear top bits. Try to use movd, movq
      // instead.
      MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
      if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) &&
          SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
          SrcOp.getOperand(0).getOpcode() == ISD::BITCAST &&
          SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) {
        // PR2108
        OpVT = (OpVT == MVT::v2f64) ?
                 MVT::v2i64 : MVT::v4i32;
        return DAG.getNode(ISD::BITCAST, dl, VT,
                           DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
                                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                                   OpVT,
                                                   SrcOp.getOperand(0)
                                                        .getOperand(0))));
      }
    }
  }

  return DAG.getNode(ISD::BITCAST, dl, VT,
                     DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
                                 DAG.getNode(ISD::BITCAST, dl,
                                             OpVT, SrcOp)));
}

/// LowerVECTOR_SHUFFLE_256 - Handle all 256-bit wide vector shuffles that
/// could not be matched by any known target-specific shuffle.
static SDValue
LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {

  SDValue NewOp = Compact8x32ShuffleNode(SVOp, DAG);
  if (NewOp.getNode())
    return NewOp;

  MVT VT = SVOp->getValueType(0).getSimpleVT();

  unsigned NumElems = VT.getVectorNumElements();
  unsigned NumLaneElems = NumElems / 2;

  SDLoc dl(SVOp);
  MVT EltVT = VT.getVectorElementType();
  MVT NVT = MVT::getVectorVT(EltVT, NumLaneElems);
  SDValue Output[2];

  SmallVector<int, 16> Mask;
  for (unsigned l = 0; l < 2; ++l) {
    // Build a shuffle mask for the output, discovering on the fly which
    // input vectors to use as shuffle operands (recorded in InputUsed).
    // If building a suitable shuffle vector proves too hard, then bail
    // out with UseBuildVector set.
    bool UseBuildVector = false;
    int InputUsed[2] = { -1, -1 }; // Not yet discovered.
    unsigned LaneStart = l * NumLaneElems;
    for (unsigned i = 0; i != NumLaneElems; ++i) {
      // The mask element. This indexes into the input.
      int Idx = SVOp->getMaskElt(i+LaneStart);
      if (Idx < 0) {
        // The mask element does not index into any input vector.
        Mask.push_back(-1);
        continue;
      }

      // The input vector this mask element indexes into.
      int Input = Idx / NumLaneElems;

      // Turn the index into an offset from the start of the input vector.
      Idx -= Input * NumLaneElems;

      // Find or create a shuffle vector operand to hold this input.
      unsigned OpNo;
      for (OpNo = 0; OpNo < array_lengthof(InputUsed); ++OpNo) {
        if (InputUsed[OpNo] == Input)
          // This input vector is already an operand.
          break;
        if (InputUsed[OpNo] < 0) {
          // Create a new operand for this input vector.
          InputUsed[OpNo] = Input;
          break;
        }
      }

      if (OpNo >= array_lengthof(InputUsed)) {
        // More than two input vectors used! Give up on trying to create a
        // shuffle vector. Insert all elements into a BUILD_VECTOR instead.
        UseBuildVector = true;
        break;
      }

      // Add the mask index for the new shuffle vector.
      Mask.push_back(Idx + OpNo * NumLaneElems);
    }

    if (UseBuildVector) {
      SmallVector<SDValue, 16> SVOps;
      for (unsigned i = 0; i != NumLaneElems; ++i) {
        // The mask element. This indexes into the input.
        int Idx = SVOp->getMaskElt(i+LaneStart);
        if (Idx < 0) {
          SVOps.push_back(DAG.getUNDEF(EltVT));
          continue;
        }

        // The input vector this mask element indexes into.
        int Input = Idx / NumElems;

        // Turn the index into an offset from the start of the input vector.
        Idx -= Input * NumElems;

        // Extract the vector element by hand.
        SVOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
                                    SVOp->getOperand(Input),
                                    DAG.getIntPtrConstant(Idx)));
      }

      // Construct the output using a BUILD_VECTOR.
      Output[l] = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, &SVOps[0],
                              SVOps.size());
    } else if (InputUsed[0] < 0) {
      // No input vectors were used! The result is undefined.
      Output[l] = DAG.getUNDEF(NVT);
    } else {
      SDValue Op0 = Extract128BitVector(SVOp->getOperand(InputUsed[0] / 2),
                                        (InputUsed[0] % 2) * NumLaneElems,
                                        DAG, dl);
      // If only one input was used, use an undefined vector for the other.
      SDValue Op1 = (InputUsed[1] < 0) ? DAG.getUNDEF(NVT) :
        Extract128BitVector(SVOp->getOperand(InputUsed[1] / 2),
                            (InputUsed[1] % 2) * NumLaneElems, DAG, dl);
      // At least one input vector was used. Create a new shuffle vector.
      Output[l] = DAG.getVectorShuffle(NVT, dl, Op0, Op1, &Mask[0]);
    }

    Mask.clear();
  }

  // Concatenate the result back
  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Output[0], Output[1]);
}

/// LowerVECTOR_SHUFFLE_128v4 - Handle all 128-bit wide vectors with
/// 4 elements, and match them with several different shuffle types.
static SDValue
LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  SDLoc dl(SVOp);
  MVT VT = SVOp->getValueType(0).getSimpleVT();

  assert(VT.is128BitVector() && "Unsupported vector size");

  std::pair<int, int> Locs[4];
  int Mask1[] = { -1, -1, -1, -1 };
  SmallVector<int, 8> PermMask(SVOp->getMask().begin(), SVOp->getMask().end());

  unsigned NumHi = 0;
  unsigned NumLo = 0;
  for (unsigned i = 0; i != 4; ++i) {
    int Idx = PermMask[i];
    if (Idx < 0) {
      Locs[i] = std::make_pair(-1, -1);
    } else {
      assert(Idx < 8 && "Invalid VECTOR_SHUFFLE index!");
      if (Idx < 4) {
        Locs[i] = std::make_pair(0, NumLo);
        Mask1[NumLo] = Idx;
        NumLo++;
      } else {
        Locs[i] = std::make_pair(1, NumHi);
        if (2+NumHi < 4)
          Mask1[2+NumHi] = Idx;
        NumHi++;
      }
    }
  }

  if (NumLo <= 2 && NumHi <= 2) {
    // No more than two elements come from either vector, so this can be
    // implemented with two shuffles. The first shuffle gathers the elements;
    // the second shuffle, which takes the first shuffle as both of its
    // vector operands, puts the elements into the right order.
    V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);

    int Mask2[] = { -1, -1, -1, -1 };

    for (unsigned i = 0; i != 4; ++i)
      if (Locs[i].first != -1) {
        unsigned Idx = (i < 2) ? 0 : 4;
        Idx += Locs[i].first * 2 + Locs[i].second;
        Mask2[i] = Idx;
      }

    return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]);
  }

  if (NumLo == 3 || NumHi == 3) {
    // Otherwise, we must have three elements from one vector, call it X, and
    // one element from the other, call it Y. First, use a shufps to build an
    // intermediate vector with the one element from Y and the element from X
    // that will be in the same half in the final destination (the indexes
    // don't matter). Then, use a shufps to build the final vector, taking the
    // half containing the element from Y from the intermediate, and the other
    // half from X.
    if (NumHi == 3) {
      // Normalize it so the 3 elements come from V1.
      CommuteVectorShuffleMask(PermMask, 4);
      std::swap(V1, V2);
    }

    // Find the element from V2.
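    // Worked example (illustrative): with PermMask <0, 4, 2, 3>, three
    // elements come from V1 and one (index 4, i.e. V2[0]) comes from V2, so
    // HiIndex below ends up as 1 and the two SHUFPS steps produce
    // <V1[0], V2[0], V1[2], V1[3]>.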
    unsigned HiIndex;
    for (HiIndex = 0; HiIndex < 3; ++HiIndex) {
      int Val = PermMask[HiIndex];
      if (Val < 0)
        continue;
      if (Val >= 4)
        break;
    }

    Mask1[0] = PermMask[HiIndex];
    Mask1[1] = -1;
    Mask1[2] = PermMask[HiIndex^1];
    Mask1[3] = -1;
    V2 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);

    if (HiIndex >= 2) {
      Mask1[0] = PermMask[0];
      Mask1[1] = PermMask[1];
      Mask1[2] = HiIndex & 1 ? 6 : 4;
      Mask1[3] = HiIndex & 1 ? 4 : 6;
      return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
    }

    Mask1[0] = HiIndex & 1 ? 2 : 0;
    Mask1[1] = HiIndex & 1 ? 0 : 2;
    Mask1[2] = PermMask[2];
    Mask1[3] = PermMask[3];
    if (Mask1[2] >= 0)
      Mask1[2] += 4;
    if (Mask1[3] >= 0)
      Mask1[3] += 4;
    return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]);
  }

  // Break it into (shuffle shuffle_hi, shuffle_lo).
  int LoMask[] = { -1, -1, -1, -1 };
  int HiMask[] = { -1, -1, -1, -1 };

  int *MaskPtr = LoMask;
  unsigned MaskIdx = 0;
  unsigned LoIdx = 0;
  unsigned HiIdx = 2;
  for (unsigned i = 0; i != 4; ++i) {
    if (i == 2) {
      MaskPtr = HiMask;
      MaskIdx = 1;
      LoIdx = 0;
      HiIdx = 2;
    }
    int Idx = PermMask[i];
    if (Idx < 0) {
      Locs[i] = std::make_pair(-1, -1);
    } else if (Idx < 4) {
      Locs[i] = std::make_pair(MaskIdx, LoIdx);
      MaskPtr[LoIdx] = Idx;
      LoIdx++;
    } else {
      Locs[i] = std::make_pair(MaskIdx, HiIdx);
      MaskPtr[HiIdx] = Idx;
      HiIdx++;
    }
  }

  SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]);
  SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]);
  int MaskOps[] = { -1, -1, -1, -1 };
  for (unsigned i = 0; i != 4; ++i)
    if (Locs[i].first != -1)
      MaskOps[i] = Locs[i].first * 4 + Locs[i].second;
  return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]);
}

static bool MayFoldVectorLoad(SDValue V) {
  while (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
    V = V.getOperand(0);

  if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
    V = V.getOperand(0);
  if (V.hasOneUse() && V.getOpcode() == ISD::BUILD_VECTOR &&
      V.getNumOperands() == 2 && V.getOperand(1).getOpcode() == ISD::UNDEF)
    // BUILD_VECTOR (load), undef
    V = V.getOperand(0);

  return MayFoldLoad(V);
}

static
SDValue getMOVDDup(SDValue &Op, SDLoc &dl, SDValue V1, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();

  // Canonicalize to v2f64.
  V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
  return DAG.getNode(ISD::BITCAST, dl, VT,
                     getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64,
                                          V1, DAG));
}

static
SDValue getMOVLowToHigh(SDValue &Op, SDLoc &dl, SelectionDAG &DAG,
                        bool HasSSE2) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  EVT VT = Op.getValueType();

  assert(VT != MVT::v2i64 && "unsupported shuffle type");

  if (HasSSE2 && VT == MVT::v2f64)
    return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG);

  // v4f32 or v4i32: canonicalized to v4f32 (which is legal for SSE1)
  return DAG.getNode(ISD::BITCAST, dl, VT,
                     getTargetShuffleNode(X86ISD::MOVLHPS, dl, MVT::v4f32,
                           DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V1),
                           DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V2), DAG));
}

static
SDValue getMOVHighToLow(SDValue &Op, SDLoc &dl, SelectionDAG &DAG) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  EVT VT = Op.getValueType();

  assert((VT == MVT::v4i32 || VT == MVT::v4f32) &&
         "unsupported shuffle type");

  if (V2.getOpcode() == ISD::UNDEF)
    V2 = V1;

  // v4i32 or v4f32
  return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG);
}

static
SDValue getMOVLP(SDValue &Op, SDLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  EVT VT = Op.getValueType();
  unsigned NumElems = VT.getVectorNumElements();

  // Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second
  // operand of these instructions can only be memory, so check if there's a
  // potential load folding here; otherwise use SHUFPS or MOVSD to match the
  // same masks.
  bool CanFoldLoad = false;

  // Trivial case, when V2 comes from a load.
  if (MayFoldVectorLoad(V2))
    CanFoldLoad = true;

  // When V1 is a load, it can be folded later into a store in isel, example:
  //  (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1)
  // turns into:
  //  (MOVLPSmr addr:$src1, VR128:$src2)
  // So, recognize this potential and also use MOVLPS or MOVLPD
  else if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op))
    CanFoldLoad = true;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  if (CanFoldLoad) {
    if (HasSSE2 && NumElems == 2)
      return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG);

    if (NumElems == 4)
      // If we don't care about the second element, proceed to use movss.
      if (SVOp->getMaskElt(1) != -1)
        return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG);
  }

  // movl and movlp will both match v2i64, but v2i64 is never matched by
  // movl earlier because we make it strict to avoid messing with the movlp
  // load folding logic (see the code above the getMOVLP call). Match it here
  // then; this is horrible, but will stay like this until we move all shuffle
  // matching to x86 specific nodes. Note that for the 1st condition all
  // types are matched with movsd.
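  // Illustrative note (not in the original comments): for v4f32, an
  // isMOVLMask mask looks like <4, 1, 2, 3>; element 0 of the result comes
  // from V2 and the remaining elements come from V1 in order, which is what
  // MOVSS implements.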
  if (HasSSE2) {
    // FIXME: isMOVLMask should be checked and matched before getMOVLP,
    // so as to remove this logic from here as much as possible.
    if (NumElems == 2 || !isMOVLMask(SVOp->getMask(), VT))
      return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
    return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
  }

  assert(VT != MVT::v4i32 && "unsupported shuffle type");

  // Invert the operand order and use SHUFPS to match it.
  return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V2, V1,
                              getShuffleSHUFImmediate(SVOp), DAG);
}

// Reduce a vector shuffle to zext.
SDValue
X86TargetLowering::LowerVectorIntExtend(SDValue Op, SelectionDAG &DAG) const {
  // PMOVZX is only available from SSE41.
  if (!Subtarget->hasSSE41())
    return SDValue();

  EVT VT = Op.getValueType();

  // Only AVX2 supports 256-bit vector integer extending.
  if (!Subtarget->hasInt256() && VT.is256BitVector())
    return SDValue();

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  SDLoc DL(Op);
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  unsigned NumElems = VT.getVectorNumElements();

  // Extending is a unary operation, and the element type of the source
  // vector must be smaller than i64.
  if (V2.getOpcode() != ISD::UNDEF || !VT.isInteger() ||
      VT.getVectorElementType() == MVT::i64)
    return SDValue();

  // Find the expansion ratio, e.g. expanding from i8 to i32 has a ratio of 4.
  unsigned Shift = 1; // Start from 2, i.e. 1 << 1.
  while ((1U << Shift) < NumElems) {
    if (SVOp->getMaskElt(1U << Shift) == 1)
      break;
    Shift += 1;
    // The maximal ratio is 8, i.e. from i8 to i64.
    if (Shift > 3)
      return SDValue();
  }

  // Check the shuffle mask.
  unsigned Mask = (1U << Shift) - 1;
  for (unsigned i = 0; i != NumElems; ++i) {
    int EltIdx = SVOp->getMaskElt(i);
    if ((i & Mask) != 0 && EltIdx != -1)
      return SDValue();
    if ((i & Mask) == 0 && (unsigned)EltIdx != (i >> Shift))
      return SDValue();
  }

  LLVMContext *Context = DAG.getContext();
  unsigned NBits = VT.getVectorElementType().getSizeInBits() << Shift;
  EVT NeVT = EVT::getIntegerVT(*Context, NBits);
  EVT NVT = EVT::getVectorVT(*Context, NeVT, NumElems >> Shift);

  if (!isTypeLegal(NVT))
    return SDValue();

  // Simplify the operand as it's prepared to be fed into the shuffle.
  unsigned SignificantBits = NVT.getSizeInBits() >> Shift;
  if (V1.getOpcode() == ISD::BITCAST &&
      V1.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
      V1.getOperand(0).getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      V1.getOperand(0)
        .getOperand(0).getValueType().getSizeInBits() == SignificantBits) {
    // (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast x)
    SDValue V = V1.getOperand(0).getOperand(0).getOperand(0);
    ConstantSDNode *CIdx =
      dyn_cast<ConstantSDNode>(V1.getOperand(0).getOperand(0).getOperand(1));
    // If it's foldable, i.e. a normal load with a single use, we will let
    // code selection fold it. Otherwise, we will shorten the conversion
    // sequence.
    if (CIdx && CIdx->getZExtValue() == 0 &&
        (!ISD::isNormalLoad(V.getNode()) || !V.hasOneUse())) {
      if (V.getValueSizeInBits() > V1.getValueSizeInBits()) {
        // The "ext_vec_elt" node is wider than the result node.
6743 // In this case we should extract subvector from V. 6744 // (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast (extract_subvector x)). 6745 unsigned Ratio = V.getValueSizeInBits() / V1.getValueSizeInBits(); 6746 EVT FullVT = V.getValueType(); 6747 EVT SubVecVT = EVT::getVectorVT(*Context, 6748 FullVT.getVectorElementType(), 6749 FullVT.getVectorNumElements()/Ratio); 6750 V = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, V, 6751 DAG.getIntPtrConstant(0)); 6752 } 6753 V1 = DAG.getNode(ISD::BITCAST, DL, V1.getValueType(), V); 6754 } 6755 } 6756 6757 return DAG.getNode(ISD::BITCAST, DL, VT, 6758 DAG.getNode(X86ISD::VZEXT, DL, NVT, V1)); 6759} 6760 6761SDValue 6762X86TargetLowering::NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG) const { 6763 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 6764 MVT VT = Op.getValueType().getSimpleVT(); 6765 SDLoc dl(Op); 6766 SDValue V1 = Op.getOperand(0); 6767 SDValue V2 = Op.getOperand(1); 6768 6769 if (isZeroShuffle(SVOp)) 6770 return getZeroVector(VT, Subtarget, DAG, dl); 6771 6772 // Handle splat operations 6773 if (SVOp->isSplat()) { 6774 // Use vbroadcast whenever the splat comes from a foldable load 6775 SDValue Broadcast = LowerVectorBroadcast(Op, DAG); 6776 if (Broadcast.getNode()) 6777 return Broadcast; 6778 } 6779 6780 // Check integer expanding shuffles. 6781 SDValue NewOp = LowerVectorIntExtend(Op, DAG); 6782 if (NewOp.getNode()) 6783 return NewOp; 6784 6785 // If the shuffle can be profitably rewritten as a narrower shuffle, then 6786 // do it! 6787 if (VT == MVT::v8i16 || VT == MVT::v16i8 || 6788 VT == MVT::v16i16 || VT == MVT::v32i8) { 6789 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG); 6790 if (NewOp.getNode()) 6791 return DAG.getNode(ISD::BITCAST, dl, VT, NewOp); 6792 } else if ((VT == MVT::v4i32 || 6793 (VT == MVT::v4f32 && Subtarget->hasSSE2()))) { 6794 // FIXME: Figure out a cleaner way to do this. 6795 // Try to make use of movq to zero out the top part. 6796 if (ISD::isBuildVectorAllZeros(V2.getNode())) { 6797 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG); 6798 if (NewOp.getNode()) { 6799 MVT NewVT = NewOp.getValueType().getSimpleVT(); 6800 if (isCommutedMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), 6801 NewVT, true, false)) 6802 return getVZextMovL(VT, NewVT, NewOp.getOperand(0), 6803 DAG, Subtarget, dl); 6804 } 6805 } else if (ISD::isBuildVectorAllZeros(V1.getNode())) { 6806 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG); 6807 if (NewOp.getNode()) { 6808 MVT NewVT = NewOp.getValueType().getSimpleVT(); 6809 if (isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), NewVT)) 6810 return getVZextMovL(VT, NewVT, NewOp.getOperand(1), 6811 DAG, Subtarget, dl); 6812 } 6813 } 6814 } 6815 return SDValue(); 6816} 6817 6818SDValue 6819X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const { 6820 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 6821 SDValue V1 = Op.getOperand(0); 6822 SDValue V2 = Op.getOperand(1); 6823 MVT VT = Op.getValueType().getSimpleVT(); 6824 SDLoc dl(Op); 6825 unsigned NumElems = VT.getVectorNumElements(); 6826 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF; 6827 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; 6828 bool V1IsSplat = false; 6829 bool V2IsSplat = false; 6830 bool HasSSE2 = Subtarget->hasSSE2(); 6831 bool HasFp256 = Subtarget->hasFp256(); 6832 bool HasInt256 = Subtarget->hasInt256(); 6833 MachineFunction &MF = DAG.getMachineFunction(); 6834 bool OptForSize = MF.getFunction()->getAttributes(). 
6835 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize); 6836 6837 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles"); 6838 6839 if (V1IsUndef && V2IsUndef) 6840 return DAG.getUNDEF(VT); 6841 6842 assert(!V1IsUndef && "Op 1 of shuffle should not be undef"); 6843 6844 // Vector shuffle lowering takes 3 steps: 6845 // 6846 // 1) Normalize the input vectors. Here splats, zeroed vectors, profitable 6847 // narrowing and commutation of operands should be handled. 6848 // 2) Matching of shuffles with known shuffle masks to x86 target specific 6849 // shuffle nodes. 6850 // 3) Rewriting of unmatched masks into new generic shuffle operations, 6851 // so the shuffle can be broken into other shuffles and the legalizer can 6852 // try the lowering again. 6853 // 6854 // The general idea is that no vector_shuffle operation should be left to 6855 // be matched during isel, all of them must be converted to a target specific 6856 // node here. 6857 6858 // Normalize the input vectors. Here splats, zeroed vectors, profitable 6859 // narrowing and commutation of operands should be handled. The actual code 6860 // doesn't include all of those, work in progress... 6861 SDValue NewOp = NormalizeVectorShuffle(Op, DAG); 6862 if (NewOp.getNode()) 6863 return NewOp; 6864 6865 SmallVector<int, 8> M(SVOp->getMask().begin(), SVOp->getMask().end()); 6866 6867 // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and 6868 // unpckh_undef). Only use pshufd if speed is more important than size. 6869 if (OptForSize && isUNPCKL_v_undef_Mask(M, VT, HasInt256)) 6870 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG); 6871 if (OptForSize && isUNPCKH_v_undef_Mask(M, VT, HasInt256)) 6872 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG); 6873 6874 if (isMOVDDUPMask(M, VT) && Subtarget->hasSSE3() && 6875 V2IsUndef && MayFoldVectorLoad(V1)) 6876 return getMOVDDup(Op, dl, V1, DAG); 6877 6878 if (isMOVHLPS_v_undef_Mask(M, VT)) 6879 return getMOVHighToLow(Op, dl, DAG); 6880 6881 // Use to match splats 6882 if (HasSSE2 && isUNPCKHMask(M, VT, HasInt256) && V2IsUndef && 6883 (VT == MVT::v2f64 || VT == MVT::v2i64)) 6884 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG); 6885 6886 if (isPSHUFDMask(M, VT)) { 6887 // The actual implementation will match the mask in the if above and then 6888 // during isel it can match several different instructions, not only pshufd 6889 // as its name says, sad but true, emulate the behavior for now... 6890 if (isMOVDDUPMask(M, VT) && ((VT == MVT::v4f32 || VT == MVT::v2i64))) 6891 return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG); 6892 6893 unsigned TargetMask = getShuffleSHUFImmediate(SVOp); 6894 6895 if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32)) 6896 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG); 6897 6898 if (HasFp256 && (VT == MVT::v4f32 || VT == MVT::v2f64)) 6899 return getTargetShuffleNode(X86ISD::VPERMILP, dl, VT, V1, TargetMask, 6900 DAG); 6901 6902 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V1, 6903 TargetMask, DAG); 6904 } 6905 6906 if (isPALIGNRMask(M, VT, Subtarget)) 6907 return getTargetShuffleNode(X86ISD::PALIGNR, dl, VT, V1, V2, 6908 getShufflePALIGNRImmediate(SVOp), 6909 DAG); 6910 6911 // Check if this can be converted into a logical shift. 
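  // For instance (illustrative): shuffling v8i16 X against an all-zero vector
  // with mask <1, 2, 3, 4, 5, 6, 7, 8> moves every element down by one and
  // shifts a zero into the top slot, which is just a logical right shift of
  // the whole 128-bit register by 2 bytes.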
  bool isLeft = false;
  unsigned ShAmt = 0;
  SDValue ShVal;
  bool isShift = HasSSE2 && isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt);
  if (isShift && ShVal.hasOneUse()) {
    // If the shifted value has multiple uses, it may be cheaper to use
    // v_set0 + movlhps or movhlps, etc.
    MVT EltVT = VT.getVectorElementType();
    ShAmt *= EltVT.getSizeInBits();
    return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
  }

  if (isMOVLMask(M, VT)) {
    if (ISD::isBuildVectorAllZeros(V1.getNode()))
      return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl);
    if (!isMOVLPMask(M, VT)) {
      if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64))
        return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);

      if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
    }
  }

  // FIXME: fold these into legal mask.
  if (isMOVLHPSMask(M, VT) && !isUNPCKLMask(M, VT, HasInt256))
    return getMOVLowToHigh(Op, dl, DAG, HasSSE2);

  if (isMOVHLPSMask(M, VT))
    return getMOVHighToLow(Op, dl, DAG);

  if (V2IsUndef && isMOVSHDUPMask(M, VT, Subtarget))
    return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG);

  if (V2IsUndef && isMOVSLDUPMask(M, VT, Subtarget))
    return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG);

  if (isMOVLPMask(M, VT))
    return getMOVLP(Op, dl, DAG, HasSSE2);

  if (ShouldXformToMOVHLPS(M, VT) ||
      ShouldXformToMOVLP(V1.getNode(), V2.getNode(), M, VT))
    return CommuteVectorShuffle(SVOp, DAG);

  if (isShift) {
    // No better options. Use a vshldq / vsrldq.
    MVT EltVT = VT.getVectorElementType();
    ShAmt *= EltVT.getSizeInBits();
    return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
  }

  bool Commuted = false;
  // FIXME: This should also accept a bitcast of a splat? Be careful, not
  // 1,1,1,1 -> v8i16 though.
  V1IsSplat = isSplatVector(V1.getNode());
  V2IsSplat = isSplatVector(V2.getNode());

  // Canonicalize the splat or undef, if present, to be on the RHS.
  if (!V2IsUndef && V1IsSplat && !V2IsSplat) {
    CommuteVectorShuffleMask(M, NumElems);
    std::swap(V1, V2);
    std::swap(V1IsSplat, V2IsSplat);
    Commuted = true;
  }

  if (isCommutedMOVLMask(M, VT, V2IsSplat, V2IsUndef)) {
    // Shuffling the low element of v1 into undef, just return v1.
    if (V2IsUndef)
      return V1;
    // If V2 is a splat, the mask may be malformed such as <4,3,3,3>, which
    // the instruction selector will not match, so get a canonical MOVL with
    // swapped operands to undo the commute.
    return getMOVL(DAG, dl, VT, V2, V1);
  }

  if (isUNPCKLMask(M, VT, HasInt256))
    return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);

  if (isUNPCKHMask(M, VT, HasInt256))
    return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);

  if (V2IsSplat) {
    // Normalize the mask so all entries that point to V2 point to its first
    // element, then try to match unpck{h|l} again.
    // If they match, return a new vector_shuffle with the corrected mask.
    SmallVector<int, 8> NewMask(M.begin(), M.end());
    NormalizeMask(NewMask, NumElems);
    if (isUNPCKLMask(NewMask, VT, HasInt256, true))
      return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
    if (isUNPCKHMask(NewMask, VT, HasInt256, true))
      return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
  }

  if (Commuted) {
    // Commute it back and try unpck* again.
    // FIXME: this seems wrong.
    CommuteVectorShuffleMask(M, NumElems);
    std::swap(V1, V2);
    std::swap(V1IsSplat, V2IsSplat);
    Commuted = false;

    if (isUNPCKLMask(M, VT, HasInt256))
      return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);

    if (isUNPCKHMask(M, VT, HasInt256))
      return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
  }

  // Normalize the node to match x86 shuffle ops if needed
  if (!V2IsUndef && (isSHUFPMask(M, VT, HasFp256, /* Commuted */ true)))
    return CommuteVectorShuffle(SVOp, DAG);

  // The checks below are all present in isShuffleMaskLegal, but they are
  // inlined here right now to enable us to directly emit target specific
  // nodes, and remove one by one until they don't return Op anymore.

  if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) &&
      SVOp->getSplatIndex() == 0 && V2IsUndef) {
    if (VT == MVT::v2f64 || VT == MVT::v2i64)
      return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
  }

  if (isPSHUFHWMask(M, VT, HasInt256))
    return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1,
                                getShufflePSHUFHWImmediate(SVOp),
                                DAG);

  if (isPSHUFLWMask(M, VT, HasInt256))
    return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1,
                                getShufflePSHUFLWImmediate(SVOp),
                                DAG);

  if (isSHUFPMask(M, VT, HasFp256))
    return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V2,
                                getShuffleSHUFImmediate(SVOp), DAG);

  if (isUNPCKL_v_undef_Mask(M, VT, HasInt256))
    return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
  if (isUNPCKH_v_undef_Mask(M, VT, HasInt256))
    return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);

  //===--------------------------------------------------------------------===//
  // Generate target specific nodes for 128 or 256-bit shuffles only
  // supported in the AVX instruction set.
  //

  // Handle VMOVDDUPY permutations
  if (V2IsUndef && isMOVDDUPYMask(M, VT, HasFp256))
    return getTargetShuffleNode(X86ISD::MOVDDUP, dl, VT, V1, DAG);

  // Handle VPERMILPS/D* permutations
  if (isVPERMILPMask(M, VT, HasFp256)) {
    if (HasInt256 && VT == MVT::v8i32)
      return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1,
                                  getShuffleSHUFImmediate(SVOp), DAG);
    return getTargetShuffleNode(X86ISD::VPERMILP, dl, VT, V1,
                                getShuffleSHUFImmediate(SVOp), DAG);
  }

  // Handle VPERM2F128/VPERM2I128 permutations
  if (isVPERM2X128Mask(M, VT, HasFp256))
    return getTargetShuffleNode(X86ISD::VPERM2X128, dl, VT, V1,
                                V2, getShuffleVPERM2X128Immediate(SVOp), DAG);

  SDValue BlendOp = LowerVECTOR_SHUFFLEtoBlend(SVOp, Subtarget, DAG);
  if (BlendOp.getNode())
    return BlendOp;

  if (V2IsUndef && HasInt256 && (VT == MVT::v8i32 || VT == MVT::v8f32)) {
    SmallVector<SDValue, 8> permclMask;
    for (unsigned i = 0; i != 8; ++i) {
      permclMask.push_back(DAG.getConstant((M[i]>=0) ?
M[i] : 0, MVT::i32)); 7084 } 7085 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, 7086 &permclMask[0], 8); 7087 // Bitcast is for VPERMPS since mask is v8i32 but node takes v8f32 7088 return DAG.getNode(X86ISD::VPERMV, dl, VT, 7089 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V1); 7090 } 7091 7092 if (V2IsUndef && HasInt256 && (VT == MVT::v4i64 || VT == MVT::v4f64)) 7093 return getTargetShuffleNode(X86ISD::VPERMI, dl, VT, V1, 7094 getShuffleCLImmediate(SVOp), DAG); 7095 7096 //===--------------------------------------------------------------------===// 7097 // Since no target specific shuffle was selected for this generic one, 7098 // lower it into other known shuffles. FIXME: this isn't true yet, but 7099 // this is the plan. 7100 // 7101 7102 // Handle v8i16 specifically since SSE can do byte extraction and insertion. 7103 if (VT == MVT::v8i16) { 7104 SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, Subtarget, DAG); 7105 if (NewOp.getNode()) 7106 return NewOp; 7107 } 7108 7109 if (VT == MVT::v16i8) { 7110 SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, DAG, *this); 7111 if (NewOp.getNode()) 7112 return NewOp; 7113 } 7114 7115 if (VT == MVT::v32i8) { 7116 SDValue NewOp = LowerVECTOR_SHUFFLEv32i8(SVOp, Subtarget, DAG); 7117 if (NewOp.getNode()) 7118 return NewOp; 7119 } 7120 7121 // Handle all 128-bit wide vectors with 4 elements, and match them with 7122 // several different shuffle types. 7123 if (NumElems == 4 && VT.is128BitVector()) 7124 return LowerVECTOR_SHUFFLE_128v4(SVOp, DAG); 7125 7126 // Handle general 256-bit shuffles 7127 if (VT.is256BitVector()) 7128 return LowerVECTOR_SHUFFLE_256(SVOp, DAG); 7129 7130 return SDValue(); 7131} 7132 7133static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) { 7134 MVT VT = Op.getValueType().getSimpleVT(); 7135 SDLoc dl(Op); 7136 7137 if (!Op.getOperand(0).getValueType().getSimpleVT().is128BitVector()) 7138 return SDValue(); 7139 7140 if (VT.getSizeInBits() == 8) { 7141 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32, 7142 Op.getOperand(0), Op.getOperand(1)); 7143 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract, 7144 DAG.getValueType(VT)); 7145 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert); 7146 } 7147 7148 if (VT.getSizeInBits() == 16) { 7149 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 7150 // If Idx is 0, it's cheaper to do a move instead of a pextrw. 7151 if (Idx == 0) 7152 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, 7153 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, 7154 DAG.getNode(ISD::BITCAST, dl, 7155 MVT::v4i32, 7156 Op.getOperand(0)), 7157 Op.getOperand(1))); 7158 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32, 7159 Op.getOperand(0), Op.getOperand(1)); 7160 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract, 7161 DAG.getValueType(VT)); 7162 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert); 7163 } 7164 7165 if (VT == MVT::f32) { 7166 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy 7167 // the result back to FR32 register. It's only worth matching if the 7168 // result has a single use which is a store or a bitcast to i32. And in 7169 // the case of a store, it's not worth it if the index is a constant 0, 7170 // because a MOVSSmr can be used instead, which is smaller and faster. 
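  // Illustrative IR-level example (hypothetical names): for
  //   %e = extractelement <4 x float> %v, i32 1
  //   store float %e, float* %p
  // matching here lets isel use a single EXTRACTPS (roughly
  // "extractps $1, %xmm0, (mem)") instead of a shuffle plus movss, whereas a
  // stored lane 0 is better served by MOVSSmr and is rejected below.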
    if (!Op.hasOneUse())
      return SDValue();
    SDNode *User = *Op.getNode()->use_begin();
    if ((User->getOpcode() != ISD::STORE ||
         (isa<ConstantSDNode>(Op.getOperand(1)) &&
          cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) &&
        (User->getOpcode() != ISD::BITCAST ||
         User->getValueType(0) != MVT::i32))
      return SDValue();
    SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                  DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
                                              Op.getOperand(0)),
                                  Op.getOperand(1));
    return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract);
  }

  if (VT == MVT::i32 || VT == MVT::i64) {
    // ExtractPS/pextrq work with a constant index.
    if (isa<ConstantSDNode>(Op.getOperand(1)))
      return Op;
  }
  return SDValue();
}

SDValue
X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
                                           SelectionDAG &DAG) const {
  if (!isa<ConstantSDNode>(Op.getOperand(1)))
    return SDValue();

  SDValue Vec = Op.getOperand(0);
  MVT VecVT = Vec.getValueType().getSimpleVT();

  // If this is a 256-bit vector result, first extract the 128-bit vector and
  // then extract the element from the 128-bit vector.
  if (VecVT.is256BitVector()) {
    SDLoc dl(Op.getNode());
    unsigned NumElems = VecVT.getVectorNumElements();
    SDValue Idx = Op.getOperand(1);
    unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();

    // Get the 128-bit vector.
    Vec = Extract128BitVector(Vec, IdxVal, DAG, dl);

    if (IdxVal >= NumElems/2)
      IdxVal -= NumElems/2;
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
                       DAG.getConstant(IdxVal, MVT::i32));
  }

  assert(VecVT.is128BitVector() && "Unexpected vector length");

  if (Subtarget->hasSSE41()) {
    SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);
    if (Res.getNode())
      return Res;
  }

  MVT VT = Op.getValueType().getSimpleVT();
  SDLoc dl(Op);
  // TODO: handle v16i8.
  if (VT.getSizeInBits() == 16) {
    SDValue Vec = Op.getOperand(0);
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    if (Idx == 0)
      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                     DAG.getNode(ISD::BITCAST, dl,
                                                 MVT::v4i32, Vec),
                                     Op.getOperand(1)));
    // Transform it so it matches pextrw, which produces a 32-bit result.
    MVT EltVT = MVT::i32;
    SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT,
                                  Op.getOperand(0), Op.getOperand(1));
    SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract,
                                 DAG.getValueType(VT));
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
  }

  if (VT.getSizeInBits() == 32) {
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    if (Idx == 0)
      return Op;

    // SHUFPS the element to the lowest double word, then movss.
    int Mask[4] = { static_cast<int>(Idx), -1, -1, -1 };
    MVT VVT = Op.getOperand(0).getValueType().getSimpleVT();
    SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
                                       DAG.getUNDEF(VVT), Mask);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
                       DAG.getIntPtrConstant(0));
  }

  if (VT.getSizeInBits() == 64) {
    // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
    // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
    // to match extract_elt for f64.
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    if (Idx == 0)
      return Op;

    // UNPCKHPD the element to the lowest double word, then movsd.
    // Note if the lower 64 bits of the result of the UNPCKHPD are then stored
    // to a f64mem, the whole operation is folded into a single MOVHPDmr.
    int Mask[2] = { 1, -1 };
    MVT VVT = Op.getOperand(0).getValueType().getSimpleVT();
    SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
                                       DAG.getUNDEF(VVT), Mask);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
                       DAG.getIntPtrConstant(0));
  }

  return SDValue();
}

static SDValue LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getValueType().getSimpleVT();
  MVT EltVT = VT.getVectorElementType();
  SDLoc dl(Op);

  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue N2 = Op.getOperand(2);

  if (!VT.is128BitVector())
    return SDValue();

  if ((EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) &&
      isa<ConstantSDNode>(N2)) {
    unsigned Opc;
    if (VT == MVT::v8i16)
      Opc = X86ISD::PINSRW;
    else if (VT == MVT::v16i8)
      Opc = X86ISD::PINSRB;
    else
      Opc = X86ISD::PINSRB;

    // Transform it so it matches pinsr{b,w} which expects a GR32 as its second
    // argument.
    if (N1.getValueType() != MVT::i32)
      N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
    if (N2.getValueType() != MVT::i32)
      N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue());
    return DAG.getNode(Opc, dl, VT, N0, N1, N2);
  }

  if (EltVT == MVT::f32 && isa<ConstantSDNode>(N2)) {
    // Bits [7:6] of the constant are the source select. This will always be
    // zero here. The DAG Combiner may combine an extract_elt index into these
    // bits. For example (insert (extract, 3), 2) could be matched by putting
    // the '3' into bits [7:6] of X86ISD::INSERTPS.
    // Bits [5:4] of the constant are the destination select. This is the
    // value of the incoming immediate.
    // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
    // combine either bitwise AND or insert of float 0.0 to set these bits.
    N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue() << 4);
    // Create this as a scalar to vector.
    N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
    return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
  }

  if ((EltVT == MVT::i32 || EltVT == MVT::i64) && isa<ConstantSDNode>(N2)) {
    // PINSR* works with a constant index.
    return Op;
  }
  return SDValue();
}

SDValue
X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const {
  MVT VT = Op.getValueType().getSimpleVT();
  MVT EltVT = VT.getVectorElementType();

  SDLoc dl(Op);
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue N2 = Op.getOperand(2);

  // If this is a 256-bit vector result, first extract the 128-bit vector,
  // insert the element into the extracted half and then place it back.
  if (VT.is256BitVector()) {
    if (!isa<ConstantSDNode>(N2))
      return SDValue();

    // Get the desired 128-bit vector half.
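    // Illustrative example: inserting into element 5 of a v8f32 vector
    // extracts the upper v4f32 half, inserts at index 5 - 4 == 1 within that
    // half, and then writes the half back with Insert128BitVector below.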
    unsigned NumElems = VT.getVectorNumElements();
    unsigned IdxVal = cast<ConstantSDNode>(N2)->getZExtValue();
    SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl);

    // Insert the element into the desired half.
    bool Upper = IdxVal >= NumElems/2;
    V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
                    DAG.getConstant(Upper ? IdxVal-NumElems/2 : IdxVal,
                                    MVT::i32));

    // Insert the changed part back into the 256-bit vector.
    return Insert128BitVector(N0, V, IdxVal, DAG, dl);
  }

  if (Subtarget->hasSSE41())
    return LowerINSERT_VECTOR_ELT_SSE4(Op, DAG);

  if (EltVT == MVT::i8)
    return SDValue();

  if (EltVT.getSizeInBits() == 16 && isa<ConstantSDNode>(N2)) {
    // Transform it so it matches pinsrw which expects a 16-bit value in a GR32
    // as its second argument.
    if (N1.getValueType() != MVT::i32)
      N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
    if (N2.getValueType() != MVT::i32)
      N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue());
    return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2);
  }
  return SDValue();
}

static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
  LLVMContext *Context = DAG.getContext();
  SDLoc dl(Op);
  MVT OpVT = Op.getValueType().getSimpleVT();

  // If this is a 256-bit vector result, first insert into a 128-bit
  // vector and then insert into the 256-bit vector.
  if (!OpVT.is128BitVector()) {
    // Insert into a 128-bit vector.
    EVT VT128 = EVT::getVectorVT(*Context,
                                 OpVT.getVectorElementType(),
                                 OpVT.getVectorNumElements() / 2);

    Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));

    // Insert the 128-bit vector.
    return Insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
  }

  if (OpVT == MVT::v1i64 &&
      Op.getOperand(0).getValueType() == MVT::i64)
    return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));

  SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
  assert(OpVT.is128BitVector() && "Expected an SSE type!");
  return DAG.getNode(ISD::BITCAST, dl, OpVT,
                     DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt));
}

// Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in
// a simple subregister reference or explicit instructions to grab
// upper bits of a vector.
static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
                                      SelectionDAG &DAG) {
  if (Subtarget->hasFp256()) {
    SDLoc dl(Op.getNode());
    SDValue Vec = Op.getNode()->getOperand(0);
    SDValue Idx = Op.getNode()->getOperand(1);

    if (Op.getNode()->getValueType(0).is128BitVector() &&
        Vec.getNode()->getValueType(0).is256BitVector() &&
        isa<ConstantSDNode>(Idx)) {
      unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
      return Extract128BitVector(Vec, IdxVal, DAG, dl);
    }
  }
  return SDValue();
}

// Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
// simple superregister reference or explicit instructions to insert
// the upper bits of a vector.
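// For example (illustrative): inserting a v4f32 subvector at index 4 of a
// v8f32 vector replaces the upper half and can be selected as a single
// VINSERTF128 with immediate 1.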
static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
                                     SelectionDAG &DAG) {
  if (Subtarget->hasFp256()) {
    SDLoc dl(Op.getNode());
    SDValue Vec = Op.getNode()->getOperand(0);
    SDValue SubVec = Op.getNode()->getOperand(1);
    SDValue Idx = Op.getNode()->getOperand(2);

    if (Op.getNode()->getValueType(0).is256BitVector() &&
        SubVec.getNode()->getValueType(0).is128BitVector() &&
        isa<ConstantSDNode>(Idx)) {
      unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
      return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl);
    }
  }
  return SDValue();
}

// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
// one of the above mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form an addressing mode. These wrapped nodes will be selected
// into MOV32ri.
SDValue
X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);

  // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = 0;
  unsigned WrapperKind = X86ISD::Wrapper;
  CodeModel::Model M = getTargetMachine().getCodeModel();

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel))
    WrapperKind = X86ISD::WrapperRIP;
  else if (Subtarget->isPICStyleGOT())
    OpFlag = X86II::MO_GOTOFF;
  else if (Subtarget->isPICStyleStubPIC())
    OpFlag = X86II::MO_PIC_BASE_OFFSET;

  SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(),
                                             CP->getAlignment(),
                                             CP->getOffset(), OpFlag);
  SDLoc DL(CP);
  Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
  // With PIC, the address is actually $g + Offset.
  if (OpFlag) {
    Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg,
                                     SDLoc(), getPointerTy()),
                         Result);
  }

  return Result;
}

SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);

  // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = 0;
  unsigned WrapperKind = X86ISD::Wrapper;
  CodeModel::Model M = getTargetMachine().getCodeModel();

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel))
    WrapperKind = X86ISD::WrapperRIP;
  else if (Subtarget->isPICStyleGOT())
    OpFlag = X86II::MO_GOTOFF;
  else if (Subtarget->isPICStyleStubPIC())
    OpFlag = X86II::MO_PIC_BASE_OFFSET;

  SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(),
                                          OpFlag);
  SDLoc DL(JT);
  Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);

  // With PIC, the address is actually $g + Offset.
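  // Sketch of the 32-bit GOT-style codegen this yields (illustrative label
  // names):
  //   leal .LJTI0_0@GOTOFF(%ebx), %eax
  // i.e. the jump-table address is the PIC base register plus a GOT-relative
  // offset.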
  if (OpFlag)
    Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg,
                                     SDLoc(), getPointerTy()),
                         Result);

  return Result;
}

SDValue
X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
  const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();

  // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = 0;
  unsigned WrapperKind = X86ISD::Wrapper;
  CodeModel::Model M = getTargetMachine().getCodeModel();

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel)) {
    if (Subtarget->isTargetDarwin() || Subtarget->isTargetELF())
      OpFlag = X86II::MO_GOTPCREL;
    WrapperKind = X86ISD::WrapperRIP;
  } else if (Subtarget->isPICStyleGOT()) {
    OpFlag = X86II::MO_GOT;
  } else if (Subtarget->isPICStyleStubPIC()) {
    OpFlag = X86II::MO_DARWIN_NONLAZY_PIC_BASE;
  } else if (Subtarget->isPICStyleStubNoDynamic()) {
    OpFlag = X86II::MO_DARWIN_NONLAZY;
  }

  SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag);

  SDLoc DL(Op);
  Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);

  // With PIC, the address is actually $g + Offset.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      !Subtarget->is64Bit()) {
    Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg,
                                     SDLoc(), getPointerTy()),
                         Result);
  }

  // For symbols that require a load from a stub to get the address, emit the
  // load.
  if (isGlobalStubReference(OpFlag))
    Result = DAG.getLoad(getPointerTy(), DL, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(), false, false, false, 0);

  return Result;
}

SDValue
X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
  // Create the TargetBlockAddress node.
  unsigned char OpFlags =
    Subtarget->ClassifyBlockAddressReference();
  CodeModel::Model M = getTargetMachine().getCodeModel();
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
  SDLoc dl(Op);
  SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(), Offset,
                                             OpFlags);

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel))
    Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
  else
    Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);

  // With PIC, the address is actually $g + Offset.
  if (isGlobalRelativeToPICBase(OpFlags)) {
    Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
                         Result);
  }

  return Result;
}

SDValue
X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
                                      int64_t Offset, SelectionDAG &DAG) const {
  // Create the TargetGlobalAddress node, folding in the constant
  // offset if it is legal.
  unsigned char OpFlags =
    Subtarget->ClassifyGlobalReference(GV, getTargetMachine());
  CodeModel::Model M = getTargetMachine().getCodeModel();
  SDValue Result;
  if (OpFlags == X86II::MO_NO_FLAG &&
      X86::isOffsetSuitableForCodeModel(Offset, M)) {
    // A direct static reference to a global.
    Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
    Offset = 0;
  } else {
    Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
  }

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel))
    Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
  else
    Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);

  // With PIC, the address is actually $g + Offset.
  if (isGlobalRelativeToPICBase(OpFlags)) {
    Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
                         Result);
  }

  // For globals that require a load from a stub to get the address, emit the
  // load.
  if (isGlobalStubReference(OpFlags))
    Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(), false, false, false, 0);

  // If there was a non-zero offset that we didn't fold, create an explicit
  // addition for it.
  if (Offset != 0)
    Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result,
                         DAG.getConstant(Offset, getPointerTy()));

  return Result;
}

SDValue
X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
  return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG);
}

static SDValue
GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
           SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
           unsigned char OperandFlags, bool LocalDynamic = false) {
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDLoc dl(GA);
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                           GA->getValueType(0),
                                           GA->getOffset(),
                                           OperandFlags);

  X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
                                           : X86ISD::TLSADDR;

  if (InFlag) {
    SDValue Ops[] = { Chain, TGA, *InFlag };
    Chain = DAG.getNode(CallType, dl, NodeTys, Ops, array_lengthof(Ops));
  } else {
    SDValue Ops[] = { Chain, TGA };
    Chain = DAG.getNode(CallType, dl, NodeTys, Ops, array_lengthof(Ops));
  }

  // TLSADDR will be codegen'ed as a call. Inform MFI that this function
  // has calls.
  MFI->setAdjustsStack(true);

  SDValue Flag = Chain.getValue(1);
  return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
}

// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
static SDValue
LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                const EVT PtrVT) {
  SDValue InFlag;
  SDLoc dl(GA);  // FIXME: the function entry point might be a better location
  SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
                                   DAG.getNode(X86ISD::GlobalBaseReg,
                                               SDLoc(), PtrVT), InFlag);
  InFlag = Chain.getValue(1);

  return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
}

// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
static SDValue
LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                const EVT PtrVT) {
  return GetTLSADDR(DAG, DAG.getEntryNode(), GA, NULL, PtrVT,
                    X86::RAX, X86II::MO_TLSGD);
}

static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
                                           SelectionDAG &DAG,
                                           const EVT PtrVT,
                                           bool is64Bit) {
  SDLoc dl(GA);

  // Get the start address of the TLS block for this module.
  X86MachineFunctionInfo* MFI = DAG.getMachineFunction()
                                   .getInfo<X86MachineFunctionInfo>();
  MFI->incNumLocalDynamicTLSAccesses();

  SDValue Base;
  if (is64Bit) {
    Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, NULL, PtrVT, X86::RAX,
                      X86II::MO_TLSLD, /*LocalDynamic=*/true);
  } else {
    SDValue InFlag;
    SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
        DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
    InFlag = Chain.getValue(1);
    Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
                      X86II::MO_TLSLDM, /*LocalDynamic=*/true);
  }

  // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
  // of Base.

  // Build x@dtpoff.
  unsigned char OperandFlags = X86II::MO_DTPOFF;
  unsigned WrapperKind = X86ISD::Wrapper;
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                           GA->getValueType(0),
                                           GA->getOffset(), OperandFlags);
  SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);

  // Add x@dtpoff with the base.
  return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
}

// Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                   const EVT PtrVT, TLSModel::Model model,
                                   bool is64Bit, bool isPIC) {
  SDLoc dl(GA);

  // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
  Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
                                                         is64Bit ? 257 : 256));

  SDValue ThreadPointer = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                                      DAG.getIntPtrConstant(0),
                                      MachinePointerInfo(Ptr),
                                      false, false, false, 0);

  unsigned char OperandFlags = 0;
  // Most TLS accesses are not RIP relative, even on x86-64. One exception is
  // the initial-exec model.
  unsigned WrapperKind = X86ISD::Wrapper;
  if (model == TLSModel::LocalExec) {
    OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
  } else if (model == TLSModel::InitialExec) {
    if (is64Bit) {
      OperandFlags = X86II::MO_GOTTPOFF;
      WrapperKind = X86ISD::WrapperRIP;
    } else {
      OperandFlags = isPIC ?
        X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
    }
  } else {
    llvm_unreachable("Unexpected model");
  }

  // emit "addl x@ntpoff,%eax" (local exec)
  // or   "addl x@indntpoff,%eax" (initial exec)
  // or   "addl x@gotntpoff(%ebx),%eax" (initial exec, 32-bit pic)
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                           GA->getValueType(0),
                                           GA->getOffset(), OperandFlags);
  SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);

  if (model == TLSModel::InitialExec) {
    if (isPIC && !is64Bit) {
      Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
                           DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
                           Offset);
    }

    Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
                         MachinePointerInfo::getGOT(), false, false, false,
                         0);
  }

  // The address of the thread local variable is the add of the thread
  // pointer with the offset of the variable.
  return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
}

SDValue
X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {

  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GA->getGlobal();

  if (Subtarget->isTargetELF()) {
    TLSModel::Model model = getTargetMachine().getTLSModel(GV);

    switch (model) {
      case TLSModel::GeneralDynamic:
        if (Subtarget->is64Bit())
          return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
        return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());
      case TLSModel::LocalDynamic:
        return LowerToTLSLocalDynamicModel(GA, DAG, getPointerTy(),
                                           Subtarget->is64Bit());
      case TLSModel::InitialExec:
      case TLSModel::LocalExec:
        return LowerToTLSExecModel(GA, DAG, getPointerTy(), model,
                                   Subtarget->is64Bit(),
                        getTargetMachine().getRelocationModel() == Reloc::PIC_);
    }
    llvm_unreachable("Unknown TLS model.");
  }

  if (Subtarget->isTargetDarwin()) {
    // Darwin only has one model of TLS. Lower to that.
    unsigned char OpFlag = 0;
    unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ?
                           X86ISD::WrapperRIP : X86ISD::Wrapper;

    // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
    // global base reg.
    bool PIC32 = (getTargetMachine().getRelocationModel() == Reloc::PIC_) &&
                 !Subtarget->is64Bit();
    if (PIC32)
      OpFlag = X86II::MO_TLVP_PIC_BASE;
    else
      OpFlag = X86II::MO_TLVP;
    SDLoc DL(Op);
    SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
                                                GA->getValueType(0),
                                                GA->getOffset(), OpFlag);
    SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);

    // With PIC32, the address is actually $g + Offset.
    if (PIC32)
      Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                           DAG.getNode(X86ISD::GlobalBaseReg,
                                       SDLoc(), getPointerTy()),
                           Offset);

    // Lowering the machine ISD node will make sure everything is in the
    // right location.
    SDValue Chain = DAG.getEntryNode();
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue Args[] = { Chain, Offset };
    Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args, 2);

    // TLSCALL will be codegen'ed as a call. Inform MFI that this function
    // has calls.
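    // (On Darwin the TLSCALL ends up as an indirect call through the
    // variable's TLV descriptor, e.g. "movq _x@TLVP(%rip), %rdi" followed
    // by "callq *(%rdi)", with the address returned in the usual return
    // register.)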
    MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
    MFI->setAdjustsStack(true);

    // And our return value (tls address) is in the standard call return value
    // location.
    unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
    return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(),
                              Chain.getValue(1));
  }

  if (Subtarget->isTargetWindows() || Subtarget->isTargetMingw()) {
    // Just use the implicit TLS architecture.
    // Need to generate something similar to:
    //   mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
    //                                ; from TEB
    //   mov ecx, dword [rel _tls_index]; Load index (from C runtime)
    //   mov rcx, qword [rdx+rcx*8]
    //   mov eax, .tls$:tlsvar
    //   [rax+rcx] contains the address
    // Windows 64-bit: gs:0x58
    // Windows 32-bit: fs:__tls_array

    // If GV is an alias then use the aliasee for determining
    // thread-localness.
    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
      GV = GA->resolveAliasedGlobal(false);
    SDLoc dl(GA);
    SDValue Chain = DAG.getEntryNode();

    // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
    // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
    // use its literal value of 0x2C.
    Value *Ptr = Constant::getNullValue(Subtarget->is64Bit()
                                        ? Type::getInt8PtrTy(*DAG.getContext(),
                                                             256)
                                        : Type::getInt32PtrTy(*DAG.getContext(),
                                                              257));

    SDValue TlsArray = Subtarget->is64Bit() ? DAG.getIntPtrConstant(0x58) :
      (Subtarget->isTargetMingw() ? DAG.getIntPtrConstant(0x2C) :
        DAG.getExternalSymbol("_tls_array", getPointerTy()));

    SDValue ThreadPointer = DAG.getLoad(getPointerTy(), dl, Chain, TlsArray,
                                        MachinePointerInfo(Ptr),
                                        false, false, false, 0);

    // Load the _tls_index variable.
    SDValue IDX = DAG.getExternalSymbol("_tls_index", getPointerTy());
    if (Subtarget->is64Bit())
      IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, getPointerTy(), Chain,
                           IDX, MachinePointerInfo(), MVT::i32,
                           false, false, 0);
    else
      IDX = DAG.getLoad(getPointerTy(), dl, Chain, IDX, MachinePointerInfo(),
                        false, false, false, 0);

    SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()),
                                    getPointerTy());
    IDX = DAG.getNode(ISD::SHL, dl, getPointerTy(), IDX, Scale);

    SDValue res = DAG.getNode(ISD::ADD, dl, getPointerTy(), ThreadPointer, IDX);
    res = DAG.getLoad(getPointerTy(), dl, Chain, res, MachinePointerInfo(),
                      false, false, false, 0);

    // Get the offset of the start of the .tls section.
    SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                             GA->getValueType(0),
                                             GA->getOffset(), X86II::MO_SECREL);
    SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), TGA);

    // The address of the thread local variable is the add of the thread
    // pointer with the offset of the variable.
    return DAG.getNode(ISD::ADD, dl, getPointerTy(), res, Offset);
  }

  llvm_unreachable("TLS not implemented for this target.");
}

/// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values
/// and take a 2 x i32 value to shift plus a shift amount.
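///
/// A sketch of the common case (shift amount below 32; the CMOVs built below
/// fix up the amt >= 32 case), for a 64-bit SRL_PARTS on 32-bit x86:
///   shrdl %cl, %hi, %lo   // lo = (lo >> amt) | (hi << (32 - amt))
///   shrl  %cl, %hi        // hi = hi >> amt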
7945SDValue X86TargetLowering::LowerShiftParts(SDValue Op, SelectionDAG &DAG) const{ 7946 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 7947 EVT VT = Op.getValueType(); 7948 unsigned VTBits = VT.getSizeInBits(); 7949 SDLoc dl(Op); 7950 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS; 7951 SDValue ShOpLo = Op.getOperand(0); 7952 SDValue ShOpHi = Op.getOperand(1); 7953 SDValue ShAmt = Op.getOperand(2); 7954 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi, 7955 DAG.getConstant(VTBits - 1, MVT::i8)) 7956 : DAG.getConstant(0, VT); 7957 7958 SDValue Tmp2, Tmp3; 7959 if (Op.getOpcode() == ISD::SHL_PARTS) { 7960 Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt); 7961 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); 7962 } else { 7963 Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt); 7964 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, ShAmt); 7965 } 7966 7967 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt, 7968 DAG.getConstant(VTBits, MVT::i8)); 7969 SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32, 7970 AndNode, DAG.getConstant(0, MVT::i8)); 7971 7972 SDValue Hi, Lo; 7973 SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8); 7974 SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond }; 7975 SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond }; 7976 7977 if (Op.getOpcode() == ISD::SHL_PARTS) { 7978 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0, 4); 7979 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1, 4); 7980 } else { 7981 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0, 4); 7982 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1, 4); 7983 } 7984 7985 SDValue Ops[2] = { Lo, Hi }; 7986 return DAG.getMergeValues(Ops, array_lengthof(Ops), dl); 7987} 7988 7989SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op, 7990 SelectionDAG &DAG) const { 7991 EVT SrcVT = Op.getOperand(0).getValueType(); 7992 7993 if (SrcVT.isVector()) 7994 return SDValue(); 7995 7996 assert(SrcVT.getSimpleVT() <= MVT::i64 && SrcVT.getSimpleVT() >= MVT::i16 && 7997 "Unknown SINT_TO_FP to lower!"); 7998 7999 // These are really Legal; return the operand so the caller accepts it as 8000 // Legal. 
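  // (With SSE, i32 -> f32/f64 is a single cvtsi2ss/cvtsi2sd, and on x86-64
  // the REX.W forms of the same instructions take an i64 GPR directly.)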
8001 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType())) 8002 return Op; 8003 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) && 8004 Subtarget->is64Bit()) { 8005 return Op; 8006 } 8007 8008 SDLoc dl(Op); 8009 unsigned Size = SrcVT.getSizeInBits()/8; 8010 MachineFunction &MF = DAG.getMachineFunction(); 8011 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false); 8012 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 8013 SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), 8014 StackSlot, 8015 MachinePointerInfo::getFixedStack(SSFI), 8016 false, false, 0); 8017 return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG); 8018} 8019 8020SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, 8021 SDValue StackSlot, 8022 SelectionDAG &DAG) const { 8023 // Build the FILD 8024 SDLoc DL(Op); 8025 SDVTList Tys; 8026 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType()); 8027 if (useSSE) 8028 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue); 8029 else 8030 Tys = DAG.getVTList(Op.getValueType(), MVT::Other); 8031 8032 unsigned ByteSize = SrcVT.getSizeInBits()/8; 8033 8034 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot); 8035 MachineMemOperand *MMO; 8036 if (FI) { 8037 int SSFI = FI->getIndex(); 8038 MMO = 8039 DAG.getMachineFunction() 8040 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 8041 MachineMemOperand::MOLoad, ByteSize, ByteSize); 8042 } else { 8043 MMO = cast<LoadSDNode>(StackSlot)->getMemOperand(); 8044 StackSlot = StackSlot.getOperand(1); 8045 } 8046 SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) }; 8047 SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG : 8048 X86ISD::FILD, DL, 8049 Tys, Ops, array_lengthof(Ops), 8050 SrcVT, MMO); 8051 8052 if (useSSE) { 8053 Chain = Result.getValue(1); 8054 SDValue InFlag = Result.getValue(2); 8055 8056 // FIXME: Currently the FST is flagged to the FILD_FLAG. This 8057 // shouldn't be necessary except that RFP cannot be live across 8058 // multiple blocks. When stackifier is fixed, they can be uncoupled. 8059 MachineFunction &MF = DAG.getMachineFunction(); 8060 unsigned SSFISize = Op.getValueType().getSizeInBits()/8; 8061 int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false); 8062 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 8063 Tys = DAG.getVTList(MVT::Other); 8064 SDValue Ops[] = { 8065 Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag 8066 }; 8067 MachineMemOperand *MMO = 8068 DAG.getMachineFunction() 8069 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 8070 MachineMemOperand::MOStore, SSFISize, SSFISize); 8071 8072 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys, 8073 Ops, array_lengthof(Ops), 8074 Op.getValueType(), MMO); 8075 Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot, 8076 MachinePointerInfo::getFixedStack(SSFI), 8077 false, false, false, 0); 8078 } 8079 8080 return Result; 8081} 8082 8083// LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion. 8084SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op, 8085 SelectionDAG &DAG) const { 8086 // This algorithm is not obvious. 
// Here is what we're trying to output:
  /*
     movq       %rax,  %xmm0
     punpckldq  (c0),  %xmm0  // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
     subpd      (c1),  %xmm0  // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
     #ifdef __SSE3__
       haddpd   %xmm0, %xmm0
     #else
       pshufd   $0x4e, %xmm0, %xmm1
       addpd    %xmm1, %xmm0
     #endif
  */

  SDLoc dl(Op);
  LLVMContext *Context = DAG.getContext();

  // Build some magic constants.
  static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
  Constant *C0 = ConstantDataVector::get(*Context, CV0);
  SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16);

  SmallVector<Constant*,2> CV1;
  CV1.push_back(
    ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
                                      APInt(64, 0x4330000000000000ULL))));
  CV1.push_back(
    ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
                                      APInt(64, 0x4530000000000000ULL))));
  Constant *C1 = ConstantVector::get(CV1);
  SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16);

  // Load the 64-bit value into an XMM register.
  SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
                            Op.getOperand(0));
  SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
                              MachinePointerInfo::getConstantPool(),
                              false, false, false, 16);
  SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32,
                              DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, XR1),
                              CLod0);

  SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
                              MachinePointerInfo::getConstantPool(),
                              false, false, false, 16);
  SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck1);
  SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
  SDValue Result;

  if (Subtarget->hasSSE3()) {
    // FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'.
    Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
  } else {
    SDValue S2F = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Sub);
    SDValue Shuffle = getTargetShuffleNode(X86ISD::PSHUFD, dl, MVT::v4i32,
                                           S2F, 0x4E, DAG);
    Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64,
                         DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Shuffle),
                         Sub);
  }

  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
                     DAG.getIntPtrConstant(0));
}

// LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion.
SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // FP constant to bias correct the final result.
  SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
                                   MVT::f64);

  // Load the 32-bit value into an XMM register.
  SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
                             Op.getOperand(0));

  // Zero out the upper parts of the register.
  Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);

  Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
                     DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load),
                     DAG.getIntPtrConstant(0));

  // Or the load with the bias.
  SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64,
                           DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
                                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                                   MVT::v2f64, Load)),
                           DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
                                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                                   MVT::v2f64, Bias)));
  Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
                   DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or),
                   DAG.getIntPtrConstant(0));

  // Subtract the bias.
  SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);

  // Handle final rounding.
  EVT DestVT = Op.getValueType();

  if (DestVT.bitsLT(MVT::f64))
    return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
                       DAG.getIntPtrConstant(0));
  if (DestVT.bitsGT(MVT::f64))
    return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);

  // No rounding is needed; the types already match.
  return Sub;
}

SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDValue N0 = Op.getOperand(0);
  EVT SVT = N0.getValueType();
  SDLoc dl(Op);

  assert((SVT == MVT::v4i8 || SVT == MVT::v4i16 ||
          SVT == MVT::v8i8 || SVT == MVT::v8i16) &&
         "Custom UINT_TO_FP is not supported!");

  EVT NVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
                             SVT.getVectorNumElements());
  return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
                     DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, N0));
}

SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDValue N0 = Op.getOperand(0);
  SDLoc dl(Op);

  if (Op.getValueType().isVector())
    return lowerUINT_TO_FP_vec(Op, DAG);

  // Since UINT_TO_FP is legal (it's marked custom), the DAG combiner won't
  // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
  // the optimization here.
  if (DAG.SignBitIsZero(N0))
    return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0);

  EVT SrcVT = N0.getValueType();
  EVT DstVT = Op.getValueType();
  if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
    return LowerUINT_TO_FP_i64(Op, DAG);
  if (SrcVT == MVT::i32 && X86ScalarSSEf64)
    return LowerUINT_TO_FP_i32(Op, DAG);
  if (Subtarget->is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
    return SDValue();

  // Make a 64-bit buffer, and use it to build an FILD.
  SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
  if (SrcVT == MVT::i32) {
    SDValue WordOff = DAG.getConstant(4, getPointerTy());
    SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl,
                                     getPointerTy(), StackSlot, WordOff);
    SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
                                  StackSlot, MachinePointerInfo(),
                                  false, false, 0);
    SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32),
                                  OffsetSlot, MachinePointerInfo(),
                                  false, false, 0);
    SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
    return Fild;
  }

  assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
                               StackSlot, MachinePointerInfo(),
                               false, false, 0);
  // For i64 source, we need to add the appropriate power of 2 if the input
  // was negative. This is the same as the optimization in
  // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
  // we must be careful to do the computation in x87 extended precision, not
  // in SSE.
  // (The generic code can't know it's OK to do this, or how to.)
  int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
  MachineMemOperand *MMO =
    DAG.getMachineFunction()
       .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
                             MachineMemOperand::MOLoad, 8, 8);

  SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
  SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
  SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
                                         array_lengthof(Ops), MVT::i64, MMO);

  // 0x5F800000 is 2^64 as an IEEE single.
  APInt FF(32, 0x5F800000ULL);

  // Check whether the sign bit is set.
  SDValue SignSet = DAG.getSetCC(dl,
                                 getSetCCResultType(*DAG.getContext(),
                                                    MVT::i64),
                                 Op.getOperand(0), DAG.getConstant(0, MVT::i64),
                                 ISD::SETLT);

  // Build a 64-bit pair (0, FF) in the constant pool, with FF in the lo bits.
  SDValue FudgePtr = DAG.getConstantPool(
                             ConstantInt::get(*DAG.getContext(), FF.zext(64)),
                             getPointerTy());

  // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
  SDValue Zero = DAG.getIntPtrConstant(0);
  SDValue Four = DAG.getIntPtrConstant(4);
  SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet,
                               Zero, Four);
  FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset);

  // Load the value out, extending it from f32 to f80.
  // FIXME: Avoid the extend by constructing the right constant pool?
  SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::f80,
                                 DAG.getEntryNode(), FudgePtr,
                                 MachinePointerInfo::getConstantPool(),
                                 MVT::f32, false, false, 4);
  // Extend everything to 80 bits to force it to be done on x87.
  SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
  return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0));
}

std::pair<SDValue,SDValue>
X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
                                   bool IsSigned, bool IsReplace) const {
  SDLoc DL(Op);

  EVT DstTy = Op.getValueType();

  if (!IsSigned && !isIntegerTypeFTOL(DstTy)) {
    assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
    DstTy = MVT::i64;
  }

  assert(DstTy.getSimpleVT() <= MVT::i64 &&
         DstTy.getSimpleVT() >= MVT::i16 &&
         "Unknown FP_TO_INT to lower!");

  // These are really Legal.
  if (DstTy == MVT::i32 &&
      isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
    return std::make_pair(SDValue(), SDValue());
  if (Subtarget->is64Bit() &&
      DstTy == MVT::i64 &&
      isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
    return std::make_pair(SDValue(), SDValue());

  // We lower FP->int64 either into FISTP64 followed by a load from a
  // temporary stack slot, or into the FTOL runtime function.
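  // A sketch of the FISTP path on x87 (the *_IN_MEM pseudo emitted below is
  // expanded after isel; exact slots and registers vary):
  //   fnstcw  <cwslot>           ; save the current FPU control word
  //   movw    $0xc7f, <cwslot2>  ; control word with rounding = truncate
  //   fldcw   <cwslot2>
  //   fistpll <stackslot>        ; store the truncated integer result
  //   fldcw   <cwslot>           ; restore the original control word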
8329 MachineFunction &MF = DAG.getMachineFunction(); 8330 unsigned MemSize = DstTy.getSizeInBits()/8; 8331 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false); 8332 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 8333 8334 unsigned Opc; 8335 if (!IsSigned && isIntegerTypeFTOL(DstTy)) 8336 Opc = X86ISD::WIN_FTOL; 8337 else 8338 switch (DstTy.getSimpleVT().SimpleTy) { 8339 default: llvm_unreachable("Invalid FP_TO_SINT to lower!"); 8340 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break; 8341 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break; 8342 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break; 8343 } 8344 8345 SDValue Chain = DAG.getEntryNode(); 8346 SDValue Value = Op.getOperand(0); 8347 EVT TheVT = Op.getOperand(0).getValueType(); 8348 // FIXME This causes a redundant load/store if the SSE-class value is already 8349 // in memory, such as if it is on the callstack. 8350 if (isScalarFPTypeInSSEReg(TheVT)) { 8351 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!"); 8352 Chain = DAG.getStore(Chain, DL, Value, StackSlot, 8353 MachinePointerInfo::getFixedStack(SSFI), 8354 false, false, 0); 8355 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other); 8356 SDValue Ops[] = { 8357 Chain, StackSlot, DAG.getValueType(TheVT) 8358 }; 8359 8360 MachineMemOperand *MMO = 8361 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 8362 MachineMemOperand::MOLoad, MemSize, MemSize); 8363 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, 8364 array_lengthof(Ops), DstTy, MMO); 8365 Chain = Value.getValue(1); 8366 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false); 8367 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 8368 } 8369 8370 MachineMemOperand *MMO = 8371 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 8372 MachineMemOperand::MOStore, MemSize, MemSize); 8373 8374 if (Opc != X86ISD::WIN_FTOL) { 8375 // Build the FP_TO_INT*_IN_MEM 8376 SDValue Ops[] = { Chain, Value, StackSlot }; 8377 SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other), 8378 Ops, array_lengthof(Ops), DstTy, 8379 MMO); 8380 return std::make_pair(FIST, StackSlot); 8381 } else { 8382 SDValue ftol = DAG.getNode(X86ISD::WIN_FTOL, DL, 8383 DAG.getVTList(MVT::Other, MVT::Glue), 8384 Chain, Value); 8385 SDValue eax = DAG.getCopyFromReg(ftol, DL, X86::EAX, 8386 MVT::i32, ftol.getValue(1)); 8387 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), DL, X86::EDX, 8388 MVT::i32, eax.getValue(2)); 8389 SDValue Ops[] = { eax, edx }; 8390 SDValue pair = IsReplace 8391 ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops, array_lengthof(Ops)) 8392 : DAG.getMergeValues(Ops, array_lengthof(Ops), DL); 8393 return std::make_pair(pair, SDValue()); 8394 } 8395} 8396 8397static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG, 8398 const X86Subtarget *Subtarget) { 8399 MVT VT = Op->getValueType(0).getSimpleVT(); 8400 SDValue In = Op->getOperand(0); 8401 MVT InVT = In.getValueType().getSimpleVT(); 8402 SDLoc dl(Op); 8403 8404 // Optimize vectors in AVX mode: 8405 // 8406 // v8i16 -> v8i32 8407 // Use vpunpcklwd for 4 lower elements v8i16 -> v4i32. 8408 // Use vpunpckhwd for 4 upper elements v8i16 -> v4i32. 8409 // Concat upper and lower parts. 8410 // 8411 // v4i32 -> v4i64 8412 // Use vpunpckldq for 4 lower elements v4i32 -> v2i64. 8413 // Use vpunpckhdq for 4 upper elements v4i32 -> v2i64. 8414 // Concat upper and lower parts. 
8415 // 8416 8417 if (((VT != MVT::v8i32) || (InVT != MVT::v8i16)) && 8418 ((VT != MVT::v4i64) || (InVT != MVT::v4i32))) 8419 return SDValue(); 8420 8421 if (Subtarget->hasInt256()) 8422 return DAG.getNode(X86ISD::VZEXT_MOVL, dl, VT, In); 8423 8424 SDValue ZeroVec = getZeroVector(InVT, Subtarget, DAG, dl); 8425 SDValue Undef = DAG.getUNDEF(InVT); 8426 bool NeedZero = Op.getOpcode() == ISD::ZERO_EXTEND; 8427 SDValue OpLo = getUnpackl(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef); 8428 SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef); 8429 8430 MVT HVT = MVT::getVectorVT(VT.getVectorElementType(), 8431 VT.getVectorNumElements()/2); 8432 8433 OpLo = DAG.getNode(ISD::BITCAST, dl, HVT, OpLo); 8434 OpHi = DAG.getNode(ISD::BITCAST, dl, HVT, OpHi); 8435 8436 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi); 8437} 8438 8439SDValue X86TargetLowering::LowerANY_EXTEND(SDValue Op, 8440 SelectionDAG &DAG) const { 8441 if (Subtarget->hasFp256()) { 8442 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget); 8443 if (Res.getNode()) 8444 return Res; 8445 } 8446 8447 return SDValue(); 8448} 8449SDValue X86TargetLowering::LowerZERO_EXTEND(SDValue Op, 8450 SelectionDAG &DAG) const { 8451 SDLoc DL(Op); 8452 MVT VT = Op.getValueType().getSimpleVT(); 8453 SDValue In = Op.getOperand(0); 8454 MVT SVT = In.getValueType().getSimpleVT(); 8455 8456 if (Subtarget->hasFp256()) { 8457 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget); 8458 if (Res.getNode()) 8459 return Res; 8460 } 8461 8462 if (!VT.is256BitVector() || !SVT.is128BitVector() || 8463 VT.getVectorNumElements() != SVT.getVectorNumElements()) 8464 return SDValue(); 8465 8466 assert(Subtarget->hasFp256() && "256-bit vector is observed without AVX!"); 8467 8468 // AVX2 has better support of integer extending. 8469 if (Subtarget->hasInt256()) 8470 return DAG.getNode(X86ISD::VZEXT, DL, VT, In); 8471 8472 SDValue Lo = DAG.getNode(X86ISD::VZEXT, DL, MVT::v4i32, In); 8473 static const int Mask[] = {4, 5, 6, 7, -1, -1, -1, -1}; 8474 SDValue Hi = DAG.getNode(X86ISD::VZEXT, DL, MVT::v4i32, 8475 DAG.getVectorShuffle(MVT::v8i16, DL, In, 8476 DAG.getUNDEF(MVT::v8i16), 8477 &Mask[0])); 8478 8479 return DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i32, Lo, Hi); 8480} 8481 8482SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { 8483 SDLoc DL(Op); 8484 MVT VT = Op.getValueType().getSimpleVT(); 8485 SDValue In = Op.getOperand(0); 8486 MVT SVT = In.getValueType().getSimpleVT(); 8487 8488 if ((VT == MVT::v4i32) && (SVT == MVT::v4i64)) { 8489 // On AVX2, v4i64 -> v4i32 becomes VPERMD. 8490 if (Subtarget->hasInt256()) { 8491 static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1}; 8492 In = DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, In); 8493 In = DAG.getVectorShuffle(MVT::v8i32, DL, In, DAG.getUNDEF(MVT::v8i32), 8494 ShufMask); 8495 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In, 8496 DAG.getIntPtrConstant(0)); 8497 } 8498 8499 // On AVX, v4i64 -> v4i32 becomes a sequence that uses PSHUFD and MOVLHPS. 
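    // A sketch of the instruction sequence this builds:
    //   vextractf128 $1, %ymm0, %xmm1  // split off the high v2i64 half
    //   pshufd  $0x8, %xmm0, %xmm0     // keep the low i32 of each i64
    //   pshufd  $0x8, %xmm1, %xmm1
    //   movlhps %xmm1, %xmm0           // <lo0,lo1,hi0,hi1>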
    SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
                               DAG.getIntPtrConstant(0));
    SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
                               DAG.getIntPtrConstant(2));

    OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
    OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);

    // The PSHUFD mask:
    static const int ShufMask1[] = {0, 2, 0, 0};
    SDValue Undef = DAG.getUNDEF(VT);
    OpLo = DAG.getVectorShuffle(VT, DL, OpLo, Undef, ShufMask1);
    OpHi = DAG.getVectorShuffle(VT, DL, OpHi, Undef, ShufMask1);

    // The MOVLHPS mask:
    static const int ShufMask2[] = {0, 1, 4, 5};
    return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask2);
  }

  if ((VT == MVT::v8i16) && (SVT == MVT::v8i32)) {
    // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
    if (Subtarget->hasInt256()) {
      In = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, In);

      SmallVector<SDValue,32> pshufbMask;
      for (unsigned i = 0; i < 2; ++i) {
        pshufbMask.push_back(DAG.getConstant(0x0, MVT::i8));
        pshufbMask.push_back(DAG.getConstant(0x1, MVT::i8));
        pshufbMask.push_back(DAG.getConstant(0x4, MVT::i8));
        pshufbMask.push_back(DAG.getConstant(0x5, MVT::i8));
        pshufbMask.push_back(DAG.getConstant(0x8, MVT::i8));
        pshufbMask.push_back(DAG.getConstant(0x9, MVT::i8));
        pshufbMask.push_back(DAG.getConstant(0xc, MVT::i8));
        pshufbMask.push_back(DAG.getConstant(0xd, MVT::i8));
        for (unsigned j = 0; j < 8; ++j)
          pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
      }
      SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8,
                               &pshufbMask[0], 32);
      In = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8, In, BV);
      In = DAG.getNode(ISD::BITCAST, DL, MVT::v4i64, In);

      static const int ShufMask[] = {0, 2, -1, -1};
      In = DAG.getVectorShuffle(MVT::v4i64, DL, In, DAG.getUNDEF(MVT::v4i64),
                                &ShufMask[0]);
      In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
                       DAG.getIntPtrConstant(0));
      return DAG.getNode(ISD::BITCAST, DL, VT, In);
    }

    SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
                               DAG.getIntPtrConstant(0));

    SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
                               DAG.getIntPtrConstant(4));

    OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpLo);
    OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpHi);

    // The PSHUFB mask:
    static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
                                    -1, -1, -1, -1, -1, -1, -1, -1};

    SDValue Undef = DAG.getUNDEF(MVT::v16i8);
    OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, Undef, ShufMask1);
    OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, Undef, ShufMask1);

    OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
    OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);

    // The MOVLHPS mask:
    static const int ShufMask2[] = {0, 1, 4, 5};
    SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
    return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, res);
  }

  // Handle truncation of V256 to V128 using shuffles.
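  // E.g. for a 4-element result, the input is viewed as a vector with twice
  // the elements and the even lanes are kept: shuffle mask
  // <0,2,4,6,u,u,u,u>, followed by extracting the low 128 bits.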
8577 if (!VT.is128BitVector() || !SVT.is256BitVector()) 8578 return SDValue(); 8579 8580 assert(VT.getVectorNumElements() != SVT.getVectorNumElements() && 8581 "Invalid op"); 8582 assert(Subtarget->hasFp256() && "256-bit vector without AVX!"); 8583 8584 unsigned NumElems = VT.getVectorNumElements(); 8585 EVT NVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 8586 NumElems * 2); 8587 8588 SmallVector<int, 16> MaskVec(NumElems * 2, -1); 8589 // Prepare truncation shuffle mask 8590 for (unsigned i = 0; i != NumElems; ++i) 8591 MaskVec[i] = i * 2; 8592 SDValue V = DAG.getVectorShuffle(NVT, DL, 8593 DAG.getNode(ISD::BITCAST, DL, NVT, In), 8594 DAG.getUNDEF(NVT), &MaskVec[0]); 8595 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, 8596 DAG.getIntPtrConstant(0)); 8597} 8598 8599SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op, 8600 SelectionDAG &DAG) const { 8601 MVT VT = Op.getValueType().getSimpleVT(); 8602 if (VT.isVector()) { 8603 if (VT == MVT::v8i16) 8604 return DAG.getNode(ISD::TRUNCATE, SDLoc(Op), VT, 8605 DAG.getNode(ISD::FP_TO_SINT, SDLoc(Op), 8606 MVT::v8i32, Op.getOperand(0))); 8607 return SDValue(); 8608 } 8609 8610 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, 8611 /*IsSigned=*/ true, /*IsReplace=*/ false); 8612 SDValue FIST = Vals.first, StackSlot = Vals.second; 8613 // If FP_TO_INTHelper failed, the node is actually supposed to be Legal. 8614 if (FIST.getNode() == 0) return Op; 8615 8616 if (StackSlot.getNode()) 8617 // Load the result. 8618 return DAG.getLoad(Op.getValueType(), SDLoc(Op), 8619 FIST, StackSlot, MachinePointerInfo(), 8620 false, false, false, 0); 8621 8622 // The node is the result. 8623 return FIST; 8624} 8625 8626SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op, 8627 SelectionDAG &DAG) const { 8628 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, 8629 /*IsSigned=*/ false, /*IsReplace=*/ false); 8630 SDValue FIST = Vals.first, StackSlot = Vals.second; 8631 assert(FIST.getNode() && "Unexpected failure"); 8632 8633 if (StackSlot.getNode()) 8634 // Load the result. 8635 return DAG.getLoad(Op.getValueType(), SDLoc(Op), 8636 FIST, StackSlot, MachinePointerInfo(), 8637 false, false, false, 0); 8638 8639 // The node is the result. 8640 return FIST; 8641} 8642 8643static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) { 8644 SDLoc DL(Op); 8645 MVT VT = Op.getValueType().getSimpleVT(); 8646 SDValue In = Op.getOperand(0); 8647 MVT SVT = In.getValueType().getSimpleVT(); 8648 8649 assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!"); 8650 8651 return DAG.getNode(X86ISD::VFPEXT, DL, VT, 8652 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32, 8653 In, DAG.getUNDEF(SVT))); 8654} 8655 8656SDValue X86TargetLowering::LowerFABS(SDValue Op, SelectionDAG &DAG) const { 8657 LLVMContext *Context = DAG.getContext(); 8658 SDLoc dl(Op); 8659 MVT VT = Op.getValueType().getSimpleVT(); 8660 MVT EltVT = VT; 8661 unsigned NumElts = VT == MVT::f64 ? 
2 : 4; 8662 if (VT.isVector()) { 8663 EltVT = VT.getVectorElementType(); 8664 NumElts = VT.getVectorNumElements(); 8665 } 8666 Constant *C; 8667 if (EltVT == MVT::f64) 8668 C = ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble, 8669 APInt(64, ~(1ULL << 63)))); 8670 else 8671 C = ConstantFP::get(*Context, APFloat(APFloat::IEEEsingle, 8672 APInt(32, ~(1U << 31)))); 8673 C = ConstantVector::getSplat(NumElts, C); 8674 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy()); 8675 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 8676 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 8677 MachinePointerInfo::getConstantPool(), 8678 false, false, false, Alignment); 8679 if (VT.isVector()) { 8680 MVT ANDVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64; 8681 return DAG.getNode(ISD::BITCAST, dl, VT, 8682 DAG.getNode(ISD::AND, dl, ANDVT, 8683 DAG.getNode(ISD::BITCAST, dl, ANDVT, 8684 Op.getOperand(0)), 8685 DAG.getNode(ISD::BITCAST, dl, ANDVT, Mask))); 8686 } 8687 return DAG.getNode(X86ISD::FAND, dl, VT, Op.getOperand(0), Mask); 8688} 8689 8690SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) const { 8691 LLVMContext *Context = DAG.getContext(); 8692 SDLoc dl(Op); 8693 MVT VT = Op.getValueType().getSimpleVT(); 8694 MVT EltVT = VT; 8695 unsigned NumElts = VT == MVT::f64 ? 2 : 4; 8696 if (VT.isVector()) { 8697 EltVT = VT.getVectorElementType(); 8698 NumElts = VT.getVectorNumElements(); 8699 } 8700 Constant *C; 8701 if (EltVT == MVT::f64) 8702 C = ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble, 8703 APInt(64, 1ULL << 63))); 8704 else 8705 C = ConstantFP::get(*Context, APFloat(APFloat::IEEEsingle, 8706 APInt(32, 1U << 31))); 8707 C = ConstantVector::getSplat(NumElts, C); 8708 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy()); 8709 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 8710 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 8711 MachinePointerInfo::getConstantPool(), 8712 false, false, false, Alignment); 8713 if (VT.isVector()) { 8714 MVT XORVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64; 8715 return DAG.getNode(ISD::BITCAST, dl, VT, 8716 DAG.getNode(ISD::XOR, dl, XORVT, 8717 DAG.getNode(ISD::BITCAST, dl, XORVT, 8718 Op.getOperand(0)), 8719 DAG.getNode(ISD::BITCAST, dl, XORVT, Mask))); 8720 } 8721 8722 return DAG.getNode(X86ISD::FXOR, dl, VT, Op.getOperand(0), Mask); 8723} 8724 8725SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { 8726 LLVMContext *Context = DAG.getContext(); 8727 SDValue Op0 = Op.getOperand(0); 8728 SDValue Op1 = Op.getOperand(1); 8729 SDLoc dl(Op); 8730 MVT VT = Op.getValueType().getSimpleVT(); 8731 MVT SrcVT = Op1.getValueType().getSimpleVT(); 8732 8733 // If second operand is smaller, extend it first. 8734 if (SrcVT.bitsLT(VT)) { 8735 Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1); 8736 SrcVT = VT; 8737 } 8738 // And if it is bigger, shrink it first. 8739 if (SrcVT.bitsGT(VT)) { 8740 Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1)); 8741 SrcVT = VT; 8742 } 8743 8744 // At this point the operands and the result should have the same 8745 // type, and that won't be f80 since that is not custom lowered. 8746 8747 // First get the sign bit of second operand. 
8748 SmallVector<Constant*,4> CV; 8749 if (SrcVT == MVT::f64) { 8750 const fltSemantics &Sem = APFloat::IEEEdouble; 8751 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(64, 1ULL << 63)))); 8752 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(64, 0)))); 8753 } else { 8754 const fltSemantics &Sem = APFloat::IEEEsingle; 8755 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(32, 1U << 31)))); 8756 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(32, 0)))); 8757 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(32, 0)))); 8758 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(32, 0)))); 8759 } 8760 Constant *C = ConstantVector::get(CV); 8761 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 8762 SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx, 8763 MachinePointerInfo::getConstantPool(), 8764 false, false, false, 16); 8765 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1); 8766 8767 // Shift sign bit right or left if the two operands have different types. 8768 if (SrcVT.bitsGT(VT)) { 8769 // Op0 is MVT::f32, Op1 is MVT::f64. 8770 SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, SignBit); 8771 SignBit = DAG.getNode(X86ISD::FSRL, dl, MVT::v2f64, SignBit, 8772 DAG.getConstant(32, MVT::i32)); 8773 SignBit = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, SignBit); 8774 SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, SignBit, 8775 DAG.getIntPtrConstant(0)); 8776 } 8777 8778 // Clear first operand sign bit. 8779 CV.clear(); 8780 if (VT == MVT::f64) { 8781 const fltSemantics &Sem = APFloat::IEEEdouble; 8782 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, 8783 APInt(64, ~(1ULL << 63))))); 8784 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(64, 0)))); 8785 } else { 8786 const fltSemantics &Sem = APFloat::IEEEsingle; 8787 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, 8788 APInt(32, ~(1U << 31))))); 8789 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(32, 0)))); 8790 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(32, 0)))); 8791 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(32, 0)))); 8792 } 8793 C = ConstantVector::get(CV); 8794 CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 8795 SDValue Mask2 = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 8796 MachinePointerInfo::getConstantPool(), 8797 false, false, false, 16); 8798 SDValue Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Mask2); 8799 8800 // Or the value with the sign bit. 8801 return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit); 8802} 8803 8804static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) { 8805 SDValue N0 = Op.getOperand(0); 8806 SDLoc dl(Op); 8807 MVT VT = Op.getValueType().getSimpleVT(); 8808 8809 // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1). 8810 SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0, 8811 DAG.getConstant(1, VT)); 8812 return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT)); 8813} 8814 8815// LowerVectorAllZeroTest - Check whether an OR'd tree is PTEST-able. 
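// The pattern matched is a scalar equality test of an OR tree such as
//   (or (or (extractelt V, 0), (extractelt V, 1)), ...) == 0
// where the extracts together cover every lane of V; it is lowered to
// (ptest V, V), which sets ZF exactly when all bits of V are zero.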
//
SDValue X86TargetLowering::LowerVectorAllZeroTest(SDValue Op,
                                                  SelectionDAG &DAG) const {
  assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");

  if (!Subtarget->hasSSE41())
    return SDValue();

  if (!Op->hasOneUse())
    return SDValue();

  SDNode *N = Op.getNode();
  SDLoc DL(N);

  SmallVector<SDValue, 8> Opnds;
  DenseMap<SDValue, unsigned> VecInMap;
  EVT VT = MVT::Other;

  // Recognize a special case where a vector is cast into a wide integer to
  // test all 0s.
  Opnds.push_back(N->getOperand(0));
  Opnds.push_back(N->getOperand(1));

  for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
    SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
    // BFS traverse all OR'd operands.
    if (I->getOpcode() == ISD::OR) {
      Opnds.push_back(I->getOperand(0));
      Opnds.push_back(I->getOperand(1));
      // Re-evaluate the number of nodes to be traversed.
      e += 2; // 2 more nodes (LHS and RHS) are pushed.
      continue;
    }

    // Quit if the operand is not an EXTRACT_VECTOR_ELT.
    if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();

    // Quit if the index is not a constant.
    SDValue Idx = I->getOperand(1);
    if (!isa<ConstantSDNode>(Idx))
      return SDValue();

    SDValue ExtractedFromVec = I->getOperand(0);
    DenseMap<SDValue, unsigned>::iterator M = VecInMap.find(ExtractedFromVec);
    if (M == VecInMap.end()) {
      VT = ExtractedFromVec.getValueType();
      // Quit if not 128/256-bit vector.
      if (!VT.is128BitVector() && !VT.is256BitVector())
        return SDValue();
      // Quit if not the same type.
      if (VecInMap.begin() != VecInMap.end() &&
          VT != VecInMap.begin()->first.getValueType())
        return SDValue();
      M = VecInMap.insert(std::make_pair(ExtractedFromVec, 0)).first;
    }
    M->second |= 1U << cast<ConstantSDNode>(Idx)->getZExtValue();
  }

  assert((VT.is128BitVector() || VT.is256BitVector()) &&
         "Not extracted from 128-/256-bit vector.");

  unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U;
  SmallVector<SDValue, 8> VecIns;

  for (DenseMap<SDValue, unsigned>::const_iterator
        I = VecInMap.begin(), E = VecInMap.end(); I != E; ++I) {
    // Quit if not all elements are used.
    if (I->second != FullMask)
      return SDValue();
    VecIns.push_back(I->first);
  }

  EVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;

  // Cast all vectors into TestVT for PTEST.
  for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
    VecIns[i] = DAG.getNode(ISD::BITCAST, DL, TestVT, VecIns[i]);

  // If more than one full vector is evaluated, OR them first before PTEST.
  for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
    // Each iteration will OR 2 nodes and append the result until there is
    // only 1 node left, i.e. the final OR'd value of all vectors.
    SDValue LHS = VecIns[Slot];
    SDValue RHS = VecIns[Slot + 1];
    VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
  }

  return DAG.getNode(X86ISD::PTEST, DL, MVT::i32,
                     VecIns.back(), VecIns.back());
}

/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent.
SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC,
                                    SelectionDAG &DAG) const {
  SDLoc dl(Op);

  // CF and OF aren't always set the way we want. Determine which
  // of these we need.
  bool NeedCF = false;
  bool NeedOF = false;
  switch (X86CC) {
  default: break;
  case X86::COND_A: case X86::COND_AE:
  case X86::COND_B: case X86::COND_BE:
    NeedCF = true;
    break;
  case X86::COND_G: case X86::COND_GE:
  case X86::COND_L: case X86::COND_LE:
  case X86::COND_O: case X86::COND_NO:
    NeedOF = true;
    break;
  }

  // See if we can use the EFLAGS value from the operand instead of
  // doing a separate TEST. TEST always sets OF and CF to 0, so unless
  // we prove that the arithmetic won't overflow, we can't use OF or CF.
  if (Op.getResNo() != 0 || NeedOF || NeedCF)
    // Emit a CMP with 0, which is the TEST pattern.
    return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
                       DAG.getConstant(0, Op.getValueType()));

  unsigned Opcode = 0;
  unsigned NumOperands = 0;

  // Truncate operations may prevent the merge of the SETCC instruction
  // and the arithmetic instruction before it. Attempt to truncate the
  // operands of the arithmetic instruction and use a reduced bit-width
  // instruction.
  bool NeedTruncation = false;
  SDValue ArithOp = Op;
  if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) {
    SDValue Arith = Op->getOperand(0);
    // Both the trunc and the arithmetic op need to have one user each.
    if (Arith->hasOneUse())
      switch (Arith.getOpcode()) {
        default: break;
        case ISD::ADD:
        case ISD::SUB:
        case ISD::AND:
        case ISD::OR:
        case ISD::XOR: {
          NeedTruncation = true;
          ArithOp = Arith;
        }
      }
  }

  // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
  // which may be the result of a CAST. We use the variable 'Op', which is the
  // non-casted variable, when we check for possible users.
  switch (ArithOp.getOpcode()) {
  case ISD::ADD:
    // Due to an isel shortcoming, be conservative if this add is likely to be
    // selected as part of a load-modify-store instruction. When the root node
    // in a match is a store, isel doesn't know how to remap non-chain non-flag
    // uses of other nodes in the match, such as the ADD in this case. This
    // leads to the ADD being left around and reselected, with the result being
    // two adds in the output. Alas, even if none of our users are stores, that
    // doesn't prove we're O.K. Ergo, if we have any parents that aren't
    // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require
    // climbing the DAG back to the root, and it doesn't seem to be worth the
    // effort.
    for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
           UE = Op.getNode()->use_end(); UI != UE; ++UI)
      if (UI->getOpcode() != ISD::CopyToReg &&
          UI->getOpcode() != ISD::SETCC &&
          UI->getOpcode() != ISD::STORE)
        goto default_case;

    if (ConstantSDNode *C =
        dyn_cast<ConstantSDNode>(ArithOp.getNode()->getOperand(1))) {
      // An add of one will be selected as an INC.
      if (C->getAPIntValue() == 1) {
        Opcode = X86ISD::INC;
        NumOperands = 1;
        break;
      }

      // An add of negative one (subtract of one) will be selected as a DEC.
      if (C->getAPIntValue().isAllOnesValue()) {
        Opcode = X86ISD::DEC;
        NumOperands = 1;
        break;
      }
    }

    // Otherwise use a regular EFLAGS-setting add.
    Opcode = X86ISD::ADD;
    NumOperands = 2;
    break;
  case ISD::AND: {
    // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
    // because a TEST instruction will be better.
    bool NonFlagUse = false;
    for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
           UE = Op.getNode()->use_end(); UI != UE; ++UI) {
      SDNode *User = *UI;
      unsigned UOpNo = UI.getOperandNo();
      if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
        // Look past the truncate.
        UOpNo = User->use_begin().getOperandNo();
        User = *User->use_begin();
      }

      if (User->getOpcode() != ISD::BRCOND &&
          User->getOpcode() != ISD::SETCC &&
          !(User->getOpcode() == ISD::SELECT && UOpNo == 0)) {
        NonFlagUse = true;
        break;
      }
    }

    if (!NonFlagUse)
      break;
  }
    // FALL THROUGH
  case ISD::SUB:
  case ISD::OR:
  case ISD::XOR:
    // Due to the ISEL shortcoming noted above, be conservative if this op is
    // likely to be selected as part of a load-modify-store instruction.
    for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
           UE = Op.getNode()->use_end(); UI != UE; ++UI)
      if (UI->getOpcode() == ISD::STORE)
        goto default_case;

    // Otherwise use a regular EFLAGS-setting instruction.
    switch (ArithOp.getOpcode()) {
    default: llvm_unreachable("unexpected operator!");
    case ISD::SUB: Opcode = X86ISD::SUB; break;
    case ISD::XOR: Opcode = X86ISD::XOR; break;
    case ISD::AND: Opcode = X86ISD::AND; break;
    case ISD::OR: {
      if (!NeedTruncation && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
        SDValue EFLAGS = LowerVectorAllZeroTest(Op, DAG);
        if (EFLAGS.getNode())
          return EFLAGS;
      }
      Opcode = X86ISD::OR;
      break;
    }
    }

    NumOperands = 2;
    break;
  case X86ISD::ADD:
  case X86ISD::SUB:
  case X86ISD::INC:
  case X86ISD::DEC:
  case X86ISD::OR:
  case X86ISD::XOR:
  case X86ISD::AND:
    return SDValue(Op.getNode(), 1);
  default:
  default_case:
    break;
  }

  // If we found that truncation is beneficial, perform the truncation and
  // update 'Op'.
  if (NeedTruncation) {
    EVT VT = Op.getValueType();
    SDValue WideVal = Op->getOperand(0);
    EVT WideVT = WideVal.getValueType();
    unsigned ConvertedOp = 0;
    // Use a target machine opcode to prevent further DAGCombine
    // optimizations that may separate the arithmetic operations
    // from the setcc node.
    switch (WideVal.getOpcode()) {
    default: break;
    case ISD::ADD: ConvertedOp = X86ISD::ADD; break;
    case ISD::SUB: ConvertedOp = X86ISD::SUB; break;
    case ISD::AND: ConvertedOp = X86ISD::AND; break;
    case ISD::OR:  ConvertedOp = X86ISD::OR;  break;
    case ISD::XOR: ConvertedOp = X86ISD::XOR; break;
    }

    if (ConvertedOp) {
      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
      if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) {
        SDValue V0 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(0));
        SDValue V1 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(1));
        Op = DAG.getNode(ConvertedOp, dl, VT, V0, V1);
      }
    }
  }

  if (Opcode == 0)
    // Emit a CMP with 0, which is the TEST pattern.
9106 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op, 9107 DAG.getConstant(0, Op.getValueType())); 9108 9109 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); 9110 SmallVector<SDValue, 4> Ops; 9111 for (unsigned i = 0; i != NumOperands; ++i) 9112 Ops.push_back(Op.getOperand(i)); 9113 9114 SDValue New = DAG.getNode(Opcode, dl, VTs, &Ops[0], NumOperands); 9115 DAG.ReplaceAllUsesWith(Op, New); 9116 return SDValue(New.getNode(), 1); 9117} 9118 9119/// Emit nodes that will be selected as "cmp Op0,Op1", or something 9120/// equivalent. 9121SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC, 9122 SelectionDAG &DAG) const { 9123 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1)) 9124 if (C->getAPIntValue() == 0) 9125 return EmitTest(Op0, X86CC, DAG); 9126 9127 SDLoc dl(Op0); 9128 if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 || 9129 Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) { 9130 // Use SUB instead of CMP to enable CSE between SUB and CMP. 9131 SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32); 9132 SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs, 9133 Op0, Op1); 9134 return SDValue(Sub.getNode(), 1); 9135 } 9136 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1); 9137} 9138 9139/// Convert a comparison if required by the subtarget. 9140SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp, 9141 SelectionDAG &DAG) const { 9142 // If the subtarget does not support the FUCOMI instruction, floating-point 9143 // comparisons have to be converted. 9144 if (Subtarget->hasCMov() || 9145 Cmp.getOpcode() != X86ISD::CMP || 9146 !Cmp.getOperand(0).getValueType().isFloatingPoint() || 9147 !Cmp.getOperand(1).getValueType().isFloatingPoint()) 9148 return Cmp; 9149 9150 // The instruction selector will select an FUCOM instruction instead of 9151 // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence 9152 // build an SDNode sequence that transfers the result from FPSW into EFLAGS: 9153 // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8)))) 9154 SDLoc dl(Cmp); 9155 SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp); 9156 SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW); 9157 SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW, 9158 DAG.getConstant(8, MVT::i8)); 9159 SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl); 9160 return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl); 9161} 9162 9163static bool isAllOnes(SDValue V) { 9164 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V); 9165 return C && C->isAllOnesValue(); 9166} 9167 9168/// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node 9169/// if it's possible. 9170SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC, 9171 SDLoc dl, SelectionDAG &DAG) const { 9172 SDValue Op0 = And.getOperand(0); 9173 SDValue Op1 = And.getOperand(1); 9174 if (Op0.getOpcode() == ISD::TRUNCATE) 9175 Op0 = Op0.getOperand(0); 9176 if (Op1.getOpcode() == ISD::TRUNCATE) 9177 Op1 = Op1.getOperand(0); 9178 9179 SDValue LHS, RHS; 9180 if (Op1.getOpcode() == ISD::SHL) 9181 std::swap(Op0, Op1); 9182 if (Op0.getOpcode() == ISD::SHL) { 9183 if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0))) 9184 if (And00C->getZExtValue() == 1) { 9185 // If we looked past a truncate, check that it's only truncating away 9186 // known zeros. 
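// For example, if the AND was performed at i32 but (1 << N) was computed at
// i64, bit N may be one of the bits the truncate discarded; the BT below is
// only safe if those truncated-away high bits are known zero.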
9187 unsigned BitWidth = Op0.getValueSizeInBits(); 9188 unsigned AndBitWidth = And.getValueSizeInBits(); 9189 if (BitWidth > AndBitWidth) { 9190 APInt Zeros, Ones; 9191 DAG.ComputeMaskedBits(Op0, Zeros, Ones); 9192 if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth) 9193 return SDValue(); 9194 } 9195 LHS = Op1; 9196 RHS = Op0.getOperand(1); 9197 } 9198 } else if (Op1.getOpcode() == ISD::Constant) { 9199 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1); 9200 uint64_t AndRHSVal = AndRHS->getZExtValue(); 9201 SDValue AndLHS = Op0; 9202 9203 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) { 9204 LHS = AndLHS.getOperand(0); 9205 RHS = AndLHS.getOperand(1); 9206 } 9207 9208 // Use BT if the immediate can't be encoded in a TEST instruction. 9209 if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) { 9210 LHS = AndLHS; 9211 RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), LHS.getValueType()); 9212 } 9213 } 9214 9215 if (LHS.getNode()) { 9216 // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT 9217 // instruction. Since the shift amount is in-range-or-undefined, we know 9218 // that doing a bittest on the i32 value is ok. We extend to i32 because 9219 // the encoding for the i16 version is larger than the i32 version. 9220 // Also promote i16 to i32 for performance / code size reason. 9221 if (LHS.getValueType() == MVT::i8 || 9222 LHS.getValueType() == MVT::i16) 9223 LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS); 9224 9225 // If the operand types disagree, extend the shift amount to match. Since 9226 // BT ignores high bits (like shifts) we can use anyextend. 9227 if (LHS.getValueType() != RHS.getValueType()) 9228 RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS); 9229 9230 SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS); 9231 X86::CondCode Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B; 9232 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 9233 DAG.getConstant(Cond, MVT::i8), BT); 9234 } 9235 9236 return SDValue(); 9237} 9238 9239// Lower256IntVSETCC - Break a VSETCC 256-bit integer VSETCC into two new 128 9240// ones, and then concatenate the result back. 
9241static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) { 9242 MVT VT = Op.getValueType().getSimpleVT(); 9243 9244 assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC && 9245 "Unsupported value type for operation"); 9246 9247 unsigned NumElems = VT.getVectorNumElements(); 9248 SDLoc dl(Op); 9249 SDValue CC = Op.getOperand(2); 9250 9251 // Extract the LHS vectors 9252 SDValue LHS = Op.getOperand(0); 9253 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl); 9254 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl); 9255 9256 // Extract the RHS vectors 9257 SDValue RHS = Op.getOperand(1); 9258 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl); 9259 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl); 9260 9261 // Issue the operation on the smaller types and concatenate the result back 9262 MVT EltVT = VT.getVectorElementType(); 9263 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); 9264 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, 9265 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC), 9266 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC)); 9267} 9268 9269static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget, 9270 SelectionDAG &DAG) { 9271 SDValue Cond; 9272 SDValue Op0 = Op.getOperand(0); 9273 SDValue Op1 = Op.getOperand(1); 9274 SDValue CC = Op.getOperand(2); 9275 MVT VT = Op.getValueType().getSimpleVT(); 9276 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 9277 bool isFP = Op.getOperand(1).getValueType().getSimpleVT().isFloatingPoint(); 9278 SDLoc dl(Op); 9279 9280 if (isFP) { 9281#ifndef NDEBUG 9282 MVT EltVT = Op0.getValueType().getVectorElementType().getSimpleVT(); 9283 assert(EltVT == MVT::f32 || EltVT == MVT::f64); 9284#endif 9285 9286 unsigned SSECC; 9287 bool Swap = false; 9288 9289 // SSE Condition code mapping: 9290 // 0 - EQ 9291 // 1 - LT 9292 // 2 - LE 9293 // 3 - UNORD 9294 // 4 - NEQ 9295 // 5 - NLT 9296 // 6 - NLE 9297 // 7 - ORD 9298 switch (SetCCOpcode) { 9299 default: llvm_unreachable("Unexpected SETCC condition"); 9300 case ISD::SETOEQ: 9301 case ISD::SETEQ: SSECC = 0; break; 9302 case ISD::SETOGT: 9303 case ISD::SETGT: Swap = true; // Fallthrough 9304 case ISD::SETLT: 9305 case ISD::SETOLT: SSECC = 1; break; 9306 case ISD::SETOGE: 9307 case ISD::SETGE: Swap = true; // Fallthrough 9308 case ISD::SETLE: 9309 case ISD::SETOLE: SSECC = 2; break; 9310 case ISD::SETUO: SSECC = 3; break; 9311 case ISD::SETUNE: 9312 case ISD::SETNE: SSECC = 4; break; 9313 case ISD::SETULE: Swap = true; // Fallthrough 9314 case ISD::SETUGE: SSECC = 5; break; 9315 case ISD::SETULT: Swap = true; // Fallthrough 9316 case ISD::SETUGT: SSECC = 6; break; 9317 case ISD::SETO: SSECC = 7; break; 9318 case ISD::SETUEQ: 9319 case ISD::SETONE: SSECC = 8; break; 9320 } 9321 if (Swap) 9322 std::swap(Op0, Op1); 9323 9324 // In the two special cases we can't handle, emit two comparisons. 9325 if (SSECC == 8) { 9326 unsigned CC0, CC1; 9327 unsigned CombineOpc; 9328 if (SetCCOpcode == ISD::SETUEQ) { 9329 CC0 = 3; CC1 = 0; CombineOpc = ISD::OR; 9330 } else { 9331 assert(SetCCOpcode == ISD::SETONE); 9332 CC0 = 7; CC1 = 4; CombineOpc = ISD::AND; 9333 } 9334 9335 SDValue Cmp0 = DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1, 9336 DAG.getConstant(CC0, MVT::i8)); 9337 SDValue Cmp1 = DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1, 9338 DAG.getConstant(CC1, MVT::i8)); 9339 return DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1); 9340 } 9341 // Handle all other FP comparisons here. 
9342 return DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1, 9343 DAG.getConstant(SSECC, MVT::i8)); 9344 } 9345 9346 // Break 256-bit integer vector compare into smaller ones. 9347 if (VT.is256BitVector() && !Subtarget->hasInt256()) 9348 return Lower256IntVSETCC(Op, DAG); 9349 9350 // We are handling one of the integer comparisons here. Since SSE only has 9351 // GT and EQ comparisons for integer, swapping operands and multiple 9352 // operations may be required for some comparisons. 9353 unsigned Opc; 9354 bool Swap = false, Invert = false, FlipSigns = false, MinMax = false; 9355 9356 switch (SetCCOpcode) { 9357 default: llvm_unreachable("Unexpected SETCC condition"); 9358 case ISD::SETNE: Invert = true; 9359 case ISD::SETEQ: Opc = X86ISD::PCMPEQ; break; 9360 case ISD::SETLT: Swap = true; 9361 case ISD::SETGT: Opc = X86ISD::PCMPGT; break; 9362 case ISD::SETGE: Swap = true; 9363 case ISD::SETLE: Opc = X86ISD::PCMPGT; Invert = true; break; 9364 case ISD::SETULT: Swap = true; 9365 case ISD::SETUGT: Opc = X86ISD::PCMPGT; FlipSigns = true; break; 9366 case ISD::SETUGE: Swap = true; 9367 case ISD::SETULE: Opc = X86ISD::PCMPGT; FlipSigns = true; Invert = true; break; 9368 } 9369 9370 // Special case: Use min/max operations for SETULE/SETUGE 9371 MVT VET = VT.getVectorElementType(); 9372 bool hasMinMax = 9373 (Subtarget->hasSSE41() && (VET >= MVT::i8 && VET <= MVT::i32)) 9374 || (Subtarget->hasSSE2() && (VET == MVT::i8)); 9375 9376 if (hasMinMax) { 9377 switch (SetCCOpcode) { 9378 default: break; 9379 case ISD::SETULE: Opc = X86ISD::UMIN; MinMax = true; break; 9380 case ISD::SETUGE: Opc = X86ISD::UMAX; MinMax = true; break; 9381 } 9382 9383 if (MinMax) { Swap = false; Invert = false; FlipSigns = false; } 9384 } 9385 9386 if (Swap) 9387 std::swap(Op0, Op1); 9388 9389 // Check that the operation in question is available (most are plain SSE2, 9390 // but PCMPGTQ and PCMPEQQ have different requirements). 9391 if (VT == MVT::v2i64) { 9392 if (Opc == X86ISD::PCMPGT && !Subtarget->hasSSE42()) { 9393 assert(Subtarget->hasSSE2() && "Don't know how to lower!"); 9394 9395 // First cast everything to the right type. 9396 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0); 9397 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1); 9398 9399 // Since SSE has no unsigned integer comparisons, we need to flip the sign 9400 // bits of the inputs before performing those operations. The lower 9401 // compare is always unsigned. 9402 SDValue SB; 9403 if (FlipSigns) { 9404 SB = DAG.getConstant(0x80000000U, MVT::v4i32); 9405 } else { 9406 SDValue Sign = DAG.getConstant(0x80000000U, MVT::i32); 9407 SDValue Zero = DAG.getConstant(0x00000000U, MVT::i32); 9408 SB = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, 9409 Sign, Zero, Sign, Zero); 9410 } 9411 Op0 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op0, SB); 9412 Op1 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op1, SB); 9413 9414 // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2)) 9415 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1); 9416 SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1); 9417 9418 // Create masks for only the low parts/high parts of the 64 bit integers. 
9419 static const int MaskHi[] = { 1, 1, 3, 3 }; 9420 static const int MaskLo[] = { 0, 0, 2, 2 }; 9421 SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi); 9422 SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo); 9423 SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi); 9424 9425 SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo); 9426 Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi); 9427 9428 if (Invert) 9429 Result = DAG.getNOT(dl, Result, MVT::v4i32); 9430 9431 return DAG.getNode(ISD::BITCAST, dl, VT, Result); 9432 } 9433 9434 if (Opc == X86ISD::PCMPEQ && !Subtarget->hasSSE41()) { 9435 // If pcmpeqq is missing but pcmpeqd is available synthesize pcmpeqq with 9436 // pcmpeqd + pshufd + pand. 9437 assert(Subtarget->hasSSE2() && !FlipSigns && "Don't know how to lower!"); 9438 9439 // First cast everything to the right type. 9440 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0); 9441 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1); 9442 9443 // Do the compare. 9444 SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1); 9445 9446 // Make sure the lower and upper halves are both all-ones. 9447 static const int Mask[] = { 1, 0, 3, 2 }; 9448 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask); 9449 Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf); 9450 9451 if (Invert) 9452 Result = DAG.getNOT(dl, Result, MVT::v4i32); 9453 9454 return DAG.getNode(ISD::BITCAST, dl, VT, Result); 9455 } 9456 } 9457 9458 // Since SSE has no unsigned integer comparisons, we need to flip the sign 9459 // bits of the inputs before performing those operations. 9460 if (FlipSigns) { 9461 EVT EltVT = VT.getVectorElementType(); 9462 SDValue SB = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), VT); 9463 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SB); 9464 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SB); 9465 } 9466 9467 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 9468 9469 // If the logical-not of the result is required, perform that now. 9470 if (Invert) 9471 Result = DAG.getNOT(dl, Result, VT); 9472 9473 if (MinMax) 9474 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result); 9475 9476 return Result; 9477} 9478 9479SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { 9480 9481 MVT VT = Op.getValueType().getSimpleVT(); 9482 9483 if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG); 9484 9485 assert(VT == MVT::i8 && "SetCC type must be 8-bit integer"); 9486 SDValue Op0 = Op.getOperand(0); 9487 SDValue Op1 = Op.getOperand(1); 9488 SDLoc dl(Op); 9489 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 9490 9491 // Optimize to BT if possible. 9492 // Lower (X & (1 << N)) == 0 to BT(X, N). 9493 // Lower ((X >>u N) & 1) != 0 to BT(X, N). 9494 // Lower ((X >>s N) & 1) != 0 to BT(X, N). 9495 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() && 9496 Op1.getOpcode() == ISD::Constant && 9497 cast<ConstantSDNode>(Op1)->isNullValue() && 9498 (CC == ISD::SETEQ || CC == ISD::SETNE)) { 9499 SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG); 9500 if (NewSetCC.getNode()) 9501 return NewSetCC; 9502 } 9503 9504 // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of 9505 // these. 
9506 if (Op1.getOpcode() == ISD::Constant && 9507 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 || 9508 cast<ConstantSDNode>(Op1)->isNullValue()) && 9509 (CC == ISD::SETEQ || CC == ISD::SETNE)) { 9510 9511 // If the input is a setcc, then reuse the input setcc or use a new one with 9512 // the inverted condition. 9513 if (Op0.getOpcode() == X86ISD::SETCC) { 9514 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0); 9515 bool Invert = (CC == ISD::SETNE) ^ 9516 cast<ConstantSDNode>(Op1)->isNullValue(); 9517 if (!Invert) return Op0; 9518 9519 CCode = X86::GetOppositeBranchCondition(CCode); 9520 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 9521 DAG.getConstant(CCode, MVT::i8), Op0.getOperand(1)); 9522 } 9523 } 9524 9525 bool isFP = Op1.getValueType().getSimpleVT().isFloatingPoint(); 9526 unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG); 9527 if (X86CC == X86::COND_INVALID) 9528 return SDValue(); 9529 9530 SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, DAG); 9531 EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG); 9532 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 9533 DAG.getConstant(X86CC, MVT::i8), EFLAGS); 9534} 9535 9536// isX86LogicalCmp - Return true if opcode is a X86 logical comparison. 9537static bool isX86LogicalCmp(SDValue Op) { 9538 unsigned Opc = Op.getNode()->getOpcode(); 9539 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI || 9540 Opc == X86ISD::SAHF) 9541 return true; 9542 if (Op.getResNo() == 1 && 9543 (Opc == X86ISD::ADD || 9544 Opc == X86ISD::SUB || 9545 Opc == X86ISD::ADC || 9546 Opc == X86ISD::SBB || 9547 Opc == X86ISD::SMUL || 9548 Opc == X86ISD::UMUL || 9549 Opc == X86ISD::INC || 9550 Opc == X86ISD::DEC || 9551 Opc == X86ISD::OR || 9552 Opc == X86ISD::XOR || 9553 Opc == X86ISD::AND)) 9554 return true; 9555 9556 if (Op.getResNo() == 2 && Opc == X86ISD::UMUL) 9557 return true; 9558 9559 return false; 9560} 9561 9562static bool isZero(SDValue V) { 9563 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V); 9564 return C && C->isNullValue(); 9565} 9566 9567static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) { 9568 if (V.getOpcode() != ISD::TRUNCATE) 9569 return false; 9570 9571 SDValue VOp0 = V.getOperand(0); 9572 unsigned InBits = VOp0.getValueSizeInBits(); 9573 unsigned Bits = V.getValueSizeInBits(); 9574 return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits)); 9575} 9576 9577SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 9578 bool addTest = true; 9579 SDValue Cond = Op.getOperand(0); 9580 SDValue Op1 = Op.getOperand(1); 9581 SDValue Op2 = Op.getOperand(2); 9582 SDLoc DL(Op); 9583 SDValue CC; 9584 9585 if (Cond.getOpcode() == ISD::SETCC) { 9586 SDValue NewCond = LowerSETCC(Cond, DAG); 9587 if (NewCond.getNode()) 9588 Cond = NewCond; 9589 } 9590 9591 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y 9592 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y 9593 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y 9594 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y 9595 if (Cond.getOpcode() == X86ISD::SETCC && 9596 Cond.getOperand(1).getOpcode() == X86ISD::CMP && 9597 isZero(Cond.getOperand(1).getOperand(1))) { 9598 SDValue Cmp = Cond.getOperand(1); 9599 9600 unsigned CondCode =cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue(); 9601 9602 if ((isAllOnes(Op1) || isAllOnes(Op2)) && 9603 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) { 9604 SDValue Y = isAllOnes(Op2) ? 
Op1 : Op2; 9605 9606 SDValue CmpOp0 = Cmp.getOperand(0); 9607 // Apply further optimizations for special cases 9608 // (select (x != 0), -1, 0) -> neg & sbb 9609 // (select (x == 0), 0, -1) -> neg & sbb 9610 if (ConstantSDNode *YC = dyn_cast<ConstantSDNode>(Y)) 9611 if (YC->isNullValue() && 9612 (isAllOnes(Op1) == (CondCode == X86::COND_NE))) { 9613 SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32); 9614 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, VTs, 9615 DAG.getConstant(0, CmpOp0.getValueType()), 9616 CmpOp0); 9617 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(), 9618 DAG.getConstant(X86::COND_B, MVT::i8), 9619 SDValue(Neg.getNode(), 1)); 9620 return Res; 9621 } 9622 9623 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, 9624 CmpOp0, DAG.getConstant(1, CmpOp0.getValueType())); 9625 Cmp = ConvertCmpIfNecessary(Cmp, DAG); 9626 9627 SDValue Res = // Res = 0 or -1. 9628 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(), 9629 DAG.getConstant(X86::COND_B, MVT::i8), Cmp); 9630 9631 if (isAllOnes(Op1) != (CondCode == X86::COND_E)) 9632 Res = DAG.getNOT(DL, Res, Res.getValueType()); 9633 9634 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2); 9635 if (N2C == 0 || !N2C->isNullValue()) 9636 Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y); 9637 return Res; 9638 } 9639 } 9640 9641 // Look past (and (setcc_carry (cmp ...)), 1). 9642 if (Cond.getOpcode() == ISD::AND && 9643 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) { 9644 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 9645 if (C && C->getAPIntValue() == 1) 9646 Cond = Cond.getOperand(0); 9647 } 9648 9649 // If condition flag is set by a X86ISD::CMP, then use it as the condition 9650 // setting operand in place of the X86ISD::SETCC. 9651 unsigned CondOpcode = Cond.getOpcode(); 9652 if (CondOpcode == X86ISD::SETCC || 9653 CondOpcode == X86ISD::SETCC_CARRY) { 9654 CC = Cond.getOperand(0); 9655 9656 SDValue Cmp = Cond.getOperand(1); 9657 unsigned Opc = Cmp.getOpcode(); 9658 MVT VT = Op.getValueType().getSimpleVT(); 9659 9660 bool IllegalFPCMov = false; 9661 if (VT.isFloatingPoint() && !VT.isVector() && 9662 !isScalarFPTypeInSSEReg(VT)) // FPStack? 
9663 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue()); 9664 9665 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) || 9666 Opc == X86ISD::BT) { // FIXME 9667 Cond = Cmp; 9668 addTest = false; 9669 } 9670 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO || 9671 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO || 9672 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) && 9673 Cond.getOperand(0).getValueType() != MVT::i8)) { 9674 SDValue LHS = Cond.getOperand(0); 9675 SDValue RHS = Cond.getOperand(1); 9676 unsigned X86Opcode; 9677 unsigned X86Cond; 9678 SDVTList VTs; 9679 switch (CondOpcode) { 9680 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break; 9681 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break; 9682 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break; 9683 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break; 9684 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break; 9685 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break; 9686 default: llvm_unreachable("unexpected overflowing operator"); 9687 } 9688 if (CondOpcode == ISD::UMULO) 9689 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(), 9690 MVT::i32); 9691 else 9692 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32); 9693 9694 SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS); 9695 9696 if (CondOpcode == ISD::UMULO) 9697 Cond = X86Op.getValue(2); 9698 else 9699 Cond = X86Op.getValue(1); 9700 9701 CC = DAG.getConstant(X86Cond, MVT::i8); 9702 addTest = false; 9703 } 9704 9705 if (addTest) { 9706 // Look past the truncate if the high bits are known zero. 9707 if (isTruncWithZeroHighBitsInput(Cond, DAG)) 9708 Cond = Cond.getOperand(0); 9709 9710 // We know the result of AND is compared against zero. Try to match 9711 // it to BT. 9712 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) { 9713 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG); 9714 if (NewSetCC.getNode()) { 9715 CC = NewSetCC.getOperand(0); 9716 Cond = NewSetCC.getOperand(1); 9717 addTest = false; 9718 } 9719 } 9720 } 9721 9722 if (addTest) { 9723 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 9724 Cond = EmitTest(Cond, X86::COND_NE, DAG); 9725 } 9726 9727 // a < b ? -1 : 0 -> RES = ~setcc_carry 9728 // a < b ? 0 : -1 -> RES = setcc_carry 9729 // a >= b ? -1 : 0 -> RES = setcc_carry 9730 // a >= b ? 0 : -1 -> RES = ~setcc_carry 9731 if (Cond.getOpcode() == X86ISD::SUB) { 9732 Cond = ConvertCmpIfNecessary(Cond, DAG); 9733 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue(); 9734 9735 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) && 9736 (isAllOnes(Op1) || isAllOnes(Op2)) && (isZero(Op1) || isZero(Op2))) { 9737 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(), 9738 DAG.getConstant(X86::COND_B, MVT::i8), Cond); 9739 if (isAllOnes(Op1) != (CondCode == X86::COND_B)) 9740 return DAG.getNOT(DL, Res, Res.getValueType()); 9741 return Res; 9742 } 9743 } 9744 9745 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate, 9746 // widen the cmov and push the truncate through. This avoids introducing a new 9747 // branch during isel and doesn't add any extensions.
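// For example, (i8 select %cc, (trunc %x), (trunc %y)) with i32 inputs
// becomes (i8 trunc (i32 X86cmov %y, %x, %cc, %flags)): one 32-bit cmov
// instead of an i8 branch sequence.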
9748 if (Op.getValueType() == MVT::i8 && 9749 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) { 9750 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0); 9751 if (T1.getValueType() == T2.getValueType() && 9752 // Blacklist CopyFromReg to avoid partial register stalls. 9753 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){ 9754 SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue); 9755 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VTs, T2, T1, CC, Cond); 9756 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov); 9757 } 9758 } 9759 9760 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if 9761 // condition is true. 9762 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue); 9763 SDValue Ops[] = { Op2, Op1, CC, Cond }; 9764 return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops, array_lengthof(Ops)); 9765} 9766 9767 SDValue X86TargetLowering::LowerSIGN_EXTEND(SDValue Op, 9768 SelectionDAG &DAG) const { 9769 MVT VT = Op->getValueType(0).getSimpleVT(); 9770 SDValue In = Op->getOperand(0); 9771 MVT InVT = In.getValueType().getSimpleVT(); 9772 SDLoc dl(Op); 9773 9774 if ((VT != MVT::v4i64 || InVT != MVT::v4i32) && 9775 (VT != MVT::v8i32 || InVT != MVT::v8i16)) 9776 return SDValue(); 9777 9778 if (Subtarget->hasInt256()) 9779 return DAG.getNode(X86ISD::VSEXT_MOVL, dl, VT, In); 9780 9781 // Optimize vectors in AVX mode: 9782 // sign extend v8i16 to v8i32 and 9783 // v4i32 to v4i64. 9784 // 9785 // Divide the input vector into two parts; 9786 // for v4i32 the shuffle masks will be { 0, 1, -1, -1 } and { 2, 3, -1, -1 }, 9787 // use the vpmovsx instruction to extend v4i32 -> v2i64 / v8i16 -> v4i32, 9788 // then concatenate the vectors to the original VT. 9789 9790 unsigned NumElems = InVT.getVectorNumElements(); 9791 SDValue Undef = DAG.getUNDEF(InVT); 9792 9793 SmallVector<int,8> ShufMask1(NumElems, -1); 9794 for (unsigned i = 0; i != NumElems/2; ++i) 9795 ShufMask1[i] = i; 9796 9797 SDValue OpLo = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask1[0]); 9798 9799 SmallVector<int,8> ShufMask2(NumElems, -1); 9800 for (unsigned i = 0; i != NumElems/2; ++i) 9801 ShufMask2[i] = i + NumElems/2; 9802 9803 SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask2[0]); 9804 9805 MVT HalfVT = MVT::getVectorVT(VT.getScalarType(), 9806 VT.getVectorNumElements()/2); 9807 9808 OpLo = DAG.getNode(X86ISD::VSEXT_MOVL, dl, HalfVT, OpLo); 9809 OpHi = DAG.getNode(X86ISD::VSEXT_MOVL, dl, HalfVT, OpHi); 9810 9811 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi); 9812} 9813 9814 // isAndOrOfSetCCs - Return true if node is an ISD::AND or 9815 // ISD::OR of two X86ISD::SETCC nodes each of which has no other use apart 9816 // from the AND / OR. 9817 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) { 9818 Opc = Op.getOpcode(); 9819 if (Opc != ISD::OR && Opc != ISD::AND) 9820 return false; 9821 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC && 9822 Op.getOperand(0).hasOneUse() && 9823 Op.getOperand(1).getOpcode() == X86ISD::SETCC && 9824 Op.getOperand(1).hasOneUse()); 9825} 9826 9827 // isXor1OfSetCC - Return true if node is an ISD::XOR of an X86ISD::SETCC and 9828 // 1 and that the SETCC node has a single use.
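// E.g. (brcond (xor (X86setcc %cc, %flags), 1), %bb): rather than
// materializing the xor, LowerBRCOND below just branches on the opposite
// condition.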
9829 static bool isXor1OfSetCC(SDValue Op) { 9830 if (Op.getOpcode() != ISD::XOR) 9831 return false; 9832 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 9833 if (N1C && N1C->getAPIntValue() == 1) { 9834 return Op.getOperand(0).getOpcode() == X86ISD::SETCC && 9835 Op.getOperand(0).hasOneUse(); 9836 } 9837 return false; 9838} 9839 9840 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { 9841 bool addTest = true; 9842 SDValue Chain = Op.getOperand(0); 9843 SDValue Cond = Op.getOperand(1); 9844 SDValue Dest = Op.getOperand(2); 9845 SDLoc dl(Op); 9846 SDValue CC; 9847 bool Inverted = false; 9848 9849 if (Cond.getOpcode() == ISD::SETCC) { 9850 // Check for setcc([su]{add,sub,mul}o == 0). 9851 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ && 9852 isa<ConstantSDNode>(Cond.getOperand(1)) && 9853 cast<ConstantSDNode>(Cond.getOperand(1))->isNullValue() && 9854 Cond.getOperand(0).getResNo() == 1 && 9855 (Cond.getOperand(0).getOpcode() == ISD::SADDO || 9856 Cond.getOperand(0).getOpcode() == ISD::UADDO || 9857 Cond.getOperand(0).getOpcode() == ISD::SSUBO || 9858 Cond.getOperand(0).getOpcode() == ISD::USUBO || 9859 Cond.getOperand(0).getOpcode() == ISD::SMULO || 9860 Cond.getOperand(0).getOpcode() == ISD::UMULO)) { 9861 Inverted = true; 9862 Cond = Cond.getOperand(0); 9863 } else { 9864 SDValue NewCond = LowerSETCC(Cond, DAG); 9865 if (NewCond.getNode()) 9866 Cond = NewCond; 9867 } 9868 } 9869#if 0 9870 // FIXME: LowerXALUO doesn't handle these!! 9871 else if (Cond.getOpcode() == X86ISD::ADD || 9872 Cond.getOpcode() == X86ISD::SUB || 9873 Cond.getOpcode() == X86ISD::SMUL || 9874 Cond.getOpcode() == X86ISD::UMUL) 9875 Cond = LowerXALUO(Cond, DAG); 9876#endif 9877 9878 // Look past (and (setcc_carry (cmp ...)), 1). 9879 if (Cond.getOpcode() == ISD::AND && 9880 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) { 9881 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 9882 if (C && C->getAPIntValue() == 1) 9883 Cond = Cond.getOperand(0); 9884 } 9885 9886 // If the condition flag is set by an X86ISD::CMP, then use it as the condition 9887 // setting operand in place of the X86ISD::SETCC. 9888 unsigned CondOpcode = Cond.getOpcode(); 9889 if (CondOpcode == X86ISD::SETCC || 9890 CondOpcode == X86ISD::SETCC_CARRY) { 9891 CC = Cond.getOperand(0); 9892 9893 SDValue Cmp = Cond.getOperand(1); 9894 unsigned Opc = Cmp.getOpcode(); 9895 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp?? 9896 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) { 9897 Cond = Cmp; 9898 addTest = false; 9899 } else { 9900 switch (cast<ConstantSDNode>(CC)->getZExtValue()) { 9901 default: break; 9902 case X86::COND_O: 9903 case X86::COND_B: 9904 // These can only come from an arithmetic instruction with overflow, 9905 // e.g. SADDO, UADDO.
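// The SETCC's operand 1 is the EFLAGS result of that arithmetic node, so we
// can branch on it directly; e.g. an i32 SADDO check then selects to roughly
// 'addl %esi, %edi; jo <overflow>'.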
9906 Cond = Cond.getNode()->getOperand(1); 9907 addTest = false; 9908 break; 9909 } 9910 } 9911 } 9912 CondOpcode = Cond.getOpcode(); 9913 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO || 9914 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO || 9915 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) && 9916 Cond.getOperand(0).getValueType() != MVT::i8)) { 9917 SDValue LHS = Cond.getOperand(0); 9918 SDValue RHS = Cond.getOperand(1); 9919 unsigned X86Opcode; 9920 unsigned X86Cond; 9921 SDVTList VTs; 9922 switch (CondOpcode) { 9923 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break; 9924 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break; 9925 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break; 9926 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break; 9927 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break; 9928 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break; 9929 default: llvm_unreachable("unexpected overflowing operator"); 9930 } 9931 if (Inverted) 9932 X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond); 9933 if (CondOpcode == ISD::UMULO) 9934 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(), 9935 MVT::i32); 9936 else 9937 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32); 9938 9939 SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS); 9940 9941 if (CondOpcode == ISD::UMULO) 9942 Cond = X86Op.getValue(2); 9943 else 9944 Cond = X86Op.getValue(1); 9945 9946 CC = DAG.getConstant(X86Cond, MVT::i8); 9947 addTest = false; 9948 } else { 9949 unsigned CondOpc; 9950 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) { 9951 SDValue Cmp = Cond.getOperand(0).getOperand(1); 9952 if (CondOpc == ISD::OR) { 9953 // Also, recognize the pattern generated by an FCMP_UNE. We can emit 9954 // two branches instead of an explicit OR instruction with a 9955 // separate test. 9956 if (Cmp == Cond.getOperand(1).getOperand(1) && 9957 isX86LogicalCmp(Cmp)) { 9958 CC = Cond.getOperand(0).getOperand(0); 9959 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 9960 Chain, Dest, CC, Cmp); 9961 CC = Cond.getOperand(1).getOperand(0); 9962 Cond = Cmp; 9963 addTest = false; 9964 } 9965 } else { // ISD::AND 9966 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit 9967 // two branches instead of an explicit AND instruction with a 9968 // separate test. However, we only do this if this block doesn't 9969 // have a fall-through edge, because this requires an explicit 9970 // jmp when the condition is false. 9971 if (Cmp == Cond.getOperand(1).getOperand(1) && 9972 isX86LogicalCmp(Cmp) && 9973 Op.getNode()->hasOneUse()) { 9974 X86::CondCode CCode = 9975 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0); 9976 CCode = X86::GetOppositeBranchCondition(CCode); 9977 CC = DAG.getConstant(CCode, MVT::i8); 9978 SDNode *User = *Op.getNode()->use_begin(); 9979 // Look for an unconditional branch following this conditional branch. 9980 // We need this because we need to reverse the successors in order 9981 // to implement FCMP_OEQ. 
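// E.g. FCMP_OEQ arrives here as an AND of two setccs (sete and setnp);
// inverting both conditions and pointing them at the false block yields
// roughly 'jne false_bb; jp false_bb; jmp true_bb'.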
9982 if (User->getOpcode() == ISD::BR) { 9983 SDValue FalseBB = User->getOperand(1); 9984 SDNode *NewBR = 9985 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest); 9986 assert(NewBR == User); 9987 (void)NewBR; 9988 Dest = FalseBB; 9989 9990 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 9991 Chain, Dest, CC, Cmp); 9992 X86::CondCode CCode = 9993 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0); 9994 CCode = X86::GetOppositeBranchCondition(CCode); 9995 CC = DAG.getConstant(CCode, MVT::i8); 9996 Cond = Cmp; 9997 addTest = false; 9998 } 9999 } 10000 } 10001 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) { 10002 // Recognize the xorb (setcc), 1 pattern. The xor inverts the condition. 10003 // It should be transformed during dag combiner except when the condition 10004 // is set by an arithmetic-with-overflow node. 10005 X86::CondCode CCode = 10006 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0); 10007 CCode = X86::GetOppositeBranchCondition(CCode); 10008 CC = DAG.getConstant(CCode, MVT::i8); 10009 Cond = Cond.getOperand(0).getOperand(1); 10010 addTest = false; 10011 } else if (Cond.getOpcode() == ISD::SETCC && 10012 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) { 10013 // For FCMP_OEQ, we can emit 10014 // two branches instead of an explicit AND instruction with a 10015 // separate test. However, we only do this if this block doesn't 10016 // have a fall-through edge, because this requires an explicit 10017 // jmp when the condition is false. 10018 if (Op.getNode()->hasOneUse()) { 10019 SDNode *User = *Op.getNode()->use_begin(); 10020 // Look for an unconditional branch following this conditional branch. 10021 // We need this because we need to reverse the successors in order 10022 // to implement FCMP_OEQ. 10023 if (User->getOpcode() == ISD::BR) { 10024 SDValue FalseBB = User->getOperand(1); 10025 SDNode *NewBR = 10026 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest); 10027 assert(NewBR == User); 10028 (void)NewBR; 10029 Dest = FalseBB; 10030 10031 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32, 10032 Cond.getOperand(0), Cond.getOperand(1)); 10033 Cmp = ConvertCmpIfNecessary(Cmp, DAG); 10034 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 10035 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 10036 Chain, Dest, CC, Cmp); 10037 CC = DAG.getConstant(X86::COND_P, MVT::i8); 10038 Cond = Cmp; 10039 addTest = false; 10040 } 10041 } 10042 } else if (Cond.getOpcode() == ISD::SETCC && 10043 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) { 10044 // For FCMP_UNE, we can emit 10045 // two branches instead of an explicit OR instruction with a 10046 // separate test. However, we only do this if this block doesn't 10047 // have a fall-through edge, because this requires an explicit 10048 // jmp when the condition is false. 10049 if (Op.getNode()->hasOneUse()) { 10050 SDNode *User = *Op.getNode()->use_begin(); 10051 // Look for an unconditional branch following this conditional branch. 10052 // We need this because we need to reverse the successors in order 10053 // to implement FCMP_UNE.
10054 if (User->getOpcode() == ISD::BR) { 10055 SDValue FalseBB = User->getOperand(1); 10056 SDNode *NewBR = 10057 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest); 10058 assert(NewBR == User); 10059 (void)NewBR; 10060 10061 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32, 10062 Cond.getOperand(0), Cond.getOperand(1)); 10063 Cmp = ConvertCmpIfNecessary(Cmp, DAG); 10064 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 10065 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 10066 Chain, Dest, CC, Cmp); 10067 CC = DAG.getConstant(X86::COND_NP, MVT::i8); 10068 Cond = Cmp; 10069 addTest = false; 10070 Dest = FalseBB; 10071 } 10072 } 10073 } 10074 } 10075 10076 if (addTest) { 10077 // Look past the truncate if the high bits are known zero. 10078 if (isTruncWithZeroHighBitsInput(Cond, DAG)) 10079 Cond = Cond.getOperand(0); 10080 10081 // We know the result of AND is compared against zero. Try to match 10082 // it to BT. 10083 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) { 10084 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG); 10085 if (NewSetCC.getNode()) { 10086 CC = NewSetCC.getOperand(0); 10087 Cond = NewSetCC.getOperand(1); 10088 addTest = false; 10089 } 10090 } 10091 } 10092 10093 if (addTest) { 10094 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 10095 Cond = EmitTest(Cond, X86::COND_NE, DAG); 10096 } 10097 Cond = ConvertCmpIfNecessary(Cond, DAG); 10098 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 10099 Chain, Dest, CC, Cond); 10100} 10101 10102 // Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets. 10103 // A call to _alloca is needed to probe the stack when allocating more than 4k 10104 // bytes in one go. Touching the stack at 4K increments is necessary to ensure 10105 // that the guard pages used by the OS virtual memory manager are allocated in 10106 // the correct sequence. 10107 SDValue 10108 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, 10109 SelectionDAG &DAG) const { 10110 assert((Subtarget->isTargetCygMing() || Subtarget->isTargetWindows() || 10111 getTargetMachine().Options.EnableSegmentedStacks) && 10112 "This should be used only on Windows targets or when segmented stacks " 10113 "are being used"); 10114 assert(!Subtarget->isTargetEnvMacho() && "Not implemented"); 10115 SDLoc dl(Op); 10116 10117 // Get the inputs. 10118 SDValue Chain = Op.getOperand(0); 10119 SDValue Size = Op.getOperand(1); 10120 // FIXME: Ensure alignment here 10121 10122 bool Is64Bit = Subtarget->is64Bit(); 10123 EVT SPTy = Is64Bit ? MVT::i64 : MVT::i32; 10124 10125 if (getTargetMachine().Options.EnableSegmentedStacks) { 10126 MachineFunction &MF = DAG.getMachineFunction(); 10127 MachineRegisterInfo &MRI = MF.getRegInfo(); 10128 10129 if (Is64Bit) { 10130 // The 64 bit implementation of segmented stacks needs to clobber both r10 10131 // and r11. This makes it impossible to use it along with nested parameters. 10132 const Function *F = MF.getFunction(); 10133 10134 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end(); 10135 I != E; ++I) 10136 if (I->hasNestAttr()) 10137 report_fatal_error("Cannot use segmented stacks with functions that " 10138 "have nested arguments."); 10139 } 10140 10141 const TargetRegisterClass *AddrRegClass = 10142 getRegClassFor(Subtarget->is64Bit() ?
MVT::i64:MVT::i32); 10143 unsigned Vreg = MRI.createVirtualRegister(AddrRegClass); 10144 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size); 10145 SDValue Value = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain, 10146 DAG.getRegister(Vreg, SPTy)); 10147 SDValue Ops1[2] = { Value, Chain }; 10148 return DAG.getMergeValues(Ops1, 2, dl); 10149 } else { 10150 SDValue Flag; 10151 unsigned Reg = (Subtarget->is64Bit() ? X86::RAX : X86::EAX); 10152 10153 Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag); 10154 Flag = Chain.getValue(1); 10155 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 10156 10157 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag); 10158 Flag = Chain.getValue(1); 10159 10160 const X86RegisterInfo *RegInfo = 10161 static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo()); 10162 Chain = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(), 10163 SPTy).getValue(1); 10164 10165 SDValue Ops1[2] = { Chain.getValue(0), Chain }; 10166 return DAG.getMergeValues(Ops1, 2, dl); 10167 } 10168} 10169 10170SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { 10171 MachineFunction &MF = DAG.getMachineFunction(); 10172 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 10173 10174 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 10175 SDLoc DL(Op); 10176 10177 if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) { 10178 // vastart just stores the address of the VarArgsFrameIndex slot into the 10179 // memory location argument. 10180 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 10181 getPointerTy()); 10182 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1), 10183 MachinePointerInfo(SV), false, false, 0); 10184 } 10185 10186 // __va_list_tag: 10187 // gp_offset (0 - 6 * 8) 10188 // fp_offset (48 - 48 + 8 * 16) 10189 // overflow_arg_area (point to parameters coming in memory). 10190 // reg_save_area 10191 SmallVector<SDValue, 8> MemOps; 10192 SDValue FIN = Op.getOperand(1); 10193 // Store gp_offset 10194 SDValue Store = DAG.getStore(Op.getOperand(0), DL, 10195 DAG.getConstant(FuncInfo->getVarArgsGPOffset(), 10196 MVT::i32), 10197 FIN, MachinePointerInfo(SV), false, false, 0); 10198 MemOps.push_back(Store); 10199 10200 // Store fp_offset 10201 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), 10202 FIN, DAG.getIntPtrConstant(4)); 10203 Store = DAG.getStore(Op.getOperand(0), DL, 10204 DAG.getConstant(FuncInfo->getVarArgsFPOffset(), 10205 MVT::i32), 10206 FIN, MachinePointerInfo(SV, 4), false, false, 0); 10207 MemOps.push_back(Store); 10208 10209 // Store ptr to overflow_arg_area 10210 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), 10211 FIN, DAG.getIntPtrConstant(4)); 10212 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 10213 getPointerTy()); 10214 Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, 10215 MachinePointerInfo(SV, 8), 10216 false, false, 0); 10217 MemOps.push_back(Store); 10218 10219 // Store ptr to reg_save_area. 
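// reg_save_area is the last field of __va_list_tag: gp_offset at offset 0,
// fp_offset at 4, overflow_arg_area at 8, and reg_save_area at 16 (hence the
// +8 step past the i8* stored above).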
10220 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), 10221 FIN, DAG.getIntPtrConstant(8)); 10222 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), 10223 getPointerTy()); 10224 Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN, 10225 MachinePointerInfo(SV, 16), false, false, 0); 10226 MemOps.push_back(Store); 10227 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, 10228 &MemOps[0], MemOps.size()); 10229} 10230 10231SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { 10232 assert(Subtarget->is64Bit() && 10233 "LowerVAARG only handles 64-bit va_arg!"); 10234 assert((Subtarget->isTargetLinux() || 10235 Subtarget->isTargetDarwin()) && 10236 "Unhandled target in LowerVAARG"); 10237 assert(Op.getNode()->getNumOperands() == 4); 10238 SDValue Chain = Op.getOperand(0); 10239 SDValue SrcPtr = Op.getOperand(1); 10240 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 10241 unsigned Align = Op.getConstantOperandVal(3); 10242 SDLoc dl(Op); 10243 10244 EVT ArgVT = Op.getNode()->getValueType(0); 10245 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 10246 uint32_t ArgSize = getDataLayout()->getTypeAllocSize(ArgTy); 10247 uint8_t ArgMode; 10248 10249 // Decide which area this value should be read from. 10250 // TODO: Implement the AMD64 ABI in its entirety. This simple 10251 // selection mechanism works only for the basic types. 10252 if (ArgVT == MVT::f80) { 10253 llvm_unreachable("va_arg for f80 not yet implemented"); 10254 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) { 10255 ArgMode = 2; // Argument passed in XMM register. Use fp_offset. 10256 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) { 10257 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset. 10258 } else { 10259 llvm_unreachable("Unhandled argument type in LowerVAARG"); 10260 } 10261 10262 if (ArgMode == 2) { 10263 // Sanity Check: Make sure using fp_offset makes sense. 10264 assert(!getTargetMachine().Options.UseSoftFloat && 10265 !(DAG.getMachineFunction() 10266 .getFunction()->getAttributes() 10267 .hasAttribute(AttributeSet::FunctionIndex, 10268 Attribute::NoImplicitFloat)) && 10269 Subtarget->hasSSE1()); 10270 } 10271 10272 // Insert VAARG_64 node into the DAG 10273 // VAARG_64 returns two values: Variable Argument Address, Chain 10274 SmallVector<SDValue, 11> InstOps; 10275 InstOps.push_back(Chain); 10276 InstOps.push_back(SrcPtr); 10277 InstOps.push_back(DAG.getConstant(ArgSize, MVT::i32)); 10278 InstOps.push_back(DAG.getConstant(ArgMode, MVT::i8)); 10279 InstOps.push_back(DAG.getConstant(Align, MVT::i32)); 10280 SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other); 10281 SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl, 10282 VTs, &InstOps[0], InstOps.size(), 10283 MVT::i64, 10284 MachinePointerInfo(SV), 10285 /*Align=*/0, 10286 /*Volatile=*/false, 10287 /*ReadMem=*/true, 10288 /*WriteMem=*/true); 10289 Chain = VAARG.getValue(1); 10290 10291 // Load the next argument and return it 10292 return DAG.getLoad(ArgVT, dl, 10293 Chain, 10294 VAARG, 10295 MachinePointerInfo(), 10296 false, false, false, 0); 10297} 10298 10299static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget, 10300 SelectionDAG &DAG) { 10301 // X86-64 va_list is a struct { i32, i32, i8*, i8* }. 
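// That is 24 bytes with 8-byte alignment, so va_copy reduces to the
// fixed-size memcpy emitted below.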
10302 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!"); 10303 SDValue Chain = Op.getOperand(0); 10304 SDValue DstPtr = Op.getOperand(1); 10305 SDValue SrcPtr = Op.getOperand(2); 10306 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); 10307 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 10308 SDLoc DL(Op); 10309 10310 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, 10311 DAG.getIntPtrConstant(24), 8, /*isVolatile*/false, 10312 false, 10313 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV)); 10314} 10315 10316// getTargetVShiftNode - Handle vector element shifts where the shift amount 10317// may or may not be a constant. Takes immediate version of shift as input. 10318static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, EVT VT, 10319 SDValue SrcOp, SDValue ShAmt, 10320 SelectionDAG &DAG) { 10321 assert(ShAmt.getValueType() == MVT::i32 && "ShAmt is not i32"); 10322 10323 if (isa<ConstantSDNode>(ShAmt)) { 10324 // Constant may be a TargetConstant. Use a regular constant. 10325 uint32_t ShiftAmt = cast<ConstantSDNode>(ShAmt)->getZExtValue(); 10326 switch (Opc) { 10327 default: llvm_unreachable("Unknown target vector shift node"); 10328 case X86ISD::VSHLI: 10329 case X86ISD::VSRLI: 10330 case X86ISD::VSRAI: 10331 return DAG.getNode(Opc, dl, VT, SrcOp, 10332 DAG.getConstant(ShiftAmt, MVT::i32)); 10333 } 10334 } 10335 10336 // Change opcode to non-immediate version 10337 switch (Opc) { 10338 default: llvm_unreachable("Unknown target vector shift node"); 10339 case X86ISD::VSHLI: Opc = X86ISD::VSHL; break; 10340 case X86ISD::VSRLI: Opc = X86ISD::VSRL; break; 10341 case X86ISD::VSRAI: Opc = X86ISD::VSRA; break; 10342 } 10343 10344 // Need to build a vector containing shift amount 10345 // Shift amount is 32-bits, but SSE instructions read 64-bit, so fill with 0 10346 SDValue ShOps[4]; 10347 ShOps[0] = ShAmt; 10348 ShOps[1] = DAG.getConstant(0, MVT::i32); 10349 ShOps[2] = ShOps[3] = DAG.getUNDEF(MVT::i32); 10350 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, &ShOps[0], 4); 10351 10352 // The return type has to be a 128-bit type with the same element 10353 // type as the input type. 10354 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 10355 EVT ShVT = MVT::getVectorVT(EltVT, 128/EltVT.getSizeInBits()); 10356 10357 ShAmt = DAG.getNode(ISD::BITCAST, dl, ShVT, ShAmt); 10358 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt); 10359} 10360 10361static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) { 10362 SDLoc dl(Op); 10363 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 10364 switch (IntNo) { 10365 default: return SDValue(); // Don't custom lower most intrinsics. 10366 // Comparison intrinsics. 
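// These scalar comparisons set EFLAGS and return an i32, so e.g. a comieq
// intrinsic lowers roughly to (zext (X86setcc cc, (X86comi %a, %b))) with cc
// chosen by TranslateX86CC below.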
10367 case Intrinsic::x86_sse_comieq_ss: 10368 case Intrinsic::x86_sse_comilt_ss: 10369 case Intrinsic::x86_sse_comile_ss: 10370 case Intrinsic::x86_sse_comigt_ss: 10371 case Intrinsic::x86_sse_comige_ss: 10372 case Intrinsic::x86_sse_comineq_ss: 10373 case Intrinsic::x86_sse_ucomieq_ss: 10374 case Intrinsic::x86_sse_ucomilt_ss: 10375 case Intrinsic::x86_sse_ucomile_ss: 10376 case Intrinsic::x86_sse_ucomigt_ss: 10377 case Intrinsic::x86_sse_ucomige_ss: 10378 case Intrinsic::x86_sse_ucomineq_ss: 10379 case Intrinsic::x86_sse2_comieq_sd: 10380 case Intrinsic::x86_sse2_comilt_sd: 10381 case Intrinsic::x86_sse2_comile_sd: 10382 case Intrinsic::x86_sse2_comigt_sd: 10383 case Intrinsic::x86_sse2_comige_sd: 10384 case Intrinsic::x86_sse2_comineq_sd: 10385 case Intrinsic::x86_sse2_ucomieq_sd: 10386 case Intrinsic::x86_sse2_ucomilt_sd: 10387 case Intrinsic::x86_sse2_ucomile_sd: 10388 case Intrinsic::x86_sse2_ucomigt_sd: 10389 case Intrinsic::x86_sse2_ucomige_sd: 10390 case Intrinsic::x86_sse2_ucomineq_sd: { 10391 unsigned Opc; 10392 ISD::CondCode CC; 10393 switch (IntNo) { 10394 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 10395 case Intrinsic::x86_sse_comieq_ss: 10396 case Intrinsic::x86_sse2_comieq_sd: 10397 Opc = X86ISD::COMI; 10398 CC = ISD::SETEQ; 10399 break; 10400 case Intrinsic::x86_sse_comilt_ss: 10401 case Intrinsic::x86_sse2_comilt_sd: 10402 Opc = X86ISD::COMI; 10403 CC = ISD::SETLT; 10404 break; 10405 case Intrinsic::x86_sse_comile_ss: 10406 case Intrinsic::x86_sse2_comile_sd: 10407 Opc = X86ISD::COMI; 10408 CC = ISD::SETLE; 10409 break; 10410 case Intrinsic::x86_sse_comigt_ss: 10411 case Intrinsic::x86_sse2_comigt_sd: 10412 Opc = X86ISD::COMI; 10413 CC = ISD::SETGT; 10414 break; 10415 case Intrinsic::x86_sse_comige_ss: 10416 case Intrinsic::x86_sse2_comige_sd: 10417 Opc = X86ISD::COMI; 10418 CC = ISD::SETGE; 10419 break; 10420 case Intrinsic::x86_sse_comineq_ss: 10421 case Intrinsic::x86_sse2_comineq_sd: 10422 Opc = X86ISD::COMI; 10423 CC = ISD::SETNE; 10424 break; 10425 case Intrinsic::x86_sse_ucomieq_ss: 10426 case Intrinsic::x86_sse2_ucomieq_sd: 10427 Opc = X86ISD::UCOMI; 10428 CC = ISD::SETEQ; 10429 break; 10430 case Intrinsic::x86_sse_ucomilt_ss: 10431 case Intrinsic::x86_sse2_ucomilt_sd: 10432 Opc = X86ISD::UCOMI; 10433 CC = ISD::SETLT; 10434 break; 10435 case Intrinsic::x86_sse_ucomile_ss: 10436 case Intrinsic::x86_sse2_ucomile_sd: 10437 Opc = X86ISD::UCOMI; 10438 CC = ISD::SETLE; 10439 break; 10440 case Intrinsic::x86_sse_ucomigt_ss: 10441 case Intrinsic::x86_sse2_ucomigt_sd: 10442 Opc = X86ISD::UCOMI; 10443 CC = ISD::SETGT; 10444 break; 10445 case Intrinsic::x86_sse_ucomige_ss: 10446 case Intrinsic::x86_sse2_ucomige_sd: 10447 Opc = X86ISD::UCOMI; 10448 CC = ISD::SETGE; 10449 break; 10450 case Intrinsic::x86_sse_ucomineq_ss: 10451 case Intrinsic::x86_sse2_ucomineq_sd: 10452 Opc = X86ISD::UCOMI; 10453 CC = ISD::SETNE; 10454 break; 10455 } 10456 10457 SDValue LHS = Op.getOperand(1); 10458 SDValue RHS = Op.getOperand(2); 10459 unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG); 10460 assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!"); 10461 SDValue Cond = DAG.getNode(Opc, dl, MVT::i32, LHS, RHS); 10462 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 10463 DAG.getConstant(X86CC, MVT::i8), Cond); 10464 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); 10465 } 10466 10467 // Arithmetic intrinsics. 
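// PMULUDQ multiplies the even-indexed unsigned 32-bit elements into full
// 64-bit products; e.g. two v4i32 inputs yield a v2i64 result from lanes 0
// and 2.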
10468 case Intrinsic::x86_sse2_pmulu_dq: 10469 case Intrinsic::x86_avx2_pmulu_dq: 10470 return DAG.getNode(X86ISD::PMULUDQ, dl, Op.getValueType(), 10471 Op.getOperand(1), Op.getOperand(2)); 10472 10473 // SSE2/AVX2 sub with unsigned saturation intrinsics 10474 case Intrinsic::x86_sse2_psubus_b: 10475 case Intrinsic::x86_sse2_psubus_w: 10476 case Intrinsic::x86_avx2_psubus_b: 10477 case Intrinsic::x86_avx2_psubus_w: 10478 return DAG.getNode(X86ISD::SUBUS, dl, Op.getValueType(), 10479 Op.getOperand(1), Op.getOperand(2)); 10480 10481 // SSE3/AVX horizontal add/sub intrinsics 10482 case Intrinsic::x86_sse3_hadd_ps: 10483 case Intrinsic::x86_sse3_hadd_pd: 10484 case Intrinsic::x86_avx_hadd_ps_256: 10485 case Intrinsic::x86_avx_hadd_pd_256: 10486 case Intrinsic::x86_sse3_hsub_ps: 10487 case Intrinsic::x86_sse3_hsub_pd: 10488 case Intrinsic::x86_avx_hsub_ps_256: 10489 case Intrinsic::x86_avx_hsub_pd_256: 10490 case Intrinsic::x86_ssse3_phadd_w_128: 10491 case Intrinsic::x86_ssse3_phadd_d_128: 10492 case Intrinsic::x86_avx2_phadd_w: 10493 case Intrinsic::x86_avx2_phadd_d: 10494 case Intrinsic::x86_ssse3_phsub_w_128: 10495 case Intrinsic::x86_ssse3_phsub_d_128: 10496 case Intrinsic::x86_avx2_phsub_w: 10497 case Intrinsic::x86_avx2_phsub_d: { 10498 unsigned Opcode; 10499 switch (IntNo) { 10500 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 10501 case Intrinsic::x86_sse3_hadd_ps: 10502 case Intrinsic::x86_sse3_hadd_pd: 10503 case Intrinsic::x86_avx_hadd_ps_256: 10504 case Intrinsic::x86_avx_hadd_pd_256: 10505 Opcode = X86ISD::FHADD; 10506 break; 10507 case Intrinsic::x86_sse3_hsub_ps: 10508 case Intrinsic::x86_sse3_hsub_pd: 10509 case Intrinsic::x86_avx_hsub_ps_256: 10510 case Intrinsic::x86_avx_hsub_pd_256: 10511 Opcode = X86ISD::FHSUB; 10512 break; 10513 case Intrinsic::x86_ssse3_phadd_w_128: 10514 case Intrinsic::x86_ssse3_phadd_d_128: 10515 case Intrinsic::x86_avx2_phadd_w: 10516 case Intrinsic::x86_avx2_phadd_d: 10517 Opcode = X86ISD::HADD; 10518 break; 10519 case Intrinsic::x86_ssse3_phsub_w_128: 10520 case Intrinsic::x86_ssse3_phsub_d_128: 10521 case Intrinsic::x86_avx2_phsub_w: 10522 case Intrinsic::x86_avx2_phsub_d: 10523 Opcode = X86ISD::HSUB; 10524 break; 10525 } 10526 return DAG.getNode(Opcode, dl, Op.getValueType(), 10527 Op.getOperand(1), Op.getOperand(2)); 10528 } 10529 10530 // SSE2/SSE41/AVX2 integer max/min intrinsics. 10531 case Intrinsic::x86_sse2_pmaxu_b: 10532 case Intrinsic::x86_sse41_pmaxuw: 10533 case Intrinsic::x86_sse41_pmaxud: 10534 case Intrinsic::x86_avx2_pmaxu_b: 10535 case Intrinsic::x86_avx2_pmaxu_w: 10536 case Intrinsic::x86_avx2_pmaxu_d: 10537 case Intrinsic::x86_sse2_pminu_b: 10538 case Intrinsic::x86_sse41_pminuw: 10539 case Intrinsic::x86_sse41_pminud: 10540 case Intrinsic::x86_avx2_pminu_b: 10541 case Intrinsic::x86_avx2_pminu_w: 10542 case Intrinsic::x86_avx2_pminu_d: 10543 case Intrinsic::x86_sse41_pmaxsb: 10544 case Intrinsic::x86_sse2_pmaxs_w: 10545 case Intrinsic::x86_sse41_pmaxsd: 10546 case Intrinsic::x86_avx2_pmaxs_b: 10547 case Intrinsic::x86_avx2_pmaxs_w: 10548 case Intrinsic::x86_avx2_pmaxs_d: 10549 case Intrinsic::x86_sse41_pminsb: 10550 case Intrinsic::x86_sse2_pmins_w: 10551 case Intrinsic::x86_sse41_pminsd: 10552 case Intrinsic::x86_avx2_pmins_b: 10553 case Intrinsic::x86_avx2_pmins_w: 10554 case Intrinsic::x86_avx2_pmins_d: { 10555 unsigned Opcode; 10556 switch (IntNo) { 10557 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
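// Funnel the signed/unsigned min/max intrinsics into the four generic
// UMAX/UMIN/SMAX/SMIN nodes; instruction selection later picks the right
// pmax*/pmin* form for the element width.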
10558 case Intrinsic::x86_sse2_pmaxu_b: 10559 case Intrinsic::x86_sse41_pmaxuw: 10560 case Intrinsic::x86_sse41_pmaxud: 10561 case Intrinsic::x86_avx2_pmaxu_b: 10562 case Intrinsic::x86_avx2_pmaxu_w: 10563 case Intrinsic::x86_avx2_pmaxu_d: 10564 Opcode = X86ISD::UMAX; 10565 break; 10566 case Intrinsic::x86_sse2_pminu_b: 10567 case Intrinsic::x86_sse41_pminuw: 10568 case Intrinsic::x86_sse41_pminud: 10569 case Intrinsic::x86_avx2_pminu_b: 10570 case Intrinsic::x86_avx2_pminu_w: 10571 case Intrinsic::x86_avx2_pminu_d: 10572 Opcode = X86ISD::UMIN; 10573 break; 10574 case Intrinsic::x86_sse41_pmaxsb: 10575 case Intrinsic::x86_sse2_pmaxs_w: 10576 case Intrinsic::x86_sse41_pmaxsd: 10577 case Intrinsic::x86_avx2_pmaxs_b: 10578 case Intrinsic::x86_avx2_pmaxs_w: 10579 case Intrinsic::x86_avx2_pmaxs_d: 10580 Opcode = X86ISD::SMAX; 10581 break; 10582 case Intrinsic::x86_sse41_pminsb: 10583 case Intrinsic::x86_sse2_pmins_w: 10584 case Intrinsic::x86_sse41_pminsd: 10585 case Intrinsic::x86_avx2_pmins_b: 10586 case Intrinsic::x86_avx2_pmins_w: 10587 case Intrinsic::x86_avx2_pmins_d: 10588 Opcode = X86ISD::SMIN; 10589 break; 10590 } 10591 return DAG.getNode(Opcode, dl, Op.getValueType(), 10592 Op.getOperand(1), Op.getOperand(2)); 10593 } 10594 10595 // SSE/SSE2/AVX floating point max/min intrinsics. 10596 case Intrinsic::x86_sse_max_ps: 10597 case Intrinsic::x86_sse2_max_pd: 10598 case Intrinsic::x86_avx_max_ps_256: 10599 case Intrinsic::x86_avx_max_pd_256: 10600 case Intrinsic::x86_sse_min_ps: 10601 case Intrinsic::x86_sse2_min_pd: 10602 case Intrinsic::x86_avx_min_ps_256: 10603 case Intrinsic::x86_avx_min_pd_256: { 10604 unsigned Opcode; 10605 switch (IntNo) { 10606 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 10607 case Intrinsic::x86_sse_max_ps: 10608 case Intrinsic::x86_sse2_max_pd: 10609 case Intrinsic::x86_avx_max_ps_256: 10610 case Intrinsic::x86_avx_max_pd_256: 10611 Opcode = X86ISD::FMAX; 10612 break; 10613 case Intrinsic::x86_sse_min_ps: 10614 case Intrinsic::x86_sse2_min_pd: 10615 case Intrinsic::x86_avx_min_ps_256: 10616 case Intrinsic::x86_avx_min_pd_256: 10617 Opcode = X86ISD::FMIN; 10618 break; 10619 } 10620 return DAG.getNode(Opcode, dl, Op.getValueType(), 10621 Op.getOperand(1), Op.getOperand(2)); 10622 } 10623 10624 // AVX2 variable shift intrinsics 10625 case Intrinsic::x86_avx2_psllv_d: 10626 case Intrinsic::x86_avx2_psllv_q: 10627 case Intrinsic::x86_avx2_psllv_d_256: 10628 case Intrinsic::x86_avx2_psllv_q_256: 10629 case Intrinsic::x86_avx2_psrlv_d: 10630 case Intrinsic::x86_avx2_psrlv_q: 10631 case Intrinsic::x86_avx2_psrlv_d_256: 10632 case Intrinsic::x86_avx2_psrlv_q_256: 10633 case Intrinsic::x86_avx2_psrav_d: 10634 case Intrinsic::x86_avx2_psrav_d_256: { 10635 unsigned Opcode; 10636 switch (IntNo) { 10637 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
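// Unlike the legacy psll/psrl/psra forms, which use one shift amount for
// the whole vector, the VPSLLV/VPSRLV/VPSRAV family shifts each element by
// its own amount, matching the generic SHL/SRL/SRA node semantics.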
10638 case Intrinsic::x86_avx2_psllv_d: 10639 case Intrinsic::x86_avx2_psllv_q: 10640 case Intrinsic::x86_avx2_psllv_d_256: 10641 case Intrinsic::x86_avx2_psllv_q_256: 10642 Opcode = ISD::SHL; 10643 break; 10644 case Intrinsic::x86_avx2_psrlv_d: 10645 case Intrinsic::x86_avx2_psrlv_q: 10646 case Intrinsic::x86_avx2_psrlv_d_256: 10647 case Intrinsic::x86_avx2_psrlv_q_256: 10648 Opcode = ISD::SRL; 10649 break; 10650 case Intrinsic::x86_avx2_psrav_d: 10651 case Intrinsic::x86_avx2_psrav_d_256: 10652 Opcode = ISD::SRA; 10653 break; 10654 } 10655 return DAG.getNode(Opcode, dl, Op.getValueType(), 10656 Op.getOperand(1), Op.getOperand(2)); 10657 } 10658 10659 case Intrinsic::x86_ssse3_pshuf_b_128: 10660 case Intrinsic::x86_avx2_pshuf_b: 10661 return DAG.getNode(X86ISD::PSHUFB, dl, Op.getValueType(), 10662 Op.getOperand(1), Op.getOperand(2)); 10663 10664 case Intrinsic::x86_ssse3_psign_b_128: 10665 case Intrinsic::x86_ssse3_psign_w_128: 10666 case Intrinsic::x86_ssse3_psign_d_128: 10667 case Intrinsic::x86_avx2_psign_b: 10668 case Intrinsic::x86_avx2_psign_w: 10669 case Intrinsic::x86_avx2_psign_d: 10670 return DAG.getNode(X86ISD::PSIGN, dl, Op.getValueType(), 10671 Op.getOperand(1), Op.getOperand(2)); 10672 10673 case Intrinsic::x86_sse41_insertps: 10674 return DAG.getNode(X86ISD::INSERTPS, dl, Op.getValueType(), 10675 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 10676 10677 case Intrinsic::x86_avx_vperm2f128_ps_256: 10678 case Intrinsic::x86_avx_vperm2f128_pd_256: 10679 case Intrinsic::x86_avx_vperm2f128_si_256: 10680 case Intrinsic::x86_avx2_vperm2i128: 10681 return DAG.getNode(X86ISD::VPERM2X128, dl, Op.getValueType(), 10682 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 10683 10684 case Intrinsic::x86_avx2_permd: 10685 case Intrinsic::x86_avx2_permps: 10686 // Operands intentionally swapped. Mask is last operand to intrinsic, 10687 // but second operand for node/instruction. 10688 return DAG.getNode(X86ISD::VPERMV, dl, Op.getValueType(), 10689 Op.getOperand(2), Op.getOperand(1)); 10690 10691 case Intrinsic::x86_sse_sqrt_ps: 10692 case Intrinsic::x86_sse2_sqrt_pd: 10693 case Intrinsic::x86_avx_sqrt_ps_256: 10694 case Intrinsic::x86_avx_sqrt_pd_256: 10695 return DAG.getNode(ISD::FSQRT, dl, Op.getValueType(), Op.getOperand(1)); 10696 10697 // ptest and testp intrinsics. The intrinsics these come from are designed to 10698 // return an integer value, not just an instruction, so lower them to the ptest 10699 // or testp pattern and a setcc for the result.
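  // PTEST sets ZF if (LHS & RHS) == 0 and CF if (~LHS & RHS) == 0; the
  // TESTPS/TESTPD forms do the same using only each element's sign bit:
  //   *testz   -> ZF == 1            -> COND_E
  //   *testc   -> CF == 1            -> COND_B
  //   *testnzc -> ZF == 0, CF == 0   -> COND_A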
10700 case Intrinsic::x86_sse41_ptestz: 10701 case Intrinsic::x86_sse41_ptestc: 10702 case Intrinsic::x86_sse41_ptestnzc: 10703 case Intrinsic::x86_avx_ptestz_256: 10704 case Intrinsic::x86_avx_ptestc_256: 10705 case Intrinsic::x86_avx_ptestnzc_256: 10706 case Intrinsic::x86_avx_vtestz_ps: 10707 case Intrinsic::x86_avx_vtestc_ps: 10708 case Intrinsic::x86_avx_vtestnzc_ps: 10709 case Intrinsic::x86_avx_vtestz_pd: 10710 case Intrinsic::x86_avx_vtestc_pd: 10711 case Intrinsic::x86_avx_vtestnzc_pd: 10712 case Intrinsic::x86_avx_vtestz_ps_256: 10713 case Intrinsic::x86_avx_vtestc_ps_256: 10714 case Intrinsic::x86_avx_vtestnzc_ps_256: 10715 case Intrinsic::x86_avx_vtestz_pd_256: 10716 case Intrinsic::x86_avx_vtestc_pd_256: 10717 case Intrinsic::x86_avx_vtestnzc_pd_256: { 10718 bool IsTestPacked = false; 10719 unsigned X86CC; 10720 switch (IntNo) { 10721 default: llvm_unreachable("Bad fallthrough in Intrinsic lowering."); 10722 case Intrinsic::x86_avx_vtestz_ps: 10723 case Intrinsic::x86_avx_vtestz_pd: 10724 case Intrinsic::x86_avx_vtestz_ps_256: 10725 case Intrinsic::x86_avx_vtestz_pd_256: 10726 IsTestPacked = true; // Fallthrough 10727 case Intrinsic::x86_sse41_ptestz: 10728 case Intrinsic::x86_avx_ptestz_256: 10729 // ZF = 1 10730 X86CC = X86::COND_E; 10731 break; 10732 case Intrinsic::x86_avx_vtestc_ps: 10733 case Intrinsic::x86_avx_vtestc_pd: 10734 case Intrinsic::x86_avx_vtestc_ps_256: 10735 case Intrinsic::x86_avx_vtestc_pd_256: 10736 IsTestPacked = true; // Fallthrough 10737 case Intrinsic::x86_sse41_ptestc: 10738 case Intrinsic::x86_avx_ptestc_256: 10739 // CF = 1 10740 X86CC = X86::COND_B; 10741 break; 10742 case Intrinsic::x86_avx_vtestnzc_ps: 10743 case Intrinsic::x86_avx_vtestnzc_pd: 10744 case Intrinsic::x86_avx_vtestnzc_ps_256: 10745 case Intrinsic::x86_avx_vtestnzc_pd_256: 10746 IsTestPacked = true; // Fallthrough 10747 case Intrinsic::x86_sse41_ptestnzc: 10748 case Intrinsic::x86_avx_ptestnzc_256: 10749 // ZF and CF = 0 10750 X86CC = X86::COND_A; 10751 break; 10752 } 10753 10754 SDValue LHS = Op.getOperand(1); 10755 SDValue RHS = Op.getOperand(2); 10756 unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST; 10757 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS); 10758 SDValue CC = DAG.getConstant(X86CC, MVT::i8); 10759 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test); 10760 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); 10761 } 10762 10763 // SSE/AVX shift intrinsics 10764 case Intrinsic::x86_sse2_psll_w: 10765 case Intrinsic::x86_sse2_psll_d: 10766 case Intrinsic::x86_sse2_psll_q: 10767 case Intrinsic::x86_avx2_psll_w: 10768 case Intrinsic::x86_avx2_psll_d: 10769 case Intrinsic::x86_avx2_psll_q: 10770 case Intrinsic::x86_sse2_psrl_w: 10771 case Intrinsic::x86_sse2_psrl_d: 10772 case Intrinsic::x86_sse2_psrl_q: 10773 case Intrinsic::x86_avx2_psrl_w: 10774 case Intrinsic::x86_avx2_psrl_d: 10775 case Intrinsic::x86_avx2_psrl_q: 10776 case Intrinsic::x86_sse2_psra_w: 10777 case Intrinsic::x86_sse2_psra_d: 10778 case Intrinsic::x86_avx2_psra_w: 10779 case Intrinsic::x86_avx2_psra_d: { 10780 unsigned Opcode; 10781 switch (IntNo) { 10782 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
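    // Unlike the AVX2 per-element shifts above, these shift every element
    // by the single scalar count held in the low 64 bits of the second
    // vector operand, hence the dedicated X86ISD::VSHL/VSRL/VSRA nodes.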
10783 case Intrinsic::x86_sse2_psll_w: 10784 case Intrinsic::x86_sse2_psll_d: 10785 case Intrinsic::x86_sse2_psll_q: 10786 case Intrinsic::x86_avx2_psll_w: 10787 case Intrinsic::x86_avx2_psll_d: 10788 case Intrinsic::x86_avx2_psll_q: 10789 Opcode = X86ISD::VSHL; 10790 break; 10791 case Intrinsic::x86_sse2_psrl_w: 10792 case Intrinsic::x86_sse2_psrl_d: 10793 case Intrinsic::x86_sse2_psrl_q: 10794 case Intrinsic::x86_avx2_psrl_w: 10795 case Intrinsic::x86_avx2_psrl_d: 10796 case Intrinsic::x86_avx2_psrl_q: 10797 Opcode = X86ISD::VSRL; 10798 break; 10799 case Intrinsic::x86_sse2_psra_w: 10800 case Intrinsic::x86_sse2_psra_d: 10801 case Intrinsic::x86_avx2_psra_w: 10802 case Intrinsic::x86_avx2_psra_d: 10803 Opcode = X86ISD::VSRA; 10804 break; 10805 } 10806 return DAG.getNode(Opcode, dl, Op.getValueType(), 10807 Op.getOperand(1), Op.getOperand(2)); 10808 } 10809 10810 // SSE/AVX immediate shift intrinsics 10811 case Intrinsic::x86_sse2_pslli_w: 10812 case Intrinsic::x86_sse2_pslli_d: 10813 case Intrinsic::x86_sse2_pslli_q: 10814 case Intrinsic::x86_avx2_pslli_w: 10815 case Intrinsic::x86_avx2_pslli_d: 10816 case Intrinsic::x86_avx2_pslli_q: 10817 case Intrinsic::x86_sse2_psrli_w: 10818 case Intrinsic::x86_sse2_psrli_d: 10819 case Intrinsic::x86_sse2_psrli_q: 10820 case Intrinsic::x86_avx2_psrli_w: 10821 case Intrinsic::x86_avx2_psrli_d: 10822 case Intrinsic::x86_avx2_psrli_q: 10823 case Intrinsic::x86_sse2_psrai_w: 10824 case Intrinsic::x86_sse2_psrai_d: 10825 case Intrinsic::x86_avx2_psrai_w: 10826 case Intrinsic::x86_avx2_psrai_d: { 10827 unsigned Opcode; 10828 switch (IntNo) { 10829 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 10830 case Intrinsic::x86_sse2_pslli_w: 10831 case Intrinsic::x86_sse2_pslli_d: 10832 case Intrinsic::x86_sse2_pslli_q: 10833 case Intrinsic::x86_avx2_pslli_w: 10834 case Intrinsic::x86_avx2_pslli_d: 10835 case Intrinsic::x86_avx2_pslli_q: 10836 Opcode = X86ISD::VSHLI; 10837 break; 10838 case Intrinsic::x86_sse2_psrli_w: 10839 case Intrinsic::x86_sse2_psrli_d: 10840 case Intrinsic::x86_sse2_psrli_q: 10841 case Intrinsic::x86_avx2_psrli_w: 10842 case Intrinsic::x86_avx2_psrli_d: 10843 case Intrinsic::x86_avx2_psrli_q: 10844 Opcode = X86ISD::VSRLI; 10845 break; 10846 case Intrinsic::x86_sse2_psrai_w: 10847 case Intrinsic::x86_sse2_psrai_d: 10848 case Intrinsic::x86_avx2_psrai_w: 10849 case Intrinsic::x86_avx2_psrai_d: 10850 Opcode = X86ISD::VSRAI; 10851 break; 10852 } 10853 return getTargetVShiftNode(Opcode, dl, Op.getValueType(), 10854 Op.getOperand(1), Op.getOperand(2), DAG); 10855 } 10856 10857 case Intrinsic::x86_sse42_pcmpistria128: 10858 case Intrinsic::x86_sse42_pcmpestria128: 10859 case Intrinsic::x86_sse42_pcmpistric128: 10860 case Intrinsic::x86_sse42_pcmpestric128: 10861 case Intrinsic::x86_sse42_pcmpistrio128: 10862 case Intrinsic::x86_sse42_pcmpestrio128: 10863 case Intrinsic::x86_sse42_pcmpistris128: 10864 case Intrinsic::x86_sse42_pcmpestris128: 10865 case Intrinsic::x86_sse42_pcmpistriz128: 10866 case Intrinsic::x86_sse42_pcmpestriz128: { 10867 unsigned Opcode; 10868 unsigned X86CC; 10869 switch (IntNo) { 10870 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
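    // pcmpistri/pcmpestri set CF, ZF, SF and OF from the string comparison;
    // each _a/_c/_o/_s/_z intrinsic variant tests one flag combination, so
    // pick the matching condition code and emit a SETCC on the flags result
    // (result 1 of the node).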
10871 case Intrinsic::x86_sse42_pcmpistria128: 10872 Opcode = X86ISD::PCMPISTRI; 10873 X86CC = X86::COND_A; 10874 break; 10875 case Intrinsic::x86_sse42_pcmpestria128: 10876 Opcode = X86ISD::PCMPESTRI; 10877 X86CC = X86::COND_A; 10878 break; 10879 case Intrinsic::x86_sse42_pcmpistric128: 10880 Opcode = X86ISD::PCMPISTRI; 10881 X86CC = X86::COND_B; 10882 break; 10883 case Intrinsic::x86_sse42_pcmpestric128: 10884 Opcode = X86ISD::PCMPESTRI; 10885 X86CC = X86::COND_B; 10886 break; 10887 case Intrinsic::x86_sse42_pcmpistrio128: 10888 Opcode = X86ISD::PCMPISTRI; 10889 X86CC = X86::COND_O; 10890 break; 10891 case Intrinsic::x86_sse42_pcmpestrio128: 10892 Opcode = X86ISD::PCMPESTRI; 10893 X86CC = X86::COND_O; 10894 break; 10895 case Intrinsic::x86_sse42_pcmpistris128: 10896 Opcode = X86ISD::PCMPISTRI; 10897 X86CC = X86::COND_S; 10898 break; 10899 case Intrinsic::x86_sse42_pcmpestris128: 10900 Opcode = X86ISD::PCMPESTRI; 10901 X86CC = X86::COND_S; 10902 break; 10903 case Intrinsic::x86_sse42_pcmpistriz128: 10904 Opcode = X86ISD::PCMPISTRI; 10905 X86CC = X86::COND_E; 10906 break; 10907 case Intrinsic::x86_sse42_pcmpestriz128: 10908 Opcode = X86ISD::PCMPESTRI; 10909 X86CC = X86::COND_E; 10910 break; 10911 } 10912 SmallVector<SDValue, 5> NewOps; 10913 NewOps.append(Op->op_begin()+1, Op->op_end()); 10914 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); 10915 SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps.data(), NewOps.size()); 10916 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 10917 DAG.getConstant(X86CC, MVT::i8), 10918 SDValue(PCMP.getNode(), 1)); 10919 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); 10920 } 10921 10922 case Intrinsic::x86_sse42_pcmpistri128: 10923 case Intrinsic::x86_sse42_pcmpestri128: { 10924 unsigned Opcode; 10925 if (IntNo == Intrinsic::x86_sse42_pcmpistri128) 10926 Opcode = X86ISD::PCMPISTRI; 10927 else 10928 Opcode = X86ISD::PCMPESTRI; 10929 10930 SmallVector<SDValue, 5> NewOps; 10931 NewOps.append(Op->op_begin()+1, Op->op_end()); 10932 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); 10933 return DAG.getNode(Opcode, dl, VTs, NewOps.data(), NewOps.size()); 10934 } 10935 case Intrinsic::x86_fma_vfmadd_ps: 10936 case Intrinsic::x86_fma_vfmadd_pd: 10937 case Intrinsic::x86_fma_vfmsub_ps: 10938 case Intrinsic::x86_fma_vfmsub_pd: 10939 case Intrinsic::x86_fma_vfnmadd_ps: 10940 case Intrinsic::x86_fma_vfnmadd_pd: 10941 case Intrinsic::x86_fma_vfnmsub_ps: 10942 case Intrinsic::x86_fma_vfnmsub_pd: 10943 case Intrinsic::x86_fma_vfmaddsub_ps: 10944 case Intrinsic::x86_fma_vfmaddsub_pd: 10945 case Intrinsic::x86_fma_vfmsubadd_ps: 10946 case Intrinsic::x86_fma_vfmsubadd_pd: 10947 case Intrinsic::x86_fma_vfmadd_ps_256: 10948 case Intrinsic::x86_fma_vfmadd_pd_256: 10949 case Intrinsic::x86_fma_vfmsub_ps_256: 10950 case Intrinsic::x86_fma_vfmsub_pd_256: 10951 case Intrinsic::x86_fma_vfnmadd_ps_256: 10952 case Intrinsic::x86_fma_vfnmadd_pd_256: 10953 case Intrinsic::x86_fma_vfnmsub_ps_256: 10954 case Intrinsic::x86_fma_vfnmsub_pd_256: 10955 case Intrinsic::x86_fma_vfmaddsub_ps_256: 10956 case Intrinsic::x86_fma_vfmaddsub_pd_256: 10957 case Intrinsic::x86_fma_vfmsubadd_ps_256: 10958 case Intrinsic::x86_fma_vfmsubadd_pd_256: { 10959 unsigned Opc; 10960 switch (IntNo) { 10961 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
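    // vfmadd  computes  (a * b) + c     vfmsub  computes  (a * b) - c
    // vfnmadd computes -(a * b) + c     vfnmsub computes -(a * b) - c
    // vfmaddsub subtracts in even-numbered elements and adds in odd ones;
    // vfmsubadd is the reverse.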
10962 case Intrinsic::x86_fma_vfmadd_ps: 10963 case Intrinsic::x86_fma_vfmadd_pd: 10964 case Intrinsic::x86_fma_vfmadd_ps_256: 10965 case Intrinsic::x86_fma_vfmadd_pd_256: 10966 Opc = X86ISD::FMADD; 10967 break; 10968 case Intrinsic::x86_fma_vfmsub_ps: 10969 case Intrinsic::x86_fma_vfmsub_pd: 10970 case Intrinsic::x86_fma_vfmsub_ps_256: 10971 case Intrinsic::x86_fma_vfmsub_pd_256: 10972 Opc = X86ISD::FMSUB; 10973 break; 10974 case Intrinsic::x86_fma_vfnmadd_ps: 10975 case Intrinsic::x86_fma_vfnmadd_pd: 10976 case Intrinsic::x86_fma_vfnmadd_ps_256: 10977 case Intrinsic::x86_fma_vfnmadd_pd_256: 10978 Opc = X86ISD::FNMADD; 10979 break; 10980 case Intrinsic::x86_fma_vfnmsub_ps: 10981 case Intrinsic::x86_fma_vfnmsub_pd: 10982 case Intrinsic::x86_fma_vfnmsub_ps_256: 10983 case Intrinsic::x86_fma_vfnmsub_pd_256: 10984 Opc = X86ISD::FNMSUB; 10985 break; 10986 case Intrinsic::x86_fma_vfmaddsub_ps: 10987 case Intrinsic::x86_fma_vfmaddsub_pd: 10988 case Intrinsic::x86_fma_vfmaddsub_ps_256: 10989 case Intrinsic::x86_fma_vfmaddsub_pd_256: 10990 Opc = X86ISD::FMADDSUB; 10991 break; 10992 case Intrinsic::x86_fma_vfmsubadd_ps: 10993 case Intrinsic::x86_fma_vfmsubadd_pd: 10994 case Intrinsic::x86_fma_vfmsubadd_ps_256: 10995 case Intrinsic::x86_fma_vfmsubadd_pd_256: 10996 Opc = X86ISD::FMSUBADD; 10997 break; 10998 } 10999 11000 return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1), 11001 Op.getOperand(2), Op.getOperand(3)); 11002 } 11003 } 11004} 11005 11006static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) { 11007 SDLoc dl(Op); 11008 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 11009 switch (IntNo) { 11010 default: return SDValue(); // Don't custom lower most intrinsics. 11011 11012 // RDRAND/RDSEED intrinsics. 11013 case Intrinsic::x86_rdrand_16: 11014 case Intrinsic::x86_rdrand_32: 11015 case Intrinsic::x86_rdrand_64: 11016 case Intrinsic::x86_rdseed_16: 11017 case Intrinsic::x86_rdseed_32: 11018 case Intrinsic::x86_rdseed_64: { 11019 unsigned Opcode = (IntNo == Intrinsic::x86_rdseed_16 || 11020 IntNo == Intrinsic::x86_rdseed_32 || 11021 IntNo == Intrinsic::x86_rdseed_64) ? X86ISD::RDSEED : 11022 X86ISD::RDRAND; 11023 // Emit the node with the right value type. 11024 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Glue, MVT::Other); 11025 SDValue Result = DAG.getNode(Opcode, dl, VTs, Op.getOperand(0)); 11026 11027 // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1. 11028 // Otherwise return the value from Result, which is always 0, cast to i32. 11029 SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)), 11030 DAG.getConstant(1, Op->getValueType(1)), 11031 DAG.getConstant(X86::COND_B, MVT::i32), 11032 SDValue(Result.getNode(), 1) }; 11033 SDValue isValid = DAG.getNode(X86ISD::CMOV, dl, 11034 DAG.getVTList(Op->getValueType(1), MVT::Glue), 11035 Ops, array_lengthof(Ops)); 11036 11037 // Return { result, isValid, chain }. 11038 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid, 11039 SDValue(Result.getNode(), 2)); 11040 } 11041 11042 // XTEST intrinsics.
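  // XTEST leaves ZF clear when executed inside an RTM/HLE transaction, so
  // the result is materialized with a SETNE on EFLAGS and zero-extended.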
11043 case Intrinsic::x86_xtest: { 11044 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other); 11045 SDValue InTrans = DAG.getNode(X86ISD::XTEST, dl, VTs, Op.getOperand(0)); 11046 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 11047 DAG.getConstant(X86::COND_NE, MVT::i8), 11048 InTrans); 11049 SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC); 11050 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), 11051 Ret, SDValue(InTrans.getNode(), 1)); 11052 } 11053 } 11054} 11055 11056SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op, 11057 SelectionDAG &DAG) const { 11058 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 11059 MFI->setReturnAddressIsTaken(true); 11060 11061 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 11062 SDLoc dl(Op); 11063 EVT PtrVT = getPointerTy(); 11064 11065 if (Depth > 0) { 11066 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 11067 const X86RegisterInfo *RegInfo = 11068 static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo()); 11069 SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), PtrVT); 11070 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 11071 DAG.getNode(ISD::ADD, dl, PtrVT, 11072 FrameAddr, Offset), 11073 MachinePointerInfo(), false, false, false, 0); 11074 } 11075 11076 // Just load the return address. 11077 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG); 11078 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 11079 RetAddrFI, MachinePointerInfo(), false, false, false, 0); 11080} 11081 11082SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { 11083 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 11084 MFI->setFrameAddressIsTaken(true); 11085 11086 EVT VT = Op.getValueType(); 11087 SDLoc dl(Op); // FIXME probably not meaningful 11088 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 11089 const X86RegisterInfo *RegInfo = 11090 static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo()); 11091 unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction()); 11092 assert(((FrameReg == X86::RBP && VT == MVT::i64) || 11093 (FrameReg == X86::EBP && VT == MVT::i32)) && 11094 "Invalid Frame Register!"); 11095 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 11096 while (Depth--) 11097 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 11098 MachinePointerInfo(), 11099 false, false, false, 0); 11100 return FrameAddr; 11101} 11102 11103SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op, 11104 SelectionDAG &DAG) const { 11105 const X86RegisterInfo *RegInfo = 11106 static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo()); 11107 return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize()); 11108} 11109 11110SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { 11111 SDValue Chain = Op.getOperand(0); 11112 SDValue Offset = Op.getOperand(1); 11113 SDValue Handler = Op.getOperand(2); 11114 SDLoc dl (Op); 11115 11116 EVT PtrVT = getPointerTy(); 11117 const X86RegisterInfo *RegInfo = 11118 static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo()); 11119 unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction()); 11120 assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) || 11121 (FrameReg == X86::EBP && PtrVT == MVT::i32)) && 11122 "Invalid Frame Register!"); 11123 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT); 11124 
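  // eh_return stores the handler's address into the caller's return-address
  // slot (frame + slot size), adjusted by Offset, and leaves that slot's
  // address in ECX/RCX for the EH_RETURN pseudo-instruction to consume.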
unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX; 11125 11126 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame, 11127 DAG.getIntPtrConstant(RegInfo->getSlotSize())); 11128 StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset); 11129 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(), 11130 false, false, 0); 11131 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr); 11132 11133 return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain, 11134 DAG.getRegister(StoreAddrReg, PtrVT)); 11135} 11136 11137SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op, 11138 SelectionDAG &DAG) const { 11139 SDLoc DL(Op); 11140 return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL, 11141 DAG.getVTList(MVT::i32, MVT::Other), 11142 Op.getOperand(0), Op.getOperand(1)); 11143} 11144 11145SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op, 11146 SelectionDAG &DAG) const { 11147 SDLoc DL(Op); 11148 return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other, 11149 Op.getOperand(0), Op.getOperand(1)); 11150} 11151 11152static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) { 11153 return Op.getOperand(0); 11154} 11155 11156SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, 11157 SelectionDAG &DAG) const { 11158 SDValue Root = Op.getOperand(0); 11159 SDValue Trmp = Op.getOperand(1); // trampoline 11160 SDValue FPtr = Op.getOperand(2); // nested function 11161 SDValue Nest = Op.getOperand(3); // 'nest' parameter value 11162 SDLoc dl (Op); 11163 11164 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 11165 const TargetRegisterInfo* TRI = getTargetMachine().getRegisterInfo(); 11166 11167 if (Subtarget->is64Bit()) { 11168 SDValue OutChains[6]; 11169 11170 // Large code-model. 11171 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode. 11172 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode. 11173 11174 const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7; 11175 const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7; 11176 11177 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix 11178 11179 // Load the pointer to the nested function into R11. 11180 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11 11181 SDValue Addr = Trmp; 11182 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), 11183 Addr, MachinePointerInfo(TrmpAddr), 11184 false, false, 0); 11185 11186 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 11187 DAG.getConstant(2, MVT::i64)); 11188 OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr, 11189 MachinePointerInfo(TrmpAddr, 2), 11190 false, false, 2); 11191 11192 // Load the 'nest' parameter value into R10. 11193 // R10 is specified in X86CallingConv.td 11194 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10 11195 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 11196 DAG.getConstant(10, MVT::i64)); 11197 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), 11198 Addr, MachinePointerInfo(TrmpAddr, 10), 11199 false, false, 0); 11200 11201 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 11202 DAG.getConstant(12, MVT::i64)); 11203 OutChains[3] = DAG.getStore(Root, dl, Nest, Addr, 11204 MachinePointerInfo(TrmpAddr, 12), 11205 false, false, 2); 11206 11207 // Jump to the nested function. 11208 OpCode = (JMP64r << 8) | REX_WB; // jmpq *... 
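    // Trampoline so far: movabsq $FPtr, %r11 occupies bytes 0-9 and
    // movabsq $Nest, %r10 bytes 10-19; the indirect jmpq *%r11 (0xFF /4)
    // fills bytes 20-22 below.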
11209 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 11210 DAG.getConstant(20, MVT::i64)); 11211 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), 11212 Addr, MachinePointerInfo(TrmpAddr, 20), 11213 false, false, 0); 11214 11215 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11 11216 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 11217 DAG.getConstant(22, MVT::i64)); 11218 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr, 11219 MachinePointerInfo(TrmpAddr, 22), 11220 false, false, 0); 11221 11222 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 6); 11223 } else { 11224 const Function *Func = 11225 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue()); 11226 CallingConv::ID CC = Func->getCallingConv(); 11227 unsigned NestReg; 11228 11229 switch (CC) { 11230 default: 11231 llvm_unreachable("Unsupported calling convention"); 11232 case CallingConv::C: 11233 case CallingConv::X86_StdCall: { 11234 // Pass 'nest' parameter in ECX. 11235 // Must be kept in sync with X86CallingConv.td 11236 NestReg = X86::ECX; 11237 11238 // Check that ECX wasn't needed by an 'inreg' parameter. 11239 FunctionType *FTy = Func->getFunctionType(); 11240 const AttributeSet &Attrs = Func->getAttributes(); 11241 11242 if (!Attrs.isEmpty() && !Func->isVarArg()) { 11243 unsigned InRegCount = 0; 11244 unsigned Idx = 1; 11245 11246 for (FunctionType::param_iterator I = FTy->param_begin(), 11247 E = FTy->param_end(); I != E; ++I, ++Idx) 11248 if (Attrs.hasAttribute(Idx, Attribute::InReg)) 11249 // FIXME: should only count parameters that are lowered to integers. 11250 InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32; 11251 11252 if (InRegCount > 2) { 11253 report_fatal_error("Nest register in use - reduce number of inreg" 11254 " parameters!"); 11255 } 11256 } 11257 break; 11258 } 11259 case CallingConv::X86_FastCall: 11260 case CallingConv::X86_ThisCall: 11261 case CallingConv::Fast: 11262 // Pass 'nest' parameter in EAX. 11263 // Must be kept in sync with X86CallingConv.td 11264 NestReg = X86::EAX; 11265 break; 11266 } 11267 11268 SDValue OutChains[4]; 11269 SDValue Addr, Disp; 11270 11271 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 11272 DAG.getConstant(10, MVT::i32)); 11273 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr); 11274 11275 // This is storing the opcode for MOV32ri. 11276 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte. 11277 const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7; 11278 OutChains[0] = DAG.getStore(Root, dl, 11279 DAG.getConstant(MOV32ri|N86Reg, MVT::i8), 11280 Trmp, MachinePointerInfo(TrmpAddr), 11281 false, false, 0); 11282 11283 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 11284 DAG.getConstant(1, MVT::i32)); 11285 OutChains[1] = DAG.getStore(Root, dl, Nest, Addr, 11286 MachinePointerInfo(TrmpAddr, 1), 11287 false, false, 1); 11288 11289 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode. 
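    // 32-bit trampoline layout (10 bytes): byte 0 holds B8+reg
    // (movl $Nest, %nestreg), bytes 1-4 the nest value, byte 5 the E9
    // jmp opcode, and bytes 6-9 the rel32 displacement FPtr - (Trmp + 10).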
11290 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 11291 DAG.getConstant(5, MVT::i32)); 11292 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr, 11293 MachinePointerInfo(TrmpAddr, 5), 11294 false, false, 1); 11295 11296 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 11297 DAG.getConstant(6, MVT::i32)); 11298 OutChains[3] = DAG.getStore(Root, dl, Disp, Addr, 11299 MachinePointerInfo(TrmpAddr, 6), 11300 false, false, 1); 11301 11302 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 4); 11303 } 11304} 11305 11306SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op, 11307 SelectionDAG &DAG) const { 11308 /* 11309 The rounding mode is in bits 11:10 of FPSR, and has the following 11310 settings: 11311 00 Round to nearest 11312 01 Round to -inf 11313 10 Round to +inf 11314 11 Round to 0 11315 11316 FLT_ROUNDS, on the other hand, expects the following: 11317 -1 Undefined 11318 0 Round to 0 11319 1 Round to nearest 11320 2 Round to +inf 11321 3 Round to -inf 11322 11323 To perform the conversion, we do: 11324 (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3) 11325 */ 11326 11327 MachineFunction &MF = DAG.getMachineFunction(); 11328 const TargetMachine &TM = MF.getTarget(); 11329 const TargetFrameLowering &TFI = *TM.getFrameLowering(); 11330 unsigned StackAlignment = TFI.getStackAlignment(); 11331 EVT VT = Op.getValueType(); 11332 SDLoc DL(Op); 11333 11334 // Save FP Control Word to stack slot 11335 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false); 11336 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 11337 11338 MachineMemOperand *MMO = 11339 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 11340 MachineMemOperand::MOStore, 2, 2); 11341 11342 SDValue Ops[] = { DAG.getEntryNode(), StackSlot }; 11343 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL, 11344 DAG.getVTList(MVT::Other), 11345 Ops, array_lengthof(Ops), MVT::i16, 11346 MMO); 11347 11348 // Load FP Control Word from stack slot 11349 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot, 11350 MachinePointerInfo(), false, false, false, 0); 11351 11352 // Transform as necessary 11353 SDValue CWD1 = 11354 DAG.getNode(ISD::SRL, DL, MVT::i16, 11355 DAG.getNode(ISD::AND, DL, MVT::i16, 11356 CWD, DAG.getConstant(0x800, MVT::i16)), 11357 DAG.getConstant(11, MVT::i8)); 11358 SDValue CWD2 = 11359 DAG.getNode(ISD::SRL, DL, MVT::i16, 11360 DAG.getNode(ISD::AND, DL, MVT::i16, 11361 CWD, DAG.getConstant(0x400, MVT::i16)), 11362 DAG.getConstant(9, MVT::i8)); 11363 11364 SDValue RetVal = 11365 DAG.getNode(ISD::AND, DL, MVT::i16, 11366 DAG.getNode(ISD::ADD, DL, MVT::i16, 11367 DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2), 11368 DAG.getConstant(1, MVT::i16)), 11369 DAG.getConstant(3, MVT::i16)); 11370 11371 return DAG.getNode((VT.getSizeInBits() < 16 ? 11372 ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal); 11373} 11374 11375static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) { 11376 EVT VT = Op.getValueType(); 11377 EVT OpVT = VT; 11378 unsigned NumBits = VT.getSizeInBits(); 11379 SDLoc dl(Op); 11380 11381 Op = Op.getOperand(0); 11382 if (VT == MVT::i8) { 11383 // Zero extend to i32 since there is not an i8 bsr. 11384 OpVT = MVT::i32; 11385 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op); 11386 } 11387 11388 // Issue a bsr (scan bits in reverse) which also sets EFLAGS. 11389 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 11390 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op); 11391 11392 // If src is zero (i.e. 
bsr sets ZF), returns NumBits. 11393 SDValue Ops[] = { 11394 Op, 11395 DAG.getConstant(NumBits+NumBits-1, OpVT), 11396 DAG.getConstant(X86::COND_E, MVT::i8), 11397 Op.getValue(1) 11398 }; 11399 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops, array_lengthof(Ops)); 11400 11401 // Finally xor with NumBits-1. 11402 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT)); 11403 11404 if (VT == MVT::i8) 11405 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op); 11406 return Op; 11407} 11408 11409static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) { 11410 EVT VT = Op.getValueType(); 11411 EVT OpVT = VT; 11412 unsigned NumBits = VT.getSizeInBits(); 11413 SDLoc dl(Op); 11414 11415 Op = Op.getOperand(0); 11416 if (VT == MVT::i8) { 11417 // Zero extend to i32 since there is not an i8 bsr. 11418 OpVT = MVT::i32; 11419 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op); 11420 } 11421 11422 // Issue a bsr (scan bits in reverse). 11423 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 11424 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op); 11425 11426 // And xor with NumBits-1. 11427 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT)); 11428 11429 if (VT == MVT::i8) 11430 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op); 11431 return Op; 11432} 11433 11434static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) { 11435 EVT VT = Op.getValueType(); 11436 unsigned NumBits = VT.getSizeInBits(); 11437 SDLoc dl(Op); 11438 Op = Op.getOperand(0); 11439 11440 // Issue a bsf (scan bits forward) which also sets EFLAGS. 11441 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 11442 Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op); 11443 11444 // If src is zero (i.e. bsf sets ZF), returns NumBits. 11445 SDValue Ops[] = { 11446 Op, 11447 DAG.getConstant(NumBits, VT), 11448 DAG.getConstant(X86::COND_E, MVT::i8), 11449 Op.getValue(1) 11450 }; 11451 return DAG.getNode(X86ISD::CMOV, dl, VT, Ops, array_lengthof(Ops)); 11452} 11453 11454// Lower256IntArith - Break a 256-bit integer operation into two new 128-bit 11455// ones, and then concatenate the result back. 
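// For example, a v8i32 add becomes two v4i32 adds on the extracted low and
// high halves, rejoined with a CONCAT_VECTORS node.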
11456static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) { 11457 EVT VT = Op.getValueType(); 11458 11459 assert(VT.is256BitVector() && VT.isInteger() && 11460 "Unsupported value type for operation"); 11461 11462 unsigned NumElems = VT.getVectorNumElements(); 11463 SDLoc dl(Op); 11464 11465 // Extract the LHS vectors 11466 SDValue LHS = Op.getOperand(0); 11467 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl); 11468 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl); 11469 11470 // Extract the RHS vectors 11471 SDValue RHS = Op.getOperand(1); 11472 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl); 11473 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl); 11474 11475 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 11476 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); 11477 11478 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, 11479 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1), 11480 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2)); 11481} 11482 11483static SDValue LowerADD(SDValue Op, SelectionDAG &DAG) { 11484 assert(Op.getValueType().is256BitVector() && 11485 Op.getValueType().isInteger() && 11486 "Only handle AVX 256-bit vector integer operation"); 11487 return Lower256IntArith(Op, DAG); 11488} 11489 11490static SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) { 11491 assert(Op.getValueType().is256BitVector() && 11492 Op.getValueType().isInteger() && 11493 "Only handle AVX 256-bit vector integer operation"); 11494 return Lower256IntArith(Op, DAG); 11495} 11496 11497static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget, 11498 SelectionDAG &DAG) { 11499 SDLoc dl(Op); 11500 EVT VT = Op.getValueType(); 11501 11502 // Decompose 256-bit ops into smaller 128-bit ops. 11503 if (VT.is256BitVector() && !Subtarget->hasInt256()) 11504 return Lower256IntArith(Op, DAG); 11505 11506 SDValue A = Op.getOperand(0); 11507 SDValue B = Op.getOperand(1); 11508 11509 // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle. 11510 if (VT == MVT::v4i32) { 11511 assert(Subtarget->hasSSE2() && !Subtarget->hasSSE41() && 11512 "Should not custom lower when pmuldq is available!"); 11513 11514 // Extract the odd parts. 11515 static const int UnpackMask[] = { 1, -1, 3, -1 }; 11516 SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask); 11517 SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask); 11518 11519 // Multiply the even parts. 11520 SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, A, B); 11521 // Now multiply odd parts. 11522 SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, Aodds, Bodds); 11523 11524 Evens = DAG.getNode(ISD::BITCAST, dl, VT, Evens); 11525 Odds = DAG.getNode(ISD::BITCAST, dl, VT, Odds); 11526 11527 // Merge the two vectors back together with a shuffle. This expands into 2 11528 // shuffles. 
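    // With A = <a0,a1,a2,a3>, Evens holds a0*b0 and a2*b2 and Odds holds
    // a1*b1 and a3*b3 as 64-bit products; viewed as v4i32, the low 32 bits
    // of each product sit in elements 0 and 2, so mask <0,4,2,6> rebuilds
    // <a0*b0, a1*b1, a2*b2, a3*b3> modulo 2^32.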
11529 static const int ShufMask[] = { 0, 4, 2, 6 }; 11530 return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask); 11531 } 11532 11533 assert((VT == MVT::v2i64 || VT == MVT::v4i64) && 11534 "Only know how to lower V2I64/V4I64 multiply"); 11535 11536 // Ahi = psrlqi(a, 32); 11537 // Bhi = psrlqi(b, 32); 11538 // 11539 // AloBlo = pmuludq(a, b); 11540 // AloBhi = pmuludq(a, Bhi); 11541 // AhiBlo = pmuludq(Ahi, b); 11542 11543 // AloBhi = psllqi(AloBhi, 32); 11544 // AhiBlo = psllqi(AhiBlo, 32); 11545 // return AloBlo + AloBhi + AhiBlo; 11546 11547 SDValue ShAmt = DAG.getConstant(32, MVT::i32); 11548 11549 SDValue Ahi = DAG.getNode(X86ISD::VSRLI, dl, VT, A, ShAmt); 11550 SDValue Bhi = DAG.getNode(X86ISD::VSRLI, dl, VT, B, ShAmt); 11551 11552 // Bit cast to 32-bit vectors for MULUDQ 11553 EVT MulVT = (VT == MVT::v2i64) ? MVT::v4i32 : MVT::v8i32; 11554 A = DAG.getNode(ISD::BITCAST, dl, MulVT, A); 11555 B = DAG.getNode(ISD::BITCAST, dl, MulVT, B); 11556 Ahi = DAG.getNode(ISD::BITCAST, dl, MulVT, Ahi); 11557 Bhi = DAG.getNode(ISD::BITCAST, dl, MulVT, Bhi); 11558 11559 SDValue AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B); 11560 SDValue AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi); 11561 SDValue AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B); 11562 11563 AloBhi = DAG.getNode(X86ISD::VSHLI, dl, VT, AloBhi, ShAmt); 11564 AhiBlo = DAG.getNode(X86ISD::VSHLI, dl, VT, AhiBlo, ShAmt); 11565 11566 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi); 11567 return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo); 11568} 11569 11570SDValue X86TargetLowering::LowerSDIV(SDValue Op, SelectionDAG &DAG) const { 11571 EVT VT = Op.getValueType(); 11572 EVT EltTy = VT.getVectorElementType(); 11573 unsigned NumElts = VT.getVectorNumElements(); 11574 SDValue N0 = Op.getOperand(0); 11575 SDLoc dl(Op); 11576 11577 // Lower sdiv X, pow2-const. 11578 BuildVectorSDNode *C = dyn_cast<BuildVectorSDNode>(Op.getOperand(1)); 11579 if (!C) 11580 return SDValue(); 11581 11582 APInt SplatValue, SplatUndef; 11583 unsigned SplatBitSize; 11584 bool HasAnyUndefs; 11585 if (!C->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, 11586 HasAnyUndefs) || 11587 EltTy.getSizeInBits() < SplatBitSize) 11588 return SDValue(); 11589 11590 if ((SplatValue != 0) && 11591 (SplatValue.isPowerOf2() || (-SplatValue).isPowerOf2())) { 11592 unsigned lg2 = SplatValue.countTrailingZeros(); 11593 // Splat the sign bit. 11594 SDValue Sz = DAG.getConstant(EltTy.getSizeInBits()-1, MVT::i32); 11595 SDValue SGN = getTargetVShiftNode(X86ISD::VSRAI, dl, VT, N0, Sz, DAG); 11596 // Add (N0 < 0) ? abs2 - 1 : 0; 11597 SDValue Amt = DAG.getConstant(EltTy.getSizeInBits() - lg2, MVT::i32); 11598 SDValue SRL = getTargetVShiftNode(X86ISD::VSRLI, dl, VT, SGN, Amt, DAG); 11599 SDValue ADD = DAG.getNode(ISD::ADD, dl, VT, N0, SRL); 11600 SDValue Lg2Amt = DAG.getConstant(lg2, MVT::i32); 11601 SDValue SRA = getTargetVShiftNode(X86ISD::VSRAI, dl, VT, ADD, Lg2Amt, DAG); 11602 11603 // If we're dividing by a positive value, we're done. Otherwise, we must 11604 // negate the result. 
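    // e.g. for -9 sdiv 4 (lg2 == 2): SGN is all-ones, SRL yields 3,
    // ADD gives -6, and SRA by 2 produces -2, matching sdiv's
    // round-toward-zero semantics.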
11605 if (SplatValue.isNonNegative()) 11606 return SRA; 11607 11608 SmallVector<SDValue, 16> V(NumElts, DAG.getConstant(0, EltTy)); 11609 SDValue Zero = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], NumElts); 11610 return DAG.getNode(ISD::SUB, dl, VT, Zero, SRA); 11611 } 11612 return SDValue(); 11613} 11614 11615static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG, 11616 const X86Subtarget *Subtarget) { 11617 EVT VT = Op.getValueType(); 11618 SDLoc dl(Op); 11619 SDValue R = Op.getOperand(0); 11620 SDValue Amt = Op.getOperand(1); 11621 11622 // Optimize shl/srl/sra with constant shift amount. 11623 if (isSplatVector(Amt.getNode())) { 11624 SDValue SclrAmt = Amt->getOperand(0); 11625 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(SclrAmt)) { 11626 uint64_t ShiftAmt = C->getZExtValue(); 11627 11628 if (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 || 11629 (Subtarget->hasInt256() && 11630 (VT == MVT::v4i64 || VT == MVT::v8i32 || VT == MVT::v16i16))) { 11631 if (Op.getOpcode() == ISD::SHL) 11632 return DAG.getNode(X86ISD::VSHLI, dl, VT, R, 11633 DAG.getConstant(ShiftAmt, MVT::i32)); 11634 if (Op.getOpcode() == ISD::SRL) 11635 return DAG.getNode(X86ISD::VSRLI, dl, VT, R, 11636 DAG.getConstant(ShiftAmt, MVT::i32)); 11637 if (Op.getOpcode() == ISD::SRA && VT != MVT::v2i64 && VT != MVT::v4i64) 11638 return DAG.getNode(X86ISD::VSRAI, dl, VT, R, 11639 DAG.getConstant(ShiftAmt, MVT::i32)); 11640 } 11641 11642 if (VT == MVT::v16i8) { 11643 if (Op.getOpcode() == ISD::SHL) { 11644 // Make a large shift. 11645 SDValue SHL = DAG.getNode(X86ISD::VSHLI, dl, MVT::v8i16, R, 11646 DAG.getConstant(ShiftAmt, MVT::i32)); 11647 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL); 11648 // Zero out the rightmost bits. 11649 SmallVector<SDValue, 16> V(16, 11650 DAG.getConstant(uint8_t(-1U << ShiftAmt), 11651 MVT::i8)); 11652 return DAG.getNode(ISD::AND, dl, VT, SHL, 11653 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 16)); 11654 } 11655 if (Op.getOpcode() == ISD::SRL) { 11656 // Make a large shift. 11657 SDValue SRL = DAG.getNode(X86ISD::VSRLI, dl, MVT::v8i16, R, 11658 DAG.getConstant(ShiftAmt, MVT::i32)); 11659 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL); 11660 // Zero out the leftmost bits. 11661 SmallVector<SDValue, 16> V(16, 11662 DAG.getConstant(uint8_t(-1U) >> ShiftAmt, 11663 MVT::i8)); 11664 return DAG.getNode(ISD::AND, dl, VT, SRL, 11665 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 16)); 11666 } 11667 if (Op.getOpcode() == ISD::SRA) { 11668 if (ShiftAmt == 7) { 11669 // R s>> 7 === R s< 0 11670 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl); 11671 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R); 11672 } 11673 11674 // R s>> a === ((R u>> a) ^ m) - m 11675 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt); 11676 SmallVector<SDValue, 16> V(16, DAG.getConstant(128 >> ShiftAmt, 11677 MVT::i8)); 11678 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 16); 11679 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask); 11680 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask); 11681 return Res; 11682 } 11683 llvm_unreachable("Unknown shift opcode."); 11684 } 11685 11686 if (Subtarget->hasInt256() && VT == MVT::v32i8) { 11687 if (Op.getOpcode() == ISD::SHL) { 11688 // Make a large shift. 11689 SDValue SHL = DAG.getNode(X86ISD::VSHLI, dl, MVT::v16i16, R, 11690 DAG.getConstant(ShiftAmt, MVT::i32)); 11691 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL); 11692 // Zero out the rightmost bits. 
11693 SmallVector<SDValue, 32> V(32, 11694 DAG.getConstant(uint8_t(-1U << ShiftAmt), 11695 MVT::i8)); 11696 return DAG.getNode(ISD::AND, dl, VT, SHL, 11697 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 32)); 11698 } 11699 if (Op.getOpcode() == ISD::SRL) { 11700 // Make a large shift. 11701 SDValue SRL = DAG.getNode(X86ISD::VSRLI, dl, MVT::v16i16, R, 11702 DAG.getConstant(ShiftAmt, MVT::i32)); 11703 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL); 11704 // Zero out the leftmost bits. 11705 SmallVector<SDValue, 32> V(32, 11706 DAG.getConstant(uint8_t(-1U) >> ShiftAmt, 11707 MVT::i8)); 11708 return DAG.getNode(ISD::AND, dl, VT, SRL, 11709 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 32)); 11710 } 11711 if (Op.getOpcode() == ISD::SRA) { 11712 if (ShiftAmt == 7) { 11713 // R s>> 7 === R s< 0 11714 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl); 11715 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R); 11716 } 11717 11718 // R s>> a === ((R u>> a) ^ m) - m 11719 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt); 11720 SmallVector<SDValue, 32> V(32, DAG.getConstant(128 >> ShiftAmt, 11721 MVT::i8)); 11722 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 32); 11723 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask); 11724 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask); 11725 return Res; 11726 } 11727 llvm_unreachable("Unknown shift opcode."); 11728 } 11729 } 11730 } 11731 11732 // Special case in 32-bit mode, where i64 is expanded into high and low parts. 11733 if (!Subtarget->is64Bit() && 11734 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64)) && 11735 Amt.getOpcode() == ISD::BITCAST && 11736 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) { 11737 Amt = Amt.getOperand(0); 11738 unsigned Ratio = Amt.getValueType().getVectorNumElements() / 11739 VT.getVectorNumElements(); 11740 unsigned RatioInLog2 = Log2_32_Ceil(Ratio); 11741 uint64_t ShiftAmt = 0; 11742 for (unsigned i = 0; i != Ratio; ++i) { 11743 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Amt.getOperand(i)); 11744 if (C == 0) 11745 return SDValue(); 11746 // 6 == Log2(64) 11747 ShiftAmt |= C->getZExtValue() << (i * (1 << (6 - RatioInLog2))); 11748 } 11749 // Check remaining shift amounts. 
11750 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) { 11751 uint64_t ShAmt = 0; 11752 for (unsigned j = 0; j != Ratio; ++j) { 11753 ConstantSDNode *C = 11754 dyn_cast<ConstantSDNode>(Amt.getOperand(i + j)); 11755 if (C == 0) 11756 return SDValue(); 11757 // 6 == Log2(64) 11758 ShAmt |= C->getZExtValue() << (j * (1 << (6 - RatioInLog2))); 11759 } 11760 if (ShAmt != ShiftAmt) 11761 return SDValue(); 11762 } 11763 switch (Op.getOpcode()) { 11764 default: 11765 llvm_unreachable("Unknown shift opcode!"); 11766 case ISD::SHL: 11767 return DAG.getNode(X86ISD::VSHLI, dl, VT, R, 11768 DAG.getConstant(ShiftAmt, MVT::i32)); 11769 case ISD::SRL: 11770 return DAG.getNode(X86ISD::VSRLI, dl, VT, R, 11771 DAG.getConstant(ShiftAmt, MVT::i32)); 11772 case ISD::SRA: 11773 return DAG.getNode(X86ISD::VSRAI, dl, VT, R, 11774 DAG.getConstant(ShiftAmt, MVT::i32)); 11775 } 11776 } 11777 11778 return SDValue(); 11779} 11780 11781static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG, 11782 const X86Subtarget* Subtarget) { 11783 EVT VT = Op.getValueType(); 11784 SDLoc dl(Op); 11785 SDValue R = Op.getOperand(0); 11786 SDValue Amt = Op.getOperand(1); 11787 11788 if ((VT == MVT::v2i64 && Op.getOpcode() != ISD::SRA) || 11789 VT == MVT::v4i32 || VT == MVT::v8i16 || 11790 (Subtarget->hasInt256() && 11791 ((VT == MVT::v4i64 && Op.getOpcode() != ISD::SRA) || 11792 VT == MVT::v8i32 || VT == MVT::v16i16))) { 11793 SDValue BaseShAmt; 11794 EVT EltVT = VT.getVectorElementType(); 11795 11796 if (Amt.getOpcode() == ISD::BUILD_VECTOR) { 11797 unsigned NumElts = VT.getVectorNumElements(); 11798 unsigned i, j; 11799 for (i = 0; i != NumElts; ++i) { 11800 if (Amt.getOperand(i).getOpcode() == ISD::UNDEF) 11801 continue; 11802 break; 11803 } 11804 for (j = i; j != NumElts; ++j) { 11805 SDValue Arg = Amt.getOperand(j); 11806 if (Arg.getOpcode() == ISD::UNDEF) continue; 11807 if (Arg != Amt.getOperand(i)) 11808 break; 11809 } 11810 if (i != NumElts && j == NumElts) 11811 BaseShAmt = Amt.getOperand(i); 11812 } else { 11813 if (Amt.getOpcode() == ISD::EXTRACT_SUBVECTOR) 11814 Amt = Amt.getOperand(0); 11815 if (Amt.getOpcode() == ISD::VECTOR_SHUFFLE && 11816 cast<ShuffleVectorSDNode>(Amt)->isSplat()) { 11817 SDValue InVec = Amt.getOperand(0); 11818 if (InVec.getOpcode() == ISD::BUILD_VECTOR) { 11819 unsigned NumElts = InVec.getValueType().getVectorNumElements(); 11820 unsigned i = 0; 11821 for (; i != NumElts; ++i) { 11822 SDValue Arg = InVec.getOperand(i); 11823 if (Arg.getOpcode() == ISD::UNDEF) continue; 11824 BaseShAmt = Arg; 11825 break; 11826 } 11827 } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) { 11828 if (ConstantSDNode *C = 11829 dyn_cast<ConstantSDNode>(InVec.getOperand(2))) { 11830 unsigned SplatIdx = 11831 cast<ShuffleVectorSDNode>(Amt)->getSplatIndex(); 11832 if (C->getZExtValue() == SplatIdx) 11833 BaseShAmt = InVec.getOperand(1); 11834 } 11835 } 11836 if (BaseShAmt.getNode() == 0) 11837 BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Amt, 11838 DAG.getIntPtrConstant(0)); 11839 } 11840 } 11841 11842 if (BaseShAmt.getNode()) { 11843 if (EltVT.bitsGT(MVT::i32)) 11844 BaseShAmt = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BaseShAmt); 11845 else if (EltVT.bitsLT(MVT::i32)) 11846 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt); 11847 11848 switch (Op.getOpcode()) { 11849 default: 11850 llvm_unreachable("Unknown shift opcode!"); 11851 case ISD::SHL: 11852 switch (VT.getSimpleVT().SimpleTy) { 11853 default: return SDValue(); 11854 case MVT::v2i64: 11855 case 
MVT::v4i32: 11856 case MVT::v8i16: 11857 case MVT::v4i64: 11858 case MVT::v8i32: 11859 case MVT::v16i16: 11860 return getTargetVShiftNode(X86ISD::VSHLI, dl, VT, R, BaseShAmt, DAG); 11861 } 11862 case ISD::SRA: 11863 switch (VT.getSimpleVT().SimpleTy) { 11864 default: return SDValue(); 11865 case MVT::v4i32: 11866 case MVT::v8i16: 11867 case MVT::v8i32: 11868 case MVT::v16i16: 11869 return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, R, BaseShAmt, DAG); 11870 } 11871 case ISD::SRL: 11872 switch (VT.getSimpleVT().SimpleTy) { 11873 default: return SDValue(); 11874 case MVT::v2i64: 11875 case MVT::v4i32: 11876 case MVT::v8i16: 11877 case MVT::v4i64: 11878 case MVT::v8i32: 11879 case MVT::v16i16: 11880 return getTargetVShiftNode(X86ISD::VSRLI, dl, VT, R, BaseShAmt, DAG); 11881 } 11882 } 11883 } 11884 } 11885 11886 // Special case in 32-bit mode, where i64 is expanded into high and low parts. 11887 if (!Subtarget->is64Bit() && 11888 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64)) && 11889 Amt.getOpcode() == ISD::BITCAST && 11890 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) { 11891 Amt = Amt.getOperand(0); 11892 unsigned Ratio = Amt.getValueType().getVectorNumElements() / 11893 VT.getVectorNumElements(); 11894 std::vector<SDValue> Vals(Ratio); 11895 for (unsigned i = 0; i != Ratio; ++i) 11896 Vals[i] = Amt.getOperand(i); 11897 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) { 11898 for (unsigned j = 0; j != Ratio; ++j) 11899 if (Vals[j] != Amt.getOperand(i + j)) 11900 return SDValue(); 11901 } 11902 switch (Op.getOpcode()) { 11903 default: 11904 llvm_unreachable("Unknown shift opcode!"); 11905 case ISD::SHL: 11906 return DAG.getNode(X86ISD::VSHL, dl, VT, R, Op.getOperand(1)); 11907 case ISD::SRL: 11908 return DAG.getNode(X86ISD::VSRL, dl, VT, R, Op.getOperand(1)); 11909 case ISD::SRA: 11910 return DAG.getNode(X86ISD::VSRA, dl, VT, R, Op.getOperand(1)); 11911 } 11912 } 11913 11914 return SDValue(); 11915} 11916 11917SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const { 11918 11919 EVT VT = Op.getValueType(); 11920 SDLoc dl(Op); 11921 SDValue R = Op.getOperand(0); 11922 SDValue Amt = Op.getOperand(1); 11923 SDValue V; 11924 11925 if (!Subtarget->hasSSE2()) 11926 return SDValue(); 11927 11928 V = LowerScalarImmediateShift(Op, DAG, Subtarget); 11929 if (V.getNode()) 11930 return V; 11931 11932 V = LowerScalarVariableShift(Op, DAG, Subtarget); 11933 if (V.getNode()) 11934 return V; 11935 11936 // AVX2 has VPSLLV/VPSRAV/VPSRLV. 11937 if (Subtarget->hasInt256()) { 11938 if (Op.getOpcode() == ISD::SRL && 11939 (VT == MVT::v2i64 || VT == MVT::v4i32 || 11940 VT == MVT::v4i64 || VT == MVT::v8i32)) 11941 return Op; 11942 if (Op.getOpcode() == ISD::SHL && 11943 (VT == MVT::v2i64 || VT == MVT::v4i32 || 11944 VT == MVT::v4i64 || VT == MVT::v8i32)) 11945 return Op; 11946 if (Op.getOpcode() == ISD::SRA && (VT == MVT::v4i32 || VT == MVT::v8i32)) 11947 return Op; 11948 } 11949 11950 // Lower SHL with variable shift amount. 
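  // The v4i32 case builds 1 << Amt per lane with FP arithmetic:
  // (Amt << 23) + 0x3f800000 (the bits of 1.0f) is a float whose exponent
  // is Amt, so converting it back to integer yields 2^Amt and the shift
  // becomes a multiply.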
11951 if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) { 11952 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, VT)); 11953 11954 Op = DAG.getNode(ISD::ADD, dl, VT, Op, DAG.getConstant(0x3f800000U, VT)); 11955 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op); 11956 Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op); 11957 return DAG.getNode(ISD::MUL, dl, VT, Op, R); 11958 } 11959 if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) { 11960 assert(Subtarget->hasSSE2() && "Need SSE2 for pslli/pcmpeq."); 11961 11962 // a = a << 5; 11963 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(5, VT)); 11964 Op = DAG.getNode(ISD::BITCAST, dl, VT, Op); 11965 11966 // Turn 'a' into a mask suitable for VSELECT 11967 SDValue VSelM = DAG.getConstant(0x80, VT); 11968 SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op); 11969 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM); 11970 11971 SDValue CM1 = DAG.getConstant(0x0f, VT); 11972 SDValue CM2 = DAG.getConstant(0x3f, VT); 11973 11974 // r = VSELECT(r, psllw(r & (char16)15, 4), a); 11975 SDValue M = DAG.getNode(ISD::AND, dl, VT, R, CM1); 11976 M = getTargetVShiftNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 11977 DAG.getConstant(4, MVT::i32), DAG); 11978 M = DAG.getNode(ISD::BITCAST, dl, VT, M); 11979 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R); 11980 11981 // a += a 11982 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op); 11983 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op); 11984 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM); 11985 11986 // r = VSELECT(r, psllw(r & (char16)63, 2), a); 11987 M = DAG.getNode(ISD::AND, dl, VT, R, CM2); 11988 M = getTargetVShiftNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 11989 DAG.getConstant(2, MVT::i32), DAG); 11990 M = DAG.getNode(ISD::BITCAST, dl, VT, M); 11991 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R); 11992 11993 // a += a 11994 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op); 11995 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op); 11996 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM); 11997 11998 // return VSELECT(r, r+r, a); 11999 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, 12000 DAG.getNode(ISD::ADD, dl, VT, R, R), R); 12001 return R; 12002 } 12003 12004 // Decompose 256-bit shifts into smaller 128-bit shifts. 
12005 if (VT.is256BitVector()) { 12006 unsigned NumElems = VT.getVectorNumElements(); 12007 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 12008 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); 12009 12010 // Extract the two vectors 12011 SDValue V1 = Extract128BitVector(R, 0, DAG, dl); 12012 SDValue V2 = Extract128BitVector(R, NumElems/2, DAG, dl); 12013 12014 // Recreate the shift amount vectors 12015 SDValue Amt1, Amt2; 12016 if (Amt.getOpcode() == ISD::BUILD_VECTOR) { 12017 // Constant shift amount 12018 SmallVector<SDValue, 4> Amt1Csts; 12019 SmallVector<SDValue, 4> Amt2Csts; 12020 for (unsigned i = 0; i != NumElems/2; ++i) 12021 Amt1Csts.push_back(Amt->getOperand(i)); 12022 for (unsigned i = NumElems/2; i != NumElems; ++i) 12023 Amt2Csts.push_back(Amt->getOperand(i)); 12024 12025 Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, 12026 &Amt1Csts[0], NumElems/2); 12027 Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, 12028 &Amt2Csts[0], NumElems/2); 12029 } else { 12030 // Variable shift amount 12031 Amt1 = Extract128BitVector(Amt, 0, DAG, dl); 12032 Amt2 = Extract128BitVector(Amt, NumElems/2, DAG, dl); 12033 } 12034 12035 // Issue new vector shifts for the smaller types 12036 V1 = DAG.getNode(Op.getOpcode(), dl, NewVT, V1, Amt1); 12037 V2 = DAG.getNode(Op.getOpcode(), dl, NewVT, V2, Amt2); 12038 12039 // Concatenate the result back 12040 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2); 12041 } 12042 12043 return SDValue(); 12044} 12045 12046static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) { 12047 // Lower the "add/sub/mul with overflow" instruction into a regular instruction plus 12048 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering 12049 // looks for this combo and may remove the "setcc" instruction if the "setcc" 12050 // has only one use. 12051 SDNode *N = Op.getNode(); 12052 SDValue LHS = N->getOperand(0); 12053 SDValue RHS = N->getOperand(1); 12054 unsigned BaseOp = 0; 12055 unsigned Cond = 0; 12056 SDLoc DL(Op); 12057 switch (Op.getOpcode()) { 12058 default: llvm_unreachable("Unknown ovf instruction!"); 12059 case ISD::SADDO: 12060 // An add of one will be selected as an INC. Note that INC doesn't 12061 // set CF, so we can't do this for UADDO. 12062 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) 12063 if (C->isOne()) { 12064 BaseOp = X86ISD::INC; 12065 Cond = X86::COND_O; 12066 break; 12067 } 12068 BaseOp = X86ISD::ADD; 12069 Cond = X86::COND_O; 12070 break; 12071 case ISD::UADDO: 12072 BaseOp = X86ISD::ADD; 12073 Cond = X86::COND_B; 12074 break; 12075 case ISD::SSUBO: 12076 // A subtract of one will be selected as a DEC. Note that DEC doesn't 12077 // set CF, so we can't do this for USUBO.
12078 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) 12079 if (C->isOne()) { 12080 BaseOp = X86ISD::DEC; 12081 Cond = X86::COND_O; 12082 break; 12083 } 12084 BaseOp = X86ISD::SUB; 12085 Cond = X86::COND_O; 12086 break; 12087 case ISD::USUBO: 12088 BaseOp = X86ISD::SUB; 12089 Cond = X86::COND_B; 12090 break; 12091 case ISD::SMULO: 12092 BaseOp = X86ISD::SMUL; 12093 Cond = X86::COND_O; 12094 break; 12095 case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs 12096 SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0), 12097 MVT::i32); 12098 SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS); 12099 12100 SDValue SetCC = 12101 DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 12102 DAG.getConstant(X86::COND_O, MVT::i32), 12103 SDValue(Sum.getNode(), 2)); 12104 12105 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC); 12106 } 12107 } 12108 12109 // Also sets EFLAGS. 12110 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32); 12111 SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS); 12112 12113 SDValue SetCC = 12114 DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1), 12115 DAG.getConstant(Cond, MVT::i32), 12116 SDValue(Sum.getNode(), 1)); 12117 12118 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC); 12119} 12120 12121SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, 12122 SelectionDAG &DAG) const { 12123 SDLoc dl(Op); 12124 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 12125 EVT VT = Op.getValueType(); 12126 12127 if (!Subtarget->hasSSE2() || !VT.isVector()) 12128 return SDValue(); 12129 12130 unsigned BitsDiff = VT.getScalarType().getSizeInBits() - 12131 ExtraVT.getScalarType().getSizeInBits(); 12132 SDValue ShAmt = DAG.getConstant(BitsDiff, MVT::i32); 12133 12134 switch (VT.getSimpleVT().SimpleTy) { 12135 default: return SDValue(); 12136 case MVT::v8i32: 12137 case MVT::v16i16: 12138 if (!Subtarget->hasFp256()) 12139 return SDValue(); 12140 if (!Subtarget->hasInt256()) { 12141 // needs to be split 12142 unsigned NumElems = VT.getVectorNumElements(); 12143 12144 // Extract the LHS vectors 12145 SDValue LHS = Op.getOperand(0); 12146 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl); 12147 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl); 12148 12149 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 12150 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); 12151 12152 EVT ExtraEltVT = ExtraVT.getVectorElementType(); 12153 unsigned ExtraNumElems = ExtraVT.getVectorNumElements(); 12154 ExtraVT = EVT::getVectorVT(*DAG.getContext(), ExtraEltVT, 12155 ExtraNumElems/2); 12156 SDValue Extra = DAG.getValueType(ExtraVT); 12157 12158 LHS1 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, Extra); 12159 LHS2 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, Extra); 12160 12161 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, LHS1, LHS2); 12162 } 12163 // fall through 12164 case MVT::v4i32: 12165 case MVT::v8i16: { 12166 // (sext (vzext x)) -> (vsext x) 12167 SDValue Op0 = Op.getOperand(0); 12168 SDValue Op00 = Op0.getOperand(0); 12169 SDValue Tmp1; 12170 // Hopefully, this VECTOR_SHUFFLE is just a VZEXT. 
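    // Sign-extending in-reg a value that was zero-extended just re-extends
    // from the original narrow sign bit, so (sext_inreg (vzext x)) can fold
    // to a single (vsext x) of the pre-extension source.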
12171 if (Op0.getOpcode() == ISD::BITCAST && 12172 Op00.getOpcode() == ISD::VECTOR_SHUFFLE) 12173 Tmp1 = LowerVectorIntExtend(Op00, DAG); 12174 if (Tmp1.getNode()) { 12175 SDValue Tmp1Op0 = Tmp1.getOperand(0); 12176 assert(Tmp1Op0.getOpcode() == X86ISD::VZEXT && 12177 "This optimization is invalid without a VZEXT."); 12178 return DAG.getNode(X86ISD::VSEXT, dl, VT, Tmp1Op0.getOperand(0)); 12179 } 12180 12181 // If the above didn't work, then just use Shift-Left + Shift-Right. 12182 Tmp1 = getTargetVShiftNode(X86ISD::VSHLI, dl, VT, Op0, ShAmt, DAG); 12183 return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, Tmp1, ShAmt, DAG); 12184 } 12185 } 12186} 12187 12188static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget, 12189 SelectionDAG &DAG) { 12190 SDLoc dl(Op); 12191 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>( 12192 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()); 12193 SynchronizationScope FenceScope = static_cast<SynchronizationScope>( 12194 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue()); 12195 12196 // The only fence that needs an instruction is a sequentially-consistent 12197 // cross-thread fence. 12198 if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) { 12199 // Use mfence if we have SSE2 or we're on x86-64 (even if we asked for 12200 // no-sse2). There isn't any reason to disable it if the target processor 12201 // supports it. 12202 if (Subtarget->hasSSE2() || Subtarget->is64Bit()) 12203 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0)); 12204 12205 SDValue Chain = Op.getOperand(0); 12206 SDValue Zero = DAG.getConstant(0, MVT::i32); 12207 SDValue Ops[] = { 12208 DAG.getRegister(X86::ESP, MVT::i32), // Base 12209 DAG.getTargetConstant(1, MVT::i8), // Scale 12210 DAG.getRegister(0, MVT::i32), // Index 12211 DAG.getTargetConstant(0, MVT::i32), // Disp 12212 DAG.getRegister(0, MVT::i32), // Segment. 12213 Zero, 12214 Chain 12215 }; 12216 SDNode *Res = DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops); 12217 return SDValue(Res, 0); 12218 } 12219 12220 // MEMBARRIER is a compiler barrier; it codegens to a no-op. 
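    // (Illustrative codegen for the two fence paths above: SSE2 or 64-bit
    // mode gets a single `mfence`, while older 32-bit targets get the
    // serializing no-op store
    //   lock orl $0, (%esp)
    // built from the Ops array.)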
12221 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0)); 12222} 12223 12224static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget, 12225 SelectionDAG &DAG) { 12226 EVT T = Op.getValueType(); 12227 SDLoc DL(Op); 12228 unsigned Reg = 0; 12229 unsigned size = 0; 12230 switch(T.getSimpleVT().SimpleTy) { 12231 default: llvm_unreachable("Invalid value type!"); 12232 case MVT::i8: Reg = X86::AL; size = 1; break; 12233 case MVT::i16: Reg = X86::AX; size = 2; break; 12234 case MVT::i32: Reg = X86::EAX; size = 4; break; 12235 case MVT::i64: 12236 assert(Subtarget->is64Bit() && "Node not type legal!"); 12237 Reg = X86::RAX; size = 8; 12238 break; 12239 } 12240 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg, 12241 Op.getOperand(2), SDValue()); 12242 SDValue Ops[] = { cpIn.getValue(0), 12243 Op.getOperand(1), 12244 Op.getOperand(3), 12245 DAG.getTargetConstant(size, MVT::i8), 12246 cpIn.getValue(1) }; 12247 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 12248 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand(); 12249 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys, 12250 Ops, array_lengthof(Ops), T, MMO); 12251 SDValue cpOut = 12252 DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1)); 12253 return cpOut; 12254} 12255 12256static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget, 12257 SelectionDAG &DAG) { 12258 assert(Subtarget->is64Bit() && "Result not type legalized?"); 12259 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 12260 SDValue TheChain = Op.getOperand(0); 12261 SDLoc dl(Op); 12262 SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1); 12263 SDValue rax = DAG.getCopyFromReg(rd, dl, X86::RAX, MVT::i64, rd.getValue(1)); 12264 SDValue rdx = DAG.getCopyFromReg(rax.getValue(1), dl, X86::RDX, MVT::i64, 12265 rax.getValue(2)); 12266 SDValue Tmp = DAG.getNode(ISD::SHL, dl, MVT::i64, rdx, 12267 DAG.getConstant(32, MVT::i8)); 12268 SDValue Ops[] = { 12269 DAG.getNode(ISD::OR, dl, MVT::i64, rax, Tmp), 12270 rdx.getValue(1) 12271 }; 12272 return DAG.getMergeValues(Ops, array_lengthof(Ops), dl); 12273} 12274 12275SDValue X86TargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const { 12276 EVT SrcVT = Op.getOperand(0).getValueType(); 12277 EVT DstVT = Op.getValueType(); 12278 assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() && 12279 Subtarget->hasMMX() && "Unexpected custom BITCAST"); 12280 assert((DstVT == MVT::i64 || 12281 (DstVT.isVector() && DstVT.getSizeInBits()==64)) && 12282 "Unexpected custom BITCAST"); 12283 // i64 <=> MMX conversions are Legal. 12284 if (SrcVT==MVT::i64 && DstVT.isVector()) 12285 return Op; 12286 if (DstVT==MVT::i64 && SrcVT.isVector()) 12287 return Op; 12288 // MMX <=> MMX conversions are Legal. 12289 if (SrcVT.isVector() && DstVT.isVector()) 12290 return Op; 12291 // All other conversions need to be expanded. 
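  // (Returning SDValue() here tells the legalizer to fall back to its generic
  // expansion of the bitcast, which typically round-trips through a stack
  // temporary.)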
12292 return SDValue(); 12293} 12294 12295static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) { 12296 SDNode *Node = Op.getNode(); 12297 SDLoc dl(Node); 12298 EVT T = Node->getValueType(0); 12299 SDValue negOp = DAG.getNode(ISD::SUB, dl, T, 12300 DAG.getConstant(0, T), Node->getOperand(2)); 12301 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl, 12302 cast<AtomicSDNode>(Node)->getMemoryVT(), 12303 Node->getOperand(0), 12304 Node->getOperand(1), negOp, 12305 cast<AtomicSDNode>(Node)->getSrcValue(), 12306 cast<AtomicSDNode>(Node)->getAlignment(), 12307 cast<AtomicSDNode>(Node)->getOrdering(), 12308 cast<AtomicSDNode>(Node)->getSynchScope()); 12309} 12310 12311static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) { 12312 SDNode *Node = Op.getNode(); 12313 SDLoc dl(Node); 12314 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT(); 12315 12316 // Convert seq_cst store -> xchg 12317 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b) 12318 // FIXME: On 32-bit, store -> fist or movq would be more efficient 12319 // (The only way to get a 16-byte store is cmpxchg16b) 12320 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment. 12321 if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent || 12322 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 12323 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl, 12324 cast<AtomicSDNode>(Node)->getMemoryVT(), 12325 Node->getOperand(0), 12326 Node->getOperand(1), Node->getOperand(2), 12327 cast<AtomicSDNode>(Node)->getMemOperand(), 12328 cast<AtomicSDNode>(Node)->getOrdering(), 12329 cast<AtomicSDNode>(Node)->getSynchScope()); 12330 return Swap.getValue(1); 12331 } 12332 // Other atomic stores have a simple pattern. 12333 return Op; 12334} 12335 12336static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) { 12337 EVT VT = Op.getNode()->getValueType(0); 12338 12339 // Let legalize expand this if it isn't a legal type yet. 12340 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 12341 return SDValue(); 12342 12343 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 12344 12345 unsigned Opc; 12346 bool ExtraOp = false; 12347 switch (Op.getOpcode()) { 12348 default: llvm_unreachable("Invalid code"); 12349 case ISD::ADDC: Opc = X86ISD::ADD; break; 12350 case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break; 12351 case ISD::SUBC: Opc = X86ISD::SUB; break; 12352 case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break; 12353 } 12354 12355 if (!ExtraOp) 12356 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), 12357 Op.getOperand(1)); 12358 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), 12359 Op.getOperand(1), Op.getOperand(2)); 12360} 12361 12362SDValue X86TargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const { 12363 assert(Subtarget->isTargetDarwin() && Subtarget->is64Bit()); 12364 12365 // For MacOSX, we want to call an alternative entry point: __sincos_stret, 12366 // which returns the values as { float, float } (in XMM0) or 12367 // { double, double } (which is returned in XMM0, XMM1). 12368 SDLoc dl(Op); 12369 SDValue Arg = Op.getOperand(0); 12370 EVT ArgVT = Arg.getValueType(); 12371 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 12372 12373 ArgListTy Args; 12374 ArgListEntry Entry; 12375 12376 Entry.Node = Arg; 12377 Entry.Ty = ArgTy; 12378 Entry.isSExt = false; 12379 Entry.isZExt = false; 12380 Args.push_back(Entry); 12381 12382 bool isF64 = ArgVT == MVT::f64; 12383 // Only optimize x86_64 for now. i386 is a bit messy. 
For f32,
12384  // the small struct {f32, f32} is returned in (eax, edx). For f64,
12385  // the results are returned via SRet in memory.
12386  const char *LibcallName = isF64 ? "__sincos_stret" : "__sincosf_stret";
12387  SDValue Callee = DAG.getExternalSymbol(LibcallName, getPointerTy());
12388
12389  Type *RetTy = isF64
12390    ? (Type*)StructType::get(ArgTy, ArgTy, NULL)
12391    : (Type*)VectorType::get(ArgTy, 4);
12392  TargetLowering::
12393    CallLoweringInfo CLI(DAG.getEntryNode(), RetTy,
12394                         false, false, false, false, 0,
12395                         CallingConv::C, /*isTailCall=*/false,
12396                         /*doesNotRet=*/false, /*isReturnValueUsed=*/true,
12397                         Callee, Args, DAG, dl);
12398  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
12399
12400  if (isF64)
12401    // Returned in xmm0 and xmm1.
12402    return CallResult.first;
12403
12404  // Returned in bits 0:31 and 32:63 of xmm0.
12405  SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
12406                               CallResult.first, DAG.getIntPtrConstant(0));
12407  SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
12408                               CallResult.first, DAG.getIntPtrConstant(1));
12409  SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
12410  return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
12411}
12412
12413/// LowerOperation - Provide custom lowering hooks for some operations.
12414///
12415SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
12416  switch (Op.getOpcode()) {
12417  default: llvm_unreachable("Should not custom lower this!");
12418  case ISD::SIGN_EXTEND_INREG:  return LowerSIGN_EXTEND_INREG(Op,DAG);
12419  case ISD::ATOMIC_FENCE:       return LowerATOMIC_FENCE(Op, Subtarget, DAG);
12420  case ISD::ATOMIC_CMP_SWAP:    return LowerCMP_SWAP(Op, Subtarget, DAG);
12421  case ISD::ATOMIC_LOAD_SUB:    return LowerLOAD_SUB(Op,DAG);
12422  case ISD::ATOMIC_STORE:       return LowerATOMIC_STORE(Op,DAG);
12423  case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
12424  case ISD::CONCAT_VECTORS:     return LowerCONCAT_VECTORS(Op, DAG);
12425  case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
12426  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
12427  case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
12428  case ISD::EXTRACT_SUBVECTOR:  return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
12429  case ISD::INSERT_SUBVECTOR:   return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
12430  case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
12431  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
12432  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
12433  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
12434  case ISD::ExternalSymbol:     return LowerExternalSymbol(Op, DAG);
12435  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
12436  case ISD::SHL_PARTS:
12437  case ISD::SRA_PARTS:
12438  case ISD::SRL_PARTS:          return LowerShiftParts(Op, DAG);
12439  case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG);
12440  case ISD::UINT_TO_FP:         return LowerUINT_TO_FP(Op, DAG);
12441  case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
12442  case ISD::ZERO_EXTEND:        return LowerZERO_EXTEND(Op, DAG);
12443  case ISD::SIGN_EXTEND:        return LowerSIGN_EXTEND(Op, DAG);
12444  case ISD::ANY_EXTEND:         return LowerANY_EXTEND(Op, DAG);
12445  case ISD::FP_TO_SINT:         return LowerFP_TO_SINT(Op, DAG);
12446  case ISD::FP_TO_UINT:         return LowerFP_TO_UINT(Op, DAG);
12447  case ISD::FP_EXTEND:          return LowerFP_EXTEND(Op, DAG);
12448  case ISD::FABS:               return LowerFABS(Op, DAG);
12449  case ISD::FNEG:               return LowerFNEG(Op,
DAG); 12450 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 12451 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG); 12452 case ISD::SETCC: return LowerSETCC(Op, DAG); 12453 case ISD::SELECT: return LowerSELECT(Op, DAG); 12454 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 12455 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 12456 case ISD::VASTART: return LowerVASTART(Op, DAG); 12457 case ISD::VAARG: return LowerVAARG(Op, DAG); 12458 case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG); 12459 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 12460 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG); 12461 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 12462 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 12463 case ISD::FRAME_TO_ARGS_OFFSET: 12464 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG); 12465 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); 12466 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG); 12467 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG); 12468 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG); 12469 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG); 12470 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG); 12471 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 12472 case ISD::CTLZ: return LowerCTLZ(Op, DAG); 12473 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, DAG); 12474 case ISD::CTTZ: return LowerCTTZ(Op, DAG); 12475 case ISD::MUL: return LowerMUL(Op, Subtarget, DAG); 12476 case ISD::SRA: 12477 case ISD::SRL: 12478 case ISD::SHL: return LowerShift(Op, DAG); 12479 case ISD::SADDO: 12480 case ISD::UADDO: 12481 case ISD::SSUBO: 12482 case ISD::USUBO: 12483 case ISD::SMULO: 12484 case ISD::UMULO: return LowerXALUO(Op, DAG); 12485 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG); 12486 case ISD::BITCAST: return LowerBITCAST(Op, DAG); 12487 case ISD::ADDC: 12488 case ISD::ADDE: 12489 case ISD::SUBC: 12490 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); 12491 case ISD::ADD: return LowerADD(Op, DAG); 12492 case ISD::SUB: return LowerSUB(Op, DAG); 12493 case ISD::SDIV: return LowerSDIV(Op, DAG); 12494 case ISD::FSINCOS: return LowerFSINCOS(Op, DAG); 12495 } 12496} 12497 12498static void ReplaceATOMIC_LOAD(SDNode *Node, 12499 SmallVectorImpl<SDValue> &Results, 12500 SelectionDAG &DAG) { 12501 SDLoc dl(Node); 12502 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT(); 12503 12504 // Convert wide load -> cmpxchg8b/cmpxchg16b 12505 // FIXME: On 32-bit, load -> fild or movq would be more efficient 12506 // (The only way to get a 16-byte load is cmpxchg16b) 12507 // FIXME: 16-byte ATOMIC_CMP_SWAP isn't actually hooked up at the moment. 
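// The idea below: issue a CMPXCHG8B/16B with expected == new == 0. If memory
// holds 0 it stores 0 back (no visible change); otherwise the compare fails
// and the current value lands in the result registers. Either way, the full
// width is read atomically. Illustrative i686 sequence, assuming the address
// is in %esi:
//   xorl %eax, %eax ; xorl %edx, %edx
//   xorl %ebx, %ebx ; xorl %ecx, %ecx
//   lock cmpxchg8b (%esi)        # EDX:EAX now holds the 64-bit value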
12508 SDValue Zero = DAG.getConstant(0, VT); 12509 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, dl, VT, 12510 Node->getOperand(0), 12511 Node->getOperand(1), Zero, Zero, 12512 cast<AtomicSDNode>(Node)->getMemOperand(), 12513 cast<AtomicSDNode>(Node)->getOrdering(), 12514 cast<AtomicSDNode>(Node)->getSynchScope()); 12515 Results.push_back(Swap.getValue(0)); 12516 Results.push_back(Swap.getValue(1)); 12517} 12518 12519static void 12520ReplaceATOMIC_BINARY_64(SDNode *Node, SmallVectorImpl<SDValue>&Results, 12521 SelectionDAG &DAG, unsigned NewOp) { 12522 SDLoc dl(Node); 12523 assert (Node->getValueType(0) == MVT::i64 && 12524 "Only know how to expand i64 atomics"); 12525 12526 SDValue Chain = Node->getOperand(0); 12527 SDValue In1 = Node->getOperand(1); 12528 SDValue In2L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 12529 Node->getOperand(2), DAG.getIntPtrConstant(0)); 12530 SDValue In2H = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 12531 Node->getOperand(2), DAG.getIntPtrConstant(1)); 12532 SDValue Ops[] = { Chain, In1, In2L, In2H }; 12533 SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 12534 SDValue Result = 12535 DAG.getMemIntrinsicNode(NewOp, dl, Tys, Ops, array_lengthof(Ops), MVT::i64, 12536 cast<MemSDNode>(Node)->getMemOperand()); 12537 SDValue OpsF[] = { Result.getValue(0), Result.getValue(1)}; 12538 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2)); 12539 Results.push_back(Result.getValue(2)); 12540} 12541 12542/// ReplaceNodeResults - Replace a node with an illegal result type 12543/// with a new node built out of custom code. 12544void X86TargetLowering::ReplaceNodeResults(SDNode *N, 12545 SmallVectorImpl<SDValue>&Results, 12546 SelectionDAG &DAG) const { 12547 SDLoc dl(N); 12548 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 12549 switch (N->getOpcode()) { 12550 default: 12551 llvm_unreachable("Do not know how to custom type legalize this operation!"); 12552 case ISD::SIGN_EXTEND_INREG: 12553 case ISD::ADDC: 12554 case ISD::ADDE: 12555 case ISD::SUBC: 12556 case ISD::SUBE: 12557 // We don't want to expand or promote these. 12558 return; 12559 case ISD::FP_TO_SINT: 12560 case ISD::FP_TO_UINT: { 12561 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT; 12562 12563 if (!IsSigned && !isIntegerTypeFTOL(SDValue(N, 0).getValueType())) 12564 return; 12565 12566 std::pair<SDValue,SDValue> Vals = 12567 FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true); 12568 SDValue FIST = Vals.first, StackSlot = Vals.second; 12569 if (FIST.getNode() != 0) { 12570 EVT VT = N->getValueType(0); 12571 // Return a load from the stack slot. 
12572 if (StackSlot.getNode() != 0) 12573 Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot, 12574 MachinePointerInfo(), 12575 false, false, false, 0)); 12576 else 12577 Results.push_back(FIST); 12578 } 12579 return; 12580 } 12581 case ISD::UINT_TO_FP: { 12582 assert(Subtarget->hasSSE2() && "Requires at least SSE2!"); 12583 if (N->getOperand(0).getValueType() != MVT::v2i32 || 12584 N->getValueType(0) != MVT::v2f32) 12585 return; 12586 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64, 12587 N->getOperand(0)); 12588 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), 12589 MVT::f64); 12590 SDValue VBias = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2f64, Bias, Bias); 12591 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn, 12592 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, VBias)); 12593 Or = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or); 12594 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias); 12595 Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub)); 12596 return; 12597 } 12598 case ISD::FP_ROUND: { 12599 if (!TLI.isTypeLegal(N->getOperand(0).getValueType())) 12600 return; 12601 SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0)); 12602 Results.push_back(V); 12603 return; 12604 } 12605 case ISD::READCYCLECOUNTER: { 12606 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 12607 SDValue TheChain = N->getOperand(0); 12608 SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1); 12609 SDValue eax = DAG.getCopyFromReg(rd, dl, X86::EAX, MVT::i32, 12610 rd.getValue(1)); 12611 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), dl, X86::EDX, MVT::i32, 12612 eax.getValue(2)); 12613 // Use a buildpair to merge the two 32-bit values into a 64-bit one. 12614 SDValue Ops[] = { eax, edx }; 12615 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops, 12616 array_lengthof(Ops))); 12617 Results.push_back(edx.getValue(1)); 12618 return; 12619 } 12620 case ISD::ATOMIC_CMP_SWAP: { 12621 EVT T = N->getValueType(0); 12622 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair"); 12623 bool Regs64bit = T == MVT::i128; 12624 EVT HalfT = Regs64bit ? MVT::i64 : MVT::i32; 12625 SDValue cpInL, cpInH; 12626 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2), 12627 DAG.getConstant(0, HalfT)); 12628 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2), 12629 DAG.getConstant(1, HalfT)); 12630 cpInL = DAG.getCopyToReg(N->getOperand(0), dl, 12631 Regs64bit ? X86::RAX : X86::EAX, 12632 cpInL, SDValue()); 12633 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl, 12634 Regs64bit ? X86::RDX : X86::EDX, 12635 cpInH, cpInL.getValue(1)); 12636 SDValue swapInL, swapInH; 12637 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3), 12638 DAG.getConstant(0, HalfT)); 12639 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3), 12640 DAG.getConstant(1, HalfT)); 12641 swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl, 12642 Regs64bit ? X86::RBX : X86::EBX, 12643 swapInL, cpInH.getValue(1)); 12644 swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl, 12645 Regs64bit ? X86::RCX : X86::ECX, 12646 swapInH, swapInL.getValue(1)); 12647 SDValue Ops[] = { swapInH.getValue(0), 12648 N->getOperand(1), 12649 swapInH.getValue(1) }; 12650 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 12651 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); 12652 unsigned Opcode = Regs64bit ? 
X86ISD::LCMPXCHG16_DAG : 12653 X86ISD::LCMPXCHG8_DAG; 12654 SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, 12655 Ops, array_lengthof(Ops), T, MMO); 12656 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl, 12657 Regs64bit ? X86::RAX : X86::EAX, 12658 HalfT, Result.getValue(1)); 12659 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl, 12660 Regs64bit ? X86::RDX : X86::EDX, 12661 HalfT, cpOutL.getValue(2)); 12662 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)}; 12663 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF, 2)); 12664 Results.push_back(cpOutH.getValue(1)); 12665 return; 12666 } 12667 case ISD::ATOMIC_LOAD_ADD: 12668 case ISD::ATOMIC_LOAD_AND: 12669 case ISD::ATOMIC_LOAD_NAND: 12670 case ISD::ATOMIC_LOAD_OR: 12671 case ISD::ATOMIC_LOAD_SUB: 12672 case ISD::ATOMIC_LOAD_XOR: 12673 case ISD::ATOMIC_LOAD_MAX: 12674 case ISD::ATOMIC_LOAD_MIN: 12675 case ISD::ATOMIC_LOAD_UMAX: 12676 case ISD::ATOMIC_LOAD_UMIN: 12677 case ISD::ATOMIC_SWAP: { 12678 unsigned Opc; 12679 switch (N->getOpcode()) { 12680 default: llvm_unreachable("Unexpected opcode"); 12681 case ISD::ATOMIC_LOAD_ADD: 12682 Opc = X86ISD::ATOMADD64_DAG; 12683 break; 12684 case ISD::ATOMIC_LOAD_AND: 12685 Opc = X86ISD::ATOMAND64_DAG; 12686 break; 12687 case ISD::ATOMIC_LOAD_NAND: 12688 Opc = X86ISD::ATOMNAND64_DAG; 12689 break; 12690 case ISD::ATOMIC_LOAD_OR: 12691 Opc = X86ISD::ATOMOR64_DAG; 12692 break; 12693 case ISD::ATOMIC_LOAD_SUB: 12694 Opc = X86ISD::ATOMSUB64_DAG; 12695 break; 12696 case ISD::ATOMIC_LOAD_XOR: 12697 Opc = X86ISD::ATOMXOR64_DAG; 12698 break; 12699 case ISD::ATOMIC_LOAD_MAX: 12700 Opc = X86ISD::ATOMMAX64_DAG; 12701 break; 12702 case ISD::ATOMIC_LOAD_MIN: 12703 Opc = X86ISD::ATOMMIN64_DAG; 12704 break; 12705 case ISD::ATOMIC_LOAD_UMAX: 12706 Opc = X86ISD::ATOMUMAX64_DAG; 12707 break; 12708 case ISD::ATOMIC_LOAD_UMIN: 12709 Opc = X86ISD::ATOMUMIN64_DAG; 12710 break; 12711 case ISD::ATOMIC_SWAP: 12712 Opc = X86ISD::ATOMSWAP64_DAG; 12713 break; 12714 } 12715 ReplaceATOMIC_BINARY_64(N, Results, DAG, Opc); 12716 return; 12717 } 12718 case ISD::ATOMIC_LOAD: 12719 ReplaceATOMIC_LOAD(N, Results, DAG); 12720 } 12721} 12722 12723const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { 12724 switch (Opcode) { 12725 default: return NULL; 12726 case X86ISD::BSF: return "X86ISD::BSF"; 12727 case X86ISD::BSR: return "X86ISD::BSR"; 12728 case X86ISD::SHLD: return "X86ISD::SHLD"; 12729 case X86ISD::SHRD: return "X86ISD::SHRD"; 12730 case X86ISD::FAND: return "X86ISD::FAND"; 12731 case X86ISD::FOR: return "X86ISD::FOR"; 12732 case X86ISD::FXOR: return "X86ISD::FXOR"; 12733 case X86ISD::FSRL: return "X86ISD::FSRL"; 12734 case X86ISD::FILD: return "X86ISD::FILD"; 12735 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG"; 12736 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM"; 12737 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM"; 12738 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM"; 12739 case X86ISD::FLD: return "X86ISD::FLD"; 12740 case X86ISD::FST: return "X86ISD::FST"; 12741 case X86ISD::CALL: return "X86ISD::CALL"; 12742 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG"; 12743 case X86ISD::BT: return "X86ISD::BT"; 12744 case X86ISD::CMP: return "X86ISD::CMP"; 12745 case X86ISD::COMI: return "X86ISD::COMI"; 12746 case X86ISD::UCOMI: return "X86ISD::UCOMI"; 12747 case X86ISD::SETCC: return "X86ISD::SETCC"; 12748 case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY"; 12749 case X86ISD::FSETCCsd: return 
"X86ISD::FSETCCsd"; 12750 case X86ISD::FSETCCss: return "X86ISD::FSETCCss"; 12751 case X86ISD::CMOV: return "X86ISD::CMOV"; 12752 case X86ISD::BRCOND: return "X86ISD::BRCOND"; 12753 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG"; 12754 case X86ISD::REP_STOS: return "X86ISD::REP_STOS"; 12755 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS"; 12756 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg"; 12757 case X86ISD::Wrapper: return "X86ISD::Wrapper"; 12758 case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP"; 12759 case X86ISD::PEXTRB: return "X86ISD::PEXTRB"; 12760 case X86ISD::PEXTRW: return "X86ISD::PEXTRW"; 12761 case X86ISD::INSERTPS: return "X86ISD::INSERTPS"; 12762 case X86ISD::PINSRB: return "X86ISD::PINSRB"; 12763 case X86ISD::PINSRW: return "X86ISD::PINSRW"; 12764 case X86ISD::PSHUFB: return "X86ISD::PSHUFB"; 12765 case X86ISD::ANDNP: return "X86ISD::ANDNP"; 12766 case X86ISD::PSIGN: return "X86ISD::PSIGN"; 12767 case X86ISD::BLENDV: return "X86ISD::BLENDV"; 12768 case X86ISD::BLENDI: return "X86ISD::BLENDI"; 12769 case X86ISD::SUBUS: return "X86ISD::SUBUS"; 12770 case X86ISD::HADD: return "X86ISD::HADD"; 12771 case X86ISD::HSUB: return "X86ISD::HSUB"; 12772 case X86ISD::FHADD: return "X86ISD::FHADD"; 12773 case X86ISD::FHSUB: return "X86ISD::FHSUB"; 12774 case X86ISD::UMAX: return "X86ISD::UMAX"; 12775 case X86ISD::UMIN: return "X86ISD::UMIN"; 12776 case X86ISD::SMAX: return "X86ISD::SMAX"; 12777 case X86ISD::SMIN: return "X86ISD::SMIN"; 12778 case X86ISD::FMAX: return "X86ISD::FMAX"; 12779 case X86ISD::FMIN: return "X86ISD::FMIN"; 12780 case X86ISD::FMAXC: return "X86ISD::FMAXC"; 12781 case X86ISD::FMINC: return "X86ISD::FMINC"; 12782 case X86ISD::FRSQRT: return "X86ISD::FRSQRT"; 12783 case X86ISD::FRCP: return "X86ISD::FRCP"; 12784 case X86ISD::TLSADDR: return "X86ISD::TLSADDR"; 12785 case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR"; 12786 case X86ISD::TLSCALL: return "X86ISD::TLSCALL"; 12787 case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP"; 12788 case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP"; 12789 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN"; 12790 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN"; 12791 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m"; 12792 case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r"; 12793 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG"; 12794 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG"; 12795 case X86ISD::ATOMADD64_DAG: return "X86ISD::ATOMADD64_DAG"; 12796 case X86ISD::ATOMSUB64_DAG: return "X86ISD::ATOMSUB64_DAG"; 12797 case X86ISD::ATOMOR64_DAG: return "X86ISD::ATOMOR64_DAG"; 12798 case X86ISD::ATOMXOR64_DAG: return "X86ISD::ATOMXOR64_DAG"; 12799 case X86ISD::ATOMAND64_DAG: return "X86ISD::ATOMAND64_DAG"; 12800 case X86ISD::ATOMNAND64_DAG: return "X86ISD::ATOMNAND64_DAG"; 12801 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL"; 12802 case X86ISD::VSEXT_MOVL: return "X86ISD::VSEXT_MOVL"; 12803 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD"; 12804 case X86ISD::VZEXT: return "X86ISD::VZEXT"; 12805 case X86ISD::VSEXT: return "X86ISD::VSEXT"; 12806 case X86ISD::VFPEXT: return "X86ISD::VFPEXT"; 12807 case X86ISD::VFPROUND: return "X86ISD::VFPROUND"; 12808 case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ"; 12809 case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ"; 12810 case X86ISD::VSHL: return "X86ISD::VSHL"; 12811 case X86ISD::VSRL: return "X86ISD::VSRL"; 12812 case X86ISD::VSRA: return "X86ISD::VSRA"; 12813 case X86ISD::VSHLI: return "X86ISD::VSHLI"; 12814 case 
X86ISD::VSRLI: return "X86ISD::VSRLI"; 12815 case X86ISD::VSRAI: return "X86ISD::VSRAI"; 12816 case X86ISD::CMPP: return "X86ISD::CMPP"; 12817 case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ"; 12818 case X86ISD::PCMPGT: return "X86ISD::PCMPGT"; 12819 case X86ISD::ADD: return "X86ISD::ADD"; 12820 case X86ISD::SUB: return "X86ISD::SUB"; 12821 case X86ISD::ADC: return "X86ISD::ADC"; 12822 case X86ISD::SBB: return "X86ISD::SBB"; 12823 case X86ISD::SMUL: return "X86ISD::SMUL"; 12824 case X86ISD::UMUL: return "X86ISD::UMUL"; 12825 case X86ISD::INC: return "X86ISD::INC"; 12826 case X86ISD::DEC: return "X86ISD::DEC"; 12827 case X86ISD::OR: return "X86ISD::OR"; 12828 case X86ISD::XOR: return "X86ISD::XOR"; 12829 case X86ISD::AND: return "X86ISD::AND"; 12830 case X86ISD::BLSI: return "X86ISD::BLSI"; 12831 case X86ISD::BLSMSK: return "X86ISD::BLSMSK"; 12832 case X86ISD::BLSR: return "X86ISD::BLSR"; 12833 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM"; 12834 case X86ISD::PTEST: return "X86ISD::PTEST"; 12835 case X86ISD::TESTP: return "X86ISD::TESTP"; 12836 case X86ISD::PALIGNR: return "X86ISD::PALIGNR"; 12837 case X86ISD::PSHUFD: return "X86ISD::PSHUFD"; 12838 case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW"; 12839 case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW"; 12840 case X86ISD::SHUFP: return "X86ISD::SHUFP"; 12841 case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS"; 12842 case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD"; 12843 case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS"; 12844 case X86ISD::MOVLPS: return "X86ISD::MOVLPS"; 12845 case X86ISD::MOVLPD: return "X86ISD::MOVLPD"; 12846 case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP"; 12847 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP"; 12848 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP"; 12849 case X86ISD::MOVSD: return "X86ISD::MOVSD"; 12850 case X86ISD::MOVSS: return "X86ISD::MOVSS"; 12851 case X86ISD::UNPCKL: return "X86ISD::UNPCKL"; 12852 case X86ISD::UNPCKH: return "X86ISD::UNPCKH"; 12853 case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST"; 12854 case X86ISD::VPERMILP: return "X86ISD::VPERMILP"; 12855 case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128"; 12856 case X86ISD::VPERMV: return "X86ISD::VPERMV"; 12857 case X86ISD::VPERMI: return "X86ISD::VPERMI"; 12858 case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ"; 12859 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS"; 12860 case X86ISD::VAARG_64: return "X86ISD::VAARG_64"; 12861 case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA"; 12862 case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER"; 12863 case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA"; 12864 case X86ISD::WIN_FTOL: return "X86ISD::WIN_FTOL"; 12865 case X86ISD::SAHF: return "X86ISD::SAHF"; 12866 case X86ISD::RDRAND: return "X86ISD::RDRAND"; 12867 case X86ISD::RDSEED: return "X86ISD::RDSEED"; 12868 case X86ISD::FMADD: return "X86ISD::FMADD"; 12869 case X86ISD::FMSUB: return "X86ISD::FMSUB"; 12870 case X86ISD::FNMADD: return "X86ISD::FNMADD"; 12871 case X86ISD::FNMSUB: return "X86ISD::FNMSUB"; 12872 case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB"; 12873 case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD"; 12874 case X86ISD::PCMPESTRI: return "X86ISD::PCMPESTRI"; 12875 case X86ISD::PCMPISTRI: return "X86ISD::PCMPISTRI"; 12876 case X86ISD::XTEST: return "X86ISD::XTEST"; 12877 } 12878} 12879 12880// isLegalAddressingMode - Return true if the addressing mode represented 12881// by AM is legal for this target, for a load/store of the specified type. 
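// Schematically, the most general mode this can accept is
//   BaseGV + BaseOffs + BaseReg + Scale*IndexReg
// e.g. (illustrative)  leaq sym+8(%rdi,%rsi,4), %rax  uses every piece at
// once, subject to the code-model and PIC restrictions checked below.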
12882bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM, 12883 Type *Ty) const { 12884 // X86 supports extremely general addressing modes. 12885 CodeModel::Model M = getTargetMachine().getCodeModel(); 12886 Reloc::Model R = getTargetMachine().getRelocationModel(); 12887 12888 // X86 allows a sign-extended 32-bit immediate field as a displacement. 12889 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != NULL)) 12890 return false; 12891 12892 if (AM.BaseGV) { 12893 unsigned GVFlags = 12894 Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine()); 12895 12896 // If a reference to this global requires an extra load, we can't fold it. 12897 if (isGlobalStubReference(GVFlags)) 12898 return false; 12899 12900 // If BaseGV requires a register for the PIC base, we cannot also have a 12901 // BaseReg specified. 12902 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags)) 12903 return false; 12904 12905 // If lower 4G is not available, then we must use rip-relative addressing. 12906 if ((M != CodeModel::Small || R != Reloc::Static) && 12907 Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1)) 12908 return false; 12909 } 12910 12911 switch (AM.Scale) { 12912 case 0: 12913 case 1: 12914 case 2: 12915 case 4: 12916 case 8: 12917 // These scales always work. 12918 break; 12919 case 3: 12920 case 5: 12921 case 9: 12922 // These scales are formed with basereg+scalereg. Only accept if there is 12923 // no basereg yet. 12924 if (AM.HasBaseReg) 12925 return false; 12926 break; 12927 default: // Other stuff never works. 12928 return false; 12929 } 12930 12931 return true; 12932} 12933 12934bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const { 12935 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) 12936 return false; 12937 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); 12938 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); 12939 return NumBits1 > NumBits2; 12940} 12941 12942bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const { 12943 return isInt<32>(Imm); 12944} 12945 12946bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const { 12947 // Can also use sub to handle negated immediates. 12948 return isInt<32>(Imm); 12949} 12950 12951bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { 12952 if (!VT1.isInteger() || !VT2.isInteger()) 12953 return false; 12954 unsigned NumBits1 = VT1.getSizeInBits(); 12955 unsigned NumBits2 = VT2.getSizeInBits(); 12956 return NumBits1 > NumBits2; 12957} 12958 12959bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const { 12960 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers. 12961 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit(); 12962} 12963 12964bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const { 12965 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers. 12966 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit(); 12967} 12968 12969bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const { 12970 EVT VT1 = Val.getValueType(); 12971 if (isZExtFree(VT1, VT2)) 12972 return true; 12973 12974 if (Val.getOpcode() != ISD::LOAD) 12975 return false; 12976 12977 if (!VT1.isSimple() || !VT1.isInteger() || 12978 !VT2.isSimple() || !VT2.isInteger()) 12979 return false; 12980 12981 switch (VT1.getSimpleVT().SimpleTy) { 12982 default: break; 12983 case MVT::i8: 12984 case MVT::i16: 12985 case MVT::i32: 12986 // X86 has 8, 16, and 32-bit zero-extending loads. 
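    // (movzbl/movzwl handle the i8/i16 cases, and a plain 32-bit mov
    // implicitly zeroes the upper half of the 64-bit register, so the
    // extension costs nothing on top of the load.)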
12987 return true; 12988 } 12989 12990 return false; 12991} 12992 12993bool 12994X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { 12995 if (!(Subtarget->hasFMA() || Subtarget->hasFMA4())) 12996 return false; 12997 12998 VT = VT.getScalarType(); 12999 13000 if (!VT.isSimple()) 13001 return false; 13002 13003 switch (VT.getSimpleVT().SimpleTy) { 13004 case MVT::f32: 13005 case MVT::f64: 13006 return true; 13007 default: 13008 break; 13009 } 13010 13011 return false; 13012} 13013 13014bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const { 13015 // i16 instructions are longer (0x66 prefix) and potentially slower. 13016 return !(VT1 == MVT::i32 && VT2 == MVT::i16); 13017} 13018 13019/// isShuffleMaskLegal - Targets can use this to indicate that they only 13020/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 13021/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 13022/// are assumed to be legal. 13023bool 13024X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, 13025 EVT VT) const { 13026 // Very little shuffling can be done for 64-bit vectors right now. 13027 if (VT.getSizeInBits() == 64) 13028 return false; 13029 13030 // FIXME: pshufb, blends, shifts. 13031 return (VT.getVectorNumElements() == 2 || 13032 ShuffleVectorSDNode::isSplatMask(&M[0], VT) || 13033 isMOVLMask(M, VT) || 13034 isSHUFPMask(M, VT, Subtarget->hasFp256()) || 13035 isPSHUFDMask(M, VT) || 13036 isPSHUFHWMask(M, VT, Subtarget->hasInt256()) || 13037 isPSHUFLWMask(M, VT, Subtarget->hasInt256()) || 13038 isPALIGNRMask(M, VT, Subtarget) || 13039 isUNPCKLMask(M, VT, Subtarget->hasInt256()) || 13040 isUNPCKHMask(M, VT, Subtarget->hasInt256()) || 13041 isUNPCKL_v_undef_Mask(M, VT, Subtarget->hasInt256()) || 13042 isUNPCKH_v_undef_Mask(M, VT, Subtarget->hasInt256())); 13043} 13044 13045bool 13046X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask, 13047 EVT VT) const { 13048 unsigned NumElts = VT.getVectorNumElements(); 13049 // FIXME: This collection of masks seems suspect. 13050 if (NumElts == 2) 13051 return true; 13052 if (NumElts == 4 && VT.is128BitVector()) { 13053 return (isMOVLMask(Mask, VT) || 13054 isCommutedMOVLMask(Mask, VT, true) || 13055 isSHUFPMask(Mask, VT, Subtarget->hasFp256()) || 13056 isSHUFPMask(Mask, VT, Subtarget->hasFp256(), /* Commuted */ true)); 13057 } 13058 return false; 13059} 13060 13061//===----------------------------------------------------------------------===// 13062// X86 Scheduler Hooks 13063//===----------------------------------------------------------------------===// 13064 13065/// Utility function to emit xbegin specifying the start of an RTM region. 13066static MachineBasicBlock *EmitXBegin(MachineInstr *MI, MachineBasicBlock *MBB, 13067 const TargetInstrInfo *TII) { 13068 DebugLoc DL = MI->getDebugLoc(); 13069 13070 const BasicBlock *BB = MBB->getBasicBlock(); 13071 MachineFunction::iterator I = MBB; 13072 ++I; 13073 13074 // For the v = xbegin(), we generate 13075 // 13076 // thisMBB: 13077 // xbegin sinkMBB 13078 // 13079 // mainMBB: 13080 // eax = -1 13081 // 13082 // sinkMBB: 13083 // v = eax 13084 13085 MachineBasicBlock *thisMBB = MBB; 13086 MachineFunction *MF = MBB->getParent(); 13087 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 13088 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 13089 MF->insert(I, mainMBB); 13090 MF->insert(I, sinkMBB); 13091 13092 // Transfer the remainder of BB and its successor edges to sinkMBB. 
13093  sinkMBB->splice(sinkMBB->begin(), MBB,
13094                  llvm::next(MachineBasicBlock::iterator(MI)), MBB->end());
13095  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
13096
13097  // thisMBB:
13098  //  xbegin sinkMBB
13099  //  # fallthrough to mainMBB
13100  //  # abort to sinkMBB
13101  BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(sinkMBB);
13102  thisMBB->addSuccessor(mainMBB);
13103  thisMBB->addSuccessor(sinkMBB);
13104
13105  // mainMBB:
13106  //  EAX = -1
13107  BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), X86::EAX).addImm(-1);
13108  mainMBB->addSuccessor(sinkMBB);
13109
13110  // sinkMBB:
13111  // EAX is live into the sinkMBB
13112  sinkMBB->addLiveIn(X86::EAX);
13113  BuildMI(*sinkMBB, sinkMBB->begin(), DL,
13114          TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
13115    .addReg(X86::EAX);
13116
13117  MI->eraseFromParent();
13118  return sinkMBB;
13119}
13120
13121// Get CMPXCHG opcode for the specified data type.
13122static unsigned getCmpXChgOpcode(EVT VT) {
13123  switch (VT.getSimpleVT().SimpleTy) {
13124  case MVT::i8:  return X86::LCMPXCHG8;
13125  case MVT::i16: return X86::LCMPXCHG16;
13126  case MVT::i32: return X86::LCMPXCHG32;
13127  case MVT::i64: return X86::LCMPXCHG64;
13128  default:
13129    break;
13130  }
13131  llvm_unreachable("Invalid operand size!");
13132}
13133
13134// Get LOAD opcode for the specified data type.
13135static unsigned getLoadOpcode(EVT VT) {
13136  switch (VT.getSimpleVT().SimpleTy) {
13137  case MVT::i8:  return X86::MOV8rm;
13138  case MVT::i16: return X86::MOV16rm;
13139  case MVT::i32: return X86::MOV32rm;
13140  case MVT::i64: return X86::MOV64rm;
13141  default:
13142    break;
13143  }
13144  llvm_unreachable("Invalid operand size!");
13145}
13146
13147// Get the opcode of the non-atomic equivalent of the given atomic instruction.
13148static unsigned getNonAtomicOpcode(unsigned Opc) {
13149  switch (Opc) {
13150  case X86::ATOMAND8:  return X86::AND8rr;
13151  case X86::ATOMAND16: return X86::AND16rr;
13152  case X86::ATOMAND32: return X86::AND32rr;
13153  case X86::ATOMAND64: return X86::AND64rr;
13154  case X86::ATOMOR8:   return X86::OR8rr;
13155  case X86::ATOMOR16:  return X86::OR16rr;
13156  case X86::ATOMOR32:  return X86::OR32rr;
13157  case X86::ATOMOR64:  return X86::OR64rr;
13158  case X86::ATOMXOR8:  return X86::XOR8rr;
13159  case X86::ATOMXOR16: return X86::XOR16rr;
13160  case X86::ATOMXOR32: return X86::XOR32rr;
13161  case X86::ATOMXOR64: return X86::XOR64rr;
13162  }
13163  llvm_unreachable("Unhandled atomic-load-op opcode!");
13164}
13165
13166// Get the opcode of the non-atomic equivalent of the specified atomic
13167// instruction, plus the extra opcode it needs.
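// For the NAND flavours the extra opcode is the NOT that post-inverts the AND
// result; for the MIN/MAX flavours it is the CMP that sets up EFLAGS for the
// CMOV returned as the primary opcode.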
13168static unsigned getNonAtomicOpcodeWithExtraOpc(unsigned Opc, 13169 unsigned &ExtraOpc) { 13170 switch (Opc) { 13171 case X86::ATOMNAND8: ExtraOpc = X86::NOT8r; return X86::AND8rr; 13172 case X86::ATOMNAND16: ExtraOpc = X86::NOT16r; return X86::AND16rr; 13173 case X86::ATOMNAND32: ExtraOpc = X86::NOT32r; return X86::AND32rr; 13174 case X86::ATOMNAND64: ExtraOpc = X86::NOT64r; return X86::AND64rr; 13175 case X86::ATOMMAX8: ExtraOpc = X86::CMP8rr; return X86::CMOVL32rr; 13176 case X86::ATOMMAX16: ExtraOpc = X86::CMP16rr; return X86::CMOVL16rr; 13177 case X86::ATOMMAX32: ExtraOpc = X86::CMP32rr; return X86::CMOVL32rr; 13178 case X86::ATOMMAX64: ExtraOpc = X86::CMP64rr; return X86::CMOVL64rr; 13179 case X86::ATOMMIN8: ExtraOpc = X86::CMP8rr; return X86::CMOVG32rr; 13180 case X86::ATOMMIN16: ExtraOpc = X86::CMP16rr; return X86::CMOVG16rr; 13181 case X86::ATOMMIN32: ExtraOpc = X86::CMP32rr; return X86::CMOVG32rr; 13182 case X86::ATOMMIN64: ExtraOpc = X86::CMP64rr; return X86::CMOVG64rr; 13183 case X86::ATOMUMAX8: ExtraOpc = X86::CMP8rr; return X86::CMOVB32rr; 13184 case X86::ATOMUMAX16: ExtraOpc = X86::CMP16rr; return X86::CMOVB16rr; 13185 case X86::ATOMUMAX32: ExtraOpc = X86::CMP32rr; return X86::CMOVB32rr; 13186 case X86::ATOMUMAX64: ExtraOpc = X86::CMP64rr; return X86::CMOVB64rr; 13187 case X86::ATOMUMIN8: ExtraOpc = X86::CMP8rr; return X86::CMOVA32rr; 13188 case X86::ATOMUMIN16: ExtraOpc = X86::CMP16rr; return X86::CMOVA16rr; 13189 case X86::ATOMUMIN32: ExtraOpc = X86::CMP32rr; return X86::CMOVA32rr; 13190 case X86::ATOMUMIN64: ExtraOpc = X86::CMP64rr; return X86::CMOVA64rr; 13191 } 13192 llvm_unreachable("Unhandled atomic-load-op opcode!"); 13193} 13194 13195// Get opcode of the non-atomic one from the specified atomic instruction for 13196// 64-bit data type on 32-bit target. 13197static unsigned getNonAtomic6432Opcode(unsigned Opc, unsigned &HiOpc) { 13198 switch (Opc) { 13199 case X86::ATOMAND6432: HiOpc = X86::AND32rr; return X86::AND32rr; 13200 case X86::ATOMOR6432: HiOpc = X86::OR32rr; return X86::OR32rr; 13201 case X86::ATOMXOR6432: HiOpc = X86::XOR32rr; return X86::XOR32rr; 13202 case X86::ATOMADD6432: HiOpc = X86::ADC32rr; return X86::ADD32rr; 13203 case X86::ATOMSUB6432: HiOpc = X86::SBB32rr; return X86::SUB32rr; 13204 case X86::ATOMSWAP6432: HiOpc = X86::MOV32rr; return X86::MOV32rr; 13205 case X86::ATOMMAX6432: HiOpc = X86::SETLr; return X86::SETLr; 13206 case X86::ATOMMIN6432: HiOpc = X86::SETGr; return X86::SETGr; 13207 case X86::ATOMUMAX6432: HiOpc = X86::SETBr; return X86::SETBr; 13208 case X86::ATOMUMIN6432: HiOpc = X86::SETAr; return X86::SETAr; 13209 } 13210 llvm_unreachable("Unhandled atomic-load-op opcode!"); 13211} 13212 13213// Get opcode of the non-atomic one from the specified atomic instruction for 13214// 64-bit data type on 32-bit target with extra opcode. 13215static unsigned getNonAtomic6432OpcodeWithExtraOpc(unsigned Opc, 13216 unsigned &HiOpc, 13217 unsigned &ExtraOpc) { 13218 switch (Opc) { 13219 case X86::ATOMNAND6432: 13220 ExtraOpc = X86::NOT32r; 13221 HiOpc = X86::AND32rr; 13222 return X86::AND32rr; 13223 } 13224 llvm_unreachable("Unhandled atomic-load-op opcode!"); 13225} 13226 13227// Get pseudo CMOV opcode from the specified data type. 
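// These CMOV_GRxx pseudos exist for subtargets without native CMOV; the
// custom inserter (EmitLoweredSelect) later expands each of them into an
// explicit branch diamond.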
13228static unsigned getPseudoCMOVOpc(EVT VT) {
13229  switch (VT.getSimpleVT().SimpleTy) {
13230  case MVT::i8:  return X86::CMOV_GR8;
13231  case MVT::i16: return X86::CMOV_GR16;
13232  case MVT::i32: return X86::CMOV_GR32;
13233  default:
13234    break;
13235  }
13236  llvm_unreachable("Unknown CMOV opcode!");
13237}
13238
13239// EmitAtomicLoadArith - emit the code sequence for pseudo atomic instructions.
13240// They will be translated into a spin-loop or compare-exchange loop from
13241//
13242//    ...
13243//    dst = atomic-fetch-op MI.addr, MI.val
13244//    ...
13245//
13246// to
13247//
13248//    ...
13249//    t1 = LOAD MI.addr
13250// loop:
13251//    t4 = phi(t1, t3 / loop)
13252//    t2 = OP MI.val, t4
13253//    EAX = t4
13254//    LCMPXCHG [MI.addr], t2, [EAX is implicitly used & defined]
13255//    t3 = EAX
13256//    JNE loop
13257// sink:
13258//    dst = t3
13259//    ...
13260MachineBasicBlock *
13261X86TargetLowering::EmitAtomicLoadArith(MachineInstr *MI,
13262                                       MachineBasicBlock *MBB) const {
13263  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
13264  DebugLoc DL = MI->getDebugLoc();
13265
13266  MachineFunction *MF = MBB->getParent();
13267  MachineRegisterInfo &MRI = MF->getRegInfo();
13268
13269  const BasicBlock *BB = MBB->getBasicBlock();
13270  MachineFunction::iterator I = MBB;
13271  ++I;
13272
13273  assert(MI->getNumOperands() <= X86::AddrNumOperands + 4 &&
13274         "Unexpected number of operands");
13275
13276  assert(MI->hasOneMemOperand() &&
13277         "Expected atomic-load-op to have one memoperand");
13278
13279  // Memory Reference
13280  MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
13281  MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
13282
13283  unsigned DstReg, SrcReg;
13284  unsigned MemOpndSlot;
13285
13286  unsigned CurOp = 0;
13287
13288  DstReg = MI->getOperand(CurOp++).getReg();
13289  MemOpndSlot = CurOp;
13290  CurOp += X86::AddrNumOperands;
13291  SrcReg = MI->getOperand(CurOp++).getReg();
13292
13293  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
13294  MVT::SimpleValueType VT = *RC->vt_begin();
13295  unsigned t1 = MRI.createVirtualRegister(RC);
13296  unsigned t2 = MRI.createVirtualRegister(RC);
13297  unsigned t3 = MRI.createVirtualRegister(RC);
13298  unsigned t4 = MRI.createVirtualRegister(RC);
13299  unsigned PhyReg = getX86SubSuperRegister(X86::EAX, VT);
13300
13301  unsigned LCMPXCHGOpc = getCmpXChgOpcode(VT);
13302  unsigned LOADOpc = getLoadOpcode(VT);
13303
13304  // For the atomic load-arith operator, we generate
13305  //
13306  //  thisMBB:
13307  //    t1 = LOAD [MI.addr]
13308  //  mainMBB:
13309  //    t4 = phi(t1 / thisMBB, t3 / mainMBB)
13310  //    t2 = OP MI.val, t4
13311  //    EAX = t4
13312  //    LCMPXCHG [MI.addr], t2, [EAX is implicitly used & defined]
13313  //    t3 = EAX
13314  //    JNE mainMBB
13315  //  sinkMBB:
13316  //    dst = t3
13317
13318  MachineBasicBlock *thisMBB = MBB;
13319  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
13320  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
13321  MF->insert(I, mainMBB);
13322  MF->insert(I, sinkMBB);
13323
13324  MachineInstrBuilder MIB;
13325
13326  // Transfer the remainder of BB and its successor edges to sinkMBB.
13327 sinkMBB->splice(sinkMBB->begin(), MBB, 13328 llvm::next(MachineBasicBlock::iterator(MI)), MBB->end()); 13329 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 13330 13331 // thisMBB: 13332 MIB = BuildMI(thisMBB, DL, TII->get(LOADOpc), t1); 13333 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { 13334 MachineOperand NewMO = MI->getOperand(MemOpndSlot + i); 13335 if (NewMO.isReg()) 13336 NewMO.setIsKill(false); 13337 MIB.addOperand(NewMO); 13338 } 13339 for (MachineInstr::mmo_iterator MMOI = MMOBegin; MMOI != MMOEnd; ++MMOI) { 13340 unsigned flags = (*MMOI)->getFlags(); 13341 flags = (flags & ~MachineMemOperand::MOStore) | MachineMemOperand::MOLoad; 13342 MachineMemOperand *MMO = 13343 MF->getMachineMemOperand((*MMOI)->getPointerInfo(), flags, 13344 (*MMOI)->getSize(), 13345 (*MMOI)->getBaseAlignment(), 13346 (*MMOI)->getTBAAInfo(), 13347 (*MMOI)->getRanges()); 13348 MIB.addMemOperand(MMO); 13349 } 13350 13351 thisMBB->addSuccessor(mainMBB); 13352 13353 // mainMBB: 13354 MachineBasicBlock *origMainMBB = mainMBB; 13355 13356 // Add a PHI. 13357 MachineInstr *Phi = BuildMI(mainMBB, DL, TII->get(X86::PHI), t4) 13358 .addReg(t1).addMBB(thisMBB).addReg(t3).addMBB(mainMBB); 13359 13360 unsigned Opc = MI->getOpcode(); 13361 switch (Opc) { 13362 default: 13363 llvm_unreachable("Unhandled atomic-load-op opcode!"); 13364 case X86::ATOMAND8: 13365 case X86::ATOMAND16: 13366 case X86::ATOMAND32: 13367 case X86::ATOMAND64: 13368 case X86::ATOMOR8: 13369 case X86::ATOMOR16: 13370 case X86::ATOMOR32: 13371 case X86::ATOMOR64: 13372 case X86::ATOMXOR8: 13373 case X86::ATOMXOR16: 13374 case X86::ATOMXOR32: 13375 case X86::ATOMXOR64: { 13376 unsigned ARITHOpc = getNonAtomicOpcode(Opc); 13377 BuildMI(mainMBB, DL, TII->get(ARITHOpc), t2).addReg(SrcReg) 13378 .addReg(t4); 13379 break; 13380 } 13381 case X86::ATOMNAND8: 13382 case X86::ATOMNAND16: 13383 case X86::ATOMNAND32: 13384 case X86::ATOMNAND64: { 13385 unsigned Tmp = MRI.createVirtualRegister(RC); 13386 unsigned NOTOpc; 13387 unsigned ANDOpc = getNonAtomicOpcodeWithExtraOpc(Opc, NOTOpc); 13388 BuildMI(mainMBB, DL, TII->get(ANDOpc), Tmp).addReg(SrcReg) 13389 .addReg(t4); 13390 BuildMI(mainMBB, DL, TII->get(NOTOpc), t2).addReg(Tmp); 13391 break; 13392 } 13393 case X86::ATOMMAX8: 13394 case X86::ATOMMAX16: 13395 case X86::ATOMMAX32: 13396 case X86::ATOMMAX64: 13397 case X86::ATOMMIN8: 13398 case X86::ATOMMIN16: 13399 case X86::ATOMMIN32: 13400 case X86::ATOMMIN64: 13401 case X86::ATOMUMAX8: 13402 case X86::ATOMUMAX16: 13403 case X86::ATOMUMAX32: 13404 case X86::ATOMUMAX64: 13405 case X86::ATOMUMIN8: 13406 case X86::ATOMUMIN16: 13407 case X86::ATOMUMIN32: 13408 case X86::ATOMUMIN64: { 13409 unsigned CMPOpc; 13410 unsigned CMOVOpc = getNonAtomicOpcodeWithExtraOpc(Opc, CMPOpc); 13411 13412 BuildMI(mainMBB, DL, TII->get(CMPOpc)) 13413 .addReg(SrcReg) 13414 .addReg(t4); 13415 13416 if (Subtarget->hasCMov()) { 13417 if (VT != MVT::i8) { 13418 // Native support 13419 BuildMI(mainMBB, DL, TII->get(CMOVOpc), t2) 13420 .addReg(SrcReg) 13421 .addReg(t4); 13422 } else { 13423 // Promote i8 to i32 to use CMOV32 13424 const TargetRegisterInfo* TRI = getTargetMachine().getRegisterInfo(); 13425 const TargetRegisterClass *RC32 = 13426 TRI->getSubClassWithSubReg(getRegClassFor(MVT::i32), X86::sub_8bit); 13427 unsigned SrcReg32 = MRI.createVirtualRegister(RC32); 13428 unsigned AccReg32 = MRI.createVirtualRegister(RC32); 13429 unsigned Tmp = MRI.createVirtualRegister(RC32); 13430 13431 unsigned Undef = MRI.createVirtualRegister(RC32); 13432 BuildMI(mainMBB, DL, 
              TII->get(TargetOpcode::IMPLICIT_DEF), Undef);
13433
13434      BuildMI(mainMBB, DL, TII->get(TargetOpcode::INSERT_SUBREG), SrcReg32)
13435        .addReg(Undef)
13436        .addReg(SrcReg)
13437        .addImm(X86::sub_8bit);
13438      BuildMI(mainMBB, DL, TII->get(TargetOpcode::INSERT_SUBREG), AccReg32)
13439        .addReg(Undef)
13440        .addReg(t4)
13441        .addImm(X86::sub_8bit);
13442
13443      BuildMI(mainMBB, DL, TII->get(CMOVOpc), Tmp)
13444        .addReg(SrcReg32)
13445        .addReg(AccReg32);
13446
13447      BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), t2)
13448        .addReg(Tmp, 0, X86::sub_8bit);
13449    }
13450  } else {
13451    // Use pseudo select and lower them.
13452    assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
13453           "Invalid atomic-load-op transformation!");
13454    unsigned SelOpc = getPseudoCMOVOpc(VT);
13455    X86::CondCode CC = X86::getCondFromCMovOpc(CMOVOpc);
13456    assert(CC != X86::COND_INVALID && "Invalid atomic-load-op transformation!");
13457    MIB = BuildMI(mainMBB, DL, TII->get(SelOpc), t2)
13458            .addReg(SrcReg).addReg(t4)
13459            .addImm(CC);
13460    mainMBB = EmitLoweredSelect(MIB, mainMBB);
13461    // Replace the original PHI node as mainMBB is changed after CMOV
13462    // lowering.
13463    BuildMI(*origMainMBB, Phi, DL, TII->get(X86::PHI), t4)
13464      .addReg(t1).addMBB(thisMBB).addReg(t3).addMBB(mainMBB);
13465    Phi->eraseFromParent();
13466    }
13467    break;
13468  }
13469  }
13470
13471  // Copy the expected value t4 into PhyReg (the EAX-family register).
13472  BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), PhyReg)
13473    .addReg(t4);
13474
13475  MIB = BuildMI(mainMBB, DL, TII->get(LCMPXCHGOpc));
13476  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
13477    MachineOperand NewMO = MI->getOperand(MemOpndSlot + i);
13478    if (NewMO.isReg())
13479      NewMO.setIsKill(false);
13480    MIB.addOperand(NewMO);
13481  }
13482  MIB.addReg(t2);
13483  MIB.setMemRefs(MMOBegin, MMOEnd);
13484
13485  // Copy PhyReg, now holding the loaded value, back into virtual register t3.
13486  BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), t3)
13487    .addReg(PhyReg);
13488
13489  BuildMI(mainMBB, DL, TII->get(X86::JNE_4)).addMBB(origMainMBB);
13490
13491  mainMBB->addSuccessor(origMainMBB);
13492  mainMBB->addSuccessor(sinkMBB);
13493
13494  // sinkMBB:
13495  BuildMI(*sinkMBB, sinkMBB->begin(), DL,
13496          TII->get(TargetOpcode::COPY), DstReg)
13497    .addReg(t3);
13498
13499  MI->eraseFromParent();
13500  return sinkMBB;
13501}
13502
13503// EmitAtomicLoadArith6432 - emit the code sequence for pseudo atomic
13504// instructions. They will be translated into a spin-loop or compare-exchange
13505// loop from
13506//
13507//    ...
13508//    dst = atomic-fetch-op MI.addr, MI.val
13509//    ...
13510//
13511// to
13512//
13513//    ...
13514//    t1L = LOAD [MI.addr + 0]
13515//    t1H = LOAD [MI.addr + 4]
13516// loop:
13517//    t4L = phi(t1L, t3L / loop)
13518//    t4H = phi(t1H, t3H / loop)
13519//    t2L = OP MI.val.lo, t4L
13520//    t2H = OP MI.val.hi, t4H
13521//    EAX = t4L
13522//    EDX = t4H
13523//    EBX = t2L
13524//    ECX = t2H
13525//    LCMPXCHG8B [MI.addr], [ECX:EBX & EDX:EAX are implicitly used and EDX:EAX is implicitly defined]
13526//    t3L = EAX
13527//    t3H = EDX
13528//    JNE loop
13529// sink:
13530//    dstL = t3L
13531//    dstH = t3H
13532//    ...
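// Note: LCMPXCHG8B implicitly compares EDX:EAX against the memory operand
// and, on failure, reloads EDX:EAX from memory; that reload is what feeds
// t3H:t3L back around the loop sketched above.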
13533MachineBasicBlock * 13534X86TargetLowering::EmitAtomicLoadArith6432(MachineInstr *MI, 13535 MachineBasicBlock *MBB) const { 13536 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 13537 DebugLoc DL = MI->getDebugLoc(); 13538 13539 MachineFunction *MF = MBB->getParent(); 13540 MachineRegisterInfo &MRI = MF->getRegInfo(); 13541 13542 const BasicBlock *BB = MBB->getBasicBlock(); 13543 MachineFunction::iterator I = MBB; 13544 ++I; 13545 13546 assert(MI->getNumOperands() <= X86::AddrNumOperands + 7 && 13547 "Unexpected number of operands"); 13548 13549 assert(MI->hasOneMemOperand() && 13550 "Expected atomic-load-op32 to have one memoperand"); 13551 13552 // Memory Reference 13553 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 13554 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 13555 13556 unsigned DstLoReg, DstHiReg; 13557 unsigned SrcLoReg, SrcHiReg; 13558 unsigned MemOpndSlot; 13559 13560 unsigned CurOp = 0; 13561 13562 DstLoReg = MI->getOperand(CurOp++).getReg(); 13563 DstHiReg = MI->getOperand(CurOp++).getReg(); 13564 MemOpndSlot = CurOp; 13565 CurOp += X86::AddrNumOperands; 13566 SrcLoReg = MI->getOperand(CurOp++).getReg(); 13567 SrcHiReg = MI->getOperand(CurOp++).getReg(); 13568 13569 const TargetRegisterClass *RC = &X86::GR32RegClass; 13570 const TargetRegisterClass *RC8 = &X86::GR8RegClass; 13571 13572 unsigned t1L = MRI.createVirtualRegister(RC); 13573 unsigned t1H = MRI.createVirtualRegister(RC); 13574 unsigned t2L = MRI.createVirtualRegister(RC); 13575 unsigned t2H = MRI.createVirtualRegister(RC); 13576 unsigned t3L = MRI.createVirtualRegister(RC); 13577 unsigned t3H = MRI.createVirtualRegister(RC); 13578 unsigned t4L = MRI.createVirtualRegister(RC); 13579 unsigned t4H = MRI.createVirtualRegister(RC); 13580 13581 unsigned LCMPXCHGOpc = X86::LCMPXCHG8B; 13582 unsigned LOADOpc = X86::MOV32rm; 13583 13584 // For the atomic load-arith operator, we generate 13585 // 13586 // thisMBB: 13587 // t1L = LOAD [MI.addr + 0] 13588 // t1H = LOAD [MI.addr + 4] 13589 // mainMBB: 13590 // t4L = phi(t1L / thisMBB, t3L / mainMBB) 13591 // t4H = phi(t1H / thisMBB, t3H / mainMBB) 13592 // t2L = OP MI.val.lo, t4L 13593 // t2H = OP MI.val.hi, t4H 13594 // EBX = t2L 13595 // ECX = t2H 13596 // LCMPXCHG8B [MI.addr], [ECX:EBX & EDX:EAX are implicitly used and EDX:EAX is implicitly defined] 13597 // t3L = EAX 13598 // t3H = EDX 13599 // JNE loop 13600 // sinkMBB: 13601 // dstL = t3L 13602 // dstH = t3H 13603 13604 MachineBasicBlock *thisMBB = MBB; 13605 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 13606 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 13607 MF->insert(I, mainMBB); 13608 MF->insert(I, sinkMBB); 13609 13610 MachineInstrBuilder MIB; 13611 13612 // Transfer the remainder of BB and its successor edges to sinkMBB. 
13613 sinkMBB->splice(sinkMBB->begin(), MBB, 13614 llvm::next(MachineBasicBlock::iterator(MI)), MBB->end()); 13615 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 13616 13617 // thisMBB: 13618 // Lo 13619 MIB = BuildMI(thisMBB, DL, TII->get(LOADOpc), t1L); 13620 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { 13621 MachineOperand NewMO = MI->getOperand(MemOpndSlot + i); 13622 if (NewMO.isReg()) 13623 NewMO.setIsKill(false); 13624 MIB.addOperand(NewMO); 13625 } 13626 for (MachineInstr::mmo_iterator MMOI = MMOBegin; MMOI != MMOEnd; ++MMOI) { 13627 unsigned flags = (*MMOI)->getFlags(); 13628 flags = (flags & ~MachineMemOperand::MOStore) | MachineMemOperand::MOLoad; 13629 MachineMemOperand *MMO = 13630 MF->getMachineMemOperand((*MMOI)->getPointerInfo(), flags, 13631 (*MMOI)->getSize(), 13632 (*MMOI)->getBaseAlignment(), 13633 (*MMOI)->getTBAAInfo(), 13634 (*MMOI)->getRanges()); 13635 MIB.addMemOperand(MMO); 13636 }; 13637 MachineInstr *LowMI = MIB; 13638 13639 // Hi 13640 MIB = BuildMI(thisMBB, DL, TII->get(LOADOpc), t1H); 13641 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { 13642 if (i == X86::AddrDisp) { 13643 MIB.addDisp(MI->getOperand(MemOpndSlot + i), 4); // 4 == sizeof(i32) 13644 } else { 13645 MachineOperand NewMO = MI->getOperand(MemOpndSlot + i); 13646 if (NewMO.isReg()) 13647 NewMO.setIsKill(false); 13648 MIB.addOperand(NewMO); 13649 } 13650 } 13651 MIB.setMemRefs(LowMI->memoperands_begin(), LowMI->memoperands_end()); 13652 13653 thisMBB->addSuccessor(mainMBB); 13654 13655 // mainMBB: 13656 MachineBasicBlock *origMainMBB = mainMBB; 13657 13658 // Add PHIs. 13659 MachineInstr *PhiL = BuildMI(mainMBB, DL, TII->get(X86::PHI), t4L) 13660 .addReg(t1L).addMBB(thisMBB).addReg(t3L).addMBB(mainMBB); 13661 MachineInstr *PhiH = BuildMI(mainMBB, DL, TII->get(X86::PHI), t4H) 13662 .addReg(t1H).addMBB(thisMBB).addReg(t3H).addMBB(mainMBB); 13663 13664 unsigned Opc = MI->getOpcode(); 13665 switch (Opc) { 13666 default: 13667 llvm_unreachable("Unhandled atomic-load-op6432 opcode!"); 13668 case X86::ATOMAND6432: 13669 case X86::ATOMOR6432: 13670 case X86::ATOMXOR6432: 13671 case X86::ATOMADD6432: 13672 case X86::ATOMSUB6432: { 13673 unsigned HiOpc; 13674 unsigned LoOpc = getNonAtomic6432Opcode(Opc, HiOpc); 13675 BuildMI(mainMBB, DL, TII->get(LoOpc), t2L).addReg(t4L) 13676 .addReg(SrcLoReg); 13677 BuildMI(mainMBB, DL, TII->get(HiOpc), t2H).addReg(t4H) 13678 .addReg(SrcHiReg); 13679 break; 13680 } 13681 case X86::ATOMNAND6432: { 13682 unsigned HiOpc, NOTOpc; 13683 unsigned LoOpc = getNonAtomic6432OpcodeWithExtraOpc(Opc, HiOpc, NOTOpc); 13684 unsigned TmpL = MRI.createVirtualRegister(RC); 13685 unsigned TmpH = MRI.createVirtualRegister(RC); 13686 BuildMI(mainMBB, DL, TII->get(LoOpc), TmpL).addReg(SrcLoReg) 13687 .addReg(t4L); 13688 BuildMI(mainMBB, DL, TII->get(HiOpc), TmpH).addReg(SrcHiReg) 13689 .addReg(t4H); 13690 BuildMI(mainMBB, DL, TII->get(NOTOpc), t2L).addReg(TmpL); 13691 BuildMI(mainMBB, DL, TII->get(NOTOpc), t2H).addReg(TmpH); 13692 break; 13693 } 13694 case X86::ATOMMAX6432: 13695 case X86::ATOMMIN6432: 13696 case X86::ATOMUMAX6432: 13697 case X86::ATOMUMIN6432: { 13698 unsigned HiOpc; 13699 unsigned LoOpc = getNonAtomic6432Opcode(Opc, HiOpc); 13700 unsigned cL = MRI.createVirtualRegister(RC8); 13701 unsigned cH = MRI.createVirtualRegister(RC8); 13702 unsigned cL32 = MRI.createVirtualRegister(RC); 13703 unsigned cH32 = MRI.createVirtualRegister(RC); 13704 unsigned cc = MRI.createVirtualRegister(RC); 13705 // cl := cmp src_lo, lo 13706 BuildMI(mainMBB, DL, TII->get(X86::CMP32rr)) 
13707       .addReg(SrcLoReg).addReg(t4L);
13708     BuildMI(mainMBB, DL, TII->get(LoOpc), cL);
13709     BuildMI(mainMBB, DL, TII->get(X86::MOVZX32rr8), cL32).addReg(cL);
13710     // ch := cmp src_hi, hi
13711     BuildMI(mainMBB, DL, TII->get(X86::CMP32rr))
13712       .addReg(SrcHiReg).addReg(t4H);
13713     BuildMI(mainMBB, DL, TII->get(HiOpc), cH);
13714     BuildMI(mainMBB, DL, TII->get(X86::MOVZX32rr8), cH32).addReg(cH);
13715     // cc := if (src_hi == hi) ? cl : ch;
13716     if (Subtarget->hasCMov()) {
13717       BuildMI(mainMBB, DL, TII->get(X86::CMOVE32rr), cc)
13718         .addReg(cH32).addReg(cL32);
13719     } else {
13720       MIB = BuildMI(mainMBB, DL, TII->get(X86::CMOV_GR32), cc)
13721               .addReg(cH32).addReg(cL32)
13722               .addImm(X86::COND_E);
13723       mainMBB = EmitLoweredSelect(MIB, mainMBB);
13724     }
13725     BuildMI(mainMBB, DL, TII->get(X86::TEST32rr)).addReg(cc).addReg(cc);
13726     if (Subtarget->hasCMov()) {
13727       BuildMI(mainMBB, DL, TII->get(X86::CMOVNE32rr), t2L)
13728         .addReg(SrcLoReg).addReg(t4L);
13729       BuildMI(mainMBB, DL, TII->get(X86::CMOVNE32rr), t2H)
13730         .addReg(SrcHiReg).addReg(t4H);
13731     } else {
13732       MIB = BuildMI(mainMBB, DL, TII->get(X86::CMOV_GR32), t2L)
13733               .addReg(SrcLoReg).addReg(t4L)
13734               .addImm(X86::COND_NE);
13735       mainMBB = EmitLoweredSelect(MIB, mainMBB);
13736       // As the lowered CMOV won't clobber EFLAGS, we can reuse it for the
13737       // 2nd CMOV lowering.
13738       mainMBB->addLiveIn(X86::EFLAGS);
13739       MIB = BuildMI(mainMBB, DL, TII->get(X86::CMOV_GR32), t2H)
13740               .addReg(SrcHiReg).addReg(t4H)
13741               .addImm(X86::COND_NE);
13742       mainMBB = EmitLoweredSelect(MIB, mainMBB);
13743       // Replace the original PHI nodes as mainMBB is changed after CMOV
13744       // lowering.
13745       BuildMI(*origMainMBB, PhiL, DL, TII->get(X86::PHI), t4L)
13746         .addReg(t1L).addMBB(thisMBB).addReg(t3L).addMBB(mainMBB);
13747       BuildMI(*origMainMBB, PhiH, DL, TII->get(X86::PHI), t4H)
13748         .addReg(t1H).addMBB(thisMBB).addReg(t3H).addMBB(mainMBB);
13749       PhiL->eraseFromParent();
13750       PhiH->eraseFromParent();
13751     }
13752     break;
13753   }
13754   case X86::ATOMSWAP6432: {
13755     unsigned HiOpc;
13756     unsigned LoOpc = getNonAtomic6432Opcode(Opc, HiOpc);
13757     BuildMI(mainMBB, DL, TII->get(LoOpc), t2L).addReg(SrcLoReg);
13758     BuildMI(mainMBB, DL, TII->get(HiOpc), t2H).addReg(SrcHiReg);
13759     break;
13760   }
13761   }
13762
13763   // Put the current value t4H:t4L into EDX:EAX (LCMPXCHG8B's comparand).
13764   BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::EAX).addReg(t4L);
13765   BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::EDX).addReg(t4H);
13766   // Put the new value t2H:t2L into ECX:EBX (LCMPXCHG8B's replacement).
13767   BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::EBX).addReg(t2L);
13768   BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::ECX).addReg(t2H);
13769
13770   MIB = BuildMI(mainMBB, DL, TII->get(LCMPXCHGOpc));
13771   for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
13772     MachineOperand NewMO = MI->getOperand(MemOpndSlot + i);
13773     if (NewMO.isReg())
13774       NewMO.setIsKill(false);
13775     MIB.addOperand(NewMO);
13776   }
13777   MIB.setMemRefs(MMOBegin, MMOEnd);
13778
13779   // Copy EDX:EAX back to t3H:t3L.
13780   BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), t3L).addReg(X86::EAX);
13781   BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), t3H).addReg(X86::EDX);
13782
13783   BuildMI(mainMBB, DL, TII->get(X86::JNE_4)).addMBB(origMainMBB);
13784
13785   mainMBB->addSuccessor(origMainMBB);
13786   mainMBB->addSuccessor(sinkMBB);
13787
13788   // sinkMBB:
13789   BuildMI(*sinkMBB, sinkMBB->begin(), DL,
13790           TII->get(TargetOpcode::COPY), DstLoReg)
13791     .addReg(t3L);
13792   BuildMI(*sinkMBB,
sinkMBB->begin(), DL, 13793 TII->get(TargetOpcode::COPY), DstHiReg) 13794 .addReg(t3H); 13795 13796 MI->eraseFromParent(); 13797 return sinkMBB; 13798} 13799 13800// FIXME: When we get size specific XMM0 registers, i.e. XMM0_V16I8 13801// or XMM0_V32I8 in AVX all of this code can be replaced with that 13802// in the .td file. 13803static MachineBasicBlock *EmitPCMPSTRM(MachineInstr *MI, MachineBasicBlock *BB, 13804 const TargetInstrInfo *TII) { 13805 unsigned Opc; 13806 switch (MI->getOpcode()) { 13807 default: llvm_unreachable("illegal opcode!"); 13808 case X86::PCMPISTRM128REG: Opc = X86::PCMPISTRM128rr; break; 13809 case X86::VPCMPISTRM128REG: Opc = X86::VPCMPISTRM128rr; break; 13810 case X86::PCMPISTRM128MEM: Opc = X86::PCMPISTRM128rm; break; 13811 case X86::VPCMPISTRM128MEM: Opc = X86::VPCMPISTRM128rm; break; 13812 case X86::PCMPESTRM128REG: Opc = X86::PCMPESTRM128rr; break; 13813 case X86::VPCMPESTRM128REG: Opc = X86::VPCMPESTRM128rr; break; 13814 case X86::PCMPESTRM128MEM: Opc = X86::PCMPESTRM128rm; break; 13815 case X86::VPCMPESTRM128MEM: Opc = X86::VPCMPESTRM128rm; break; 13816 } 13817 13818 DebugLoc dl = MI->getDebugLoc(); 13819 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc)); 13820 13821 unsigned NumArgs = MI->getNumOperands(); 13822 for (unsigned i = 1; i < NumArgs; ++i) { 13823 MachineOperand &Op = MI->getOperand(i); 13824 if (!(Op.isReg() && Op.isImplicit())) 13825 MIB.addOperand(Op); 13826 } 13827 if (MI->hasOneMemOperand()) 13828 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); 13829 13830 BuildMI(*BB, MI, dl, 13831 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg()) 13832 .addReg(X86::XMM0); 13833 13834 MI->eraseFromParent(); 13835 return BB; 13836} 13837 13838// FIXME: Custom handling because TableGen doesn't support multiple implicit 13839// defs in an instruction pattern 13840static MachineBasicBlock *EmitPCMPSTRI(MachineInstr *MI, MachineBasicBlock *BB, 13841 const TargetInstrInfo *TII) { 13842 unsigned Opc; 13843 switch (MI->getOpcode()) { 13844 default: llvm_unreachable("illegal opcode!"); 13845 case X86::PCMPISTRIREG: Opc = X86::PCMPISTRIrr; break; 13846 case X86::VPCMPISTRIREG: Opc = X86::VPCMPISTRIrr; break; 13847 case X86::PCMPISTRIMEM: Opc = X86::PCMPISTRIrm; break; 13848 case X86::VPCMPISTRIMEM: Opc = X86::VPCMPISTRIrm; break; 13849 case X86::PCMPESTRIREG: Opc = X86::PCMPESTRIrr; break; 13850 case X86::VPCMPESTRIREG: Opc = X86::VPCMPESTRIrr; break; 13851 case X86::PCMPESTRIMEM: Opc = X86::PCMPESTRIrm; break; 13852 case X86::VPCMPESTRIMEM: Opc = X86::VPCMPESTRIrm; break; 13853 } 13854 13855 DebugLoc dl = MI->getDebugLoc(); 13856 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc)); 13857 13858 unsigned NumArgs = MI->getNumOperands(); // remove the results 13859 for (unsigned i = 1; i < NumArgs; ++i) { 13860 MachineOperand &Op = MI->getOperand(i); 13861 if (!(Op.isReg() && Op.isImplicit())) 13862 MIB.addOperand(Op); 13863 } 13864 if (MI->hasOneMemOperand()) 13865 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); 13866 13867 BuildMI(*BB, MI, dl, 13868 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg()) 13869 .addReg(X86::ECX); 13870 13871 MI->eraseFromParent(); 13872 return BB; 13873} 13874 13875static MachineBasicBlock * EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB, 13876 const TargetInstrInfo *TII, 13877 const X86Subtarget* Subtarget) { 13878 DebugLoc dl = MI->getDebugLoc(); 13879 13880 // Address into RAX/EAX, other two args into ECX, EDX. 13881 unsigned MemOpc = Subtarget->is64Bit() ? 
X86::LEA64r : X86::LEA32r; 13882 unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX; 13883 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg); 13884 for (int i = 0; i < X86::AddrNumOperands; ++i) 13885 MIB.addOperand(MI->getOperand(i)); 13886 13887 unsigned ValOps = X86::AddrNumOperands; 13888 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX) 13889 .addReg(MI->getOperand(ValOps).getReg()); 13890 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX) 13891 .addReg(MI->getOperand(ValOps+1).getReg()); 13892 13893 // The instruction doesn't actually take any operands though. 13894 BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr)); 13895 13896 MI->eraseFromParent(); // The pseudo is gone now. 13897 return BB; 13898} 13899 13900MachineBasicBlock * 13901X86TargetLowering::EmitVAARG64WithCustomInserter( 13902 MachineInstr *MI, 13903 MachineBasicBlock *MBB) const { 13904 // Emit va_arg instruction on X86-64. 13905 13906 // Operands to this pseudo-instruction: 13907 // 0 ) Output : destination address (reg) 13908 // 1-5) Input : va_list address (addr, i64mem) 13909 // 6 ) ArgSize : Size (in bytes) of vararg type 13910 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset 13911 // 8 ) Align : Alignment of type 13912 // 9 ) EFLAGS (implicit-def) 13913 13914 assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!"); 13915 assert(X86::AddrNumOperands == 5 && "VAARG_64 assumes 5 address operands"); 13916 13917 unsigned DestReg = MI->getOperand(0).getReg(); 13918 MachineOperand &Base = MI->getOperand(1); 13919 MachineOperand &Scale = MI->getOperand(2); 13920 MachineOperand &Index = MI->getOperand(3); 13921 MachineOperand &Disp = MI->getOperand(4); 13922 MachineOperand &Segment = MI->getOperand(5); 13923 unsigned ArgSize = MI->getOperand(6).getImm(); 13924 unsigned ArgMode = MI->getOperand(7).getImm(); 13925 unsigned Align = MI->getOperand(8).getImm(); 13926 13927 // Memory Reference 13928 assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand"); 13929 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 13930 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 13931 13932 // Machine Information 13933 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 13934 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 13935 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64); 13936 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32); 13937 DebugLoc DL = MI->getDebugLoc(); 13938 13939 // struct va_list { 13940 // i32 gp_offset 13941 // i32 fp_offset 13942 // i64 overflow_area (address) 13943 // i64 reg_save_area (address) 13944 // } 13945 // sizeof(va_list) = 24 13946 // alignment(va_list) = 8 13947 13948 unsigned TotalNumIntRegs = 6; 13949 unsigned TotalNumXMMRegs = 8; 13950 bool UseGPOffset = (ArgMode == 1); 13951 bool UseFPOffset = (ArgMode == 2); 13952 unsigned MaxOffset = TotalNumIntRegs * 8 + 13953 (UseFPOffset ? 
TotalNumXMMRegs * 16 : 0); 13954 13955 /* Align ArgSize to a multiple of 8 */ 13956 unsigned ArgSizeA8 = (ArgSize + 7) & ~7; 13957 bool NeedsAlign = (Align > 8); 13958 13959 MachineBasicBlock *thisMBB = MBB; 13960 MachineBasicBlock *overflowMBB; 13961 MachineBasicBlock *offsetMBB; 13962 MachineBasicBlock *endMBB; 13963 13964 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB 13965 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB 13966 unsigned OffsetReg = 0; 13967 13968 if (!UseGPOffset && !UseFPOffset) { 13969 // If we only pull from the overflow region, we don't create a branch. 13970 // We don't need to alter control flow. 13971 OffsetDestReg = 0; // unused 13972 OverflowDestReg = DestReg; 13973 13974 offsetMBB = NULL; 13975 overflowMBB = thisMBB; 13976 endMBB = thisMBB; 13977 } else { 13978 // First emit code to check if gp_offset (or fp_offset) is below the bound. 13979 // If so, pull the argument from reg_save_area. (branch to offsetMBB) 13980 // If not, pull from overflow_area. (branch to overflowMBB) 13981 // 13982 // thisMBB 13983 // | . 13984 // | . 13985 // offsetMBB overflowMBB 13986 // | . 13987 // | . 13988 // endMBB 13989 13990 // Registers for the PHI in endMBB 13991 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass); 13992 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass); 13993 13994 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 13995 MachineFunction *MF = MBB->getParent(); 13996 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB); 13997 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB); 13998 endMBB = MF->CreateMachineBasicBlock(LLVM_BB); 13999 14000 MachineFunction::iterator MBBIter = MBB; 14001 ++MBBIter; 14002 14003 // Insert the new basic blocks 14004 MF->insert(MBBIter, offsetMBB); 14005 MF->insert(MBBIter, overflowMBB); 14006 MF->insert(MBBIter, endMBB); 14007 14008 // Transfer the remainder of MBB and its successor edges to endMBB. 14009 endMBB->splice(endMBB->begin(), thisMBB, 14010 llvm::next(MachineBasicBlock::iterator(MI)), 14011 thisMBB->end()); 14012 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB); 14013 14014 // Make offsetMBB and overflowMBB successors of thisMBB 14015 thisMBB->addSuccessor(offsetMBB); 14016 thisMBB->addSuccessor(overflowMBB); 14017 14018 // endMBB is a successor of both offsetMBB and overflowMBB 14019 offsetMBB->addSuccessor(endMBB); 14020 overflowMBB->addSuccessor(endMBB); 14021 14022 // Load the offset value into a register 14023 OffsetReg = MRI.createVirtualRegister(OffsetRegClass); 14024 BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg) 14025 .addOperand(Base) 14026 .addOperand(Scale) 14027 .addOperand(Index) 14028 .addDisp(Disp, UseFPOffset ? 4 : 0) 14029 .addOperand(Segment) 14030 .setMemRefs(MMOBegin, MMOEnd); 14031 14032 // Check if there is enough room left to pull this argument. 14033 BuildMI(thisMBB, DL, TII->get(X86::CMP32ri)) 14034 .addReg(OffsetReg) 14035 .addImm(MaxOffset + 8 - ArgSizeA8); 14036 14037 // Branch to "overflowMBB" if offset >= max 14038 // Fall through to "offsetMBB" otherwise 14039 BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE))) 14040 .addMBB(overflowMBB); 14041 } 14042 14043 // In offsetMBB, emit code to use the reg_save_area. 14044 if (offsetMBB) { 14045 assert(OffsetReg != 0); 14046 14047 // Read the reg_save_area address. 
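    // (In the va_list layout sketched above, reg_save_area is the i64 at
    // offset 16, which is why the load below uses a displacement of 16;
    // likewise gp_offset and fp_offset live at offsets 0 and 4, matching
    // the "UseFPOffset ? 4 : 0" displacements used to read and write the
    // offset.)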
14048 unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass); 14049 BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg) 14050 .addOperand(Base) 14051 .addOperand(Scale) 14052 .addOperand(Index) 14053 .addDisp(Disp, 16) 14054 .addOperand(Segment) 14055 .setMemRefs(MMOBegin, MMOEnd); 14056 14057 // Zero-extend the offset 14058 unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass); 14059 BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64) 14060 .addImm(0) 14061 .addReg(OffsetReg) 14062 .addImm(X86::sub_32bit); 14063 14064 // Add the offset to the reg_save_area to get the final address. 14065 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg) 14066 .addReg(OffsetReg64) 14067 .addReg(RegSaveReg); 14068 14069 // Compute the offset for the next argument 14070 unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass); 14071 BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg) 14072 .addReg(OffsetReg) 14073 .addImm(UseFPOffset ? 16 : 8); 14074 14075 // Store it back into the va_list. 14076 BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr)) 14077 .addOperand(Base) 14078 .addOperand(Scale) 14079 .addOperand(Index) 14080 .addDisp(Disp, UseFPOffset ? 4 : 0) 14081 .addOperand(Segment) 14082 .addReg(NextOffsetReg) 14083 .setMemRefs(MMOBegin, MMOEnd); 14084 14085 // Jump to endMBB 14086 BuildMI(offsetMBB, DL, TII->get(X86::JMP_4)) 14087 .addMBB(endMBB); 14088 } 14089 14090 // 14091 // Emit code to use overflow area 14092 // 14093 14094 // Load the overflow_area address into a register. 14095 unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass); 14096 BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg) 14097 .addOperand(Base) 14098 .addOperand(Scale) 14099 .addOperand(Index) 14100 .addDisp(Disp, 8) 14101 .addOperand(Segment) 14102 .setMemRefs(MMOBegin, MMOEnd); 14103 14104 // If we need to align it, do so. Otherwise, just copy the address 14105 // to OverflowDestReg. 14106 if (NeedsAlign) { 14107 // Align the overflow address 14108 assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2"); 14109 unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass); 14110 14111 // aligned_addr = (addr + (align-1)) & ~(align-1) 14112 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg) 14113 .addReg(OverflowAddrReg) 14114 .addImm(Align-1); 14115 14116 BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg) 14117 .addReg(TmpReg) 14118 .addImm(~(uint64_t)(Align-1)); 14119 } else { 14120 BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg) 14121 .addReg(OverflowAddrReg); 14122 } 14123 14124 // Compute the next overflow address after this argument. 14125 // (the overflow address should be kept 8-byte aligned) 14126 unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass); 14127 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg) 14128 .addReg(OverflowDestReg) 14129 .addImm(ArgSizeA8); 14130 14131 // Store the new overflow address. 14132 BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr)) 14133 .addOperand(Base) 14134 .addOperand(Scale) 14135 .addOperand(Index) 14136 .addDisp(Disp, 8) 14137 .addOperand(Segment) 14138 .addReg(NextAddrReg) 14139 .setMemRefs(MMOBegin, MMOEnd); 14140 14141 // If we branched, emit the PHI to the front of endMBB. 
14142 if (offsetMBB) { 14143 BuildMI(*endMBB, endMBB->begin(), DL, 14144 TII->get(X86::PHI), DestReg) 14145 .addReg(OffsetDestReg).addMBB(offsetMBB) 14146 .addReg(OverflowDestReg).addMBB(overflowMBB); 14147 } 14148 14149 // Erase the pseudo instruction 14150 MI->eraseFromParent(); 14151 14152 return endMBB; 14153} 14154 14155MachineBasicBlock * 14156X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter( 14157 MachineInstr *MI, 14158 MachineBasicBlock *MBB) const { 14159 // Emit code to save XMM registers to the stack. The ABI says that the 14160 // number of registers to save is given in %al, so it's theoretically 14161 // possible to do an indirect jump trick to avoid saving all of them, 14162 // however this code takes a simpler approach and just executes all 14163 // of the stores if %al is non-zero. It's less code, and it's probably 14164 // easier on the hardware branch predictor, and stores aren't all that 14165 // expensive anyway. 14166 14167 // Create the new basic blocks. One block contains all the XMM stores, 14168 // and one block is the final destination regardless of whether any 14169 // stores were performed. 14170 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 14171 MachineFunction *F = MBB->getParent(); 14172 MachineFunction::iterator MBBIter = MBB; 14173 ++MBBIter; 14174 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB); 14175 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB); 14176 F->insert(MBBIter, XMMSaveMBB); 14177 F->insert(MBBIter, EndMBB); 14178 14179 // Transfer the remainder of MBB and its successor edges to EndMBB. 14180 EndMBB->splice(EndMBB->begin(), MBB, 14181 llvm::next(MachineBasicBlock::iterator(MI)), 14182 MBB->end()); 14183 EndMBB->transferSuccessorsAndUpdatePHIs(MBB); 14184 14185 // The original block will now fall through to the XMM save block. 14186 MBB->addSuccessor(XMMSaveMBB); 14187 // The XMMSaveMBB will fall through to the end block. 14188 XMMSaveMBB->addSuccessor(EndMBB); 14189 14190 // Now add the instructions. 14191 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 14192 DebugLoc DL = MI->getDebugLoc(); 14193 14194 unsigned CountReg = MI->getOperand(0).getReg(); 14195 int64_t RegSaveFrameIndex = MI->getOperand(1).getImm(); 14196 int64_t VarArgsFPOffset = MI->getOperand(2).getImm(); 14197 14198 if (!Subtarget->isTargetWin64()) { 14199 // If %al is 0, branch around the XMM save block. 14200 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg); 14201 BuildMI(MBB, DL, TII->get(X86::JE_4)).addMBB(EndMBB); 14202 MBB->addSuccessor(EndMBB); 14203 } 14204 14205 unsigned MOVOpc = Subtarget->hasFp256() ? X86::VMOVAPSmr : X86::MOVAPSmr; 14206 // In the XMM save block, save all the XMM argument registers. 14207 for (int i = 3, e = MI->getNumOperands(); i != e; ++i) { 14208 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset; 14209 MachineMemOperand *MMO = 14210 F->getMachineMemOperand( 14211 MachinePointerInfo::getFixedStack(RegSaveFrameIndex, Offset), 14212 MachineMemOperand::MOStore, 14213 /*Size=*/16, /*Align=*/16); 14214 BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc)) 14215 .addFrameIndex(RegSaveFrameIndex) 14216 .addImm(/*Scale=*/1) 14217 .addReg(/*IndexReg=*/0) 14218 .addImm(/*Disp=*/Offset) 14219 .addReg(/*Segment=*/0) 14220 .addReg(MI->getOperand(i).getReg()) 14221 .addMemOperand(MMO); 14222 } 14223 14224 MI->eraseFromParent(); // The pseudo instruction is gone now. 
14225 14226 return EndMBB; 14227} 14228 14229// The EFLAGS operand of SelectItr might be missing a kill marker 14230// because there were multiple uses of EFLAGS, and ISel didn't know 14231// which to mark. Figure out whether SelectItr should have had a 14232// kill marker, and set it if it should. Returns the correct kill 14233// marker value. 14234static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr, 14235 MachineBasicBlock* BB, 14236 const TargetRegisterInfo* TRI) { 14237 // Scan forward through BB for a use/def of EFLAGS. 14238 MachineBasicBlock::iterator miI(llvm::next(SelectItr)); 14239 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) { 14240 const MachineInstr& mi = *miI; 14241 if (mi.readsRegister(X86::EFLAGS)) 14242 return false; 14243 if (mi.definesRegister(X86::EFLAGS)) 14244 break; // Should have kill-flag - update below. 14245 } 14246 14247 // If we hit the end of the block, check whether EFLAGS is live into a 14248 // successor. 14249 if (miI == BB->end()) { 14250 for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(), 14251 sEnd = BB->succ_end(); 14252 sItr != sEnd; ++sItr) { 14253 MachineBasicBlock* succ = *sItr; 14254 if (succ->isLiveIn(X86::EFLAGS)) 14255 return false; 14256 } 14257 } 14258 14259 // We found a def, or hit the end of the basic block and EFLAGS wasn't live 14260 // out. SelectMI should have a kill flag on EFLAGS. 14261 SelectItr->addRegisterKilled(X86::EFLAGS, TRI); 14262 return true; 14263} 14264 14265MachineBasicBlock * 14266X86TargetLowering::EmitLoweredSelect(MachineInstr *MI, 14267 MachineBasicBlock *BB) const { 14268 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 14269 DebugLoc DL = MI->getDebugLoc(); 14270 14271 // To "insert" a SELECT_CC instruction, we actually have to insert the 14272 // diamond control-flow pattern. The incoming instruction knows the 14273 // destination vreg to set, the condition code register to branch on, the 14274 // true/false values to select between, and a branch opcode to use. 14275 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 14276 MachineFunction::iterator It = BB; 14277 ++It; 14278 14279 // thisMBB: 14280 // ... 14281 // TrueVal = ... 14282 // cmpTY ccX, r1, r2 14283 // bCC copy1MBB 14284 // fallthrough --> copy0MBB 14285 MachineBasicBlock *thisMBB = BB; 14286 MachineFunction *F = BB->getParent(); 14287 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 14288 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 14289 F->insert(It, copy0MBB); 14290 F->insert(It, sinkMBB); 14291 14292 // If the EFLAGS register isn't dead in the terminator, then claim that it's 14293 // live into the sink and copy blocks. 14294 const TargetRegisterInfo* TRI = getTargetMachine().getRegisterInfo(); 14295 if (!MI->killsRegister(X86::EFLAGS) && 14296 !checkAndUpdateEFLAGSKill(MI, BB, TRI)) { 14297 copy0MBB->addLiveIn(X86::EFLAGS); 14298 sinkMBB->addLiveIn(X86::EFLAGS); 14299 } 14300 14301 // Transfer the remainder of BB and its successor edges to sinkMBB. 14302 sinkMBB->splice(sinkMBB->begin(), BB, 14303 llvm::next(MachineBasicBlock::iterator(MI)), 14304 BB->end()); 14305 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 14306 14307 // Add the true and fallthrough blocks as its successors. 14308 BB->addSuccessor(copy0MBB); 14309 BB->addSuccessor(sinkMBB); 14310 14311 // Create the conditional branch instruction. 
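  // (Operand 3 of the CMOV pseudo carries the X86::CondCode; the call
  // below maps it to the matching conditional-jump opcode, e.g. COND_E
  // to JE_4.)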
14312 unsigned Opc = 14313 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm()); 14314 BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB); 14315 14316 // copy0MBB: 14317 // %FalseValue = ... 14318 // # fallthrough to sinkMBB 14319 copy0MBB->addSuccessor(sinkMBB); 14320 14321 // sinkMBB: 14322 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 14323 // ... 14324 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 14325 TII->get(X86::PHI), MI->getOperand(0).getReg()) 14326 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 14327 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 14328 14329 MI->eraseFromParent(); // The pseudo instruction is gone now. 14330 return sinkMBB; 14331} 14332 14333MachineBasicBlock * 14334X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI, MachineBasicBlock *BB, 14335 bool Is64Bit) const { 14336 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 14337 DebugLoc DL = MI->getDebugLoc(); 14338 MachineFunction *MF = BB->getParent(); 14339 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 14340 14341 assert(getTargetMachine().Options.EnableSegmentedStacks); 14342 14343 unsigned TlsReg = Is64Bit ? X86::FS : X86::GS; 14344 unsigned TlsOffset = Is64Bit ? 0x70 : 0x30; 14345 14346 // BB: 14347 // ... [Till the alloca] 14348 // If stacklet is not large enough, jump to mallocMBB 14349 // 14350 // bumpMBB: 14351 // Allocate by subtracting from RSP 14352 // Jump to continueMBB 14353 // 14354 // mallocMBB: 14355 // Allocate by call to runtime 14356 // 14357 // continueMBB: 14358 // ... 14359 // [rest of original BB] 14360 // 14361 14362 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB); 14363 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB); 14364 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB); 14365 14366 MachineRegisterInfo &MRI = MF->getRegInfo(); 14367 const TargetRegisterClass *AddrRegClass = 14368 getRegClassFor(Is64Bit ? MVT::i64:MVT::i32); 14369 14370 unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass), 14371 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass), 14372 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass), 14373 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass), 14374 sizeVReg = MI->getOperand(1).getReg(), 14375 physSPReg = Is64Bit ? X86::RSP : X86::ESP; 14376 14377 MachineFunction::iterator MBBIter = BB; 14378 ++MBBIter; 14379 14380 MF->insert(MBBIter, bumpMBB); 14381 MF->insert(MBBIter, mallocMBB); 14382 MF->insert(MBBIter, continueMBB); 14383 14384 continueMBB->splice(continueMBB->begin(), BB, llvm::next 14385 (MachineBasicBlock::iterator(MI)), BB->end()); 14386 continueMBB->transferSuccessorsAndUpdatePHIs(BB); 14387 14388 // Add code to the main basic block to check if the stack limit has been hit, 14389 // and if so, jump to mallocMBB otherwise to bumpMBB. 14390 BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg); 14391 BuildMI(BB, DL, TII->get(Is64Bit ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg) 14392 .addReg(tmpSPVReg).addReg(sizeVReg); 14393 BuildMI(BB, DL, TII->get(Is64Bit ? X86::CMP64mr:X86::CMP32mr)) 14394 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg) 14395 .addReg(SPLimitVReg); 14396 BuildMI(BB, DL, TII->get(X86::JG_4)).addMBB(mallocMBB); 14397 14398 // bumpMBB simply decreases the stack pointer, since we know the current 14399 // stacklet has enough space. 
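  // (The limit check above compares the stack bound kept in the thread
  // control block against the prospective stack pointer, i.e. current SP
  // minus the allocation size. The TlsOffset values 0x70 (FS, 64-bit) and
  // 0x30 (GS, 32-bit) used earlier are, to the best of our knowledge, the
  // slots libgcc's __morestack machinery uses for that bound.)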
14400 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg) 14401 .addReg(SPLimitVReg); 14402 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg) 14403 .addReg(SPLimitVReg); 14404 BuildMI(bumpMBB, DL, TII->get(X86::JMP_4)).addMBB(continueMBB); 14405 14406 // Calls into a routine in libgcc to allocate more space from the heap. 14407 const uint32_t *RegMask = 14408 getTargetMachine().getRegisterInfo()->getCallPreservedMask(CallingConv::C); 14409 if (Is64Bit) { 14410 BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI) 14411 .addReg(sizeVReg); 14412 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32)) 14413 .addExternalSymbol("__morestack_allocate_stack_space") 14414 .addRegMask(RegMask) 14415 .addReg(X86::RDI, RegState::Implicit) 14416 .addReg(X86::RAX, RegState::ImplicitDefine); 14417 } else { 14418 BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg) 14419 .addImm(12); 14420 BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg); 14421 BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32)) 14422 .addExternalSymbol("__morestack_allocate_stack_space") 14423 .addRegMask(RegMask) 14424 .addReg(X86::EAX, RegState::ImplicitDefine); 14425 } 14426 14427 if (!Is64Bit) 14428 BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg) 14429 .addImm(16); 14430 14431 BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg) 14432 .addReg(Is64Bit ? X86::RAX : X86::EAX); 14433 BuildMI(mallocMBB, DL, TII->get(X86::JMP_4)).addMBB(continueMBB); 14434 14435 // Set up the CFG correctly. 14436 BB->addSuccessor(bumpMBB); 14437 BB->addSuccessor(mallocMBB); 14438 mallocMBB->addSuccessor(continueMBB); 14439 bumpMBB->addSuccessor(continueMBB); 14440 14441 // Take care of the PHI nodes. 14442 BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI), 14443 MI->getOperand(0).getReg()) 14444 .addReg(mallocPtrVReg).addMBB(mallocMBB) 14445 .addReg(bumpSPPtrVReg).addMBB(bumpMBB); 14446 14447 // Delete the original pseudo instruction. 14448 MI->eraseFromParent(); 14449 14450 // And we're done. 14451 return continueMBB; 14452} 14453 14454MachineBasicBlock * 14455X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI, 14456 MachineBasicBlock *BB) const { 14457 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 14458 DebugLoc DL = MI->getDebugLoc(); 14459 14460 assert(!Subtarget->isTargetEnvMacho()); 14461 14462 // The lowering is pretty easy: we're just emitting the call to _alloca. The 14463 // non-trivial part is impdef of ESP. 14464 14465 if (Subtarget->isTargetWin64()) { 14466 if (Subtarget->isTargetCygMing()) { 14467 // ___chkstk(Mingw64): 14468 // Clobbers R10, R11, RAX and EFLAGS. 14469 // Updates RSP. 14470 BuildMI(*BB, MI, DL, TII->get(X86::W64ALLOCA)) 14471 .addExternalSymbol("___chkstk") 14472 .addReg(X86::RAX, RegState::Implicit) 14473 .addReg(X86::RSP, RegState::Implicit) 14474 .addReg(X86::RAX, RegState::Define | RegState::Implicit) 14475 .addReg(X86::RSP, RegState::Define | RegState::Implicit) 14476 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit); 14477 } else { 14478 // __chkstk(MSVCRT): does not update stack pointer. 14479 // Clobbers R10, R11 and EFLAGS. 14480 BuildMI(*BB, MI, DL, TII->get(X86::W64ALLOCA)) 14481 .addExternalSymbol("__chkstk") 14482 .addReg(X86::RAX, RegState::Implicit) 14483 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit); 14484 // RAX has the offset to be subtracted from RSP. 
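      // (__chkstk only probes the allocated pages so the guard page is
      // touched in order; actually moving the stack pointer is still our
      // job, which the SUB emitted below performs: roughly
      // "subq %rax, %rsp".)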
14485 BuildMI(*BB, MI, DL, TII->get(X86::SUB64rr), X86::RSP) 14486 .addReg(X86::RSP) 14487 .addReg(X86::RAX); 14488 } 14489 } else { 14490 const char *StackProbeSymbol = 14491 Subtarget->isTargetWindows() ? "_chkstk" : "_alloca"; 14492 14493 BuildMI(*BB, MI, DL, TII->get(X86::CALLpcrel32)) 14494 .addExternalSymbol(StackProbeSymbol) 14495 .addReg(X86::EAX, RegState::Implicit) 14496 .addReg(X86::ESP, RegState::Implicit) 14497 .addReg(X86::EAX, RegState::Define | RegState::Implicit) 14498 .addReg(X86::ESP, RegState::Define | RegState::Implicit) 14499 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit); 14500 } 14501 14502 MI->eraseFromParent(); // The pseudo instruction is gone now. 14503 return BB; 14504} 14505 14506MachineBasicBlock * 14507X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI, 14508 MachineBasicBlock *BB) const { 14509 // This is pretty easy. We're taking the value that we received from 14510 // our load from the relocation, sticking it in either RDI (x86-64) 14511 // or EAX and doing an indirect call. The return value will then 14512 // be in the normal return register. 14513 const X86InstrInfo *TII 14514 = static_cast<const X86InstrInfo*>(getTargetMachine().getInstrInfo()); 14515 DebugLoc DL = MI->getDebugLoc(); 14516 MachineFunction *F = BB->getParent(); 14517 14518 assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?"); 14519 assert(MI->getOperand(3).isGlobal() && "This should be a global"); 14520 14521 // Get a register mask for the lowered call. 14522 // FIXME: The 32-bit calls have non-standard calling conventions. Use a 14523 // proper register mask. 14524 const uint32_t *RegMask = 14525 getTargetMachine().getRegisterInfo()->getCallPreservedMask(CallingConv::C); 14526 if (Subtarget->is64Bit()) { 14527 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, 14528 TII->get(X86::MOV64rm), X86::RDI) 14529 .addReg(X86::RIP) 14530 .addImm(0).addReg(0) 14531 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, 14532 MI->getOperand(3).getTargetFlags()) 14533 .addReg(0); 14534 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m)); 14535 addDirectMem(MIB, X86::RDI); 14536 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask); 14537 } else if (getTargetMachine().getRelocationModel() != Reloc::PIC_) { 14538 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, 14539 TII->get(X86::MOV32rm), X86::EAX) 14540 .addReg(0) 14541 .addImm(0).addReg(0) 14542 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, 14543 MI->getOperand(3).getTargetFlags()) 14544 .addReg(0); 14545 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m)); 14546 addDirectMem(MIB, X86::EAX); 14547 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask); 14548 } else { 14549 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, 14550 TII->get(X86::MOV32rm), X86::EAX) 14551 .addReg(TII->getGlobalBaseReg(F)) 14552 .addImm(0).addReg(0) 14553 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, 14554 MI->getOperand(3).getTargetFlags()) 14555 .addReg(0); 14556 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m)); 14557 addDirectMem(MIB, X86::EAX); 14558 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask); 14559 } 14560 14561 MI->eraseFromParent(); // The pseudo instruction is gone now. 
14562 return BB; 14563} 14564 14565MachineBasicBlock * 14566X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI, 14567 MachineBasicBlock *MBB) const { 14568 DebugLoc DL = MI->getDebugLoc(); 14569 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 14570 14571 MachineFunction *MF = MBB->getParent(); 14572 MachineRegisterInfo &MRI = MF->getRegInfo(); 14573 14574 const BasicBlock *BB = MBB->getBasicBlock(); 14575 MachineFunction::iterator I = MBB; 14576 ++I; 14577 14578 // Memory Reference 14579 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 14580 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 14581 14582 unsigned DstReg; 14583 unsigned MemOpndSlot = 0; 14584 14585 unsigned CurOp = 0; 14586 14587 DstReg = MI->getOperand(CurOp++).getReg(); 14588 const TargetRegisterClass *RC = MRI.getRegClass(DstReg); 14589 assert(RC->hasType(MVT::i32) && "Invalid destination!"); 14590 unsigned mainDstReg = MRI.createVirtualRegister(RC); 14591 unsigned restoreDstReg = MRI.createVirtualRegister(RC); 14592 14593 MemOpndSlot = CurOp; 14594 14595 MVT PVT = getPointerTy(); 14596 assert((PVT == MVT::i64 || PVT == MVT::i32) && 14597 "Invalid Pointer Size!"); 14598 14599 // For v = setjmp(buf), we generate 14600 // 14601 // thisMBB: 14602 // buf[LabelOffset] = restoreMBB 14603 // SjLjSetup restoreMBB 14604 // 14605 // mainMBB: 14606 // v_main = 0 14607 // 14608 // sinkMBB: 14609 // v = phi(main, restore) 14610 // 14611 // restoreMBB: 14612 // v_restore = 1 14613 14614 MachineBasicBlock *thisMBB = MBB; 14615 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 14616 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 14617 MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB); 14618 MF->insert(I, mainMBB); 14619 MF->insert(I, sinkMBB); 14620 MF->push_back(restoreMBB); 14621 14622 MachineInstrBuilder MIB; 14623 14624 // Transfer the remainder of BB and its successor edges to sinkMBB. 14625 sinkMBB->splice(sinkMBB->begin(), MBB, 14626 llvm::next(MachineBasicBlock::iterator(MI)), MBB->end()); 14627 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 14628 14629 // thisMBB: 14630 unsigned PtrStoreOpc = 0; 14631 unsigned LabelReg = 0; 14632 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 14633 Reloc::Model RM = getTargetMachine().getRelocationModel(); 14634 bool UseImmLabel = (getTargetMachine().getCodeModel() == CodeModel::Small) && 14635 (RM == Reloc::Static || RM == Reloc::DynamicNoPIC); 14636 14637 // Prepare IP either in reg or imm. 14638 if (!UseImmLabel) { 14639 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr; 14640 const TargetRegisterClass *PtrRC = getRegClassFor(PVT); 14641 LabelReg = MRI.createVirtualRegister(PtrRC); 14642 if (Subtarget->is64Bit()) { 14643 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg) 14644 .addReg(X86::RIP) 14645 .addImm(0) 14646 .addReg(0) 14647 .addMBB(restoreMBB) 14648 .addReg(0); 14649 } else { 14650 const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII); 14651 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg) 14652 .addReg(XII->getGlobalBaseReg(MF)) 14653 .addImm(0) 14654 .addReg(0) 14655 .addMBB(restoreMBB, Subtarget->ClassifyBlockAddressReference()) 14656 .addReg(0); 14657 } 14658 } else 14659 PtrStoreOpc = (PVT == MVT::i64) ? 
X86::MOV64mi32 : X86::MOV32mi; 14660 // Store IP 14661 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc)); 14662 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { 14663 if (i == X86::AddrDisp) 14664 MIB.addDisp(MI->getOperand(MemOpndSlot + i), LabelOffset); 14665 else 14666 MIB.addOperand(MI->getOperand(MemOpndSlot + i)); 14667 } 14668 if (!UseImmLabel) 14669 MIB.addReg(LabelReg); 14670 else 14671 MIB.addMBB(restoreMBB); 14672 MIB.setMemRefs(MMOBegin, MMOEnd); 14673 // Setup 14674 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup)) 14675 .addMBB(restoreMBB); 14676 14677 const X86RegisterInfo *RegInfo = 14678 static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo()); 14679 MIB.addRegMask(RegInfo->getNoPreservedMask()); 14680 thisMBB->addSuccessor(mainMBB); 14681 thisMBB->addSuccessor(restoreMBB); 14682 14683 // mainMBB: 14684 // EAX = 0 14685 BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg); 14686 mainMBB->addSuccessor(sinkMBB); 14687 14688 // sinkMBB: 14689 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 14690 TII->get(X86::PHI), DstReg) 14691 .addReg(mainDstReg).addMBB(mainMBB) 14692 .addReg(restoreDstReg).addMBB(restoreMBB); 14693 14694 // restoreMBB: 14695 BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1); 14696 BuildMI(restoreMBB, DL, TII->get(X86::JMP_4)).addMBB(sinkMBB); 14697 restoreMBB->addSuccessor(sinkMBB); 14698 14699 MI->eraseFromParent(); 14700 return sinkMBB; 14701} 14702 14703MachineBasicBlock * 14704X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI, 14705 MachineBasicBlock *MBB) const { 14706 DebugLoc DL = MI->getDebugLoc(); 14707 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 14708 14709 MachineFunction *MF = MBB->getParent(); 14710 MachineRegisterInfo &MRI = MF->getRegInfo(); 14711 14712 // Memory Reference 14713 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 14714 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 14715 14716 MVT PVT = getPointerTy(); 14717 assert((PVT == MVT::i64 || PVT == MVT::i32) && 14718 "Invalid Pointer Size!"); 14719 14720 const TargetRegisterClass *RC = 14721 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass; 14722 unsigned Tmp = MRI.createVirtualRegister(RC); 14723 // Since FP is only updated here but NOT referenced, it's treated as GPR. 14724 const X86RegisterInfo *RegInfo = 14725 static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo()); 14726 unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP; 14727 unsigned SP = RegInfo->getStackRegister(); 14728 14729 MachineInstrBuilder MIB; 14730 14731 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 14732 const int64_t SPOffset = 2 * PVT.getStoreSize(); 14733 14734 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm; 14735 unsigned IJmpOpc = (PVT == MVT::i64) ? 
X86::JMP64r : X86::JMP32r; 14736 14737 // Reload FP 14738 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP); 14739 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) 14740 MIB.addOperand(MI->getOperand(i)); 14741 MIB.setMemRefs(MMOBegin, MMOEnd); 14742 // Reload IP 14743 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp); 14744 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { 14745 if (i == X86::AddrDisp) 14746 MIB.addDisp(MI->getOperand(i), LabelOffset); 14747 else 14748 MIB.addOperand(MI->getOperand(i)); 14749 } 14750 MIB.setMemRefs(MMOBegin, MMOEnd); 14751 // Reload SP 14752 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), SP); 14753 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { 14754 if (i == X86::AddrDisp) 14755 MIB.addDisp(MI->getOperand(i), SPOffset); 14756 else 14757 MIB.addOperand(MI->getOperand(i)); 14758 } 14759 MIB.setMemRefs(MMOBegin, MMOEnd); 14760 // Jump 14761 BuildMI(*MBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp); 14762 14763 MI->eraseFromParent(); 14764 return MBB; 14765} 14766 14767MachineBasicBlock * 14768X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 14769 MachineBasicBlock *BB) const { 14770 switch (MI->getOpcode()) { 14771 default: llvm_unreachable("Unexpected instr type to insert"); 14772 case X86::TAILJMPd64: 14773 case X86::TAILJMPr64: 14774 case X86::TAILJMPm64: 14775 llvm_unreachable("TAILJMP64 would not be touched here."); 14776 case X86::TCRETURNdi64: 14777 case X86::TCRETURNri64: 14778 case X86::TCRETURNmi64: 14779 return BB; 14780 case X86::WIN_ALLOCA: 14781 return EmitLoweredWinAlloca(MI, BB); 14782 case X86::SEG_ALLOCA_32: 14783 return EmitLoweredSegAlloca(MI, BB, false); 14784 case X86::SEG_ALLOCA_64: 14785 return EmitLoweredSegAlloca(MI, BB, true); 14786 case X86::TLSCall_32: 14787 case X86::TLSCall_64: 14788 return EmitLoweredTLSCall(MI, BB); 14789 case X86::CMOV_GR8: 14790 case X86::CMOV_FR32: 14791 case X86::CMOV_FR64: 14792 case X86::CMOV_V4F32: 14793 case X86::CMOV_V2F64: 14794 case X86::CMOV_V2I64: 14795 case X86::CMOV_V8F32: 14796 case X86::CMOV_V4F64: 14797 case X86::CMOV_V4I64: 14798 case X86::CMOV_GR16: 14799 case X86::CMOV_GR32: 14800 case X86::CMOV_RFP32: 14801 case X86::CMOV_RFP64: 14802 case X86::CMOV_RFP80: 14803 return EmitLoweredSelect(MI, BB); 14804 14805 case X86::FP32_TO_INT16_IN_MEM: 14806 case X86::FP32_TO_INT32_IN_MEM: 14807 case X86::FP32_TO_INT64_IN_MEM: 14808 case X86::FP64_TO_INT16_IN_MEM: 14809 case X86::FP64_TO_INT32_IN_MEM: 14810 case X86::FP64_TO_INT64_IN_MEM: 14811 case X86::FP80_TO_INT16_IN_MEM: 14812 case X86::FP80_TO_INT32_IN_MEM: 14813 case X86::FP80_TO_INT64_IN_MEM: { 14814 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 14815 DebugLoc DL = MI->getDebugLoc(); 14816 14817 // Change the floating point control register to use "round towards zero" 14818 // mode when truncating to an integer value. 14819 MachineFunction *F = BB->getParent(); 14820 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false); 14821 addFrameReference(BuildMI(*BB, MI, DL, 14822 TII->get(X86::FNSTCW16m)), CWFrameIdx); 14823 14824 // Load the old value of the high byte of the control word... 14825 unsigned OldCW = 14826 F->getRegInfo().createVirtualRegister(&X86::GR16RegClass); 14827 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW), 14828 CWFrameIdx); 14829 14830 // Set the high part to be round to zero... 14831 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx) 14832 .addImm(0xC7F); 14833 14834 // Reload the modified control word now... 
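    // (FLDCW loads the 16-bit x87 control word from memory. Bits 11:10 of
    // that word are the rounding-control field; the 0xC7F stored above
    // sets them to 11b, i.e. round toward zero, while leaving all six
    // exception mask bits (bits 5:0) set.)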
14835 addFrameReference(BuildMI(*BB, MI, DL, 14836 TII->get(X86::FLDCW16m)), CWFrameIdx); 14837 14838 // Restore the memory image of control word to original value 14839 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx) 14840 .addReg(OldCW); 14841 14842 // Get the X86 opcode to use. 14843 unsigned Opc; 14844 switch (MI->getOpcode()) { 14845 default: llvm_unreachable("illegal opcode!"); 14846 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break; 14847 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break; 14848 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break; 14849 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break; 14850 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break; 14851 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break; 14852 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break; 14853 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break; 14854 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break; 14855 } 14856 14857 X86AddressMode AM; 14858 MachineOperand &Op = MI->getOperand(0); 14859 if (Op.isReg()) { 14860 AM.BaseType = X86AddressMode::RegBase; 14861 AM.Base.Reg = Op.getReg(); 14862 } else { 14863 AM.BaseType = X86AddressMode::FrameIndexBase; 14864 AM.Base.FrameIndex = Op.getIndex(); 14865 } 14866 Op = MI->getOperand(1); 14867 if (Op.isImm()) 14868 AM.Scale = Op.getImm(); 14869 Op = MI->getOperand(2); 14870 if (Op.isImm()) 14871 AM.IndexReg = Op.getImm(); 14872 Op = MI->getOperand(3); 14873 if (Op.isGlobal()) { 14874 AM.GV = Op.getGlobal(); 14875 } else { 14876 AM.Disp = Op.getImm(); 14877 } 14878 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM) 14879 .addReg(MI->getOperand(X86::AddrNumOperands).getReg()); 14880 14881 // Reload the original control word now. 14882 addFrameReference(BuildMI(*BB, MI, DL, 14883 TII->get(X86::FLDCW16m)), CWFrameIdx); 14884 14885 MI->eraseFromParent(); // The pseudo instruction is gone now. 14886 return BB; 14887 } 14888 // String/text processing lowering. 14889 case X86::PCMPISTRM128REG: 14890 case X86::VPCMPISTRM128REG: 14891 case X86::PCMPISTRM128MEM: 14892 case X86::VPCMPISTRM128MEM: 14893 case X86::PCMPESTRM128REG: 14894 case X86::VPCMPESTRM128REG: 14895 case X86::PCMPESTRM128MEM: 14896 case X86::VPCMPESTRM128MEM: 14897 assert(Subtarget->hasSSE42() && 14898 "Target must have SSE4.2 or AVX features enabled"); 14899 return EmitPCMPSTRM(MI, BB, getTargetMachine().getInstrInfo()); 14900 14901 // String/text processing lowering. 14902 case X86::PCMPISTRIREG: 14903 case X86::VPCMPISTRIREG: 14904 case X86::PCMPISTRIMEM: 14905 case X86::VPCMPISTRIMEM: 14906 case X86::PCMPESTRIREG: 14907 case X86::VPCMPESTRIREG: 14908 case X86::PCMPESTRIMEM: 14909 case X86::VPCMPESTRIMEM: 14910 assert(Subtarget->hasSSE42() && 14911 "Target must have SSE4.2 or AVX features enabled"); 14912 return EmitPCMPSTRI(MI, BB, getTargetMachine().getInstrInfo()); 14913 14914 // Thread synchronization. 14915 case X86::MONITOR: 14916 return EmitMonitor(MI, BB, getTargetMachine().getInstrInfo(), Subtarget); 14917 14918 // xbegin 14919 case X86::XBEGIN: 14920 return EmitXBegin(MI, BB, getTargetMachine().getInstrInfo()); 14921 14922 // Atomic Lowering. 
14923 case X86::ATOMAND8: 14924 case X86::ATOMAND16: 14925 case X86::ATOMAND32: 14926 case X86::ATOMAND64: 14927 // Fall through 14928 case X86::ATOMOR8: 14929 case X86::ATOMOR16: 14930 case X86::ATOMOR32: 14931 case X86::ATOMOR64: 14932 // Fall through 14933 case X86::ATOMXOR16: 14934 case X86::ATOMXOR8: 14935 case X86::ATOMXOR32: 14936 case X86::ATOMXOR64: 14937 // Fall through 14938 case X86::ATOMNAND8: 14939 case X86::ATOMNAND16: 14940 case X86::ATOMNAND32: 14941 case X86::ATOMNAND64: 14942 // Fall through 14943 case X86::ATOMMAX8: 14944 case X86::ATOMMAX16: 14945 case X86::ATOMMAX32: 14946 case X86::ATOMMAX64: 14947 // Fall through 14948 case X86::ATOMMIN8: 14949 case X86::ATOMMIN16: 14950 case X86::ATOMMIN32: 14951 case X86::ATOMMIN64: 14952 // Fall through 14953 case X86::ATOMUMAX8: 14954 case X86::ATOMUMAX16: 14955 case X86::ATOMUMAX32: 14956 case X86::ATOMUMAX64: 14957 // Fall through 14958 case X86::ATOMUMIN8: 14959 case X86::ATOMUMIN16: 14960 case X86::ATOMUMIN32: 14961 case X86::ATOMUMIN64: 14962 return EmitAtomicLoadArith(MI, BB); 14963 14964 // This group does 64-bit operations on a 32-bit host. 14965 case X86::ATOMAND6432: 14966 case X86::ATOMOR6432: 14967 case X86::ATOMXOR6432: 14968 case X86::ATOMNAND6432: 14969 case X86::ATOMADD6432: 14970 case X86::ATOMSUB6432: 14971 case X86::ATOMMAX6432: 14972 case X86::ATOMMIN6432: 14973 case X86::ATOMUMAX6432: 14974 case X86::ATOMUMIN6432: 14975 case X86::ATOMSWAP6432: 14976 return EmitAtomicLoadArith6432(MI, BB); 14977 14978 case X86::VASTART_SAVE_XMM_REGS: 14979 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB); 14980 14981 case X86::VAARG_64: 14982 return EmitVAARG64WithCustomInserter(MI, BB); 14983 14984 case X86::EH_SjLj_SetJmp32: 14985 case X86::EH_SjLj_SetJmp64: 14986 return emitEHSjLjSetJmp(MI, BB); 14987 14988 case X86::EH_SjLj_LongJmp32: 14989 case X86::EH_SjLj_LongJmp64: 14990 return emitEHSjLjLongJmp(MI, BB); 14991 } 14992} 14993 14994//===----------------------------------------------------------------------===// 14995// X86 Optimization Hooks 14996//===----------------------------------------------------------------------===// 14997 14998void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 14999 APInt &KnownZero, 15000 APInt &KnownOne, 15001 const SelectionDAG &DAG, 15002 unsigned Depth) const { 15003 unsigned BitWidth = KnownZero.getBitWidth(); 15004 unsigned Opc = Op.getOpcode(); 15005 assert((Opc >= ISD::BUILTIN_OP_END || 15006 Opc == ISD::INTRINSIC_WO_CHAIN || 15007 Opc == ISD::INTRINSIC_W_CHAIN || 15008 Opc == ISD::INTRINSIC_VOID) && 15009 "Should use MaskedValueIsZero if you don't know whether Op" 15010 " is a target node!"); 15011 15012 KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything. 15013 switch (Opc) { 15014 default: break; 15015 case X86ISD::ADD: 15016 case X86ISD::SUB: 15017 case X86ISD::ADC: 15018 case X86ISD::SBB: 15019 case X86ISD::SMUL: 15020 case X86ISD::UMUL: 15021 case X86ISD::INC: 15022 case X86ISD::DEC: 15023 case X86ISD::OR: 15024 case X86ISD::XOR: 15025 case X86ISD::AND: 15026 // These nodes' second result is a boolean. 
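    // Only bit 0 of such a boolean result can be set, so for result #1
    // (and for SETCC below) all bits above the lowest are known zero.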
15027 if (Op.getResNo() == 0) 15028 break; 15029 // Fallthrough 15030 case X86ISD::SETCC: 15031 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1); 15032 break; 15033 case ISD::INTRINSIC_WO_CHAIN: { 15034 unsigned IntId = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 15035 unsigned NumLoBits = 0; 15036 switch (IntId) { 15037 default: break; 15038 case Intrinsic::x86_sse_movmsk_ps: 15039 case Intrinsic::x86_avx_movmsk_ps_256: 15040 case Intrinsic::x86_sse2_movmsk_pd: 15041 case Intrinsic::x86_avx_movmsk_pd_256: 15042 case Intrinsic::x86_mmx_pmovmskb: 15043 case Intrinsic::x86_sse2_pmovmskb_128: 15044 case Intrinsic::x86_avx2_pmovmskb: { 15045 // High bits of movmskp{s|d}, pmovmskb are known zero. 15046 switch (IntId) { 15047 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 15048 case Intrinsic::x86_sse_movmsk_ps: NumLoBits = 4; break; 15049 case Intrinsic::x86_avx_movmsk_ps_256: NumLoBits = 8; break; 15050 case Intrinsic::x86_sse2_movmsk_pd: NumLoBits = 2; break; 15051 case Intrinsic::x86_avx_movmsk_pd_256: NumLoBits = 4; break; 15052 case Intrinsic::x86_mmx_pmovmskb: NumLoBits = 8; break; 15053 case Intrinsic::x86_sse2_pmovmskb_128: NumLoBits = 16; break; 15054 case Intrinsic::x86_avx2_pmovmskb: NumLoBits = 32; break; 15055 } 15056 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - NumLoBits); 15057 break; 15058 } 15059 } 15060 break; 15061 } 15062 } 15063} 15064 15065unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op, 15066 unsigned Depth) const { 15067 // SETCC_CARRY sets the dest to ~0 for true or 0 for false. 15068 if (Op.getOpcode() == X86ISD::SETCC_CARRY) 15069 return Op.getValueType().getScalarType().getSizeInBits(); 15070 15071 // Fallback case. 15072 return 1; 15073} 15074 15075/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the 15076/// node is a GlobalAddress + offset. 
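/// For example (informally), a node of the form
///   (X86ISD::Wrapper (targetglobaladdr @g + 8))
/// yields GA = @g and Offset = 8.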
15077bool X86TargetLowering::isGAPlusOffset(SDNode *N, 15078 const GlobalValue* &GA, 15079 int64_t &Offset) const { 15080 if (N->getOpcode() == X86ISD::Wrapper) { 15081 if (isa<GlobalAddressSDNode>(N->getOperand(0))) { 15082 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal(); 15083 Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset(); 15084 return true; 15085 } 15086 } 15087 return TargetLowering::isGAPlusOffset(N, GA, Offset); 15088} 15089 15090/// isShuffleHigh128VectorInsertLow - Checks whether the shuffle node is the 15091/// same as extracting the high 128-bit part of 256-bit vector and then 15092/// inserting the result into the low part of a new 256-bit vector 15093static bool isShuffleHigh128VectorInsertLow(ShuffleVectorSDNode *SVOp) { 15094 EVT VT = SVOp->getValueType(0); 15095 unsigned NumElems = VT.getVectorNumElements(); 15096 15097 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u> 15098 for (unsigned i = 0, j = NumElems/2; i != NumElems/2; ++i, ++j) 15099 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) || 15100 SVOp->getMaskElt(j) >= 0) 15101 return false; 15102 15103 return true; 15104} 15105 15106/// isShuffleLow128VectorInsertHigh - Checks whether the shuffle node is the 15107/// same as extracting the low 128-bit part of 256-bit vector and then 15108/// inserting the result into the high part of a new 256-bit vector 15109static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) { 15110 EVT VT = SVOp->getValueType(0); 15111 unsigned NumElems = VT.getVectorNumElements(); 15112 15113 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1> 15114 for (unsigned i = NumElems/2, j = 0; i != NumElems; ++i, ++j) 15115 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) || 15116 SVOp->getMaskElt(j) >= 0) 15117 return false; 15118 15119 return true; 15120} 15121 15122/// PerformShuffleCombine256 - Performs shuffle combines for 256-bit vectors. 15123static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG, 15124 TargetLowering::DAGCombinerInfo &DCI, 15125 const X86Subtarget* Subtarget) { 15126 SDLoc dl(N); 15127 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 15128 SDValue V1 = SVOp->getOperand(0); 15129 SDValue V2 = SVOp->getOperand(1); 15130 EVT VT = SVOp->getValueType(0); 15131 unsigned NumElems = VT.getVectorNumElements(); 15132 15133 if (V1.getOpcode() == ISD::CONCAT_VECTORS && 15134 V2.getOpcode() == ISD::CONCAT_VECTORS) { 15135 // 15136 // 0,0,0,... 15137 // | 15138 // V UNDEF BUILD_VECTOR UNDEF 15139 // \ / \ / 15140 // CONCAT_VECTOR CONCAT_VECTOR 15141 // \ / 15142 // \ / 15143 // RESULT: V + zero extended 15144 // 15145 if (V2.getOperand(0).getOpcode() != ISD::BUILD_VECTOR || 15146 V2.getOperand(1).getOpcode() != ISD::UNDEF || 15147 V1.getOperand(1).getOpcode() != ISD::UNDEF) 15148 return SDValue(); 15149 15150 if (!ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode())) 15151 return SDValue(); 15152 15153 // To match the shuffle mask, the first half of the mask should 15154 // be exactly the first vector, and all the rest a splat with the 15155 // first element of the second one. 15156 for (unsigned i = 0; i != NumElems/2; ++i) 15157 if (!isUndefOrEqual(SVOp->getMaskElt(i), i) || 15158 !isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems)) 15159 return SDValue(); 15160 15161 // If V1 is coming from a vector load then just fold to a VZEXT_LOAD. 
15162 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(V1.getOperand(0))) { 15163 if (Ld->hasNUsesOfValue(1, 0)) { 15164 SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other); 15165 SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() }; 15166 SDValue ResNode = 15167 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, 15168 array_lengthof(Ops), 15169 Ld->getMemoryVT(), 15170 Ld->getPointerInfo(), 15171 Ld->getAlignment(), 15172 false/*isVolatile*/, true/*ReadMem*/, 15173 false/*WriteMem*/); 15174 15175 // Make sure the newly-created LOAD is in the same position as Ld in 15176 // terms of dependency. We create a TokenFactor for Ld and ResNode, 15177 // and update uses of Ld's output chain to use the TokenFactor. 15178 if (Ld->hasAnyUseOfValue(1)) { 15179 SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 15180 SDValue(Ld, 1), SDValue(ResNode.getNode(), 1)); 15181 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewChain); 15182 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(Ld, 1), 15183 SDValue(ResNode.getNode(), 1)); 15184 } 15185 15186 return DAG.getNode(ISD::BITCAST, dl, VT, ResNode); 15187 } 15188 } 15189 15190 // Emit a zeroed vector and insert the desired subvector on its 15191 // first half. 15192 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl); 15193 SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), 0, DAG, dl); 15194 return DCI.CombineTo(N, InsV); 15195 } 15196 15197 //===--------------------------------------------------------------------===// 15198 // Combine some shuffles into subvector extracts and inserts: 15199 // 15200 15201 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u> 15202 if (isShuffleHigh128VectorInsertLow(SVOp)) { 15203 SDValue V = Extract128BitVector(V1, NumElems/2, DAG, dl); 15204 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, 0, DAG, dl); 15205 return DCI.CombineTo(N, InsV); 15206 } 15207 15208 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1> 15209 if (isShuffleLow128VectorInsertHigh(SVOp)) { 15210 SDValue V = Extract128BitVector(V1, 0, DAG, dl); 15211 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, NumElems/2, DAG, dl); 15212 return DCI.CombineTo(N, InsV); 15213 } 15214 15215 return SDValue(); 15216} 15217 15218/// PerformShuffleCombine - Performs several different shuffle combines. 15219static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG, 15220 TargetLowering::DAGCombinerInfo &DCI, 15221 const X86Subtarget *Subtarget) { 15222 SDLoc dl(N); 15223 EVT VT = N->getValueType(0); 15224 15225 // Don't create instructions with illegal types after legalize types has run. 15226 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 15227 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType())) 15228 return SDValue(); 15229 15230 // Combine 256-bit vector shuffles. This is only profitable when in AVX mode 15231 if (Subtarget->hasFp256() && VT.is256BitVector() && 15232 N->getOpcode() == ISD::VECTOR_SHUFFLE) 15233 return PerformShuffleCombine256(N, DAG, DCI, Subtarget); 15234 15235 // Only handle 128 wide vector from here on. 15236 if (!VT.is128BitVector()) 15237 return SDValue(); 15238 15239 // Combine a vector_shuffle that is equal to build_vector load1, load2, load3, 15240 // load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are 15241 // consecutive, non-overlapping, and in the right order. 
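  // Illustrative example of the combine described above: for i32 loads from
  // p, p+4, p+8 and p+12,
  //   (v4i32 build_vector (load p), (load p+4), (load p+8), (load p+12))
  // collapses into a single (v4i32 load p).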
15242   SmallVector<SDValue, 16> Elts;
15243   for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
15244     Elts.push_back(getShuffleScalarElt(N, i, DAG, 0));
15245
15246   return EltsFromConsecutiveLoads(VT, Elts, dl, DAG);
15247 }
15248
15249 /// PerformTruncateCombine - Converts a truncate operation into
15250 /// a sequence of vector shuffle operations.
15251 /// This is possible when we truncate a 256-bit vector to a 128-bit vector.
15252 static SDValue PerformTruncateCombine(SDNode *N, SelectionDAG &DAG,
15253                                       TargetLowering::DAGCombinerInfo &DCI,
15254                                       const X86Subtarget *Subtarget) {
15255   return SDValue();
15256 }
15257
15258 /// XFormVExtractWithShuffleIntoLoad - Check if a vector extract from a target
15259 /// specific shuffle of a load can be folded into a single element load.
15260 /// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
15261 /// these shuffles have been custom lowered, so we need to handle them here.
15262 static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
15263                                          TargetLowering::DAGCombinerInfo &DCI) {
15264   if (DCI.isBeforeLegalizeOps())
15265     return SDValue();
15266
15267   SDValue InVec = N->getOperand(0);
15268   SDValue EltNo = N->getOperand(1);
15269
15270   if (!isa<ConstantSDNode>(EltNo))
15271     return SDValue();
15272
15273   EVT VT = InVec.getValueType();
15274
15275   bool HasShuffleIntoBitcast = false;
15276   if (InVec.getOpcode() == ISD::BITCAST) {
15277     // Don't duplicate a load with other uses.
15278     if (!InVec.hasOneUse())
15279       return SDValue();
15280     EVT BCVT = InVec.getOperand(0).getValueType();
15281     if (BCVT.getVectorNumElements() != VT.getVectorNumElements())
15282       return SDValue();
15283     InVec = InVec.getOperand(0);
15284     HasShuffleIntoBitcast = true;
15285   }
15286
15287   if (!isTargetShuffle(InVec.getOpcode()))
15288     return SDValue();
15289
15290   // Don't duplicate a load with other uses.
15291   if (!InVec.hasOneUse())
15292     return SDValue();
15293
15294   SmallVector<int, 16> ShuffleMask;
15295   bool UnaryShuffle;
15296   if (!getTargetShuffleMask(InVec.getNode(), VT.getSimpleVT(), ShuffleMask,
15297                             UnaryShuffle))
15298     return SDValue();
15299
15300   // Select the input vector, guarding against an out-of-range vector extract.
15301   unsigned NumElems = VT.getVectorNumElements();
15302   int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
15303   int Idx = (Elt > (int)NumElems) ? -1 : ShuffleMask[Elt];
15304   SDValue LdNode = (Idx < (int)NumElems) ? InVec.getOperand(0)
15305                                          : InVec.getOperand(1);
15306
15307   // If both inputs to the shuffle are the same, then allow 2 uses.
15308   unsigned AllowedUses = InVec.getOperand(0) == InVec.getOperand(1) ? 2 : 1;
15309
15310   if (LdNode.getOpcode() == ISD::BITCAST) {
15311     // Don't duplicate a load with other uses.
15312     if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0))
15313       return SDValue();
15314
15315     AllowedUses = 1; // Only allow 1 load use if we have a bitcast.
15316     LdNode = LdNode.getOperand(0);
15317   }
15318
15319   if (!ISD::isNormalLoad(LdNode.getNode()))
15320     return SDValue();
15321
15322   LoadSDNode *LN0 = cast<LoadSDNode>(LdNode);
15323
15324   if (!LN0 || !LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile())
15325     return SDValue();
15326
15327   if (HasShuffleIntoBitcast) {
15328     // If there's a bitcast before the shuffle, check if the load type and
15329     // alignment are valid.
15330     unsigned Align = LN0->getAlignment();
15331     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15332     unsigned NewAlign = TLI.getDataLayout()->
15333       getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));
15334
15335     if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VT))
15336       return SDValue();
15337   }
15338
15339   // All checks match, so transform back to vector_shuffle so that the DAG
15340   // combiner can finish the job.
15341   SDLoc dl(N);
15342
15343   // Create the shuffle node, taking into account the case that it's a unary
15344   // shuffle.
15345   SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(VT) : InVec.getOperand(1);
15346   Shuffle = DAG.getVectorShuffle(InVec.getValueType(), dl,
15347                                  InVec.getOperand(0), Shuffle,
15348                                  &ShuffleMask[0]);
15349   Shuffle = DAG.getNode(ISD::BITCAST, dl, VT, Shuffle);
15350   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
15351                      EltNo);
15352 }
15353
15354 /// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index
15355 /// generation and convert it from being a bunch of shuffles and extracts
15356 /// to a simple store and scalar loads to extract the elements.
15357 static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
15358                                                 TargetLowering::DAGCombinerInfo &DCI) {
15359   SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI);
15360   if (NewOp.getNode())
15361     return NewOp;
15362
15363   SDValue InputVector = N->getOperand(0);
15364   // Detect whether we are trying to convert from mmx to i32 and the bitcast
15365   // from mmx to v2i32 has a single usage.
15366   if (InputVector.getNode()->getOpcode() == llvm::ISD::BITCAST &&
15367       InputVector.getNode()->getOperand(0).getValueType() == MVT::x86mmx &&
15368       InputVector.hasOneUse() && N->getValueType(0) == MVT::i32)
15369     return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
15370                        N->getValueType(0),
15371                        InputVector.getNode()->getOperand(0));
15372
15373   // Only operate on vectors of 4 elements, where the alternative shuffling
15374   // gets to be more expensive.
15375   if (InputVector.getValueType() != MVT::v4i32)
15376     return SDValue();
15377
15378   // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a
15379   // single use which is a sign-extend or zero-extend, and all elements are
15380   // used.
15381   SmallVector<SDNode *, 4> Uses;
15382   unsigned ExtractedElements = 0;
15383   for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(),
15384        UE = InputVector.getNode()->use_end(); UI != UE; ++UI) {
15385     if (UI.getUse().getResNo() != InputVector.getResNo())
15386       return SDValue();
15387
15388     SDNode *Extract = *UI;
15389     if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
15390       return SDValue();
15391
15392     if (Extract->getValueType(0) != MVT::i32)
15393       return SDValue();
15394     if (!Extract->hasOneUse())
15395       return SDValue();
15396     if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND &&
15397         Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND)
15398       return SDValue();
15399     if (!isa<ConstantSDNode>(Extract->getOperand(1)))
15400       return SDValue();
15401
15402     // Record which element was extracted.
15403     ExtractedElements |=
15404       1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue();
15405
15406     Uses.push_back(Extract);
15407   }
15408
15409   // If not all the elements were used, this may not be worthwhile.
15410   if (ExtractedElements != 15)
15411     return SDValue();
15412
15413   // Ok, we've now decided to do the transformation.
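  // Illustrative sketch of what follows: the v4i32 vector is spilled to a
  // stack slot, and each (extract_vector_elt v, i) becomes an i32 load from
  // slot + 4*i; the sign/zero extends that consume the extracts can then fold
  // into those loads.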
15413   SDLoc dl(InputVector);
15414
15415   // Store the value to a temporary stack slot.
15416   SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
15417   SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr,
15418                             MachinePointerInfo(), false, false, 0);
15419
15420   // Replace each use (extract) with a load of the appropriate element.
15421   for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
15422        UE = Uses.end(); UI != UE; ++UI) {
15423     SDNode *Extract = *UI;
15424
15425     // Compute the element's address.
15426     SDValue Idx = Extract->getOperand(1);
15427     unsigned EltSize =
15428         InputVector.getValueType().getVectorElementType().getSizeInBits()/8;
15429     uint64_t Offset = EltSize * cast<ConstantSDNode>(Idx)->getZExtValue();
15430     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15431     SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());
15432
15433     SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
15434                                      StackPtr, OffsetVal);
15435
15436     // Load the scalar.
15437     SDValue LoadScalar = DAG.getLoad(Extract->getValueType(0), dl, Ch,
15438                                      ScalarAddr, MachinePointerInfo(),
15439                                      false, false, false, 0);
15440
15441     // Replace the extract with the load.
15442     DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), LoadScalar);
15443   }
15444
15445   // The replacement was made in place; don't return anything.
15446   return SDValue();
15447 }
15448
15449 /// \brief Matches a VSELECT onto min/max or returns 0 if the node doesn't match.
15450 static unsigned matchIntegerMINMAX(SDValue Cond, EVT VT, SDValue LHS,
15451                                    SDValue RHS, SelectionDAG &DAG,
15452                                    const X86Subtarget *Subtarget) {
15453   if (!VT.isVector())
15454     return 0;
15455
15456   switch (VT.getSimpleVT().SimpleTy) {
15457   default: return 0;
15458   case MVT::v32i8:
15459   case MVT::v16i16:
15460   case MVT::v8i32:
15461     if (!Subtarget->hasAVX2())
15462       return 0;
15463   case MVT::v16i8:
15464   case MVT::v8i16:
15465   case MVT::v4i32:
15466     if (!Subtarget->hasSSE2())
15467       return 0;
15468   }
15469
15470   // SSE2 has only a small subset of the operations.
15471   bool hasUnsigned = Subtarget->hasSSE41() ||
15472                      (Subtarget->hasSSE2() && VT == MVT::v16i8);
15473   bool hasSigned = Subtarget->hasSSE41() ||
15474                    (Subtarget->hasSSE2() && VT == MVT::v8i16);
15475
15476   ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
15477
15478   // Check for x CC y ? x : y.
15479   if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
15480       DAG.isEqualTo(RHS, Cond.getOperand(1))) {
15481     switch (CC) {
15482     default: break;
15483     case ISD::SETULT:
15484     case ISD::SETULE:
15485       return hasUnsigned ? X86ISD::UMIN : 0;
15486     case ISD::SETUGT:
15487     case ISD::SETUGE:
15488       return hasUnsigned ? X86ISD::UMAX : 0;
15489     case ISD::SETLT:
15490     case ISD::SETLE:
15491       return hasSigned ? X86ISD::SMIN : 0;
15492     case ISD::SETGT:
15493     case ISD::SETGE:
15494       return hasSigned ? X86ISD::SMAX : 0;
15495     }
15496   // Check for x CC y ? y : x -- a min/max with reversed arms.
15497   } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
15498              DAG.isEqualTo(RHS, Cond.getOperand(0))) {
15499     switch (CC) {
15500     default: break;
15501     case ISD::SETULT:
15502     case ISD::SETULE:
15503       return hasUnsigned ? X86ISD::UMAX : 0;
15504     case ISD::SETUGT:
15505     case ISD::SETUGE:
15506       return hasUnsigned ? X86ISD::UMIN : 0;
15507     case ISD::SETLT:
15508     case ISD::SETLE:
15509       return hasSigned ? X86ISD::SMAX : 0;
15510     case ISD::SETGT:
15511     case ISD::SETGE:
15512       return hasSigned ?
X86ISD::SMIN : 0; 15513 } 15514 } 15515 15516 return 0; 15517} 15518 15519/// PerformSELECTCombine - Do target-specific dag combines on SELECT and VSELECT 15520/// nodes. 15521static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, 15522 TargetLowering::DAGCombinerInfo &DCI, 15523 const X86Subtarget *Subtarget) { 15524 SDLoc DL(N); 15525 SDValue Cond = N->getOperand(0); 15526 // Get the LHS/RHS of the select. 15527 SDValue LHS = N->getOperand(1); 15528 SDValue RHS = N->getOperand(2); 15529 EVT VT = LHS.getValueType(); 15530 15531 // If we have SSE[12] support, try to form min/max nodes. SSE min/max 15532 // instructions match the semantics of the common C idiom x<y?x:y but not 15533 // x<=y?x:y, because of how they handle negative zero (which can be 15534 // ignored in unsafe-math mode). 15535 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() && 15536 VT != MVT::f80 && DAG.getTargetLoweringInfo().isTypeLegal(VT) && 15537 (Subtarget->hasSSE2() || 15538 (Subtarget->hasSSE1() && VT.getScalarType() == MVT::f32))) { 15539 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); 15540 15541 unsigned Opcode = 0; 15542 // Check for x CC y ? x : y. 15543 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) && 15544 DAG.isEqualTo(RHS, Cond.getOperand(1))) { 15545 switch (CC) { 15546 default: break; 15547 case ISD::SETULT: 15548 // Converting this to a min would handle NaNs incorrectly, and swapping 15549 // the operands would cause it to handle comparisons between positive 15550 // and negative zero incorrectly. 15551 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) { 15552 if (!DAG.getTarget().Options.UnsafeFPMath && 15553 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 15554 break; 15555 std::swap(LHS, RHS); 15556 } 15557 Opcode = X86ISD::FMIN; 15558 break; 15559 case ISD::SETOLE: 15560 // Converting this to a min would handle comparisons between positive 15561 // and negative zero incorrectly. 15562 if (!DAG.getTarget().Options.UnsafeFPMath && 15563 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) 15564 break; 15565 Opcode = X86ISD::FMIN; 15566 break; 15567 case ISD::SETULE: 15568 // Converting this to a min would handle both negative zeros and NaNs 15569 // incorrectly, but we can swap the operands to fix both. 15570 std::swap(LHS, RHS); 15571 case ISD::SETOLT: 15572 case ISD::SETLT: 15573 case ISD::SETLE: 15574 Opcode = X86ISD::FMIN; 15575 break; 15576 15577 case ISD::SETOGE: 15578 // Converting this to a max would handle comparisons between positive 15579 // and negative zero incorrectly. 15580 if (!DAG.getTarget().Options.UnsafeFPMath && 15581 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) 15582 break; 15583 Opcode = X86ISD::FMAX; 15584 break; 15585 case ISD::SETUGT: 15586 // Converting this to a max would handle NaNs incorrectly, and swapping 15587 // the operands would cause it to handle comparisons between positive 15588 // and negative zero incorrectly. 15589 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) { 15590 if (!DAG.getTarget().Options.UnsafeFPMath && 15591 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 15592 break; 15593 std::swap(LHS, RHS); 15594 } 15595 Opcode = X86ISD::FMAX; 15596 break; 15597 case ISD::SETUGE: 15598 // Converting this to a max would handle both negative zeros and NaNs 15599 // incorrectly, but we can swap the operands to fix both. 
15600 std::swap(LHS, RHS); 15601 case ISD::SETOGT: 15602 case ISD::SETGT: 15603 case ISD::SETGE: 15604 Opcode = X86ISD::FMAX; 15605 break; 15606 } 15607 // Check for x CC y ? y : x -- a min/max with reversed arms. 15608 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) && 15609 DAG.isEqualTo(RHS, Cond.getOperand(0))) { 15610 switch (CC) { 15611 default: break; 15612 case ISD::SETOGE: 15613 // Converting this to a min would handle comparisons between positive 15614 // and negative zero incorrectly, and swapping the operands would 15615 // cause it to handle NaNs incorrectly. 15616 if (!DAG.getTarget().Options.UnsafeFPMath && 15617 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) { 15618 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 15619 break; 15620 std::swap(LHS, RHS); 15621 } 15622 Opcode = X86ISD::FMIN; 15623 break; 15624 case ISD::SETUGT: 15625 // Converting this to a min would handle NaNs incorrectly. 15626 if (!DAG.getTarget().Options.UnsafeFPMath && 15627 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))) 15628 break; 15629 Opcode = X86ISD::FMIN; 15630 break; 15631 case ISD::SETUGE: 15632 // Converting this to a min would handle both negative zeros and NaNs 15633 // incorrectly, but we can swap the operands to fix both. 15634 std::swap(LHS, RHS); 15635 case ISD::SETOGT: 15636 case ISD::SETGT: 15637 case ISD::SETGE: 15638 Opcode = X86ISD::FMIN; 15639 break; 15640 15641 case ISD::SETULT: 15642 // Converting this to a max would handle NaNs incorrectly. 15643 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 15644 break; 15645 Opcode = X86ISD::FMAX; 15646 break; 15647 case ISD::SETOLE: 15648 // Converting this to a max would handle comparisons between positive 15649 // and negative zero incorrectly, and swapping the operands would 15650 // cause it to handle NaNs incorrectly. 15651 if (!DAG.getTarget().Options.UnsafeFPMath && 15652 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) { 15653 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 15654 break; 15655 std::swap(LHS, RHS); 15656 } 15657 Opcode = X86ISD::FMAX; 15658 break; 15659 case ISD::SETULE: 15660 // Converting this to a max would handle both negative zeros and NaNs 15661 // incorrectly, but we can swap the operands to fix both. 15662 std::swap(LHS, RHS); 15663 case ISD::SETOLT: 15664 case ISD::SETLT: 15665 case ISD::SETLE: 15666 Opcode = X86ISD::FMAX; 15667 break; 15668 } 15669 } 15670 15671 if (Opcode) 15672 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS); 15673 } 15674 15675 // If this is a select between two integer constants, try to do some 15676 // optimizations. 15677 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) { 15678 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS)) 15679 // Don't do this for crazy integer types. 15680 if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) { 15681 // If this is efficiently invertible, canonicalize the LHSC/RHSC values 15682 // so that TrueC (the true value) is larger than FalseC. 15683 bool NeedsCondInvert = false; 15684 15685 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) && 15686 // Efficiently invertible. 15687 (Cond.getOpcode() == ISD::SETCC || // setcc -> invertible. 15688 (Cond.getOpcode() == ISD::XOR && // xor(X, C) -> invertible. 15689 isa<ConstantSDNode>(Cond.getOperand(1))))) { 15690 NeedsCondInvert = true; 15691 std::swap(TrueC, FalseC); 15692 } 15693 15694 // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0. 
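      // e.g. (select C, 16, 0) becomes (shl (zext C), 4): the setcc result is
      // already 0 or 1, so a single shift materializes either constant without
      // a branch or cmov. (Illustrative.)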
15695       if (FalseC->getAPIntValue() == 0 &&
15696           TrueC->getAPIntValue().isPowerOf2()) {
15697         if (NeedsCondInvert) // Invert the condition if needed.
15698           Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
15699                              DAG.getConstant(1, Cond.getValueType()));
15700
15701         // Zero extend the condition if needed.
15702         Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond);
15703
15704         unsigned ShAmt = TrueC->getAPIntValue().logBase2();
15705         return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond,
15706                            DAG.getConstant(ShAmt, MVT::i8));
15707       }
15708
15709       // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst.
15710       if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
15711         if (NeedsCondInvert) // Invert the condition if needed.
15712           Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
15713                              DAG.getConstant(1, Cond.getValueType()));
15714
15715         // Zero extend the condition if needed.
15716         Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
15717                            FalseC->getValueType(0), Cond);
15718         return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
15719                            SDValue(FalseC, 0));
15720       }
15721
15722       // Optimize cases that will turn into an LEA instruction. This requires
15723       // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
15724       if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
15725         uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
15726         if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
15727
15728         bool isFastMultiplier = false;
15729         if (Diff < 10) {
15730           switch ((unsigned char)Diff) {
15731           default: break;
15732           case 1:  // result = add base, cond
15733           case 2:  // result = lea base(    , cond*2)
15734           case 3:  // result = lea base(cond, cond*2)
15735           case 4:  // result = lea base(    , cond*4)
15736           case 5:  // result = lea base(cond, cond*4)
15737           case 8:  // result = lea base(    , cond*8)
15738           case 9:  // result = lea base(cond, cond*8)
15739             isFastMultiplier = true;
15740             break;
15741           }
15742         }
15743
15744         if (isFastMultiplier) {
15745           APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
15746           if (NeedsCondInvert) // Invert the condition if needed.
15747             Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
15748                                DAG.getConstant(1, Cond.getValueType()));
15749
15750           // Zero extend the condition if needed.
15751           Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
15752                              Cond);
15753           // Scale the condition by the difference.
15754           if (Diff != 1)
15755             Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
15756                                DAG.getConstant(Diff, Cond.getValueType()));
15757
15758           // Add the base if non-zero.
15759           if (FalseC->getAPIntValue() != 0)
15760             Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
15761                                SDValue(FalseC, 0));
15762           return Cond;
15763         }
15764       }
15765     }
15766   }
15767
15768   // Canonicalize max and min:
15769   //   (x > y) ? x : y  ->  (x >= y) ? x : y
15770   //   (x < y) ? x : y  ->  (x <= y) ? x : y
15771   // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
15772   // the need for an extra compare against zero. e.g.
15773   //   (x - y) > 0 ? (x - y) : 0  ->  (x - y) >= 0 ? (x - y) : 0
15774   //
15775   //   subl   %esi, %edi
15776   //   testl  %edi, %edi
15777   //   movl   $0, %eax
15778   //   cmovgl %edi, %eax
15779   // =>
15780   //   xorl   %eax, %eax
15781   //   subl   %esi, %edi
15782   //   cmovsl %eax, %edi
15783   if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
15784       DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
15785       DAG.isEqualTo(RHS, Cond.getOperand(1))) {
15786     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
15787     switch (CC) {
15788     default: break;
15789     case ISD::SETLT:
15790     case ISD::SETGT: {
15791       ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
15792       Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
15793                           Cond.getOperand(0), Cond.getOperand(1), NewCC);
15794       return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS);
15795     }
15796     }
15797   }
15798
15799   // Match VSELECTs into subs with unsigned saturation.
15800   if (!DCI.isBeforeLegalize() &&
15801       N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
15802       // psubus is available in SSE2 and AVX2 for i8 and i16 vectors.
15803       ((Subtarget->hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) ||
15804        (Subtarget->hasAVX2() && (VT == MVT::v32i8 || VT == MVT::v16i16)))) {
15805     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
15806
15807     // Check if one of the arms of the VSELECT is a zero vector. If it's on the
15808     // left side, invert the predicate to simplify the logic below.
15809     SDValue Other;
15810     if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
15811       Other = RHS;
15812       CC = ISD::getSetCCInverse(CC, true);
15813     } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
15814       Other = LHS;
15815     }
15816
15817     if (Other.getNode() && Other->getNumOperands() == 2 &&
15818         DAG.isEqualTo(Other->getOperand(0), Cond.getOperand(0))) {
15819       SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
15820       SDValue CondRHS = Cond->getOperand(1);
15821
15822       // Look for a general sub with unsigned saturation first.
15823       // x >= y ? x-y : 0 --> subus x, y
15824       // x >  y ? x-y : 0 --> subus x, y
15825       if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
15826           Other->getOpcode() == ISD::SUB && DAG.isEqualTo(OpRHS, CondRHS))
15827         return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, OpRHS);
15828
15829       // If the RHS is a constant we have to reverse the const canonicalization.
15830       // x > C-1 ? x+(-C) : 0 --> subus x, C
15831       if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
15832           isSplatVector(CondRHS.getNode()) && isSplatVector(OpRHS.getNode())) {
15833         APInt A = cast<ConstantSDNode>(OpRHS.getOperand(0))->getAPIntValue();
15834         if (CondRHS.getConstantOperandVal(0) == -A-1)
15835           return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS,
15836                              DAG.getConstant(-A, VT));
15837       }
15838
15839       // Another special case: If C was a sign bit, the sub has been
15840       // canonicalized into an xor.
15841       // FIXME: Would it be better to use ComputeMaskedBits to determine whether
15842       //        it's safe to decanonicalize the xor?
15843       // x s< 0 ? x^C : 0 --> subus x, C
15844       if (CC == ISD::SETLT && Other->getOpcode() == ISD::XOR &&
15845           ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
15846           isSplatVector(OpRHS.getNode())) {
15847         APInt A = cast<ConstantSDNode>(OpRHS.getOperand(0))->getAPIntValue();
15848         if (A.isSignBit())
15849           return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, OpRHS);
15850       }
15851     }
15852   }
15853
15854   // Try to match a min/max vector operation.
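  // e.g. (vselect (setcc x, y, setult), x, y) maps to (X86ISD::UMIN x, y),
  // which for v16i8 on SSE2 selects to pminub. (Illustrative; see
  // matchIntegerMINMAX above.)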
15855 if (!DCI.isBeforeLegalize() && 15856 N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC) 15857 if (unsigned Op = matchIntegerMINMAX(Cond, VT, LHS, RHS, DAG, Subtarget)) 15858 return DAG.getNode(Op, DL, N->getValueType(0), LHS, RHS); 15859 15860 // Simplify vector selection if the selector will be produced by CMPP*/PCMP*. 15861 if (!DCI.isBeforeLegalize() && N->getOpcode() == ISD::VSELECT && 15862 Cond.getOpcode() == ISD::SETCC) { 15863 15864 assert(Cond.getValueType().isVector() && 15865 "vector select expects a vector selector!"); 15866 15867 EVT IntVT = Cond.getValueType(); 15868 bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode()); 15869 bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode()); 15870 15871 if (!TValIsAllOnes && !FValIsAllZeros) { 15872 // Try invert the condition if true value is not all 1s and false value 15873 // is not all 0s. 15874 bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode()); 15875 bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode()); 15876 15877 if (TValIsAllZeros || FValIsAllOnes) { 15878 SDValue CC = Cond.getOperand(2); 15879 ISD::CondCode NewCC = 15880 ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(), 15881 Cond.getOperand(0).getValueType().isInteger()); 15882 Cond = DAG.getSetCC(DL, IntVT, Cond.getOperand(0), Cond.getOperand(1), NewCC); 15883 std::swap(LHS, RHS); 15884 TValIsAllOnes = FValIsAllOnes; 15885 FValIsAllZeros = TValIsAllZeros; 15886 } 15887 } 15888 15889 if (TValIsAllOnes || FValIsAllZeros) { 15890 SDValue Ret; 15891 15892 if (TValIsAllOnes && FValIsAllZeros) 15893 Ret = Cond; 15894 else if (TValIsAllOnes) 15895 Ret = DAG.getNode(ISD::OR, DL, IntVT, Cond, 15896 DAG.getNode(ISD::BITCAST, DL, IntVT, RHS)); 15897 else if (FValIsAllZeros) 15898 Ret = DAG.getNode(ISD::AND, DL, IntVT, Cond, 15899 DAG.getNode(ISD::BITCAST, DL, IntVT, LHS)); 15900 15901 return DAG.getNode(ISD::BITCAST, DL, VT, Ret); 15902 } 15903 } 15904 15905 // If we know that this node is legal then we know that it is going to be 15906 // matched by one of the SSE/AVX BLEND instructions. These instructions only 15907 // depend on the highest bit in each word. Try to use SimplifyDemandedBits 15908 // to simplify previous instructions. 15909 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 15910 if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() && 15911 !DCI.isBeforeLegalize() && TLI.isOperationLegal(ISD::VSELECT, VT)) { 15912 unsigned BitWidth = Cond.getValueType().getScalarType().getSizeInBits(); 15913 15914 // Don't optimize vector selects that map to mask-registers. 15915 if (BitWidth == 1) 15916 return SDValue(); 15917 15918 assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size"); 15919 APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 1); 15920 15921 APInt KnownZero, KnownOne; 15922 TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(), 15923 DCI.isBeforeLegalizeOps()); 15924 if (TLO.ShrinkDemandedConstant(Cond, DemandedMask) || 15925 TLI.SimplifyDemandedBits(Cond, DemandedMask, KnownZero, KnownOne, TLO)) 15926 DCI.CommitTargetLoweringOpt(TLO); 15927 } 15928 15929 return SDValue(); 15930} 15931 15932// Check whether a boolean test is testing a boolean value generated by 15933// X86ISD::SETCC. If so, return the operand of that SETCC and proper condition 15934// code. 
15935// 15936// Simplify the following patterns: 15937// (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or 15938// (Op (CMP (SETCC Cond EFLAGS) 0) NEQ) 15939// to (Op EFLAGS Cond) 15940// 15941// (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or 15942// (Op (CMP (SETCC Cond EFLAGS) 1) NEQ) 15943// to (Op EFLAGS !Cond) 15944// 15945// where Op could be BRCOND or CMOV. 15946// 15947static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) { 15948 // Quit if not CMP and SUB with its value result used. 15949 if (Cmp.getOpcode() != X86ISD::CMP && 15950 (Cmp.getOpcode() != X86ISD::SUB || Cmp.getNode()->hasAnyUseOfValue(0))) 15951 return SDValue(); 15952 15953 // Quit if not used as a boolean value. 15954 if (CC != X86::COND_E && CC != X86::COND_NE) 15955 return SDValue(); 15956 15957 // Check CMP operands. One of them should be 0 or 1 and the other should be 15958 // an SetCC or extended from it. 15959 SDValue Op1 = Cmp.getOperand(0); 15960 SDValue Op2 = Cmp.getOperand(1); 15961 15962 SDValue SetCC; 15963 const ConstantSDNode* C = 0; 15964 bool needOppositeCond = (CC == X86::COND_E); 15965 bool checkAgainstTrue = false; // Is it a comparison against 1? 15966 15967 if ((C = dyn_cast<ConstantSDNode>(Op1))) 15968 SetCC = Op2; 15969 else if ((C = dyn_cast<ConstantSDNode>(Op2))) 15970 SetCC = Op1; 15971 else // Quit if all operands are not constants. 15972 return SDValue(); 15973 15974 if (C->getZExtValue() == 1) { 15975 needOppositeCond = !needOppositeCond; 15976 checkAgainstTrue = true; 15977 } else if (C->getZExtValue() != 0) 15978 // Quit if the constant is neither 0 or 1. 15979 return SDValue(); 15980 15981 bool truncatedToBoolWithAnd = false; 15982 // Skip (zext $x), (trunc $x), or (and $x, 1) node. 15983 while (SetCC.getOpcode() == ISD::ZERO_EXTEND || 15984 SetCC.getOpcode() == ISD::TRUNCATE || 15985 SetCC.getOpcode() == ISD::AND) { 15986 if (SetCC.getOpcode() == ISD::AND) { 15987 int OpIdx = -1; 15988 ConstantSDNode *CS; 15989 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(0))) && 15990 CS->getZExtValue() == 1) 15991 OpIdx = 1; 15992 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(1))) && 15993 CS->getZExtValue() == 1) 15994 OpIdx = 0; 15995 if (OpIdx == -1) 15996 break; 15997 SetCC = SetCC.getOperand(OpIdx); 15998 truncatedToBoolWithAnd = true; 15999 } else 16000 SetCC = SetCC.getOperand(0); 16001 } 16002 16003 switch (SetCC.getOpcode()) { 16004 case X86ISD::SETCC_CARRY: 16005 // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to 16006 // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1, 16007 // i.e. it's a comparison against true but the result of SETCC_CARRY is not 16008 // truncated to i1 using 'and'. 16009 if (checkAgainstTrue && !truncatedToBoolWithAnd) 16010 break; 16011 assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B && 16012 "Invalid use of SETCC_CARRY!"); 16013 // FALL THROUGH 16014 case X86ISD::SETCC: 16015 // Set the condition code or opposite one if necessary. 16016 CC = X86::CondCode(SetCC.getConstantOperandVal(0)); 16017 if (needOppositeCond) 16018 CC = X86::GetOppositeBranchCondition(CC); 16019 return SetCC.getOperand(1); 16020 case X86ISD::CMOV: { 16021 // Check whether false/true value has canonical one, i.e. 0 or 1. 16022 ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0)); 16023 ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1)); 16024 // Quit if true value is not a constant. 16025 if (!TVal) 16026 return SDValue(); 16027 // Quit if false value is not a constant. 
16028 if (!FVal) { 16029 SDValue Op = SetCC.getOperand(0); 16030 // Skip 'zext' or 'trunc' node. 16031 if (Op.getOpcode() == ISD::ZERO_EXTEND || 16032 Op.getOpcode() == ISD::TRUNCATE) 16033 Op = Op.getOperand(0); 16034 // A special case for rdrand/rdseed, where 0 is set if false cond is 16035 // found. 16036 if ((Op.getOpcode() != X86ISD::RDRAND && 16037 Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0) 16038 return SDValue(); 16039 } 16040 // Quit if false value is not the constant 0 or 1. 16041 bool FValIsFalse = true; 16042 if (FVal && FVal->getZExtValue() != 0) { 16043 if (FVal->getZExtValue() != 1) 16044 return SDValue(); 16045 // If FVal is 1, opposite cond is needed. 16046 needOppositeCond = !needOppositeCond; 16047 FValIsFalse = false; 16048 } 16049 // Quit if TVal is not the constant opposite of FVal. 16050 if (FValIsFalse && TVal->getZExtValue() != 1) 16051 return SDValue(); 16052 if (!FValIsFalse && TVal->getZExtValue() != 0) 16053 return SDValue(); 16054 CC = X86::CondCode(SetCC.getConstantOperandVal(2)); 16055 if (needOppositeCond) 16056 CC = X86::GetOppositeBranchCondition(CC); 16057 return SetCC.getOperand(3); 16058 } 16059 } 16060 16061 return SDValue(); 16062} 16063 16064/// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL] 16065static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG, 16066 TargetLowering::DAGCombinerInfo &DCI, 16067 const X86Subtarget *Subtarget) { 16068 SDLoc DL(N); 16069 16070 // If the flag operand isn't dead, don't touch this CMOV. 16071 if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty()) 16072 return SDValue(); 16073 16074 SDValue FalseOp = N->getOperand(0); 16075 SDValue TrueOp = N->getOperand(1); 16076 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2); 16077 SDValue Cond = N->getOperand(3); 16078 16079 if (CC == X86::COND_E || CC == X86::COND_NE) { 16080 switch (Cond.getOpcode()) { 16081 default: break; 16082 case X86ISD::BSR: 16083 case X86ISD::BSF: 16084 // If operand of BSR / BSF are proven never zero, then ZF cannot be set. 16085 if (DAG.isKnownNeverZero(Cond.getOperand(0))) 16086 return (CC == X86::COND_E) ? FalseOp : TrueOp; 16087 } 16088 } 16089 16090 SDValue Flags; 16091 16092 Flags = checkBoolTestSetCCCombine(Cond, CC); 16093 if (Flags.getNode() && 16094 // Extra check as FCMOV only supports a subset of X86 cond. 16095 (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC))) { 16096 SDValue Ops[] = { FalseOp, TrueOp, 16097 DAG.getConstant(CC, MVT::i8), Flags }; 16098 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), 16099 Ops, array_lengthof(Ops)); 16100 } 16101 16102 // If this is a select between two integer constants, try to do some 16103 // optimizations. Note that the operands are ordered the opposite of SELECT 16104 // operands. 16105 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) { 16106 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) { 16107 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is 16108 // larger than FalseC (the false value). 16109 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) { 16110 CC = X86::GetOppositeBranchCondition(CC); 16111 std::swap(TrueC, FalseC); 16112 std::swap(TrueOp, FalseOp); 16113 } 16114 16115 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0. 16116 // This is efficient for any integer data type (including i8/i16) and 16117 // shift amount. 
16118 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) { 16119 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 16120 DAG.getConstant(CC, MVT::i8), Cond); 16121 16122 // Zero extend the condition if needed. 16123 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond); 16124 16125 unsigned ShAmt = TrueC->getAPIntValue().logBase2(); 16126 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond, 16127 DAG.getConstant(ShAmt, MVT::i8)); 16128 if (N->getNumValues() == 2) // Dead flag value? 16129 return DCI.CombineTo(N, Cond, SDValue()); 16130 return Cond; 16131 } 16132 16133 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)+cst. This is efficient 16134 // for any integer data type, including i8/i16. 16135 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) { 16136 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 16137 DAG.getConstant(CC, MVT::i8), Cond); 16138 16139 // Zero extend the condition if needed. 16140 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, 16141 FalseC->getValueType(0), Cond); 16142 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 16143 SDValue(FalseC, 0)); 16144 16145 if (N->getNumValues() == 2) // Dead flag value? 16146 return DCI.CombineTo(N, Cond, SDValue()); 16147 return Cond; 16148 } 16149 16150 // Optimize cases that will turn into an LEA instruction. This requires 16151 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9). 16152 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) { 16153 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue(); 16154 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff; 16155 16156 bool isFastMultiplier = false; 16157 if (Diff < 10) { 16158 switch ((unsigned char)Diff) { 16159 default: break; 16160 case 1: // result = add base, cond 16161 case 2: // result = lea base( , cond*2) 16162 case 3: // result = lea base(cond, cond*2) 16163 case 4: // result = lea base( , cond*4) 16164 case 5: // result = lea base(cond, cond*4) 16165 case 8: // result = lea base( , cond*8) 16166 case 9: // result = lea base(cond, cond*8) 16167 isFastMultiplier = true; 16168 break; 16169 } 16170 } 16171 16172 if (isFastMultiplier) { 16173 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue(); 16174 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 16175 DAG.getConstant(CC, MVT::i8), Cond); 16176 // Zero extend the condition if needed. 16177 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0), 16178 Cond); 16179 // Scale the condition by the difference. 16180 if (Diff != 1) 16181 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond, 16182 DAG.getConstant(Diff, Cond.getValueType())); 16183 16184 // Add the base if non-zero. 16185 if (FalseC->getAPIntValue() != 0) 16186 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 16187 SDValue(FalseC, 0)); 16188 if (N->getNumValues() == 2) // Dead flag value? 16189 return DCI.CombineTo(N, Cond, SDValue()); 16190 return Cond; 16191 } 16192 } 16193 } 16194 } 16195 16196 // Handle these cases: 16197 // (select (x != c), e, c) -> select (x != c), e, x), 16198 // (select (x == c), c, e) -> select (x == c), x, e) 16199 // where the c is an integer constant, and the "select" is the combination 16200 // of CMOV and CMP. 16201 // 16202 // The rationale for this change is that the conditional-move from a constant 16203 // needs two instructions, however, conditional-move from a register needs 16204 // only one instruction. 
16205   //
16206   // CAVEAT: By replacing a constant with a symbolic value, it may obscure
16207   //         some instruction-combining opportunities. This opt needs to be
16208   //         postponed as late as possible.
16209   //
16210   if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
16211     // The DCI.xxxx conditions are provided to postpone the optimization as
16212     // late as possible.
16213
16214     ConstantSDNode *CmpAgainst = 0;
16215     if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
16216         (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
16217         !isa<ConstantSDNode>(Cond.getOperand(0))) {
16218
16219       if (CC == X86::COND_NE &&
16220           CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
16221         CC = X86::GetOppositeBranchCondition(CC);
16222         std::swap(TrueOp, FalseOp);
16223       }
16224
16225       if (CC == X86::COND_E &&
16226           CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
16227         SDValue Ops[] = { FalseOp, Cond.getOperand(0),
16228                           DAG.getConstant(CC, MVT::i8), Cond };
16229         return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops,
16230                            array_lengthof(Ops));
16231       }
16232     }
16233   }
16234
16235   return SDValue();
16236 }
16237
16238 /// PerformMulCombine - Optimize a single multiply by a constant into two
16239 /// in order to implement it with two cheaper instructions, e.g.
16240 /// LEA + SHL or LEA + LEA.
16241 static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
16242                                  TargetLowering::DAGCombinerInfo &DCI) {
16243   if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
16244     return SDValue();
16245
16246   EVT VT = N->getValueType(0);
16247   if (VT != MVT::i64)
16248     return SDValue();
16249
16250   ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
16251   if (!C)
16252     return SDValue();
16253   uint64_t MulAmt = C->getZExtValue();
16254   if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9)
16255     return SDValue();
16256
16257   uint64_t MulAmt1 = 0;
16258   uint64_t MulAmt2 = 0;
16259   if ((MulAmt % 9) == 0) {
16260     MulAmt1 = 9;
16261     MulAmt2 = MulAmt / 9;
16262   } else if ((MulAmt % 5) == 0) {
16263     MulAmt1 = 5;
16264     MulAmt2 = MulAmt / 5;
16265   } else if ((MulAmt % 3) == 0) {
16266     MulAmt1 = 3;
16267     MulAmt2 = MulAmt / 3;
16268   }
16269   if (MulAmt2 &&
16270       (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)) {
16271     SDLoc DL(N);
16272
16273     if (isPowerOf2_64(MulAmt2) &&
16274         !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD))
16275       // If the second multiplier is a power of 2, issue it first. We want the
16276       // multiply by 3, 5, or 9 to be folded into the addressing mode unless
16277       // the lone use is an add.
16278       std::swap(MulAmt1, MulAmt2);
16279
16280     SDValue NewMul;
16281     if (isPowerOf2_64(MulAmt1))
16282       NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
16283                            DAG.getConstant(Log2_64(MulAmt1), MVT::i8));
16284     else
16285       NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
16286                            DAG.getConstant(MulAmt1, VT));
16287
16288     if (isPowerOf2_64(MulAmt2))
16289       NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
16290                            DAG.getConstant(Log2_64(MulAmt2), MVT::i8));
16291     else
16292       NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
16293                            DAG.getConstant(MulAmt2, VT));
16294
16295     // Do not add new nodes to DAG combiner worklist.
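    // (Illustrative: MulAmt == 40 decomposes as MulAmt1 == 8, MulAmt2 == 5
    // after the swap above, producing (X86ISD::MUL_IMM (shl x, 3), 5), i.e. one
    // shift plus one LEA. CombineTo below replaces the multiply in place, which
    // is why the function still returns an empty SDValue afterwards.)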
16296 DCI.CombineTo(N, NewMul, false); 16297 } 16298 return SDValue(); 16299} 16300 16301static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) { 16302 SDValue N0 = N->getOperand(0); 16303 SDValue N1 = N->getOperand(1); 16304 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 16305 EVT VT = N0.getValueType(); 16306 16307 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2)) 16308 // since the result of setcc_c is all zero's or all ones. 16309 if (VT.isInteger() && !VT.isVector() && 16310 N1C && N0.getOpcode() == ISD::AND && 16311 N0.getOperand(1).getOpcode() == ISD::Constant) { 16312 SDValue N00 = N0.getOperand(0); 16313 if (N00.getOpcode() == X86ISD::SETCC_CARRY || 16314 ((N00.getOpcode() == ISD::ANY_EXTEND || 16315 N00.getOpcode() == ISD::ZERO_EXTEND) && 16316 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY)) { 16317 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 16318 APInt ShAmt = N1C->getAPIntValue(); 16319 Mask = Mask.shl(ShAmt); 16320 if (Mask != 0) 16321 return DAG.getNode(ISD::AND, SDLoc(N), VT, 16322 N00, DAG.getConstant(Mask, VT)); 16323 } 16324 } 16325 16326 // Hardware support for vector shifts is sparse which makes us scalarize the 16327 // vector operations in many cases. Also, on sandybridge ADD is faster than 16328 // shl. 16329 // (shl V, 1) -> add V,V 16330 if (isSplatVector(N1.getNode())) { 16331 assert(N0.getValueType().isVector() && "Invalid vector shift type"); 16332 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(0)); 16333 // We shift all of the values by one. In many cases we do not have 16334 // hardware support for this operation. This is better expressed as an ADD 16335 // of two values. 16336 if (N1C && (1 == N1C->getZExtValue())) { 16337 return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0); 16338 } 16339 } 16340 16341 return SDValue(); 16342} 16343 16344/// \brief Returns a vector of 0s if the node in input is a vector logical 16345/// shift by a constant amount which is known to be bigger than or equal 16346/// to the vector element size in bits. 16347static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG, 16348 const X86Subtarget *Subtarget) { 16349 EVT VT = N->getValueType(0); 16350 16351 if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 && 16352 (!Subtarget->hasInt256() || 16353 (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16))) 16354 return SDValue(); 16355 16356 SDValue Amt = N->getOperand(1); 16357 SDLoc DL(N); 16358 if (isSplatVector(Amt.getNode())) { 16359 SDValue SclrAmt = Amt->getOperand(0); 16360 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(SclrAmt)) { 16361 APInt ShiftAmt = C->getAPIntValue(); 16362 unsigned MaxAmount = VT.getVectorElementType().getSizeInBits(); 16363 16364 // SSE2/AVX2 logical shifts always return a vector of 0s 16365 // if the shift amount is bigger than or equal to 16366 // the element size. The constant shift amount will be 16367 // encoded as a 8-bit immediate. 16368 if (ShiftAmt.trunc(8).uge(MaxAmount)) 16369 return getZeroVector(VT, Subtarget, DAG, DL); 16370 } 16371 } 16372 16373 return SDValue(); 16374} 16375 16376/// PerformShiftCombine - Combine shifts. 
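/// At present this dispatches SHL nodes to PerformSHLCombine and, for SHL/SRL,
/// tries performShiftToAllZeros to fold a logical shift by an over-wide splat
/// constant amount into a zero vector.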
16377static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG, 16378 TargetLowering::DAGCombinerInfo &DCI, 16379 const X86Subtarget *Subtarget) { 16380 if (N->getOpcode() == ISD::SHL) { 16381 SDValue V = PerformSHLCombine(N, DAG); 16382 if (V.getNode()) return V; 16383 } 16384 16385 if (N->getOpcode() != ISD::SRA) { 16386 // Try to fold this logical shift into a zero vector. 16387 SDValue V = performShiftToAllZeros(N, DAG, Subtarget); 16388 if (V.getNode()) return V; 16389 } 16390 16391 return SDValue(); 16392} 16393 16394// CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..)) 16395// where both setccs reference the same FP CMP, and rewrite for CMPEQSS 16396// and friends. Likewise for OR -> CMPNEQSS. 16397static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG, 16398 TargetLowering::DAGCombinerInfo &DCI, 16399 const X86Subtarget *Subtarget) { 16400 unsigned opcode; 16401 16402 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but 16403 // we're requiring SSE2 for both. 16404 if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) { 16405 SDValue N0 = N->getOperand(0); 16406 SDValue N1 = N->getOperand(1); 16407 SDValue CMP0 = N0->getOperand(1); 16408 SDValue CMP1 = N1->getOperand(1); 16409 SDLoc DL(N); 16410 16411 // The SETCCs should both refer to the same CMP. 16412 if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1) 16413 return SDValue(); 16414 16415 SDValue CMP00 = CMP0->getOperand(0); 16416 SDValue CMP01 = CMP0->getOperand(1); 16417 EVT VT = CMP00.getValueType(); 16418 16419 if (VT == MVT::f32 || VT == MVT::f64) { 16420 bool ExpectingFlags = false; 16421 // Check for any users that want flags: 16422 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end(); 16423 !ExpectingFlags && UI != UE; ++UI) 16424 switch (UI->getOpcode()) { 16425 default: 16426 case ISD::BR_CC: 16427 case ISD::BRCOND: 16428 case ISD::SELECT: 16429 ExpectingFlags = true; 16430 break; 16431 case ISD::CopyToReg: 16432 case ISD::SIGN_EXTEND: 16433 case ISD::ZERO_EXTEND: 16434 case ISD::ANY_EXTEND: 16435 break; 16436 } 16437 16438 if (!ExpectingFlags) { 16439 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0); 16440 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0); 16441 16442 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) { 16443 X86::CondCode tmp = cc0; 16444 cc0 = cc1; 16445 cc1 = tmp; 16446 } 16447 16448 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) || 16449 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) { 16450 bool is64BitFP = (CMP00.getValueType() == MVT::f64); 16451 X86ISD::NodeType NTOperator = is64BitFP ? 16452 X86ISD::FSETCCsd : X86ISD::FSETCCss; 16453 // FIXME: need symbolic constants for these magic numbers. 16454 // See X86ATTInstPrinter.cpp:printSSECC(). 16455 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4; 16456 SDValue OnesOrZeroesF = DAG.getNode(NTOperator, DL, MVT::f32, CMP00, CMP01, 16457 DAG.getConstant(x86cc, MVT::i8)); 16458 SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, MVT::i32, 16459 OnesOrZeroesF); 16460 SDValue ANDed = DAG.getNode(ISD::AND, DL, MVT::i32, OnesOrZeroesI, 16461 DAG.getConstant(1, MVT::i32)); 16462 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ANDed); 16463 return OneBitOfTruth; 16464 } 16465 } 16466 } 16467 } 16468 return SDValue(); 16469} 16470 16471/// CanFoldXORWithAllOnes - Test whether the XOR operand is a AllOnes vector 16472/// so it can be folded inside ANDNP. 
16473static bool CanFoldXORWithAllOnes(const SDNode *N) { 16474 EVT VT = N->getValueType(0); 16475 16476 // Match direct AllOnes for 128 and 256-bit vectors 16477 if (ISD::isBuildVectorAllOnes(N)) 16478 return true; 16479 16480 // Look through a bit convert. 16481 if (N->getOpcode() == ISD::BITCAST) 16482 N = N->getOperand(0).getNode(); 16483 16484 // Sometimes the operand may come from a insert_subvector building a 256-bit 16485 // allones vector 16486 if (VT.is256BitVector() && 16487 N->getOpcode() == ISD::INSERT_SUBVECTOR) { 16488 SDValue V1 = N->getOperand(0); 16489 SDValue V2 = N->getOperand(1); 16490 16491 if (V1.getOpcode() == ISD::INSERT_SUBVECTOR && 16492 V1.getOperand(0).getOpcode() == ISD::UNDEF && 16493 ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) && 16494 ISD::isBuildVectorAllOnes(V2.getNode())) 16495 return true; 16496 } 16497 16498 return false; 16499} 16500 16501// On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized 16502// register. In most cases we actually compare or select YMM-sized registers 16503// and mixing the two types creates horrible code. This method optimizes 16504// some of the transition sequences. 16505static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG, 16506 TargetLowering::DAGCombinerInfo &DCI, 16507 const X86Subtarget *Subtarget) { 16508 EVT VT = N->getValueType(0); 16509 if (!VT.is256BitVector()) 16510 return SDValue(); 16511 16512 assert((N->getOpcode() == ISD::ANY_EXTEND || 16513 N->getOpcode() == ISD::ZERO_EXTEND || 16514 N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node"); 16515 16516 SDValue Narrow = N->getOperand(0); 16517 EVT NarrowVT = Narrow->getValueType(0); 16518 if (!NarrowVT.is128BitVector()) 16519 return SDValue(); 16520 16521 if (Narrow->getOpcode() != ISD::XOR && 16522 Narrow->getOpcode() != ISD::AND && 16523 Narrow->getOpcode() != ISD::OR) 16524 return SDValue(); 16525 16526 SDValue N0 = Narrow->getOperand(0); 16527 SDValue N1 = Narrow->getOperand(1); 16528 SDLoc DL(Narrow); 16529 16530 // The Left side has to be a trunc. 16531 if (N0.getOpcode() != ISD::TRUNCATE) 16532 return SDValue(); 16533 16534 // The type of the truncated inputs. 16535 EVT WideVT = N0->getOperand(0)->getValueType(0); 16536 if (WideVT != VT) 16537 return SDValue(); 16538 16539 // The right side has to be a 'trunc' or a constant vector. 16540 bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE; 16541 bool RHSConst = (isSplatVector(N1.getNode()) && 16542 isa<ConstantSDNode>(N1->getOperand(0))); 16543 if (!RHSTrunc && !RHSConst) 16544 return SDValue(); 16545 16546 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 16547 16548 if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), WideVT)) 16549 return SDValue(); 16550 16551 // Set N0 and N1 to hold the inputs to the new wide operation. 16552 N0 = N0->getOperand(0); 16553 if (RHSConst) { 16554 N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT.getScalarType(), 16555 N1->getOperand(0)); 16556 SmallVector<SDValue, 8> C(WideVT.getVectorNumElements(), N1); 16557 N1 = DAG.getNode(ISD::BUILD_VECTOR, DL, WideVT, &C[0], C.size()); 16558 } else if (RHSTrunc) { 16559 N1 = N1->getOperand(0); 16560 } 16561 16562 // Generate the wide operation. 
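  // e.g. (zext (and (trunc X), (trunc Y))) becomes (and (and X, Y), M), where M
  // keeps only the low NarrowVT bits of each lane; the logic op then runs
  // directly on the wide registers and the trunc/ext pair disappears.
  // (Illustrative.)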
16563 SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, WideVT, N0, N1); 16564 unsigned Opcode = N->getOpcode(); 16565 switch (Opcode) { 16566 case ISD::ANY_EXTEND: 16567 return Op; 16568 case ISD::ZERO_EXTEND: { 16569 unsigned InBits = NarrowVT.getScalarType().getSizeInBits(); 16570 APInt Mask = APInt::getAllOnesValue(InBits); 16571 Mask = Mask.zext(VT.getScalarType().getSizeInBits()); 16572 return DAG.getNode(ISD::AND, DL, VT, 16573 Op, DAG.getConstant(Mask, VT)); 16574 } 16575 case ISD::SIGN_EXTEND: 16576 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, 16577 Op, DAG.getValueType(NarrowVT)); 16578 default: 16579 llvm_unreachable("Unexpected opcode"); 16580 } 16581} 16582 16583static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG, 16584 TargetLowering::DAGCombinerInfo &DCI, 16585 const X86Subtarget *Subtarget) { 16586 EVT VT = N->getValueType(0); 16587 if (DCI.isBeforeLegalizeOps()) 16588 return SDValue(); 16589 16590 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget); 16591 if (R.getNode()) 16592 return R; 16593 16594 // Create BLSI, and BLSR instructions 16595 // BLSI is X & (-X) 16596 // BLSR is X & (X-1) 16597 if (Subtarget->hasBMI() && (VT == MVT::i32 || VT == MVT::i64)) { 16598 SDValue N0 = N->getOperand(0); 16599 SDValue N1 = N->getOperand(1); 16600 SDLoc DL(N); 16601 16602 // Check LHS for neg 16603 if (N0.getOpcode() == ISD::SUB && N0.getOperand(1) == N1 && 16604 isZero(N0.getOperand(0))) 16605 return DAG.getNode(X86ISD::BLSI, DL, VT, N1); 16606 16607 // Check RHS for neg 16608 if (N1.getOpcode() == ISD::SUB && N1.getOperand(1) == N0 && 16609 isZero(N1.getOperand(0))) 16610 return DAG.getNode(X86ISD::BLSI, DL, VT, N0); 16611 16612 // Check LHS for X-1 16613 if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1 && 16614 isAllOnes(N0.getOperand(1))) 16615 return DAG.getNode(X86ISD::BLSR, DL, VT, N1); 16616 16617 // Check RHS for X-1 16618 if (N1.getOpcode() == ISD::ADD && N1.getOperand(0) == N0 && 16619 isAllOnes(N1.getOperand(1))) 16620 return DAG.getNode(X86ISD::BLSR, DL, VT, N0); 16621 16622 return SDValue(); 16623 } 16624 16625 // Want to form ANDNP nodes: 16626 // 1) In the hopes of then easily combining them with OR and AND nodes 16627 // to form PBLEND/PSIGN. 
16628 // 2) To match ANDN packed intrinsics 16629 if (VT != MVT::v2i64 && VT != MVT::v4i64) 16630 return SDValue(); 16631 16632 SDValue N0 = N->getOperand(0); 16633 SDValue N1 = N->getOperand(1); 16634 SDLoc DL(N); 16635 16636 // Check LHS for vnot 16637 if (N0.getOpcode() == ISD::XOR && 16638 //ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode())) 16639 CanFoldXORWithAllOnes(N0.getOperand(1).getNode())) 16640 return DAG.getNode(X86ISD::ANDNP, DL, VT, N0.getOperand(0), N1); 16641 16642 // Check RHS for vnot 16643 if (N1.getOpcode() == ISD::XOR && 16644 //ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode())) 16645 CanFoldXORWithAllOnes(N1.getOperand(1).getNode())) 16646 return DAG.getNode(X86ISD::ANDNP, DL, VT, N1.getOperand(0), N0); 16647 16648 return SDValue(); 16649} 16650 16651static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG, 16652 TargetLowering::DAGCombinerInfo &DCI, 16653 const X86Subtarget *Subtarget) { 16654 EVT VT = N->getValueType(0); 16655 if (DCI.isBeforeLegalizeOps()) 16656 return SDValue(); 16657 16658 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget); 16659 if (R.getNode()) 16660 return R; 16661 16662 SDValue N0 = N->getOperand(0); 16663 SDValue N1 = N->getOperand(1); 16664 16665 // look for psign/blend 16666 if (VT == MVT::v2i64 || VT == MVT::v4i64) { 16667 if (!Subtarget->hasSSSE3() || 16668 (VT == MVT::v4i64 && !Subtarget->hasInt256())) 16669 return SDValue(); 16670 16671 // Canonicalize pandn to RHS 16672 if (N0.getOpcode() == X86ISD::ANDNP) 16673 std::swap(N0, N1); 16674 // or (and (m, y), (pandn m, x)) 16675 if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) { 16676 SDValue Mask = N1.getOperand(0); 16677 SDValue X = N1.getOperand(1); 16678 SDValue Y; 16679 if (N0.getOperand(0) == Mask) 16680 Y = N0.getOperand(1); 16681 if (N0.getOperand(1) == Mask) 16682 Y = N0.getOperand(0); 16683 16684 // Check to see if the mask appeared in both the AND and ANDNP and 16685 if (!Y.getNode()) 16686 return SDValue(); 16687 16688 // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them. 16689 // Look through mask bitcast. 16690 if (Mask.getOpcode() == ISD::BITCAST) 16691 Mask = Mask.getOperand(0); 16692 if (X.getOpcode() == ISD::BITCAST) 16693 X = X.getOperand(0); 16694 if (Y.getOpcode() == ISD::BITCAST) 16695 Y = Y.getOperand(0); 16696 16697 EVT MaskVT = Mask.getValueType(); 16698 16699 // Validate that the Mask operand is a vector sra node. 16700 // FIXME: what to do for bytes, since there is a psignb/pblendvb, but 16701 // there is no psrai.b 16702 unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits(); 16703 unsigned SraAmt = ~0; 16704 if (Mask.getOpcode() == ISD::SRA) { 16705 SDValue Amt = Mask.getOperand(1); 16706 if (isSplatVector(Amt.getNode())) { 16707 SDValue SclrAmt = Amt->getOperand(0); 16708 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(SclrAmt)) 16709 SraAmt = C->getZExtValue(); 16710 } 16711 } else if (Mask.getOpcode() == X86ISD::VSRAI) { 16712 SDValue SraC = Mask.getOperand(1); 16713 SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue(); 16714 } 16715 if ((SraAmt + 1) != EltBits) 16716 return SDValue(); 16717 16718 SDLoc DL(N); 16719 16720 // Now we know we at least have a plendvb with the mask val. See if 16721 // we can form a psignb/w/d. 
16722 // psign = x.type == y.type == mask.type && y = sub(0, x); 16723 if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X && 16724 ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) && 16725 X.getValueType() == MaskVT && Y.getValueType() == MaskVT) { 16726 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) && 16727 "Unsupported VT for PSIGN"); 16728 Mask = DAG.getNode(X86ISD::PSIGN, DL, MaskVT, X, Mask.getOperand(0)); 16729 return DAG.getNode(ISD::BITCAST, DL, VT, Mask); 16730 } 16731 // PBLENDVB only available on SSE 4.1 16732 if (!Subtarget->hasSSE41()) 16733 return SDValue(); 16734 16735 EVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8; 16736 16737 X = DAG.getNode(ISD::BITCAST, DL, BlendVT, X); 16738 Y = DAG.getNode(ISD::BITCAST, DL, BlendVT, Y); 16739 Mask = DAG.getNode(ISD::BITCAST, DL, BlendVT, Mask); 16740 Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X); 16741 return DAG.getNode(ISD::BITCAST, DL, VT, Mask); 16742 } 16743 } 16744 16745 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64) 16746 return SDValue(); 16747 16748 // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c) 16749 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL) 16750 std::swap(N0, N1); 16751 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL) 16752 return SDValue(); 16753 if (!N0.hasOneUse() || !N1.hasOneUse()) 16754 return SDValue(); 16755 16756 SDValue ShAmt0 = N0.getOperand(1); 16757 if (ShAmt0.getValueType() != MVT::i8) 16758 return SDValue(); 16759 SDValue ShAmt1 = N1.getOperand(1); 16760 if (ShAmt1.getValueType() != MVT::i8) 16761 return SDValue(); 16762 if (ShAmt0.getOpcode() == ISD::TRUNCATE) 16763 ShAmt0 = ShAmt0.getOperand(0); 16764 if (ShAmt1.getOpcode() == ISD::TRUNCATE) 16765 ShAmt1 = ShAmt1.getOperand(0); 16766 16767 SDLoc DL(N); 16768 unsigned Opc = X86ISD::SHLD; 16769 SDValue Op0 = N0.getOperand(0); 16770 SDValue Op1 = N1.getOperand(0); 16771 if (ShAmt0.getOpcode() == ISD::SUB) { 16772 Opc = X86ISD::SHRD; 16773 std::swap(Op0, Op1); 16774 std::swap(ShAmt0, ShAmt1); 16775 } 16776 16777 unsigned Bits = VT.getSizeInBits(); 16778 if (ShAmt1.getOpcode() == ISD::SUB) { 16779 SDValue Sum = ShAmt1.getOperand(0); 16780 if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) { 16781 SDValue ShAmt1Op1 = ShAmt1.getOperand(1); 16782 if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE) 16783 ShAmt1Op1 = ShAmt1Op1.getOperand(0); 16784 if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0) 16785 return DAG.getNode(Opc, DL, VT, 16786 Op0, Op1, 16787 DAG.getNode(ISD::TRUNCATE, DL, 16788 MVT::i8, ShAmt0)); 16789 } 16790 } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) { 16791 ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0); 16792 if (ShAmt0C && 16793 ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits) 16794 return DAG.getNode(Opc, DL, VT, 16795 N0.getOperand(0), N1.getOperand(0), 16796 DAG.getNode(ISD::TRUNCATE, DL, 16797 MVT::i8, ShAmt0)); 16798 } 16799 16800 return SDValue(); 16801} 16802 16803// Generate NEG and CMOV for integer abs. 16804static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) { 16805 EVT VT = N->getValueType(0); 16806 16807 // Since X86 does not have CMOV for 8-bit integer, we don't convert 16808 // 8-bit integer abs to NEG and CMOV. 
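// Illustrative note (editorial, derived from the checks below): the pattern
// matched here is the canonical branchless abs idiom
//   Y      = SRA(X, BitWidth - 1)   ; all-ones if X < 0, else zero
//   abs(X) = XOR(ADD(X, Y), Y)
// which is rewritten as Neg = SUB(0, X) plus a CMOV on the subtraction's
// flags, selecting X when X >= 0 and Neg otherwise.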
16809 if (VT.isInteger() && VT.getSizeInBits() == 8) 16810 return SDValue(); 16811 16812 SDValue N0 = N->getOperand(0); 16813 SDValue N1 = N->getOperand(1); 16814 SDLoc DL(N); 16815 16816 // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1) 16817 // and change it to SUB and CMOV. 16818 if (VT.isInteger() && N->getOpcode() == ISD::XOR && 16819 N0.getOpcode() == ISD::ADD && 16820 N0.getOperand(1) == N1 && 16821 N1.getOpcode() == ISD::SRA && 16822 N1.getOperand(0) == N0.getOperand(0)) 16823 if (ConstantSDNode *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1))) 16824 if (Y1C->getAPIntValue() == VT.getSizeInBits()-1) { 16825 // Generate SUB & CMOV. 16826 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32), 16827 DAG.getConstant(0, VT), N0.getOperand(0)); 16828 16829 SDValue Ops[] = { N0.getOperand(0), Neg, 16830 DAG.getConstant(X86::COND_GE, MVT::i8), 16831 SDValue(Neg.getNode(), 1) }; 16832 return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue), 16833 Ops, array_lengthof(Ops)); 16834 } 16835 return SDValue(); 16836} 16837 16838// PerformXorCombine - Attempts to turn XOR nodes into BLSMSK nodes 16839static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG, 16840 TargetLowering::DAGCombinerInfo &DCI, 16841 const X86Subtarget *Subtarget) { 16842 EVT VT = N->getValueType(0); 16843 if (DCI.isBeforeLegalizeOps()) 16844 return SDValue(); 16845 16846 if (Subtarget->hasCMov()) { 16847 SDValue RV = performIntegerAbsCombine(N, DAG); 16848 if (RV.getNode()) 16849 return RV; 16850 } 16851 16852 // Try forming BMI if it is available. 16853 if (!Subtarget->hasBMI()) 16854 return SDValue(); 16855 16856 if (VT != MVT::i32 && VT != MVT::i64) 16857 return SDValue(); 16858 16859 assert(Subtarget->hasBMI() && "Creating BLSMSK requires BMI instructions"); 16860 16861 // Create BLSMSK instructions by finding X ^ (X-1) 16862 SDValue N0 = N->getOperand(0); 16863 SDValue N1 = N->getOperand(1); 16864 SDLoc DL(N); 16865 16866 if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1 && 16867 isAllOnes(N0.getOperand(1))) 16868 return DAG.getNode(X86ISD::BLSMSK, DL, VT, N1); 16869 16870 if (N1.getOpcode() == ISD::ADD && N1.getOperand(0) == N0 && 16871 isAllOnes(N1.getOperand(1))) 16872 return DAG.getNode(X86ISD::BLSMSK, DL, VT, N0); 16873 16874 return SDValue(); 16875} 16876 16877/// PerformLOADCombine - Do target-specific dag combines on LOAD nodes. 16878static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG, 16879 TargetLowering::DAGCombinerInfo &DCI, 16880 const X86Subtarget *Subtarget) { 16881 LoadSDNode *Ld = cast<LoadSDNode>(N); 16882 EVT RegVT = Ld->getValueType(0); 16883 EVT MemVT = Ld->getMemoryVT(); 16884 SDLoc dl(Ld); 16885 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 16886 unsigned RegSz = RegVT.getSizeInBits(); 16887 16888 // On Sandybridge unaligned 256bit loads are inefficient. 
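// A rough sketch of the rewrite performed below (illustrative types):
//   t = load <8 x i32> [Ptr], align 1        ; one unaligned 32-byte load
// becomes
//   lo = load <4 x i32> [Ptr],      align 1
//   hi = load <4 x i32> [Ptr + 16], align 1
//   t  = insert hi into (insert lo into undef)
// with the two load chains merged through a TokenFactor.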
16889   ISD::LoadExtType Ext = Ld->getExtensionType();
16890   unsigned Alignment = Ld->getAlignment();
16891   bool IsAligned = Alignment == 0 || Alignment >= MemVT.getSizeInBits()/8;
16892   if (RegVT.is256BitVector() && !Subtarget->hasInt256() &&
16893       !DCI.isBeforeLegalizeOps() && !IsAligned && Ext == ISD::NON_EXTLOAD) {
16894     unsigned NumElems = RegVT.getVectorNumElements();
16895     if (NumElems < 2)
16896       return SDValue();
16897
16898     SDValue Ptr = Ld->getBasePtr();
16899     SDValue Increment = DAG.getConstant(16, TLI.getPointerTy());
16900
16901     EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
16902                                   NumElems/2);
16903     SDValue Load1 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
16904                                 Ld->getPointerInfo(), Ld->isVolatile(),
16905                                 Ld->isNonTemporal(), Ld->isInvariant(),
16906                                 Alignment);
16907     Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
16908     SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
16909                                 Ld->getPointerInfo(), Ld->isVolatile(),
16910                                 Ld->isNonTemporal(), Ld->isInvariant(),
16911                                 std::min(16U, Alignment));
16912     SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
16913                              Load1.getValue(1),
16914                              Load2.getValue(1));
16915
16916     SDValue NewVec = DAG.getUNDEF(RegVT);
16917     NewVec = Insert128BitVector(NewVec, Load1, 0, DAG, dl);
16918     NewVec = Insert128BitVector(NewVec, Load2, NumElems/2, DAG, dl);
16919     return DCI.CombineTo(N, NewVec, TF, true);
16920   }
16921
16922   // If this is a vector extending load, attempt to optimize it using a
16923   // shuffle. If SSSE3 is not available we may emit an illegal shuffle, but
16924   // the expansion is still better than scalar code.
16925   // We generate X86ISD::VSEXT for SEXTLOADs if it is available; otherwise we
16926   // emit a shuffle and an arithmetic shift.
16927   // TODO: It is possible to support ZExt by zeroing the undef values
16928   // during the shuffle phase or after the shuffle.
16929   if (RegVT.isVector() && RegVT.isInteger() && Subtarget->hasSSE2() &&
16930       (Ext == ISD::EXTLOAD || Ext == ISD::SEXTLOAD)) {
16931     assert(MemVT != RegVT && "Cannot extend to the same type");
16932     assert(MemVT.isVector() && "Must load a vector from memory");
16933
16934     unsigned NumElems = RegVT.getVectorNumElements();
16935     unsigned MemSz = MemVT.getSizeInBits();
16936     assert(RegSz > MemSz && "Register size must be greater than the mem size");
16937
16938     if (Ext == ISD::SEXTLOAD && RegSz == 256 && !Subtarget->hasInt256())
16939       return SDValue();
16940
16941     // All sizes must be a power of two.
16942     if (!isPowerOf2_32(RegSz * MemSz * NumElems))
16943       return SDValue();
16944
16945     // Attempt to load the original value using scalar loads.
16946     // Find the largest scalar type that divides the total loaded size.
16947     MVT SclrLoadTy = MVT::i8;
16948     for (unsigned tp = MVT::FIRST_INTEGER_VALUETYPE;
16949          tp < MVT::LAST_INTEGER_VALUETYPE; ++tp) {
16950       MVT Tp = (MVT::SimpleValueType)tp;
16951       if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) {
16952         SclrLoadTy = Tp;
16953       }
16954     }
16955
16956     // On 32-bit systems we can't use 64-bit integer loads. Try bitcasting to f64.
16957     if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 &&
16958         (64 <= MemSz))
16959       SclrLoadTy = MVT::f64;
16960
16961     // Calculate the number of scalar loads that we need to perform
16962     // in order to load our vector from memory.
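// Worked example of the sizing logic above (illustrative): for a sextload of
// v4i8 (MemSz = 32) on a target where i32 is legal, SclrLoadTy becomes i32,
// and the computation below yields NumLoads = 32 / 32 = 1, satisfying the
// single-load restriction on the SEXTLOAD path.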
16963     unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits();
16964     if (Ext == ISD::SEXTLOAD && NumLoads > 1)
16965       return SDValue();
16966
16967     unsigned loadRegSize = RegSz;
16968     if (Ext == ISD::SEXTLOAD && RegSz == 256)
16969       loadRegSize /= 2;
16970
16971     // Represent our vector as a sequence of elements which are the
16972     // largest scalar that we can load.
16973     EVT LoadUnitVecVT = EVT::getVectorVT(*DAG.getContext(), SclrLoadTy,
16974                                          loadRegSize/SclrLoadTy.getSizeInBits());
16975
16976     // Represent the data using the same element type that is stored in
16977     // memory. In practice, we "widen" MemVT.
16978     EVT WideVecVT =
16979         EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
16980                          loadRegSize/MemVT.getScalarType().getSizeInBits());
16981
16982     assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() &&
16983            "Invalid vector type");
16984
16985     // We can't shuffle using an illegal type.
16986     if (!TLI.isTypeLegal(WideVecVT))
16987       return SDValue();
16988
16989     SmallVector<SDValue, 8> Chains;
16990     SDValue Ptr = Ld->getBasePtr();
16991     SDValue Increment = DAG.getConstant(SclrLoadTy.getSizeInBits()/8,
16992                                         TLI.getPointerTy());
16993     SDValue Res = DAG.getUNDEF(LoadUnitVecVT);
16994
16995     for (unsigned i = 0; i < NumLoads; ++i) {
16996       // Perform a single load.
16997       SDValue ScalarLoad = DAG.getLoad(SclrLoadTy, dl, Ld->getChain(),
16998                                        Ptr, Ld->getPointerInfo(),
16999                                        Ld->isVolatile(), Ld->isNonTemporal(),
17000                                        Ld->isInvariant(), Ld->getAlignment());
17001       Chains.push_back(ScalarLoad.getValue(1));
17002       // Create the first element using SCALAR_TO_VECTOR in order to avoid
17003       // another round of DAG combining.
17004       if (i == 0)
17005         Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad);
17006       else
17007         Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res,
17008                           ScalarLoad, DAG.getIntPtrConstant(i));
17009
17010       Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
17011     }
17012
17013     SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0],
17014                              Chains.size());
17015
17016     // Bitcast the loaded value to a vector of the original element type, in
17017     // the size of the target vector type.
17018     SDValue SlicedVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Res);
17019     unsigned SizeRatio = RegSz/MemSz;
17020
17021     if (Ext == ISD::SEXTLOAD) {
17022       // If we have SSE4.1 we can directly emit a VSEXT node.
17023       if (Subtarget->hasSSE41()) {
17024         SDValue Sext = DAG.getNode(X86ISD::VSEXT, dl, RegVT, SlicedVec);
17025         return DCI.CombineTo(N, Sext, TF, true);
17026       }
17027
17028       // Otherwise we'll shuffle the small elements into the high bits of the
17029       // larger type and perform an arithmetic shift. If the shift is not legal
17030       // it's better to scalarize.
17031       if (!TLI.isOperationLegalOrCustom(ISD::SRA, RegVT))
17032         return SDValue();
17033
17034       // Redistribute the loaded elements into the different locations.
17035       SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
17036       for (unsigned i = 0; i != NumElems; ++i)
17037         ShuffleVec[i*SizeRatio + SizeRatio-1] = i;
17038
17039       SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec,
17040                                            DAG.getUNDEF(WideVecVT),
17041                                            &ShuffleVec[0]);
17042
17043       Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
17044
17045       // Build the arithmetic shift.
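// Illustration of the trick (editorial, for a v4i16 -> v4i32 sextload): the
// shuffle above placed each i16 element in the high half of its i32 slot,
//   <a,b,c,d,u,u,u,u>  ->  <u,a,u,b,u,c,u,d>   (i16 lanes, u = undef)
// so shifting each i32 lane right arithmetically by 16 (below) completes
// the sign extension.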
17046 unsigned Amt = RegVT.getVectorElementType().getSizeInBits() - 17047 MemVT.getVectorElementType().getSizeInBits(); 17048 Shuff = DAG.getNode(ISD::SRA, dl, RegVT, Shuff, 17049 DAG.getConstant(Amt, RegVT)); 17050 17051 return DCI.CombineTo(N, Shuff, TF, true); 17052 } 17053 17054 // Redistribute the loaded elements into the different locations. 17055 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); 17056 for (unsigned i = 0; i != NumElems; ++i) 17057 ShuffleVec[i*SizeRatio] = i; 17058 17059 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec, 17060 DAG.getUNDEF(WideVecVT), 17061 &ShuffleVec[0]); 17062 17063 // Bitcast to the requested type. 17064 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff); 17065 // Replace the original load with the new sequence 17066 // and return the new chain. 17067 return DCI.CombineTo(N, Shuff, TF, true); 17068 } 17069 17070 return SDValue(); 17071} 17072 17073/// PerformSTORECombine - Do target-specific dag combines on STORE nodes. 17074static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG, 17075 const X86Subtarget *Subtarget) { 17076 StoreSDNode *St = cast<StoreSDNode>(N); 17077 EVT VT = St->getValue().getValueType(); 17078 EVT StVT = St->getMemoryVT(); 17079 SDLoc dl(St); 17080 SDValue StoredVal = St->getOperand(1); 17081 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 17082 17083 // If we are saving a concatenation of two XMM registers, perform two stores. 17084 // On Sandy Bridge, 256-bit memory operations are executed by two 17085 // 128-bit ports. However, on Haswell it is better to issue a single 256-bit 17086 // memory operation. 17087 unsigned Alignment = St->getAlignment(); 17088 bool IsAligned = Alignment == 0 || Alignment >= VT.getSizeInBits()/8; 17089 if (VT.is256BitVector() && !Subtarget->hasInt256() && 17090 StVT == VT && !IsAligned) { 17091 unsigned NumElems = VT.getVectorNumElements(); 17092 if (NumElems < 2) 17093 return SDValue(); 17094 17095 SDValue Value0 = Extract128BitVector(StoredVal, 0, DAG, dl); 17096 SDValue Value1 = Extract128BitVector(StoredVal, NumElems/2, DAG, dl); 17097 17098 SDValue Stride = DAG.getConstant(16, TLI.getPointerTy()); 17099 SDValue Ptr0 = St->getBasePtr(); 17100 SDValue Ptr1 = DAG.getNode(ISD::ADD, dl, Ptr0.getValueType(), Ptr0, Stride); 17101 17102 SDValue Ch0 = DAG.getStore(St->getChain(), dl, Value0, Ptr0, 17103 St->getPointerInfo(), St->isVolatile(), 17104 St->isNonTemporal(), Alignment); 17105 SDValue Ch1 = DAG.getStore(St->getChain(), dl, Value1, Ptr1, 17106 St->getPointerInfo(), St->isVolatile(), 17107 St->isNonTemporal(), 17108 std::min(16U, Alignment)); 17109 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1); 17110 } 17111 17112 // Optimize trunc store (of multiple scalars) to shuffle and store. 17113 // First, pack all of the elements in one place. Next, store to memory 17114 // in fewer chunks. 17115 if (St->isTruncatingStore() && VT.isVector()) { 17116 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 17117 unsigned NumElems = VT.getVectorNumElements(); 17118 assert(StVT != VT && "Cannot truncate to the same type"); 17119 unsigned FromSz = VT.getVectorElementType().getSizeInBits(); 17120 unsigned ToSz = StVT.getVectorElementType().getSizeInBits(); 17121 17122 // From, To sizes and ElemCount must be pow of two 17123 if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue(); 17124 // We are going to use the original vector elt for storing. 17125 // Accumulated smaller vector elements must be a multiple of the store size. 
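// Illustrative example of the transformation below: a truncating store of
// v4i32 to v4i8 has SizeRatio = 4, so the shuffle moves the four low bytes
// to the front of a v16i8 (lanes <0,4,8,12> -> <0,1,2,3>), after which a
// single i32 store writes all of the packed data.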
17126 if (0 != (NumElems * FromSz) % ToSz) return SDValue(); 17127 17128 unsigned SizeRatio = FromSz / ToSz; 17129 17130 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits()); 17131 17132 // Create a type on which we perform the shuffle 17133 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), 17134 StVT.getScalarType(), NumElems*SizeRatio); 17135 17136 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits()); 17137 17138 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, St->getValue()); 17139 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); 17140 for (unsigned i = 0; i != NumElems; ++i) 17141 ShuffleVec[i] = i * SizeRatio; 17142 17143 // Can't shuffle using an illegal type. 17144 if (!TLI.isTypeLegal(WideVecVT)) 17145 return SDValue(); 17146 17147 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec, 17148 DAG.getUNDEF(WideVecVT), 17149 &ShuffleVec[0]); 17150 // At this point all of the data is stored at the bottom of the 17151 // register. We now need to save it to mem. 17152 17153 // Find the largest store unit 17154 MVT StoreType = MVT::i8; 17155 for (unsigned tp = MVT::FIRST_INTEGER_VALUETYPE; 17156 tp < MVT::LAST_INTEGER_VALUETYPE; ++tp) { 17157 MVT Tp = (MVT::SimpleValueType)tp; 17158 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz) 17159 StoreType = Tp; 17160 } 17161 17162 // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64. 17163 if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 && 17164 (64 <= NumElems * ToSz)) 17165 StoreType = MVT::f64; 17166 17167 // Bitcast the original vector into a vector of store-size units 17168 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(), 17169 StoreType, VT.getSizeInBits()/StoreType.getSizeInBits()); 17170 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits()); 17171 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, dl, StoreVecVT, Shuff); 17172 SmallVector<SDValue, 8> Chains; 17173 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8, 17174 TLI.getPointerTy()); 17175 SDValue Ptr = St->getBasePtr(); 17176 17177 // Perform one or more big stores into memory. 17178 for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) { 17179 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, 17180 StoreType, ShuffWide, 17181 DAG.getIntPtrConstant(i)); 17182 SDValue Ch = DAG.getStore(St->getChain(), dl, SubVec, Ptr, 17183 St->getPointerInfo(), St->isVolatile(), 17184 St->isNonTemporal(), St->getAlignment()); 17185 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 17186 Chains.push_back(Ch); 17187 } 17188 17189 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], 17190 Chains.size()); 17191 } 17192 17193 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering 17194 // the FP state in cases where an emms may be missing. 17195 // A preferable solution to the general problem is to figure out the right 17196 // places to insert EMMS. This qualifies as a quick hack. 17197 17198 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode. 17199 if (VT.getSizeInBits() != 64) 17200 return SDValue(); 17201 17202 const Function *F = DAG.getMachineFunction().getFunction(); 17203 bool NoImplicitFloatOps = F->getAttributes(). 
17204 hasAttribute(AttributeSet::FunctionIndex, Attribute::NoImplicitFloat); 17205 bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps 17206 && Subtarget->hasSSE2(); 17207 if ((VT.isVector() || 17208 (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) && 17209 isa<LoadSDNode>(St->getValue()) && 17210 !cast<LoadSDNode>(St->getValue())->isVolatile() && 17211 St->getChain().hasOneUse() && !St->isVolatile()) { 17212 SDNode* LdVal = St->getValue().getNode(); 17213 LoadSDNode *Ld = 0; 17214 int TokenFactorIndex = -1; 17215 SmallVector<SDValue, 8> Ops; 17216 SDNode* ChainVal = St->getChain().getNode(); 17217 // Must be a store of a load. We currently handle two cases: the load 17218 // is a direct child, and it's under an intervening TokenFactor. It is 17219 // possible to dig deeper under nested TokenFactors. 17220 if (ChainVal == LdVal) 17221 Ld = cast<LoadSDNode>(St->getChain()); 17222 else if (St->getValue().hasOneUse() && 17223 ChainVal->getOpcode() == ISD::TokenFactor) { 17224 for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) { 17225 if (ChainVal->getOperand(i).getNode() == LdVal) { 17226 TokenFactorIndex = i; 17227 Ld = cast<LoadSDNode>(St->getValue()); 17228 } else 17229 Ops.push_back(ChainVal->getOperand(i)); 17230 } 17231 } 17232 17233 if (!Ld || !ISD::isNormalLoad(Ld)) 17234 return SDValue(); 17235 17236 // If this is not the MMX case, i.e. we are just turning i64 load/store 17237 // into f64 load/store, avoid the transformation if there are multiple 17238 // uses of the loaded value. 17239 if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0)) 17240 return SDValue(); 17241 17242 SDLoc LdDL(Ld); 17243 SDLoc StDL(N); 17244 // If we are a 64-bit capable x86, lower to a single movq load/store pair. 17245 // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store 17246 // pair instead. 17247 if (Subtarget->is64Bit() || F64IsLegal) { 17248 EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64; 17249 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(), 17250 Ld->getPointerInfo(), Ld->isVolatile(), 17251 Ld->isNonTemporal(), Ld->isInvariant(), 17252 Ld->getAlignment()); 17253 SDValue NewChain = NewLd.getValue(1); 17254 if (TokenFactorIndex != -1) { 17255 Ops.push_back(NewChain); 17256 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0], 17257 Ops.size()); 17258 } 17259 return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(), 17260 St->getPointerInfo(), 17261 St->isVolatile(), St->isNonTemporal(), 17262 St->getAlignment()); 17263 } 17264 17265 // Otherwise, lower to two pairs of 32-bit loads / stores. 
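// Editorial sketch of that expansion: when neither 64-bit GPRs nor SSE2 f64
// are available, an i64 copy through memory becomes
//   lo = i32 load [LoAddr]; hi = i32 load [LoAddr + 4]
//   i32 store lo, [StAddr]; i32 store hi, [StAddr + 4]
// with the two stores tied together by a TokenFactor.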
17266 SDValue LoAddr = Ld->getBasePtr(); 17267 SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr, 17268 DAG.getConstant(4, MVT::i32)); 17269 17270 SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr, 17271 Ld->getPointerInfo(), 17272 Ld->isVolatile(), Ld->isNonTemporal(), 17273 Ld->isInvariant(), Ld->getAlignment()); 17274 SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr, 17275 Ld->getPointerInfo().getWithOffset(4), 17276 Ld->isVolatile(), Ld->isNonTemporal(), 17277 Ld->isInvariant(), 17278 MinAlign(Ld->getAlignment(), 4)); 17279 17280 SDValue NewChain = LoLd.getValue(1); 17281 if (TokenFactorIndex != -1) { 17282 Ops.push_back(LoLd); 17283 Ops.push_back(HiLd); 17284 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0], 17285 Ops.size()); 17286 } 17287 17288 LoAddr = St->getBasePtr(); 17289 HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr, 17290 DAG.getConstant(4, MVT::i32)); 17291 17292 SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr, 17293 St->getPointerInfo(), 17294 St->isVolatile(), St->isNonTemporal(), 17295 St->getAlignment()); 17296 SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr, 17297 St->getPointerInfo().getWithOffset(4), 17298 St->isVolatile(), 17299 St->isNonTemporal(), 17300 MinAlign(St->getAlignment(), 4)); 17301 return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt); 17302 } 17303 return SDValue(); 17304} 17305 17306/// isHorizontalBinOp - Return 'true' if this vector operation is "horizontal" 17307/// and return the operands for the horizontal operation in LHS and RHS. A 17308/// horizontal operation performs the binary operation on successive elements 17309/// of its first operand, then on successive elements of its second operand, 17310/// returning the resulting values in a vector. For example, if 17311/// A = < float a0, float a1, float a2, float a3 > 17312/// and 17313/// B = < float b0, float b1, float b2, float b3 > 17314/// then the result of doing a horizontal operation on A and B is 17315/// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >. 17316/// In short, LHS and RHS are inspected to see if LHS op RHS is of the form 17317/// A horizontal-op B, for some already available A and B, and if so then LHS is 17318/// set to A, RHS to B, and the routine returns 'true'. 17319/// Note that the binary operation should have the property that if one of the 17320/// operands is UNDEF then the result is UNDEF. 17321static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) { 17322 // Look for the following pattern: if 17323 // A = < float a0, float a1, float a2, float a3 > 17324 // B = < float b0, float b1, float b2, float b3 > 17325 // and 17326 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6> 17327 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7> 17328 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 > 17329 // which is A horizontal-op B. 17330 17331 // At least one of the operands should be a vector shuffle. 17332 if (LHS.getOpcode() != ISD::VECTOR_SHUFFLE && 17333 RHS.getOpcode() != ISD::VECTOR_SHUFFLE) 17334 return false; 17335 17336 EVT VT = LHS.getValueType(); 17337 17338 assert((VT.is128BitVector() || VT.is256BitVector()) && 17339 "Unsupported vector type for horizontal add/sub"); 17340 17341 // Handle 128 and 256-bit vector lengths. AVX defines horizontal add/sub to 17342 // operate independently on 128-bit lanes. 
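// Worked example for the mask check below (illustrative): with v8f32 (two
// 128-bit lanes, NumLaneElts = 4), a horizontal add must present as
//   LHS = shuffle A, B, <0,2,8,10,4,6,12,14>
//   RHS = shuffle A, B, <1,3,9,11,5,7,13,15>
// i.e. even/odd element pairs, split per source and per lane.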
17343 unsigned NumElts = VT.getVectorNumElements(); 17344 unsigned NumLanes = VT.getSizeInBits()/128; 17345 unsigned NumLaneElts = NumElts / NumLanes; 17346 assert((NumLaneElts % 2 == 0) && 17347 "Vector type should have an even number of elements in each lane"); 17348 unsigned HalfLaneElts = NumLaneElts/2; 17349 17350 // View LHS in the form 17351 // LHS = VECTOR_SHUFFLE A, B, LMask 17352 // If LHS is not a shuffle then pretend it is the shuffle 17353 // LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1> 17354 // NOTE: in what follows a default initialized SDValue represents an UNDEF of 17355 // type VT. 17356 SDValue A, B; 17357 SmallVector<int, 16> LMask(NumElts); 17358 if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) { 17359 if (LHS.getOperand(0).getOpcode() != ISD::UNDEF) 17360 A = LHS.getOperand(0); 17361 if (LHS.getOperand(1).getOpcode() != ISD::UNDEF) 17362 B = LHS.getOperand(1); 17363 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask(); 17364 std::copy(Mask.begin(), Mask.end(), LMask.begin()); 17365 } else { 17366 if (LHS.getOpcode() != ISD::UNDEF) 17367 A = LHS; 17368 for (unsigned i = 0; i != NumElts; ++i) 17369 LMask[i] = i; 17370 } 17371 17372 // Likewise, view RHS in the form 17373 // RHS = VECTOR_SHUFFLE C, D, RMask 17374 SDValue C, D; 17375 SmallVector<int, 16> RMask(NumElts); 17376 if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) { 17377 if (RHS.getOperand(0).getOpcode() != ISD::UNDEF) 17378 C = RHS.getOperand(0); 17379 if (RHS.getOperand(1).getOpcode() != ISD::UNDEF) 17380 D = RHS.getOperand(1); 17381 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask(); 17382 std::copy(Mask.begin(), Mask.end(), RMask.begin()); 17383 } else { 17384 if (RHS.getOpcode() != ISD::UNDEF) 17385 C = RHS; 17386 for (unsigned i = 0; i != NumElts; ++i) 17387 RMask[i] = i; 17388 } 17389 17390 // Check that the shuffles are both shuffling the same vectors. 17391 if (!(A == C && B == D) && !(A == D && B == C)) 17392 return false; 17393 17394 // If everything is UNDEF then bail out: it would be better to fold to UNDEF. 17395 if (!A.getNode() && !B.getNode()) 17396 return false; 17397 17398 // If A and B occur in reverse order in RHS, then "swap" them (which means 17399 // rewriting the mask). 17400 if (A != C) 17401 CommuteVectorShuffleMask(RMask, NumElts); 17402 17403 // At this point LHS and RHS are equivalent to 17404 // LHS = VECTOR_SHUFFLE A, B, LMask 17405 // RHS = VECTOR_SHUFFLE A, B, RMask 17406 // Check that the masks correspond to performing a horizontal operation. 17407 for (unsigned i = 0; i != NumElts; ++i) { 17408 int LIdx = LMask[i], RIdx = RMask[i]; 17409 17410 // Ignore any UNDEF components. 17411 if (LIdx < 0 || RIdx < 0 || 17412 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) || 17413 (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts))) 17414 continue; 17415 17416 // Check that successive elements are being operated on. If not, this is 17417 // not a horizontal operation. 17418 unsigned Src = (i/HalfLaneElts) % 2; // each lane is split between srcs 17419 unsigned LaneStart = (i/NumLaneElts) * NumLaneElts; 17420 int Index = 2*(i%HalfLaneElts) + NumElts*Src + LaneStart; 17421 if (!(LIdx == Index && RIdx == Index + 1) && 17422 !(IsCommutative && LIdx == Index + 1 && RIdx == Index)) 17423 return false; 17424 } 17425 17426 LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it. 17427 RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it. 
17428 return true; 17429} 17430 17431/// PerformFADDCombine - Do target-specific dag combines on floating point adds. 17432static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG, 17433 const X86Subtarget *Subtarget) { 17434 EVT VT = N->getValueType(0); 17435 SDValue LHS = N->getOperand(0); 17436 SDValue RHS = N->getOperand(1); 17437 17438 // Try to synthesize horizontal adds from adds of shuffles. 17439 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) || 17440 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) && 17441 isHorizontalBinOp(LHS, RHS, true)) 17442 return DAG.getNode(X86ISD::FHADD, SDLoc(N), VT, LHS, RHS); 17443 return SDValue(); 17444} 17445 17446/// PerformFSUBCombine - Do target-specific dag combines on floating point subs. 17447static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG, 17448 const X86Subtarget *Subtarget) { 17449 EVT VT = N->getValueType(0); 17450 SDValue LHS = N->getOperand(0); 17451 SDValue RHS = N->getOperand(1); 17452 17453 // Try to synthesize horizontal subs from subs of shuffles. 17454 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) || 17455 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) && 17456 isHorizontalBinOp(LHS, RHS, false)) 17457 return DAG.getNode(X86ISD::FHSUB, SDLoc(N), VT, LHS, RHS); 17458 return SDValue(); 17459} 17460 17461/// PerformFORCombine - Do target-specific dag combines on X86ISD::FOR and 17462/// X86ISD::FXOR nodes. 17463static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) { 17464 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR); 17465 // F[X]OR(0.0, x) -> x 17466 // F[X]OR(x, 0.0) -> x 17467 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) 17468 if (C->getValueAPF().isPosZero()) 17469 return N->getOperand(1); 17470 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1))) 17471 if (C->getValueAPF().isPosZero()) 17472 return N->getOperand(0); 17473 return SDValue(); 17474} 17475 17476/// PerformFMinFMaxCombine - Do target-specific dag combines on X86ISD::FMIN and 17477/// X86ISD::FMAX nodes. 17478static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) { 17479 assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX); 17480 17481 // Only perform optimizations if UnsafeMath is used. 17482 if (!DAG.getTarget().Options.UnsafeFPMath) 17483 return SDValue(); 17484 17485 // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes 17486 // into FMINC and FMAXC, which are Commutative operations. 17487 unsigned NewOp = 0; 17488 switch (N->getOpcode()) { 17489 default: llvm_unreachable("unknown opcode"); 17490 case X86ISD::FMIN: NewOp = X86ISD::FMINC; break; 17491 case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break; 17492 } 17493 17494 return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0), 17495 N->getOperand(0), N->getOperand(1)); 17496} 17497 17498/// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes. 
17499 static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
17500   // FAND(0.0, x) -> 0.0
17501   // FAND(x, 0.0) -> 0.0
17502   if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
17503     if (C->getValueAPF().isPosZero())
17504       return N->getOperand(0);
17505   if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
17506     if (C->getValueAPF().isPosZero())
17507       return N->getOperand(1);
17508   return SDValue();
17509 }
17510
17511 static SDValue PerformBTCombine(SDNode *N,
17512                                 SelectionDAG &DAG,
17513                                 TargetLowering::DAGCombinerInfo &DCI) {
17514   // BT ignores high bits in the bit index operand.
17515   SDValue Op1 = N->getOperand(1);
17516   if (Op1.hasOneUse()) {
17517     unsigned BitWidth = Op1.getValueSizeInBits();
17518     APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
17519     APInt KnownZero, KnownOne;
17520     TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
17521                                           !DCI.isBeforeLegalizeOps());
17522     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
17523     if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) ||
17524         TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO))
17525       DCI.CommitTargetLoweringOpt(TLO);
17526   }
17527   return SDValue();
17528 }
17529
17530 static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
17531   SDValue Op = N->getOperand(0);
17532   if (Op.getOpcode() == ISD::BITCAST)
17533     Op = Op.getOperand(0);
17534   EVT VT = N->getValueType(0), OpVT = Op.getValueType();
17535   if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
17536       VT.getVectorElementType().getSizeInBits() ==
17537       OpVT.getVectorElementType().getSizeInBits()) {
17538     return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
17539   }
17540   return SDValue();
17541 }
17542
17543 static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
17544                                                const X86Subtarget *Subtarget) {
17545   EVT VT = N->getValueType(0);
17546   if (!VT.isVector())
17547     return SDValue();
17548
17549   SDValue N0 = N->getOperand(0);
17550   SDValue N1 = N->getOperand(1);
17551   EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
17552   SDLoc dl(N);
17553
17554   // SIGN_EXTEND_INREG to v4i64 is an expensive operation on both SSE and
17555   // AVX2, since there is no sign-extending shift right operation on a
17556   // vector with 64-bit elements.
17557   //(sext_in_reg (v4i64 anyext (v4i32 x )), ExtraVT) ->
17558   // (v4i64 sext (v4i32 sext_in_reg (v4i32 x , ExtraVT)))
17559   if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
17560                            N0.getOpcode() == ISD::SIGN_EXTEND)) {
17561     SDValue N00 = N0.getOperand(0);
17562
17563     // An EXTLOAD has a better solution on AVX2: it may be replaced with an
17564     // X86ISD::VSEXT node, so leave it alone here.
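// Editorial note (illustrative): the v4i32 sext_in_reg produced by this
// rewrite lowers to a pair of 32-bit vector shifts, e.g. for ExtraVT = i8
// a shift left by 24 followed by an arithmetic shift right by 24, whereas
// no 64-bit vector arithmetic shift exists to do the same at v4i64.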
17565 if (N00.getOpcode() == ISD::LOAD && Subtarget->hasInt256()) 17566 if (!ISD::isNormalLoad(N00.getNode())) 17567 return SDValue(); 17568 17569 if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) { 17570 SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, 17571 N00, N1); 17572 return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp); 17573 } 17574 } 17575 return SDValue(); 17576} 17577 17578static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG, 17579 TargetLowering::DAGCombinerInfo &DCI, 17580 const X86Subtarget *Subtarget) { 17581 if (!DCI.isBeforeLegalizeOps()) 17582 return SDValue(); 17583 17584 if (!Subtarget->hasFp256()) 17585 return SDValue(); 17586 17587 EVT VT = N->getValueType(0); 17588 if (VT.isVector() && VT.getSizeInBits() == 256) { 17589 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget); 17590 if (R.getNode()) 17591 return R; 17592 } 17593 17594 return SDValue(); 17595} 17596 17597static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG, 17598 const X86Subtarget* Subtarget) { 17599 SDLoc dl(N); 17600 EVT VT = N->getValueType(0); 17601 17602 // Let legalize expand this if it isn't a legal type yet. 17603 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 17604 return SDValue(); 17605 17606 EVT ScalarVT = VT.getScalarType(); 17607 if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || 17608 (!Subtarget->hasFMA() && !Subtarget->hasFMA4())) 17609 return SDValue(); 17610 17611 SDValue A = N->getOperand(0); 17612 SDValue B = N->getOperand(1); 17613 SDValue C = N->getOperand(2); 17614 17615 bool NegA = (A.getOpcode() == ISD::FNEG); 17616 bool NegB = (B.getOpcode() == ISD::FNEG); 17617 bool NegC = (C.getOpcode() == ISD::FNEG); 17618 17619 // Negative multiplication when NegA xor NegB 17620 bool NegMul = (NegA != NegB); 17621 if (NegA) 17622 A = A.getOperand(0); 17623 if (NegB) 17624 B = B.getOperand(0); 17625 if (NegC) 17626 C = C.getOperand(0); 17627 17628 unsigned Opcode; 17629 if (!NegMul) 17630 Opcode = (!NegC) ? X86ISD::FMADD : X86ISD::FMSUB; 17631 else 17632 Opcode = (!NegC) ? X86ISD::FNMADD : X86ISD::FNMSUB; 17633 17634 return DAG.getNode(Opcode, dl, VT, A, B, C); 17635} 17636 17637static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG, 17638 TargetLowering::DAGCombinerInfo &DCI, 17639 const X86Subtarget *Subtarget) { 17640 // (i32 zext (and (i8 x86isd::setcc_carry), 1)) -> 17641 // (and (i32 x86isd::setcc_carry), 1) 17642 // This eliminates the zext. This transformation is necessary because 17643 // ISD::SETCC is always legalized to i8. 
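// Editorial note: SETCC_CARRY materializes 0 or all-ones in whatever width
// it is asked for (it lowers to sbb reg,reg), so the carry can be produced
// directly at i32 and masked with 1, making the i8-to-i32 zext unnecessary.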
17644 SDLoc dl(N); 17645 SDValue N0 = N->getOperand(0); 17646 EVT VT = N->getValueType(0); 17647 17648 if (N0.getOpcode() == ISD::AND && 17649 N0.hasOneUse() && 17650 N0.getOperand(0).hasOneUse()) { 17651 SDValue N00 = N0.getOperand(0); 17652 if (N00.getOpcode() == X86ISD::SETCC_CARRY) { 17653 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 17654 if (!C || C->getZExtValue() != 1) 17655 return SDValue(); 17656 return DAG.getNode(ISD::AND, dl, VT, 17657 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT, 17658 N00.getOperand(0), N00.getOperand(1)), 17659 DAG.getConstant(1, VT)); 17660 } 17661 } 17662 17663 if (VT.is256BitVector()) { 17664 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget); 17665 if (R.getNode()) 17666 return R; 17667 } 17668 17669 return SDValue(); 17670} 17671 17672// Optimize x == -y --> x+y == 0 17673// x != -y --> x+y != 0 17674static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG) { 17675 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); 17676 SDValue LHS = N->getOperand(0); 17677 SDValue RHS = N->getOperand(1); 17678 17679 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB) 17680 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(LHS.getOperand(0))) 17681 if (C->getAPIntValue() == 0 && LHS.hasOneUse()) { 17682 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N), 17683 LHS.getValueType(), RHS, LHS.getOperand(1)); 17684 return DAG.getSetCC(SDLoc(N), N->getValueType(0), 17685 addV, DAG.getConstant(0, addV.getValueType()), CC); 17686 } 17687 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB) 17688 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS.getOperand(0))) 17689 if (C->getAPIntValue() == 0 && RHS.hasOneUse()) { 17690 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N), 17691 RHS.getValueType(), LHS, RHS.getOperand(1)); 17692 return DAG.getSetCC(SDLoc(N), N->getValueType(0), 17693 addV, DAG.getConstant(0, addV.getValueType()), CC); 17694 } 17695 return SDValue(); 17696} 17697 17698// Helper function of PerformSETCCCombine. It is to materialize "setb reg" 17699// as "sbb reg,reg", since it can be extended without zext and produces 17700// an all-ones bit which is more useful than 0/1 in some cases. 17701static SDValue MaterializeSETB(SDLoc DL, SDValue EFLAGS, SelectionDAG &DAG) { 17702 return DAG.getNode(ISD::AND, DL, MVT::i8, 17703 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8, 17704 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS), 17705 DAG.getConstant(1, MVT::i8)); 17706} 17707 17708// Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT 17709static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG, 17710 TargetLowering::DAGCombinerInfo &DCI, 17711 const X86Subtarget *Subtarget) { 17712 SDLoc DL(N); 17713 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0)); 17714 SDValue EFLAGS = N->getOperand(1); 17715 17716 if (CC == X86::COND_A) { 17717 // Try to convert COND_A into COND_B in an attempt to facilitate 17718 // materializing "setb reg". 17719 // 17720 // Do not flip "e > c", where "c" is a constant, because Cmp instruction 17721 // cannot take an immediate as its first operand. 
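// Illustrative example: for unsigned 'a > b', (sub a, b) with COND_A tests
// CF == 0 and ZF == 0; commuting to (sub b, a) lets the same predicate be
// read as COND_B (CF == 1), which MaterializeSETB turns into sbb reg,reg.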
17722 // 17723 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() && 17724 EFLAGS.getValueType().isInteger() && 17725 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) { 17726 SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS), 17727 EFLAGS.getNode()->getVTList(), 17728 EFLAGS.getOperand(1), EFLAGS.getOperand(0)); 17729 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo()); 17730 return MaterializeSETB(DL, NewEFLAGS, DAG); 17731 } 17732 } 17733 17734 // Materialize "setb reg" as "sbb reg,reg", since it can be extended without 17735 // a zext and produces an all-ones bit which is more useful than 0/1 in some 17736 // cases. 17737 if (CC == X86::COND_B) 17738 return MaterializeSETB(DL, EFLAGS, DAG); 17739 17740 SDValue Flags; 17741 17742 Flags = checkBoolTestSetCCCombine(EFLAGS, CC); 17743 if (Flags.getNode()) { 17744 SDValue Cond = DAG.getConstant(CC, MVT::i8); 17745 return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags); 17746 } 17747 17748 return SDValue(); 17749} 17750 17751// Optimize branch condition evaluation. 17752// 17753static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG, 17754 TargetLowering::DAGCombinerInfo &DCI, 17755 const X86Subtarget *Subtarget) { 17756 SDLoc DL(N); 17757 SDValue Chain = N->getOperand(0); 17758 SDValue Dest = N->getOperand(1); 17759 SDValue EFLAGS = N->getOperand(3); 17760 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2)); 17761 17762 SDValue Flags; 17763 17764 Flags = checkBoolTestSetCCCombine(EFLAGS, CC); 17765 if (Flags.getNode()) { 17766 SDValue Cond = DAG.getConstant(CC, MVT::i8); 17767 return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), Chain, Dest, Cond, 17768 Flags); 17769 } 17770 17771 return SDValue(); 17772} 17773 17774static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG, 17775 const X86TargetLowering *XTLI) { 17776 SDValue Op0 = N->getOperand(0); 17777 EVT InVT = Op0->getValueType(0); 17778 17779 // SINT_TO_FP(v4i8) -> SINT_TO_FP(SEXT(v4i8 to v4i32)) 17780 if (InVT == MVT::v8i8 || InVT == MVT::v4i8) { 17781 SDLoc dl(N); 17782 MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32; 17783 SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0); 17784 return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P); 17785 } 17786 17787 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have 17788 // a 32-bit target where SSE doesn't support i64->FP operations. 17789 if (Op0.getOpcode() == ISD::LOAD) { 17790 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode()); 17791 EVT VT = Ld->getValueType(0); 17792 if (!Ld->isVolatile() && !N->getValueType(0).isVector() && 17793 ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() && 17794 !XTLI->getSubtarget()->is64Bit() && 17795 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 17796 SDValue FILDChain = XTLI->BuildFILD(SDValue(N, 0), Ld->getValueType(0), 17797 Ld->getChain(), Op0, DAG); 17798 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1)); 17799 return FILDChain; 17800 } 17801 } 17802 return SDValue(); 17803} 17804 17805// Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS 17806static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG, 17807 X86TargetLowering::DAGCombinerInfo &DCI) { 17808 // If the LHS and RHS of the ADC node are zero, then it can't overflow and 17809 // the result is either zero or one (depending on the input carry bit). 17810 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1. 
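// Editorial sketch of the rewrite below: (adc 0, 0, carry-in) can only
// yield 0 or 1, so it becomes AND(SETCC_CARRY(COND_B, carry-in), 1), i.e.
// sbb reg,reg followed by a mask, and the now-dead carry-out result is
// replaced with constant 0.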
17811   if (X86::isZeroNode(N->getOperand(0)) &&
17812       X86::isZeroNode(N->getOperand(1)) &&
17813       // We don't have a good way to replace an EFLAGS use, so only do this when
17814       // dead right now.
17815       SDValue(N, 1).use_empty()) {
17816     SDLoc DL(N);
17817     EVT VT = N->getValueType(0);
17818     SDValue CarryOut = DAG.getConstant(0, N->getValueType(1));
17819     SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
17820                                DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
17821                                            DAG.getConstant(X86::COND_B,MVT::i8),
17822                                            N->getOperand(2)),
17823                                DAG.getConstant(1, VT));
17824     return DCI.CombineTo(N, Res1, CarryOut);
17825   }
17826
17827   return SDValue();
17828 }
17829
17830 // fold (add Y, (sete  X, 0)) -> adc  0, Y
17831 //      (add Y, (setne X, 0)) -> sbb -1, Y
17832 //      (sub (sete  X, 0), Y) -> sbb  0, Y
17833 //      (sub (setne X, 0), Y) -> adc -1, Y
17834 static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) {
17835   SDLoc DL(N);
17836
17837   // Look through ZExts.
17838   SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0);
17839   if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse())
17840     return SDValue();
17841
17842   SDValue SetCC = Ext.getOperand(0);
17843   if (SetCC.getOpcode() != X86ISD::SETCC || !SetCC.hasOneUse())
17844     return SDValue();
17845
17846   X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0);
17847   if (CC != X86::COND_E && CC != X86::COND_NE)
17848     return SDValue();
17849
17850   SDValue Cmp = SetCC.getOperand(1);
17851   if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
17852       !X86::isZeroNode(Cmp.getOperand(1)) ||
17853       !Cmp.getOperand(0).getValueType().isInteger())
17854     return SDValue();
17855
17856   SDValue CmpOp0 = Cmp.getOperand(0);
17857   SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0,
17858                                DAG.getConstant(1, CmpOp0.getValueType()));
17859
17860   SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1);
17861   if (CC == X86::COND_NE)
17862     return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB,
17863                        DL, OtherVal.getValueType(), OtherVal,
17864                        DAG.getConstant(-1ULL, OtherVal.getValueType()), NewCmp);
17865   return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC,
17866                      DL, OtherVal.getValueType(), OtherVal,
17867                      DAG.getConstant(0, OtherVal.getValueType()), NewCmp);
17868 }
17869
17870 /// PerformAddCombine - Do target-specific DAG combines on integer adds.
17871 static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG,
17872                                  const X86Subtarget *Subtarget) {
17873   EVT VT = N->getValueType(0);
17874   SDValue Op0 = N->getOperand(0);
17875   SDValue Op1 = N->getOperand(1);
17876
17877   // Try to synthesize horizontal adds from adds of shuffles.
17878   if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
17879        (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
17880       isHorizontalBinOp(Op0, Op1, true))
17881     return DAG.getNode(X86ISD::HADD, SDLoc(N), VT, Op0, Op1);
17882
17883   return OptimizeConditionalInDecrement(N, DAG);
17884 }
17885
17886 static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG,
17887                                  const X86Subtarget *Subtarget) {
17888   SDValue Op0 = N->getOperand(0);
17889   SDValue Op1 = N->getOperand(1);
17890
17891   // X86 can't encode an immediate LHS of a sub. See if we can push the
17892   // negation into a preceding instruction.
17893   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
17894     // If the RHS of the sub is a XOR with one use and a constant, invert the
17895     // immediate. Then add one to the LHS of the sub so we can turn
17896     // X-Y -> X+~Y+1, saving one register.
17897     if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
17898         isa<ConstantSDNode>(Op1.getOperand(1))) {
17899       APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue();
17900       EVT VT = Op0.getValueType();
17901       SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
17902                                    Op1.getOperand(0),
17903                                    DAG.getConstant(~XorC, VT));
17904       return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
17905                          DAG.getConstant(C->getAPIntValue()+1, VT));
17906     }
17907   }
17908
17909   // Try to synthesize horizontal subs from subs of shuffles.
17910   EVT VT = N->getValueType(0);
17911   if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
17912        (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
17913       isHorizontalBinOp(Op0, Op1, true))
17914     return DAG.getNode(X86ISD::HSUB, SDLoc(N), VT, Op0, Op1);
17915
17916   return OptimizeConditionalInDecrement(N, DAG);
17917 }
17918
17919 /// performVZEXTCombine - Do target-specific DAG combines on X86ISD::VZEXT nodes.
17920 static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG,
17921                                    TargetLowering::DAGCombinerInfo &DCI,
17922                                    const X86Subtarget *Subtarget) {
17923   // (vzext (bitcast (vzext x))) -> (vzext x)
17924   SDValue In = N->getOperand(0);
17925   while (In.getOpcode() == ISD::BITCAST)
17926     In = In.getOperand(0);
17927
17928   if (In.getOpcode() != X86ISD::VZEXT)
17929     return SDValue();
17930
17931   return DAG.getNode(X86ISD::VZEXT, SDLoc(N), N->getValueType(0),
17932                      In.getOperand(0));
17933 }
17934
17935 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
17936                                              DAGCombinerInfo &DCI) const {
17937   SelectionDAG &DAG = DCI.DAG;
17938   switch (N->getOpcode()) {
17939   default: break;
17940   case ISD::EXTRACT_VECTOR_ELT:
17941     return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, DCI);
17942   case ISD::VSELECT:
17943   case ISD::SELECT:         return PerformSELECTCombine(N, DAG, DCI, Subtarget);
17944   case X86ISD::CMOV:        return PerformCMOVCombine(N, DAG, DCI, Subtarget);
17945   case ISD::ADD:            return PerformAddCombine(N, DAG, Subtarget);
17946   case ISD::SUB:            return PerformSubCombine(N, DAG, Subtarget);
17947   case X86ISD::ADC:         return PerformADCCombine(N, DAG, DCI);
17948   case ISD::MUL:            return PerformMulCombine(N, DAG, DCI);
17949   case ISD::SHL:
17950   case ISD::SRA:
17951   case ISD::SRL:            return PerformShiftCombine(N, DAG, DCI, Subtarget);
17952   case ISD::AND:            return PerformAndCombine(N, DAG, DCI, Subtarget);
17953   case ISD::OR:             return PerformOrCombine(N, DAG, DCI, Subtarget);
17954   case ISD::XOR:            return PerformXorCombine(N, DAG, DCI, Subtarget);
17955   case ISD::LOAD:           return PerformLOADCombine(N, DAG, DCI, Subtarget);
17956   case ISD::STORE:          return PerformSTORECombine(N, DAG, Subtarget);
17957   case ISD::SINT_TO_FP:     return PerformSINT_TO_FPCombine(N, DAG, this);
17958   case ISD::FADD:           return PerformFADDCombine(N, DAG, Subtarget);
17959   case ISD::FSUB:           return PerformFSUBCombine(N, DAG, Subtarget);
17960   case X86ISD::FXOR:
17961   case X86ISD::FOR:         return PerformFORCombine(N, DAG);
17962   case X86ISD::FMIN:
17963   case X86ISD::FMAX:        return PerformFMinFMaxCombine(N, DAG);
17964   case X86ISD::FAND:        return PerformFANDCombine(N, DAG);
17965   case X86ISD::BT:          return PerformBTCombine(N, DAG, DCI);
17966   case X86ISD::VZEXT_MOVL:  return PerformVZEXT_MOVLCombine(N, DAG);
17967   case ISD::ANY_EXTEND:
17968   case ISD::ZERO_EXTEND:    return PerformZExtCombine(N, DAG, DCI, Subtarget);
17969   case ISD::SIGN_EXTEND:    return PerformSExtCombine(N, DAG, DCI, Subtarget);
17970   case ISD::SIGN_EXTEND_INREG: return PerformSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
17971   case ISD::TRUNCATE:       return PerformTruncateCombine(N, DAG, DCI, Subtarget);
17972   case ISD::SETCC:          return PerformISDSETCCCombine(N, DAG);
17973   case X86ISD::SETCC:       return PerformSETCCCombine(N, DAG, DCI, Subtarget);
17974   case X86ISD::BRCOND:      return PerformBrCondCombine(N, DAG, DCI, Subtarget);
17975   case X86ISD::VZEXT:       return performVZEXTCombine(N, DAG, DCI, Subtarget);
17976   case X86ISD::SHUFP:       // Handle all target specific shuffles
17977   case X86ISD::PALIGNR:
17978   case X86ISD::UNPCKH:
17979   case X86ISD::UNPCKL:
17980   case X86ISD::MOVHLPS:
17981   case X86ISD::MOVLHPS:
17982   case X86ISD::PSHUFD:
17983   case X86ISD::PSHUFHW:
17984   case X86ISD::PSHUFLW:
17985   case X86ISD::MOVSS:
17986   case X86ISD::MOVSD:
17987   case X86ISD::VPERMILP:
17988   case X86ISD::VPERM2X128:
17989   case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI, Subtarget);
17990   case ISD::FMA:            return PerformFMACombine(N, DAG, Subtarget);
17991   }
17992
17993   return SDValue();
17994 }
17995
17996 /// isTypeDesirableForOp - Return true if the target has native support for
17997 /// the specified value type and it is 'desirable' to use the type for the
17998 /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
17999 /// instruction encodings are longer and some i16 instructions are slow.
18000 bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
18001   if (!isTypeLegal(VT))
18002     return false;
18003   if (VT != MVT::i16)
18004     return true;
18005
18006   switch (Opc) {
18007   default:
18008     return true;
18009   case ISD::LOAD:
18010   case ISD::SIGN_EXTEND:
18011   case ISD::ZERO_EXTEND:
18012   case ISD::ANY_EXTEND:
18013   case ISD::SHL:
18014   case ISD::SRL:
18015   case ISD::SUB:
18016   case ISD::ADD:
18017   case ISD::MUL:
18018   case ISD::AND:
18019   case ISD::OR:
18020   case ISD::XOR:
18021     return false;
18022   }
18023 }
18024
18025 /// IsDesirableToPromoteOp - This method queries the target whether it is
18026 /// beneficial for the DAG combiner to promote the specified node. If true, it
18027 /// should return the desired promotion type by reference.
18028 bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
18029   EVT VT = Op.getValueType();
18030   if (VT != MVT::i16)
18031     return false;
18032
18033   bool Promote = false;
18034   bool Commute = false;
18035   switch (Op.getOpcode()) {
18036   default: break;
18037   case ISD::LOAD: {
18038     LoadSDNode *LD = cast<LoadSDNode>(Op);
18039     // If the non-extending load has a single use and it's not live out, then it
18040     // might be folded.
18041     if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&&
18042                                                      Op.hasOneUse()*/) {
18043       for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
18044            UE = Op.getNode()->use_end(); UI != UE; ++UI) {
18045         // The only case where we'd want to promote LOAD (rather than having it
18046         // promoted as an operand) is when its only use is a liveout.
18047         if (UI->getOpcode() != ISD::CopyToReg)
18048           return false;
18049       }
18050     }
18051     Promote = true;
18052     break;
18053   }
18054   case ISD::SIGN_EXTEND:
18055   case ISD::ZERO_EXTEND:
18056   case ISD::ANY_EXTEND:
18057     Promote = true;
18058     break;
18059   case ISD::SHL:
18060   case ISD::SRL: {
18061     SDValue N0 = Op.getOperand(0);
18062     // Look out for (store (shl (load), x)).
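// Editorial note (illustrative rationale): promoting the i16 shift to i32
// here would wrap the value in extend/truncate nodes and prevent folding
// the load and the store into a single 16-bit read-modify-write shift, so
// the promotion is declined when both folds are possible.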
18063 if (MayFoldLoad(N0) && MayFoldIntoStore(Op)) 18064 return false; 18065 Promote = true; 18066 break; 18067 } 18068 case ISD::ADD: 18069 case ISD::MUL: 18070 case ISD::AND: 18071 case ISD::OR: 18072 case ISD::XOR: 18073 Commute = true; 18074 // fallthrough 18075 case ISD::SUB: { 18076 SDValue N0 = Op.getOperand(0); 18077 SDValue N1 = Op.getOperand(1); 18078 if (!Commute && MayFoldLoad(N1)) 18079 return false; 18080 // Avoid disabling potential load folding opportunities. 18081 if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op))) 18082 return false; 18083 if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op))) 18084 return false; 18085 Promote = true; 18086 } 18087 } 18088 18089 PVT = MVT::i32; 18090 return Promote; 18091} 18092 18093//===----------------------------------------------------------------------===// 18094// X86 Inline Assembly Support 18095//===----------------------------------------------------------------------===// 18096 18097namespace { 18098 // Helper to match a string separated by whitespace. 18099 bool matchAsmImpl(StringRef s, ArrayRef<const StringRef *> args) { 18100 s = s.substr(s.find_first_not_of(" \t")); // Skip leading whitespace. 18101 18102 for (unsigned i = 0, e = args.size(); i != e; ++i) { 18103 StringRef piece(*args[i]); 18104 if (!s.startswith(piece)) // Check if the piece matches. 18105 return false; 18106 18107 s = s.substr(piece.size()); 18108 StringRef::size_type pos = s.find_first_not_of(" \t"); 18109 if (pos == 0) // We matched a prefix. 18110 return false; 18111 18112 s = s.substr(pos); 18113 } 18114 18115 return s.empty(); 18116 } 18117 const VariadicFunction1<bool, StringRef, StringRef, matchAsmImpl> matchAsm={}; 18118} 18119 18120bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const { 18121 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue()); 18122 18123 std::string AsmStr = IA->getAsmString(); 18124 18125 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType()); 18126 if (!Ty || Ty->getBitWidth() % 16 != 0) 18127 return false; 18128 18129 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a" 18130 SmallVector<StringRef, 4> AsmPieces; 18131 SplitString(AsmStr, AsmPieces, ";\n"); 18132 18133 switch (AsmPieces.size()) { 18134 default: return false; 18135 case 1: 18136 // FIXME: this should verify that we are targeting a 486 or better. If not, 18137 // we will turn this bswap into something that will be lowered to logical 18138 // ops instead of emitting the bswap asm. For now, we don't support 486 or 18139 // lower so don't worry about this. 18140 // bswap $0 18141 if (matchAsm(AsmPieces[0], "bswap", "$0") || 18142 matchAsm(AsmPieces[0], "bswapl", "$0") || 18143 matchAsm(AsmPieces[0], "bswapq", "$0") || 18144 matchAsm(AsmPieces[0], "bswap", "${0:q}") || 18145 matchAsm(AsmPieces[0], "bswapl", "${0:q}") || 18146 matchAsm(AsmPieces[0], "bswapq", "${0:q}")) { 18147 // No need to check constraints, nothing other than the equivalent of 18148 // "=r,0" would be valid here. 
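// Illustrative IR for this path (editorial example):
//   %r = call i32 asm "bswap $0", "=r,0"(i32 %x)
// is replaced with
//   %r = call i32 @llvm.bswap.i32(i32 %x)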
      return IntrinsicLowering::LowerToByteSwap(CI);
    }

    // rorw $$8, ${0:w}  -->  llvm.bswap.i16
    if (CI->getType()->isIntegerTy(16) &&
        IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
        (matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") ||
         matchAsm(AsmPieces[0], "rolw", "$$8,", "${0:w}"))) {
      AsmPieces.clear();
      const std::string &ConstraintsStr = IA->getConstraintString();
      SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
      array_pod_sort(AsmPieces.begin(), AsmPieces.end());
      if (AsmPieces.size() == 4 &&
          AsmPieces[0] == "~{cc}" &&
          AsmPieces[1] == "~{dirflag}" &&
          AsmPieces[2] == "~{flags}" &&
          AsmPieces[3] == "~{fpsr}")
        return IntrinsicLowering::LowerToByteSwap(CI);
    }
    break;
  case 3:
    if (CI->getType()->isIntegerTy(32) &&
        IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
        matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") &&
        matchAsm(AsmPieces[1], "rorl", "$$16,", "$0") &&
        matchAsm(AsmPieces[2], "rorw", "$$8,", "${0:w}")) {
      AsmPieces.clear();
      const std::string &ConstraintsStr = IA->getConstraintString();
      SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
      array_pod_sort(AsmPieces.begin(), AsmPieces.end());
      if (AsmPieces.size() == 4 &&
          AsmPieces[0] == "~{cc}" &&
          AsmPieces[1] == "~{dirflag}" &&
          AsmPieces[2] == "~{flags}" &&
          AsmPieces[3] == "~{fpsr}")
        return IntrinsicLowering::LowerToByteSwap(CI);
    }

    if (CI->getType()->isIntegerTy(64)) {
      InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
      if (Constraints.size() >= 2 &&
          Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
          Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
        // bswap %eax / bswap %edx / xchgl %eax, %edx  -> llvm.bswap.i64
        if (matchAsm(AsmPieces[0], "bswap", "%eax") &&
            matchAsm(AsmPieces[1], "bswap", "%edx") &&
            matchAsm(AsmPieces[2], "xchgl", "%eax,", "%edx"))
          return IntrinsicLowering::LowerToByteSwap(CI);
      }
    }
    break;
  }
  return false;
}

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'R':
    case 'q':
    case 'Q':
    case 'f':
    case 't':
    case 'u':
    case 'y':
    case 'x':
    case 'Y':
    case 'l':
      return C_RegisterClass;
    case 'a':
    case 'b':
    case 'c':
    case 'd':
    case 'S':
    case 'D':
    case 'A':
      return C_Register;
    case 'I':
    case 'J':
    case 'K':
    case 'L':
    case 'M':
    case 'N':
    case 'G':
    case 'C':
    case 'e':
    case 'Z':
      return C_Other;
    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
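/// For example, under the checks below a constraint of 'x' on a 128-bit
/// vector operand yields CW_Register when SSE1 is available, while 'I' on a
/// constant in the range [0, 31] yields CW_Constant.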
TargetLowering::ConstraintWeight
  X86TargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (CallOperandVal == NULL)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'R':
  case 'q':
  case 'Q':
  case 'a':
  case 'b':
  case 'c':
  case 'd':
  case 'S':
  case 'D':
  case 'A':
    if (type->isIntegerTy())
      weight = CW_SpecificReg;
    break;
  case 'f':
  case 't':
  case 'u':
    if (type->isFloatingPointTy())
      weight = CW_SpecificReg;
    break;
  case 'y':
    if (type->isX86_MMXTy() && Subtarget->hasMMX())
      weight = CW_SpecificReg;
    break;
  case 'x':
  case 'Y':
    if (((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasSSE1()) ||
        ((type->getPrimitiveSizeInBits() == 256) && Subtarget->hasFp256()))
      weight = CW_Register;
    break;
  case 'I':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 31)
        weight = CW_Constant;
    }
    break;
  case 'J':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 63)
        weight = CW_Constant;
    }
    break;
  case 'K':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
        weight = CW_Constant;
    }
    break;
  case 'L':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
        weight = CW_Constant;
    }
    break;
  case 'M':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 3)
        weight = CW_Constant;
    }
    break;
  case 'N':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 0xff)
        weight = CW_Constant;
    }
    break;
  case 'G':
  case 'C':
    if (isa<ConstantFP>(CallOperandVal)) {
      weight = CW_Constant;
    }
    break;
  case 'e':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -0x80000000LL) &&
          (C->getSExtValue() <= 0x7fffffffLL))
        weight = CW_Constant;
    }
    break;
  case 'Z':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 0xffffffff)
        weight = CW_Constant;
    }
    break;
  }
  return weight;
}

/// LowerXConstraint - try to replace an X constraint, which matches anything,
/// with another that has more specific requirements based on the type of the
/// corresponding operand.
const char *X86TargetLowering::
LowerXConstraint(EVT ConstraintVT) const {
  // FP X constraints get lowered to SSE1/2 registers if available, otherwise
  // 'f' like normal targets.
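  // For instance, an f64 operand maps to "Y" when SSE2 is available, keeping
  // the value in an SSE register rather than on the x87 stack that 'f' would
  // imply.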
  if (ConstraintVT.isFloatingPoint()) {
    if (Subtarget->hasSSE2())
      return "Y";
    if (Subtarget->hasSSE1())
      return "x";
  }

  return TargetLowering::LowerXConstraint(ConstraintVT);
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue>&Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result(0, 0);

  // Only support length 1 constraints for now.
  if (Constraint.length() > 1) return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 31) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'J':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 63) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'K':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (isInt<8>(C->getSExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'N':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 255) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'e': {
    // 32-bit signed value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getSExtValue())) {
        // Widen to 64 bits here to get it sign extended.
        Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
        break;
      }
    // FIXME gcc accepts some relocatable values here too, but only in certain
    // memory models; it's complicated.
    }
    return;
  }
  case 'Z': {
    // 32-bit unsigned value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getZExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    // FIXME gcc accepts some relocatable values here too, but only in certain
    // memory models; it's complicated.
    return;
  }
  case 'i': {
    // Literal immediates are always ok.
    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
      // Widen to 64 bits here to get it sign extended.
      Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64);
      break;
    }

    // In any sort of PIC mode, addresses need to be computed at runtime by
    // adding in a register or some sort of table lookup.  These can't
    // be used as immediates.
    if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC())
      return;

    // If we are in non-pic codegen mode, we allow the address of a global (with
    // an optional displacement) to be used with 'i'.
    GlobalAddressSDNode *GA = 0;
    int64_t Offset = 0;

    // Match either (GA), (GA+C), (GA+C1+C2), etc.
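    // For example, (add (add GA, 4), 8) is accepted with an accumulated
    // Offset of 12: each iteration below peels one ADD/SUB-with-constant off
    // of Op until the GlobalAddressSDNode itself is reached.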
    while (1) {
      if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
        Offset += GA->getOffset();
        break;
      } else if (Op.getOpcode() == ISD::ADD) {
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
          Offset += C->getZExtValue();
          Op = Op.getOperand(0);
          continue;
        }
      } else if (Op.getOpcode() == ISD::SUB) {
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
          Offset += -C->getZExtValue();
          Op = Op.getOperand(0);
          continue;
        }
      }

      // Otherwise, this isn't something we can handle, reject it.
      return;
    }

    const GlobalValue *GV = GA->getGlobal();
    // If we require an extra load to get this address, as in PIC mode, we
    // can't accept it.
    if (isGlobalStubReference(Subtarget->ClassifyGlobalReference(GV,
                                                        getTargetMachine())))
      return;

    Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op),
                                        GA->getValueType(0), Offset);
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    // TODO: Slight differences here in allocation order and leaving
    // RIP in the class. Do they matter any more here than they do
    // in the normal allocation?
    case 'q':   // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
      if (Subtarget->is64Bit()) {
        if (VT == MVT::i32 || VT == MVT::f32)
          return std::make_pair(0U, &X86::GR32RegClass);
        if (VT == MVT::i16)
          return std::make_pair(0U, &X86::GR16RegClass);
        if (VT == MVT::i8 || VT == MVT::i1)
          return std::make_pair(0U, &X86::GR8RegClass);
        if (VT == MVT::i64 || VT == MVT::f64)
          return std::make_pair(0U, &X86::GR64RegClass);
        break;
      }
      // 32-bit fallthrough
    case 'Q':   // Q_REGS
      if (VT == MVT::i32 || VT == MVT::f32)
        return std::make_pair(0U, &X86::GR32_ABCDRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_ABCDRegClass);
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
      if (VT == MVT::i64)
        return std::make_pair(0U, &X86::GR64_ABCDRegClass);
      break;
    case 'r':   // GENERAL_REGS
    case 'l':   // INDEX_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8RegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16RegClass);
      if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit())
        return std::make_pair(0U, &X86::GR32RegClass);
      return std::make_pair(0U, &X86::GR64RegClass);
    case 'R':   // LEGACY_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_NOREXRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_NOREXRegClass);
      if (VT == MVT::i32 || !Subtarget->is64Bit())
        return std::make_pair(0U, &X86::GR32_NOREXRegClass);
      return std::make_pair(0U, &X86::GR64_NOREXRegClass);
    case 'f':   // FP Stack registers.
      // If SSE is enabled for this VT, use f80 to ensure the isel moves the
      // value to the correct fpstack register class.
      if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP32RegClass);
      if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP64RegClass);
      return std::make_pair(0U, &X86::RFP80RegClass);
    case 'y':   // MMX_REGS if MMX allowed.
      if (!Subtarget->hasMMX()) break;
      return std::make_pair(0U, &X86::VR64RegClass);
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (!Subtarget->hasSSE2()) break;
      // FALL THROUGH.
    case 'x':   // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
      if (!Subtarget->hasSSE1()) break;

      switch (VT.SimpleTy) {
      default: break;
      // Scalar SSE types.
      case MVT::f32:
      case MVT::i32:
        return std::make_pair(0U, &X86::FR32RegClass);
      case MVT::f64:
      case MVT::i64:
        return std::make_pair(0U, &X86::FR64RegClass);
      // Vector types.
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        return std::make_pair(0U, &X86::VR128RegClass);
      // AVX types.
      case MVT::v32i8:
      case MVT::v16i16:
      case MVT::v8i32:
      case MVT::v4i64:
      case MVT::v8f32:
      case MVT::v4f64:
        return std::make_pair(0U, &X86::VR256RegClass);
      case MVT::v8f64:
      case MVT::v16f32:
      case MVT::v16i32:
      case MVT::v8i64:
        return std::make_pair(0U, &X86::VR512RegClass);
      }
      break;
    }
  }

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // Not found as a standard register?
  if (Res.second == 0) {
    // Map st(0) .. st(7) to the corresponding ST register.
    if (Constraint.size() == 7 && Constraint[0] == '{' &&
        tolower(Constraint[1]) == 's' &&
        tolower(Constraint[2]) == 't' &&
        Constraint[3] == '(' &&
        (Constraint[4] >= '0' && Constraint[4] <= '7') &&
        Constraint[5] == ')' &&
        Constraint[6] == '}') {

      Res.first = X86::ST0+Constraint[4]-'0';
      Res.second = &X86::RFP80RegClass;
      return Res;
    }

    // GCC allows "st(0)" to be called just plain "st".
    if (StringRef("{st}").equals_lower(Constraint)) {
      Res.first = X86::ST0;
      Res.second = &X86::RFP80RegClass;
      return Res;
    }

    // flags -> EFLAGS
    if (StringRef("{flags}").equals_lower(Constraint)) {
      Res.first = X86::EFLAGS;
      Res.second = &X86::CCRRegClass;
      return Res;
    }

    // 'A' means EAX + EDX.
    if (Constraint == "A") {
      Res.first = X86::EAX;
      Res.second = &X86::GR32_ADRegClass;
      return Res;
    }
    return Res;
  }

  // Otherwise, check to see if this is a register class of the wrong value
  // type.  For example, we want to map "{ax},i32" -> {eax}, we don't want it to
  // turn into {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res;   // Correct type already, nothing to do.

  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp".  If we
  // really want an 8-bit or 32-bit register, map to the appropriate register
  // class and return the appropriate register.
  if (Res.second == &X86::GR16RegClass) {
    if (VT == MVT::i8 || VT == MVT::i1) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::AL; break;
      case X86::DX: DestReg = X86::DL; break;
      case X86::CX: DestReg = X86::CL; break;
      case X86::BX: DestReg = X86::BL; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR8RegClass;
      }
    } else if (VT == MVT::i32 || VT == MVT::f32) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::EAX; break;
      case X86::DX: DestReg = X86::EDX; break;
      case X86::CX: DestReg = X86::ECX; break;
      case X86::BX: DestReg = X86::EBX; break;
      case X86::SI: DestReg = X86::ESI; break;
      case X86::DI: DestReg = X86::EDI; break;
      case X86::BP: DestReg = X86::EBP; break;
      case X86::SP: DestReg = X86::ESP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR32RegClass;
      }
    } else if (VT == MVT::i64 || VT == MVT::f64) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::RAX; break;
      case X86::DX: DestReg = X86::RDX; break;
      case X86::CX: DestReg = X86::RCX; break;
      case X86::BX: DestReg = X86::RBX; break;
      case X86::SI: DestReg = X86::RSI; break;
      case X86::DI: DestReg = X86::RDI; break;
      case X86::BP: DestReg = X86::RBP; break;
      case X86::SP: DestReg = X86::RSP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR64RegClass;
      }
    }
  } else if (Res.second == &X86::FR32RegClass ||
             Res.second == &X86::FR64RegClass ||
             Res.second == &X86::VR128RegClass ||
             Res.second == &X86::VR256RegClass ||
             Res.second == &X86::FR32XRegClass ||
             Res.second == &X86::FR64XRegClass ||
             Res.second == &X86::VR128XRegClass ||
             Res.second == &X86::VR256XRegClass ||
             Res.second == &X86::VR512RegClass) {
    // Handle references to XMM physical registers that got mapped into the
    // wrong class.  This can happen with constraints like {xmm0} where the
    // target independent register mapper will just pick the first match it can
    // find, ignoring the required type.

    if (VT == MVT::f32 || VT == MVT::i32)
      Res.second = &X86::FR32RegClass;
    else if (VT == MVT::f64 || VT == MVT::i64)
      Res.second = &X86::FR64RegClass;
    else if (X86::VR128RegClass.hasType(VT))
      Res.second = &X86::VR128RegClass;
    else if (X86::VR256RegClass.hasType(VT))
      Res.second = &X86::VR256RegClass;
    else if (X86::VR512RegClass.hasType(VT))
      Res.second = &X86::VR512RegClass;
  }

  return Res;
}
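// A sketch of the remapping above: a (hypothetical) query for constraint
// "{ax}" with VT = MVT::i32 first resolves to {X86::AX, &X86::GR16RegClass}
// via the generic TargetLowering lookup, and the GR16 fixup then rewrites it
// to {X86::EAX, &X86::GR32RegClass}, so the i32 value stays in one register
// instead of being split across {ax},{dx}.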