X86ISelLowering.cpp revision 4edfa2278aa34876abffe67bfb66c0f92bd597a5
//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "x86-isel"
#include "X86ISelLowering.h"
#include "Utils/X86ShuffleDecode.h"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/VariadicFunction.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include <bitset>
#include <cctype>
using namespace llvm;

STATISTIC(NumTailCalls, "Number of tail calls");

// Forward declarations.
static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
                       SDValue V2);

static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal,
                                SelectionDAG &DAG, SDLoc dl,
                                unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  unsigned Factor = VT.getSizeInBits()/vectorWidth;
  EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
                                  VT.getVectorNumElements()/Factor);

  // Extract from UNDEF is UNDEF.
  if (Vec.getOpcode() == ISD::UNDEF)
    return DAG.getUNDEF(ResultVT);

  // Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR
  unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / vectorWidth)
                               * ElemsPerChunk);

  // If the input is a buildvector just emit a smaller one.
  if (Vec.getOpcode() == ISD::BUILD_VECTOR)
    return DAG.getNode(ISD::BUILD_VECTOR, dl, ResultVT,
                       Vec->op_begin()+NormalizedIdxVal, ElemsPerChunk);

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  SDValue Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec,
                               VecIdx);

  return Result;

}
/// Generate a DAG to grab 128-bits from a vector > 128 bits.  This
/// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
/// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
/// instructions or a simple subregister reference. Idx is an index in the
/// 128 bits we want.  It need not be aligned to a 128-bit boundary.  That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert((Vec.getValueType().is256BitVector() ||
          Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 128);
}

/// Generate a DAG to grab 256-bits from a 512-bit vector.
static SDValue Extract256BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 256);
}

static SDValue InsertSubVector(SDValue Result, SDValue Vec,
                               unsigned IdxVal, SelectionDAG &DAG,
                               SDLoc dl, unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  // Inserting UNDEF is Result
  if (Vec.getOpcode() == ISD::UNDEF)
    return Result;
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  EVT ResultVT = Result.getValueType();

  // Insert the relevant vectorWidth bits.
  unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/vectorWidth)
                               * ElemsPerChunk);

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec,
                     VecIdx);
}
/// Generate a DAG to put 128-bits into a vector > 128 bits.  This
/// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
/// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
/// simple superregister reference.  Idx is an index in the 128 bits
/// we want.  It need not be aligned to a 128-bit boundary.  That makes
/// lowering INSERT_VECTOR_ELT operations easier.
static SDValue Insert128BitVector(SDValue Result, SDValue Vec,
                                  unsigned IdxVal, SelectionDAG &DAG,
                                  SDLoc dl) {
  assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
}

static SDValue Insert256BitVector(SDValue Result, SDValue Vec,
                                  unsigned IdxVal, SelectionDAG &DAG,
                                  SDLoc dl) {
  assert(Vec.getValueType().is256BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 256);
}

/// Concat two 128-bit vectors into a 256 bit vector using VINSERTF128
/// instructions. This is used because creating CONCAT_VECTOR nodes of
/// BUILD_VECTORS returns a larger BUILD_VECTOR while we're trying to lower
/// large BUILD_VECTORS.
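/// Both halves are inserted into an UNDEF vector of the wider type via
/// Insert128BitVector, so the result matches the VINSERTF128 pattern above.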
static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert128BitVector(V, V2, NumElems/2, DAG, dl);
}

static SDValue Concat256BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert256BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert256BitVector(V, V2, NumElems/2, DAG, dl);
}

static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  bool is64Bit = Subtarget->is64Bit();

  if (Subtarget->isTargetEnvMacho()) {
    if (is64Bit)
      return new X86_64MachoTargetObjectFile();
    return new TargetLoweringObjectFileMachO();
  }

  if (Subtarget->isTargetLinux())
    return new X86LinuxTargetObjectFile();
  if (Subtarget->isTargetELF())
    return new TargetLoweringObjectFileELF();
  if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho())
    return new TargetLoweringObjectFileCOFF();
  llvm_unreachable("unknown subtarget type");
}

X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
  : TargetLowering(TM, createTLOF(TM)) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  TD = getDataLayout();

  resetOperationActions();
}

void X86TargetLowering::resetOperationActions() {
  const TargetMachine &TM = getTargetMachine();
  static bool FirstTimeThrough = true;

  // If none of the target options have changed, then we don't need to reset
  // the operation actions.
  if (!FirstTimeThrough && TO == TM.Options) return;

  if (!FirstTimeThrough) {
    // Reinitialize the actions.
    initActions();
    FirstTimeThrough = false;
  }

  TO = TM.Options;

  // Set up the TargetLowering object.
  static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };

  // X86 is weird, it always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);
  // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // For 64-bit since we have so many registers use the ILP scheduler, for
  // 32-bit code use the register pressure specific scheduling.
  // For Atom, always use ILP scheduling.
  if (Subtarget->isAtom())
    setSchedulingPreference(Sched::ILP);
  else if (Subtarget->is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
  const X86RegisterInfo *RegInfo =
    static_cast<const X86RegisterInfo*>(TM.getRegisterInfo());
  setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());

  // Bypass expensive divides on Atom when compiling with O2
  if (Subtarget->hasSlowDivide() && TM.getOptLevel() >= CodeGenOpt::Default) {
    addBypassSlowDiv(32, 8);
    if (Subtarget->is64Bit())
      addBypassSlowDiv(64, 16);
  }

  if (Subtarget->isTargetWindows() && !Subtarget->isTargetCygMing()) {
    // Setup Windows compiler runtime calls.
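    // 64-bit integer divide/remainder/multiply are mapped to the MSVC runtime
    // helpers (_alldiv, _aullrem, _allmul, ...), which are given the stdcall
    // calling convention below.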
    setLibcallName(RTLIB::SDIV_I64, "_alldiv");
    setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
    setLibcallName(RTLIB::SREM_I64, "_allrem");
    setLibcallName(RTLIB::UREM_I64, "_aullrem");
    setLibcallName(RTLIB::MUL_I64, "_allmul");
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);

    // The _ftol2 runtime function has an unusual calling conv, which
    // is modeled by a special pseudo-instruction.
    setLibcallName(RTLIB::FPTOUINT_F64_I64, 0);
    setLibcallName(RTLIB::FPTOUINT_F32_I64, 0);
    setLibcallName(RTLIB::FPTOUINT_F64_I32, 0);
    setLibcallName(RTLIB::FPTOUINT_F32_I32, 0);
  }

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetMingw()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, &X86::GR8RegClass);
  addRegisterClass(MVT::i16, &X86::GR16RegClass);
  addRegisterClass(MVT::i32, &X86::GR32RegClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, &X86::GR64RegClass);

  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8, Expand);
  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  // SETOEQ and SETUNE require checking two conditions.
  setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  } else if (!TM.Options.UseSoftFloat) {
    // We have an algorithm for SSE2->double, and we turn this into a
    // 64-bit FILD followed by conditional FADD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);

  if (!TM.Options.UseSoftFloat) {
    // SSE has no i16 to fp conversion, only i32
    if (X86ScalarSSEf32) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
      // f32 and f64 cases are Legal, f80 case is not
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Promote);
  }

  // In 32-bit mode these are custom lowered.  In 64-bit mode F32 and F64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    // f32 and f64 cases are Legal, f80 case is not
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else if (!TM.Options.UseSoftFloat) {
    // Since AVX is a superset of SSE3, only check for SSE here.
    if (Subtarget->hasSSE1() && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64; without
      // SSE, we're stuck with a fistpll.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  }

  if (isTargetFTOL()) {
    // Use the _ftol2 runtime function, which has a pseudo-instruction
    // to handle its weird calling convention.
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::BITCAST, MVT::f64, Expand);
      // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    }
  }

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // Add/Sub overflow ops with MVT::Glues are lowered to EFLAGS dependences.
    setOperationAction(ISD::ADDC, VT, Custom);
    setOperationAction(ISD::ADDE, VT, Custom);
    setOperationAction(ISD::SUBC, VT, Custom);
    setOperationAction(ISD::SUBE, VT, Custom);
  }

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f80, Expand);
  setOperationAction(ISD::BR_CC, MVT::i8, Expand);
  setOperationAction(ISD::BR_CC, MVT::i16, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::FP_ROUND_INREG, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);
  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // Promote the i8 variants and force them on up to i32 which has a shorter
  // encoding.
  setOperationAction(ISD::CTTZ, MVT::i8, Promote);
  AddPromotedToType (ISD::CTTZ, MVT::i8, MVT::i32);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i8, Promote);
  AddPromotedToType (ISD::CTTZ_ZERO_UNDEF, MVT::i8, MVT::i32);
  if (Subtarget->hasBMI()) {
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTTZ, MVT::i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::i32, Custom);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ, MVT::i64, Custom);
  }

  if (Subtarget->hasLZCNT()) {
    // When promoting the i8 variants, force them to i32 for a shorter
    // encoding.
    setOperationAction(ISD::CTLZ, MVT::i8, Promote);
    AddPromotedToType (ISD::CTLZ, MVT::i8, MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Promote);
    AddPromotedToType (ISD::CTLZ_ZERO_UNDEF, MVT::i8, MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTLZ, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ, MVT::i32, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::CTLZ, MVT::i64, Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
    }
  }

  if (Subtarget->hasPOPCNT()) {
    setOperationAction(ISD::CTPOP, MVT::i8, Promote);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i8, Expand);
    setOperationAction(ISD::CTPOP, MVT::i16, Expand);
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
  setOperationAction(ISD::BSWAP, MVT::i16, Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT, MVT::i8, Custom);
  setOperationAction(ISD::SELECT, MVT::i16, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::f80, Custom);
  setOperationAction(ISD::SETCC, MVT::i8, Custom);
  setOperationAction(ISD::SETCC, MVT::i16, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::SETCC, MVT::f80, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT, MVT::i64, Custom);
    setOperationAction(ISD::SETCC, MVT::i64, Custom);
  }
  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling; it is a light-weight setjmp/longjmp replacement
  // to support continuation, user-level threading, etc. As a result, no other
  // SjLj exception interfaces are implemented, and please don't build your
  // own exception handling based on them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // Darwin ABI issue.
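  // Symbolic address operands (constant pool entries, jump tables, global and
  // block addresses) are custom lowered so the proper wrapper node and PIC
  // base are used for the target ABI.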
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::ExternalSymbol, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
    setOperationAction(ISD::JumpTable, MVT::i64, Custom);
    setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64, Custom);
    setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  }

  if (Subtarget->hasSSE1())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // Expand certain atomics
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }

  if (!Subtarget->is64Bit()) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i64, Custom);
  }

  if (Subtarget->hasCmpxchg16b()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i128, Custom);
  }

  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() &&
      !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing()) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }

  if (Subtarget->is64Bit()) {
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  if (Subtarget->is64Bit() && !Subtarget->isTargetWin64()) {
    // TargetInfo::X86_64ABIBuiltinVaList
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  } else {
    // TargetInfo::CharPtrBuiltinVaList
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  }

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ?
                       MVT::i64 : MVT::i32, Custom);
  else if (TM.Options.EnableSegmentedStacks)
    setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ?
                       MVT::i64 : MVT::i32, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ?
                       MVT::i64 : MVT::i32, Expand);

  if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::FR64RegClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // Lower this to FGETSIGNx86 plus an AND.
    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    addLegalFPImmediate(APFloat(+0.0));  // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
  } else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    }
  } else if (!TM.Options.UseSoftFloat) {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);
    addRegisterClass(MVT::f32, &X86::RFP32RegClass);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);
    setOperationAction(ISD::UNDEF, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FSIN, MVT::f32, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f32, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
    }
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // We don't support FMA.
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);

  // Long double always uses X87.
  if (!TM.Options.UseSoftFloat) {
    addRegisterClass(MVT::f80, &X86::RFP80RegClass);
    setOperationAction(ISD::UNDEF, MVT::f80, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    {
      APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended);
      addLegalFPImmediate(TmpFlt);  // FLD0
      TmpFlt.changeSign();
      addLegalFPImmediate(TmpFlt);  // FLD0/FCHS

      bool ignored;
      APFloat TmpFlt2(+1.0);
      TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
                      &ignored);
      addLegalFPImmediate(TmpFlt2);  // FLD1
      TmpFlt2.changeSign();
      addLegalFPImmediate(TmpFlt2);  // FLD1/FCHS
    }

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f80, Expand);
      setOperationAction(ISD::FCOS, MVT::f80, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
    }

    setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
    setOperationAction(ISD::FCEIL, MVT::f80, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
    setOperationAction(ISD::FRINT, MVT::f80, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
    setOperationAction(ISD::FMA, MVT::f80, Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);

  setOperationAction(ISD::FLOG, MVT::f80, Expand);
  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
  setOperationAction(ISD::FEXP, MVT::f80, Expand);
  setOperationAction(ISD::FEXP2, MVT::f80, Expand);

  // First set operation action for all vector types to either promote
  // (for widening) or expand (for scalarization). Then we will selectively
  // turn on ones that can be effectively codegen'd.
  for (int i = MVT::FIRST_VECTOR_VALUETYPE;
       i <= MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT VT = (MVT::SimpleValueType)i;
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::LOAD, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::INSERT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSINCOS, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FSINCOS, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FPOWI, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
    setOperationAction(ISD::TRUNCATE, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
    setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
    setOperationAction(ISD::ANY_EXTEND, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    for (int InnerVT = MVT::FIRST_VECTOR_VALUETYPE;
         InnerVT <= MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
      setTruncStoreAction(VT,
                          (MVT::SimpleValueType)InnerVT, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, Expand);
  }

  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
  // with -msoft-float, disable use of MMX as well.
  if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) {
    addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
    // No operations on x86mmx supported, everything uses intrinsics.
  }

  // MMX-sized vectors (other than x86mmx) are expected to be expanded
  // into smaller operations.
  setOperationAction(ISD::MULHS, MVT::v8i8, Expand);
  setOperationAction(ISD::MULHS, MVT::v4i16, Expand);
  setOperationAction(ISD::MULHS, MVT::v2i32, Expand);
  setOperationAction(ISD::MULHS, MVT::v1i64, Expand);
  setOperationAction(ISD::AND, MVT::v8i8, Expand);
  setOperationAction(ISD::AND, MVT::v4i16, Expand);
  setOperationAction(ISD::AND, MVT::v2i32, Expand);
  setOperationAction(ISD::AND, MVT::v1i64, Expand);
  setOperationAction(ISD::OR, MVT::v8i8, Expand);
  setOperationAction(ISD::OR, MVT::v4i16, Expand);
  setOperationAction(ISD::OR, MVT::v2i32, Expand);
  setOperationAction(ISD::OR, MVT::v1i64, Expand);
  setOperationAction(ISD::XOR, MVT::v8i8, Expand);
  setOperationAction(ISD::XOR, MVT::v4i16, Expand);
  setOperationAction(ISD::XOR, MVT::v2i32, Expand);
  setOperationAction(ISD::XOR, MVT::v1i64, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Expand);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand);
  setOperationAction(ISD::SELECT, MVT::v8i8, Expand);
  setOperationAction(ISD::SELECT, MVT::v4i16, Expand);
  setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
  setOperationAction(ISD::SELECT, MVT::v1i64, Expand);
  setOperationAction(ISD::BITCAST, MVT::v8i8, Expand);
  setOperationAction(ISD::BITCAST, MVT::v4i16, Expand);
  setOperationAction(ISD::BITCAST, MVT::v2i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::v1i64, Expand);

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, &X86::VR128RegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
    setOperationAction(ISD::FABS, MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, &X86::VR128RegClass);

    // FIXME: Unfortunately -soft-float and -no-implicit-float means XMM
    // registers cannot be used even for integer operations.
    addRegisterClass(MVT::v16i8, &X86::VR128RegClass);
    addRegisterClass(MVT::v8i16, &X86::VR128RegClass);
    addRegisterClass(MVT::v4i32, &X86::VR128RegClass);
    addRegisterClass(MVT::v2i64, &X86::VR128RegClass);

    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::ADD, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
    setOperationAction(ISD::FABS, MVT::v2f64, Custom);

    setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i32, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;
      // Do not attempt to custom lower non-power-of-2 vectors
      if (!isPowerOf2_32(VT.getVectorNumElements()))
        continue;
      // Do not attempt to custom lower non-128-bit vectors
      if (!VT.is128BitVector())
        continue;
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }

    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // Do not attempt to promote non-128-bit vectors
      if (!VT.is128BitVector())
        continue;

      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType (ISD::AND, VT, MVT::v2i64);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType (ISD::OR, VT, MVT::v2i64);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType (ISD::XOR, VT, MVT::v2i64);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType (ISD::LOAD, VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v2i64);
    }

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);

    setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    // As there is no 64-bit GPR available, we need to build a special custom
    // sequence to convert from v2i32 to v2f32.
    if (!Subtarget->is64Bit())
      setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);

    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);

    setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, Legal);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE41()) {
    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FRINT, MVT::f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);

    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);
    setOperationAction(ISD::VSELECT, MVT::v2i64, Legal);
    setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
    setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);

    // i8 and i16 vectors are custom, because the source register and source
    // memory operand types are not the same width.  f32 vectors are
    // custom since the immediate controlling the insert encodes additional
    // information.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);

    // FIXME: these should be Legal, but that's only for the case where
    // the index is constant.  For now custom expand to deal with that.
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
  }

  if (Subtarget->hasSSE2()) {
    setOperationAction(ISD::SRL, MVT::v8i16, Custom);
    setOperationAction(ISD::SRL, MVT::v16i8, Custom);

    setOperationAction(ISD::SHL, MVT::v8i16, Custom);
    setOperationAction(ISD::SHL, MVT::v16i8, Custom);

    setOperationAction(ISD::SRA, MVT::v8i16, Custom);
    setOperationAction(ISD::SRA, MVT::v16i8, Custom);

    // In the customized shift lowering, the legal cases in AVX2 will be
    // recognized.
    setOperationAction(ISD::SRL, MVT::v2i64, Custom);
    setOperationAction(ISD::SRL, MVT::v4i32, Custom);

    setOperationAction(ISD::SHL, MVT::v2i64, Custom);
    setOperationAction(ISD::SHL, MVT::v4i32, Custom);

    setOperationAction(ISD::SRA, MVT::v4i32, Custom);

    setOperationAction(ISD::SDIV, MVT::v8i16, Custom);
    setOperationAction(ISD::SDIV, MVT::v4i32, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasFp256()) {
    addRegisterClass(MVT::v32i8, &X86::VR256RegClass);
    addRegisterClass(MVT::v16i16, &X86::VR256RegClass);
    addRegisterClass(MVT::v8i32, &X86::VR256RegClass);
    addRegisterClass(MVT::v8f32, &X86::VR256RegClass);
    addRegisterClass(MVT::v4i64, &X86::VR256RegClass);
    addRegisterClass(MVT::v4f64, &X86::VR256RegClass);

    setOperationAction(ISD::LOAD, MVT::v8f32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i64, Legal);

    setOperationAction(ISD::FADD, MVT::v8f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v8f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v8f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v8f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v8f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v8f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v8f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v8f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v8f32, Custom);
    setOperationAction(ISD::FABS, MVT::v8f32, Custom);

    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f64, Custom);
    setOperationAction(ISD::FABS, MVT::v4f64, Custom);

    setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);

    setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, Legal);

    setOperationAction(ISD::SRL, MVT::v16i16, Custom);
    setOperationAction(ISD::SRL, MVT::v32i8, Custom);

    setOperationAction(ISD::SHL, MVT::v16i16, Custom);
    setOperationAction(ISD::SHL, MVT::v32i8, Custom);

    setOperationAction(ISD::SRA, MVT::v16i16, Custom);
    setOperationAction(ISD::SRA, MVT::v32i8, Custom);

    setOperationAction(ISD::SDIV, MVT::v16i16, Custom);

    setOperationAction(ISD::SETCC, MVT::v32i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i32, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i64, Custom);

    setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
    setOperationAction(ISD::SELECT, MVT::v8f32, Custom);

    setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);
    setOperationAction(ISD::VSELECT, MVT::v4i64, Legal);
    setOperationAction(ISD::VSELECT, MVT::v8i32, Legal);
    setOperationAction(ISD::VSELECT, MVT::v8f32, Legal);

    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v8i32, Custom);

    if (Subtarget->hasFMA() || Subtarget->hasFMA4()) {
      setOperationAction(ISD::FMA, MVT::v8f32, Legal);
      setOperationAction(ISD::FMA, MVT::v4f64, Legal);
      setOperationAction(ISD::FMA, MVT::v4f32, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::f32, Legal);
      setOperationAction(ISD::FMA, MVT::f64, Legal);
    }

    if (Subtarget->hasInt256()) {
      setOperationAction(ISD::ADD, MVT::v4i64, Legal);
      setOperationAction(ISD::ADD, MVT::v8i32, Legal);
      setOperationAction(ISD::ADD, MVT::v16i16, Legal);
      setOperationAction(ISD::ADD, MVT::v32i8, Legal);

      setOperationAction(ISD::SUB, MVT::v4i64, Legal);
      setOperationAction(ISD::SUB, MVT::v8i32, Legal);
      setOperationAction(ISD::SUB, MVT::v16i16, Legal);
      setOperationAction(ISD::SUB, MVT::v32i8, Legal);

      setOperationAction(ISD::MUL, MVT::v4i64, Custom);
      setOperationAction(ISD::MUL, MVT::v8i32, Legal);
      setOperationAction(ISD::MUL, MVT::v16i16, Legal);
      // Don't lower v32i8 because there is no 128-bit byte mul

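      // With AVX2, a 256-bit byte blend (VSELECT on v32i8) maps directly to
      // VPBLENDVB, so it can stay Legal here.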
      setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);

      setOperationAction(ISD::SDIV, MVT::v8i32, Custom);
    } else {
      setOperationAction(ISD::ADD, MVT::v4i64, Custom);
      setOperationAction(ISD::ADD, MVT::v8i32, Custom);
      setOperationAction(ISD::ADD, MVT::v16i16, Custom);
      setOperationAction(ISD::ADD, MVT::v32i8, Custom);

      setOperationAction(ISD::SUB, MVT::v4i64, Custom);
      setOperationAction(ISD::SUB, MVT::v8i32, Custom);
      setOperationAction(ISD::SUB, MVT::v16i16, Custom);
      setOperationAction(ISD::SUB, MVT::v32i8, Custom);

      setOperationAction(ISD::MUL, MVT::v4i64, Custom);
      setOperationAction(ISD::MUL, MVT::v8i32, Custom);
      setOperationAction(ISD::MUL, MVT::v16i16, Custom);
      // Don't lower v32i8 because there is no 128-bit byte mul
    }

    // In the customized shift lowering, the legal cases in AVX2 will be
    // recognized.
    setOperationAction(ISD::SRL, MVT::v4i64, Custom);
    setOperationAction(ISD::SRL, MVT::v8i32, Custom);

    setOperationAction(ISD::SHL, MVT::v4i64, Custom);
    setOperationAction(ISD::SHL, MVT::v8i32, Custom);

    setOperationAction(ISD::SRA, MVT::v8i32, Custom);

    // Custom lower several nodes for 256-bit types.
    for (int i = MVT::FIRST_VECTOR_VALUETYPE;
         i <= MVT::LAST_VECTOR_VALUETYPE; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // Extract subvector is special because the value type
      // (result) is 128-bit but the source is 256-bit wide.
      if (VT.is128BitVector())
        setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

      // Do not attempt to custom lower other non-256-bit vectors
      if (!VT.is256BitVector())
        continue;

      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
      setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
    }

    // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
    for (int i = MVT::v32i8; i != MVT::v4i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // Do not attempt to promote non-256-bit vectors
      if (!VT.is256BitVector())
        continue;

      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType (ISD::AND, VT, MVT::v4i64);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType (ISD::OR, VT, MVT::v4i64);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType (ISD::XOR, VT, MVT::v4i64);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType (ISD::LOAD, VT, MVT::v4i64);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v4i64);
    }
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasAVX512()) {
    addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
    addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
    addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
    addRegisterClass(MVT::v8f64, &X86::VR512RegClass);

    addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
    addRegisterClass(MVT::v16i1, &X86::VK16RegClass);

    setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, Legal);
    setOperationAction(ISD::LOAD, MVT::v16f32, Legal);
    setOperationAction(ISD::LOAD, MVT::v8f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v8i64, Legal);
    setOperationAction(ISD::LOAD, MVT::v16i32, Legal);
    setOperationAction(ISD::LOAD, MVT::v16i1, Legal);

    setOperationAction(ISD::FADD, MVT::v16f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v16f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v16f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v16f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v16f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v16f32, Custom);

    setOperationAction(ISD::FADD, MVT::v8f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v8f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v8f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v8f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v8f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v8f64, Custom);
    setOperationAction(ISD::FMA, MVT::v8f64, Legal);
    setOperationAction(ISD::FMA, MVT::v16f32, Legal);
    setOperationAction(ISD::SDIV, MVT::v16i32, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
    setOperationAction(ISD::FP_ROUND, MVT::v8f32, Legal);
    setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Legal);

    setOperationAction(ISD::TRUNCATE, MVT::i1, Legal);
    setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i1, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v16i1, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v16i8, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v8i16, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64,  Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64,  Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i1,   Custom);

    setOperationAction(ISD::SETCC, MVT::v16i1, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i1,  Custom);

    setOperationAction(ISD::MUL, MVT::v8i64, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i1,  Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i1, Custom);
    setOperationAction(ISD::SELECT, MVT::v8f64,  Custom);
    setOperationAction(ISD::SELECT, MVT::v8i64,  Custom);
    setOperationAction(ISD::SELECT, MVT::v16f32, Custom);

    setOperationAction(ISD::ADD, MVT::v8i64,  Legal);
    setOperationAction(ISD::ADD, MVT::v16i32, Legal);

    setOperationAction(ISD::SUB, MVT::v8i64,  Legal);
    setOperationAction(ISD::SUB, MVT::v16i32, Legal);

    setOperationAction(ISD::MUL, MVT::v16i32, Legal);

    setOperationAction(ISD::SRL, MVT::v8i64,  Custom);
    setOperationAction(ISD::SRL, MVT::v16i32, Custom);

    setOperationAction(ISD::SHL, MVT::v8i64,  Custom);
    setOperationAction(ISD::SHL, MVT::v16i32, Custom);

    setOperationAction(ISD::SRA, MVT::v8i64,  Custom);
    setOperationAction(ISD::SRA, MVT::v16i32, Custom);

    setOperationAction(ISD::AND, MVT::v8i64,  Legal);
    setOperationAction(ISD::OR,  MVT::v8i64,  Legal);
    setOperationAction(ISD::XOR, MVT::v8i64,  Legal);
    setOperationAction(ISD::AND, MVT::v16i32, Legal);
    setOperationAction(ISD::OR,  MVT::v16i32, Legal);
    setOperationAction(ISD::XOR, MVT::v16i32, Legal);

    // Custom lower several nodes.
    for (int i = MVT::FIRST_VECTOR_VALUETYPE;
         i <= MVT::LAST_VECTOR_VALUETYPE; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      unsigned EltSize = VT.getVectorElementType().getSizeInBits();
      // Extract subvector is special because the value type
      // (result) is 256/128-bit but the source is 512-bit wide.
      if (VT.is128BitVector() || VT.is256BitVector())
        setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

      if (VT.getVectorElementType() == MVT::i1)
        setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);

      // Do not attempt to custom lower other non-512-bit vectors
      if (!VT.is512BitVector())
        continue;

      if (EltSize >= 32) {
        setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
        setOperationAction(ISD::INSERT_VECTOR_ELT,  VT, Custom);
        setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
        setOperationAction(ISD::VSELECT,            VT, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
        setOperationAction(ISD::SCALAR_TO_VECTOR,   VT, Custom);
        setOperationAction(ISD::INSERT_SUBVECTOR,   VT, Custom);
      }
    }
    for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // Do not attempt to promote non-512-bit vectors
      if (!VT.is512BitVector())
        continue;

      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v8i64);
    }
  } // has AVX-512

  // SIGN_EXTEND_INREGs are evaluated by the extend type. Handle the expansion
  // of this type with custom code.
1439 for (int VT = MVT::FIRST_VECTOR_VALUETYPE; 1440 VT != MVT::LAST_VECTOR_VALUETYPE; VT++) { 1441 setOperationAction(ISD::SIGN_EXTEND_INREG, (MVT::SimpleValueType)VT, 1442 Custom); 1443 } 1444 1445 // We want to custom lower some of our intrinsics. 1446 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); 1447 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom); 1448 1449 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't 1450 // handle type legalization for these operations here. 1451 // 1452 // FIXME: We really should do custom legalization for addition and 1453 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better 1454 // than generic legalization for 64-bit multiplication-with-overflow, though. 1455 for (unsigned i = 0, e = 3+Subtarget->is64Bit(); i != e; ++i) { 1456 // Add/Sub/Mul with overflow operations are custom lowered. 1457 MVT VT = IntVTs[i]; 1458 setOperationAction(ISD::SADDO, VT, Custom); 1459 setOperationAction(ISD::UADDO, VT, Custom); 1460 setOperationAction(ISD::SSUBO, VT, Custom); 1461 setOperationAction(ISD::USUBO, VT, Custom); 1462 setOperationAction(ISD::SMULO, VT, Custom); 1463 setOperationAction(ISD::UMULO, VT, Custom); 1464 } 1465 1466 // There are no 8-bit 3-address imul/mul instructions 1467 setOperationAction(ISD::SMULO, MVT::i8, Expand); 1468 setOperationAction(ISD::UMULO, MVT::i8, Expand); 1469 1470 if (!Subtarget->is64Bit()) { 1471 // These libcalls are not available in 32-bit. 1472 setLibcallName(RTLIB::SHL_I128, 0); 1473 setLibcallName(RTLIB::SRL_I128, 0); 1474 setLibcallName(RTLIB::SRA_I128, 0); 1475 } 1476 1477 // Combine sin / cos into one node or libcall if possible. 1478 if (Subtarget->hasSinCos()) { 1479 setLibcallName(RTLIB::SINCOS_F32, "sincosf"); 1480 setLibcallName(RTLIB::SINCOS_F64, "sincos"); 1481 if (Subtarget->isTargetDarwin()) { 1482 // For MacOSX, we don't want to the normal expansion of a libcall to 1483 // sincos. We want to issue a libcall to __sincos_stret to avoid memory 1484 // traffic. 1485 setOperationAction(ISD::FSINCOS, MVT::f64, Custom); 1486 setOperationAction(ISD::FSINCOS, MVT::f32, Custom); 1487 } 1488 } 1489 1490 // We have target-specific dag combine patterns for the following nodes: 1491 setTargetDAGCombine(ISD::VECTOR_SHUFFLE); 1492 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT); 1493 setTargetDAGCombine(ISD::VSELECT); 1494 setTargetDAGCombine(ISD::SELECT); 1495 setTargetDAGCombine(ISD::SHL); 1496 setTargetDAGCombine(ISD::SRA); 1497 setTargetDAGCombine(ISD::SRL); 1498 setTargetDAGCombine(ISD::OR); 1499 setTargetDAGCombine(ISD::AND); 1500 setTargetDAGCombine(ISD::ADD); 1501 setTargetDAGCombine(ISD::FADD); 1502 setTargetDAGCombine(ISD::FSUB); 1503 setTargetDAGCombine(ISD::FMA); 1504 setTargetDAGCombine(ISD::SUB); 1505 setTargetDAGCombine(ISD::LOAD); 1506 setTargetDAGCombine(ISD::STORE); 1507 setTargetDAGCombine(ISD::ZERO_EXTEND); 1508 setTargetDAGCombine(ISD::ANY_EXTEND); 1509 setTargetDAGCombine(ISD::SIGN_EXTEND); 1510 setTargetDAGCombine(ISD::SIGN_EXTEND_INREG); 1511 setTargetDAGCombine(ISD::TRUNCATE); 1512 setTargetDAGCombine(ISD::SINT_TO_FP); 1513 setTargetDAGCombine(ISD::SETCC); 1514 if (Subtarget->is64Bit()) 1515 setTargetDAGCombine(ISD::MUL); 1516 setTargetDAGCombine(ISD::XOR); 1517 1518 computeRegisterProperties(); 1519 1520 // On Darwin, -Os means optimize for size without hurting performance, 1521 // do not reduce the limit. 
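  // These limits bound how many individual stores SelectionDAG will emit
  // inline before falling back to a library call: e.g. with a limit of 16, a
  // 64-byte memset can become a handful of wide stores instead of a call.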
  MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
  MaxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 16 : 8;
  MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
  MaxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
  MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
  MaxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
  setPrefLoopAlignment(4); // 2^4 bytes.

  // Predictable cmovs don't hurt on Atom because it is in-order.
  PredictableSelectIsExpensive = !Subtarget->isAtom();

  setPrefFunctionAlignment(4); // 2^4 bytes.
}

EVT X86TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
  if (!VT.isVector()) return MVT::i8;
  return VT.changeVectorElementTypeToInteger();
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
  if (MaxAlign == 16)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (VTy->getBitWidth() == 128)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned EltAlign = 0;
      getMaxByValAlign(STy->getElementType(i), EltAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == 16)
        break;
    }
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. For X86, aggregates
/// that contain SSE vectors are placed at 16-byte boundaries while the rest
/// are at 4-byte boundaries.
unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const {
  if (Subtarget->is64Bit()) {
    // Max of 8 and alignment of type.
    unsigned TyAlign = TD->getABITypeAlignment(Ty);
    if (TyAlign > 8)
      return TyAlign;
    return 8;
  }

  unsigned Align = 4;
  if (Subtarget->hasSSE1())
    getMaxByValAlign(Ty, Align);
  return Align;
}

/// getOptimalMemOpType - Returns the target specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
/// lowering. If DstAlign is zero, the destination's alignment can be changed
/// freely, so it satisfies any constraint. Similarly, if SrcAlign is zero
/// there is no need to check it against an alignment requirement, probably
/// because the source does not need to be loaded. If 'IsMemset' is true, the
/// call is expanding a memset; if 'ZeroMemset' is also true, it is a memset
/// of zero. 'MemcpyStrSrc' indicates whether the memcpy source is constant
/// and therefore does not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
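/// Illustrative examples of the selection logic below (not exhaustive): a
/// 32-byte copy with fast unaligned accesses is lowered with v8i32 stores
/// when AVX2 is available and with v8f32 under plain AVX; with only SSE2 it
/// falls back to v4i32, and a copy of 8 or more bytes on x86-64 without
/// usable SSE uses i64.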
1596EVT 1597X86TargetLowering::getOptimalMemOpType(uint64_t Size, 1598 unsigned DstAlign, unsigned SrcAlign, 1599 bool IsMemset, bool ZeroMemset, 1600 bool MemcpyStrSrc, 1601 MachineFunction &MF) const { 1602 const Function *F = MF.getFunction(); 1603 if ((!IsMemset || ZeroMemset) && 1604 !F->getAttributes().hasAttribute(AttributeSet::FunctionIndex, 1605 Attribute::NoImplicitFloat)) { 1606 if (Size >= 16 && 1607 (Subtarget->isUnalignedMemAccessFast() || 1608 ((DstAlign == 0 || DstAlign >= 16) && 1609 (SrcAlign == 0 || SrcAlign >= 16)))) { 1610 if (Size >= 32) { 1611 if (Subtarget->hasInt256()) 1612 return MVT::v8i32; 1613 if (Subtarget->hasFp256()) 1614 return MVT::v8f32; 1615 } 1616 if (Subtarget->hasSSE2()) 1617 return MVT::v4i32; 1618 if (Subtarget->hasSSE1()) 1619 return MVT::v4f32; 1620 } else if (!MemcpyStrSrc && Size >= 8 && 1621 !Subtarget->is64Bit() && 1622 Subtarget->hasSSE2()) { 1623 // Do not use f64 to lower memcpy if source is string constant. It's 1624 // better to use i32 to avoid the loads. 1625 return MVT::f64; 1626 } 1627 } 1628 if (Subtarget->is64Bit() && Size >= 8) 1629 return MVT::i64; 1630 return MVT::i32; 1631} 1632 1633bool X86TargetLowering::isSafeMemOpType(MVT VT) const { 1634 if (VT == MVT::f32) 1635 return X86ScalarSSEf32; 1636 else if (VT == MVT::f64) 1637 return X86ScalarSSEf64; 1638 return true; 1639} 1640 1641bool 1642X86TargetLowering::allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const { 1643 if (Fast) 1644 *Fast = Subtarget->isUnalignedMemAccessFast(); 1645 return true; 1646} 1647 1648/// getJumpTableEncoding - Return the entry encoding for a jump table in the 1649/// current function. The returned value is a member of the 1650/// MachineJumpTableInfo::JTEntryKind enum. 1651unsigned X86TargetLowering::getJumpTableEncoding() const { 1652 // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF 1653 // symbol. 1654 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 1655 Subtarget->isPICStyleGOT()) 1656 return MachineJumpTableInfo::EK_Custom32; 1657 1658 // Otherwise, use the normal jump table encoding heuristics. 1659 return TargetLowering::getJumpTableEncoding(); 1660} 1661 1662const MCExpr * 1663X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI, 1664 const MachineBasicBlock *MBB, 1665 unsigned uid,MCContext &Ctx) const{ 1666 assert(getTargetMachine().getRelocationModel() == Reloc::PIC_ && 1667 Subtarget->isPICStyleGOT()); 1668 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF 1669 // entries. 1670 return MCSymbolRefExpr::Create(MBB->getSymbol(), 1671 MCSymbolRefExpr::VK_GOTOFF, Ctx); 1672} 1673 1674/// getPICJumpTableRelocaBase - Returns relocation base for the given PIC 1675/// jumptable. 1676SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table, 1677 SelectionDAG &DAG) const { 1678 if (!Subtarget->is64Bit()) 1679 // This doesn't have SDLoc associated with it, but is not really the 1680 // same as a Register. 1681 return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy()); 1682 return Table; 1683} 1684 1685/// getPICJumpTableRelocBaseExpr - This returns the relocation base for the 1686/// given PIC jumptable, the same as getPICJumpTableRelocBase, but as an 1687/// MCExpr. 1688const MCExpr *X86TargetLowering:: 1689getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, 1690 MCContext &Ctx) const { 1691 // X86-64 uses RIP relative addressing based on the jump table label. 
1692 if (Subtarget->isPICStyleRIPRel()) 1693 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); 1694 1695 // Otherwise, the reference is relative to the PIC base. 1696 return MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx); 1697} 1698 1699// FIXME: Why this routine is here? Move to RegInfo! 1700std::pair<const TargetRegisterClass*, uint8_t> 1701X86TargetLowering::findRepresentativeClass(MVT VT) const{ 1702 const TargetRegisterClass *RRC = 0; 1703 uint8_t Cost = 1; 1704 switch (VT.SimpleTy) { 1705 default: 1706 return TargetLowering::findRepresentativeClass(VT); 1707 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64: 1708 RRC = Subtarget->is64Bit() ? 1709 (const TargetRegisterClass*)&X86::GR64RegClass : 1710 (const TargetRegisterClass*)&X86::GR32RegClass; 1711 break; 1712 case MVT::x86mmx: 1713 RRC = &X86::VR64RegClass; 1714 break; 1715 case MVT::f32: case MVT::f64: 1716 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64: 1717 case MVT::v4f32: case MVT::v2f64: 1718 case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32: 1719 case MVT::v4f64: 1720 RRC = &X86::VR128RegClass; 1721 break; 1722 } 1723 return std::make_pair(RRC, Cost); 1724} 1725 1726bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace, 1727 unsigned &Offset) const { 1728 if (!Subtarget->isTargetLinux()) 1729 return false; 1730 1731 if (Subtarget->is64Bit()) { 1732 // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs: 1733 Offset = 0x28; 1734 if (getTargetMachine().getCodeModel() == CodeModel::Kernel) 1735 AddressSpace = 256; 1736 else 1737 AddressSpace = 257; 1738 } else { 1739 // %gs:0x14 on i386 1740 Offset = 0x14; 1741 AddressSpace = 256; 1742 } 1743 return true; 1744} 1745 1746//===----------------------------------------------------------------------===// 1747// Return Value Calling Convention Implementation 1748//===----------------------------------------------------------------------===// 1749 1750#include "X86GenCallingConv.inc" 1751 1752bool 1753X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv, 1754 MachineFunction &MF, bool isVarArg, 1755 const SmallVectorImpl<ISD::OutputArg> &Outs, 1756 LLVMContext &Context) const { 1757 SmallVector<CCValAssign, 16> RVLocs; 1758 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 1759 RVLocs, Context); 1760 return CCInfo.CheckReturn(Outs, RetCC_X86); 1761} 1762 1763SDValue 1764X86TargetLowering::LowerReturn(SDValue Chain, 1765 CallingConv::ID CallConv, bool isVarArg, 1766 const SmallVectorImpl<ISD::OutputArg> &Outs, 1767 const SmallVectorImpl<SDValue> &OutVals, 1768 SDLoc dl, SelectionDAG &DAG) const { 1769 MachineFunction &MF = DAG.getMachineFunction(); 1770 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1771 1772 SmallVector<CCValAssign, 16> RVLocs; 1773 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 1774 RVLocs, *DAG.getContext()); 1775 CCInfo.AnalyzeReturn(Outs, RetCC_X86); 1776 1777 SDValue Flag; 1778 SmallVector<SDValue, 6> RetOps; 1779 RetOps.push_back(Chain); // Operand #0 = Chain (updated below) 1780 // Operand #1 = Bytes To Pop 1781 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), 1782 MVT::i16)); 1783 1784 // Copy the result values into the output registers. 
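  // RetCC_X86 has already decided where each value lives (typically EAX/RAX
  // for integers and XMM0 or the x87 ST0 stack for floating point, depending
  // on subtarget and ABI); the loop below materializes the copies and handles
  // the ST0/ST1 and MMX special cases explicitly.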
1785 for (unsigned i = 0; i != RVLocs.size(); ++i) { 1786 CCValAssign &VA = RVLocs[i]; 1787 assert(VA.isRegLoc() && "Can only return in registers!"); 1788 SDValue ValToCopy = OutVals[i]; 1789 EVT ValVT = ValToCopy.getValueType(); 1790 1791 // Promote values to the appropriate types 1792 if (VA.getLocInfo() == CCValAssign::SExt) 1793 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy); 1794 else if (VA.getLocInfo() == CCValAssign::ZExt) 1795 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy); 1796 else if (VA.getLocInfo() == CCValAssign::AExt) 1797 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy); 1798 else if (VA.getLocInfo() == CCValAssign::BCvt) 1799 ValToCopy = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), ValToCopy); 1800 1801 // If this is x86-64, and we disabled SSE, we can't return FP values, 1802 // or SSE or MMX vectors. 1803 if ((ValVT == MVT::f32 || ValVT == MVT::f64 || 1804 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) && 1805 (Subtarget->is64Bit() && !Subtarget->hasSSE1())) { 1806 report_fatal_error("SSE register return with SSE disabled"); 1807 } 1808 // Likewise we can't return F64 values with SSE1 only. gcc does so, but 1809 // llvm-gcc has never done it right and no one has noticed, so this 1810 // should be OK for now. 1811 if (ValVT == MVT::f64 && 1812 (Subtarget->is64Bit() && !Subtarget->hasSSE2())) 1813 report_fatal_error("SSE2 register return with SSE2 disabled"); 1814 1815 // Returns in ST0/ST1 are handled specially: these are pushed as operands to 1816 // the RET instruction and handled by the FP Stackifier. 1817 if (VA.getLocReg() == X86::ST0 || 1818 VA.getLocReg() == X86::ST1) { 1819 // If this is a copy from an xmm register to ST(0), use an FPExtend to 1820 // change the value to the FP stack register class. 1821 if (isScalarFPTypeInSSEReg(VA.getValVT())) 1822 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy); 1823 RetOps.push_back(ValToCopy); 1824 // Don't emit a copytoreg. 1825 continue; 1826 } 1827 1828 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64 1829 // which is returned in RAX / RDX. 1830 if (Subtarget->is64Bit()) { 1831 if (ValVT == MVT::x86mmx) { 1832 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) { 1833 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy); 1834 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, 1835 ValToCopy); 1836 // If we don't have SSE2 available, convert to v4f32 so the generated 1837 // register is legal. 1838 if (!Subtarget->hasSSE2()) 1839 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,ValToCopy); 1840 } 1841 } 1842 } 1843 1844 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag); 1845 Flag = Chain.getValue(1); 1846 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 1847 } 1848 1849 // The x86-64 ABIs require that for returning structs by value we copy 1850 // the sret argument into %rax/%eax (depending on ABI) for the return. 1851 // Win32 requires us to put the sret argument to %eax as well. 1852 // We saved the argument into a virtual register in the entry block, 1853 // so now we copy the value out and into %rax/%eax. 
1854 if (DAG.getMachineFunction().getFunction()->hasStructRetAttr() && 1855 (Subtarget->is64Bit() || Subtarget->isTargetWindows())) { 1856 MachineFunction &MF = DAG.getMachineFunction(); 1857 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1858 unsigned Reg = FuncInfo->getSRetReturnReg(); 1859 assert(Reg && 1860 "SRetReturnReg should have been set in LowerFormalArguments()."); 1861 SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy()); 1862 1863 unsigned RetValReg 1864 = (Subtarget->is64Bit() && !Subtarget->isTarget64BitILP32()) ? 1865 X86::RAX : X86::EAX; 1866 Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag); 1867 Flag = Chain.getValue(1); 1868 1869 // RAX/EAX now acts like a return value. 1870 RetOps.push_back(DAG.getRegister(RetValReg, getPointerTy())); 1871 } 1872 1873 RetOps[0] = Chain; // Update chain. 1874 1875 // Add the flag if we have it. 1876 if (Flag.getNode()) 1877 RetOps.push_back(Flag); 1878 1879 return DAG.getNode(X86ISD::RET_FLAG, dl, 1880 MVT::Other, &RetOps[0], RetOps.size()); 1881} 1882 1883bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { 1884 if (N->getNumValues() != 1) 1885 return false; 1886 if (!N->hasNUsesOfValue(1, 0)) 1887 return false; 1888 1889 SDValue TCChain = Chain; 1890 SDNode *Copy = *N->use_begin(); 1891 if (Copy->getOpcode() == ISD::CopyToReg) { 1892 // If the copy has a glue operand, we conservatively assume it isn't safe to 1893 // perform a tail call. 1894 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue) 1895 return false; 1896 TCChain = Copy->getOperand(0); 1897 } else if (Copy->getOpcode() != ISD::FP_EXTEND) 1898 return false; 1899 1900 bool HasRet = false; 1901 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); 1902 UI != UE; ++UI) { 1903 if (UI->getOpcode() != X86ISD::RET_FLAG) 1904 return false; 1905 HasRet = true; 1906 } 1907 1908 if (!HasRet) 1909 return false; 1910 1911 Chain = TCChain; 1912 return true; 1913} 1914 1915MVT 1916X86TargetLowering::getTypeForExtArgOrReturn(MVT VT, 1917 ISD::NodeType ExtendKind) const { 1918 MVT ReturnMVT; 1919 // TODO: Is this also valid on 32-bit? 1920 if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND) 1921 ReturnMVT = MVT::i8; 1922 else 1923 ReturnMVT = MVT::i32; 1924 1925 MVT MinVT = getRegisterType(ReturnMVT); 1926 return VT.bitsLT(MinVT) ? MinVT : VT; 1927} 1928 1929/// LowerCallResult - Lower the result values of a call into the 1930/// appropriate copies out of appropriate physical registers. 1931/// 1932SDValue 1933X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 1934 CallingConv::ID CallConv, bool isVarArg, 1935 const SmallVectorImpl<ISD::InputArg> &Ins, 1936 SDLoc dl, SelectionDAG &DAG, 1937 SmallVectorImpl<SDValue> &InVals) const { 1938 1939 // Assign locations to each value returned by this call. 1940 SmallVector<CCValAssign, 16> RVLocs; 1941 bool Is64Bit = Subtarget->is64Bit(); 1942 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 1943 getTargetMachine(), RVLocs, *DAG.getContext()); 1944 CCInfo.AnalyzeCallResult(Ins, RetCC_X86); 1945 1946 // Copy all of the result registers out of their specified physreg. 
1947 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 1948 CCValAssign &VA = RVLocs[i]; 1949 EVT CopyVT = VA.getValVT(); 1950 1951 // If this is x86-64, and we disabled SSE, we can't return FP values 1952 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) && 1953 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) { 1954 report_fatal_error("SSE register return with SSE disabled"); 1955 } 1956 1957 SDValue Val; 1958 1959 // If this is a call to a function that returns an fp value on the floating 1960 // point stack, we must guarantee the value is popped from the stack, so 1961 // a CopyFromReg is not good enough - the copy instruction may be eliminated 1962 // if the return value is not used. We use the FpPOP_RETVAL instruction 1963 // instead. 1964 if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) { 1965 // If we prefer to use the value in xmm registers, copy it out as f80 and 1966 // use a truncate to move it from fp stack reg to xmm reg. 1967 if (isScalarFPTypeInSSEReg(VA.getValVT())) CopyVT = MVT::f80; 1968 SDValue Ops[] = { Chain, InFlag }; 1969 Chain = SDValue(DAG.getMachineNode(X86::FpPOP_RETVAL, dl, CopyVT, 1970 MVT::Other, MVT::Glue, Ops), 1); 1971 Val = Chain.getValue(0); 1972 1973 // Round the f80 to the right size, which also moves it to the appropriate 1974 // xmm register. 1975 if (CopyVT != VA.getValVT()) 1976 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val, 1977 // This truncation won't change the value. 1978 DAG.getIntPtrConstant(1)); 1979 } else { 1980 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), 1981 CopyVT, InFlag).getValue(1); 1982 Val = Chain.getValue(0); 1983 } 1984 InFlag = Chain.getValue(2); 1985 InVals.push_back(Val); 1986 } 1987 1988 return Chain; 1989} 1990 1991//===----------------------------------------------------------------------===// 1992// C & StdCall & Fast Calling Convention implementation 1993//===----------------------------------------------------------------------===// 1994// StdCall calling convention seems to be standard for many Windows' API 1995// routines and around. It differs from C calling convention just a little: 1996// callee should clean up the stack, not caller. Symbols should be also 1997// decorated in some fancy way :) It doesn't support any vector arguments. 1998// For info on fast calling convention see Fast Calling Convention (tail call) 1999// implementation LowerX86_32FastCCCallTo. 2000 2001/// CallIsStructReturn - Determines whether a call uses struct return 2002/// semantics. 2003enum StructReturnType { 2004 NotStructReturn, 2005 RegStructReturn, 2006 StackStructReturn 2007}; 2008static StructReturnType 2009callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) { 2010 if (Outs.empty()) 2011 return NotStructReturn; 2012 2013 const ISD::ArgFlagsTy &Flags = Outs[0].Flags; 2014 if (!Flags.isSRet()) 2015 return NotStructReturn; 2016 if (Flags.isInReg()) 2017 return RegStructReturn; 2018 return StackStructReturn; 2019} 2020 2021/// ArgsAreStructReturn - Determines whether a function uses struct 2022/// return semantics. 
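/// Only the first incoming argument needs to be inspected because a hidden
/// sret pointer, when present, is passed first; if that argument is also
/// marked 'inreg' the pointer arrives in a register (RegStructReturn) rather
/// than on the stack (StackStructReturn).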
2023static StructReturnType 2024argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) { 2025 if (Ins.empty()) 2026 return NotStructReturn; 2027 2028 const ISD::ArgFlagsTy &Flags = Ins[0].Flags; 2029 if (!Flags.isSRet()) 2030 return NotStructReturn; 2031 if (Flags.isInReg()) 2032 return RegStructReturn; 2033 return StackStructReturn; 2034} 2035 2036/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 2037/// by "Src" to address "Dst" with size and alignment information specified by 2038/// the specific parameter attribute. The copy will be passed as a byval 2039/// function parameter. 2040static SDValue 2041CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, 2042 ISD::ArgFlagsTy Flags, SelectionDAG &DAG, 2043 SDLoc dl) { 2044 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32); 2045 2046 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 2047 /*isVolatile*/false, /*AlwaysInline=*/true, 2048 MachinePointerInfo(), MachinePointerInfo()); 2049} 2050 2051/// IsTailCallConvention - Return true if the calling convention is one that 2052/// supports tail call optimization. 2053static bool IsTailCallConvention(CallingConv::ID CC) { 2054 return (CC == CallingConv::Fast || CC == CallingConv::GHC || 2055 CC == CallingConv::HiPE); 2056} 2057 2058/// \brief Return true if the calling convention is a C calling convention. 2059static bool IsCCallConvention(CallingConv::ID CC) { 2060 return (CC == CallingConv::C || CC == CallingConv::X86_64_Win64 || 2061 CC == CallingConv::X86_64_SysV); 2062} 2063 2064bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { 2065 if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls) 2066 return false; 2067 2068 CallSite CS(CI); 2069 CallingConv::ID CalleeCC = CS.getCallingConv(); 2070 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC)) 2071 return false; 2072 2073 return true; 2074} 2075 2076/// FuncIsMadeTailCallSafe - Return true if the function is being made into 2077/// a tailcall target by changing its ABI. 2078static bool FuncIsMadeTailCallSafe(CallingConv::ID CC, 2079 bool GuaranteedTailCallOpt) { 2080 return GuaranteedTailCallOpt && IsTailCallConvention(CC); 2081} 2082 2083SDValue 2084X86TargetLowering::LowerMemArgument(SDValue Chain, 2085 CallingConv::ID CallConv, 2086 const SmallVectorImpl<ISD::InputArg> &Ins, 2087 SDLoc dl, SelectionDAG &DAG, 2088 const CCValAssign &VA, 2089 MachineFrameInfo *MFI, 2090 unsigned i) const { 2091 // Create the nodes corresponding to a load from this parameter slot. 2092 ISD::ArgFlagsTy Flags = Ins[i].Flags; 2093 bool AlwaysUseMutable = FuncIsMadeTailCallSafe(CallConv, 2094 getTargetMachine().Options.GuaranteedTailCallOpt); 2095 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal(); 2096 EVT ValVT; 2097 2098 // If value is passed by pointer we have address passed instead of the value 2099 // itself. 2100 if (VA.getLocInfo() == CCValAssign::Indirect) 2101 ValVT = VA.getLocVT(); 2102 else 2103 ValVT = VA.getValVT(); 2104 2105 // FIXME: For now, all byval parameter objects are marked mutable. This can be 2106 // changed with more analysis. 2107 // In case of tail call optimization mark all arguments mutable. Since they 2108 // could be overwritten by lowering of arguments in case of a tail call. 2109 if (Flags.isByVal()) { 2110 unsigned Bytes = Flags.getByValSize(); 2111 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects. 
2112 int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable); 2113 return DAG.getFrameIndex(FI, getPointerTy()); 2114 } else { 2115 int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8, 2116 VA.getLocMemOffset(), isImmutable); 2117 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2118 return DAG.getLoad(ValVT, dl, Chain, FIN, 2119 MachinePointerInfo::getFixedStack(FI), 2120 false, false, false, 0); 2121 } 2122} 2123 2124SDValue 2125X86TargetLowering::LowerFormalArguments(SDValue Chain, 2126 CallingConv::ID CallConv, 2127 bool isVarArg, 2128 const SmallVectorImpl<ISD::InputArg> &Ins, 2129 SDLoc dl, 2130 SelectionDAG &DAG, 2131 SmallVectorImpl<SDValue> &InVals) 2132 const { 2133 MachineFunction &MF = DAG.getMachineFunction(); 2134 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 2135 2136 const Function* Fn = MF.getFunction(); 2137 if (Fn->hasExternalLinkage() && 2138 Subtarget->isTargetCygMing() && 2139 Fn->getName() == "main") 2140 FuncInfo->setForceFramePointer(true); 2141 2142 MachineFrameInfo *MFI = MF.getFrameInfo(); 2143 bool Is64Bit = Subtarget->is64Bit(); 2144 bool IsWindows = Subtarget->isTargetWindows(); 2145 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv); 2146 2147 assert(!(isVarArg && IsTailCallConvention(CallConv)) && 2148 "Var args not supported with calling convention fastcc, ghc or hipe"); 2149 2150 // Assign locations to all of the incoming arguments. 2151 SmallVector<CCValAssign, 16> ArgLocs; 2152 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 2153 ArgLocs, *DAG.getContext()); 2154 2155 // Allocate shadow area for Win64 2156 if (IsWin64) 2157 CCInfo.AllocateStack(32, 8); 2158 2159 CCInfo.AnalyzeFormalArguments(Ins, CC_X86); 2160 2161 unsigned LastVal = ~0U; 2162 SDValue ArgValue; 2163 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2164 CCValAssign &VA = ArgLocs[i]; 2165 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later 2166 // places. 2167 assert(VA.getValNo() != LastVal && 2168 "Don't support value assigned to multiple locs yet"); 2169 (void)LastVal; 2170 LastVal = VA.getValNo(); 2171 2172 if (VA.isRegLoc()) { 2173 EVT RegVT = VA.getLocVT(); 2174 const TargetRegisterClass *RC; 2175 if (RegVT == MVT::i32) 2176 RC = &X86::GR32RegClass; 2177 else if (Is64Bit && RegVT == MVT::i64) 2178 RC = &X86::GR64RegClass; 2179 else if (RegVT == MVT::f32) 2180 RC = &X86::FR32RegClass; 2181 else if (RegVT == MVT::f64) 2182 RC = &X86::FR64RegClass; 2183 else if (RegVT.is512BitVector()) 2184 RC = &X86::VR512RegClass; 2185 else if (RegVT.is256BitVector()) 2186 RC = &X86::VR256RegClass; 2187 else if (RegVT.is128BitVector()) 2188 RC = &X86::VR128RegClass; 2189 else if (RegVT == MVT::x86mmx) 2190 RC = &X86::VR64RegClass; 2191 else if (RegVT == MVT::v8i1) 2192 RC = &X86::VK8RegClass; 2193 else if (RegVT == MVT::v16i1) 2194 RC = &X86::VK16RegClass; 2195 else 2196 llvm_unreachable("Unknown argument type!"); 2197 2198 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2199 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 2200 2201 // If this is an 8 or 16-bit value, it is really passed promoted to 32 2202 // bits. Insert an assert[sz]ext to capture this, then truncate to the 2203 // right size. 
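      // For example, an i8 zeroext argument arrives in the low 8 bits of a
      // 32-bit register; the AssertZext records that the upper 24 bits are
      // known zero, so the truncate back to i8 below is free.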
2204 if (VA.getLocInfo() == CCValAssign::SExt) 2205 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, 2206 DAG.getValueType(VA.getValVT())); 2207 else if (VA.getLocInfo() == CCValAssign::ZExt) 2208 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, 2209 DAG.getValueType(VA.getValVT())); 2210 else if (VA.getLocInfo() == CCValAssign::BCvt) 2211 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue); 2212 2213 if (VA.isExtInLoc()) { 2214 // Handle MMX values passed in XMM regs. 2215 if (RegVT.isVector()) 2216 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue); 2217 else 2218 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 2219 } 2220 } else { 2221 assert(VA.isMemLoc()); 2222 ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i); 2223 } 2224 2225 // If value is passed via pointer - do a load. 2226 if (VA.getLocInfo() == CCValAssign::Indirect) 2227 ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, 2228 MachinePointerInfo(), false, false, false, 0); 2229 2230 InVals.push_back(ArgValue); 2231 } 2232 2233 // The x86-64 ABIs require that for returning structs by value we copy 2234 // the sret argument into %rax/%eax (depending on ABI) for the return. 2235 // Win32 requires us to put the sret argument to %eax as well. 2236 // Save the argument into a virtual register so that we can access it 2237 // from the return points. 2238 if (MF.getFunction()->hasStructRetAttr() && 2239 (Subtarget->is64Bit() || Subtarget->isTargetWindows())) { 2240 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 2241 unsigned Reg = FuncInfo->getSRetReturnReg(); 2242 if (!Reg) { 2243 MVT PtrTy = getPointerTy(); 2244 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy)); 2245 FuncInfo->setSRetReturnReg(Reg); 2246 } 2247 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]); 2248 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain); 2249 } 2250 2251 unsigned StackSize = CCInfo.getNextStackOffset(); 2252 // Align stack specially for tail calls. 2253 if (FuncIsMadeTailCallSafe(CallConv, 2254 MF.getTarget().Options.GuaranteedTailCallOpt)) 2255 StackSize = GetAlignedArgumentStackSize(StackSize, DAG); 2256 2257 // If the function takes variable number of arguments, make a frame index for 2258 // the start of the first vararg value... for expansion of llvm.va_start. 2259 if (isVarArg) { 2260 if (Is64Bit || (CallConv != CallingConv::X86_FastCall && 2261 CallConv != CallingConv::X86_ThisCall)) { 2262 FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize,true)); 2263 } 2264 if (Is64Bit) { 2265 unsigned TotalNumIntRegs = 0, TotalNumXMMRegs = 0; 2266 2267 // FIXME: We should really autogenerate these arrays 2268 static const uint16_t GPR64ArgRegsWin64[] = { 2269 X86::RCX, X86::RDX, X86::R8, X86::R9 2270 }; 2271 static const uint16_t GPR64ArgRegs64Bit[] = { 2272 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9 2273 }; 2274 static const uint16_t XMMArgRegs64Bit[] = { 2275 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 2276 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 2277 }; 2278 const uint16_t *GPR64ArgRegs; 2279 unsigned NumXMMRegs = 0; 2280 2281 if (IsWin64) { 2282 // The XMM registers which might contain var arg parameters are shadowed 2283 // in their paired GPR. So we only need to save the GPR to their home 2284 // slots. 
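        // Win64 passes the first four arguments in RCX, RDX, R8 and R9 (or
        // XMM0-XMM3 for floating point), each with a fixed 8-byte home slot
        // in the caller-allocated 32-byte shadow area, so only those four
        // GPRs can ever hold vararg values here.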
2285 TotalNumIntRegs = 4; 2286 GPR64ArgRegs = GPR64ArgRegsWin64; 2287 } else { 2288 TotalNumIntRegs = 6; TotalNumXMMRegs = 8; 2289 GPR64ArgRegs = GPR64ArgRegs64Bit; 2290 2291 NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs64Bit, 2292 TotalNumXMMRegs); 2293 } 2294 unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs, 2295 TotalNumIntRegs); 2296 2297 bool NoImplicitFloatOps = Fn->getAttributes(). 2298 hasAttribute(AttributeSet::FunctionIndex, Attribute::NoImplicitFloat); 2299 assert(!(NumXMMRegs && !Subtarget->hasSSE1()) && 2300 "SSE register cannot be used when SSE is disabled!"); 2301 assert(!(NumXMMRegs && MF.getTarget().Options.UseSoftFloat && 2302 NoImplicitFloatOps) && 2303 "SSE register cannot be used when SSE is disabled!"); 2304 if (MF.getTarget().Options.UseSoftFloat || NoImplicitFloatOps || 2305 !Subtarget->hasSSE1()) 2306 // Kernel mode asks for SSE to be disabled, so don't push them 2307 // on the stack. 2308 TotalNumXMMRegs = 0; 2309 2310 if (IsWin64) { 2311 const TargetFrameLowering &TFI = *getTargetMachine().getFrameLowering(); 2312 // Get to the caller-allocated home save location. Add 8 to account 2313 // for the return address. 2314 int HomeOffset = TFI.getOffsetOfLocalArea() + 8; 2315 FuncInfo->setRegSaveFrameIndex( 2316 MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false)); 2317 // Fixup to set vararg frame on shadow area (4 x i64). 2318 if (NumIntRegs < 4) 2319 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex()); 2320 } else { 2321 // For X86-64, if there are vararg parameters that are passed via 2322 // registers, then we must store them to their spots on the stack so 2323 // they may be loaded by deferencing the result of va_next. 2324 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8); 2325 FuncInfo->setVarArgsFPOffset(TotalNumIntRegs * 8 + NumXMMRegs * 16); 2326 FuncInfo->setRegSaveFrameIndex( 2327 MFI->CreateStackObject(TotalNumIntRegs * 8 + TotalNumXMMRegs * 16, 16, 2328 false)); 2329 } 2330 2331 // Store the integer parameter registers. 2332 SmallVector<SDValue, 8> MemOps; 2333 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), 2334 getPointerTy()); 2335 unsigned Offset = FuncInfo->getVarArgsGPOffset(); 2336 for (; NumIntRegs != TotalNumIntRegs; ++NumIntRegs) { 2337 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN, 2338 DAG.getIntPtrConstant(Offset)); 2339 unsigned VReg = MF.addLiveIn(GPR64ArgRegs[NumIntRegs], 2340 &X86::GR64RegClass); 2341 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 2342 SDValue Store = 2343 DAG.getStore(Val.getValue(1), dl, Val, FIN, 2344 MachinePointerInfo::getFixedStack( 2345 FuncInfo->getRegSaveFrameIndex(), Offset), 2346 false, false, 0); 2347 MemOps.push_back(Store); 2348 Offset += 8; 2349 } 2350 2351 if (TotalNumXMMRegs != 0 && NumXMMRegs != TotalNumXMMRegs) { 2352 // Now store the XMM (fp + vector) parameter registers. 
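        // Rather than emitting plain stores, these are wrapped in a single
        // VASTART_SAVE_XMM_REGS pseudo; after instruction selection it is
        // expanded into code that tests %al and skips the XMM spills when the
        // caller passed no vector arguments.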
2353 SmallVector<SDValue, 11> SaveXMMOps; 2354 SaveXMMOps.push_back(Chain); 2355 2356 unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass); 2357 SDValue ALVal = DAG.getCopyFromReg(DAG.getEntryNode(), dl, AL, MVT::i8); 2358 SaveXMMOps.push_back(ALVal); 2359 2360 SaveXMMOps.push_back(DAG.getIntPtrConstant( 2361 FuncInfo->getRegSaveFrameIndex())); 2362 SaveXMMOps.push_back(DAG.getIntPtrConstant( 2363 FuncInfo->getVarArgsFPOffset())); 2364 2365 for (; NumXMMRegs != TotalNumXMMRegs; ++NumXMMRegs) { 2366 unsigned VReg = MF.addLiveIn(XMMArgRegs64Bit[NumXMMRegs], 2367 &X86::VR128RegClass); 2368 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::v4f32); 2369 SaveXMMOps.push_back(Val); 2370 } 2371 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl, 2372 MVT::Other, 2373 &SaveXMMOps[0], SaveXMMOps.size())); 2374 } 2375 2376 if (!MemOps.empty()) 2377 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2378 &MemOps[0], MemOps.size()); 2379 } 2380 } 2381 2382 // Some CCs need callee pop. 2383 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg, 2384 MF.getTarget().Options.GuaranteedTailCallOpt)) { 2385 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything. 2386 } else { 2387 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing. 2388 // If this is an sret function, the return should pop the hidden pointer. 2389 if (!Is64Bit && !IsTailCallConvention(CallConv) && !IsWindows && 2390 argsAreStructReturn(Ins) == StackStructReturn) 2391 FuncInfo->setBytesToPopOnReturn(4); 2392 } 2393 2394 if (!Is64Bit) { 2395 // RegSaveFrameIndex is X86-64 only. 2396 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA); 2397 if (CallConv == CallingConv::X86_FastCall || 2398 CallConv == CallingConv::X86_ThisCall) 2399 // fastcc functions can't have varargs. 2400 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA); 2401 } 2402 2403 FuncInfo->setArgumentStackSize(StackSize); 2404 2405 return Chain; 2406} 2407 2408SDValue 2409X86TargetLowering::LowerMemOpCallTo(SDValue Chain, 2410 SDValue StackPtr, SDValue Arg, 2411 SDLoc dl, SelectionDAG &DAG, 2412 const CCValAssign &VA, 2413 ISD::ArgFlagsTy Flags) const { 2414 unsigned LocMemOffset = VA.getLocMemOffset(); 2415 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 2416 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 2417 if (Flags.isByVal()) 2418 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl); 2419 2420 return DAG.getStore(Chain, dl, Arg, PtrOff, 2421 MachinePointerInfo::getStack(LocMemOffset), 2422 false, false, 0); 2423} 2424 2425/// EmitTailCallLoadRetAddr - Emit a load of return address if tail call 2426/// optimization is performed and it is required. 2427SDValue 2428X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG, 2429 SDValue &OutRetAddr, SDValue Chain, 2430 bool IsTailCall, bool Is64Bit, 2431 int FPDiff, SDLoc dl) const { 2432 // Adjust the Return address stack slot. 2433 EVT VT = getPointerTy(); 2434 OutRetAddr = getReturnAddressFrameIndex(DAG); 2435 2436 // Load the "old" Return address. 2437 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(), 2438 false, false, false, 0); 2439 return SDValue(OutRetAddr.getNode(), 1); 2440} 2441 2442/// EmitTailCallStoreRetAddr - Emit a store of the return address if tail call 2443/// optimization is performed and it is required (FPDiff!=0). 
2444static SDValue 2445EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF, 2446 SDValue Chain, SDValue RetAddrFrIdx, EVT PtrVT, 2447 unsigned SlotSize, int FPDiff, SDLoc dl) { 2448 // Store the return address to the appropriate stack slot. 2449 if (!FPDiff) return Chain; 2450 // Calculate the new stack slot for the return address. 2451 int NewReturnAddrFI = 2452 MF.getFrameInfo()->CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize, 2453 false); 2454 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT); 2455 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx, 2456 MachinePointerInfo::getFixedStack(NewReturnAddrFI), 2457 false, false, 0); 2458 return Chain; 2459} 2460 2461SDValue 2462X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, 2463 SmallVectorImpl<SDValue> &InVals) const { 2464 SelectionDAG &DAG = CLI.DAG; 2465 SDLoc &dl = CLI.DL; 2466 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 2467 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 2468 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 2469 SDValue Chain = CLI.Chain; 2470 SDValue Callee = CLI.Callee; 2471 CallingConv::ID CallConv = CLI.CallConv; 2472 bool &isTailCall = CLI.IsTailCall; 2473 bool isVarArg = CLI.IsVarArg; 2474 2475 MachineFunction &MF = DAG.getMachineFunction(); 2476 bool Is64Bit = Subtarget->is64Bit(); 2477 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv); 2478 bool IsWindows = Subtarget->isTargetWindows(); 2479 StructReturnType SR = callIsStructReturn(Outs); 2480 bool IsSibcall = false; 2481 2482 if (MF.getTarget().Options.DisableTailCalls) 2483 isTailCall = false; 2484 2485 if (isTailCall) { 2486 // Check if it's really possible to do a tail call. 2487 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, 2488 isVarArg, SR != NotStructReturn, 2489 MF.getFunction()->hasStructRetAttr(), CLI.RetTy, 2490 Outs, OutVals, Ins, DAG); 2491 2492 // Sibcalls are automatically detected tailcalls which do not require 2493 // ABI changes. 2494 if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall) 2495 IsSibcall = true; 2496 2497 if (isTailCall) 2498 ++NumTailCalls; 2499 } 2500 2501 assert(!(isVarArg && IsTailCallConvention(CallConv)) && 2502 "Var args not supported with calling convention fastcc, ghc or hipe"); 2503 2504 // Analyze operands of the call, assigning locations to each operand. 2505 SmallVector<CCValAssign, 16> ArgLocs; 2506 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 2507 ArgLocs, *DAG.getContext()); 2508 2509 // Allocate shadow area for Win64 2510 if (IsWin64) 2511 CCInfo.AllocateStack(32, 8); 2512 2513 CCInfo.AnalyzeCallOperands(Outs, CC_X86); 2514 2515 // Get a count of how many bytes are to be pushed on the stack. 2516 unsigned NumBytes = CCInfo.getNextStackOffset(); 2517 if (IsSibcall) 2518 // This is a sibcall. The memory operands are available in caller's 2519 // own caller's stack. 2520 NumBytes = 0; 2521 else if (getTargetMachine().Options.GuaranteedTailCallOpt && 2522 IsTailCallConvention(CallConv)) 2523 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG); 2524 2525 int FPDiff = 0; 2526 if (isTailCall && !IsSibcall) { 2527 // Lower arguments at fp - stackoffset + fpdiff. 2528 X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>(); 2529 unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn(); 2530 2531 FPDiff = NumBytesCallerPushed - NumBytes; 2532 2533 // Set the delta of movement of the returnaddr stackslot. 2534 // But only set if delta is greater than previous delta. 
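    // FPDiff is NumBytesCallerPushed - NumBytes, so it is negative exactly
    // when the callee needs more argument space than the caller provided; a
    // more negative value means the return address has to move further down
    // the stack, which is why the minimum value is tracked here.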
2535 if (FPDiff < X86Info->getTCReturnAddrDelta()) 2536 X86Info->setTCReturnAddrDelta(FPDiff); 2537 } 2538 2539 if (!IsSibcall) 2540 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true), 2541 dl); 2542 2543 SDValue RetAddrFrIdx; 2544 // Load return address for tail calls. 2545 if (isTailCall && FPDiff) 2546 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall, 2547 Is64Bit, FPDiff, dl); 2548 2549 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 2550 SmallVector<SDValue, 8> MemOpChains; 2551 SDValue StackPtr; 2552 2553 // Walk the register/memloc assignments, inserting copies/loads. In the case 2554 // of tail call optimization arguments are handle later. 2555 const X86RegisterInfo *RegInfo = 2556 static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo()); 2557 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2558 CCValAssign &VA = ArgLocs[i]; 2559 EVT RegVT = VA.getLocVT(); 2560 SDValue Arg = OutVals[i]; 2561 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2562 bool isByVal = Flags.isByVal(); 2563 2564 // Promote the value if needed. 2565 switch (VA.getLocInfo()) { 2566 default: llvm_unreachable("Unknown loc info!"); 2567 case CCValAssign::Full: break; 2568 case CCValAssign::SExt: 2569 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg); 2570 break; 2571 case CCValAssign::ZExt: 2572 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg); 2573 break; 2574 case CCValAssign::AExt: 2575 if (RegVT.is128BitVector()) { 2576 // Special case: passing MMX values in XMM registers. 2577 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg); 2578 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg); 2579 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg); 2580 } else 2581 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg); 2582 break; 2583 case CCValAssign::BCvt: 2584 Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg); 2585 break; 2586 case CCValAssign::Indirect: { 2587 // Store the argument. 2588 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT()); 2589 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex(); 2590 Chain = DAG.getStore(Chain, dl, Arg, SpillSlot, 2591 MachinePointerInfo::getFixedStack(FI), 2592 false, false, 0); 2593 Arg = SpillSlot; 2594 break; 2595 } 2596 } 2597 2598 if (VA.isRegLoc()) { 2599 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 2600 if (isVarArg && IsWin64) { 2601 // Win64 ABI requires argument XMM reg to be copied to the corresponding 2602 // shadow reg if callee is a varargs function. 2603 unsigned ShadowReg = 0; 2604 switch (VA.getLocReg()) { 2605 case X86::XMM0: ShadowReg = X86::RCX; break; 2606 case X86::XMM1: ShadowReg = X86::RDX; break; 2607 case X86::XMM2: ShadowReg = X86::R8; break; 2608 case X86::XMM3: ShadowReg = X86::R9; break; 2609 } 2610 if (ShadowReg) 2611 RegsToPass.push_back(std::make_pair(ShadowReg, Arg)); 2612 } 2613 } else if (!IsSibcall && (!isTailCall || isByVal)) { 2614 assert(VA.isMemLoc()); 2615 if (StackPtr.getNode() == 0) 2616 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(), 2617 getPointerTy()); 2618 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg, 2619 dl, DAG, VA, Flags)); 2620 } 2621 } 2622 2623 if (!MemOpChains.empty()) 2624 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2625 &MemOpChains[0], MemOpChains.size()); 2626 2627 if (Subtarget->isPICStyleGOT()) { 2628 // ELF / PIC requires GOT in the EBX register before function calls via PLT 2629 // GOT pointer. 
2630 if (!isTailCall) { 2631 RegsToPass.push_back(std::make_pair(unsigned(X86::EBX), 2632 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy()))); 2633 } else { 2634 // If we are tail calling and generating PIC/GOT style code load the 2635 // address of the callee into ECX. The value in ecx is used as target of 2636 // the tail jump. This is done to circumvent the ebx/callee-saved problem 2637 // for tail calls on PIC/GOT architectures. Normally we would just put the 2638 // address of GOT into ebx and then call target@PLT. But for tail calls 2639 // ebx would be restored (since ebx is callee saved) before jumping to the 2640 // target@PLT. 2641 2642 // Note: The actual moving to ECX is done further down. 2643 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee); 2644 if (G && !G->getGlobal()->hasHiddenVisibility() && 2645 !G->getGlobal()->hasProtectedVisibility()) 2646 Callee = LowerGlobalAddress(Callee, DAG); 2647 else if (isa<ExternalSymbolSDNode>(Callee)) 2648 Callee = LowerExternalSymbol(Callee, DAG); 2649 } 2650 } 2651 2652 if (Is64Bit && isVarArg && !IsWin64) { 2653 // From AMD64 ABI document: 2654 // For calls that may call functions that use varargs or stdargs 2655 // (prototype-less calls or calls to functions containing ellipsis (...) in 2656 // the declaration) %al is used as hidden argument to specify the number 2657 // of SSE registers used. The contents of %al do not need to match exactly 2658 // the number of registers, but must be an ubound on the number of SSE 2659 // registers used and is in the range 0 - 8 inclusive. 2660 2661 // Count the number of XMM registers allocated. 2662 static const uint16_t XMMArgRegs[] = { 2663 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 2664 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 2665 }; 2666 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8); 2667 assert((Subtarget->hasSSE1() || !NumXMMRegs) 2668 && "SSE registers cannot be used when SSE is disabled"); 2669 2670 RegsToPass.push_back(std::make_pair(unsigned(X86::AL), 2671 DAG.getConstant(NumXMMRegs, MVT::i8))); 2672 } 2673 2674 // For tail calls lower the arguments to the 'real' stack slot. 2675 if (isTailCall) { 2676 // Force all the incoming stack arguments to be loaded from the stack 2677 // before any new outgoing arguments are stored to the stack, because the 2678 // outgoing stack slots may alias the incoming argument stack slots, and 2679 // the alias isn't otherwise explicit. This is slightly more conservative 2680 // than necessary, because it means that each store effectively depends 2681 // on every argument instead of just those arguments it would clobber. 2682 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain); 2683 2684 SmallVector<SDValue, 8> MemOpChains2; 2685 SDValue FIN; 2686 int FI = 0; 2687 if (getTargetMachine().Options.GuaranteedTailCallOpt) { 2688 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2689 CCValAssign &VA = ArgLocs[i]; 2690 if (VA.isRegLoc()) 2691 continue; 2692 assert(VA.isMemLoc()); 2693 SDValue Arg = OutVals[i]; 2694 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2695 // Create frame index. 2696 int32_t Offset = VA.getLocMemOffset()+FPDiff; 2697 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8; 2698 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true); 2699 FIN = DAG.getFrameIndex(FI, getPointerTy()); 2700 2701 if (Flags.isByVal()) { 2702 // Copy relative to framepointer. 
2703 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset()); 2704 if (StackPtr.getNode() == 0) 2705 StackPtr = DAG.getCopyFromReg(Chain, dl, 2706 RegInfo->getStackRegister(), 2707 getPointerTy()); 2708 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source); 2709 2710 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN, 2711 ArgChain, 2712 Flags, DAG, dl)); 2713 } else { 2714 // Store relative to framepointer. 2715 MemOpChains2.push_back( 2716 DAG.getStore(ArgChain, dl, Arg, FIN, 2717 MachinePointerInfo::getFixedStack(FI), 2718 false, false, 0)); 2719 } 2720 } 2721 } 2722 2723 if (!MemOpChains2.empty()) 2724 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2725 &MemOpChains2[0], MemOpChains2.size()); 2726 2727 // Store the return address to the appropriate stack slot. 2728 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx, 2729 getPointerTy(), RegInfo->getSlotSize(), 2730 FPDiff, dl); 2731 } 2732 2733 // Build a sequence of copy-to-reg nodes chained together with token chain 2734 // and flag operands which copy the outgoing args into registers. 2735 SDValue InFlag; 2736 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 2737 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 2738 RegsToPass[i].second, InFlag); 2739 InFlag = Chain.getValue(1); 2740 } 2741 2742 if (getTargetMachine().getCodeModel() == CodeModel::Large) { 2743 assert(Is64Bit && "Large code model is only legal in 64-bit mode."); 2744 // In the 64-bit large code model, we have to make all calls 2745 // through a register, since the call instruction's 32-bit 2746 // pc-relative offset may not be large enough to hold the whole 2747 // address. 2748 } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 2749 // If the callee is a GlobalAddress node (quite common, every direct call 2750 // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack 2751 // it. 2752 2753 // We should use extra load for direct calls to dllimported functions in 2754 // non-JIT mode. 2755 const GlobalValue *GV = G->getGlobal(); 2756 if (!GV->hasDLLImportLinkage()) { 2757 unsigned char OpFlags = 0; 2758 bool ExtraLoad = false; 2759 unsigned WrapperKind = ISD::DELETED_NODE; 2760 2761 // On ELF targets, in both X86-64 and X86-32 mode, direct calls to 2762 // external symbols most go through the PLT in PIC mode. If the symbol 2763 // has hidden or protected visibility, or if it is static or local, then 2764 // we don't need to use the PLT - we can directly call it. 2765 if (Subtarget->isTargetELF() && 2766 getTargetMachine().getRelocationModel() == Reloc::PIC_ && 2767 GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) { 2768 OpFlags = X86II::MO_PLT; 2769 } else if (Subtarget->isPICStyleStubAny() && 2770 (GV->isDeclaration() || GV->isWeakForLinker()) && 2771 (!Subtarget->getTargetTriple().isMacOSX() || 2772 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) { 2773 // PC-relative references to external symbols should go through $stub, 2774 // unless we're building with the leopard linker or later, which 2775 // automatically synthesizes these stubs. 2776 OpFlags = X86II::MO_DARWIN_STUB; 2777 } else if (Subtarget->isPICStyleRIPRel() && 2778 isa<Function>(GV) && 2779 cast<Function>(GV)->getAttributes(). 2780 hasAttribute(AttributeSet::FunctionIndex, 2781 Attribute::NonLazyBind)) { 2782 // If the function is marked as non-lazy, generate an indirect call 2783 // which loads from the GOT directly. 
This avoids runtime overhead 2784 // at the cost of eager binding (and one extra byte of encoding). 2785 OpFlags = X86II::MO_GOTPCREL; 2786 WrapperKind = X86ISD::WrapperRIP; 2787 ExtraLoad = true; 2788 } 2789 2790 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 2791 G->getOffset(), OpFlags); 2792 2793 // Add a wrapper if needed. 2794 if (WrapperKind != ISD::DELETED_NODE) 2795 Callee = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Callee); 2796 // Add extra indirection if needed. 2797 if (ExtraLoad) 2798 Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee, 2799 MachinePointerInfo::getGOT(), 2800 false, false, false, 0); 2801 } 2802 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 2803 unsigned char OpFlags = 0; 2804 2805 // On ELF targets, in either X86-64 or X86-32 mode, direct calls to 2806 // external symbols should go through the PLT. 2807 if (Subtarget->isTargetELF() && 2808 getTargetMachine().getRelocationModel() == Reloc::PIC_) { 2809 OpFlags = X86II::MO_PLT; 2810 } else if (Subtarget->isPICStyleStubAny() && 2811 (!Subtarget->getTargetTriple().isMacOSX() || 2812 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) { 2813 // PC-relative references to external symbols should go through $stub, 2814 // unless we're building with the leopard linker or later, which 2815 // automatically synthesizes these stubs. 2816 OpFlags = X86II::MO_DARWIN_STUB; 2817 } 2818 2819 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(), 2820 OpFlags); 2821 } 2822 2823 // Returns a chain & a flag for retval copy to use. 2824 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 2825 SmallVector<SDValue, 8> Ops; 2826 2827 if (!IsSibcall && isTailCall) { 2828 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 2829 DAG.getIntPtrConstant(0, true), InFlag, dl); 2830 InFlag = Chain.getValue(1); 2831 } 2832 2833 Ops.push_back(Chain); 2834 Ops.push_back(Callee); 2835 2836 if (isTailCall) 2837 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32)); 2838 2839 // Add argument registers to the end of the list so that they are known live 2840 // into the call. 2841 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 2842 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 2843 RegsToPass[i].second.getValueType())); 2844 2845 // Add a register mask operand representing the call-preserved registers. 2846 const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); 2847 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv); 2848 assert(Mask && "Missing call preserved mask for calling convention"); 2849 Ops.push_back(DAG.getRegisterMask(Mask)); 2850 2851 if (InFlag.getNode()) 2852 Ops.push_back(InFlag); 2853 2854 if (isTailCall) { 2855 // We used to do: 2856 //// If this is the first return lowered for this function, add the regs 2857 //// to the liveout set for the function. 2858 // This isn't right, although it's probably harmless on x86; liveouts 2859 // should be computed from returns not tail calls. Consider a void 2860 // function making a tail call to a function returning int. 2861 return DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size()); 2862 } 2863 2864 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, &Ops[0], Ops.size()); 2865 InFlag = Chain.getValue(1); 2866 2867 // Create the CALLSEQ_END node. 
2868 unsigned NumBytesForCalleeToPush;
2869 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
2870 getTargetMachine().Options.GuaranteedTailCallOpt))
2871 NumBytesForCalleeToPush = NumBytes; // Callee pops everything
2872 else if (!Is64Bit && !IsTailCallConvention(CallConv) && !IsWindows &&
2873 SR == StackStructReturn)
2874 // If this is a call to a struct-return function, the callee
2875 // pops the hidden struct pointer, so we have to push it back.
2876 // This is common for Darwin/X86, Linux & Mingw32 targets.
2877 // For MSVC Win32 targets, the caller pops the hidden struct pointer.
2878 NumBytesForCalleeToPush = 4;
2879 else
2880 NumBytesForCalleeToPush = 0; // Callee pops nothing.
2881
2882 // Returns a flag for retval copy to use.
2883 if (!IsSibcall) {
2884 Chain = DAG.getCALLSEQ_END(Chain,
2885 DAG.getIntPtrConstant(NumBytes, true),
2886 DAG.getIntPtrConstant(NumBytesForCalleeToPush,
2887 true),
2888 InFlag, dl);
2889 InFlag = Chain.getValue(1);
2890 }
2891
2892 // Handle result values, copying them out of physregs into vregs that we
2893 // return.
2894 return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
2895 Ins, dl, DAG, InVals);
2896}
2897
2898//===----------------------------------------------------------------------===//
2899// Fast Calling Convention (tail call) implementation
2900//===----------------------------------------------------------------------===//
2901
2902// Like the stdcall convention, the callee cleans up the arguments, except that
2903// ECX is reserved for storing the address of the tail-called function. Only 2
2904// registers are free for argument passing (inreg). Tail call optimization is
2905// performed provided:
2906// * tailcallopt is enabled
2907// * caller/callee are fastcc
2908// On the X86_64 architecture with GOT-style position independent code, only
2909// local (within module) calls are supported at the moment.
2910// To keep the stack aligned according to the platform ABI, the function
2911// GetAlignedArgumentStackSize ensures that the argument delta is always a
2912// multiple of the stack alignment. (Dynamic linkers such as darwin's dyld need this.)
2913// If the tail-called callee has more arguments than the caller, the
2914// caller needs to make sure that there is room to move the RETADDR to. This is
2915// achieved by reserving an area the size of the argument delta right after the
2916// original RETADDR, but before the saved frame pointer or the spilled registers,
2917// e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4)
2918// stack layout:
2919// arg1
2920// arg2
2921// RETADDR
2922// [ new RETADDR
2923// move area ]
2924// (possible EBP)
2925// ESI
2926// EDI
2927// local1 ..
2928
2929/// GetAlignedArgumentStackSize - Round the stack size up so that it is aligned,
2930/// e.g. to 16n + 12 for a 16 byte alignment requirement.
2931unsigned
2932X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
2933 SelectionDAG& DAG) const {
2934 MachineFunction &MF = DAG.getMachineFunction();
2935 const TargetMachine &TM = MF.getTarget();
2936 const X86RegisterInfo *RegInfo =
2937 static_cast<const X86RegisterInfo*>(TM.getRegisterInfo());
2938 const TargetFrameLowering &TFI = *TM.getFrameLowering();
2939 unsigned StackAlignment = TFI.getStackAlignment();
2940 uint64_t AlignMask = StackAlignment - 1;
2941 int64_t Offset = StackSize;
2942 unsigned SlotSize = RegInfo->getSlotSize();
2943 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
2944 // Number smaller than 12 so just add the difference.
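    // Illustrative example (assuming StackAlignment == 16 and SlotSize == 4, as
    // on a typical 32-bit target): StackSize == 20 gives (Offset & AlignMask) == 4,
    // which is <= 12, so Offset becomes 20 + (12 - 4) == 28 == 16*1 + 12. In the
    // else branch below, StackSize == 30 gives 14 > 12, so Offset becomes
    // (30 & ~15) + 16 + 12 == 44 == 16*2 + 12.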
2945 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask)); 2946 } else { 2947 // Mask out lower bits, add stackalignment once plus the 12 bytes. 2948 Offset = ((~AlignMask) & Offset) + StackAlignment + 2949 (StackAlignment-SlotSize); 2950 } 2951 return Offset; 2952} 2953 2954/// MatchingStackOffset - Return true if the given stack call argument is 2955/// already available in the same position (relatively) of the caller's 2956/// incoming argument stack. 2957static 2958bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, 2959 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI, 2960 const X86InstrInfo *TII) { 2961 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8; 2962 int FI = INT_MAX; 2963 if (Arg.getOpcode() == ISD::CopyFromReg) { 2964 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg(); 2965 if (!TargetRegisterInfo::isVirtualRegister(VR)) 2966 return false; 2967 MachineInstr *Def = MRI->getVRegDef(VR); 2968 if (!Def) 2969 return false; 2970 if (!Flags.isByVal()) { 2971 if (!TII->isLoadFromStackSlot(Def, FI)) 2972 return false; 2973 } else { 2974 unsigned Opcode = Def->getOpcode(); 2975 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r) && 2976 Def->getOperand(1).isFI()) { 2977 FI = Def->getOperand(1).getIndex(); 2978 Bytes = Flags.getByValSize(); 2979 } else 2980 return false; 2981 } 2982 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) { 2983 if (Flags.isByVal()) 2984 // ByVal argument is passed in as a pointer but it's now being 2985 // dereferenced. e.g. 2986 // define @foo(%struct.X* %A) { 2987 // tail call @bar(%struct.X* byval %A) 2988 // } 2989 return false; 2990 SDValue Ptr = Ld->getBasePtr(); 2991 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr); 2992 if (!FINode) 2993 return false; 2994 FI = FINode->getIndex(); 2995 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) { 2996 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg); 2997 FI = FINode->getIndex(); 2998 Bytes = Flags.getByValSize(); 2999 } else 3000 return false; 3001 3002 assert(FI != INT_MAX); 3003 if (!MFI->isFixedObjectIndex(FI)) 3004 return false; 3005 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI); 3006} 3007 3008/// IsEligibleForTailCallOptimization - Check whether the call is eligible 3009/// for tail call optimization. Targets which want to do tail call 3010/// optimization should implement this function. 3011bool 3012X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 3013 CallingConv::ID CalleeCC, 3014 bool isVarArg, 3015 bool isCalleeStructRet, 3016 bool isCallerStructRet, 3017 Type *RetTy, 3018 const SmallVectorImpl<ISD::OutputArg> &Outs, 3019 const SmallVectorImpl<SDValue> &OutVals, 3020 const SmallVectorImpl<ISD::InputArg> &Ins, 3021 SelectionDAG &DAG) const { 3022 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC)) 3023 return false; 3024 3025 // If -tailcallopt is specified, make fastcc functions tail-callable. 3026 const MachineFunction &MF = DAG.getMachineFunction(); 3027 const Function *CallerF = MF.getFunction(); 3028 3029 // If the function return type is x86_fp80 and the callee return type is not, 3030 // then the FP_EXTEND of the call result is not a nop. It's not safe to 3031 // perform a tailcall optimization here. 
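  // For example (an illustrative note, not from the original comments): a caller
  // returning x86_fp80 that tail-calls a callee returning double would need an
  // FP_EXTEND of the call result afterwards, so the call cannot simply become a
  // jump.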
3032 if (CallerF->getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty()) 3033 return false; 3034 3035 CallingConv::ID CallerCC = CallerF->getCallingConv(); 3036 bool CCMatch = CallerCC == CalleeCC; 3037 bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC); 3038 bool IsCallerWin64 = Subtarget->isCallingConvWin64(CallerCC); 3039 3040 if (getTargetMachine().Options.GuaranteedTailCallOpt) { 3041 if (IsTailCallConvention(CalleeCC) && CCMatch) 3042 return true; 3043 return false; 3044 } 3045 3046 // Look for obvious safe cases to perform tail call optimization that do not 3047 // require ABI changes. This is what gcc calls sibcall. 3048 3049 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to 3050 // emit a special epilogue. 3051 const X86RegisterInfo *RegInfo = 3052 static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo()); 3053 if (RegInfo->needsStackRealignment(MF)) 3054 return false; 3055 3056 // Also avoid sibcall optimization if either caller or callee uses struct 3057 // return semantics. 3058 if (isCalleeStructRet || isCallerStructRet) 3059 return false; 3060 3061 // An stdcall caller is expected to clean up its arguments; the callee 3062 // isn't going to do that. 3063 if (!CCMatch && CallerCC == CallingConv::X86_StdCall) 3064 return false; 3065 3066 // Do not sibcall optimize vararg calls unless all arguments are passed via 3067 // registers. 3068 if (isVarArg && !Outs.empty()) { 3069 3070 // Optimizing for varargs on Win64 is unlikely to be safe without 3071 // additional testing. 3072 if (IsCalleeWin64 || IsCallerWin64) 3073 return false; 3074 3075 SmallVector<CCValAssign, 16> ArgLocs; 3076 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), 3077 getTargetMachine(), ArgLocs, *DAG.getContext()); 3078 3079 CCInfo.AnalyzeCallOperands(Outs, CC_X86); 3080 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) 3081 if (!ArgLocs[i].isRegLoc()) 3082 return false; 3083 } 3084 3085 // If the call result is in ST0 / ST1, it needs to be popped off the x87 3086 // stack. Therefore, if it's not used by the call it is not safe to optimize 3087 // this into a sibcall. 3088 bool Unused = false; 3089 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { 3090 if (!Ins[i].Used) { 3091 Unused = true; 3092 break; 3093 } 3094 } 3095 if (Unused) { 3096 SmallVector<CCValAssign, 16> RVLocs; 3097 CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), 3098 getTargetMachine(), RVLocs, *DAG.getContext()); 3099 CCInfo.AnalyzeCallResult(Ins, RetCC_X86); 3100 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 3101 CCValAssign &VA = RVLocs[i]; 3102 if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) 3103 return false; 3104 } 3105 } 3106 3107 // If the calling conventions do not match, then we'd better make sure the 3108 // results are returned in the same way as what the caller expects. 
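  // For example (illustrative): if the callee's convention would return a value
  // in a different register or stack slot than the caller's convention expects,
  // a sibcall would leave the result in the wrong place, so the two location
  // lists computed below must agree.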
3109 if (!CCMatch) { 3110 SmallVector<CCValAssign, 16> RVLocs1; 3111 CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), 3112 getTargetMachine(), RVLocs1, *DAG.getContext()); 3113 CCInfo1.AnalyzeCallResult(Ins, RetCC_X86); 3114 3115 SmallVector<CCValAssign, 16> RVLocs2; 3116 CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), 3117 getTargetMachine(), RVLocs2, *DAG.getContext()); 3118 CCInfo2.AnalyzeCallResult(Ins, RetCC_X86); 3119 3120 if (RVLocs1.size() != RVLocs2.size()) 3121 return false; 3122 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) { 3123 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc()) 3124 return false; 3125 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo()) 3126 return false; 3127 if (RVLocs1[i].isRegLoc()) { 3128 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg()) 3129 return false; 3130 } else { 3131 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset()) 3132 return false; 3133 } 3134 } 3135 } 3136 3137 // If the callee takes no arguments then go on to check the results of the 3138 // call. 3139 if (!Outs.empty()) { 3140 // Check if stack adjustment is needed. For now, do not do this if any 3141 // argument is passed on the stack. 3142 SmallVector<CCValAssign, 16> ArgLocs; 3143 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), 3144 getTargetMachine(), ArgLocs, *DAG.getContext()); 3145 3146 // Allocate shadow area for Win64 3147 if (IsCalleeWin64) 3148 CCInfo.AllocateStack(32, 8); 3149 3150 CCInfo.AnalyzeCallOperands(Outs, CC_X86); 3151 if (CCInfo.getNextStackOffset()) { 3152 MachineFunction &MF = DAG.getMachineFunction(); 3153 if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn()) 3154 return false; 3155 3156 // Check if the arguments are already laid out in the right way as 3157 // the caller's fixed stack objects. 3158 MachineFrameInfo *MFI = MF.getFrameInfo(); 3159 const MachineRegisterInfo *MRI = &MF.getRegInfo(); 3160 const X86InstrInfo *TII = 3161 ((const X86TargetMachine&)getTargetMachine()).getInstrInfo(); 3162 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 3163 CCValAssign &VA = ArgLocs[i]; 3164 SDValue Arg = OutVals[i]; 3165 ISD::ArgFlagsTy Flags = Outs[i].Flags; 3166 if (VA.getLocInfo() == CCValAssign::Indirect) 3167 return false; 3168 if (!VA.isRegLoc()) { 3169 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, 3170 MFI, MRI, TII)) 3171 return false; 3172 } 3173 } 3174 } 3175 3176 // If the tailcall address may be in a register, then make sure it's 3177 // possible to register allocate for it. In 32-bit, the call address can 3178 // only target EAX, EDX, or ECX since the tail call must be scheduled after 3179 // callee-saved registers are restored. These happen to be the same 3180 // registers used to pass 'inreg' arguments so watch out for those. 3181 if (!Subtarget->is64Bit() && 3182 ((!isa<GlobalAddressSDNode>(Callee) && 3183 !isa<ExternalSymbolSDNode>(Callee)) || 3184 getTargetMachine().getRelocationModel() == Reloc::PIC_)) { 3185 unsigned NumInRegs = 0; 3186 // In PIC we need an extra register to formulate the address computation 3187 // for the callee. 3188 unsigned MaxInRegs = 3189 (getTargetMachine().getRelocationModel() == Reloc::PIC_) ? 
2 : 3; 3190 3191 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 3192 CCValAssign &VA = ArgLocs[i]; 3193 if (!VA.isRegLoc()) 3194 continue; 3195 unsigned Reg = VA.getLocReg(); 3196 switch (Reg) { 3197 default: break; 3198 case X86::EAX: case X86::EDX: case X86::ECX: 3199 if (++NumInRegs == MaxInRegs) 3200 return false; 3201 break; 3202 } 3203 } 3204 } 3205 } 3206 3207 return true; 3208} 3209 3210FastISel * 3211X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo, 3212 const TargetLibraryInfo *libInfo) const { 3213 return X86::createFastISel(funcInfo, libInfo); 3214} 3215 3216//===----------------------------------------------------------------------===// 3217// Other Lowering Hooks 3218//===----------------------------------------------------------------------===// 3219 3220static bool MayFoldLoad(SDValue Op) { 3221 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode()); 3222} 3223 3224static bool MayFoldIntoStore(SDValue Op) { 3225 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin()); 3226} 3227 3228static bool isTargetShuffle(unsigned Opcode) { 3229 switch(Opcode) { 3230 default: return false; 3231 case X86ISD::PSHUFD: 3232 case X86ISD::PSHUFHW: 3233 case X86ISD::PSHUFLW: 3234 case X86ISD::SHUFP: 3235 case X86ISD::PALIGNR: 3236 case X86ISD::MOVLHPS: 3237 case X86ISD::MOVLHPD: 3238 case X86ISD::MOVHLPS: 3239 case X86ISD::MOVLPS: 3240 case X86ISD::MOVLPD: 3241 case X86ISD::MOVSHDUP: 3242 case X86ISD::MOVSLDUP: 3243 case X86ISD::MOVDDUP: 3244 case X86ISD::MOVSS: 3245 case X86ISD::MOVSD: 3246 case X86ISD::UNPCKL: 3247 case X86ISD::UNPCKH: 3248 case X86ISD::VPERMILP: 3249 case X86ISD::VPERM2X128: 3250 case X86ISD::VPERMI: 3251 return true; 3252 } 3253} 3254 3255static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT, 3256 SDValue V1, SelectionDAG &DAG) { 3257 switch(Opc) { 3258 default: llvm_unreachable("Unknown x86 shuffle node"); 3259 case X86ISD::MOVSHDUP: 3260 case X86ISD::MOVSLDUP: 3261 case X86ISD::MOVDDUP: 3262 return DAG.getNode(Opc, dl, VT, V1); 3263 } 3264} 3265 3266static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT, 3267 SDValue V1, unsigned TargetMask, 3268 SelectionDAG &DAG) { 3269 switch(Opc) { 3270 default: llvm_unreachable("Unknown x86 shuffle node"); 3271 case X86ISD::PSHUFD: 3272 case X86ISD::PSHUFHW: 3273 case X86ISD::PSHUFLW: 3274 case X86ISD::VPERMILP: 3275 case X86ISD::VPERMI: 3276 return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8)); 3277 } 3278} 3279 3280static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT, 3281 SDValue V1, SDValue V2, unsigned TargetMask, 3282 SelectionDAG &DAG) { 3283 switch(Opc) { 3284 default: llvm_unreachable("Unknown x86 shuffle node"); 3285 case X86ISD::PALIGNR: 3286 case X86ISD::SHUFP: 3287 case X86ISD::VPERM2X128: 3288 return DAG.getNode(Opc, dl, VT, V1, V2, 3289 DAG.getConstant(TargetMask, MVT::i8)); 3290 } 3291} 3292 3293static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT, 3294 SDValue V1, SDValue V2, SelectionDAG &DAG) { 3295 switch(Opc) { 3296 default: llvm_unreachable("Unknown x86 shuffle node"); 3297 case X86ISD::MOVLHPS: 3298 case X86ISD::MOVLHPD: 3299 case X86ISD::MOVHLPS: 3300 case X86ISD::MOVLPS: 3301 case X86ISD::MOVLPD: 3302 case X86ISD::MOVSS: 3303 case X86ISD::MOVSD: 3304 case X86ISD::UNPCKL: 3305 case X86ISD::UNPCKH: 3306 return DAG.getNode(Opc, dl, VT, V1, V2); 3307 } 3308} 3309 3310SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const { 3311 MachineFunction &MF = 
DAG.getMachineFunction(); 3312 const X86RegisterInfo *RegInfo = 3313 static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo()); 3314 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 3315 int ReturnAddrIndex = FuncInfo->getRAIndex(); 3316 3317 if (ReturnAddrIndex == 0) { 3318 // Set up a frame object for the return address. 3319 unsigned SlotSize = RegInfo->getSlotSize(); 3320 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize, 3321 -(int64_t)SlotSize, 3322 false); 3323 FuncInfo->setRAIndex(ReturnAddrIndex); 3324 } 3325 3326 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy()); 3327} 3328 3329bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M, 3330 bool hasSymbolicDisplacement) { 3331 // Offset should fit into 32 bit immediate field. 3332 if (!isInt<32>(Offset)) 3333 return false; 3334 3335 // If we don't have a symbolic displacement - we don't have any extra 3336 // restrictions. 3337 if (!hasSymbolicDisplacement) 3338 return true; 3339 3340 // FIXME: Some tweaks might be needed for medium code model. 3341 if (M != CodeModel::Small && M != CodeModel::Kernel) 3342 return false; 3343 3344 // For small code model we assume that latest object is 16MB before end of 31 3345 // bits boundary. We may also accept pretty large negative constants knowing 3346 // that all objects are in the positive half of address space. 3347 if (M == CodeModel::Small && Offset < 16*1024*1024) 3348 return true; 3349 3350 // For kernel code model we know that all object resist in the negative half 3351 // of 32bits address space. We may not accept negative offsets, since they may 3352 // be just off and we may accept pretty large positive ones. 3353 if (M == CodeModel::Kernel && Offset > 0) 3354 return true; 3355 3356 return false; 3357} 3358 3359/// isCalleePop - Determines whether the callee is required to pop its 3360/// own arguments. Callee pop is necessary to support tail calls. 3361bool X86::isCalleePop(CallingConv::ID CallingConv, 3362 bool is64Bit, bool IsVarArg, bool TailCallOpt) { 3363 if (IsVarArg) 3364 return false; 3365 3366 switch (CallingConv) { 3367 default: 3368 return false; 3369 case CallingConv::X86_StdCall: 3370 return !is64Bit; 3371 case CallingConv::X86_FastCall: 3372 return !is64Bit; 3373 case CallingConv::X86_ThisCall: 3374 return !is64Bit; 3375 case CallingConv::Fast: 3376 return TailCallOpt; 3377 case CallingConv::GHC: 3378 return TailCallOpt; 3379 case CallingConv::HiPE: 3380 return TailCallOpt; 3381 } 3382} 3383 3384/// TranslateX86CC - do a one to one translation of a ISD::CondCode to the X86 3385/// specific condition code, returning the condition code and the LHS/RHS of the 3386/// comparison to make. 3387static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP, 3388 SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) { 3389 if (!isFP) { 3390 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) { 3391 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) { 3392 // X > -1 -> X == 0, jump !sign. 3393 RHS = DAG.getConstant(0, RHS.getValueType()); 3394 return X86::COND_NS; 3395 } 3396 if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) { 3397 // X < 0 -> X == 0, jump on sign. 
3398 return X86::COND_S; 3399 } 3400 if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) { 3401 // X < 1 -> X <= 0 3402 RHS = DAG.getConstant(0, RHS.getValueType()); 3403 return X86::COND_LE; 3404 } 3405 } 3406 3407 switch (SetCCOpcode) { 3408 default: llvm_unreachable("Invalid integer condition!"); 3409 case ISD::SETEQ: return X86::COND_E; 3410 case ISD::SETGT: return X86::COND_G; 3411 case ISD::SETGE: return X86::COND_GE; 3412 case ISD::SETLT: return X86::COND_L; 3413 case ISD::SETLE: return X86::COND_LE; 3414 case ISD::SETNE: return X86::COND_NE; 3415 case ISD::SETULT: return X86::COND_B; 3416 case ISD::SETUGT: return X86::COND_A; 3417 case ISD::SETULE: return X86::COND_BE; 3418 case ISD::SETUGE: return X86::COND_AE; 3419 } 3420 } 3421 3422 // First determine if it is required or is profitable to flip the operands. 3423 3424 // If LHS is a foldable load, but RHS is not, flip the condition. 3425 if (ISD::isNON_EXTLoad(LHS.getNode()) && 3426 !ISD::isNON_EXTLoad(RHS.getNode())) { 3427 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode); 3428 std::swap(LHS, RHS); 3429 } 3430 3431 switch (SetCCOpcode) { 3432 default: break; 3433 case ISD::SETOLT: 3434 case ISD::SETOLE: 3435 case ISD::SETUGT: 3436 case ISD::SETUGE: 3437 std::swap(LHS, RHS); 3438 break; 3439 } 3440 3441 // On a floating point condition, the flags are set as follows: 3442 // ZF PF CF op 3443 // 0 | 0 | 0 | X > Y 3444 // 0 | 0 | 1 | X < Y 3445 // 1 | 0 | 0 | X == Y 3446 // 1 | 1 | 1 | unordered 3447 switch (SetCCOpcode) { 3448 default: llvm_unreachable("Condcode should be pre-legalized away"); 3449 case ISD::SETUEQ: 3450 case ISD::SETEQ: return X86::COND_E; 3451 case ISD::SETOLT: // flipped 3452 case ISD::SETOGT: 3453 case ISD::SETGT: return X86::COND_A; 3454 case ISD::SETOLE: // flipped 3455 case ISD::SETOGE: 3456 case ISD::SETGE: return X86::COND_AE; 3457 case ISD::SETUGT: // flipped 3458 case ISD::SETULT: 3459 case ISD::SETLT: return X86::COND_B; 3460 case ISD::SETUGE: // flipped 3461 case ISD::SETULE: 3462 case ISD::SETLE: return X86::COND_BE; 3463 case ISD::SETONE: 3464 case ISD::SETNE: return X86::COND_NE; 3465 case ISD::SETUO: return X86::COND_P; 3466 case ISD::SETO: return X86::COND_NP; 3467 case ISD::SETOEQ: 3468 case ISD::SETUNE: return X86::COND_INVALID; 3469 } 3470} 3471 3472/// hasFPCMov - is there a floating point cmov for the specific X86 condition 3473/// code. Current x86 isa includes the following FP cmov instructions: 3474/// fcmovb, fcomvbe, fcomve, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu. 3475static bool hasFPCMov(unsigned X86CC) { 3476 switch (X86CC) { 3477 default: 3478 return false; 3479 case X86::COND_B: 3480 case X86::COND_BE: 3481 case X86::COND_E: 3482 case X86::COND_P: 3483 case X86::COND_A: 3484 case X86::COND_AE: 3485 case X86::COND_NE: 3486 case X86::COND_NP: 3487 return true; 3488 } 3489} 3490 3491/// isFPImmLegal - Returns true if the target can instruction select the 3492/// specified FP immediate natively. If false, the legalizer will 3493/// materialize the FP immediate as a load from a constant pool. 3494bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { 3495 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) { 3496 if (Imm.bitwiseIsEqual(LegalFPImmediates[i])) 3497 return true; 3498 } 3499 return false; 3500} 3501 3502/// isUndefOrInRange - Return true if Val is undef or if its value falls within 3503/// the specified range (L, H]. 
3504static bool isUndefOrInRange(int Val, int Low, int Hi) { 3505 return (Val < 0) || (Val >= Low && Val < Hi); 3506} 3507 3508/// isUndefOrEqual - Val is either less than zero (undef) or equal to the 3509/// specified value. 3510static bool isUndefOrEqual(int Val, int CmpVal) { 3511 return (Val < 0 || Val == CmpVal); 3512} 3513 3514/// isSequentialOrUndefInRange - Return true if every element in Mask, beginning 3515/// from position Pos and ending in Pos+Size, falls within the specified 3516/// sequential range (L, L+Pos]. or is undef. 3517static bool isSequentialOrUndefInRange(ArrayRef<int> Mask, 3518 unsigned Pos, unsigned Size, int Low) { 3519 for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low) 3520 if (!isUndefOrEqual(Mask[i], Low)) 3521 return false; 3522 return true; 3523} 3524 3525/// isPSHUFDMask - Return true if the node specifies a shuffle of elements that 3526/// is suitable for input to PSHUFD or PSHUFW. That is, it doesn't reference 3527/// the second operand. 3528static bool isPSHUFDMask(ArrayRef<int> Mask, MVT VT) { 3529 if (VT == MVT::v4f32 || VT == MVT::v4i32 ) 3530 return (Mask[0] < 4 && Mask[1] < 4 && Mask[2] < 4 && Mask[3] < 4); 3531 if (VT == MVT::v2f64 || VT == MVT::v2i64) 3532 return (Mask[0] < 2 && Mask[1] < 2); 3533 return false; 3534} 3535 3536/// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that 3537/// is suitable for input to PSHUFHW. 3538static bool isPSHUFHWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) { 3539 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16)) 3540 return false; 3541 3542 // Lower quadword copied in order or undef. 3543 if (!isSequentialOrUndefInRange(Mask, 0, 4, 0)) 3544 return false; 3545 3546 // Upper quadword shuffled. 3547 for (unsigned i = 4; i != 8; ++i) 3548 if (!isUndefOrInRange(Mask[i], 4, 8)) 3549 return false; 3550 3551 if (VT == MVT::v16i16) { 3552 // Lower quadword copied in order or undef. 3553 if (!isSequentialOrUndefInRange(Mask, 8, 4, 8)) 3554 return false; 3555 3556 // Upper quadword shuffled. 3557 for (unsigned i = 12; i != 16; ++i) 3558 if (!isUndefOrInRange(Mask[i], 12, 16)) 3559 return false; 3560 } 3561 3562 return true; 3563} 3564 3565/// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that 3566/// is suitable for input to PSHUFLW. 3567static bool isPSHUFLWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) { 3568 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16)) 3569 return false; 3570 3571 // Upper quadword copied in order. 3572 if (!isSequentialOrUndefInRange(Mask, 4, 4, 4)) 3573 return false; 3574 3575 // Lower quadword shuffled. 3576 for (unsigned i = 0; i != 4; ++i) 3577 if (!isUndefOrInRange(Mask[i], 0, 4)) 3578 return false; 3579 3580 if (VT == MVT::v16i16) { 3581 // Upper quadword copied in order. 3582 if (!isSequentialOrUndefInRange(Mask, 12, 4, 12)) 3583 return false; 3584 3585 // Lower quadword shuffled. 3586 for (unsigned i = 8; i != 12; ++i) 3587 if (!isUndefOrInRange(Mask[i], 8, 12)) 3588 return false; 3589 } 3590 3591 return true; 3592} 3593 3594/// isPALIGNRMask - Return true if the node specifies a shuffle of elements that 3595/// is suitable for input to PALIGNR. 3596static bool isPALIGNRMask(ArrayRef<int> Mask, MVT VT, 3597 const X86Subtarget *Subtarget) { 3598 if ((VT.is128BitVector() && !Subtarget->hasSSSE3()) || 3599 (VT.is256BitVector() && !Subtarget->hasInt256())) 3600 return false; 3601 3602 unsigned NumElts = VT.getVectorNumElements(); 3603 unsigned NumLanes = VT.is512BitVector() ? 
1: VT.getSizeInBits()/128; 3604 unsigned NumLaneElts = NumElts/NumLanes; 3605 3606 // Do not handle 64-bit element shuffles with palignr. 3607 if (NumLaneElts == 2) 3608 return false; 3609 3610 for (unsigned l = 0; l != NumElts; l+=NumLaneElts) { 3611 unsigned i; 3612 for (i = 0; i != NumLaneElts; ++i) { 3613 if (Mask[i+l] >= 0) 3614 break; 3615 } 3616 3617 // Lane is all undef, go to next lane 3618 if (i == NumLaneElts) 3619 continue; 3620 3621 int Start = Mask[i+l]; 3622 3623 // Make sure its in this lane in one of the sources 3624 if (!isUndefOrInRange(Start, l, l+NumLaneElts) && 3625 !isUndefOrInRange(Start, l+NumElts, l+NumElts+NumLaneElts)) 3626 return false; 3627 3628 // If not lane 0, then we must match lane 0 3629 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Start, Mask[i]+l)) 3630 return false; 3631 3632 // Correct second source to be contiguous with first source 3633 if (Start >= (int)NumElts) 3634 Start -= NumElts - NumLaneElts; 3635 3636 // Make sure we're shifting in the right direction. 3637 if (Start <= (int)(i+l)) 3638 return false; 3639 3640 Start -= i; 3641 3642 // Check the rest of the elements to see if they are consecutive. 3643 for (++i; i != NumLaneElts; ++i) { 3644 int Idx = Mask[i+l]; 3645 3646 // Make sure its in this lane 3647 if (!isUndefOrInRange(Idx, l, l+NumLaneElts) && 3648 !isUndefOrInRange(Idx, l+NumElts, l+NumElts+NumLaneElts)) 3649 return false; 3650 3651 // If not lane 0, then we must match lane 0 3652 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Idx, Mask[i]+l)) 3653 return false; 3654 3655 if (Idx >= (int)NumElts) 3656 Idx -= NumElts - NumLaneElts; 3657 3658 if (!isUndefOrEqual(Idx, Start+i)) 3659 return false; 3660 3661 } 3662 } 3663 3664 return true; 3665} 3666 3667/// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming 3668/// the two vector operands have swapped position. 3669static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask, 3670 unsigned NumElems) { 3671 for (unsigned i = 0; i != NumElems; ++i) { 3672 int idx = Mask[i]; 3673 if (idx < 0) 3674 continue; 3675 else if (idx < (int)NumElems) 3676 Mask[i] = idx + NumElems; 3677 else 3678 Mask[i] = idx - NumElems; 3679 } 3680} 3681 3682/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand 3683/// specifies a shuffle of elements that is suitable for input to 128/256-bit 3684/// SHUFPS and SHUFPD. If Commuted is true, then it checks for sources to be 3685/// reverse of what x86 shuffles want. 3686static bool isSHUFPMask(ArrayRef<int> Mask, MVT VT, bool Commuted = false) { 3687 3688 unsigned NumElems = VT.getVectorNumElements(); 3689 unsigned NumLanes = VT.getSizeInBits()/128; 3690 unsigned NumLaneElems = NumElems/NumLanes; 3691 3692 if (NumLaneElems != 2 && NumLaneElems != 4) 3693 return false; 3694 3695 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 3696 bool symetricMaskRequired = 3697 (VT.getSizeInBits() >= 256) && (EltSize == 32); 3698 3699 // VSHUFPSY divides the resulting vector into 4 chunks. 3700 // The sources are also splitted into 4 chunks, and each destination 3701 // chunk must come from a different source chunk. 3702 // 3703 // SRC1 => X7 X6 X5 X4 X3 X2 X1 X0 3704 // SRC2 => Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y9 3705 // 3706 // DST => Y7..Y4, Y7..Y4, X7..X4, X7..X4, 3707 // Y3..Y0, Y3..Y0, X3..X0, X3..X0 3708 // 3709 // VSHUFPDY divides the resulting vector into 4 chunks. 3710 // The sources are also splitted into 4 chunks, and each destination 3711 // chunk must come from a different source chunk. 
3712 // 3713 // SRC1 => X3 X2 X1 X0 3714 // SRC2 => Y3 Y2 Y1 Y0 3715 // 3716 // DST => Y3..Y2, X3..X2, Y1..Y0, X1..X0 3717 // 3718 SmallVector<int, 4> MaskVal(NumLaneElems, -1); 3719 unsigned HalfLaneElems = NumLaneElems/2; 3720 for (unsigned l = 0; l != NumElems; l += NumLaneElems) { 3721 for (unsigned i = 0; i != NumLaneElems; ++i) { 3722 int Idx = Mask[i+l]; 3723 unsigned RngStart = l + ((Commuted == (i<HalfLaneElems)) ? NumElems : 0); 3724 if (!isUndefOrInRange(Idx, RngStart, RngStart+NumLaneElems)) 3725 return false; 3726 // For VSHUFPSY, the mask of the second half must be the same as the 3727 // first but with the appropriate offsets. This works in the same way as 3728 // VPERMILPS works with masks. 3729 if (!symetricMaskRequired || Idx < 0) 3730 continue; 3731 if (MaskVal[i] < 0) { 3732 MaskVal[i] = Idx - l; 3733 continue; 3734 } 3735 if ((signed)(Idx - l) != MaskVal[i]) 3736 return false; 3737 } 3738 } 3739 3740 return true; 3741} 3742 3743/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand 3744/// specifies a shuffle of elements that is suitable for input to MOVHLPS. 3745static bool isMOVHLPSMask(ArrayRef<int> Mask, MVT VT) { 3746 if (!VT.is128BitVector()) 3747 return false; 3748 3749 unsigned NumElems = VT.getVectorNumElements(); 3750 3751 if (NumElems != 4) 3752 return false; 3753 3754 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3 3755 return isUndefOrEqual(Mask[0], 6) && 3756 isUndefOrEqual(Mask[1], 7) && 3757 isUndefOrEqual(Mask[2], 2) && 3758 isUndefOrEqual(Mask[3], 3); 3759} 3760 3761/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form 3762/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef, 3763/// <2, 3, 2, 3> 3764static bool isMOVHLPS_v_undef_Mask(ArrayRef<int> Mask, MVT VT) { 3765 if (!VT.is128BitVector()) 3766 return false; 3767 3768 unsigned NumElems = VT.getVectorNumElements(); 3769 3770 if (NumElems != 4) 3771 return false; 3772 3773 return isUndefOrEqual(Mask[0], 2) && 3774 isUndefOrEqual(Mask[1], 3) && 3775 isUndefOrEqual(Mask[2], 2) && 3776 isUndefOrEqual(Mask[3], 3); 3777} 3778 3779/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand 3780/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}. 3781static bool isMOVLPMask(ArrayRef<int> Mask, MVT VT) { 3782 if (!VT.is128BitVector()) 3783 return false; 3784 3785 unsigned NumElems = VT.getVectorNumElements(); 3786 3787 if (NumElems != 2 && NumElems != 4) 3788 return false; 3789 3790 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 3791 if (!isUndefOrEqual(Mask[i], i + NumElems)) 3792 return false; 3793 3794 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i) 3795 if (!isUndefOrEqual(Mask[i], i)) 3796 return false; 3797 3798 return true; 3799} 3800 3801/// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand 3802/// specifies a shuffle of elements that is suitable for input to MOVLHPS. 3803static bool isMOVLHPSMask(ArrayRef<int> Mask, MVT VT) { 3804 if (!VT.is128BitVector()) 3805 return false; 3806 3807 unsigned NumElems = VT.getVectorNumElements(); 3808 3809 if (NumElems != 2 && NumElems != 4) 3810 return false; 3811 3812 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 3813 if (!isUndefOrEqual(Mask[i], i)) 3814 return false; 3815 3816 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 3817 if (!isUndefOrEqual(Mask[i + e], i + NumElems)) 3818 return false; 3819 3820 return true; 3821} 3822 3823// 3824// Some special combinations that can be optimized. 
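// For example (an illustrative note): the even mask <0, 8, 2, 10, 4, 12, 6, 14>
// on two v8i32 inputs is handled below by moving the even elements of the
// second operand into the odd positions and then blending with the first
// operand.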
3825// 3826static 3827SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp, 3828 SelectionDAG &DAG) { 3829 MVT VT = SVOp->getSimpleValueType(0); 3830 SDLoc dl(SVOp); 3831 3832 if (VT != MVT::v8i32 && VT != MVT::v8f32) 3833 return SDValue(); 3834 3835 ArrayRef<int> Mask = SVOp->getMask(); 3836 3837 // These are the special masks that may be optimized. 3838 static const int MaskToOptimizeEven[] = {0, 8, 2, 10, 4, 12, 6, 14}; 3839 static const int MaskToOptimizeOdd[] = {1, 9, 3, 11, 5, 13, 7, 15}; 3840 bool MatchEvenMask = true; 3841 bool MatchOddMask = true; 3842 for (int i=0; i<8; ++i) { 3843 if (!isUndefOrEqual(Mask[i], MaskToOptimizeEven[i])) 3844 MatchEvenMask = false; 3845 if (!isUndefOrEqual(Mask[i], MaskToOptimizeOdd[i])) 3846 MatchOddMask = false; 3847 } 3848 3849 if (!MatchEvenMask && !MatchOddMask) 3850 return SDValue(); 3851 3852 SDValue UndefNode = DAG.getNode(ISD::UNDEF, dl, VT); 3853 3854 SDValue Op0 = SVOp->getOperand(0); 3855 SDValue Op1 = SVOp->getOperand(1); 3856 3857 if (MatchEvenMask) { 3858 // Shift the second operand right to 32 bits. 3859 static const int ShiftRightMask[] = {-1, 0, -1, 2, -1, 4, -1, 6 }; 3860 Op1 = DAG.getVectorShuffle(VT, dl, Op1, UndefNode, ShiftRightMask); 3861 } else { 3862 // Shift the first operand left to 32 bits. 3863 static const int ShiftLeftMask[] = {1, -1, 3, -1, 5, -1, 7, -1 }; 3864 Op0 = DAG.getVectorShuffle(VT, dl, Op0, UndefNode, ShiftLeftMask); 3865 } 3866 static const int BlendMask[] = {0, 9, 2, 11, 4, 13, 6, 15}; 3867 return DAG.getVectorShuffle(VT, dl, Op0, Op1, BlendMask); 3868} 3869 3870/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand 3871/// specifies a shuffle of elements that is suitable for input to UNPCKL. 3872static bool isUNPCKLMask(ArrayRef<int> Mask, MVT VT, 3873 bool HasInt256, bool V2IsSplat = false) { 3874 3875 assert(VT.getSizeInBits() >= 128 && 3876 "Unsupported vector type for unpckl"); 3877 3878 // AVX defines UNPCK* to operate independently on 128-bit lanes. 3879 unsigned NumLanes; 3880 unsigned NumOf256BitLanes; 3881 unsigned NumElts = VT.getVectorNumElements(); 3882 if (VT.is256BitVector()) { 3883 if (NumElts != 4 && NumElts != 8 && 3884 (!HasInt256 || (NumElts != 16 && NumElts != 32))) 3885 return false; 3886 NumLanes = 2; 3887 NumOf256BitLanes = 1; 3888 } else if (VT.is512BitVector()) { 3889 assert(VT.getScalarType().getSizeInBits() >= 32 && 3890 "Unsupported vector type for unpckh"); 3891 NumLanes = 2; 3892 NumOf256BitLanes = 2; 3893 } else { 3894 NumLanes = 1; 3895 NumOf256BitLanes = 1; 3896 } 3897 3898 unsigned NumEltsInStride = NumElts/NumOf256BitLanes; 3899 unsigned NumLaneElts = NumEltsInStride/NumLanes; 3900 3901 for (unsigned l256 = 0; l256 < NumOf256BitLanes; l256 += 1) { 3902 for (unsigned l = 0; l != NumEltsInStride; l += NumLaneElts) { 3903 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) { 3904 int BitI = Mask[l256*NumEltsInStride+l+i]; 3905 int BitI1 = Mask[l256*NumEltsInStride+l+i+1]; 3906 if (!isUndefOrEqual(BitI, j+l256*NumElts)) 3907 return false; 3908 if (V2IsSplat && !isUndefOrEqual(BitI1, NumElts)) 3909 return false; 3910 if (!isUndefOrEqual(BitI1, j+l256*NumElts+NumEltsInStride)) 3911 return false; 3912 } 3913 } 3914 } 3915 return true; 3916} 3917 3918/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand 3919/// specifies a shuffle of elements that is suitable for input to UNPCKH. 
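/// For example (illustrative): for v4i32 the canonical UNPCKH mask is
/// <2, 6, 3, 7>, interleaving the high halves of the two sources.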
3920static bool isUNPCKHMask(ArrayRef<int> Mask, MVT VT, 3921 bool HasInt256, bool V2IsSplat = false) { 3922 assert(VT.getSizeInBits() >= 128 && 3923 "Unsupported vector type for unpckh"); 3924 3925 // AVX defines UNPCK* to operate independently on 128-bit lanes. 3926 unsigned NumLanes; 3927 unsigned NumOf256BitLanes; 3928 unsigned NumElts = VT.getVectorNumElements(); 3929 if (VT.is256BitVector()) { 3930 if (NumElts != 4 && NumElts != 8 && 3931 (!HasInt256 || (NumElts != 16 && NumElts != 32))) 3932 return false; 3933 NumLanes = 2; 3934 NumOf256BitLanes = 1; 3935 } else if (VT.is512BitVector()) { 3936 assert(VT.getScalarType().getSizeInBits() >= 32 && 3937 "Unsupported vector type for unpckh"); 3938 NumLanes = 2; 3939 NumOf256BitLanes = 2; 3940 } else { 3941 NumLanes = 1; 3942 NumOf256BitLanes = 1; 3943 } 3944 3945 unsigned NumEltsInStride = NumElts/NumOf256BitLanes; 3946 unsigned NumLaneElts = NumEltsInStride/NumLanes; 3947 3948 for (unsigned l256 = 0; l256 < NumOf256BitLanes; l256 += 1) { 3949 for (unsigned l = 0; l != NumEltsInStride; l += NumLaneElts) { 3950 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) { 3951 int BitI = Mask[l256*NumEltsInStride+l+i]; 3952 int BitI1 = Mask[l256*NumEltsInStride+l+i+1]; 3953 if (!isUndefOrEqual(BitI, j+l256*NumElts)) 3954 return false; 3955 if (V2IsSplat && !isUndefOrEqual(BitI1, NumElts)) 3956 return false; 3957 if (!isUndefOrEqual(BitI1, j+l256*NumElts+NumEltsInStride)) 3958 return false; 3959 } 3960 } 3961 } 3962 return true; 3963} 3964 3965/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form 3966/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef, 3967/// <0, 0, 1, 1> 3968static bool isUNPCKL_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) { 3969 unsigned NumElts = VT.getVectorNumElements(); 3970 bool Is256BitVec = VT.is256BitVector(); 3971 3972 if (VT.is512BitVector()) 3973 return false; 3974 assert((VT.is128BitVector() || VT.is256BitVector()) && 3975 "Unsupported vector type for unpckh"); 3976 3977 if (Is256BitVec && NumElts != 4 && NumElts != 8 && 3978 (!HasInt256 || (NumElts != 16 && NumElts != 32))) 3979 return false; 3980 3981 // For 256-bit i64/f64, use MOVDDUPY instead, so reject the matching pattern 3982 // FIXME: Need a better way to get rid of this, there's no latency difference 3983 // between UNPCKLPD and MOVDDUP, the later should always be checked first and 3984 // the former later. We should also remove the "_undef" special mask. 3985 if (NumElts == 4 && Is256BitVec) 3986 return false; 3987 3988 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 3989 // independently on 128-bit lanes. 3990 unsigned NumLanes = VT.getSizeInBits()/128; 3991 unsigned NumLaneElts = NumElts/NumLanes; 3992 3993 for (unsigned l = 0; l != NumElts; l += NumLaneElts) { 3994 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) { 3995 int BitI = Mask[l+i]; 3996 int BitI1 = Mask[l+i+1]; 3997 3998 if (!isUndefOrEqual(BitI, j)) 3999 return false; 4000 if (!isUndefOrEqual(BitI1, j)) 4001 return false; 4002 } 4003 } 4004 4005 return true; 4006} 4007 4008/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form 4009/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. 
vector_shuffle v, undef, 4010/// <2, 2, 3, 3> 4011static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) { 4012 unsigned NumElts = VT.getVectorNumElements(); 4013 4014 if (VT.is512BitVector()) 4015 return false; 4016 4017 assert((VT.is128BitVector() || VT.is256BitVector()) && 4018 "Unsupported vector type for unpckh"); 4019 4020 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 && 4021 (!HasInt256 || (NumElts != 16 && NumElts != 32))) 4022 return false; 4023 4024 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 4025 // independently on 128-bit lanes. 4026 unsigned NumLanes = VT.getSizeInBits()/128; 4027 unsigned NumLaneElts = NumElts/NumLanes; 4028 4029 for (unsigned l = 0; l != NumElts; l += NumLaneElts) { 4030 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) { 4031 int BitI = Mask[l+i]; 4032 int BitI1 = Mask[l+i+1]; 4033 if (!isUndefOrEqual(BitI, j)) 4034 return false; 4035 if (!isUndefOrEqual(BitI1, j)) 4036 return false; 4037 } 4038 } 4039 return true; 4040} 4041 4042/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand 4043/// specifies a shuffle of elements that is suitable for input to MOVSS, 4044/// MOVSD, and MOVD, i.e. setting the lowest element. 4045static bool isMOVLMask(ArrayRef<int> Mask, EVT VT) { 4046 if (VT.getVectorElementType().getSizeInBits() < 32) 4047 return false; 4048 if (!VT.is128BitVector()) 4049 return false; 4050 4051 unsigned NumElts = VT.getVectorNumElements(); 4052 4053 if (!isUndefOrEqual(Mask[0], NumElts)) 4054 return false; 4055 4056 for (unsigned i = 1; i != NumElts; ++i) 4057 if (!isUndefOrEqual(Mask[i], i)) 4058 return false; 4059 4060 return true; 4061} 4062 4063/// isVPERM2X128Mask - Match 256-bit shuffles where the elements are considered 4064/// as permutations between 128-bit chunks or halves. As an example: this 4065/// shuffle bellow: 4066/// vector_shuffle <4, 5, 6, 7, 12, 13, 14, 15> 4067/// The first half comes from the second half of V1 and the second half from the 4068/// the second half of V2. 4069static bool isVPERM2X128Mask(ArrayRef<int> Mask, MVT VT, bool HasFp256) { 4070 if (!HasFp256 || !VT.is256BitVector()) 4071 return false; 4072 4073 // The shuffle result is divided into half A and half B. In total the two 4074 // sources have 4 halves, namely: C, D, E, F. The final values of A and 4075 // B must come from C, D, E or F. 4076 unsigned HalfSize = VT.getVectorNumElements()/2; 4077 bool MatchA = false, MatchB = false; 4078 4079 // Check if A comes from one of C, D, E, F. 4080 for (unsigned Half = 0; Half != 4; ++Half) { 4081 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, Half*HalfSize)) { 4082 MatchA = true; 4083 break; 4084 } 4085 } 4086 4087 // Check if B comes from one of C, D, E, F. 4088 for (unsigned Half = 0; Half != 4; ++Half) { 4089 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, Half*HalfSize)) { 4090 MatchB = true; 4091 break; 4092 } 4093 } 4094 4095 return MatchA && MatchB; 4096} 4097 4098/// getShuffleVPERM2X128Immediate - Return the appropriate immediate to shuffle 4099/// the specified VECTOR_MASK mask with VPERM2F128/VPERM2I128 instructions. 
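/// For example (illustrative): for the v8i32 mask <4, 5, 6, 7, 12, 13, 14, 15>
/// the low half of the result selects half 1 (the upper half of V1) and the
/// high half selects half 3 (the upper half of V2), giving the immediate 0x31.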
4100static unsigned getShuffleVPERM2X128Immediate(ShuffleVectorSDNode *SVOp) {
4101 MVT VT = SVOp->getSimpleValueType(0);
4102
4103 unsigned HalfSize = VT.getVectorNumElements()/2;
4104
4105 unsigned FstHalf = 0, SndHalf = 0;
4106 for (unsigned i = 0; i < HalfSize; ++i) {
4107 if (SVOp->getMaskElt(i) > 0) {
4108 FstHalf = SVOp->getMaskElt(i)/HalfSize;
4109 break;
4110 }
4111 }
4112 for (unsigned i = HalfSize; i < HalfSize*2; ++i) {
4113 if (SVOp->getMaskElt(i) > 0) {
4114 SndHalf = SVOp->getMaskElt(i)/HalfSize;
4115 break;
4116 }
4117 }
4118
4119 return (FstHalf | (SndHalf << 4));
4120}
4121
4122// Symmetric in-lane mask. Each lane has 4 elements (for imm8).
4123static bool isPermImmMask(ArrayRef<int> Mask, MVT VT, unsigned& Imm8) {
4124 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4125 if (EltSize < 32)
4126 return false;
4127
4128 unsigned NumElts = VT.getVectorNumElements();
4129 Imm8 = 0;
4130 if (VT.is128BitVector() || (VT.is256BitVector() && EltSize == 64)) {
4131 for (unsigned i = 0; i != NumElts; ++i) {
4132 if (Mask[i] < 0)
4133 continue;
4134 Imm8 |= Mask[i] << (i*2);
4135 }
4136 return true;
4137 }
4138
4139 unsigned LaneSize = 4;
4140 SmallVector<int, 4> MaskVal(LaneSize, -1);
4141
4142 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4143 for (unsigned i = 0; i != LaneSize; ++i) {
4144 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4145 return false;
4146 if (Mask[i+l] < 0)
4147 continue;
4148 if (MaskVal[i] < 0) {
4149 MaskVal[i] = Mask[i+l] - l;
4150 Imm8 |= MaskVal[i] << (i*2);
4151 continue;
4152 }
4153 if (Mask[i+l] != (signed)(MaskVal[i]+l))
4154 return false;
4155 }
4156 }
4157 return true;
4158}
4159
4160/// isVPERMILPMask - Return true if the specified VECTOR_SHUFFLE operand
4161/// specifies a shuffle of elements that is suitable for input to VPERMILPD*.
4162/// Note that VPERMIL mask matching is different depending on whether the
4163/// underlying type is 32 or 64 bits wide. For VPERMILPS the high half of the
4164/// mask should select the same positions as the low half, but from the high
4165/// half of the source. For VPERMILPD the two lanes can be shuffled independently
4166/// of each other, with the restriction that lanes can't be crossed. Also handles PSHUFDY.
4167static bool isVPERMILPMask(ArrayRef<int> Mask, MVT VT) {
4168 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4169 if (VT.getSizeInBits() < 256 || EltSize < 32)
4170 return false;
4171 bool symetricMaskRequired = (EltSize == 32);
4172 unsigned NumElts = VT.getVectorNumElements();
4173
4174 unsigned NumLanes = VT.getSizeInBits()/128;
4175 unsigned LaneSize = NumElts/NumLanes;
4176 // 2 or 4 elements in one lane
4177
4178 SmallVector<int, 4> ExpectedMaskVal(LaneSize, -1);
4179 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4180 for (unsigned i = 0; i != LaneSize; ++i) {
4181 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4182 return false;
4183 if (symetricMaskRequired) {
4184 if (ExpectedMaskVal[i] < 0 && Mask[i+l] >= 0) {
4185 ExpectedMaskVal[i] = Mask[i+l] - l;
4186 continue;
4187 }
4188 if (!isUndefOrEqual(Mask[i+l], ExpectedMaskVal[i]+l))
4189 return false;
4190 }
4191 }
4192 }
4193 return true;
4194}
4195
4196/// isCommutedMOVLMask - Returns true if the shuffle mask is the reverse of
4197/// what x86 movss wants. X86 movss requires the lowest element to be the lowest
4198/// element of vector 2 and the other elements to come from vector 1 in order.
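/// For example (illustrative): for v4i32 a commuted MOVL mask looks like
/// <0, 5, 6, 7> - element 0 comes from the first vector and the remaining
/// elements come from the second vector in order.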
4199static bool isCommutedMOVLMask(ArrayRef<int> Mask, MVT VT, 4200 bool V2IsSplat = false, bool V2IsUndef = false) { 4201 if (!VT.is128BitVector()) 4202 return false; 4203 4204 unsigned NumOps = VT.getVectorNumElements(); 4205 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16) 4206 return false; 4207 4208 if (!isUndefOrEqual(Mask[0], 0)) 4209 return false; 4210 4211 for (unsigned i = 1; i != NumOps; ++i) 4212 if (!(isUndefOrEqual(Mask[i], i+NumOps) || 4213 (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) || 4214 (V2IsSplat && isUndefOrEqual(Mask[i], NumOps)))) 4215 return false; 4216 4217 return true; 4218} 4219 4220/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand 4221/// specifies a shuffle of elements that is suitable for input to MOVSHDUP. 4222/// Masks to match: <1, 1, 3, 3> or <1, 1, 3, 3, 5, 5, 7, 7> 4223static bool isMOVSHDUPMask(ArrayRef<int> Mask, MVT VT, 4224 const X86Subtarget *Subtarget) { 4225 if (!Subtarget->hasSSE3()) 4226 return false; 4227 4228 unsigned NumElems = VT.getVectorNumElements(); 4229 4230 if ((VT.is128BitVector() && NumElems != 4) || 4231 (VT.is256BitVector() && NumElems != 8) || 4232 (VT.is512BitVector() && NumElems != 16)) 4233 return false; 4234 4235 // "i+1" is the value the indexed mask element must have 4236 for (unsigned i = 0; i != NumElems; i += 2) 4237 if (!isUndefOrEqual(Mask[i], i+1) || 4238 !isUndefOrEqual(Mask[i+1], i+1)) 4239 return false; 4240 4241 return true; 4242} 4243 4244/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand 4245/// specifies a shuffle of elements that is suitable for input to MOVSLDUP. 4246/// Masks to match: <0, 0, 2, 2> or <0, 0, 2, 2, 4, 4, 6, 6> 4247static bool isMOVSLDUPMask(ArrayRef<int> Mask, MVT VT, 4248 const X86Subtarget *Subtarget) { 4249 if (!Subtarget->hasSSE3()) 4250 return false; 4251 4252 unsigned NumElems = VT.getVectorNumElements(); 4253 4254 if ((VT.is128BitVector() && NumElems != 4) || 4255 (VT.is256BitVector() && NumElems != 8) || 4256 (VT.is512BitVector() && NumElems != 16)) 4257 return false; 4258 4259 // "i" is the value the indexed mask element must have 4260 for (unsigned i = 0; i != NumElems; i += 2) 4261 if (!isUndefOrEqual(Mask[i], i) || 4262 !isUndefOrEqual(Mask[i+1], i)) 4263 return false; 4264 4265 return true; 4266} 4267 4268/// isMOVDDUPYMask - Return true if the specified VECTOR_SHUFFLE operand 4269/// specifies a shuffle of elements that is suitable for input to 256-bit 4270/// version of MOVDDUP. 4271static bool isMOVDDUPYMask(ArrayRef<int> Mask, MVT VT, bool HasFp256) { 4272 if (!HasFp256 || !VT.is256BitVector()) 4273 return false; 4274 4275 unsigned NumElts = VT.getVectorNumElements(); 4276 if (NumElts != 4) 4277 return false; 4278 4279 for (unsigned i = 0; i != NumElts/2; ++i) 4280 if (!isUndefOrEqual(Mask[i], 0)) 4281 return false; 4282 for (unsigned i = NumElts/2; i != NumElts; ++i) 4283 if (!isUndefOrEqual(Mask[i], NumElts/2)) 4284 return false; 4285 return true; 4286} 4287 4288/// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand 4289/// specifies a shuffle of elements that is suitable for input to 128-bit 4290/// version of MOVDDUP. 
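/// For example (illustrative): for v2f64 the expected mask is <0, 0>, i.e. the
/// low element duplicated into both result positions.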
4291static bool isMOVDDUPMask(ArrayRef<int> Mask, MVT VT) { 4292 if (!VT.is128BitVector()) 4293 return false; 4294 4295 unsigned e = VT.getVectorNumElements() / 2; 4296 for (unsigned i = 0; i != e; ++i) 4297 if (!isUndefOrEqual(Mask[i], i)) 4298 return false; 4299 for (unsigned i = 0; i != e; ++i) 4300 if (!isUndefOrEqual(Mask[e+i], i)) 4301 return false; 4302 return true; 4303} 4304 4305/// isVEXTRACTIndex - Return true if the specified 4306/// EXTRACT_SUBVECTOR operand specifies a vector extract that is 4307/// suitable for instruction that extract 128 or 256 bit vectors 4308static bool isVEXTRACTIndex(SDNode *N, unsigned vecWidth) { 4309 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width"); 4310 if (!isa<ConstantSDNode>(N->getOperand(1).getNode())) 4311 return false; 4312 4313 // The index should be aligned on a vecWidth-bit boundary. 4314 uint64_t Index = 4315 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue(); 4316 4317 MVT VT = N->getSimpleValueType(0); 4318 unsigned ElSize = VT.getVectorElementType().getSizeInBits(); 4319 bool Result = (Index * ElSize) % vecWidth == 0; 4320 4321 return Result; 4322} 4323 4324/// isVINSERTIndex - Return true if the specified INSERT_SUBVECTOR 4325/// operand specifies a subvector insert that is suitable for input to 4326/// insertion of 128 or 256-bit subvectors 4327static bool isVINSERTIndex(SDNode *N, unsigned vecWidth) { 4328 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width"); 4329 if (!isa<ConstantSDNode>(N->getOperand(2).getNode())) 4330 return false; 4331 // The index should be aligned on a vecWidth-bit boundary. 4332 uint64_t Index = 4333 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue(); 4334 4335 MVT VT = N->getSimpleValueType(0); 4336 unsigned ElSize = VT.getVectorElementType().getSizeInBits(); 4337 bool Result = (Index * ElSize) % vecWidth == 0; 4338 4339 return Result; 4340} 4341 4342bool X86::isVINSERT128Index(SDNode *N) { 4343 return isVINSERTIndex(N, 128); 4344} 4345 4346bool X86::isVINSERT256Index(SDNode *N) { 4347 return isVINSERTIndex(N, 256); 4348} 4349 4350bool X86::isVEXTRACT128Index(SDNode *N) { 4351 return isVEXTRACTIndex(N, 128); 4352} 4353 4354bool X86::isVEXTRACT256Index(SDNode *N) { 4355 return isVEXTRACTIndex(N, 256); 4356} 4357 4358/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle 4359/// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions. 4360/// Handles 128-bit and 256-bit. 4361static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) { 4362 MVT VT = N->getSimpleValueType(0); 4363 4364 assert((VT.getSizeInBits() >= 128) && 4365 "Unsupported vector type for PSHUF/SHUFP"); 4366 4367 // Handle 128 and 256-bit vector lengths. AVX defines PSHUF/SHUFP to operate 4368 // independently on 128-bit lanes. 4369 unsigned NumElts = VT.getVectorNumElements(); 4370 unsigned NumLanes = VT.getSizeInBits()/128; 4371 unsigned NumLaneElts = NumElts/NumLanes; 4372 4373 assert((NumLaneElts == 2 || NumLaneElts == 4 || NumLaneElts == 8) && 4374 "Only supports 2, 4 or 8 elements per lane"); 4375 4376 unsigned Shift = (NumLaneElts >= 4) ? 
1 : 0; 4377 unsigned Mask = 0; 4378 for (unsigned i = 0; i != NumElts; ++i) { 4379 int Elt = N->getMaskElt(i); 4380 if (Elt < 0) continue; 4381 Elt &= NumLaneElts - 1; 4382 unsigned ShAmt = (i << Shift) % 8; 4383 Mask |= Elt << ShAmt; 4384 } 4385 4386 return Mask; 4387} 4388 4389/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle 4390/// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction. 4391static unsigned getShufflePSHUFHWImmediate(ShuffleVectorSDNode *N) { 4392 MVT VT = N->getSimpleValueType(0); 4393 4394 assert((VT == MVT::v8i16 || VT == MVT::v16i16) && 4395 "Unsupported vector type for PSHUFHW"); 4396 4397 unsigned NumElts = VT.getVectorNumElements(); 4398 4399 unsigned Mask = 0; 4400 for (unsigned l = 0; l != NumElts; l += 8) { 4401 // 8 nodes per lane, but we only care about the last 4. 4402 for (unsigned i = 0; i < 4; ++i) { 4403 int Elt = N->getMaskElt(l+i+4); 4404 if (Elt < 0) continue; 4405 Elt &= 0x3; // only 2-bits. 4406 Mask |= Elt << (i * 2); 4407 } 4408 } 4409 4410 return Mask; 4411} 4412 4413/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle 4414/// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction. 4415static unsigned getShufflePSHUFLWImmediate(ShuffleVectorSDNode *N) { 4416 MVT VT = N->getSimpleValueType(0); 4417 4418 assert((VT == MVT::v8i16 || VT == MVT::v16i16) && 4419 "Unsupported vector type for PSHUFHW"); 4420 4421 unsigned NumElts = VT.getVectorNumElements(); 4422 4423 unsigned Mask = 0; 4424 for (unsigned l = 0; l != NumElts; l += 8) { 4425 // 8 nodes per lane, but we only care about the first 4. 4426 for (unsigned i = 0; i < 4; ++i) { 4427 int Elt = N->getMaskElt(l+i); 4428 if (Elt < 0) continue; 4429 Elt &= 0x3; // only 2-bits 4430 Mask |= Elt << (i * 2); 4431 } 4432 } 4433 4434 return Mask; 4435} 4436 4437/// getShufflePALIGNRImmediate - Return the appropriate immediate to shuffle 4438/// the specified VECTOR_SHUFFLE mask with the PALIGNR instruction. 4439static unsigned getShufflePALIGNRImmediate(ShuffleVectorSDNode *SVOp) { 4440 MVT VT = SVOp->getSimpleValueType(0); 4441 unsigned EltSize = VT.is512BitVector() ? 1 : 4442 VT.getVectorElementType().getSizeInBits() >> 3; 4443 4444 unsigned NumElts = VT.getVectorNumElements(); 4445 unsigned NumLanes = VT.is512BitVector() ? 
1 : VT.getSizeInBits()/128; 4446 unsigned NumLaneElts = NumElts/NumLanes; 4447 4448 int Val = 0; 4449 unsigned i; 4450 for (i = 0; i != NumElts; ++i) { 4451 Val = SVOp->getMaskElt(i); 4452 if (Val >= 0) 4453 break; 4454 } 4455 if (Val >= (int)NumElts) 4456 Val -= NumElts - NumLaneElts; 4457 4458 assert(Val - i > 0 && "PALIGNR imm should be positive"); 4459 return (Val - i) * EltSize; 4460} 4461 4462static unsigned getExtractVEXTRACTImmediate(SDNode *N, unsigned vecWidth) { 4463 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width"); 4464 if (!isa<ConstantSDNode>(N->getOperand(1).getNode())) 4465 llvm_unreachable("Illegal extract subvector for VEXTRACT"); 4466 4467 uint64_t Index = 4468 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue(); 4469 4470 MVT VecVT = N->getOperand(0).getSimpleValueType(); 4471 MVT ElVT = VecVT.getVectorElementType(); 4472 4473 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits(); 4474 return Index / NumElemsPerChunk; 4475} 4476 4477static unsigned getInsertVINSERTImmediate(SDNode *N, unsigned vecWidth) { 4478 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width"); 4479 if (!isa<ConstantSDNode>(N->getOperand(2).getNode())) 4480 llvm_unreachable("Illegal insert subvector for VINSERT"); 4481 4482 uint64_t Index = 4483 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue(); 4484 4485 MVT VecVT = N->getSimpleValueType(0); 4486 MVT ElVT = VecVT.getVectorElementType(); 4487 4488 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits(); 4489 return Index / NumElemsPerChunk; 4490} 4491 4492/// getExtractVEXTRACT128Immediate - Return the appropriate immediate 4493/// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128 4494/// and VINSERTI128 instructions. 4495unsigned X86::getExtractVEXTRACT128Immediate(SDNode *N) { 4496 return getExtractVEXTRACTImmediate(N, 128); 4497} 4498 4499/// getExtractVEXTRACT256Immediate - Return the appropriate immediate 4500/// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF64x4 4501/// and VINSERTI64x4 instructions. 4502unsigned X86::getExtractVEXTRACT256Immediate(SDNode *N) { 4503 return getExtractVEXTRACTImmediate(N, 256); 4504} 4505 4506/// getInsertVINSERT128Immediate - Return the appropriate immediate 4507/// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128 4508/// and VINSERTI128 instructions. 4509unsigned X86::getInsertVINSERT128Immediate(SDNode *N) { 4510 return getInsertVINSERTImmediate(N, 128); 4511} 4512 4513/// getInsertVINSERT256Immediate - Return the appropriate immediate 4514/// to insert at the specified INSERT_SUBVECTOR index with VINSERTF46x4 4515/// and VINSERTI64x4 instructions. 4516unsigned X86::getInsertVINSERT256Immediate(SDNode *N) { 4517 return getInsertVINSERTImmediate(N, 256); 4518} 4519 4520/// isZeroNode - Returns true if Elt is a constant zero or a floating point 4521/// constant +0.0. 4522bool X86::isZeroNode(SDValue Elt) { 4523 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Elt)) 4524 return CN->isNullValue(); 4525 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Elt)) 4526 return CFP->getValueAPF().isPosZero(); 4527 return false; 4528} 4529 4530/// CommuteVectorShuffle - Swap vector_shuffle operands as well as values in 4531/// their permute mask. 
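/// For example (illustrative): shuffle(A, B, <0, 5, 2, 7>) becomes
/// shuffle(B, A, <4, 1, 6, 3>).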
4532static SDValue CommuteVectorShuffle(ShuffleVectorSDNode *SVOp, 4533 SelectionDAG &DAG) { 4534 MVT VT = SVOp->getSimpleValueType(0); 4535 unsigned NumElems = VT.getVectorNumElements(); 4536 SmallVector<int, 8> MaskVec; 4537 4538 for (unsigned i = 0; i != NumElems; ++i) { 4539 int Idx = SVOp->getMaskElt(i); 4540 if (Idx >= 0) { 4541 if (Idx < (int)NumElems) 4542 Idx += NumElems; 4543 else 4544 Idx -= NumElems; 4545 } 4546 MaskVec.push_back(Idx); 4547 } 4548 return DAG.getVectorShuffle(VT, SDLoc(SVOp), SVOp->getOperand(1), 4549 SVOp->getOperand(0), &MaskVec[0]); 4550} 4551 4552/// ShouldXformToMOVHLPS - Return true if the node should be transformed to 4553/// match movhlps. The lower half elements should come from upper half of 4554/// V1 (and in order), and the upper half elements should come from the upper 4555/// half of V2 (and in order). 4556static bool ShouldXformToMOVHLPS(ArrayRef<int> Mask, MVT VT) { 4557 if (!VT.is128BitVector()) 4558 return false; 4559 if (VT.getVectorNumElements() != 4) 4560 return false; 4561 for (unsigned i = 0, e = 2; i != e; ++i) 4562 if (!isUndefOrEqual(Mask[i], i+2)) 4563 return false; 4564 for (unsigned i = 2; i != 4; ++i) 4565 if (!isUndefOrEqual(Mask[i], i+4)) 4566 return false; 4567 return true; 4568} 4569 4570/// isScalarLoadToVector - Returns true if the node is a scalar load that 4571/// is promoted to a vector. It also returns the LoadSDNode by reference if 4572/// required. 4573static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = NULL) { 4574 if (N->getOpcode() != ISD::SCALAR_TO_VECTOR) 4575 return false; 4576 N = N->getOperand(0).getNode(); 4577 if (!ISD::isNON_EXTLoad(N)) 4578 return false; 4579 if (LD) 4580 *LD = cast<LoadSDNode>(N); 4581 return true; 4582} 4583 4584// Test whether the given value is a vector value which will be legalized 4585// into a load. 4586static bool WillBeConstantPoolLoad(SDNode *N) { 4587 if (N->getOpcode() != ISD::BUILD_VECTOR) 4588 return false; 4589 4590 // Check for any non-constant elements. 4591 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) 4592 switch (N->getOperand(i).getNode()->getOpcode()) { 4593 case ISD::UNDEF: 4594 case ISD::ConstantFP: 4595 case ISD::Constant: 4596 break; 4597 default: 4598 return false; 4599 } 4600 4601 // Vectors of all-zeros and all-ones are materialized with special 4602 // instructions rather than being loaded. 4603 return !ISD::isBuildVectorAllZeros(N) && 4604 !ISD::isBuildVectorAllOnes(N); 4605} 4606 4607/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to 4608/// match movlp{s|d}. The lower half elements should come from lower half of 4609/// V1 (and in order), and the upper half elements should come from the upper 4610/// half of V2 (and in order). And since V1 will become the source of the 4611/// MOVLP, it must be either a vector load or a scalar load to vector. 4612static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2, 4613 ArrayRef<int> Mask, MVT VT) { 4614 if (!VT.is128BitVector()) 4615 return false; 4616 4617 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1)) 4618 return false; 4619 // Is V2 is a vector load, don't do this transformation. We will try to use 4620 // load folding shufps op. 
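// For example, a v4f32 shuffle with mask <0, 1, 6, 7> whose V1 is a vector or
// scalar load satisfies these checks and can be matched as a movlps.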
4621 if (ISD::isNON_EXTLoad(V2) || WillBeConstantPoolLoad(V2)) 4622 return false; 4623 4624 unsigned NumElems = VT.getVectorNumElements(); 4625 4626 if (NumElems != 2 && NumElems != 4) 4627 return false; 4628 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 4629 if (!isUndefOrEqual(Mask[i], i)) 4630 return false; 4631 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i) 4632 if (!isUndefOrEqual(Mask[i], i+NumElems)) 4633 return false; 4634 return true; 4635} 4636 4637/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are 4638/// all the same. 4639static bool isSplatVector(SDNode *N) { 4640 if (N->getOpcode() != ISD::BUILD_VECTOR) 4641 return false; 4642 4643 SDValue SplatValue = N->getOperand(0); 4644 for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i) 4645 if (N->getOperand(i) != SplatValue) 4646 return false; 4647 return true; 4648} 4649 4650/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved 4651/// to an zero vector. 4652/// FIXME: move to dag combiner / method on ShuffleVectorSDNode 4653static bool isZeroShuffle(ShuffleVectorSDNode *N) { 4654 SDValue V1 = N->getOperand(0); 4655 SDValue V2 = N->getOperand(1); 4656 unsigned NumElems = N->getValueType(0).getVectorNumElements(); 4657 for (unsigned i = 0; i != NumElems; ++i) { 4658 int Idx = N->getMaskElt(i); 4659 if (Idx >= (int)NumElems) { 4660 unsigned Opc = V2.getOpcode(); 4661 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode())) 4662 continue; 4663 if (Opc != ISD::BUILD_VECTOR || 4664 !X86::isZeroNode(V2.getOperand(Idx-NumElems))) 4665 return false; 4666 } else if (Idx >= 0) { 4667 unsigned Opc = V1.getOpcode(); 4668 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode())) 4669 continue; 4670 if (Opc != ISD::BUILD_VECTOR || 4671 !X86::isZeroNode(V1.getOperand(Idx))) 4672 return false; 4673 } 4674 } 4675 return true; 4676} 4677 4678/// getZeroVector - Returns a vector of specified type with all zero elements. 4679/// 4680static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget, 4681 SelectionDAG &DAG, SDLoc dl) { 4682 assert(VT.isVector() && "Expected a vector type"); 4683 4684 // Always build SSE zero vectors as <4 x i32> bitcasted 4685 // to their dest type. This ensures they get CSE'd. 4686 SDValue Vec; 4687 if (VT.is128BitVector()) { // SSE 4688 if (Subtarget->hasSSE2()) { // SSE2 4689 SDValue Cst = DAG.getTargetConstant(0, MVT::i32); 4690 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); 4691 } else { // SSE1 4692 SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32); 4693 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst); 4694 } 4695 } else if (VT.is256BitVector()) { // AVX 4696 if (Subtarget->hasInt256()) { // AVX2 4697 SDValue Cst = DAG.getTargetConstant(0, MVT::i32); 4698 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; 4699 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops, 4700 array_lengthof(Ops)); 4701 } else { 4702 // 256-bit logic and arithmetic instructions in AVX are all 4703 // floating-point, no support for integer ops. Emit fp zeroed vectors. 
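// For example, a zero v8i32 on AVX without AVX2 is built as a v8f32 of +0.0
// (typically materialized as a single vxorps) and bitcast back to v8i32 below.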
4704 SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32); 4705 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; 4706 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops, 4707 array_lengthof(Ops)); 4708 } 4709 } else if (VT.is512BitVector()) { // AVX-512 4710 SDValue Cst = DAG.getTargetConstant(0, MVT::i32); 4711 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst, 4712 Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; 4713 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i32, Ops, 16); 4714 } else 4715 llvm_unreachable("Unexpected vector type"); 4716 4717 return DAG.getNode(ISD::BITCAST, dl, VT, Vec); 4718} 4719 4720/// getOnesVector - Returns a vector of specified type with all bits set. 4721/// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with 4722/// no AVX2 supprt, use two <4 x i32> inserted in a <8 x i32> appropriately. 4723/// Then bitcast to their original type, ensuring they get CSE'd. 4724static SDValue getOnesVector(MVT VT, bool HasInt256, SelectionDAG &DAG, 4725 SDLoc dl) { 4726 assert(VT.isVector() && "Expected a vector type"); 4727 4728 SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32); 4729 SDValue Vec; 4730 if (VT.is256BitVector()) { 4731 if (HasInt256) { // AVX2 4732 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; 4733 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops, 4734 array_lengthof(Ops)); 4735 } else { // AVX 4736 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); 4737 Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl); 4738 } 4739 } else if (VT.is128BitVector()) { 4740 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); 4741 } else 4742 llvm_unreachable("Unexpected vector type"); 4743 4744 return DAG.getNode(ISD::BITCAST, dl, VT, Vec); 4745} 4746 4747/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements 4748/// that point to V2 points to its first element. 4749static void NormalizeMask(SmallVectorImpl<int> &Mask, unsigned NumElems) { 4750 for (unsigned i = 0; i != NumElems; ++i) { 4751 if (Mask[i] > (int)NumElems) { 4752 Mask[i] = NumElems; 4753 } 4754 } 4755} 4756 4757/// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd 4758/// operation of specified width. 4759static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1, 4760 SDValue V2) { 4761 unsigned NumElems = VT.getVectorNumElements(); 4762 SmallVector<int, 8> Mask; 4763 Mask.push_back(NumElems); 4764 for (unsigned i = 1; i != NumElems; ++i) 4765 Mask.push_back(i); 4766 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]); 4767} 4768 4769/// getUnpackl - Returns a vector_shuffle node for an unpackl operation. 4770static SDValue getUnpackl(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1, 4771 SDValue V2) { 4772 unsigned NumElems = VT.getVectorNumElements(); 4773 SmallVector<int, 8> Mask; 4774 for (unsigned i = 0, e = NumElems/2; i != e; ++i) { 4775 Mask.push_back(i); 4776 Mask.push_back(i + NumElems); 4777 } 4778 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]); 4779} 4780 4781/// getUnpackh - Returns a vector_shuffle node for an unpackh operation. 
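/// For example, for v4i32 this builds the mask <2, 6, 3, 7>; the unpackl
/// helper above builds <0, 4, 1, 5>.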
4782static SDValue getUnpackh(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1, 4783 SDValue V2) { 4784 unsigned NumElems = VT.getVectorNumElements(); 4785 SmallVector<int, 8> Mask; 4786 for (unsigned i = 0, Half = NumElems/2; i != Half; ++i) { 4787 Mask.push_back(i + Half); 4788 Mask.push_back(i + NumElems + Half); 4789 } 4790 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]); 4791} 4792 4793// PromoteSplati8i16 - All i16 and i8 vector types can't be used directly by 4794// a generic shuffle instruction because the target has no such instructions. 4795// Generate shuffles which repeat i16 and i8 several times until they can be 4796// represented by v4f32 and then be manipulated by target suported shuffles. 4797static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) { 4798 MVT VT = V.getSimpleValueType(); 4799 int NumElems = VT.getVectorNumElements(); 4800 SDLoc dl(V); 4801 4802 while (NumElems > 4) { 4803 if (EltNo < NumElems/2) { 4804 V = getUnpackl(DAG, dl, VT, V, V); 4805 } else { 4806 V = getUnpackh(DAG, dl, VT, V, V); 4807 EltNo -= NumElems/2; 4808 } 4809 NumElems >>= 1; 4810 } 4811 return V; 4812} 4813 4814/// getLegalSplat - Generate a legal splat with supported x86 shuffles 4815static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) { 4816 MVT VT = V.getSimpleValueType(); 4817 SDLoc dl(V); 4818 4819 if (VT.is128BitVector()) { 4820 V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V); 4821 int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo }; 4822 V = DAG.getVectorShuffle(MVT::v4f32, dl, V, DAG.getUNDEF(MVT::v4f32), 4823 &SplatMask[0]); 4824 } else if (VT.is256BitVector()) { 4825 // To use VPERMILPS to splat scalars, the second half of indicies must 4826 // refer to the higher part, which is a duplication of the lower one, 4827 // because VPERMILPS can only handle in-lane permutations. 4828 int SplatMask[8] = { EltNo, EltNo, EltNo, EltNo, 4829 EltNo+4, EltNo+4, EltNo+4, EltNo+4 }; 4830 4831 V = DAG.getNode(ISD::BITCAST, dl, MVT::v8f32, V); 4832 V = DAG.getVectorShuffle(MVT::v8f32, dl, V, DAG.getUNDEF(MVT::v8f32), 4833 &SplatMask[0]); 4834 } else 4835 llvm_unreachable("Vector size not supported"); 4836 4837 return DAG.getNode(ISD::BITCAST, dl, VT, V); 4838} 4839 4840/// PromoteSplat - Splat is promoted to target supported vector shuffles. 4841static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) { 4842 MVT SrcVT = SV->getSimpleValueType(0); 4843 SDValue V1 = SV->getOperand(0); 4844 SDLoc dl(SV); 4845 4846 int EltNo = SV->getSplatIndex(); 4847 int NumElems = SrcVT.getVectorNumElements(); 4848 bool Is256BitVec = SrcVT.is256BitVector(); 4849 4850 assert(((SrcVT.is128BitVector() && NumElems > 4) || Is256BitVec) && 4851 "Unknown how to promote splat for type"); 4852 4853 // Extract the 128-bit part containing the splat element and update 4854 // the splat element index when it refers to the higher register. 4855 if (Is256BitVec) { 4856 V1 = Extract128BitVector(V1, EltNo, DAG, dl); 4857 if (EltNo >= NumElems/2) 4858 EltNo -= NumElems/2; 4859 } 4860 4861 // All i16 and i8 vector types can't be used directly by a generic shuffle 4862 // instruction because the target has no such instruction. Generate shuffles 4863 // which repeat i16 and i8 several times until they fit in i32, and then can 4864 // be manipulated by target suported shuffles. 
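  // For example, splatting element 5 of a v8i16 takes a single unpckh step,
  // after which the desired value fills 32-bit lane 1 and can be splatted via
  // the v4f32 shuffle in getLegalSplat.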
4865 MVT EltVT = SrcVT.getVectorElementType(); 4866 if (EltVT == MVT::i8 || EltVT == MVT::i16) 4867 V1 = PromoteSplati8i16(V1, DAG, EltNo); 4868 4869 // Recreate the 256-bit vector and place the same 128-bit vector 4870 // into the low and high part. This is necessary because we want 4871 // to use VPERM* to shuffle the vectors 4872 if (Is256BitVec) { 4873 V1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT, V1, V1); 4874 } 4875 4876 return getLegalSplat(DAG, V1, EltNo); 4877} 4878 4879/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified 4880/// vector of zero or undef vector. This produces a shuffle where the low 4881/// element of V2 is swizzled into the zero/undef vector, landing at element 4882/// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3). 4883static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx, 4884 bool IsZero, 4885 const X86Subtarget *Subtarget, 4886 SelectionDAG &DAG) { 4887 MVT VT = V2.getSimpleValueType(); 4888 SDValue V1 = IsZero 4889 ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT); 4890 unsigned NumElems = VT.getVectorNumElements(); 4891 SmallVector<int, 16> MaskVec; 4892 for (unsigned i = 0; i != NumElems; ++i) 4893 // If this is the insertion idx, put the low elt of V2 here. 4894 MaskVec.push_back(i == Idx ? NumElems : i); 4895 return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, &MaskVec[0]); 4896} 4897 4898/// getTargetShuffleMask - Calculates the shuffle mask corresponding to the 4899/// target specific opcode. Returns true if the Mask could be calculated. 4900/// Sets IsUnary to true if only uses one source. 4901static bool getTargetShuffleMask(SDNode *N, MVT VT, 4902 SmallVectorImpl<int> &Mask, bool &IsUnary) { 4903 unsigned NumElems = VT.getVectorNumElements(); 4904 SDValue ImmN; 4905 4906 IsUnary = false; 4907 switch(N->getOpcode()) { 4908 case X86ISD::SHUFP: 4909 ImmN = N->getOperand(N->getNumOperands()-1); 4910 DecodeSHUFPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); 4911 break; 4912 case X86ISD::UNPCKH: 4913 DecodeUNPCKHMask(VT, Mask); 4914 break; 4915 case X86ISD::UNPCKL: 4916 DecodeUNPCKLMask(VT, Mask); 4917 break; 4918 case X86ISD::MOVHLPS: 4919 DecodeMOVHLPSMask(NumElems, Mask); 4920 break; 4921 case X86ISD::MOVLHPS: 4922 DecodeMOVLHPSMask(NumElems, Mask); 4923 break; 4924 case X86ISD::PALIGNR: 4925 ImmN = N->getOperand(N->getNumOperands()-1); 4926 DecodePALIGNRMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); 4927 break; 4928 case X86ISD::PSHUFD: 4929 case X86ISD::VPERMILP: 4930 ImmN = N->getOperand(N->getNumOperands()-1); 4931 DecodePSHUFMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); 4932 IsUnary = true; 4933 break; 4934 case X86ISD::PSHUFHW: 4935 ImmN = N->getOperand(N->getNumOperands()-1); 4936 DecodePSHUFHWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); 4937 IsUnary = true; 4938 break; 4939 case X86ISD::PSHUFLW: 4940 ImmN = N->getOperand(N->getNumOperands()-1); 4941 DecodePSHUFLWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); 4942 IsUnary = true; 4943 break; 4944 case X86ISD::VPERMI: 4945 ImmN = N->getOperand(N->getNumOperands()-1); 4946 DecodeVPERMMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); 4947 IsUnary = true; 4948 break; 4949 case X86ISD::MOVSS: 4950 case X86ISD::MOVSD: { 4951 // The index 0 always comes from the first element of the second source, 4952 // this is why MOVSS and MOVSD are used in the first place. 
The other 4953 // elements come from the other positions of the first source vector 4954 Mask.push_back(NumElems); 4955 for (unsigned i = 1; i != NumElems; ++i) { 4956 Mask.push_back(i); 4957 } 4958 break; 4959 } 4960 case X86ISD::VPERM2X128: 4961 ImmN = N->getOperand(N->getNumOperands()-1); 4962 DecodeVPERM2X128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); 4963 if (Mask.empty()) return false; 4964 break; 4965 case X86ISD::MOVDDUP: 4966 case X86ISD::MOVLHPD: 4967 case X86ISD::MOVLPD: 4968 case X86ISD::MOVLPS: 4969 case X86ISD::MOVSHDUP: 4970 case X86ISD::MOVSLDUP: 4971 // Not yet implemented 4972 return false; 4973 default: llvm_unreachable("unknown target shuffle node"); 4974 } 4975 4976 return true; 4977} 4978 4979/// getShuffleScalarElt - Returns the scalar element that will make up the ith 4980/// element of the result of the vector shuffle. 4981static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG, 4982 unsigned Depth) { 4983 if (Depth == 6) 4984 return SDValue(); // Limit search depth. 4985 4986 SDValue V = SDValue(N, 0); 4987 EVT VT = V.getValueType(); 4988 unsigned Opcode = V.getOpcode(); 4989 4990 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars. 4991 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) { 4992 int Elt = SV->getMaskElt(Index); 4993 4994 if (Elt < 0) 4995 return DAG.getUNDEF(VT.getVectorElementType()); 4996 4997 unsigned NumElems = VT.getVectorNumElements(); 4998 SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0) 4999 : SV->getOperand(1); 5000 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1); 5001 } 5002 5003 // Recurse into target specific vector shuffles to find scalars. 5004 if (isTargetShuffle(Opcode)) { 5005 MVT ShufVT = V.getSimpleValueType(); 5006 unsigned NumElems = ShufVT.getVectorNumElements(); 5007 SmallVector<int, 16> ShuffleMask; 5008 bool IsUnary; 5009 5010 if (!getTargetShuffleMask(N, ShufVT, ShuffleMask, IsUnary)) 5011 return SDValue(); 5012 5013 int Elt = ShuffleMask[Index]; 5014 if (Elt < 0) 5015 return DAG.getUNDEF(ShufVT.getVectorElementType()); 5016 5017 SDValue NewV = (Elt < (int)NumElems) ? N->getOperand(0) 5018 : N->getOperand(1); 5019 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, 5020 Depth+1); 5021 } 5022 5023 // Actual nodes that may contain scalar elements 5024 if (Opcode == ISD::BITCAST) { 5025 V = V.getOperand(0); 5026 EVT SrcVT = V.getValueType(); 5027 unsigned NumElems = VT.getVectorNumElements(); 5028 5029 if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems) 5030 return SDValue(); 5031 } 5032 5033 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) 5034 return (Index == 0) ? V.getOperand(0) 5035 : DAG.getUNDEF(VT.getVectorElementType()); 5036 5037 if (V.getOpcode() == ISD::BUILD_VECTOR) 5038 return V.getOperand(Index); 5039 5040 return SDValue(); 5041} 5042 5043/// getNumOfConsecutiveZeros - Return the number of elements of a vector 5044/// shuffle operation which come from a consecutively from a zero. The 5045/// search can start in two different directions, from left or right. 5046/// We count undefs as zeros until PreferredNum is reached. 5047static unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp, 5048 unsigned NumElems, bool ZerosFromLeft, 5049 SelectionDAG &DAG, 5050 unsigned PreferredNum = -1U) { 5051 unsigned NumZeros = 0; 5052 for (unsigned i = 0; i != NumElems; ++i) { 5053 unsigned Index = ZerosFromLeft ? 
i : NumElems - i - 1; 5054 SDValue Elt = getShuffleScalarElt(SVOp, Index, DAG, 0); 5055 if (!Elt.getNode()) 5056 break; 5057 5058 if (X86::isZeroNode(Elt)) 5059 ++NumZeros; 5060 else if (Elt.getOpcode() == ISD::UNDEF) // Undef as zero up to PreferredNum. 5061 NumZeros = std::min(NumZeros + 1, PreferredNum); 5062 else 5063 break; 5064 } 5065 5066 return NumZeros; 5067} 5068 5069/// isShuffleMaskConsecutive - Check if the shuffle mask indices [MaskI, MaskE) 5070/// correspond consecutively to elements from one of the vector operands, 5071/// starting from its index OpIdx. Also tell OpNum which source vector operand. 5072static 5073bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp, 5074 unsigned MaskI, unsigned MaskE, unsigned OpIdx, 5075 unsigned NumElems, unsigned &OpNum) { 5076 bool SeenV1 = false; 5077 bool SeenV2 = false; 5078 5079 for (unsigned i = MaskI; i != MaskE; ++i, ++OpIdx) { 5080 int Idx = SVOp->getMaskElt(i); 5081 // Ignore undef indices 5082 if (Idx < 0) 5083 continue; 5084 5085 if (Idx < (int)NumElems) 5086 SeenV1 = true; 5087 else 5088 SeenV2 = true; 5089 5090 // Only accept consecutive elements from the same vector 5091 if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2)) 5092 return false; 5093 } 5094 5095 OpNum = SeenV1 ? 0 : 1; 5096 return true; 5097} 5098 5099/// isVectorShiftRight - Returns true if the shuffle can be implemented as a 5100/// logical right shift of a vector. 5101static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG, 5102 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) { 5103 unsigned NumElems = 5104 SVOp->getSimpleValueType(0).getVectorNumElements(); 5105 unsigned NumZeros = getNumOfConsecutiveZeros( 5106 SVOp, NumElems, false /* check zeros from right */, DAG, 5107 SVOp->getMaskElt(0)); 5108 unsigned OpSrc; 5109 5110 if (!NumZeros) 5111 return false; 5112 5113 // Considering the elements in the mask that are not consecutive zeros, 5114 // check if they consecutively come from only one of the source vectors. 5115 // 5116 // V1 = {X, A, B, C} 0 5117 // \ \ \ / 5118 // vector_shuffle V1, V2 <1, 2, 3, X> 5119 // 5120 if (!isShuffleMaskConsecutive(SVOp, 5121 0, // Mask Start Index 5122 NumElems-NumZeros, // Mask End Index(exclusive) 5123 NumZeros, // Where to start looking in the src vector 5124 NumElems, // Number of elements in vector 5125 OpSrc)) // Which source operand ? 5126 return false; 5127 5128 isLeft = false; 5129 ShAmt = NumZeros; 5130 ShVal = SVOp->getOperand(OpSrc); 5131 return true; 5132} 5133 5134/// isVectorShiftLeft - Returns true if the shuffle can be implemented as a 5135/// logical left shift of a vector. 5136static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG, 5137 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) { 5138 unsigned NumElems = 5139 SVOp->getSimpleValueType(0).getVectorNumElements(); 5140 unsigned NumZeros = getNumOfConsecutiveZeros( 5141 SVOp, NumElems, true /* check zeros from left */, DAG, 5142 NumElems - SVOp->getMaskElt(NumElems - 1) - 1); 5143 unsigned OpSrc; 5144 5145 if (!NumZeros) 5146 return false; 5147 5148 // Considering the elements in the mask that are not consecutive zeros, 5149 // check if they consecutively come from only one of the source vectors.
5150 // 5151 // 0 { A, B, X, X } = V2 5152 // / \ / / 5153 // vector_shuffle V1, V2 <X, X, 4, 5> 5154 // 5155 if (!isShuffleMaskConsecutive(SVOp, 5156 NumZeros, // Mask Start Index 5157 NumElems, // Mask End Index(exclusive) 5158 0, // Where to start looking in the src vector 5159 NumElems, // Number of elements in vector 5160 OpSrc)) // Which source operand ? 5161 return false; 5162 5163 isLeft = true; 5164 ShAmt = NumZeros; 5165 ShVal = SVOp->getOperand(OpSrc); 5166 return true; 5167} 5168 5169/// isVectorShift - Returns true if the shuffle can be implemented as a 5170/// logical left or right shift of a vector. 5171static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG, 5172 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) { 5173 // Although the logic below support any bitwidth size, there are no 5174 // shift instructions which handle more than 128-bit vectors. 5175 if (!SVOp->getSimpleValueType(0).is128BitVector()) 5176 return false; 5177 5178 if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) || 5179 isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt)) 5180 return true; 5181 5182 return false; 5183} 5184 5185/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8. 5186/// 5187static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros, 5188 unsigned NumNonZero, unsigned NumZero, 5189 SelectionDAG &DAG, 5190 const X86Subtarget* Subtarget, 5191 const TargetLowering &TLI) { 5192 if (NumNonZero > 8) 5193 return SDValue(); 5194 5195 SDLoc dl(Op); 5196 SDValue V(0, 0); 5197 bool First = true; 5198 for (unsigned i = 0; i < 16; ++i) { 5199 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0; 5200 if (ThisIsNonZero && First) { 5201 if (NumZero) 5202 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl); 5203 else 5204 V = DAG.getUNDEF(MVT::v8i16); 5205 First = false; 5206 } 5207 5208 if ((i & 1) != 0) { 5209 SDValue ThisElt(0, 0), LastElt(0, 0); 5210 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0; 5211 if (LastIsNonZero) { 5212 LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl, 5213 MVT::i16, Op.getOperand(i-1)); 5214 } 5215 if (ThisIsNonZero) { 5216 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i)); 5217 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16, 5218 ThisElt, DAG.getConstant(8, MVT::i8)); 5219 if (LastIsNonZero) 5220 ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt); 5221 } else 5222 ThisElt = LastElt; 5223 5224 if (ThisElt.getNode()) 5225 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt, 5226 DAG.getIntPtrConstant(i/2)); 5227 } 5228 } 5229 5230 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V); 5231} 5232 5233/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16. 5234/// 5235static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros, 5236 unsigned NumNonZero, unsigned NumZero, 5237 SelectionDAG &DAG, 5238 const X86Subtarget* Subtarget, 5239 const TargetLowering &TLI) { 5240 if (NumNonZero > 4) 5241 return SDValue(); 5242 5243 SDLoc dl(Op); 5244 SDValue V(0, 0); 5245 bool First = true; 5246 for (unsigned i = 0; i < 8; ++i) { 5247 bool isNonZero = (NonZeros & (1 << i)) != 0; 5248 if (isNonZero) { 5249 if (First) { 5250 if (NumZero) 5251 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl); 5252 else 5253 V = DAG.getUNDEF(MVT::v8i16); 5254 First = false; 5255 } 5256 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, 5257 MVT::v8i16, V, Op.getOperand(i), 5258 DAG.getIntPtrConstant(i)); 5259 } 5260 } 5261 5262 return V; 5263} 5264 5265/// getVShift - Return a vector logical shift node. 
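/// For example, getVShift(false, MVT::v4i32, X, 32, ...) bitcasts X to v2i64,
/// emits an X86ISD::VSRLDQ (a whole-register psrldq-style shift) of 32 bits,
/// and bitcasts the result back to v4i32.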
5266/// 5267static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, 5268 unsigned NumBits, SelectionDAG &DAG, 5269 const TargetLowering &TLI, SDLoc dl) { 5270 assert(VT.is128BitVector() && "Unknown type for VShift"); 5271 EVT ShVT = MVT::v2i64; 5272 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ; 5273 SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp); 5274 return DAG.getNode(ISD::BITCAST, dl, VT, 5275 DAG.getNode(Opc, dl, ShVT, SrcOp, 5276 DAG.getConstant(NumBits, 5277 TLI.getScalarShiftAmountTy(SrcOp.getValueType())))); 5278} 5279 5280static SDValue 5281LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, SDLoc dl, SelectionDAG &DAG) { 5282 5283 // Check if the scalar load can be widened into a vector load. And if 5284 // the address is "base + cst" see if the cst can be "absorbed" into 5285 // the shuffle mask. 5286 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) { 5287 SDValue Ptr = LD->getBasePtr(); 5288 if (!ISD::isNormalLoad(LD) || LD->isVolatile()) 5289 return SDValue(); 5290 EVT PVT = LD->getValueType(0); 5291 if (PVT != MVT::i32 && PVT != MVT::f32) 5292 return SDValue(); 5293 5294 int FI = -1; 5295 int64_t Offset = 0; 5296 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) { 5297 FI = FINode->getIndex(); 5298 Offset = 0; 5299 } else if (DAG.isBaseWithConstantOffset(Ptr) && 5300 isa<FrameIndexSDNode>(Ptr.getOperand(0))) { 5301 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 5302 Offset = Ptr.getConstantOperandVal(1); 5303 Ptr = Ptr.getOperand(0); 5304 } else { 5305 return SDValue(); 5306 } 5307 5308 // FIXME: 256-bit vector instructions don't require a strict alignment, 5309 // improve this code to support it better. 5310 unsigned RequiredAlign = VT.getSizeInBits()/8; 5311 SDValue Chain = LD->getChain(); 5312 // Make sure the stack object alignment is at least 16 or 32. 5313 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 5314 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) { 5315 if (MFI->isFixedObjectIndex(FI)) { 5316 // Can't change the alignment. FIXME: It's possible to compute 5317 // the exact stack offset and reference FI + adjust offset instead. 5318 // If someone *really* cares about this. That's the way to implement it. 5319 return SDValue(); 5320 } else { 5321 MFI->setObjectAlignment(FI, RequiredAlign); 5322 } 5323 } 5324 5325 // (Offset % 16 or 32) must be multiple of 4. Then address is then 5326 // Ptr + (Offset & ~15). 5327 if (Offset < 0) 5328 return SDValue(); 5329 if ((Offset % RequiredAlign) & 3) 5330 return SDValue(); 5331 int64_t StartOffset = Offset & ~(RequiredAlign-1); 5332 if (StartOffset) 5333 Ptr = DAG.getNode(ISD::ADD, SDLoc(Ptr), Ptr.getValueType(), 5334 Ptr,DAG.getConstant(StartOffset, Ptr.getValueType())); 5335 5336 int EltNo = (Offset - StartOffset) >> 2; 5337 unsigned NumElems = VT.getVectorNumElements(); 5338 5339 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems); 5340 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr, 5341 LD->getPointerInfo().getWithOffset(StartOffset), 5342 false, false, false, 0); 5343 5344 SmallVector<int, 8> Mask; 5345 for (unsigned i = 0; i != NumElems; ++i) 5346 Mask.push_back(EltNo); 5347 5348 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]); 5349 } 5350 5351 return SDValue(); 5352} 5353 5354/// EltsFromConsecutiveLoads - Given the initializing elements 'Elts' of a 5355/// vector of type 'VT', see if the elements can be replaced by a single large 5356/// load which has the same value as a build_vector whose operands are 'elts'. 
5357/// 5358/// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a 5359/// 5360/// FIXME: we'd also like to handle the case where the last elements are zero 5361/// rather than undef via VZEXT_LOAD, but we do not detect that case today. 5362/// There's even a handy isZeroNode for that purpose. 5363static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts, 5364 SDLoc &DL, SelectionDAG &DAG) { 5365 EVT EltVT = VT.getVectorElementType(); 5366 unsigned NumElems = Elts.size(); 5367 5368 LoadSDNode *LDBase = NULL; 5369 unsigned LastLoadedElt = -1U; 5370 5371 // For each element in the initializer, see if we've found a load or an undef. 5372 // If we don't find an initial load element, or later load elements are 5373 // non-consecutive, bail out. 5374 for (unsigned i = 0; i < NumElems; ++i) { 5375 SDValue Elt = Elts[i]; 5376 5377 if (!Elt.getNode() || 5378 (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode()))) 5379 return SDValue(); 5380 if (!LDBase) { 5381 if (Elt.getNode()->getOpcode() == ISD::UNDEF) 5382 return SDValue(); 5383 LDBase = cast<LoadSDNode>(Elt.getNode()); 5384 LastLoadedElt = i; 5385 continue; 5386 } 5387 if (Elt.getOpcode() == ISD::UNDEF) 5388 continue; 5389 5390 LoadSDNode *LD = cast<LoadSDNode>(Elt); 5391 if (!DAG.isConsecutiveLoad(LD, LDBase, EltVT.getSizeInBits()/8, i)) 5392 return SDValue(); 5393 LastLoadedElt = i; 5394 } 5395 5396 // If we have found an entire vector of loads and undefs, then return a large 5397 // load of the entire vector width starting at the base pointer. If we found 5398 // consecutive loads for the low half, generate a vzext_load node. 5399 if (LastLoadedElt == NumElems - 1) { 5400 SDValue NewLd = SDValue(); 5401 if (DAG.InferPtrAlignment(LDBase->getBasePtr()) >= 16) 5402 NewLd = DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(), 5403 LDBase->getPointerInfo(), 5404 LDBase->isVolatile(), LDBase->isNonTemporal(), 5405 LDBase->isInvariant(), 0); 5406 else NewLd = DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(), 5407 LDBase->getPointerInfo(), 5408 LDBase->isVolatile(), LDBase->isNonTemporal(), 5409 LDBase->isInvariant(), LDBase->getAlignment()); 5410 5411 if (LDBase->hasAnyUseOfValue(1)) { 5412 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, 5413 SDValue(LDBase, 1), 5414 SDValue(NewLd.getNode(), 1)); 5415 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain); 5416 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1), 5417 SDValue(NewLd.getNode(), 1)); 5418 } 5419 5420 return NewLd; 5421 } 5422 if (NumElems == 4 && LastLoadedElt == 1 && 5423 DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) { 5424 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other); 5425 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() }; 5426 SDValue ResNode = 5427 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, 5428 array_lengthof(Ops), MVT::i64, 5429 LDBase->getPointerInfo(), 5430 LDBase->getAlignment(), 5431 false/*isVolatile*/, true/*ReadMem*/, 5432 false/*WriteMem*/); 5433 5434 // Make sure the newly-created LOAD is in the same position as LDBase in 5435 // terms of dependency. We create a TokenFactor for LDBase and ResNode, and 5436 // update uses of LDBase's output chain to use the TokenFactor.
5437 if (LDBase->hasAnyUseOfValue(1)) { 5438 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, 5439 SDValue(LDBase, 1), SDValue(ResNode.getNode(), 1)); 5440 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain); 5441 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1), 5442 SDValue(ResNode.getNode(), 1)); 5443 } 5444 5445 return DAG.getNode(ISD::BITCAST, DL, VT, ResNode); 5446 } 5447 return SDValue(); 5448} 5449 5450/// LowerVectorBroadcast - Attempt to use the vbroadcast instruction 5451/// to generate a splat value for the following cases: 5452/// 1. A splat BUILD_VECTOR which uses a single scalar load, or a constant. 5453/// 2. A splat shuffle which uses a scalar_to_vector node which comes from 5454/// a scalar load, or a constant. 5455/// The VBROADCAST node is returned when a pattern is found, 5456/// or SDValue() otherwise. 5457static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget, 5458 SelectionDAG &DAG) { 5459 if (!Subtarget->hasFp256()) 5460 return SDValue(); 5461 5462 MVT VT = Op.getSimpleValueType(); 5463 SDLoc dl(Op); 5464 5465 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) && 5466 "Unsupported vector type for broadcast."); 5467 5468 SDValue Ld; 5469 bool ConstSplatVal; 5470 5471 switch (Op.getOpcode()) { 5472 default: 5473 // Unknown pattern found. 5474 return SDValue(); 5475 5476 case ISD::BUILD_VECTOR: { 5477 // The BUILD_VECTOR node must be a splat. 5478 if (!isSplatVector(Op.getNode())) 5479 return SDValue(); 5480 5481 Ld = Op.getOperand(0); 5482 ConstSplatVal = (Ld.getOpcode() == ISD::Constant || 5483 Ld.getOpcode() == ISD::ConstantFP); 5484 5485 // The suspected load node has several users. Make sure that all 5486 // of its users are from the BUILD_VECTOR node. 5487 // Constants may have multiple users. 5488 if (!ConstSplatVal && !Ld->hasNUsesOfValue(VT.getVectorNumElements(), 0)) 5489 return SDValue(); 5490 break; 5491 } 5492 5493 case ISD::VECTOR_SHUFFLE: { 5494 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 5495 5496 // Shuffles must have a splat mask where the first element is 5497 // broadcasted. 5498 if ((!SVOp->isSplat()) || SVOp->getMaskElt(0) != 0) 5499 return SDValue(); 5500 5501 SDValue Sc = Op.getOperand(0); 5502 if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR && 5503 Sc.getOpcode() != ISD::BUILD_VECTOR) { 5504 5505 if (!Subtarget->hasInt256()) 5506 return SDValue(); 5507 5508 // Use the register form of the broadcast instruction available on AVX2. 5509 if (VT.getSizeInBits() >= 256) 5510 Sc = Extract128BitVector(Sc, 0, DAG, dl); 5511 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc); 5512 } 5513 5514 Ld = Sc.getOperand(0); 5515 ConstSplatVal = (Ld.getOpcode() == ISD::Constant || 5516 Ld.getOpcode() == ISD::ConstantFP); 5517 5518 // The scalar_to_vector node and the suspected 5519 // load node must have exactly one user. 5520 // Constants may have multiple users. 5521 5522 // AVX-512 has register version of the broadcast 5523 bool hasRegVer = Subtarget->hasAVX512() && VT.is512BitVector() && 5524 Ld.getValueType().getSizeInBits() >= 32; 5525 if (!ConstSplatVal && ((!Sc.hasOneUse() || !Ld.hasOneUse()) && 5526 !hasRegVer)) 5527 return SDValue(); 5528 break; 5529 } 5530 } 5531 5532 bool IsGE256 = (VT.getSizeInBits() >= 256); 5533 5534 // Handle the broadcasting a single constant scalar from the constant pool 5535 // into a vector. On Sandybridge it is still better to load a constant vector 5536 // from the constant pool and not to broadcast it from a scalar. 
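  // For example, with AVX2 a v8f32 splat of a 32-bit constant is lowered as a
  // VBROADCAST of a single f32 constant-pool load rather than a full 32-byte
  // constant-pool vector.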
5537 if (ConstSplatVal && Subtarget->hasInt256()) { 5538 EVT CVT = Ld.getValueType(); 5539 assert(!CVT.isVector() && "Must not broadcast a vector type"); 5540 unsigned ScalarSize = CVT.getSizeInBits(); 5541 5542 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)) { 5543 const Constant *C = 0; 5544 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld)) 5545 C = CI->getConstantIntValue(); 5546 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld)) 5547 C = CF->getConstantFPValue(); 5548 5549 assert(C && "Invalid constant type"); 5550 5551 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5552 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy()); 5553 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment(); 5554 Ld = DAG.getLoad(CVT, dl, DAG.getEntryNode(), CP, 5555 MachinePointerInfo::getConstantPool(), 5556 false, false, false, Alignment); 5557 5558 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); 5559 } 5560 } 5561 5562 bool IsLoad = ISD::isNormalLoad(Ld.getNode()); 5563 unsigned ScalarSize = Ld.getValueType().getSizeInBits(); 5564 5565 // Handle AVX2 in-register broadcasts. 5566 if (!IsLoad && Subtarget->hasInt256() && 5567 (ScalarSize == 32 || (IsGE256 && ScalarSize == 64))) 5568 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); 5569 5570 // The scalar source must be a normal load. 5571 if (!IsLoad) 5572 return SDValue(); 5573 5574 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)) 5575 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); 5576 5577 // The integer check is needed for the 64-bit into 128-bit so it doesn't match 5578 // double since there is no vbroadcastsd xmm 5579 if (Subtarget->hasInt256() && Ld.getValueType().isInteger()) { 5580 if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64) 5581 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); 5582 } 5583 5584 // Unsupported broadcast. 5585 return SDValue(); 5586} 5587 5588static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) { 5589 MVT VT = Op.getSimpleValueType(); 5590 5591 // Skip if insert_vec_elt is not supported. 5592 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5593 if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT)) 5594 return SDValue(); 5595 5596 SDLoc DL(Op); 5597 unsigned NumElems = Op.getNumOperands(); 5598 5599 SDValue VecIn1; 5600 SDValue VecIn2; 5601 SmallVector<unsigned, 4> InsertIndices; 5602 SmallVector<int, 8> Mask(NumElems, -1); 5603 5604 for (unsigned i = 0; i != NumElems; ++i) { 5605 unsigned Opc = Op.getOperand(i).getOpcode(); 5606 5607 if (Opc == ISD::UNDEF) 5608 continue; 5609 5610 if (Opc != ISD::EXTRACT_VECTOR_ELT) { 5611 // Quit if more than 1 elements need inserting. 5612 if (InsertIndices.size() > 1) 5613 return SDValue(); 5614 5615 InsertIndices.push_back(i); 5616 continue; 5617 } 5618 5619 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0); 5620 SDValue ExtIdx = Op.getOperand(i).getOperand(1); 5621 5622 // Quit if extracted from vector of different type. 5623 if (ExtractedFromVec.getValueType() != VT) 5624 return SDValue(); 5625 5626 // Quit if non-constant index. 
5627 if (!isa<ConstantSDNode>(ExtIdx)) 5628 return SDValue(); 5629 5630 if (VecIn1.getNode() == 0) 5631 VecIn1 = ExtractedFromVec; 5632 else if (VecIn1 != ExtractedFromVec) { 5633 if (VecIn2.getNode() == 0) 5634 VecIn2 = ExtractedFromVec; 5635 else if (VecIn2 != ExtractedFromVec) 5636 // Quit if more than 2 vectors to shuffle 5637 return SDValue(); 5638 } 5639 5640 unsigned Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue(); 5641 5642 if (ExtractedFromVec == VecIn1) 5643 Mask[i] = Idx; 5644 else if (ExtractedFromVec == VecIn2) 5645 Mask[i] = Idx + NumElems; 5646 } 5647 5648 if (VecIn1.getNode() == 0) 5649 return SDValue(); 5650 5651 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT); 5652 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, &Mask[0]); 5653 for (unsigned i = 0, e = InsertIndices.size(); i != e; ++i) { 5654 unsigned Idx = InsertIndices[i]; 5655 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx), 5656 DAG.getIntPtrConstant(Idx)); 5657 } 5658 5659 return NV; 5660} 5661 5662// Lower BUILD_VECTOR operation for v8i1 and v16i1 types. 5663SDValue 5664X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const { 5665 5666 MVT VT = Op.getSimpleValueType(); 5667 assert((VT.getVectorElementType() == MVT::i1) && (VT.getSizeInBits() <= 16) && 5668 "Unexpected type in LowerBUILD_VECTORvXi1!"); 5669 5670 SDLoc dl(Op); 5671 if (ISD::isBuildVectorAllZeros(Op.getNode())) { 5672 SDValue Cst = DAG.getTargetConstant(0, MVT::i1); 5673 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst, 5674 Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; 5675 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, 5676 Ops, VT.getVectorNumElements()); 5677 } 5678 5679 if (ISD::isBuildVectorAllOnes(Op.getNode())) { 5680 SDValue Cst = DAG.getTargetConstant(1, MVT::i1); 5681 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst, 5682 Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; 5683 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, 5684 Ops, VT.getVectorNumElements()); 5685 } 5686 5687 bool AllContants = true; 5688 uint64_t Immediate = 0; 5689 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) { 5690 SDValue In = Op.getOperand(idx); 5691 if (In.getOpcode() == ISD::UNDEF) 5692 continue; 5693 if (!isa<ConstantSDNode>(In)) { 5694 AllContants = false; 5695 break; 5696 } 5697 if (cast<ConstantSDNode>(In)->getZExtValue()) 5698 Immediate |= (1ULL << idx); 5699 } 5700 5701 if (AllContants) { 5702 SDValue FullMask = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, 5703 DAG.getConstant(Immediate, MVT::i16)); 5704 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, FullMask, 5705 DAG.getIntPtrConstant(0)); 5706 } 5707 5708 // Splat vector (with undefs) 5709 SDValue In = Op.getOperand(0); 5710 for (unsigned i = 1, e = Op.getNumOperands(); i != e; ++i) { 5711 if (Op.getOperand(i) != In && Op.getOperand(i).getOpcode() != ISD::UNDEF) 5712 llvm_unreachable("Unsupported predicate operation"); 5713 } 5714 5715 SDValue EFLAGS, X86CC; 5716 if (In.getOpcode() == ISD::SETCC) { 5717 SDValue Op0 = In.getOperand(0); 5718 SDValue Op1 = In.getOperand(1); 5719 ISD::CondCode CC = cast<CondCodeSDNode>(In.getOperand(2))->get(); 5720 bool isFP = Op1.getValueType().isFloatingPoint(); 5721 unsigned X86CCVal = TranslateX86CC(CC, isFP, Op0, Op1, DAG); 5722 5723 assert(X86CCVal != X86::COND_INVALID && "Unsupported predicate operation"); 5724 5725 X86CC = DAG.getConstant(X86CCVal, MVT::i8); 5726 EFLAGS = EmitCmp(Op0, Op1, X86CCVal, DAG); 5727 EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG); 5728 } else if 
(In.getOpcode() == X86ISD::SETCC) { 5729 X86CC = In.getOperand(0); 5730 EFLAGS = In.getOperand(1); 5731 } else { 5732 // The algorithm: 5733 // Bit1 = In & 0x1 5734 // if (Bit1 != 0) 5735 // ZF = 0 5736 // else 5737 // ZF = 1 5738 // if (ZF == 0) 5739 // res = allOnes ### CMOVNE -1, %res 5740 // else 5741 // res = allZero 5742 MVT InVT = In.getSimpleValueType(); 5743 SDValue Bit1 = DAG.getNode(ISD::AND, dl, InVT, In, DAG.getConstant(1, InVT)); 5744 EFLAGS = EmitTest(Bit1, X86::COND_NE, DAG); 5745 X86CC = DAG.getConstant(X86::COND_NE, MVT::i8); 5746 } 5747 5748 if (VT == MVT::v16i1) { 5749 SDValue Cst1 = DAG.getConstant(-1, MVT::i16); 5750 SDValue Cst0 = DAG.getConstant(0, MVT::i16); 5751 SDValue CmovOp = DAG.getNode(X86ISD::CMOV, dl, MVT::i16, 5752 Cst0, Cst1, X86CC, EFLAGS); 5753 return DAG.getNode(ISD::BITCAST, dl, VT, CmovOp); 5754 } 5755 5756 if (VT == MVT::v8i1) { 5757 SDValue Cst1 = DAG.getConstant(-1, MVT::i32); 5758 SDValue Cst0 = DAG.getConstant(0, MVT::i32); 5759 SDValue CmovOp = DAG.getNode(X86ISD::CMOV, dl, MVT::i32, 5760 Cst0, Cst1, X86CC, EFLAGS); 5761 CmovOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, CmovOp); 5762 return DAG.getNode(ISD::BITCAST, dl, VT, CmovOp); 5763 } 5764 llvm_unreachable("Unsupported predicate operation"); 5765} 5766 5767SDValue 5768X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { 5769 SDLoc dl(Op); 5770 5771 MVT VT = Op.getSimpleValueType(); 5772 MVT ExtVT = VT.getVectorElementType(); 5773 unsigned NumElems = Op.getNumOperands(); 5774 5775 // Generate vectors for predicate vectors. 5776 if (VT.getScalarType() == MVT::i1 && Subtarget->hasAVX512()) 5777 return LowerBUILD_VECTORvXi1(Op, DAG); 5778 5779 // Vectors containing all zeros can be matched by pxor and xorps later 5780 if (ISD::isBuildVectorAllZeros(Op.getNode())) { 5781 // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd 5782 // and 2) ensure that i64 scalars are eliminated on x86-32 hosts. 5783 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32) 5784 return Op; 5785 5786 return getZeroVector(VT, Subtarget, DAG, dl); 5787 } 5788 5789 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width 5790 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use 5791 // vpcmpeqd on 256-bit vectors. 5792 if (Subtarget->hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) { 5793 if (VT == MVT::v4i32 || (VT == MVT::v8i32 && Subtarget->hasInt256())) 5794 return Op; 5795 5796 if (!VT.is512BitVector()) 5797 return getOnesVector(VT, Subtarget->hasInt256(), DAG, dl); 5798 } 5799 5800 SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG); 5801 if (Broadcast.getNode()) 5802 return Broadcast; 5803 5804 unsigned EVTBits = ExtVT.getSizeInBits(); 5805 5806 unsigned NumZero = 0; 5807 unsigned NumNonZero = 0; 5808 unsigned NonZeros = 0; 5809 bool IsAllConstants = true; 5810 SmallSet<SDValue, 8> Values; 5811 for (unsigned i = 0; i < NumElems; ++i) { 5812 SDValue Elt = Op.getOperand(i); 5813 if (Elt.getOpcode() == ISD::UNDEF) 5814 continue; 5815 Values.insert(Elt); 5816 if (Elt.getOpcode() != ISD::Constant && 5817 Elt.getOpcode() != ISD::ConstantFP) 5818 IsAllConstants = false; 5819 if (X86::isZeroNode(Elt)) 5820 NumZero++; 5821 else { 5822 NonZeros |= (1 << i); 5823 NumNonZero++; 5824 } 5825 } 5826 5827 // All undef vector. Return an UNDEF. All zero vectors were handled above. 5828 if (NumNonZero == 0) 5829 return DAG.getUNDEF(VT); 5830 5831 // Special case for single non-zero, non-undef, element. 
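  // For example, (v4i32 build_vector 0, 0, x, 0): x is moved into lane 0 with
  // a zero-extending movd-style insertion and then shuffled into lane 2.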
5832 if (NumNonZero == 1) { 5833 unsigned Idx = countTrailingZeros(NonZeros); 5834 SDValue Item = Op.getOperand(Idx); 5835 5836 // If this is an insertion of an i64 value on x86-32, and if the top bits of 5837 // the value are obviously zero, truncate the value to i32 and do the 5838 // insertion that way. Only do this if the value is non-constant or if the 5839 // value is a constant being inserted into element 0. It is cheaper to do 5840 // a constant pool load than it is to do a movd + shuffle. 5841 if (ExtVT == MVT::i64 && !Subtarget->is64Bit() && 5842 (!IsAllConstants || Idx == 0)) { 5843 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) { 5844 // Handle SSE only. 5845 assert(VT == MVT::v2i64 && "Expected an SSE value type!"); 5846 EVT VecVT = MVT::v4i32; 5847 unsigned VecElts = 4; 5848 5849 // Truncate the value (which may itself be a constant) to i32, and 5850 // convert it to a vector with movd (S2V+shuffle to zero extend). 5851 Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item); 5852 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item); 5853 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG); 5854 5855 // Now we have our 32-bit value zero extended in the low element of 5856 // a vector. If Idx != 0, swizzle it into place. 5857 if (Idx != 0) { 5858 SmallVector<int, 4> Mask; 5859 Mask.push_back(Idx); 5860 for (unsigned i = 1; i != VecElts; ++i) 5861 Mask.push_back(i); 5862 Item = DAG.getVectorShuffle(VecVT, dl, Item, DAG.getUNDEF(VecVT), 5863 &Mask[0]); 5864 } 5865 return DAG.getNode(ISD::BITCAST, dl, VT, Item); 5866 } 5867 } 5868 5869 // If we have a constant or non-constant insertion into the low element of 5870 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into 5871 // the rest of the elements. This will be matched as movd/movq/movss/movsd 5872 // depending on what the source datatype is. 5873 if (Idx == 0) { 5874 if (NumZero == 0) 5875 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); 5876 5877 if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 || 5878 (ExtVT == MVT::i64 && Subtarget->is64Bit())) { 5879 if (VT.is256BitVector() || VT.is512BitVector()) { 5880 SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl); 5881 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec, 5882 Item, DAG.getIntPtrConstant(0)); 5883 } 5884 assert(VT.is128BitVector() && "Expected an SSE value type!"); 5885 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); 5886 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector. 5887 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG); 5888 } 5889 5890 if (ExtVT == MVT::i16 || ExtVT == MVT::i8) { 5891 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item); 5892 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item); 5893 if (VT.is256BitVector()) { 5894 SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl); 5895 Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl); 5896 } else { 5897 assert(VT.is128BitVector() && "Expected an SSE value type!"); 5898 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG); 5899 } 5900 return DAG.getNode(ISD::BITCAST, dl, VT, Item); 5901 } 5902 } 5903 5904 // Is it a vector logical left shift? 
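  // For example, (v2i64 build_vector 0, x) is built by placing x in lane 0 and
  // shifting the whole 128-bit vector left by 64 bits (half its width).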
5905 if (NumElems == 2 && Idx == 1 && 5906 X86::isZeroNode(Op.getOperand(0)) && 5907 !X86::isZeroNode(Op.getOperand(1))) { 5908 unsigned NumBits = VT.getSizeInBits(); 5909 return getVShift(true, VT, 5910 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 5911 VT, Op.getOperand(1)), 5912 NumBits/2, DAG, *this, dl); 5913 } 5914 5915 if (IsAllConstants) // Otherwise, it's better to do a constpool load. 5916 return SDValue(); 5917 5918 // Otherwise, if this is a vector with i32 or f32 elements, and the element 5919 // is a non-constant being inserted into an element other than the low one, 5920 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka 5921 // movd/movss) to move this into the low element, then shuffle it into 5922 // place. 5923 if (EVTBits == 32) { 5924 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); 5925 5926 // Turn it into a shuffle of zero and zero-extended scalar to vector. 5927 Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, Subtarget, DAG); 5928 SmallVector<int, 8> MaskVec; 5929 for (unsigned i = 0; i != NumElems; ++i) 5930 MaskVec.push_back(i == Idx ? 0 : 1); 5931 return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]); 5932 } 5933 } 5934 5935 // Splat is obviously ok. Let legalizer expand it to a shuffle. 5936 if (Values.size() == 1) { 5937 if (EVTBits == 32) { 5938 // Instead of a shuffle like this: 5939 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0> 5940 // Check if it's possible to issue this instead. 5941 // shuffle (vload ptr)), undef, <1, 1, 1, 1> 5942 unsigned Idx = countTrailingZeros(NonZeros); 5943 SDValue Item = Op.getOperand(Idx); 5944 if (Op.getNode()->isOnlyUserOf(Item.getNode())) 5945 return LowerAsSplatVectorLoad(Item, VT, dl, DAG); 5946 } 5947 return SDValue(); 5948 } 5949 5950 // A vector full of immediates; various special cases are already 5951 // handled, so this is best done with a single constant-pool load. 5952 if (IsAllConstants) 5953 return SDValue(); 5954 5955 // For AVX-length vectors, build the individual 128-bit pieces and use 5956 // shuffles to put them in place. 5957 if (VT.is256BitVector()) { 5958 SmallVector<SDValue, 32> V; 5959 for (unsigned i = 0; i != NumElems; ++i) 5960 V.push_back(Op.getOperand(i)); 5961 5962 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2); 5963 5964 // Build both the lower and upper subvector. 5965 SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT, &V[0], NumElems/2); 5966 SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT, &V[NumElems / 2], 5967 NumElems/2); 5968 5969 // Recreate the wider vector with the lower and upper part. 5970 return Concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl); 5971 } 5972 5973 // Let legalizer expand 2-wide build_vectors. 5974 if (EVTBits == 64) { 5975 if (NumNonZero == 1) { 5976 // One half is zero or undef. 5977 unsigned Idx = countTrailingZeros(NonZeros); 5978 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, 5979 Op.getOperand(Idx)); 5980 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG); 5981 } 5982 return SDValue(); 5983 } 5984 5985 // If element VT is < 32 bits, convert it to inserts into a zero vector. 
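  // For example, a mostly-zero v16i8 is built by combining adjacent byte pairs
  // into i16 values and inserting them into a zeroed v8i16, which is then
  // bitcast back to v16i8 (LowerBuildVectorv16i8).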
5986 if (EVTBits == 8 && NumElems == 16) { 5987 SDValue V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG, 5988 Subtarget, *this); 5989 if (V.getNode()) return V; 5990 } 5991 5992 if (EVTBits == 16 && NumElems == 8) { 5993 SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG, 5994 Subtarget, *this); 5995 if (V.getNode()) return V; 5996 } 5997 5998 // If element VT is == 32 bits, turn it into a number of shuffles. 5999 SmallVector<SDValue, 8> V(NumElems); 6000 if (NumElems == 4 && NumZero > 0) { 6001 for (unsigned i = 0; i < 4; ++i) { 6002 bool isZero = !(NonZeros & (1 << i)); 6003 if (isZero) 6004 V[i] = getZeroVector(VT, Subtarget, DAG, dl); 6005 else 6006 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i)); 6007 } 6008 6009 for (unsigned i = 0; i < 2; ++i) { 6010 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) { 6011 default: break; 6012 case 0: 6013 V[i] = V[i*2]; // Must be a zero vector. 6014 break; 6015 case 1: 6016 V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]); 6017 break; 6018 case 2: 6019 V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]); 6020 break; 6021 case 3: 6022 V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]); 6023 break; 6024 } 6025 } 6026 6027 bool Reverse1 = (NonZeros & 0x3) == 2; 6028 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2; 6029 int MaskVec[] = { 6030 Reverse1 ? 1 : 0, 6031 Reverse1 ? 0 : 1, 6032 static_cast<int>(Reverse2 ? NumElems+1 : NumElems), 6033 static_cast<int>(Reverse2 ? NumElems : NumElems+1) 6034 }; 6035 return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]); 6036 } 6037 6038 if (Values.size() > 1 && VT.is128BitVector()) { 6039 // Check for a build vector of consecutive loads. 6040 for (unsigned i = 0; i < NumElems; ++i) 6041 V[i] = Op.getOperand(i); 6042 6043 // Check for elements which are consecutive loads. 6044 SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG); 6045 if (LD.getNode()) 6046 return LD; 6047 6048 // Check for a build vector from mostly shuffle plus few inserting. 6049 SDValue Sh = buildFromShuffleMostly(Op, DAG); 6050 if (Sh.getNode()) 6051 return Sh; 6052 6053 // For SSE 4.1, use insertps to put the high elements into the low element. 6054 if (getSubtarget()->hasSSE41()) { 6055 SDValue Result; 6056 if (Op.getOperand(0).getOpcode() != ISD::UNDEF) 6057 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0)); 6058 else 6059 Result = DAG.getUNDEF(VT); 6060 6061 for (unsigned i = 1; i < NumElems; ++i) { 6062 if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue; 6063 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result, 6064 Op.getOperand(i), DAG.getIntPtrConstant(i)); 6065 } 6066 return Result; 6067 } 6068 6069 // Otherwise, expand into a number of unpckl*, start by extending each of 6070 // our (non-undef) elements to the full vector width with the element in the 6071 // bottom slot of the vector (which generates no code for SSE). 6072 for (unsigned i = 0; i < NumElems; ++i) { 6073 if (Op.getOperand(i).getOpcode() != ISD::UNDEF) 6074 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i)); 6075 else 6076 V[i] = DAG.getUNDEF(VT); 6077 } 6078 6079 // Next, we iteratively mix elements, e.g. 
for v4f32: 6080 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0> 6081 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1> 6082 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0> 6083 unsigned EltStride = NumElems >> 1; 6084 while (EltStride != 0) { 6085 for (unsigned i = 0; i < EltStride; ++i) { 6086 // If V[i+EltStride] is undef and this is the first round of mixing, 6087 // then it is safe to just drop this shuffle: V[i] is already in the 6088 // right place, the one element (since it's the first round) being 6089 // inserted as undef can be dropped. This isn't safe for successive 6090 // rounds because they will permute elements within both vectors. 6091 if (V[i+EltStride].getOpcode() == ISD::UNDEF && 6092 EltStride == NumElems/2) 6093 continue; 6094 6095 V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]); 6096 } 6097 EltStride >>= 1; 6098 } 6099 return V[0]; 6100 } 6101 return SDValue(); 6102} 6103 6104// LowerAVXCONCAT_VECTORS - 256-bit AVX can use the vinsertf128 instruction 6105// to create 256-bit vectors from two other 128-bit ones. 6106static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 6107 SDLoc dl(Op); 6108 MVT ResVT = Op.getSimpleValueType(); 6109 6110 assert((ResVT.is256BitVector() || 6111 ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide"); 6112 6113 SDValue V1 = Op.getOperand(0); 6114 SDValue V2 = Op.getOperand(1); 6115 unsigned NumElems = ResVT.getVectorNumElements(); 6116 if(ResVT.is256BitVector()) 6117 return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl); 6118 6119 return Concat256BitVectors(V1, V2, ResVT, NumElems, DAG, dl); 6120} 6121 6122static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 6123 assert(Op.getNumOperands() == 2); 6124 6125 // AVX/AVX-512 can use the vinsertf128 instruction to create 256-bit vectors 6126 // from two other 128-bit ones. 6127 return LowerAVXCONCAT_VECTORS(Op, DAG); 6128} 6129 6130// Try to lower a shuffle node into a simple blend instruction. 6131static SDValue 6132LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp, 6133 const X86Subtarget *Subtarget, SelectionDAG &DAG) { 6134 SDValue V1 = SVOp->getOperand(0); 6135 SDValue V2 = SVOp->getOperand(1); 6136 SDLoc dl(SVOp); 6137 MVT VT = SVOp->getSimpleValueType(0); 6138 MVT EltVT = VT.getVectorElementType(); 6139 unsigned NumElems = VT.getVectorNumElements(); 6140 6141 if (!Subtarget->hasSSE41() || EltVT == MVT::i8) 6142 return SDValue(); 6143 if (!Subtarget->hasInt256() && VT == MVT::v16i16) 6144 return SDValue(); 6145 6146 // Check the mask for BLEND and build the value. 6147 unsigned MaskValue = 0; 6148 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise. 6149 unsigned NumLanes = (NumElems-1)/8 + 1; 6150 unsigned NumElemsInLane = NumElems / NumLanes; 6151 6152 // Blend for v16i16 should be symetric for the both lanes. 6153 for (unsigned i = 0; i < NumElemsInLane; ++i) { 6154 6155 int SndLaneEltIdx = (NumLanes == 2) ? 6156 SVOp->getMaskElt(i + NumElemsInLane) : -1; 6157 int EltIdx = SVOp->getMaskElt(i); 6158 6159 if ((EltIdx < 0 || EltIdx == (int)i) && 6160 (SndLaneEltIdx < 0 || SndLaneEltIdx == (int)(i + NumElemsInLane))) 6161 continue; 6162 6163 if (((unsigned)EltIdx == (i + NumElems)) && 6164 (SndLaneEltIdx < 0 || 6165 (unsigned)SndLaneEltIdx == i + NumElems + NumElemsInLane)) 6166 MaskValue |= (1<<i); 6167 else 6168 return SDValue(); 6169 } 6170 6171 // Convert i32 vectors to floating point if it is not AVX2. 6172 // AVX2 introduced VPBLENDD instruction for 128 and 256-bit vectors. 
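  // For example, a v4i32 blend without AVX2 is emitted as a v4f32
  // X86ISD::BLENDI (blendps) and the result is bitcast back to v4i32.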
6173 MVT BlendVT = VT; 6174 if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) { 6175 BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()), 6176 NumElems); 6177 V1 = DAG.getNode(ISD::BITCAST, dl, VT, V1); 6178 V2 = DAG.getNode(ISD::BITCAST, dl, VT, V2); 6179 } 6180 6181 SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, V1, V2, 6182 DAG.getConstant(MaskValue, MVT::i32)); 6183 return DAG.getNode(ISD::BITCAST, dl, VT, Ret); 6184} 6185 6186// v8i16 shuffles - Prefer shuffles in the following order: 6187// 1. [all] pshuflw, pshufhw, optional move 6188// 2. [ssse3] 1 x pshufb 6189// 3. [ssse3] 2 x pshufb + 1 x por 6190// 4. [all] mov + pshuflw + pshufhw + N x (pextrw + pinsrw) 6191static SDValue 6192LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget, 6193 SelectionDAG &DAG) { 6194 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 6195 SDValue V1 = SVOp->getOperand(0); 6196 SDValue V2 = SVOp->getOperand(1); 6197 SDLoc dl(SVOp); 6198 SmallVector<int, 8> MaskVals; 6199 6200 // Determine if more than 1 of the words in each of the low and high quadwords 6201 // of the result come from the same quadword of one of the two inputs. Undef 6202 // mask values count as coming from any quadword, for better codegen. 6203 unsigned LoQuad[] = { 0, 0, 0, 0 }; 6204 unsigned HiQuad[] = { 0, 0, 0, 0 }; 6205 std::bitset<4> InputQuads; 6206 for (unsigned i = 0; i < 8; ++i) { 6207 unsigned *Quad = i < 4 ? LoQuad : HiQuad; 6208 int EltIdx = SVOp->getMaskElt(i); 6209 MaskVals.push_back(EltIdx); 6210 if (EltIdx < 0) { 6211 ++Quad[0]; 6212 ++Quad[1]; 6213 ++Quad[2]; 6214 ++Quad[3]; 6215 continue; 6216 } 6217 ++Quad[EltIdx / 4]; 6218 InputQuads.set(EltIdx / 4); 6219 } 6220 6221 int BestLoQuad = -1; 6222 unsigned MaxQuad = 1; 6223 for (unsigned i = 0; i < 4; ++i) { 6224 if (LoQuad[i] > MaxQuad) { 6225 BestLoQuad = i; 6226 MaxQuad = LoQuad[i]; 6227 } 6228 } 6229 6230 int BestHiQuad = -1; 6231 MaxQuad = 1; 6232 for (unsigned i = 0; i < 4; ++i) { 6233 if (HiQuad[i] > MaxQuad) { 6234 BestHiQuad = i; 6235 MaxQuad = HiQuad[i]; 6236 } 6237 } 6238 6239 // For SSSE3, If all 8 words of the result come from only 1 quadword of each 6240 // of the two input vectors, shuffle them into one input vector so only a 6241 // single pshufb instruction is necessary. If There are more than 2 input 6242 // quads, disable the next transformation since it does not help SSSE3. 6243 bool V1Used = InputQuads[0] || InputQuads[1]; 6244 bool V2Used = InputQuads[2] || InputQuads[3]; 6245 if (Subtarget->hasSSSE3()) { 6246 if (InputQuads.count() == 2 && V1Used && V2Used) { 6247 BestLoQuad = InputQuads[0] ? 0 : 1; 6248 BestHiQuad = InputQuads[2] ? 2 : 3; 6249 } 6250 if (InputQuads.count() > 2) { 6251 BestLoQuad = -1; 6252 BestHiQuad = -1; 6253 } 6254 } 6255 6256 // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update 6257 // the shuffle mask. If a quad is scored as -1, that means that it contains 6258 // words from all 4 input quadwords. 6259 SDValue NewV; 6260 if (BestLoQuad >= 0 || BestHiQuad >= 0) { 6261 int MaskV[] = { 6262 BestLoQuad < 0 ? 0 : BestLoQuad, 6263 BestHiQuad < 0 ? 
1 : BestHiQuad 6264 }; 6265 NewV = DAG.getVectorShuffle(MVT::v2i64, dl, 6266 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1), 6267 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]); 6268 NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV); 6269 6270 // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the 6271 // source words for the shuffle, to aid later transformations. 6272 bool AllWordsInNewV = true; 6273 bool InOrder[2] = { true, true }; 6274 for (unsigned i = 0; i != 8; ++i) { 6275 int idx = MaskVals[i]; 6276 if (idx != (int)i) 6277 InOrder[i/4] = false; 6278 if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad) 6279 continue; 6280 AllWordsInNewV = false; 6281 break; 6282 } 6283 6284 bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV; 6285 if (AllWordsInNewV) { 6286 for (int i = 0; i != 8; ++i) { 6287 int idx = MaskVals[i]; 6288 if (idx < 0) 6289 continue; 6290 idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4; 6291 if ((idx != i) && idx < 4) 6292 pshufhw = false; 6293 if ((idx != i) && idx > 3) 6294 pshuflw = false; 6295 } 6296 V1 = NewV; 6297 V2Used = false; 6298 BestLoQuad = 0; 6299 BestHiQuad = 1; 6300 } 6301 6302 // If we've eliminated the use of V2, and the new mask is a pshuflw or 6303 // pshufhw, that's as cheap as it gets. Return the new shuffle. 6304 if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) { 6305 unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW; 6306 unsigned TargetMask = 0; 6307 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, 6308 DAG.getUNDEF(MVT::v8i16), &MaskVals[0]); 6309 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode()); 6310 TargetMask = pshufhw ? getShufflePSHUFHWImmediate(SVOp): 6311 getShufflePSHUFLWImmediate(SVOp); 6312 V1 = NewV.getOperand(0); 6313 return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG); 6314 } 6315 } 6316 6317 // Promote splats to a larger type which usually leads to more efficient code. 6318 // FIXME: Is this true if pshufb is available? 6319 if (SVOp->isSplat()) 6320 return PromoteSplat(SVOp, DAG); 6321 6322 // If we have SSSE3, and all words of the result are from 1 input vector, 6323 // case 2 is generated, otherwise case 3 is generated. If no SSSE3 6324 // is present, fall back to case 4. 6325 if (Subtarget->hasSSSE3()) { 6326 SmallVector<SDValue,16> pshufbMask; 6327 6328 // If we have elements from both input vectors, set the high bit of the 6329 // shuffle mask element to zero out elements that come from V2 in the V1 6330 // mask, and elements that come from V1 in the V2 mask, so that the two 6331 // results can be OR'd together. 6332 bool TwoInputs = V1Used && V2Used; 6333 for (unsigned i = 0; i != 8; ++i) { 6334 int EltIdx = MaskVals[i] * 2; 6335 int Idx0 = (TwoInputs && (EltIdx >= 16)) ? 0x80 : EltIdx; 6336 int Idx1 = (TwoInputs && (EltIdx >= 16)) ? 0x80 : EltIdx+1; 6337 pshufbMask.push_back(DAG.getConstant(Idx0, MVT::i8)); 6338 pshufbMask.push_back(DAG.getConstant(Idx1, MVT::i8)); 6339 } 6340 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V1); 6341 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1, 6342 DAG.getNode(ISD::BUILD_VECTOR, dl, 6343 MVT::v16i8, &pshufbMask[0], 16)); 6344 if (!TwoInputs) 6345 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 6346 6347 // Calculate the shuffle mask for the second input, shuffle it, and 6348 // OR it with the first shuffled input. 6349 pshufbMask.clear(); 6350 for (unsigned i = 0; i != 8; ++i) { 6351 int EltIdx = MaskVals[i] * 2; 6352 int Idx0 = (EltIdx < 16) ? 
0x80 : EltIdx - 16; 6353 int Idx1 = (EltIdx < 16) ? 0x80 : EltIdx - 15; 6354 pshufbMask.push_back(DAG.getConstant(Idx0, MVT::i8)); 6355 pshufbMask.push_back(DAG.getConstant(Idx1, MVT::i8)); 6356 } 6357 V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V2); 6358 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2, 6359 DAG.getNode(ISD::BUILD_VECTOR, dl, 6360 MVT::v16i8, &pshufbMask[0], 16)); 6361 V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2); 6362 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 6363 } 6364 6365 // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order, 6366 // and update MaskVals with new element order. 6367 std::bitset<8> InOrder; 6368 if (BestLoQuad >= 0) { 6369 int MaskV[] = { -1, -1, -1, -1, 4, 5, 6, 7 }; 6370 for (int i = 0; i != 4; ++i) { 6371 int idx = MaskVals[i]; 6372 if (idx < 0) { 6373 InOrder.set(i); 6374 } else if ((idx / 4) == BestLoQuad) { 6375 MaskV[i] = idx & 3; 6376 InOrder.set(i); 6377 } 6378 } 6379 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16), 6380 &MaskV[0]); 6381 6382 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3()) { 6383 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode()); 6384 NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16, 6385 NewV.getOperand(0), 6386 getShufflePSHUFLWImmediate(SVOp), DAG); 6387 } 6388 } 6389 6390 // If BestHi >= 0, generate a pshufhw to put the high elements in order, 6391 // and update MaskVals with the new element order. 6392 if (BestHiQuad >= 0) { 6393 int MaskV[] = { 0, 1, 2, 3, -1, -1, -1, -1 }; 6394 for (unsigned i = 4; i != 8; ++i) { 6395 int idx = MaskVals[i]; 6396 if (idx < 0) { 6397 InOrder.set(i); 6398 } else if ((idx / 4) == BestHiQuad) { 6399 MaskV[i] = (idx & 3) + 4; 6400 InOrder.set(i); 6401 } 6402 } 6403 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16), 6404 &MaskV[0]); 6405 6406 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3()) { 6407 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode()); 6408 NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16, 6409 NewV.getOperand(0), 6410 getShufflePSHUFHWImmediate(SVOp), DAG); 6411 } 6412 } 6413 6414 // In case BestHi & BestLo were both -1, which means each quadword has a word 6415 // from each of the four input quadwords, calculate the InOrder bitvector now 6416 // before falling through to the insert/extract cleanup. 6417 if (BestLoQuad == -1 && BestHiQuad == -1) { 6418 NewV = V1; 6419 for (int i = 0; i != 8; ++i) 6420 if (MaskVals[i] < 0 || MaskVals[i] == i) 6421 InOrder.set(i); 6422 } 6423 6424 // The other elements are put in the right place using pextrw and pinsrw. 6425 for (unsigned i = 0; i != 8; ++i) { 6426 if (InOrder[i]) 6427 continue; 6428 int EltIdx = MaskVals[i]; 6429 if (EltIdx < 0) 6430 continue; 6431 SDValue ExtOp = (EltIdx < 8) ? 6432 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1, 6433 DAG.getIntPtrConstant(EltIdx)) : 6434 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2, 6435 DAG.getIntPtrConstant(EltIdx - 8)); 6436 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp, 6437 DAG.getIntPtrConstant(i)); 6438 } 6439 return NewV; 6440} 6441 6442// v16i8 shuffles - Prefer shuffles in the following order: 6443// 1. [ssse3] 1 x pshufb 6444// 2. [ssse3] 2 x pshufb + 1 x por 6445// 3. 
[all] v8i16 shuffle + N x pextrw + rotate + pinsrw 6446static SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp, 6447 const X86Subtarget* Subtarget, 6448 SelectionDAG &DAG) { 6449 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6450 SDValue V1 = SVOp->getOperand(0); 6451 SDValue V2 = SVOp->getOperand(1); 6452 SDLoc dl(SVOp); 6453 ArrayRef<int> MaskVals = SVOp->getMask(); 6454 6455 // Promote splats to a larger type which usually leads to more efficient code. 6456 // FIXME: Is this true if pshufb is available? 6457 if (SVOp->isSplat()) 6458 return PromoteSplat(SVOp, DAG); 6459 6460 // If we have SSSE3, case 1 is generated when all result bytes come from 6461 // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is 6462 // present, fall back to case 3. 6463 6464 // If SSSE3, use 1 pshufb instruction per vector with elements in the result. 6465 if (Subtarget->hasSSSE3()) { 6466 SmallVector<SDValue,16> pshufbMask; 6467 6468 // If all result elements are from one input vector, then only translate 6469 // undef mask values to 0x80 (zero out result) in the pshufb mask. 6470 // 6471 // Otherwise, we have elements from both input vectors, and must zero out 6472 // elements that come from V2 in the first mask, and V1 in the second mask 6473 // so that we can OR them together. 6474 for (unsigned i = 0; i != 16; ++i) { 6475 int EltIdx = MaskVals[i]; 6476 if (EltIdx < 0 || EltIdx >= 16) 6477 EltIdx = 0x80; 6478 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8)); 6479 } 6480 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1, 6481 DAG.getNode(ISD::BUILD_VECTOR, dl, 6482 MVT::v16i8, &pshufbMask[0], 16)); 6483 6484 // As PSHUFB will zero elements with negative indices, it's safe to ignore 6485 // the 2nd operand if it's undefined or zero. 6486 if (V2.getOpcode() == ISD::UNDEF || 6487 ISD::isBuildVectorAllZeros(V2.getNode())) 6488 return V1; 6489 6490 // Calculate the shuffle mask for the second input, shuffle it, and 6491 // OR it with the first shuffled input. 6492 pshufbMask.clear(); 6493 for (unsigned i = 0; i != 16; ++i) { 6494 int EltIdx = MaskVals[i]; 6495 EltIdx = (EltIdx < 16) ? 0x80 : EltIdx - 16; 6496 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8)); 6497 } 6498 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2, 6499 DAG.getNode(ISD::BUILD_VECTOR, dl, 6500 MVT::v16i8, &pshufbMask[0], 16)); 6501 return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2); 6502 } 6503 6504 // No SSSE3 - Calculate in place words and then fix all out of place words 6505 // With 0-16 extracts & inserts. Worst case is 16 bytes out of order from 6506 // the 16 different words that comprise the two doublequadword input vectors. 6507 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 6508 V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2); 6509 SDValue NewV = V1; 6510 for (int i = 0; i != 8; ++i) { 6511 int Elt0 = MaskVals[i*2]; 6512 int Elt1 = MaskVals[i*2+1]; 6513 6514 // This word of the result is all undef, skip it. 6515 if (Elt0 < 0 && Elt1 < 0) 6516 continue; 6517 6518 // This word of the result is already in the correct place, skip it. 6519 if ((Elt0 == i*2) && (Elt1 == i*2+1)) 6520 continue; 6521 6522 SDValue Elt0Src = Elt0 < 16 ? V1 : V2; 6523 SDValue Elt1Src = Elt1 < 16 ? V1 : V2; 6524 SDValue InsElt; 6525 6526 // If Elt0 and Elt1 are defined, are consecutive, and can be load 6527 // using a single extract together, load it and store it. 
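    // Worked example (added for clarity): byte indices (Elt0, Elt1) == (4, 5)
    // both live in word 2 of the source, so a single pextrw of word
    // Elt1 / 2 == 2 followed by one pinsrw into word i of the result moves
    // both bytes at once.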
6528 if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) { 6529 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src, 6530 DAG.getIntPtrConstant(Elt1 / 2)); 6531 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt, 6532 DAG.getIntPtrConstant(i)); 6533 continue; 6534 } 6535 6536 // If Elt1 is defined, extract it from the appropriate source. If the 6537 // source byte is not also odd, shift the extracted word left 8 bits 6538 // otherwise clear the bottom 8 bits if we need to do an or. 6539 if (Elt1 >= 0) { 6540 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src, 6541 DAG.getIntPtrConstant(Elt1 / 2)); 6542 if ((Elt1 & 1) == 0) 6543 InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt, 6544 DAG.getConstant(8, 6545 TLI.getShiftAmountTy(InsElt.getValueType()))); 6546 else if (Elt0 >= 0) 6547 InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt, 6548 DAG.getConstant(0xFF00, MVT::i16)); 6549 } 6550 // If Elt0 is defined, extract it from the appropriate source. If the 6551 // source byte is not also even, shift the extracted word right 8 bits. If 6552 // Elt1 was also defined, OR the extracted values together before 6553 // inserting them in the result. 6554 if (Elt0 >= 0) { 6555 SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, 6556 Elt0Src, DAG.getIntPtrConstant(Elt0 / 2)); 6557 if ((Elt0 & 1) != 0) 6558 InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0, 6559 DAG.getConstant(8, 6560 TLI.getShiftAmountTy(InsElt0.getValueType()))); 6561 else if (Elt1 >= 0) 6562 InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0, 6563 DAG.getConstant(0x00FF, MVT::i16)); 6564 InsElt = Elt1 >= 0 ? DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0) 6565 : InsElt0; 6566 } 6567 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt, 6568 DAG.getIntPtrConstant(i)); 6569 } 6570 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV); 6571} 6572 6573// v32i8 shuffles - Translate to VPSHUFB if possible. 6574static 6575SDValue LowerVECTOR_SHUFFLEv32i8(ShuffleVectorSDNode *SVOp, 6576 const X86Subtarget *Subtarget, 6577 SelectionDAG &DAG) { 6578 MVT VT = SVOp->getSimpleValueType(0); 6579 SDValue V1 = SVOp->getOperand(0); 6580 SDValue V2 = SVOp->getOperand(1); 6581 SDLoc dl(SVOp); 6582 SmallVector<int, 32> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end()); 6583 6584 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; 6585 bool V1IsAllZero = ISD::isBuildVectorAllZeros(V1.getNode()); 6586 bool V2IsAllZero = ISD::isBuildVectorAllZeros(V2.getNode()); 6587 6588 // VPSHUFB may be generated if 6589 // (1) one of input vector is undefined or zeroinitializer. 6590 // The mask value 0x80 puts 0 in the corresponding slot of the vector. 6591 // And (2) the mask indexes don't cross the 128-bit lane. 6592 if (VT != MVT::v32i8 || !Subtarget->hasInt256() || 6593 (!V2IsUndef && !V2IsAllZero && !V1IsAllZero)) 6594 return SDValue(); 6595 6596 if (V1IsAllZero && !V2IsAllZero) { 6597 CommuteVectorShuffleMask(MaskVals, 32); 6598 V1 = V2; 6599 } 6600 SmallVector<SDValue, 32> pshufbMask; 6601 for (unsigned i = 0; i != 32; i++) { 6602 int EltIdx = MaskVals[i]; 6603 if (EltIdx < 0 || EltIdx >= 32) 6604 EltIdx = 0x80; 6605 else { 6606 if ((EltIdx >= 16 && i < 16) || (EltIdx < 16 && i >= 16)) 6607 // Cross lane is not allowed. 
6608 return SDValue(); 6609 EltIdx &= 0xf; 6610 } 6611 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8)); 6612 } 6613 return DAG.getNode(X86ISD::PSHUFB, dl, MVT::v32i8, V1, 6614 DAG.getNode(ISD::BUILD_VECTOR, dl, 6615 MVT::v32i8, &pshufbMask[0], 32)); 6616} 6617 6618/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide 6619/// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be 6620/// done when every pair / quad of shuffle mask elements point to elements in 6621/// the right sequence. e.g. 6622/// vector_shuffle X, Y, <2, 3, | 10, 11, | 0, 1, | 14, 15> 6623static 6624SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp, 6625 SelectionDAG &DAG) { 6626 MVT VT = SVOp->getSimpleValueType(0); 6627 SDLoc dl(SVOp); 6628 unsigned NumElems = VT.getVectorNumElements(); 6629 MVT NewVT; 6630 unsigned Scale; 6631 switch (VT.SimpleTy) { 6632 default: llvm_unreachable("Unexpected!"); 6633 case MVT::v4f32: NewVT = MVT::v2f64; Scale = 2; break; 6634 case MVT::v4i32: NewVT = MVT::v2i64; Scale = 2; break; 6635 case MVT::v8i16: NewVT = MVT::v4i32; Scale = 2; break; 6636 case MVT::v16i8: NewVT = MVT::v4i32; Scale = 4; break; 6637 case MVT::v16i16: NewVT = MVT::v8i32; Scale = 2; break; 6638 case MVT::v32i8: NewVT = MVT::v8i32; Scale = 4; break; 6639 } 6640 6641 SmallVector<int, 8> MaskVec; 6642 for (unsigned i = 0; i != NumElems; i += Scale) { 6643 int StartIdx = -1; 6644 for (unsigned j = 0; j != Scale; ++j) { 6645 int EltIdx = SVOp->getMaskElt(i+j); 6646 if (EltIdx < 0) 6647 continue; 6648 if (StartIdx < 0) 6649 StartIdx = (EltIdx / Scale); 6650 if (EltIdx != (int)(StartIdx*Scale + j)) 6651 return SDValue(); 6652 } 6653 MaskVec.push_back(StartIdx); 6654 } 6655 6656 SDValue V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(0)); 6657 SDValue V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(1)); 6658 return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]); 6659} 6660 6661/// getVZextMovL - Return a zero-extending vector move low node. 6662/// 6663static SDValue getVZextMovL(MVT VT, MVT OpVT, 6664 SDValue SrcOp, SelectionDAG &DAG, 6665 const X86Subtarget *Subtarget, SDLoc dl) { 6666 if (VT == MVT::v2f64 || VT == MVT::v4f32) { 6667 LoadSDNode *LD = NULL; 6668 if (!isScalarLoadToVector(SrcOp.getNode(), &LD)) 6669 LD = dyn_cast<LoadSDNode>(SrcOp); 6670 if (!LD) { 6671 // movssrr and movsdrr do not clear top bits. Try to use movd, movq 6672 // instead. 6673 MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32; 6674 if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) && 6675 SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR && 6676 SrcOp.getOperand(0).getOpcode() == ISD::BITCAST && 6677 SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) { 6678 // PR2108 6679 OpVT = (OpVT == MVT::v2f64) ? 
MVT::v2i64 : MVT::v4i32; 6680 return DAG.getNode(ISD::BITCAST, dl, VT, 6681 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT, 6682 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 6683 OpVT, 6684 SrcOp.getOperand(0) 6685 .getOperand(0)))); 6686 } 6687 } 6688 } 6689 6690 return DAG.getNode(ISD::BITCAST, dl, VT, 6691 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT, 6692 DAG.getNode(ISD::BITCAST, dl, 6693 OpVT, SrcOp))); 6694} 6695 6696/// LowerVECTOR_SHUFFLE_256 - Handle all 256-bit wide vectors shuffles 6697/// which could not be matched by any known target speficic shuffle 6698static SDValue 6699LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { 6700 6701 SDValue NewOp = Compact8x32ShuffleNode(SVOp, DAG); 6702 if (NewOp.getNode()) 6703 return NewOp; 6704 6705 MVT VT = SVOp->getSimpleValueType(0); 6706 6707 unsigned NumElems = VT.getVectorNumElements(); 6708 unsigned NumLaneElems = NumElems / 2; 6709 6710 SDLoc dl(SVOp); 6711 MVT EltVT = VT.getVectorElementType(); 6712 MVT NVT = MVT::getVectorVT(EltVT, NumLaneElems); 6713 SDValue Output[2]; 6714 6715 SmallVector<int, 16> Mask; 6716 for (unsigned l = 0; l < 2; ++l) { 6717 // Build a shuffle mask for the output, discovering on the fly which 6718 // input vectors to use as shuffle operands (recorded in InputUsed). 6719 // If building a suitable shuffle vector proves too hard, then bail 6720 // out with UseBuildVector set. 6721 bool UseBuildVector = false; 6722 int InputUsed[2] = { -1, -1 }; // Not yet discovered. 6723 unsigned LaneStart = l * NumLaneElems; 6724 for (unsigned i = 0; i != NumLaneElems; ++i) { 6725 // The mask element. This indexes into the input. 6726 int Idx = SVOp->getMaskElt(i+LaneStart); 6727 if (Idx < 0) { 6728 // the mask element does not index into any input vector. 6729 Mask.push_back(-1); 6730 continue; 6731 } 6732 6733 // The input vector this mask element indexes into. 6734 int Input = Idx / NumLaneElems; 6735 6736 // Turn the index into an offset from the start of the input vector. 6737 Idx -= Input * NumLaneElems; 6738 6739 // Find or create a shuffle vector operand to hold this input. 6740 unsigned OpNo; 6741 for (OpNo = 0; OpNo < array_lengthof(InputUsed); ++OpNo) { 6742 if (InputUsed[OpNo] == Input) 6743 // This input vector is already an operand. 6744 break; 6745 if (InputUsed[OpNo] < 0) { 6746 // Create a new operand for this input vector. 6747 InputUsed[OpNo] = Input; 6748 break; 6749 } 6750 } 6751 6752 if (OpNo >= array_lengthof(InputUsed)) { 6753 // More than two input vectors used! Give up on trying to create a 6754 // shuffle vector. Insert all elements into a BUILD_VECTOR instead. 6755 UseBuildVector = true; 6756 break; 6757 } 6758 6759 // Add the mask index for the new shuffle vector. 6760 Mask.push_back(Idx + OpNo * NumLaneElems); 6761 } 6762 6763 if (UseBuildVector) { 6764 SmallVector<SDValue, 16> SVOps; 6765 for (unsigned i = 0; i != NumLaneElems; ++i) { 6766 // The mask element. This indexes into the input. 6767 int Idx = SVOp->getMaskElt(i+LaneStart); 6768 if (Idx < 0) { 6769 SVOps.push_back(DAG.getUNDEF(EltVT)); 6770 continue; 6771 } 6772 6773 // The input vector this mask element indexes into. 6774 int Input = Idx / NumElems; 6775 6776 // Turn the index into an offset from the start of the input vector. 6777 Idx -= Input * NumElems; 6778 6779 // Extract the vector element by hand. 6780 SVOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 6781 SVOp->getOperand(Input), 6782 DAG.getIntPtrConstant(Idx))); 6783 } 6784 6785 // Construct the output using a BUILD_VECTOR. 
6786 Output[l] = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, &SVOps[0], 6787 SVOps.size()); 6788 } else if (InputUsed[0] < 0) { 6789 // No input vectors were used! The result is undefined. 6790 Output[l] = DAG.getUNDEF(NVT); 6791 } else { 6792 SDValue Op0 = Extract128BitVector(SVOp->getOperand(InputUsed[0] / 2), 6793 (InputUsed[0] % 2) * NumLaneElems, 6794 DAG, dl); 6795 // If only one input was used, use an undefined vector for the other. 6796 SDValue Op1 = (InputUsed[1] < 0) ? DAG.getUNDEF(NVT) : 6797 Extract128BitVector(SVOp->getOperand(InputUsed[1] / 2), 6798 (InputUsed[1] % 2) * NumLaneElems, DAG, dl); 6799 // At least one input vector was used. Create a new shuffle vector. 6800 Output[l] = DAG.getVectorShuffle(NVT, dl, Op0, Op1, &Mask[0]); 6801 } 6802 6803 Mask.clear(); 6804 } 6805 6806 // Concatenate the result back 6807 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Output[0], Output[1]); 6808} 6809 6810/// LowerVECTOR_SHUFFLE_128v4 - Handle all 128-bit wide vectors with 6811/// 4 elements, and match them with several different shuffle types. 6812static SDValue 6813LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { 6814 SDValue V1 = SVOp->getOperand(0); 6815 SDValue V2 = SVOp->getOperand(1); 6816 SDLoc dl(SVOp); 6817 MVT VT = SVOp->getSimpleValueType(0); 6818 6819 assert(VT.is128BitVector() && "Unsupported vector size"); 6820 6821 std::pair<int, int> Locs[4]; 6822 int Mask1[] = { -1, -1, -1, -1 }; 6823 SmallVector<int, 8> PermMask(SVOp->getMask().begin(), SVOp->getMask().end()); 6824 6825 unsigned NumHi = 0; 6826 unsigned NumLo = 0; 6827 for (unsigned i = 0; i != 4; ++i) { 6828 int Idx = PermMask[i]; 6829 if (Idx < 0) { 6830 Locs[i] = std::make_pair(-1, -1); 6831 } else { 6832 assert(Idx < 8 && "Invalid VECTOR_SHUFFLE index!"); 6833 if (Idx < 4) { 6834 Locs[i] = std::make_pair(0, NumLo); 6835 Mask1[NumLo] = Idx; 6836 NumLo++; 6837 } else { 6838 Locs[i] = std::make_pair(1, NumHi); 6839 if (2+NumHi < 4) 6840 Mask1[2+NumHi] = Idx; 6841 NumHi++; 6842 } 6843 } 6844 } 6845 6846 if (NumLo <= 2 && NumHi <= 2) { 6847 // If no more than two elements come from either vector. This can be 6848 // implemented with two shuffles. First shuffle gather the elements. 6849 // The second shuffle, which takes the first shuffle as both of its 6850 // vector operands, put the elements into the right order. 6851 V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]); 6852 6853 int Mask2[] = { -1, -1, -1, -1 }; 6854 6855 for (unsigned i = 0; i != 4; ++i) 6856 if (Locs[i].first != -1) { 6857 unsigned Idx = (i < 2) ? 0 : 4; 6858 Idx += Locs[i].first * 2 + Locs[i].second; 6859 Mask2[i] = Idx; 6860 } 6861 6862 return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]); 6863 } 6864 6865 if (NumLo == 3 || NumHi == 3) { 6866 // Otherwise, we must have three elements from one vector, call it X, and 6867 // one element from the other, call it Y. First, use a shufps to build an 6868 // intermediate vector with the one element from Y and the element from X 6869 // that will be in the same half in the final destination (the indexes don't 6870 // matter). Then, use a shufps to build the final vector, taking the half 6871 // containing the element from Y from the intermediate, and the other half 6872 // from X. 6873 if (NumHi == 3) { 6874 // Normalize it so the 3 elements come from V1. 6875 CommuteVectorShuffleMask(PermMask, 4); 6876 std::swap(V1, V2); 6877 } 6878 6879 // Find the element from V2. 
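    // Illustrative trace (added): with mask <4, 1, 2, 3> the element from V2
    // sits at position 0 (HiIndex == 0); the first shufps builds
    // <V2[0], u, V1[1], u>, and the second one then produces
    // <V2[0], V1[1], V1[2], V1[3]>.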
6880 unsigned HiIndex; 6881 for (HiIndex = 0; HiIndex < 3; ++HiIndex) { 6882 int Val = PermMask[HiIndex]; 6883 if (Val < 0) 6884 continue; 6885 if (Val >= 4) 6886 break; 6887 } 6888 6889 Mask1[0] = PermMask[HiIndex]; 6890 Mask1[1] = -1; 6891 Mask1[2] = PermMask[HiIndex^1]; 6892 Mask1[3] = -1; 6893 V2 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]); 6894 6895 if (HiIndex >= 2) { 6896 Mask1[0] = PermMask[0]; 6897 Mask1[1] = PermMask[1]; 6898 Mask1[2] = HiIndex & 1 ? 6 : 4; 6899 Mask1[3] = HiIndex & 1 ? 4 : 6; 6900 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]); 6901 } 6902 6903 Mask1[0] = HiIndex & 1 ? 2 : 0; 6904 Mask1[1] = HiIndex & 1 ? 0 : 2; 6905 Mask1[2] = PermMask[2]; 6906 Mask1[3] = PermMask[3]; 6907 if (Mask1[2] >= 0) 6908 Mask1[2] += 4; 6909 if (Mask1[3] >= 0) 6910 Mask1[3] += 4; 6911 return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]); 6912 } 6913 6914 // Break it into (shuffle shuffle_hi, shuffle_lo). 6915 int LoMask[] = { -1, -1, -1, -1 }; 6916 int HiMask[] = { -1, -1, -1, -1 }; 6917 6918 int *MaskPtr = LoMask; 6919 unsigned MaskIdx = 0; 6920 unsigned LoIdx = 0; 6921 unsigned HiIdx = 2; 6922 for (unsigned i = 0; i != 4; ++i) { 6923 if (i == 2) { 6924 MaskPtr = HiMask; 6925 MaskIdx = 1; 6926 LoIdx = 0; 6927 HiIdx = 2; 6928 } 6929 int Idx = PermMask[i]; 6930 if (Idx < 0) { 6931 Locs[i] = std::make_pair(-1, -1); 6932 } else if (Idx < 4) { 6933 Locs[i] = std::make_pair(MaskIdx, LoIdx); 6934 MaskPtr[LoIdx] = Idx; 6935 LoIdx++; 6936 } else { 6937 Locs[i] = std::make_pair(MaskIdx, HiIdx); 6938 MaskPtr[HiIdx] = Idx; 6939 HiIdx++; 6940 } 6941 } 6942 6943 SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]); 6944 SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]); 6945 int MaskOps[] = { -1, -1, -1, -1 }; 6946 for (unsigned i = 0; i != 4; ++i) 6947 if (Locs[i].first != -1) 6948 MaskOps[i] = Locs[i].first * 4 + Locs[i].second; 6949 return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]); 6950} 6951 6952static bool MayFoldVectorLoad(SDValue V) { 6953 while (V.hasOneUse() && V.getOpcode() == ISD::BITCAST) 6954 V = V.getOperand(0); 6955 6956 if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR) 6957 V = V.getOperand(0); 6958 if (V.hasOneUse() && V.getOpcode() == ISD::BUILD_VECTOR && 6959 V.getNumOperands() == 2 && V.getOperand(1).getOpcode() == ISD::UNDEF) 6960 // BUILD_VECTOR (load), undef 6961 V = V.getOperand(0); 6962 6963 return MayFoldLoad(V); 6964} 6965 6966static 6967SDValue getMOVDDup(SDValue &Op, SDLoc &dl, SDValue V1, SelectionDAG &DAG) { 6968 MVT VT = Op.getSimpleValueType(); 6969 6970 // Canonizalize to v2f64. 
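  // MOVDDUP duplicates the low 64 bits of its source, i.e. it yields
  // <V1[0], V1[0]> when viewed as v2f64, which is why the operand is bitcast
  // to v2f64 here and the result bitcast back to VT afterwards.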
6971 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1); 6972 return DAG.getNode(ISD::BITCAST, dl, VT, 6973 getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64, 6974 V1, DAG)); 6975} 6976 6977static 6978SDValue getMOVLowToHigh(SDValue &Op, SDLoc &dl, SelectionDAG &DAG, 6979 bool HasSSE2) { 6980 SDValue V1 = Op.getOperand(0); 6981 SDValue V2 = Op.getOperand(1); 6982 MVT VT = Op.getSimpleValueType(); 6983 6984 assert(VT != MVT::v2i64 && "unsupported shuffle type"); 6985 6986 if (HasSSE2 && VT == MVT::v2f64) 6987 return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG); 6988 6989 // v4f32 or v4i32: canonizalized to v4f32 (which is legal for SSE1) 6990 return DAG.getNode(ISD::BITCAST, dl, VT, 6991 getTargetShuffleNode(X86ISD::MOVLHPS, dl, MVT::v4f32, 6992 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V1), 6993 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V2), DAG)); 6994} 6995 6996static 6997SDValue getMOVHighToLow(SDValue &Op, SDLoc &dl, SelectionDAG &DAG) { 6998 SDValue V1 = Op.getOperand(0); 6999 SDValue V2 = Op.getOperand(1); 7000 MVT VT = Op.getSimpleValueType(); 7001 7002 assert((VT == MVT::v4i32 || VT == MVT::v4f32) && 7003 "unsupported shuffle type"); 7004 7005 if (V2.getOpcode() == ISD::UNDEF) 7006 V2 = V1; 7007 7008 // v4i32 or v4f32 7009 return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG); 7010} 7011 7012static 7013SDValue getMOVLP(SDValue &Op, SDLoc &dl, SelectionDAG &DAG, bool HasSSE2) { 7014 SDValue V1 = Op.getOperand(0); 7015 SDValue V2 = Op.getOperand(1); 7016 MVT VT = Op.getSimpleValueType(); 7017 unsigned NumElems = VT.getVectorNumElements(); 7018 7019 // Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second 7020 // operand of these instructions is only memory, so check if there's a 7021 // potencial load folding here, otherwise use SHUFPS or MOVSD to match the 7022 // same masks. 7023 bool CanFoldLoad = false; 7024 7025 // Trivial case, when V2 comes from a load. 7026 if (MayFoldVectorLoad(V2)) 7027 CanFoldLoad = true; 7028 7029 // When V1 is a load, it can be folded later into a store in isel, example: 7030 // (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1) 7031 // turns into: 7032 // (MOVLPSmr addr:$src1, VR128:$src2) 7033 // So, recognize this potential and also use MOVLPS or MOVLPD 7034 else if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op)) 7035 CanFoldLoad = true; 7036 7037 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 7038 if (CanFoldLoad) { 7039 if (HasSSE2 && NumElems == 2) 7040 return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG); 7041 7042 if (NumElems == 4) 7043 // If we don't care about the second element, proceed to use movss. 7044 if (SVOp->getMaskElt(1) != -1) 7045 return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG); 7046 } 7047 7048 // movl and movlp will both match v2i64, but v2i64 is never matched by 7049 // movl earlier because we make it strict to avoid messing with the movlp load 7050 // folding logic (see the code above getMOVLP call). Match it here then, 7051 // this is horrible, but will stay like this until we move all shuffle 7052 // matching to x86 specific nodes. Note that for the 1st condition all 7053 // types are matched with movsd. 
7054 if (HasSSE2) { 7055 // FIXME: isMOVLMask should be checked and matched before getMOVLP, 7056 // as to remove this logic from here, as much as possible 7057 if (NumElems == 2 || !isMOVLMask(SVOp->getMask(), VT)) 7058 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG); 7059 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG); 7060 } 7061 7062 assert(VT != MVT::v4i32 && "unsupported shuffle type"); 7063 7064 // Invert the operand order and use SHUFPS to match it. 7065 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V2, V1, 7066 getShuffleSHUFImmediate(SVOp), DAG); 7067} 7068 7069// Reduce a vector shuffle to zext. 7070static SDValue LowerVectorIntExtend(SDValue Op, const X86Subtarget *Subtarget, 7071 SelectionDAG &DAG) { 7072 // PMOVZX is only available from SSE41. 7073 if (!Subtarget->hasSSE41()) 7074 return SDValue(); 7075 7076 MVT VT = Op.getSimpleValueType(); 7077 7078 // Only AVX2 support 256-bit vector integer extending. 7079 if (!Subtarget->hasInt256() && VT.is256BitVector()) 7080 return SDValue(); 7081 7082 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 7083 SDLoc DL(Op); 7084 SDValue V1 = Op.getOperand(0); 7085 SDValue V2 = Op.getOperand(1); 7086 unsigned NumElems = VT.getVectorNumElements(); 7087 7088 // Extending is an unary operation and the element type of the source vector 7089 // won't be equal to or larger than i64. 7090 if (V2.getOpcode() != ISD::UNDEF || !VT.isInteger() || 7091 VT.getVectorElementType() == MVT::i64) 7092 return SDValue(); 7093 7094 // Find the expansion ratio, e.g. expanding from i8 to i32 has a ratio of 4. 7095 unsigned Shift = 1; // Start from 2, i.e. 1 << 1. 7096 while ((1U << Shift) < NumElems) { 7097 if (SVOp->getMaskElt(1U << Shift) == 1) 7098 break; 7099 Shift += 1; 7100 // The maximal ratio is 8, i.e. from i8 to i64. 7101 if (Shift > 3) 7102 return SDValue(); 7103 } 7104 7105 // Check the shuffle mask. 7106 unsigned Mask = (1U << Shift) - 1; 7107 for (unsigned i = 0; i != NumElems; ++i) { 7108 int EltIdx = SVOp->getMaskElt(i); 7109 if ((i & Mask) != 0 && EltIdx != -1) 7110 return SDValue(); 7111 if ((i & Mask) == 0 && (unsigned)EltIdx != (i >> Shift)) 7112 return SDValue(); 7113 } 7114 7115 unsigned NBits = VT.getVectorElementType().getSizeInBits() << Shift; 7116 MVT NeVT = MVT::getIntegerVT(NBits); 7117 MVT NVT = MVT::getVectorVT(NeVT, NumElems >> Shift); 7118 7119 if (!DAG.getTargetLoweringInfo().isTypeLegal(NVT)) 7120 return SDValue(); 7121 7122 // Simplify the operand as it's prepared to be fed into shuffle. 7123 unsigned SignificantBits = NVT.getSizeInBits() >> Shift; 7124 if (V1.getOpcode() == ISD::BITCAST && 7125 V1.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR && 7126 V1.getOperand(0).getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT && 7127 V1.getOperand(0).getOperand(0) 7128 .getSimpleValueType().getSizeInBits() == SignificantBits) { 7129 // (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast x) 7130 SDValue V = V1.getOperand(0).getOperand(0).getOperand(0); 7131 ConstantSDNode *CIdx = 7132 dyn_cast<ConstantSDNode>(V1.getOperand(0).getOperand(0).getOperand(1)); 7133 // If it's foldable, i.e. normal load with single use, we will let code 7134 // selection to fold it. Otherwise, we will short the conversion sequence. 
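    // In the non-foldable case the scalar round trip
    //   (bitcast (scalar_to_vector (extract_vector_elt X, 0)))
    // is collapsed to a plain bitcast of X (extracting a subvector first if X
    // is wider than V1), so the VZEXT below sees the original vector directly.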
7135 if (CIdx && CIdx->getZExtValue() == 0 && 7136 (!ISD::isNormalLoad(V.getNode()) || !V.hasOneUse())) { 7137 MVT FullVT = V.getSimpleValueType(); 7138 MVT V1VT = V1.getSimpleValueType(); 7139 if (FullVT.getSizeInBits() > V1VT.getSizeInBits()) { 7140 // The "ext_vec_elt" node is wider than the result node. 7141 // In this case we should extract subvector from V. 7142 // (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast (extract_subvector x)). 7143 unsigned Ratio = FullVT.getSizeInBits() / V1VT.getSizeInBits(); 7144 MVT SubVecVT = MVT::getVectorVT(FullVT.getVectorElementType(), 7145 FullVT.getVectorNumElements()/Ratio); 7146 V = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, V, 7147 DAG.getIntPtrConstant(0)); 7148 } 7149 V1 = DAG.getNode(ISD::BITCAST, DL, V1VT, V); 7150 } 7151 } 7152 7153 return DAG.getNode(ISD::BITCAST, DL, VT, 7154 DAG.getNode(X86ISD::VZEXT, DL, NVT, V1)); 7155} 7156 7157static SDValue 7158NormalizeVectorShuffle(SDValue Op, const X86Subtarget *Subtarget, 7159 SelectionDAG &DAG) { 7160 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 7161 MVT VT = Op.getSimpleValueType(); 7162 SDLoc dl(Op); 7163 SDValue V1 = Op.getOperand(0); 7164 SDValue V2 = Op.getOperand(1); 7165 7166 if (isZeroShuffle(SVOp)) 7167 return getZeroVector(VT, Subtarget, DAG, dl); 7168 7169 // Handle splat operations 7170 if (SVOp->isSplat()) { 7171 // Use vbroadcast whenever the splat comes from a foldable load 7172 SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG); 7173 if (Broadcast.getNode()) 7174 return Broadcast; 7175 } 7176 7177 // Check integer expanding shuffles. 7178 SDValue NewOp = LowerVectorIntExtend(Op, Subtarget, DAG); 7179 if (NewOp.getNode()) 7180 return NewOp; 7181 7182 // If the shuffle can be profitably rewritten as a narrower shuffle, then 7183 // do it! 7184 if (VT == MVT::v8i16 || VT == MVT::v16i8 || 7185 VT == MVT::v16i16 || VT == MVT::v32i8) { 7186 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG); 7187 if (NewOp.getNode()) 7188 return DAG.getNode(ISD::BITCAST, dl, VT, NewOp); 7189 } else if ((VT == MVT::v4i32 || 7190 (VT == MVT::v4f32 && Subtarget->hasSSE2()))) { 7191 // FIXME: Figure out a cleaner way to do this. 7192 // Try to make use of movq to zero out the top part. 
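    // Illustrative example (added): shuffle <4 x i32> X, zeroinitializer,
    // <0, 1, 4, 5> narrows to a v2i64 shuffle <0, 2>, which is then matched as
    // a zero-extending move (movq): the low 64 bits of X are kept and the
    // upper 64 bits are cleared.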
7193 if (ISD::isBuildVectorAllZeros(V2.getNode())) { 7194 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG); 7195 if (NewOp.getNode()) { 7196 MVT NewVT = NewOp.getSimpleValueType(); 7197 if (isCommutedMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), 7198 NewVT, true, false)) 7199 return getVZextMovL(VT, NewVT, NewOp.getOperand(0), 7200 DAG, Subtarget, dl); 7201 } 7202 } else if (ISD::isBuildVectorAllZeros(V1.getNode())) { 7203 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG); 7204 if (NewOp.getNode()) { 7205 MVT NewVT = NewOp.getSimpleValueType(); 7206 if (isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), NewVT)) 7207 return getVZextMovL(VT, NewVT, NewOp.getOperand(1), 7208 DAG, Subtarget, dl); 7209 } 7210 } 7211 } 7212 return SDValue(); 7213} 7214 7215SDValue 7216X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const { 7217 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 7218 SDValue V1 = Op.getOperand(0); 7219 SDValue V2 = Op.getOperand(1); 7220 MVT VT = Op.getSimpleValueType(); 7221 SDLoc dl(Op); 7222 unsigned NumElems = VT.getVectorNumElements(); 7223 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF; 7224 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; 7225 bool V1IsSplat = false; 7226 bool V2IsSplat = false; 7227 bool HasSSE2 = Subtarget->hasSSE2(); 7228 bool HasFp256 = Subtarget->hasFp256(); 7229 bool HasInt256 = Subtarget->hasInt256(); 7230 MachineFunction &MF = DAG.getMachineFunction(); 7231 bool OptForSize = MF.getFunction()->getAttributes(). 7232 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize); 7233 7234 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles"); 7235 7236 if (V1IsUndef && V2IsUndef) 7237 return DAG.getUNDEF(VT); 7238 7239 assert(!V1IsUndef && "Op 1 of shuffle should not be undef"); 7240 7241 // Vector shuffle lowering takes 3 steps: 7242 // 7243 // 1) Normalize the input vectors. Here splats, zeroed vectors, profitable 7244 // narrowing and commutation of operands should be handled. 7245 // 2) Matching of shuffles with known shuffle masks to x86 target specific 7246 // shuffle nodes. 7247 // 3) Rewriting of unmatched masks into new generic shuffle operations, 7248 // so the shuffle can be broken into other shuffles and the legalizer can 7249 // try the lowering again. 7250 // 7251 // The general idea is that no vector_shuffle operation should be left to 7252 // be matched during isel, all of them must be converted to a target specific 7253 // node here. 7254 7255 // Normalize the input vectors. Here splats, zeroed vectors, profitable 7256 // narrowing and commutation of operands should be handled. The actual code 7257 // doesn't include all of those, work in progress... 7258 SDValue NewOp = NormalizeVectorShuffle(Op, Subtarget, DAG); 7259 if (NewOp.getNode()) 7260 return NewOp; 7261 7262 SmallVector<int, 8> M(SVOp->getMask().begin(), SVOp->getMask().end()); 7263 7264 // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and 7265 // unpckh_undef). Only use pshufd if speed is more important than size. 
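  // E.g. a v4i32 mask <0, 0, 1, 1> can be lowered either as pshufd $0x50 or as
  // punpckldq of the input with itself; when optimizing for size the unpck
  // form wins because it needs no immediate byte.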
7266 if (OptForSize && isUNPCKL_v_undef_Mask(M, VT, HasInt256)) 7267 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG); 7268 if (OptForSize && isUNPCKH_v_undef_Mask(M, VT, HasInt256)) 7269 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG); 7270 7271 if (isMOVDDUPMask(M, VT) && Subtarget->hasSSE3() && 7272 V2IsUndef && MayFoldVectorLoad(V1)) 7273 return getMOVDDup(Op, dl, V1, DAG); 7274 7275 if (isMOVHLPS_v_undef_Mask(M, VT)) 7276 return getMOVHighToLow(Op, dl, DAG); 7277 7278 // Use to match splats 7279 if (HasSSE2 && isUNPCKHMask(M, VT, HasInt256) && V2IsUndef && 7280 (VT == MVT::v2f64 || VT == MVT::v2i64)) 7281 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG); 7282 7283 if (isPSHUFDMask(M, VT)) { 7284 // The actual implementation will match the mask in the if above and then 7285 // during isel it can match several different instructions, not only pshufd 7286 // as its name says, sad but true, emulate the behavior for now... 7287 if (isMOVDDUPMask(M, VT) && ((VT == MVT::v4f32 || VT == MVT::v2i64))) 7288 return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG); 7289 7290 unsigned TargetMask = getShuffleSHUFImmediate(SVOp); 7291 7292 if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32)) 7293 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG); 7294 7295 if (HasFp256 && (VT == MVT::v4f32 || VT == MVT::v2f64)) 7296 return getTargetShuffleNode(X86ISD::VPERMILP, dl, VT, V1, TargetMask, 7297 DAG); 7298 7299 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V1, 7300 TargetMask, DAG); 7301 } 7302 7303 if (isPALIGNRMask(M, VT, Subtarget)) 7304 return getTargetShuffleNode(X86ISD::PALIGNR, dl, VT, V1, V2, 7305 getShufflePALIGNRImmediate(SVOp), 7306 DAG); 7307 7308 // Check if this can be converted into a logical shift. 7309 bool isLeft = false; 7310 unsigned ShAmt = 0; 7311 SDValue ShVal; 7312 bool isShift = HasSSE2 && isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt); 7313 if (isShift && ShVal.hasOneUse()) { 7314 // If the shifted value has multiple uses, it may be cheaper to use 7315 // v_set0 + movlhps or movhlps, etc. 7316 MVT EltVT = VT.getVectorElementType(); 7317 ShAmt *= EltVT.getSizeInBits(); 7318 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl); 7319 } 7320 7321 if (isMOVLMask(M, VT)) { 7322 if (ISD::isBuildVectorAllZeros(V1.getNode())) 7323 return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl); 7324 if (!isMOVLPMask(M, VT)) { 7325 if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64)) 7326 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG); 7327 7328 if (VT == MVT::v4i32 || VT == MVT::v4f32) 7329 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG); 7330 } 7331 } 7332 7333 // FIXME: fold these into legal mask. 7334 if (isMOVLHPSMask(M, VT) && !isUNPCKLMask(M, VT, HasInt256)) 7335 return getMOVLowToHigh(Op, dl, DAG, HasSSE2); 7336 7337 if (isMOVHLPSMask(M, VT)) 7338 return getMOVHighToLow(Op, dl, DAG); 7339 7340 if (V2IsUndef && isMOVSHDUPMask(M, VT, Subtarget)) 7341 return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG); 7342 7343 if (V2IsUndef && isMOVSLDUPMask(M, VT, Subtarget)) 7344 return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG); 7345 7346 if (isMOVLPMask(M, VT)) 7347 return getMOVLP(Op, dl, DAG, HasSSE2); 7348 7349 if (ShouldXformToMOVHLPS(M, VT) || 7350 ShouldXformToMOVLP(V1.getNode(), V2.getNode(), M, VT)) 7351 return CommuteVectorShuffle(SVOp, DAG); 7352 7353 if (isShift) { 7354 // No better options. Use a vshldq / vsrldq. 
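    // For example, shuffle <8 x i16> X, zeroinitializer, <2, 3, 4, 5, 6, 7, 8, 9>
    // moves every element down by two and fills the top with zeros, which is a
    // byte shift (psrldq) of X by 4 bytes.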
7355 MVT EltVT = VT.getVectorElementType(); 7356 ShAmt *= EltVT.getSizeInBits(); 7357 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl); 7358 } 7359 7360 bool Commuted = false; 7361 // FIXME: This should also accept a bitcast of a splat? Be careful, not 7362 // 1,1,1,1 -> v8i16 though. 7363 V1IsSplat = isSplatVector(V1.getNode()); 7364 V2IsSplat = isSplatVector(V2.getNode()); 7365 7366 // Canonicalize the splat or undef, if present, to be on the RHS. 7367 if (!V2IsUndef && V1IsSplat && !V2IsSplat) { 7368 CommuteVectorShuffleMask(M, NumElems); 7369 std::swap(V1, V2); 7370 std::swap(V1IsSplat, V2IsSplat); 7371 Commuted = true; 7372 } 7373 7374 if (isCommutedMOVLMask(M, VT, V2IsSplat, V2IsUndef)) { 7375 // Shuffling low element of v1 into undef, just return v1. 7376 if (V2IsUndef) 7377 return V1; 7378 // If V2 is a splat, the mask may be malformed such as <4,3,3,3>, which 7379 // the instruction selector will not match, so get a canonical MOVL with 7380 // swapped operands to undo the commute. 7381 return getMOVL(DAG, dl, VT, V2, V1); 7382 } 7383 7384 if (isUNPCKLMask(M, VT, HasInt256)) 7385 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG); 7386 7387 if (isUNPCKHMask(M, VT, HasInt256)) 7388 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG); 7389 7390 if (V2IsSplat) { 7391 // Normalize mask so all entries that point to V2 points to its first 7392 // element then try to match unpck{h|l} again. If match, return a 7393 // new vector_shuffle with the corrected mask.p 7394 SmallVector<int, 8> NewMask(M.begin(), M.end()); 7395 NormalizeMask(NewMask, NumElems); 7396 if (isUNPCKLMask(NewMask, VT, HasInt256, true)) 7397 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG); 7398 if (isUNPCKHMask(NewMask, VT, HasInt256, true)) 7399 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG); 7400 } 7401 7402 if (Commuted) { 7403 // Commute is back and try unpck* again. 7404 // FIXME: this seems wrong. 7405 CommuteVectorShuffleMask(M, NumElems); 7406 std::swap(V1, V2); 7407 std::swap(V1IsSplat, V2IsSplat); 7408 Commuted = false; 7409 7410 if (isUNPCKLMask(M, VT, HasInt256)) 7411 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG); 7412 7413 if (isUNPCKHMask(M, VT, HasInt256)) 7414 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG); 7415 } 7416 7417 // Normalize the node to match x86 shuffle ops if needed 7418 if (!V2IsUndef && (isSHUFPMask(M, VT, /* Commuted */ true))) 7419 return CommuteVectorShuffle(SVOp, DAG); 7420 7421 // The checks below are all present in isShuffleMaskLegal, but they are 7422 // inlined here right now to enable us to directly emit target specific 7423 // nodes, and remove one by one until they don't return Op anymore. 
7424 7425 if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) && 7426 SVOp->getSplatIndex() == 0 && V2IsUndef) { 7427 if (VT == MVT::v2f64 || VT == MVT::v2i64) 7428 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG); 7429 } 7430 7431 if (isPSHUFHWMask(M, VT, HasInt256)) 7432 return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1, 7433 getShufflePSHUFHWImmediate(SVOp), 7434 DAG); 7435 7436 if (isPSHUFLWMask(M, VT, HasInt256)) 7437 return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1, 7438 getShufflePSHUFLWImmediate(SVOp), 7439 DAG); 7440 7441 if (isSHUFPMask(M, VT)) 7442 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V2, 7443 getShuffleSHUFImmediate(SVOp), DAG); 7444 7445 if (isUNPCKL_v_undef_Mask(M, VT, HasInt256)) 7446 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG); 7447 if (isUNPCKH_v_undef_Mask(M, VT, HasInt256)) 7448 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG); 7449 7450 //===--------------------------------------------------------------------===// 7451 // Generate target specific nodes for 128 or 256-bit shuffles only 7452 // supported in the AVX instruction set. 7453 // 7454 7455 // Handle VMOVDDUPY permutations 7456 if (V2IsUndef && isMOVDDUPYMask(M, VT, HasFp256)) 7457 return getTargetShuffleNode(X86ISD::MOVDDUP, dl, VT, V1, DAG); 7458 7459 // Handle VPERMILPS/D* permutations 7460 if (isVPERMILPMask(M, VT)) { 7461 if ((HasInt256 && VT == MVT::v8i32) || VT == MVT::v16i32) 7462 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, 7463 getShuffleSHUFImmediate(SVOp), DAG); 7464 return getTargetShuffleNode(X86ISD::VPERMILP, dl, VT, V1, 7465 getShuffleSHUFImmediate(SVOp), DAG); 7466 } 7467 7468 // Handle VPERM2F128/VPERM2I128 permutations 7469 if (isVPERM2X128Mask(M, VT, HasFp256)) 7470 return getTargetShuffleNode(X86ISD::VPERM2X128, dl, VT, V1, 7471 V2, getShuffleVPERM2X128Immediate(SVOp), DAG); 7472 7473 SDValue BlendOp = LowerVECTOR_SHUFFLEtoBlend(SVOp, Subtarget, DAG); 7474 if (BlendOp.getNode()) 7475 return BlendOp; 7476 7477 unsigned Imm8; 7478 if (V2IsUndef && HasInt256 && isPermImmMask(M, VT, Imm8)) 7479 return getTargetShuffleNode(X86ISD::VPERMI, dl, VT, V1, Imm8, DAG); 7480 7481 if ((V2IsUndef && HasInt256 && VT.is256BitVector() && NumElems == 8) || 7482 VT.is512BitVector()) { 7483 MVT MaskEltVT = MVT::getIntegerVT(VT.getVectorElementType().getSizeInBits()); 7484 MVT MaskVectorVT = MVT::getVectorVT(MaskEltVT, NumElems); 7485 SmallVector<SDValue, 16> permclMask; 7486 for (unsigned i = 0; i != NumElems; ++i) { 7487 permclMask.push_back(DAG.getConstant((M[i]>=0) ? M[i] : 0, MaskEltVT)); 7488 } 7489 7490 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVectorVT, 7491 &permclMask[0], NumElems); 7492 if (V2IsUndef) 7493 // Bitcast is for VPERMPS since mask is v8i32 but node takes v8f32 7494 return DAG.getNode(X86ISD::VPERMV, dl, VT, 7495 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V1); 7496 return DAG.getNode(X86ISD::VPERMV3, dl, VT, 7497 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V1, V2); 7498 } 7499 7500 //===--------------------------------------------------------------------===// 7501 // Since no target specific shuffle was selected for this generic one, 7502 // lower it into other known shuffles. FIXME: this isn't true yet, but 7503 // this is the plan. 7504 // 7505 7506 // Handle v8i16 specifically since SSE can do byte extraction and insertion. 
7507 if (VT == MVT::v8i16) { 7508 SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, Subtarget, DAG); 7509 if (NewOp.getNode()) 7510 return NewOp; 7511 } 7512 7513 if (VT == MVT::v16i8) { 7514 SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, Subtarget, DAG); 7515 if (NewOp.getNode()) 7516 return NewOp; 7517 } 7518 7519 if (VT == MVT::v32i8) { 7520 SDValue NewOp = LowerVECTOR_SHUFFLEv32i8(SVOp, Subtarget, DAG); 7521 if (NewOp.getNode()) 7522 return NewOp; 7523 } 7524 7525 // Handle all 128-bit wide vectors with 4 elements, and match them with 7526 // several different shuffle types. 7527 if (NumElems == 4 && VT.is128BitVector()) 7528 return LowerVECTOR_SHUFFLE_128v4(SVOp, DAG); 7529 7530 // Handle general 256-bit shuffles 7531 if (VT.is256BitVector()) 7532 return LowerVECTOR_SHUFFLE_256(SVOp, DAG); 7533 7534 return SDValue(); 7535} 7536 7537static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) { 7538 MVT VT = Op.getSimpleValueType(); 7539 SDLoc dl(Op); 7540 7541 if (!Op.getOperand(0).getSimpleValueType().is128BitVector()) 7542 return SDValue(); 7543 7544 if (VT.getSizeInBits() == 8) { 7545 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32, 7546 Op.getOperand(0), Op.getOperand(1)); 7547 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract, 7548 DAG.getValueType(VT)); 7549 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert); 7550 } 7551 7552 if (VT.getSizeInBits() == 16) { 7553 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 7554 // If Idx is 0, it's cheaper to do a move instead of a pextrw. 7555 if (Idx == 0) 7556 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, 7557 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, 7558 DAG.getNode(ISD::BITCAST, dl, 7559 MVT::v4i32, 7560 Op.getOperand(0)), 7561 Op.getOperand(1))); 7562 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32, 7563 Op.getOperand(0), Op.getOperand(1)); 7564 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract, 7565 DAG.getValueType(VT)); 7566 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert); 7567 } 7568 7569 if (VT == MVT::f32) { 7570 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy 7571 // the result back to FR32 register. It's only worth matching if the 7572 // result has a single use which is a store or a bitcast to i32. And in 7573 // the case of a store, it's not worth it if the index is a constant 0, 7574 // because a MOVSSmr can be used instead, which is smaller and faster. 7575 if (!Op.hasOneUse()) 7576 return SDValue(); 7577 SDNode *User = *Op.getNode()->use_begin(); 7578 if ((User->getOpcode() != ISD::STORE || 7579 (isa<ConstantSDNode>(Op.getOperand(1)) && 7580 cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) && 7581 (User->getOpcode() != ISD::BITCAST || 7582 User->getValueType(0) != MVT::i32)) 7583 return SDValue(); 7584 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, 7585 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, 7586 Op.getOperand(0)), 7587 Op.getOperand(1)); 7588 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract); 7589 } 7590 7591 if (VT == MVT::i32 || VT == MVT::i64) { 7592 // ExtractPS/pextrq works with constant index. 
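    // i.e. extractps and pextrq encode the lane number as an immediate, so the
    // node is returned unchanged here and matched directly during instruction
    // selection.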
7593 if (isa<ConstantSDNode>(Op.getOperand(1))) 7594 return Op; 7595 } 7596 return SDValue(); 7597} 7598 7599SDValue 7600X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, 7601 SelectionDAG &DAG) const { 7602 SDLoc dl(Op); 7603 if (!isa<ConstantSDNode>(Op.getOperand(1))) 7604 return SDValue(); 7605 7606 SDValue Vec = Op.getOperand(0); 7607 MVT VecVT = Vec.getSimpleValueType(); 7608 7609 // If this is a 256-bit vector result, first extract the 128-bit vector and 7610 // then extract the element from the 128-bit vector. 7611 if (VecVT.is256BitVector() || VecVT.is512BitVector()) { 7612 SDValue Idx = Op.getOperand(1); 7613 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue(); 7614 7615 // Get the 128-bit vector. 7616 Vec = Extract128BitVector(Vec, IdxVal, DAG, dl); 7617 MVT EltVT = VecVT.getVectorElementType(); 7618 7619 unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits(); 7620 7621 //if (IdxVal >= NumElems/2) 7622 // IdxVal -= NumElems/2; 7623 IdxVal -= (IdxVal/ElemsPerChunk)*ElemsPerChunk; 7624 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec, 7625 DAG.getConstant(IdxVal, MVT::i32)); 7626 } 7627 7628 assert(VecVT.is128BitVector() && "Unexpected vector length"); 7629 7630 if (Subtarget->hasSSE41()) { 7631 SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG); 7632 if (Res.getNode()) 7633 return Res; 7634 } 7635 7636 MVT VT = Op.getSimpleValueType(); 7637 // TODO: handle v16i8. 7638 if (VT.getSizeInBits() == 16) { 7639 SDValue Vec = Op.getOperand(0); 7640 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 7641 if (Idx == 0) 7642 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, 7643 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, 7644 DAG.getNode(ISD::BITCAST, dl, 7645 MVT::v4i32, Vec), 7646 Op.getOperand(1))); 7647 // Transform it so it match pextrw which produces a 32-bit result. 7648 MVT EltVT = MVT::i32; 7649 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT, 7650 Op.getOperand(0), Op.getOperand(1)); 7651 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract, 7652 DAG.getValueType(VT)); 7653 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert); 7654 } 7655 7656 if (VT.getSizeInBits() == 32) { 7657 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 7658 if (Idx == 0) 7659 return Op; 7660 7661 // SHUFPS the element to the lowest double word, then movss. 7662 int Mask[4] = { static_cast<int>(Idx), -1, -1, -1 }; 7663 MVT VVT = Op.getOperand(0).getSimpleValueType(); 7664 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0), 7665 DAG.getUNDEF(VVT), Mask); 7666 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec, 7667 DAG.getIntPtrConstant(0)); 7668 } 7669 7670 if (VT.getSizeInBits() == 64) { 7671 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b 7672 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught 7673 // to match extract_elt for f64. 7674 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 7675 if (Idx == 0) 7676 return Op; 7677 7678 // UNPCKHPD the element to the lowest double word, then movsd. 7679 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored 7680 // to a f64mem, the whole operation is folded into a single MOVHPDmr. 
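    // In DAG terms (illustrative): (store (f64 (extract_elt X, 1)), addr)
    // becomes (store (f64 (extract_elt (unpckhpd X, undef), 0)), addr), which
    // instruction selection can then fold into a single MOVHPDmr.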
7681 int Mask[2] = { 1, -1 }; 7682 MVT VVT = Op.getOperand(0).getSimpleValueType(); 7683 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0), 7684 DAG.getUNDEF(VVT), Mask); 7685 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec, 7686 DAG.getIntPtrConstant(0)); 7687 } 7688 7689 return SDValue(); 7690} 7691 7692static SDValue LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) { 7693 MVT VT = Op.getSimpleValueType(); 7694 MVT EltVT = VT.getVectorElementType(); 7695 SDLoc dl(Op); 7696 7697 SDValue N0 = Op.getOperand(0); 7698 SDValue N1 = Op.getOperand(1); 7699 SDValue N2 = Op.getOperand(2); 7700 7701 if (!VT.is128BitVector()) 7702 return SDValue(); 7703 7704 if ((EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) && 7705 isa<ConstantSDNode>(N2)) { 7706 unsigned Opc; 7707 if (VT == MVT::v8i16) 7708 Opc = X86ISD::PINSRW; 7709 else if (VT == MVT::v16i8) 7710 Opc = X86ISD::PINSRB; 7711 else 7712 Opc = X86ISD::PINSRB; 7713 7714 // Transform it so it match pinsr{b,w} which expects a GR32 as its second 7715 // argument. 7716 if (N1.getValueType() != MVT::i32) 7717 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1); 7718 if (N2.getValueType() != MVT::i32) 7719 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue()); 7720 return DAG.getNode(Opc, dl, VT, N0, N1, N2); 7721 } 7722 7723 if (EltVT == MVT::f32 && isa<ConstantSDNode>(N2)) { 7724 // Bits [7:6] of the constant are the source select. This will always be 7725 // zero here. The DAG Combiner may combine an extract_elt index into these 7726 // bits. For example (insert (extract, 3), 2) could be matched by putting 7727 // the '3' into bits [7:6] of X86ISD::INSERTPS. 7728 // Bits [5:4] of the constant are the destination select. This is the 7729 // value of the incoming immediate. 7730 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may 7731 // combine either bitwise AND or insert of float 0.0 to set these bits. 7732 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue() << 4); 7733 // Create this as a scalar to vector.. 7734 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1); 7735 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2); 7736 } 7737 7738 if ((EltVT == MVT::i32 || EltVT == MVT::i64) && isa<ConstantSDNode>(N2)) { 7739 // PINSR* works with constant index. 7740 return Op; 7741 } 7742 return SDValue(); 7743} 7744 7745SDValue 7746X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const { 7747 MVT VT = Op.getSimpleValueType(); 7748 MVT EltVT = VT.getVectorElementType(); 7749 7750 SDLoc dl(Op); 7751 SDValue N0 = Op.getOperand(0); 7752 SDValue N1 = Op.getOperand(1); 7753 SDValue N2 = Op.getOperand(2); 7754 7755 // If this is a 256-bit vector result, first extract the 128-bit vector, 7756 // insert the element into the extracted half and then place it back. 7757 if (VT.is256BitVector() || VT.is512BitVector()) { 7758 if (!isa<ConstantSDNode>(N2)) 7759 return SDValue(); 7760 7761 // Get the desired 128-bit vector half. 7762 unsigned IdxVal = cast<ConstantSDNode>(N2)->getZExtValue(); 7763 SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl); 7764 7765 // Insert the element into the desired half. 
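    // For example, inserting into element 5 of a v8i32: NumEltsIn128 is 4, so
    // Extract128BitVector above returns the upper 128-bit half and the element
    // is inserted there at local index 5 - (5/4)*4 == 1 before the half is
    // written back below.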
7766 unsigned NumEltsIn128 = 128/EltVT.getSizeInBits(); 7767 unsigned IdxIn128 = IdxVal - (IdxVal/NumEltsIn128) * NumEltsIn128; 7768 7769 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1, 7770 DAG.getConstant(IdxIn128, MVT::i32)); 7771 7772 // Insert the changed part back to the 256-bit vector 7773 return Insert128BitVector(N0, V, IdxVal, DAG, dl); 7774 } 7775 7776 if (Subtarget->hasSSE41()) 7777 return LowerINSERT_VECTOR_ELT_SSE4(Op, DAG); 7778 7779 if (EltVT == MVT::i8) 7780 return SDValue(); 7781 7782 if (EltVT.getSizeInBits() == 16 && isa<ConstantSDNode>(N2)) { 7783 // Transform it so it match pinsrw which expects a 16-bit value in a GR32 7784 // as its second argument. 7785 if (N1.getValueType() != MVT::i32) 7786 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1); 7787 if (N2.getValueType() != MVT::i32) 7788 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue()); 7789 return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2); 7790 } 7791 return SDValue(); 7792} 7793 7794static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) { 7795 SDLoc dl(Op); 7796 MVT OpVT = Op.getSimpleValueType(); 7797 7798 // If this is a 256-bit vector result, first insert into a 128-bit 7799 // vector and then insert into the 256-bit vector. 7800 if (!OpVT.is128BitVector()) { 7801 // Insert into a 128-bit vector. 7802 unsigned SizeFactor = OpVT.getSizeInBits()/128; 7803 MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(), 7804 OpVT.getVectorNumElements() / SizeFactor); 7805 7806 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0)); 7807 7808 // Insert the 128-bit vector. 7809 return Insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl); 7810 } 7811 7812 if (OpVT == MVT::v1i64 && 7813 Op.getOperand(0).getValueType() == MVT::i64) 7814 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0)); 7815 7816 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0)); 7817 assert(OpVT.is128BitVector() && "Expected an SSE type!"); 7818 return DAG.getNode(ISD::BITCAST, dl, OpVT, 7819 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt)); 7820} 7821 7822// Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in 7823// a simple subregister reference or explicit instructions to grab 7824// upper bits of a vector. 7825static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget, 7826 SelectionDAG &DAG) { 7827 SDLoc dl(Op); 7828 SDValue In = Op.getOperand(0); 7829 SDValue Idx = Op.getOperand(1); 7830 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue(); 7831 MVT ResVT = Op.getSimpleValueType(); 7832 MVT InVT = In.getSimpleValueType(); 7833 7834 if (Subtarget->hasFp256()) { 7835 if (ResVT.is128BitVector() && 7836 (InVT.is256BitVector() || InVT.is512BitVector()) && 7837 isa<ConstantSDNode>(Idx)) { 7838 return Extract128BitVector(In, IdxVal, DAG, dl); 7839 } 7840 if (ResVT.is256BitVector() && InVT.is512BitVector() && 7841 isa<ConstantSDNode>(Idx)) { 7842 return Extract256BitVector(In, IdxVal, DAG, dl); 7843 } 7844 } 7845 return SDValue(); 7846} 7847 7848// Lower a node with an INSERT_SUBVECTOR opcode. This may result in a 7849// simple superregister reference or explicit instructions to insert 7850// the upper bits of a vector. 
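// For example, inserting a v4f32 subvector at element index 4 of a v8f32
// replaces the upper 128-bit half, which is expected to match VINSERTF128
// with an immediate of 1 (illustrative):
//   vinsertf128 $1, %xmm1, %ymm0, %ymm0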
7851 static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
7852                                       SelectionDAG &DAG) {
7853   if (Subtarget->hasFp256()) {
7854     SDLoc dl(Op.getNode());
7855     SDValue Vec = Op.getNode()->getOperand(0);
7856     SDValue SubVec = Op.getNode()->getOperand(1);
7857     SDValue Idx = Op.getNode()->getOperand(2);
7858
7859     if ((Op.getNode()->getSimpleValueType(0).is256BitVector() ||
7860          Op.getNode()->getSimpleValueType(0).is512BitVector()) &&
7861         SubVec.getNode()->getSimpleValueType(0).is128BitVector() &&
7862         isa<ConstantSDNode>(Idx)) {
7863       unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
7864       return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl);
7865     }
7866
7867     if (Op.getNode()->getSimpleValueType(0).is512BitVector() &&
7868         SubVec.getNode()->getSimpleValueType(0).is256BitVector() &&
7869         isa<ConstantSDNode>(Idx)) {
7870       unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
7871       return Insert256BitVector(Vec, SubVec, IdxVal, DAG, dl);
7872     }
7873   }
7874   return SDValue();
7875 }
7876
7877 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
7878 // their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
7879 // one of the above-mentioned nodes. It has to be wrapped because otherwise
7880 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
7881 // be used to form an addressing mode. These wrapped nodes will be selected
7882 // into MOV32ri.
7883 SDValue
7884 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
7885   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
7886
7887   // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
7888   // global base reg.
7889   unsigned char OpFlag = 0;
7890   unsigned WrapperKind = X86ISD::Wrapper;
7891   CodeModel::Model M = getTargetMachine().getCodeModel();
7892
7893   if (Subtarget->isPICStyleRIPRel() &&
7894       (M == CodeModel::Small || M == CodeModel::Kernel))
7895     WrapperKind = X86ISD::WrapperRIP;
7896   else if (Subtarget->isPICStyleGOT())
7897     OpFlag = X86II::MO_GOTOFF;
7898   else if (Subtarget->isPICStyleStubPIC())
7899     OpFlag = X86II::MO_PIC_BASE_OFFSET;
7900
7901   SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(),
7902                                              CP->getAlignment(),
7903                                              CP->getOffset(), OpFlag);
7904   SDLoc DL(CP);
7905   Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
7906   // With PIC, the address is actually $g + Offset.
7907   if (OpFlag) {
7908     Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
7909                          DAG.getNode(X86ISD::GlobalBaseReg,
7910                                      SDLoc(), getPointerTy()),
7911                          Result);
7912   }
7913
7914   return Result;
7915 }
7916
7917 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
7918   JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
7919
7920   // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
7921   // global base reg.
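  // For example, with 32-bit PIC on ELF the jump table address ends up being
  // formed as GlobalBaseReg + JTI@GOTOFF, i.e. roughly (label illustrative):
  //   leal .LJTI0_0@GOTOFF(%ebx), %eax
  // whereas the RIP-relative small/kernel code models fold the label into a
  // single RIP-relative lea instead.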
7922 unsigned char OpFlag = 0; 7923 unsigned WrapperKind = X86ISD::Wrapper; 7924 CodeModel::Model M = getTargetMachine().getCodeModel(); 7925 7926 if (Subtarget->isPICStyleRIPRel() && 7927 (M == CodeModel::Small || M == CodeModel::Kernel)) 7928 WrapperKind = X86ISD::WrapperRIP; 7929 else if (Subtarget->isPICStyleGOT()) 7930 OpFlag = X86II::MO_GOTOFF; 7931 else if (Subtarget->isPICStyleStubPIC()) 7932 OpFlag = X86II::MO_PIC_BASE_OFFSET; 7933 7934 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(), 7935 OpFlag); 7936 SDLoc DL(JT); 7937 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result); 7938 7939 // With PIC, the address is actually $g + Offset. 7940 if (OpFlag) 7941 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(), 7942 DAG.getNode(X86ISD::GlobalBaseReg, 7943 SDLoc(), getPointerTy()), 7944 Result); 7945 7946 return Result; 7947} 7948 7949SDValue 7950X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const { 7951 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol(); 7952 7953 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the 7954 // global base reg. 7955 unsigned char OpFlag = 0; 7956 unsigned WrapperKind = X86ISD::Wrapper; 7957 CodeModel::Model M = getTargetMachine().getCodeModel(); 7958 7959 if (Subtarget->isPICStyleRIPRel() && 7960 (M == CodeModel::Small || M == CodeModel::Kernel)) { 7961 if (Subtarget->isTargetDarwin() || Subtarget->isTargetELF()) 7962 OpFlag = X86II::MO_GOTPCREL; 7963 WrapperKind = X86ISD::WrapperRIP; 7964 } else if (Subtarget->isPICStyleGOT()) { 7965 OpFlag = X86II::MO_GOT; 7966 } else if (Subtarget->isPICStyleStubPIC()) { 7967 OpFlag = X86II::MO_DARWIN_NONLAZY_PIC_BASE; 7968 } else if (Subtarget->isPICStyleStubNoDynamic()) { 7969 OpFlag = X86II::MO_DARWIN_NONLAZY; 7970 } 7971 7972 SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag); 7973 7974 SDLoc DL(Op); 7975 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result); 7976 7977 // With PIC, the address is actually $g + Offset. 7978 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 7979 !Subtarget->is64Bit()) { 7980 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(), 7981 DAG.getNode(X86ISD::GlobalBaseReg, 7982 SDLoc(), getPointerTy()), 7983 Result); 7984 } 7985 7986 // For symbols that require a load from a stub to get the address, emit the 7987 // load. 7988 if (isGlobalStubReference(OpFlag)) 7989 Result = DAG.getLoad(getPointerTy(), DL, DAG.getEntryNode(), Result, 7990 MachinePointerInfo::getGOT(), false, false, false, 0); 7991 7992 return Result; 7993} 7994 7995SDValue 7996X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const { 7997 // Create the TargetBlockAddressAddress node. 7998 unsigned char OpFlags = 7999 Subtarget->ClassifyBlockAddressReference(); 8000 CodeModel::Model M = getTargetMachine().getCodeModel(); 8001 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 8002 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset(); 8003 SDLoc dl(Op); 8004 SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(), Offset, 8005 OpFlags); 8006 8007 if (Subtarget->isPICStyleRIPRel() && 8008 (M == CodeModel::Small || M == CodeModel::Kernel)) 8009 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result); 8010 else 8011 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result); 8012 8013 // With PIC, the address is actually $g + Offset. 
8014 if (isGlobalRelativeToPICBase(OpFlags)) { 8015 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), 8016 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()), 8017 Result); 8018 } 8019 8020 return Result; 8021} 8022 8023SDValue 8024X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, SDLoc dl, 8025 int64_t Offset, SelectionDAG &DAG) const { 8026 // Create the TargetGlobalAddress node, folding in the constant 8027 // offset if it is legal. 8028 unsigned char OpFlags = 8029 Subtarget->ClassifyGlobalReference(GV, getTargetMachine()); 8030 CodeModel::Model M = getTargetMachine().getCodeModel(); 8031 SDValue Result; 8032 if (OpFlags == X86II::MO_NO_FLAG && 8033 X86::isOffsetSuitableForCodeModel(Offset, M)) { 8034 // A direct static reference to a global. 8035 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset); 8036 Offset = 0; 8037 } else { 8038 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags); 8039 } 8040 8041 if (Subtarget->isPICStyleRIPRel() && 8042 (M == CodeModel::Small || M == CodeModel::Kernel)) 8043 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result); 8044 else 8045 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result); 8046 8047 // With PIC, the address is actually $g + Offset. 8048 if (isGlobalRelativeToPICBase(OpFlags)) { 8049 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), 8050 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()), 8051 Result); 8052 } 8053 8054 // For globals that require a load from a stub to get the address, emit the 8055 // load. 8056 if (isGlobalStubReference(OpFlags)) 8057 Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result, 8058 MachinePointerInfo::getGOT(), false, false, false, 0); 8059 8060 // If there was a non-zero offset that we didn't fold, create an explicit 8061 // addition for it. 8062 if (Offset != 0) 8063 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result, 8064 DAG.getConstant(Offset, getPointerTy())); 8065 8066 return Result; 8067} 8068 8069SDValue 8070X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const { 8071 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 8072 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset(); 8073 return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG); 8074} 8075 8076static SDValue 8077GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA, 8078 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg, 8079 unsigned char OperandFlags, bool LocalDynamic = false) { 8080 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 8081 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 8082 SDLoc dl(GA); 8083 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, 8084 GA->getValueType(0), 8085 GA->getOffset(), 8086 OperandFlags); 8087 8088 X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR 8089 : X86ISD::TLSADDR; 8090 8091 if (InFlag) { 8092 SDValue Ops[] = { Chain, TGA, *InFlag }; 8093 Chain = DAG.getNode(CallType, dl, NodeTys, Ops, array_lengthof(Ops)); 8094 } else { 8095 SDValue Ops[] = { Chain, TGA }; 8096 Chain = DAG.getNode(CallType, dl, NodeTys, Ops, array_lengthof(Ops)); 8097 } 8098 8099 // TLSADDR will be codegen'ed as call. Inform MFI that function has calls. 
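  // For the 32-bit general-dynamic case that pseudo is expected to expand to
  // roughly (exact encoding is dictated by the TLS ABI):
  //   leal  x@TLSGD(,%ebx,1), %eax
  //   calll ___tls_get_addr@PLT
  // with the variable's address coming back in ReturnReg (EAX/RAX), which is
  // what the CopyFromReg below reads.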
8100 MFI->setAdjustsStack(true); 8101 8102 SDValue Flag = Chain.getValue(1); 8103 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag); 8104} 8105 8106// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit 8107static SDValue 8108LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG, 8109 const EVT PtrVT) { 8110 SDValue InFlag; 8111 SDLoc dl(GA); // ? function entry point might be better 8112 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX, 8113 DAG.getNode(X86ISD::GlobalBaseReg, 8114 SDLoc(), PtrVT), InFlag); 8115 InFlag = Chain.getValue(1); 8116 8117 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD); 8118} 8119 8120// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit 8121static SDValue 8122LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG, 8123 const EVT PtrVT) { 8124 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, NULL, PtrVT, 8125 X86::RAX, X86II::MO_TLSGD); 8126} 8127 8128static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA, 8129 SelectionDAG &DAG, 8130 const EVT PtrVT, 8131 bool is64Bit) { 8132 SDLoc dl(GA); 8133 8134 // Get the start address of the TLS block for this module. 8135 X86MachineFunctionInfo* MFI = DAG.getMachineFunction() 8136 .getInfo<X86MachineFunctionInfo>(); 8137 MFI->incNumLocalDynamicTLSAccesses(); 8138 8139 SDValue Base; 8140 if (is64Bit) { 8141 Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, NULL, PtrVT, X86::RAX, 8142 X86II::MO_TLSLD, /*LocalDynamic=*/true); 8143 } else { 8144 SDValue InFlag; 8145 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX, 8146 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag); 8147 InFlag = Chain.getValue(1); 8148 Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, 8149 X86II::MO_TLSLDM, /*LocalDynamic=*/true); 8150 } 8151 8152 // Note: the CleanupLocalDynamicTLSPass will remove redundant computations 8153 // of Base. 8154 8155 // Build x@dtpoff. 8156 unsigned char OperandFlags = X86II::MO_DTPOFF; 8157 unsigned WrapperKind = X86ISD::Wrapper; 8158 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, 8159 GA->getValueType(0), 8160 GA->getOffset(), OperandFlags); 8161 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA); 8162 8163 // Add x@dtpoff with the base. 8164 return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base); 8165} 8166 8167// Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model. 8168static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, 8169 const EVT PtrVT, TLSModel::Model model, 8170 bool is64Bit, bool isPIC) { 8171 SDLoc dl(GA); 8172 8173 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit). 8174 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(), 8175 is64Bit ? 257 : 256)); 8176 8177 SDValue ThreadPointer = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 8178 DAG.getIntPtrConstant(0), 8179 MachinePointerInfo(Ptr), 8180 false, false, false, 0); 8181 8182 unsigned char OperandFlags = 0; 8183 // Most TLS accesses are not RIP relative, even on x86-64. One exception is 8184 // initialexec. 8185 unsigned WrapperKind = X86ISD::Wrapper; 8186 if (model == TLSModel::LocalExec) { 8187 OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF; 8188 } else if (model == TLSModel::InitialExec) { 8189 if (is64Bit) { 8190 OperandFlags = X86II::MO_GOTTPOFF; 8191 WrapperKind = X86ISD::WrapperRIP; 8192 } else { 8193 OperandFlags = isPIC ? 
X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF; 8194 } 8195 } else { 8196 llvm_unreachable("Unexpected model"); 8197 } 8198 8199 // emit "addl x@ntpoff,%eax" (local exec) 8200 // or "addl x@indntpoff,%eax" (initial exec) 8201 // or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic) 8202 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, 8203 GA->getValueType(0), 8204 GA->getOffset(), OperandFlags); 8205 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA); 8206 8207 if (model == TLSModel::InitialExec) { 8208 if (isPIC && !is64Bit) { 8209 Offset = DAG.getNode(ISD::ADD, dl, PtrVT, 8210 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), 8211 Offset); 8212 } 8213 8214 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset, 8215 MachinePointerInfo::getGOT(), false, false, false, 8216 0); 8217 } 8218 8219 // The address of the thread local variable is the add of the thread 8220 // pointer with the offset of the variable. 8221 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); 8222} 8223 8224SDValue 8225X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { 8226 8227 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 8228 const GlobalValue *GV = GA->getGlobal(); 8229 8230 if (Subtarget->isTargetELF()) { 8231 TLSModel::Model model = getTargetMachine().getTLSModel(GV); 8232 8233 switch (model) { 8234 case TLSModel::GeneralDynamic: 8235 if (Subtarget->is64Bit()) 8236 return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy()); 8237 return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy()); 8238 case TLSModel::LocalDynamic: 8239 return LowerToTLSLocalDynamicModel(GA, DAG, getPointerTy(), 8240 Subtarget->is64Bit()); 8241 case TLSModel::InitialExec: 8242 case TLSModel::LocalExec: 8243 return LowerToTLSExecModel(GA, DAG, getPointerTy(), model, 8244 Subtarget->is64Bit(), 8245 getTargetMachine().getRelocationModel() == Reloc::PIC_); 8246 } 8247 llvm_unreachable("Unknown TLS model."); 8248 } 8249 8250 if (Subtarget->isTargetDarwin()) { 8251 // Darwin only has one model of TLS. Lower to that. 8252 unsigned char OpFlag = 0; 8253 unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ? 8254 X86ISD::WrapperRIP : X86ISD::Wrapper; 8255 8256 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the 8257 // global base reg. 8258 bool PIC32 = (getTargetMachine().getRelocationModel() == Reloc::PIC_) && 8259 !Subtarget->is64Bit(); 8260 if (PIC32) 8261 OpFlag = X86II::MO_TLVP_PIC_BASE; 8262 else 8263 OpFlag = X86II::MO_TLVP; 8264 SDLoc DL(Op); 8265 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL, 8266 GA->getValueType(0), 8267 GA->getOffset(), OpFlag); 8268 SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result); 8269 8270 // With PIC32, the address is actually $g + Offset. 8271 if (PIC32) 8272 Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(), 8273 DAG.getNode(X86ISD::GlobalBaseReg, 8274 SDLoc(), getPointerTy()), 8275 Offset); 8276 8277 // Lowering the machine isd will make sure everything is in the right 8278 // location. 8279 SDValue Chain = DAG.getEntryNode(); 8280 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 8281 SDValue Args[] = { Chain, Offset }; 8282 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args, 2); 8283 8284 // TLSCALL will be codegen'ed as call. Inform MFI that function has calls. 
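    // On Darwin the TLSCALL pseudo is expected to become an indirect call
    // through the thread-local variable's descriptor, roughly:
    //   movq  _x@TLVP(%rip), %rdi
    //   callq *(%rdi)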
8285   MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
8286   MFI->setAdjustsStack(true);
8287
8288   // And our return value (tls address) is in the standard call return value
8289   // location.
8290   unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
8291   return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(),
8292                             Chain.getValue(1));
8293   }
8294
8295   if (Subtarget->isTargetWindows() || Subtarget->isTargetMingw()) {
8296     // Just use the implicit TLS architecture
8297     // Need to generate something similar to:
8298     //   mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
8299     //                                  ; from TEB
8300     //   mov ecx, dword [rel _tls_index]; Load index (from C runtime)
8301     //   mov rcx, qword [rdx+rcx*8]
8302     //   mov eax, .tls$:tlsvar
8303     //   [rax+rcx] contains the address
8304     //   Windows 64bit: gs:0x58
8305     //   Windows 32bit: fs:__tls_array
8306
8307     // If GV is an alias then use the aliasee for determining
8308     // thread-localness.
8309     if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
8310       GV = GA->resolveAliasedGlobal(false);
8311     SDLoc dl(GA);
8312     SDValue Chain = DAG.getEntryNode();
8313
8314     // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
8315     // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
8316     // use its literal value of 0x2C.
8317     Value *Ptr = Constant::getNullValue(Subtarget->is64Bit()
8318                                         ? Type::getInt8PtrTy(*DAG.getContext(),
8319                                                              256)
8320                                         : Type::getInt32PtrTy(*DAG.getContext(),
8321                                                               257));
8322
8323     SDValue TlsArray = Subtarget->is64Bit() ? DAG.getIntPtrConstant(0x58) :
8324       (Subtarget->isTargetMingw() ? DAG.getIntPtrConstant(0x2C) :
8325         DAG.getExternalSymbol("_tls_array", getPointerTy()));
8326
8327     SDValue ThreadPointer = DAG.getLoad(getPointerTy(), dl, Chain, TlsArray,
8328                                         MachinePointerInfo(Ptr),
8329                                         false, false, false, 0);
8330
8331     // Load the _tls_index variable
8332     SDValue IDX = DAG.getExternalSymbol("_tls_index", getPointerTy());
8333     if (Subtarget->is64Bit())
8334       IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, getPointerTy(), Chain,
8335                            IDX, MachinePointerInfo(), MVT::i32,
8336                            false, false, 0);
8337     else
8338       IDX = DAG.getLoad(getPointerTy(), dl, Chain, IDX, MachinePointerInfo(),
8339                         false, false, false, 0);
8340
8341     SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()),
8342                                     getPointerTy());
8343     IDX = DAG.getNode(ISD::SHL, dl, getPointerTy(), IDX, Scale);
8344
8345     SDValue res = DAG.getNode(ISD::ADD, dl, getPointerTy(), ThreadPointer, IDX);
8346     res = DAG.getLoad(getPointerTy(), dl, Chain, res, MachinePointerInfo(),
8347                       false, false, false, 0);
8348
8349     // Get the offset of start of .tls section
8350     SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
8351                                              GA->getValueType(0),
8352                                              GA->getOffset(), X86II::MO_SECREL);
8353     SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), TGA);
8354
8355     // The address of the thread local variable is the add of the thread
8356     // pointer with the offset of the variable.
8357     return DAG.getNode(ISD::ADD, dl, getPointerTy(), res, Offset);
8358   }
8359
8360   llvm_unreachable("TLS not implemented for this target.");
8361 }
8362
8363 /// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values
8364 /// and take a 2 x i32 value to shift plus a shift amount.
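/// For example, an i64 left shift on a 32-bit target arrives here as
/// SHL_PARTS(Lo, Hi, Amt).  For Amt < 32 the high half becomes
/// (Hi << Amt) | (Lo >> (32 - Amt)) via SHLD and the low half becomes
/// Lo << Amt; when bit 5 of Amt is set (Amt >= 32) the CMOVs below instead
/// produce Lo << (Amt & 31) for the high half and 0 for the low half, since
/// the hardware shifts only use the low five bits of the amount.  The
/// right-shift variants are handled symmetrically with SHRD.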
8365SDValue X86TargetLowering::LowerShiftParts(SDValue Op, SelectionDAG &DAG) const{ 8366 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 8367 EVT VT = Op.getValueType(); 8368 unsigned VTBits = VT.getSizeInBits(); 8369 SDLoc dl(Op); 8370 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS; 8371 SDValue ShOpLo = Op.getOperand(0); 8372 SDValue ShOpHi = Op.getOperand(1); 8373 SDValue ShAmt = Op.getOperand(2); 8374 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi, 8375 DAG.getConstant(VTBits - 1, MVT::i8)) 8376 : DAG.getConstant(0, VT); 8377 8378 SDValue Tmp2, Tmp3; 8379 if (Op.getOpcode() == ISD::SHL_PARTS) { 8380 Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt); 8381 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); 8382 } else { 8383 Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt); 8384 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, ShAmt); 8385 } 8386 8387 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt, 8388 DAG.getConstant(VTBits, MVT::i8)); 8389 SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32, 8390 AndNode, DAG.getConstant(0, MVT::i8)); 8391 8392 SDValue Hi, Lo; 8393 SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8); 8394 SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond }; 8395 SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond }; 8396 8397 if (Op.getOpcode() == ISD::SHL_PARTS) { 8398 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0, 4); 8399 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1, 4); 8400 } else { 8401 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0, 4); 8402 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1, 4); 8403 } 8404 8405 SDValue Ops[2] = { Lo, Hi }; 8406 return DAG.getMergeValues(Ops, array_lengthof(Ops), dl); 8407} 8408 8409SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op, 8410 SelectionDAG &DAG) const { 8411 EVT SrcVT = Op.getOperand(0).getValueType(); 8412 8413 if (SrcVT.isVector()) 8414 return SDValue(); 8415 8416 assert(SrcVT.getSimpleVT() <= MVT::i64 && SrcVT.getSimpleVT() >= MVT::i16 && 8417 "Unknown SINT_TO_FP to lower!"); 8418 8419 // These are really Legal; return the operand so the caller accepts it as 8420 // Legal. 
8421 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType())) 8422 return Op; 8423 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) && 8424 Subtarget->is64Bit()) { 8425 return Op; 8426 } 8427 8428 SDLoc dl(Op); 8429 unsigned Size = SrcVT.getSizeInBits()/8; 8430 MachineFunction &MF = DAG.getMachineFunction(); 8431 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false); 8432 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 8433 SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), 8434 StackSlot, 8435 MachinePointerInfo::getFixedStack(SSFI), 8436 false, false, 0); 8437 return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG); 8438} 8439 8440SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, 8441 SDValue StackSlot, 8442 SelectionDAG &DAG) const { 8443 // Build the FILD 8444 SDLoc DL(Op); 8445 SDVTList Tys; 8446 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType()); 8447 if (useSSE) 8448 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue); 8449 else 8450 Tys = DAG.getVTList(Op.getValueType(), MVT::Other); 8451 8452 unsigned ByteSize = SrcVT.getSizeInBits()/8; 8453 8454 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot); 8455 MachineMemOperand *MMO; 8456 if (FI) { 8457 int SSFI = FI->getIndex(); 8458 MMO = 8459 DAG.getMachineFunction() 8460 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 8461 MachineMemOperand::MOLoad, ByteSize, ByteSize); 8462 } else { 8463 MMO = cast<LoadSDNode>(StackSlot)->getMemOperand(); 8464 StackSlot = StackSlot.getOperand(1); 8465 } 8466 SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) }; 8467 SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG : 8468 X86ISD::FILD, DL, 8469 Tys, Ops, array_lengthof(Ops), 8470 SrcVT, MMO); 8471 8472 if (useSSE) { 8473 Chain = Result.getValue(1); 8474 SDValue InFlag = Result.getValue(2); 8475 8476 // FIXME: Currently the FST is flagged to the FILD_FLAG. This 8477 // shouldn't be necessary except that RFP cannot be live across 8478 // multiple blocks. When stackifier is fixed, they can be uncoupled. 8479 MachineFunction &MF = DAG.getMachineFunction(); 8480 unsigned SSFISize = Op.getValueType().getSizeInBits()/8; 8481 int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false); 8482 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 8483 Tys = DAG.getVTList(MVT::Other); 8484 SDValue Ops[] = { 8485 Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag 8486 }; 8487 MachineMemOperand *MMO = 8488 DAG.getMachineFunction() 8489 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 8490 MachineMemOperand::MOStore, SSFISize, SSFISize); 8491 8492 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys, 8493 Ops, array_lengthof(Ops), 8494 Op.getValueType(), MMO); 8495 Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot, 8496 MachinePointerInfo::getFixedStack(SSFI), 8497 false, false, false, 0); 8498 } 8499 8500 return Result; 8501} 8502 8503// LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion. 8504SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op, 8505 SelectionDAG &DAG) const { 8506 // This algorithm is not obvious. 
Here it is what we're trying to output: 8507 /* 8508 movq %rax, %xmm0 8509 punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U } 8510 subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 } 8511 #ifdef __SSE3__ 8512 haddpd %xmm0, %xmm0 8513 #else 8514 pshufd $0x4e, %xmm0, %xmm1 8515 addpd %xmm1, %xmm0 8516 #endif 8517 */ 8518 8519 SDLoc dl(Op); 8520 LLVMContext *Context = DAG.getContext(); 8521 8522 // Build some magic constants. 8523 static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 }; 8524 Constant *C0 = ConstantDataVector::get(*Context, CV0); 8525 SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16); 8526 8527 SmallVector<Constant*,2> CV1; 8528 CV1.push_back( 8529 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble, 8530 APInt(64, 0x4330000000000000ULL)))); 8531 CV1.push_back( 8532 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble, 8533 APInt(64, 0x4530000000000000ULL)))); 8534 Constant *C1 = ConstantVector::get(CV1); 8535 SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16); 8536 8537 // Load the 64-bit value into an XMM register. 8538 SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, 8539 Op.getOperand(0)); 8540 SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0, 8541 MachinePointerInfo::getConstantPool(), 8542 false, false, false, 16); 8543 SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32, 8544 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, XR1), 8545 CLod0); 8546 8547 SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1, 8548 MachinePointerInfo::getConstantPool(), 8549 false, false, false, 16); 8550 SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck1); 8551 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1); 8552 SDValue Result; 8553 8554 if (Subtarget->hasSSE3()) { 8555 // FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'. 8556 Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub); 8557 } else { 8558 SDValue S2F = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Sub); 8559 SDValue Shuffle = getTargetShuffleNode(X86ISD::PSHUFD, dl, MVT::v4i32, 8560 S2F, 0x4E, DAG); 8561 Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64, 8562 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Shuffle), 8563 Sub); 8564 } 8565 8566 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result, 8567 DAG.getIntPtrConstant(0)); 8568} 8569 8570// LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion. 8571SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op, 8572 SelectionDAG &DAG) const { 8573 SDLoc dl(Op); 8574 // FP constant to bias correct the final result. 8575 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), 8576 MVT::f64); 8577 8578 // Load the 32-bit value into an XMM register. 8579 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, 8580 Op.getOperand(0)); 8581 8582 // Zero out the upper parts of the register. 8583 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG); 8584 8585 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 8586 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load), 8587 DAG.getIntPtrConstant(0)); 8588 8589 // Or the load with the bias. 
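  // The trick: 0x4330000000000000 is the double 2^52, whose 52-bit mantissa is
  // all zeros, so OR'ing the zero-extended i32 into the low mantissa bits
  // yields exactly 2^52 + x.  Subtracting the bias below then recovers x as a
  // double; e.g. x = 7 gives the bit pattern 0x4330000000000007 == 2^52 + 7.0.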
8590 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, 8591 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, 8592 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 8593 MVT::v2f64, Load)), 8594 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, 8595 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 8596 MVT::v2f64, Bias))); 8597 Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 8598 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or), 8599 DAG.getIntPtrConstant(0)); 8600 8601 // Subtract the bias. 8602 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias); 8603 8604 // Handle final rounding. 8605 EVT DestVT = Op.getValueType(); 8606 8607 if (DestVT.bitsLT(MVT::f64)) 8608 return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub, 8609 DAG.getIntPtrConstant(0)); 8610 if (DestVT.bitsGT(MVT::f64)) 8611 return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub); 8612 8613 // Handle final rounding. 8614 return Sub; 8615} 8616 8617SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op, 8618 SelectionDAG &DAG) const { 8619 SDValue N0 = Op.getOperand(0); 8620 EVT SVT = N0.getValueType(); 8621 SDLoc dl(Op); 8622 8623 assert((SVT == MVT::v4i8 || SVT == MVT::v4i16 || 8624 SVT == MVT::v8i8 || SVT == MVT::v8i16) && 8625 "Custom UINT_TO_FP is not supported!"); 8626 8627 EVT NVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, 8628 SVT.getVectorNumElements()); 8629 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), 8630 DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, N0)); 8631} 8632 8633SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op, 8634 SelectionDAG &DAG) const { 8635 SDValue N0 = Op.getOperand(0); 8636 SDLoc dl(Op); 8637 8638 if (Op.getValueType().isVector()) 8639 return lowerUINT_TO_FP_vec(Op, DAG); 8640 8641 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't 8642 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform 8643 // the optimization here. 8644 if (DAG.SignBitIsZero(N0)) 8645 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0); 8646 8647 EVT SrcVT = N0.getValueType(); 8648 EVT DstVT = Op.getValueType(); 8649 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64) 8650 return LowerUINT_TO_FP_i64(Op, DAG); 8651 if (SrcVT == MVT::i32 && X86ScalarSSEf64) 8652 return LowerUINT_TO_FP_i32(Op, DAG); 8653 if (Subtarget->is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32) 8654 return SDValue(); 8655 8656 // Make a 64-bit buffer, and use it to build an FILD. 8657 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64); 8658 if (SrcVT == MVT::i32) { 8659 SDValue WordOff = DAG.getConstant(4, getPointerTy()); 8660 SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl, 8661 getPointerTy(), StackSlot, WordOff); 8662 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), 8663 StackSlot, MachinePointerInfo(), 8664 false, false, 0); 8665 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32), 8666 OffsetSlot, MachinePointerInfo(), 8667 false, false, 0); 8668 SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG); 8669 return Fild; 8670 } 8671 8672 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP"); 8673 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), 8674 StackSlot, MachinePointerInfo(), 8675 false, false, 0); 8676 // For i64 source, we need to add the appropriate power of 2 if the input 8677 // was negative. This is the same as the optimization in 8678 // DAGTypeLegalizer::ExpandIntOp_UNIT_TO_FP, and for it to be safe here, 8679 // we must be careful to do the computation in x87 extended precision, not 8680 // in SSE. 
(The generic code can't know it's OK to do this, or how to.) 8681 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex(); 8682 MachineMemOperand *MMO = 8683 DAG.getMachineFunction() 8684 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 8685 MachineMemOperand::MOLoad, 8, 8); 8686 8687 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other); 8688 SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) }; 8689 SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops, 8690 array_lengthof(Ops), MVT::i64, MMO); 8691 8692 APInt FF(32, 0x5F800000ULL); 8693 8694 // Check whether the sign bit is set. 8695 SDValue SignSet = DAG.getSetCC(dl, 8696 getSetCCResultType(*DAG.getContext(), MVT::i64), 8697 Op.getOperand(0), DAG.getConstant(0, MVT::i64), 8698 ISD::SETLT); 8699 8700 // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits. 8701 SDValue FudgePtr = DAG.getConstantPool( 8702 ConstantInt::get(*DAG.getContext(), FF.zext(64)), 8703 getPointerTy()); 8704 8705 // Get a pointer to FF if the sign bit was set, or to 0 otherwise. 8706 SDValue Zero = DAG.getIntPtrConstant(0); 8707 SDValue Four = DAG.getIntPtrConstant(4); 8708 SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet, 8709 Zero, Four); 8710 FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset); 8711 8712 // Load the value out, extending it from f32 to f80. 8713 // FIXME: Avoid the extend by constructing the right constant pool? 8714 SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(), 8715 FudgePtr, MachinePointerInfo::getConstantPool(), 8716 MVT::f32, false, false, 4); 8717 // Extend everything to 80 bits to force it to be done on x87. 8718 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge); 8719 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0)); 8720} 8721 8722std::pair<SDValue,SDValue> 8723X86TargetLowering:: FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, 8724 bool IsSigned, bool IsReplace) const { 8725 SDLoc DL(Op); 8726 8727 EVT DstTy = Op.getValueType(); 8728 8729 if (!IsSigned && !isIntegerTypeFTOL(DstTy)) { 8730 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT"); 8731 DstTy = MVT::i64; 8732 } 8733 8734 assert(DstTy.getSimpleVT() <= MVT::i64 && 8735 DstTy.getSimpleVT() >= MVT::i16 && 8736 "Unknown FP_TO_INT to lower!"); 8737 8738 // These are really Legal. 8739 if (DstTy == MVT::i32 && 8740 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) 8741 return std::make_pair(SDValue(), SDValue()); 8742 if (Subtarget->is64Bit() && 8743 DstTy == MVT::i64 && 8744 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) 8745 return std::make_pair(SDValue(), SDValue()); 8746 8747 // We lower FP->int64 either into FISTP64 followed by a load from a temporary 8748 // stack slot, or into the FTOL runtime function. 
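  // On the FISTP path the in-memory pseudo later expands to roughly: save the
  // FP control word with FNSTCW, switch rounding to truncate with FLDCW,
  // FISTP to the stack slot, restore the control word, and reload the integer
  // from the slot.  The WIN_FTOL path instead calls the _ftol2 runtime helper,
  // which returns the 64-bit result in EDX:EAX (see the CopyFromReg pair
  // further down).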
8749 MachineFunction &MF = DAG.getMachineFunction(); 8750 unsigned MemSize = DstTy.getSizeInBits()/8; 8751 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false); 8752 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 8753 8754 unsigned Opc; 8755 if (!IsSigned && isIntegerTypeFTOL(DstTy)) 8756 Opc = X86ISD::WIN_FTOL; 8757 else 8758 switch (DstTy.getSimpleVT().SimpleTy) { 8759 default: llvm_unreachable("Invalid FP_TO_SINT to lower!"); 8760 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break; 8761 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break; 8762 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break; 8763 } 8764 8765 SDValue Chain = DAG.getEntryNode(); 8766 SDValue Value = Op.getOperand(0); 8767 EVT TheVT = Op.getOperand(0).getValueType(); 8768 // FIXME This causes a redundant load/store if the SSE-class value is already 8769 // in memory, such as if it is on the callstack. 8770 if (isScalarFPTypeInSSEReg(TheVT)) { 8771 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!"); 8772 Chain = DAG.getStore(Chain, DL, Value, StackSlot, 8773 MachinePointerInfo::getFixedStack(SSFI), 8774 false, false, 0); 8775 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other); 8776 SDValue Ops[] = { 8777 Chain, StackSlot, DAG.getValueType(TheVT) 8778 }; 8779 8780 MachineMemOperand *MMO = 8781 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 8782 MachineMemOperand::MOLoad, MemSize, MemSize); 8783 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, 8784 array_lengthof(Ops), DstTy, MMO); 8785 Chain = Value.getValue(1); 8786 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false); 8787 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 8788 } 8789 8790 MachineMemOperand *MMO = 8791 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 8792 MachineMemOperand::MOStore, MemSize, MemSize); 8793 8794 if (Opc != X86ISD::WIN_FTOL) { 8795 // Build the FP_TO_INT*_IN_MEM 8796 SDValue Ops[] = { Chain, Value, StackSlot }; 8797 SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other), 8798 Ops, array_lengthof(Ops), DstTy, 8799 MMO); 8800 return std::make_pair(FIST, StackSlot); 8801 } else { 8802 SDValue ftol = DAG.getNode(X86ISD::WIN_FTOL, DL, 8803 DAG.getVTList(MVT::Other, MVT::Glue), 8804 Chain, Value); 8805 SDValue eax = DAG.getCopyFromReg(ftol, DL, X86::EAX, 8806 MVT::i32, ftol.getValue(1)); 8807 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), DL, X86::EDX, 8808 MVT::i32, eax.getValue(2)); 8809 SDValue Ops[] = { eax, edx }; 8810 SDValue pair = IsReplace 8811 ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops, array_lengthof(Ops)) 8812 : DAG.getMergeValues(Ops, array_lengthof(Ops), DL); 8813 return std::make_pair(pair, SDValue()); 8814 } 8815} 8816 8817static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG, 8818 const X86Subtarget *Subtarget) { 8819 MVT VT = Op->getSimpleValueType(0); 8820 SDValue In = Op->getOperand(0); 8821 MVT InVT = In.getSimpleValueType(); 8822 SDLoc dl(Op); 8823 8824 // Optimize vectors in AVX mode: 8825 // 8826 // v8i16 -> v8i32 8827 // Use vpunpcklwd for 4 lower elements v8i16 -> v4i32. 8828 // Use vpunpckhwd for 4 upper elements v8i16 -> v4i32. 8829 // Concat upper and lower parts. 8830 // 8831 // v4i32 -> v4i64 8832 // Use vpunpckldq for 4 lower elements v4i32 -> v2i64. 8833 // Use vpunpckhdq for 4 upper elements v4i32 -> v2i64. 8834 // Concat upper and lower parts. 
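  //
  // For the v8i16 zero-extend case that is roughly (registers illustrative):
  //   vpunpcklwd %xmm_zero, %xmm_in, %xmm_lo    ; low four i16 -> four i32
  //   vpunpckhwd %xmm_zero, %xmm_in, %xmm_hi    ; high four i16 -> four i32
  //   vinsertf128 $1, %xmm_hi, %ymm_lo, %ymm    ; concatenate into a v8i32
  // (ANY_EXTEND uses an undef instead of the zero vector for the second
  // unpack operand.)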
8835 // 8836 8837 if (((VT != MVT::v8i32) || (InVT != MVT::v8i16)) && 8838 ((VT != MVT::v4i64) || (InVT != MVT::v4i32))) 8839 return SDValue(); 8840 8841 if (Subtarget->hasInt256()) 8842 return DAG.getNode(X86ISD::VZEXT_MOVL, dl, VT, In); 8843 8844 SDValue ZeroVec = getZeroVector(InVT, Subtarget, DAG, dl); 8845 SDValue Undef = DAG.getUNDEF(InVT); 8846 bool NeedZero = Op.getOpcode() == ISD::ZERO_EXTEND; 8847 SDValue OpLo = getUnpackl(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef); 8848 SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef); 8849 8850 MVT HVT = MVT::getVectorVT(VT.getVectorElementType(), 8851 VT.getVectorNumElements()/2); 8852 8853 OpLo = DAG.getNode(ISD::BITCAST, dl, HVT, OpLo); 8854 OpHi = DAG.getNode(ISD::BITCAST, dl, HVT, OpHi); 8855 8856 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi); 8857} 8858 8859static SDValue LowerZERO_EXTEND_AVX512(SDValue Op, 8860 SelectionDAG &DAG) { 8861 MVT VT = Op->getValueType(0).getSimpleVT(); 8862 SDValue In = Op->getOperand(0); 8863 MVT InVT = In.getValueType().getSimpleVT(); 8864 SDLoc DL(Op); 8865 unsigned int NumElts = VT.getVectorNumElements(); 8866 if (NumElts != 8 && NumElts != 16) 8867 return SDValue(); 8868 8869 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1) 8870 return DAG.getNode(X86ISD::VZEXT, DL, VT, In); 8871 8872 EVT ExtVT = (NumElts == 8)? MVT::v8i64 : MVT::v16i32; 8873 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 8874 // Now we have only mask extension 8875 assert(InVT.getVectorElementType() == MVT::i1); 8876 SDValue Cst = DAG.getTargetConstant(1, ExtVT.getScalarType()); 8877 const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue(); 8878 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy()); 8879 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment(); 8880 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP, 8881 MachinePointerInfo::getConstantPool(), 8882 false, false, false, Alignment); 8883 8884 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, DL, ExtVT, In, Ld); 8885 if (VT.is512BitVector()) 8886 return Brcst; 8887 return DAG.getNode(X86ISD::VTRUNC, DL, VT, Brcst); 8888} 8889 8890static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget *Subtarget, 8891 SelectionDAG &DAG) { 8892 if (Subtarget->hasFp256()) { 8893 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget); 8894 if (Res.getNode()) 8895 return Res; 8896 } 8897 8898 return SDValue(); 8899} 8900 8901static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget *Subtarget, 8902 SelectionDAG &DAG) { 8903 SDLoc DL(Op); 8904 MVT VT = Op.getSimpleValueType(); 8905 SDValue In = Op.getOperand(0); 8906 MVT SVT = In.getSimpleValueType(); 8907 8908 if (VT.is512BitVector() || SVT.getVectorElementType() == MVT::i1) 8909 return LowerZERO_EXTEND_AVX512(Op, DAG); 8910 8911 if (Subtarget->hasFp256()) { 8912 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget); 8913 if (Res.getNode()) 8914 return Res; 8915 } 8916 8917 if (!VT.is256BitVector() || !SVT.is128BitVector() || 8918 VT.getVectorNumElements() != SVT.getVectorNumElements()) 8919 return SDValue(); 8920 8921 assert(Subtarget->hasFp256() && "256-bit vector is observed without AVX!"); 8922 8923 // AVX2 has better support of integer extending. 
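  // (With AVX2 the whole extend is a single instruction, e.g. for v8i16 ->
  //  v8i32 the VZEXT node below selects to vpmovzxwd %xmm0, %ymm0.)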
8924 if (Subtarget->hasInt256()) 8925 return DAG.getNode(X86ISD::VZEXT, DL, VT, In); 8926 8927 SDValue Lo = DAG.getNode(X86ISD::VZEXT, DL, MVT::v4i32, In); 8928 static const int Mask[] = {4, 5, 6, 7, -1, -1, -1, -1}; 8929 SDValue Hi = DAG.getNode(X86ISD::VZEXT, DL, MVT::v4i32, 8930 DAG.getVectorShuffle(MVT::v8i16, DL, In, 8931 DAG.getUNDEF(MVT::v8i16), 8932 &Mask[0])); 8933 8934 return DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i32, Lo, Hi); 8935} 8936 8937SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { 8938 SDLoc DL(Op); 8939 MVT VT = Op.getSimpleValueType(); 8940 SDValue In = Op.getOperand(0); 8941 MVT InVT = In.getSimpleValueType(); 8942 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() && 8943 "Invalid TRUNCATE operation"); 8944 8945 if (InVT.is512BitVector() || VT.getVectorElementType() == MVT::i1) { 8946 if (VT.getVectorElementType().getSizeInBits() >=8) 8947 return DAG.getNode(X86ISD::VTRUNC, DL, VT, In); 8948 8949 assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type"); 8950 unsigned NumElts = InVT.getVectorNumElements(); 8951 assert ((NumElts == 8 || NumElts == 16) && "Unexpected vector type"); 8952 if (InVT.getSizeInBits() < 512) { 8953 MVT ExtVT = (NumElts == 16)? MVT::v16i32 : MVT::v8i64; 8954 In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In); 8955 InVT = ExtVT; 8956 } 8957 SDValue Cst = DAG.getTargetConstant(1, InVT.getVectorElementType()); 8958 const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue(); 8959 SDValue CP = DAG.getConstantPool(C, getPointerTy()); 8960 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment(); 8961 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP, 8962 MachinePointerInfo::getConstantPool(), 8963 false, false, false, Alignment); 8964 SDValue OneV = DAG.getNode(X86ISD::VBROADCAST, DL, InVT, Ld); 8965 SDValue And = DAG.getNode(ISD::AND, DL, InVT, OneV, In); 8966 return DAG.getNode(X86ISD::TESTM, DL, VT, And, And); 8967 } 8968 8969 if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) { 8970 // On AVX2, v4i64 -> v4i32 becomes VPERMD. 8971 if (Subtarget->hasInt256()) { 8972 static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1}; 8973 In = DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, In); 8974 In = DAG.getVectorShuffle(MVT::v8i32, DL, In, DAG.getUNDEF(MVT::v8i32), 8975 ShufMask); 8976 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In, 8977 DAG.getIntPtrConstant(0)); 8978 } 8979 8980 // On AVX, v4i64 -> v4i32 becomes a sequence that uses PSHUFD and MOVLHPS. 8981 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In, 8982 DAG.getIntPtrConstant(0)); 8983 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In, 8984 DAG.getIntPtrConstant(2)); 8985 8986 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo); 8987 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi); 8988 8989 // The PSHUFD mask: 8990 static const int ShufMask1[] = {0, 2, 0, 0}; 8991 SDValue Undef = DAG.getUNDEF(VT); 8992 OpLo = DAG.getVectorShuffle(VT, DL, OpLo, Undef, ShufMask1); 8993 OpHi = DAG.getVectorShuffle(VT, DL, OpHi, Undef, ShufMask1); 8994 8995 // The MOVLHPS mask: 8996 static const int ShufMask2[] = {0, 1, 4, 5}; 8997 return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask2); 8998 } 8999 9000 if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) { 9001 // On AVX2, v8i32 -> v8i16 becomed PSHUFB. 
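    // The 32-byte PSHUFB mask built below keeps bytes {0,1, 4,5, 8,9, 12,13}
    // of each 128-bit lane (the low halves of its four i32 elements) and
    // zeroes the rest via the 0x80 entries; the v4i64 shuffle afterwards then
    // moves lane 1's packed qword next to lane 0's before the low 128 bits are
    // extracted as the v8i16 result.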
9002 if (Subtarget->hasInt256()) { 9003 In = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, In); 9004 9005 SmallVector<SDValue,32> pshufbMask; 9006 for (unsigned i = 0; i < 2; ++i) { 9007 pshufbMask.push_back(DAG.getConstant(0x0, MVT::i8)); 9008 pshufbMask.push_back(DAG.getConstant(0x1, MVT::i8)); 9009 pshufbMask.push_back(DAG.getConstant(0x4, MVT::i8)); 9010 pshufbMask.push_back(DAG.getConstant(0x5, MVT::i8)); 9011 pshufbMask.push_back(DAG.getConstant(0x8, MVT::i8)); 9012 pshufbMask.push_back(DAG.getConstant(0x9, MVT::i8)); 9013 pshufbMask.push_back(DAG.getConstant(0xc, MVT::i8)); 9014 pshufbMask.push_back(DAG.getConstant(0xd, MVT::i8)); 9015 for (unsigned j = 0; j < 8; ++j) 9016 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); 9017 } 9018 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, 9019 &pshufbMask[0], 32); 9020 In = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8, In, BV); 9021 In = DAG.getNode(ISD::BITCAST, DL, MVT::v4i64, In); 9022 9023 static const int ShufMask[] = {0, 2, -1, -1}; 9024 In = DAG.getVectorShuffle(MVT::v4i64, DL, In, DAG.getUNDEF(MVT::v4i64), 9025 &ShufMask[0]); 9026 In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In, 9027 DAG.getIntPtrConstant(0)); 9028 return DAG.getNode(ISD::BITCAST, DL, VT, In); 9029 } 9030 9031 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In, 9032 DAG.getIntPtrConstant(0)); 9033 9034 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In, 9035 DAG.getIntPtrConstant(4)); 9036 9037 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpLo); 9038 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpHi); 9039 9040 // The PSHUFB mask: 9041 static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13, 9042 -1, -1, -1, -1, -1, -1, -1, -1}; 9043 9044 SDValue Undef = DAG.getUNDEF(MVT::v16i8); 9045 OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, Undef, ShufMask1); 9046 OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, Undef, ShufMask1); 9047 9048 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo); 9049 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi); 9050 9051 // The MOVLHPS Mask: 9052 static const int ShufMask2[] = {0, 1, 4, 5}; 9053 SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2); 9054 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, res); 9055 } 9056 9057 // Handle truncation of V256 to V128 using shuffles. 
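  // For example a v16i16 -> v16i8 truncate: bitcast the source to v32i8,
  // shuffle the even bytes {0, 2, 4, ..., 30} down into the low half, and
  // return the low 128-bit subvector.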
9058 if (!VT.is128BitVector() || !InVT.is256BitVector()) 9059 return SDValue(); 9060 9061 assert(Subtarget->hasFp256() && "256-bit vector without AVX!"); 9062 9063 unsigned NumElems = VT.getVectorNumElements(); 9064 EVT NVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 9065 NumElems * 2); 9066 9067 SmallVector<int, 16> MaskVec(NumElems * 2, -1); 9068 // Prepare truncation shuffle mask 9069 for (unsigned i = 0; i != NumElems; ++i) 9070 MaskVec[i] = i * 2; 9071 SDValue V = DAG.getVectorShuffle(NVT, DL, 9072 DAG.getNode(ISD::BITCAST, DL, NVT, In), 9073 DAG.getUNDEF(NVT), &MaskVec[0]); 9074 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, 9075 DAG.getIntPtrConstant(0)); 9076} 9077 9078SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op, 9079 SelectionDAG &DAG) const { 9080 MVT VT = Op.getSimpleValueType(); 9081 if (VT.isVector()) { 9082 if (VT == MVT::v8i16) 9083 return DAG.getNode(ISD::TRUNCATE, SDLoc(Op), VT, 9084 DAG.getNode(ISD::FP_TO_SINT, SDLoc(Op), 9085 MVT::v8i32, Op.getOperand(0))); 9086 return SDValue(); 9087 } 9088 9089 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, 9090 /*IsSigned=*/ true, /*IsReplace=*/ false); 9091 SDValue FIST = Vals.first, StackSlot = Vals.second; 9092 // If FP_TO_INTHelper failed, the node is actually supposed to be Legal. 9093 if (FIST.getNode() == 0) return Op; 9094 9095 if (StackSlot.getNode()) 9096 // Load the result. 9097 return DAG.getLoad(Op.getValueType(), SDLoc(Op), 9098 FIST, StackSlot, MachinePointerInfo(), 9099 false, false, false, 0); 9100 9101 // The node is the result. 9102 return FIST; 9103} 9104 9105SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op, 9106 SelectionDAG &DAG) const { 9107 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, 9108 /*IsSigned=*/ false, /*IsReplace=*/ false); 9109 SDValue FIST = Vals.first, StackSlot = Vals.second; 9110 assert(FIST.getNode() && "Unexpected failure"); 9111 9112 if (StackSlot.getNode()) 9113 // Load the result. 9114 return DAG.getLoad(Op.getValueType(), SDLoc(Op), 9115 FIST, StackSlot, MachinePointerInfo(), 9116 false, false, false, 0); 9117 9118 // The node is the result. 9119 return FIST; 9120} 9121 9122static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) { 9123 SDLoc DL(Op); 9124 MVT VT = Op.getSimpleValueType(); 9125 SDValue In = Op.getOperand(0); 9126 MVT SVT = In.getSimpleValueType(); 9127 9128 assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!"); 9129 9130 return DAG.getNode(X86ISD::VFPEXT, DL, VT, 9131 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32, 9132 In, DAG.getUNDEF(SVT))); 9133} 9134 9135SDValue X86TargetLowering::LowerFABS(SDValue Op, SelectionDAG &DAG) const { 9136 LLVMContext *Context = DAG.getContext(); 9137 SDLoc dl(Op); 9138 MVT VT = Op.getSimpleValueType(); 9139 MVT EltVT = VT; 9140 unsigned NumElts = VT == MVT::f64 ? 
2 : 4; 9141 if (VT.isVector()) { 9142 EltVT = VT.getVectorElementType(); 9143 NumElts = VT.getVectorNumElements(); 9144 } 9145 Constant *C; 9146 if (EltVT == MVT::f64) 9147 C = ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble, 9148 APInt(64, ~(1ULL << 63)))); 9149 else 9150 C = ConstantFP::get(*Context, APFloat(APFloat::IEEEsingle, 9151 APInt(32, ~(1U << 31)))); 9152 C = ConstantVector::getSplat(NumElts, C); 9153 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy()); 9154 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 9155 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 9156 MachinePointerInfo::getConstantPool(), 9157 false, false, false, Alignment); 9158 if (VT.isVector()) { 9159 MVT ANDVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64; 9160 return DAG.getNode(ISD::BITCAST, dl, VT, 9161 DAG.getNode(ISD::AND, dl, ANDVT, 9162 DAG.getNode(ISD::BITCAST, dl, ANDVT, 9163 Op.getOperand(0)), 9164 DAG.getNode(ISD::BITCAST, dl, ANDVT, Mask))); 9165 } 9166 return DAG.getNode(X86ISD::FAND, dl, VT, Op.getOperand(0), Mask); 9167} 9168 9169SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) const { 9170 LLVMContext *Context = DAG.getContext(); 9171 SDLoc dl(Op); 9172 MVT VT = Op.getSimpleValueType(); 9173 MVT EltVT = VT; 9174 unsigned NumElts = VT == MVT::f64 ? 2 : 4; 9175 if (VT.isVector()) { 9176 EltVT = VT.getVectorElementType(); 9177 NumElts = VT.getVectorNumElements(); 9178 } 9179 Constant *C; 9180 if (EltVT == MVT::f64) 9181 C = ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble, 9182 APInt(64, 1ULL << 63))); 9183 else 9184 C = ConstantFP::get(*Context, APFloat(APFloat::IEEEsingle, 9185 APInt(32, 1U << 31))); 9186 C = ConstantVector::getSplat(NumElts, C); 9187 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy()); 9188 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 9189 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 9190 MachinePointerInfo::getConstantPool(), 9191 false, false, false, Alignment); 9192 if (VT.isVector()) { 9193 MVT XORVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits()/64); 9194 return DAG.getNode(ISD::BITCAST, dl, VT, 9195 DAG.getNode(ISD::XOR, dl, XORVT, 9196 DAG.getNode(ISD::BITCAST, dl, XORVT, 9197 Op.getOperand(0)), 9198 DAG.getNode(ISD::BITCAST, dl, XORVT, Mask))); 9199 } 9200 9201 return DAG.getNode(X86ISD::FXOR, dl, VT, Op.getOperand(0), Mask); 9202} 9203 9204SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { 9205 LLVMContext *Context = DAG.getContext(); 9206 SDValue Op0 = Op.getOperand(0); 9207 SDValue Op1 = Op.getOperand(1); 9208 SDLoc dl(Op); 9209 MVT VT = Op.getSimpleValueType(); 9210 MVT SrcVT = Op1.getSimpleValueType(); 9211 9212 // If second operand is smaller, extend it first. 9213 if (SrcVT.bitsLT(VT)) { 9214 Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1); 9215 SrcVT = VT; 9216 } 9217 // And if it is bigger, shrink it first. 9218 if (SrcVT.bitsGT(VT)) { 9219 Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1)); 9220 SrcVT = VT; 9221 } 9222 9223 // At this point the operands and the result should have the same 9224 // type, and that won't be f80 since that is not custom lowered. 9225 9226 // First get the sign bit of second operand. 
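  // The sign bit is isolated by AND'ing Op1 with a constant-pool vector whose
  // first element has only the sign bit set (1 << 63 for f64, 1 << 31 for
  // f32).  E.g. copysign(1.0, -2.0): SignBit becomes -0.0, Op0 is masked to
  // 1.0 by Mask2 below, and the final FOR produces -1.0.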
9227 SmallVector<Constant*,4> CV; 9228 if (SrcVT == MVT::f64) { 9229 const fltSemantics &Sem = APFloat::IEEEdouble; 9230 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(64, 1ULL << 63)))); 9231 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(64, 0)))); 9232 } else { 9233 const fltSemantics &Sem = APFloat::IEEEsingle; 9234 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(32, 1U << 31)))); 9235 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(32, 0)))); 9236 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(32, 0)))); 9237 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(32, 0)))); 9238 } 9239 Constant *C = ConstantVector::get(CV); 9240 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 9241 SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx, 9242 MachinePointerInfo::getConstantPool(), 9243 false, false, false, 16); 9244 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1); 9245 9246 // Shift sign bit right or left if the two operands have different types. 9247 if (SrcVT.bitsGT(VT)) { 9248 // Op0 is MVT::f32, Op1 is MVT::f64. 9249 SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, SignBit); 9250 SignBit = DAG.getNode(X86ISD::FSRL, dl, MVT::v2f64, SignBit, 9251 DAG.getConstant(32, MVT::i32)); 9252 SignBit = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, SignBit); 9253 SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, SignBit, 9254 DAG.getIntPtrConstant(0)); 9255 } 9256 9257 // Clear first operand sign bit. 9258 CV.clear(); 9259 if (VT == MVT::f64) { 9260 const fltSemantics &Sem = APFloat::IEEEdouble; 9261 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, 9262 APInt(64, ~(1ULL << 63))))); 9263 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(64, 0)))); 9264 } else { 9265 const fltSemantics &Sem = APFloat::IEEEsingle; 9266 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, 9267 APInt(32, ~(1U << 31))))); 9268 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(32, 0)))); 9269 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(32, 0)))); 9270 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(32, 0)))); 9271 } 9272 C = ConstantVector::get(CV); 9273 CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 9274 SDValue Mask2 = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 9275 MachinePointerInfo::getConstantPool(), 9276 false, false, false, 16); 9277 SDValue Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Mask2); 9278 9279 // Or the value with the sign bit. 9280 return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit); 9281} 9282 9283static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) { 9284 SDValue N0 = Op.getOperand(0); 9285 SDLoc dl(Op); 9286 MVT VT = Op.getSimpleValueType(); 9287 9288 // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1). 9289 SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0, 9290 DAG.getConstant(1, VT)); 9291 return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT)); 9292} 9293 9294// LowerVectorAllZeroTest - Check whether an OR'd tree is PTEST-able. 
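// For example, an equality-with-zero test of
//   (or (extractelt V, 0), (or (extractelt V, 1), ... (extractelt V, N-1)))
// where all elements of V are covered can be selected as a single (ptest V, V).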
9295// 9296static SDValue LowerVectorAllZeroTest(SDValue Op, const X86Subtarget *Subtarget, 9297 SelectionDAG &DAG) { 9298 assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree."); 9299 9300 if (!Subtarget->hasSSE41()) 9301 return SDValue(); 9302 9303 if (!Op->hasOneUse()) 9304 return SDValue(); 9305 9306 SDNode *N = Op.getNode(); 9307 SDLoc DL(N); 9308 9309 SmallVector<SDValue, 8> Opnds; 9310 DenseMap<SDValue, unsigned> VecInMap; 9311 EVT VT = MVT::Other; 9312 9313 // Recognize a special case where a vector is casted into wide integer to 9314 // test all 0s. 9315 Opnds.push_back(N->getOperand(0)); 9316 Opnds.push_back(N->getOperand(1)); 9317 9318 for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) { 9319 SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot; 9320 // BFS traverse all OR'd operands. 9321 if (I->getOpcode() == ISD::OR) { 9322 Opnds.push_back(I->getOperand(0)); 9323 Opnds.push_back(I->getOperand(1)); 9324 // Re-evaluate the number of nodes to be traversed. 9325 e += 2; // 2 more nodes (LHS and RHS) are pushed. 9326 continue; 9327 } 9328 9329 // Quit if a non-EXTRACT_VECTOR_ELT 9330 if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 9331 return SDValue(); 9332 9333 // Quit if without a constant index. 9334 SDValue Idx = I->getOperand(1); 9335 if (!isa<ConstantSDNode>(Idx)) 9336 return SDValue(); 9337 9338 SDValue ExtractedFromVec = I->getOperand(0); 9339 DenseMap<SDValue, unsigned>::iterator M = VecInMap.find(ExtractedFromVec); 9340 if (M == VecInMap.end()) { 9341 VT = ExtractedFromVec.getValueType(); 9342 // Quit if not 128/256-bit vector. 9343 if (!VT.is128BitVector() && !VT.is256BitVector()) 9344 return SDValue(); 9345 // Quit if not the same type. 9346 if (VecInMap.begin() != VecInMap.end() && 9347 VT != VecInMap.begin()->first.getValueType()) 9348 return SDValue(); 9349 M = VecInMap.insert(std::make_pair(ExtractedFromVec, 0)).first; 9350 } 9351 M->second |= 1U << cast<ConstantSDNode>(Idx)->getZExtValue(); 9352 } 9353 9354 assert((VT.is128BitVector() || VT.is256BitVector()) && 9355 "Not extracted from 128-/256-bit vector."); 9356 9357 unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U; 9358 SmallVector<SDValue, 8> VecIns; 9359 9360 for (DenseMap<SDValue, unsigned>::const_iterator 9361 I = VecInMap.begin(), E = VecInMap.end(); I != E; ++I) { 9362 // Quit if not all elements are used. 9363 if (I->second != FullMask) 9364 return SDValue(); 9365 VecIns.push_back(I->first); 9366 } 9367 9368 EVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64; 9369 9370 // Cast all vectors into TestVT for PTEST. 9371 for (unsigned i = 0, e = VecIns.size(); i < e; ++i) 9372 VecIns[i] = DAG.getNode(ISD::BITCAST, DL, TestVT, VecIns[i]); 9373 9374 // If more than one full vectors are evaluated, OR them first before PTEST. 9375 for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) { 9376 // Each iteration will OR 2 nodes and append the result until there is only 9377 // 1 node left, i.e. the final OR'd value of all vectors. 9378 SDValue LHS = VecIns[Slot]; 9379 SDValue RHS = VecIns[Slot + 1]; 9380 VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS)); 9381 } 9382 9383 return DAG.getNode(X86ISD::PTEST, DL, MVT::i32, 9384 VecIns.back(), VecIns.back()); 9385} 9386 9387/// Emit nodes that will be selected as "test Op0,Op0", or something 9388/// equivalent. 9389SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, 9390 SelectionDAG &DAG) const { 9391 SDLoc dl(Op); 9392 9393 // CF and OF aren't always set the way we want. 
Determine which 9394 // of these we need. 9395 bool NeedCF = false; 9396 bool NeedOF = false; 9397 switch (X86CC) { 9398 default: break; 9399 case X86::COND_A: case X86::COND_AE: 9400 case X86::COND_B: case X86::COND_BE: 9401 NeedCF = true; 9402 break; 9403 case X86::COND_G: case X86::COND_GE: 9404 case X86::COND_L: case X86::COND_LE: 9405 case X86::COND_O: case X86::COND_NO: 9406 NeedOF = true; 9407 break; 9408 } 9409 9410 // See if we can use the EFLAGS value from the operand instead of 9411 // doing a separate TEST. TEST always sets OF and CF to 0, so unless 9412 // we prove that the arithmetic won't overflow, we can't use OF or CF. 9413 if (Op.getResNo() != 0 || NeedOF || NeedCF) 9414 // Emit a CMP with 0, which is the TEST pattern. 9415 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op, 9416 DAG.getConstant(0, Op.getValueType())); 9417 9418 unsigned Opcode = 0; 9419 unsigned NumOperands = 0; 9420 9421 // Truncate operations may prevent the merge of the SETCC instruction 9422 // and the arithmetic intruction before it. Attempt to truncate the operands 9423 // of the arithmetic instruction and use a reduced bit-width instruction. 9424 bool NeedTruncation = false; 9425 SDValue ArithOp = Op; 9426 if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) { 9427 SDValue Arith = Op->getOperand(0); 9428 // Both the trunc and the arithmetic op need to have one user each. 9429 if (Arith->hasOneUse()) 9430 switch (Arith.getOpcode()) { 9431 default: break; 9432 case ISD::ADD: 9433 case ISD::SUB: 9434 case ISD::AND: 9435 case ISD::OR: 9436 case ISD::XOR: { 9437 NeedTruncation = true; 9438 ArithOp = Arith; 9439 } 9440 } 9441 } 9442 9443 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation 9444 // which may be the result of a CAST. We use the variable 'Op', which is the 9445 // non-casted variable when we check for possible users. 9446 switch (ArithOp.getOpcode()) { 9447 case ISD::ADD: 9448 // Due to an isel shortcoming, be conservative if this add is likely to be 9449 // selected as part of a load-modify-store instruction. When the root node 9450 // in a match is a store, isel doesn't know how to remap non-chain non-flag 9451 // uses of other nodes in the match, such as the ADD in this case. This 9452 // leads to the ADD being left around and reselected, with the result being 9453 // two adds in the output. Alas, even if none our users are stores, that 9454 // doesn't prove we're O.K. Ergo, if we have any parents that aren't 9455 // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require 9456 // climbing the DAG back to the root, and it doesn't seem to be worth the 9457 // effort. 9458 for (SDNode::use_iterator UI = Op.getNode()->use_begin(), 9459 UE = Op.getNode()->use_end(); UI != UE; ++UI) 9460 if (UI->getOpcode() != ISD::CopyToReg && 9461 UI->getOpcode() != ISD::SETCC && 9462 UI->getOpcode() != ISD::STORE) 9463 goto default_case; 9464 9465 if (ConstantSDNode *C = 9466 dyn_cast<ConstantSDNode>(ArithOp.getNode()->getOperand(1))) { 9467 // An add of one will be selected as an INC. 9468 if (C->getAPIntValue() == 1) { 9469 Opcode = X86ISD::INC; 9470 NumOperands = 1; 9471 break; 9472 } 9473 9474 // An add of negative one (subtract of one) will be selected as a DEC. 9475 if (C->getAPIntValue().isAllOnesValue()) { 9476 Opcode = X86ISD::DEC; 9477 NumOperands = 1; 9478 break; 9479 } 9480 } 9481 9482 // Otherwise use a regular EFLAGS-setting add. 
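    // (The flag-setting X86ISD nodes produce the arithmetic value as result 0
    // and EFLAGS as result 1; EmitTest returns result 1 of the node built below.)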
9483 Opcode = X86ISD::ADD; 9484 NumOperands = 2; 9485 break; 9486 case ISD::AND: { 9487 // If the primary and result isn't used, don't bother using X86ISD::AND, 9488 // because a TEST instruction will be better. 9489 bool NonFlagUse = false; 9490 for (SDNode::use_iterator UI = Op.getNode()->use_begin(), 9491 UE = Op.getNode()->use_end(); UI != UE; ++UI) { 9492 SDNode *User = *UI; 9493 unsigned UOpNo = UI.getOperandNo(); 9494 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) { 9495 // Look pass truncate. 9496 UOpNo = User->use_begin().getOperandNo(); 9497 User = *User->use_begin(); 9498 } 9499 9500 if (User->getOpcode() != ISD::BRCOND && 9501 User->getOpcode() != ISD::SETCC && 9502 !(User->getOpcode() == ISD::SELECT && UOpNo == 0)) { 9503 NonFlagUse = true; 9504 break; 9505 } 9506 } 9507 9508 if (!NonFlagUse) 9509 break; 9510 } 9511 // FALL THROUGH 9512 case ISD::SUB: 9513 case ISD::OR: 9514 case ISD::XOR: 9515 // Due to the ISEL shortcoming noted above, be conservative if this op is 9516 // likely to be selected as part of a load-modify-store instruction. 9517 for (SDNode::use_iterator UI = Op.getNode()->use_begin(), 9518 UE = Op.getNode()->use_end(); UI != UE; ++UI) 9519 if (UI->getOpcode() == ISD::STORE) 9520 goto default_case; 9521 9522 // Otherwise use a regular EFLAGS-setting instruction. 9523 switch (ArithOp.getOpcode()) { 9524 default: llvm_unreachable("unexpected operator!"); 9525 case ISD::SUB: Opcode = X86ISD::SUB; break; 9526 case ISD::XOR: Opcode = X86ISD::XOR; break; 9527 case ISD::AND: Opcode = X86ISD::AND; break; 9528 case ISD::OR: { 9529 if (!NeedTruncation && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) { 9530 SDValue EFLAGS = LowerVectorAllZeroTest(Op, Subtarget, DAG); 9531 if (EFLAGS.getNode()) 9532 return EFLAGS; 9533 } 9534 Opcode = X86ISD::OR; 9535 break; 9536 } 9537 } 9538 9539 NumOperands = 2; 9540 break; 9541 case X86ISD::ADD: 9542 case X86ISD::SUB: 9543 case X86ISD::INC: 9544 case X86ISD::DEC: 9545 case X86ISD::OR: 9546 case X86ISD::XOR: 9547 case X86ISD::AND: 9548 return SDValue(Op.getNode(), 1); 9549 default: 9550 default_case: 9551 break; 9552 } 9553 9554 // If we found that truncation is beneficial, perform the truncation and 9555 // update 'Op'. 9556 if (NeedTruncation) { 9557 EVT VT = Op.getValueType(); 9558 SDValue WideVal = Op->getOperand(0); 9559 EVT WideVT = WideVal.getValueType(); 9560 unsigned ConvertedOp = 0; 9561 // Use a target machine opcode to prevent further DAGCombine 9562 // optimizations that may separate the arithmetic operations 9563 // from the setcc node. 9564 switch (WideVal.getOpcode()) { 9565 default: break; 9566 case ISD::ADD: ConvertedOp = X86ISD::ADD; break; 9567 case ISD::SUB: ConvertedOp = X86ISD::SUB; break; 9568 case ISD::AND: ConvertedOp = X86ISD::AND; break; 9569 case ISD::OR: ConvertedOp = X86ISD::OR; break; 9570 case ISD::XOR: ConvertedOp = X86ISD::XOR; break; 9571 } 9572 9573 if (ConvertedOp) { 9574 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 9575 if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) { 9576 SDValue V0 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(0)); 9577 SDValue V1 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(1)); 9578 Op = DAG.getNode(ConvertedOp, dl, VT, V0, V1); 9579 } 9580 } 9581 } 9582 9583 if (Opcode == 0) 9584 // Emit a CMP with 0, which is the TEST pattern. 
9585 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op, 9586 DAG.getConstant(0, Op.getValueType())); 9587 9588 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); 9589 SmallVector<SDValue, 4> Ops; 9590 for (unsigned i = 0; i != NumOperands; ++i) 9591 Ops.push_back(Op.getOperand(i)); 9592 9593 SDValue New = DAG.getNode(Opcode, dl, VTs, &Ops[0], NumOperands); 9594 DAG.ReplaceAllUsesWith(Op, New); 9595 return SDValue(New.getNode(), 1); 9596} 9597 9598/// Emit nodes that will be selected as "cmp Op0,Op1", or something 9599/// equivalent. 9600SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC, 9601 SelectionDAG &DAG) const { 9602 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1)) 9603 if (C->getAPIntValue() == 0) 9604 return EmitTest(Op0, X86CC, DAG); 9605 9606 SDLoc dl(Op0); 9607 if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 || 9608 Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) { 9609 // Use SUB instead of CMP to enable CSE between SUB and CMP. 9610 SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32); 9611 SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs, 9612 Op0, Op1); 9613 return SDValue(Sub.getNode(), 1); 9614 } 9615 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1); 9616} 9617 9618/// Convert a comparison if required by the subtarget. 9619SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp, 9620 SelectionDAG &DAG) const { 9621 // If the subtarget does not support the FUCOMI instruction, floating-point 9622 // comparisons have to be converted. 9623 if (Subtarget->hasCMov() || 9624 Cmp.getOpcode() != X86ISD::CMP || 9625 !Cmp.getOperand(0).getValueType().isFloatingPoint() || 9626 !Cmp.getOperand(1).getValueType().isFloatingPoint()) 9627 return Cmp; 9628 9629 // The instruction selector will select an FUCOM instruction instead of 9630 // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence 9631 // build an SDNode sequence that transfers the result from FPSW into EFLAGS: 9632 // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8)))) 9633 SDLoc dl(Cmp); 9634 SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp); 9635 SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW); 9636 SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW, 9637 DAG.getConstant(8, MVT::i8)); 9638 SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl); 9639 return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl); 9640} 9641 9642static bool isAllOnes(SDValue V) { 9643 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V); 9644 return C && C->isAllOnesValue(); 9645} 9646 9647/// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node 9648/// if it's possible. 9649SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC, 9650 SDLoc dl, SelectionDAG &DAG) const { 9651 SDValue Op0 = And.getOperand(0); 9652 SDValue Op1 = And.getOperand(1); 9653 if (Op0.getOpcode() == ISD::TRUNCATE) 9654 Op0 = Op0.getOperand(0); 9655 if (Op1.getOpcode() == ISD::TRUNCATE) 9656 Op1 = Op1.getOperand(0); 9657 9658 SDValue LHS, RHS; 9659 if (Op1.getOpcode() == ISD::SHL) 9660 std::swap(Op0, Op1); 9661 if (Op0.getOpcode() == ISD::SHL) { 9662 if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0))) 9663 if (And00C->getZExtValue() == 1) { 9664 // If we looked past a truncate, check that it's only truncating away 9665 // known zeros. 
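        // (i.e. ComputeMaskedBits below must prove that the top
        // BitWidth - AndBitWidth bits of Op0 are zero, otherwise we give up.)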
9666 unsigned BitWidth = Op0.getValueSizeInBits(); 9667 unsigned AndBitWidth = And.getValueSizeInBits(); 9668 if (BitWidth > AndBitWidth) { 9669 APInt Zeros, Ones; 9670 DAG.ComputeMaskedBits(Op0, Zeros, Ones); 9671 if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth) 9672 return SDValue(); 9673 } 9674 LHS = Op1; 9675 RHS = Op0.getOperand(1); 9676 } 9677 } else if (Op1.getOpcode() == ISD::Constant) { 9678 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1); 9679 uint64_t AndRHSVal = AndRHS->getZExtValue(); 9680 SDValue AndLHS = Op0; 9681 9682 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) { 9683 LHS = AndLHS.getOperand(0); 9684 RHS = AndLHS.getOperand(1); 9685 } 9686 9687 // Use BT if the immediate can't be encoded in a TEST instruction. 9688 if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) { 9689 LHS = AndLHS; 9690 RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), LHS.getValueType()); 9691 } 9692 } 9693 9694 if (LHS.getNode()) { 9695 // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT 9696 // instruction. Since the shift amount is in-range-or-undefined, we know 9697 // that doing a bittest on the i32 value is ok. We extend to i32 because 9698 // the encoding for the i16 version is larger than the i32 version. 9699 // Also promote i16 to i32 for performance / code size reason. 9700 if (LHS.getValueType() == MVT::i8 || 9701 LHS.getValueType() == MVT::i16) 9702 LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS); 9703 9704 // If the operand types disagree, extend the shift amount to match. Since 9705 // BT ignores high bits (like shifts) we can use anyextend. 9706 if (LHS.getValueType() != RHS.getValueType()) 9707 RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS); 9708 9709 SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS); 9710 X86::CondCode Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B; 9711 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 9712 DAG.getConstant(Cond, MVT::i8), BT); 9713 } 9714 9715 return SDValue(); 9716} 9717 9718/// \brief - Turns an ISD::CondCode into a value suitable for SSE floating point 9719/// mask CMPs. 9720static int translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0, 9721 SDValue &Op1) { 9722 unsigned SSECC; 9723 bool Swap = false; 9724 9725 // SSE Condition code mapping: 9726 // 0 - EQ 9727 // 1 - LT 9728 // 2 - LE 9729 // 3 - UNORD 9730 // 4 - NEQ 9731 // 5 - NLT 9732 // 6 - NLE 9733 // 7 - ORD 9734 switch (SetCCOpcode) { 9735 default: llvm_unreachable("Unexpected SETCC condition"); 9736 case ISD::SETOEQ: 9737 case ISD::SETEQ: SSECC = 0; break; 9738 case ISD::SETOGT: 9739 case ISD::SETGT: Swap = true; // Fallthrough 9740 case ISD::SETLT: 9741 case ISD::SETOLT: SSECC = 1; break; 9742 case ISD::SETOGE: 9743 case ISD::SETGE: Swap = true; // Fallthrough 9744 case ISD::SETLE: 9745 case ISD::SETOLE: SSECC = 2; break; 9746 case ISD::SETUO: SSECC = 3; break; 9747 case ISD::SETUNE: 9748 case ISD::SETNE: SSECC = 4; break; 9749 case ISD::SETULE: Swap = true; // Fallthrough 9750 case ISD::SETUGE: SSECC = 5; break; 9751 case ISD::SETULT: Swap = true; // Fallthrough 9752 case ISD::SETUGT: SSECC = 6; break; 9753 case ISD::SETO: SSECC = 7; break; 9754 case ISD::SETUEQ: 9755 case ISD::SETONE: SSECC = 8; break; 9756 } 9757 if (Swap) 9758 std::swap(Op0, Op1); 9759 9760 return SSECC; 9761} 9762 9763// Lower256IntVSETCC - Break a VSETCC 256-bit integer VSETCC into two new 128 9764// ones, and then concatenate the result back. 
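// e.g. a v8i32 compare becomes two v4i32 compares whose results are rejoined
// with CONCAT_VECTORS.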
9765static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) { 9766 MVT VT = Op.getSimpleValueType(); 9767 9768 assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC && 9769 "Unsupported value type for operation"); 9770 9771 unsigned NumElems = VT.getVectorNumElements(); 9772 SDLoc dl(Op); 9773 SDValue CC = Op.getOperand(2); 9774 9775 // Extract the LHS vectors 9776 SDValue LHS = Op.getOperand(0); 9777 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl); 9778 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl); 9779 9780 // Extract the RHS vectors 9781 SDValue RHS = Op.getOperand(1); 9782 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl); 9783 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl); 9784 9785 // Issue the operation on the smaller types and concatenate the result back 9786 MVT EltVT = VT.getVectorElementType(); 9787 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); 9788 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, 9789 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC), 9790 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC)); 9791} 9792 9793static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) { 9794 SDValue Cond; 9795 SDValue Op0 = Op.getOperand(0); 9796 SDValue Op1 = Op.getOperand(1); 9797 SDValue CC = Op.getOperand(2); 9798 MVT VT = Op.getSimpleValueType(); 9799 9800 assert(Op0.getValueType().getVectorElementType().getSizeInBits() >= 32 && 9801 Op.getValueType().getScalarType() == MVT::i1 && 9802 "Cannot set masked compare for this operation"); 9803 9804 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 9805 SDLoc dl(Op); 9806 9807 bool Unsigned = false; 9808 unsigned SSECC; 9809 switch (SetCCOpcode) { 9810 default: llvm_unreachable("Unexpected SETCC condition"); 9811 case ISD::SETNE: SSECC = 4; break; 9812 case ISD::SETEQ: SSECC = 0; break; 9813 case ISD::SETUGT: Unsigned = true; 9814 case ISD::SETGT: SSECC = 6; break; // NLE 9815 case ISD::SETULT: Unsigned = true; 9816 case ISD::SETLT: SSECC = 1; break; 9817 case ISD::SETUGE: Unsigned = true; 9818 case ISD::SETGE: SSECC = 5; break; // NLT 9819 case ISD::SETULE: Unsigned = true; 9820 case ISD::SETLE: SSECC = 2; break; 9821 } 9822 unsigned Opc = Unsigned ? X86ISD::CMPMU: X86ISD::CMPM; 9823 return DAG.getNode(Opc, dl, VT, Op0, Op1, 9824 DAG.getConstant(SSECC, MVT::i8)); 9825 9826} 9827 9828static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget, 9829 SelectionDAG &DAG) { 9830 SDValue Cond; 9831 SDValue Op0 = Op.getOperand(0); 9832 SDValue Op1 = Op.getOperand(1); 9833 SDValue CC = Op.getOperand(2); 9834 MVT VT = Op.getSimpleValueType(); 9835 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 9836 bool isFP = Op.getOperand(1).getSimpleValueType().isFloatingPoint(); 9837 SDLoc dl(Op); 9838 9839 if (isFP) { 9840#ifndef NDEBUG 9841 MVT EltVT = Op0.getSimpleValueType().getVectorElementType(); 9842 assert(EltVT == MVT::f32 || EltVT == MVT::f64); 9843#endif 9844 9845 unsigned SSECC = translateX86FSETCC(SetCCOpcode, Op0, Op1); 9846 unsigned Opc = X86ISD::CMPP; 9847 if (Subtarget->hasAVX512() && VT.getVectorElementType() == MVT::i1) { 9848 assert(VT.getVectorNumElements() <= 16); 9849 Opc = X86ISD::CMPM; 9850 } 9851 // In the two special cases we can't handle, emit two comparisons. 
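    // (SSECC == 8 is the sentinel translateX86FSETCC returns for SETUEQ and
    // SETONE: UEQ is built as unord | eq, and ONE as ord & neq.)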
9852 if (SSECC == 8) { 9853 unsigned CC0, CC1; 9854 unsigned CombineOpc; 9855 if (SetCCOpcode == ISD::SETUEQ) { 9856 CC0 = 3; CC1 = 0; CombineOpc = ISD::OR; 9857 } else { 9858 assert(SetCCOpcode == ISD::SETONE); 9859 CC0 = 7; CC1 = 4; CombineOpc = ISD::AND; 9860 } 9861 9862 SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1, 9863 DAG.getConstant(CC0, MVT::i8)); 9864 SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1, 9865 DAG.getConstant(CC1, MVT::i8)); 9866 return DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1); 9867 } 9868 // Handle all other FP comparisons here. 9869 return DAG.getNode(Opc, dl, VT, Op0, Op1, 9870 DAG.getConstant(SSECC, MVT::i8)); 9871 } 9872 9873 // Break 256-bit integer vector compare into smaller ones. 9874 if (VT.is256BitVector() && !Subtarget->hasInt256()) 9875 return Lower256IntVSETCC(Op, DAG); 9876 9877 bool MaskResult = (VT.getVectorElementType() == MVT::i1); 9878 EVT OpVT = Op1.getValueType(); 9879 if (Subtarget->hasAVX512()) { 9880 if (Op1.getValueType().is512BitVector() || 9881 (MaskResult && OpVT.getVectorElementType().getSizeInBits() >= 32)) 9882 return LowerIntVSETCC_AVX512(Op, DAG); 9883 9884 // In AVX-512 architecture setcc returns mask with i1 elements, 9885 // But there is no compare instruction for i8 and i16 elements. 9886 // We are not talking about 512-bit operands in this case, these 9887 // types are illegal. 9888 if (MaskResult && 9889 (OpVT.getVectorElementType().getSizeInBits() < 32 && 9890 OpVT.getVectorElementType().getSizeInBits() >= 8)) 9891 return DAG.getNode(ISD::TRUNCATE, dl, VT, 9892 DAG.getNode(ISD::SETCC, dl, OpVT, Op0, Op1, CC)); 9893 } 9894 9895 // We are handling one of the integer comparisons here. Since SSE only has 9896 // GT and EQ comparisons for integer, swapping operands and multiple 9897 // operations may be required for some comparisons. 9898 unsigned Opc; 9899 bool Swap = false, Invert = false, FlipSigns = false, MinMax = false; 9900 9901 switch (SetCCOpcode) { 9902 default: llvm_unreachable("Unexpected SETCC condition"); 9903 case ISD::SETNE: Invert = true; 9904 case ISD::SETEQ: Opc = MaskResult? X86ISD::PCMPEQM: X86ISD::PCMPEQ; break; 9905 case ISD::SETLT: Swap = true; 9906 case ISD::SETGT: Opc = MaskResult? X86ISD::PCMPGTM: X86ISD::PCMPGT; break; 9907 case ISD::SETGE: Swap = true; 9908 case ISD::SETLE: Opc = MaskResult? X86ISD::PCMPGTM: X86ISD::PCMPGT; 9909 Invert = true; break; 9910 case ISD::SETULT: Swap = true; 9911 case ISD::SETUGT: Opc = MaskResult? X86ISD::PCMPGTM: X86ISD::PCMPGT; 9912 FlipSigns = true; break; 9913 case ISD::SETUGE: Swap = true; 9914 case ISD::SETULE: Opc = MaskResult? X86ISD::PCMPGTM: X86ISD::PCMPGT; 9915 FlipSigns = true; Invert = true; break; 9916 } 9917 9918 // Special case: Use min/max operations for SETULE/SETUGE 9919 MVT VET = VT.getVectorElementType(); 9920 bool hasMinMax = 9921 (Subtarget->hasSSE41() && (VET >= MVT::i8 && VET <= MVT::i32)) 9922 || (Subtarget->hasSSE2() && (VET == MVT::i8)); 9923 9924 if (hasMinMax) { 9925 switch (SetCCOpcode) { 9926 default: break; 9927 case ISD::SETULE: Opc = X86ISD::UMIN; MinMax = true; break; 9928 case ISD::SETUGE: Opc = X86ISD::UMAX; MinMax = true; break; 9929 } 9930 9931 if (MinMax) { Swap = false; Invert = false; FlipSigns = false; } 9932 } 9933 9934 if (Swap) 9935 std::swap(Op0, Op1); 9936 9937 // Check that the operation in question is available (most are plain SSE2, 9938 // but PCMPGTQ and PCMPEQQ have different requirements). 
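  // PCMPEQQ needs SSE4.1 and PCMPGTQ needs SSE4.2; when they are unavailable
  // the v2i64 compares are emulated with 32-bit operations below.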
9939 if (VT == MVT::v2i64) { 9940 if (Opc == X86ISD::PCMPGT && !Subtarget->hasSSE42()) { 9941 assert(Subtarget->hasSSE2() && "Don't know how to lower!"); 9942 9943 // First cast everything to the right type. 9944 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0); 9945 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1); 9946 9947 // Since SSE has no unsigned integer comparisons, we need to flip the sign 9948 // bits of the inputs before performing those operations. The lower 9949 // compare is always unsigned. 9950 SDValue SB; 9951 if (FlipSigns) { 9952 SB = DAG.getConstant(0x80000000U, MVT::v4i32); 9953 } else { 9954 SDValue Sign = DAG.getConstant(0x80000000U, MVT::i32); 9955 SDValue Zero = DAG.getConstant(0x00000000U, MVT::i32); 9956 SB = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, 9957 Sign, Zero, Sign, Zero); 9958 } 9959 Op0 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op0, SB); 9960 Op1 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op1, SB); 9961 9962 // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2)) 9963 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1); 9964 SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1); 9965 9966 // Create masks for only the low parts/high parts of the 64 bit integers. 9967 static const int MaskHi[] = { 1, 1, 3, 3 }; 9968 static const int MaskLo[] = { 0, 0, 2, 2 }; 9969 SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi); 9970 SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo); 9971 SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi); 9972 9973 SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo); 9974 Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi); 9975 9976 if (Invert) 9977 Result = DAG.getNOT(dl, Result, MVT::v4i32); 9978 9979 return DAG.getNode(ISD::BITCAST, dl, VT, Result); 9980 } 9981 9982 if (Opc == X86ISD::PCMPEQ && !Subtarget->hasSSE41()) { 9983 // If pcmpeqq is missing but pcmpeqd is available synthesize pcmpeqq with 9984 // pcmpeqd + pshufd + pand. 9985 assert(Subtarget->hasSSE2() && !FlipSigns && "Don't know how to lower!"); 9986 9987 // First cast everything to the right type. 9988 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0); 9989 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1); 9990 9991 // Do the compare. 9992 SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1); 9993 9994 // Make sure the lower and upper halves are both all-ones. 9995 static const int Mask[] = { 1, 0, 3, 2 }; 9996 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask); 9997 Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf); 9998 9999 if (Invert) 10000 Result = DAG.getNOT(dl, Result, MVT::v4i32); 10001 10002 return DAG.getNode(ISD::BITCAST, dl, VT, Result); 10003 } 10004 } 10005 10006 // Since SSE has no unsigned integer comparisons, we need to flip the sign 10007 // bits of the inputs before performing those operations. 10008 if (FlipSigns) { 10009 EVT EltVT = VT.getVectorElementType(); 10010 SDValue SB = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), VT); 10011 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SB); 10012 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SB); 10013 } 10014 10015 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 10016 10017 // If the logical-not of the result is required, perform that now. 
10018 if (Invert) 10019 Result = DAG.getNOT(dl, Result, VT); 10020 10021 if (MinMax) 10022 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result); 10023 10024 return Result; 10025} 10026 10027SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { 10028 10029 MVT VT = Op.getSimpleValueType(); 10030 10031 if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG); 10032 10033 assert(VT == MVT::i8 && "SetCC type must be 8-bit integer"); 10034 SDValue Op0 = Op.getOperand(0); 10035 SDValue Op1 = Op.getOperand(1); 10036 SDLoc dl(Op); 10037 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 10038 10039 // Optimize to BT if possible. 10040 // Lower (X & (1 << N)) == 0 to BT(X, N). 10041 // Lower ((X >>u N) & 1) != 0 to BT(X, N). 10042 // Lower ((X >>s N) & 1) != 0 to BT(X, N). 10043 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() && 10044 Op1.getOpcode() == ISD::Constant && 10045 cast<ConstantSDNode>(Op1)->isNullValue() && 10046 (CC == ISD::SETEQ || CC == ISD::SETNE)) { 10047 SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG); 10048 if (NewSetCC.getNode()) 10049 return NewSetCC; 10050 } 10051 10052 // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of 10053 // these. 10054 if (Op1.getOpcode() == ISD::Constant && 10055 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 || 10056 cast<ConstantSDNode>(Op1)->isNullValue()) && 10057 (CC == ISD::SETEQ || CC == ISD::SETNE)) { 10058 10059 // If the input is a setcc, then reuse the input setcc or use a new one with 10060 // the inverted condition. 10061 if (Op0.getOpcode() == X86ISD::SETCC) { 10062 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0); 10063 bool Invert = (CC == ISD::SETNE) ^ 10064 cast<ConstantSDNode>(Op1)->isNullValue(); 10065 if (!Invert) return Op0; 10066 10067 CCode = X86::GetOppositeBranchCondition(CCode); 10068 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 10069 DAG.getConstant(CCode, MVT::i8), Op0.getOperand(1)); 10070 } 10071 } 10072 10073 bool isFP = Op1.getSimpleValueType().isFloatingPoint(); 10074 unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG); 10075 if (X86CC == X86::COND_INVALID) 10076 return SDValue(); 10077 10078 SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, DAG); 10079 EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG); 10080 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 10081 DAG.getConstant(X86CC, MVT::i8), EFLAGS); 10082} 10083 10084// isX86LogicalCmp - Return true if opcode is a X86 logical comparison. 
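// That is, a node whose EFLAGS output can feed SETCC / CMOV / BRCOND directly:
// a CMP-like node, or the flags result of one of the flag-setting arithmetic
// nodes listed below.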
10085static bool isX86LogicalCmp(SDValue Op) { 10086 unsigned Opc = Op.getNode()->getOpcode(); 10087 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI || 10088 Opc == X86ISD::SAHF) 10089 return true; 10090 if (Op.getResNo() == 1 && 10091 (Opc == X86ISD::ADD || 10092 Opc == X86ISD::SUB || 10093 Opc == X86ISD::ADC || 10094 Opc == X86ISD::SBB || 10095 Opc == X86ISD::SMUL || 10096 Opc == X86ISD::UMUL || 10097 Opc == X86ISD::INC || 10098 Opc == X86ISD::DEC || 10099 Opc == X86ISD::OR || 10100 Opc == X86ISD::XOR || 10101 Opc == X86ISD::AND)) 10102 return true; 10103 10104 if (Op.getResNo() == 2 && Opc == X86ISD::UMUL) 10105 return true; 10106 10107 return false; 10108} 10109 10110static bool isZero(SDValue V) { 10111 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V); 10112 return C && C->isNullValue(); 10113} 10114 10115static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) { 10116 if (V.getOpcode() != ISD::TRUNCATE) 10117 return false; 10118 10119 SDValue VOp0 = V.getOperand(0); 10120 unsigned InBits = VOp0.getValueSizeInBits(); 10121 unsigned Bits = V.getValueSizeInBits(); 10122 return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits)); 10123} 10124 10125SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 10126 bool addTest = true; 10127 SDValue Cond = Op.getOperand(0); 10128 SDValue Op1 = Op.getOperand(1); 10129 SDValue Op2 = Op.getOperand(2); 10130 SDLoc DL(Op); 10131 EVT VT = Op1.getValueType(); 10132 SDValue CC; 10133 10134 // Lower fp selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops 10135 // are available. Otherwise fp cmovs get lowered into a less efficient branch 10136 // sequence later on. 10137 if (Cond.getOpcode() == ISD::SETCC && 10138 ((Subtarget->hasSSE2() && (VT == MVT::f32 || VT == MVT::f64)) || 10139 (Subtarget->hasSSE1() && VT == MVT::f32)) && 10140 VT == Cond.getOperand(0).getValueType() && Cond->hasOneUse()) { 10141 SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1); 10142 int SSECC = translateX86FSETCC( 10143 cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1); 10144 10145 if (SSECC != 8) { 10146 unsigned Opcode = VT == MVT::f32 ? X86ISD::FSETCCss : X86ISD::FSETCCsd; 10147 SDValue Cmp = DAG.getNode(Opcode, DL, VT, CondOp0, CondOp1, 10148 DAG.getConstant(SSECC, MVT::i8)); 10149 SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2); 10150 SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1); 10151 return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And); 10152 } 10153 } 10154 10155 if (Cond.getOpcode() == ISD::SETCC) { 10156 SDValue NewCond = LowerSETCC(Cond, DAG); 10157 if (NewCond.getNode()) 10158 Cond = NewCond; 10159 } 10160 10161 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y 10162 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y 10163 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y 10164 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y 10165 if (Cond.getOpcode() == X86ISD::SETCC && 10166 Cond.getOperand(1).getOpcode() == X86ISD::CMP && 10167 isZero(Cond.getOperand(1).getOperand(1))) { 10168 SDValue Cmp = Cond.getOperand(1); 10169 10170 unsigned CondCode =cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue(); 10171 10172 if ((isAllOnes(Op1) || isAllOnes(Op2)) && 10173 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) { 10174 SDValue Y = isAllOnes(Op2) ? 
Op1 : Op2; 10175 10176 SDValue CmpOp0 = Cmp.getOperand(0); 10177 // Apply further optimizations for special cases 10178 // (select (x != 0), -1, 0) -> neg & sbb 10179 // (select (x == 0), 0, -1) -> neg & sbb 10180 if (ConstantSDNode *YC = dyn_cast<ConstantSDNode>(Y)) 10181 if (YC->isNullValue() && 10182 (isAllOnes(Op1) == (CondCode == X86::COND_NE))) { 10183 SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32); 10184 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, VTs, 10185 DAG.getConstant(0, CmpOp0.getValueType()), 10186 CmpOp0); 10187 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(), 10188 DAG.getConstant(X86::COND_B, MVT::i8), 10189 SDValue(Neg.getNode(), 1)); 10190 return Res; 10191 } 10192 10193 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, 10194 CmpOp0, DAG.getConstant(1, CmpOp0.getValueType())); 10195 Cmp = ConvertCmpIfNecessary(Cmp, DAG); 10196 10197 SDValue Res = // Res = 0 or -1. 10198 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(), 10199 DAG.getConstant(X86::COND_B, MVT::i8), Cmp); 10200 10201 if (isAllOnes(Op1) != (CondCode == X86::COND_E)) 10202 Res = DAG.getNOT(DL, Res, Res.getValueType()); 10203 10204 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2); 10205 if (N2C == 0 || !N2C->isNullValue()) 10206 Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y); 10207 return Res; 10208 } 10209 } 10210 10211 // Look past (and (setcc_carry (cmp ...)), 1). 10212 if (Cond.getOpcode() == ISD::AND && 10213 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) { 10214 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 10215 if (C && C->getAPIntValue() == 1) 10216 Cond = Cond.getOperand(0); 10217 } 10218 10219 // If condition flag is set by a X86ISD::CMP, then use it as the condition 10220 // setting operand in place of the X86ISD::SETCC. 10221 unsigned CondOpcode = Cond.getOpcode(); 10222 if (CondOpcode == X86ISD::SETCC || 10223 CondOpcode == X86ISD::SETCC_CARRY) { 10224 CC = Cond.getOperand(0); 10225 10226 SDValue Cmp = Cond.getOperand(1); 10227 unsigned Opc = Cmp.getOpcode(); 10228 MVT VT = Op.getSimpleValueType(); 10229 10230 bool IllegalFPCMov = false; 10231 if (VT.isFloatingPoint() && !VT.isVector() && 10232 !isScalarFPTypeInSSEReg(VT)) // FPStack? 
10233 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue()); 10234 10235 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) || 10236 Opc == X86ISD::BT) { // FIXME 10237 Cond = Cmp; 10238 addTest = false; 10239 } 10240 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO || 10241 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO || 10242 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) && 10243 Cond.getOperand(0).getValueType() != MVT::i8)) { 10244 SDValue LHS = Cond.getOperand(0); 10245 SDValue RHS = Cond.getOperand(1); 10246 unsigned X86Opcode; 10247 unsigned X86Cond; 10248 SDVTList VTs; 10249 switch (CondOpcode) { 10250 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break; 10251 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break; 10252 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break; 10253 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break; 10254 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break; 10255 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break; 10256 default: llvm_unreachable("unexpected overflowing operator"); 10257 } 10258 if (CondOpcode == ISD::UMULO) 10259 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(), 10260 MVT::i32); 10261 else 10262 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32); 10263 10264 SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS); 10265 10266 if (CondOpcode == ISD::UMULO) 10267 Cond = X86Op.getValue(2); 10268 else 10269 Cond = X86Op.getValue(1); 10270 10271 CC = DAG.getConstant(X86Cond, MVT::i8); 10272 addTest = false; 10273 } 10274 10275 if (addTest) { 10276 // Look pass the truncate if the high bits are known zero. 10277 if (isTruncWithZeroHighBitsInput(Cond, DAG)) 10278 Cond = Cond.getOperand(0); 10279 10280 // We know the result of AND is compared against zero. Try to match 10281 // it to BT. 10282 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) { 10283 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG); 10284 if (NewSetCC.getNode()) { 10285 CC = NewSetCC.getOperand(0); 10286 Cond = NewSetCC.getOperand(1); 10287 addTest = false; 10288 } 10289 } 10290 } 10291 10292 if (addTest) { 10293 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 10294 Cond = EmitTest(Cond, X86::COND_NE, DAG); 10295 } 10296 10297 // a < b ? -1 : 0 -> RES = ~setcc_carry 10298 // a < b ? 0 : -1 -> RES = setcc_carry 10299 // a >= b ? -1 : 0 -> RES = setcc_carry 10300 // a >= b ? 0 : -1 -> RES = ~setcc_carry 10301 if (Cond.getOpcode() == X86ISD::SUB) { 10302 Cond = ConvertCmpIfNecessary(Cond, DAG); 10303 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue(); 10304 10305 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) && 10306 (isAllOnes(Op1) || isAllOnes(Op2)) && (isZero(Op1) || isZero(Op2))) { 10307 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(), 10308 DAG.getConstant(X86::COND_B, MVT::i8), Cond); 10309 if (isAllOnes(Op1) != (CondCode == X86::COND_B)) 10310 return DAG.getNOT(DL, Res, Res.getValueType()); 10311 return Res; 10312 } 10313 } 10314 10315 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate 10316 // widen the cmov and push the truncate through. This avoids introducing a new 10317 // branch during isel and doesn't add any extensions. 
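  // e.g. (select cc, (trunc X), (trunc Y)) becomes (trunc (cmov cc, X, Y)) when
  // X and Y have the same wider type and neither is a CopyFromReg.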
10318 if (Op.getValueType() == MVT::i8 && 10319 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) { 10320 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0); 10321 if (T1.getValueType() == T2.getValueType() && 10322 // Blacklist CopyFromReg to avoid partial register stalls. 10323 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){ 10324 SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue); 10325 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VTs, T2, T1, CC, Cond); 10326 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov); 10327 } 10328 } 10329 10330 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if 10331 // condition is true. 10332 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue); 10333 SDValue Ops[] = { Op2, Op1, CC, Cond }; 10334 return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops, array_lengthof(Ops)); 10335} 10336 10337static SDValue LowerSIGN_EXTEND_AVX512(SDValue Op, SelectionDAG &DAG) { 10338 MVT VT = Op->getSimpleValueType(0); 10339 SDValue In = Op->getOperand(0); 10340 MVT InVT = In.getSimpleValueType(); 10341 SDLoc dl(Op); 10342 10343 unsigned int NumElts = VT.getVectorNumElements(); 10344 if (NumElts != 8 && NumElts != 16) 10345 return SDValue(); 10346 10347 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1) 10348 return DAG.getNode(X86ISD::VSEXT, dl, VT, In); 10349 10350 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 10351 assert (InVT.getVectorElementType() == MVT::i1 && "Unexpected vector type"); 10352 10353 MVT ExtVT = (NumElts == 8) ? MVT::v8i64 : MVT::v16i32; 10354 Constant *C = ConstantInt::get(*DAG.getContext(), 10355 APInt::getAllOnesValue(ExtVT.getScalarType().getSizeInBits())); 10356 10357 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy()); 10358 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment(); 10359 SDValue Ld = DAG.getLoad(ExtVT.getScalarType(), dl, DAG.getEntryNode(), CP, 10360 MachinePointerInfo::getConstantPool(), 10361 false, false, false, Alignment); 10362 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, dl, ExtVT, In, Ld); 10363 if (VT.is512BitVector()) 10364 return Brcst; 10365 return DAG.getNode(X86ISD::VTRUNC, dl, VT, Brcst); 10366} 10367 10368static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget, 10369 SelectionDAG &DAG) { 10370 MVT VT = Op->getSimpleValueType(0); 10371 SDValue In = Op->getOperand(0); 10372 MVT InVT = In.getSimpleValueType(); 10373 SDLoc dl(Op); 10374 10375 if (VT.is512BitVector() || InVT.getVectorElementType() == MVT::i1) 10376 return LowerSIGN_EXTEND_AVX512(Op, DAG); 10377 10378 if ((VT != MVT::v4i64 || InVT != MVT::v4i32) && 10379 (VT != MVT::v8i32 || InVT != MVT::v8i16)) 10380 return SDValue(); 10381 10382 if (Subtarget->hasInt256()) 10383 return DAG.getNode(X86ISD::VSEXT_MOVL, dl, VT, In); 10384 10385 // Optimize vectors in AVX mode 10386 // Sign extend v8i16 to v8i32 and 10387 // v4i32 to v4i64 10388 // 10389 // Divide input vector into two parts 10390 // for v4i32 the shuffle mask will be { 0, 1, -1, -1} {2, 3, -1, -1} 10391 // use vpmovsx instruction to extend v4i32 -> v2i64; v8i16 -> v4i32 10392 // concat the vectors to original VT 10393 10394 unsigned NumElems = InVT.getVectorNumElements(); 10395 SDValue Undef = DAG.getUNDEF(InVT); 10396 10397 SmallVector<int,8> ShufMask1(NumElems, -1); 10398 for (unsigned i = 0; i != NumElems/2; ++i) 10399 ShufMask1[i] = i; 10400 10401 SDValue OpLo = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask1[0]); 10402 10403 SmallVector<int,8> 
ShufMask2(NumElems, -1); 10404 for (unsigned i = 0; i != NumElems/2; ++i) 10405 ShufMask2[i] = i + NumElems/2; 10406 10407 SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask2[0]); 10408 10409 MVT HalfVT = MVT::getVectorVT(VT.getScalarType(), 10410 VT.getVectorNumElements()/2); 10411 10412 OpLo = DAG.getNode(X86ISD::VSEXT_MOVL, dl, HalfVT, OpLo); 10413 OpHi = DAG.getNode(X86ISD::VSEXT_MOVL, dl, HalfVT, OpHi); 10414 10415 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi); 10416} 10417 10418// isAndOrOfSingleUseSetCCs - Return true if node is an ISD::AND or 10419// ISD::OR of two X86ISD::SETCC nodes each of which has no other use apart 10420// from the AND / OR. 10421static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) { 10422 Opc = Op.getOpcode(); 10423 if (Opc != ISD::OR && Opc != ISD::AND) 10424 return false; 10425 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC && 10426 Op.getOperand(0).hasOneUse() && 10427 Op.getOperand(1).getOpcode() == X86ISD::SETCC && 10428 Op.getOperand(1).hasOneUse()); 10429} 10430 10431// isXor1OfSetCC - Return true if node is an ISD::XOR of a X86ISD::SETCC and 10432// 1 and that the SETCC node has a single use. 10433static bool isXor1OfSetCC(SDValue Op) { 10434 if (Op.getOpcode() != ISD::XOR) 10435 return false; 10436 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 10437 if (N1C && N1C->getAPIntValue() == 1) { 10438 return Op.getOperand(0).getOpcode() == X86ISD::SETCC && 10439 Op.getOperand(0).hasOneUse(); 10440 } 10441 return false; 10442} 10443 10444SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { 10445 bool addTest = true; 10446 SDValue Chain = Op.getOperand(0); 10447 SDValue Cond = Op.getOperand(1); 10448 SDValue Dest = Op.getOperand(2); 10449 SDLoc dl(Op); 10450 SDValue CC; 10451 bool Inverted = false; 10452 10453 if (Cond.getOpcode() == ISD::SETCC) { 10454 // Check for setcc([su]{add,sub,mul}o == 0). 10455 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ && 10456 isa<ConstantSDNode>(Cond.getOperand(1)) && 10457 cast<ConstantSDNode>(Cond.getOperand(1))->isNullValue() && 10458 Cond.getOperand(0).getResNo() == 1 && 10459 (Cond.getOperand(0).getOpcode() == ISD::SADDO || 10460 Cond.getOperand(0).getOpcode() == ISD::UADDO || 10461 Cond.getOperand(0).getOpcode() == ISD::SSUBO || 10462 Cond.getOperand(0).getOpcode() == ISD::USUBO || 10463 Cond.getOperand(0).getOpcode() == ISD::SMULO || 10464 Cond.getOperand(0).getOpcode() == ISD::UMULO)) { 10465 Inverted = true; 10466 Cond = Cond.getOperand(0); 10467 } else { 10468 SDValue NewCond = LowerSETCC(Cond, DAG); 10469 if (NewCond.getNode()) 10470 Cond = NewCond; 10471 } 10472 } 10473#if 0 10474 // FIXME: LowerXALUO doesn't handle these!! 10475 else if (Cond.getOpcode() == X86ISD::ADD || 10476 Cond.getOpcode() == X86ISD::SUB || 10477 Cond.getOpcode() == X86ISD::SMUL || 10478 Cond.getOpcode() == X86ISD::UMUL) 10479 Cond = LowerXALUO(Cond, DAG); 10480#endif 10481 10482 // Look pass (and (setcc_carry (cmp ...)), 1). 10483 if (Cond.getOpcode() == ISD::AND && 10484 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) { 10485 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 10486 if (C && C->getAPIntValue() == 1) 10487 Cond = Cond.getOperand(0); 10488 } 10489 10490 // If condition flag is set by a X86ISD::CMP, then use it as the condition 10491 // setting operand in place of the X86ISD::SETCC. 
10492 unsigned CondOpcode = Cond.getOpcode(); 10493 if (CondOpcode == X86ISD::SETCC || 10494 CondOpcode == X86ISD::SETCC_CARRY) { 10495 CC = Cond.getOperand(0); 10496 10497 SDValue Cmp = Cond.getOperand(1); 10498 unsigned Opc = Cmp.getOpcode(); 10499 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp?? 10500 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) { 10501 Cond = Cmp; 10502 addTest = false; 10503 } else { 10504 switch (cast<ConstantSDNode>(CC)->getZExtValue()) { 10505 default: break; 10506 case X86::COND_O: 10507 case X86::COND_B: 10508 // These can only come from an arithmetic instruction with overflow, 10509 // e.g. SADDO, UADDO. 10510 Cond = Cond.getNode()->getOperand(1); 10511 addTest = false; 10512 break; 10513 } 10514 } 10515 } 10516 CondOpcode = Cond.getOpcode(); 10517 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO || 10518 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO || 10519 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) && 10520 Cond.getOperand(0).getValueType() != MVT::i8)) { 10521 SDValue LHS = Cond.getOperand(0); 10522 SDValue RHS = Cond.getOperand(1); 10523 unsigned X86Opcode; 10524 unsigned X86Cond; 10525 SDVTList VTs; 10526 switch (CondOpcode) { 10527 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break; 10528 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break; 10529 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break; 10530 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break; 10531 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break; 10532 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break; 10533 default: llvm_unreachable("unexpected overflowing operator"); 10534 } 10535 if (Inverted) 10536 X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond); 10537 if (CondOpcode == ISD::UMULO) 10538 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(), 10539 MVT::i32); 10540 else 10541 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32); 10542 10543 SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS); 10544 10545 if (CondOpcode == ISD::UMULO) 10546 Cond = X86Op.getValue(2); 10547 else 10548 Cond = X86Op.getValue(1); 10549 10550 CC = DAG.getConstant(X86Cond, MVT::i8); 10551 addTest = false; 10552 } else { 10553 unsigned CondOpc; 10554 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) { 10555 SDValue Cmp = Cond.getOperand(0).getOperand(1); 10556 if (CondOpc == ISD::OR) { 10557 // Also, recognize the pattern generated by an FCMP_UNE. We can emit 10558 // two branches instead of an explicit OR instruction with a 10559 // separate test. 10560 if (Cmp == Cond.getOperand(1).getOperand(1) && 10561 isX86LogicalCmp(Cmp)) { 10562 CC = Cond.getOperand(0).getOperand(0); 10563 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 10564 Chain, Dest, CC, Cmp); 10565 CC = Cond.getOperand(1).getOperand(0); 10566 Cond = Cmp; 10567 addTest = false; 10568 } 10569 } else { // ISD::AND 10570 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit 10571 // two branches instead of an explicit AND instruction with a 10572 // separate test. However, we only do this if this block doesn't 10573 // have a fall-through edge, because this requires an explicit 10574 // jmp when the condition is false. 
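      // Concretely, this branches to the false block as soon as either condition
      // fails, using the inverted condition codes, instead of materializing the
      // AND of the two setccs.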
10575 if (Cmp == Cond.getOperand(1).getOperand(1) && 10576 isX86LogicalCmp(Cmp) && 10577 Op.getNode()->hasOneUse()) { 10578 X86::CondCode CCode = 10579 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0); 10580 CCode = X86::GetOppositeBranchCondition(CCode); 10581 CC = DAG.getConstant(CCode, MVT::i8); 10582 SDNode *User = *Op.getNode()->use_begin(); 10583 // Look for an unconditional branch following this conditional branch. 10584 // We need this because we need to reverse the successors in order 10585 // to implement FCMP_OEQ. 10586 if (User->getOpcode() == ISD::BR) { 10587 SDValue FalseBB = User->getOperand(1); 10588 SDNode *NewBR = 10589 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest); 10590 assert(NewBR == User); 10591 (void)NewBR; 10592 Dest = FalseBB; 10593 10594 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 10595 Chain, Dest, CC, Cmp); 10596 X86::CondCode CCode = 10597 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0); 10598 CCode = X86::GetOppositeBranchCondition(CCode); 10599 CC = DAG.getConstant(CCode, MVT::i8); 10600 Cond = Cmp; 10601 addTest = false; 10602 } 10603 } 10604 } 10605 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) { 10606 // Recognize 'xorb (setcc), 1' patterns. The xor inverts the condition. 10607 // It should be transformed by the DAG combiner except when the condition 10608 // is set by an arithmetic-with-overflow node. 10609 X86::CondCode CCode = 10610 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0); 10611 CCode = X86::GetOppositeBranchCondition(CCode); 10612 CC = DAG.getConstant(CCode, MVT::i8); 10613 Cond = Cond.getOperand(0).getOperand(1); 10614 addTest = false; 10615 } else if (Cond.getOpcode() == ISD::SETCC && 10616 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) { 10617 // For FCMP_OEQ, we can emit 10618 // two branches instead of an explicit AND instruction with a 10619 // separate test. However, we only do this if this block doesn't 10620 // have a fall-through edge, because this requires an explicit 10621 // jmp when the condition is false. 10622 if (Op.getNode()->hasOneUse()) { 10623 SDNode *User = *Op.getNode()->use_begin(); 10624 // Look for an unconditional branch following this conditional branch. 10625 // We need this because we need to reverse the successors in order 10626 // to implement FCMP_OEQ. 10627 if (User->getOpcode() == ISD::BR) { 10628 SDValue FalseBB = User->getOperand(1); 10629 SDNode *NewBR = 10630 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest); 10631 assert(NewBR == User); 10632 (void)NewBR; 10633 Dest = FalseBB; 10634 10635 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32, 10636 Cond.getOperand(0), Cond.getOperand(1)); 10637 Cmp = ConvertCmpIfNecessary(Cmp, DAG); 10638 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 10639 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 10640 Chain, Dest, CC, Cmp); 10641 CC = DAG.getConstant(X86::COND_P, MVT::i8); 10642 Cond = Cmp; 10643 addTest = false; 10644 } 10645 } 10646 } else if (Cond.getOpcode() == ISD::SETCC && 10647 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) { 10648 // For FCMP_UNE, we can emit 10649 // two branches instead of an explicit OR instruction with a 10650 // separate test. However, we only do this if this block doesn't 10651 // have a fall-through edge, because this requires an explicit 10652 // jmp when the condition is false.
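    // (UNE is true when the operands are unordered or simply not equal, so only
    // the ordered-and-equal case goes to the false block.)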
10653 if (Op.getNode()->hasOneUse()) { 10654 SDNode *User = *Op.getNode()->use_begin(); 10655 // Look for an unconditional branch following this conditional branch. 10656 // We need this because we need to reverse the successors in order 10657 // to implement FCMP_UNE. 10658 if (User->getOpcode() == ISD::BR) { 10659 SDValue FalseBB = User->getOperand(1); 10660 SDNode *NewBR = 10661 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest); 10662 assert(NewBR == User); 10663 (void)NewBR; 10664 10665 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32, 10666 Cond.getOperand(0), Cond.getOperand(1)); 10667 Cmp = ConvertCmpIfNecessary(Cmp, DAG); 10668 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 10669 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 10670 Chain, Dest, CC, Cmp); 10671 CC = DAG.getConstant(X86::COND_NP, MVT::i8); 10672 Cond = Cmp; 10673 addTest = false; 10674 Dest = FalseBB; 10675 } 10676 } 10677 } 10678 } 10679 10680 if (addTest) { 10681 // Look past the truncate if the high bits are known zero. 10682 if (isTruncWithZeroHighBitsInput(Cond, DAG)) 10683 Cond = Cond.getOperand(0); 10684 10685 // We know the result of AND is compared against zero. Try to match 10686 // it to BT. 10687 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) { 10688 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG); 10689 if (NewSetCC.getNode()) { 10690 CC = NewSetCC.getOperand(0); 10691 Cond = NewSetCC.getOperand(1); 10692 addTest = false; 10693 } 10694 } 10695 } 10696 10697 if (addTest) { 10698 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 10699 Cond = EmitTest(Cond, X86::COND_NE, DAG); 10700 } 10701 Cond = ConvertCmpIfNecessary(Cond, DAG); 10702 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 10703 Chain, Dest, CC, Cond); 10704} 10705 10706// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets. 10707// Calls to _alloca are needed to probe the stack when allocating more than 4k 10708// bytes in one go. Touching the stack at 4K increments is necessary to ensure 10709// that the guard pages used by the OS virtual memory manager are allocated in 10710// the correct sequence. 10711SDValue 10712X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, 10713 SelectionDAG &DAG) const { 10714 assert((Subtarget->isTargetCygMing() || Subtarget->isTargetWindows() || 10715 getTargetMachine().Options.EnableSegmentedStacks) && 10716 "This should be used only on Windows targets or when segmented stacks " 10717 "are being used"); 10718 assert(!Subtarget->isTargetEnvMacho() && "Not implemented"); 10719 SDLoc dl(Op); 10720 10721 // Get the inputs. 10722 SDValue Chain = Op.getOperand(0); 10723 SDValue Size = Op.getOperand(1); 10724 // FIXME: Ensure alignment here 10725 10726 bool Is64Bit = Subtarget->is64Bit(); 10727 EVT SPTy = Is64Bit ? MVT::i64 : MVT::i32; 10728 10729 if (getTargetMachine().Options.EnableSegmentedStacks) { 10730 MachineFunction &MF = DAG.getMachineFunction(); 10731 MachineRegisterInfo &MRI = MF.getRegInfo(); 10732 10733 if (Is64Bit) { 10734 // The 64-bit implementation of segmented stacks needs to clobber both r10 10735 // and r11. This makes it impossible to use it along with nested parameters.
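      // (The 'nest' argument is itself passed in r10 on x86-64, which is why
      // the two cannot be combined.)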
10736 const Function *F = MF.getFunction(); 10737 10738 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end(); 10739 I != E; ++I) 10740 if (I->hasNestAttr()) 10741 report_fatal_error("Cannot use segmented stacks with functions that " 10742 "have nested arguments."); 10743 } 10744 10745 const TargetRegisterClass *AddrRegClass = 10746 getRegClassFor(Subtarget->is64Bit() ? MVT::i64:MVT::i32); 10747 unsigned Vreg = MRI.createVirtualRegister(AddrRegClass); 10748 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size); 10749 SDValue Value = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain, 10750 DAG.getRegister(Vreg, SPTy)); 10751 SDValue Ops1[2] = { Value, Chain }; 10752 return DAG.getMergeValues(Ops1, 2, dl); 10753 } else { 10754 SDValue Flag; 10755 unsigned Reg = (Subtarget->is64Bit() ? X86::RAX : X86::EAX); 10756 10757 Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag); 10758 Flag = Chain.getValue(1); 10759 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 10760 10761 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag); 10762 Flag = Chain.getValue(1); 10763 10764 const X86RegisterInfo *RegInfo = 10765 static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo()); 10766 Chain = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(), 10767 SPTy).getValue(1); 10768 10769 SDValue Ops1[2] = { Chain.getValue(0), Chain }; 10770 return DAG.getMergeValues(Ops1, 2, dl); 10771 } 10772} 10773 10774SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { 10775 MachineFunction &MF = DAG.getMachineFunction(); 10776 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 10777 10778 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 10779 SDLoc DL(Op); 10780 10781 if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) { 10782 // vastart just stores the address of the VarArgsFrameIndex slot into the 10783 // memory location argument. 10784 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 10785 getPointerTy()); 10786 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1), 10787 MachinePointerInfo(SV), false, false, 0); 10788 } 10789 10790 // __va_list_tag: 10791 // gp_offset (0 - 6 * 8) 10792 // fp_offset (48 - 48 + 8 * 16) 10793 // overflow_arg_area (point to parameters coming in memory). 10794 // reg_save_area 10795 SmallVector<SDValue, 8> MemOps; 10796 SDValue FIN = Op.getOperand(1); 10797 // Store gp_offset 10798 SDValue Store = DAG.getStore(Op.getOperand(0), DL, 10799 DAG.getConstant(FuncInfo->getVarArgsGPOffset(), 10800 MVT::i32), 10801 FIN, MachinePointerInfo(SV), false, false, 0); 10802 MemOps.push_back(Store); 10803 10804 // Store fp_offset 10805 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), 10806 FIN, DAG.getIntPtrConstant(4)); 10807 Store = DAG.getStore(Op.getOperand(0), DL, 10808 DAG.getConstant(FuncInfo->getVarArgsFPOffset(), 10809 MVT::i32), 10810 FIN, MachinePointerInfo(SV, 4), false, false, 0); 10811 MemOps.push_back(Store); 10812 10813 // Store ptr to overflow_arg_area 10814 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), 10815 FIN, DAG.getIntPtrConstant(4)); 10816 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 10817 getPointerTy()); 10818 Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, 10819 MachinePointerInfo(SV, 8), 10820 false, false, 0); 10821 MemOps.push_back(Store); 10822 10823 // Store ptr to reg_save_area. 
10824 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), 10825 FIN, DAG.getIntPtrConstant(8)); 10826 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), 10827 getPointerTy()); 10828 Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN, 10829 MachinePointerInfo(SV, 16), false, false, 0); 10830 MemOps.push_back(Store); 10831 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, 10832 &MemOps[0], MemOps.size()); 10833} 10834 10835SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { 10836 assert(Subtarget->is64Bit() && 10837 "LowerVAARG only handles 64-bit va_arg!"); 10838 assert((Subtarget->isTargetLinux() || 10839 Subtarget->isTargetDarwin()) && 10840 "Unhandled target in LowerVAARG"); 10841 assert(Op.getNode()->getNumOperands() == 4); 10842 SDValue Chain = Op.getOperand(0); 10843 SDValue SrcPtr = Op.getOperand(1); 10844 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 10845 unsigned Align = Op.getConstantOperandVal(3); 10846 SDLoc dl(Op); 10847 10848 EVT ArgVT = Op.getNode()->getValueType(0); 10849 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 10850 uint32_t ArgSize = getDataLayout()->getTypeAllocSize(ArgTy); 10851 uint8_t ArgMode; 10852 10853 // Decide which area this value should be read from. 10854 // TODO: Implement the AMD64 ABI in its entirety. This simple 10855 // selection mechanism works only for the basic types. 10856 if (ArgVT == MVT::f80) { 10857 llvm_unreachable("va_arg for f80 not yet implemented"); 10858 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) { 10859 ArgMode = 2; // Argument passed in XMM register. Use fp_offset. 10860 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) { 10861 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset. 10862 } else { 10863 llvm_unreachable("Unhandled argument type in LowerVAARG"); 10864 } 10865 10866 if (ArgMode == 2) { 10867 // Sanity Check: Make sure using fp_offset makes sense. 10868 assert(!getTargetMachine().Options.UseSoftFloat && 10869 !(DAG.getMachineFunction() 10870 .getFunction()->getAttributes() 10871 .hasAttribute(AttributeSet::FunctionIndex, 10872 Attribute::NoImplicitFloat)) && 10873 Subtarget->hasSSE1()); 10874 } 10875 10876 // Insert VAARG_64 node into the DAG 10877 // VAARG_64 returns two values: Variable Argument Address, Chain 10878 SmallVector<SDValue, 11> InstOps; 10879 InstOps.push_back(Chain); 10880 InstOps.push_back(SrcPtr); 10881 InstOps.push_back(DAG.getConstant(ArgSize, MVT::i32)); 10882 InstOps.push_back(DAG.getConstant(ArgMode, MVT::i8)); 10883 InstOps.push_back(DAG.getConstant(Align, MVT::i32)); 10884 SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other); 10885 SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl, 10886 VTs, &InstOps[0], InstOps.size(), 10887 MVT::i64, 10888 MachinePointerInfo(SV), 10889 /*Align=*/0, 10890 /*Volatile=*/false, 10891 /*ReadMem=*/true, 10892 /*WriteMem=*/true); 10893 Chain = VAARG.getValue(1); 10894 10895 // Load the next argument and return it 10896 return DAG.getLoad(ArgVT, dl, 10897 Chain, 10898 VAARG, 10899 MachinePointerInfo(), 10900 false, false, false, 0); 10901} 10902 10903static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget, 10904 SelectionDAG &DAG) { 10905 // X86-64 va_list is a struct { i32, i32, i8*, i8* }. 
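  // Field layout assumed here and by LowerVASTART above (24 bytes total):
  //   offset  0: i32 gp_offset
  //   offset  4: i32 fp_offset
  //   offset  8: i8* overflow_arg_area
  //   offset 16: i8* reg_save_area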
10906 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!"); 10907 SDValue Chain = Op.getOperand(0); 10908 SDValue DstPtr = Op.getOperand(1); 10909 SDValue SrcPtr = Op.getOperand(2); 10910 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); 10911 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 10912 SDLoc DL(Op); 10913 10914 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, 10915 DAG.getIntPtrConstant(24), 8, /*isVolatile*/false, 10916 false, 10917 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV)); 10918} 10919 10920// getTargetVShiftNode - Handle vector element shifts where the shift amount 10921// may or may not be a constant. Takes immediate version of shift as input. 10922static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, EVT VT, 10923 SDValue SrcOp, SDValue ShAmt, 10924 SelectionDAG &DAG) { 10925 assert(ShAmt.getValueType() == MVT::i32 && "ShAmt is not i32"); 10926 10927 if (isa<ConstantSDNode>(ShAmt)) { 10928 // Constant may be a TargetConstant. Use a regular constant. 10929 uint32_t ShiftAmt = cast<ConstantSDNode>(ShAmt)->getZExtValue(); 10930 switch (Opc) { 10931 default: llvm_unreachable("Unknown target vector shift node"); 10932 case X86ISD::VSHLI: 10933 case X86ISD::VSRLI: 10934 case X86ISD::VSRAI: 10935 return DAG.getNode(Opc, dl, VT, SrcOp, 10936 DAG.getConstant(ShiftAmt, MVT::i32)); 10937 } 10938 } 10939 10940 // Change opcode to non-immediate version 10941 switch (Opc) { 10942 default: llvm_unreachable("Unknown target vector shift node"); 10943 case X86ISD::VSHLI: Opc = X86ISD::VSHL; break; 10944 case X86ISD::VSRLI: Opc = X86ISD::VSRL; break; 10945 case X86ISD::VSRAI: Opc = X86ISD::VSRA; break; 10946 } 10947 10948 // Need to build a vector containing shift amount 10949 // Shift amount is 32-bits, but SSE instructions read 64-bit, so fill with 0 10950 SDValue ShOps[4]; 10951 ShOps[0] = ShAmt; 10952 ShOps[1] = DAG.getConstant(0, MVT::i32); 10953 ShOps[2] = ShOps[3] = DAG.getUNDEF(MVT::i32); 10954 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, &ShOps[0], 4); 10955 10956 // The return type has to be a 128-bit type with the same element 10957 // type as the input type. 10958 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 10959 EVT ShVT = MVT::getVectorVT(EltVT, 128/EltVT.getSizeInBits()); 10960 10961 ShAmt = DAG.getNode(ISD::BITCAST, dl, ShVT, ShAmt); 10962 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt); 10963} 10964 10965static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) { 10966 SDLoc dl(Op); 10967 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 10968 switch (IntNo) { 10969 default: return SDValue(); // Don't custom lower most intrinsics. 10970 // Comparison intrinsics. 
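  // These all share one shape; e.g. @llvm.x86.sse.comieq.ss(a, b) lowers
  // (roughly) to
  //   (zext i8 (X86ISD::SETCC COND_E, (X86ISD::COMI a, b)) to i32)
  // with the compare node and condition code chosen from the table below.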
10971 case Intrinsic::x86_sse_comieq_ss: 10972 case Intrinsic::x86_sse_comilt_ss: 10973 case Intrinsic::x86_sse_comile_ss: 10974 case Intrinsic::x86_sse_comigt_ss: 10975 case Intrinsic::x86_sse_comige_ss: 10976 case Intrinsic::x86_sse_comineq_ss: 10977 case Intrinsic::x86_sse_ucomieq_ss: 10978 case Intrinsic::x86_sse_ucomilt_ss: 10979 case Intrinsic::x86_sse_ucomile_ss: 10980 case Intrinsic::x86_sse_ucomigt_ss: 10981 case Intrinsic::x86_sse_ucomige_ss: 10982 case Intrinsic::x86_sse_ucomineq_ss: 10983 case Intrinsic::x86_sse2_comieq_sd: 10984 case Intrinsic::x86_sse2_comilt_sd: 10985 case Intrinsic::x86_sse2_comile_sd: 10986 case Intrinsic::x86_sse2_comigt_sd: 10987 case Intrinsic::x86_sse2_comige_sd: 10988 case Intrinsic::x86_sse2_comineq_sd: 10989 case Intrinsic::x86_sse2_ucomieq_sd: 10990 case Intrinsic::x86_sse2_ucomilt_sd: 10991 case Intrinsic::x86_sse2_ucomile_sd: 10992 case Intrinsic::x86_sse2_ucomigt_sd: 10993 case Intrinsic::x86_sse2_ucomige_sd: 10994 case Intrinsic::x86_sse2_ucomineq_sd: { 10995 unsigned Opc; 10996 ISD::CondCode CC; 10997 switch (IntNo) { 10998 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 10999 case Intrinsic::x86_sse_comieq_ss: 11000 case Intrinsic::x86_sse2_comieq_sd: 11001 Opc = X86ISD::COMI; 11002 CC = ISD::SETEQ; 11003 break; 11004 case Intrinsic::x86_sse_comilt_ss: 11005 case Intrinsic::x86_sse2_comilt_sd: 11006 Opc = X86ISD::COMI; 11007 CC = ISD::SETLT; 11008 break; 11009 case Intrinsic::x86_sse_comile_ss: 11010 case Intrinsic::x86_sse2_comile_sd: 11011 Opc = X86ISD::COMI; 11012 CC = ISD::SETLE; 11013 break; 11014 case Intrinsic::x86_sse_comigt_ss: 11015 case Intrinsic::x86_sse2_comigt_sd: 11016 Opc = X86ISD::COMI; 11017 CC = ISD::SETGT; 11018 break; 11019 case Intrinsic::x86_sse_comige_ss: 11020 case Intrinsic::x86_sse2_comige_sd: 11021 Opc = X86ISD::COMI; 11022 CC = ISD::SETGE; 11023 break; 11024 case Intrinsic::x86_sse_comineq_ss: 11025 case Intrinsic::x86_sse2_comineq_sd: 11026 Opc = X86ISD::COMI; 11027 CC = ISD::SETNE; 11028 break; 11029 case Intrinsic::x86_sse_ucomieq_ss: 11030 case Intrinsic::x86_sse2_ucomieq_sd: 11031 Opc = X86ISD::UCOMI; 11032 CC = ISD::SETEQ; 11033 break; 11034 case Intrinsic::x86_sse_ucomilt_ss: 11035 case Intrinsic::x86_sse2_ucomilt_sd: 11036 Opc = X86ISD::UCOMI; 11037 CC = ISD::SETLT; 11038 break; 11039 case Intrinsic::x86_sse_ucomile_ss: 11040 case Intrinsic::x86_sse2_ucomile_sd: 11041 Opc = X86ISD::UCOMI; 11042 CC = ISD::SETLE; 11043 break; 11044 case Intrinsic::x86_sse_ucomigt_ss: 11045 case Intrinsic::x86_sse2_ucomigt_sd: 11046 Opc = X86ISD::UCOMI; 11047 CC = ISD::SETGT; 11048 break; 11049 case Intrinsic::x86_sse_ucomige_ss: 11050 case Intrinsic::x86_sse2_ucomige_sd: 11051 Opc = X86ISD::UCOMI; 11052 CC = ISD::SETGE; 11053 break; 11054 case Intrinsic::x86_sse_ucomineq_ss: 11055 case Intrinsic::x86_sse2_ucomineq_sd: 11056 Opc = X86ISD::UCOMI; 11057 CC = ISD::SETNE; 11058 break; 11059 } 11060 11061 SDValue LHS = Op.getOperand(1); 11062 SDValue RHS = Op.getOperand(2); 11063 unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG); 11064 assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!"); 11065 SDValue Cond = DAG.getNode(Opc, dl, MVT::i32, LHS, RHS); 11066 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 11067 DAG.getConstant(X86CC, MVT::i8), Cond); 11068 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); 11069 } 11070 11071 // Arithmetic intrinsics. 
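  // PMULUDQ multiplies the even-numbered unsigned 32-bit lanes and produces
  // full 64-bit products; e.g. for v4i32 operands A and B the result is the
  // v2i64 value < zext(A[0])*zext(B[0]), zext(A[2])*zext(B[2]) >.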
11072 case Intrinsic::x86_sse2_pmulu_dq: 11073 case Intrinsic::x86_avx2_pmulu_dq: 11074 return DAG.getNode(X86ISD::PMULUDQ, dl, Op.getValueType(), 11075 Op.getOperand(1), Op.getOperand(2)); 11076 11077 // SSE2/AVX2 sub with unsigned saturation intrinsics 11078 case Intrinsic::x86_sse2_psubus_b: 11079 case Intrinsic::x86_sse2_psubus_w: 11080 case Intrinsic::x86_avx2_psubus_b: 11081 case Intrinsic::x86_avx2_psubus_w: 11082 return DAG.getNode(X86ISD::SUBUS, dl, Op.getValueType(), 11083 Op.getOperand(1), Op.getOperand(2)); 11084 11085 // SSE3/AVX horizontal add/sub intrinsics 11086 case Intrinsic::x86_sse3_hadd_ps: 11087 case Intrinsic::x86_sse3_hadd_pd: 11088 case Intrinsic::x86_avx_hadd_ps_256: 11089 case Intrinsic::x86_avx_hadd_pd_256: 11090 case Intrinsic::x86_sse3_hsub_ps: 11091 case Intrinsic::x86_sse3_hsub_pd: 11092 case Intrinsic::x86_avx_hsub_ps_256: 11093 case Intrinsic::x86_avx_hsub_pd_256: 11094 case Intrinsic::x86_ssse3_phadd_w_128: 11095 case Intrinsic::x86_ssse3_phadd_d_128: 11096 case Intrinsic::x86_avx2_phadd_w: 11097 case Intrinsic::x86_avx2_phadd_d: 11098 case Intrinsic::x86_ssse3_phsub_w_128: 11099 case Intrinsic::x86_ssse3_phsub_d_128: 11100 case Intrinsic::x86_avx2_phsub_w: 11101 case Intrinsic::x86_avx2_phsub_d: { 11102 unsigned Opcode; 11103 switch (IntNo) { 11104 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 11105 case Intrinsic::x86_sse3_hadd_ps: 11106 case Intrinsic::x86_sse3_hadd_pd: 11107 case Intrinsic::x86_avx_hadd_ps_256: 11108 case Intrinsic::x86_avx_hadd_pd_256: 11109 Opcode = X86ISD::FHADD; 11110 break; 11111 case Intrinsic::x86_sse3_hsub_ps: 11112 case Intrinsic::x86_sse3_hsub_pd: 11113 case Intrinsic::x86_avx_hsub_ps_256: 11114 case Intrinsic::x86_avx_hsub_pd_256: 11115 Opcode = X86ISD::FHSUB; 11116 break; 11117 case Intrinsic::x86_ssse3_phadd_w_128: 11118 case Intrinsic::x86_ssse3_phadd_d_128: 11119 case Intrinsic::x86_avx2_phadd_w: 11120 case Intrinsic::x86_avx2_phadd_d: 11121 Opcode = X86ISD::HADD; 11122 break; 11123 case Intrinsic::x86_ssse3_phsub_w_128: 11124 case Intrinsic::x86_ssse3_phsub_d_128: 11125 case Intrinsic::x86_avx2_phsub_w: 11126 case Intrinsic::x86_avx2_phsub_d: 11127 Opcode = X86ISD::HSUB; 11128 break; 11129 } 11130 return DAG.getNode(Opcode, dl, Op.getValueType(), 11131 Op.getOperand(1), Op.getOperand(2)); 11132 } 11133 11134 // SSE2/SSE41/AVX2 integer max/min intrinsics. 11135 case Intrinsic::x86_sse2_pmaxu_b: 11136 case Intrinsic::x86_sse41_pmaxuw: 11137 case Intrinsic::x86_sse41_pmaxud: 11138 case Intrinsic::x86_avx2_pmaxu_b: 11139 case Intrinsic::x86_avx2_pmaxu_w: 11140 case Intrinsic::x86_avx2_pmaxu_d: 11141 case Intrinsic::x86_sse2_pminu_b: 11142 case Intrinsic::x86_sse41_pminuw: 11143 case Intrinsic::x86_sse41_pminud: 11144 case Intrinsic::x86_avx2_pminu_b: 11145 case Intrinsic::x86_avx2_pminu_w: 11146 case Intrinsic::x86_avx2_pminu_d: 11147 case Intrinsic::x86_sse41_pmaxsb: 11148 case Intrinsic::x86_sse2_pmaxs_w: 11149 case Intrinsic::x86_sse41_pmaxsd: 11150 case Intrinsic::x86_avx2_pmaxs_b: 11151 case Intrinsic::x86_avx2_pmaxs_w: 11152 case Intrinsic::x86_avx2_pmaxs_d: 11153 case Intrinsic::x86_sse41_pminsb: 11154 case Intrinsic::x86_sse2_pmins_w: 11155 case Intrinsic::x86_sse41_pminsd: 11156 case Intrinsic::x86_avx2_pmins_b: 11157 case Intrinsic::x86_avx2_pmins_w: 11158 case Intrinsic::x86_avx2_pmins_d: { 11159 unsigned Opcode; 11160 switch (IntNo) { 11161 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
11162 case Intrinsic::x86_sse2_pmaxu_b: 11163 case Intrinsic::x86_sse41_pmaxuw: 11164 case Intrinsic::x86_sse41_pmaxud: 11165 case Intrinsic::x86_avx2_pmaxu_b: 11166 case Intrinsic::x86_avx2_pmaxu_w: 11167 case Intrinsic::x86_avx2_pmaxu_d: 11168 Opcode = X86ISD::UMAX; 11169 break; 11170 case Intrinsic::x86_sse2_pminu_b: 11171 case Intrinsic::x86_sse41_pminuw: 11172 case Intrinsic::x86_sse41_pminud: 11173 case Intrinsic::x86_avx2_pminu_b: 11174 case Intrinsic::x86_avx2_pminu_w: 11175 case Intrinsic::x86_avx2_pminu_d: 11176 Opcode = X86ISD::UMIN; 11177 break; 11178 case Intrinsic::x86_sse41_pmaxsb: 11179 case Intrinsic::x86_sse2_pmaxs_w: 11180 case Intrinsic::x86_sse41_pmaxsd: 11181 case Intrinsic::x86_avx2_pmaxs_b: 11182 case Intrinsic::x86_avx2_pmaxs_w: 11183 case Intrinsic::x86_avx2_pmaxs_d: 11184 Opcode = X86ISD::SMAX; 11185 break; 11186 case Intrinsic::x86_sse41_pminsb: 11187 case Intrinsic::x86_sse2_pmins_w: 11188 case Intrinsic::x86_sse41_pminsd: 11189 case Intrinsic::x86_avx2_pmins_b: 11190 case Intrinsic::x86_avx2_pmins_w: 11191 case Intrinsic::x86_avx2_pmins_d: 11192 Opcode = X86ISD::SMIN; 11193 break; 11194 } 11195 return DAG.getNode(Opcode, dl, Op.getValueType(), 11196 Op.getOperand(1), Op.getOperand(2)); 11197 } 11198 11199 // SSE/SSE2/AVX floating point max/min intrinsics. 11200 case Intrinsic::x86_sse_max_ps: 11201 case Intrinsic::x86_sse2_max_pd: 11202 case Intrinsic::x86_avx_max_ps_256: 11203 case Intrinsic::x86_avx_max_pd_256: 11204 case Intrinsic::x86_avx512_max_ps_512: 11205 case Intrinsic::x86_avx512_max_pd_512: 11206 case Intrinsic::x86_sse_min_ps: 11207 case Intrinsic::x86_sse2_min_pd: 11208 case Intrinsic::x86_avx_min_ps_256: 11209 case Intrinsic::x86_avx_min_pd_256: 11210 case Intrinsic::x86_avx512_min_ps_512: 11211 case Intrinsic::x86_avx512_min_pd_512: { 11212 unsigned Opcode; 11213 switch (IntNo) { 11214 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 11215 case Intrinsic::x86_sse_max_ps: 11216 case Intrinsic::x86_sse2_max_pd: 11217 case Intrinsic::x86_avx_max_ps_256: 11218 case Intrinsic::x86_avx_max_pd_256: 11219 case Intrinsic::x86_avx512_max_ps_512: 11220 case Intrinsic::x86_avx512_max_pd_512: 11221 Opcode = X86ISD::FMAX; 11222 break; 11223 case Intrinsic::x86_sse_min_ps: 11224 case Intrinsic::x86_sse2_min_pd: 11225 case Intrinsic::x86_avx_min_ps_256: 11226 case Intrinsic::x86_avx_min_pd_256: 11227 case Intrinsic::x86_avx512_min_ps_512: 11228 case Intrinsic::x86_avx512_min_pd_512: 11229 Opcode = X86ISD::FMIN; 11230 break; 11231 } 11232 return DAG.getNode(Opcode, dl, Op.getValueType(), 11233 Op.getOperand(1), Op.getOperand(2)); 11234 } 11235 11236 // AVX2 variable shift intrinsics 11237 case Intrinsic::x86_avx2_psllv_d: 11238 case Intrinsic::x86_avx2_psllv_q: 11239 case Intrinsic::x86_avx2_psllv_d_256: 11240 case Intrinsic::x86_avx2_psllv_q_256: 11241 case Intrinsic::x86_avx2_psrlv_d: 11242 case Intrinsic::x86_avx2_psrlv_q: 11243 case Intrinsic::x86_avx2_psrlv_d_256: 11244 case Intrinsic::x86_avx2_psrlv_q_256: 11245 case Intrinsic::x86_avx2_psrav_d: 11246 case Intrinsic::x86_avx2_psrav_d_256: { 11247 unsigned Opcode; 11248 switch (IntNo) { 11249 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
11250 case Intrinsic::x86_avx2_psllv_d: 11251 case Intrinsic::x86_avx2_psllv_q: 11252 case Intrinsic::x86_avx2_psllv_d_256: 11253 case Intrinsic::x86_avx2_psllv_q_256: 11254 Opcode = ISD::SHL; 11255 break; 11256 case Intrinsic::x86_avx2_psrlv_d: 11257 case Intrinsic::x86_avx2_psrlv_q: 11258 case Intrinsic::x86_avx2_psrlv_d_256: 11259 case Intrinsic::x86_avx2_psrlv_q_256: 11260 Opcode = ISD::SRL; 11261 break; 11262 case Intrinsic::x86_avx2_psrav_d: 11263 case Intrinsic::x86_avx2_psrav_d_256: 11264 Opcode = ISD::SRA; 11265 break; 11266 } 11267 return DAG.getNode(Opcode, dl, Op.getValueType(), 11268 Op.getOperand(1), Op.getOperand(2)); 11269 } 11270 11271 case Intrinsic::x86_ssse3_pshuf_b_128: 11272 case Intrinsic::x86_avx2_pshuf_b: 11273 return DAG.getNode(X86ISD::PSHUFB, dl, Op.getValueType(), 11274 Op.getOperand(1), Op.getOperand(2)); 11275 11276 case Intrinsic::x86_ssse3_psign_b_128: 11277 case Intrinsic::x86_ssse3_psign_w_128: 11278 case Intrinsic::x86_ssse3_psign_d_128: 11279 case Intrinsic::x86_avx2_psign_b: 11280 case Intrinsic::x86_avx2_psign_w: 11281 case Intrinsic::x86_avx2_psign_d: 11282 return DAG.getNode(X86ISD::PSIGN, dl, Op.getValueType(), 11283 Op.getOperand(1), Op.getOperand(2)); 11284 11285 case Intrinsic::x86_sse41_insertps: 11286 return DAG.getNode(X86ISD::INSERTPS, dl, Op.getValueType(), 11287 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 11288 11289 case Intrinsic::x86_avx_vperm2f128_ps_256: 11290 case Intrinsic::x86_avx_vperm2f128_pd_256: 11291 case Intrinsic::x86_avx_vperm2f128_si_256: 11292 case Intrinsic::x86_avx2_vperm2i128: 11293 return DAG.getNode(X86ISD::VPERM2X128, dl, Op.getValueType(), 11294 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 11295 11296 case Intrinsic::x86_avx2_permd: 11297 case Intrinsic::x86_avx2_permps: 11298 // Operands intentionally swapped. Mask is last operand to intrinsic, 11299 // but second operand for node/intruction. 11300 return DAG.getNode(X86ISD::VPERMV, dl, Op.getValueType(), 11301 Op.getOperand(2), Op.getOperand(1)); 11302 11303 case Intrinsic::x86_sse_sqrt_ps: 11304 case Intrinsic::x86_sse2_sqrt_pd: 11305 case Intrinsic::x86_avx_sqrt_ps_256: 11306 case Intrinsic::x86_avx_sqrt_pd_256: 11307 return DAG.getNode(ISD::FSQRT, dl, Op.getValueType(), Op.getOperand(1)); 11308 11309 // ptest and testp intrinsics. The intrinsic these come from are designed to 11310 // return an integer value, not just an instruction so lower it to the ptest 11311 // or testp pattern and a setcc for the result. 
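  // Flag tested by each variant (mirrored by the condition codes chosen below):
  //   ptestz / vtestz    : ZF == 1             -> X86::COND_E
  //   ptestc / vtestc    : CF == 1             -> X86::COND_B
  //   ptestnzc / vtestnzc: ZF == 0 && CF == 0  -> X86::COND_A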
11312 case Intrinsic::x86_sse41_ptestz: 11313 case Intrinsic::x86_sse41_ptestc: 11314 case Intrinsic::x86_sse41_ptestnzc: 11315 case Intrinsic::x86_avx_ptestz_256: 11316 case Intrinsic::x86_avx_ptestc_256: 11317 case Intrinsic::x86_avx_ptestnzc_256: 11318 case Intrinsic::x86_avx_vtestz_ps: 11319 case Intrinsic::x86_avx_vtestc_ps: 11320 case Intrinsic::x86_avx_vtestnzc_ps: 11321 case Intrinsic::x86_avx_vtestz_pd: 11322 case Intrinsic::x86_avx_vtestc_pd: 11323 case Intrinsic::x86_avx_vtestnzc_pd: 11324 case Intrinsic::x86_avx_vtestz_ps_256: 11325 case Intrinsic::x86_avx_vtestc_ps_256: 11326 case Intrinsic::x86_avx_vtestnzc_ps_256: 11327 case Intrinsic::x86_avx_vtestz_pd_256: 11328 case Intrinsic::x86_avx_vtestc_pd_256: 11329 case Intrinsic::x86_avx_vtestnzc_pd_256: { 11330 bool IsTestPacked = false; 11331 unsigned X86CC; 11332 switch (IntNo) { 11333 default: llvm_unreachable("Bad fallthrough in Intrinsic lowering."); 11334 case Intrinsic::x86_avx_vtestz_ps: 11335 case Intrinsic::x86_avx_vtestz_pd: 11336 case Intrinsic::x86_avx_vtestz_ps_256: 11337 case Intrinsic::x86_avx_vtestz_pd_256: 11338 IsTestPacked = true; // Fallthrough 11339 case Intrinsic::x86_sse41_ptestz: 11340 case Intrinsic::x86_avx_ptestz_256: 11341 // ZF = 1 11342 X86CC = X86::COND_E; 11343 break; 11344 case Intrinsic::x86_avx_vtestc_ps: 11345 case Intrinsic::x86_avx_vtestc_pd: 11346 case Intrinsic::x86_avx_vtestc_ps_256: 11347 case Intrinsic::x86_avx_vtestc_pd_256: 11348 IsTestPacked = true; // Fallthrough 11349 case Intrinsic::x86_sse41_ptestc: 11350 case Intrinsic::x86_avx_ptestc_256: 11351 // CF = 1 11352 X86CC = X86::COND_B; 11353 break; 11354 case Intrinsic::x86_avx_vtestnzc_ps: 11355 case Intrinsic::x86_avx_vtestnzc_pd: 11356 case Intrinsic::x86_avx_vtestnzc_ps_256: 11357 case Intrinsic::x86_avx_vtestnzc_pd_256: 11358 IsTestPacked = true; // Fallthrough 11359 case Intrinsic::x86_sse41_ptestnzc: 11360 case Intrinsic::x86_avx_ptestnzc_256: 11361 // ZF and CF = 0 11362 X86CC = X86::COND_A; 11363 break; 11364 } 11365 11366 SDValue LHS = Op.getOperand(1); 11367 SDValue RHS = Op.getOperand(2); 11368 unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST; 11369 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS); 11370 SDValue CC = DAG.getConstant(X86CC, MVT::i8); 11371 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test); 11372 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); 11373 } 11374 case Intrinsic::x86_avx512_kortestz: 11375 case Intrinsic::x86_avx512_kortestc: { 11376 unsigned X86CC = (IntNo == Intrinsic::x86_avx512_kortestz)? 
X86::COND_E: X86::COND_B; 11377 SDValue LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(1)); 11378 SDValue RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(2)); 11379 SDValue CC = DAG.getConstant(X86CC, MVT::i8); 11380 SDValue Test = DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS); 11381 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test); 11382 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); 11383 } 11384 11385 // SSE/AVX shift intrinsics 11386 case Intrinsic::x86_sse2_psll_w: 11387 case Intrinsic::x86_sse2_psll_d: 11388 case Intrinsic::x86_sse2_psll_q: 11389 case Intrinsic::x86_avx2_psll_w: 11390 case Intrinsic::x86_avx2_psll_d: 11391 case Intrinsic::x86_avx2_psll_q: 11392 case Intrinsic::x86_sse2_psrl_w: 11393 case Intrinsic::x86_sse2_psrl_d: 11394 case Intrinsic::x86_sse2_psrl_q: 11395 case Intrinsic::x86_avx2_psrl_w: 11396 case Intrinsic::x86_avx2_psrl_d: 11397 case Intrinsic::x86_avx2_psrl_q: 11398 case Intrinsic::x86_sse2_psra_w: 11399 case Intrinsic::x86_sse2_psra_d: 11400 case Intrinsic::x86_avx2_psra_w: 11401 case Intrinsic::x86_avx2_psra_d: { 11402 unsigned Opcode; 11403 switch (IntNo) { 11404 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 11405 case Intrinsic::x86_sse2_psll_w: 11406 case Intrinsic::x86_sse2_psll_d: 11407 case Intrinsic::x86_sse2_psll_q: 11408 case Intrinsic::x86_avx2_psll_w: 11409 case Intrinsic::x86_avx2_psll_d: 11410 case Intrinsic::x86_avx2_psll_q: 11411 Opcode = X86ISD::VSHL; 11412 break; 11413 case Intrinsic::x86_sse2_psrl_w: 11414 case Intrinsic::x86_sse2_psrl_d: 11415 case Intrinsic::x86_sse2_psrl_q: 11416 case Intrinsic::x86_avx2_psrl_w: 11417 case Intrinsic::x86_avx2_psrl_d: 11418 case Intrinsic::x86_avx2_psrl_q: 11419 Opcode = X86ISD::VSRL; 11420 break; 11421 case Intrinsic::x86_sse2_psra_w: 11422 case Intrinsic::x86_sse2_psra_d: 11423 case Intrinsic::x86_avx2_psra_w: 11424 case Intrinsic::x86_avx2_psra_d: 11425 Opcode = X86ISD::VSRA; 11426 break; 11427 } 11428 return DAG.getNode(Opcode, dl, Op.getValueType(), 11429 Op.getOperand(1), Op.getOperand(2)); 11430 } 11431 11432 // SSE/AVX immediate shift intrinsics 11433 case Intrinsic::x86_sse2_pslli_w: 11434 case Intrinsic::x86_sse2_pslli_d: 11435 case Intrinsic::x86_sse2_pslli_q: 11436 case Intrinsic::x86_avx2_pslli_w: 11437 case Intrinsic::x86_avx2_pslli_d: 11438 case Intrinsic::x86_avx2_pslli_q: 11439 case Intrinsic::x86_sse2_psrli_w: 11440 case Intrinsic::x86_sse2_psrli_d: 11441 case Intrinsic::x86_sse2_psrli_q: 11442 case Intrinsic::x86_avx2_psrli_w: 11443 case Intrinsic::x86_avx2_psrli_d: 11444 case Intrinsic::x86_avx2_psrli_q: 11445 case Intrinsic::x86_sse2_psrai_w: 11446 case Intrinsic::x86_sse2_psrai_d: 11447 case Intrinsic::x86_avx2_psrai_w: 11448 case Intrinsic::x86_avx2_psrai_d: { 11449 unsigned Opcode; 11450 switch (IntNo) { 11451 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
11452 case Intrinsic::x86_sse2_pslli_w: 11453 case Intrinsic::x86_sse2_pslli_d: 11454 case Intrinsic::x86_sse2_pslli_q: 11455 case Intrinsic::x86_avx2_pslli_w: 11456 case Intrinsic::x86_avx2_pslli_d: 11457 case Intrinsic::x86_avx2_pslli_q: 11458 Opcode = X86ISD::VSHLI; 11459 break; 11460 case Intrinsic::x86_sse2_psrli_w: 11461 case Intrinsic::x86_sse2_psrli_d: 11462 case Intrinsic::x86_sse2_psrli_q: 11463 case Intrinsic::x86_avx2_psrli_w: 11464 case Intrinsic::x86_avx2_psrli_d: 11465 case Intrinsic::x86_avx2_psrli_q: 11466 Opcode = X86ISD::VSRLI; 11467 break; 11468 case Intrinsic::x86_sse2_psrai_w: 11469 case Intrinsic::x86_sse2_psrai_d: 11470 case Intrinsic::x86_avx2_psrai_w: 11471 case Intrinsic::x86_avx2_psrai_d: 11472 Opcode = X86ISD::VSRAI; 11473 break; 11474 } 11475 return getTargetVShiftNode(Opcode, dl, Op.getValueType(), 11476 Op.getOperand(1), Op.getOperand(2), DAG); 11477 } 11478 11479 case Intrinsic::x86_sse42_pcmpistria128: 11480 case Intrinsic::x86_sse42_pcmpestria128: 11481 case Intrinsic::x86_sse42_pcmpistric128: 11482 case Intrinsic::x86_sse42_pcmpestric128: 11483 case Intrinsic::x86_sse42_pcmpistrio128: 11484 case Intrinsic::x86_sse42_pcmpestrio128: 11485 case Intrinsic::x86_sse42_pcmpistris128: 11486 case Intrinsic::x86_sse42_pcmpestris128: 11487 case Intrinsic::x86_sse42_pcmpistriz128: 11488 case Intrinsic::x86_sse42_pcmpestriz128: { 11489 unsigned Opcode; 11490 unsigned X86CC; 11491 switch (IntNo) { 11492 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 11493 case Intrinsic::x86_sse42_pcmpistria128: 11494 Opcode = X86ISD::PCMPISTRI; 11495 X86CC = X86::COND_A; 11496 break; 11497 case Intrinsic::x86_sse42_pcmpestria128: 11498 Opcode = X86ISD::PCMPESTRI; 11499 X86CC = X86::COND_A; 11500 break; 11501 case Intrinsic::x86_sse42_pcmpistric128: 11502 Opcode = X86ISD::PCMPISTRI; 11503 X86CC = X86::COND_B; 11504 break; 11505 case Intrinsic::x86_sse42_pcmpestric128: 11506 Opcode = X86ISD::PCMPESTRI; 11507 X86CC = X86::COND_B; 11508 break; 11509 case Intrinsic::x86_sse42_pcmpistrio128: 11510 Opcode = X86ISD::PCMPISTRI; 11511 X86CC = X86::COND_O; 11512 break; 11513 case Intrinsic::x86_sse42_pcmpestrio128: 11514 Opcode = X86ISD::PCMPESTRI; 11515 X86CC = X86::COND_O; 11516 break; 11517 case Intrinsic::x86_sse42_pcmpistris128: 11518 Opcode = X86ISD::PCMPISTRI; 11519 X86CC = X86::COND_S; 11520 break; 11521 case Intrinsic::x86_sse42_pcmpestris128: 11522 Opcode = X86ISD::PCMPESTRI; 11523 X86CC = X86::COND_S; 11524 break; 11525 case Intrinsic::x86_sse42_pcmpistriz128: 11526 Opcode = X86ISD::PCMPISTRI; 11527 X86CC = X86::COND_E; 11528 break; 11529 case Intrinsic::x86_sse42_pcmpestriz128: 11530 Opcode = X86ISD::PCMPESTRI; 11531 X86CC = X86::COND_E; 11532 break; 11533 } 11534 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end()); 11535 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); 11536 SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps.data(), NewOps.size()); 11537 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 11538 DAG.getConstant(X86CC, MVT::i8), 11539 SDValue(PCMP.getNode(), 1)); 11540 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); 11541 } 11542 11543 case Intrinsic::x86_sse42_pcmpistri128: 11544 case Intrinsic::x86_sse42_pcmpestri128: { 11545 unsigned Opcode; 11546 if (IntNo == Intrinsic::x86_sse42_pcmpistri128) 11547 Opcode = X86ISD::PCMPISTRI; 11548 else 11549 Opcode = X86ISD::PCMPESTRI; 11550 11551 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end()); 11552 SDVTList VTs = DAG.getVTList(Op.getValueType(), 
MVT::i32); 11553 return DAG.getNode(Opcode, dl, VTs, NewOps.data(), NewOps.size()); 11554 } 11555 case Intrinsic::x86_fma_vfmadd_ps: 11556 case Intrinsic::x86_fma_vfmadd_pd: 11557 case Intrinsic::x86_fma_vfmsub_ps: 11558 case Intrinsic::x86_fma_vfmsub_pd: 11559 case Intrinsic::x86_fma_vfnmadd_ps: 11560 case Intrinsic::x86_fma_vfnmadd_pd: 11561 case Intrinsic::x86_fma_vfnmsub_ps: 11562 case Intrinsic::x86_fma_vfnmsub_pd: 11563 case Intrinsic::x86_fma_vfmaddsub_ps: 11564 case Intrinsic::x86_fma_vfmaddsub_pd: 11565 case Intrinsic::x86_fma_vfmsubadd_ps: 11566 case Intrinsic::x86_fma_vfmsubadd_pd: 11567 case Intrinsic::x86_fma_vfmadd_ps_256: 11568 case Intrinsic::x86_fma_vfmadd_pd_256: 11569 case Intrinsic::x86_fma_vfmsub_ps_256: 11570 case Intrinsic::x86_fma_vfmsub_pd_256: 11571 case Intrinsic::x86_fma_vfnmadd_ps_256: 11572 case Intrinsic::x86_fma_vfnmadd_pd_256: 11573 case Intrinsic::x86_fma_vfnmsub_ps_256: 11574 case Intrinsic::x86_fma_vfnmsub_pd_256: 11575 case Intrinsic::x86_fma_vfmaddsub_ps_256: 11576 case Intrinsic::x86_fma_vfmaddsub_pd_256: 11577 case Intrinsic::x86_fma_vfmsubadd_ps_256: 11578 case Intrinsic::x86_fma_vfmsubadd_pd_256: { 11579 unsigned Opc; 11580 switch (IntNo) { 11581 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 11582 case Intrinsic::x86_fma_vfmadd_ps: 11583 case Intrinsic::x86_fma_vfmadd_pd: 11584 case Intrinsic::x86_fma_vfmadd_ps_256: 11585 case Intrinsic::x86_fma_vfmadd_pd_256: 11586 Opc = X86ISD::FMADD; 11587 break; 11588 case Intrinsic::x86_fma_vfmsub_ps: 11589 case Intrinsic::x86_fma_vfmsub_pd: 11590 case Intrinsic::x86_fma_vfmsub_ps_256: 11591 case Intrinsic::x86_fma_vfmsub_pd_256: 11592 Opc = X86ISD::FMSUB; 11593 break; 11594 case Intrinsic::x86_fma_vfnmadd_ps: 11595 case Intrinsic::x86_fma_vfnmadd_pd: 11596 case Intrinsic::x86_fma_vfnmadd_ps_256: 11597 case Intrinsic::x86_fma_vfnmadd_pd_256: 11598 Opc = X86ISD::FNMADD; 11599 break; 11600 case Intrinsic::x86_fma_vfnmsub_ps: 11601 case Intrinsic::x86_fma_vfnmsub_pd: 11602 case Intrinsic::x86_fma_vfnmsub_ps_256: 11603 case Intrinsic::x86_fma_vfnmsub_pd_256: 11604 Opc = X86ISD::FNMSUB; 11605 break; 11606 case Intrinsic::x86_fma_vfmaddsub_ps: 11607 case Intrinsic::x86_fma_vfmaddsub_pd: 11608 case Intrinsic::x86_fma_vfmaddsub_ps_256: 11609 case Intrinsic::x86_fma_vfmaddsub_pd_256: 11610 Opc = X86ISD::FMADDSUB; 11611 break; 11612 case Intrinsic::x86_fma_vfmsubadd_ps: 11613 case Intrinsic::x86_fma_vfmsubadd_pd: 11614 case Intrinsic::x86_fma_vfmsubadd_ps_256: 11615 case Intrinsic::x86_fma_vfmsubadd_pd_256: 11616 Opc = X86ISD::FMSUBADD; 11617 break; 11618 } 11619 11620 return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1), 11621 Op.getOperand(2), Op.getOperand(3)); 11622 } 11623 } 11624} 11625 11626static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) { 11627 SDLoc dl(Op); 11628 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 11629 switch (IntNo) { 11630 default: return SDValue(); // Don't custom lower most intrinsics. 11631 11632 // RDRAND/RDSEED intrinsics. 11633 case Intrinsic::x86_rdrand_16: 11634 case Intrinsic::x86_rdrand_32: 11635 case Intrinsic::x86_rdrand_64: 11636 case Intrinsic::x86_rdseed_16: 11637 case Intrinsic::x86_rdseed_32: 11638 case Intrinsic::x86_rdseed_64: { 11639 unsigned Opcode = (IntNo == Intrinsic::x86_rdseed_16 || 11640 IntNo == Intrinsic::x86_rdseed_32 || 11641 IntNo == Intrinsic::x86_rdseed_64) ? X86ISD::RDSEED : 11642 X86ISD::RDRAND; 11643 // Emit the node with the right value type. 
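    // The node yields (value, glue, chain); CF=1 in the glued EFLAGS means the
    // hardware delivered a valid random value, which the CMOV below turns into
    // the i32 "isValid" result.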
11644 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Glue, MVT::Other); 11645 SDValue Result = DAG.getNode(Opcode, dl, VTs, Op.getOperand(0)); 11646 11647 // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1. 11648 // Otherwise return the value from Rand, which is always 0, casted to i32. 11649 SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)), 11650 DAG.getConstant(1, Op->getValueType(1)), 11651 DAG.getConstant(X86::COND_B, MVT::i32), 11652 SDValue(Result.getNode(), 1) }; 11653 SDValue isValid = DAG.getNode(X86ISD::CMOV, dl, 11654 DAG.getVTList(Op->getValueType(1), MVT::Glue), 11655 Ops, array_lengthof(Ops)); 11656 11657 // Return { result, isValid, chain }. 11658 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid, 11659 SDValue(Result.getNode(), 2)); 11660 } 11661 11662 // XTEST intrinsics. 11663 case Intrinsic::x86_xtest: { 11664 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other); 11665 SDValue InTrans = DAG.getNode(X86ISD::XTEST, dl, VTs, Op.getOperand(0)); 11666 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 11667 DAG.getConstant(X86::COND_NE, MVT::i8), 11668 InTrans); 11669 SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC); 11670 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), 11671 Ret, SDValue(InTrans.getNode(), 1)); 11672 } 11673 } 11674} 11675 11676SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op, 11677 SelectionDAG &DAG) const { 11678 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 11679 MFI->setReturnAddressIsTaken(true); 11680 11681 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 11682 SDLoc dl(Op); 11683 EVT PtrVT = getPointerTy(); 11684 11685 if (Depth > 0) { 11686 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 11687 const X86RegisterInfo *RegInfo = 11688 static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo()); 11689 SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), PtrVT); 11690 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 11691 DAG.getNode(ISD::ADD, dl, PtrVT, 11692 FrameAddr, Offset), 11693 MachinePointerInfo(), false, false, false, 0); 11694 } 11695 11696 // Just load the return address. 
11697 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG); 11698 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 11699 RetAddrFI, MachinePointerInfo(), false, false, false, 0); 11700} 11701 11702SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { 11703 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 11704 MFI->setFrameAddressIsTaken(true); 11705 11706 EVT VT = Op.getValueType(); 11707 SDLoc dl(Op); // FIXME probably not meaningful 11708 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 11709 const X86RegisterInfo *RegInfo = 11710 static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo()); 11711 unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction()); 11712 assert(((FrameReg == X86::RBP && VT == MVT::i64) || 11713 (FrameReg == X86::EBP && VT == MVT::i32)) && 11714 "Invalid Frame Register!"); 11715 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 11716 while (Depth--) 11717 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 11718 MachinePointerInfo(), 11719 false, false, false, 0); 11720 return FrameAddr; 11721} 11722 11723SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op, 11724 SelectionDAG &DAG) const { 11725 const X86RegisterInfo *RegInfo = 11726 static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo()); 11727 return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize()); 11728} 11729 11730SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { 11731 SDValue Chain = Op.getOperand(0); 11732 SDValue Offset = Op.getOperand(1); 11733 SDValue Handler = Op.getOperand(2); 11734 SDLoc dl (Op); 11735 11736 EVT PtrVT = getPointerTy(); 11737 const X86RegisterInfo *RegInfo = 11738 static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo()); 11739 unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction()); 11740 assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) || 11741 (FrameReg == X86::EBP && PtrVT == MVT::i32)) && 11742 "Invalid Frame Register!"); 11743 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT); 11744 unsigned StoreAddrReg = (PtrVT == MVT::i64) ? 
X86::RCX : X86::ECX; 11745 11746 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame, 11747 DAG.getIntPtrConstant(RegInfo->getSlotSize())); 11748 StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset); 11749 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(), 11750 false, false, 0); 11751 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr); 11752 11753 return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain, 11754 DAG.getRegister(StoreAddrReg, PtrVT)); 11755} 11756 11757SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op, 11758 SelectionDAG &DAG) const { 11759 SDLoc DL(Op); 11760 return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL, 11761 DAG.getVTList(MVT::i32, MVT::Other), 11762 Op.getOperand(0), Op.getOperand(1)); 11763} 11764 11765SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op, 11766 SelectionDAG &DAG) const { 11767 SDLoc DL(Op); 11768 return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other, 11769 Op.getOperand(0), Op.getOperand(1)); 11770} 11771 11772static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) { 11773 return Op.getOperand(0); 11774} 11775 11776SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, 11777 SelectionDAG &DAG) const { 11778 SDValue Root = Op.getOperand(0); 11779 SDValue Trmp = Op.getOperand(1); // trampoline 11780 SDValue FPtr = Op.getOperand(2); // nested function 11781 SDValue Nest = Op.getOperand(3); // 'nest' parameter value 11782 SDLoc dl (Op); 11783 11784 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 11785 const TargetRegisterInfo* TRI = getTargetMachine().getRegisterInfo(); 11786 11787 if (Subtarget->is64Bit()) { 11788 SDValue OutChains[6]; 11789 11790 // Large code-model. 11791 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode. 11792 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode. 11793 11794 const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7; 11795 const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7; 11796 11797 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix 11798 11799 // Load the pointer to the nested function into R11. 11800 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11 11801 SDValue Addr = Trmp; 11802 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), 11803 Addr, MachinePointerInfo(TrmpAddr), 11804 false, false, 0); 11805 11806 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 11807 DAG.getConstant(2, MVT::i64)); 11808 OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr, 11809 MachinePointerInfo(TrmpAddr, 2), 11810 false, false, 2); 11811 11812 // Load the 'nest' parameter value into R10. 11813 // R10 is specified in X86CallingConv.td 11814 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10 11815 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 11816 DAG.getConstant(10, MVT::i64)); 11817 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), 11818 Addr, MachinePointerInfo(TrmpAddr, 10), 11819 false, false, 0); 11820 11821 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 11822 DAG.getConstant(12, MVT::i64)); 11823 OutChains[3] = DAG.getStore(Root, dl, Nest, Addr, 11824 MachinePointerInfo(TrmpAddr, 12), 11825 false, false, 2); 11826 11827 // Jump to the nested function. 11828 OpCode = (JMP64r << 8) | REX_WB; // jmpq *... 
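    // Taken together, the stores above and below lay out a 23-byte trampoline
    // roughly as follows (offsets in bytes):
    //    0: 49 BB <imm64>   movabsq $FPtr, %r11
    //   10: 49 BA <imm64>   movabsq $Nest, %r10
    //   20: 49 FF E3        jmpq   *%r11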
11829 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 11830 DAG.getConstant(20, MVT::i64)); 11831 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), 11832 Addr, MachinePointerInfo(TrmpAddr, 20), 11833 false, false, 0); 11834 11835 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11 11836 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 11837 DAG.getConstant(22, MVT::i64)); 11838 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr, 11839 MachinePointerInfo(TrmpAddr, 22), 11840 false, false, 0); 11841 11842 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 6); 11843 } else { 11844 const Function *Func = 11845 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue()); 11846 CallingConv::ID CC = Func->getCallingConv(); 11847 unsigned NestReg; 11848 11849 switch (CC) { 11850 default: 11851 llvm_unreachable("Unsupported calling convention"); 11852 case CallingConv::C: 11853 case CallingConv::X86_StdCall: { 11854 // Pass 'nest' parameter in ECX. 11855 // Must be kept in sync with X86CallingConv.td 11856 NestReg = X86::ECX; 11857 11858 // Check that ECX wasn't needed by an 'inreg' parameter. 11859 FunctionType *FTy = Func->getFunctionType(); 11860 const AttributeSet &Attrs = Func->getAttributes(); 11861 11862 if (!Attrs.isEmpty() && !Func->isVarArg()) { 11863 unsigned InRegCount = 0; 11864 unsigned Idx = 1; 11865 11866 for (FunctionType::param_iterator I = FTy->param_begin(), 11867 E = FTy->param_end(); I != E; ++I, ++Idx) 11868 if (Attrs.hasAttribute(Idx, Attribute::InReg)) 11869 // FIXME: should only count parameters that are lowered to integers. 11870 InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32; 11871 11872 if (InRegCount > 2) { 11873 report_fatal_error("Nest register in use - reduce number of inreg" 11874 " parameters!"); 11875 } 11876 } 11877 break; 11878 } 11879 case CallingConv::X86_FastCall: 11880 case CallingConv::X86_ThisCall: 11881 case CallingConv::Fast: 11882 // Pass 'nest' parameter in EAX. 11883 // Must be kept in sync with X86CallingConv.td 11884 NestReg = X86::EAX; 11885 break; 11886 } 11887 11888 SDValue OutChains[4]; 11889 SDValue Addr, Disp; 11890 11891 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 11892 DAG.getConstant(10, MVT::i32)); 11893 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr); 11894 11895 // This is storing the opcode for MOV32ri. 11896 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte. 11897 const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7; 11898 OutChains[0] = DAG.getStore(Root, dl, 11899 DAG.getConstant(MOV32ri|N86Reg, MVT::i8), 11900 Trmp, MachinePointerInfo(TrmpAddr), 11901 false, false, 0); 11902 11903 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 11904 DAG.getConstant(1, MVT::i32)); 11905 OutChains[1] = DAG.getStore(Root, dl, Nest, Addr, 11906 MachinePointerInfo(TrmpAddr, 1), 11907 false, false, 1); 11908 11909 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode. 
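    // The resulting 10-byte trampoline looks roughly like:
    //   0: B8+r <imm32>   movl $Nest, %ecx (or %eax)
    //   5: E9   <rel32>   jmp  FPtr        ; rel32 = FPtr - (Trmp + 10)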
11910 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 11911 DAG.getConstant(5, MVT::i32)); 11912 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr, 11913 MachinePointerInfo(TrmpAddr, 5), 11914 false, false, 1); 11915 11916 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 11917 DAG.getConstant(6, MVT::i32)); 11918 OutChains[3] = DAG.getStore(Root, dl, Disp, Addr, 11919 MachinePointerInfo(TrmpAddr, 6), 11920 false, false, 1); 11921 11922 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 4); 11923 } 11924} 11925 11926SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op, 11927 SelectionDAG &DAG) const { 11928 /* 11929 The rounding mode is in bits 11:10 of FPSR, and has the following 11930 settings: 11931 00 Round to nearest 11932 01 Round to -inf 11933 10 Round to +inf 11934 11 Round to 0 11935 11936 FLT_ROUNDS, on the other hand, expects the following: 11937 -1 Undefined 11938 0 Round to 0 11939 1 Round to nearest 11940 2 Round to +inf 11941 3 Round to -inf 11942 11943 To perform the conversion, we do: 11944 (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3) 11945 */ 11946 11947 MachineFunction &MF = DAG.getMachineFunction(); 11948 const TargetMachine &TM = MF.getTarget(); 11949 const TargetFrameLowering &TFI = *TM.getFrameLowering(); 11950 unsigned StackAlignment = TFI.getStackAlignment(); 11951 EVT VT = Op.getValueType(); 11952 SDLoc DL(Op); 11953 11954 // Save FP Control Word to stack slot 11955 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false); 11956 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 11957 11958 MachineMemOperand *MMO = 11959 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 11960 MachineMemOperand::MOStore, 2, 2); 11961 11962 SDValue Ops[] = { DAG.getEntryNode(), StackSlot }; 11963 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL, 11964 DAG.getVTList(MVT::Other), 11965 Ops, array_lengthof(Ops), MVT::i16, 11966 MMO); 11967 11968 // Load FP Control Word from stack slot 11969 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot, 11970 MachinePointerInfo(), false, false, false, 0); 11971 11972 // Transform as necessary 11973 SDValue CWD1 = 11974 DAG.getNode(ISD::SRL, DL, MVT::i16, 11975 DAG.getNode(ISD::AND, DL, MVT::i16, 11976 CWD, DAG.getConstant(0x800, MVT::i16)), 11977 DAG.getConstant(11, MVT::i8)); 11978 SDValue CWD2 = 11979 DAG.getNode(ISD::SRL, DL, MVT::i16, 11980 DAG.getNode(ISD::AND, DL, MVT::i16, 11981 CWD, DAG.getConstant(0x400, MVT::i16)), 11982 DAG.getConstant(9, MVT::i8)); 11983 11984 SDValue RetVal = 11985 DAG.getNode(ISD::AND, DL, MVT::i16, 11986 DAG.getNode(ISD::ADD, DL, MVT::i16, 11987 DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2), 11988 DAG.getConstant(1, MVT::i16)), 11989 DAG.getConstant(3, MVT::i16)); 11990 11991 return DAG.getNode((VT.getSizeInBits() < 16 ? 11992 ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal); 11993} 11994 11995static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) { 11996 EVT VT = Op.getValueType(); 11997 EVT OpVT = VT; 11998 unsigned NumBits = VT.getSizeInBits(); 11999 SDLoc dl(Op); 12000 12001 Op = Op.getOperand(0); 12002 if (VT == MVT::i8) { 12003 // Zero extend to i32 since there is not an i8 bsr. 12004 OpVT = MVT::i32; 12005 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op); 12006 } 12007 12008 // Issue a bsr (scan bits in reverse) which also sets EFLAGS. 12009 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 12010 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op); 12011 12012 // If src is zero (i.e. 
bsr sets ZF), returns NumBits. 12013 SDValue Ops[] = { 12014 Op, 12015 DAG.getConstant(NumBits+NumBits-1, OpVT), 12016 DAG.getConstant(X86::COND_E, MVT::i8), 12017 Op.getValue(1) 12018 }; 12019 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops, array_lengthof(Ops)); 12020 12021 // Finally xor with NumBits-1. 12022 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT)); 12023 12024 if (VT == MVT::i8) 12025 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op); 12026 return Op; 12027} 12028 12029static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) { 12030 EVT VT = Op.getValueType(); 12031 EVT OpVT = VT; 12032 unsigned NumBits = VT.getSizeInBits(); 12033 SDLoc dl(Op); 12034 12035 Op = Op.getOperand(0); 12036 if (VT == MVT::i8) { 12037 // Zero extend to i32 since there is not an i8 bsr. 12038 OpVT = MVT::i32; 12039 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op); 12040 } 12041 12042 // Issue a bsr (scan bits in reverse). 12043 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 12044 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op); 12045 12046 // And xor with NumBits-1. 12047 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT)); 12048 12049 if (VT == MVT::i8) 12050 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op); 12051 return Op; 12052} 12053 12054static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) { 12055 EVT VT = Op.getValueType(); 12056 unsigned NumBits = VT.getSizeInBits(); 12057 SDLoc dl(Op); 12058 Op = Op.getOperand(0); 12059 12060 // Issue a bsf (scan bits forward) which also sets EFLAGS. 12061 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 12062 Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op); 12063 12064 // If src is zero (i.e. bsf sets ZF), returns NumBits. 12065 SDValue Ops[] = { 12066 Op, 12067 DAG.getConstant(NumBits, VT), 12068 DAG.getConstant(X86::COND_E, MVT::i8), 12069 Op.getValue(1) 12070 }; 12071 return DAG.getNode(X86ISD::CMOV, dl, VT, Ops, array_lengthof(Ops)); 12072} 12073 12074// Lower256IntArith - Break a 256-bit integer operation into two new 128-bit 12075// ones, and then concatenate the result back. 
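// For example (a sketch of the transformation, not literal output):
//   (v8i32 add A, B)
//     -> concat_vectors (v4i32 add A[0..3], B[0..3]),
//                       (v4i32 add A[4..7], B[4..7])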
12076static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) { 12077 EVT VT = Op.getValueType(); 12078 12079 assert(VT.is256BitVector() && VT.isInteger() && 12080 "Unsupported value type for operation"); 12081 12082 unsigned NumElems = VT.getVectorNumElements(); 12083 SDLoc dl(Op); 12084 12085 // Extract the LHS vectors 12086 SDValue LHS = Op.getOperand(0); 12087 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl); 12088 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl); 12089 12090 // Extract the RHS vectors 12091 SDValue RHS = Op.getOperand(1); 12092 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl); 12093 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl); 12094 12095 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 12096 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); 12097 12098 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, 12099 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1), 12100 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2)); 12101} 12102 12103static SDValue LowerADD(SDValue Op, SelectionDAG &DAG) { 12104 assert(Op.getValueType().is256BitVector() && 12105 Op.getValueType().isInteger() && 12106 "Only handle AVX 256-bit vector integer operation"); 12107 return Lower256IntArith(Op, DAG); 12108} 12109 12110static SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) { 12111 assert(Op.getValueType().is256BitVector() && 12112 Op.getValueType().isInteger() && 12113 "Only handle AVX 256-bit vector integer operation"); 12114 return Lower256IntArith(Op, DAG); 12115} 12116 12117static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget, 12118 SelectionDAG &DAG) { 12119 SDLoc dl(Op); 12120 EVT VT = Op.getValueType(); 12121 12122 // Decompose 256-bit ops into smaller 128-bit ops. 12123 if (VT.is256BitVector() && !Subtarget->hasInt256()) 12124 return Lower256IntArith(Op, DAG); 12125 12126 SDValue A = Op.getOperand(0); 12127 SDValue B = Op.getOperand(1); 12128 12129 // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle. 12130 if (VT == MVT::v4i32) { 12131 assert(Subtarget->hasSSE2() && !Subtarget->hasSSE41() && 12132 "Should not custom lower when pmuldq is available!"); 12133 12134 // Extract the odd parts. 12135 static const int UnpackMask[] = { 1, -1, 3, -1 }; 12136 SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask); 12137 SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask); 12138 12139 // Multiply the even parts. 12140 SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, A, B); 12141 // Now multiply odd parts. 12142 SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, Aodds, Bodds); 12143 12144 Evens = DAG.getNode(ISD::BITCAST, dl, VT, Evens); 12145 Odds = DAG.getNode(ISD::BITCAST, dl, VT, Odds); 12146 12147 // Merge the two vectors back together with a shuffle. This expands into 2 12148 // shuffles. 
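    // Worked example for A = <a0,a1,a2,a3>, B = <b0,b1,b2,b3>:
    //   Evens = pmuludq(A, B)         = < a0*b0 , a2*b2 >   (64-bit lanes)
    //   Odds  = pmuludq(Aodds, Bodds) = < a1*b1 , a3*b3 >
    // Seen as v4i32, the low 32 bits of each product sit in elements 0 and 2,
    // so the <0,4,2,6> shuffle below assembles
    //   < a0*b0 , a1*b1 , a2*b2 , a3*b3 >   (each truncated to 32 bits).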
12149 static const int ShufMask[] = { 0, 4, 2, 6 }; 12150 return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask); 12151 } 12152 12153 assert((VT == MVT::v2i64 || VT == MVT::v4i64) && 12154 "Only know how to lower V2I64/V4I64 multiply"); 12155 12156 // Ahi = psrlqi(a, 32); 12157 // Bhi = psrlqi(b, 32); 12158 // 12159 // AloBlo = pmuludq(a, b); 12160 // AloBhi = pmuludq(a, Bhi); 12161 // AhiBlo = pmuludq(Ahi, b); 12162 12163 // AloBhi = psllqi(AloBhi, 32); 12164 // AhiBlo = psllqi(AhiBlo, 32); 12165 // return AloBlo + AloBhi + AhiBlo; 12166 12167 SDValue ShAmt = DAG.getConstant(32, MVT::i32); 12168 12169 SDValue Ahi = DAG.getNode(X86ISD::VSRLI, dl, VT, A, ShAmt); 12170 SDValue Bhi = DAG.getNode(X86ISD::VSRLI, dl, VT, B, ShAmt); 12171 12172 // Bit cast to 32-bit vectors for MULUDQ 12173 EVT MulVT = (VT == MVT::v2i64) ? MVT::v4i32 : MVT::v8i32; 12174 A = DAG.getNode(ISD::BITCAST, dl, MulVT, A); 12175 B = DAG.getNode(ISD::BITCAST, dl, MulVT, B); 12176 Ahi = DAG.getNode(ISD::BITCAST, dl, MulVT, Ahi); 12177 Bhi = DAG.getNode(ISD::BITCAST, dl, MulVT, Bhi); 12178 12179 SDValue AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B); 12180 SDValue AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi); 12181 SDValue AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B); 12182 12183 AloBhi = DAG.getNode(X86ISD::VSHLI, dl, VT, AloBhi, ShAmt); 12184 AhiBlo = DAG.getNode(X86ISD::VSHLI, dl, VT, AhiBlo, ShAmt); 12185 12186 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi); 12187 return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo); 12188} 12189 12190static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) { 12191 EVT VT = Op.getValueType(); 12192 EVT EltTy = VT.getVectorElementType(); 12193 unsigned NumElts = VT.getVectorNumElements(); 12194 SDValue N0 = Op.getOperand(0); 12195 SDLoc dl(Op); 12196 12197 // Lower sdiv X, pow2-const. 12198 BuildVectorSDNode *C = dyn_cast<BuildVectorSDNode>(Op.getOperand(1)); 12199 if (!C) 12200 return SDValue(); 12201 12202 APInt SplatValue, SplatUndef; 12203 unsigned SplatBitSize; 12204 bool HasAnyUndefs; 12205 if (!C->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, 12206 HasAnyUndefs) || 12207 EltTy.getSizeInBits() < SplatBitSize) 12208 return SDValue(); 12209 12210 if ((SplatValue != 0) && 12211 (SplatValue.isPowerOf2() || (-SplatValue).isPowerOf2())) { 12212 unsigned lg2 = SplatValue.countTrailingZeros(); 12213 // Splat the sign bit. 12214 SDValue Sz = DAG.getConstant(EltTy.getSizeInBits()-1, MVT::i32); 12215 SDValue SGN = getTargetVShiftNode(X86ISD::VSRAI, dl, VT, N0, Sz, DAG); 12216 // Add (N0 < 0) ? abs2 - 1 : 0; 12217 SDValue Amt = DAG.getConstant(EltTy.getSizeInBits() - lg2, MVT::i32); 12218 SDValue SRL = getTargetVShiftNode(X86ISD::VSRLI, dl, VT, SGN, Amt, DAG); 12219 SDValue ADD = DAG.getNode(ISD::ADD, dl, VT, N0, SRL); 12220 SDValue Lg2Amt = DAG.getConstant(lg2, MVT::i32); 12221 SDValue SRA = getTargetVShiftNode(X86ISD::VSRAI, dl, VT, ADD, Lg2Amt, DAG); 12222 12223 // If we're dividing by a positive value, we're done. Otherwise, we must 12224 // negate the result. 
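    // For instance, with EltTy == i32 and a splat of 4 (lg2 == 2), each lane
    // computes, in scalar terms:
    //   sgn = n >> 31;                    // arithmetic shift: 0 or -1
    //   add = n + ((unsigned)sgn >> 30);  // adds 3 only when n is negative
    //   res = add >> 2;                   // arithmetic shift, rounds toward 0
    // and for a splat of -4 the same value is then negated below.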
12225 if (SplatValue.isNonNegative()) 12226 return SRA; 12227 12228 SmallVector<SDValue, 16> V(NumElts, DAG.getConstant(0, EltTy)); 12229 SDValue Zero = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], NumElts); 12230 return DAG.getNode(ISD::SUB, dl, VT, Zero, SRA); 12231 } 12232 return SDValue(); 12233} 12234 12235static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG, 12236 const X86Subtarget *Subtarget) { 12237 EVT VT = Op.getValueType(); 12238 SDLoc dl(Op); 12239 SDValue R = Op.getOperand(0); 12240 SDValue Amt = Op.getOperand(1); 12241 12242 // Optimize shl/srl/sra with constant shift amount. 12243 if (isSplatVector(Amt.getNode())) { 12244 SDValue SclrAmt = Amt->getOperand(0); 12245 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(SclrAmt)) { 12246 uint64_t ShiftAmt = C->getZExtValue(); 12247 12248 if (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 || 12249 (Subtarget->hasInt256() && 12250 (VT == MVT::v4i64 || VT == MVT::v8i32 || VT == MVT::v16i16)) || 12251 (Subtarget->hasAVX512() && 12252 (VT == MVT::v8i64 || VT == MVT::v16i32))) { 12253 if (Op.getOpcode() == ISD::SHL) 12254 return DAG.getNode(X86ISD::VSHLI, dl, VT, R, 12255 DAG.getConstant(ShiftAmt, MVT::i32)); 12256 if (Op.getOpcode() == ISD::SRL) 12257 return DAG.getNode(X86ISD::VSRLI, dl, VT, R, 12258 DAG.getConstant(ShiftAmt, MVT::i32)); 12259 if (Op.getOpcode() == ISD::SRA && VT != MVT::v2i64 && VT != MVT::v4i64) 12260 return DAG.getNode(X86ISD::VSRAI, dl, VT, R, 12261 DAG.getConstant(ShiftAmt, MVT::i32)); 12262 } 12263 12264 if (VT == MVT::v16i8) { 12265 if (Op.getOpcode() == ISD::SHL) { 12266 // Make a large shift. 12267 SDValue SHL = DAG.getNode(X86ISD::VSHLI, dl, MVT::v8i16, R, 12268 DAG.getConstant(ShiftAmt, MVT::i32)); 12269 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL); 12270 // Zero out the rightmost bits. 12271 SmallVector<SDValue, 16> V(16, 12272 DAG.getConstant(uint8_t(-1U << ShiftAmt), 12273 MVT::i8)); 12274 return DAG.getNode(ISD::AND, dl, VT, SHL, 12275 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 16)); 12276 } 12277 if (Op.getOpcode() == ISD::SRL) { 12278 // Make a large shift. 12279 SDValue SRL = DAG.getNode(X86ISD::VSRLI, dl, MVT::v8i16, R, 12280 DAG.getConstant(ShiftAmt, MVT::i32)); 12281 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL); 12282 // Zero out the leftmost bits. 12283 SmallVector<SDValue, 16> V(16, 12284 DAG.getConstant(uint8_t(-1U) >> ShiftAmt, 12285 MVT::i8)); 12286 return DAG.getNode(ISD::AND, dl, VT, SRL, 12287 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 16)); 12288 } 12289 if (Op.getOpcode() == ISD::SRA) { 12290 if (ShiftAmt == 7) { 12291 // R s>> 7 === R s< 0 12292 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl); 12293 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R); 12294 } 12295 12296 // R s>> a === ((R u>> a) ^ m) - m 12297 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt); 12298 SmallVector<SDValue, 16> V(16, DAG.getConstant(128 >> ShiftAmt, 12299 MVT::i8)); 12300 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 16); 12301 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask); 12302 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask); 12303 return Res; 12304 } 12305 llvm_unreachable("Unknown shift opcode."); 12306 } 12307 12308 if (Subtarget->hasInt256() && VT == MVT::v32i8) { 12309 if (Op.getOpcode() == ISD::SHL) { 12310 // Make a large shift. 12311 SDValue SHL = DAG.getNode(X86ISD::VSHLI, dl, MVT::v16i16, R, 12312 DAG.getConstant(ShiftAmt, MVT::i32)); 12313 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL); 12314 // Zero out the rightmost bits. 
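// (With a hypothetical ShiftAmt of 3 the per-byte mask is 0xF8: it clears the
// low three bits of every byte, which in the odd byte of each 16-bit word
// would otherwise hold bits leaked in from the even byte by the v16i16 shift.)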
12315 SmallVector<SDValue, 32> V(32, 12316 DAG.getConstant(uint8_t(-1U << ShiftAmt), 12317 MVT::i8)); 12318 return DAG.getNode(ISD::AND, dl, VT, SHL, 12319 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 32)); 12320 } 12321 if (Op.getOpcode() == ISD::SRL) { 12322 // Make a large shift. 12323 SDValue SRL = DAG.getNode(X86ISD::VSRLI, dl, MVT::v16i16, R, 12324 DAG.getConstant(ShiftAmt, MVT::i32)); 12325 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL); 12326 // Zero out the leftmost bits. 12327 SmallVector<SDValue, 32> V(32, 12328 DAG.getConstant(uint8_t(-1U) >> ShiftAmt, 12329 MVT::i8)); 12330 return DAG.getNode(ISD::AND, dl, VT, SRL, 12331 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 32)); 12332 } 12333 if (Op.getOpcode() == ISD::SRA) { 12334 if (ShiftAmt == 7) { 12335 // R s>> 7 === R s< 0 12336 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl); 12337 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R); 12338 } 12339 12340 // R s>> a === ((R u>> a) ^ m) - m 12341 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt); 12342 SmallVector<SDValue, 32> V(32, DAG.getConstant(128 >> ShiftAmt, 12343 MVT::i8)); 12344 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 32); 12345 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask); 12346 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask); 12347 return Res; 12348 } 12349 llvm_unreachable("Unknown shift opcode."); 12350 } 12351 } 12352 } 12353 12354 // Special case in 32-bit mode, where i64 is expanded into high and low parts. 12355 if (!Subtarget->is64Bit() && 12356 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64)) && 12357 Amt.getOpcode() == ISD::BITCAST && 12358 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) { 12359 Amt = Amt.getOperand(0); 12360 unsigned Ratio = Amt.getValueType().getVectorNumElements() / 12361 VT.getVectorNumElements(); 12362 unsigned RatioInLog2 = Log2_32_Ceil(Ratio); 12363 uint64_t ShiftAmt = 0; 12364 for (unsigned i = 0; i != Ratio; ++i) { 12365 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Amt.getOperand(i)); 12366 if (C == 0) 12367 return SDValue(); 12368 // 6 == Log2(64) 12369 ShiftAmt |= C->getZExtValue() << (i * (1 << (6 - RatioInLog2))); 12370 } 12371 // Check remaining shift amounts. 
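// For illustration (hypothetical constants): a v2i64 shift by 5 reaches this
// point with Amt bitcast from the v4i32 build_vector <5, 0, 5, 0>; Ratio is 2,
// the first pair combines to the 64-bit amount 5, and the loop below only
// accepts the splat if every remaining pair reassembles to that same value.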
12372 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) { 12373 uint64_t ShAmt = 0; 12374 for (unsigned j = 0; j != Ratio; ++j) { 12375 ConstantSDNode *C = 12376 dyn_cast<ConstantSDNode>(Amt.getOperand(i + j)); 12377 if (C == 0) 12378 return SDValue(); 12379 // 6 == Log2(64) 12380 ShAmt |= C->getZExtValue() << (j * (1 << (6 - RatioInLog2))); 12381 } 12382 if (ShAmt != ShiftAmt) 12383 return SDValue(); 12384 } 12385 switch (Op.getOpcode()) { 12386 default: 12387 llvm_unreachable("Unknown shift opcode!"); 12388 case ISD::SHL: 12389 return DAG.getNode(X86ISD::VSHLI, dl, VT, R, 12390 DAG.getConstant(ShiftAmt, MVT::i32)); 12391 case ISD::SRL: 12392 return DAG.getNode(X86ISD::VSRLI, dl, VT, R, 12393 DAG.getConstant(ShiftAmt, MVT::i32)); 12394 case ISD::SRA: 12395 return DAG.getNode(X86ISD::VSRAI, dl, VT, R, 12396 DAG.getConstant(ShiftAmt, MVT::i32)); 12397 } 12398 } 12399 12400 return SDValue(); 12401} 12402 12403static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG, 12404 const X86Subtarget* Subtarget) { 12405 EVT VT = Op.getValueType(); 12406 SDLoc dl(Op); 12407 SDValue R = Op.getOperand(0); 12408 SDValue Amt = Op.getOperand(1); 12409 12410 if ((VT == MVT::v2i64 && Op.getOpcode() != ISD::SRA) || 12411 VT == MVT::v4i32 || VT == MVT::v8i16 || 12412 (Subtarget->hasInt256() && 12413 ((VT == MVT::v4i64 && Op.getOpcode() != ISD::SRA) || 12414 VT == MVT::v8i32 || VT == MVT::v16i16)) || 12415 (Subtarget->hasAVX512() && (VT == MVT::v8i64 || VT == MVT::v16i32))) { 12416 SDValue BaseShAmt; 12417 EVT EltVT = VT.getVectorElementType(); 12418 12419 if (Amt.getOpcode() == ISD::BUILD_VECTOR) { 12420 unsigned NumElts = VT.getVectorNumElements(); 12421 unsigned i, j; 12422 for (i = 0; i != NumElts; ++i) { 12423 if (Amt.getOperand(i).getOpcode() == ISD::UNDEF) 12424 continue; 12425 break; 12426 } 12427 for (j = i; j != NumElts; ++j) { 12428 SDValue Arg = Amt.getOperand(j); 12429 if (Arg.getOpcode() == ISD::UNDEF) continue; 12430 if (Arg != Amt.getOperand(i)) 12431 break; 12432 } 12433 if (i != NumElts && j == NumElts) 12434 BaseShAmt = Amt.getOperand(i); 12435 } else { 12436 if (Amt.getOpcode() == ISD::EXTRACT_SUBVECTOR) 12437 Amt = Amt.getOperand(0); 12438 if (Amt.getOpcode() == ISD::VECTOR_SHUFFLE && 12439 cast<ShuffleVectorSDNode>(Amt)->isSplat()) { 12440 SDValue InVec = Amt.getOperand(0); 12441 if (InVec.getOpcode() == ISD::BUILD_VECTOR) { 12442 unsigned NumElts = InVec.getValueType().getVectorNumElements(); 12443 unsigned i = 0; 12444 for (; i != NumElts; ++i) { 12445 SDValue Arg = InVec.getOperand(i); 12446 if (Arg.getOpcode() == ISD::UNDEF) continue; 12447 BaseShAmt = Arg; 12448 break; 12449 } 12450 } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) { 12451 if (ConstantSDNode *C = 12452 dyn_cast<ConstantSDNode>(InVec.getOperand(2))) { 12453 unsigned SplatIdx = 12454 cast<ShuffleVectorSDNode>(Amt)->getSplatIndex(); 12455 if (C->getZExtValue() == SplatIdx) 12456 BaseShAmt = InVec.getOperand(1); 12457 } 12458 } 12459 if (BaseShAmt.getNode() == 0) 12460 BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Amt, 12461 DAG.getIntPtrConstant(0)); 12462 } 12463 } 12464 12465 if (BaseShAmt.getNode()) { 12466 if (EltVT.bitsGT(MVT::i32)) 12467 BaseShAmt = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BaseShAmt); 12468 else if (EltVT.bitsLT(MVT::i32)) 12469 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt); 12470 12471 switch (Op.getOpcode()) { 12472 default: 12473 llvm_unreachable("Unknown shift opcode!"); 12474 case ISD::SHL: 12475 switch 
(VT.getSimpleVT().SimpleTy) { 12476 default: return SDValue(); 12477 case MVT::v2i64: 12478 case MVT::v4i32: 12479 case MVT::v8i16: 12480 case MVT::v4i64: 12481 case MVT::v8i32: 12482 case MVT::v16i16: 12483 case MVT::v16i32: 12484 case MVT::v8i64: 12485 return getTargetVShiftNode(X86ISD::VSHLI, dl, VT, R, BaseShAmt, DAG); 12486 } 12487 case ISD::SRA: 12488 switch (VT.getSimpleVT().SimpleTy) { 12489 default: return SDValue(); 12490 case MVT::v4i32: 12491 case MVT::v8i16: 12492 case MVT::v8i32: 12493 case MVT::v16i16: 12494 case MVT::v16i32: 12495 case MVT::v8i64: 12496 return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, R, BaseShAmt, DAG); 12497 } 12498 case ISD::SRL: 12499 switch (VT.getSimpleVT().SimpleTy) { 12500 default: return SDValue(); 12501 case MVT::v2i64: 12502 case MVT::v4i32: 12503 case MVT::v8i16: 12504 case MVT::v4i64: 12505 case MVT::v8i32: 12506 case MVT::v16i16: 12507 case MVT::v16i32: 12508 case MVT::v8i64: 12509 return getTargetVShiftNode(X86ISD::VSRLI, dl, VT, R, BaseShAmt, DAG); 12510 } 12511 } 12512 } 12513 } 12514 12515 // Special case in 32-bit mode, where i64 is expanded into high and low parts. 12516 if (!Subtarget->is64Bit() && 12517 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64) || 12518 (Subtarget->hasAVX512() && VT == MVT::v8i64)) && 12519 Amt.getOpcode() == ISD::BITCAST && 12520 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) { 12521 Amt = Amt.getOperand(0); 12522 unsigned Ratio = Amt.getValueType().getVectorNumElements() / 12523 VT.getVectorNumElements(); 12524 std::vector<SDValue> Vals(Ratio); 12525 for (unsigned i = 0; i != Ratio; ++i) 12526 Vals[i] = Amt.getOperand(i); 12527 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) { 12528 for (unsigned j = 0; j != Ratio; ++j) 12529 if (Vals[j] != Amt.getOperand(i + j)) 12530 return SDValue(); 12531 } 12532 switch (Op.getOpcode()) { 12533 default: 12534 llvm_unreachable("Unknown shift opcode!"); 12535 case ISD::SHL: 12536 return DAG.getNode(X86ISD::VSHL, dl, VT, R, Op.getOperand(1)); 12537 case ISD::SRL: 12538 return DAG.getNode(X86ISD::VSRL, dl, VT, R, Op.getOperand(1)); 12539 case ISD::SRA: 12540 return DAG.getNode(X86ISD::VSRA, dl, VT, R, Op.getOperand(1)); 12541 } 12542 } 12543 12544 return SDValue(); 12545} 12546 12547static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget, 12548 SelectionDAG &DAG) { 12549 12550 EVT VT = Op.getValueType(); 12551 SDLoc dl(Op); 12552 SDValue R = Op.getOperand(0); 12553 SDValue Amt = Op.getOperand(1); 12554 SDValue V; 12555 12556 if (!Subtarget->hasSSE2()) 12557 return SDValue(); 12558 12559 V = LowerScalarImmediateShift(Op, DAG, Subtarget); 12560 if (V.getNode()) 12561 return V; 12562 12563 V = LowerScalarVariableShift(Op, DAG, Subtarget); 12564 if (V.getNode()) 12565 return V; 12566 12567 if (Subtarget->hasAVX512() && (VT == MVT::v16i32 || VT == MVT::v8i64)) 12568 return Op; 12569 // AVX2 has VPSLLV/VPSRAV/VPSRLV. 12570 if (Subtarget->hasInt256()) { 12571 if (Op.getOpcode() == ISD::SRL && 12572 (VT == MVT::v2i64 || VT == MVT::v4i32 || 12573 VT == MVT::v4i64 || VT == MVT::v8i32)) 12574 return Op; 12575 if (Op.getOpcode() == ISD::SHL && 12576 (VT == MVT::v2i64 || VT == MVT::v4i32 || 12577 VT == MVT::v4i64 || VT == MVT::v8i32)) 12578 return Op; 12579 if (Op.getOpcode() == ISD::SRA && (VT == MVT::v4i32 || VT == MVT::v8i32)) 12580 return Op; 12581 } 12582 12583 // Lower SHL with variable shift amount. 
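// In the v4i32 case below, (Amt << 23) + 0x3f800000 forms the IEEE-754
// single-precision encoding of 2^Amt in each lane (a hypothetical amount of 5
// gives 0x42000000, i.e. 32.0f), so after converting back to integer the
// variable shift reduces to a vector multiply.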
12584 if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) { 12585 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, VT)); 12586 12587 Op = DAG.getNode(ISD::ADD, dl, VT, Op, DAG.getConstant(0x3f800000U, VT)); 12588 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op); 12589 Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op); 12590 return DAG.getNode(ISD::MUL, dl, VT, Op, R); 12591 } 12592 if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) { 12593 assert(Subtarget->hasSSE2() && "Need SSE2 for pslli/pcmpeq."); 12594 12595 // a = a << 5; 12596 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(5, VT)); 12597 Op = DAG.getNode(ISD::BITCAST, dl, VT, Op); 12598 12599 // Turn 'a' into a mask suitable for VSELECT 12600 SDValue VSelM = DAG.getConstant(0x80, VT); 12601 SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op); 12602 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM); 12603 12604 SDValue CM1 = DAG.getConstant(0x0f, VT); 12605 SDValue CM2 = DAG.getConstant(0x3f, VT); 12606 12607 // r = VSELECT(r, psllw(r & (char16)15, 4), a); 12608 SDValue M = DAG.getNode(ISD::AND, dl, VT, R, CM1); 12609 M = getTargetVShiftNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 12610 DAG.getConstant(4, MVT::i32), DAG); 12611 M = DAG.getNode(ISD::BITCAST, dl, VT, M); 12612 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R); 12613 12614 // a += a 12615 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op); 12616 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op); 12617 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM); 12618 12619 // r = VSELECT(r, psllw(r & (char16)63, 2), a); 12620 M = DAG.getNode(ISD::AND, dl, VT, R, CM2); 12621 M = getTargetVShiftNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 12622 DAG.getConstant(2, MVT::i32), DAG); 12623 M = DAG.getNode(ISD::BITCAST, dl, VT, M); 12624 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R); 12625 12626 // a += a 12627 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op); 12628 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op); 12629 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM); 12630 12631 // return VSELECT(r, r+r, a); 12632 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, 12633 DAG.getNode(ISD::ADD, dl, VT, R, R), R); 12634 return R; 12635 } 12636 12637 // Decompose 256-bit shifts into smaller 128-bit shifts. 
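// This mirrors Lower256IntArith: split R (and the shift amount, element-wise
// for a constant BUILD_VECTOR or via subvector extraction for a variable
// amount) into 128-bit halves, shift each half, and concatenate the results.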
12638 if (VT.is256BitVector()) { 12639 unsigned NumElems = VT.getVectorNumElements(); 12640 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 12641 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); 12642 12643 // Extract the two vectors 12644 SDValue V1 = Extract128BitVector(R, 0, DAG, dl); 12645 SDValue V2 = Extract128BitVector(R, NumElems/2, DAG, dl); 12646 12647 // Recreate the shift amount vectors 12648 SDValue Amt1, Amt2; 12649 if (Amt.getOpcode() == ISD::BUILD_VECTOR) { 12650 // Constant shift amount 12651 SmallVector<SDValue, 4> Amt1Csts; 12652 SmallVector<SDValue, 4> Amt2Csts; 12653 for (unsigned i = 0; i != NumElems/2; ++i) 12654 Amt1Csts.push_back(Amt->getOperand(i)); 12655 for (unsigned i = NumElems/2; i != NumElems; ++i) 12656 Amt2Csts.push_back(Amt->getOperand(i)); 12657 12658 Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, 12659 &Amt1Csts[0], NumElems/2); 12660 Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, 12661 &Amt2Csts[0], NumElems/2); 12662 } else { 12663 // Variable shift amount 12664 Amt1 = Extract128BitVector(Amt, 0, DAG, dl); 12665 Amt2 = Extract128BitVector(Amt, NumElems/2, DAG, dl); 12666 } 12667 12668 // Issue new vector shifts for the smaller types 12669 V1 = DAG.getNode(Op.getOpcode(), dl, NewVT, V1, Amt1); 12670 V2 = DAG.getNode(Op.getOpcode(), dl, NewVT, V2, Amt2); 12671 12672 // Concatenate the result back 12673 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2); 12674 } 12675 12676 return SDValue(); 12677} 12678 12679static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) { 12680 // Lower the "add/sub/mul with overflow" instruction into a regular instruction plus 12681 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering 12682 // looks for this combo and may remove the "setcc" instruction if the "setcc" 12683 // has only one use. 12684 SDNode *N = Op.getNode(); 12685 SDValue LHS = N->getOperand(0); 12686 SDValue RHS = N->getOperand(1); 12687 unsigned BaseOp = 0; 12688 unsigned Cond = 0; 12689 SDLoc DL(Op); 12690 switch (Op.getOpcode()) { 12691 default: llvm_unreachable("Unknown ovf instruction!"); 12692 case ISD::SADDO: 12693 // An add of one will be selected as an INC. Note that INC doesn't 12694 // set CF, so we can't do this for UADDO. 12695 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) 12696 if (C->isOne()) { 12697 BaseOp = X86ISD::INC; 12698 Cond = X86::COND_O; 12699 break; 12700 } 12701 BaseOp = X86ISD::ADD; 12702 Cond = X86::COND_O; 12703 break; 12704 case ISD::UADDO: 12705 BaseOp = X86ISD::ADD; 12706 Cond = X86::COND_B; 12707 break; 12708 case ISD::SSUBO: 12709 // A subtract of one will be selected as a DEC. Note that DEC doesn't 12710 // set CF, so we can't do this for USUBO.
12711 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) 12712 if (C->isOne()) { 12713 BaseOp = X86ISD::DEC; 12714 Cond = X86::COND_O; 12715 break; 12716 } 12717 BaseOp = X86ISD::SUB; 12718 Cond = X86::COND_O; 12719 break; 12720 case ISD::USUBO: 12721 BaseOp = X86ISD::SUB; 12722 Cond = X86::COND_B; 12723 break; 12724 case ISD::SMULO: 12725 BaseOp = X86ISD::SMUL; 12726 Cond = X86::COND_O; 12727 break; 12728 case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs 12729 SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0), 12730 MVT::i32); 12731 SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS); 12732 12733 SDValue SetCC = 12734 DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 12735 DAG.getConstant(X86::COND_O, MVT::i32), 12736 SDValue(Sum.getNode(), 2)); 12737 12738 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC); 12739 } 12740 } 12741 12742 // Also sets EFLAGS. 12743 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32); 12744 SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS); 12745 12746 SDValue SetCC = 12747 DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1), 12748 DAG.getConstant(Cond, MVT::i32), 12749 SDValue(Sum.getNode(), 1)); 12750 12751 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC); 12752} 12753 12754SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, 12755 SelectionDAG &DAG) const { 12756 SDLoc dl(Op); 12757 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 12758 EVT VT = Op.getValueType(); 12759 12760 if (!Subtarget->hasSSE2() || !VT.isVector()) 12761 return SDValue(); 12762 12763 unsigned BitsDiff = VT.getScalarType().getSizeInBits() - 12764 ExtraVT.getScalarType().getSizeInBits(); 12765 SDValue ShAmt = DAG.getConstant(BitsDiff, MVT::i32); 12766 12767 switch (VT.getSimpleVT().SimpleTy) { 12768 default: return SDValue(); 12769 case MVT::v8i32: 12770 case MVT::v16i16: 12771 if (!Subtarget->hasFp256()) 12772 return SDValue(); 12773 if (!Subtarget->hasInt256()) { 12774 // needs to be split 12775 unsigned NumElems = VT.getVectorNumElements(); 12776 12777 // Extract the LHS vectors 12778 SDValue LHS = Op.getOperand(0); 12779 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl); 12780 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl); 12781 12782 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 12783 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); 12784 12785 EVT ExtraEltVT = ExtraVT.getVectorElementType(); 12786 unsigned ExtraNumElems = ExtraVT.getVectorNumElements(); 12787 ExtraVT = EVT::getVectorVT(*DAG.getContext(), ExtraEltVT, 12788 ExtraNumElems/2); 12789 SDValue Extra = DAG.getValueType(ExtraVT); 12790 12791 LHS1 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, Extra); 12792 LHS2 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, Extra); 12793 12794 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, LHS1, LHS2); 12795 } 12796 // fall through 12797 case MVT::v4i32: 12798 case MVT::v8i16: { 12799 // (sext (vzext x)) -> (vsext x) 12800 SDValue Op0 = Op.getOperand(0); 12801 SDValue Op00 = Op0.getOperand(0); 12802 SDValue Tmp1; 12803 // Hopefully, this VECTOR_SHUFFLE is just a VZEXT. 
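// If it is, the elements' high bits are already known to be zero, so the
// sign_extend_inreg of the zero-extended value is equivalent to sign-extending
// the original narrow elements directly, and the whole pattern collapses to a
// single VSEXT.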
12804 if (Op0.getOpcode() == ISD::BITCAST && 12805 Op00.getOpcode() == ISD::VECTOR_SHUFFLE) 12806 Tmp1 = LowerVectorIntExtend(Op00, Subtarget, DAG); 12807 if (Tmp1.getNode()) { 12808 SDValue Tmp1Op0 = Tmp1.getOperand(0); 12809 assert(Tmp1Op0.getOpcode() == X86ISD::VZEXT && 12810 "This optimization is invalid without a VZEXT."); 12811 return DAG.getNode(X86ISD::VSEXT, dl, VT, Tmp1Op0.getOperand(0)); 12812 } 12813 12814 // If the above didn't work, then just use Shift-Left + Shift-Right. 12815 Tmp1 = getTargetVShiftNode(X86ISD::VSHLI, dl, VT, Op0, ShAmt, DAG); 12816 return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, Tmp1, ShAmt, DAG); 12817 } 12818 } 12819} 12820 12821static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget, 12822 SelectionDAG &DAG) { 12823 SDLoc dl(Op); 12824 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>( 12825 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()); 12826 SynchronizationScope FenceScope = static_cast<SynchronizationScope>( 12827 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue()); 12828 12829 // The only fence that needs an instruction is a sequentially-consistent 12830 // cross-thread fence. 12831 if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) { 12832 // Use mfence if we have SSE2 or we're on x86-64 (even if we asked for 12833 // no-sse2). There isn't any reason to disable it if the target processor 12834 // supports it. 12835 if (Subtarget->hasSSE2() || Subtarget->is64Bit()) 12836 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0)); 12837 12838 SDValue Chain = Op.getOperand(0); 12839 SDValue Zero = DAG.getConstant(0, MVT::i32); 12840 SDValue Ops[] = { 12841 DAG.getRegister(X86::ESP, MVT::i32), // Base 12842 DAG.getTargetConstant(1, MVT::i8), // Scale 12843 DAG.getRegister(0, MVT::i32), // Index 12844 DAG.getTargetConstant(0, MVT::i32), // Disp 12845 DAG.getRegister(0, MVT::i32), // Segment. 12846 Zero, 12847 Chain 12848 }; 12849 SDNode *Res = DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops); 12850 return SDValue(Res, 0); 12851 } 12852 12853 // MEMBARRIER is a compiler barrier; it codegens to a no-op. 
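// (We reach this point for single-thread fences and for cross-thread fences
// weaker than seq_cst; x86's hardware ordering guarantees already cover those
// cases, so only compiler reordering needs to be suppressed.)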
12854 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0)); 12855} 12856 12857static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget, 12858 SelectionDAG &DAG) { 12859 EVT T = Op.getValueType(); 12860 SDLoc DL(Op); 12861 unsigned Reg = 0; 12862 unsigned size = 0; 12863 switch(T.getSimpleVT().SimpleTy) { 12864 default: llvm_unreachable("Invalid value type!"); 12865 case MVT::i8: Reg = X86::AL; size = 1; break; 12866 case MVT::i16: Reg = X86::AX; size = 2; break; 12867 case MVT::i32: Reg = X86::EAX; size = 4; break; 12868 case MVT::i64: 12869 assert(Subtarget->is64Bit() && "Node not type legal!"); 12870 Reg = X86::RAX; size = 8; 12871 break; 12872 } 12873 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg, 12874 Op.getOperand(2), SDValue()); 12875 SDValue Ops[] = { cpIn.getValue(0), 12876 Op.getOperand(1), 12877 Op.getOperand(3), 12878 DAG.getTargetConstant(size, MVT::i8), 12879 cpIn.getValue(1) }; 12880 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 12881 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand(); 12882 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys, 12883 Ops, array_lengthof(Ops), T, MMO); 12884 SDValue cpOut = 12885 DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1)); 12886 return cpOut; 12887} 12888 12889static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget, 12890 SelectionDAG &DAG) { 12891 assert(Subtarget->is64Bit() && "Result not type legalized?"); 12892 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 12893 SDValue TheChain = Op.getOperand(0); 12894 SDLoc dl(Op); 12895 SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1); 12896 SDValue rax = DAG.getCopyFromReg(rd, dl, X86::RAX, MVT::i64, rd.getValue(1)); 12897 SDValue rdx = DAG.getCopyFromReg(rax.getValue(1), dl, X86::RDX, MVT::i64, 12898 rax.getValue(2)); 12899 SDValue Tmp = DAG.getNode(ISD::SHL, dl, MVT::i64, rdx, 12900 DAG.getConstant(32, MVT::i8)); 12901 SDValue Ops[] = { 12902 DAG.getNode(ISD::OR, dl, MVT::i64, rax, Tmp), 12903 rdx.getValue(1) 12904 }; 12905 return DAG.getMergeValues(Ops, array_lengthof(Ops), dl); 12906} 12907 12908static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget, 12909 SelectionDAG &DAG) { 12910 MVT SrcVT = Op.getOperand(0).getSimpleValueType(); 12911 MVT DstVT = Op.getSimpleValueType(); 12912 assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() && 12913 Subtarget->hasMMX() && "Unexpected custom BITCAST"); 12914 assert((DstVT == MVT::i64 || 12915 (DstVT.isVector() && DstVT.getSizeInBits()==64)) && 12916 "Unexpected custom BITCAST"); 12917 // i64 <=> MMX conversions are Legal. 12918 if (SrcVT==MVT::i64 && DstVT.isVector()) 12919 return Op; 12920 if (DstVT==MVT::i64 && SrcVT.isVector()) 12921 return Op; 12922 // MMX <=> MMX conversions are Legal. 12923 if (SrcVT.isVector() && DstVT.isVector()) 12924 return Op; 12925 // All other conversions need to be expanded. 
12926 return SDValue(); 12927} 12928 12929static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) { 12930 SDNode *Node = Op.getNode(); 12931 SDLoc dl(Node); 12932 EVT T = Node->getValueType(0); 12933 SDValue negOp = DAG.getNode(ISD::SUB, dl, T, 12934 DAG.getConstant(0, T), Node->getOperand(2)); 12935 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl, 12936 cast<AtomicSDNode>(Node)->getMemoryVT(), 12937 Node->getOperand(0), 12938 Node->getOperand(1), negOp, 12939 cast<AtomicSDNode>(Node)->getSrcValue(), 12940 cast<AtomicSDNode>(Node)->getAlignment(), 12941 cast<AtomicSDNode>(Node)->getOrdering(), 12942 cast<AtomicSDNode>(Node)->getSynchScope()); 12943} 12944 12945static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) { 12946 SDNode *Node = Op.getNode(); 12947 SDLoc dl(Node); 12948 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT(); 12949 12950 // Convert seq_cst store -> xchg 12951 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b) 12952 // FIXME: On 32-bit, store -> fist or movq would be more efficient 12953 // (The only way to get a 16-byte store is cmpxchg16b) 12954 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment. 12955 if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent || 12956 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 12957 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl, 12958 cast<AtomicSDNode>(Node)->getMemoryVT(), 12959 Node->getOperand(0), 12960 Node->getOperand(1), Node->getOperand(2), 12961 cast<AtomicSDNode>(Node)->getMemOperand(), 12962 cast<AtomicSDNode>(Node)->getOrdering(), 12963 cast<AtomicSDNode>(Node)->getSynchScope()); 12964 return Swap.getValue(1); 12965 } 12966 // Other atomic stores have a simple pattern. 12967 return Op; 12968} 12969 12970static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) { 12971 EVT VT = Op.getNode()->getValueType(0); 12972 12973 // Let legalize expand this if it isn't a legal type yet. 12974 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 12975 return SDValue(); 12976 12977 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 12978 12979 unsigned Opc; 12980 bool ExtraOp = false; 12981 switch (Op.getOpcode()) { 12982 default: llvm_unreachable("Invalid code"); 12983 case ISD::ADDC: Opc = X86ISD::ADD; break; 12984 case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break; 12985 case ISD::SUBC: Opc = X86ISD::SUB; break; 12986 case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break; 12987 } 12988 12989 if (!ExtraOp) 12990 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), 12991 Op.getOperand(1)); 12992 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), 12993 Op.getOperand(1), Op.getOperand(2)); 12994} 12995 12996static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget *Subtarget, 12997 SelectionDAG &DAG) { 12998 assert(Subtarget->isTargetDarwin() && Subtarget->is64Bit()); 12999 13000 // For MacOSX, we want to call an alternative entry point: __sincos_stret, 13001 // which returns the values as { float, float } (in XMM0) or 13002 // { double, double } (which is returned in XMM0, XMM1). 13003 SDLoc dl(Op); 13004 SDValue Arg = Op.getOperand(0); 13005 EVT ArgVT = Arg.getValueType(); 13006 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 13007 13008 TargetLowering::ArgListTy Args; 13009 TargetLowering::ArgListEntry Entry; 13010 13011 Entry.Node = Arg; 13012 Entry.Ty = ArgTy; 13013 Entry.isSExt = false; 13014 Entry.isZExt = false; 13015 Args.push_back(Entry); 13016 13017 bool isF64 = ArgVT == MVT::f64; 13018 // Only optimize x86_64 for now. i386 is a bit messy. 
For f32, 13019 // the small struct {f32, f32} is returned in (eax, edx). For f64, 13020 // the results are returned via SRet in memory. 13021 const char *LibcallName = isF64 ? "__sincos_stret" : "__sincosf_stret"; 13022 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 13023 SDValue Callee = DAG.getExternalSymbol(LibcallName, TLI.getPointerTy()); 13024 13025 Type *RetTy = isF64 13026 ? (Type*)StructType::get(ArgTy, ArgTy, NULL) 13027 : (Type*)VectorType::get(ArgTy, 4); 13028 TargetLowering:: 13029 CallLoweringInfo CLI(DAG.getEntryNode(), RetTy, 13030 false, false, false, false, 0, 13031 CallingConv::C, /*isTailCall=*/false, 13032 /*doesNotRet=*/false, /*isReturnValueUsed=*/true, 13033 Callee, Args, DAG, dl); 13034 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI); 13035 13036 if (isF64) 13037 // Returned in xmm0 and xmm1. 13038 return CallResult.first; 13039 13040 // Returned in bits 0:31 and 32:63 of xmm0. 13041 SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT, 13042 CallResult.first, DAG.getIntPtrConstant(0)); 13043 SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT, 13044 CallResult.first, DAG.getIntPtrConstant(1)); 13045 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT); 13046 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal); 13047} 13048 13049/// LowerOperation - Provide custom lowering hooks for some operations. 13050/// 13051SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 13052 switch (Op.getOpcode()) { 13053 default: llvm_unreachable("Should not custom lower this!"); 13054 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op,DAG); 13055 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG); 13056 case ISD::ATOMIC_CMP_SWAP: return LowerCMP_SWAP(Op, Subtarget, DAG); 13057 case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG); 13058 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op,DAG); 13059 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 13060 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 13061 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 13062 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 13063 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 13064 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG); 13065 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG); 13066 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 13067 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 13068 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 13069 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 13070 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG); 13071 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 13072 case ISD::SHL_PARTS: 13073 case ISD::SRA_PARTS: 13074 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG); 13075 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 13076 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG); 13077 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG); 13078 case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG); 13079 case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG); 13080 case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG); 13081 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 13082 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG); 13083 case ISD::FP_EXTEND: return
LowerFP_EXTEND(Op, DAG); 13084 case ISD::FABS: return LowerFABS(Op, DAG); 13085 case ISD::FNEG: return LowerFNEG(Op, DAG); 13086 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 13087 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG); 13088 case ISD::SETCC: return LowerSETCC(Op, DAG); 13089 case ISD::SELECT: return LowerSELECT(Op, DAG); 13090 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 13091 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 13092 case ISD::VASTART: return LowerVASTART(Op, DAG); 13093 case ISD::VAARG: return LowerVAARG(Op, DAG); 13094 case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG); 13095 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 13096 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG); 13097 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 13098 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 13099 case ISD::FRAME_TO_ARGS_OFFSET: 13100 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG); 13101 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); 13102 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG); 13103 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG); 13104 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG); 13105 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG); 13106 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG); 13107 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 13108 case ISD::CTLZ: return LowerCTLZ(Op, DAG); 13109 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, DAG); 13110 case ISD::CTTZ: return LowerCTTZ(Op, DAG); 13111 case ISD::MUL: return LowerMUL(Op, Subtarget, DAG); 13112 case ISD::SRA: 13113 case ISD::SRL: 13114 case ISD::SHL: return LowerShift(Op, Subtarget, DAG); 13115 case ISD::SADDO: 13116 case ISD::UADDO: 13117 case ISD::SSUBO: 13118 case ISD::USUBO: 13119 case ISD::SMULO: 13120 case ISD::UMULO: return LowerXALUO(Op, DAG); 13121 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG); 13122 case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG); 13123 case ISD::ADDC: 13124 case ISD::ADDE: 13125 case ISD::SUBC: 13126 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); 13127 case ISD::ADD: return LowerADD(Op, DAG); 13128 case ISD::SUB: return LowerSUB(Op, DAG); 13129 case ISD::SDIV: return LowerSDIV(Op, DAG); 13130 case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG); 13131 } 13132} 13133 13134static void ReplaceATOMIC_LOAD(SDNode *Node, 13135 SmallVectorImpl<SDValue> &Results, 13136 SelectionDAG &DAG) { 13137 SDLoc dl(Node); 13138 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT(); 13139 13140 // Convert wide load -> cmpxchg8b/cmpxchg16b 13141 // FIXME: On 32-bit, load -> fild or movq would be more efficient 13142 // (The only way to get a 16-byte load is cmpxchg16b) 13143 // FIXME: 16-byte ATOMIC_CMP_SWAP isn't actually hooked up at the moment. 
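// The idiom below is cmpxchg(ptr, 0, 0): if the location holds 0 the exchange
// writes 0 back (a no-op), otherwise the compare fails and nothing is stored;
// in both cases the value returned by the instruction is the atomic load
// result.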
13144 SDValue Zero = DAG.getConstant(0, VT); 13145 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, dl, VT, 13146 Node->getOperand(0), 13147 Node->getOperand(1), Zero, Zero, 13148 cast<AtomicSDNode>(Node)->getMemOperand(), 13149 cast<AtomicSDNode>(Node)->getOrdering(), 13150 cast<AtomicSDNode>(Node)->getSynchScope()); 13151 Results.push_back(Swap.getValue(0)); 13152 Results.push_back(Swap.getValue(1)); 13153} 13154 13155static void 13156ReplaceATOMIC_BINARY_64(SDNode *Node, SmallVectorImpl<SDValue>&Results, 13157 SelectionDAG &DAG, unsigned NewOp) { 13158 SDLoc dl(Node); 13159 assert (Node->getValueType(0) == MVT::i64 && 13160 "Only know how to expand i64 atomics"); 13161 13162 SDValue Chain = Node->getOperand(0); 13163 SDValue In1 = Node->getOperand(1); 13164 SDValue In2L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 13165 Node->getOperand(2), DAG.getIntPtrConstant(0)); 13166 SDValue In2H = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 13167 Node->getOperand(2), DAG.getIntPtrConstant(1)); 13168 SDValue Ops[] = { Chain, In1, In2L, In2H }; 13169 SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 13170 SDValue Result = 13171 DAG.getMemIntrinsicNode(NewOp, dl, Tys, Ops, array_lengthof(Ops), MVT::i64, 13172 cast<MemSDNode>(Node)->getMemOperand()); 13173 SDValue OpsF[] = { Result.getValue(0), Result.getValue(1)}; 13174 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2)); 13175 Results.push_back(Result.getValue(2)); 13176} 13177 13178/// ReplaceNodeResults - Replace a node with an illegal result type 13179/// with a new node built out of custom code. 13180void X86TargetLowering::ReplaceNodeResults(SDNode *N, 13181 SmallVectorImpl<SDValue>&Results, 13182 SelectionDAG &DAG) const { 13183 SDLoc dl(N); 13184 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 13185 switch (N->getOpcode()) { 13186 default: 13187 llvm_unreachable("Do not know how to custom type legalize this operation!"); 13188 case ISD::SIGN_EXTEND_INREG: 13189 case ISD::ADDC: 13190 case ISD::ADDE: 13191 case ISD::SUBC: 13192 case ISD::SUBE: 13193 // We don't want to expand or promote these. 13194 return; 13195 case ISD::FP_TO_SINT: 13196 case ISD::FP_TO_UINT: { 13197 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT; 13198 13199 if (!IsSigned && !isIntegerTypeFTOL(SDValue(N, 0).getValueType())) 13200 return; 13201 13202 std::pair<SDValue,SDValue> Vals = 13203 FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true); 13204 SDValue FIST = Vals.first, StackSlot = Vals.second; 13205 if (FIST.getNode() != 0) { 13206 EVT VT = N->getValueType(0); 13207 // Return a load from the stack slot. 
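// (FP_TO_INTHelper either produced an x87 FIST-style store together with a
// stack slot to reload the integer from, or handed back the converted value
// directly, in which case FIST itself is pushed as the result.)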
13208 if (StackSlot.getNode() != 0) 13209 Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot, 13210 MachinePointerInfo(), 13211 false, false, false, 0)); 13212 else 13213 Results.push_back(FIST); 13214 } 13215 return; 13216 } 13217 case ISD::UINT_TO_FP: { 13218 assert(Subtarget->hasSSE2() && "Requires at least SSE2!"); 13219 if (N->getOperand(0).getValueType() != MVT::v2i32 || 13220 N->getValueType(0) != MVT::v2f32) 13221 return; 13222 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64, 13223 N->getOperand(0)); 13224 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), 13225 MVT::f64); 13226 SDValue VBias = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2f64, Bias, Bias); 13227 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn, 13228 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, VBias)); 13229 Or = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or); 13230 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias); 13231 Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub)); 13232 return; 13233 } 13234 case ISD::FP_ROUND: { 13235 if (!TLI.isTypeLegal(N->getOperand(0).getValueType())) 13236 return; 13237 SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0)); 13238 Results.push_back(V); 13239 return; 13240 } 13241 case ISD::READCYCLECOUNTER: { 13242 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 13243 SDValue TheChain = N->getOperand(0); 13244 SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1); 13245 SDValue eax = DAG.getCopyFromReg(rd, dl, X86::EAX, MVT::i32, 13246 rd.getValue(1)); 13247 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), dl, X86::EDX, MVT::i32, 13248 eax.getValue(2)); 13249 // Use a buildpair to merge the two 32-bit values into a 64-bit one. 13250 SDValue Ops[] = { eax, edx }; 13251 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops, 13252 array_lengthof(Ops))); 13253 Results.push_back(edx.getValue(1)); 13254 return; 13255 } 13256 case ISD::ATOMIC_CMP_SWAP: { 13257 EVT T = N->getValueType(0); 13258 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair"); 13259 bool Regs64bit = T == MVT::i128; 13260 EVT HalfT = Regs64bit ? MVT::i64 : MVT::i32; 13261 SDValue cpInL, cpInH; 13262 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2), 13263 DAG.getConstant(0, HalfT)); 13264 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2), 13265 DAG.getConstant(1, HalfT)); 13266 cpInL = DAG.getCopyToReg(N->getOperand(0), dl, 13267 Regs64bit ? X86::RAX : X86::EAX, 13268 cpInL, SDValue()); 13269 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl, 13270 Regs64bit ? X86::RDX : X86::EDX, 13271 cpInH, cpInL.getValue(1)); 13272 SDValue swapInL, swapInH; 13273 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3), 13274 DAG.getConstant(0, HalfT)); 13275 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3), 13276 DAG.getConstant(1, HalfT)); 13277 swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl, 13278 Regs64bit ? X86::RBX : X86::EBX, 13279 swapInL, cpInH.getValue(1)); 13280 swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl, 13281 Regs64bit ? X86::RCX : X86::ECX, 13282 swapInH, swapInL.getValue(1)); 13283 SDValue Ops[] = { swapInH.getValue(0), 13284 N->getOperand(1), 13285 swapInH.getValue(1) }; 13286 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 13287 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); 13288 unsigned Opcode = Regs64bit ? 
X86ISD::LCMPXCHG16_DAG : 13289 X86ISD::LCMPXCHG8_DAG; 13290 SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, 13291 Ops, array_lengthof(Ops), T, MMO); 13292 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl, 13293 Regs64bit ? X86::RAX : X86::EAX, 13294 HalfT, Result.getValue(1)); 13295 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl, 13296 Regs64bit ? X86::RDX : X86::EDX, 13297 HalfT, cpOutL.getValue(2)); 13298 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)}; 13299 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF, 2)); 13300 Results.push_back(cpOutH.getValue(1)); 13301 return; 13302 } 13303 case ISD::ATOMIC_LOAD_ADD: 13304 case ISD::ATOMIC_LOAD_AND: 13305 case ISD::ATOMIC_LOAD_NAND: 13306 case ISD::ATOMIC_LOAD_OR: 13307 case ISD::ATOMIC_LOAD_SUB: 13308 case ISD::ATOMIC_LOAD_XOR: 13309 case ISD::ATOMIC_LOAD_MAX: 13310 case ISD::ATOMIC_LOAD_MIN: 13311 case ISD::ATOMIC_LOAD_UMAX: 13312 case ISD::ATOMIC_LOAD_UMIN: 13313 case ISD::ATOMIC_SWAP: { 13314 unsigned Opc; 13315 switch (N->getOpcode()) { 13316 default: llvm_unreachable("Unexpected opcode"); 13317 case ISD::ATOMIC_LOAD_ADD: 13318 Opc = X86ISD::ATOMADD64_DAG; 13319 break; 13320 case ISD::ATOMIC_LOAD_AND: 13321 Opc = X86ISD::ATOMAND64_DAG; 13322 break; 13323 case ISD::ATOMIC_LOAD_NAND: 13324 Opc = X86ISD::ATOMNAND64_DAG; 13325 break; 13326 case ISD::ATOMIC_LOAD_OR: 13327 Opc = X86ISD::ATOMOR64_DAG; 13328 break; 13329 case ISD::ATOMIC_LOAD_SUB: 13330 Opc = X86ISD::ATOMSUB64_DAG; 13331 break; 13332 case ISD::ATOMIC_LOAD_XOR: 13333 Opc = X86ISD::ATOMXOR64_DAG; 13334 break; 13335 case ISD::ATOMIC_LOAD_MAX: 13336 Opc = X86ISD::ATOMMAX64_DAG; 13337 break; 13338 case ISD::ATOMIC_LOAD_MIN: 13339 Opc = X86ISD::ATOMMIN64_DAG; 13340 break; 13341 case ISD::ATOMIC_LOAD_UMAX: 13342 Opc = X86ISD::ATOMUMAX64_DAG; 13343 break; 13344 case ISD::ATOMIC_LOAD_UMIN: 13345 Opc = X86ISD::ATOMUMIN64_DAG; 13346 break; 13347 case ISD::ATOMIC_SWAP: 13348 Opc = X86ISD::ATOMSWAP64_DAG; 13349 break; 13350 } 13351 ReplaceATOMIC_BINARY_64(N, Results, DAG, Opc); 13352 return; 13353 } 13354 case ISD::ATOMIC_LOAD: 13355 ReplaceATOMIC_LOAD(N, Results, DAG); 13356 } 13357} 13358 13359const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { 13360 switch (Opcode) { 13361 default: return NULL; 13362 case X86ISD::BSF: return "X86ISD::BSF"; 13363 case X86ISD::BSR: return "X86ISD::BSR"; 13364 case X86ISD::SHLD: return "X86ISD::SHLD"; 13365 case X86ISD::SHRD: return "X86ISD::SHRD"; 13366 case X86ISD::FAND: return "X86ISD::FAND"; 13367 case X86ISD::FANDN: return "X86ISD::FANDN"; 13368 case X86ISD::FOR: return "X86ISD::FOR"; 13369 case X86ISD::FXOR: return "X86ISD::FXOR"; 13370 case X86ISD::FSRL: return "X86ISD::FSRL"; 13371 case X86ISD::FILD: return "X86ISD::FILD"; 13372 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG"; 13373 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM"; 13374 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM"; 13375 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM"; 13376 case X86ISD::FLD: return "X86ISD::FLD"; 13377 case X86ISD::FST: return "X86ISD::FST"; 13378 case X86ISD::CALL: return "X86ISD::CALL"; 13379 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG"; 13380 case X86ISD::BT: return "X86ISD::BT"; 13381 case X86ISD::CMP: return "X86ISD::CMP"; 13382 case X86ISD::COMI: return "X86ISD::COMI"; 13383 case X86ISD::UCOMI: return "X86ISD::UCOMI"; 13384 case X86ISD::CMPM: return "X86ISD::CMPM"; 13385 case X86ISD::CMPMU: return "X86ISD::CMPMU"; 
13386 case X86ISD::SETCC: return "X86ISD::SETCC"; 13387 case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY"; 13388 case X86ISD::FSETCCsd: return "X86ISD::FSETCCsd"; 13389 case X86ISD::FSETCCss: return "X86ISD::FSETCCss"; 13390 case X86ISD::CMOV: return "X86ISD::CMOV"; 13391 case X86ISD::BRCOND: return "X86ISD::BRCOND"; 13392 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG"; 13393 case X86ISD::REP_STOS: return "X86ISD::REP_STOS"; 13394 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS"; 13395 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg"; 13396 case X86ISD::Wrapper: return "X86ISD::Wrapper"; 13397 case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP"; 13398 case X86ISD::PEXTRB: return "X86ISD::PEXTRB"; 13399 case X86ISD::PEXTRW: return "X86ISD::PEXTRW"; 13400 case X86ISD::INSERTPS: return "X86ISD::INSERTPS"; 13401 case X86ISD::PINSRB: return "X86ISD::PINSRB"; 13402 case X86ISD::PINSRW: return "X86ISD::PINSRW"; 13403 case X86ISD::PSHUFB: return "X86ISD::PSHUFB"; 13404 case X86ISD::ANDNP: return "X86ISD::ANDNP"; 13405 case X86ISD::PSIGN: return "X86ISD::PSIGN"; 13406 case X86ISD::BLENDV: return "X86ISD::BLENDV"; 13407 case X86ISD::BLENDI: return "X86ISD::BLENDI"; 13408 case X86ISD::SUBUS: return "X86ISD::SUBUS"; 13409 case X86ISD::HADD: return "X86ISD::HADD"; 13410 case X86ISD::HSUB: return "X86ISD::HSUB"; 13411 case X86ISD::FHADD: return "X86ISD::FHADD"; 13412 case X86ISD::FHSUB: return "X86ISD::FHSUB"; 13413 case X86ISD::UMAX: return "X86ISD::UMAX"; 13414 case X86ISD::UMIN: return "X86ISD::UMIN"; 13415 case X86ISD::SMAX: return "X86ISD::SMAX"; 13416 case X86ISD::SMIN: return "X86ISD::SMIN"; 13417 case X86ISD::FMAX: return "X86ISD::FMAX"; 13418 case X86ISD::FMIN: return "X86ISD::FMIN"; 13419 case X86ISD::FMAXC: return "X86ISD::FMAXC"; 13420 case X86ISD::FMINC: return "X86ISD::FMINC"; 13421 case X86ISD::FRSQRT: return "X86ISD::FRSQRT"; 13422 case X86ISD::FRCP: return "X86ISD::FRCP"; 13423 case X86ISD::TLSADDR: return "X86ISD::TLSADDR"; 13424 case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR"; 13425 case X86ISD::TLSCALL: return "X86ISD::TLSCALL"; 13426 case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP"; 13427 case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP"; 13428 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN"; 13429 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN"; 13430 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m"; 13431 case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r"; 13432 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG"; 13433 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG"; 13434 case X86ISD::ATOMADD64_DAG: return "X86ISD::ATOMADD64_DAG"; 13435 case X86ISD::ATOMSUB64_DAG: return "X86ISD::ATOMSUB64_DAG"; 13436 case X86ISD::ATOMOR64_DAG: return "X86ISD::ATOMOR64_DAG"; 13437 case X86ISD::ATOMXOR64_DAG: return "X86ISD::ATOMXOR64_DAG"; 13438 case X86ISD::ATOMAND64_DAG: return "X86ISD::ATOMAND64_DAG"; 13439 case X86ISD::ATOMNAND64_DAG: return "X86ISD::ATOMNAND64_DAG"; 13440 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL"; 13441 case X86ISD::VSEXT_MOVL: return "X86ISD::VSEXT_MOVL"; 13442 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD"; 13443 case X86ISD::VZEXT: return "X86ISD::VZEXT"; 13444 case X86ISD::VSEXT: return "X86ISD::VSEXT"; 13445 case X86ISD::VTRUNC: return "X86ISD::VTRUNC"; 13446 case X86ISD::VTRUNCM: return "X86ISD::VTRUNCM"; 13447 case X86ISD::VFPEXT: return "X86ISD::VFPEXT"; 13448 case X86ISD::VFPROUND: return "X86ISD::VFPROUND"; 13449 case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ"; 13450 case 
X86ISD::VSRLDQ: return "X86ISD::VSRLDQ"; 13451 case X86ISD::VSHL: return "X86ISD::VSHL"; 13452 case X86ISD::VSRL: return "X86ISD::VSRL"; 13453 case X86ISD::VSRA: return "X86ISD::VSRA"; 13454 case X86ISD::VSHLI: return "X86ISD::VSHLI"; 13455 case X86ISD::VSRLI: return "X86ISD::VSRLI"; 13456 case X86ISD::VSRAI: return "X86ISD::VSRAI"; 13457 case X86ISD::CMPP: return "X86ISD::CMPP"; 13458 case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ"; 13459 case X86ISD::PCMPGT: return "X86ISD::PCMPGT"; 13460 case X86ISD::PCMPEQM: return "X86ISD::PCMPEQM"; 13461 case X86ISD::PCMPGTM: return "X86ISD::PCMPGTM"; 13462 case X86ISD::ADD: return "X86ISD::ADD"; 13463 case X86ISD::SUB: return "X86ISD::SUB"; 13464 case X86ISD::ADC: return "X86ISD::ADC"; 13465 case X86ISD::SBB: return "X86ISD::SBB"; 13466 case X86ISD::SMUL: return "X86ISD::SMUL"; 13467 case X86ISD::UMUL: return "X86ISD::UMUL"; 13468 case X86ISD::INC: return "X86ISD::INC"; 13469 case X86ISD::DEC: return "X86ISD::DEC"; 13470 case X86ISD::OR: return "X86ISD::OR"; 13471 case X86ISD::XOR: return "X86ISD::XOR"; 13472 case X86ISD::AND: return "X86ISD::AND"; 13473 case X86ISD::BLSI: return "X86ISD::BLSI"; 13474 case X86ISD::BLSMSK: return "X86ISD::BLSMSK"; 13475 case X86ISD::BLSR: return "X86ISD::BLSR"; 13476 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM"; 13477 case X86ISD::PTEST: return "X86ISD::PTEST"; 13478 case X86ISD::TESTP: return "X86ISD::TESTP"; 13479 case X86ISD::TESTM: return "X86ISD::TESTM"; 13480 case X86ISD::KORTEST: return "X86ISD::KORTEST"; 13481 case X86ISD::KTEST: return "X86ISD::KTEST"; 13482 case X86ISD::PALIGNR: return "X86ISD::PALIGNR"; 13483 case X86ISD::PSHUFD: return "X86ISD::PSHUFD"; 13484 case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW"; 13485 case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW"; 13486 case X86ISD::SHUFP: return "X86ISD::SHUFP"; 13487 case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS"; 13488 case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD"; 13489 case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS"; 13490 case X86ISD::MOVLPS: return "X86ISD::MOVLPS"; 13491 case X86ISD::MOVLPD: return "X86ISD::MOVLPD"; 13492 case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP"; 13493 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP"; 13494 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP"; 13495 case X86ISD::MOVSD: return "X86ISD::MOVSD"; 13496 case X86ISD::MOVSS: return "X86ISD::MOVSS"; 13497 case X86ISD::UNPCKL: return "X86ISD::UNPCKL"; 13498 case X86ISD::UNPCKH: return "X86ISD::UNPCKH"; 13499 case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST"; 13500 case X86ISD::VBROADCASTM: return "X86ISD::VBROADCASTM"; 13501 case X86ISD::VPERMILP: return "X86ISD::VPERMILP"; 13502 case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128"; 13503 case X86ISD::VPERMV: return "X86ISD::VPERMV"; 13504 case X86ISD::VPERMV3: return "X86ISD::VPERMV3"; 13505 case X86ISD::VPERMI: return "X86ISD::VPERMI"; 13506 case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ"; 13507 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS"; 13508 case X86ISD::VAARG_64: return "X86ISD::VAARG_64"; 13509 case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA"; 13510 case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER"; 13511 case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA"; 13512 case X86ISD::WIN_FTOL: return "X86ISD::WIN_FTOL"; 13513 case X86ISD::SAHF: return "X86ISD::SAHF"; 13514 case X86ISD::RDRAND: return "X86ISD::RDRAND"; 13515 case X86ISD::RDSEED: return "X86ISD::RDSEED"; 13516 case X86ISD::FMADD: return "X86ISD::FMADD"; 13517 case X86ISD::FMSUB: return "X86ISD::FMSUB"; 13518 case 
X86ISD::FNMADD: return "X86ISD::FNMADD"; 13519 case X86ISD::FNMSUB: return "X86ISD::FNMSUB"; 13520 case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB"; 13521 case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD"; 13522 case X86ISD::PCMPESTRI: return "X86ISD::PCMPESTRI"; 13523 case X86ISD::PCMPISTRI: return "X86ISD::PCMPISTRI"; 13524 case X86ISD::XTEST: return "X86ISD::XTEST"; 13525 } 13526} 13527 13528// isLegalAddressingMode - Return true if the addressing mode represented 13529// by AM is legal for this target, for a load/store of the specified type. 13530bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM, 13531 Type *Ty) const { 13532 // X86 supports extremely general addressing modes. 13533 CodeModel::Model M = getTargetMachine().getCodeModel(); 13534 Reloc::Model R = getTargetMachine().getRelocationModel(); 13535 13536 // X86 allows a sign-extended 32-bit immediate field as a displacement. 13537 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != NULL)) 13538 return false; 13539 13540 if (AM.BaseGV) { 13541 unsigned GVFlags = 13542 Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine()); 13543 13544 // If a reference to this global requires an extra load, we can't fold it. 13545 if (isGlobalStubReference(GVFlags)) 13546 return false; 13547 13548 // If BaseGV requires a register for the PIC base, we cannot also have a 13549 // BaseReg specified. 13550 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags)) 13551 return false; 13552 13553 // If lower 4G is not available, then we must use rip-relative addressing. 13554 if ((M != CodeModel::Small || R != Reloc::Static) && 13555 Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1)) 13556 return false; 13557 } 13558 13559 switch (AM.Scale) { 13560 case 0: 13561 case 1: 13562 case 2: 13563 case 4: 13564 case 8: 13565 // These scales always work. 13566 break; 13567 case 3: 13568 case 5: 13569 case 9: 13570 // These scales are formed with basereg+scalereg. Only accept if there is 13571 // no basereg yet. 13572 if (AM.HasBaseReg) 13573 return false; 13574 break; 13575 default: // Other stuff never works. 13576 return false; 13577 } 13578 13579 return true; 13580} 13581 13582bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const { 13583 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) 13584 return false; 13585 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); 13586 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); 13587 return NumBits1 > NumBits2; 13588} 13589 13590bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const { 13591 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) 13592 return false; 13593 13594 if (!isTypeLegal(EVT::getEVT(Ty1))) 13595 return false; 13596 13597 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop"); 13598 13599 // Assuming the caller doesn't have a zeroext or signext return parameter, 13600 // truncation all the way down to i1 is valid. 13601 return true; 13602} 13603 13604bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const { 13605 return isInt<32>(Imm); 13606} 13607 13608bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const { 13609 // Can also use sub to handle negated immediates. 
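// For example, adding the 64-bit constant 0x80000000 cannot be folded: the
// immediate field is sign-extended, so only values that fit in a signed
// 32-bit integer qualify.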
13610 return isInt<32>(Imm); 13611} 13612 13613bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { 13614 if (!VT1.isInteger() || !VT2.isInteger()) 13615 return false; 13616 unsigned NumBits1 = VT1.getSizeInBits(); 13617 unsigned NumBits2 = VT2.getSizeInBits(); 13618 return NumBits1 > NumBits2; 13619} 13620 13621bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const { 13622 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers. 13623 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit(); 13624} 13625 13626bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const { 13627 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers. 13628 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit(); 13629} 13630 13631bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const { 13632 EVT VT1 = Val.getValueType(); 13633 if (isZExtFree(VT1, VT2)) 13634 return true; 13635 13636 if (Val.getOpcode() != ISD::LOAD) 13637 return false; 13638 13639 if (!VT1.isSimple() || !VT1.isInteger() || 13640 !VT2.isSimple() || !VT2.isInteger()) 13641 return false; 13642 13643 switch (VT1.getSimpleVT().SimpleTy) { 13644 default: break; 13645 case MVT::i8: 13646 case MVT::i16: 13647 case MVT::i32: 13648 // X86 has 8, 16, and 32-bit zero-extending loads. 13649 return true; 13650 } 13651 13652 return false; 13653} 13654 13655bool 13656X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { 13657 if (!(Subtarget->hasFMA() || Subtarget->hasFMA4())) 13658 return false; 13659 13660 VT = VT.getScalarType(); 13661 13662 if (!VT.isSimple()) 13663 return false; 13664 13665 switch (VT.getSimpleVT().SimpleTy) { 13666 case MVT::f32: 13667 case MVT::f64: 13668 return true; 13669 default: 13670 break; 13671 } 13672 13673 return false; 13674} 13675 13676bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const { 13677 // i16 instructions are longer (0x66 prefix) and potentially slower. 13678 return !(VT1 == MVT::i32 && VT2 == MVT::i16); 13679} 13680 13681/// isShuffleMaskLegal - Targets can use this to indicate that they only 13682/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 13683/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 13684/// are assumed to be legal. 13685bool 13686X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, 13687 EVT VT) const { 13688 if (!VT.isSimple()) 13689 return false; 13690 13691 MVT SVT = VT.getSimpleVT(); 13692 13693 // Very little shuffling can be done for 64-bit vectors right now. 13694 if (VT.getSizeInBits() == 64) 13695 return false; 13696 13697 // FIXME: pshufb, blends, shifts. 
13698 return (SVT.getVectorNumElements() == 2 || 13699 ShuffleVectorSDNode::isSplatMask(&M[0], VT) || 13700 isMOVLMask(M, SVT) || 13701 isSHUFPMask(M, SVT) || 13702 isPSHUFDMask(M, SVT) || 13703 isPSHUFHWMask(M, SVT, Subtarget->hasInt256()) || 13704 isPSHUFLWMask(M, SVT, Subtarget->hasInt256()) || 13705 isPALIGNRMask(M, SVT, Subtarget) || 13706 isUNPCKLMask(M, SVT, Subtarget->hasInt256()) || 13707 isUNPCKHMask(M, SVT, Subtarget->hasInt256()) || 13708 isUNPCKL_v_undef_Mask(M, SVT, Subtarget->hasInt256()) || 13709 isUNPCKH_v_undef_Mask(M, SVT, Subtarget->hasInt256())); 13710} 13711 13712bool 13713X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask, 13714 EVT VT) const { 13715 if (!VT.isSimple()) 13716 return false; 13717 13718 MVT SVT = VT.getSimpleVT(); 13719 unsigned NumElts = SVT.getVectorNumElements(); 13720 // FIXME: This collection of masks seems suspect. 13721 if (NumElts == 2) 13722 return true; 13723 if (NumElts == 4 && SVT.is128BitVector()) { 13724 return (isMOVLMask(Mask, SVT) || 13725 isCommutedMOVLMask(Mask, SVT, true) || 13726 isSHUFPMask(Mask, SVT) || 13727 isSHUFPMask(Mask, SVT, /* Commuted */ true)); 13728 } 13729 return false; 13730} 13731 13732//===----------------------------------------------------------------------===// 13733// X86 Scheduler Hooks 13734//===----------------------------------------------------------------------===// 13735 13736/// Utility function to emit xbegin specifying the start of an RTM region. 13737static MachineBasicBlock *EmitXBegin(MachineInstr *MI, MachineBasicBlock *MBB, 13738 const TargetInstrInfo *TII) { 13739 DebugLoc DL = MI->getDebugLoc(); 13740 13741 const BasicBlock *BB = MBB->getBasicBlock(); 13742 MachineFunction::iterator I = MBB; 13743 ++I; 13744 13745 // For the v = xbegin(), we generate 13746 // 13747 // thisMBB: 13748 // xbegin sinkMBB 13749 // 13750 // mainMBB: 13751 // eax = -1 13752 // 13753 // sinkMBB: 13754 // v = eax 13755 13756 MachineBasicBlock *thisMBB = MBB; 13757 MachineFunction *MF = MBB->getParent(); 13758 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 13759 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 13760 MF->insert(I, mainMBB); 13761 MF->insert(I, sinkMBB); 13762 13763 // Transfer the remainder of BB and its successor edges to sinkMBB. 13764 sinkMBB->splice(sinkMBB->begin(), MBB, 13765 llvm::next(MachineBasicBlock::iterator(MI)), MBB->end()); 13766 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 13767 13768 // thisMBB: 13769 // xbegin sinkMBB 13770 // # fallthrough to mainMBB 13771 // # abortion to sinkMBB 13772 BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(sinkMBB); 13773 thisMBB->addSuccessor(mainMBB); 13774 thisMBB->addSuccessor(sinkMBB); 13775 13776 // mainMBB: 13777 // EAX = -1 13778 BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), X86::EAX).addImm(-1); 13779 mainMBB->addSuccessor(sinkMBB); 13780 13781 // sinkMBB: 13782 // EAX is live into the sinkMBB 13783 sinkMBB->addLiveIn(X86::EAX); 13784 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 13785 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg()) 13786 .addReg(X86::EAX); 13787 13788 MI->eraseFromParent(); 13789 return sinkMBB; 13790} 13791 13792// Get CMPXCHG opcode for the specified data type. 
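// For example, MVT::i32 maps to X86::LCMPXCHG32, i.e. a LOCK-prefixed cmpxchg
// of the matching width; any other type hits the llvm_unreachable below.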
13793static unsigned getCmpXChgOpcode(EVT VT) { 13794 switch (VT.getSimpleVT().SimpleTy) { 13795 case MVT::i8: return X86::LCMPXCHG8; 13796 case MVT::i16: return X86::LCMPXCHG16; 13797 case MVT::i32: return X86::LCMPXCHG32; 13798 case MVT::i64: return X86::LCMPXCHG64; 13799 default: 13800 break; 13801 } 13802 llvm_unreachable("Invalid operand size!"); 13803} 13804 13805// Get LOAD opcode for the specified data type. 13806static unsigned getLoadOpcode(EVT VT) { 13807 switch (VT.getSimpleVT().SimpleTy) { 13808 case MVT::i8: return X86::MOV8rm; 13809 case MVT::i16: return X86::MOV16rm; 13810 case MVT::i32: return X86::MOV32rm; 13811 case MVT::i64: return X86::MOV64rm; 13812 default: 13813 break; 13814 } 13815 llvm_unreachable("Invalid operand size!"); 13816} 13817 13818// Get opcode of the non-atomic one from the specified atomic instruction. 13819static unsigned getNonAtomicOpcode(unsigned Opc) { 13820 switch (Opc) { 13821 case X86::ATOMAND8: return X86::AND8rr; 13822 case X86::ATOMAND16: return X86::AND16rr; 13823 case X86::ATOMAND32: return X86::AND32rr; 13824 case X86::ATOMAND64: return X86::AND64rr; 13825 case X86::ATOMOR8: return X86::OR8rr; 13826 case X86::ATOMOR16: return X86::OR16rr; 13827 case X86::ATOMOR32: return X86::OR32rr; 13828 case X86::ATOMOR64: return X86::OR64rr; 13829 case X86::ATOMXOR8: return X86::XOR8rr; 13830 case X86::ATOMXOR16: return X86::XOR16rr; 13831 case X86::ATOMXOR32: return X86::XOR32rr; 13832 case X86::ATOMXOR64: return X86::XOR64rr; 13833 } 13834 llvm_unreachable("Unhandled atomic-load-op opcode!"); 13835} 13836 13837// Get opcode of the non-atomic one from the specified atomic instruction with 13838// extra opcode. 13839static unsigned getNonAtomicOpcodeWithExtraOpc(unsigned Opc, 13840 unsigned &ExtraOpc) { 13841 switch (Opc) { 13842 case X86::ATOMNAND8: ExtraOpc = X86::NOT8r; return X86::AND8rr; 13843 case X86::ATOMNAND16: ExtraOpc = X86::NOT16r; return X86::AND16rr; 13844 case X86::ATOMNAND32: ExtraOpc = X86::NOT32r; return X86::AND32rr; 13845 case X86::ATOMNAND64: ExtraOpc = X86::NOT64r; return X86::AND64rr; 13846 case X86::ATOMMAX8: ExtraOpc = X86::CMP8rr; return X86::CMOVL32rr; 13847 case X86::ATOMMAX16: ExtraOpc = X86::CMP16rr; return X86::CMOVL16rr; 13848 case X86::ATOMMAX32: ExtraOpc = X86::CMP32rr; return X86::CMOVL32rr; 13849 case X86::ATOMMAX64: ExtraOpc = X86::CMP64rr; return X86::CMOVL64rr; 13850 case X86::ATOMMIN8: ExtraOpc = X86::CMP8rr; return X86::CMOVG32rr; 13851 case X86::ATOMMIN16: ExtraOpc = X86::CMP16rr; return X86::CMOVG16rr; 13852 case X86::ATOMMIN32: ExtraOpc = X86::CMP32rr; return X86::CMOVG32rr; 13853 case X86::ATOMMIN64: ExtraOpc = X86::CMP64rr; return X86::CMOVG64rr; 13854 case X86::ATOMUMAX8: ExtraOpc = X86::CMP8rr; return X86::CMOVB32rr; 13855 case X86::ATOMUMAX16: ExtraOpc = X86::CMP16rr; return X86::CMOVB16rr; 13856 case X86::ATOMUMAX32: ExtraOpc = X86::CMP32rr; return X86::CMOVB32rr; 13857 case X86::ATOMUMAX64: ExtraOpc = X86::CMP64rr; return X86::CMOVB64rr; 13858 case X86::ATOMUMIN8: ExtraOpc = X86::CMP8rr; return X86::CMOVA32rr; 13859 case X86::ATOMUMIN16: ExtraOpc = X86::CMP16rr; return X86::CMOVA16rr; 13860 case X86::ATOMUMIN32: ExtraOpc = X86::CMP32rr; return X86::CMOVA32rr; 13861 case X86::ATOMUMIN64: ExtraOpc = X86::CMP64rr; return X86::CMOVA64rr; 13862 } 13863 llvm_unreachable("Unhandled atomic-load-op opcode!"); 13864} 13865 13866// Get opcode of the non-atomic one from the specified atomic instruction for 13867// 64-bit data type on 32-bit target. 
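// For example, ATOMADD6432 is split into ADD32rr on the low half and ADC32rr
// (returned through HiOpc) on the high half, so the carry propagates between
// the two 32-bit pieces.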
13868static unsigned getNonAtomic6432Opcode(unsigned Opc, unsigned &HiOpc) { 13869 switch (Opc) { 13870 case X86::ATOMAND6432: HiOpc = X86::AND32rr; return X86::AND32rr; 13871 case X86::ATOMOR6432: HiOpc = X86::OR32rr; return X86::OR32rr; 13872 case X86::ATOMXOR6432: HiOpc = X86::XOR32rr; return X86::XOR32rr; 13873 case X86::ATOMADD6432: HiOpc = X86::ADC32rr; return X86::ADD32rr; 13874 case X86::ATOMSUB6432: HiOpc = X86::SBB32rr; return X86::SUB32rr; 13875 case X86::ATOMSWAP6432: HiOpc = X86::MOV32rr; return X86::MOV32rr; 13876 case X86::ATOMMAX6432: HiOpc = X86::SETLr; return X86::SETLr; 13877 case X86::ATOMMIN6432: HiOpc = X86::SETGr; return X86::SETGr; 13878 case X86::ATOMUMAX6432: HiOpc = X86::SETBr; return X86::SETBr; 13879 case X86::ATOMUMIN6432: HiOpc = X86::SETAr; return X86::SETAr; 13880 } 13881 llvm_unreachable("Unhandled atomic-load-op opcode!"); 13882} 13883 13884// Get opcode of the non-atomic one from the specified atomic instruction for 13885// 64-bit data type on 32-bit target with extra opcode. 13886static unsigned getNonAtomic6432OpcodeWithExtraOpc(unsigned Opc, 13887 unsigned &HiOpc, 13888 unsigned &ExtraOpc) { 13889 switch (Opc) { 13890 case X86::ATOMNAND6432: 13891 ExtraOpc = X86::NOT32r; 13892 HiOpc = X86::AND32rr; 13893 return X86::AND32rr; 13894 } 13895 llvm_unreachable("Unhandled atomic-load-op opcode!"); 13896} 13897 13898// Get pseudo CMOV opcode from the specified data type. 13899static unsigned getPseudoCMOVOpc(EVT VT) { 13900 switch (VT.getSimpleVT().SimpleTy) { 13901 case MVT::i8: return X86::CMOV_GR8; 13902 case MVT::i16: return X86::CMOV_GR16; 13903 case MVT::i32: return X86::CMOV_GR32; 13904 default: 13905 break; 13906 } 13907 llvm_unreachable("Unknown CMOV opcode!"); 13908} 13909 13910// EmitAtomicLoadArith - emit the code sequence for pseudo atomic instructions. 13911// They will be translated into a spin-loop or compare-exchange loop from 13912// 13913// ... 13914// dst = atomic-fetch-op MI.addr, MI.val 13915// ... 13916// 13917// to 13918// 13919// ... 13920// t1 = LOAD MI.addr 13921// loop: 13922// t4 = phi(t1, t3 / loop) 13923// t2 = OP MI.val, t4 13924// EAX = t4 13925// LCMPXCHG [MI.addr], t2, [EAX is implicitly used & defined] 13926// t3 = EAX 13927// JNE loop 13928// sink: 13929// dst = t3 13930// ... 
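//
// In other words, the pseudo becomes a compare-exchange retry loop. As a
// rough source-level sketch (names are illustrative only):
//
//   old = *addr;
//   do {
//     desired = old OP val;
//   } while (!compare_exchange(addr, &old, desired)); // the LCMPXCHG above
//   dst = old;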
13931MachineBasicBlock * 13932X86TargetLowering::EmitAtomicLoadArith(MachineInstr *MI, 13933 MachineBasicBlock *MBB) const { 13934 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 13935 DebugLoc DL = MI->getDebugLoc(); 13936 13937 MachineFunction *MF = MBB->getParent(); 13938 MachineRegisterInfo &MRI = MF->getRegInfo(); 13939 13940 const BasicBlock *BB = MBB->getBasicBlock(); 13941 MachineFunction::iterator I = MBB; 13942 ++I; 13943 13944 assert(MI->getNumOperands() <= X86::AddrNumOperands + 4 && 13945 "Unexpected number of operands"); 13946 13947 assert(MI->hasOneMemOperand() && 13948 "Expected atomic-load-op to have one memoperand"); 13949 13950 // Memory Reference 13951 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 13952 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 13953 13954 unsigned DstReg, SrcReg; 13955 unsigned MemOpndSlot; 13956 13957 unsigned CurOp = 0; 13958 13959 DstReg = MI->getOperand(CurOp++).getReg(); 13960 MemOpndSlot = CurOp; 13961 CurOp += X86::AddrNumOperands; 13962 SrcReg = MI->getOperand(CurOp++).getReg(); 13963 13964 const TargetRegisterClass *RC = MRI.getRegClass(DstReg); 13965 MVT::SimpleValueType VT = *RC->vt_begin(); 13966 unsigned t1 = MRI.createVirtualRegister(RC); 13967 unsigned t2 = MRI.createVirtualRegister(RC); 13968 unsigned t3 = MRI.createVirtualRegister(RC); 13969 unsigned t4 = MRI.createVirtualRegister(RC); 13970 unsigned PhyReg = getX86SubSuperRegister(X86::EAX, VT); 13971 13972 unsigned LCMPXCHGOpc = getCmpXChgOpcode(VT); 13973 unsigned LOADOpc = getLoadOpcode(VT); 13974 13975 // For the atomic load-arith operator, we generate 13976 // 13977 // thisMBB: 13978 // t1 = LOAD [MI.addr] 13979 // mainMBB: 13980 // t4 = phi(t1 / thisMBB, t3 / mainMBB) 13981 // t1 = OP MI.val, EAX 13982 // EAX = t4 13983 // LCMPXCHG [MI.addr], t1, [EAX is implicitly used & defined] 13984 // t3 = EAX 13985 // JNE mainMBB 13986 // sinkMBB: 13987 // dst = t3 13988 13989 MachineBasicBlock *thisMBB = MBB; 13990 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 13991 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 13992 MF->insert(I, mainMBB); 13993 MF->insert(I, sinkMBB); 13994 13995 MachineInstrBuilder MIB; 13996 13997 // Transfer the remainder of BB and its successor edges to sinkMBB. 13998 sinkMBB->splice(sinkMBB->begin(), MBB, 13999 llvm::next(MachineBasicBlock::iterator(MI)), MBB->end()); 14000 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 14001 14002 // thisMBB: 14003 MIB = BuildMI(thisMBB, DL, TII->get(LOADOpc), t1); 14004 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { 14005 MachineOperand NewMO = MI->getOperand(MemOpndSlot + i); 14006 if (NewMO.isReg()) 14007 NewMO.setIsKill(false); 14008 MIB.addOperand(NewMO); 14009 } 14010 for (MachineInstr::mmo_iterator MMOI = MMOBegin; MMOI != MMOEnd; ++MMOI) { 14011 unsigned flags = (*MMOI)->getFlags(); 14012 flags = (flags & ~MachineMemOperand::MOStore) | MachineMemOperand::MOLoad; 14013 MachineMemOperand *MMO = 14014 MF->getMachineMemOperand((*MMOI)->getPointerInfo(), flags, 14015 (*MMOI)->getSize(), 14016 (*MMOI)->getBaseAlignment(), 14017 (*MMOI)->getTBAAInfo(), 14018 (*MMOI)->getRanges()); 14019 MIB.addMemOperand(MMO); 14020 } 14021 14022 thisMBB->addSuccessor(mainMBB); 14023 14024 // mainMBB: 14025 MachineBasicBlock *origMainMBB = mainMBB; 14026 14027 // Add a PHI. 
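  // t4 merges the initial load (t1, incoming from thisMBB) with the value the
  // failed cmpxchg reloaded into EAX (t3, incoming along mainMBB's back edge).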
14028 MachineInstr *Phi = BuildMI(mainMBB, DL, TII->get(X86::PHI), t4) 14029 .addReg(t1).addMBB(thisMBB).addReg(t3).addMBB(mainMBB); 14030 14031 unsigned Opc = MI->getOpcode(); 14032 switch (Opc) { 14033 default: 14034 llvm_unreachable("Unhandled atomic-load-op opcode!"); 14035 case X86::ATOMAND8: 14036 case X86::ATOMAND16: 14037 case X86::ATOMAND32: 14038 case X86::ATOMAND64: 14039 case X86::ATOMOR8: 14040 case X86::ATOMOR16: 14041 case X86::ATOMOR32: 14042 case X86::ATOMOR64: 14043 case X86::ATOMXOR8: 14044 case X86::ATOMXOR16: 14045 case X86::ATOMXOR32: 14046 case X86::ATOMXOR64: { 14047 unsigned ARITHOpc = getNonAtomicOpcode(Opc); 14048 BuildMI(mainMBB, DL, TII->get(ARITHOpc), t2).addReg(SrcReg) 14049 .addReg(t4); 14050 break; 14051 } 14052 case X86::ATOMNAND8: 14053 case X86::ATOMNAND16: 14054 case X86::ATOMNAND32: 14055 case X86::ATOMNAND64: { 14056 unsigned Tmp = MRI.createVirtualRegister(RC); 14057 unsigned NOTOpc; 14058 unsigned ANDOpc = getNonAtomicOpcodeWithExtraOpc(Opc, NOTOpc); 14059 BuildMI(mainMBB, DL, TII->get(ANDOpc), Tmp).addReg(SrcReg) 14060 .addReg(t4); 14061 BuildMI(mainMBB, DL, TII->get(NOTOpc), t2).addReg(Tmp); 14062 break; 14063 } 14064 case X86::ATOMMAX8: 14065 case X86::ATOMMAX16: 14066 case X86::ATOMMAX32: 14067 case X86::ATOMMAX64: 14068 case X86::ATOMMIN8: 14069 case X86::ATOMMIN16: 14070 case X86::ATOMMIN32: 14071 case X86::ATOMMIN64: 14072 case X86::ATOMUMAX8: 14073 case X86::ATOMUMAX16: 14074 case X86::ATOMUMAX32: 14075 case X86::ATOMUMAX64: 14076 case X86::ATOMUMIN8: 14077 case X86::ATOMUMIN16: 14078 case X86::ATOMUMIN32: 14079 case X86::ATOMUMIN64: { 14080 unsigned CMPOpc; 14081 unsigned CMOVOpc = getNonAtomicOpcodeWithExtraOpc(Opc, CMPOpc); 14082 14083 BuildMI(mainMBB, DL, TII->get(CMPOpc)) 14084 .addReg(SrcReg) 14085 .addReg(t4); 14086 14087 if (Subtarget->hasCMov()) { 14088 if (VT != MVT::i8) { 14089 // Native support 14090 BuildMI(mainMBB, DL, TII->get(CMOVOpc), t2) 14091 .addReg(SrcReg) 14092 .addReg(t4); 14093 } else { 14094 // Promote i8 to i32 to use CMOV32 14095 const TargetRegisterInfo* TRI = getTargetMachine().getRegisterInfo(); 14096 const TargetRegisterClass *RC32 = 14097 TRI->getSubClassWithSubReg(getRegClassFor(MVT::i32), X86::sub_8bit); 14098 unsigned SrcReg32 = MRI.createVirtualRegister(RC32); 14099 unsigned AccReg32 = MRI.createVirtualRegister(RC32); 14100 unsigned Tmp = MRI.createVirtualRegister(RC32); 14101 14102 unsigned Undef = MRI.createVirtualRegister(RC32); 14103 BuildMI(mainMBB, DL, TII->get(TargetOpcode::IMPLICIT_DEF), Undef); 14104 14105 BuildMI(mainMBB, DL, TII->get(TargetOpcode::INSERT_SUBREG), SrcReg32) 14106 .addReg(Undef) 14107 .addReg(SrcReg) 14108 .addImm(X86::sub_8bit); 14109 BuildMI(mainMBB, DL, TII->get(TargetOpcode::INSERT_SUBREG), AccReg32) 14110 .addReg(Undef) 14111 .addReg(t4) 14112 .addImm(X86::sub_8bit); 14113 14114 BuildMI(mainMBB, DL, TII->get(CMOVOpc), Tmp) 14115 .addReg(SrcReg32) 14116 .addReg(AccReg32); 14117 14118 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), t2) 14119 .addReg(Tmp, 0, X86::sub_8bit); 14120 } 14121 } else { 14122 // Use pseudo select and lower them. 
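      // Without CMOV support the CMOV_GRxx pseudo is expanded by
      // EmitLoweredSelect into a branch diamond; that splits mainMBB, which is
      // why the PHI created above has to be rebuilt once lowering is done.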
14123 assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) && 14124 "Invalid atomic-load-op transformation!"); 14125 unsigned SelOpc = getPseudoCMOVOpc(VT); 14126 X86::CondCode CC = X86::getCondFromCMovOpc(CMOVOpc); 14127 assert(CC != X86::COND_INVALID && "Invalid atomic-load-op transformation!"); 14128 MIB = BuildMI(mainMBB, DL, TII->get(SelOpc), t2) 14129 .addReg(SrcReg).addReg(t4) 14130 .addImm(CC); 14131 mainMBB = EmitLoweredSelect(MIB, mainMBB); 14132 // Replace the original PHI node as mainMBB is changed after CMOV 14133 // lowering. 14134 BuildMI(*origMainMBB, Phi, DL, TII->get(X86::PHI), t4) 14135 .addReg(t1).addMBB(thisMBB).addReg(t3).addMBB(mainMBB); 14136 Phi->eraseFromParent(); 14137 } 14138 break; 14139 } 14140 } 14141 14142 // Copy PhyReg back from virtual register. 14143 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), PhyReg) 14144 .addReg(t4); 14145 14146 MIB = BuildMI(mainMBB, DL, TII->get(LCMPXCHGOpc)); 14147 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { 14148 MachineOperand NewMO = MI->getOperand(MemOpndSlot + i); 14149 if (NewMO.isReg()) 14150 NewMO.setIsKill(false); 14151 MIB.addOperand(NewMO); 14152 } 14153 MIB.addReg(t2); 14154 MIB.setMemRefs(MMOBegin, MMOEnd); 14155 14156 // Copy PhyReg back to virtual register. 14157 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), t3) 14158 .addReg(PhyReg); 14159 14160 BuildMI(mainMBB, DL, TII->get(X86::JNE_4)).addMBB(origMainMBB); 14161 14162 mainMBB->addSuccessor(origMainMBB); 14163 mainMBB->addSuccessor(sinkMBB); 14164 14165 // sinkMBB: 14166 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 14167 TII->get(TargetOpcode::COPY), DstReg) 14168 .addReg(t3); 14169 14170 MI->eraseFromParent(); 14171 return sinkMBB; 14172} 14173 14174// EmitAtomicLoadArith6432 - emit the code sequence for pseudo atomic 14175// instructions. They will be translated into a spin-loop or compare-exchange 14176// loop from 14177// 14178// ... 14179// dst = atomic-fetch-op MI.addr, MI.val 14180// ... 14181// 14182// to 14183// 14184// ... 14185// t1L = LOAD [MI.addr + 0] 14186// t1H = LOAD [MI.addr + 4] 14187// loop: 14188// t4L = phi(t1L, t3L / loop) 14189// t4H = phi(t1H, t3H / loop) 14190// t2L = OP MI.val.lo, t4L 14191// t2H = OP MI.val.hi, t4H 14192// EAX = t4L 14193// EDX = t4H 14194// EBX = t2L 14195// ECX = t2H 14196// LCMPXCHG8B [MI.addr], [ECX:EBX & EDX:EAX are implicitly used and EDX:EAX is implicitly defined] 14197// t3L = EAX 14198// t3H = EDX 14199// JNE loop 14200// sink: 14201// dstL = t3L 14202// dstH = t3H 14203// ... 
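//
// This is the 32-bit-target counterpart of the expansion above: the 64-bit
// value is handled as a lo/hi register pair (e.g. ATOMADD6432 becomes an
// ADD32rr/ADC32rr pair), the old value circulates in EDX:EAX, the proposed
// new value is placed in ECX:EBX, and LCMPXCHG8B performs the 8-byte
// compare-exchange.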
14204MachineBasicBlock * 14205X86TargetLowering::EmitAtomicLoadArith6432(MachineInstr *MI, 14206 MachineBasicBlock *MBB) const { 14207 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 14208 DebugLoc DL = MI->getDebugLoc(); 14209 14210 MachineFunction *MF = MBB->getParent(); 14211 MachineRegisterInfo &MRI = MF->getRegInfo(); 14212 14213 const BasicBlock *BB = MBB->getBasicBlock(); 14214 MachineFunction::iterator I = MBB; 14215 ++I; 14216 14217 assert(MI->getNumOperands() <= X86::AddrNumOperands + 7 && 14218 "Unexpected number of operands"); 14219 14220 assert(MI->hasOneMemOperand() && 14221 "Expected atomic-load-op32 to have one memoperand"); 14222 14223 // Memory Reference 14224 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 14225 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 14226 14227 unsigned DstLoReg, DstHiReg; 14228 unsigned SrcLoReg, SrcHiReg; 14229 unsigned MemOpndSlot; 14230 14231 unsigned CurOp = 0; 14232 14233 DstLoReg = MI->getOperand(CurOp++).getReg(); 14234 DstHiReg = MI->getOperand(CurOp++).getReg(); 14235 MemOpndSlot = CurOp; 14236 CurOp += X86::AddrNumOperands; 14237 SrcLoReg = MI->getOperand(CurOp++).getReg(); 14238 SrcHiReg = MI->getOperand(CurOp++).getReg(); 14239 14240 const TargetRegisterClass *RC = &X86::GR32RegClass; 14241 const TargetRegisterClass *RC8 = &X86::GR8RegClass; 14242 14243 unsigned t1L = MRI.createVirtualRegister(RC); 14244 unsigned t1H = MRI.createVirtualRegister(RC); 14245 unsigned t2L = MRI.createVirtualRegister(RC); 14246 unsigned t2H = MRI.createVirtualRegister(RC); 14247 unsigned t3L = MRI.createVirtualRegister(RC); 14248 unsigned t3H = MRI.createVirtualRegister(RC); 14249 unsigned t4L = MRI.createVirtualRegister(RC); 14250 unsigned t4H = MRI.createVirtualRegister(RC); 14251 14252 unsigned LCMPXCHGOpc = X86::LCMPXCHG8B; 14253 unsigned LOADOpc = X86::MOV32rm; 14254 14255 // For the atomic load-arith operator, we generate 14256 // 14257 // thisMBB: 14258 // t1L = LOAD [MI.addr + 0] 14259 // t1H = LOAD [MI.addr + 4] 14260 // mainMBB: 14261 // t4L = phi(t1L / thisMBB, t3L / mainMBB) 14262 // t4H = phi(t1H / thisMBB, t3H / mainMBB) 14263 // t2L = OP MI.val.lo, t4L 14264 // t2H = OP MI.val.hi, t4H 14265 // EBX = t2L 14266 // ECX = t2H 14267 // LCMPXCHG8B [MI.addr], [ECX:EBX & EDX:EAX are implicitly used and EDX:EAX is implicitly defined] 14268 // t3L = EAX 14269 // t3H = EDX 14270 // JNE loop 14271 // sinkMBB: 14272 // dstL = t3L 14273 // dstH = t3H 14274 14275 MachineBasicBlock *thisMBB = MBB; 14276 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 14277 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 14278 MF->insert(I, mainMBB); 14279 MF->insert(I, sinkMBB); 14280 14281 MachineInstrBuilder MIB; 14282 14283 // Transfer the remainder of BB and its successor edges to sinkMBB. 
14284 sinkMBB->splice(sinkMBB->begin(), MBB, 14285 llvm::next(MachineBasicBlock::iterator(MI)), MBB->end()); 14286 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 14287 14288 // thisMBB: 14289 // Lo 14290 MIB = BuildMI(thisMBB, DL, TII->get(LOADOpc), t1L); 14291 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { 14292 MachineOperand NewMO = MI->getOperand(MemOpndSlot + i); 14293 if (NewMO.isReg()) 14294 NewMO.setIsKill(false); 14295 MIB.addOperand(NewMO); 14296 } 14297 for (MachineInstr::mmo_iterator MMOI = MMOBegin; MMOI != MMOEnd; ++MMOI) { 14298 unsigned flags = (*MMOI)->getFlags(); 14299 flags = (flags & ~MachineMemOperand::MOStore) | MachineMemOperand::MOLoad; 14300 MachineMemOperand *MMO = 14301 MF->getMachineMemOperand((*MMOI)->getPointerInfo(), flags, 14302 (*MMOI)->getSize(), 14303 (*MMOI)->getBaseAlignment(), 14304 (*MMOI)->getTBAAInfo(), 14305 (*MMOI)->getRanges()); 14306 MIB.addMemOperand(MMO); 14307 }; 14308 MachineInstr *LowMI = MIB; 14309 14310 // Hi 14311 MIB = BuildMI(thisMBB, DL, TII->get(LOADOpc), t1H); 14312 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { 14313 if (i == X86::AddrDisp) { 14314 MIB.addDisp(MI->getOperand(MemOpndSlot + i), 4); // 4 == sizeof(i32) 14315 } else { 14316 MachineOperand NewMO = MI->getOperand(MemOpndSlot + i); 14317 if (NewMO.isReg()) 14318 NewMO.setIsKill(false); 14319 MIB.addOperand(NewMO); 14320 } 14321 } 14322 MIB.setMemRefs(LowMI->memoperands_begin(), LowMI->memoperands_end()); 14323 14324 thisMBB->addSuccessor(mainMBB); 14325 14326 // mainMBB: 14327 MachineBasicBlock *origMainMBB = mainMBB; 14328 14329 // Add PHIs. 14330 MachineInstr *PhiL = BuildMI(mainMBB, DL, TII->get(X86::PHI), t4L) 14331 .addReg(t1L).addMBB(thisMBB).addReg(t3L).addMBB(mainMBB); 14332 MachineInstr *PhiH = BuildMI(mainMBB, DL, TII->get(X86::PHI), t4H) 14333 .addReg(t1H).addMBB(thisMBB).addReg(t3H).addMBB(mainMBB); 14334 14335 unsigned Opc = MI->getOpcode(); 14336 switch (Opc) { 14337 default: 14338 llvm_unreachable("Unhandled atomic-load-op6432 opcode!"); 14339 case X86::ATOMAND6432: 14340 case X86::ATOMOR6432: 14341 case X86::ATOMXOR6432: 14342 case X86::ATOMADD6432: 14343 case X86::ATOMSUB6432: { 14344 unsigned HiOpc; 14345 unsigned LoOpc = getNonAtomic6432Opcode(Opc, HiOpc); 14346 BuildMI(mainMBB, DL, TII->get(LoOpc), t2L).addReg(t4L) 14347 .addReg(SrcLoReg); 14348 BuildMI(mainMBB, DL, TII->get(HiOpc), t2H).addReg(t4H) 14349 .addReg(SrcHiReg); 14350 break; 14351 } 14352 case X86::ATOMNAND6432: { 14353 unsigned HiOpc, NOTOpc; 14354 unsigned LoOpc = getNonAtomic6432OpcodeWithExtraOpc(Opc, HiOpc, NOTOpc); 14355 unsigned TmpL = MRI.createVirtualRegister(RC); 14356 unsigned TmpH = MRI.createVirtualRegister(RC); 14357 BuildMI(mainMBB, DL, TII->get(LoOpc), TmpL).addReg(SrcLoReg) 14358 .addReg(t4L); 14359 BuildMI(mainMBB, DL, TII->get(HiOpc), TmpH).addReg(SrcHiReg) 14360 .addReg(t4H); 14361 BuildMI(mainMBB, DL, TII->get(NOTOpc), t2L).addReg(TmpL); 14362 BuildMI(mainMBB, DL, TII->get(NOTOpc), t2H).addReg(TmpH); 14363 break; 14364 } 14365 case X86::ATOMMAX6432: 14366 case X86::ATOMMIN6432: 14367 case X86::ATOMUMAX6432: 14368 case X86::ATOMUMIN6432: { 14369 unsigned HiOpc; 14370 unsigned LoOpc = getNonAtomic6432Opcode(Opc, HiOpc); 14371 unsigned cL = MRI.createVirtualRegister(RC8); 14372 unsigned cH = MRI.createVirtualRegister(RC8); 14373 unsigned cL32 = MRI.createVirtualRegister(RC); 14374 unsigned cH32 = MRI.createVirtualRegister(RC); 14375 unsigned cc = MRI.createVirtualRegister(RC); 14376 // cl := cmp src_lo, lo 14377 BuildMI(mainMBB, DL, TII->get(X86::CMP32rr)) 
14378 .addReg(SrcLoReg).addReg(t4L); 14379 BuildMI(mainMBB, DL, TII->get(LoOpc), cL); 14380 BuildMI(mainMBB, DL, TII->get(X86::MOVZX32rr8), cL32).addReg(cL); 14381 // ch := cmp src_hi, hi 14382 BuildMI(mainMBB, DL, TII->get(X86::CMP32rr)) 14383 .addReg(SrcHiReg).addReg(t4H); 14384 BuildMI(mainMBB, DL, TII->get(HiOpc), cH); 14385 BuildMI(mainMBB, DL, TII->get(X86::MOVZX32rr8), cH32).addReg(cH); 14386 // cc := if (src_hi == hi) ? cl : ch; 14387 if (Subtarget->hasCMov()) { 14388 BuildMI(mainMBB, DL, TII->get(X86::CMOVE32rr), cc) 14389 .addReg(cH32).addReg(cL32); 14390 } else { 14391 MIB = BuildMI(mainMBB, DL, TII->get(X86::CMOV_GR32), cc) 14392 .addReg(cH32).addReg(cL32) 14393 .addImm(X86::COND_E); 14394 mainMBB = EmitLoweredSelect(MIB, mainMBB); 14395 } 14396 BuildMI(mainMBB, DL, TII->get(X86::TEST32rr)).addReg(cc).addReg(cc); 14397 if (Subtarget->hasCMov()) { 14398 BuildMI(mainMBB, DL, TII->get(X86::CMOVNE32rr), t2L) 14399 .addReg(SrcLoReg).addReg(t4L); 14400 BuildMI(mainMBB, DL, TII->get(X86::CMOVNE32rr), t2H) 14401 .addReg(SrcHiReg).addReg(t4H); 14402 } else { 14403 MIB = BuildMI(mainMBB, DL, TII->get(X86::CMOV_GR32), t2L) 14404 .addReg(SrcLoReg).addReg(t4L) 14405 .addImm(X86::COND_NE); 14406 mainMBB = EmitLoweredSelect(MIB, mainMBB); 14407 // As the lowered CMOV won't clobber EFLAGS, we could reuse it for the 14408 // 2nd CMOV lowering. 14409 mainMBB->addLiveIn(X86::EFLAGS); 14410 MIB = BuildMI(mainMBB, DL, TII->get(X86::CMOV_GR32), t2H) 14411 .addReg(SrcHiReg).addReg(t4H) 14412 .addImm(X86::COND_NE); 14413 mainMBB = EmitLoweredSelect(MIB, mainMBB); 14414 // Replace the original PHI node as mainMBB is changed after CMOV 14415 // lowering. 14416 BuildMI(*origMainMBB, PhiL, DL, TII->get(X86::PHI), t4L) 14417 .addReg(t1L).addMBB(thisMBB).addReg(t3L).addMBB(mainMBB); 14418 BuildMI(*origMainMBB, PhiH, DL, TII->get(X86::PHI), t4H) 14419 .addReg(t1H).addMBB(thisMBB).addReg(t3H).addMBB(mainMBB); 14420 PhiL->eraseFromParent(); 14421 PhiH->eraseFromParent(); 14422 } 14423 break; 14424 } 14425 case X86::ATOMSWAP6432: { 14426 unsigned HiOpc; 14427 unsigned LoOpc = getNonAtomic6432Opcode(Opc, HiOpc); 14428 BuildMI(mainMBB, DL, TII->get(LoOpc), t2L).addReg(SrcLoReg); 14429 BuildMI(mainMBB, DL, TII->get(HiOpc), t2H).addReg(SrcHiReg); 14430 break; 14431 } 14432 } 14433 14434 // Copy EDX:EAX back from HiReg:LoReg 14435 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::EAX).addReg(t4L); 14436 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::EDX).addReg(t4H); 14437 // Copy ECX:EBX from t1H:t1L 14438 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::EBX).addReg(t2L); 14439 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::ECX).addReg(t2H); 14440 14441 MIB = BuildMI(mainMBB, DL, TII->get(LCMPXCHGOpc)); 14442 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { 14443 MachineOperand NewMO = MI->getOperand(MemOpndSlot + i); 14444 if (NewMO.isReg()) 14445 NewMO.setIsKill(false); 14446 MIB.addOperand(NewMO); 14447 } 14448 MIB.setMemRefs(MMOBegin, MMOEnd); 14449 14450 // Copy EDX:EAX back to t3H:t3L 14451 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), t3L).addReg(X86::EAX); 14452 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), t3H).addReg(X86::EDX); 14453 14454 BuildMI(mainMBB, DL, TII->get(X86::JNE_4)).addMBB(origMainMBB); 14455 14456 mainMBB->addSuccessor(origMainMBB); 14457 mainMBB->addSuccessor(sinkMBB); 14458 14459 // sinkMBB: 14460 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 14461 TII->get(TargetOpcode::COPY), DstLoReg) 14462 .addReg(t3L); 14463 BuildMI(*sinkMBB, 
sinkMBB->begin(), DL, 14464 TII->get(TargetOpcode::COPY), DstHiReg) 14465 .addReg(t3H); 14466 14467 MI->eraseFromParent(); 14468 return sinkMBB; 14469} 14470 14471// FIXME: When we get size specific XMM0 registers, i.e. XMM0_V16I8 14472// or XMM0_V32I8 in AVX all of this code can be replaced with that 14473// in the .td file. 14474static MachineBasicBlock *EmitPCMPSTRM(MachineInstr *MI, MachineBasicBlock *BB, 14475 const TargetInstrInfo *TII) { 14476 unsigned Opc; 14477 switch (MI->getOpcode()) { 14478 default: llvm_unreachable("illegal opcode!"); 14479 case X86::PCMPISTRM128REG: Opc = X86::PCMPISTRM128rr; break; 14480 case X86::VPCMPISTRM128REG: Opc = X86::VPCMPISTRM128rr; break; 14481 case X86::PCMPISTRM128MEM: Opc = X86::PCMPISTRM128rm; break; 14482 case X86::VPCMPISTRM128MEM: Opc = X86::VPCMPISTRM128rm; break; 14483 case X86::PCMPESTRM128REG: Opc = X86::PCMPESTRM128rr; break; 14484 case X86::VPCMPESTRM128REG: Opc = X86::VPCMPESTRM128rr; break; 14485 case X86::PCMPESTRM128MEM: Opc = X86::PCMPESTRM128rm; break; 14486 case X86::VPCMPESTRM128MEM: Opc = X86::VPCMPESTRM128rm; break; 14487 } 14488 14489 DebugLoc dl = MI->getDebugLoc(); 14490 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc)); 14491 14492 unsigned NumArgs = MI->getNumOperands(); 14493 for (unsigned i = 1; i < NumArgs; ++i) { 14494 MachineOperand &Op = MI->getOperand(i); 14495 if (!(Op.isReg() && Op.isImplicit())) 14496 MIB.addOperand(Op); 14497 } 14498 if (MI->hasOneMemOperand()) 14499 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); 14500 14501 BuildMI(*BB, MI, dl, 14502 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg()) 14503 .addReg(X86::XMM0); 14504 14505 MI->eraseFromParent(); 14506 return BB; 14507} 14508 14509// FIXME: Custom handling because TableGen doesn't support multiple implicit 14510// defs in an instruction pattern 14511static MachineBasicBlock *EmitPCMPSTRI(MachineInstr *MI, MachineBasicBlock *BB, 14512 const TargetInstrInfo *TII) { 14513 unsigned Opc; 14514 switch (MI->getOpcode()) { 14515 default: llvm_unreachable("illegal opcode!"); 14516 case X86::PCMPISTRIREG: Opc = X86::PCMPISTRIrr; break; 14517 case X86::VPCMPISTRIREG: Opc = X86::VPCMPISTRIrr; break; 14518 case X86::PCMPISTRIMEM: Opc = X86::PCMPISTRIrm; break; 14519 case X86::VPCMPISTRIMEM: Opc = X86::VPCMPISTRIrm; break; 14520 case X86::PCMPESTRIREG: Opc = X86::PCMPESTRIrr; break; 14521 case X86::VPCMPESTRIREG: Opc = X86::VPCMPESTRIrr; break; 14522 case X86::PCMPESTRIMEM: Opc = X86::PCMPESTRIrm; break; 14523 case X86::VPCMPESTRIMEM: Opc = X86::VPCMPESTRIrm; break; 14524 } 14525 14526 DebugLoc dl = MI->getDebugLoc(); 14527 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc)); 14528 14529 unsigned NumArgs = MI->getNumOperands(); // remove the results 14530 for (unsigned i = 1; i < NumArgs; ++i) { 14531 MachineOperand &Op = MI->getOperand(i); 14532 if (!(Op.isReg() && Op.isImplicit())) 14533 MIB.addOperand(Op); 14534 } 14535 if (MI->hasOneMemOperand()) 14536 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); 14537 14538 BuildMI(*BB, MI, dl, 14539 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg()) 14540 .addReg(X86::ECX); 14541 14542 MI->eraseFromParent(); 14543 return BB; 14544} 14545 14546static MachineBasicBlock * EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB, 14547 const TargetInstrInfo *TII, 14548 const X86Subtarget* Subtarget) { 14549 DebugLoc dl = MI->getDebugLoc(); 14550 14551 // Address into RAX/EAX, other two args into ECX, EDX. 14552 unsigned MemOpc = Subtarget->is64Bit() ? 
X86::LEA64r : X86::LEA32r; 14553 unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX; 14554 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg); 14555 for (int i = 0; i < X86::AddrNumOperands; ++i) 14556 MIB.addOperand(MI->getOperand(i)); 14557 14558 unsigned ValOps = X86::AddrNumOperands; 14559 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX) 14560 .addReg(MI->getOperand(ValOps).getReg()); 14561 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX) 14562 .addReg(MI->getOperand(ValOps+1).getReg()); 14563 14564 // The instruction doesn't actually take any operands though. 14565 BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr)); 14566 14567 MI->eraseFromParent(); // The pseudo is gone now. 14568 return BB; 14569} 14570 14571MachineBasicBlock * 14572X86TargetLowering::EmitVAARG64WithCustomInserter( 14573 MachineInstr *MI, 14574 MachineBasicBlock *MBB) const { 14575 // Emit va_arg instruction on X86-64. 14576 14577 // Operands to this pseudo-instruction: 14578 // 0 ) Output : destination address (reg) 14579 // 1-5) Input : va_list address (addr, i64mem) 14580 // 6 ) ArgSize : Size (in bytes) of vararg type 14581 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset 14582 // 8 ) Align : Alignment of type 14583 // 9 ) EFLAGS (implicit-def) 14584 14585 assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!"); 14586 assert(X86::AddrNumOperands == 5 && "VAARG_64 assumes 5 address operands"); 14587 14588 unsigned DestReg = MI->getOperand(0).getReg(); 14589 MachineOperand &Base = MI->getOperand(1); 14590 MachineOperand &Scale = MI->getOperand(2); 14591 MachineOperand &Index = MI->getOperand(3); 14592 MachineOperand &Disp = MI->getOperand(4); 14593 MachineOperand &Segment = MI->getOperand(5); 14594 unsigned ArgSize = MI->getOperand(6).getImm(); 14595 unsigned ArgMode = MI->getOperand(7).getImm(); 14596 unsigned Align = MI->getOperand(8).getImm(); 14597 14598 // Memory Reference 14599 assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand"); 14600 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 14601 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 14602 14603 // Machine Information 14604 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 14605 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 14606 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64); 14607 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32); 14608 DebugLoc DL = MI->getDebugLoc(); 14609 14610 // struct va_list { 14611 // i32 gp_offset 14612 // i32 fp_offset 14613 // i64 overflow_area (address) 14614 // i64 reg_save_area (address) 14615 // } 14616 // sizeof(va_list) = 24 14617 // alignment(va_list) = 8 14618 14619 unsigned TotalNumIntRegs = 6; 14620 unsigned TotalNumXMMRegs = 8; 14621 bool UseGPOffset = (ArgMode == 1); 14622 bool UseFPOffset = (ArgMode == 2); 14623 unsigned MaxOffset = TotalNumIntRegs * 8 + 14624 (UseFPOffset ? 
TotalNumXMMRegs * 16 : 0); 14625 14626 /* Align ArgSize to a multiple of 8 */ 14627 unsigned ArgSizeA8 = (ArgSize + 7) & ~7; 14628 bool NeedsAlign = (Align > 8); 14629 14630 MachineBasicBlock *thisMBB = MBB; 14631 MachineBasicBlock *overflowMBB; 14632 MachineBasicBlock *offsetMBB; 14633 MachineBasicBlock *endMBB; 14634 14635 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB 14636 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB 14637 unsigned OffsetReg = 0; 14638 14639 if (!UseGPOffset && !UseFPOffset) { 14640 // If we only pull from the overflow region, we don't create a branch. 14641 // We don't need to alter control flow. 14642 OffsetDestReg = 0; // unused 14643 OverflowDestReg = DestReg; 14644 14645 offsetMBB = NULL; 14646 overflowMBB = thisMBB; 14647 endMBB = thisMBB; 14648 } else { 14649 // First emit code to check if gp_offset (or fp_offset) is below the bound. 14650 // If so, pull the argument from reg_save_area. (branch to offsetMBB) 14651 // If not, pull from overflow_area. (branch to overflowMBB) 14652 // 14653 // thisMBB 14654 // | . 14655 // | . 14656 // offsetMBB overflowMBB 14657 // | . 14658 // | . 14659 // endMBB 14660 14661 // Registers for the PHI in endMBB 14662 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass); 14663 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass); 14664 14665 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 14666 MachineFunction *MF = MBB->getParent(); 14667 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB); 14668 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB); 14669 endMBB = MF->CreateMachineBasicBlock(LLVM_BB); 14670 14671 MachineFunction::iterator MBBIter = MBB; 14672 ++MBBIter; 14673 14674 // Insert the new basic blocks 14675 MF->insert(MBBIter, offsetMBB); 14676 MF->insert(MBBIter, overflowMBB); 14677 MF->insert(MBBIter, endMBB); 14678 14679 // Transfer the remainder of MBB and its successor edges to endMBB. 14680 endMBB->splice(endMBB->begin(), thisMBB, 14681 llvm::next(MachineBasicBlock::iterator(MI)), 14682 thisMBB->end()); 14683 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB); 14684 14685 // Make offsetMBB and overflowMBB successors of thisMBB 14686 thisMBB->addSuccessor(offsetMBB); 14687 thisMBB->addSuccessor(overflowMBB); 14688 14689 // endMBB is a successor of both offsetMBB and overflowMBB 14690 offsetMBB->addSuccessor(endMBB); 14691 overflowMBB->addSuccessor(endMBB); 14692 14693 // Load the offset value into a register 14694 OffsetReg = MRI.createVirtualRegister(OffsetRegClass); 14695 BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg) 14696 .addOperand(Base) 14697 .addOperand(Scale) 14698 .addOperand(Index) 14699 .addDisp(Disp, UseFPOffset ? 4 : 0) 14700 .addOperand(Segment) 14701 .setMemRefs(MMOBegin, MMOEnd); 14702 14703 // Check if there is enough room left to pull this argument. 14704 BuildMI(thisMBB, DL, TII->get(X86::CMP32ri)) 14705 .addReg(OffsetReg) 14706 .addImm(MaxOffset + 8 - ArgSizeA8); 14707 14708 // Branch to "overflowMBB" if offset >= max 14709 // Fall through to "offsetMBB" otherwise 14710 BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE))) 14711 .addMBB(overflowMBB); 14712 } 14713 14714 // In offsetMBB, emit code to use the reg_save_area. 14715 if (offsetMBB) { 14716 assert(OffsetReg != 0); 14717 14718 // Read the reg_save_area address. 
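    // (reg_save_area sits at offset 16 in the va_list, after the two i32
    // offsets and the i64 overflow_area pointer laid out above.)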
14719 unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass); 14720 BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg) 14721 .addOperand(Base) 14722 .addOperand(Scale) 14723 .addOperand(Index) 14724 .addDisp(Disp, 16) 14725 .addOperand(Segment) 14726 .setMemRefs(MMOBegin, MMOEnd); 14727 14728 // Zero-extend the offset 14729 unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass); 14730 BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64) 14731 .addImm(0) 14732 .addReg(OffsetReg) 14733 .addImm(X86::sub_32bit); 14734 14735 // Add the offset to the reg_save_area to get the final address. 14736 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg) 14737 .addReg(OffsetReg64) 14738 .addReg(RegSaveReg); 14739 14740 // Compute the offset for the next argument 14741 unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass); 14742 BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg) 14743 .addReg(OffsetReg) 14744 .addImm(UseFPOffset ? 16 : 8); 14745 14746 // Store it back into the va_list. 14747 BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr)) 14748 .addOperand(Base) 14749 .addOperand(Scale) 14750 .addOperand(Index) 14751 .addDisp(Disp, UseFPOffset ? 4 : 0) 14752 .addOperand(Segment) 14753 .addReg(NextOffsetReg) 14754 .setMemRefs(MMOBegin, MMOEnd); 14755 14756 // Jump to endMBB 14757 BuildMI(offsetMBB, DL, TII->get(X86::JMP_4)) 14758 .addMBB(endMBB); 14759 } 14760 14761 // 14762 // Emit code to use overflow area 14763 // 14764 14765 // Load the overflow_area address into a register. 14766 unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass); 14767 BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg) 14768 .addOperand(Base) 14769 .addOperand(Scale) 14770 .addOperand(Index) 14771 .addDisp(Disp, 8) 14772 .addOperand(Segment) 14773 .setMemRefs(MMOBegin, MMOEnd); 14774 14775 // If we need to align it, do so. Otherwise, just copy the address 14776 // to OverflowDestReg. 14777 if (NeedsAlign) { 14778 // Align the overflow address 14779 assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2"); 14780 unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass); 14781 14782 // aligned_addr = (addr + (align-1)) & ~(align-1) 14783 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg) 14784 .addReg(OverflowAddrReg) 14785 .addImm(Align-1); 14786 14787 BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg) 14788 .addReg(TmpReg) 14789 .addImm(~(uint64_t)(Align-1)); 14790 } else { 14791 BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg) 14792 .addReg(OverflowAddrReg); 14793 } 14794 14795 // Compute the next overflow address after this argument. 14796 // (the overflow address should be kept 8-byte aligned) 14797 unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass); 14798 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg) 14799 .addReg(OverflowDestReg) 14800 .addImm(ArgSizeA8); 14801 14802 // Store the new overflow address. 14803 BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr)) 14804 .addOperand(Base) 14805 .addOperand(Scale) 14806 .addOperand(Index) 14807 .addDisp(Disp, 8) 14808 .addOperand(Segment) 14809 .addReg(NextAddrReg) 14810 .setMemRefs(MMOBegin, MMOEnd); 14811 14812 // If we branched, emit the PHI to the front of endMBB. 
14813 if (offsetMBB) { 14814 BuildMI(*endMBB, endMBB->begin(), DL, 14815 TII->get(X86::PHI), DestReg) 14816 .addReg(OffsetDestReg).addMBB(offsetMBB) 14817 .addReg(OverflowDestReg).addMBB(overflowMBB); 14818 } 14819 14820 // Erase the pseudo instruction 14821 MI->eraseFromParent(); 14822 14823 return endMBB; 14824} 14825 14826MachineBasicBlock * 14827X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter( 14828 MachineInstr *MI, 14829 MachineBasicBlock *MBB) const { 14830 // Emit code to save XMM registers to the stack. The ABI says that the 14831 // number of registers to save is given in %al, so it's theoretically 14832 // possible to do an indirect jump trick to avoid saving all of them, 14833 // however this code takes a simpler approach and just executes all 14834 // of the stores if %al is non-zero. It's less code, and it's probably 14835 // easier on the hardware branch predictor, and stores aren't all that 14836 // expensive anyway. 14837 14838 // Create the new basic blocks. One block contains all the XMM stores, 14839 // and one block is the final destination regardless of whether any 14840 // stores were performed. 14841 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 14842 MachineFunction *F = MBB->getParent(); 14843 MachineFunction::iterator MBBIter = MBB; 14844 ++MBBIter; 14845 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB); 14846 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB); 14847 F->insert(MBBIter, XMMSaveMBB); 14848 F->insert(MBBIter, EndMBB); 14849 14850 // Transfer the remainder of MBB and its successor edges to EndMBB. 14851 EndMBB->splice(EndMBB->begin(), MBB, 14852 llvm::next(MachineBasicBlock::iterator(MI)), 14853 MBB->end()); 14854 EndMBB->transferSuccessorsAndUpdatePHIs(MBB); 14855 14856 // The original block will now fall through to the XMM save block. 14857 MBB->addSuccessor(XMMSaveMBB); 14858 // The XMMSaveMBB will fall through to the end block. 14859 XMMSaveMBB->addSuccessor(EndMBB); 14860 14861 // Now add the instructions. 14862 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 14863 DebugLoc DL = MI->getDebugLoc(); 14864 14865 unsigned CountReg = MI->getOperand(0).getReg(); 14866 int64_t RegSaveFrameIndex = MI->getOperand(1).getImm(); 14867 int64_t VarArgsFPOffset = MI->getOperand(2).getImm(); 14868 14869 if (!Subtarget->isTargetWin64()) { 14870 // If %al is 0, branch around the XMM save block. 14871 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg); 14872 BuildMI(MBB, DL, TII->get(X86::JE_4)).addMBB(EndMBB); 14873 MBB->addSuccessor(EndMBB); 14874 } 14875 14876 unsigned MOVOpc = Subtarget->hasFp256() ? X86::VMOVAPSmr : X86::MOVAPSmr; 14877 // In the XMM save block, save all the XMM argument registers. 14878 for (int i = 3, e = MI->getNumOperands(); i != e; ++i) { 14879 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset; 14880 MachineMemOperand *MMO = 14881 F->getMachineMemOperand( 14882 MachinePointerInfo::getFixedStack(RegSaveFrameIndex, Offset), 14883 MachineMemOperand::MOStore, 14884 /*Size=*/16, /*Align=*/16); 14885 BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc)) 14886 .addFrameIndex(RegSaveFrameIndex) 14887 .addImm(/*Scale=*/1) 14888 .addReg(/*IndexReg=*/0) 14889 .addImm(/*Disp=*/Offset) 14890 .addReg(/*Segment=*/0) 14891 .addReg(MI->getOperand(i).getReg()) 14892 .addMemOperand(MMO); 14893 } 14894 14895 MI->eraseFromParent(); // The pseudo instruction is gone now. 
14896 14897 return EndMBB; 14898} 14899 14900// The EFLAGS operand of SelectItr might be missing a kill marker 14901// because there were multiple uses of EFLAGS, and ISel didn't know 14902// which to mark. Figure out whether SelectItr should have had a 14903// kill marker, and set it if it should. Returns the correct kill 14904// marker value. 14905static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr, 14906 MachineBasicBlock* BB, 14907 const TargetRegisterInfo* TRI) { 14908 // Scan forward through BB for a use/def of EFLAGS. 14909 MachineBasicBlock::iterator miI(llvm::next(SelectItr)); 14910 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) { 14911 const MachineInstr& mi = *miI; 14912 if (mi.readsRegister(X86::EFLAGS)) 14913 return false; 14914 if (mi.definesRegister(X86::EFLAGS)) 14915 break; // Should have kill-flag - update below. 14916 } 14917 14918 // If we hit the end of the block, check whether EFLAGS is live into a 14919 // successor. 14920 if (miI == BB->end()) { 14921 for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(), 14922 sEnd = BB->succ_end(); 14923 sItr != sEnd; ++sItr) { 14924 MachineBasicBlock* succ = *sItr; 14925 if (succ->isLiveIn(X86::EFLAGS)) 14926 return false; 14927 } 14928 } 14929 14930 // We found a def, or hit the end of the basic block and EFLAGS wasn't live 14931 // out. SelectMI should have a kill flag on EFLAGS. 14932 SelectItr->addRegisterKilled(X86::EFLAGS, TRI); 14933 return true; 14934} 14935 14936MachineBasicBlock * 14937X86TargetLowering::EmitLoweredSelect(MachineInstr *MI, 14938 MachineBasicBlock *BB) const { 14939 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 14940 DebugLoc DL = MI->getDebugLoc(); 14941 14942 // To "insert" a SELECT_CC instruction, we actually have to insert the 14943 // diamond control-flow pattern. The incoming instruction knows the 14944 // destination vreg to set, the condition code register to branch on, the 14945 // true/false values to select between, and a branch opcode to use. 14946 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 14947 MachineFunction::iterator It = BB; 14948 ++It; 14949 14950 // thisMBB: 14951 // ... 14952 // TrueVal = ... 14953 // cmpTY ccX, r1, r2 14954 // bCC copy1MBB 14955 // fallthrough --> copy0MBB 14956 MachineBasicBlock *thisMBB = BB; 14957 MachineFunction *F = BB->getParent(); 14958 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 14959 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 14960 F->insert(It, copy0MBB); 14961 F->insert(It, sinkMBB); 14962 14963 // If the EFLAGS register isn't dead in the terminator, then claim that it's 14964 // live into the sink and copy blocks. 14965 const TargetRegisterInfo* TRI = getTargetMachine().getRegisterInfo(); 14966 if (!MI->killsRegister(X86::EFLAGS) && 14967 !checkAndUpdateEFLAGSKill(MI, BB, TRI)) { 14968 copy0MBB->addLiveIn(X86::EFLAGS); 14969 sinkMBB->addLiveIn(X86::EFLAGS); 14970 } 14971 14972 // Transfer the remainder of BB and its successor edges to sinkMBB. 14973 sinkMBB->splice(sinkMBB->begin(), BB, 14974 llvm::next(MachineBasicBlock::iterator(MI)), 14975 BB->end()); 14976 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 14977 14978 // Add the true and fallthrough blocks as its successors. 14979 BB->addSuccessor(copy0MBB); 14980 BB->addSuccessor(sinkMBB); 14981 14982 // Create the conditional branch instruction. 
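  // Operand 3 of the pseudo carries the X86 condition code, so e.g. COND_E
  // here becomes a JE to sinkMBB.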
14983 unsigned Opc = 14984 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm()); 14985 BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB); 14986 14987 // copy0MBB: 14988 // %FalseValue = ... 14989 // # fallthrough to sinkMBB 14990 copy0MBB->addSuccessor(sinkMBB); 14991 14992 // sinkMBB: 14993 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 14994 // ... 14995 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 14996 TII->get(X86::PHI), MI->getOperand(0).getReg()) 14997 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 14998 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 14999 15000 MI->eraseFromParent(); // The pseudo instruction is gone now. 15001 return sinkMBB; 15002} 15003 15004MachineBasicBlock * 15005X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI, MachineBasicBlock *BB, 15006 bool Is64Bit) const { 15007 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 15008 DebugLoc DL = MI->getDebugLoc(); 15009 MachineFunction *MF = BB->getParent(); 15010 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 15011 15012 assert(getTargetMachine().Options.EnableSegmentedStacks); 15013 15014 unsigned TlsReg = Is64Bit ? X86::FS : X86::GS; 15015 unsigned TlsOffset = Is64Bit ? 0x70 : 0x30; 15016 15017 // BB: 15018 // ... [Till the alloca] 15019 // If stacklet is not large enough, jump to mallocMBB 15020 // 15021 // bumpMBB: 15022 // Allocate by subtracting from RSP 15023 // Jump to continueMBB 15024 // 15025 // mallocMBB: 15026 // Allocate by call to runtime 15027 // 15028 // continueMBB: 15029 // ... 15030 // [rest of original BB] 15031 // 15032 15033 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB); 15034 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB); 15035 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB); 15036 15037 MachineRegisterInfo &MRI = MF->getRegInfo(); 15038 const TargetRegisterClass *AddrRegClass = 15039 getRegClassFor(Is64Bit ? MVT::i64:MVT::i32); 15040 15041 unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass), 15042 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass), 15043 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass), 15044 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass), 15045 sizeVReg = MI->getOperand(1).getReg(), 15046 physSPReg = Is64Bit ? X86::RSP : X86::ESP; 15047 15048 MachineFunction::iterator MBBIter = BB; 15049 ++MBBIter; 15050 15051 MF->insert(MBBIter, bumpMBB); 15052 MF->insert(MBBIter, mallocMBB); 15053 MF->insert(MBBIter, continueMBB); 15054 15055 continueMBB->splice(continueMBB->begin(), BB, llvm::next 15056 (MachineBasicBlock::iterator(MI)), BB->end()); 15057 continueMBB->transferSuccessorsAndUpdatePHIs(BB); 15058 15059 // Add code to the main basic block to check if the stack limit has been hit, 15060 // and if so, jump to mallocMBB otherwise to bumpMBB. 15061 BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg); 15062 BuildMI(BB, DL, TII->get(Is64Bit ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg) 15063 .addReg(tmpSPVReg).addReg(sizeVReg); 15064 BuildMI(BB, DL, TII->get(Is64Bit ? X86::CMP64mr:X86::CMP32mr)) 15065 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg) 15066 .addReg(SPLimitVReg); 15067 BuildMI(BB, DL, TII->get(X86::JG_4)).addMBB(mallocMBB); 15068 15069 // bumpMBB simply decreases the stack pointer, since we know the current 15070 // stacklet has enough space. 
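  // SPLimitVReg already holds SP minus the requested size from the check
  // above, so the "bump" is just copying that value into the physical SP.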
15071 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg) 15072 .addReg(SPLimitVReg); 15073 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg) 15074 .addReg(SPLimitVReg); 15075 BuildMI(bumpMBB, DL, TII->get(X86::JMP_4)).addMBB(continueMBB); 15076 15077 // Calls into a routine in libgcc to allocate more space from the heap. 15078 const uint32_t *RegMask = 15079 getTargetMachine().getRegisterInfo()->getCallPreservedMask(CallingConv::C); 15080 if (Is64Bit) { 15081 BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI) 15082 .addReg(sizeVReg); 15083 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32)) 15084 .addExternalSymbol("__morestack_allocate_stack_space") 15085 .addRegMask(RegMask) 15086 .addReg(X86::RDI, RegState::Implicit) 15087 .addReg(X86::RAX, RegState::ImplicitDefine); 15088 } else { 15089 BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg) 15090 .addImm(12); 15091 BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg); 15092 BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32)) 15093 .addExternalSymbol("__morestack_allocate_stack_space") 15094 .addRegMask(RegMask) 15095 .addReg(X86::EAX, RegState::ImplicitDefine); 15096 } 15097 15098 if (!Is64Bit) 15099 BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg) 15100 .addImm(16); 15101 15102 BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg) 15103 .addReg(Is64Bit ? X86::RAX : X86::EAX); 15104 BuildMI(mallocMBB, DL, TII->get(X86::JMP_4)).addMBB(continueMBB); 15105 15106 // Set up the CFG correctly. 15107 BB->addSuccessor(bumpMBB); 15108 BB->addSuccessor(mallocMBB); 15109 mallocMBB->addSuccessor(continueMBB); 15110 bumpMBB->addSuccessor(continueMBB); 15111 15112 // Take care of the PHI nodes. 15113 BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI), 15114 MI->getOperand(0).getReg()) 15115 .addReg(mallocPtrVReg).addMBB(mallocMBB) 15116 .addReg(bumpSPPtrVReg).addMBB(bumpMBB); 15117 15118 // Delete the original pseudo instruction. 15119 MI->eraseFromParent(); 15120 15121 // And we're done. 15122 return continueMBB; 15123} 15124 15125MachineBasicBlock * 15126X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI, 15127 MachineBasicBlock *BB) const { 15128 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 15129 DebugLoc DL = MI->getDebugLoc(); 15130 15131 assert(!Subtarget->isTargetEnvMacho()); 15132 15133 // The lowering is pretty easy: we're just emitting the call to _alloca. The 15134 // non-trivial part is impdef of ESP. 15135 15136 if (Subtarget->isTargetWin64()) { 15137 if (Subtarget->isTargetCygMing()) { 15138 // ___chkstk(Mingw64): 15139 // Clobbers R10, R11, RAX and EFLAGS. 15140 // Updates RSP. 15141 BuildMI(*BB, MI, DL, TII->get(X86::W64ALLOCA)) 15142 .addExternalSymbol("___chkstk") 15143 .addReg(X86::RAX, RegState::Implicit) 15144 .addReg(X86::RSP, RegState::Implicit) 15145 .addReg(X86::RAX, RegState::Define | RegState::Implicit) 15146 .addReg(X86::RSP, RegState::Define | RegState::Implicit) 15147 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit); 15148 } else { 15149 // __chkstk(MSVCRT): does not update stack pointer. 15150 // Clobbers R10, R11 and EFLAGS. 15151 BuildMI(*BB, MI, DL, TII->get(X86::W64ALLOCA)) 15152 .addExternalSymbol("__chkstk") 15153 .addReg(X86::RAX, RegState::Implicit) 15154 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit); 15155 // RAX has the offset to be subtracted from RSP. 
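      // The emitted sequence is therefore effectively
      //   call __chkstk
      //   sub  rsp, rax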
15156 BuildMI(*BB, MI, DL, TII->get(X86::SUB64rr), X86::RSP) 15157 .addReg(X86::RSP) 15158 .addReg(X86::RAX); 15159 } 15160 } else { 15161 const char *StackProbeSymbol = 15162 Subtarget->isTargetWindows() ? "_chkstk" : "_alloca"; 15163 15164 BuildMI(*BB, MI, DL, TII->get(X86::CALLpcrel32)) 15165 .addExternalSymbol(StackProbeSymbol) 15166 .addReg(X86::EAX, RegState::Implicit) 15167 .addReg(X86::ESP, RegState::Implicit) 15168 .addReg(X86::EAX, RegState::Define | RegState::Implicit) 15169 .addReg(X86::ESP, RegState::Define | RegState::Implicit) 15170 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit); 15171 } 15172 15173 MI->eraseFromParent(); // The pseudo instruction is gone now. 15174 return BB; 15175} 15176 15177MachineBasicBlock * 15178X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI, 15179 MachineBasicBlock *BB) const { 15180 // This is pretty easy. We're taking the value that we received from 15181 // our load from the relocation, sticking it in either RDI (x86-64) 15182 // or EAX and doing an indirect call. The return value will then 15183 // be in the normal return register. 15184 const X86InstrInfo *TII 15185 = static_cast<const X86InstrInfo*>(getTargetMachine().getInstrInfo()); 15186 DebugLoc DL = MI->getDebugLoc(); 15187 MachineFunction *F = BB->getParent(); 15188 15189 assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?"); 15190 assert(MI->getOperand(3).isGlobal() && "This should be a global"); 15191 15192 // Get a register mask for the lowered call. 15193 // FIXME: The 32-bit calls have non-standard calling conventions. Use a 15194 // proper register mask. 15195 const uint32_t *RegMask = 15196 getTargetMachine().getRegisterInfo()->getCallPreservedMask(CallingConv::C); 15197 if (Subtarget->is64Bit()) { 15198 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, 15199 TII->get(X86::MOV64rm), X86::RDI) 15200 .addReg(X86::RIP) 15201 .addImm(0).addReg(0) 15202 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, 15203 MI->getOperand(3).getTargetFlags()) 15204 .addReg(0); 15205 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m)); 15206 addDirectMem(MIB, X86::RDI); 15207 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask); 15208 } else if (getTargetMachine().getRelocationModel() != Reloc::PIC_) { 15209 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, 15210 TII->get(X86::MOV32rm), X86::EAX) 15211 .addReg(0) 15212 .addImm(0).addReg(0) 15213 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, 15214 MI->getOperand(3).getTargetFlags()) 15215 .addReg(0); 15216 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m)); 15217 addDirectMem(MIB, X86::EAX); 15218 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask); 15219 } else { 15220 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, 15221 TII->get(X86::MOV32rm), X86::EAX) 15222 .addReg(TII->getGlobalBaseReg(F)) 15223 .addImm(0).addReg(0) 15224 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, 15225 MI->getOperand(3).getTargetFlags()) 15226 .addReg(0); 15227 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m)); 15228 addDirectMem(MIB, X86::EAX); 15229 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask); 15230 } 15231 15232 MI->eraseFromParent(); // The pseudo instruction is gone now. 
15233 return BB; 15234} 15235 15236MachineBasicBlock * 15237X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI, 15238 MachineBasicBlock *MBB) const { 15239 DebugLoc DL = MI->getDebugLoc(); 15240 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 15241 15242 MachineFunction *MF = MBB->getParent(); 15243 MachineRegisterInfo &MRI = MF->getRegInfo(); 15244 15245 const BasicBlock *BB = MBB->getBasicBlock(); 15246 MachineFunction::iterator I = MBB; 15247 ++I; 15248 15249 // Memory Reference 15250 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 15251 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 15252 15253 unsigned DstReg; 15254 unsigned MemOpndSlot = 0; 15255 15256 unsigned CurOp = 0; 15257 15258 DstReg = MI->getOperand(CurOp++).getReg(); 15259 const TargetRegisterClass *RC = MRI.getRegClass(DstReg); 15260 assert(RC->hasType(MVT::i32) && "Invalid destination!"); 15261 unsigned mainDstReg = MRI.createVirtualRegister(RC); 15262 unsigned restoreDstReg = MRI.createVirtualRegister(RC); 15263 15264 MemOpndSlot = CurOp; 15265 15266 MVT PVT = getPointerTy(); 15267 assert((PVT == MVT::i64 || PVT == MVT::i32) && 15268 "Invalid Pointer Size!"); 15269 15270 // For v = setjmp(buf), we generate 15271 // 15272 // thisMBB: 15273 // buf[LabelOffset] = restoreMBB 15274 // SjLjSetup restoreMBB 15275 // 15276 // mainMBB: 15277 // v_main = 0 15278 // 15279 // sinkMBB: 15280 // v = phi(main, restore) 15281 // 15282 // restoreMBB: 15283 // v_restore = 1 15284 15285 MachineBasicBlock *thisMBB = MBB; 15286 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 15287 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 15288 MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB); 15289 MF->insert(I, mainMBB); 15290 MF->insert(I, sinkMBB); 15291 MF->push_back(restoreMBB); 15292 15293 MachineInstrBuilder MIB; 15294 15295 // Transfer the remainder of BB and its successor edges to sinkMBB. 15296 sinkMBB->splice(sinkMBB->begin(), MBB, 15297 llvm::next(MachineBasicBlock::iterator(MI)), MBB->end()); 15298 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 15299 15300 // thisMBB: 15301 unsigned PtrStoreOpc = 0; 15302 unsigned LabelReg = 0; 15303 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 15304 Reloc::Model RM = getTargetMachine().getRelocationModel(); 15305 bool UseImmLabel = (getTargetMachine().getCodeModel() == CodeModel::Small) && 15306 (RM == Reloc::Static || RM == Reloc::DynamicNoPIC); 15307 15308 // Prepare IP either in reg or imm. 15309 if (!UseImmLabel) { 15310 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr; 15311 const TargetRegisterClass *PtrRC = getRegClassFor(PVT); 15312 LabelReg = MRI.createVirtualRegister(PtrRC); 15313 if (Subtarget->is64Bit()) { 15314 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg) 15315 .addReg(X86::RIP) 15316 .addImm(0) 15317 .addReg(0) 15318 .addMBB(restoreMBB) 15319 .addReg(0); 15320 } else { 15321 const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII); 15322 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg) 15323 .addReg(XII->getGlobalBaseReg(MF)) 15324 .addImm(0) 15325 .addReg(0) 15326 .addMBB(restoreMBB, Subtarget->ClassifyBlockAddressReference()) 15327 .addReg(0); 15328 } 15329 } else 15330 PtrStoreOpc = (PVT == MVT::i64) ? 
X86::MOV64mi32 : X86::MOV32mi; 15331 // Store IP 15332 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc)); 15333 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { 15334 if (i == X86::AddrDisp) 15335 MIB.addDisp(MI->getOperand(MemOpndSlot + i), LabelOffset); 15336 else 15337 MIB.addOperand(MI->getOperand(MemOpndSlot + i)); 15338 } 15339 if (!UseImmLabel) 15340 MIB.addReg(LabelReg); 15341 else 15342 MIB.addMBB(restoreMBB); 15343 MIB.setMemRefs(MMOBegin, MMOEnd); 15344 // Setup 15345 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup)) 15346 .addMBB(restoreMBB); 15347 15348 const X86RegisterInfo *RegInfo = 15349 static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo()); 15350 MIB.addRegMask(RegInfo->getNoPreservedMask()); 15351 thisMBB->addSuccessor(mainMBB); 15352 thisMBB->addSuccessor(restoreMBB); 15353 15354 // mainMBB: 15355 // EAX = 0 15356 BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg); 15357 mainMBB->addSuccessor(sinkMBB); 15358 15359 // sinkMBB: 15360 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 15361 TII->get(X86::PHI), DstReg) 15362 .addReg(mainDstReg).addMBB(mainMBB) 15363 .addReg(restoreDstReg).addMBB(restoreMBB); 15364 15365 // restoreMBB: 15366 BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1); 15367 BuildMI(restoreMBB, DL, TII->get(X86::JMP_4)).addMBB(sinkMBB); 15368 restoreMBB->addSuccessor(sinkMBB); 15369 15370 MI->eraseFromParent(); 15371 return sinkMBB; 15372} 15373 15374MachineBasicBlock * 15375X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI, 15376 MachineBasicBlock *MBB) const { 15377 DebugLoc DL = MI->getDebugLoc(); 15378 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 15379 15380 MachineFunction *MF = MBB->getParent(); 15381 MachineRegisterInfo &MRI = MF->getRegInfo(); 15382 15383 // Memory Reference 15384 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 15385 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 15386 15387 MVT PVT = getPointerTy(); 15388 assert((PVT == MVT::i64 || PVT == MVT::i32) && 15389 "Invalid Pointer Size!"); 15390 15391 const TargetRegisterClass *RC = 15392 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass; 15393 unsigned Tmp = MRI.createVirtualRegister(RC); 15394 // Since FP is only updated here but NOT referenced, it's treated as GPR. 15395 const X86RegisterInfo *RegInfo = 15396 static_cast<const X86RegisterInfo*>(getTargetMachine().getRegisterInfo()); 15397 unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP; 15398 unsigned SP = RegInfo->getStackRegister(); 15399 15400 MachineInstrBuilder MIB; 15401 15402 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 15403 const int64_t SPOffset = 2 * PVT.getStoreSize(); 15404 15405 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm; 15406 unsigned IJmpOpc = (PVT == MVT::i64) ? 
X86::JMP64r : X86::JMP32r; 15407 15408 // Reload FP 15409 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP); 15410 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) 15411 MIB.addOperand(MI->getOperand(i)); 15412 MIB.setMemRefs(MMOBegin, MMOEnd); 15413 // Reload IP 15414 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp); 15415 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { 15416 if (i == X86::AddrDisp) 15417 MIB.addDisp(MI->getOperand(i), LabelOffset); 15418 else 15419 MIB.addOperand(MI->getOperand(i)); 15420 } 15421 MIB.setMemRefs(MMOBegin, MMOEnd); 15422 // Reload SP 15423 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), SP); 15424 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { 15425 if (i == X86::AddrDisp) 15426 MIB.addDisp(MI->getOperand(i), SPOffset); 15427 else 15428 MIB.addOperand(MI->getOperand(i)); 15429 } 15430 MIB.setMemRefs(MMOBegin, MMOEnd); 15431 // Jump 15432 BuildMI(*MBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp); 15433 15434 MI->eraseFromParent(); 15435 return MBB; 15436} 15437 15438MachineBasicBlock * 15439X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 15440 MachineBasicBlock *BB) const { 15441 switch (MI->getOpcode()) { 15442 default: llvm_unreachable("Unexpected instr type to insert"); 15443 case X86::TAILJMPd64: 15444 case X86::TAILJMPr64: 15445 case X86::TAILJMPm64: 15446 llvm_unreachable("TAILJMP64 would not be touched here."); 15447 case X86::TCRETURNdi64: 15448 case X86::TCRETURNri64: 15449 case X86::TCRETURNmi64: 15450 return BB; 15451 case X86::WIN_ALLOCA: 15452 return EmitLoweredWinAlloca(MI, BB); 15453 case X86::SEG_ALLOCA_32: 15454 return EmitLoweredSegAlloca(MI, BB, false); 15455 case X86::SEG_ALLOCA_64: 15456 return EmitLoweredSegAlloca(MI, BB, true); 15457 case X86::TLSCall_32: 15458 case X86::TLSCall_64: 15459 return EmitLoweredTLSCall(MI, BB); 15460 case X86::CMOV_GR8: 15461 case X86::CMOV_FR32: 15462 case X86::CMOV_FR64: 15463 case X86::CMOV_V4F32: 15464 case X86::CMOV_V2F64: 15465 case X86::CMOV_V2I64: 15466 case X86::CMOV_V8F32: 15467 case X86::CMOV_V4F64: 15468 case X86::CMOV_V4I64: 15469 case X86::CMOV_GR16: 15470 case X86::CMOV_GR32: 15471 case X86::CMOV_RFP32: 15472 case X86::CMOV_RFP64: 15473 case X86::CMOV_RFP80: 15474 return EmitLoweredSelect(MI, BB); 15475 15476 case X86::FP32_TO_INT16_IN_MEM: 15477 case X86::FP32_TO_INT32_IN_MEM: 15478 case X86::FP32_TO_INT64_IN_MEM: 15479 case X86::FP64_TO_INT16_IN_MEM: 15480 case X86::FP64_TO_INT32_IN_MEM: 15481 case X86::FP64_TO_INT64_IN_MEM: 15482 case X86::FP80_TO_INT16_IN_MEM: 15483 case X86::FP80_TO_INT32_IN_MEM: 15484 case X86::FP80_TO_INT64_IN_MEM: { 15485 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 15486 DebugLoc DL = MI->getDebugLoc(); 15487 15488 // Change the floating point control register to use "round towards zero" 15489 // mode when truncating to an integer value. 15490 MachineFunction *F = BB->getParent(); 15491 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false); 15492 addFrameReference(BuildMI(*BB, MI, DL, 15493 TII->get(X86::FNSTCW16m)), CWFrameIdx); 15494 15495 // Load the old value of the high byte of the control word... 15496 unsigned OldCW = 15497 F->getRegInfo().createVirtualRegister(&X86::GR16RegClass); 15498 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW), 15499 CWFrameIdx); 15500 15501 // Set the high part to be round to zero... 15502 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx) 15503 .addImm(0xC7F); 15504 15505 // Reload the modified control word now... 
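    // Note on the constant stored above: 0xC7F sets the rounding-control
    // field of the x87 control word (bits 11:10) to 11b, i.e. round toward
    // zero, which is the truncating behaviour FP-to-integer conversion
    // requires. The FLDCW16m below makes that modified control word current.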
15506 addFrameReference(BuildMI(*BB, MI, DL, 15507 TII->get(X86::FLDCW16m)), CWFrameIdx); 15508 15509 // Restore the memory image of control word to original value 15510 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx) 15511 .addReg(OldCW); 15512 15513 // Get the X86 opcode to use. 15514 unsigned Opc; 15515 switch (MI->getOpcode()) { 15516 default: llvm_unreachable("illegal opcode!"); 15517 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break; 15518 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break; 15519 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break; 15520 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break; 15521 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break; 15522 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break; 15523 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break; 15524 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break; 15525 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break; 15526 } 15527 15528 X86AddressMode AM; 15529 MachineOperand &Op = MI->getOperand(0); 15530 if (Op.isReg()) { 15531 AM.BaseType = X86AddressMode::RegBase; 15532 AM.Base.Reg = Op.getReg(); 15533 } else { 15534 AM.BaseType = X86AddressMode::FrameIndexBase; 15535 AM.Base.FrameIndex = Op.getIndex(); 15536 } 15537 Op = MI->getOperand(1); 15538 if (Op.isImm()) 15539 AM.Scale = Op.getImm(); 15540 Op = MI->getOperand(2); 15541 if (Op.isImm()) 15542 AM.IndexReg = Op.getImm(); 15543 Op = MI->getOperand(3); 15544 if (Op.isGlobal()) { 15545 AM.GV = Op.getGlobal(); 15546 } else { 15547 AM.Disp = Op.getImm(); 15548 } 15549 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM) 15550 .addReg(MI->getOperand(X86::AddrNumOperands).getReg()); 15551 15552 // Reload the original control word now. 15553 addFrameReference(BuildMI(*BB, MI, DL, 15554 TII->get(X86::FLDCW16m)), CWFrameIdx); 15555 15556 MI->eraseFromParent(); // The pseudo instruction is gone now. 15557 return BB; 15558 } 15559 // String/text processing lowering. 15560 case X86::PCMPISTRM128REG: 15561 case X86::VPCMPISTRM128REG: 15562 case X86::PCMPISTRM128MEM: 15563 case X86::VPCMPISTRM128MEM: 15564 case X86::PCMPESTRM128REG: 15565 case X86::VPCMPESTRM128REG: 15566 case X86::PCMPESTRM128MEM: 15567 case X86::VPCMPESTRM128MEM: 15568 assert(Subtarget->hasSSE42() && 15569 "Target must have SSE4.2 or AVX features enabled"); 15570 return EmitPCMPSTRM(MI, BB, getTargetMachine().getInstrInfo()); 15571 15572 // String/text processing lowering. 15573 case X86::PCMPISTRIREG: 15574 case X86::VPCMPISTRIREG: 15575 case X86::PCMPISTRIMEM: 15576 case X86::VPCMPISTRIMEM: 15577 case X86::PCMPESTRIREG: 15578 case X86::VPCMPESTRIREG: 15579 case X86::PCMPESTRIMEM: 15580 case X86::VPCMPESTRIMEM: 15581 assert(Subtarget->hasSSE42() && 15582 "Target must have SSE4.2 or AVX features enabled"); 15583 return EmitPCMPSTRI(MI, BB, getTargetMachine().getInstrInfo()); 15584 15585 // Thread synchronization. 15586 case X86::MONITOR: 15587 return EmitMonitor(MI, BB, getTargetMachine().getInstrInfo(), Subtarget); 15588 15589 // xbegin 15590 case X86::XBEGIN: 15591 return EmitXBegin(MI, BB, getTargetMachine().getInstrInfo()); 15592 15593 // Atomic Lowering. 
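  // Sketch of the expansion (EmitAtomicLoadArith builds the actual sequence;
  // details vary by width and operation): pseudos that must also return the
  // old value are lowered to a compare-exchange loop, roughly
  //     mov   eax, [mem]
  //   retry:
  //     mov   reg, eax
  //     <op>  reg, val
  //     lock cmpxchg [mem], reg   ; old value stays in eax
  //     jne   retry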
15594 case X86::ATOMAND8: 15595 case X86::ATOMAND16: 15596 case X86::ATOMAND32: 15597 case X86::ATOMAND64: 15598 // Fall through 15599 case X86::ATOMOR8: 15600 case X86::ATOMOR16: 15601 case X86::ATOMOR32: 15602 case X86::ATOMOR64: 15603 // Fall through 15604 case X86::ATOMXOR16: 15605 case X86::ATOMXOR8: 15606 case X86::ATOMXOR32: 15607 case X86::ATOMXOR64: 15608 // Fall through 15609 case X86::ATOMNAND8: 15610 case X86::ATOMNAND16: 15611 case X86::ATOMNAND32: 15612 case X86::ATOMNAND64: 15613 // Fall through 15614 case X86::ATOMMAX8: 15615 case X86::ATOMMAX16: 15616 case X86::ATOMMAX32: 15617 case X86::ATOMMAX64: 15618 // Fall through 15619 case X86::ATOMMIN8: 15620 case X86::ATOMMIN16: 15621 case X86::ATOMMIN32: 15622 case X86::ATOMMIN64: 15623 // Fall through 15624 case X86::ATOMUMAX8: 15625 case X86::ATOMUMAX16: 15626 case X86::ATOMUMAX32: 15627 case X86::ATOMUMAX64: 15628 // Fall through 15629 case X86::ATOMUMIN8: 15630 case X86::ATOMUMIN16: 15631 case X86::ATOMUMIN32: 15632 case X86::ATOMUMIN64: 15633 return EmitAtomicLoadArith(MI, BB); 15634 15635 // This group does 64-bit operations on a 32-bit host. 15636 case X86::ATOMAND6432: 15637 case X86::ATOMOR6432: 15638 case X86::ATOMXOR6432: 15639 case X86::ATOMNAND6432: 15640 case X86::ATOMADD6432: 15641 case X86::ATOMSUB6432: 15642 case X86::ATOMMAX6432: 15643 case X86::ATOMMIN6432: 15644 case X86::ATOMUMAX6432: 15645 case X86::ATOMUMIN6432: 15646 case X86::ATOMSWAP6432: 15647 return EmitAtomicLoadArith6432(MI, BB); 15648 15649 case X86::VASTART_SAVE_XMM_REGS: 15650 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB); 15651 15652 case X86::VAARG_64: 15653 return EmitVAARG64WithCustomInserter(MI, BB); 15654 15655 case X86::EH_SjLj_SetJmp32: 15656 case X86::EH_SjLj_SetJmp64: 15657 return emitEHSjLjSetJmp(MI, BB); 15658 15659 case X86::EH_SjLj_LongJmp32: 15660 case X86::EH_SjLj_LongJmp64: 15661 return emitEHSjLjLongJmp(MI, BB); 15662 } 15663} 15664 15665//===----------------------------------------------------------------------===// 15666// X86 Optimization Hooks 15667//===----------------------------------------------------------------------===// 15668 15669void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 15670 APInt &KnownZero, 15671 APInt &KnownOne, 15672 const SelectionDAG &DAG, 15673 unsigned Depth) const { 15674 unsigned BitWidth = KnownZero.getBitWidth(); 15675 unsigned Opc = Op.getOpcode(); 15676 assert((Opc >= ISD::BUILTIN_OP_END || 15677 Opc == ISD::INTRINSIC_WO_CHAIN || 15678 Opc == ISD::INTRINSIC_W_CHAIN || 15679 Opc == ISD::INTRINSIC_VOID) && 15680 "Should use MaskedValueIsZero if you don't know whether Op" 15681 " is a target node!"); 15682 15683 KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything. 15684 switch (Opc) { 15685 default: break; 15686 case X86ISD::ADD: 15687 case X86ISD::SUB: 15688 case X86ISD::ADC: 15689 case X86ISD::SBB: 15690 case X86ISD::SMUL: 15691 case X86ISD::UMUL: 15692 case X86ISD::INC: 15693 case X86ISD::DEC: 15694 case X86ISD::OR: 15695 case X86ISD::XOR: 15696 case X86ISD::AND: 15697 // These nodes' second result is a boolean. 
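  // For example, X86ISD::ADD produces (value, EFLAGS); only the second,
  // flag-derived result is a 0/1 boolean. Result 0 carries no extra
  // information here, so the getResNo() check below just breaks; for the
  // boolean result the code falls through to the SETCC case, which marks
  // every bit other than bit 0 as known zero.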
15698 if (Op.getResNo() == 0) 15699 break; 15700 // Fallthrough 15701 case X86ISD::SETCC: 15702 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1); 15703 break; 15704 case ISD::INTRINSIC_WO_CHAIN: { 15705 unsigned IntId = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 15706 unsigned NumLoBits = 0; 15707 switch (IntId) { 15708 default: break; 15709 case Intrinsic::x86_sse_movmsk_ps: 15710 case Intrinsic::x86_avx_movmsk_ps_256: 15711 case Intrinsic::x86_sse2_movmsk_pd: 15712 case Intrinsic::x86_avx_movmsk_pd_256: 15713 case Intrinsic::x86_mmx_pmovmskb: 15714 case Intrinsic::x86_sse2_pmovmskb_128: 15715 case Intrinsic::x86_avx2_pmovmskb: { 15716 // High bits of movmskp{s|d}, pmovmskb are known zero. 15717 switch (IntId) { 15718 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 15719 case Intrinsic::x86_sse_movmsk_ps: NumLoBits = 4; break; 15720 case Intrinsic::x86_avx_movmsk_ps_256: NumLoBits = 8; break; 15721 case Intrinsic::x86_sse2_movmsk_pd: NumLoBits = 2; break; 15722 case Intrinsic::x86_avx_movmsk_pd_256: NumLoBits = 4; break; 15723 case Intrinsic::x86_mmx_pmovmskb: NumLoBits = 8; break; 15724 case Intrinsic::x86_sse2_pmovmskb_128: NumLoBits = 16; break; 15725 case Intrinsic::x86_avx2_pmovmskb: NumLoBits = 32; break; 15726 } 15727 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - NumLoBits); 15728 break; 15729 } 15730 } 15731 break; 15732 } 15733 } 15734} 15735 15736unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op, 15737 unsigned Depth) const { 15738 // SETCC_CARRY sets the dest to ~0 for true or 0 for false. 15739 if (Op.getOpcode() == X86ISD::SETCC_CARRY) 15740 return Op.getValueType().getScalarType().getSizeInBits(); 15741 15742 // Fallback case. 15743 return 1; 15744} 15745 15746/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the 15747/// node is a GlobalAddress + offset. 
15748bool X86TargetLowering::isGAPlusOffset(SDNode *N, 15749 const GlobalValue* &GA, 15750 int64_t &Offset) const { 15751 if (N->getOpcode() == X86ISD::Wrapper) { 15752 if (isa<GlobalAddressSDNode>(N->getOperand(0))) { 15753 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal(); 15754 Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset(); 15755 return true; 15756 } 15757 } 15758 return TargetLowering::isGAPlusOffset(N, GA, Offset); 15759} 15760 15761/// isShuffleHigh128VectorInsertLow - Checks whether the shuffle node is the 15762/// same as extracting the high 128-bit part of 256-bit vector and then 15763/// inserting the result into the low part of a new 256-bit vector 15764static bool isShuffleHigh128VectorInsertLow(ShuffleVectorSDNode *SVOp) { 15765 EVT VT = SVOp->getValueType(0); 15766 unsigned NumElems = VT.getVectorNumElements(); 15767 15768 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u> 15769 for (unsigned i = 0, j = NumElems/2; i != NumElems/2; ++i, ++j) 15770 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) || 15771 SVOp->getMaskElt(j) >= 0) 15772 return false; 15773 15774 return true; 15775} 15776 15777/// isShuffleLow128VectorInsertHigh - Checks whether the shuffle node is the 15778/// same as extracting the low 128-bit part of 256-bit vector and then 15779/// inserting the result into the high part of a new 256-bit vector 15780static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) { 15781 EVT VT = SVOp->getValueType(0); 15782 unsigned NumElems = VT.getVectorNumElements(); 15783 15784 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1> 15785 for (unsigned i = NumElems/2, j = 0; i != NumElems; ++i, ++j) 15786 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) || 15787 SVOp->getMaskElt(j) >= 0) 15788 return false; 15789 15790 return true; 15791} 15792 15793/// PerformShuffleCombine256 - Performs shuffle combines for 256-bit vectors. 15794static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG, 15795 TargetLowering::DAGCombinerInfo &DCI, 15796 const X86Subtarget* Subtarget) { 15797 SDLoc dl(N); 15798 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 15799 SDValue V1 = SVOp->getOperand(0); 15800 SDValue V2 = SVOp->getOperand(1); 15801 EVT VT = SVOp->getValueType(0); 15802 unsigned NumElems = VT.getVectorNumElements(); 15803 15804 if (V1.getOpcode() == ISD::CONCAT_VECTORS && 15805 V2.getOpcode() == ISD::CONCAT_VECTORS) { 15806 // 15807 // 0,0,0,... 15808 // | 15809 // V UNDEF BUILD_VECTOR UNDEF 15810 // \ / \ / 15811 // CONCAT_VECTOR CONCAT_VECTOR 15812 // \ / 15813 // \ / 15814 // RESULT: V + zero extended 15815 // 15816 if (V2.getOperand(0).getOpcode() != ISD::BUILD_VECTOR || 15817 V2.getOperand(1).getOpcode() != ISD::UNDEF || 15818 V1.getOperand(1).getOpcode() != ISD::UNDEF) 15819 return SDValue(); 15820 15821 if (!ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode())) 15822 return SDValue(); 15823 15824 // To match the shuffle mask, the first half of the mask should 15825 // be exactly the first vector, and all the rest a splat with the 15826 // first element of the second one. 15827 for (unsigned i = 0; i != NumElems/2; ++i) 15828 if (!isUndefOrEqual(SVOp->getMaskElt(i), i) || 15829 !isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems)) 15830 return SDValue(); 15831 15832 // If V1 is coming from a vector load then just fold to a VZEXT_LOAD. 
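    // Sketch of the pattern (for v4i64; the mask checked above is, up to
    // undefs, <0, 1, 4, 4>, i.e. first half from V1 and the rest the zero
    // splat):
    //   shuffle (concat (load p), undef), (concat zerovec, undef)
    // which is simply the 128-bit load zero-extended to 256 bits, so it can
    // be represented as a single X86ISD::VZEXT_LOAD from p.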
15833 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(V1.getOperand(0))) { 15834 if (Ld->hasNUsesOfValue(1, 0)) { 15835 SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other); 15836 SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() }; 15837 SDValue ResNode = 15838 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, 15839 array_lengthof(Ops), 15840 Ld->getMemoryVT(), 15841 Ld->getPointerInfo(), 15842 Ld->getAlignment(), 15843 false/*isVolatile*/, true/*ReadMem*/, 15844 false/*WriteMem*/); 15845 15846 // Make sure the newly-created LOAD is in the same position as Ld in 15847 // terms of dependency. We create a TokenFactor for Ld and ResNode, 15848 // and update uses of Ld's output chain to use the TokenFactor. 15849 if (Ld->hasAnyUseOfValue(1)) { 15850 SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 15851 SDValue(Ld, 1), SDValue(ResNode.getNode(), 1)); 15852 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewChain); 15853 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(Ld, 1), 15854 SDValue(ResNode.getNode(), 1)); 15855 } 15856 15857 return DAG.getNode(ISD::BITCAST, dl, VT, ResNode); 15858 } 15859 } 15860 15861 // Emit a zeroed vector and insert the desired subvector on its 15862 // first half. 15863 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl); 15864 SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), 0, DAG, dl); 15865 return DCI.CombineTo(N, InsV); 15866 } 15867 15868 //===--------------------------------------------------------------------===// 15869 // Combine some shuffles into subvector extracts and inserts: 15870 // 15871 15872 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u> 15873 if (isShuffleHigh128VectorInsertLow(SVOp)) { 15874 SDValue V = Extract128BitVector(V1, NumElems/2, DAG, dl); 15875 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, 0, DAG, dl); 15876 return DCI.CombineTo(N, InsV); 15877 } 15878 15879 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1> 15880 if (isShuffleLow128VectorInsertHigh(SVOp)) { 15881 SDValue V = Extract128BitVector(V1, 0, DAG, dl); 15882 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, NumElems/2, DAG, dl); 15883 return DCI.CombineTo(N, InsV); 15884 } 15885 15886 return SDValue(); 15887} 15888 15889/// PerformShuffleCombine - Performs several different shuffle combines. 15890static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG, 15891 TargetLowering::DAGCombinerInfo &DCI, 15892 const X86Subtarget *Subtarget) { 15893 SDLoc dl(N); 15894 EVT VT = N->getValueType(0); 15895 15896 // Don't create instructions with illegal types after legalize types has run. 15897 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 15898 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType())) 15899 return SDValue(); 15900 15901 // Combine 256-bit vector shuffles. This is only profitable when in AVX mode 15902 if (Subtarget->hasFp256() && VT.is256BitVector() && 15903 N->getOpcode() == ISD::VECTOR_SHUFFLE) 15904 return PerformShuffleCombine256(N, DAG, DCI, Subtarget); 15905 15906 // Only handle 128 wide vector from here on. 15907 if (!VT.is128BitVector()) 15908 return SDValue(); 15909 15910 // Combine a vector_shuffle that is equal to build_vector load1, load2, load3, 15911 // load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are 15912 // consecutive, non-overlapping, and in the right order. 
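  // Illustrative example, assuming four adjacent f32 loads feed the node:
  //   t0 = load [p+0], t1 = load [p+4], t2 = load [p+8], t3 = load [p+12]
  //   shuffle/build_vector t0, t1, t2, t3, <0, 1, 2, 3>
  // can be replaced by one v4f32 load from p; EltsFromConsecutiveLoads
  // below does the address checking and the rewrite.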
15913 SmallVector<SDValue, 16> Elts; 15914 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) 15915 Elts.push_back(getShuffleScalarElt(N, i, DAG, 0)); 15916 15917 return EltsFromConsecutiveLoads(VT, Elts, dl, DAG); 15918} 15919 15920/// PerformTruncateCombine - Converts truncate operation to 15921/// a sequence of vector shuffle operations. 15922/// It is possible when we truncate 256-bit vector to 128-bit vector 15923static SDValue PerformTruncateCombine(SDNode *N, SelectionDAG &DAG, 15924 TargetLowering::DAGCombinerInfo &DCI, 15925 const X86Subtarget *Subtarget) { 15926 return SDValue(); 15927} 15928 15929/// XFormVExtractWithShuffleIntoLoad - Check if a vector extract from a target 15930/// specific shuffle of a load can be folded into a single element load. 15931/// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but 15932/// shuffles have been customed lowered so we need to handle those here. 15933static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG, 15934 TargetLowering::DAGCombinerInfo &DCI) { 15935 if (DCI.isBeforeLegalizeOps()) 15936 return SDValue(); 15937 15938 SDValue InVec = N->getOperand(0); 15939 SDValue EltNo = N->getOperand(1); 15940 15941 if (!isa<ConstantSDNode>(EltNo)) 15942 return SDValue(); 15943 15944 EVT VT = InVec.getValueType(); 15945 15946 bool HasShuffleIntoBitcast = false; 15947 if (InVec.getOpcode() == ISD::BITCAST) { 15948 // Don't duplicate a load with other uses. 15949 if (!InVec.hasOneUse()) 15950 return SDValue(); 15951 EVT BCVT = InVec.getOperand(0).getValueType(); 15952 if (BCVT.getVectorNumElements() != VT.getVectorNumElements()) 15953 return SDValue(); 15954 InVec = InVec.getOperand(0); 15955 HasShuffleIntoBitcast = true; 15956 } 15957 15958 if (!isTargetShuffle(InVec.getOpcode())) 15959 return SDValue(); 15960 15961 // Don't duplicate a load with other uses. 15962 if (!InVec.hasOneUse()) 15963 return SDValue(); 15964 15965 SmallVector<int, 16> ShuffleMask; 15966 bool UnaryShuffle; 15967 if (!getTargetShuffleMask(InVec.getNode(), VT.getSimpleVT(), ShuffleMask, 15968 UnaryShuffle)) 15969 return SDValue(); 15970 15971 // Select the input vector, guarding against out of range extract vector. 15972 unsigned NumElems = VT.getVectorNumElements(); 15973 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue(); 15974 int Idx = (Elt > (int)NumElems) ? -1 : ShuffleMask[Elt]; 15975 SDValue LdNode = (Idx < (int)NumElems) ? InVec.getOperand(0) 15976 : InVec.getOperand(1); 15977 15978 // If inputs to shuffle are the same for both ops, then allow 2 uses 15979 unsigned AllowedUses = InVec.getOperand(0) == InVec.getOperand(1) ? 2 : 1; 15980 15981 if (LdNode.getOpcode() == ISD::BITCAST) { 15982 // Don't duplicate a load with other uses. 15983 if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0)) 15984 return SDValue(); 15985 15986 AllowedUses = 1; // only allow 1 load use if we have a bitcast 15987 LdNode = LdNode.getOperand(0); 15988 } 15989 15990 if (!ISD::isNormalLoad(LdNode.getNode())) 15991 return SDValue(); 15992 15993 LoadSDNode *LN0 = cast<LoadSDNode>(LdNode); 15994 15995 if (!LN0 ||!LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile()) 15996 return SDValue(); 15997 15998 if (HasShuffleIntoBitcast) { 15999 // If there's a bitcast before the shuffle, check if the load type and 16000 // alignment is valid. 
16001 unsigned Align = LN0->getAlignment(); 16002 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 16003 unsigned NewAlign = TLI.getDataLayout()-> 16004 getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext())); 16005 16006 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VT)) 16007 return SDValue(); 16008 } 16009 16010 // All checks match so transform back to vector_shuffle so that DAG combiner 16011 // can finish the job 16012 SDLoc dl(N); 16013 16014 // Create shuffle node taking into account the case that its a unary shuffle 16015 SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(VT) : InVec.getOperand(1); 16016 Shuffle = DAG.getVectorShuffle(InVec.getValueType(), dl, 16017 InVec.getOperand(0), Shuffle, 16018 &ShuffleMask[0]); 16019 Shuffle = DAG.getNode(ISD::BITCAST, dl, VT, Shuffle); 16020 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle, 16021 EltNo); 16022} 16023 16024/// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index 16025/// generation and convert it from being a bunch of shuffles and extracts 16026/// to a simple store and scalar loads to extract the elements. 16027static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG, 16028 TargetLowering::DAGCombinerInfo &DCI) { 16029 SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI); 16030 if (NewOp.getNode()) 16031 return NewOp; 16032 16033 SDValue InputVector = N->getOperand(0); 16034 // Detect whether we are trying to convert from mmx to i32 and the bitcast 16035 // from mmx to v2i32 has a single usage. 16036 if (InputVector.getNode()->getOpcode() == llvm::ISD::BITCAST && 16037 InputVector.getNode()->getOperand(0).getValueType() == MVT::x86mmx && 16038 InputVector.hasOneUse() && N->getValueType(0) == MVT::i32) 16039 return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector), 16040 N->getValueType(0), 16041 InputVector.getNode()->getOperand(0)); 16042 16043 // Only operate on vectors of 4 elements, where the alternative shuffling 16044 // gets to be more expensive. 16045 if (InputVector.getValueType() != MVT::v4i32) 16046 return SDValue(); 16047 16048 // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a 16049 // single use which is a sign-extend or zero-extend, and all elements are 16050 // used. 16051 SmallVector<SDNode *, 4> Uses; 16052 unsigned ExtractedElements = 0; 16053 for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(), 16054 UE = InputVector.getNode()->use_end(); UI != UE; ++UI) { 16055 if (UI.getUse().getResNo() != InputVector.getResNo()) 16056 return SDValue(); 16057 16058 SDNode *Extract = *UI; 16059 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 16060 return SDValue(); 16061 16062 if (Extract->getValueType(0) != MVT::i32) 16063 return SDValue(); 16064 if (!Extract->hasOneUse()) 16065 return SDValue(); 16066 if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND && 16067 Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND) 16068 return SDValue(); 16069 if (!isa<ConstantSDNode>(Extract->getOperand(1))) 16070 return SDValue(); 16071 16072 // Record which element was extracted. 16073 ExtractedElements |= 16074 1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue(); 16075 16076 Uses.push_back(Extract); 16077 } 16078 16079 // If not all the elements were used, this may not be worthwhile. 16080 if (ExtractedElements != 15) 16081 return SDValue(); 16082 16083 // Ok, we've now decided to do the transformation. 
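  // Sketch of the rewrite performed below: the v4i32 input is spilled once
  // to a stack temporary, and each of the four extracts is replaced by an
  // i32 load from the lane's 4-byte offset, so the sext/zext users read
  // scalars directly instead of going through a chain of shuffles.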
16084   SDLoc dl(InputVector);
16085
16086   // Store the value to a temporary stack slot.
16087   SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
16088   SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr,
16089                             MachinePointerInfo(), false, false, 0);
16090
16091   // Replace each use (extract) with a load of the appropriate element.
16092   for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
16093        UE = Uses.end(); UI != UE; ++UI) {
16094     SDNode *Extract = *UI;
16095
16096     // Compute the element's address.
16097     SDValue Idx = Extract->getOperand(1);
16098     unsigned EltSize =
16099         InputVector.getValueType().getVectorElementType().getSizeInBits()/8;
16100     uint64_t Offset = EltSize * cast<ConstantSDNode>(Idx)->getZExtValue();
16101     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16102     SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());
16103
16104     SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
16105                                      StackPtr, OffsetVal);
16106
16107     // Load the scalar.
16108     SDValue LoadScalar = DAG.getLoad(Extract->getValueType(0), dl, Ch,
16109                                      ScalarAddr, MachinePointerInfo(),
16110                                      false, false, false, 0);
16111
16112     // Replace the extract with the load.
16113     DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), LoadScalar);
16114   }
16115
16116   // The replacement was made in place; don't return anything.
16117   return SDValue();
16118 }
16119
16120 /// \brief Matches a VSELECT onto min/max or returns 0 if the node doesn't match.
16121 static unsigned matchIntegerMINMAX(SDValue Cond, EVT VT, SDValue LHS,
16122                                    SDValue RHS, SelectionDAG &DAG,
16123                                    const X86Subtarget *Subtarget) {
16124   if (!VT.isVector())
16125     return 0;
16126
16127   switch (VT.getSimpleVT().SimpleTy) {
16128   default: return 0;
16129   case MVT::v32i8:
16130   case MVT::v16i16:
16131   case MVT::v8i32:
16132     if (!Subtarget->hasAVX2())
16133       return 0;
16134   case MVT::v16i8:
16135   case MVT::v8i16:
16136   case MVT::v4i32:
16137     if (!Subtarget->hasSSE2())
16138       return 0;
16139   }
16140
16141   // SSE2 has only a small subset of the operations.
16142   bool hasUnsigned = Subtarget->hasSSE41() ||
16143                      (Subtarget->hasSSE2() && VT == MVT::v16i8);
16144   bool hasSigned = Subtarget->hasSSE41() ||
16145                    (Subtarget->hasSSE2() && VT == MVT::v8i16);
16146
16147   ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
16148
16149   // Check for x CC y ? x : y.
16150   if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
16151       DAG.isEqualTo(RHS, Cond.getOperand(1))) {
16152     switch (CC) {
16153     default: break;
16154     case ISD::SETULT:
16155     case ISD::SETULE:
16156       return hasUnsigned ? X86ISD::UMIN : 0;
16157     case ISD::SETUGT:
16158     case ISD::SETUGE:
16159       return hasUnsigned ? X86ISD::UMAX : 0;
16160     case ISD::SETLT:
16161     case ISD::SETLE:
16162       return hasSigned ? X86ISD::SMIN : 0;
16163     case ISD::SETGT:
16164     case ISD::SETGE:
16165       return hasSigned ? X86ISD::SMAX : 0;
16166     }
16167   // Check for x CC y ? y : x -- a min/max with reversed arms.
16168   } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
16169              DAG.isEqualTo(RHS, Cond.getOperand(0))) {
16170     switch (CC) {
16171     default: break;
16172     case ISD::SETULT:
16173     case ISD::SETULE:
16174       return hasUnsigned ? X86ISD::UMAX : 0;
16175     case ISD::SETUGT:
16176     case ISD::SETUGE:
16177       return hasUnsigned ? X86ISD::UMIN : 0;
16178     case ISD::SETLT:
16179     case ISD::SETLE:
16180       return hasSigned ? X86ISD::SMAX : 0;
16181     case ISD::SETGT:
16182     case ISD::SETGE:
16183       return hasSigned ?
X86ISD::SMIN : 0; 16184 } 16185 } 16186 16187 return 0; 16188} 16189 16190/// PerformSELECTCombine - Do target-specific dag combines on SELECT and VSELECT 16191/// nodes. 16192static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, 16193 TargetLowering::DAGCombinerInfo &DCI, 16194 const X86Subtarget *Subtarget) { 16195 SDLoc DL(N); 16196 SDValue Cond = N->getOperand(0); 16197 // Get the LHS/RHS of the select. 16198 SDValue LHS = N->getOperand(1); 16199 SDValue RHS = N->getOperand(2); 16200 EVT VT = LHS.getValueType(); 16201 16202 // If we have SSE[12] support, try to form min/max nodes. SSE min/max 16203 // instructions match the semantics of the common C idiom x<y?x:y but not 16204 // x<=y?x:y, because of how they handle negative zero (which can be 16205 // ignored in unsafe-math mode). 16206 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() && 16207 VT != MVT::f80 && DAG.getTargetLoweringInfo().isTypeLegal(VT) && 16208 (Subtarget->hasSSE2() || 16209 (Subtarget->hasSSE1() && VT.getScalarType() == MVT::f32))) { 16210 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); 16211 16212 unsigned Opcode = 0; 16213 // Check for x CC y ? x : y. 16214 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) && 16215 DAG.isEqualTo(RHS, Cond.getOperand(1))) { 16216 switch (CC) { 16217 default: break; 16218 case ISD::SETULT: 16219 // Converting this to a min would handle NaNs incorrectly, and swapping 16220 // the operands would cause it to handle comparisons between positive 16221 // and negative zero incorrectly. 16222 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) { 16223 if (!DAG.getTarget().Options.UnsafeFPMath && 16224 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 16225 break; 16226 std::swap(LHS, RHS); 16227 } 16228 Opcode = X86ISD::FMIN; 16229 break; 16230 case ISD::SETOLE: 16231 // Converting this to a min would handle comparisons between positive 16232 // and negative zero incorrectly. 16233 if (!DAG.getTarget().Options.UnsafeFPMath && 16234 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) 16235 break; 16236 Opcode = X86ISD::FMIN; 16237 break; 16238 case ISD::SETULE: 16239 // Converting this to a min would handle both negative zeros and NaNs 16240 // incorrectly, but we can swap the operands to fix both. 16241 std::swap(LHS, RHS); 16242 case ISD::SETOLT: 16243 case ISD::SETLT: 16244 case ISD::SETLE: 16245 Opcode = X86ISD::FMIN; 16246 break; 16247 16248 case ISD::SETOGE: 16249 // Converting this to a max would handle comparisons between positive 16250 // and negative zero incorrectly. 16251 if (!DAG.getTarget().Options.UnsafeFPMath && 16252 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) 16253 break; 16254 Opcode = X86ISD::FMAX; 16255 break; 16256 case ISD::SETUGT: 16257 // Converting this to a max would handle NaNs incorrectly, and swapping 16258 // the operands would cause it to handle comparisons between positive 16259 // and negative zero incorrectly. 16260 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) { 16261 if (!DAG.getTarget().Options.UnsafeFPMath && 16262 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 16263 break; 16264 std::swap(LHS, RHS); 16265 } 16266 Opcode = X86ISD::FMAX; 16267 break; 16268 case ISD::SETUGE: 16269 // Converting this to a max would handle both negative zeros and NaNs 16270 // incorrectly, but we can swap the operands to fix both. 
16271 std::swap(LHS, RHS); 16272 case ISD::SETOGT: 16273 case ISD::SETGT: 16274 case ISD::SETGE: 16275 Opcode = X86ISD::FMAX; 16276 break; 16277 } 16278 // Check for x CC y ? y : x -- a min/max with reversed arms. 16279 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) && 16280 DAG.isEqualTo(RHS, Cond.getOperand(0))) { 16281 switch (CC) { 16282 default: break; 16283 case ISD::SETOGE: 16284 // Converting this to a min would handle comparisons between positive 16285 // and negative zero incorrectly, and swapping the operands would 16286 // cause it to handle NaNs incorrectly. 16287 if (!DAG.getTarget().Options.UnsafeFPMath && 16288 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) { 16289 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 16290 break; 16291 std::swap(LHS, RHS); 16292 } 16293 Opcode = X86ISD::FMIN; 16294 break; 16295 case ISD::SETUGT: 16296 // Converting this to a min would handle NaNs incorrectly. 16297 if (!DAG.getTarget().Options.UnsafeFPMath && 16298 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))) 16299 break; 16300 Opcode = X86ISD::FMIN; 16301 break; 16302 case ISD::SETUGE: 16303 // Converting this to a min would handle both negative zeros and NaNs 16304 // incorrectly, but we can swap the operands to fix both. 16305 std::swap(LHS, RHS); 16306 case ISD::SETOGT: 16307 case ISD::SETGT: 16308 case ISD::SETGE: 16309 Opcode = X86ISD::FMIN; 16310 break; 16311 16312 case ISD::SETULT: 16313 // Converting this to a max would handle NaNs incorrectly. 16314 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 16315 break; 16316 Opcode = X86ISD::FMAX; 16317 break; 16318 case ISD::SETOLE: 16319 // Converting this to a max would handle comparisons between positive 16320 // and negative zero incorrectly, and swapping the operands would 16321 // cause it to handle NaNs incorrectly. 16322 if (!DAG.getTarget().Options.UnsafeFPMath && 16323 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) { 16324 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 16325 break; 16326 std::swap(LHS, RHS); 16327 } 16328 Opcode = X86ISD::FMAX; 16329 break; 16330 case ISD::SETULE: 16331 // Converting this to a max would handle both negative zeros and NaNs 16332 // incorrectly, but we can swap the operands to fix both. 16333 std::swap(LHS, RHS); 16334 case ISD::SETOLT: 16335 case ISD::SETLT: 16336 case ISD::SETLE: 16337 Opcode = X86ISD::FMAX; 16338 break; 16339 } 16340 } 16341 16342 if (Opcode) 16343 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS); 16344 } 16345 16346 if (Subtarget->hasAVX512() && VT.isVector() && 16347 Cond.getValueType().getVectorElementType() == MVT::i1) { 16348 // v16i8 (select v16i1, v16i8, v16i8) does not have a proper 16349 // lowering on AVX-512. In this case we convert it to 16350 // v16i8 (select v16i8, v16i8, v16i8) and use AVX instruction. 16351 // The same situation for all 128 and 256-bit vectors of i8 and i16 16352 EVT OpVT = LHS.getValueType(); 16353 if ((OpVT.is128BitVector() || OpVT.is256BitVector()) && 16354 (OpVT.getVectorElementType() == MVT::i8 || 16355 OpVT.getVectorElementType() == MVT::i16)) { 16356 Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, OpVT, Cond); 16357 DCI.AddToWorklist(Cond.getNode()); 16358 return DAG.getNode(N->getOpcode(), DL, OpVT, Cond, LHS, RHS); 16359 } 16360 else 16361 return SDValue(); 16362 } 16363 // If this is a select between two integer constants, try to do some 16364 // optimizations. 
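  // A few concrete shapes handled below (illustrative):
  //   cond ? 8 : 0    ->  zext(cond) << 3
  //   cond ? C+1 : C  ->  zext(cond) + C
  //   cond ? 7 : 2    ->  difference 5, so lea 2(cond, cond, 4)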
16365 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) { 16366 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS)) 16367 // Don't do this for crazy integer types. 16368 if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) { 16369 // If this is efficiently invertible, canonicalize the LHSC/RHSC values 16370 // so that TrueC (the true value) is larger than FalseC. 16371 bool NeedsCondInvert = false; 16372 16373 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) && 16374 // Efficiently invertible. 16375 (Cond.getOpcode() == ISD::SETCC || // setcc -> invertible. 16376 (Cond.getOpcode() == ISD::XOR && // xor(X, C) -> invertible. 16377 isa<ConstantSDNode>(Cond.getOperand(1))))) { 16378 NeedsCondInvert = true; 16379 std::swap(TrueC, FalseC); 16380 } 16381 16382 // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0. 16383 if (FalseC->getAPIntValue() == 0 && 16384 TrueC->getAPIntValue().isPowerOf2()) { 16385 if (NeedsCondInvert) // Invert the condition if needed. 16386 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond, 16387 DAG.getConstant(1, Cond.getValueType())); 16388 16389 // Zero extend the condition if needed. 16390 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond); 16391 16392 unsigned ShAmt = TrueC->getAPIntValue().logBase2(); 16393 return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond, 16394 DAG.getConstant(ShAmt, MVT::i8)); 16395 } 16396 16397 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)+cst. 16398 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) { 16399 if (NeedsCondInvert) // Invert the condition if needed. 16400 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond, 16401 DAG.getConstant(1, Cond.getValueType())); 16402 16403 // Zero extend the condition if needed. 16404 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, 16405 FalseC->getValueType(0), Cond); 16406 return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 16407 SDValue(FalseC, 0)); 16408 } 16409 16410 // Optimize cases that will turn into an LEA instruction. This requires 16411 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9). 16412 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) { 16413 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue(); 16414 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff; 16415 16416 bool isFastMultiplier = false; 16417 if (Diff < 10) { 16418 switch ((unsigned char)Diff) { 16419 default: break; 16420 case 1: // result = add base, cond 16421 case 2: // result = lea base( , cond*2) 16422 case 3: // result = lea base(cond, cond*2) 16423 case 4: // result = lea base( , cond*4) 16424 case 5: // result = lea base(cond, cond*4) 16425 case 8: // result = lea base( , cond*8) 16426 case 9: // result = lea base(cond, cond*8) 16427 isFastMultiplier = true; 16428 break; 16429 } 16430 } 16431 16432 if (isFastMultiplier) { 16433 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue(); 16434 if (NeedsCondInvert) // Invert the condition if needed. 16435 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond, 16436 DAG.getConstant(1, Cond.getValueType())); 16437 16438 // Zero extend the condition if needed. 16439 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0), 16440 Cond); 16441 // Scale the condition by the difference. 16442 if (Diff != 1) 16443 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond, 16444 DAG.getConstant(Diff, Cond.getValueType())); 16445 16446 // Add the base if non-zero. 
16447           if (FalseC->getAPIntValue() != 0)
16448             Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
16449                                SDValue(FalseC, 0));
16450           return Cond;
16451         }
16452       }
16453     }
16454   }
16455
16456   // Canonicalize max and min:
16457   // (x > y) ? x : y -> (x >= y) ? x : y
16458   // (x < y) ? x : y -> (x <= y) ? x : y
16459   // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
16460   // the need for an extra compare
16461   // against zero. e.g.
16462   // (x - y) > 0 ? (x - y) : 0 -> (x - y) >= 0 ? (x - y) : 0
16463   //   subl   %esi, %edi
16464   //   testl  %edi, %edi
16465   //   movl   $0, %eax
16466   //   cmovgl %edi, %eax
16467   // =>
16468   //   xorl   %eax, %eax
16469   //   subl   %esi, %edi
16470   //   cmovsl %eax, %edi
16471   if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
16472       DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
16473       DAG.isEqualTo(RHS, Cond.getOperand(1))) {
16474     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
16475     switch (CC) {
16476     default: break;
16477     case ISD::SETLT:
16478     case ISD::SETGT: {
16479       ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
16480       Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
16481                           Cond.getOperand(0), Cond.getOperand(1), NewCC);
16482       return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS);
16483     }
16484     }
16485   }
16486
16487   // Match VSELECTs into subs with unsigned saturation.
16488   if (!DCI.isBeforeLegalize() &&
16489       N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
16490       // psubus is available in SSE2 and AVX2 for i8 and i16 vectors.
16491       ((Subtarget->hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) ||
16492        (Subtarget->hasAVX2() && (VT == MVT::v32i8 || VT == MVT::v16i16)))) {
16493     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
16494
16495     // Check if one of the arms of the VSELECT is a zero vector. If it's on the
16496     // left side invert the predicate to simplify logic below.
16497     SDValue Other;
16498     if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
16499       Other = RHS;
16500       CC = ISD::getSetCCInverse(CC, true);
16501     } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
16502       Other = LHS;
16503     }
16504
16505     if (Other.getNode() && Other->getNumOperands() == 2 &&
16506         DAG.isEqualTo(Other->getOperand(0), Cond.getOperand(0))) {
16507       SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
16508       SDValue CondRHS = Cond->getOperand(1);
16509
16510       // Look for a general sub with unsigned saturation first.
16511       // x >= y ? x-y : 0 --> subus x, y
16512       // x > y ? x-y : 0 --> subus x, y
16513       if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
16514           Other->getOpcode() == ISD::SUB && DAG.isEqualTo(OpRHS, CondRHS))
16515         return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, OpRHS);
16516
16517       // If the RHS is a constant we have to reverse the const canonicalization.
16518       // x > C-1 ? x+-C : 0 --> subus x, C
16519       if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
16520           isSplatVector(CondRHS.getNode()) && isSplatVector(OpRHS.getNode())) {
16521         APInt A = cast<ConstantSDNode>(OpRHS.getOperand(0))->getAPIntValue();
16522         if (CondRHS.getConstantOperandVal(0) == -A-1)
16523           return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS,
16524                              DAG.getConstant(-A, VT));
16525       }
16526
16527       // Another special case: If C was a sign bit, the sub has been
16528       // canonicalized into a xor.
16529       // FIXME: Would it be better to use ComputeMaskedBits to determine whether
16530       // it's safe to decanonicalize the xor?
16531       // x s< 0 ?
x^C : 0 --> subus x, C 16532 if (CC == ISD::SETLT && Other->getOpcode() == ISD::XOR && 16533 ISD::isBuildVectorAllZeros(CondRHS.getNode()) && 16534 isSplatVector(OpRHS.getNode())) { 16535 APInt A = cast<ConstantSDNode>(OpRHS.getOperand(0))->getAPIntValue(); 16536 if (A.isSignBit()) 16537 return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, OpRHS); 16538 } 16539 } 16540 } 16541 16542 // Try to match a min/max vector operation. 16543 if (!DCI.isBeforeLegalize() && 16544 N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC) 16545 if (unsigned Op = matchIntegerMINMAX(Cond, VT, LHS, RHS, DAG, Subtarget)) 16546 return DAG.getNode(Op, DL, N->getValueType(0), LHS, RHS); 16547 16548 // Simplify vector selection if the selector will be produced by CMPP*/PCMP*. 16549 if (!DCI.isBeforeLegalize() && N->getOpcode() == ISD::VSELECT && 16550 Cond.getOpcode() == ISD::SETCC) { 16551 16552 assert(Cond.getValueType().isVector() && 16553 "vector select expects a vector selector!"); 16554 16555 EVT IntVT = Cond.getValueType(); 16556 bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode()); 16557 bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode()); 16558 16559 if (!TValIsAllOnes && !FValIsAllZeros) { 16560 // Try invert the condition if true value is not all 1s and false value 16561 // is not all 0s. 16562 bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode()); 16563 bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode()); 16564 16565 if (TValIsAllZeros || FValIsAllOnes) { 16566 SDValue CC = Cond.getOperand(2); 16567 ISD::CondCode NewCC = 16568 ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(), 16569 Cond.getOperand(0).getValueType().isInteger()); 16570 Cond = DAG.getSetCC(DL, IntVT, Cond.getOperand(0), Cond.getOperand(1), NewCC); 16571 std::swap(LHS, RHS); 16572 TValIsAllOnes = FValIsAllOnes; 16573 FValIsAllZeros = TValIsAllZeros; 16574 } 16575 } 16576 16577 if (TValIsAllOnes || FValIsAllZeros) { 16578 SDValue Ret; 16579 16580 if (TValIsAllOnes && FValIsAllZeros) 16581 Ret = Cond; 16582 else if (TValIsAllOnes) 16583 Ret = DAG.getNode(ISD::OR, DL, IntVT, Cond, 16584 DAG.getNode(ISD::BITCAST, DL, IntVT, RHS)); 16585 else if (FValIsAllZeros) 16586 Ret = DAG.getNode(ISD::AND, DL, IntVT, Cond, 16587 DAG.getNode(ISD::BITCAST, DL, IntVT, LHS)); 16588 16589 return DAG.getNode(ISD::BITCAST, DL, VT, Ret); 16590 } 16591 } 16592 16593 // If we know that this node is legal then we know that it is going to be 16594 // matched by one of the SSE/AVX BLEND instructions. These instructions only 16595 // depend on the highest bit in each word. Try to use SimplifyDemandedBits 16596 // to simplify previous instructions. 16597 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 16598 if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() && 16599 !DCI.isBeforeLegalize() && TLI.isOperationLegal(ISD::VSELECT, VT)) { 16600 unsigned BitWidth = Cond.getValueType().getScalarType().getSizeInBits(); 16601 16602 // Don't optimize vector selects that map to mask-registers. 
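  // Rationale (illustrative): a v4i32 VSELECT lowered to BLENDVPS only
  // reads bit 31 of each condition element, so the DemandedMask built below
  // keeps just that sign bit; the BitWidth == 1 early-out right below
  // leaves AVX-512 style i1 mask vectors alone, where this does not apply.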
16603 if (BitWidth == 1) 16604 return SDValue(); 16605 16606 assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size"); 16607 APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 1); 16608 16609 APInt KnownZero, KnownOne; 16610 TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(), 16611 DCI.isBeforeLegalizeOps()); 16612 if (TLO.ShrinkDemandedConstant(Cond, DemandedMask) || 16613 TLI.SimplifyDemandedBits(Cond, DemandedMask, KnownZero, KnownOne, TLO)) 16614 DCI.CommitTargetLoweringOpt(TLO); 16615 } 16616 16617 return SDValue(); 16618} 16619 16620// Check whether a boolean test is testing a boolean value generated by 16621// X86ISD::SETCC. If so, return the operand of that SETCC and proper condition 16622// code. 16623// 16624// Simplify the following patterns: 16625// (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or 16626// (Op (CMP (SETCC Cond EFLAGS) 0) NEQ) 16627// to (Op EFLAGS Cond) 16628// 16629// (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or 16630// (Op (CMP (SETCC Cond EFLAGS) 1) NEQ) 16631// to (Op EFLAGS !Cond) 16632// 16633// where Op could be BRCOND or CMOV. 16634// 16635static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) { 16636 // Quit if not CMP and SUB with its value result used. 16637 if (Cmp.getOpcode() != X86ISD::CMP && 16638 (Cmp.getOpcode() != X86ISD::SUB || Cmp.getNode()->hasAnyUseOfValue(0))) 16639 return SDValue(); 16640 16641 // Quit if not used as a boolean value. 16642 if (CC != X86::COND_E && CC != X86::COND_NE) 16643 return SDValue(); 16644 16645 // Check CMP operands. One of them should be 0 or 1 and the other should be 16646 // an SetCC or extended from it. 16647 SDValue Op1 = Cmp.getOperand(0); 16648 SDValue Op2 = Cmp.getOperand(1); 16649 16650 SDValue SetCC; 16651 const ConstantSDNode* C = 0; 16652 bool needOppositeCond = (CC == X86::COND_E); 16653 bool checkAgainstTrue = false; // Is it a comparison against 1? 16654 16655 if ((C = dyn_cast<ConstantSDNode>(Op1))) 16656 SetCC = Op2; 16657 else if ((C = dyn_cast<ConstantSDNode>(Op2))) 16658 SetCC = Op1; 16659 else // Quit if all operands are not constants. 16660 return SDValue(); 16661 16662 if (C->getZExtValue() == 1) { 16663 needOppositeCond = !needOppositeCond; 16664 checkAgainstTrue = true; 16665 } else if (C->getZExtValue() != 0) 16666 // Quit if the constant is neither 0 or 1. 16667 return SDValue(); 16668 16669 bool truncatedToBoolWithAnd = false; 16670 // Skip (zext $x), (trunc $x), or (and $x, 1) node. 16671 while (SetCC.getOpcode() == ISD::ZERO_EXTEND || 16672 SetCC.getOpcode() == ISD::TRUNCATE || 16673 SetCC.getOpcode() == ISD::AND) { 16674 if (SetCC.getOpcode() == ISD::AND) { 16675 int OpIdx = -1; 16676 ConstantSDNode *CS; 16677 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(0))) && 16678 CS->getZExtValue() == 1) 16679 OpIdx = 1; 16680 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(1))) && 16681 CS->getZExtValue() == 1) 16682 OpIdx = 0; 16683 if (OpIdx == -1) 16684 break; 16685 SetCC = SetCC.getOperand(OpIdx); 16686 truncatedToBoolWithAnd = true; 16687 } else 16688 SetCC = SetCC.getOperand(0); 16689 } 16690 16691 switch (SetCC.getOpcode()) { 16692 case X86ISD::SETCC_CARRY: 16693 // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to 16694 // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1, 16695 // i.e. it's a comparison against true but the result of SETCC_CARRY is not 16696 // truncated to i1 using 'and'. 
16697 if (checkAgainstTrue && !truncatedToBoolWithAnd) 16698 break; 16699 assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B && 16700 "Invalid use of SETCC_CARRY!"); 16701 // FALL THROUGH 16702 case X86ISD::SETCC: 16703 // Set the condition code or opposite one if necessary. 16704 CC = X86::CondCode(SetCC.getConstantOperandVal(0)); 16705 if (needOppositeCond) 16706 CC = X86::GetOppositeBranchCondition(CC); 16707 return SetCC.getOperand(1); 16708 case X86ISD::CMOV: { 16709 // Check whether false/true value has canonical one, i.e. 0 or 1. 16710 ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0)); 16711 ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1)); 16712 // Quit if true value is not a constant. 16713 if (!TVal) 16714 return SDValue(); 16715 // Quit if false value is not a constant. 16716 if (!FVal) { 16717 SDValue Op = SetCC.getOperand(0); 16718 // Skip 'zext' or 'trunc' node. 16719 if (Op.getOpcode() == ISD::ZERO_EXTEND || 16720 Op.getOpcode() == ISD::TRUNCATE) 16721 Op = Op.getOperand(0); 16722 // A special case for rdrand/rdseed, where 0 is set if false cond is 16723 // found. 16724 if ((Op.getOpcode() != X86ISD::RDRAND && 16725 Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0) 16726 return SDValue(); 16727 } 16728 // Quit if false value is not the constant 0 or 1. 16729 bool FValIsFalse = true; 16730 if (FVal && FVal->getZExtValue() != 0) { 16731 if (FVal->getZExtValue() != 1) 16732 return SDValue(); 16733 // If FVal is 1, opposite cond is needed. 16734 needOppositeCond = !needOppositeCond; 16735 FValIsFalse = false; 16736 } 16737 // Quit if TVal is not the constant opposite of FVal. 16738 if (FValIsFalse && TVal->getZExtValue() != 1) 16739 return SDValue(); 16740 if (!FValIsFalse && TVal->getZExtValue() != 0) 16741 return SDValue(); 16742 CC = X86::CondCode(SetCC.getConstantOperandVal(2)); 16743 if (needOppositeCond) 16744 CC = X86::GetOppositeBranchCondition(CC); 16745 return SetCC.getOperand(3); 16746 } 16747 } 16748 16749 return SDValue(); 16750} 16751 16752/// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL] 16753static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG, 16754 TargetLowering::DAGCombinerInfo &DCI, 16755 const X86Subtarget *Subtarget) { 16756 SDLoc DL(N); 16757 16758 // If the flag operand isn't dead, don't touch this CMOV. 16759 if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty()) 16760 return SDValue(); 16761 16762 SDValue FalseOp = N->getOperand(0); 16763 SDValue TrueOp = N->getOperand(1); 16764 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2); 16765 SDValue Cond = N->getOperand(3); 16766 16767 if (CC == X86::COND_E || CC == X86::COND_NE) { 16768 switch (Cond.getOpcode()) { 16769 default: break; 16770 case X86ISD::BSR: 16771 case X86ISD::BSF: 16772 // If operand of BSR / BSF are proven never zero, then ZF cannot be set. 16773 if (DAG.isKnownNeverZero(Cond.getOperand(0))) 16774 return (CC == X86::COND_E) ? FalseOp : TrueOp; 16775 } 16776 } 16777 16778 SDValue Flags; 16779 16780 Flags = checkBoolTestSetCCCombine(Cond, CC); 16781 if (Flags.getNode() && 16782 // Extra check as FCMOV only supports a subset of X86 cond. 
16783 (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC))) { 16784 SDValue Ops[] = { FalseOp, TrueOp, 16785 DAG.getConstant(CC, MVT::i8), Flags }; 16786 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), 16787 Ops, array_lengthof(Ops)); 16788 } 16789 16790 // If this is a select between two integer constants, try to do some 16791 // optimizations. Note that the operands are ordered the opposite of SELECT 16792 // operands. 16793 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) { 16794 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) { 16795 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is 16796 // larger than FalseC (the false value). 16797 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) { 16798 CC = X86::GetOppositeBranchCondition(CC); 16799 std::swap(TrueC, FalseC); 16800 std::swap(TrueOp, FalseOp); 16801 } 16802 16803 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0. 16804 // This is efficient for any integer data type (including i8/i16) and 16805 // shift amount. 16806 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) { 16807 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 16808 DAG.getConstant(CC, MVT::i8), Cond); 16809 16810 // Zero extend the condition if needed. 16811 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond); 16812 16813 unsigned ShAmt = TrueC->getAPIntValue().logBase2(); 16814 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond, 16815 DAG.getConstant(ShAmt, MVT::i8)); 16816 if (N->getNumValues() == 2) // Dead flag value? 16817 return DCI.CombineTo(N, Cond, SDValue()); 16818 return Cond; 16819 } 16820 16821 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)+cst. This is efficient 16822 // for any integer data type, including i8/i16. 16823 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) { 16824 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 16825 DAG.getConstant(CC, MVT::i8), Cond); 16826 16827 // Zero extend the condition if needed. 16828 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, 16829 FalseC->getValueType(0), Cond); 16830 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 16831 SDValue(FalseC, 0)); 16832 16833 if (N->getNumValues() == 2) // Dead flag value? 16834 return DCI.CombineTo(N, Cond, SDValue()); 16835 return Cond; 16836 } 16837 16838 // Optimize cases that will turn into an LEA instruction. This requires 16839 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9). 16840 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) { 16841 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue(); 16842 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff; 16843 16844 bool isFastMultiplier = false; 16845 if (Diff < 10) { 16846 switch ((unsigned char)Diff) { 16847 default: break; 16848 case 1: // result = add base, cond 16849 case 2: // result = lea base( , cond*2) 16850 case 3: // result = lea base(cond, cond*2) 16851 case 4: // result = lea base( , cond*4) 16852 case 5: // result = lea base(cond, cond*4) 16853 case 8: // result = lea base( , cond*8) 16854 case 9: // result = lea base(cond, cond*8) 16855 isFastMultiplier = true; 16856 break; 16857 } 16858 } 16859 16860 if (isFastMultiplier) { 16861 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue(); 16862 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 16863 DAG.getConstant(CC, MVT::i8), Cond); 16864 // Zero extend the condition if needed. 
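// Worked example (illustrative): for (cond ? 14 : 5) the difference is 9,
// a "fast" multiplier, so the sequence below computes 5 + 9*zext(setcc),
// which the addressing-mode matcher can fold into a single
// "lea r, [cond + cond*8 + 5]" after the SETCC/MOVZX.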
16865 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0), 16866 Cond); 16867 // Scale the condition by the difference. 16868 if (Diff != 1) 16869 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond, 16870 DAG.getConstant(Diff, Cond.getValueType())); 16871 16872 // Add the base if non-zero. 16873 if (FalseC->getAPIntValue() != 0) 16874 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 16875 SDValue(FalseC, 0)); 16876 if (N->getNumValues() == 2) // Dead flag value? 16877 return DCI.CombineTo(N, Cond, SDValue()); 16878 return Cond; 16879 } 16880 } 16881 } 16882 } 16883 16884 // Handle these cases: 16885 // (select (x != c), e, c) -> select (x != c), e, x), 16886 // (select (x == c), c, e) -> select (x == c), x, e) 16887 // where the c is an integer constant, and the "select" is the combination 16888 // of CMOV and CMP. 16889 // 16890 // The rationale for this change is that the conditional-move from a constant 16891 // needs two instructions, however, conditional-move from a register needs 16892 // only one instruction. 16893 // 16894 // CAVEAT: By replacing a constant with a symbolic value, it may obscure 16895 // some instruction-combining opportunities. This opt needs to be 16896 // postponed as late as possible. 16897 // 16898 if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) { 16899 // the DCI.xxxx conditions are provided to postpone the optimization as 16900 // late as possible. 16901 16902 ConstantSDNode *CmpAgainst = 0; 16903 if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) && 16904 (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) && 16905 !isa<ConstantSDNode>(Cond.getOperand(0))) { 16906 16907 if (CC == X86::COND_NE && 16908 CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) { 16909 CC = X86::GetOppositeBranchCondition(CC); 16910 std::swap(TrueOp, FalseOp); 16911 } 16912 16913 if (CC == X86::COND_E && 16914 CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) { 16915 SDValue Ops[] = { FalseOp, Cond.getOperand(0), 16916 DAG.getConstant(CC, MVT::i8), Cond }; 16917 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList (), Ops, 16918 array_lengthof(Ops)); 16919 } 16920 } 16921 } 16922 16923 return SDValue(); 16924} 16925 16926/// PerformMulCombine - Optimize a single multiply with constant into two 16927/// in order to implement it with two cheaper instructions, e.g. 16928/// LEA + SHL, LEA + LEA. 
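/// For example (illustrative): a multiply by 45 becomes (x*9)*5, i.e. two
/// LEAs (lea r,[x+x*8]; lea r,[r+r*4]), while a multiply by 40 becomes
/// (x<<3)*5, i.e. a SHL followed by one LEA.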
16929static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG, 16930 TargetLowering::DAGCombinerInfo &DCI) { 16931 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 16932 return SDValue(); 16933 16934 EVT VT = N->getValueType(0); 16935 if (VT != MVT::i64) 16936 return SDValue(); 16937 16938 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 16939 if (!C) 16940 return SDValue(); 16941 uint64_t MulAmt = C->getZExtValue(); 16942 if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9) 16943 return SDValue(); 16944 16945 uint64_t MulAmt1 = 0; 16946 uint64_t MulAmt2 = 0; 16947 if ((MulAmt % 9) == 0) { 16948 MulAmt1 = 9; 16949 MulAmt2 = MulAmt / 9; 16950 } else if ((MulAmt % 5) == 0) { 16951 MulAmt1 = 5; 16952 MulAmt2 = MulAmt / 5; 16953 } else if ((MulAmt % 3) == 0) { 16954 MulAmt1 = 3; 16955 MulAmt2 = MulAmt / 3; 16956 } 16957 if (MulAmt2 && 16958 (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){ 16959 SDLoc DL(N); 16960 16961 if (isPowerOf2_64(MulAmt2) && 16962 !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD)) 16963 // If second multiplifer is pow2, issue it first. We want the multiply by 16964 // 3, 5, or 9 to be folded into the addressing mode unless the lone use 16965 // is an add. 16966 std::swap(MulAmt1, MulAmt2); 16967 16968 SDValue NewMul; 16969 if (isPowerOf2_64(MulAmt1)) 16970 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0), 16971 DAG.getConstant(Log2_64(MulAmt1), MVT::i8)); 16972 else 16973 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0), 16974 DAG.getConstant(MulAmt1, VT)); 16975 16976 if (isPowerOf2_64(MulAmt2)) 16977 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul, 16978 DAG.getConstant(Log2_64(MulAmt2), MVT::i8)); 16979 else 16980 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul, 16981 DAG.getConstant(MulAmt2, VT)); 16982 16983 // Do not add new nodes to DAG combiner worklist. 16984 DCI.CombineTo(N, NewMul, false); 16985 } 16986 return SDValue(); 16987} 16988 16989static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) { 16990 SDValue N0 = N->getOperand(0); 16991 SDValue N1 = N->getOperand(1); 16992 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 16993 EVT VT = N0.getValueType(); 16994 16995 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2)) 16996 // since the result of setcc_c is all zero's or all ones. 16997 if (VT.isInteger() && !VT.isVector() && 16998 N1C && N0.getOpcode() == ISD::AND && 16999 N0.getOperand(1).getOpcode() == ISD::Constant) { 17000 SDValue N00 = N0.getOperand(0); 17001 if (N00.getOpcode() == X86ISD::SETCC_CARRY || 17002 ((N00.getOpcode() == ISD::ANY_EXTEND || 17003 N00.getOpcode() == ISD::ZERO_EXTEND) && 17004 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY)) { 17005 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 17006 APInt ShAmt = N1C->getAPIntValue(); 17007 Mask = Mask.shl(ShAmt); 17008 if (Mask != 0) 17009 return DAG.getNode(ISD::AND, SDLoc(N), VT, 17010 N00, DAG.getConstant(Mask, VT)); 17011 } 17012 } 17013 17014 // Hardware support for vector shifts is sparse which makes us scalarize the 17015 // vector operations in many cases. Also, on sandybridge ADD is faster than 17016 // shl. 17017 // (shl V, 1) -> add V,V 17018 if (isSplatVector(N1.getNode())) { 17019 assert(N0.getValueType().isVector() && "Invalid vector shift type"); 17020 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(0)); 17021 // We shift all of the values by one. 
In many cases we do not have 17022 // hardware support for this operation. This is better expressed as an ADD 17023 // of two values. 17024 if (N1C && (1 == N1C->getZExtValue())) { 17025 return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0); 17026 } 17027 } 17028 17029 return SDValue(); 17030} 17031 17032/// \brief Returns a vector of 0s if the node in input is a vector logical 17033/// shift by a constant amount which is known to be bigger than or equal 17034/// to the vector element size in bits. 17035static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG, 17036 const X86Subtarget *Subtarget) { 17037 EVT VT = N->getValueType(0); 17038 17039 if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 && 17040 (!Subtarget->hasInt256() || 17041 (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16))) 17042 return SDValue(); 17043 17044 SDValue Amt = N->getOperand(1); 17045 SDLoc DL(N); 17046 if (isSplatVector(Amt.getNode())) { 17047 SDValue SclrAmt = Amt->getOperand(0); 17048 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(SclrAmt)) { 17049 APInt ShiftAmt = C->getAPIntValue(); 17050 unsigned MaxAmount = VT.getVectorElementType().getSizeInBits(); 17051 17052 // SSE2/AVX2 logical shifts always return a vector of 0s 17053 // if the shift amount is bigger than or equal to 17054 // the element size. The constant shift amount will be 17055 // encoded as a 8-bit immediate. 17056 if (ShiftAmt.trunc(8).uge(MaxAmount)) 17057 return getZeroVector(VT, Subtarget, DAG, DL); 17058 } 17059 } 17060 17061 return SDValue(); 17062} 17063 17064/// PerformShiftCombine - Combine shifts. 17065static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG, 17066 TargetLowering::DAGCombinerInfo &DCI, 17067 const X86Subtarget *Subtarget) { 17068 if (N->getOpcode() == ISD::SHL) { 17069 SDValue V = PerformSHLCombine(N, DAG); 17070 if (V.getNode()) return V; 17071 } 17072 17073 if (N->getOpcode() != ISD::SRA) { 17074 // Try to fold this logical shift into a zero vector. 17075 SDValue V = performShiftToAllZeros(N, DAG, Subtarget); 17076 if (V.getNode()) return V; 17077 } 17078 17079 return SDValue(); 17080} 17081 17082// CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..)) 17083// where both setccs reference the same FP CMP, and rewrite for CMPEQSS 17084// and friends. Likewise for OR -> CMPNEQSS. 17085static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG, 17086 TargetLowering::DAGCombinerInfo &DCI, 17087 const X86Subtarget *Subtarget) { 17088 unsigned opcode; 17089 17090 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but 17091 // we're requiring SSE2 for both. 17092 if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) { 17093 SDValue N0 = N->getOperand(0); 17094 SDValue N1 = N->getOperand(1); 17095 SDValue CMP0 = N0->getOperand(1); 17096 SDValue CMP1 = N1->getOperand(1); 17097 SDLoc DL(N); 17098 17099 // The SETCCs should both refer to the same CMP. 
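// Typical input (illustrative): scalar FP equality such as (a == b), which
// lowers to (and (setcc E, (cmp a, b)), (setcc NP, (cmp a, b))) because
// UCOMISS/UCOMISD report "equal" as ZF=1 with PF=0. Rewriting that pair as
// a single CMPEQSS/CMPEQSD plus a mask removes both SETcc instructions.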
17100 if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1) 17101 return SDValue(); 17102 17103 SDValue CMP00 = CMP0->getOperand(0); 17104 SDValue CMP01 = CMP0->getOperand(1); 17105 EVT VT = CMP00.getValueType(); 17106 17107 if (VT == MVT::f32 || VT == MVT::f64) { 17108 bool ExpectingFlags = false; 17109 // Check for any users that want flags: 17110 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end(); 17111 !ExpectingFlags && UI != UE; ++UI) 17112 switch (UI->getOpcode()) { 17113 default: 17114 case ISD::BR_CC: 17115 case ISD::BRCOND: 17116 case ISD::SELECT: 17117 ExpectingFlags = true; 17118 break; 17119 case ISD::CopyToReg: 17120 case ISD::SIGN_EXTEND: 17121 case ISD::ZERO_EXTEND: 17122 case ISD::ANY_EXTEND: 17123 break; 17124 } 17125 17126 if (!ExpectingFlags) { 17127 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0); 17128 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0); 17129 17130 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) { 17131 X86::CondCode tmp = cc0; 17132 cc0 = cc1; 17133 cc1 = tmp; 17134 } 17135 17136 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) || 17137 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) { 17138 bool is64BitFP = (CMP00.getValueType() == MVT::f64); 17139 X86ISD::NodeType NTOperator = is64BitFP ? 17140 X86ISD::FSETCCsd : X86ISD::FSETCCss; 17141 // FIXME: need symbolic constants for these magic numbers. 17142 // See X86ATTInstPrinter.cpp:printSSECC(). 17143 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4; 17144 SDValue OnesOrZeroesF = DAG.getNode(NTOperator, DL, MVT::f32, CMP00, CMP01, 17145 DAG.getConstant(x86cc, MVT::i8)); 17146 SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, MVT::i32, 17147 OnesOrZeroesF); 17148 SDValue ANDed = DAG.getNode(ISD::AND, DL, MVT::i32, OnesOrZeroesI, 17149 DAG.getConstant(1, MVT::i32)); 17150 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ANDed); 17151 return OneBitOfTruth; 17152 } 17153 } 17154 } 17155 } 17156 return SDValue(); 17157} 17158 17159/// CanFoldXORWithAllOnes - Test whether the XOR operand is a AllOnes vector 17160/// so it can be folded inside ANDNP. 17161static bool CanFoldXORWithAllOnes(const SDNode *N) { 17162 EVT VT = N->getValueType(0); 17163 17164 // Match direct AllOnes for 128 and 256-bit vectors 17165 if (ISD::isBuildVectorAllOnes(N)) 17166 return true; 17167 17168 // Look through a bit convert. 17169 if (N->getOpcode() == ISD::BITCAST) 17170 N = N->getOperand(0).getNode(); 17171 17172 // Sometimes the operand may come from a insert_subvector building a 256-bit 17173 // allones vector 17174 if (VT.is256BitVector() && 17175 N->getOpcode() == ISD::INSERT_SUBVECTOR) { 17176 SDValue V1 = N->getOperand(0); 17177 SDValue V2 = N->getOperand(1); 17178 17179 if (V1.getOpcode() == ISD::INSERT_SUBVECTOR && 17180 V1.getOperand(0).getOpcode() == ISD::UNDEF && 17181 ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) && 17182 ISD::isBuildVectorAllOnes(V2.getNode())) 17183 return true; 17184 } 17185 17186 return false; 17187} 17188 17189// On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized 17190// register. In most cases we actually compare or select YMM-sized registers 17191// and mixing the two types creates horrible code. This method optimizes 17192// some of the transition sequences. 
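// For example (illustrative), (v8i32 zext (and (trunc x), (trunc y))) is
// rewritten below as the wide (and x, y) followed by a mask down to the
// narrow element width, so the logic stays on YMM-sized values instead of
// bouncing through an XMM-sized temporary.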
17193static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG, 17194 TargetLowering::DAGCombinerInfo &DCI, 17195 const X86Subtarget *Subtarget) { 17196 EVT VT = N->getValueType(0); 17197 if (!VT.is256BitVector()) 17198 return SDValue(); 17199 17200 assert((N->getOpcode() == ISD::ANY_EXTEND || 17201 N->getOpcode() == ISD::ZERO_EXTEND || 17202 N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node"); 17203 17204 SDValue Narrow = N->getOperand(0); 17205 EVT NarrowVT = Narrow->getValueType(0); 17206 if (!NarrowVT.is128BitVector()) 17207 return SDValue(); 17208 17209 if (Narrow->getOpcode() != ISD::XOR && 17210 Narrow->getOpcode() != ISD::AND && 17211 Narrow->getOpcode() != ISD::OR) 17212 return SDValue(); 17213 17214 SDValue N0 = Narrow->getOperand(0); 17215 SDValue N1 = Narrow->getOperand(1); 17216 SDLoc DL(Narrow); 17217 17218 // The Left side has to be a trunc. 17219 if (N0.getOpcode() != ISD::TRUNCATE) 17220 return SDValue(); 17221 17222 // The type of the truncated inputs. 17223 EVT WideVT = N0->getOperand(0)->getValueType(0); 17224 if (WideVT != VT) 17225 return SDValue(); 17226 17227 // The right side has to be a 'trunc' or a constant vector. 17228 bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE; 17229 bool RHSConst = (isSplatVector(N1.getNode()) && 17230 isa<ConstantSDNode>(N1->getOperand(0))); 17231 if (!RHSTrunc && !RHSConst) 17232 return SDValue(); 17233 17234 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 17235 17236 if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), WideVT)) 17237 return SDValue(); 17238 17239 // Set N0 and N1 to hold the inputs to the new wide operation. 17240 N0 = N0->getOperand(0); 17241 if (RHSConst) { 17242 N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT.getScalarType(), 17243 N1->getOperand(0)); 17244 SmallVector<SDValue, 8> C(WideVT.getVectorNumElements(), N1); 17245 N1 = DAG.getNode(ISD::BUILD_VECTOR, DL, WideVT, &C[0], C.size()); 17246 } else if (RHSTrunc) { 17247 N1 = N1->getOperand(0); 17248 } 17249 17250 // Generate the wide operation. 
17251 SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, WideVT, N0, N1); 17252 unsigned Opcode = N->getOpcode(); 17253 switch (Opcode) { 17254 case ISD::ANY_EXTEND: 17255 return Op; 17256 case ISD::ZERO_EXTEND: { 17257 unsigned InBits = NarrowVT.getScalarType().getSizeInBits(); 17258 APInt Mask = APInt::getAllOnesValue(InBits); 17259 Mask = Mask.zext(VT.getScalarType().getSizeInBits()); 17260 return DAG.getNode(ISD::AND, DL, VT, 17261 Op, DAG.getConstant(Mask, VT)); 17262 } 17263 case ISD::SIGN_EXTEND: 17264 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, 17265 Op, DAG.getValueType(NarrowVT)); 17266 default: 17267 llvm_unreachable("Unexpected opcode"); 17268 } 17269} 17270 17271static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG, 17272 TargetLowering::DAGCombinerInfo &DCI, 17273 const X86Subtarget *Subtarget) { 17274 EVT VT = N->getValueType(0); 17275 if (DCI.isBeforeLegalizeOps()) 17276 return SDValue(); 17277 17278 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget); 17279 if (R.getNode()) 17280 return R; 17281 17282 // Create BLSI, and BLSR instructions 17283 // BLSI is X & (-X) 17284 // BLSR is X & (X-1) 17285 if (Subtarget->hasBMI() && (VT == MVT::i32 || VT == MVT::i64)) { 17286 SDValue N0 = N->getOperand(0); 17287 SDValue N1 = N->getOperand(1); 17288 SDLoc DL(N); 17289 17290 // Check LHS for neg 17291 if (N0.getOpcode() == ISD::SUB && N0.getOperand(1) == N1 && 17292 isZero(N0.getOperand(0))) 17293 return DAG.getNode(X86ISD::BLSI, DL, VT, N1); 17294 17295 // Check RHS for neg 17296 if (N1.getOpcode() == ISD::SUB && N1.getOperand(1) == N0 && 17297 isZero(N1.getOperand(0))) 17298 return DAG.getNode(X86ISD::BLSI, DL, VT, N0); 17299 17300 // Check LHS for X-1 17301 if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1 && 17302 isAllOnes(N0.getOperand(1))) 17303 return DAG.getNode(X86ISD::BLSR, DL, VT, N1); 17304 17305 // Check RHS for X-1 17306 if (N1.getOpcode() == ISD::ADD && N1.getOperand(0) == N0 && 17307 isAllOnes(N1.getOperand(1))) 17308 return DAG.getNode(X86ISD::BLSR, DL, VT, N0); 17309 17310 return SDValue(); 17311 } 17312 17313 // Want to form ANDNP nodes: 17314 // 1) In the hopes of then easily combining them with OR and AND nodes 17315 // to form PBLEND/PSIGN. 
17316 // 2) To match ANDN packed intrinsics 17317 if (VT != MVT::v2i64 && VT != MVT::v4i64) 17318 return SDValue(); 17319 17320 SDValue N0 = N->getOperand(0); 17321 SDValue N1 = N->getOperand(1); 17322 SDLoc DL(N); 17323 17324 // Check LHS for vnot 17325 if (N0.getOpcode() == ISD::XOR && 17326 //ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode())) 17327 CanFoldXORWithAllOnes(N0.getOperand(1).getNode())) 17328 return DAG.getNode(X86ISD::ANDNP, DL, VT, N0.getOperand(0), N1); 17329 17330 // Check RHS for vnot 17331 if (N1.getOpcode() == ISD::XOR && 17332 //ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode())) 17333 CanFoldXORWithAllOnes(N1.getOperand(1).getNode())) 17334 return DAG.getNode(X86ISD::ANDNP, DL, VT, N1.getOperand(0), N0); 17335 17336 return SDValue(); 17337} 17338 17339static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG, 17340 TargetLowering::DAGCombinerInfo &DCI, 17341 const X86Subtarget *Subtarget) { 17342 EVT VT = N->getValueType(0); 17343 if (DCI.isBeforeLegalizeOps()) 17344 return SDValue(); 17345 17346 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget); 17347 if (R.getNode()) 17348 return R; 17349 17350 SDValue N0 = N->getOperand(0); 17351 SDValue N1 = N->getOperand(1); 17352 17353 // look for psign/blend 17354 if (VT == MVT::v2i64 || VT == MVT::v4i64) { 17355 if (!Subtarget->hasSSSE3() || 17356 (VT == MVT::v4i64 && !Subtarget->hasInt256())) 17357 return SDValue(); 17358 17359 // Canonicalize pandn to RHS 17360 if (N0.getOpcode() == X86ISD::ANDNP) 17361 std::swap(N0, N1); 17362 // or (and (m, y), (pandn m, x)) 17363 if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) { 17364 SDValue Mask = N1.getOperand(0); 17365 SDValue X = N1.getOperand(1); 17366 SDValue Y; 17367 if (N0.getOperand(0) == Mask) 17368 Y = N0.getOperand(1); 17369 if (N0.getOperand(1) == Mask) 17370 Y = N0.getOperand(0); 17371 17372 // Check to see if the mask appeared in both the AND and ANDNP and 17373 if (!Y.getNode()) 17374 return SDValue(); 17375 17376 // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them. 17377 // Look through mask bitcast. 17378 if (Mask.getOpcode() == ISD::BITCAST) 17379 Mask = Mask.getOperand(0); 17380 if (X.getOpcode() == ISD::BITCAST) 17381 X = X.getOperand(0); 17382 if (Y.getOpcode() == ISD::BITCAST) 17383 Y = Y.getOperand(0); 17384 17385 EVT MaskVT = Mask.getValueType(); 17386 17387 // Validate that the Mask operand is a vector sra node. 17388 // FIXME: what to do for bytes, since there is a psignb/pblendvb, but 17389 // there is no psrai.b 17390 unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits(); 17391 unsigned SraAmt = ~0; 17392 if (Mask.getOpcode() == ISD::SRA) { 17393 SDValue Amt = Mask.getOperand(1); 17394 if (isSplatVector(Amt.getNode())) { 17395 SDValue SclrAmt = Amt->getOperand(0); 17396 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(SclrAmt)) 17397 SraAmt = C->getZExtValue(); 17398 } 17399 } else if (Mask.getOpcode() == X86ISD::VSRAI) { 17400 SDValue SraC = Mask.getOperand(1); 17401 SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue(); 17402 } 17403 if ((SraAmt + 1) != EltBits) 17404 return SDValue(); 17405 17406 SDLoc DL(N); 17407 17408 // Now we know we at least have a plendvb with the mask val. See if 17409 // we can form a psignb/w/d. 
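// Instruction semantics, for reference: PSIGNB/W/D copies each element of
// its first operand, negating it when the corresponding element of the
// second operand is negative and zeroing it when that element is zero.
// Here the mask is a full-width sign mask (sra by eltsize-1), and the check
// below additionally requires y = 0 - x with matching types.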
17410 // psign = x.type == y.type == mask.type && y = sub(0, x); 17411 if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X && 17412 ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) && 17413 X.getValueType() == MaskVT && Y.getValueType() == MaskVT) { 17414 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) && 17415 "Unsupported VT for PSIGN"); 17416 Mask = DAG.getNode(X86ISD::PSIGN, DL, MaskVT, X, Mask.getOperand(0)); 17417 return DAG.getNode(ISD::BITCAST, DL, VT, Mask); 17418 } 17419 // PBLENDVB only available on SSE 4.1 17420 if (!Subtarget->hasSSE41()) 17421 return SDValue(); 17422 17423 EVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8; 17424 17425 X = DAG.getNode(ISD::BITCAST, DL, BlendVT, X); 17426 Y = DAG.getNode(ISD::BITCAST, DL, BlendVT, Y); 17427 Mask = DAG.getNode(ISD::BITCAST, DL, BlendVT, Mask); 17428 Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X); 17429 return DAG.getNode(ISD::BITCAST, DL, VT, Mask); 17430 } 17431 } 17432 17433 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64) 17434 return SDValue(); 17435 17436 // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c) 17437 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL) 17438 std::swap(N0, N1); 17439 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL) 17440 return SDValue(); 17441 if (!N0.hasOneUse() || !N1.hasOneUse()) 17442 return SDValue(); 17443 17444 SDValue ShAmt0 = N0.getOperand(1); 17445 if (ShAmt0.getValueType() != MVT::i8) 17446 return SDValue(); 17447 SDValue ShAmt1 = N1.getOperand(1); 17448 if (ShAmt1.getValueType() != MVT::i8) 17449 return SDValue(); 17450 if (ShAmt0.getOpcode() == ISD::TRUNCATE) 17451 ShAmt0 = ShAmt0.getOperand(0); 17452 if (ShAmt1.getOpcode() == ISD::TRUNCATE) 17453 ShAmt1 = ShAmt1.getOperand(0); 17454 17455 SDLoc DL(N); 17456 unsigned Opc = X86ISD::SHLD; 17457 SDValue Op0 = N0.getOperand(0); 17458 SDValue Op1 = N1.getOperand(0); 17459 if (ShAmt0.getOpcode() == ISD::SUB) { 17460 Opc = X86ISD::SHRD; 17461 std::swap(Op0, Op1); 17462 std::swap(ShAmt0, ShAmt1); 17463 } 17464 17465 unsigned Bits = VT.getSizeInBits(); 17466 if (ShAmt1.getOpcode() == ISD::SUB) { 17467 SDValue Sum = ShAmt1.getOperand(0); 17468 if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) { 17469 SDValue ShAmt1Op1 = ShAmt1.getOperand(1); 17470 if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE) 17471 ShAmt1Op1 = ShAmt1Op1.getOperand(0); 17472 if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0) 17473 return DAG.getNode(Opc, DL, VT, 17474 Op0, Op1, 17475 DAG.getNode(ISD::TRUNCATE, DL, 17476 MVT::i8, ShAmt0)); 17477 } 17478 } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) { 17479 ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0); 17480 if (ShAmt0C && 17481 ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits) 17482 return DAG.getNode(Opc, DL, VT, 17483 N0.getOperand(0), N1.getOperand(0), 17484 DAG.getNode(ISD::TRUNCATE, DL, 17485 MVT::i8, ShAmt0)); 17486 } 17487 17488 return SDValue(); 17489} 17490 17491// Generate NEG and CMOV for integer abs. 17492static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) { 17493 EVT VT = N->getValueType(0); 17494 17495 // Since X86 does not have CMOV for 8-bit integer, we don't convert 17496 // 8-bit integer abs to NEG and CMOV. 
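// The pattern below is the common branchless abs expansion
// abs(x) = (x + (x >>s N-1)) ^ (x >>s N-1). It is rewritten as a SUB
// (0 - x, which also sets EFLAGS) followed by a CMOV on COND_GE that
// selects -x when x is negative and x otherwise.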
17497 if (VT.isInteger() && VT.getSizeInBits() == 8) 17498 return SDValue(); 17499 17500 SDValue N0 = N->getOperand(0); 17501 SDValue N1 = N->getOperand(1); 17502 SDLoc DL(N); 17503 17504 // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1) 17505 // and change it to SUB and CMOV. 17506 if (VT.isInteger() && N->getOpcode() == ISD::XOR && 17507 N0.getOpcode() == ISD::ADD && 17508 N0.getOperand(1) == N1 && 17509 N1.getOpcode() == ISD::SRA && 17510 N1.getOperand(0) == N0.getOperand(0)) 17511 if (ConstantSDNode *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1))) 17512 if (Y1C->getAPIntValue() == VT.getSizeInBits()-1) { 17513 // Generate SUB & CMOV. 17514 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32), 17515 DAG.getConstant(0, VT), N0.getOperand(0)); 17516 17517 SDValue Ops[] = { N0.getOperand(0), Neg, 17518 DAG.getConstant(X86::COND_GE, MVT::i8), 17519 SDValue(Neg.getNode(), 1) }; 17520 return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue), 17521 Ops, array_lengthof(Ops)); 17522 } 17523 return SDValue(); 17524} 17525 17526// PerformXorCombine - Attempts to turn XOR nodes into BLSMSK nodes 17527static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG, 17528 TargetLowering::DAGCombinerInfo &DCI, 17529 const X86Subtarget *Subtarget) { 17530 EVT VT = N->getValueType(0); 17531 if (DCI.isBeforeLegalizeOps()) 17532 return SDValue(); 17533 17534 if (Subtarget->hasCMov()) { 17535 SDValue RV = performIntegerAbsCombine(N, DAG); 17536 if (RV.getNode()) 17537 return RV; 17538 } 17539 17540 // Try forming BMI if it is available. 17541 if (!Subtarget->hasBMI()) 17542 return SDValue(); 17543 17544 if (VT != MVT::i32 && VT != MVT::i64) 17545 return SDValue(); 17546 17547 assert(Subtarget->hasBMI() && "Creating BLSMSK requires BMI instructions"); 17548 17549 // Create BLSMSK instructions by finding X ^ (X-1) 17550 SDValue N0 = N->getOperand(0); 17551 SDValue N1 = N->getOperand(1); 17552 SDLoc DL(N); 17553 17554 if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1 && 17555 isAllOnes(N0.getOperand(1))) 17556 return DAG.getNode(X86ISD::BLSMSK, DL, VT, N1); 17557 17558 if (N1.getOpcode() == ISD::ADD && N1.getOperand(0) == N0 && 17559 isAllOnes(N1.getOperand(1))) 17560 return DAG.getNode(X86ISD::BLSMSK, DL, VT, N0); 17561 17562 return SDValue(); 17563} 17564 17565/// PerformLOADCombine - Do target-specific dag combines on LOAD nodes. 17566static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG, 17567 TargetLowering::DAGCombinerInfo &DCI, 17568 const X86Subtarget *Subtarget) { 17569 LoadSDNode *Ld = cast<LoadSDNode>(N); 17570 EVT RegVT = Ld->getValueType(0); 17571 EVT MemVT = Ld->getMemoryVT(); 17572 SDLoc dl(Ld); 17573 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 17574 unsigned RegSz = RegVT.getSizeInBits(); 17575 17576 // On Sandybridge unaligned 256bit loads are inefficient. 
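// To cope with that, the code below splits an unaligned, non-extending
// 256-bit load into two 16-byte loads recombined with Insert128BitVector,
// mirroring the store splitting in PerformSTORECombine; on Sandy Bridge two
// 128-bit memory ops are cheaper than one unaligned 256-bit op.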
17577 ISD::LoadExtType Ext = Ld->getExtensionType(); 17578 unsigned Alignment = Ld->getAlignment(); 17579 bool IsAligned = Alignment == 0 || Alignment >= MemVT.getSizeInBits()/8; 17580 if (RegVT.is256BitVector() && !Subtarget->hasInt256() && 17581 !DCI.isBeforeLegalizeOps() && !IsAligned && Ext == ISD::NON_EXTLOAD) { 17582 unsigned NumElems = RegVT.getVectorNumElements(); 17583 if (NumElems < 2) 17584 return SDValue(); 17585 17586 SDValue Ptr = Ld->getBasePtr(); 17587 SDValue Increment = DAG.getConstant(16, TLI.getPointerTy()); 17588 17589 EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(), 17590 NumElems/2); 17591 SDValue Load1 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr, 17592 Ld->getPointerInfo(), Ld->isVolatile(), 17593 Ld->isNonTemporal(), Ld->isInvariant(), 17594 Alignment); 17595 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 17596 SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr, 17597 Ld->getPointerInfo(), Ld->isVolatile(), 17598 Ld->isNonTemporal(), Ld->isInvariant(), 17599 std::min(16U, Alignment)); 17600 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 17601 Load1.getValue(1), 17602 Load2.getValue(1)); 17603 17604 SDValue NewVec = DAG.getUNDEF(RegVT); 17605 NewVec = Insert128BitVector(NewVec, Load1, 0, DAG, dl); 17606 NewVec = Insert128BitVector(NewVec, Load2, NumElems/2, DAG, dl); 17607 return DCI.CombineTo(N, NewVec, TF, true); 17608 } 17609 17610 // If this is a vector EXT Load then attempt to optimize it using a 17611 // shuffle. If SSSE3 is not available we may emit an illegal shuffle but the 17612 // expansion is still better than scalar code. 17613 // We generate X86ISD::VSEXT for SEXTLOADs if it's available, otherwise we'll 17614 // emit a shuffle and a arithmetic shift. 17615 // TODO: It is possible to support ZExt by zeroing the undef values 17616 // during the shuffle phase or after the shuffle. 17617 if (RegVT.isVector() && RegVT.isInteger() && Subtarget->hasSSE2() && 17618 (Ext == ISD::EXTLOAD || Ext == ISD::SEXTLOAD)) { 17619 assert(MemVT != RegVT && "Cannot extend to the same type"); 17620 assert(MemVT.isVector() && "Must load a vector from memory"); 17621 17622 unsigned NumElems = RegVT.getVectorNumElements(); 17623 unsigned MemSz = MemVT.getSizeInBits(); 17624 assert(RegSz > MemSz && "Register size must be greater than the mem size"); 17625 17626 if (Ext == ISD::SEXTLOAD && RegSz == 256 && !Subtarget->hasInt256()) 17627 return SDValue(); 17628 17629 // All sizes must be a power of two. 17630 if (!isPowerOf2_32(RegSz * MemSz * NumElems)) 17631 return SDValue(); 17632 17633 // Attempt to load the original value using scalar loads. 17634 // Find the largest scalar type that divides the total loaded size. 17635 MVT SclrLoadTy = MVT::i8; 17636 for (unsigned tp = MVT::FIRST_INTEGER_VALUETYPE; 17637 tp < MVT::LAST_INTEGER_VALUETYPE; ++tp) { 17638 MVT Tp = (MVT::SimpleValueType)tp; 17639 if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) { 17640 SclrLoadTy = Tp; 17641 } 17642 } 17643 17644 // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64. 17645 if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 && 17646 (64 <= MemSz)) 17647 SclrLoadTy = MVT::f64; 17648 17649 // Calculate the number of scalar loads that we need to perform 17650 // in order to load our vector from memory. 
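// Worked example (illustrative): a sextload of <8 x i8> to <8 x i16> has
// MemSz = 64, so a single i64 (or f64) scalar load brings in all the data.
// With SSE4.1 a VSEXT (PMOVSX) is then emitted directly; without it the
// bytes are shuffled into the high half of each i16 lane and arithmetically
// shifted right by 8.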
17651 unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits(); 17652 if (Ext == ISD::SEXTLOAD && NumLoads > 1) 17653 return SDValue(); 17654 17655 unsigned loadRegZize = RegSz; 17656 if (Ext == ISD::SEXTLOAD && RegSz == 256) 17657 loadRegZize /= 2; 17658 17659 // Represent our vector as a sequence of elements which are the 17660 // largest scalar that we can load. 17661 EVT LoadUnitVecVT = EVT::getVectorVT(*DAG.getContext(), SclrLoadTy, 17662 loadRegZize/SclrLoadTy.getSizeInBits()); 17663 17664 // Represent the data using the same element type that is stored in 17665 // memory. In practice, we ''widen'' MemVT. 17666 EVT WideVecVT = 17667 EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(), 17668 loadRegZize/MemVT.getScalarType().getSizeInBits()); 17669 17670 assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() && 17671 "Invalid vector type"); 17672 17673 // We can't shuffle using an illegal type. 17674 if (!TLI.isTypeLegal(WideVecVT)) 17675 return SDValue(); 17676 17677 SmallVector<SDValue, 8> Chains; 17678 SDValue Ptr = Ld->getBasePtr(); 17679 SDValue Increment = DAG.getConstant(SclrLoadTy.getSizeInBits()/8, 17680 TLI.getPointerTy()); 17681 SDValue Res = DAG.getUNDEF(LoadUnitVecVT); 17682 17683 for (unsigned i = 0; i < NumLoads; ++i) { 17684 // Perform a single load. 17685 SDValue ScalarLoad = DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), 17686 Ptr, Ld->getPointerInfo(), 17687 Ld->isVolatile(), Ld->isNonTemporal(), 17688 Ld->isInvariant(), Ld->getAlignment()); 17689 Chains.push_back(ScalarLoad.getValue(1)); 17690 // Create the first element type using SCALAR_TO_VECTOR in order to avoid 17691 // another round of DAGCombining. 17692 if (i == 0) 17693 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad); 17694 else 17695 Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res, 17696 ScalarLoad, DAG.getIntPtrConstant(i)); 17697 17698 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 17699 } 17700 17701 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], 17702 Chains.size()); 17703 17704 // Bitcast the loaded value to a vector of the original element type, in 17705 // the size of the target vector type. 17706 SDValue SlicedVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Res); 17707 unsigned SizeRatio = RegSz/MemSz; 17708 17709 if (Ext == ISD::SEXTLOAD) { 17710 // If we have SSE4.1 we can directly emit a VSEXT node. 17711 if (Subtarget->hasSSE41()) { 17712 SDValue Sext = DAG.getNode(X86ISD::VSEXT, dl, RegVT, SlicedVec); 17713 return DCI.CombineTo(N, Sext, TF, true); 17714 } 17715 17716 // Otherwise we'll shuffle the small elements in the high bits of the 17717 // larger type and perform an arithmetic shift. If the shift is not legal 17718 // it's better to scalarize. 17719 if (!TLI.isOperationLegalOrCustom(ISD::SRA, RegVT)) 17720 return SDValue(); 17721 17722 // Redistribute the loaded elements into the different locations. 17723 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); 17724 for (unsigned i = 0; i != NumElems; ++i) 17725 ShuffleVec[i*SizeRatio + SizeRatio-1] = i; 17726 17727 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec, 17728 DAG.getUNDEF(WideVecVT), 17729 &ShuffleVec[0]); 17730 17731 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff); 17732 17733 // Build the arithmetic shift. 
17734 unsigned Amt = RegVT.getVectorElementType().getSizeInBits() - 17735 MemVT.getVectorElementType().getSizeInBits(); 17736 Shuff = DAG.getNode(ISD::SRA, dl, RegVT, Shuff, 17737 DAG.getConstant(Amt, RegVT)); 17738 17739 return DCI.CombineTo(N, Shuff, TF, true); 17740 } 17741 17742 // Redistribute the loaded elements into the different locations. 17743 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); 17744 for (unsigned i = 0; i != NumElems; ++i) 17745 ShuffleVec[i*SizeRatio] = i; 17746 17747 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec, 17748 DAG.getUNDEF(WideVecVT), 17749 &ShuffleVec[0]); 17750 17751 // Bitcast to the requested type. 17752 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff); 17753 // Replace the original load with the new sequence 17754 // and return the new chain. 17755 return DCI.CombineTo(N, Shuff, TF, true); 17756 } 17757 17758 return SDValue(); 17759} 17760 17761/// PerformSTORECombine - Do target-specific dag combines on STORE nodes. 17762static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG, 17763 const X86Subtarget *Subtarget) { 17764 StoreSDNode *St = cast<StoreSDNode>(N); 17765 EVT VT = St->getValue().getValueType(); 17766 EVT StVT = St->getMemoryVT(); 17767 SDLoc dl(St); 17768 SDValue StoredVal = St->getOperand(1); 17769 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 17770 17771 // If we are saving a concatenation of two XMM registers, perform two stores. 17772 // On Sandy Bridge, 256-bit memory operations are executed by two 17773 // 128-bit ports. However, on Haswell it is better to issue a single 256-bit 17774 // memory operation. 17775 unsigned Alignment = St->getAlignment(); 17776 bool IsAligned = Alignment == 0 || Alignment >= VT.getSizeInBits()/8; 17777 if (VT.is256BitVector() && !Subtarget->hasInt256() && 17778 StVT == VT && !IsAligned) { 17779 unsigned NumElems = VT.getVectorNumElements(); 17780 if (NumElems < 2) 17781 return SDValue(); 17782 17783 SDValue Value0 = Extract128BitVector(StoredVal, 0, DAG, dl); 17784 SDValue Value1 = Extract128BitVector(StoredVal, NumElems/2, DAG, dl); 17785 17786 SDValue Stride = DAG.getConstant(16, TLI.getPointerTy()); 17787 SDValue Ptr0 = St->getBasePtr(); 17788 SDValue Ptr1 = DAG.getNode(ISD::ADD, dl, Ptr0.getValueType(), Ptr0, Stride); 17789 17790 SDValue Ch0 = DAG.getStore(St->getChain(), dl, Value0, Ptr0, 17791 St->getPointerInfo(), St->isVolatile(), 17792 St->isNonTemporal(), Alignment); 17793 SDValue Ch1 = DAG.getStore(St->getChain(), dl, Value1, Ptr1, 17794 St->getPointerInfo(), St->isVolatile(), 17795 St->isNonTemporal(), 17796 std::min(16U, Alignment)); 17797 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1); 17798 } 17799 17800 // Optimize trunc store (of multiple scalars) to shuffle and store. 17801 // First, pack all of the elements in one place. Next, store to memory 17802 // in fewer chunks. 17803 if (St->isTruncatingStore() && VT.isVector()) { 17804 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 17805 unsigned NumElems = VT.getVectorNumElements(); 17806 assert(StVT != VT && "Cannot truncate to the same type"); 17807 unsigned FromSz = VT.getVectorElementType().getSizeInBits(); 17808 unsigned ToSz = StVT.getVectorElementType().getSizeInBits(); 17809 17810 // From, To sizes and ElemCount must be pow of two 17811 if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue(); 17812 // We are going to use the original vector elt for storing. 17813 // Accumulated smaller vector elements must be a multiple of the store size. 
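// Worked example (illustrative): a truncating store of <8 x i32> as
// <8 x i16> has SizeRatio 2; the value is bitcast to <16 x i16>, a shuffle
// packs elements 0,2,4,... into the low 128 bits, and that packed data is
// then written out as two 64-bit stores.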
17814 if (0 != (NumElems * FromSz) % ToSz) return SDValue(); 17815 17816 unsigned SizeRatio = FromSz / ToSz; 17817 17818 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits()); 17819 17820 // Create a type on which we perform the shuffle 17821 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), 17822 StVT.getScalarType(), NumElems*SizeRatio); 17823 17824 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits()); 17825 17826 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, St->getValue()); 17827 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); 17828 for (unsigned i = 0; i != NumElems; ++i) 17829 ShuffleVec[i] = i * SizeRatio; 17830 17831 // Can't shuffle using an illegal type. 17832 if (!TLI.isTypeLegal(WideVecVT)) 17833 return SDValue(); 17834 17835 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec, 17836 DAG.getUNDEF(WideVecVT), 17837 &ShuffleVec[0]); 17838 // At this point all of the data is stored at the bottom of the 17839 // register. We now need to save it to mem. 17840 17841 // Find the largest store unit 17842 MVT StoreType = MVT::i8; 17843 for (unsigned tp = MVT::FIRST_INTEGER_VALUETYPE; 17844 tp < MVT::LAST_INTEGER_VALUETYPE; ++tp) { 17845 MVT Tp = (MVT::SimpleValueType)tp; 17846 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz) 17847 StoreType = Tp; 17848 } 17849 17850 // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64. 17851 if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 && 17852 (64 <= NumElems * ToSz)) 17853 StoreType = MVT::f64; 17854 17855 // Bitcast the original vector into a vector of store-size units 17856 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(), 17857 StoreType, VT.getSizeInBits()/StoreType.getSizeInBits()); 17858 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits()); 17859 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, dl, StoreVecVT, Shuff); 17860 SmallVector<SDValue, 8> Chains; 17861 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8, 17862 TLI.getPointerTy()); 17863 SDValue Ptr = St->getBasePtr(); 17864 17865 // Perform one or more big stores into memory. 17866 for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) { 17867 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, 17868 StoreType, ShuffWide, 17869 DAG.getIntPtrConstant(i)); 17870 SDValue Ch = DAG.getStore(St->getChain(), dl, SubVec, Ptr, 17871 St->getPointerInfo(), St->isVolatile(), 17872 St->isNonTemporal(), St->getAlignment()); 17873 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 17874 Chains.push_back(Ch); 17875 } 17876 17877 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], 17878 Chains.size()); 17879 } 17880 17881 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering 17882 // the FP state in cases where an emms may be missing. 17883 // A preferable solution to the general problem is to figure out the right 17884 // places to insert EMMS. This qualifies as a quick hack. 17885 17886 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode. 17887 if (VT.getSizeInBits() != 64) 17888 return SDValue(); 17889 17890 const Function *F = DAG.getMachineFunction().getFunction(); 17891 bool NoImplicitFloatOps = F->getAttributes(). 
17892 hasAttribute(AttributeSet::FunctionIndex, Attribute::NoImplicitFloat); 17893 bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps 17894 && Subtarget->hasSSE2(); 17895 if ((VT.isVector() || 17896 (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) && 17897 isa<LoadSDNode>(St->getValue()) && 17898 !cast<LoadSDNode>(St->getValue())->isVolatile() && 17899 St->getChain().hasOneUse() && !St->isVolatile()) { 17900 SDNode* LdVal = St->getValue().getNode(); 17901 LoadSDNode *Ld = 0; 17902 int TokenFactorIndex = -1; 17903 SmallVector<SDValue, 8> Ops; 17904 SDNode* ChainVal = St->getChain().getNode(); 17905 // Must be a store of a load. We currently handle two cases: the load 17906 // is a direct child, and it's under an intervening TokenFactor. It is 17907 // possible to dig deeper under nested TokenFactors. 17908 if (ChainVal == LdVal) 17909 Ld = cast<LoadSDNode>(St->getChain()); 17910 else if (St->getValue().hasOneUse() && 17911 ChainVal->getOpcode() == ISD::TokenFactor) { 17912 for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) { 17913 if (ChainVal->getOperand(i).getNode() == LdVal) { 17914 TokenFactorIndex = i; 17915 Ld = cast<LoadSDNode>(St->getValue()); 17916 } else 17917 Ops.push_back(ChainVal->getOperand(i)); 17918 } 17919 } 17920 17921 if (!Ld || !ISD::isNormalLoad(Ld)) 17922 return SDValue(); 17923 17924 // If this is not the MMX case, i.e. we are just turning i64 load/store 17925 // into f64 load/store, avoid the transformation if there are multiple 17926 // uses of the loaded value. 17927 if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0)) 17928 return SDValue(); 17929 17930 SDLoc LdDL(Ld); 17931 SDLoc StDL(N); 17932 // If we are a 64-bit capable x86, lower to a single movq load/store pair. 17933 // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store 17934 // pair instead. 17935 if (Subtarget->is64Bit() || F64IsLegal) { 17936 EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64; 17937 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(), 17938 Ld->getPointerInfo(), Ld->isVolatile(), 17939 Ld->isNonTemporal(), Ld->isInvariant(), 17940 Ld->getAlignment()); 17941 SDValue NewChain = NewLd.getValue(1); 17942 if (TokenFactorIndex != -1) { 17943 Ops.push_back(NewChain); 17944 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0], 17945 Ops.size()); 17946 } 17947 return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(), 17948 St->getPointerInfo(), 17949 St->isVolatile(), St->isNonTemporal(), 17950 St->getAlignment()); 17951 } 17952 17953 // Otherwise, lower to two pairs of 32-bit loads / stores. 
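// i.e. on a 32-bit target where f64 stores are not usable, the 64-bit
// load/store round trip becomes two i32 loads at offsets 0 and 4 feeding
// two i32 stores at the same offsets, keeping the x87/MMX state untouched.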
17954 SDValue LoAddr = Ld->getBasePtr(); 17955 SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr, 17956 DAG.getConstant(4, MVT::i32)); 17957 17958 SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr, 17959 Ld->getPointerInfo(), 17960 Ld->isVolatile(), Ld->isNonTemporal(), 17961 Ld->isInvariant(), Ld->getAlignment()); 17962 SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr, 17963 Ld->getPointerInfo().getWithOffset(4), 17964 Ld->isVolatile(), Ld->isNonTemporal(), 17965 Ld->isInvariant(), 17966 MinAlign(Ld->getAlignment(), 4)); 17967 17968 SDValue NewChain = LoLd.getValue(1); 17969 if (TokenFactorIndex != -1) { 17970 Ops.push_back(LoLd); 17971 Ops.push_back(HiLd); 17972 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0], 17973 Ops.size()); 17974 } 17975 17976 LoAddr = St->getBasePtr(); 17977 HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr, 17978 DAG.getConstant(4, MVT::i32)); 17979 17980 SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr, 17981 St->getPointerInfo(), 17982 St->isVolatile(), St->isNonTemporal(), 17983 St->getAlignment()); 17984 SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr, 17985 St->getPointerInfo().getWithOffset(4), 17986 St->isVolatile(), 17987 St->isNonTemporal(), 17988 MinAlign(St->getAlignment(), 4)); 17989 return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt); 17990 } 17991 return SDValue(); 17992} 17993 17994/// isHorizontalBinOp - Return 'true' if this vector operation is "horizontal" 17995/// and return the operands for the horizontal operation in LHS and RHS. A 17996/// horizontal operation performs the binary operation on successive elements 17997/// of its first operand, then on successive elements of its second operand, 17998/// returning the resulting values in a vector. For example, if 17999/// A = < float a0, float a1, float a2, float a3 > 18000/// and 18001/// B = < float b0, float b1, float b2, float b3 > 18002/// then the result of doing a horizontal operation on A and B is 18003/// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >. 18004/// In short, LHS and RHS are inspected to see if LHS op RHS is of the form 18005/// A horizontal-op B, for some already available A and B, and if so then LHS is 18006/// set to A, RHS to B, and the routine returns 'true'. 18007/// Note that the binary operation should have the property that if one of the 18008/// operands is UNDEF then the result is UNDEF. 18009static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) { 18010 // Look for the following pattern: if 18011 // A = < float a0, float a1, float a2, float a3 > 18012 // B = < float b0, float b1, float b2, float b3 > 18013 // and 18014 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6> 18015 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7> 18016 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 > 18017 // which is A horizontal-op B. 18018 18019 // At least one of the operands should be a vector shuffle. 18020 if (LHS.getOpcode() != ISD::VECTOR_SHUFFLE && 18021 RHS.getOpcode() != ISD::VECTOR_SHUFFLE) 18022 return false; 18023 18024 MVT VT = LHS.getSimpleValueType(); 18025 18026 assert((VT.is128BitVector() || VT.is256BitVector()) && 18027 "Unsupported vector type for horizontal add/sub"); 18028 18029 // Handle 128 and 256-bit vector lengths. AVX defines horizontal add/sub to 18030 // operate independently on 128-bit lanes. 
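// For example, for v8f32 VHADDPS A, B produces
// < a0+a1, a2+a3, b0+b1, b2+b3, a4+a5, a6+a7, b4+b5, b6+b7 >,
// so the mask checks further down are performed lane by lane.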
18031 unsigned NumElts = VT.getVectorNumElements(); 18032 unsigned NumLanes = VT.getSizeInBits()/128; 18033 unsigned NumLaneElts = NumElts / NumLanes; 18034 assert((NumLaneElts % 2 == 0) && 18035 "Vector type should have an even number of elements in each lane"); 18036 unsigned HalfLaneElts = NumLaneElts/2; 18037 18038 // View LHS in the form 18039 // LHS = VECTOR_SHUFFLE A, B, LMask 18040 // If LHS is not a shuffle then pretend it is the shuffle 18041 // LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1> 18042 // NOTE: in what follows a default initialized SDValue represents an UNDEF of 18043 // type VT. 18044 SDValue A, B; 18045 SmallVector<int, 16> LMask(NumElts); 18046 if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) { 18047 if (LHS.getOperand(0).getOpcode() != ISD::UNDEF) 18048 A = LHS.getOperand(0); 18049 if (LHS.getOperand(1).getOpcode() != ISD::UNDEF) 18050 B = LHS.getOperand(1); 18051 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask(); 18052 std::copy(Mask.begin(), Mask.end(), LMask.begin()); 18053 } else { 18054 if (LHS.getOpcode() != ISD::UNDEF) 18055 A = LHS; 18056 for (unsigned i = 0; i != NumElts; ++i) 18057 LMask[i] = i; 18058 } 18059 18060 // Likewise, view RHS in the form 18061 // RHS = VECTOR_SHUFFLE C, D, RMask 18062 SDValue C, D; 18063 SmallVector<int, 16> RMask(NumElts); 18064 if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) { 18065 if (RHS.getOperand(0).getOpcode() != ISD::UNDEF) 18066 C = RHS.getOperand(0); 18067 if (RHS.getOperand(1).getOpcode() != ISD::UNDEF) 18068 D = RHS.getOperand(1); 18069 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask(); 18070 std::copy(Mask.begin(), Mask.end(), RMask.begin()); 18071 } else { 18072 if (RHS.getOpcode() != ISD::UNDEF) 18073 C = RHS; 18074 for (unsigned i = 0; i != NumElts; ++i) 18075 RMask[i] = i; 18076 } 18077 18078 // Check that the shuffles are both shuffling the same vectors. 18079 if (!(A == C && B == D) && !(A == D && B == C)) 18080 return false; 18081 18082 // If everything is UNDEF then bail out: it would be better to fold to UNDEF. 18083 if (!A.getNode() && !B.getNode()) 18084 return false; 18085 18086 // If A and B occur in reverse order in RHS, then "swap" them (which means 18087 // rewriting the mask). 18088 if (A != C) 18089 CommuteVectorShuffleMask(RMask, NumElts); 18090 18091 // At this point LHS and RHS are equivalent to 18092 // LHS = VECTOR_SHUFFLE A, B, LMask 18093 // RHS = VECTOR_SHUFFLE A, B, RMask 18094 // Check that the masks correspond to performing a horizontal operation. 18095 for (unsigned l = 0; l != NumElts; l += NumLaneElts) { 18096 for (unsigned i = 0; i != NumLaneElts; ++i) { 18097 int LIdx = LMask[i+l], RIdx = RMask[i+l]; 18098 18099 // Ignore any UNDEF components. 18100 if (LIdx < 0 || RIdx < 0 || 18101 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) || 18102 (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts))) 18103 continue; 18104 18105 // Check that successive elements are being operated on. If not, this is 18106 // not a horizontal operation. 18107 unsigned Src = (i/HalfLaneElts); // each lane is split between srcs 18108 int Index = 2*(i%HalfLaneElts) + NumElts*Src + l; 18109 if (!(LIdx == Index && RIdx == Index + 1) && 18110 !(IsCommutative && LIdx == Index + 1 && RIdx == Index)) 18111 return false; 18112 } 18113 } 18114 18115 LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it. 18116 RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it. 
18117 return true; 18118} 18119 18120/// PerformFADDCombine - Do target-specific dag combines on floating point adds. 18121static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG, 18122 const X86Subtarget *Subtarget) { 18123 EVT VT = N->getValueType(0); 18124 SDValue LHS = N->getOperand(0); 18125 SDValue RHS = N->getOperand(1); 18126 18127 // Try to synthesize horizontal adds from adds of shuffles. 18128 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) || 18129 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) && 18130 isHorizontalBinOp(LHS, RHS, true)) 18131 return DAG.getNode(X86ISD::FHADD, SDLoc(N), VT, LHS, RHS); 18132 return SDValue(); 18133} 18134 18135/// PerformFSUBCombine - Do target-specific dag combines on floating point subs. 18136static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG, 18137 const X86Subtarget *Subtarget) { 18138 EVT VT = N->getValueType(0); 18139 SDValue LHS = N->getOperand(0); 18140 SDValue RHS = N->getOperand(1); 18141 18142 // Try to synthesize horizontal subs from subs of shuffles. 18143 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) || 18144 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) && 18145 isHorizontalBinOp(LHS, RHS, false)) 18146 return DAG.getNode(X86ISD::FHSUB, SDLoc(N), VT, LHS, RHS); 18147 return SDValue(); 18148} 18149 18150/// PerformFORCombine - Do target-specific dag combines on X86ISD::FOR and 18151/// X86ISD::FXOR nodes. 18152static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) { 18153 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR); 18154 // F[X]OR(0.0, x) -> x 18155 // F[X]OR(x, 0.0) -> x 18156 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) 18157 if (C->getValueAPF().isPosZero()) 18158 return N->getOperand(1); 18159 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1))) 18160 if (C->getValueAPF().isPosZero()) 18161 return N->getOperand(0); 18162 return SDValue(); 18163} 18164 18165/// PerformFMinFMaxCombine - Do target-specific dag combines on X86ISD::FMIN and 18166/// X86ISD::FMAX nodes. 18167static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) { 18168 assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX); 18169 18170 // Only perform optimizations if UnsafeMath is used. 18171 if (!DAG.getTarget().Options.UnsafeFPMath) 18172 return SDValue(); 18173 18174 // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes 18175 // into FMINC and FMAXC, which are Commutative operations. 18176 unsigned NewOp = 0; 18177 switch (N->getOpcode()) { 18178 default: llvm_unreachable("unknown opcode"); 18179 case X86ISD::FMIN: NewOp = X86ISD::FMINC; break; 18180 case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break; 18181 } 18182 18183 return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0), 18184 N->getOperand(0), N->getOperand(1)); 18185} 18186 18187/// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes. 
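/// Only +0.0 folds here: its bit pattern is all zeroes, so FAND with it is
/// always zero, whereas -0.0 has the sign bit set and FAND(x, -0.0) yields
/// the sign bit of x rather than zero.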
18188static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) { 18189 // FAND(0.0, x) -> 0.0 18190 // FAND(x, 0.0) -> 0.0 18191 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) 18192 if (C->getValueAPF().isPosZero()) 18193 return N->getOperand(0); 18194 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1))) 18195 if (C->getValueAPF().isPosZero()) 18196 return N->getOperand(1); 18197 return SDValue(); 18198} 18199 18200/// PerformFANDNCombine - Do target-specific dag combines on X86ISD::FANDN nodes 18201static SDValue PerformFANDNCombine(SDNode *N, SelectionDAG &DAG) { 18202 // FANDN(x, 0.0) -> 0.0 18203 // FANDN(0.0, x) -> x 18204 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) 18205 if (C->getValueAPF().isPosZero()) 18206 return N->getOperand(1); 18207 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1))) 18208 if (C->getValueAPF().isPosZero()) 18209 return N->getOperand(1); 18210 return SDValue(); 18211} 18212 18213static SDValue PerformBTCombine(SDNode *N, 18214 SelectionDAG &DAG, 18215 TargetLowering::DAGCombinerInfo &DCI) { 18216 // BT ignores high bits in the bit index operand. 18217 SDValue Op1 = N->getOperand(1); 18218 if (Op1.hasOneUse()) { 18219 unsigned BitWidth = Op1.getValueSizeInBits(); 18220 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth)); 18221 APInt KnownZero, KnownOne; 18222 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), 18223 !DCI.isBeforeLegalizeOps()); 18224 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 18225 if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) || 18226 TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO)) 18227 DCI.CommitTargetLoweringOpt(TLO); 18228 } 18229 return SDValue(); 18230} 18231 18232static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) { 18233 SDValue Op = N->getOperand(0); 18234 if (Op.getOpcode() == ISD::BITCAST) 18235 Op = Op.getOperand(0); 18236 EVT VT = N->getValueType(0), OpVT = Op.getValueType(); 18237 if (Op.getOpcode() == X86ISD::VZEXT_LOAD && 18238 VT.getVectorElementType().getSizeInBits() == 18239 OpVT.getVectorElementType().getSizeInBits()) { 18240 return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op); 18241 } 18242 return SDValue(); 18243} 18244 18245static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG, 18246 const X86Subtarget *Subtarget) { 18247 EVT VT = N->getValueType(0); 18248 if (!VT.isVector()) 18249 return SDValue(); 18250 18251 SDValue N0 = N->getOperand(0); 18252 SDValue N1 = N->getOperand(1); 18253 EVT ExtraVT = cast<VTSDNode>(N1)->getVT(); 18254 SDLoc dl(N); 18255 18256 // The SIGN_EXTEND_INREG to v4i64 is expensive operation on the 18257 // both SSE and AVX2 since there is no sign-extended shift right 18258 // operation on a vector with 64-bit elements. 18259 //(sext_in_reg (v4i64 anyext (v4i32 x )), ExtraVT) -> 18260 // (v4i64 sext (v4i32 sext_in_reg (v4i32 x , ExtraVT))) 18261 if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND || 18262 N0.getOpcode() == ISD::SIGN_EXTEND)) { 18263 SDValue N00 = N0.getOperand(0); 18264 18265 // EXTLOAD has a better solution on AVX2, 18266 // it may be replaced with X86ISD::VSEXT node. 
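// (With AVX2 a sign-extending vector load can be selected as a single
// VPMOVSX instruction, which is better than the sext_in_reg + sext rewrite
// below, so extending loads are left for that path.)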
18267 if (N00.getOpcode() == ISD::LOAD && Subtarget->hasInt256()) 18268 if (!ISD::isNormalLoad(N00.getNode())) 18269 return SDValue(); 18270 18271 if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) { 18272 SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, 18273 N00, N1); 18274 return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp); 18275 } 18276 } 18277 return SDValue(); 18278} 18279 18280static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG, 18281 TargetLowering::DAGCombinerInfo &DCI, 18282 const X86Subtarget *Subtarget) { 18283 if (!DCI.isBeforeLegalizeOps()) 18284 return SDValue(); 18285 18286 if (!Subtarget->hasFp256()) 18287 return SDValue(); 18288 18289 EVT VT = N->getValueType(0); 18290 if (VT.isVector() && VT.getSizeInBits() == 256) { 18291 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget); 18292 if (R.getNode()) 18293 return R; 18294 } 18295 18296 return SDValue(); 18297} 18298 18299static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG, 18300 const X86Subtarget* Subtarget) { 18301 SDLoc dl(N); 18302 EVT VT = N->getValueType(0); 18303 18304 // Let legalize expand this if it isn't a legal type yet. 18305 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 18306 return SDValue(); 18307 18308 EVT ScalarVT = VT.getScalarType(); 18309 if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || 18310 (!Subtarget->hasFMA() && !Subtarget->hasFMA4())) 18311 return SDValue(); 18312 18313 SDValue A = N->getOperand(0); 18314 SDValue B = N->getOperand(1); 18315 SDValue C = N->getOperand(2); 18316 18317 bool NegA = (A.getOpcode() == ISD::FNEG); 18318 bool NegB = (B.getOpcode() == ISD::FNEG); 18319 bool NegC = (C.getOpcode() == ISD::FNEG); 18320 18321 // Negative multiplication when NegA xor NegB 18322 bool NegMul = (NegA != NegB); 18323 if (NegA) 18324 A = A.getOperand(0); 18325 if (NegB) 18326 B = B.getOperand(0); 18327 if (NegC) 18328 C = C.getOperand(0); 18329 18330 unsigned Opcode; 18331 if (!NegMul) 18332 Opcode = (!NegC) ? X86ISD::FMADD : X86ISD::FMSUB; 18333 else 18334 Opcode = (!NegC) ? X86ISD::FNMADD : X86ISD::FNMSUB; 18335 18336 return DAG.getNode(Opcode, dl, VT, A, B, C); 18337} 18338 18339static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG, 18340 TargetLowering::DAGCombinerInfo &DCI, 18341 const X86Subtarget *Subtarget) { 18342 // (i32 zext (and (i8 x86isd::setcc_carry), 1)) -> 18343 // (and (i32 x86isd::setcc_carry), 1) 18344 // This eliminates the zext. This transformation is necessary because 18345 // ISD::SETCC is always legalized to i8. 
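// Concretely, SETCC_CARRY is selected to "sbb %reg, %reg"; emitting it
// directly at the wider type and masking with 1 gives the same 0/1 value
// without the movzbl that the separate zext would require.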
18346 SDLoc dl(N); 18347 SDValue N0 = N->getOperand(0); 18348 EVT VT = N->getValueType(0); 18349 18350 if (N0.getOpcode() == ISD::AND && 18351 N0.hasOneUse() && 18352 N0.getOperand(0).hasOneUse()) { 18353 SDValue N00 = N0.getOperand(0); 18354 if (N00.getOpcode() == X86ISD::SETCC_CARRY) { 18355 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 18356 if (!C || C->getZExtValue() != 1) 18357 return SDValue(); 18358 return DAG.getNode(ISD::AND, dl, VT, 18359 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT, 18360 N00.getOperand(0), N00.getOperand(1)), 18361 DAG.getConstant(1, VT)); 18362 } 18363 } 18364 18365 if (VT.is256BitVector()) { 18366 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget); 18367 if (R.getNode()) 18368 return R; 18369 } 18370 18371 return SDValue(); 18372} 18373 18374// Optimize x == -y --> x+y == 0 18375// x != -y --> x+y != 0 18376static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG) { 18377 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); 18378 SDValue LHS = N->getOperand(0); 18379 SDValue RHS = N->getOperand(1); 18380 18381 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB) 18382 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(LHS.getOperand(0))) 18383 if (C->getAPIntValue() == 0 && LHS.hasOneUse()) { 18384 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N), 18385 LHS.getValueType(), RHS, LHS.getOperand(1)); 18386 return DAG.getSetCC(SDLoc(N), N->getValueType(0), 18387 addV, DAG.getConstant(0, addV.getValueType()), CC); 18388 } 18389 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB) 18390 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS.getOperand(0))) 18391 if (C->getAPIntValue() == 0 && RHS.hasOneUse()) { 18392 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N), 18393 RHS.getValueType(), LHS, RHS.getOperand(1)); 18394 return DAG.getSetCC(SDLoc(N), N->getValueType(0), 18395 addV, DAG.getConstant(0, addV.getValueType()), CC); 18396 } 18397 return SDValue(); 18398} 18399 18400// Helper function of PerformSETCCCombine. It is to materialize "setb reg" 18401// as "sbb reg,reg", since it can be extended without zext and produces 18402// an all-ones bit which is more useful than 0/1 in some cases. 18403static SDValue MaterializeSETB(SDLoc DL, SDValue EFLAGS, SelectionDAG &DAG) { 18404 return DAG.getNode(ISD::AND, DL, MVT::i8, 18405 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8, 18406 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS), 18407 DAG.getConstant(1, MVT::i8)); 18408} 18409 18410// Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT 18411static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG, 18412 TargetLowering::DAGCombinerInfo &DCI, 18413 const X86Subtarget *Subtarget) { 18414 SDLoc DL(N); 18415 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0)); 18416 SDValue EFLAGS = N->getOperand(1); 18417 18418 if (CC == X86::COND_A) { 18419 // Try to convert COND_A into COND_B in an attempt to facilitate 18420 // materializing "setb reg". 18421 // 18422 // Do not flip "e > c", where "c" is a constant, because Cmp instruction 18423 // cannot take an immediate as its first operand. 
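// When both operands are register-like, the SUB feeding EFLAGS is rebuilt
// below with its operands swapped, turning "x > y" (COND_A) into "y < x"
// (COND_B), which MaterializeSETB can then lower to a single sbb.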
18424 // 18425 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() && 18426 EFLAGS.getValueType().isInteger() && 18427 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) { 18428 SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS), 18429 EFLAGS.getNode()->getVTList(), 18430 EFLAGS.getOperand(1), EFLAGS.getOperand(0)); 18431 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo()); 18432 return MaterializeSETB(DL, NewEFLAGS, DAG); 18433 } 18434 } 18435 18436 // Materialize "setb reg" as "sbb reg,reg", since it can be extended without 18437 // a zext and produces an all-ones bit which is more useful than 0/1 in some 18438 // cases. 18439 if (CC == X86::COND_B) 18440 return MaterializeSETB(DL, EFLAGS, DAG); 18441 18442 SDValue Flags; 18443 18444 Flags = checkBoolTestSetCCCombine(EFLAGS, CC); 18445 if (Flags.getNode()) { 18446 SDValue Cond = DAG.getConstant(CC, MVT::i8); 18447 return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags); 18448 } 18449 18450 return SDValue(); 18451} 18452 18453// Optimize branch condition evaluation. 18454// 18455static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG, 18456 TargetLowering::DAGCombinerInfo &DCI, 18457 const X86Subtarget *Subtarget) { 18458 SDLoc DL(N); 18459 SDValue Chain = N->getOperand(0); 18460 SDValue Dest = N->getOperand(1); 18461 SDValue EFLAGS = N->getOperand(3); 18462 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2)); 18463 18464 SDValue Flags; 18465 18466 Flags = checkBoolTestSetCCCombine(EFLAGS, CC); 18467 if (Flags.getNode()) { 18468 SDValue Cond = DAG.getConstant(CC, MVT::i8); 18469 return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), Chain, Dest, Cond, 18470 Flags); 18471 } 18472 18473 return SDValue(); 18474} 18475 18476static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG, 18477 const X86TargetLowering *XTLI) { 18478 SDValue Op0 = N->getOperand(0); 18479 EVT InVT = Op0->getValueType(0); 18480 18481 // SINT_TO_FP(v4i8) -> SINT_TO_FP(SEXT(v4i8 to v4i32)) 18482 if (InVT == MVT::v8i8 || InVT == MVT::v4i8) { 18483 SDLoc dl(N); 18484 MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32; 18485 SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0); 18486 return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P); 18487 } 18488 18489 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have 18490 // a 32-bit target where SSE doesn't support i64->FP operations. 18491 if (Op0.getOpcode() == ISD::LOAD) { 18492 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode()); 18493 EVT VT = Ld->getValueType(0); 18494 if (!Ld->isVolatile() && !N->getValueType(0).isVector() && 18495 ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() && 18496 !XTLI->getSubtarget()->is64Bit() && 18497 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 18498 SDValue FILDChain = XTLI->BuildFILD(SDValue(N, 0), Ld->getValueType(0), 18499 Ld->getChain(), Op0, DAG); 18500 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1)); 18501 return FILDChain; 18502 } 18503 } 18504 return SDValue(); 18505} 18506 18507// Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS 18508static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG, 18509 X86TargetLowering::DAGCombinerInfo &DCI) { 18510 // If the LHS and RHS of the ADC node are zero, then it can't overflow and 18511 // the result is either zero or one (depending on the input carry bit). 18512 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1. 
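// Roughly: (adc 0, 0, carry) == carry, so the node is rewritten as
// (and (setcc_carry carry), 1), i.e. "sbb %reg, %reg; and $1, %reg",
// and the (dead) flags result is replaced by a constant 0.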
18513 if (X86::isZeroNode(N->getOperand(0)) && 18514 X86::isZeroNode(N->getOperand(1)) && 18515 // We don't have a good way to replace an EFLAGS use, so only do this when 18516 // dead right now. 18517 SDValue(N, 1).use_empty()) { 18518 SDLoc DL(N); 18519 EVT VT = N->getValueType(0); 18520 SDValue CarryOut = DAG.getConstant(0, N->getValueType(1)); 18521 SDValue Res1 = DAG.getNode(ISD::AND, DL, VT, 18522 DAG.getNode(X86ISD::SETCC_CARRY, DL, VT, 18523 DAG.getConstant(X86::COND_B,MVT::i8), 18524 N->getOperand(2)), 18525 DAG.getConstant(1, VT)); 18526 return DCI.CombineTo(N, Res1, CarryOut); 18527 } 18528 18529 return SDValue(); 18530} 18531 18532// fold (add Y, (sete X, 0)) -> adc 0, Y 18533// (add Y, (setne X, 0)) -> sbb -1, Y 18534// (sub (sete X, 0), Y) -> sbb 0, Y 18535// (sub (setne X, 0), Y) -> adc -1, Y 18536static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) { 18537 SDLoc DL(N); 18538 18539 // Look through ZExts. 18540 SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0); 18541 if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse()) 18542 return SDValue(); 18543 18544 SDValue SetCC = Ext.getOperand(0); 18545 if (SetCC.getOpcode() != X86ISD::SETCC || !SetCC.hasOneUse()) 18546 return SDValue(); 18547 18548 X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0); 18549 if (CC != X86::COND_E && CC != X86::COND_NE) 18550 return SDValue(); 18551 18552 SDValue Cmp = SetCC.getOperand(1); 18553 if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() || 18554 !X86::isZeroNode(Cmp.getOperand(1)) || 18555 !Cmp.getOperand(0).getValueType().isInteger()) 18556 return SDValue(); 18557 18558 SDValue CmpOp0 = Cmp.getOperand(0); 18559 SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0, 18560 DAG.getConstant(1, CmpOp0.getValueType())); 18561 18562 SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1); 18563 if (CC == X86::COND_NE) 18564 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB, 18565 DL, OtherVal.getValueType(), OtherVal, 18566 DAG.getConstant(-1ULL, OtherVal.getValueType()), NewCmp); 18567 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC, 18568 DL, OtherVal.getValueType(), OtherVal, 18569 DAG.getConstant(0, OtherVal.getValueType()), NewCmp); 18570} 18571 18572/// PerformADDCombine - Do target-specific dag combines on integer adds. 18573static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG, 18574 const X86Subtarget *Subtarget) { 18575 EVT VT = N->getValueType(0); 18576 SDValue Op0 = N->getOperand(0); 18577 SDValue Op1 = N->getOperand(1); 18578 18579 // Try to synthesize horizontal adds from adds of shuffles. 18580 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) || 18581 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) && 18582 isHorizontalBinOp(Op0, Op1, true)) 18583 return DAG.getNode(X86ISD::HADD, SDLoc(N), VT, Op0, Op1); 18584 18585 return OptimizeConditionalInDecrement(N, DAG); 18586} 18587 18588static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG, 18589 const X86Subtarget *Subtarget) { 18590 SDValue Op0 = N->getOperand(0); 18591 SDValue Op1 = N->getOperand(1); 18592 18593 // X86 can't encode an immediate LHS of a sub. See if we can push the 18594 // negation into a preceding instruction. 18595 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) { 18596 // If the RHS of the sub is a XOR with one use and a constant, invert the 18597 // immediate. 
Then add one to the LHS of the sub so we can turn
18598 // X-Y -> X+~Y+1, saving one register.
18599 if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
18600 isa<ConstantSDNode>(Op1.getOperand(1))) {
18601 APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue();
18602 EVT VT = Op0.getValueType();
18603 SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
18604 Op1.getOperand(0),
18605 DAG.getConstant(~XorC, VT));
18606 return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
18607 DAG.getConstant(C->getAPIntValue()+1, VT));
18608 }
18609 }
18610
18611 // Try to synthesize horizontal subs from subs of shuffles.
18612 EVT VT = N->getValueType(0);
18613 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
18614 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
18615 isHorizontalBinOp(Op0, Op1, true))
18616 return DAG.getNode(X86ISD::HSUB, SDLoc(N), VT, Op0, Op1);
18617
18618 return OptimizeConditionalInDecrement(N, DAG);
18619}
18620
18621/// performVZEXTCombine - Do target-specific dag combines on X86ISD::VZEXT nodes.
18622static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG,
18623 TargetLowering::DAGCombinerInfo &DCI,
18624 const X86Subtarget *Subtarget) {
18625 // (vzext (bitcast (vzext x))) -> (vzext x)
18626 SDValue In = N->getOperand(0);
18627 while (In.getOpcode() == ISD::BITCAST)
18628 In = In.getOperand(0);
18629
18630 if (In.getOpcode() != X86ISD::VZEXT)
18631 return SDValue();
18632
18633 return DAG.getNode(X86ISD::VZEXT, SDLoc(N), N->getValueType(0),
18634 In.getOperand(0));
18635}
18636
18637SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
18638 DAGCombinerInfo &DCI) const {
18639 SelectionDAG &DAG = DCI.DAG;
18640 switch (N->getOpcode()) {
18641 default: break;
18642 case ISD::EXTRACT_VECTOR_ELT:
18643 return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, DCI);
18644 case ISD::VSELECT:
18645 case ISD::SELECT: return PerformSELECTCombine(N, DAG, DCI, Subtarget);
18646 case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI, Subtarget);
18647 case ISD::ADD: return PerformAddCombine(N, DAG, Subtarget);
18648 case ISD::SUB: return PerformSubCombine(N, DAG, Subtarget);
18649 case X86ISD::ADC: return PerformADCCombine(N, DAG, DCI);
18650 case ISD::MUL: return PerformMulCombine(N, DAG, DCI);
18651 case ISD::SHL:
18652 case ISD::SRA:
18653 case ISD::SRL: return PerformShiftCombine(N, DAG, DCI, Subtarget);
18654 case ISD::AND: return PerformAndCombine(N, DAG, DCI, Subtarget);
18655 case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget);
18656 case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget);
18657 case ISD::LOAD: return PerformLOADCombine(N, DAG, DCI, Subtarget);
18658 case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget);
18659 case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, this);
18660 case ISD::FADD: return PerformFADDCombine(N, DAG, Subtarget);
18661 case ISD::FSUB: return PerformFSUBCombine(N, DAG, Subtarget);
18662 case X86ISD::FXOR:
18663 case X86ISD::FOR: return PerformFORCombine(N, DAG);
18664 case X86ISD::FMIN:
18665 case X86ISD::FMAX: return PerformFMinFMaxCombine(N, DAG);
18666 case X86ISD::FAND: return PerformFANDCombine(N, DAG);
18667 case X86ISD::FANDN: return PerformFANDNCombine(N, DAG);
18668 case X86ISD::BT: return PerformBTCombine(N, DAG, DCI);
18669 case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG);
18670 case ISD::ANY_EXTEND:
18671 case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG, DCI, Subtarget);
18672 case ISD::SIGN_EXTEND: return PerformSExtCombine(N, DAG, DCI, Subtarget);
18673 case ISD::SIGN_EXTEND_INREG: return PerformSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
18674 case ISD::TRUNCATE: return PerformTruncateCombine(N, DAG, DCI, Subtarget);
18675 case ISD::SETCC: return PerformISDSETCCCombine(N, DAG);
18676 case X86ISD::SETCC: return PerformSETCCCombine(N, DAG, DCI, Subtarget);
18677 case X86ISD::BRCOND: return PerformBrCondCombine(N, DAG, DCI, Subtarget);
18678 case X86ISD::VZEXT: return performVZEXTCombine(N, DAG, DCI, Subtarget);
18679 case X86ISD::SHUFP: // Handle all target-specific shuffles
18680 case X86ISD::PALIGNR:
18681 case X86ISD::UNPCKH:
18682 case X86ISD::UNPCKL:
18683 case X86ISD::MOVHLPS:
18684 case X86ISD::MOVLHPS:
18685 case X86ISD::PSHUFD:
18686 case X86ISD::PSHUFHW:
18687 case X86ISD::PSHUFLW:
18688 case X86ISD::MOVSS:
18689 case X86ISD::MOVSD:
18690 case X86ISD::VPERMILP:
18691 case X86ISD::VPERM2X128:
18692 case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI, Subtarget);
18693 case ISD::FMA: return PerformFMACombine(N, DAG, Subtarget);
18694 }
18695
18696 return SDValue();
18697}
18698
18699/// isTypeDesirableForOp - Return true if the target has native support for
18700/// the specified value type and it is 'desirable' to use the type for the
18701/// given node type. e.g. On x86 i16 is legal, but undesirable since i16
18702/// instruction encodings are longer and some i16 instructions are slow.
18703bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
18704 if (!isTypeLegal(VT))
18705 return false;
18706 if (VT != MVT::i16)
18707 return true;
18708
18709 switch (Opc) {
18710 default:
18711 return true;
18712 case ISD::LOAD:
18713 case ISD::SIGN_EXTEND:
18714 case ISD::ZERO_EXTEND:
18715 case ISD::ANY_EXTEND:
18716 case ISD::SHL:
18717 case ISD::SRL:
18718 case ISD::SUB:
18719 case ISD::ADD:
18720 case ISD::MUL:
18721 case ISD::AND:
18722 case ISD::OR:
18723 case ISD::XOR:
18724 return false;
18725 }
18726}
18727
18728/// IsDesirableToPromoteOp - This method queries the target whether it is
18729/// beneficial for the dag combiner to promote the specified node. If true, it
18730/// should return the desired promotion type by reference.
18731bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
18732 EVT VT = Op.getValueType();
18733 if (VT != MVT::i16)
18734 return false;
18735
18736 bool Promote = false;
18737 bool Commute = false;
18738 switch (Op.getOpcode()) {
18739 default: break;
18740 case ISD::LOAD: {
18741 LoadSDNode *LD = cast<LoadSDNode>(Op);
18742 // If the non-extending load has a single use and it's not live out, then it
18743 // might be folded.
18744 if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&&
18745 Op.hasOneUse()*/) {
18746 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
18747 UE = Op.getNode()->use_end(); UI != UE; ++UI) {
18748 // The only case where we'd want to promote LOAD (rather than it being
18749 // promoted as an operand) is when its only use is a live-out CopyToReg.
18750 if (UI->getOpcode() != ISD::CopyToReg)
18751 return false;
18752 }
18753 }
18754 Promote = true;
18755 break;
18756 }
18757 case ISD::SIGN_EXTEND:
18758 case ISD::ZERO_EXTEND:
18759 case ISD::ANY_EXTEND:
18760 Promote = true;
18761 break;
18762 case ISD::SHL:
18763 case ISD::SRL: {
18764 SDValue N0 = Op.getOperand(0);
18765 // Look out for (store (shl (load), x)).
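// Promoting that pattern to i32 would block folding the i16 load and the
// store into the shift, so promotion is rejected below when both a foldable
// load and a foldable store are present.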
18766 if (MayFoldLoad(N0) && MayFoldIntoStore(Op)) 18767 return false; 18768 Promote = true; 18769 break; 18770 } 18771 case ISD::ADD: 18772 case ISD::MUL: 18773 case ISD::AND: 18774 case ISD::OR: 18775 case ISD::XOR: 18776 Commute = true; 18777 // fallthrough 18778 case ISD::SUB: { 18779 SDValue N0 = Op.getOperand(0); 18780 SDValue N1 = Op.getOperand(1); 18781 if (!Commute && MayFoldLoad(N1)) 18782 return false; 18783 // Avoid disabling potential load folding opportunities. 18784 if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op))) 18785 return false; 18786 if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op))) 18787 return false; 18788 Promote = true; 18789 } 18790 } 18791 18792 PVT = MVT::i32; 18793 return Promote; 18794} 18795 18796//===----------------------------------------------------------------------===// 18797// X86 Inline Assembly Support 18798//===----------------------------------------------------------------------===// 18799 18800namespace { 18801 // Helper to match a string separated by whitespace. 18802 bool matchAsmImpl(StringRef s, ArrayRef<const StringRef *> args) { 18803 s = s.substr(s.find_first_not_of(" \t")); // Skip leading whitespace. 18804 18805 for (unsigned i = 0, e = args.size(); i != e; ++i) { 18806 StringRef piece(*args[i]); 18807 if (!s.startswith(piece)) // Check if the piece matches. 18808 return false; 18809 18810 s = s.substr(piece.size()); 18811 StringRef::size_type pos = s.find_first_not_of(" \t"); 18812 if (pos == 0) // We matched a prefix. 18813 return false; 18814 18815 s = s.substr(pos); 18816 } 18817 18818 return s.empty(); 18819 } 18820 const VariadicFunction1<bool, StringRef, StringRef, matchAsmImpl> matchAsm={}; 18821} 18822 18823bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const { 18824 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue()); 18825 18826 std::string AsmStr = IA->getAsmString(); 18827 18828 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType()); 18829 if (!Ty || Ty->getBitWidth() % 16 != 0) 18830 return false; 18831 18832 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a" 18833 SmallVector<StringRef, 4> AsmPieces; 18834 SplitString(AsmStr, AsmPieces, ";\n"); 18835 18836 switch (AsmPieces.size()) { 18837 default: return false; 18838 case 1: 18839 // FIXME: this should verify that we are targeting a 486 or better. If not, 18840 // we will turn this bswap into something that will be lowered to logical 18841 // ops instead of emitting the bswap asm. For now, we don't support 486 or 18842 // lower so don't worry about this. 18843 // bswap $0 18844 if (matchAsm(AsmPieces[0], "bswap", "$0") || 18845 matchAsm(AsmPieces[0], "bswapl", "$0") || 18846 matchAsm(AsmPieces[0], "bswapq", "$0") || 18847 matchAsm(AsmPieces[0], "bswap", "${0:q}") || 18848 matchAsm(AsmPieces[0], "bswapl", "${0:q}") || 18849 matchAsm(AsmPieces[0], "bswapq", "${0:q}")) { 18850 // No need to check constraints, nothing other than the equivalent of 18851 // "=r,0" would be valid here. 
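// As a concrete (hypothetical) example, user code such as
//   asm("bswap $0" : "=r"(v) : "0"(v));
// on a 32- or 64-bit integer is recognized here and rewritten to the
// llvm.bswap.* intrinsic, which the backend then selects normally.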
18852 return IntrinsicLowering::LowerToByteSwap(CI); 18853 } 18854 18855 // rorw $$8, ${0:w} --> llvm.bswap.i16 18856 if (CI->getType()->isIntegerTy(16) && 18857 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 && 18858 (matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") || 18859 matchAsm(AsmPieces[0], "rolw", "$$8,", "${0:w}"))) { 18860 AsmPieces.clear(); 18861 const std::string &ConstraintsStr = IA->getConstraintString(); 18862 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ","); 18863 array_pod_sort(AsmPieces.begin(), AsmPieces.end()); 18864 if (AsmPieces.size() == 4 && 18865 AsmPieces[0] == "~{cc}" && 18866 AsmPieces[1] == "~{dirflag}" && 18867 AsmPieces[2] == "~{flags}" && 18868 AsmPieces[3] == "~{fpsr}") 18869 return IntrinsicLowering::LowerToByteSwap(CI); 18870 } 18871 break; 18872 case 3: 18873 if (CI->getType()->isIntegerTy(32) && 18874 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 && 18875 matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") && 18876 matchAsm(AsmPieces[1], "rorl", "$$16,", "$0") && 18877 matchAsm(AsmPieces[2], "rorw", "$$8,", "${0:w}")) { 18878 AsmPieces.clear(); 18879 const std::string &ConstraintsStr = IA->getConstraintString(); 18880 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ","); 18881 array_pod_sort(AsmPieces.begin(), AsmPieces.end()); 18882 if (AsmPieces.size() == 4 && 18883 AsmPieces[0] == "~{cc}" && 18884 AsmPieces[1] == "~{dirflag}" && 18885 AsmPieces[2] == "~{flags}" && 18886 AsmPieces[3] == "~{fpsr}") 18887 return IntrinsicLowering::LowerToByteSwap(CI); 18888 } 18889 18890 if (CI->getType()->isIntegerTy(64)) { 18891 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints(); 18892 if (Constraints.size() >= 2 && 18893 Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" && 18894 Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") { 18895 // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64 18896 if (matchAsm(AsmPieces[0], "bswap", "%eax") && 18897 matchAsm(AsmPieces[1], "bswap", "%edx") && 18898 matchAsm(AsmPieces[2], "xchgl", "%eax,", "%edx")) 18899 return IntrinsicLowering::LowerToByteSwap(CI); 18900 } 18901 } 18902 break; 18903 } 18904 return false; 18905} 18906 18907/// getConstraintType - Given a constraint letter, return the type of 18908/// constraint it is for this target. 18909X86TargetLowering::ConstraintType 18910X86TargetLowering::getConstraintType(const std::string &Constraint) const { 18911 if (Constraint.size() == 1) { 18912 switch (Constraint[0]) { 18913 case 'R': 18914 case 'q': 18915 case 'Q': 18916 case 'f': 18917 case 't': 18918 case 'u': 18919 case 'y': 18920 case 'x': 18921 case 'Y': 18922 case 'l': 18923 return C_RegisterClass; 18924 case 'a': 18925 case 'b': 18926 case 'c': 18927 case 'd': 18928 case 'S': 18929 case 'D': 18930 case 'A': 18931 return C_Register; 18932 case 'I': 18933 case 'J': 18934 case 'K': 18935 case 'L': 18936 case 'M': 18937 case 'N': 18938 case 'G': 18939 case 'C': 18940 case 'e': 18941 case 'Z': 18942 return C_Other; 18943 default: 18944 break; 18945 } 18946 } 18947 return TargetLowering::getConstraintType(Constraint); 18948} 18949 18950/// Examine constraint type and operand type and determine a weight value. 18951/// This object must already have been set up with the operand type 18952/// and the current alternative constraint selected. 
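/// For example, an integer operand offered to the 'a' (EAX) constraint is
/// scored CW_SpecificReg, a 128-bit vector offered to 'x' requires SSE1 and
/// is scored CW_Register, and an immediate that does not fit 'I' (0..31)
/// is left at the initial CW_Invalid weight.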
18953TargetLowering::ConstraintWeight 18954 X86TargetLowering::getSingleConstraintMatchWeight( 18955 AsmOperandInfo &info, const char *constraint) const { 18956 ConstraintWeight weight = CW_Invalid; 18957 Value *CallOperandVal = info.CallOperandVal; 18958 // If we don't have a value, we can't do a match, 18959 // but allow it at the lowest weight. 18960 if (CallOperandVal == NULL) 18961 return CW_Default; 18962 Type *type = CallOperandVal->getType(); 18963 // Look at the constraint type. 18964 switch (*constraint) { 18965 default: 18966 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 18967 case 'R': 18968 case 'q': 18969 case 'Q': 18970 case 'a': 18971 case 'b': 18972 case 'c': 18973 case 'd': 18974 case 'S': 18975 case 'D': 18976 case 'A': 18977 if (CallOperandVal->getType()->isIntegerTy()) 18978 weight = CW_SpecificReg; 18979 break; 18980 case 'f': 18981 case 't': 18982 case 'u': 18983 if (type->isFloatingPointTy()) 18984 weight = CW_SpecificReg; 18985 break; 18986 case 'y': 18987 if (type->isX86_MMXTy() && Subtarget->hasMMX()) 18988 weight = CW_SpecificReg; 18989 break; 18990 case 'x': 18991 case 'Y': 18992 if (((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasSSE1()) || 18993 ((type->getPrimitiveSizeInBits() == 256) && Subtarget->hasFp256())) 18994 weight = CW_Register; 18995 break; 18996 case 'I': 18997 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) { 18998 if (C->getZExtValue() <= 31) 18999 weight = CW_Constant; 19000 } 19001 break; 19002 case 'J': 19003 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 19004 if (C->getZExtValue() <= 63) 19005 weight = CW_Constant; 19006 } 19007 break; 19008 case 'K': 19009 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 19010 if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f)) 19011 weight = CW_Constant; 19012 } 19013 break; 19014 case 'L': 19015 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 19016 if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff)) 19017 weight = CW_Constant; 19018 } 19019 break; 19020 case 'M': 19021 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 19022 if (C->getZExtValue() <= 3) 19023 weight = CW_Constant; 19024 } 19025 break; 19026 case 'N': 19027 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 19028 if (C->getZExtValue() <= 0xff) 19029 weight = CW_Constant; 19030 } 19031 break; 19032 case 'G': 19033 case 'C': 19034 if (dyn_cast<ConstantFP>(CallOperandVal)) { 19035 weight = CW_Constant; 19036 } 19037 break; 19038 case 'e': 19039 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 19040 if ((C->getSExtValue() >= -0x80000000LL) && 19041 (C->getSExtValue() <= 0x7fffffffLL)) 19042 weight = CW_Constant; 19043 } 19044 break; 19045 case 'Z': 19046 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 19047 if (C->getZExtValue() <= 0xffffffff) 19048 weight = CW_Constant; 19049 } 19050 break; 19051 } 19052 return weight; 19053} 19054 19055/// LowerXConstraint - try to replace an X constraint, which matches anything, 19056/// with another that has more specific requirements based on the type of the 19057/// corresponding operand. 19058const char *X86TargetLowering:: 19059LowerXConstraint(EVT ConstraintVT) const { 19060 // FP X constraints get lowered to SSE1/2 registers if available, otherwise 19061 // 'f' like normal targets. 
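// e.g. a floating-point operand under the catch-all "X" constraint is
// re-lowered to "Y" when SSE2 is available, to "x" with only SSE1, and
// otherwise falls back to the generic TargetLowering handling.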
19062 if (ConstraintVT.isFloatingPoint()) { 19063 if (Subtarget->hasSSE2()) 19064 return "Y"; 19065 if (Subtarget->hasSSE1()) 19066 return "x"; 19067 } 19068 19069 return TargetLowering::LowerXConstraint(ConstraintVT); 19070} 19071 19072/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 19073/// vector. If it is invalid, don't add anything to Ops. 19074void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op, 19075 std::string &Constraint, 19076 std::vector<SDValue>&Ops, 19077 SelectionDAG &DAG) const { 19078 SDValue Result(0, 0); 19079 19080 // Only support length 1 constraints for now. 19081 if (Constraint.length() > 1) return; 19082 19083 char ConstraintLetter = Constraint[0]; 19084 switch (ConstraintLetter) { 19085 default: break; 19086 case 'I': 19087 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 19088 if (C->getZExtValue() <= 31) { 19089 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType()); 19090 break; 19091 } 19092 } 19093 return; 19094 case 'J': 19095 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 19096 if (C->getZExtValue() <= 63) { 19097 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType()); 19098 break; 19099 } 19100 } 19101 return; 19102 case 'K': 19103 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 19104 if (isInt<8>(C->getSExtValue())) { 19105 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType()); 19106 break; 19107 } 19108 } 19109 return; 19110 case 'N': 19111 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 19112 if (C->getZExtValue() <= 255) { 19113 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType()); 19114 break; 19115 } 19116 } 19117 return; 19118 case 'e': { 19119 // 32-bit signed value 19120 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 19121 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()), 19122 C->getSExtValue())) { 19123 // Widen to 64 bits here to get it sign extended. 19124 Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64); 19125 break; 19126 } 19127 // FIXME gcc accepts some relocatable values here too, but only in certain 19128 // memory models; it's complicated. 19129 } 19130 return; 19131 } 19132 case 'Z': { 19133 // 32-bit unsigned value 19134 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 19135 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()), 19136 C->getZExtValue())) { 19137 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType()); 19138 break; 19139 } 19140 } 19141 // FIXME gcc accepts some relocatable values here too, but only in certain 19142 // memory models; it's complicated. 19143 return; 19144 } 19145 case 'i': { 19146 // Literal immediates are always ok. 19147 if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) { 19148 // Widen to 64 bits here to get it sign extended. 19149 Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64); 19150 break; 19151 } 19152 19153 // In any sort of PIC mode addresses need to be computed at runtime by 19154 // adding in a register or some sort of table lookup. These can't 19155 // be used as immediates. 19156 if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC()) 19157 return; 19158 19159 // If we are in non-pic codegen mode, we allow the address of a global (with 19160 // an optional displacement) to be used with 'i'. 19161 GlobalAddressSDNode *GA = 0; 19162 int64_t Offset = 0; 19163 19164 // Match either (GA), (GA+C), (GA+C1+C2), etc. 
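// e.g. for (add (add (GlobalAddress @sym), 8), 4) the loop below walks the
// adds, accumulates Offset = 12, and the constraint is satisfied with a
// TargetGlobalAddress of @sym plus that offset (assuming no PIC stub is
// required).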
19165 while (1) { 19166 if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) { 19167 Offset += GA->getOffset(); 19168 break; 19169 } else if (Op.getOpcode() == ISD::ADD) { 19170 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 19171 Offset += C->getZExtValue(); 19172 Op = Op.getOperand(0); 19173 continue; 19174 } 19175 } else if (Op.getOpcode() == ISD::SUB) { 19176 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 19177 Offset += -C->getZExtValue(); 19178 Op = Op.getOperand(0); 19179 continue; 19180 } 19181 } 19182 19183 // Otherwise, this isn't something we can handle, reject it. 19184 return; 19185 } 19186 19187 const GlobalValue *GV = GA->getGlobal(); 19188 // If we require an extra load to get this address, as in PIC mode, we 19189 // can't accept it. 19190 if (isGlobalStubReference(Subtarget->ClassifyGlobalReference(GV, 19191 getTargetMachine()))) 19192 return; 19193 19194 Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op), 19195 GA->getValueType(0), Offset); 19196 break; 19197 } 19198 } 19199 19200 if (Result.getNode()) { 19201 Ops.push_back(Result); 19202 return; 19203 } 19204 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 19205} 19206 19207std::pair<unsigned, const TargetRegisterClass*> 19208X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, 19209 MVT VT) const { 19210 // First, see if this is a constraint that directly corresponds to an LLVM 19211 // register class. 19212 if (Constraint.size() == 1) { 19213 // GCC Constraint Letters 19214 switch (Constraint[0]) { 19215 default: break; 19216 // TODO: Slight differences here in allocation order and leaving 19217 // RIP in the class. Do they matter any more here than they do 19218 // in the normal allocation? 19219 case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode. 19220 if (Subtarget->is64Bit()) { 19221 if (VT == MVT::i32 || VT == MVT::f32) 19222 return std::make_pair(0U, &X86::GR32RegClass); 19223 if (VT == MVT::i16) 19224 return std::make_pair(0U, &X86::GR16RegClass); 19225 if (VT == MVT::i8 || VT == MVT::i1) 19226 return std::make_pair(0U, &X86::GR8RegClass); 19227 if (VT == MVT::i64 || VT == MVT::f64) 19228 return std::make_pair(0U, &X86::GR64RegClass); 19229 break; 19230 } 19231 // 32-bit fallthrough 19232 case 'Q': // Q_REGS 19233 if (VT == MVT::i32 || VT == MVT::f32) 19234 return std::make_pair(0U, &X86::GR32_ABCDRegClass); 19235 if (VT == MVT::i16) 19236 return std::make_pair(0U, &X86::GR16_ABCDRegClass); 19237 if (VT == MVT::i8 || VT == MVT::i1) 19238 return std::make_pair(0U, &X86::GR8_ABCD_LRegClass); 19239 if (VT == MVT::i64) 19240 return std::make_pair(0U, &X86::GR64_ABCDRegClass); 19241 break; 19242 case 'r': // GENERAL_REGS 19243 case 'l': // INDEX_REGS 19244 if (VT == MVT::i8 || VT == MVT::i1) 19245 return std::make_pair(0U, &X86::GR8RegClass); 19246 if (VT == MVT::i16) 19247 return std::make_pair(0U, &X86::GR16RegClass); 19248 if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit()) 19249 return std::make_pair(0U, &X86::GR32RegClass); 19250 return std::make_pair(0U, &X86::GR64RegClass); 19251 case 'R': // LEGACY_REGS 19252 if (VT == MVT::i8 || VT == MVT::i1) 19253 return std::make_pair(0U, &X86::GR8_NOREXRegClass); 19254 if (VT == MVT::i16) 19255 return std::make_pair(0U, &X86::GR16_NOREXRegClass); 19256 if (VT == MVT::i32 || !Subtarget->is64Bit()) 19257 return std::make_pair(0U, &X86::GR32_NOREXRegClass); 19258 return std::make_pair(0U, &X86::GR64_NOREXRegClass); 19259 case 'f': // FP Stack registers. 
19260 // If SSE is enabled for this VT, use f80 to ensure the isel moves the 19261 // value to the correct fpstack register class. 19262 if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT)) 19263 return std::make_pair(0U, &X86::RFP32RegClass); 19264 if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT)) 19265 return std::make_pair(0U, &X86::RFP64RegClass); 19266 return std::make_pair(0U, &X86::RFP80RegClass); 19267 case 'y': // MMX_REGS if MMX allowed. 19268 if (!Subtarget->hasMMX()) break; 19269 return std::make_pair(0U, &X86::VR64RegClass); 19270 case 'Y': // SSE_REGS if SSE2 allowed 19271 if (!Subtarget->hasSSE2()) break; 19272 // FALL THROUGH. 19273 case 'x': // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed 19274 if (!Subtarget->hasSSE1()) break; 19275 19276 switch (VT.SimpleTy) { 19277 default: break; 19278 // Scalar SSE types. 19279 case MVT::f32: 19280 case MVT::i32: 19281 return std::make_pair(0U, &X86::FR32RegClass); 19282 case MVT::f64: 19283 case MVT::i64: 19284 return std::make_pair(0U, &X86::FR64RegClass); 19285 // Vector types. 19286 case MVT::v16i8: 19287 case MVT::v8i16: 19288 case MVT::v4i32: 19289 case MVT::v2i64: 19290 case MVT::v4f32: 19291 case MVT::v2f64: 19292 return std::make_pair(0U, &X86::VR128RegClass); 19293 // AVX types. 19294 case MVT::v32i8: 19295 case MVT::v16i16: 19296 case MVT::v8i32: 19297 case MVT::v4i64: 19298 case MVT::v8f32: 19299 case MVT::v4f64: 19300 return std::make_pair(0U, &X86::VR256RegClass); 19301 case MVT::v8f64: 19302 case MVT::v16f32: 19303 case MVT::v16i32: 19304 case MVT::v8i64: 19305 return std::make_pair(0U, &X86::VR512RegClass); 19306 } 19307 break; 19308 } 19309 } 19310 19311 // Use the default implementation in TargetLowering to convert the register 19312 // constraint into a member of a register class. 19313 std::pair<unsigned, const TargetRegisterClass*> Res; 19314 Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 19315 19316 // Not found as a standard register? 19317 if (Res.second == 0) { 19318 // Map st(0) -> st(7) -> ST0 19319 if (Constraint.size() == 7 && Constraint[0] == '{' && 19320 tolower(Constraint[1]) == 's' && 19321 tolower(Constraint[2]) == 't' && 19322 Constraint[3] == '(' && 19323 (Constraint[4] >= '0' && Constraint[4] <= '7') && 19324 Constraint[5] == ')' && 19325 Constraint[6] == '}') { 19326 19327 Res.first = X86::ST0+Constraint[4]-'0'; 19328 Res.second = &X86::RFP80RegClass; 19329 return Res; 19330 } 19331 19332 // GCC allows "st(0)" to be called just plain "st". 19333 if (StringRef("{st}").equals_lower(Constraint)) { 19334 Res.first = X86::ST0; 19335 Res.second = &X86::RFP80RegClass; 19336 return Res; 19337 } 19338 19339 // flags -> EFLAGS 19340 if (StringRef("{flags}").equals_lower(Constraint)) { 19341 Res.first = X86::EFLAGS; 19342 Res.second = &X86::CCRRegClass; 19343 return Res; 19344 } 19345 19346 // 'A' means EAX + EDX. 19347 if (Constraint == "A") { 19348 Res.first = X86::EAX; 19349 Res.second = &X86::GR32_ADRegClass; 19350 return Res; 19351 } 19352 return Res; 19353 } 19354 19355 // Otherwise, check to see if this is a register class of the wrong value 19356 // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to 19357 // turn into {ax},{dx}. 19358 if (Res.second->hasType(VT)) 19359 return Res; // Correct type already, nothing to do. 19360 19361 // All of the single-register GCC register classes map their values onto 19362 // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". 
If we 19363 // really want an 8-bit or 32-bit register, map to the appropriate register 19364 // class and return the appropriate register. 19365 if (Res.second == &X86::GR16RegClass) { 19366 if (VT == MVT::i8 || VT == MVT::i1) { 19367 unsigned DestReg = 0; 19368 switch (Res.first) { 19369 default: break; 19370 case X86::AX: DestReg = X86::AL; break; 19371 case X86::DX: DestReg = X86::DL; break; 19372 case X86::CX: DestReg = X86::CL; break; 19373 case X86::BX: DestReg = X86::BL; break; 19374 } 19375 if (DestReg) { 19376 Res.first = DestReg; 19377 Res.second = &X86::GR8RegClass; 19378 } 19379 } else if (VT == MVT::i32 || VT == MVT::f32) { 19380 unsigned DestReg = 0; 19381 switch (Res.first) { 19382 default: break; 19383 case X86::AX: DestReg = X86::EAX; break; 19384 case X86::DX: DestReg = X86::EDX; break; 19385 case X86::CX: DestReg = X86::ECX; break; 19386 case X86::BX: DestReg = X86::EBX; break; 19387 case X86::SI: DestReg = X86::ESI; break; 19388 case X86::DI: DestReg = X86::EDI; break; 19389 case X86::BP: DestReg = X86::EBP; break; 19390 case X86::SP: DestReg = X86::ESP; break; 19391 } 19392 if (DestReg) { 19393 Res.first = DestReg; 19394 Res.second = &X86::GR32RegClass; 19395 } 19396 } else if (VT == MVT::i64 || VT == MVT::f64) { 19397 unsigned DestReg = 0; 19398 switch (Res.first) { 19399 default: break; 19400 case X86::AX: DestReg = X86::RAX; break; 19401 case X86::DX: DestReg = X86::RDX; break; 19402 case X86::CX: DestReg = X86::RCX; break; 19403 case X86::BX: DestReg = X86::RBX; break; 19404 case X86::SI: DestReg = X86::RSI; break; 19405 case X86::DI: DestReg = X86::RDI; break; 19406 case X86::BP: DestReg = X86::RBP; break; 19407 case X86::SP: DestReg = X86::RSP; break; 19408 } 19409 if (DestReg) { 19410 Res.first = DestReg; 19411 Res.second = &X86::GR64RegClass; 19412 } 19413 } 19414 } else if (Res.second == &X86::FR32RegClass || 19415 Res.second == &X86::FR64RegClass || 19416 Res.second == &X86::VR128RegClass || 19417 Res.second == &X86::VR256RegClass || 19418 Res.second == &X86::FR32XRegClass || 19419 Res.second == &X86::FR64XRegClass || 19420 Res.second == &X86::VR128XRegClass || 19421 Res.second == &X86::VR256XRegClass || 19422 Res.second == &X86::VR512RegClass) { 19423 // Handle references to XMM physical registers that got mapped into the 19424 // wrong class. This can happen with constraints like {xmm0} where the 19425 // target independent register mapper will just pick the first match it can 19426 // find, ignoring the required type. 19427 19428 if (VT == MVT::f32 || VT == MVT::i32) 19429 Res.second = &X86::FR32RegClass; 19430 else if (VT == MVT::f64 || VT == MVT::i64) 19431 Res.second = &X86::FR64RegClass; 19432 else if (X86::VR128RegClass.hasType(VT)) 19433 Res.second = &X86::VR128RegClass; 19434 else if (X86::VR256RegClass.hasType(VT)) 19435 Res.second = &X86::VR256RegClass; 19436 else if (X86::VR512RegClass.hasType(VT)) 19437 Res.second = &X86::VR512RegClass; 19438 } 19439 19440 return Res; 19441} 19442
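// As an illustration of the remapping above: an inline-asm operand written
// against "{ax}" but carrying an i32 value is first resolved to AX in
// GR16RegClass by the generic lookup, and the fix-up code then substitutes
// EAX / GR32RegClass so the value is materialized at the right width.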