X86ISelLowering.cpp revision 60ef6c9295844038406c6083b67ea24b3646f2dd
//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "x86-isel"
#include "X86ISelLowering.h"
#include "Utils/X86ShuffleDecode.h"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/VariadicFunction.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include <bitset>
#include <cctype>
using namespace llvm;

STATISTIC(NumTailCalls, "Number of tail calls");

// Forward declarations.
static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
                       SDValue V2);

/// Generate a DAG to grab 128 bits from a vector > 128 bits.  This
/// sets things up to match to an AVX VEXTRACTF128 instruction or a
/// simple subregister reference.  Idx is an index in the 128 bits we
/// want.  It need not be aligned to a 128-bit boundary.  That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, DebugLoc dl) {
  EVT VT = Vec.getValueType();
  assert(VT.is256BitVector() && "Unexpected vector size!");
  EVT ElVT = VT.getVectorElementType();
  unsigned Factor = VT.getSizeInBits()/128;
  EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
                                  VT.getVectorNumElements()/Factor);

  // Extract from UNDEF is UNDEF.
  if (Vec.getOpcode() == ISD::UNDEF)
    return DAG.getUNDEF(ResultVT);

  // Extract the relevant 128 bits.  Generate an EXTRACT_SUBVECTOR
  // we can match to VEXTRACTF128.
  unsigned ElemsPerChunk = 128 / ElVT.getSizeInBits();

  // This is the index of the first element of the 128-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / 128)
                               * ElemsPerChunk);
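  // (Worked example, added for clarity: extracting from a v8i32 with
  // IdxVal == 5 gives ElemsPerChunk == 4 and
  // NormalizedIdxVal == ((5 * 32) / 128) * 4 == 4, i.e. the chunk holding
  // elements 4-7 -- the upper 128-bit half.)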

  // If the input is a buildvector just emit a smaller one.
  if (Vec.getOpcode() == ISD::BUILD_VECTOR)
    return DAG.getNode(ISD::BUILD_VECTOR, dl, ResultVT,
                       Vec->op_begin()+NormalizedIdxVal, ElemsPerChunk);

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  SDValue Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec,
                               VecIdx);

  return Result;
}

/// Generate a DAG to put 128 bits into a vector > 128 bits.  This
/// sets things up to match to an AVX VINSERTF128 instruction or a
/// simple superregister reference.  Idx is an index in the 128 bits
/// we want.  It need not be aligned to a 128-bit boundary.  That makes
/// lowering INSERT_VECTOR_ELT operations easier.
static SDValue Insert128BitVector(SDValue Result, SDValue Vec,
                                  unsigned IdxVal, SelectionDAG &DAG,
                                  DebugLoc dl) {
  // Inserting UNDEF is Result
  if (Vec.getOpcode() == ISD::UNDEF)
    return Result;

  EVT VT = Vec.getValueType();
  assert(VT.is128BitVector() && "Unexpected vector size!");

  EVT ElVT = VT.getVectorElementType();
  EVT ResultVT = Result.getValueType();

  // Insert the relevant 128 bits.
  unsigned ElemsPerChunk = 128/ElVT.getSizeInBits();

  // This is the index of the first element of the 128-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/128)
                               * ElemsPerChunk);

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec,
                     VecIdx);
}

/// Concat two 128-bit vectors into a 256-bit vector using VINSERTF128
/// instructions. This is used because creating CONCAT_VECTOR nodes of
/// BUILD_VECTORS returns a larger BUILD_VECTOR while we're trying to lower
/// large BUILD_VECTORS.
static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   DebugLoc dl) {
  SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert128BitVector(V, V2, NumElems/2, DAG, dl);
}

static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  bool is64Bit = Subtarget->is64Bit();

  if (Subtarget->isTargetEnvMacho()) {
    if (is64Bit)
      return new X86_64MachoTargetObjectFile();
    return new TargetLoweringObjectFileMachO();
  }

  if (Subtarget->isTargetLinux())
    return new X86LinuxTargetObjectFile();
  if (Subtarget->isTargetELF())
    return new TargetLoweringObjectFileELF();
  if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho())
    return new TargetLoweringObjectFileCOFF();
  llvm_unreachable("unknown subtarget type");
}

X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
  : TargetLowering(TM, createTLOF(TM)) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  RegInfo = TM.getRegisterInfo();
  TD = getDataLayout();

  resetOperationActions();
}

void X86TargetLowering::resetOperationActions() {
  const TargetMachine &TM = getTargetMachine();
  static bool FirstTimeThrough = true;

  // If none of the target options have changed, then we don't need to reset
  // the operation actions.
  if (!FirstTimeThrough && TO == TM.Options) return;

  if (!FirstTimeThrough) {
    // Reinitialize the actions.
182 initActions(); 183 FirstTimeThrough = false; 184 } 185 186 TO = TM.Options; 187 188 // Set up the TargetLowering object. 189 static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }; 190 191 // X86 is weird, it always uses i8 for shift amounts and setcc results. 192 setBooleanContents(ZeroOrOneBooleanContent); 193 // X86-SSE is even stranger. It uses -1 or 0 for vector masks. 194 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); 195 196 // For 64-bit since we have so many registers use the ILP scheduler, for 197 // 32-bit code use the register pressure specific scheduling. 198 // For Atom, always use ILP scheduling. 199 if (Subtarget->isAtom()) 200 setSchedulingPreference(Sched::ILP); 201 else if (Subtarget->is64Bit()) 202 setSchedulingPreference(Sched::ILP); 203 else 204 setSchedulingPreference(Sched::RegPressure); 205 setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister()); 206 207 // Bypass expensive divides on Atom when compiling with O2 208 if (Subtarget->hasSlowDivide() && TM.getOptLevel() >= CodeGenOpt::Default) { 209 addBypassSlowDiv(32, 8); 210 if (Subtarget->is64Bit()) 211 addBypassSlowDiv(64, 16); 212 } 213 214 if (Subtarget->isTargetWindows() && !Subtarget->isTargetCygMing()) { 215 // Setup Windows compiler runtime calls. 216 setLibcallName(RTLIB::SDIV_I64, "_alldiv"); 217 setLibcallName(RTLIB::UDIV_I64, "_aulldiv"); 218 setLibcallName(RTLIB::SREM_I64, "_allrem"); 219 setLibcallName(RTLIB::UREM_I64, "_aullrem"); 220 setLibcallName(RTLIB::MUL_I64, "_allmul"); 221 setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall); 222 setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall); 223 setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall); 224 setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall); 225 setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall); 226 227 // The _ftol2 runtime function has an unusual calling conv, which 228 // is modeled by a special pseudo-instruction. 229 setLibcallName(RTLIB::FPTOUINT_F64_I64, 0); 230 setLibcallName(RTLIB::FPTOUINT_F32_I64, 0); 231 setLibcallName(RTLIB::FPTOUINT_F64_I32, 0); 232 setLibcallName(RTLIB::FPTOUINT_F32_I32, 0); 233 } 234 235 if (Subtarget->isTargetDarwin()) { 236 // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp. 237 setUseUnderscoreSetJmp(false); 238 setUseUnderscoreLongJmp(false); 239 } else if (Subtarget->isTargetMingw()) { 240 // MS runtime is weird: it exports _setjmp, but longjmp! 241 setUseUnderscoreSetJmp(true); 242 setUseUnderscoreLongJmp(false); 243 } else { 244 setUseUnderscoreSetJmp(true); 245 setUseUnderscoreLongJmp(true); 246 } 247 248 // Set up the register classes. 249 addRegisterClass(MVT::i8, &X86::GR8RegClass); 250 addRegisterClass(MVT::i16, &X86::GR16RegClass); 251 addRegisterClass(MVT::i32, &X86::GR32RegClass); 252 if (Subtarget->is64Bit()) 253 addRegisterClass(MVT::i64, &X86::GR64RegClass); 254 255 setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote); 256 257 // We don't accept any truncstore of integer registers. 258 setTruncStoreAction(MVT::i64, MVT::i32, Expand); 259 setTruncStoreAction(MVT::i64, MVT::i16, Expand); 260 setTruncStoreAction(MVT::i64, MVT::i8 , Expand); 261 setTruncStoreAction(MVT::i32, MVT::i16, Expand); 262 setTruncStoreAction(MVT::i32, MVT::i8 , Expand); 263 setTruncStoreAction(MVT::i16, MVT::i8, Expand); 264 265 // SETOEQ and SETUNE require checking two conditions. 
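  // (After a ucomiss/comiss-style compare, "equal" sets ZF and "unordered"
  // sets ZF, PF and CF, so OEQ needs ZF set with PF clear and UNE needs ZF
  // clear or PF set; no single x86 condition code tests both flags, hence
  // the Expand.)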
266 setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand); 267 setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand); 268 setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand); 269 setCondCodeAction(ISD::SETUNE, MVT::f32, Expand); 270 setCondCodeAction(ISD::SETUNE, MVT::f64, Expand); 271 setCondCodeAction(ISD::SETUNE, MVT::f80, Expand); 272 273 // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this 274 // operation. 275 setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote); 276 setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote); 277 setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote); 278 279 if (Subtarget->is64Bit()) { 280 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote); 281 setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom); 282 } else if (!TM.Options.UseSoftFloat) { 283 // We have an algorithm for SSE2->double, and we turn this into a 284 // 64-bit FILD followed by conditional FADD for other targets. 285 setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom); 286 // We have an algorithm for SSE2, and we turn this into a 64-bit 287 // FILD for other targets. 288 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Custom); 289 } 290 291 // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have 292 // this operation. 293 setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote); 294 setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote); 295 296 if (!TM.Options.UseSoftFloat) { 297 // SSE has no i16 to fp conversion, only i32 298 if (X86ScalarSSEf32) { 299 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote); 300 // f32 and f64 cases are Legal, f80 case is not 301 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom); 302 } else { 303 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Custom); 304 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom); 305 } 306 } else { 307 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote); 308 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Promote); 309 } 310 311 // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64 312 // are Legal, f80 is custom lowered. 313 setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom); 314 setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom); 315 316 // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTS's, as X86 doesn't have 317 // this operation. 318 setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote); 319 setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote); 320 321 if (X86ScalarSSEf32) { 322 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote); 323 // f32 and f64 cases are Legal, f80 case is not 324 setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom); 325 } else { 326 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Custom); 327 setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom); 328 } 329 330 // Handle FP_TO_UINT by promoting the destination to a larger signed 331 // conversion. 332 setOperationAction(ISD::FP_TO_UINT , MVT::i1 , Promote); 333 setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote); 334 setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote); 335 336 if (Subtarget->is64Bit()) { 337 setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Expand); 338 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote); 339 } else if (!TM.Options.UseSoftFloat) { 340 // Since AVX is a superset of SSE3, only check for SSE here. 341 if (Subtarget->hasSSE1() && !Subtarget->hasSSE3()) 342 // Expand FP_TO_UINT into a select. 343 // FIXME: We would like to use a Custom expander here eventually to do 344 // the optimal thing for SSE vs. 
the default expansion in the legalizer. 345 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Expand); 346 else 347 // With SSE3 we can use fisttpll to convert to a signed i64; without 348 // SSE, we're stuck with a fistpll. 349 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Custom); 350 } 351 352 if (isTargetFTOL()) { 353 // Use the _ftol2 runtime function, which has a pseudo-instruction 354 // to handle its weird calling convention. 355 setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Custom); 356 } 357 358 // TODO: when we have SSE, these could be more efficient, by using movd/movq. 359 if (!X86ScalarSSEf64) { 360 setOperationAction(ISD::BITCAST , MVT::f32 , Expand); 361 setOperationAction(ISD::BITCAST , MVT::i32 , Expand); 362 if (Subtarget->is64Bit()) { 363 setOperationAction(ISD::BITCAST , MVT::f64 , Expand); 364 // Without SSE, i64->f64 goes through memory. 365 setOperationAction(ISD::BITCAST , MVT::i64 , Expand); 366 } 367 } 368 369 // Scalar integer divide and remainder are lowered to use operations that 370 // produce two results, to match the available instructions. This exposes 371 // the two-result form to trivial CSE, which is able to combine x/y and x%y 372 // into a single instruction. 373 // 374 // Scalar integer multiply-high is also lowered to use two-result 375 // operations, to match the available instructions. However, plain multiply 376 // (low) operations are left as Legal, as there are single-result 377 // instructions for this in x86. Using the two-result multiply instructions 378 // when both high and low results are needed must be arranged by dagcombine. 379 for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) { 380 MVT VT = IntVTs[i]; 381 setOperationAction(ISD::MULHS, VT, Expand); 382 setOperationAction(ISD::MULHU, VT, Expand); 383 setOperationAction(ISD::SDIV, VT, Expand); 384 setOperationAction(ISD::UDIV, VT, Expand); 385 setOperationAction(ISD::SREM, VT, Expand); 386 setOperationAction(ISD::UREM, VT, Expand); 387 388 // Add/Sub overflow ops with MVT::Glues are lowered to EFLAGS dependences. 389 setOperationAction(ISD::ADDC, VT, Custom); 390 setOperationAction(ISD::ADDE, VT, Custom); 391 setOperationAction(ISD::SUBC, VT, Custom); 392 setOperationAction(ISD::SUBE, VT, Custom); 393 } 394 395 setOperationAction(ISD::BR_JT , MVT::Other, Expand); 396 setOperationAction(ISD::BRCOND , MVT::Other, Custom); 397 setOperationAction(ISD::BR_CC , MVT::f32, Expand); 398 setOperationAction(ISD::BR_CC , MVT::f64, Expand); 399 setOperationAction(ISD::BR_CC , MVT::f80, Expand); 400 setOperationAction(ISD::BR_CC , MVT::i8, Expand); 401 setOperationAction(ISD::BR_CC , MVT::i16, Expand); 402 setOperationAction(ISD::BR_CC , MVT::i32, Expand); 403 setOperationAction(ISD::BR_CC , MVT::i64, Expand); 404 setOperationAction(ISD::SELECT_CC , MVT::Other, Expand); 405 if (Subtarget->is64Bit()) 406 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal); 407 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Legal); 408 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal); 409 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand); 410 setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand); 411 setOperationAction(ISD::FREM , MVT::f32 , Expand); 412 setOperationAction(ISD::FREM , MVT::f64 , Expand); 413 setOperationAction(ISD::FREM , MVT::f80 , Expand); 414 setOperationAction(ISD::FLT_ROUNDS_ , MVT::i32 , Custom); 415 416 // Promote the i8 variants and force them on up to i32 which has a shorter 417 // encoding. 
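  // (BSF/TZCNT have no 8-bit form, and the 16-bit form needs an extra
  // operand-size prefix, so the 32-bit encoding is the shortest usable one.)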
418 setOperationAction(ISD::CTTZ , MVT::i8 , Promote); 419 AddPromotedToType (ISD::CTTZ , MVT::i8 , MVT::i32); 420 setOperationAction(ISD::CTTZ_ZERO_UNDEF , MVT::i8 , Promote); 421 AddPromotedToType (ISD::CTTZ_ZERO_UNDEF , MVT::i8 , MVT::i32); 422 if (Subtarget->hasBMI()) { 423 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16 , Expand); 424 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32 , Expand); 425 if (Subtarget->is64Bit()) 426 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand); 427 } else { 428 setOperationAction(ISD::CTTZ , MVT::i16 , Custom); 429 setOperationAction(ISD::CTTZ , MVT::i32 , Custom); 430 if (Subtarget->is64Bit()) 431 setOperationAction(ISD::CTTZ , MVT::i64 , Custom); 432 } 433 434 if (Subtarget->hasLZCNT()) { 435 // When promoting the i8 variants, force them to i32 for a shorter 436 // encoding. 437 setOperationAction(ISD::CTLZ , MVT::i8 , Promote); 438 AddPromotedToType (ISD::CTLZ , MVT::i8 , MVT::i32); 439 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Promote); 440 AddPromotedToType (ISD::CTLZ_ZERO_UNDEF, MVT::i8 , MVT::i32); 441 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Expand); 442 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Expand); 443 if (Subtarget->is64Bit()) 444 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand); 445 } else { 446 setOperationAction(ISD::CTLZ , MVT::i8 , Custom); 447 setOperationAction(ISD::CTLZ , MVT::i16 , Custom); 448 setOperationAction(ISD::CTLZ , MVT::i32 , Custom); 449 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Custom); 450 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Custom); 451 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Custom); 452 if (Subtarget->is64Bit()) { 453 setOperationAction(ISD::CTLZ , MVT::i64 , Custom); 454 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom); 455 } 456 } 457 458 if (Subtarget->hasPOPCNT()) { 459 setOperationAction(ISD::CTPOP , MVT::i8 , Promote); 460 } else { 461 setOperationAction(ISD::CTPOP , MVT::i8 , Expand); 462 setOperationAction(ISD::CTPOP , MVT::i16 , Expand); 463 setOperationAction(ISD::CTPOP , MVT::i32 , Expand); 464 if (Subtarget->is64Bit()) 465 setOperationAction(ISD::CTPOP , MVT::i64 , Expand); 466 } 467 468 setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom); 469 setOperationAction(ISD::BSWAP , MVT::i16 , Expand); 470 471 // These should be promoted to a larger select which is supported. 472 setOperationAction(ISD::SELECT , MVT::i1 , Promote); 473 // X86 wants to expand cmov itself. 
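  // (Roughly: custom lowering turns these selects into X86ISD::CMOV nodes,
  // which are matched to the CMOVcc instructions where possible and otherwise
  // expanded into branch sequences after instruction selection.)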
474 setOperationAction(ISD::SELECT , MVT::i8 , Custom); 475 setOperationAction(ISD::SELECT , MVT::i16 , Custom); 476 setOperationAction(ISD::SELECT , MVT::i32 , Custom); 477 setOperationAction(ISD::SELECT , MVT::f32 , Custom); 478 setOperationAction(ISD::SELECT , MVT::f64 , Custom); 479 setOperationAction(ISD::SELECT , MVT::f80 , Custom); 480 setOperationAction(ISD::SETCC , MVT::i8 , Custom); 481 setOperationAction(ISD::SETCC , MVT::i16 , Custom); 482 setOperationAction(ISD::SETCC , MVT::i32 , Custom); 483 setOperationAction(ISD::SETCC , MVT::f32 , Custom); 484 setOperationAction(ISD::SETCC , MVT::f64 , Custom); 485 setOperationAction(ISD::SETCC , MVT::f80 , Custom); 486 if (Subtarget->is64Bit()) { 487 setOperationAction(ISD::SELECT , MVT::i64 , Custom); 488 setOperationAction(ISD::SETCC , MVT::i64 , Custom); 489 } 490 setOperationAction(ISD::EH_RETURN , MVT::Other, Custom); 491 // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support 492 // SjLj exception handling but a light-weight setjmp/longjmp replacement to 493 // support continuation, user-level threading, and etc.. As a result, no 494 // other SjLj exception interfaces are implemented and please don't build 495 // your own exception handling based on them. 496 // LLVM/Clang supports zero-cost DWARF exception handling. 497 setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom); 498 setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom); 499 500 // Darwin ABI issue. 501 setOperationAction(ISD::ConstantPool , MVT::i32 , Custom); 502 setOperationAction(ISD::JumpTable , MVT::i32 , Custom); 503 setOperationAction(ISD::GlobalAddress , MVT::i32 , Custom); 504 setOperationAction(ISD::GlobalTLSAddress, MVT::i32 , Custom); 505 if (Subtarget->is64Bit()) 506 setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom); 507 setOperationAction(ISD::ExternalSymbol , MVT::i32 , Custom); 508 setOperationAction(ISD::BlockAddress , MVT::i32 , Custom); 509 if (Subtarget->is64Bit()) { 510 setOperationAction(ISD::ConstantPool , MVT::i64 , Custom); 511 setOperationAction(ISD::JumpTable , MVT::i64 , Custom); 512 setOperationAction(ISD::GlobalAddress , MVT::i64 , Custom); 513 setOperationAction(ISD::ExternalSymbol, MVT::i64 , Custom); 514 setOperationAction(ISD::BlockAddress , MVT::i64 , Custom); 515 } 516 // 64-bit addm sub, shl, sra, srl (iff 32-bit x86) 517 setOperationAction(ISD::SHL_PARTS , MVT::i32 , Custom); 518 setOperationAction(ISD::SRA_PARTS , MVT::i32 , Custom); 519 setOperationAction(ISD::SRL_PARTS , MVT::i32 , Custom); 520 if (Subtarget->is64Bit()) { 521 setOperationAction(ISD::SHL_PARTS , MVT::i64 , Custom); 522 setOperationAction(ISD::SRA_PARTS , MVT::i64 , Custom); 523 setOperationAction(ISD::SRL_PARTS , MVT::i64 , Custom); 524 } 525 526 if (Subtarget->hasSSE1()) 527 setOperationAction(ISD::PREFETCH , MVT::Other, Legal); 528 529 setOperationAction(ISD::ATOMIC_FENCE , MVT::Other, Custom); 530 531 // Expand certain atomics 532 for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) { 533 MVT VT = IntVTs[i]; 534 setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Custom); 535 setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom); 536 setOperationAction(ISD::ATOMIC_STORE, VT, Custom); 537 } 538 539 if (!Subtarget->is64Bit()) { 540 setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom); 541 setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom); 542 setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom); 543 setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom); 544 setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, 
Custom); 545 setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Custom); 546 setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Custom); 547 setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Custom); 548 setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i64, Custom); 549 setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i64, Custom); 550 setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i64, Custom); 551 setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i64, Custom); 552 } 553 554 if (Subtarget->hasCmpxchg16b()) { 555 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i128, Custom); 556 } 557 558 // FIXME - use subtarget debug flags 559 if (!Subtarget->isTargetDarwin() && 560 !Subtarget->isTargetELF() && 561 !Subtarget->isTargetCygMing()) { 562 setOperationAction(ISD::EH_LABEL, MVT::Other, Expand); 563 } 564 565 setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand); 566 setOperationAction(ISD::EHSELECTION, MVT::i64, Expand); 567 setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand); 568 setOperationAction(ISD::EHSELECTION, MVT::i32, Expand); 569 if (Subtarget->is64Bit()) { 570 setExceptionPointerRegister(X86::RAX); 571 setExceptionSelectorRegister(X86::RDX); 572 } else { 573 setExceptionPointerRegister(X86::EAX); 574 setExceptionSelectorRegister(X86::EDX); 575 } 576 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom); 577 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom); 578 579 setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom); 580 setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom); 581 582 setOperationAction(ISD::TRAP, MVT::Other, Legal); 583 setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal); 584 585 // VASTART needs to be custom lowered to use the VarArgsFrameIndex 586 setOperationAction(ISD::VASTART , MVT::Other, Custom); 587 setOperationAction(ISD::VAEND , MVT::Other, Expand); 588 if (Subtarget->is64Bit()) { 589 setOperationAction(ISD::VAARG , MVT::Other, Custom); 590 setOperationAction(ISD::VACOPY , MVT::Other, Custom); 591 } else { 592 setOperationAction(ISD::VAARG , MVT::Other, Expand); 593 setOperationAction(ISD::VACOPY , MVT::Other, Expand); 594 } 595 596 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); 597 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); 598 599 if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho()) 600 setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ? 601 MVT::i64 : MVT::i32, Custom); 602 else if (TM.Options.EnableSegmentedStacks) 603 setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ? 604 MVT::i64 : MVT::i32, Custom); 605 else 606 setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ? 607 MVT::i64 : MVT::i32, Expand); 608 609 if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) { 610 // f32 and f64 use SSE. 611 // Set up the FP register classes. 612 addRegisterClass(MVT::f32, &X86::FR32RegClass); 613 addRegisterClass(MVT::f64, &X86::FR64RegClass); 614 615 // Use ANDPD to simulate FABS. 616 setOperationAction(ISD::FABS , MVT::f64, Custom); 617 setOperationAction(ISD::FABS , MVT::f32, Custom); 618 619 // Use XORP to simulate FNEG. 620 setOperationAction(ISD::FNEG , MVT::f64, Custom); 621 setOperationAction(ISD::FNEG , MVT::f32, Custom); 622 623 // Use ANDPD and ORPD to simulate FCOPYSIGN. 624 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom); 625 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); 626 627 // Lower this to FGETSIGNx86 plus an AND. 
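    // (FGETSIGNx86 is matched to a MOVMSK-style instruction that copies the
    // sign bit out of the XMM register; the AND then masks the result down to
    // bit 0.)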
628 setOperationAction(ISD::FGETSIGN, MVT::i64, Custom); 629 setOperationAction(ISD::FGETSIGN, MVT::i32, Custom); 630 631 // We don't support sin/cos/fmod 632 setOperationAction(ISD::FSIN , MVT::f64, Expand); 633 setOperationAction(ISD::FCOS , MVT::f64, Expand); 634 setOperationAction(ISD::FSINCOS, MVT::f64, Expand); 635 setOperationAction(ISD::FSIN , MVT::f32, Expand); 636 setOperationAction(ISD::FCOS , MVT::f32, Expand); 637 setOperationAction(ISD::FSINCOS, MVT::f32, Expand); 638 639 // Expand FP immediates into loads from the stack, except for the special 640 // cases we handle. 641 addLegalFPImmediate(APFloat(+0.0)); // xorpd 642 addLegalFPImmediate(APFloat(+0.0f)); // xorps 643 } else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) { 644 // Use SSE for f32, x87 for f64. 645 // Set up the FP register classes. 646 addRegisterClass(MVT::f32, &X86::FR32RegClass); 647 addRegisterClass(MVT::f64, &X86::RFP64RegClass); 648 649 // Use ANDPS to simulate FABS. 650 setOperationAction(ISD::FABS , MVT::f32, Custom); 651 652 // Use XORP to simulate FNEG. 653 setOperationAction(ISD::FNEG , MVT::f32, Custom); 654 655 setOperationAction(ISD::UNDEF, MVT::f64, Expand); 656 657 // Use ANDPS and ORPS to simulate FCOPYSIGN. 658 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 659 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); 660 661 // We don't support sin/cos/fmod 662 setOperationAction(ISD::FSIN , MVT::f32, Expand); 663 setOperationAction(ISD::FCOS , MVT::f32, Expand); 664 setOperationAction(ISD::FSINCOS, MVT::f32, Expand); 665 666 // Special cases we handle for FP constants. 667 addLegalFPImmediate(APFloat(+0.0f)); // xorps 668 addLegalFPImmediate(APFloat(+0.0)); // FLD0 669 addLegalFPImmediate(APFloat(+1.0)); // FLD1 670 addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS 671 addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS 672 673 if (!TM.Options.UnsafeFPMath) { 674 setOperationAction(ISD::FSIN , MVT::f64, Expand); 675 setOperationAction(ISD::FCOS , MVT::f64, Expand); 676 setOperationAction(ISD::FSINCOS, MVT::f64, Expand); 677 } 678 } else if (!TM.Options.UseSoftFloat) { 679 // f32 and f64 in x87. 680 // Set up the FP register classes. 681 addRegisterClass(MVT::f64, &X86::RFP64RegClass); 682 addRegisterClass(MVT::f32, &X86::RFP32RegClass); 683 684 setOperationAction(ISD::UNDEF, MVT::f64, Expand); 685 setOperationAction(ISD::UNDEF, MVT::f32, Expand); 686 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 687 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); 688 689 if (!TM.Options.UnsafeFPMath) { 690 setOperationAction(ISD::FSIN , MVT::f64, Expand); 691 setOperationAction(ISD::FSIN , MVT::f32, Expand); 692 setOperationAction(ISD::FCOS , MVT::f64, Expand); 693 setOperationAction(ISD::FCOS , MVT::f32, Expand); 694 setOperationAction(ISD::FSINCOS, MVT::f64, Expand); 695 setOperationAction(ISD::FSINCOS, MVT::f32, Expand); 696 } 697 addLegalFPImmediate(APFloat(+0.0)); // FLD0 698 addLegalFPImmediate(APFloat(+1.0)); // FLD1 699 addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS 700 addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS 701 addLegalFPImmediate(APFloat(+0.0f)); // FLD0 702 addLegalFPImmediate(APFloat(+1.0f)); // FLD1 703 addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS 704 addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS 705 } 706 707 // We don't support FMA. 708 setOperationAction(ISD::FMA, MVT::f64, Expand); 709 setOperationAction(ISD::FMA, MVT::f32, Expand); 710 711 // Long double always uses X87. 
712 if (!TM.Options.UseSoftFloat) { 713 addRegisterClass(MVT::f80, &X86::RFP80RegClass); 714 setOperationAction(ISD::UNDEF, MVT::f80, Expand); 715 setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand); 716 { 717 APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended); 718 addLegalFPImmediate(TmpFlt); // FLD0 719 TmpFlt.changeSign(); 720 addLegalFPImmediate(TmpFlt); // FLD0/FCHS 721 722 bool ignored; 723 APFloat TmpFlt2(+1.0); 724 TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven, 725 &ignored); 726 addLegalFPImmediate(TmpFlt2); // FLD1 727 TmpFlt2.changeSign(); 728 addLegalFPImmediate(TmpFlt2); // FLD1/FCHS 729 } 730 731 if (!TM.Options.UnsafeFPMath) { 732 setOperationAction(ISD::FSIN , MVT::f80, Expand); 733 setOperationAction(ISD::FCOS , MVT::f80, Expand); 734 setOperationAction(ISD::FSINCOS, MVT::f80, Expand); 735 } 736 737 setOperationAction(ISD::FFLOOR, MVT::f80, Expand); 738 setOperationAction(ISD::FCEIL, MVT::f80, Expand); 739 setOperationAction(ISD::FTRUNC, MVT::f80, Expand); 740 setOperationAction(ISD::FRINT, MVT::f80, Expand); 741 setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand); 742 setOperationAction(ISD::FMA, MVT::f80, Expand); 743 } 744 745 // Always use a library call for pow. 746 setOperationAction(ISD::FPOW , MVT::f32 , Expand); 747 setOperationAction(ISD::FPOW , MVT::f64 , Expand); 748 setOperationAction(ISD::FPOW , MVT::f80 , Expand); 749 750 setOperationAction(ISD::FLOG, MVT::f80, Expand); 751 setOperationAction(ISD::FLOG2, MVT::f80, Expand); 752 setOperationAction(ISD::FLOG10, MVT::f80, Expand); 753 setOperationAction(ISD::FEXP, MVT::f80, Expand); 754 setOperationAction(ISD::FEXP2, MVT::f80, Expand); 755 756 // First set operation action for all vector types to either promote 757 // (for widening) or expand (for scalarization). Then we will selectively 758 // turn on ones that can be effectively codegen'd. 
759 for (int i = MVT::FIRST_VECTOR_VALUETYPE; 760 i <= MVT::LAST_VECTOR_VALUETYPE; ++i) { 761 MVT VT = (MVT::SimpleValueType)i; 762 setOperationAction(ISD::ADD , VT, Expand); 763 setOperationAction(ISD::SUB , VT, Expand); 764 setOperationAction(ISD::FADD, VT, Expand); 765 setOperationAction(ISD::FNEG, VT, Expand); 766 setOperationAction(ISD::FSUB, VT, Expand); 767 setOperationAction(ISD::MUL , VT, Expand); 768 setOperationAction(ISD::FMUL, VT, Expand); 769 setOperationAction(ISD::SDIV, VT, Expand); 770 setOperationAction(ISD::UDIV, VT, Expand); 771 setOperationAction(ISD::FDIV, VT, Expand); 772 setOperationAction(ISD::SREM, VT, Expand); 773 setOperationAction(ISD::UREM, VT, Expand); 774 setOperationAction(ISD::LOAD, VT, Expand); 775 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand); 776 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT,Expand); 777 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand); 778 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT,Expand); 779 setOperationAction(ISD::INSERT_SUBVECTOR, VT,Expand); 780 setOperationAction(ISD::FABS, VT, Expand); 781 setOperationAction(ISD::FSIN, VT, Expand); 782 setOperationAction(ISD::FSINCOS, VT, Expand); 783 setOperationAction(ISD::FCOS, VT, Expand); 784 setOperationAction(ISD::FSINCOS, VT, Expand); 785 setOperationAction(ISD::FREM, VT, Expand); 786 setOperationAction(ISD::FMA, VT, Expand); 787 setOperationAction(ISD::FPOWI, VT, Expand); 788 setOperationAction(ISD::FSQRT, VT, Expand); 789 setOperationAction(ISD::FCOPYSIGN, VT, Expand); 790 setOperationAction(ISD::FFLOOR, VT, Expand); 791 setOperationAction(ISD::FCEIL, VT, Expand); 792 setOperationAction(ISD::FTRUNC, VT, Expand); 793 setOperationAction(ISD::FRINT, VT, Expand); 794 setOperationAction(ISD::FNEARBYINT, VT, Expand); 795 setOperationAction(ISD::SMUL_LOHI, VT, Expand); 796 setOperationAction(ISD::UMUL_LOHI, VT, Expand); 797 setOperationAction(ISD::SDIVREM, VT, Expand); 798 setOperationAction(ISD::UDIVREM, VT, Expand); 799 setOperationAction(ISD::FPOW, VT, Expand); 800 setOperationAction(ISD::CTPOP, VT, Expand); 801 setOperationAction(ISD::CTTZ, VT, Expand); 802 setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand); 803 setOperationAction(ISD::CTLZ, VT, Expand); 804 setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand); 805 setOperationAction(ISD::SHL, VT, Expand); 806 setOperationAction(ISD::SRA, VT, Expand); 807 setOperationAction(ISD::SRL, VT, Expand); 808 setOperationAction(ISD::ROTL, VT, Expand); 809 setOperationAction(ISD::ROTR, VT, Expand); 810 setOperationAction(ISD::BSWAP, VT, Expand); 811 setOperationAction(ISD::SETCC, VT, Expand); 812 setOperationAction(ISD::FLOG, VT, Expand); 813 setOperationAction(ISD::FLOG2, VT, Expand); 814 setOperationAction(ISD::FLOG10, VT, Expand); 815 setOperationAction(ISD::FEXP, VT, Expand); 816 setOperationAction(ISD::FEXP2, VT, Expand); 817 setOperationAction(ISD::FP_TO_UINT, VT, Expand); 818 setOperationAction(ISD::FP_TO_SINT, VT, Expand); 819 setOperationAction(ISD::UINT_TO_FP, VT, Expand); 820 setOperationAction(ISD::SINT_TO_FP, VT, Expand); 821 setOperationAction(ISD::SIGN_EXTEND_INREG, VT,Expand); 822 setOperationAction(ISD::TRUNCATE, VT, Expand); 823 setOperationAction(ISD::SIGN_EXTEND, VT, Expand); 824 setOperationAction(ISD::ZERO_EXTEND, VT, Expand); 825 setOperationAction(ISD::ANY_EXTEND, VT, Expand); 826 setOperationAction(ISD::VSELECT, VT, Expand); 827 for (int InnerVT = MVT::FIRST_VECTOR_VALUETYPE; 828 InnerVT <= MVT::LAST_VECTOR_VALUETYPE; ++InnerVT) 829 setTruncStoreAction(VT, 830 (MVT::SimpleValueType)InnerVT, Expand); 831 
setLoadExtAction(ISD::SEXTLOAD, VT, Expand); 832 setLoadExtAction(ISD::ZEXTLOAD, VT, Expand); 833 setLoadExtAction(ISD::EXTLOAD, VT, Expand); 834 } 835 836 // FIXME: In order to prevent SSE instructions being expanded to MMX ones 837 // with -msoft-float, disable use of MMX as well. 838 if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) { 839 addRegisterClass(MVT::x86mmx, &X86::VR64RegClass); 840 // No operations on x86mmx supported, everything uses intrinsics. 841 } 842 843 // MMX-sized vectors (other than x86mmx) are expected to be expanded 844 // into smaller operations. 845 setOperationAction(ISD::MULHS, MVT::v8i8, Expand); 846 setOperationAction(ISD::MULHS, MVT::v4i16, Expand); 847 setOperationAction(ISD::MULHS, MVT::v2i32, Expand); 848 setOperationAction(ISD::MULHS, MVT::v1i64, Expand); 849 setOperationAction(ISD::AND, MVT::v8i8, Expand); 850 setOperationAction(ISD::AND, MVT::v4i16, Expand); 851 setOperationAction(ISD::AND, MVT::v2i32, Expand); 852 setOperationAction(ISD::AND, MVT::v1i64, Expand); 853 setOperationAction(ISD::OR, MVT::v8i8, Expand); 854 setOperationAction(ISD::OR, MVT::v4i16, Expand); 855 setOperationAction(ISD::OR, MVT::v2i32, Expand); 856 setOperationAction(ISD::OR, MVT::v1i64, Expand); 857 setOperationAction(ISD::XOR, MVT::v8i8, Expand); 858 setOperationAction(ISD::XOR, MVT::v4i16, Expand); 859 setOperationAction(ISD::XOR, MVT::v2i32, Expand); 860 setOperationAction(ISD::XOR, MVT::v1i64, Expand); 861 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Expand); 862 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Expand); 863 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Expand); 864 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Expand); 865 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand); 866 setOperationAction(ISD::SELECT, MVT::v8i8, Expand); 867 setOperationAction(ISD::SELECT, MVT::v4i16, Expand); 868 setOperationAction(ISD::SELECT, MVT::v2i32, Expand); 869 setOperationAction(ISD::SELECT, MVT::v1i64, Expand); 870 setOperationAction(ISD::BITCAST, MVT::v8i8, Expand); 871 setOperationAction(ISD::BITCAST, MVT::v4i16, Expand); 872 setOperationAction(ISD::BITCAST, MVT::v2i32, Expand); 873 setOperationAction(ISD::BITCAST, MVT::v1i64, Expand); 874 875 if (!TM.Options.UseSoftFloat && Subtarget->hasSSE1()) { 876 addRegisterClass(MVT::v4f32, &X86::VR128RegClass); 877 878 setOperationAction(ISD::FADD, MVT::v4f32, Legal); 879 setOperationAction(ISD::FSUB, MVT::v4f32, Legal); 880 setOperationAction(ISD::FMUL, MVT::v4f32, Legal); 881 setOperationAction(ISD::FDIV, MVT::v4f32, Legal); 882 setOperationAction(ISD::FSQRT, MVT::v4f32, Legal); 883 setOperationAction(ISD::FNEG, MVT::v4f32, Custom); 884 setOperationAction(ISD::FABS, MVT::v4f32, Custom); 885 setOperationAction(ISD::LOAD, MVT::v4f32, Legal); 886 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom); 887 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom); 888 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom); 889 setOperationAction(ISD::SELECT, MVT::v4f32, Custom); 890 } 891 892 if (!TM.Options.UseSoftFloat && Subtarget->hasSSE2()) { 893 addRegisterClass(MVT::v2f64, &X86::VR128RegClass); 894 895 // FIXME: Unfortunately -soft-float and -no-implicit-float means XMM 896 // registers cannot be used even for integer operations. 
897 addRegisterClass(MVT::v16i8, &X86::VR128RegClass); 898 addRegisterClass(MVT::v8i16, &X86::VR128RegClass); 899 addRegisterClass(MVT::v4i32, &X86::VR128RegClass); 900 addRegisterClass(MVT::v2i64, &X86::VR128RegClass); 901 902 setOperationAction(ISD::ADD, MVT::v16i8, Legal); 903 setOperationAction(ISD::ADD, MVT::v8i16, Legal); 904 setOperationAction(ISD::ADD, MVT::v4i32, Legal); 905 setOperationAction(ISD::ADD, MVT::v2i64, Legal); 906 setOperationAction(ISD::MUL, MVT::v4i32, Custom); 907 setOperationAction(ISD::MUL, MVT::v2i64, Custom); 908 setOperationAction(ISD::SUB, MVT::v16i8, Legal); 909 setOperationAction(ISD::SUB, MVT::v8i16, Legal); 910 setOperationAction(ISD::SUB, MVT::v4i32, Legal); 911 setOperationAction(ISD::SUB, MVT::v2i64, Legal); 912 setOperationAction(ISD::MUL, MVT::v8i16, Legal); 913 setOperationAction(ISD::FADD, MVT::v2f64, Legal); 914 setOperationAction(ISD::FSUB, MVT::v2f64, Legal); 915 setOperationAction(ISD::FMUL, MVT::v2f64, Legal); 916 setOperationAction(ISD::FDIV, MVT::v2f64, Legal); 917 setOperationAction(ISD::FSQRT, MVT::v2f64, Legal); 918 setOperationAction(ISD::FNEG, MVT::v2f64, Custom); 919 setOperationAction(ISD::FABS, MVT::v2f64, Custom); 920 921 setOperationAction(ISD::SETCC, MVT::v2i64, Custom); 922 setOperationAction(ISD::SETCC, MVT::v16i8, Custom); 923 setOperationAction(ISD::SETCC, MVT::v8i16, Custom); 924 setOperationAction(ISD::SETCC, MVT::v4i32, Custom); 925 926 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom); 927 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom); 928 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom); 929 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom); 930 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom); 931 932 // Custom lower build_vector, vector_shuffle, and extract_vector_elt. 933 for (int i = MVT::v16i8; i != MVT::v2i64; ++i) { 934 MVT VT = (MVT::SimpleValueType)i; 935 // Do not attempt to custom lower non-power-of-2 vectors 936 if (!isPowerOf2_32(VT.getVectorNumElements())) 937 continue; 938 // Do not attempt to custom lower non-128-bit vectors 939 if (!VT.is128BitVector()) 940 continue; 941 setOperationAction(ISD::BUILD_VECTOR, VT, Custom); 942 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); 943 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); 944 } 945 946 setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom); 947 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom); 948 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom); 949 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom); 950 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom); 951 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom); 952 953 if (Subtarget->is64Bit()) { 954 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom); 955 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom); 956 } 957 958 // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64. 
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // Do not attempt to promote non-128-bit vectors
      if (!VT.is128BitVector())
        continue;

      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType (ISD::AND, VT, MVT::v2i64);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType (ISD::OR, VT, MVT::v2i64);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType (ISD::XOR, VT, MVT::v2i64);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType (ISD::LOAD, VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v2i64);
    }

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);

    setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    // As there is no 64-bit GPR available, we need to build a special custom
    // sequence to convert from v2i32 to v2f32.
    if (!Subtarget->is64Bit())
      setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);

    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);

    setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, Legal);
  }

  if (Subtarget->hasSSE41()) {
    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FRINT, MVT::f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);

    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);
    setOperationAction(ISD::VSELECT, MVT::v2i64, Legal);
    setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
    setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);

    // i8 and i16 vectors are custom because the source register and source
    // memory operand types are not the same width.  f32 vectors are
    // custom since the immediate controlling the insert encodes additional
    // information.
1038 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom); 1039 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom); 1040 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom); 1041 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom); 1042 1043 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom); 1044 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom); 1045 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom); 1046 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom); 1047 1048 // FIXME: these should be Legal but thats only for the case where 1049 // the index is constant. For now custom expand to deal with that. 1050 if (Subtarget->is64Bit()) { 1051 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom); 1052 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom); 1053 } 1054 } 1055 1056 if (Subtarget->hasSSE2()) { 1057 setOperationAction(ISD::SRL, MVT::v8i16, Custom); 1058 setOperationAction(ISD::SRL, MVT::v16i8, Custom); 1059 1060 setOperationAction(ISD::SHL, MVT::v8i16, Custom); 1061 setOperationAction(ISD::SHL, MVT::v16i8, Custom); 1062 1063 setOperationAction(ISD::SRA, MVT::v8i16, Custom); 1064 setOperationAction(ISD::SRA, MVT::v16i8, Custom); 1065 1066 // In the customized shift lowering, the legal cases in AVX2 will be 1067 // recognized. 1068 setOperationAction(ISD::SRL, MVT::v2i64, Custom); 1069 setOperationAction(ISD::SRL, MVT::v4i32, Custom); 1070 1071 setOperationAction(ISD::SHL, MVT::v2i64, Custom); 1072 setOperationAction(ISD::SHL, MVT::v4i32, Custom); 1073 1074 setOperationAction(ISD::SRA, MVT::v4i32, Custom); 1075 1076 setOperationAction(ISD::SDIV, MVT::v8i16, Custom); 1077 setOperationAction(ISD::SDIV, MVT::v4i32, Custom); 1078 } 1079 1080 if (!TM.Options.UseSoftFloat && Subtarget->hasFp256()) { 1081 addRegisterClass(MVT::v32i8, &X86::VR256RegClass); 1082 addRegisterClass(MVT::v16i16, &X86::VR256RegClass); 1083 addRegisterClass(MVT::v8i32, &X86::VR256RegClass); 1084 addRegisterClass(MVT::v8f32, &X86::VR256RegClass); 1085 addRegisterClass(MVT::v4i64, &X86::VR256RegClass); 1086 addRegisterClass(MVT::v4f64, &X86::VR256RegClass); 1087 1088 setOperationAction(ISD::LOAD, MVT::v8f32, Legal); 1089 setOperationAction(ISD::LOAD, MVT::v4f64, Legal); 1090 setOperationAction(ISD::LOAD, MVT::v4i64, Legal); 1091 1092 setOperationAction(ISD::FADD, MVT::v8f32, Legal); 1093 setOperationAction(ISD::FSUB, MVT::v8f32, Legal); 1094 setOperationAction(ISD::FMUL, MVT::v8f32, Legal); 1095 setOperationAction(ISD::FDIV, MVT::v8f32, Legal); 1096 setOperationAction(ISD::FSQRT, MVT::v8f32, Legal); 1097 setOperationAction(ISD::FFLOOR, MVT::v8f32, Legal); 1098 setOperationAction(ISD::FCEIL, MVT::v8f32, Legal); 1099 setOperationAction(ISD::FTRUNC, MVT::v8f32, Legal); 1100 setOperationAction(ISD::FRINT, MVT::v8f32, Legal); 1101 setOperationAction(ISD::FNEARBYINT, MVT::v8f32, Legal); 1102 setOperationAction(ISD::FNEG, MVT::v8f32, Custom); 1103 setOperationAction(ISD::FABS, MVT::v8f32, Custom); 1104 1105 setOperationAction(ISD::FADD, MVT::v4f64, Legal); 1106 setOperationAction(ISD::FSUB, MVT::v4f64, Legal); 1107 setOperationAction(ISD::FMUL, MVT::v4f64, Legal); 1108 setOperationAction(ISD::FDIV, MVT::v4f64, Legal); 1109 setOperationAction(ISD::FSQRT, MVT::v4f64, Legal); 1110 setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal); 1111 setOperationAction(ISD::FCEIL, MVT::v4f64, Legal); 1112 setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal); 1113 setOperationAction(ISD::FRINT, MVT::v4f64, Legal); 1114 
setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Legal); 1115 setOperationAction(ISD::FNEG, MVT::v4f64, Custom); 1116 setOperationAction(ISD::FABS, MVT::v4f64, Custom); 1117 1118 setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom); 1119 setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom); 1120 1121 setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom); 1122 1123 setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal); 1124 setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Promote); 1125 setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal); 1126 setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal); 1127 1128 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom); 1129 setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom); 1130 setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom); 1131 1132 setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, Legal); 1133 1134 setOperationAction(ISD::SRL, MVT::v16i16, Custom); 1135 setOperationAction(ISD::SRL, MVT::v32i8, Custom); 1136 1137 setOperationAction(ISD::SHL, MVT::v16i16, Custom); 1138 setOperationAction(ISD::SHL, MVT::v32i8, Custom); 1139 1140 setOperationAction(ISD::SRA, MVT::v16i16, Custom); 1141 setOperationAction(ISD::SRA, MVT::v32i8, Custom); 1142 1143 setOperationAction(ISD::SDIV, MVT::v16i16, Custom); 1144 1145 setOperationAction(ISD::SETCC, MVT::v32i8, Custom); 1146 setOperationAction(ISD::SETCC, MVT::v16i16, Custom); 1147 setOperationAction(ISD::SETCC, MVT::v8i32, Custom); 1148 setOperationAction(ISD::SETCC, MVT::v4i64, Custom); 1149 1150 setOperationAction(ISD::SELECT, MVT::v4f64, Custom); 1151 setOperationAction(ISD::SELECT, MVT::v4i64, Custom); 1152 setOperationAction(ISD::SELECT, MVT::v8f32, Custom); 1153 1154 setOperationAction(ISD::VSELECT, MVT::v4f64, Legal); 1155 setOperationAction(ISD::VSELECT, MVT::v4i64, Legal); 1156 setOperationAction(ISD::VSELECT, MVT::v8i32, Legal); 1157 setOperationAction(ISD::VSELECT, MVT::v8f32, Legal); 1158 1159 setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom); 1160 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Custom); 1161 setOperationAction(ISD::ZERO_EXTEND, MVT::v4i64, Custom); 1162 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom); 1163 setOperationAction(ISD::ANY_EXTEND, MVT::v4i64, Custom); 1164 setOperationAction(ISD::ANY_EXTEND, MVT::v8i32, Custom); 1165 1166 if (Subtarget->hasFMA() || Subtarget->hasFMA4()) { 1167 setOperationAction(ISD::FMA, MVT::v8f32, Legal); 1168 setOperationAction(ISD::FMA, MVT::v4f64, Legal); 1169 setOperationAction(ISD::FMA, MVT::v4f32, Legal); 1170 setOperationAction(ISD::FMA, MVT::v2f64, Legal); 1171 setOperationAction(ISD::FMA, MVT::f32, Legal); 1172 setOperationAction(ISD::FMA, MVT::f64, Legal); 1173 } 1174 1175 if (Subtarget->hasInt256()) { 1176 setOperationAction(ISD::ADD, MVT::v4i64, Legal); 1177 setOperationAction(ISD::ADD, MVT::v8i32, Legal); 1178 setOperationAction(ISD::ADD, MVT::v16i16, Legal); 1179 setOperationAction(ISD::ADD, MVT::v32i8, Legal); 1180 1181 setOperationAction(ISD::SUB, MVT::v4i64, Legal); 1182 setOperationAction(ISD::SUB, MVT::v8i32, Legal); 1183 setOperationAction(ISD::SUB, MVT::v16i16, Legal); 1184 setOperationAction(ISD::SUB, MVT::v32i8, Legal); 1185 1186 setOperationAction(ISD::MUL, MVT::v4i64, Custom); 1187 setOperationAction(ISD::MUL, MVT::v8i32, Legal); 1188 setOperationAction(ISD::MUL, MVT::v16i16, Legal); 1189 // Don't lower v32i8 because there is no 128-bit byte mul 1190 1191 setOperationAction(ISD::VSELECT, MVT::v32i8, Legal); 1192 1193 setOperationAction(ISD::SDIV, MVT::v8i32, Custom); 1194 } else { 1195 
setOperationAction(ISD::ADD, MVT::v4i64, Custom); 1196 setOperationAction(ISD::ADD, MVT::v8i32, Custom); 1197 setOperationAction(ISD::ADD, MVT::v16i16, Custom); 1198 setOperationAction(ISD::ADD, MVT::v32i8, Custom); 1199 1200 setOperationAction(ISD::SUB, MVT::v4i64, Custom); 1201 setOperationAction(ISD::SUB, MVT::v8i32, Custom); 1202 setOperationAction(ISD::SUB, MVT::v16i16, Custom); 1203 setOperationAction(ISD::SUB, MVT::v32i8, Custom); 1204 1205 setOperationAction(ISD::MUL, MVT::v4i64, Custom); 1206 setOperationAction(ISD::MUL, MVT::v8i32, Custom); 1207 setOperationAction(ISD::MUL, MVT::v16i16, Custom); 1208 // Don't lower v32i8 because there is no 128-bit byte mul 1209 } 1210 1211 // In the customized shift lowering, the legal cases in AVX2 will be 1212 // recognized. 1213 setOperationAction(ISD::SRL, MVT::v4i64, Custom); 1214 setOperationAction(ISD::SRL, MVT::v8i32, Custom); 1215 1216 setOperationAction(ISD::SHL, MVT::v4i64, Custom); 1217 setOperationAction(ISD::SHL, MVT::v8i32, Custom); 1218 1219 setOperationAction(ISD::SRA, MVT::v8i32, Custom); 1220 1221 // Custom lower several nodes for 256-bit types. 1222 for (int i = MVT::FIRST_VECTOR_VALUETYPE; 1223 i <= MVT::LAST_VECTOR_VALUETYPE; ++i) { 1224 MVT VT = (MVT::SimpleValueType)i; 1225 1226 // Extract subvector is special because the value type 1227 // (result) is 128-bit but the source is 256-bit wide. 1228 if (VT.is128BitVector()) 1229 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); 1230 1231 // Do not attempt to custom lower other non-256-bit vectors 1232 if (!VT.is256BitVector()) 1233 continue; 1234 1235 setOperationAction(ISD::BUILD_VECTOR, VT, Custom); 1236 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); 1237 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); 1238 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); 1239 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom); 1240 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); 1241 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); 1242 } 1243 1244 // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64. 1245 for (int i = MVT::v32i8; i != MVT::v4i64; ++i) { 1246 MVT VT = (MVT::SimpleValueType)i; 1247 1248 // Do not attempt to promote non-256-bit vectors 1249 if (!VT.is256BitVector()) 1250 continue; 1251 1252 setOperationAction(ISD::AND, VT, Promote); 1253 AddPromotedToType (ISD::AND, VT, MVT::v4i64); 1254 setOperationAction(ISD::OR, VT, Promote); 1255 AddPromotedToType (ISD::OR, VT, MVT::v4i64); 1256 setOperationAction(ISD::XOR, VT, Promote); 1257 AddPromotedToType (ISD::XOR, VT, MVT::v4i64); 1258 setOperationAction(ISD::LOAD, VT, Promote); 1259 AddPromotedToType (ISD::LOAD, VT, MVT::v4i64); 1260 setOperationAction(ISD::SELECT, VT, Promote); 1261 AddPromotedToType (ISD::SELECT, VT, MVT::v4i64); 1262 } 1263 } 1264 1265 // SIGN_EXTEND_INREGs are evaluated by the extend type. Handle the expansion 1266 // of this type with custom code. 1267 for (int VT = MVT::FIRST_VECTOR_VALUETYPE; 1268 VT != MVT::LAST_VECTOR_VALUETYPE; VT++) { 1269 setOperationAction(ISD::SIGN_EXTEND_INREG, (MVT::SimpleValueType)VT, 1270 Custom); 1271 } 1272 1273 // We want to custom lower some of our intrinsics. 1274 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); 1275 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom); 1276 1277 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't 1278 // handle type legalization for these operations here. 
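  // (Concretely: i8, i16 and i32 overflow ops are always custom lowered;
  // i64 is included only on x86-64, where it is a legal type.)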
  //
  // FIXME: We really should do custom legalization for addition and
  // subtraction on x86-32 once PR3203 is fixed.  We really can't do much
  // better than generic legalization for 64-bit multiplication-with-overflow,
  // though.
  for (unsigned i = 0, e = 3+Subtarget->is64Bit(); i != e; ++i) {
    // Add/Sub/Mul with overflow operations are custom lowered.
    MVT VT = IntVTs[i];
    setOperationAction(ISD::SADDO, VT, Custom);
    setOperationAction(ISD::UADDO, VT, Custom);
    setOperationAction(ISD::SSUBO, VT, Custom);
    setOperationAction(ISD::USUBO, VT, Custom);
    setOperationAction(ISD::SMULO, VT, Custom);
    setOperationAction(ISD::UMULO, VT, Custom);
  }

  // There are no 8-bit 3-address imul/mul instructions
  setOperationAction(ISD::SMULO, MVT::i8, Expand);
  setOperationAction(ISD::UMULO, MVT::i8, Expand);

  if (!Subtarget->is64Bit()) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, 0);
    setLibcallName(RTLIB::SRL_I128, 0);
    setLibcallName(RTLIB::SRA_I128, 0);
  }

  // Combine sin / cos into one node or libcall if possible.
  if (Subtarget->hasSinCos()) {
    setLibcallName(RTLIB::SINCOS_F32, "sincosf");
    setLibcallName(RTLIB::SINCOS_F64, "sincos");
    if (Subtarget->isTargetDarwin()) {
      // For MacOSX, we don't want the normal expansion of a libcall to
      // sincos.  We want to issue a libcall to __sincos_stret to avoid memory
      // traffic.
      setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
      setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
    }
  }

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::VSELECT);
  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FMA);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
  setTargetDAGCombine(ISD::TRUNCATE);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::SETCC);
  if (Subtarget->is64Bit())
    setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::XOR);

  computeRegisterProperties();

  // On Darwin, -Os means optimize for size without hurting performance,
  // so do not reduce the limit.
  MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
  MaxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 16 : 8;
  MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
  MaxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
  MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
  MaxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
  setPrefLoopAlignment(4); // 2^4 bytes.

  // Predictable cmovs don't hurt on Atom because it's in-order.
  PredictableSelectIsExpensive = !Subtarget->isAtom();

  setPrefFunctionAlignment(4); // 2^4 bytes.
1362}
1363
1364EVT X86TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
1365 if (!VT.isVector()) return MVT::i8;
1366 return VT.changeVectorElementTypeToInteger();
1367}
1368
1369/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
1370/// the desired ByVal argument alignment.
1371static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
1372 if (MaxAlign == 16)
1373 return;
1374 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1375 if (VTy->getBitWidth() == 128)
1376 MaxAlign = 16;
1377 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1378 unsigned EltAlign = 0;
1379 getMaxByValAlign(ATy->getElementType(), EltAlign);
1380 if (EltAlign > MaxAlign)
1381 MaxAlign = EltAlign;
1382 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1383 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1384 unsigned EltAlign = 0;
1385 getMaxByValAlign(STy->getElementType(i), EltAlign);
1386 if (EltAlign > MaxAlign)
1387 MaxAlign = EltAlign;
1388 if (MaxAlign == 16)
1389 break;
1390 }
1391 }
1392}
1393
1394/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
1395/// function arguments in the caller parameter area. For X86, aggregates
1396/// that contain SSE vectors are placed at 16-byte boundaries while the rest
1397/// are at 4-byte boundaries.
1398unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const {
1399 if (Subtarget->is64Bit()) {
1400 // Max of 8 and alignment of type.
1401 unsigned TyAlign = TD->getABITypeAlignment(Ty);
1402 if (TyAlign > 8)
1403 return TyAlign;
1404 return 8;
1405 }
1406
1407 unsigned Align = 4;
1408 if (Subtarget->hasSSE1())
1409 getMaxByValAlign(Ty, Align);
1410 return Align;
1411}
1412
1413/// getOptimalMemOpType - Returns the target specific optimal type for load
1414/// and store operations as a result of memset, memcpy, and memmove
1415/// lowering. If DstAlign is zero, that means the destination's alignment can
1416/// satisfy any constraint. Similarly, if SrcAlign is zero it means there is
1417/// no need to check it against the alignment requirement, probably because
1418/// the source does not need to be loaded. If 'IsMemset' is
1419/// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
1420/// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
1421/// source is constant so it does not need to be loaded.
1422/// It returns EVT::Other if the type should be determined using generic
1423/// target-independent logic.
1424EVT
1425X86TargetLowering::getOptimalMemOpType(uint64_t Size,
1426 unsigned DstAlign, unsigned SrcAlign,
1427 bool IsMemset, bool ZeroMemset,
1428 bool MemcpyStrSrc,
1429 MachineFunction &MF) const {
1430 const Function *F = MF.getFunction();
1431 if ((!IsMemset || ZeroMemset) &&
1432 !F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
1433 Attribute::NoImplicitFloat)) {
1434 if (Size >= 16 &&
1435 (Subtarget->isUnalignedMemAccessFast() ||
1436 ((DstAlign == 0 || DstAlign >= 16) &&
1437 (SrcAlign == 0 || SrcAlign >= 16)))) {
1438 if (Size >= 32) {
1439 if (Subtarget->hasInt256())
1440 return MVT::v8i32;
1441 if (Subtarget->hasFp256())
1442 return MVT::v8f32;
1443 }
1444 if (Subtarget->hasSSE2())
1445 return MVT::v4i32;
1446 if (Subtarget->hasSSE1())
1447 return MVT::v4f32;
1448 } else if (!MemcpyStrSrc && Size >= 8 &&
1449 !Subtarget->is64Bit() &&
1450 Subtarget->hasSSE2()) {
1451 // Do not use f64 to lower memcpy if source is string constant. It's
1452 // better to use i32 to avoid the loads.
1453 return MVT::f64;
1454 }
1455 }
1456 if (Subtarget->is64Bit() && Size >= 8)
1457 return MVT::i64;
1458 return MVT::i32;
1459}
1460
1461bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
1462 if (VT == MVT::f32)
1463 return X86ScalarSSEf32;
1464 else if (VT == MVT::f64)
1465 return X86ScalarSSEf64;
1466 return true;
1467}
1468
1469bool
1470X86TargetLowering::allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const {
1471 if (Fast)
1472 *Fast = Subtarget->isUnalignedMemAccessFast();
1473 return true;
1474}
1475
1476/// getJumpTableEncoding - Return the entry encoding for a jump table in the
1477/// current function. The returned value is a member of the
1478/// MachineJumpTableInfo::JTEntryKind enum.
1479unsigned X86TargetLowering::getJumpTableEncoding() const {
1480 // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
1481 // symbol.
1482 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1483 Subtarget->isPICStyleGOT())
1484 return MachineJumpTableInfo::EK_Custom32;
1485
1486 // Otherwise, use the normal jump table encoding heuristics.
1487 return TargetLowering::getJumpTableEncoding();
1488}
1489
1490const MCExpr *
1491X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
1492 const MachineBasicBlock *MBB,
1493 unsigned uid, MCContext &Ctx) const {
1494 assert(getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1495 Subtarget->isPICStyleGOT());
1496 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
1497 // entries.
1498 return MCSymbolRefExpr::Create(MBB->getSymbol(),
1499 MCSymbolRefExpr::VK_GOTOFF, Ctx);
1500}
1501
1502/// getPICJumpTableRelocBase - Returns the relocation base for the given PIC
1503/// jumptable.
1504SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
1505 SelectionDAG &DAG) const {
1506 if (!Subtarget->is64Bit())
1507 // This doesn't have DebugLoc associated with it, but is not really the
1508 // same as a Register.
1509 return DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), getPointerTy());
1510 return Table;
1511}
1512
1513/// getPICJumpTableRelocBaseExpr - This returns the relocation base for the
1514/// given PIC jumptable, the same as getPICJumpTableRelocBase, but as an
1515/// MCExpr.
1516const MCExpr *X86TargetLowering::
1517getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
1518 MCContext &Ctx) const {
1519 // X86-64 uses RIP relative addressing based on the jump table label.
1520 if (Subtarget->isPICStyleRIPRel())
1521 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
1522
1523 // Otherwise, the reference is relative to the PIC base.
1524 return MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx);
1525}
1526
1527// FIXME: Why is this routine here? Move to RegInfo!
1528std::pair<const TargetRegisterClass*, uint8_t>
1529X86TargetLowering::findRepresentativeClass(MVT VT) const {
1530 const TargetRegisterClass *RRC = 0;
1531 uint8_t Cost = 1;
1532 switch (VT.SimpleTy) {
1533 default:
1534 return TargetLowering::findRepresentativeClass(VT);
1535 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
1536 RRC = Subtarget->is64Bit() ?
1537 (const TargetRegisterClass*)&X86::GR64RegClass : 1538 (const TargetRegisterClass*)&X86::GR32RegClass; 1539 break; 1540 case MVT::x86mmx: 1541 RRC = &X86::VR64RegClass; 1542 break; 1543 case MVT::f32: case MVT::f64: 1544 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64: 1545 case MVT::v4f32: case MVT::v2f64: 1546 case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32: 1547 case MVT::v4f64: 1548 RRC = &X86::VR128RegClass; 1549 break; 1550 } 1551 return std::make_pair(RRC, Cost); 1552} 1553 1554bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace, 1555 unsigned &Offset) const { 1556 if (!Subtarget->isTargetLinux()) 1557 return false; 1558 1559 if (Subtarget->is64Bit()) { 1560 // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs: 1561 Offset = 0x28; 1562 if (getTargetMachine().getCodeModel() == CodeModel::Kernel) 1563 AddressSpace = 256; 1564 else 1565 AddressSpace = 257; 1566 } else { 1567 // %gs:0x14 on i386 1568 Offset = 0x14; 1569 AddressSpace = 256; 1570 } 1571 return true; 1572} 1573 1574//===----------------------------------------------------------------------===// 1575// Return Value Calling Convention Implementation 1576//===----------------------------------------------------------------------===// 1577 1578#include "X86GenCallingConv.inc" 1579 1580bool 1581X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv, 1582 MachineFunction &MF, bool isVarArg, 1583 const SmallVectorImpl<ISD::OutputArg> &Outs, 1584 LLVMContext &Context) const { 1585 SmallVector<CCValAssign, 16> RVLocs; 1586 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 1587 RVLocs, Context); 1588 return CCInfo.CheckReturn(Outs, RetCC_X86); 1589} 1590 1591SDValue 1592X86TargetLowering::LowerReturn(SDValue Chain, 1593 CallingConv::ID CallConv, bool isVarArg, 1594 const SmallVectorImpl<ISD::OutputArg> &Outs, 1595 const SmallVectorImpl<SDValue> &OutVals, 1596 DebugLoc dl, SelectionDAG &DAG) const { 1597 MachineFunction &MF = DAG.getMachineFunction(); 1598 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1599 1600 SmallVector<CCValAssign, 16> RVLocs; 1601 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 1602 RVLocs, *DAG.getContext()); 1603 CCInfo.AnalyzeReturn(Outs, RetCC_X86); 1604 1605 SDValue Flag; 1606 SmallVector<SDValue, 6> RetOps; 1607 RetOps.push_back(Chain); // Operand #0 = Chain (updated below) 1608 // Operand #1 = Bytes To Pop 1609 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), 1610 MVT::i16)); 1611 1612 // Copy the result values into the output registers. 1613 for (unsigned i = 0; i != RVLocs.size(); ++i) { 1614 CCValAssign &VA = RVLocs[i]; 1615 assert(VA.isRegLoc() && "Can only return in registers!"); 1616 SDValue ValToCopy = OutVals[i]; 1617 EVT ValVT = ValToCopy.getValueType(); 1618 1619 // Promote values to the appropriate types 1620 if (VA.getLocInfo() == CCValAssign::SExt) 1621 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy); 1622 else if (VA.getLocInfo() == CCValAssign::ZExt) 1623 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy); 1624 else if (VA.getLocInfo() == CCValAssign::AExt) 1625 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy); 1626 else if (VA.getLocInfo() == CCValAssign::BCvt) 1627 ValToCopy = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), ValToCopy); 1628 1629 // If this is x86-64, and we disabled SSE, we can't return FP values, 1630 // or SSE or MMX vectors. 
1631 if ((ValVT == MVT::f32 || ValVT == MVT::f64 || 1632 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) && 1633 (Subtarget->is64Bit() && !Subtarget->hasSSE1())) { 1634 report_fatal_error("SSE register return with SSE disabled"); 1635 } 1636 // Likewise we can't return F64 values with SSE1 only. gcc does so, but 1637 // llvm-gcc has never done it right and no one has noticed, so this 1638 // should be OK for now. 1639 if (ValVT == MVT::f64 && 1640 (Subtarget->is64Bit() && !Subtarget->hasSSE2())) 1641 report_fatal_error("SSE2 register return with SSE2 disabled"); 1642 1643 // Returns in ST0/ST1 are handled specially: these are pushed as operands to 1644 // the RET instruction and handled by the FP Stackifier. 1645 if (VA.getLocReg() == X86::ST0 || 1646 VA.getLocReg() == X86::ST1) { 1647 // If this is a copy from an xmm register to ST(0), use an FPExtend to 1648 // change the value to the FP stack register class. 1649 if (isScalarFPTypeInSSEReg(VA.getValVT())) 1650 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy); 1651 RetOps.push_back(ValToCopy); 1652 // Don't emit a copytoreg. 1653 continue; 1654 } 1655 1656 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64 1657 // which is returned in RAX / RDX. 1658 if (Subtarget->is64Bit()) { 1659 if (ValVT == MVT::x86mmx) { 1660 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) { 1661 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy); 1662 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, 1663 ValToCopy); 1664 // If we don't have SSE2 available, convert to v4f32 so the generated 1665 // register is legal. 1666 if (!Subtarget->hasSSE2()) 1667 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,ValToCopy); 1668 } 1669 } 1670 } 1671 1672 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag); 1673 Flag = Chain.getValue(1); 1674 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 1675 } 1676 1677 // The x86-64 ABIs require that for returning structs by value we copy 1678 // the sret argument into %rax/%eax (depending on ABI) for the return. 1679 // Win32 requires us to put the sret argument to %eax as well. 1680 // We saved the argument into a virtual register in the entry block, 1681 // so now we copy the value out and into %rax/%eax. 1682 if (DAG.getMachineFunction().getFunction()->hasStructRetAttr() && 1683 (Subtarget->is64Bit() || Subtarget->isTargetWindows())) { 1684 MachineFunction &MF = DAG.getMachineFunction(); 1685 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1686 unsigned Reg = FuncInfo->getSRetReturnReg(); 1687 assert(Reg && 1688 "SRetReturnReg should have been set in LowerFormalArguments()."); 1689 SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy()); 1690 1691 unsigned RetValReg 1692 = (Subtarget->is64Bit() && !Subtarget->isTarget64BitILP32()) ? 1693 X86::RAX : X86::EAX; 1694 Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag); 1695 Flag = Chain.getValue(1); 1696 1697 // RAX/EAX now acts like a return value. 1698 RetOps.push_back(DAG.getRegister(RetValReg, getPointerTy())); 1699 } 1700 1701 RetOps[0] = Chain; // Update chain. 1702 1703 // Add the flag if we have it. 
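  // (The glue value ties the CopyToReg nodes above to the RET node, so the
  // register copies cannot be separated from the return during scheduling.)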
1704 if (Flag.getNode()) 1705 RetOps.push_back(Flag); 1706 1707 return DAG.getNode(X86ISD::RET_FLAG, dl, 1708 MVT::Other, &RetOps[0], RetOps.size()); 1709} 1710 1711bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { 1712 if (N->getNumValues() != 1) 1713 return false; 1714 if (!N->hasNUsesOfValue(1, 0)) 1715 return false; 1716 1717 SDValue TCChain = Chain; 1718 SDNode *Copy = *N->use_begin(); 1719 if (Copy->getOpcode() == ISD::CopyToReg) { 1720 // If the copy has a glue operand, we conservatively assume it isn't safe to 1721 // perform a tail call. 1722 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue) 1723 return false; 1724 TCChain = Copy->getOperand(0); 1725 } else if (Copy->getOpcode() != ISD::FP_EXTEND) 1726 return false; 1727 1728 bool HasRet = false; 1729 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); 1730 UI != UE; ++UI) { 1731 if (UI->getOpcode() != X86ISD::RET_FLAG) 1732 return false; 1733 HasRet = true; 1734 } 1735 1736 if (!HasRet) 1737 return false; 1738 1739 Chain = TCChain; 1740 return true; 1741} 1742 1743MVT 1744X86TargetLowering::getTypeForExtArgOrReturn(MVT VT, 1745 ISD::NodeType ExtendKind) const { 1746 MVT ReturnMVT; 1747 // TODO: Is this also valid on 32-bit? 1748 if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND) 1749 ReturnMVT = MVT::i8; 1750 else 1751 ReturnMVT = MVT::i32; 1752 1753 MVT MinVT = getRegisterType(ReturnMVT); 1754 return VT.bitsLT(MinVT) ? MinVT : VT; 1755} 1756 1757/// LowerCallResult - Lower the result values of a call into the 1758/// appropriate copies out of appropriate physical registers. 1759/// 1760SDValue 1761X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 1762 CallingConv::ID CallConv, bool isVarArg, 1763 const SmallVectorImpl<ISD::InputArg> &Ins, 1764 DebugLoc dl, SelectionDAG &DAG, 1765 SmallVectorImpl<SDValue> &InVals) const { 1766 1767 // Assign locations to each value returned by this call. 1768 SmallVector<CCValAssign, 16> RVLocs; 1769 bool Is64Bit = Subtarget->is64Bit(); 1770 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 1771 getTargetMachine(), RVLocs, *DAG.getContext()); 1772 CCInfo.AnalyzeCallResult(Ins, RetCC_X86); 1773 1774 // Copy all of the result registers out of their specified physreg. 1775 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 1776 CCValAssign &VA = RVLocs[i]; 1777 EVT CopyVT = VA.getValVT(); 1778 1779 // If this is x86-64, and we disabled SSE, we can't return FP values 1780 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) && 1781 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) { 1782 report_fatal_error("SSE register return with SSE disabled"); 1783 } 1784 1785 SDValue Val; 1786 1787 // If this is a call to a function that returns an fp value on the floating 1788 // point stack, we must guarantee the value is popped from the stack, so 1789 // a CopyFromReg is not good enough - the copy instruction may be eliminated 1790 // if the return value is not used. We use the FpPOP_RETVAL instruction 1791 // instead. 1792 if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) { 1793 // If we prefer to use the value in xmm registers, copy it out as f80 and 1794 // use a truncate to move it from fp stack reg to xmm reg. 
1795 if (isScalarFPTypeInSSEReg(VA.getValVT())) CopyVT = MVT::f80; 1796 SDValue Ops[] = { Chain, InFlag }; 1797 Chain = SDValue(DAG.getMachineNode(X86::FpPOP_RETVAL, dl, CopyVT, 1798 MVT::Other, MVT::Glue, Ops), 1); 1799 Val = Chain.getValue(0); 1800 1801 // Round the f80 to the right size, which also moves it to the appropriate 1802 // xmm register. 1803 if (CopyVT != VA.getValVT()) 1804 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val, 1805 // This truncation won't change the value. 1806 DAG.getIntPtrConstant(1)); 1807 } else { 1808 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), 1809 CopyVT, InFlag).getValue(1); 1810 Val = Chain.getValue(0); 1811 } 1812 InFlag = Chain.getValue(2); 1813 InVals.push_back(Val); 1814 } 1815 1816 return Chain; 1817} 1818 1819//===----------------------------------------------------------------------===// 1820// C & StdCall & Fast Calling Convention implementation 1821//===----------------------------------------------------------------------===// 1822// StdCall calling convention seems to be standard for many Windows' API 1823// routines and around. It differs from C calling convention just a little: 1824// callee should clean up the stack, not caller. Symbols should be also 1825// decorated in some fancy way :) It doesn't support any vector arguments. 1826// For info on fast calling convention see Fast Calling Convention (tail call) 1827// implementation LowerX86_32FastCCCallTo. 1828 1829/// CallIsStructReturn - Determines whether a call uses struct return 1830/// semantics. 1831enum StructReturnType { 1832 NotStructReturn, 1833 RegStructReturn, 1834 StackStructReturn 1835}; 1836static StructReturnType 1837callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) { 1838 if (Outs.empty()) 1839 return NotStructReturn; 1840 1841 const ISD::ArgFlagsTy &Flags = Outs[0].Flags; 1842 if (!Flags.isSRet()) 1843 return NotStructReturn; 1844 if (Flags.isInReg()) 1845 return RegStructReturn; 1846 return StackStructReturn; 1847} 1848 1849/// ArgsAreStructReturn - Determines whether a function uses struct 1850/// return semantics. 1851static StructReturnType 1852argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) { 1853 if (Ins.empty()) 1854 return NotStructReturn; 1855 1856 const ISD::ArgFlagsTy &Flags = Ins[0].Flags; 1857 if (!Flags.isSRet()) 1858 return NotStructReturn; 1859 if (Flags.isInReg()) 1860 return RegStructReturn; 1861 return StackStructReturn; 1862} 1863 1864/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 1865/// by "Src" to address "Dst" with size and alignment information specified by 1866/// the specific parameter attribute. The copy will be passed as a byval 1867/// function parameter. 1868static SDValue 1869CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, 1870 ISD::ArgFlagsTy Flags, SelectionDAG &DAG, 1871 DebugLoc dl) { 1872 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32); 1873 1874 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 1875 /*isVolatile*/false, /*AlwaysInline=*/true, 1876 MachinePointerInfo(), MachinePointerInfo()); 1877} 1878 1879/// IsTailCallConvention - Return true if the calling convention is one that 1880/// supports tail call optimization. 
1881static bool IsTailCallConvention(CallingConv::ID CC) { 1882 return (CC == CallingConv::Fast || CC == CallingConv::GHC || 1883 CC == CallingConv::HiPE); 1884} 1885 1886bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { 1887 if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls) 1888 return false; 1889 1890 CallSite CS(CI); 1891 CallingConv::ID CalleeCC = CS.getCallingConv(); 1892 if (!IsTailCallConvention(CalleeCC) && CalleeCC != CallingConv::C) 1893 return false; 1894 1895 return true; 1896} 1897 1898/// FuncIsMadeTailCallSafe - Return true if the function is being made into 1899/// a tailcall target by changing its ABI. 1900static bool FuncIsMadeTailCallSafe(CallingConv::ID CC, 1901 bool GuaranteedTailCallOpt) { 1902 return GuaranteedTailCallOpt && IsTailCallConvention(CC); 1903} 1904 1905SDValue 1906X86TargetLowering::LowerMemArgument(SDValue Chain, 1907 CallingConv::ID CallConv, 1908 const SmallVectorImpl<ISD::InputArg> &Ins, 1909 DebugLoc dl, SelectionDAG &DAG, 1910 const CCValAssign &VA, 1911 MachineFrameInfo *MFI, 1912 unsigned i) const { 1913 // Create the nodes corresponding to a load from this parameter slot. 1914 ISD::ArgFlagsTy Flags = Ins[i].Flags; 1915 bool AlwaysUseMutable = FuncIsMadeTailCallSafe(CallConv, 1916 getTargetMachine().Options.GuaranteedTailCallOpt); 1917 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal(); 1918 EVT ValVT; 1919 1920 // If value is passed by pointer we have address passed instead of the value 1921 // itself. 1922 if (VA.getLocInfo() == CCValAssign::Indirect) 1923 ValVT = VA.getLocVT(); 1924 else 1925 ValVT = VA.getValVT(); 1926 1927 // FIXME: For now, all byval parameter objects are marked mutable. This can be 1928 // changed with more analysis. 1929 // In case of tail call optimization mark all arguments mutable. Since they 1930 // could be overwritten by lowering of arguments in case of a tail call. 1931 if (Flags.isByVal()) { 1932 unsigned Bytes = Flags.getByValSize(); 1933 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects. 1934 int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable); 1935 return DAG.getFrameIndex(FI, getPointerTy()); 1936 } else { 1937 int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8, 1938 VA.getLocMemOffset(), isImmutable); 1939 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 1940 return DAG.getLoad(ValVT, dl, Chain, FIN, 1941 MachinePointerInfo::getFixedStack(FI), 1942 false, false, false, 0); 1943 } 1944} 1945 1946SDValue 1947X86TargetLowering::LowerFormalArguments(SDValue Chain, 1948 CallingConv::ID CallConv, 1949 bool isVarArg, 1950 const SmallVectorImpl<ISD::InputArg> &Ins, 1951 DebugLoc dl, 1952 SelectionDAG &DAG, 1953 SmallVectorImpl<SDValue> &InVals) 1954 const { 1955 MachineFunction &MF = DAG.getMachineFunction(); 1956 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1957 1958 const Function* Fn = MF.getFunction(); 1959 if (Fn->hasExternalLinkage() && 1960 Subtarget->isTargetCygMing() && 1961 Fn->getName() == "main") 1962 FuncInfo->setForceFramePointer(true); 1963 1964 MachineFrameInfo *MFI = MF.getFrameInfo(); 1965 bool Is64Bit = Subtarget->is64Bit(); 1966 bool IsWindows = Subtarget->isTargetWindows(); 1967 bool IsWin64 = Subtarget->isTargetWin64(); 1968 1969 assert(!(isVarArg && IsTailCallConvention(CallConv)) && 1970 "Var args not supported with calling convention fastcc, ghc or hipe"); 1971 1972 // Assign locations to all of the incoming arguments. 
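  // (CC_X86, used below, is the table-generated entry point included from
  // X86GenCallingConv.inc; it dispatches on the calling convention and
  // subtarget.)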
1973 SmallVector<CCValAssign, 16> ArgLocs; 1974 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 1975 ArgLocs, *DAG.getContext()); 1976 1977 // Allocate shadow area for Win64 1978 if (IsWin64) { 1979 CCInfo.AllocateStack(32, 8); 1980 } 1981 1982 CCInfo.AnalyzeFormalArguments(Ins, CC_X86); 1983 1984 unsigned LastVal = ~0U; 1985 SDValue ArgValue; 1986 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1987 CCValAssign &VA = ArgLocs[i]; 1988 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later 1989 // places. 1990 assert(VA.getValNo() != LastVal && 1991 "Don't support value assigned to multiple locs yet"); 1992 (void)LastVal; 1993 LastVal = VA.getValNo(); 1994 1995 if (VA.isRegLoc()) { 1996 EVT RegVT = VA.getLocVT(); 1997 const TargetRegisterClass *RC; 1998 if (RegVT == MVT::i32) 1999 RC = &X86::GR32RegClass; 2000 else if (Is64Bit && RegVT == MVT::i64) 2001 RC = &X86::GR64RegClass; 2002 else if (RegVT == MVT::f32) 2003 RC = &X86::FR32RegClass; 2004 else if (RegVT == MVT::f64) 2005 RC = &X86::FR64RegClass; 2006 else if (RegVT.is256BitVector()) 2007 RC = &X86::VR256RegClass; 2008 else if (RegVT.is128BitVector()) 2009 RC = &X86::VR128RegClass; 2010 else if (RegVT == MVT::x86mmx) 2011 RC = &X86::VR64RegClass; 2012 else 2013 llvm_unreachable("Unknown argument type!"); 2014 2015 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2016 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 2017 2018 // If this is an 8 or 16-bit value, it is really passed promoted to 32 2019 // bits. Insert an assert[sz]ext to capture this, then truncate to the 2020 // right size. 2021 if (VA.getLocInfo() == CCValAssign::SExt) 2022 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, 2023 DAG.getValueType(VA.getValVT())); 2024 else if (VA.getLocInfo() == CCValAssign::ZExt) 2025 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, 2026 DAG.getValueType(VA.getValVT())); 2027 else if (VA.getLocInfo() == CCValAssign::BCvt) 2028 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue); 2029 2030 if (VA.isExtInLoc()) { 2031 // Handle MMX values passed in XMM regs. 2032 if (RegVT.isVector()) 2033 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue); 2034 else 2035 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 2036 } 2037 } else { 2038 assert(VA.isMemLoc()); 2039 ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i); 2040 } 2041 2042 // If value is passed via pointer - do a load. 2043 if (VA.getLocInfo() == CCValAssign::Indirect) 2044 ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, 2045 MachinePointerInfo(), false, false, false, 0); 2046 2047 InVals.push_back(ArgValue); 2048 } 2049 2050 // The x86-64 ABIs require that for returning structs by value we copy 2051 // the sret argument into %rax/%eax (depending on ABI) for the return. 2052 // Win32 requires us to put the sret argument to %eax as well. 2053 // Save the argument into a virtual register so that we can access it 2054 // from the return points. 
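  // (LowerReturn later reads this register back via getSRetReturnReg and
  // copies it into the return register.)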
2055 if (MF.getFunction()->hasStructRetAttr() && 2056 (Subtarget->is64Bit() || Subtarget->isTargetWindows())) { 2057 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 2058 unsigned Reg = FuncInfo->getSRetReturnReg(); 2059 if (!Reg) { 2060 MVT PtrTy = getPointerTy(); 2061 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy)); 2062 FuncInfo->setSRetReturnReg(Reg); 2063 } 2064 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]); 2065 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain); 2066 } 2067 2068 unsigned StackSize = CCInfo.getNextStackOffset(); 2069 // Align stack specially for tail calls. 2070 if (FuncIsMadeTailCallSafe(CallConv, 2071 MF.getTarget().Options.GuaranteedTailCallOpt)) 2072 StackSize = GetAlignedArgumentStackSize(StackSize, DAG); 2073 2074 // If the function takes variable number of arguments, make a frame index for 2075 // the start of the first vararg value... for expansion of llvm.va_start. 2076 if (isVarArg) { 2077 if (Is64Bit || (CallConv != CallingConv::X86_FastCall && 2078 CallConv != CallingConv::X86_ThisCall)) { 2079 FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize,true)); 2080 } 2081 if (Is64Bit) { 2082 unsigned TotalNumIntRegs = 0, TotalNumXMMRegs = 0; 2083 2084 // FIXME: We should really autogenerate these arrays 2085 static const uint16_t GPR64ArgRegsWin64[] = { 2086 X86::RCX, X86::RDX, X86::R8, X86::R9 2087 }; 2088 static const uint16_t GPR64ArgRegs64Bit[] = { 2089 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9 2090 }; 2091 static const uint16_t XMMArgRegs64Bit[] = { 2092 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 2093 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 2094 }; 2095 const uint16_t *GPR64ArgRegs; 2096 unsigned NumXMMRegs = 0; 2097 2098 if (IsWin64) { 2099 // The XMM registers which might contain var arg parameters are shadowed 2100 // in their paired GPR. So we only need to save the GPR to their home 2101 // slots. 2102 TotalNumIntRegs = 4; 2103 GPR64ArgRegs = GPR64ArgRegsWin64; 2104 } else { 2105 TotalNumIntRegs = 6; TotalNumXMMRegs = 8; 2106 GPR64ArgRegs = GPR64ArgRegs64Bit; 2107 2108 NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs64Bit, 2109 TotalNumXMMRegs); 2110 } 2111 unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs, 2112 TotalNumIntRegs); 2113 2114 bool NoImplicitFloatOps = Fn->getAttributes(). 2115 hasAttribute(AttributeSet::FunctionIndex, Attribute::NoImplicitFloat); 2116 assert(!(NumXMMRegs && !Subtarget->hasSSE1()) && 2117 "SSE register cannot be used when SSE is disabled!"); 2118 assert(!(NumXMMRegs && MF.getTarget().Options.UseSoftFloat && 2119 NoImplicitFloatOps) && 2120 "SSE register cannot be used when SSE is disabled!"); 2121 if (MF.getTarget().Options.UseSoftFloat || NoImplicitFloatOps || 2122 !Subtarget->hasSSE1()) 2123 // Kernel mode asks for SSE to be disabled, so don't push them 2124 // on the stack. 2125 TotalNumXMMRegs = 0; 2126 2127 if (IsWin64) { 2128 const TargetFrameLowering &TFI = *getTargetMachine().getFrameLowering(); 2129 // Get to the caller-allocated home save location. Add 8 to account 2130 // for the return address. 2131 int HomeOffset = TFI.getOffsetOfLocalArea() + 8; 2132 FuncInfo->setRegSaveFrameIndex( 2133 MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false)); 2134 // Fixup to set vararg frame on shadow area (4 x i64). 
2135 if (NumIntRegs < 4) 2136 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex()); 2137 } else { 2138 // For X86-64, if there are vararg parameters that are passed via 2139 // registers, then we must store them to their spots on the stack so 2140 // they may be loaded by deferencing the result of va_next. 2141 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8); 2142 FuncInfo->setVarArgsFPOffset(TotalNumIntRegs * 8 + NumXMMRegs * 16); 2143 FuncInfo->setRegSaveFrameIndex( 2144 MFI->CreateStackObject(TotalNumIntRegs * 8 + TotalNumXMMRegs * 16, 16, 2145 false)); 2146 } 2147 2148 // Store the integer parameter registers. 2149 SmallVector<SDValue, 8> MemOps; 2150 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), 2151 getPointerTy()); 2152 unsigned Offset = FuncInfo->getVarArgsGPOffset(); 2153 for (; NumIntRegs != TotalNumIntRegs; ++NumIntRegs) { 2154 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN, 2155 DAG.getIntPtrConstant(Offset)); 2156 unsigned VReg = MF.addLiveIn(GPR64ArgRegs[NumIntRegs], 2157 &X86::GR64RegClass); 2158 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 2159 SDValue Store = 2160 DAG.getStore(Val.getValue(1), dl, Val, FIN, 2161 MachinePointerInfo::getFixedStack( 2162 FuncInfo->getRegSaveFrameIndex(), Offset), 2163 false, false, 0); 2164 MemOps.push_back(Store); 2165 Offset += 8; 2166 } 2167 2168 if (TotalNumXMMRegs != 0 && NumXMMRegs != TotalNumXMMRegs) { 2169 // Now store the XMM (fp + vector) parameter registers. 2170 SmallVector<SDValue, 11> SaveXMMOps; 2171 SaveXMMOps.push_back(Chain); 2172 2173 unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass); 2174 SDValue ALVal = DAG.getCopyFromReg(DAG.getEntryNode(), dl, AL, MVT::i8); 2175 SaveXMMOps.push_back(ALVal); 2176 2177 SaveXMMOps.push_back(DAG.getIntPtrConstant( 2178 FuncInfo->getRegSaveFrameIndex())); 2179 SaveXMMOps.push_back(DAG.getIntPtrConstant( 2180 FuncInfo->getVarArgsFPOffset())); 2181 2182 for (; NumXMMRegs != TotalNumXMMRegs; ++NumXMMRegs) { 2183 unsigned VReg = MF.addLiveIn(XMMArgRegs64Bit[NumXMMRegs], 2184 &X86::VR128RegClass); 2185 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::v4f32); 2186 SaveXMMOps.push_back(Val); 2187 } 2188 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl, 2189 MVT::Other, 2190 &SaveXMMOps[0], SaveXMMOps.size())); 2191 } 2192 2193 if (!MemOps.empty()) 2194 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2195 &MemOps[0], MemOps.size()); 2196 } 2197 } 2198 2199 // Some CCs need callee pop. 2200 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg, 2201 MF.getTarget().Options.GuaranteedTailCallOpt)) { 2202 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything. 2203 } else { 2204 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing. 2205 // If this is an sret function, the return should pop the hidden pointer. 2206 if (!Is64Bit && !IsTailCallConvention(CallConv) && !IsWindows && 2207 argsAreStructReturn(Ins) == StackStructReturn) 2208 FuncInfo->setBytesToPopOnReturn(4); 2209 } 2210 2211 if (!Is64Bit) { 2212 // RegSaveFrameIndex is X86-64 only. 2213 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA); 2214 if (CallConv == CallingConv::X86_FastCall || 2215 CallConv == CallingConv::X86_ThisCall) 2216 // fastcc functions can't have varargs. 
2217 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA); 2218 } 2219 2220 FuncInfo->setArgumentStackSize(StackSize); 2221 2222 return Chain; 2223} 2224 2225SDValue 2226X86TargetLowering::LowerMemOpCallTo(SDValue Chain, 2227 SDValue StackPtr, SDValue Arg, 2228 DebugLoc dl, SelectionDAG &DAG, 2229 const CCValAssign &VA, 2230 ISD::ArgFlagsTy Flags) const { 2231 unsigned LocMemOffset = VA.getLocMemOffset(); 2232 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 2233 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 2234 if (Flags.isByVal()) 2235 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl); 2236 2237 return DAG.getStore(Chain, dl, Arg, PtrOff, 2238 MachinePointerInfo::getStack(LocMemOffset), 2239 false, false, 0); 2240} 2241 2242/// EmitTailCallLoadRetAddr - Emit a load of return address if tail call 2243/// optimization is performed and it is required. 2244SDValue 2245X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG, 2246 SDValue &OutRetAddr, SDValue Chain, 2247 bool IsTailCall, bool Is64Bit, 2248 int FPDiff, DebugLoc dl) const { 2249 // Adjust the Return address stack slot. 2250 EVT VT = getPointerTy(); 2251 OutRetAddr = getReturnAddressFrameIndex(DAG); 2252 2253 // Load the "old" Return address. 2254 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(), 2255 false, false, false, 0); 2256 return SDValue(OutRetAddr.getNode(), 1); 2257} 2258 2259/// EmitTailCallStoreRetAddr - Emit a store of the return address if tail call 2260/// optimization is performed and it is required (FPDiff!=0). 2261static SDValue 2262EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF, 2263 SDValue Chain, SDValue RetAddrFrIdx, EVT PtrVT, 2264 unsigned SlotSize, int FPDiff, DebugLoc dl) { 2265 // Store the return address to the appropriate stack slot. 2266 if (!FPDiff) return Chain; 2267 // Calculate the new stack slot for the return address. 2268 int NewReturnAddrFI = 2269 MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize, false); 2270 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT); 2271 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx, 2272 MachinePointerInfo::getFixedStack(NewReturnAddrFI), 2273 false, false, 0); 2274 return Chain; 2275} 2276 2277SDValue 2278X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, 2279 SmallVectorImpl<SDValue> &InVals) const { 2280 SelectionDAG &DAG = CLI.DAG; 2281 DebugLoc &dl = CLI.DL; 2282 SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; 2283 SmallVector<SDValue, 32> &OutVals = CLI.OutVals; 2284 SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; 2285 SDValue Chain = CLI.Chain; 2286 SDValue Callee = CLI.Callee; 2287 CallingConv::ID CallConv = CLI.CallConv; 2288 bool &isTailCall = CLI.IsTailCall; 2289 bool isVarArg = CLI.IsVarArg; 2290 2291 MachineFunction &MF = DAG.getMachineFunction(); 2292 bool Is64Bit = Subtarget->is64Bit(); 2293 bool IsWin64 = Subtarget->isTargetWin64(); 2294 bool IsWindows = Subtarget->isTargetWindows(); 2295 StructReturnType SR = callIsStructReturn(Outs); 2296 bool IsSibcall = false; 2297 2298 if (MF.getTarget().Options.DisableTailCalls) 2299 isTailCall = false; 2300 2301 if (isTailCall) { 2302 // Check if it's really possible to do a tail call. 
2303 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
2304 isVarArg, SR != NotStructReturn,
2305 MF.getFunction()->hasStructRetAttr(), CLI.RetTy,
2306 Outs, OutVals, Ins, DAG);
2307
2308 // Sibcalls are automatically detected tailcalls which do not require
2309 // ABI changes.
2310 if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall)
2311 IsSibcall = true;
2312
2313 if (isTailCall)
2314 ++NumTailCalls;
2315 }
2316
2317 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2318 "Var args not supported with calling convention fastcc, ghc or hipe");
2319
2320 // Analyze operands of the call, assigning locations to each operand.
2321 SmallVector<CCValAssign, 16> ArgLocs;
2322 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(),
2323 ArgLocs, *DAG.getContext());
2324
2325 // Allocate shadow area for Win64
2326 if (IsWin64) {
2327 CCInfo.AllocateStack(32, 8);
2328 }
2329
2330 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
2331
2332 // Get a count of how many bytes are to be pushed on the stack.
2333 unsigned NumBytes = CCInfo.getNextStackOffset();
2334 if (IsSibcall)
2335 // This is a sibcall. The memory operands are already in place in the
2336 // caller's incoming argument area (i.e. in its own caller's stack frame).
2337 NumBytes = 0;
2338 else if (getTargetMachine().Options.GuaranteedTailCallOpt &&
2339 IsTailCallConvention(CallConv))
2340 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
2341
2342 int FPDiff = 0;
2343 if (isTailCall && !IsSibcall) {
2344 // Lower arguments at fp - stackoffset + fpdiff.
2345 X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
2346 unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
2347
2348 FPDiff = NumBytesCallerPushed - NumBytes;
2349
2350 // Set the delta of movement of the returnaddr stackslot, but only if the
2351 // new delta is below the previously recorded one.
2352 if (FPDiff < X86Info->getTCReturnAddrDelta())
2353 X86Info->setTCReturnAddrDelta(FPDiff);
2354 }
2355
2356 if (!IsSibcall)
2357 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
2358
2359 SDValue RetAddrFrIdx;
2360 // Load return address for tail calls.
2361 if (isTailCall && FPDiff)
2362 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
2363 Is64Bit, FPDiff, dl);
2364
2365 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2366 SmallVector<SDValue, 8> MemOpChains;
2367 SDValue StackPtr;
2368
2369 // Walk the register/memloc assignments, inserting copies/loads. In the case
2370 // of tail call optimization, arguments are handled later.
2371 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2372 CCValAssign &VA = ArgLocs[i];
2373 EVT RegVT = VA.getLocVT();
2374 SDValue Arg = OutVals[i];
2375 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2376 bool isByVal = Flags.isByVal();
2377
2378 // Promote the value if needed.
2379 switch (VA.getLocInfo()) {
2380 default: llvm_unreachable("Unknown loc info!");
2381 case CCValAssign::Full: break;
2382 case CCValAssign::SExt:
2383 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
2384 break;
2385 case CCValAssign::ZExt:
2386 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
2387 break;
2388 case CCValAssign::AExt:
2389 if (RegVT.is128BitVector()) {
2390 // Special case: passing MMX values in XMM registers.
2391 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg); 2392 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg); 2393 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg); 2394 } else 2395 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg); 2396 break; 2397 case CCValAssign::BCvt: 2398 Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg); 2399 break; 2400 case CCValAssign::Indirect: { 2401 // Store the argument. 2402 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT()); 2403 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex(); 2404 Chain = DAG.getStore(Chain, dl, Arg, SpillSlot, 2405 MachinePointerInfo::getFixedStack(FI), 2406 false, false, 0); 2407 Arg = SpillSlot; 2408 break; 2409 } 2410 } 2411 2412 if (VA.isRegLoc()) { 2413 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 2414 if (isVarArg && IsWin64) { 2415 // Win64 ABI requires argument XMM reg to be copied to the corresponding 2416 // shadow reg if callee is a varargs function. 2417 unsigned ShadowReg = 0; 2418 switch (VA.getLocReg()) { 2419 case X86::XMM0: ShadowReg = X86::RCX; break; 2420 case X86::XMM1: ShadowReg = X86::RDX; break; 2421 case X86::XMM2: ShadowReg = X86::R8; break; 2422 case X86::XMM3: ShadowReg = X86::R9; break; 2423 } 2424 if (ShadowReg) 2425 RegsToPass.push_back(std::make_pair(ShadowReg, Arg)); 2426 } 2427 } else if (!IsSibcall && (!isTailCall || isByVal)) { 2428 assert(VA.isMemLoc()); 2429 if (StackPtr.getNode() == 0) 2430 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(), 2431 getPointerTy()); 2432 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg, 2433 dl, DAG, VA, Flags)); 2434 } 2435 } 2436 2437 if (!MemOpChains.empty()) 2438 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2439 &MemOpChains[0], MemOpChains.size()); 2440 2441 if (Subtarget->isPICStyleGOT()) { 2442 // ELF / PIC requires GOT in the EBX register before function calls via PLT 2443 // GOT pointer. 2444 if (!isTailCall) { 2445 RegsToPass.push_back(std::make_pair(unsigned(X86::EBX), 2446 DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), getPointerTy()))); 2447 } else { 2448 // If we are tail calling and generating PIC/GOT style code load the 2449 // address of the callee into ECX. The value in ecx is used as target of 2450 // the tail jump. This is done to circumvent the ebx/callee-saved problem 2451 // for tail calls on PIC/GOT architectures. Normally we would just put the 2452 // address of GOT into ebx and then call target@PLT. But for tail calls 2453 // ebx would be restored (since ebx is callee saved) before jumping to the 2454 // target@PLT. 2455 2456 // Note: The actual moving to ECX is done further down. 2457 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee); 2458 if (G && !G->getGlobal()->hasHiddenVisibility() && 2459 !G->getGlobal()->hasProtectedVisibility()) 2460 Callee = LowerGlobalAddress(Callee, DAG); 2461 else if (isa<ExternalSymbolSDNode>(Callee)) 2462 Callee = LowerExternalSymbol(Callee, DAG); 2463 } 2464 } 2465 2466 if (Is64Bit && isVarArg && !IsWin64) { 2467 // From AMD64 ABI document: 2468 // For calls that may call functions that use varargs or stdargs 2469 // (prototype-less calls or calls to functions containing ellipsis (...) in 2470 // the declaration) %al is used as hidden argument to specify the number 2471 // of SSE registers used. The contents of %al do not need to match exactly 2472 // the number of registers, but must be an ubound on the number of SSE 2473 // registers used and is in the range 0 - 8 inclusive. 
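    // For example, a variadic call that passes two doubles in XMM0 and XMM1
    // only needs %al >= 2; the code below simply passes the exact number of
    // XMM argument registers that were allocated.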
2474 2475 // Count the number of XMM registers allocated. 2476 static const uint16_t XMMArgRegs[] = { 2477 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 2478 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 2479 }; 2480 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8); 2481 assert((Subtarget->hasSSE1() || !NumXMMRegs) 2482 && "SSE registers cannot be used when SSE is disabled"); 2483 2484 RegsToPass.push_back(std::make_pair(unsigned(X86::AL), 2485 DAG.getConstant(NumXMMRegs, MVT::i8))); 2486 } 2487 2488 // For tail calls lower the arguments to the 'real' stack slot. 2489 if (isTailCall) { 2490 // Force all the incoming stack arguments to be loaded from the stack 2491 // before any new outgoing arguments are stored to the stack, because the 2492 // outgoing stack slots may alias the incoming argument stack slots, and 2493 // the alias isn't otherwise explicit. This is slightly more conservative 2494 // than necessary, because it means that each store effectively depends 2495 // on every argument instead of just those arguments it would clobber. 2496 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain); 2497 2498 SmallVector<SDValue, 8> MemOpChains2; 2499 SDValue FIN; 2500 int FI = 0; 2501 if (getTargetMachine().Options.GuaranteedTailCallOpt) { 2502 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2503 CCValAssign &VA = ArgLocs[i]; 2504 if (VA.isRegLoc()) 2505 continue; 2506 assert(VA.isMemLoc()); 2507 SDValue Arg = OutVals[i]; 2508 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2509 // Create frame index. 2510 int32_t Offset = VA.getLocMemOffset()+FPDiff; 2511 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8; 2512 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true); 2513 FIN = DAG.getFrameIndex(FI, getPointerTy()); 2514 2515 if (Flags.isByVal()) { 2516 // Copy relative to framepointer. 2517 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset()); 2518 if (StackPtr.getNode() == 0) 2519 StackPtr = DAG.getCopyFromReg(Chain, dl, 2520 RegInfo->getStackRegister(), 2521 getPointerTy()); 2522 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source); 2523 2524 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN, 2525 ArgChain, 2526 Flags, DAG, dl)); 2527 } else { 2528 // Store relative to framepointer. 2529 MemOpChains2.push_back( 2530 DAG.getStore(ArgChain, dl, Arg, FIN, 2531 MachinePointerInfo::getFixedStack(FI), 2532 false, false, 0)); 2533 } 2534 } 2535 } 2536 2537 if (!MemOpChains2.empty()) 2538 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2539 &MemOpChains2[0], MemOpChains2.size()); 2540 2541 // Store the return address to the appropriate stack slot. 2542 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx, 2543 getPointerTy(), RegInfo->getSlotSize(), 2544 FPDiff, dl); 2545 } 2546 2547 // Build a sequence of copy-to-reg nodes chained together with token chain 2548 // and flag operands which copy the outgoing args into registers. 2549 SDValue InFlag; 2550 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 2551 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 2552 RegsToPass[i].second, InFlag); 2553 InFlag = Chain.getValue(1); 2554 } 2555 2556 if (getTargetMachine().getCodeModel() == CodeModel::Large) { 2557 assert(Is64Bit && "Large code model is only legal in 64-bit mode."); 2558 // In the 64-bit large code model, we have to make all calls 2559 // through a register, since the call instruction's 32-bit 2560 // pc-relative offset may not be large enough to hold the whole 2561 // address. 
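    // (Nothing to do here: by leaving Callee as a generic node instead of
    // lowering it to a TargetGlobalAddress below, the address ends up being
    // materialized into a register rather than folded into the call.)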
2562 } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
2563 // If the callee is a GlobalAddress node (quite common, every direct call
2564 // is), turn it into a TargetGlobalAddress node so that legalize doesn't hack
2565 // it.
2566
2567 // We should use extra load for direct calls to dllimported functions in
2568 // non-JIT mode.
2569 const GlobalValue *GV = G->getGlobal();
2570 if (!GV->hasDLLImportLinkage()) {
2571 unsigned char OpFlags = 0;
2572 bool ExtraLoad = false;
2573 unsigned WrapperKind = ISD::DELETED_NODE;
2574
2575 // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
2576 // external symbols must go through the PLT in PIC mode. If the symbol
2577 // has hidden or protected visibility, or if it is static or local, then
2578 // we don't need to use the PLT - we can directly call it.
2579 if (Subtarget->isTargetELF() &&
2580 getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
2581 GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
2582 OpFlags = X86II::MO_PLT;
2583 } else if (Subtarget->isPICStyleStubAny() &&
2584 (GV->isDeclaration() || GV->isWeakForLinker()) &&
2585 (!Subtarget->getTargetTriple().isMacOSX() ||
2586 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
2587 // PC-relative references to external symbols should go through $stub,
2588 // unless we're building with the Leopard linker or later, which
2589 // automatically synthesizes these stubs.
2590 OpFlags = X86II::MO_DARWIN_STUB;
2591 } else if (Subtarget->isPICStyleRIPRel() &&
2592 isa<Function>(GV) &&
2593 cast<Function>(GV)->getAttributes().
2594 hasAttribute(AttributeSet::FunctionIndex,
2595 Attribute::NonLazyBind)) {
2596 // If the function is marked as non-lazy, generate an indirect call
2597 // which loads from the GOT directly. This avoids runtime overhead
2598 // at the cost of eager binding (and one extra byte of encoding).
2599 OpFlags = X86II::MO_GOTPCREL;
2600 WrapperKind = X86ISD::WrapperRIP;
2601 ExtraLoad = true;
2602 }
2603
2604 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(),
2605 G->getOffset(), OpFlags);
2606
2607 // Add a wrapper if needed.
2608 if (WrapperKind != ISD::DELETED_NODE)
2609 Callee = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Callee);
2610 // Add extra indirection if needed.
2611 if (ExtraLoad)
2612 Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee,
2613 MachinePointerInfo::getGOT(),
2614 false, false, false, 0);
2615 }
2616 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
2617 unsigned char OpFlags = 0;
2618
2619 // On ELF targets, in either X86-64 or X86-32 mode, direct calls to
2620 // external symbols should go through the PLT.
2621 if (Subtarget->isTargetELF() &&
2622 getTargetMachine().getRelocationModel() == Reloc::PIC_) {
2623 OpFlags = X86II::MO_PLT;
2624 } else if (Subtarget->isPICStyleStubAny() &&
2625 (!Subtarget->getTargetTriple().isMacOSX() ||
2626 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
2627 // PC-relative references to external symbols should go through $stub,
2628 // unless we're building with the Leopard linker or later, which
2629 // automatically synthesizes these stubs.
2630 OpFlags = X86II::MO_DARWIN_STUB;
2631 }
2632
2633 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(),
2634 OpFlags);
2635 }
2636
2637 // Returns a chain & a flag for retval copy to use.
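  // (The MVT::Glue result is threaded through the argument copies and is what
  // LowerCallResult consumes as InFlag when reading the return registers.)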
2638 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 2639 SmallVector<SDValue, 8> Ops; 2640 2641 if (!IsSibcall && isTailCall) { 2642 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 2643 DAG.getIntPtrConstant(0, true), InFlag); 2644 InFlag = Chain.getValue(1); 2645 } 2646 2647 Ops.push_back(Chain); 2648 Ops.push_back(Callee); 2649 2650 if (isTailCall) 2651 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32)); 2652 2653 // Add argument registers to the end of the list so that they are known live 2654 // into the call. 2655 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 2656 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 2657 RegsToPass[i].second.getValueType())); 2658 2659 // Add a register mask operand representing the call-preserved registers. 2660 const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); 2661 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv); 2662 assert(Mask && "Missing call preserved mask for calling convention"); 2663 Ops.push_back(DAG.getRegisterMask(Mask)); 2664 2665 if (InFlag.getNode()) 2666 Ops.push_back(InFlag); 2667 2668 if (isTailCall) { 2669 // We used to do: 2670 //// If this is the first return lowered for this function, add the regs 2671 //// to the liveout set for the function. 2672 // This isn't right, although it's probably harmless on x86; liveouts 2673 // should be computed from returns not tail calls. Consider a void 2674 // function making a tail call to a function returning int. 2675 return DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size()); 2676 } 2677 2678 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, &Ops[0], Ops.size()); 2679 InFlag = Chain.getValue(1); 2680 2681 // Create the CALLSEQ_END node. 2682 unsigned NumBytesForCalleeToPush; 2683 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg, 2684 getTargetMachine().Options.GuaranteedTailCallOpt)) 2685 NumBytesForCalleeToPush = NumBytes; // Callee pops everything 2686 else if (!Is64Bit && !IsTailCallConvention(CallConv) && !IsWindows && 2687 SR == StackStructReturn) 2688 // If this is a call to a struct-return function, the callee 2689 // pops the hidden struct pointer, so we have to push it back. 2690 // This is common for Darwin/X86, Linux & Mingw32 targets. 2691 // For MSVC Win32 targets, the caller pops the hidden struct pointer. 2692 NumBytesForCalleeToPush = 4; 2693 else 2694 NumBytesForCalleeToPush = 0; // Callee pops nothing. 2695 2696 // Returns a flag for retval copy to use. 2697 if (!IsSibcall) { 2698 Chain = DAG.getCALLSEQ_END(Chain, 2699 DAG.getIntPtrConstant(NumBytes, true), 2700 DAG.getIntPtrConstant(NumBytesForCalleeToPush, 2701 true), 2702 InFlag); 2703 InFlag = Chain.getValue(1); 2704 } 2705 2706 // Handle result values, copying them out of physregs into vregs that we 2707 // return. 2708 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, 2709 Ins, dl, DAG, InVals); 2710} 2711 2712//===----------------------------------------------------------------------===// 2713// Fast Calling Convention (tail call) implementation 2714//===----------------------------------------------------------------------===// 2715 2716// Like std call, callee cleans arguments, convention except that ECX is 2717// reserved for storing the tail called function address. Only 2 registers are 2718// free for argument passing (inreg). 
Tail call optimization is performed
2719// provided:
2720// * tailcallopt is enabled
2721// * caller/callee are fastcc
2722// On X86_64 architecture with GOT-style position independent code only local
2723// (within module) calls are supported at the moment.
2724// To keep the stack aligned according to the platform ABI, the function
2725// GetAlignedArgumentStackSize ensures that the argument delta is always a
2726// multiple of the stack alignment. (Dynamic linkers need this - darwin's dyld for example.)
2727// If a tail called function callee has more arguments than the caller, the
2728// caller needs to make sure that there is room to move the RETADDR to. This is
2729// achieved by reserving an area the size of the argument delta right after the
2730// original RETADDR, but before the saved framepointer or the spilled registers,
2731// e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4)
2732// stack layout:
2733// arg1
2734// arg2
2735// RETADDR
2736// [ new RETADDR
2737// move area ]
2738// (possible EBP)
2739// ESI
2740// EDI
2741// local1 ..
2742
2743/// GetAlignedArgumentStackSize - Round the stack size up to e.g. 16n + 12, so
2744/// that the stack stays 16-byte aligned once the return address slot is added.
2745unsigned
2746X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
2747 SelectionDAG& DAG) const {
2748 MachineFunction &MF = DAG.getMachineFunction();
2749 const TargetMachine &TM = MF.getTarget();
2750 const TargetFrameLowering &TFI = *TM.getFrameLowering();
2751 unsigned StackAlignment = TFI.getStackAlignment();
2752 uint64_t AlignMask = StackAlignment - 1;
2753 int64_t Offset = StackSize;
2754 unsigned SlotSize = RegInfo->getSlotSize();
2755 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
2756 // The misalignment is at most (StackAlignment - SlotSize); just add the difference.
2757 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
2758 } else {
2759 // Mask out the lower bits, then add StackAlignment plus (StackAlignment - SlotSize).
2760 Offset = ((~AlignMask) & Offset) + StackAlignment +
2761 (StackAlignment-SlotSize);
2762 }
2763 return Offset;
2764}
2765
2766/// MatchingStackOffset - Return true if the given stack call argument is
2767/// already available in the same position (relatively) of the caller's
2768/// incoming argument stack.
2769static
2770bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
2771 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
2772 const X86InstrInfo *TII) {
2773 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
2774 int FI = INT_MAX;
2775 if (Arg.getOpcode() == ISD::CopyFromReg) {
2776 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
2777 if (!TargetRegisterInfo::isVirtualRegister(VR))
2778 return false;
2779 MachineInstr *Def = MRI->getVRegDef(VR);
2780 if (!Def)
2781 return false;
2782 if (!Flags.isByVal()) {
2783 if (!TII->isLoadFromStackSlot(Def, FI))
2784 return false;
2785 } else {
2786 unsigned Opcode = Def->getOpcode();
2787 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r) &&
2788 Def->getOperand(1).isFI()) {
2789 FI = Def->getOperand(1).getIndex();
2790 Bytes = Flags.getByValSize();
2791 } else
2792 return false;
2793 }
2794 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
2795 if (Flags.isByVal())
2796 // ByVal argument is passed in as a pointer but it's now being
2797 // dereferenced. e.g.
2798 // define @foo(%struct.X* %A) { 2799 // tail call @bar(%struct.X* byval %A) 2800 // } 2801 return false; 2802 SDValue Ptr = Ld->getBasePtr(); 2803 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr); 2804 if (!FINode) 2805 return false; 2806 FI = FINode->getIndex(); 2807 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) { 2808 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg); 2809 FI = FINode->getIndex(); 2810 Bytes = Flags.getByValSize(); 2811 } else 2812 return false; 2813 2814 assert(FI != INT_MAX); 2815 if (!MFI->isFixedObjectIndex(FI)) 2816 return false; 2817 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI); 2818} 2819 2820/// IsEligibleForTailCallOptimization - Check whether the call is eligible 2821/// for tail call optimization. Targets which want to do tail call 2822/// optimization should implement this function. 2823bool 2824X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 2825 CallingConv::ID CalleeCC, 2826 bool isVarArg, 2827 bool isCalleeStructRet, 2828 bool isCallerStructRet, 2829 Type *RetTy, 2830 const SmallVectorImpl<ISD::OutputArg> &Outs, 2831 const SmallVectorImpl<SDValue> &OutVals, 2832 const SmallVectorImpl<ISD::InputArg> &Ins, 2833 SelectionDAG &DAG) const { 2834 if (!IsTailCallConvention(CalleeCC) && 2835 CalleeCC != CallingConv::C) 2836 return false; 2837 2838 // If -tailcallopt is specified, make fastcc functions tail-callable. 2839 const MachineFunction &MF = DAG.getMachineFunction(); 2840 const Function *CallerF = DAG.getMachineFunction().getFunction(); 2841 2842 // If the function return type is x86_fp80 and the callee return type is not, 2843 // then the FP_EXTEND of the call result is not a nop. It's not safe to 2844 // perform a tailcall optimization here. 2845 if (CallerF->getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty()) 2846 return false; 2847 2848 CallingConv::ID CallerCC = CallerF->getCallingConv(); 2849 bool CCMatch = CallerCC == CalleeCC; 2850 2851 if (getTargetMachine().Options.GuaranteedTailCallOpt) { 2852 if (IsTailCallConvention(CalleeCC) && CCMatch) 2853 return true; 2854 return false; 2855 } 2856 2857 // Look for obvious safe cases to perform tail call optimization that do not 2858 // require ABI changes. This is what gcc calls sibcall. 2859 2860 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to 2861 // emit a special epilogue. 2862 if (RegInfo->needsStackRealignment(MF)) 2863 return false; 2864 2865 // Also avoid sibcall optimization if either caller or callee uses struct 2866 // return semantics. 2867 if (isCalleeStructRet || isCallerStructRet) 2868 return false; 2869 2870 // An stdcall caller is expected to clean up its arguments; the callee 2871 // isn't going to do that. 2872 if (!CCMatch && CallerCC == CallingConv::X86_StdCall) 2873 return false; 2874 2875 // Do not sibcall optimize vararg calls unless all arguments are passed via 2876 // registers. 2877 if (isVarArg && !Outs.empty()) { 2878 2879 // Optimizing for varargs on Win64 is unlikely to be safe without 2880 // additional testing. 
2881 if (Subtarget->isTargetWin64()) 2882 return false; 2883 2884 SmallVector<CCValAssign, 16> ArgLocs; 2885 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), 2886 getTargetMachine(), ArgLocs, *DAG.getContext()); 2887 2888 CCInfo.AnalyzeCallOperands(Outs, CC_X86); 2889 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) 2890 if (!ArgLocs[i].isRegLoc()) 2891 return false; 2892 } 2893 2894 // If the call result is in ST0 / ST1, it needs to be popped off the x87 2895 // stack. Therefore, if it's not used by the call it is not safe to optimize 2896 // this into a sibcall. 2897 bool Unused = false; 2898 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { 2899 if (!Ins[i].Used) { 2900 Unused = true; 2901 break; 2902 } 2903 } 2904 if (Unused) { 2905 SmallVector<CCValAssign, 16> RVLocs; 2906 CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), 2907 getTargetMachine(), RVLocs, *DAG.getContext()); 2908 CCInfo.AnalyzeCallResult(Ins, RetCC_X86); 2909 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 2910 CCValAssign &VA = RVLocs[i]; 2911 if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) 2912 return false; 2913 } 2914 } 2915 2916 // If the calling conventions do not match, then we'd better make sure the 2917 // results are returned in the same way as what the caller expects. 2918 if (!CCMatch) { 2919 SmallVector<CCValAssign, 16> RVLocs1; 2920 CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), 2921 getTargetMachine(), RVLocs1, *DAG.getContext()); 2922 CCInfo1.AnalyzeCallResult(Ins, RetCC_X86); 2923 2924 SmallVector<CCValAssign, 16> RVLocs2; 2925 CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), 2926 getTargetMachine(), RVLocs2, *DAG.getContext()); 2927 CCInfo2.AnalyzeCallResult(Ins, RetCC_X86); 2928 2929 if (RVLocs1.size() != RVLocs2.size()) 2930 return false; 2931 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) { 2932 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc()) 2933 return false; 2934 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo()) 2935 return false; 2936 if (RVLocs1[i].isRegLoc()) { 2937 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg()) 2938 return false; 2939 } else { 2940 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset()) 2941 return false; 2942 } 2943 } 2944 } 2945 2946 // If the callee takes no arguments then go on to check the results of the 2947 // call. 2948 if (!Outs.empty()) { 2949 // Check if stack adjustment is needed. For now, do not do this if any 2950 // argument is passed on the stack. 2951 SmallVector<CCValAssign, 16> ArgLocs; 2952 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), 2953 getTargetMachine(), ArgLocs, *DAG.getContext()); 2954 2955 // Allocate shadow area for Win64 2956 if (Subtarget->isTargetWin64()) { 2957 CCInfo.AllocateStack(32, 8); 2958 } 2959 2960 CCInfo.AnalyzeCallOperands(Outs, CC_X86); 2961 if (CCInfo.getNextStackOffset()) { 2962 MachineFunction &MF = DAG.getMachineFunction(); 2963 if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn()) 2964 return false; 2965 2966 // Check if the arguments are already laid out in the right way as 2967 // the caller's fixed stack objects. 
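      // In other words, each stack-passed operand of the tail call must already
      // occupy exactly the fixed-object slot (same offset and size) in which the
      // caller received its corresponding incoming argument; MatchingStackOffset
      // above performs that check per argument.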
2968 MachineFrameInfo *MFI = MF.getFrameInfo(); 2969 const MachineRegisterInfo *MRI = &MF.getRegInfo(); 2970 const X86InstrInfo *TII = 2971 ((const X86TargetMachine&)getTargetMachine()).getInstrInfo(); 2972 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2973 CCValAssign &VA = ArgLocs[i]; 2974 SDValue Arg = OutVals[i]; 2975 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2976 if (VA.getLocInfo() == CCValAssign::Indirect) 2977 return false; 2978 if (!VA.isRegLoc()) { 2979 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, 2980 MFI, MRI, TII)) 2981 return false; 2982 } 2983 } 2984 } 2985 2986 // If the tailcall address may be in a register, then make sure it's 2987 // possible to register allocate for it. In 32-bit, the call address can 2988 // only target EAX, EDX, or ECX since the tail call must be scheduled after 2989 // callee-saved registers are restored. These happen to be the same 2990 // registers used to pass 'inreg' arguments so watch out for those. 2991 if (!Subtarget->is64Bit() && 2992 ((!isa<GlobalAddressSDNode>(Callee) && 2993 !isa<ExternalSymbolSDNode>(Callee)) || 2994 getTargetMachine().getRelocationModel() == Reloc::PIC_)) { 2995 unsigned NumInRegs = 0; 2996 // In PIC we need an extra register to formulate the address computation 2997 // for the callee. 2998 unsigned MaxInRegs = 2999 (getTargetMachine().getRelocationModel() == Reloc::PIC_) ? 2 : 3; 3000 3001 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 3002 CCValAssign &VA = ArgLocs[i]; 3003 if (!VA.isRegLoc()) 3004 continue; 3005 unsigned Reg = VA.getLocReg(); 3006 switch (Reg) { 3007 default: break; 3008 case X86::EAX: case X86::EDX: case X86::ECX: 3009 if (++NumInRegs == MaxInRegs) 3010 return false; 3011 break; 3012 } 3013 } 3014 } 3015 } 3016 3017 return true; 3018} 3019 3020FastISel * 3021X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo, 3022 const TargetLibraryInfo *libInfo) const { 3023 return X86::createFastISel(funcInfo, libInfo); 3024} 3025 3026//===----------------------------------------------------------------------===// 3027// Other Lowering Hooks 3028//===----------------------------------------------------------------------===// 3029 3030static bool MayFoldLoad(SDValue Op) { 3031 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode()); 3032} 3033 3034static bool MayFoldIntoStore(SDValue Op) { 3035 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin()); 3036} 3037 3038static bool isTargetShuffle(unsigned Opcode) { 3039 switch(Opcode) { 3040 default: return false; 3041 case X86ISD::PSHUFD: 3042 case X86ISD::PSHUFHW: 3043 case X86ISD::PSHUFLW: 3044 case X86ISD::SHUFP: 3045 case X86ISD::PALIGNR: 3046 case X86ISD::MOVLHPS: 3047 case X86ISD::MOVLHPD: 3048 case X86ISD::MOVHLPS: 3049 case X86ISD::MOVLPS: 3050 case X86ISD::MOVLPD: 3051 case X86ISD::MOVSHDUP: 3052 case X86ISD::MOVSLDUP: 3053 case X86ISD::MOVDDUP: 3054 case X86ISD::MOVSS: 3055 case X86ISD::MOVSD: 3056 case X86ISD::UNPCKL: 3057 case X86ISD::UNPCKH: 3058 case X86ISD::VPERMILP: 3059 case X86ISD::VPERM2X128: 3060 case X86ISD::VPERMI: 3061 return true; 3062 } 3063} 3064 3065static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, 3066 SDValue V1, SelectionDAG &DAG) { 3067 switch(Opc) { 3068 default: llvm_unreachable("Unknown x86 shuffle node"); 3069 case X86ISD::MOVSHDUP: 3070 case X86ISD::MOVSLDUP: 3071 case X86ISD::MOVDDUP: 3072 return DAG.getNode(Opc, dl, VT, V1); 3073 } 3074} 3075 3076static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, 3077 SDValue V1, unsigned 
TargetMask,
 3078                                    SelectionDAG &DAG) {
 3079  switch(Opc) {
 3080  default: llvm_unreachable("Unknown x86 shuffle node");
 3081  case X86ISD::PSHUFD:
 3082  case X86ISD::PSHUFHW:
 3083  case X86ISD::PSHUFLW:
 3084  case X86ISD::VPERMILP:
 3085  case X86ISD::VPERMI:
 3086    return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8));
 3087  }
 3088}
 3089
 3090static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
 3091                                    SDValue V1, SDValue V2, unsigned TargetMask,
 3092                                    SelectionDAG &DAG) {
 3093  switch(Opc) {
 3094  default: llvm_unreachable("Unknown x86 shuffle node");
 3095  case X86ISD::PALIGNR:
 3096  case X86ISD::SHUFP:
 3097  case X86ISD::VPERM2X128:
 3098    return DAG.getNode(Opc, dl, VT, V1, V2,
 3099                       DAG.getConstant(TargetMask, MVT::i8));
 3100  }
 3101}
 3102
 3103static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
 3104                                    SDValue V1, SDValue V2, SelectionDAG &DAG) {
 3105  switch(Opc) {
 3106  default: llvm_unreachable("Unknown x86 shuffle node");
 3107  case X86ISD::MOVLHPS:
 3108  case X86ISD::MOVLHPD:
 3109  case X86ISD::MOVHLPS:
 3110  case X86ISD::MOVLPS:
 3111  case X86ISD::MOVLPD:
 3112  case X86ISD::MOVSS:
 3113  case X86ISD::MOVSD:
 3114  case X86ISD::UNPCKL:
 3115  case X86ISD::UNPCKH:
 3116    return DAG.getNode(Opc, dl, VT, V1, V2);
 3117  }
 3118}
 3119
 3120SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
 3121  MachineFunction &MF = DAG.getMachineFunction();
 3122  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
 3123  int ReturnAddrIndex = FuncInfo->getRAIndex();
 3124
 3125  if (ReturnAddrIndex == 0) {
 3126    // Set up a frame object for the return address.
 3127    unsigned SlotSize = RegInfo->getSlotSize();
 3128    ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize, -SlotSize,
 3129                                                           false);
 3130    FuncInfo->setRAIndex(ReturnAddrIndex);
 3131  }
 3132
 3133  return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
 3134}
 3135
 3136bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
 3137                                       bool hasSymbolicDisplacement) {
 3138  // The offset should fit into a 32-bit immediate field.
 3139  if (!isInt<32>(Offset))
 3140    return false;
 3141
 3142  // If we don't have a symbolic displacement - we don't have any extra
 3143  // restrictions.
 3144  if (!hasSymbolicDisplacement)
 3145    return true;
 3146
 3147  // FIXME: Some tweaks might be needed for medium code model.
 3148  if (M != CodeModel::Small && M != CodeModel::Kernel)
 3149    return false;
 3150
 3151  // For the small code model we assume that the last object ends at least 16MB
 3152  // below the 31-bit boundary. We may also accept pretty large negative
 3153  // constants, knowing that all objects are in the positive half of the address space.
 3154  if (M == CodeModel::Small && Offset < 16*1024*1024)
 3155    return true;
 3156
 3157  // For the kernel code model we know that all objects reside in the negative
 3158  // half of the 32-bit address space. We do not accept negative offsets, since
 3159  // they may fall just below an object, but we may accept pretty large positive ones.
 3160  if (M == CodeModel::Kernel && Offset > 0)
 3161    return true;
 3162
 3163  return false;
 3164}
 3165
 3166/// isCalleePop - Determines whether the callee is required to pop its
 3167/// own arguments. Callee pop is necessary to support tail calls.
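/// For example, a 32-bit X86_StdCall function taking 12 bytes of stack
/// arguments returns with 'ret 12' and pops its own arguments, whereas under
/// the default C convention the caller performs that stack adjustment.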
3168bool X86::isCalleePop(CallingConv::ID CallingConv, 3169 bool is64Bit, bool IsVarArg, bool TailCallOpt) { 3170 if (IsVarArg) 3171 return false; 3172 3173 switch (CallingConv) { 3174 default: 3175 return false; 3176 case CallingConv::X86_StdCall: 3177 return !is64Bit; 3178 case CallingConv::X86_FastCall: 3179 return !is64Bit; 3180 case CallingConv::X86_ThisCall: 3181 return !is64Bit; 3182 case CallingConv::Fast: 3183 return TailCallOpt; 3184 case CallingConv::GHC: 3185 return TailCallOpt; 3186 case CallingConv::HiPE: 3187 return TailCallOpt; 3188 } 3189} 3190 3191/// TranslateX86CC - do a one to one translation of a ISD::CondCode to the X86 3192/// specific condition code, returning the condition code and the LHS/RHS of the 3193/// comparison to make. 3194static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP, 3195 SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) { 3196 if (!isFP) { 3197 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) { 3198 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) { 3199 // X > -1 -> X == 0, jump !sign. 3200 RHS = DAG.getConstant(0, RHS.getValueType()); 3201 return X86::COND_NS; 3202 } 3203 if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) { 3204 // X < 0 -> X == 0, jump on sign. 3205 return X86::COND_S; 3206 } 3207 if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) { 3208 // X < 1 -> X <= 0 3209 RHS = DAG.getConstant(0, RHS.getValueType()); 3210 return X86::COND_LE; 3211 } 3212 } 3213 3214 switch (SetCCOpcode) { 3215 default: llvm_unreachable("Invalid integer condition!"); 3216 case ISD::SETEQ: return X86::COND_E; 3217 case ISD::SETGT: return X86::COND_G; 3218 case ISD::SETGE: return X86::COND_GE; 3219 case ISD::SETLT: return X86::COND_L; 3220 case ISD::SETLE: return X86::COND_LE; 3221 case ISD::SETNE: return X86::COND_NE; 3222 case ISD::SETULT: return X86::COND_B; 3223 case ISD::SETUGT: return X86::COND_A; 3224 case ISD::SETULE: return X86::COND_BE; 3225 case ISD::SETUGE: return X86::COND_AE; 3226 } 3227 } 3228 3229 // First determine if it is required or is profitable to flip the operands. 3230 3231 // If LHS is a foldable load, but RHS is not, flip the condition. 
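  // e.g. (setolt (load X), Y) becomes (setogt Y, (load X)), so the load ends up
  // as the second operand, where UCOMISS/UCOMISD can fold it from memory.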
 3232  if (ISD::isNON_EXTLoad(LHS.getNode()) &&
 3233      !ISD::isNON_EXTLoad(RHS.getNode())) {
 3234    SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
 3235    std::swap(LHS, RHS);
 3236  }
 3237
 3238  switch (SetCCOpcode) {
 3239  default: break;
 3240  case ISD::SETOLT:
 3241  case ISD::SETOLE:
 3242  case ISD::SETUGT:
 3243  case ISD::SETUGE:
 3244    std::swap(LHS, RHS);
 3245    break;
 3246  }
 3247
 3248  // On a floating point condition, the flags are set as follows:
 3249  //  ZF  PF  CF   op
 3250  //   0 | 0 | 0 | X > Y
 3251  //   0 | 0 | 1 | X < Y
 3252  //   1 | 0 | 0 | X == Y
 3253  //   1 | 1 | 1 | unordered
 3254  switch (SetCCOpcode) {
 3255  default: llvm_unreachable("Condcode should be pre-legalized away");
 3256  case ISD::SETUEQ:
 3257  case ISD::SETEQ:  return X86::COND_E;
 3258  case ISD::SETOLT: // flipped
 3259  case ISD::SETOGT:
 3260  case ISD::SETGT:  return X86::COND_A;
 3261  case ISD::SETOLE: // flipped
 3262  case ISD::SETOGE:
 3263  case ISD::SETGE:  return X86::COND_AE;
 3264  case ISD::SETUGT: // flipped
 3265  case ISD::SETULT:
 3266  case ISD::SETLT:  return X86::COND_B;
 3267  case ISD::SETUGE: // flipped
 3268  case ISD::SETULE:
 3269  case ISD::SETLE:  return X86::COND_BE;
 3270  case ISD::SETONE:
 3271  case ISD::SETNE:  return X86::COND_NE;
 3272  case ISD::SETUO:  return X86::COND_P;
 3273  case ISD::SETO:   return X86::COND_NP;
 3274  case ISD::SETOEQ:
 3275  case ISD::SETUNE: return X86::COND_INVALID;
 3276  }
 3277}
 3278
 3279/// hasFPCMov - Is there a floating point cmov for the specific X86 condition
 3280/// code? The current x86 ISA includes the following FP cmov instructions:
 3281/// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
 3282static bool hasFPCMov(unsigned X86CC) {
 3283  switch (X86CC) {
 3284  default:
 3285    return false;
 3286  case X86::COND_B:
 3287  case X86::COND_BE:
 3288  case X86::COND_E:
 3289  case X86::COND_P:
 3290  case X86::COND_A:
 3291  case X86::COND_AE:
 3292  case X86::COND_NE:
 3293  case X86::COND_NP:
 3294    return true;
 3295  }
 3296}
 3297
 3298/// isFPImmLegal - Returns true if the target can instruction select the
 3299/// specified FP immediate natively. If false, the legalizer will
 3300/// materialize the FP immediate as a load from a constant pool.
 3301bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
 3302  for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
 3303    if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
 3304      return true;
 3305  }
 3306  return false;
 3307}
 3308
 3309/// isUndefOrInRange - Return true if Val is undef or if its value falls within
 3310/// the specified range [Low, Hi).
 3311static bool isUndefOrInRange(int Val, int Low, int Hi) {
 3312  return (Val < 0) || (Val >= Low && Val < Hi);
 3313}
 3314
 3315/// isUndefOrEqual - Val is either less than zero (undef) or equal to the
 3316/// specified value.
 3317static bool isUndefOrEqual(int Val, int CmpVal) {
 3318  return (Val < 0 || Val == CmpVal);
 3319}
 3320
 3321/// isSequentialOrUndefInRange - Return true if every element in Mask, beginning
 3322/// at position Pos and ending at Pos+Size, is undef or falls within the
 3323/// sequential range [Low, Low+Size).
 3324static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
 3325                                       unsigned Pos, unsigned Size, int Low) {
 3326  for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
 3327    if (!isUndefOrEqual(Mask[i], Low))
 3328      return false;
 3329  return true;
 3330}
 3331
 3332/// isPSHUFDMask - Return true if the node specifies a shuffle of elements that
 3333/// is suitable for input to PSHUFD or PSHUFW. That is, it doesn't reference
 3334/// the second operand.
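/// For example, <2, 1, 0, 3> on v4i32 is a valid PSHUFD mask, whereas
/// <0, 4, 1, 5> is not, since elements 4 and 5 would come from the second
/// operand.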
3335static bool isPSHUFDMask(ArrayRef<int> Mask, EVT VT) { 3336 if (VT == MVT::v4f32 || VT == MVT::v4i32 ) 3337 return (Mask[0] < 4 && Mask[1] < 4 && Mask[2] < 4 && Mask[3] < 4); 3338 if (VT == MVT::v2f64 || VT == MVT::v2i64) 3339 return (Mask[0] < 2 && Mask[1] < 2); 3340 return false; 3341} 3342 3343/// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that 3344/// is suitable for input to PSHUFHW. 3345static bool isPSHUFHWMask(ArrayRef<int> Mask, EVT VT, bool HasInt256) { 3346 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16)) 3347 return false; 3348 3349 // Lower quadword copied in order or undef. 3350 if (!isSequentialOrUndefInRange(Mask, 0, 4, 0)) 3351 return false; 3352 3353 // Upper quadword shuffled. 3354 for (unsigned i = 4; i != 8; ++i) 3355 if (!isUndefOrInRange(Mask[i], 4, 8)) 3356 return false; 3357 3358 if (VT == MVT::v16i16) { 3359 // Lower quadword copied in order or undef. 3360 if (!isSequentialOrUndefInRange(Mask, 8, 4, 8)) 3361 return false; 3362 3363 // Upper quadword shuffled. 3364 for (unsigned i = 12; i != 16; ++i) 3365 if (!isUndefOrInRange(Mask[i], 12, 16)) 3366 return false; 3367 } 3368 3369 return true; 3370} 3371 3372/// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that 3373/// is suitable for input to PSHUFLW. 3374static bool isPSHUFLWMask(ArrayRef<int> Mask, EVT VT, bool HasInt256) { 3375 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16)) 3376 return false; 3377 3378 // Upper quadword copied in order. 3379 if (!isSequentialOrUndefInRange(Mask, 4, 4, 4)) 3380 return false; 3381 3382 // Lower quadword shuffled. 3383 for (unsigned i = 0; i != 4; ++i) 3384 if (!isUndefOrInRange(Mask[i], 0, 4)) 3385 return false; 3386 3387 if (VT == MVT::v16i16) { 3388 // Upper quadword copied in order. 3389 if (!isSequentialOrUndefInRange(Mask, 12, 4, 12)) 3390 return false; 3391 3392 // Lower quadword shuffled. 3393 for (unsigned i = 8; i != 12; ++i) 3394 if (!isUndefOrInRange(Mask[i], 8, 12)) 3395 return false; 3396 } 3397 3398 return true; 3399} 3400 3401/// isPALIGNRMask - Return true if the node specifies a shuffle of elements that 3402/// is suitable for input to PALIGNR. 3403static bool isPALIGNRMask(ArrayRef<int> Mask, EVT VT, 3404 const X86Subtarget *Subtarget) { 3405 if ((VT.is128BitVector() && !Subtarget->hasSSSE3()) || 3406 (VT.is256BitVector() && !Subtarget->hasInt256())) 3407 return false; 3408 3409 unsigned NumElts = VT.getVectorNumElements(); 3410 unsigned NumLanes = VT.getSizeInBits()/128; 3411 unsigned NumLaneElts = NumElts/NumLanes; 3412 3413 // Do not handle 64-bit element shuffles with palignr. 3414 if (NumLaneElts == 2) 3415 return false; 3416 3417 for (unsigned l = 0; l != NumElts; l+=NumLaneElts) { 3418 unsigned i; 3419 for (i = 0; i != NumLaneElts; ++i) { 3420 if (Mask[i+l] >= 0) 3421 break; 3422 } 3423 3424 // Lane is all undef, go to next lane 3425 if (i == NumLaneElts) 3426 continue; 3427 3428 int Start = Mask[i+l]; 3429 3430 // Make sure its in this lane in one of the sources 3431 if (!isUndefOrInRange(Start, l, l+NumLaneElts) && 3432 !isUndefOrInRange(Start, l+NumElts, l+NumElts+NumLaneElts)) 3433 return false; 3434 3435 // If not lane 0, then we must match lane 0 3436 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Start, Mask[i]+l)) 3437 return false; 3438 3439 // Correct second source to be contiguous with first source 3440 if (Start >= (int)NumElts) 3441 Start -= NumElts - NumLaneElts; 3442 3443 // Make sure we're shifting in the right direction. 
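    // Viewing the lane's two sources as one contiguous window, the first
    // referenced element must sit strictly above its result position, so the
    // rotate amount (Start - i) stays positive, as getShufflePALIGNRImmediate
    // below expects.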
3444 if (Start <= (int)(i+l)) 3445 return false; 3446 3447 Start -= i; 3448 3449 // Check the rest of the elements to see if they are consecutive. 3450 for (++i; i != NumLaneElts; ++i) { 3451 int Idx = Mask[i+l]; 3452 3453 // Make sure its in this lane 3454 if (!isUndefOrInRange(Idx, l, l+NumLaneElts) && 3455 !isUndefOrInRange(Idx, l+NumElts, l+NumElts+NumLaneElts)) 3456 return false; 3457 3458 // If not lane 0, then we must match lane 0 3459 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Idx, Mask[i]+l)) 3460 return false; 3461 3462 if (Idx >= (int)NumElts) 3463 Idx -= NumElts - NumLaneElts; 3464 3465 if (!isUndefOrEqual(Idx, Start+i)) 3466 return false; 3467 3468 } 3469 } 3470 3471 return true; 3472} 3473 3474/// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming 3475/// the two vector operands have swapped position. 3476static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask, 3477 unsigned NumElems) { 3478 for (unsigned i = 0; i != NumElems; ++i) { 3479 int idx = Mask[i]; 3480 if (idx < 0) 3481 continue; 3482 else if (idx < (int)NumElems) 3483 Mask[i] = idx + NumElems; 3484 else 3485 Mask[i] = idx - NumElems; 3486 } 3487} 3488 3489/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand 3490/// specifies a shuffle of elements that is suitable for input to 128/256-bit 3491/// SHUFPS and SHUFPD. If Commuted is true, then it checks for sources to be 3492/// reverse of what x86 shuffles want. 3493static bool isSHUFPMask(ArrayRef<int> Mask, EVT VT, bool HasFp256, 3494 bool Commuted = false) { 3495 if (!HasFp256 && VT.is256BitVector()) 3496 return false; 3497 3498 unsigned NumElems = VT.getVectorNumElements(); 3499 unsigned NumLanes = VT.getSizeInBits()/128; 3500 unsigned NumLaneElems = NumElems/NumLanes; 3501 3502 if (NumLaneElems != 2 && NumLaneElems != 4) 3503 return false; 3504 3505 // VSHUFPSY divides the resulting vector into 4 chunks. 3506 // The sources are also splitted into 4 chunks, and each destination 3507 // chunk must come from a different source chunk. 3508 // 3509 // SRC1 => X7 X6 X5 X4 X3 X2 X1 X0 3510 // SRC2 => Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y9 3511 // 3512 // DST => Y7..Y4, Y7..Y4, X7..X4, X7..X4, 3513 // Y3..Y0, Y3..Y0, X3..X0, X3..X0 3514 // 3515 // VSHUFPDY divides the resulting vector into 4 chunks. 3516 // The sources are also splitted into 4 chunks, and each destination 3517 // chunk must come from a different source chunk. 3518 // 3519 // SRC1 => X3 X2 X1 X0 3520 // SRC2 => Y3 Y2 Y1 Y0 3521 // 3522 // DST => Y3..Y2, X3..X2, Y1..Y0, X1..X0 3523 // 3524 unsigned HalfLaneElems = NumLaneElems/2; 3525 for (unsigned l = 0; l != NumElems; l += NumLaneElems) { 3526 for (unsigned i = 0; i != NumLaneElems; ++i) { 3527 int Idx = Mask[i+l]; 3528 unsigned RngStart = l + ((Commuted == (i<HalfLaneElems)) ? NumElems : 0); 3529 if (!isUndefOrInRange(Idx, RngStart, RngStart+NumLaneElems)) 3530 return false; 3531 // For VSHUFPSY, the mask of the second half must be the same as the 3532 // first but with the appropriate offsets. This works in the same way as 3533 // VPERMILPS works with masks. 3534 if (NumElems != 8 || l == 0 || Mask[i] < 0) 3535 continue; 3536 if (!isUndefOrEqual(Idx, Mask[i]+l)) 3537 return false; 3538 } 3539 } 3540 3541 return true; 3542} 3543 3544/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand 3545/// specifies a shuffle of elements that is suitable for input to MOVHLPS. 
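/// MOVHLPS copies the high two elements of the second source into the low half
/// of the destination and keeps the destination's own high half, so the
/// expected v4f32 mask is <6, 7, 2, 3> (allowing undefs).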
3546static bool isMOVHLPSMask(ArrayRef<int> Mask, EVT VT) { 3547 if (!VT.is128BitVector()) 3548 return false; 3549 3550 unsigned NumElems = VT.getVectorNumElements(); 3551 3552 if (NumElems != 4) 3553 return false; 3554 3555 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3 3556 return isUndefOrEqual(Mask[0], 6) && 3557 isUndefOrEqual(Mask[1], 7) && 3558 isUndefOrEqual(Mask[2], 2) && 3559 isUndefOrEqual(Mask[3], 3); 3560} 3561 3562/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form 3563/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef, 3564/// <2, 3, 2, 3> 3565static bool isMOVHLPS_v_undef_Mask(ArrayRef<int> Mask, EVT VT) { 3566 if (!VT.is128BitVector()) 3567 return false; 3568 3569 unsigned NumElems = VT.getVectorNumElements(); 3570 3571 if (NumElems != 4) 3572 return false; 3573 3574 return isUndefOrEqual(Mask[0], 2) && 3575 isUndefOrEqual(Mask[1], 3) && 3576 isUndefOrEqual(Mask[2], 2) && 3577 isUndefOrEqual(Mask[3], 3); 3578} 3579 3580/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand 3581/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}. 3582static bool isMOVLPMask(ArrayRef<int> Mask, EVT VT) { 3583 if (!VT.is128BitVector()) 3584 return false; 3585 3586 unsigned NumElems = VT.getVectorNumElements(); 3587 3588 if (NumElems != 2 && NumElems != 4) 3589 return false; 3590 3591 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 3592 if (!isUndefOrEqual(Mask[i], i + NumElems)) 3593 return false; 3594 3595 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i) 3596 if (!isUndefOrEqual(Mask[i], i)) 3597 return false; 3598 3599 return true; 3600} 3601 3602/// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand 3603/// specifies a shuffle of elements that is suitable for input to MOVLHPS. 3604static bool isMOVLHPSMask(ArrayRef<int> Mask, EVT VT) { 3605 if (!VT.is128BitVector()) 3606 return false; 3607 3608 unsigned NumElems = VT.getVectorNumElements(); 3609 3610 if (NumElems != 2 && NumElems != 4) 3611 return false; 3612 3613 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 3614 if (!isUndefOrEqual(Mask[i], i)) 3615 return false; 3616 3617 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 3618 if (!isUndefOrEqual(Mask[i + e], i + NumElems)) 3619 return false; 3620 3621 return true; 3622} 3623 3624// 3625// Some special combinations that can be optimized. 3626// 3627static 3628SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp, 3629 SelectionDAG &DAG) { 3630 MVT VT = SVOp->getValueType(0).getSimpleVT(); 3631 DebugLoc dl = SVOp->getDebugLoc(); 3632 3633 if (VT != MVT::v8i32 && VT != MVT::v8f32) 3634 return SDValue(); 3635 3636 ArrayRef<int> Mask = SVOp->getMask(); 3637 3638 // These are the special masks that may be optimized. 3639 static const int MaskToOptimizeEven[] = {0, 8, 2, 10, 4, 12, 6, 14}; 3640 static const int MaskToOptimizeOdd[] = {1, 9, 3, 11, 5, 13, 7, 15}; 3641 bool MatchEvenMask = true; 3642 bool MatchOddMask = true; 3643 for (int i=0; i<8; ++i) { 3644 if (!isUndefOrEqual(Mask[i], MaskToOptimizeEven[i])) 3645 MatchEvenMask = false; 3646 if (!isUndefOrEqual(Mask[i], MaskToOptimizeOdd[i])) 3647 MatchOddMask = false; 3648 } 3649 3650 if (!MatchEvenMask && !MatchOddMask) 3651 return SDValue(); 3652 3653 SDValue UndefNode = DAG.getNode(ISD::UNDEF, dl, VT); 3654 3655 SDValue Op0 = SVOp->getOperand(0); 3656 SDValue Op1 = SVOp->getOperand(1); 3657 3658 if (MatchEvenMask) { 3659 // Shift the second operand right to 32 bits. 
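    // i.e. move each even element of Op1 up into the adjacent odd position, so
    // that one blend of Op0's even lanes with the shifted Op1's odd lanes
    // reproduces the even interleave {0, 8, 2, 10, 4, 12, 6, 14}.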
3660 static const int ShiftRightMask[] = {-1, 0, -1, 2, -1, 4, -1, 6 }; 3661 Op1 = DAG.getVectorShuffle(VT, dl, Op1, UndefNode, ShiftRightMask); 3662 } else { 3663 // Shift the first operand left to 32 bits. 3664 static const int ShiftLeftMask[] = {1, -1, 3, -1, 5, -1, 7, -1 }; 3665 Op0 = DAG.getVectorShuffle(VT, dl, Op0, UndefNode, ShiftLeftMask); 3666 } 3667 static const int BlendMask[] = {0, 9, 2, 11, 4, 13, 6, 15}; 3668 return DAG.getVectorShuffle(VT, dl, Op0, Op1, BlendMask); 3669} 3670 3671/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand 3672/// specifies a shuffle of elements that is suitable for input to UNPCKL. 3673static bool isUNPCKLMask(ArrayRef<int> Mask, EVT VT, 3674 bool HasInt256, bool V2IsSplat = false) { 3675 unsigned NumElts = VT.getVectorNumElements(); 3676 3677 assert((VT.is128BitVector() || VT.is256BitVector()) && 3678 "Unsupported vector type for unpckh"); 3679 3680 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 && 3681 (!HasInt256 || (NumElts != 16 && NumElts != 32))) 3682 return false; 3683 3684 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 3685 // independently on 128-bit lanes. 3686 unsigned NumLanes = VT.getSizeInBits()/128; 3687 unsigned NumLaneElts = NumElts/NumLanes; 3688 3689 for (unsigned l = 0; l != NumLanes; ++l) { 3690 for (unsigned i = l*NumLaneElts, j = l*NumLaneElts; 3691 i != (l+1)*NumLaneElts; 3692 i += 2, ++j) { 3693 int BitI = Mask[i]; 3694 int BitI1 = Mask[i+1]; 3695 if (!isUndefOrEqual(BitI, j)) 3696 return false; 3697 if (V2IsSplat) { 3698 if (!isUndefOrEqual(BitI1, NumElts)) 3699 return false; 3700 } else { 3701 if (!isUndefOrEqual(BitI1, j + NumElts)) 3702 return false; 3703 } 3704 } 3705 } 3706 3707 return true; 3708} 3709 3710/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand 3711/// specifies a shuffle of elements that is suitable for input to UNPCKH. 3712static bool isUNPCKHMask(ArrayRef<int> Mask, EVT VT, 3713 bool HasInt256, bool V2IsSplat = false) { 3714 unsigned NumElts = VT.getVectorNumElements(); 3715 3716 assert((VT.is128BitVector() || VT.is256BitVector()) && 3717 "Unsupported vector type for unpckh"); 3718 3719 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 && 3720 (!HasInt256 || (NumElts != 16 && NumElts != 32))) 3721 return false; 3722 3723 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 3724 // independently on 128-bit lanes. 3725 unsigned NumLanes = VT.getSizeInBits()/128; 3726 unsigned NumLaneElts = NumElts/NumLanes; 3727 3728 for (unsigned l = 0; l != NumLanes; ++l) { 3729 for (unsigned i = l*NumLaneElts, j = (l*NumLaneElts)+NumLaneElts/2; 3730 i != (l+1)*NumLaneElts; i += 2, ++j) { 3731 int BitI = Mask[i]; 3732 int BitI1 = Mask[i+1]; 3733 if (!isUndefOrEqual(BitI, j)) 3734 return false; 3735 if (V2IsSplat) { 3736 if (isUndefOrEqual(BitI1, NumElts)) 3737 return false; 3738 } else { 3739 if (!isUndefOrEqual(BitI1, j+NumElts)) 3740 return false; 3741 } 3742 } 3743 } 3744 return true; 3745} 3746 3747/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form 3748/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. 
vector_shuffle v, undef, 3749/// <0, 0, 1, 1> 3750static bool isUNPCKL_v_undef_Mask(ArrayRef<int> Mask, EVT VT, bool HasInt256) { 3751 unsigned NumElts = VT.getVectorNumElements(); 3752 bool Is256BitVec = VT.is256BitVector(); 3753 3754 assert((VT.is128BitVector() || VT.is256BitVector()) && 3755 "Unsupported vector type for unpckh"); 3756 3757 if (Is256BitVec && NumElts != 4 && NumElts != 8 && 3758 (!HasInt256 || (NumElts != 16 && NumElts != 32))) 3759 return false; 3760 3761 // For 256-bit i64/f64, use MOVDDUPY instead, so reject the matching pattern 3762 // FIXME: Need a better way to get rid of this, there's no latency difference 3763 // between UNPCKLPD and MOVDDUP, the later should always be checked first and 3764 // the former later. We should also remove the "_undef" special mask. 3765 if (NumElts == 4 && Is256BitVec) 3766 return false; 3767 3768 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 3769 // independently on 128-bit lanes. 3770 unsigned NumLanes = VT.getSizeInBits()/128; 3771 unsigned NumLaneElts = NumElts/NumLanes; 3772 3773 for (unsigned l = 0; l != NumLanes; ++l) { 3774 for (unsigned i = l*NumLaneElts, j = l*NumLaneElts; 3775 i != (l+1)*NumLaneElts; 3776 i += 2, ++j) { 3777 int BitI = Mask[i]; 3778 int BitI1 = Mask[i+1]; 3779 3780 if (!isUndefOrEqual(BitI, j)) 3781 return false; 3782 if (!isUndefOrEqual(BitI1, j)) 3783 return false; 3784 } 3785 } 3786 3787 return true; 3788} 3789 3790/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form 3791/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef, 3792/// <2, 2, 3, 3> 3793static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, EVT VT, bool HasInt256) { 3794 unsigned NumElts = VT.getVectorNumElements(); 3795 3796 assert((VT.is128BitVector() || VT.is256BitVector()) && 3797 "Unsupported vector type for unpckh"); 3798 3799 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 && 3800 (!HasInt256 || (NumElts != 16 && NumElts != 32))) 3801 return false; 3802 3803 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 3804 // independently on 128-bit lanes. 3805 unsigned NumLanes = VT.getSizeInBits()/128; 3806 unsigned NumLaneElts = NumElts/NumLanes; 3807 3808 for (unsigned l = 0; l != NumLanes; ++l) { 3809 for (unsigned i = l*NumLaneElts, j = (l*NumLaneElts)+NumLaneElts/2; 3810 i != (l+1)*NumLaneElts; i += 2, ++j) { 3811 int BitI = Mask[i]; 3812 int BitI1 = Mask[i+1]; 3813 if (!isUndefOrEqual(BitI, j)) 3814 return false; 3815 if (!isUndefOrEqual(BitI1, j)) 3816 return false; 3817 } 3818 } 3819 return true; 3820} 3821 3822/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand 3823/// specifies a shuffle of elements that is suitable for input to MOVSS, 3824/// MOVSD, and MOVD, i.e. setting the lowest element. 3825static bool isMOVLMask(ArrayRef<int> Mask, EVT VT) { 3826 if (VT.getVectorElementType().getSizeInBits() < 32) 3827 return false; 3828 if (!VT.is128BitVector()) 3829 return false; 3830 3831 unsigned NumElts = VT.getVectorNumElements(); 3832 3833 if (!isUndefOrEqual(Mask[0], NumElts)) 3834 return false; 3835 3836 for (unsigned i = 1; i != NumElts; ++i) 3837 if (!isUndefOrEqual(Mask[i], i)) 3838 return false; 3839 3840 return true; 3841} 3842 3843/// isVPERM2X128Mask - Match 256-bit shuffles where the elements are considered 3844/// as permutations between 128-bit chunks or halves. 
As an example, the
 3845/// shuffle below:
 3846/// vector_shuffle <4, 5, 6, 7, 12, 13, 14, 15>
 3847/// The first half comes from the second half of V1 and the second half comes
 3848/// from the second half of V2.
 3849static bool isVPERM2X128Mask(ArrayRef<int> Mask, EVT VT, bool HasFp256) {
 3850  if (!HasFp256 || !VT.is256BitVector())
 3851    return false;
 3852
 3853  // The shuffle result is divided into half A and half B. In total the two
 3854  // sources have 4 halves, namely: C, D, E, F. The final values of A and
 3855  // B must come from C, D, E or F.
 3856  unsigned HalfSize = VT.getVectorNumElements()/2;
 3857  bool MatchA = false, MatchB = false;
 3858
 3859  // Check if A comes from one of C, D, E, F.
 3860  for (unsigned Half = 0; Half != 4; ++Half) {
 3861    if (isSequentialOrUndefInRange(Mask, 0, HalfSize, Half*HalfSize)) {
 3862      MatchA = true;
 3863      break;
 3864    }
 3865  }
 3866
 3867  // Check if B comes from one of C, D, E, F.
 3868  for (unsigned Half = 0; Half != 4; ++Half) {
 3869    if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, Half*HalfSize)) {
 3870      MatchB = true;
 3871      break;
 3872    }
 3873  }
 3874
 3875  return MatchA && MatchB;
 3876}
 3877
 3878/// getShuffleVPERM2X128Immediate - Return the appropriate immediate to shuffle
 3879/// the specified VECTOR_SHUFFLE mask with VPERM2F128/VPERM2I128 instructions.
 3880static unsigned getShuffleVPERM2X128Immediate(ShuffleVectorSDNode *SVOp) {
 3881  MVT VT = SVOp->getValueType(0).getSimpleVT();
 3882
 3883  unsigned HalfSize = VT.getVectorNumElements()/2;
 3884
 3885  unsigned FstHalf = 0, SndHalf = 0;
 3886  for (unsigned i = 0; i < HalfSize; ++i) {
 3887    if (SVOp->getMaskElt(i) > 0) {
 3888      FstHalf = SVOp->getMaskElt(i)/HalfSize;
 3889      break;
 3890    }
 3891  }
 3892  for (unsigned i = HalfSize; i < HalfSize*2; ++i) {
 3893    if (SVOp->getMaskElt(i) > 0) {
 3894      SndHalf = SVOp->getMaskElt(i)/HalfSize;
 3895      break;
 3896    }
 3897  }
 3898
 3899  return (FstHalf | (SndHalf << 4));
 3900}
 3901
 3902/// isVPERMILPMask - Return true if the specified VECTOR_SHUFFLE operand
 3903/// specifies a shuffle of elements that is suitable for input to VPERMILPD*.
 3904/// Note that VPERMIL mask matching differs depending on whether the underlying
 3905/// type is 32- or 64-bit. For VPERMILPS the high half of the mask must select
 3906/// the same positions as the low half, but from the upper half of the source.
 3907/// For VPERMILPD the two lanes may be shuffled independently of each other,
 3908/// with the same restriction that lanes can't be crossed. Also handles PSHUFDY.
 3909static bool isVPERMILPMask(ArrayRef<int> Mask, EVT VT, bool HasFp256) {
 3910  if (!HasFp256)
 3911    return false;
 3912
 3913  unsigned NumElts = VT.getVectorNumElements();
 3914  // Only match 256-bit with 32/64-bit types
 3915  if (!VT.is256BitVector() || (NumElts != 4 && NumElts != 8))
 3916    return false;
 3917
 3918  unsigned NumLanes = VT.getSizeInBits()/128;
 3919  unsigned LaneSize = NumElts/NumLanes;
 3920  for (unsigned l = 0; l != NumElts; l += LaneSize) {
 3921    for (unsigned i = 0; i != LaneSize; ++i) {
 3922      if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
 3923        return false;
 3924      if (NumElts != 8 || l == 0)
 3925        continue;
 3926      // VPERMILPS handling
 3927      if (Mask[i] < 0)
 3928        continue;
 3929      if (!isUndefOrEqual(Mask[i+l], Mask[i]+l))
 3930        return false;
 3931    }
 3932  }
 3933
 3934  return true;
 3935}
 3936
 3937/// isCommutedMOVLMask - Returns true if the shuffle mask is exactly the reverse
 3938/// of what x86 movss wants: x86 movs{s|d} requires the lowest element to be the
 3939/// lowest element of vector 2 and the other elements to come from vector 1 in order.
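/// For example, the v4i32 mask <0, 5, 6, 7> matches: the low element comes from
/// the first vector and the remaining elements from the second, i.e. the MOVSS
/// pattern with its operands commuted.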
3940static bool isCommutedMOVLMask(ArrayRef<int> Mask, EVT VT, 3941 bool V2IsSplat = false, bool V2IsUndef = false) { 3942 if (!VT.is128BitVector()) 3943 return false; 3944 3945 unsigned NumOps = VT.getVectorNumElements(); 3946 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16) 3947 return false; 3948 3949 if (!isUndefOrEqual(Mask[0], 0)) 3950 return false; 3951 3952 for (unsigned i = 1; i != NumOps; ++i) 3953 if (!(isUndefOrEqual(Mask[i], i+NumOps) || 3954 (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) || 3955 (V2IsSplat && isUndefOrEqual(Mask[i], NumOps)))) 3956 return false; 3957 3958 return true; 3959} 3960 3961/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand 3962/// specifies a shuffle of elements that is suitable for input to MOVSHDUP. 3963/// Masks to match: <1, 1, 3, 3> or <1, 1, 3, 3, 5, 5, 7, 7> 3964static bool isMOVSHDUPMask(ArrayRef<int> Mask, EVT VT, 3965 const X86Subtarget *Subtarget) { 3966 if (!Subtarget->hasSSE3()) 3967 return false; 3968 3969 unsigned NumElems = VT.getVectorNumElements(); 3970 3971 if ((VT.is128BitVector() && NumElems != 4) || 3972 (VT.is256BitVector() && NumElems != 8)) 3973 return false; 3974 3975 // "i+1" is the value the indexed mask element must have 3976 for (unsigned i = 0; i != NumElems; i += 2) 3977 if (!isUndefOrEqual(Mask[i], i+1) || 3978 !isUndefOrEqual(Mask[i+1], i+1)) 3979 return false; 3980 3981 return true; 3982} 3983 3984/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand 3985/// specifies a shuffle of elements that is suitable for input to MOVSLDUP. 3986/// Masks to match: <0, 0, 2, 2> or <0, 0, 2, 2, 4, 4, 6, 6> 3987static bool isMOVSLDUPMask(ArrayRef<int> Mask, EVT VT, 3988 const X86Subtarget *Subtarget) { 3989 if (!Subtarget->hasSSE3()) 3990 return false; 3991 3992 unsigned NumElems = VT.getVectorNumElements(); 3993 3994 if ((VT.is128BitVector() && NumElems != 4) || 3995 (VT.is256BitVector() && NumElems != 8)) 3996 return false; 3997 3998 // "i" is the value the indexed mask element must have 3999 for (unsigned i = 0; i != NumElems; i += 2) 4000 if (!isUndefOrEqual(Mask[i], i) || 4001 !isUndefOrEqual(Mask[i+1], i)) 4002 return false; 4003 4004 return true; 4005} 4006 4007/// isMOVDDUPYMask - Return true if the specified VECTOR_SHUFFLE operand 4008/// specifies a shuffle of elements that is suitable for input to 256-bit 4009/// version of MOVDDUP. 4010static bool isMOVDDUPYMask(ArrayRef<int> Mask, EVT VT, bool HasFp256) { 4011 if (!HasFp256 || !VT.is256BitVector()) 4012 return false; 4013 4014 unsigned NumElts = VT.getVectorNumElements(); 4015 if (NumElts != 4) 4016 return false; 4017 4018 for (unsigned i = 0; i != NumElts/2; ++i) 4019 if (!isUndefOrEqual(Mask[i], 0)) 4020 return false; 4021 for (unsigned i = NumElts/2; i != NumElts; ++i) 4022 if (!isUndefOrEqual(Mask[i], NumElts/2)) 4023 return false; 4024 return true; 4025} 4026 4027/// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand 4028/// specifies a shuffle of elements that is suitable for input to 128-bit 4029/// version of MOVDDUP. 
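/// i.e. for v2f64 the mask must be <0, 0> (allowing undefs): both result
/// elements duplicate the low element of the source.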
4030static bool isMOVDDUPMask(ArrayRef<int> Mask, EVT VT) { 4031 if (!VT.is128BitVector()) 4032 return false; 4033 4034 unsigned e = VT.getVectorNumElements() / 2; 4035 for (unsigned i = 0; i != e; ++i) 4036 if (!isUndefOrEqual(Mask[i], i)) 4037 return false; 4038 for (unsigned i = 0; i != e; ++i) 4039 if (!isUndefOrEqual(Mask[e+i], i)) 4040 return false; 4041 return true; 4042} 4043 4044/// isVEXTRACTF128Index - Return true if the specified 4045/// EXTRACT_SUBVECTOR operand specifies a vector extract that is 4046/// suitable for input to VEXTRACTF128. 4047bool X86::isVEXTRACTF128Index(SDNode *N) { 4048 if (!isa<ConstantSDNode>(N->getOperand(1).getNode())) 4049 return false; 4050 4051 // The index should be aligned on a 128-bit boundary. 4052 uint64_t Index = 4053 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue(); 4054 4055 MVT VT = N->getValueType(0).getSimpleVT(); 4056 unsigned ElSize = VT.getVectorElementType().getSizeInBits(); 4057 bool Result = (Index * ElSize) % 128 == 0; 4058 4059 return Result; 4060} 4061 4062/// isVINSERTF128Index - Return true if the specified INSERT_SUBVECTOR 4063/// operand specifies a subvector insert that is suitable for input to 4064/// VINSERTF128. 4065bool X86::isVINSERTF128Index(SDNode *N) { 4066 if (!isa<ConstantSDNode>(N->getOperand(2).getNode())) 4067 return false; 4068 4069 // The index should be aligned on a 128-bit boundary. 4070 uint64_t Index = 4071 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue(); 4072 4073 MVT VT = N->getValueType(0).getSimpleVT(); 4074 unsigned ElSize = VT.getVectorElementType().getSizeInBits(); 4075 bool Result = (Index * ElSize) % 128 == 0; 4076 4077 return Result; 4078} 4079 4080/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle 4081/// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions. 4082/// Handles 128-bit and 256-bit. 4083static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) { 4084 MVT VT = N->getValueType(0).getSimpleVT(); 4085 4086 assert((VT.is128BitVector() || VT.is256BitVector()) && 4087 "Unsupported vector type for PSHUF/SHUFP"); 4088 4089 // Handle 128 and 256-bit vector lengths. AVX defines PSHUF/SHUFP to operate 4090 // independently on 128-bit lanes. 4091 unsigned NumElts = VT.getVectorNumElements(); 4092 unsigned NumLanes = VT.getSizeInBits()/128; 4093 unsigned NumLaneElts = NumElts/NumLanes; 4094 4095 assert((NumLaneElts == 2 || NumLaneElts == 4) && 4096 "Only supports 2 or 4 elements per lane"); 4097 4098 unsigned Shift = (NumLaneElts == 4) ? 1 : 0; 4099 unsigned Mask = 0; 4100 for (unsigned i = 0; i != NumElts; ++i) { 4101 int Elt = N->getMaskElt(i); 4102 if (Elt < 0) continue; 4103 Elt &= NumLaneElts - 1; 4104 unsigned ShAmt = (i << Shift) % 8; 4105 Mask |= Elt << ShAmt; 4106 } 4107 4108 return Mask; 4109} 4110 4111/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle 4112/// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction. 4113static unsigned getShufflePSHUFHWImmediate(ShuffleVectorSDNode *N) { 4114 MVT VT = N->getValueType(0).getSimpleVT(); 4115 4116 assert((VT == MVT::v8i16 || VT == MVT::v16i16) && 4117 "Unsupported vector type for PSHUFHW"); 4118 4119 unsigned NumElts = VT.getVectorNumElements(); 4120 4121 unsigned Mask = 0; 4122 for (unsigned l = 0; l != NumElts; l += 8) { 4123 // 8 nodes per lane, but we only care about the last 4. 4124 for (unsigned i = 0; i < 4; ++i) { 4125 int Elt = N->getMaskElt(l+i+4); 4126 if (Elt < 0) continue; 4127 Elt &= 0x3; // only 2-bits. 
4128 Mask |= Elt << (i * 2); 4129 } 4130 } 4131 4132 return Mask; 4133} 4134 4135/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle 4136/// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction. 4137static unsigned getShufflePSHUFLWImmediate(ShuffleVectorSDNode *N) { 4138 MVT VT = N->getValueType(0).getSimpleVT(); 4139 4140 assert((VT == MVT::v8i16 || VT == MVT::v16i16) && 4141 "Unsupported vector type for PSHUFHW"); 4142 4143 unsigned NumElts = VT.getVectorNumElements(); 4144 4145 unsigned Mask = 0; 4146 for (unsigned l = 0; l != NumElts; l += 8) { 4147 // 8 nodes per lane, but we only care about the first 4. 4148 for (unsigned i = 0; i < 4; ++i) { 4149 int Elt = N->getMaskElt(l+i); 4150 if (Elt < 0) continue; 4151 Elt &= 0x3; // only 2-bits 4152 Mask |= Elt << (i * 2); 4153 } 4154 } 4155 4156 return Mask; 4157} 4158 4159/// getShufflePALIGNRImmediate - Return the appropriate immediate to shuffle 4160/// the specified VECTOR_SHUFFLE mask with the PALIGNR instruction. 4161static unsigned getShufflePALIGNRImmediate(ShuffleVectorSDNode *SVOp) { 4162 MVT VT = SVOp->getValueType(0).getSimpleVT(); 4163 unsigned EltSize = VT.getVectorElementType().getSizeInBits() >> 3; 4164 4165 unsigned NumElts = VT.getVectorNumElements(); 4166 unsigned NumLanes = VT.getSizeInBits()/128; 4167 unsigned NumLaneElts = NumElts/NumLanes; 4168 4169 int Val = 0; 4170 unsigned i; 4171 for (i = 0; i != NumElts; ++i) { 4172 Val = SVOp->getMaskElt(i); 4173 if (Val >= 0) 4174 break; 4175 } 4176 if (Val >= (int)NumElts) 4177 Val -= NumElts - NumLaneElts; 4178 4179 assert(Val - i > 0 && "PALIGNR imm should be positive"); 4180 return (Val - i) * EltSize; 4181} 4182 4183/// getExtractVEXTRACTF128Immediate - Return the appropriate immediate 4184/// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128 4185/// instructions. 4186unsigned X86::getExtractVEXTRACTF128Immediate(SDNode *N) { 4187 if (!isa<ConstantSDNode>(N->getOperand(1).getNode())) 4188 llvm_unreachable("Illegal extract subvector for VEXTRACTF128"); 4189 4190 uint64_t Index = 4191 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue(); 4192 4193 MVT VecVT = N->getOperand(0).getValueType().getSimpleVT(); 4194 MVT ElVT = VecVT.getVectorElementType(); 4195 4196 unsigned NumElemsPerChunk = 128 / ElVT.getSizeInBits(); 4197 return Index / NumElemsPerChunk; 4198} 4199 4200/// getInsertVINSERTF128Immediate - Return the appropriate immediate 4201/// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128 4202/// instructions. 4203unsigned X86::getInsertVINSERTF128Immediate(SDNode *N) { 4204 if (!isa<ConstantSDNode>(N->getOperand(2).getNode())) 4205 llvm_unreachable("Illegal insert subvector for VINSERTF128"); 4206 4207 uint64_t Index = 4208 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue(); 4209 4210 MVT VecVT = N->getValueType(0).getSimpleVT(); 4211 MVT ElVT = VecVT.getVectorElementType(); 4212 4213 unsigned NumElemsPerChunk = 128 / ElVT.getSizeInBits(); 4214 return Index / NumElemsPerChunk; 4215} 4216 4217/// getShuffleCLImmediate - Return the appropriate immediate to shuffle 4218/// the specified VECTOR_SHUFFLE mask with VPERMQ and VPERMPD instructions. 4219/// Handles 256-bit. 
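/// For example, the v4i64 mask <3, 0, 2, 1> encodes as 0b01100011 (0x63): two
/// bits per destination element, each selecting the source element it reads.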
4220static unsigned getShuffleCLImmediate(ShuffleVectorSDNode *N) { 4221 MVT VT = N->getValueType(0).getSimpleVT(); 4222 4223 unsigned NumElts = VT.getVectorNumElements(); 4224 4225 assert((VT.is256BitVector() && NumElts == 4) && 4226 "Unsupported vector type for VPERMQ/VPERMPD"); 4227 4228 unsigned Mask = 0; 4229 for (unsigned i = 0; i != NumElts; ++i) { 4230 int Elt = N->getMaskElt(i); 4231 if (Elt < 0) 4232 continue; 4233 Mask |= Elt << (i*2); 4234 } 4235 4236 return Mask; 4237} 4238/// isZeroNode - Returns true if Elt is a constant zero or a floating point 4239/// constant +0.0. 4240bool X86::isZeroNode(SDValue Elt) { 4241 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Elt)) 4242 return CN->isNullValue(); 4243 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Elt)) 4244 return CFP->getValueAPF().isPosZero(); 4245 return false; 4246} 4247 4248/// CommuteVectorShuffle - Swap vector_shuffle operands as well as values in 4249/// their permute mask. 4250static SDValue CommuteVectorShuffle(ShuffleVectorSDNode *SVOp, 4251 SelectionDAG &DAG) { 4252 MVT VT = SVOp->getValueType(0).getSimpleVT(); 4253 unsigned NumElems = VT.getVectorNumElements(); 4254 SmallVector<int, 8> MaskVec; 4255 4256 for (unsigned i = 0; i != NumElems; ++i) { 4257 int Idx = SVOp->getMaskElt(i); 4258 if (Idx >= 0) { 4259 if (Idx < (int)NumElems) 4260 Idx += NumElems; 4261 else 4262 Idx -= NumElems; 4263 } 4264 MaskVec.push_back(Idx); 4265 } 4266 return DAG.getVectorShuffle(VT, SVOp->getDebugLoc(), SVOp->getOperand(1), 4267 SVOp->getOperand(0), &MaskVec[0]); 4268} 4269 4270/// ShouldXformToMOVHLPS - Return true if the node should be transformed to 4271/// match movhlps. The lower half elements should come from upper half of 4272/// V1 (and in order), and the upper half elements should come from the upper 4273/// half of V2 (and in order). 4274static bool ShouldXformToMOVHLPS(ArrayRef<int> Mask, EVT VT) { 4275 if (!VT.is128BitVector()) 4276 return false; 4277 if (VT.getVectorNumElements() != 4) 4278 return false; 4279 for (unsigned i = 0, e = 2; i != e; ++i) 4280 if (!isUndefOrEqual(Mask[i], i+2)) 4281 return false; 4282 for (unsigned i = 2; i != 4; ++i) 4283 if (!isUndefOrEqual(Mask[i], i+4)) 4284 return false; 4285 return true; 4286} 4287 4288/// isScalarLoadToVector - Returns true if the node is a scalar load that 4289/// is promoted to a vector. It also returns the LoadSDNode by reference if 4290/// required. 4291static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = NULL) { 4292 if (N->getOpcode() != ISD::SCALAR_TO_VECTOR) 4293 return false; 4294 N = N->getOperand(0).getNode(); 4295 if (!ISD::isNON_EXTLoad(N)) 4296 return false; 4297 if (LD) 4298 *LD = cast<LoadSDNode>(N); 4299 return true; 4300} 4301 4302// Test whether the given value is a vector value which will be legalized 4303// into a load. 4304static bool WillBeConstantPoolLoad(SDNode *N) { 4305 if (N->getOpcode() != ISD::BUILD_VECTOR) 4306 return false; 4307 4308 // Check for any non-constant elements. 4309 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) 4310 switch (N->getOperand(i).getNode()->getOpcode()) { 4311 case ISD::UNDEF: 4312 case ISD::ConstantFP: 4313 case ISD::Constant: 4314 break; 4315 default: 4316 return false; 4317 } 4318 4319 // Vectors of all-zeros and all-ones are materialized with special 4320 // instructions rather than being loaded. 
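  // (All-zeros is typically materialized with an XORPS/PXOR of a register with
  // itself and all-ones with a PCMPEQD of a register with itself; see
  // getZeroVector and getOnesVector below.)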
4321 return !ISD::isBuildVectorAllZeros(N) && 4322 !ISD::isBuildVectorAllOnes(N); 4323} 4324 4325/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to 4326/// match movlp{s|d}. The lower half elements should come from lower half of 4327/// V1 (and in order), and the upper half elements should come from the upper 4328/// half of V2 (and in order). And since V1 will become the source of the 4329/// MOVLP, it must be either a vector load or a scalar load to vector. 4330static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2, 4331 ArrayRef<int> Mask, EVT VT) { 4332 if (!VT.is128BitVector()) 4333 return false; 4334 4335 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1)) 4336 return false; 4337 // Is V2 is a vector load, don't do this transformation. We will try to use 4338 // load folding shufps op. 4339 if (ISD::isNON_EXTLoad(V2) || WillBeConstantPoolLoad(V2)) 4340 return false; 4341 4342 unsigned NumElems = VT.getVectorNumElements(); 4343 4344 if (NumElems != 2 && NumElems != 4) 4345 return false; 4346 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 4347 if (!isUndefOrEqual(Mask[i], i)) 4348 return false; 4349 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i) 4350 if (!isUndefOrEqual(Mask[i], i+NumElems)) 4351 return false; 4352 return true; 4353} 4354 4355/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are 4356/// all the same. 4357static bool isSplatVector(SDNode *N) { 4358 if (N->getOpcode() != ISD::BUILD_VECTOR) 4359 return false; 4360 4361 SDValue SplatValue = N->getOperand(0); 4362 for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i) 4363 if (N->getOperand(i) != SplatValue) 4364 return false; 4365 return true; 4366} 4367 4368/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved 4369/// to an zero vector. 4370/// FIXME: move to dag combiner / method on ShuffleVectorSDNode 4371static bool isZeroShuffle(ShuffleVectorSDNode *N) { 4372 SDValue V1 = N->getOperand(0); 4373 SDValue V2 = N->getOperand(1); 4374 unsigned NumElems = N->getValueType(0).getVectorNumElements(); 4375 for (unsigned i = 0; i != NumElems; ++i) { 4376 int Idx = N->getMaskElt(i); 4377 if (Idx >= (int)NumElems) { 4378 unsigned Opc = V2.getOpcode(); 4379 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode())) 4380 continue; 4381 if (Opc != ISD::BUILD_VECTOR || 4382 !X86::isZeroNode(V2.getOperand(Idx-NumElems))) 4383 return false; 4384 } else if (Idx >= 0) { 4385 unsigned Opc = V1.getOpcode(); 4386 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode())) 4387 continue; 4388 if (Opc != ISD::BUILD_VECTOR || 4389 !X86::isZeroNode(V1.getOperand(Idx))) 4390 return false; 4391 } 4392 } 4393 return true; 4394} 4395 4396/// getZeroVector - Returns a vector of specified type with all zero elements. 4397/// 4398static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget, 4399 SelectionDAG &DAG, DebugLoc dl) { 4400 assert(VT.isVector() && "Expected a vector type"); 4401 4402 // Always build SSE zero vectors as <4 x i32> bitcasted 4403 // to their dest type. This ensures they get CSE'd. 
4404 SDValue Vec; 4405 if (VT.is128BitVector()) { // SSE 4406 if (Subtarget->hasSSE2()) { // SSE2 4407 SDValue Cst = DAG.getTargetConstant(0, MVT::i32); 4408 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); 4409 } else { // SSE1 4410 SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32); 4411 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst); 4412 } 4413 } else if (VT.is256BitVector()) { // AVX 4414 if (Subtarget->hasInt256()) { // AVX2 4415 SDValue Cst = DAG.getTargetConstant(0, MVT::i32); 4416 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; 4417 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops, 4418 array_lengthof(Ops)); 4419 } else { 4420 // 256-bit logic and arithmetic instructions in AVX are all 4421 // floating-point, no support for integer ops. Emit fp zeroed vectors. 4422 SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32); 4423 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; 4424 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops, 4425 array_lengthof(Ops)); 4426 } 4427 } else 4428 llvm_unreachable("Unexpected vector type"); 4429 4430 return DAG.getNode(ISD::BITCAST, dl, VT, Vec); 4431} 4432 4433/// getOnesVector - Returns a vector of specified type with all bits set. 4434/// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with 4435/// no AVX2 supprt, use two <4 x i32> inserted in a <8 x i32> appropriately. 4436/// Then bitcast to their original type, ensuring they get CSE'd. 4437static SDValue getOnesVector(MVT VT, bool HasInt256, SelectionDAG &DAG, 4438 DebugLoc dl) { 4439 assert(VT.isVector() && "Expected a vector type"); 4440 4441 SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32); 4442 SDValue Vec; 4443 if (VT.is256BitVector()) { 4444 if (HasInt256) { // AVX2 4445 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; 4446 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops, 4447 array_lengthof(Ops)); 4448 } else { // AVX 4449 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); 4450 Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl); 4451 } 4452 } else if (VT.is128BitVector()) { 4453 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); 4454 } else 4455 llvm_unreachable("Unexpected vector type"); 4456 4457 return DAG.getNode(ISD::BITCAST, dl, VT, Vec); 4458} 4459 4460/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements 4461/// that point to V2 points to its first element. 4462static void NormalizeMask(SmallVectorImpl<int> &Mask, unsigned NumElems) { 4463 for (unsigned i = 0; i != NumElems; ++i) { 4464 if (Mask[i] > (int)NumElems) { 4465 Mask[i] = NumElems; 4466 } 4467 } 4468} 4469 4470/// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd 4471/// operation of specified width. 4472static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, 4473 SDValue V2) { 4474 unsigned NumElems = VT.getVectorNumElements(); 4475 SmallVector<int, 8> Mask; 4476 Mask.push_back(NumElems); 4477 for (unsigned i = 1; i != NumElems; ++i) 4478 Mask.push_back(i); 4479 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]); 4480} 4481 4482/// getUnpackl - Returns a vector_shuffle node for an unpackl operation. 
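/// e.g. for v4i32 this builds the mask <0, 4, 1, 5>, interleaving the low
/// halves of the two operands.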
4483static SDValue getUnpackl(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, 4484 SDValue V2) { 4485 unsigned NumElems = VT.getVectorNumElements(); 4486 SmallVector<int, 8> Mask; 4487 for (unsigned i = 0, e = NumElems/2; i != e; ++i) { 4488 Mask.push_back(i); 4489 Mask.push_back(i + NumElems); 4490 } 4491 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]); 4492} 4493 4494/// getUnpackh - Returns a vector_shuffle node for an unpackh operation. 4495static SDValue getUnpackh(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, 4496 SDValue V2) { 4497 unsigned NumElems = VT.getVectorNumElements(); 4498 SmallVector<int, 8> Mask; 4499 for (unsigned i = 0, Half = NumElems/2; i != Half; ++i) { 4500 Mask.push_back(i + Half); 4501 Mask.push_back(i + NumElems + Half); 4502 } 4503 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]); 4504} 4505 4506// PromoteSplati8i16 - All i16 and i8 vector types can't be used directly by 4507// a generic shuffle instruction because the target has no such instructions. 4508// Generate shuffles which repeat i16 and i8 several times until they can be 4509// represented by v4f32 and then be manipulated by target suported shuffles. 4510static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) { 4511 EVT VT = V.getValueType(); 4512 int NumElems = VT.getVectorNumElements(); 4513 DebugLoc dl = V.getDebugLoc(); 4514 4515 while (NumElems > 4) { 4516 if (EltNo < NumElems/2) { 4517 V = getUnpackl(DAG, dl, VT, V, V); 4518 } else { 4519 V = getUnpackh(DAG, dl, VT, V, V); 4520 EltNo -= NumElems/2; 4521 } 4522 NumElems >>= 1; 4523 } 4524 return V; 4525} 4526 4527/// getLegalSplat - Generate a legal splat with supported x86 shuffles 4528static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) { 4529 EVT VT = V.getValueType(); 4530 DebugLoc dl = V.getDebugLoc(); 4531 4532 if (VT.is128BitVector()) { 4533 V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V); 4534 int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo }; 4535 V = DAG.getVectorShuffle(MVT::v4f32, dl, V, DAG.getUNDEF(MVT::v4f32), 4536 &SplatMask[0]); 4537 } else if (VT.is256BitVector()) { 4538 // To use VPERMILPS to splat scalars, the second half of indicies must 4539 // refer to the higher part, which is a duplication of the lower one, 4540 // because VPERMILPS can only handle in-lane permutations. 4541 int SplatMask[8] = { EltNo, EltNo, EltNo, EltNo, 4542 EltNo+4, EltNo+4, EltNo+4, EltNo+4 }; 4543 4544 V = DAG.getNode(ISD::BITCAST, dl, MVT::v8f32, V); 4545 V = DAG.getVectorShuffle(MVT::v8f32, dl, V, DAG.getUNDEF(MVT::v8f32), 4546 &SplatMask[0]); 4547 } else 4548 llvm_unreachable("Vector size not supported"); 4549 4550 return DAG.getNode(ISD::BITCAST, dl, VT, V); 4551} 4552 4553/// PromoteSplat - Splat is promoted to target supported vector shuffles. 4554static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) { 4555 EVT SrcVT = SV->getValueType(0); 4556 SDValue V1 = SV->getOperand(0); 4557 DebugLoc dl = SV->getDebugLoc(); 4558 4559 int EltNo = SV->getSplatIndex(); 4560 int NumElems = SrcVT.getVectorNumElements(); 4561 bool Is256BitVec = SrcVT.is256BitVector(); 4562 4563 assert(((SrcVT.is128BitVector() && NumElems > 4) || Is256BitVec) && 4564 "Unknown how to promote splat for type"); 4565 4566 // Extract the 128-bit part containing the splat element and update 4567 // the splat element index when it refers to the higher register. 
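// For example, when splatting element 5 of a v8i32, the upper 128-bit half is
// extracted below and EltNo becomes 1 within the resulting v4i32.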
4568 if (Is256BitVec) { 4569 V1 = Extract128BitVector(V1, EltNo, DAG, dl); 4570 if (EltNo >= NumElems/2) 4571 EltNo -= NumElems/2; 4572 } 4573 4574 // All i16 and i8 vector types can't be used directly by a generic shuffle 4575 // instruction because the target has no such instruction. Generate shuffles 4576 // which repeat i16 and i8 several times until they fit in i32, and then can 4577 // be manipulated by target suported shuffles. 4578 EVT EltVT = SrcVT.getVectorElementType(); 4579 if (EltVT == MVT::i8 || EltVT == MVT::i16) 4580 V1 = PromoteSplati8i16(V1, DAG, EltNo); 4581 4582 // Recreate the 256-bit vector and place the same 128-bit vector 4583 // into the low and high part. This is necessary because we want 4584 // to use VPERM* to shuffle the vectors 4585 if (Is256BitVec) { 4586 V1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT, V1, V1); 4587 } 4588 4589 return getLegalSplat(DAG, V1, EltNo); 4590} 4591 4592/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified 4593/// vector of zero or undef vector. This produces a shuffle where the low 4594/// element of V2 is swizzled into the zero/undef vector, landing at element 4595/// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3). 4596static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx, 4597 bool IsZero, 4598 const X86Subtarget *Subtarget, 4599 SelectionDAG &DAG) { 4600 EVT VT = V2.getValueType(); 4601 SDValue V1 = IsZero 4602 ? getZeroVector(VT, Subtarget, DAG, V2.getDebugLoc()) : DAG.getUNDEF(VT); 4603 unsigned NumElems = VT.getVectorNumElements(); 4604 SmallVector<int, 16> MaskVec; 4605 for (unsigned i = 0; i != NumElems; ++i) 4606 // If this is the insertion idx, put the low elt of V2 here. 4607 MaskVec.push_back(i == Idx ? NumElems : i); 4608 return DAG.getVectorShuffle(VT, V2.getDebugLoc(), V1, V2, &MaskVec[0]); 4609} 4610 4611/// getTargetShuffleMask - Calculates the shuffle mask corresponding to the 4612/// target specific opcode. Returns true if the Mask could be calculated. 4613/// Sets IsUnary to true if only uses one source. 
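/// For example, an X86ISD::UNPCKL of two v4i32 operands should decode to the
/// mask <0, 4, 1, 5>, with IsUnary left as false.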
4614static bool getTargetShuffleMask(SDNode *N, MVT VT, 4615 SmallVectorImpl<int> &Mask, bool &IsUnary) { 4616 unsigned NumElems = VT.getVectorNumElements(); 4617 SDValue ImmN; 4618 4619 IsUnary = false; 4620 switch(N->getOpcode()) { 4621 case X86ISD::SHUFP: 4622 ImmN = N->getOperand(N->getNumOperands()-1); 4623 DecodeSHUFPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); 4624 break; 4625 case X86ISD::UNPCKH: 4626 DecodeUNPCKHMask(VT, Mask); 4627 break; 4628 case X86ISD::UNPCKL: 4629 DecodeUNPCKLMask(VT, Mask); 4630 break; 4631 case X86ISD::MOVHLPS: 4632 DecodeMOVHLPSMask(NumElems, Mask); 4633 break; 4634 case X86ISD::MOVLHPS: 4635 DecodeMOVLHPSMask(NumElems, Mask); 4636 break; 4637 case X86ISD::PALIGNR: 4638 ImmN = N->getOperand(N->getNumOperands()-1); 4639 DecodePALIGNRMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); 4640 break; 4641 case X86ISD::PSHUFD: 4642 case X86ISD::VPERMILP: 4643 ImmN = N->getOperand(N->getNumOperands()-1); 4644 DecodePSHUFMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); 4645 IsUnary = true; 4646 break; 4647 case X86ISD::PSHUFHW: 4648 ImmN = N->getOperand(N->getNumOperands()-1); 4649 DecodePSHUFHWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); 4650 IsUnary = true; 4651 break; 4652 case X86ISD::PSHUFLW: 4653 ImmN = N->getOperand(N->getNumOperands()-1); 4654 DecodePSHUFLWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); 4655 IsUnary = true; 4656 break; 4657 case X86ISD::VPERMI: 4658 ImmN = N->getOperand(N->getNumOperands()-1); 4659 DecodeVPERMMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); 4660 IsUnary = true; 4661 break; 4662 case X86ISD::MOVSS: 4663 case X86ISD::MOVSD: { 4664 // The index 0 always comes from the first element of the second source, 4665 // this is why MOVSS and MOVSD are used in the first place. The other 4666 // elements come from the other positions of the first source vector 4667 Mask.push_back(NumElems); 4668 for (unsigned i = 1; i != NumElems; ++i) { 4669 Mask.push_back(i); 4670 } 4671 break; 4672 } 4673 case X86ISD::VPERM2X128: 4674 ImmN = N->getOperand(N->getNumOperands()-1); 4675 DecodeVPERM2X128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); 4676 if (Mask.empty()) return false; 4677 break; 4678 case X86ISD::MOVDDUP: 4679 case X86ISD::MOVLHPD: 4680 case X86ISD::MOVLPD: 4681 case X86ISD::MOVLPS: 4682 case X86ISD::MOVSHDUP: 4683 case X86ISD::MOVSLDUP: 4684 // Not yet implemented 4685 return false; 4686 default: llvm_unreachable("unknown target shuffle node"); 4687 } 4688 4689 return true; 4690} 4691 4692/// getShuffleScalarElt - Returns the scalar element that will make up the ith 4693/// element of the result of the vector shuffle. 4694static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG, 4695 unsigned Depth) { 4696 if (Depth == 6) 4697 return SDValue(); // Limit search depth. 4698 4699 SDValue V = SDValue(N, 0); 4700 EVT VT = V.getValueType(); 4701 unsigned Opcode = V.getOpcode(); 4702 4703 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars. 4704 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) { 4705 int Elt = SV->getMaskElt(Index); 4706 4707 if (Elt < 0) 4708 return DAG.getUNDEF(VT.getVectorElementType()); 4709 4710 unsigned NumElems = VT.getVectorNumElements(); 4711 SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0) 4712 : SV->getOperand(1); 4713 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1); 4714 } 4715 4716 // Recurse into target specific vector shuffles to find scalars. 
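// For example, an X86ISD::MOVSD node carries the implicit mask <2, 1>, so a
// query for index 0 recurses into operand 1 (the second source) at element 0.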
4717 if (isTargetShuffle(Opcode)) { 4718 MVT ShufVT = V.getValueType().getSimpleVT(); 4719 unsigned NumElems = ShufVT.getVectorNumElements(); 4720 SmallVector<int, 16> ShuffleMask; 4721 bool IsUnary; 4722 4723 if (!getTargetShuffleMask(N, ShufVT, ShuffleMask, IsUnary)) 4724 return SDValue(); 4725 4726 int Elt = ShuffleMask[Index]; 4727 if (Elt < 0) 4728 return DAG.getUNDEF(ShufVT.getVectorElementType()); 4729 4730 SDValue NewV = (Elt < (int)NumElems) ? N->getOperand(0) 4731 : N->getOperand(1); 4732 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, 4733 Depth+1); 4734 } 4735 4736 // Actual nodes that may contain scalar elements 4737 if (Opcode == ISD::BITCAST) { 4738 V = V.getOperand(0); 4739 EVT SrcVT = V.getValueType(); 4740 unsigned NumElems = VT.getVectorNumElements(); 4741 4742 if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems) 4743 return SDValue(); 4744 } 4745 4746 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) 4747 return (Index == 0) ? V.getOperand(0) 4748 : DAG.getUNDEF(VT.getVectorElementType()); 4749 4750 if (V.getOpcode() == ISD::BUILD_VECTOR) 4751 return V.getOperand(Index); 4752 4753 return SDValue(); 4754} 4755 4756/// getNumOfConsecutiveZeros - Return the number of elements of a vector 4757/// shuffle operation which consecutively come from a zero. The 4758/// search can start in two different directions, from left or right. 4759/// We count undefs as zeros until PreferredNum is reached. 4760static unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp, 4761 unsigned NumElems, bool ZerosFromLeft, 4762 SelectionDAG &DAG, 4763 unsigned PreferredNum = -1U) { 4764 unsigned NumZeros = 0; 4765 for (unsigned i = 0; i != NumElems; ++i) { 4766 unsigned Index = ZerosFromLeft ? i : NumElems - i - 1; 4767 SDValue Elt = getShuffleScalarElt(SVOp, Index, DAG, 0); 4768 if (!Elt.getNode()) 4769 break; 4770 4771 if (X86::isZeroNode(Elt)) 4772 ++NumZeros; 4773 else if (Elt.getOpcode() == ISD::UNDEF) // Undef as zero up to PreferredNum. 4774 NumZeros = std::min(NumZeros + 1, PreferredNum); 4775 else 4776 break; 4777 } 4778 4779 return NumZeros; 4780} 4781 4782/// isShuffleMaskConsecutive - Check if the shuffle mask indices [MaskI, MaskE) 4783/// correspond consecutively to elements from one of the vector operands, 4784/// starting from its index OpIdx. Also tells OpNum which source vector operand was used. 4785static 4786bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp, 4787 unsigned MaskI, unsigned MaskE, unsigned OpIdx, 4788 unsigned NumElems, unsigned &OpNum) { 4789 bool SeenV1 = false; 4790 bool SeenV2 = false; 4791 4792 for (unsigned i = MaskI; i != MaskE; ++i, ++OpIdx) { 4793 int Idx = SVOp->getMaskElt(i); 4794 // Ignore undef indices 4795 if (Idx < 0) 4796 continue; 4797 4798 if (Idx < (int)NumElems) 4799 SeenV1 = true; 4800 else 4801 SeenV2 = true; 4802 4803 // Only accept consecutive elements from the same vector 4804 if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2)) 4805 return false; 4806 } 4807 4808 OpNum = SeenV1 ? 0 : 1; 4809 return true; 4810} 4811 4812/// isVectorShiftRight - Returns true if the shuffle can be implemented as a 4813/// logical right shift of a vector.
4814static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG, 4815 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) { 4816 unsigned NumElems = SVOp->getValueType(0).getVectorNumElements(); 4817 unsigned NumZeros = getNumOfConsecutiveZeros( 4818 SVOp, NumElems, false /* check zeros from right */, DAG, 4819 SVOp->getMaskElt(0)); 4820 unsigned OpSrc; 4821 4822 if (!NumZeros) 4823 return false; 4824 4825 // Considering the elements in the mask that are not consecutive zeros, 4826 // check if they consecutively come from only one of the source vectors. 4827 // 4828 // V1 = {X, A, B, C} 0 4829 // \ \ \ / 4830 // vector_shuffle V1, V2 <1, 2, 3, X> 4831 // 4832 if (!isShuffleMaskConsecutive(SVOp, 4833 0, // Mask Start Index 4834 NumElems-NumZeros, // Mask End Index(exclusive) 4835 NumZeros, // Where to start looking in the src vector 4836 NumElems, // Number of elements in vector 4837 OpSrc)) // Which source operand ? 4838 return false; 4839 4840 isLeft = false; 4841 ShAmt = NumZeros; 4842 ShVal = SVOp->getOperand(OpSrc); 4843 return true; 4844} 4845 4846/// isVectorShiftLeft - Returns true if the shuffle can be implemented as a 4847/// logical left shift of a vector. 4848static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG, 4849 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) { 4850 unsigned NumElems = SVOp->getValueType(0).getVectorNumElements(); 4851 unsigned NumZeros = getNumOfConsecutiveZeros( 4852 SVOp, NumElems, true /* check zeros from left */, DAG, 4853 NumElems - SVOp->getMaskElt(NumElems - 1) - 1); 4854 unsigned OpSrc; 4855 4856 if (!NumZeros) 4857 return false; 4858 4859 // Considering the elements in the mask that are not consecutive zeros, 4860 // check if they consecutively come from only one of the source vectors. 4861 // 4862 // 0 { A, B, X, X } = V2 4863 // / \ / / 4864 // vector_shuffle V1, V2 <X, X, 4, 5> 4865 // 4866 if (!isShuffleMaskConsecutive(SVOp, 4867 NumZeros, // Mask Start Index 4868 NumElems, // Mask End Index(exclusive) 4869 0, // Where to start looking in the src vector 4870 NumElems, // Number of elements in vector 4871 OpSrc)) // Which source operand ? 4872 return false; 4873 4874 isLeft = true; 4875 ShAmt = NumZeros; 4876 ShVal = SVOp->getOperand(OpSrc); 4877 return true; 4878} 4879 4880/// isVectorShift - Returns true if the shuffle can be implemented as a 4881/// logical left or right shift of a vector. 4882static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG, 4883 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) { 4884 // Although the logic below support any bitwidth size, there are no 4885 // shift instructions which handle more than 128-bit vectors. 4886 if (!SVOp->getValueType(0).is128BitVector()) 4887 return false; 4888 4889 if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) || 4890 isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt)) 4891 return true; 4892 4893 return false; 4894} 4895 4896/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8. 
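/// Sketch of the approach below: adjacent byte elements are zero-extended to
/// i16, merged pairwise with a shift and OR, inserted into a v8i16, and the
/// result is bitcast back to v16i8.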
4897/// 4898static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros, 4899 unsigned NumNonZero, unsigned NumZero, 4900 SelectionDAG &DAG, 4901 const X86Subtarget* Subtarget, 4902 const TargetLowering &TLI) { 4903 if (NumNonZero > 8) 4904 return SDValue(); 4905 4906 DebugLoc dl = Op.getDebugLoc(); 4907 SDValue V(0, 0); 4908 bool First = true; 4909 for (unsigned i = 0; i < 16; ++i) { 4910 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0; 4911 if (ThisIsNonZero && First) { 4912 if (NumZero) 4913 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl); 4914 else 4915 V = DAG.getUNDEF(MVT::v8i16); 4916 First = false; 4917 } 4918 4919 if ((i & 1) != 0) { 4920 SDValue ThisElt(0, 0), LastElt(0, 0); 4921 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0; 4922 if (LastIsNonZero) { 4923 LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl, 4924 MVT::i16, Op.getOperand(i-1)); 4925 } 4926 if (ThisIsNonZero) { 4927 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i)); 4928 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16, 4929 ThisElt, DAG.getConstant(8, MVT::i8)); 4930 if (LastIsNonZero) 4931 ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt); 4932 } else 4933 ThisElt = LastElt; 4934 4935 if (ThisElt.getNode()) 4936 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt, 4937 DAG.getIntPtrConstant(i/2)); 4938 } 4939 } 4940 4941 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V); 4942} 4943 4944/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16. 4945/// 4946static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros, 4947 unsigned NumNonZero, unsigned NumZero, 4948 SelectionDAG &DAG, 4949 const X86Subtarget* Subtarget, 4950 const TargetLowering &TLI) { 4951 if (NumNonZero > 4) 4952 return SDValue(); 4953 4954 DebugLoc dl = Op.getDebugLoc(); 4955 SDValue V(0, 0); 4956 bool First = true; 4957 for (unsigned i = 0; i < 8; ++i) { 4958 bool isNonZero = (NonZeros & (1 << i)) != 0; 4959 if (isNonZero) { 4960 if (First) { 4961 if (NumZero) 4962 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl); 4963 else 4964 V = DAG.getUNDEF(MVT::v8i16); 4965 First = false; 4966 } 4967 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, 4968 MVT::v8i16, V, Op.getOperand(i), 4969 DAG.getIntPtrConstant(i)); 4970 } 4971 } 4972 4973 return V; 4974} 4975 4976/// getVShift - Return a vector logical shift node. 4977/// 4978static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, 4979 unsigned NumBits, SelectionDAG &DAG, 4980 const TargetLowering &TLI, DebugLoc dl) { 4981 assert(VT.is128BitVector() && "Unknown type for VShift"); 4982 EVT ShVT = MVT::v2i64; 4983 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ; 4984 SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp); 4985 return DAG.getNode(ISD::BITCAST, dl, VT, 4986 DAG.getNode(Opc, dl, ShVT, SrcOp, 4987 DAG.getConstant(NumBits, 4988 TLI.getScalarShiftAmountTy(SrcOp.getValueType())))); 4989} 4990 4991SDValue 4992X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl, 4993 SelectionDAG &DAG) const { 4994 4995 // Check if the scalar load can be widened into a vector load. And if 4996 // the address is "base + cst" see if the cst can be "absorbed" into 4997 // the shuffle mask. 
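// For example, a v4f32 splat of (load (FrameIndex + 8)) can, provided the
// stack slot can be realigned to 16 bytes, be rewritten as a 16-byte load of
// the slot followed by a splat of element 2 (offset 8 / 4 bytes per element).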
4998 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) { 4999 SDValue Ptr = LD->getBasePtr(); 5000 if (!ISD::isNormalLoad(LD) || LD->isVolatile()) 5001 return SDValue(); 5002 EVT PVT = LD->getValueType(0); 5003 if (PVT != MVT::i32 && PVT != MVT::f32) 5004 return SDValue(); 5005 5006 int FI = -1; 5007 int64_t Offset = 0; 5008 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) { 5009 FI = FINode->getIndex(); 5010 Offset = 0; 5011 } else if (DAG.isBaseWithConstantOffset(Ptr) && 5012 isa<FrameIndexSDNode>(Ptr.getOperand(0))) { 5013 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 5014 Offset = Ptr.getConstantOperandVal(1); 5015 Ptr = Ptr.getOperand(0); 5016 } else { 5017 return SDValue(); 5018 } 5019 5020 // FIXME: 256-bit vector instructions don't require a strict alignment, 5021 // improve this code to support it better. 5022 unsigned RequiredAlign = VT.getSizeInBits()/8; 5023 SDValue Chain = LD->getChain(); 5024 // Make sure the stack object alignment is at least 16 or 32. 5025 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 5026 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) { 5027 if (MFI->isFixedObjectIndex(FI)) { 5028 // Can't change the alignment. FIXME: It's possible to compute 5029 // the exact stack offset and reference FI + adjust offset instead. 5030 // If someone *really* cares about this. That's the way to implement it. 5031 return SDValue(); 5032 } else { 5033 MFI->setObjectAlignment(FI, RequiredAlign); 5034 } 5035 } 5036 5037 // (Offset % 16 or 32) must be multiple of 4. Then address is then 5038 // Ptr + (Offset & ~15). 5039 if (Offset < 0) 5040 return SDValue(); 5041 if ((Offset % RequiredAlign) & 3) 5042 return SDValue(); 5043 int64_t StartOffset = Offset & ~(RequiredAlign-1); 5044 if (StartOffset) 5045 Ptr = DAG.getNode(ISD::ADD, Ptr.getDebugLoc(), Ptr.getValueType(), 5046 Ptr,DAG.getConstant(StartOffset, Ptr.getValueType())); 5047 5048 int EltNo = (Offset - StartOffset) >> 2; 5049 unsigned NumElems = VT.getVectorNumElements(); 5050 5051 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems); 5052 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr, 5053 LD->getPointerInfo().getWithOffset(StartOffset), 5054 false, false, false, 0); 5055 5056 SmallVector<int, 8> Mask; 5057 for (unsigned i = 0; i != NumElems; ++i) 5058 Mask.push_back(EltNo); 5059 5060 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]); 5061 } 5062 5063 return SDValue(); 5064} 5065 5066/// EltsFromConsecutiveLoads - Given the initializing elements 'Elts' of a 5067/// vector of type 'VT', see if the elements can be replaced by a single large 5068/// load which has the same value as a build_vector whose operands are 'elts'. 5069/// 5070/// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a 5071/// 5072/// FIXME: we'd also like to handle the case where the last elements are zero 5073/// rather than undef via VZEXT_LOAD, but we do not detect that case today. 5074/// There's even a handy isZeroNode for that purpose. 5075static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts, 5076 DebugLoc &DL, SelectionDAG &DAG) { 5077 EVT EltVT = VT.getVectorElementType(); 5078 unsigned NumElems = Elts.size(); 5079 5080 LoadSDNode *LDBase = NULL; 5081 unsigned LastLoadedElt = -1U; 5082 5083 // For each element in the initializer, see if we've found a load or an undef. 5084 // If we don't find an initial load element, or later load elements are 5085 // non-consecutive, bail out. 
5086 for (unsigned i = 0; i < NumElems; ++i) { 5087 SDValue Elt = Elts[i]; 5088 5089 if (!Elt.getNode() || 5090 (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode()))) 5091 return SDValue(); 5092 if (!LDBase) { 5093 if (Elt.getNode()->getOpcode() == ISD::UNDEF) 5094 return SDValue(); 5095 LDBase = cast<LoadSDNode>(Elt.getNode()); 5096 LastLoadedElt = i; 5097 continue; 5098 } 5099 if (Elt.getOpcode() == ISD::UNDEF) 5100 continue; 5101 5102 LoadSDNode *LD = cast<LoadSDNode>(Elt); 5103 if (!DAG.isConsecutiveLoad(LD, LDBase, EltVT.getSizeInBits()/8, i)) 5104 return SDValue(); 5105 LastLoadedElt = i; 5106 } 5107 5108 // If we have found an entire vector of loads and undefs, then return a large 5109 // load of the entire vector width starting at the base pointer. If we found 5110 // consecutive loads for the low half, generate a vzext_load node. 5111 if (LastLoadedElt == NumElems - 1) { 5112 if (DAG.InferPtrAlignment(LDBase->getBasePtr()) >= 16) 5113 return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(), 5114 LDBase->getPointerInfo(), 5115 LDBase->isVolatile(), LDBase->isNonTemporal(), 5116 LDBase->isInvariant(), 0); 5117 return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(), 5118 LDBase->getPointerInfo(), 5119 LDBase->isVolatile(), LDBase->isNonTemporal(), 5120 LDBase->isInvariant(), LDBase->getAlignment()); 5121 } 5122 if (NumElems == 4 && LastLoadedElt == 1 && 5123 DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) { 5124 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other); 5125 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() }; 5126 SDValue ResNode = 5127 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, 5128 array_lengthof(Ops), MVT::i64, 5129 LDBase->getPointerInfo(), 5130 LDBase->getAlignment(), 5131 false/*isVolatile*/, true/*ReadMem*/, 5132 false/*WriteMem*/); 5133 5134 // Make sure the newly-created LOAD is in the same position as LDBase in 5135 // terms of dependency. We create a TokenFactor for LDBase and ResNode, and 5136 // update uses of LDBase's output chain to use the TokenFactor. 5137 if (LDBase->hasAnyUseOfValue(1)) { 5138 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, 5139 SDValue(LDBase, 1), SDValue(ResNode.getNode(), 1)); 5140 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain); 5141 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1), 5142 SDValue(ResNode.getNode(), 1)); 5143 } 5144 5145 return DAG.getNode(ISD::BITCAST, DL, VT, ResNode); 5146 } 5147 return SDValue(); 5148} 5149 5150/// LowerVectorBroadcast - Attempt to use the vbroadcast instruction 5151/// to generate a splat value for the following cases: 5152/// 1. A splat BUILD_VECTOR which uses a single scalar load, or a constant. 5153/// 2. A splat shuffle which uses a scalar_to_vector node which comes from 5154/// a scalar load, or a constant. 5155/// The VBROADCAST node is returned when a pattern is found, 5156/// or SDValue() otherwise. 5157SDValue 5158X86TargetLowering::LowerVectorBroadcast(SDValue Op, SelectionDAG &DAG) const { 5159 if (!Subtarget->hasFp256()) 5160 return SDValue(); 5161 5162 MVT VT = Op.getValueType().getSimpleVT(); 5163 DebugLoc dl = Op.getDebugLoc(); 5164 5165 assert((VT.is128BitVector() || VT.is256BitVector()) && 5166 "Unsupported vector type for broadcast."); 5167 5168 SDValue Ld; 5169 bool ConstSplatVal; 5170 5171 switch (Op.getOpcode()) { 5172 default: 5173 // Unknown pattern found. 5174 return SDValue(); 5175 5176 case ISD::BUILD_VECTOR: { 5177 // The BUILD_VECTOR node must be a splat. 
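// For example, (v8f32 (build_vector (load p) x 8)), where the load is used
// only by this build_vector, becomes (VBROADCAST (load p)), which should
// select to a vbroadcastss from memory.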
5178 if (!isSplatVector(Op.getNode())) 5179 return SDValue(); 5180 5181 Ld = Op.getOperand(0); 5182 ConstSplatVal = (Ld.getOpcode() == ISD::Constant || 5183 Ld.getOpcode() == ISD::ConstantFP); 5184 5185 // The suspected load node has several users. Make sure that all 5186 // of its users are from the BUILD_VECTOR node. 5187 // Constants may have multiple users. 5188 if (!ConstSplatVal && !Ld->hasNUsesOfValue(VT.getVectorNumElements(), 0)) 5189 return SDValue(); 5190 break; 5191 } 5192 5193 case ISD::VECTOR_SHUFFLE: { 5194 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 5195 5196 // Shuffles must have a splat mask where the first element is 5197 // broadcasted. 5198 if ((!SVOp->isSplat()) || SVOp->getMaskElt(0) != 0) 5199 return SDValue(); 5200 5201 SDValue Sc = Op.getOperand(0); 5202 if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR && 5203 Sc.getOpcode() != ISD::BUILD_VECTOR) { 5204 5205 if (!Subtarget->hasInt256()) 5206 return SDValue(); 5207 5208 // Use the register form of the broadcast instruction available on AVX2. 5209 if (VT.is256BitVector()) 5210 Sc = Extract128BitVector(Sc, 0, DAG, dl); 5211 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc); 5212 } 5213 5214 Ld = Sc.getOperand(0); 5215 ConstSplatVal = (Ld.getOpcode() == ISD::Constant || 5216 Ld.getOpcode() == ISD::ConstantFP); 5217 5218 // The scalar_to_vector node and the suspected 5219 // load node must have exactly one user. 5220 // Constants may have multiple users. 5221 if (!ConstSplatVal && (!Sc.hasOneUse() || !Ld.hasOneUse())) 5222 return SDValue(); 5223 break; 5224 } 5225 } 5226 5227 bool Is256 = VT.is256BitVector(); 5228 5229 // Handle the broadcasting a single constant scalar from the constant pool 5230 // into a vector. On Sandybridge it is still better to load a constant vector 5231 // from the constant pool and not to broadcast it from a scalar. 5232 if (ConstSplatVal && Subtarget->hasInt256()) { 5233 EVT CVT = Ld.getValueType(); 5234 assert(!CVT.isVector() && "Must not broadcast a vector type"); 5235 unsigned ScalarSize = CVT.getSizeInBits(); 5236 5237 if (ScalarSize == 32 || (Is256 && ScalarSize == 64)) { 5238 const Constant *C = 0; 5239 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld)) 5240 C = CI->getConstantIntValue(); 5241 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld)) 5242 C = CF->getConstantFPValue(); 5243 5244 assert(C && "Invalid constant type"); 5245 5246 SDValue CP = DAG.getConstantPool(C, getPointerTy()); 5247 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment(); 5248 Ld = DAG.getLoad(CVT, dl, DAG.getEntryNode(), CP, 5249 MachinePointerInfo::getConstantPool(), 5250 false, false, false, Alignment); 5251 5252 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); 5253 } 5254 } 5255 5256 bool IsLoad = ISD::isNormalLoad(Ld.getNode()); 5257 unsigned ScalarSize = Ld.getValueType().getSizeInBits(); 5258 5259 // Handle AVX2 in-register broadcasts. 5260 if (!IsLoad && Subtarget->hasInt256() && 5261 (ScalarSize == 32 || (Is256 && ScalarSize == 64))) 5262 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); 5263 5264 // The scalar source must be a normal load. 
5265 if (!IsLoad) 5266 return SDValue(); 5267 5268 if (ScalarSize == 32 || (Is256 && ScalarSize == 64)) 5269 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); 5270 5271 // The integer check is needed for the 64-bit into 128-bit so it doesn't match 5272 // double since there is no vbroadcastsd xmm 5273 if (Subtarget->hasInt256() && Ld.getValueType().isInteger()) { 5274 if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64) 5275 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); 5276 } 5277 5278 // Unsupported broadcast. 5279 return SDValue(); 5280} 5281 5282SDValue 5283X86TargetLowering::buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) const { 5284 EVT VT = Op.getValueType(); 5285 5286 // Skip if insert_vec_elt is not supported. 5287 if (!isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT)) 5288 return SDValue(); 5289 5290 DebugLoc DL = Op.getDebugLoc(); 5291 unsigned NumElems = Op.getNumOperands(); 5292 5293 SDValue VecIn1; 5294 SDValue VecIn2; 5295 SmallVector<unsigned, 4> InsertIndices; 5296 SmallVector<int, 8> Mask(NumElems, -1); 5297 5298 for (unsigned i = 0; i != NumElems; ++i) { 5299 unsigned Opc = Op.getOperand(i).getOpcode(); 5300 5301 if (Opc == ISD::UNDEF) 5302 continue; 5303 5304 if (Opc != ISD::EXTRACT_VECTOR_ELT) { 5305 // Quit if more than 1 elements need inserting. 5306 if (InsertIndices.size() > 1) 5307 return SDValue(); 5308 5309 InsertIndices.push_back(i); 5310 continue; 5311 } 5312 5313 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0); 5314 SDValue ExtIdx = Op.getOperand(i).getOperand(1); 5315 5316 // Quit if extracted from vector of different type. 5317 if (ExtractedFromVec.getValueType() != VT) 5318 return SDValue(); 5319 5320 // Quit if non-constant index. 5321 if (!isa<ConstantSDNode>(ExtIdx)) 5322 return SDValue(); 5323 5324 if (VecIn1.getNode() == 0) 5325 VecIn1 = ExtractedFromVec; 5326 else if (VecIn1 != ExtractedFromVec) { 5327 if (VecIn2.getNode() == 0) 5328 VecIn2 = ExtractedFromVec; 5329 else if (VecIn2 != ExtractedFromVec) 5330 // Quit if more than 2 vectors to shuffle 5331 return SDValue(); 5332 } 5333 5334 unsigned Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue(); 5335 5336 if (ExtractedFromVec == VecIn1) 5337 Mask[i] = Idx; 5338 else if (ExtractedFromVec == VecIn2) 5339 Mask[i] = Idx + NumElems; 5340 } 5341 5342 if (VecIn1.getNode() == 0) 5343 return SDValue(); 5344 5345 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT); 5346 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, &Mask[0]); 5347 for (unsigned i = 0, e = InsertIndices.size(); i != e; ++i) { 5348 unsigned Idx = InsertIndices[i]; 5349 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx), 5350 DAG.getIntPtrConstant(Idx)); 5351 } 5352 5353 return NV; 5354} 5355 5356SDValue 5357X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { 5358 DebugLoc dl = Op.getDebugLoc(); 5359 5360 MVT VT = Op.getValueType().getSimpleVT(); 5361 MVT ExtVT = VT.getVectorElementType(); 5362 unsigned NumElems = Op.getNumOperands(); 5363 5364 // Vectors containing all zeros can be matched by pxor and xorps later 5365 if (ISD::isBuildVectorAllZeros(Op.getNode())) { 5366 // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd 5367 // and 2) ensure that i64 scalars are eliminated on x86-32 hosts. 
5368 if (VT == MVT::v4i32 || VT == MVT::v8i32) 5369 return Op; 5370 5371 return getZeroVector(VT, Subtarget, DAG, dl); 5372 } 5373 5374 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width 5375 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use 5376 // vpcmpeqd on 256-bit vectors. 5377 if (Subtarget->hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) { 5378 if (VT == MVT::v4i32 || (VT == MVT::v8i32 && Subtarget->hasInt256())) 5379 return Op; 5380 5381 return getOnesVector(VT, Subtarget->hasInt256(), DAG, dl); 5382 } 5383 5384 SDValue Broadcast = LowerVectorBroadcast(Op, DAG); 5385 if (Broadcast.getNode()) 5386 return Broadcast; 5387 5388 unsigned EVTBits = ExtVT.getSizeInBits(); 5389 5390 unsigned NumZero = 0; 5391 unsigned NumNonZero = 0; 5392 unsigned NonZeros = 0; 5393 bool IsAllConstants = true; 5394 SmallSet<SDValue, 8> Values; 5395 for (unsigned i = 0; i < NumElems; ++i) { 5396 SDValue Elt = Op.getOperand(i); 5397 if (Elt.getOpcode() == ISD::UNDEF) 5398 continue; 5399 Values.insert(Elt); 5400 if (Elt.getOpcode() != ISD::Constant && 5401 Elt.getOpcode() != ISD::ConstantFP) 5402 IsAllConstants = false; 5403 if (X86::isZeroNode(Elt)) 5404 NumZero++; 5405 else { 5406 NonZeros |= (1 << i); 5407 NumNonZero++; 5408 } 5409 } 5410 5411 // All undef vector. Return an UNDEF. All zero vectors were handled above. 5412 if (NumNonZero == 0) 5413 return DAG.getUNDEF(VT); 5414 5415 // Special case for single non-zero, non-undef, element. 5416 if (NumNonZero == 1) { 5417 unsigned Idx = CountTrailingZeros_32(NonZeros); 5418 SDValue Item = Op.getOperand(Idx); 5419 5420 // If this is an insertion of an i64 value on x86-32, and if the top bits of 5421 // the value are obviously zero, truncate the value to i32 and do the 5422 // insertion that way. Only do this if the value is non-constant or if the 5423 // value is a constant being inserted into element 0. It is cheaper to do 5424 // a constant pool load than it is to do a movd + shuffle. 5425 if (ExtVT == MVT::i64 && !Subtarget->is64Bit() && 5426 (!IsAllConstants || Idx == 0)) { 5427 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) { 5428 // Handle SSE only. 5429 assert(VT == MVT::v2i64 && "Expected an SSE value type!"); 5430 EVT VecVT = MVT::v4i32; 5431 unsigned VecElts = 4; 5432 5433 // Truncate the value (which may itself be a constant) to i32, and 5434 // convert it to a vector with movd (S2V+shuffle to zero extend). 5435 Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item); 5436 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item); 5437 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG); 5438 5439 // Now we have our 32-bit value zero extended in the low element of 5440 // a vector. If Idx != 0, swizzle it into place. 5441 if (Idx != 0) { 5442 SmallVector<int, 4> Mask; 5443 Mask.push_back(Idx); 5444 for (unsigned i = 1; i != VecElts; ++i) 5445 Mask.push_back(i); 5446 Item = DAG.getVectorShuffle(VecVT, dl, Item, DAG.getUNDEF(VecVT), 5447 &Mask[0]); 5448 } 5449 return DAG.getNode(ISD::BITCAST, dl, VT, Item); 5450 } 5451 } 5452 5453 // If we have a constant or non-constant insertion into the low element of 5454 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into 5455 // the rest of the elements. This will be matched as movd/movq/movss/movsd 5456 // depending on what the source datatype is. 
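// For example, (v4f32 (build_vector x, 0, 0, 0)) becomes a SCALAR_TO_VECTOR
// of x shuffled with a zero vector, which is typically matched as a single
// movss against a zeroed register.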
5457 if (Idx == 0) { 5458 if (NumZero == 0) 5459 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); 5460 5461 if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 || 5462 (ExtVT == MVT::i64 && Subtarget->is64Bit())) { 5463 if (VT.is256BitVector()) { 5464 SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl); 5465 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec, 5466 Item, DAG.getIntPtrConstant(0)); 5467 } 5468 assert(VT.is128BitVector() && "Expected an SSE value type!"); 5469 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); 5470 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector. 5471 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG); 5472 } 5473 5474 if (ExtVT == MVT::i16 || ExtVT == MVT::i8) { 5475 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item); 5476 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item); 5477 if (VT.is256BitVector()) { 5478 SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl); 5479 Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl); 5480 } else { 5481 assert(VT.is128BitVector() && "Expected an SSE value type!"); 5482 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG); 5483 } 5484 return DAG.getNode(ISD::BITCAST, dl, VT, Item); 5485 } 5486 } 5487 5488 // Is it a vector logical left shift? 5489 if (NumElems == 2 && Idx == 1 && 5490 X86::isZeroNode(Op.getOperand(0)) && 5491 !X86::isZeroNode(Op.getOperand(1))) { 5492 unsigned NumBits = VT.getSizeInBits(); 5493 return getVShift(true, VT, 5494 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 5495 VT, Op.getOperand(1)), 5496 NumBits/2, DAG, *this, dl); 5497 } 5498 5499 if (IsAllConstants) // Otherwise, it's better to do a constpool load. 5500 return SDValue(); 5501 5502 // Otherwise, if this is a vector with i32 or f32 elements, and the element 5503 // is a non-constant being inserted into an element other than the low one, 5504 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka 5505 // movd/movss) to move this into the low element, then shuffle it into 5506 // place. 5507 if (EVTBits == 32) { 5508 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); 5509 5510 // Turn it into a shuffle of zero and zero-extended scalar to vector. 5511 Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, Subtarget, DAG); 5512 SmallVector<int, 8> MaskVec; 5513 for (unsigned i = 0; i != NumElems; ++i) 5514 MaskVec.push_back(i == Idx ? 0 : 1); 5515 return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]); 5516 } 5517 } 5518 5519 // Splat is obviously ok. Let legalizer expand it to a shuffle. 5520 if (Values.size() == 1) { 5521 if (EVTBits == 32) { 5522 // Instead of a shuffle like this: 5523 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0> 5524 // Check if it's possible to issue this instead. 5525 // shuffle (vload ptr)), undef, <1, 1, 1, 1> 5526 unsigned Idx = CountTrailingZeros_32(NonZeros); 5527 SDValue Item = Op.getOperand(Idx); 5528 if (Op.getNode()->isOnlyUserOf(Item.getNode())) 5529 return LowerAsSplatVectorLoad(Item, VT, dl, DAG); 5530 } 5531 return SDValue(); 5532 } 5533 5534 // A vector full of immediates; various special cases are already 5535 // handled, so this is best done with a single constant-pool load. 5536 if (IsAllConstants) 5537 return SDValue(); 5538 5539 // For AVX-length vectors, build the individual 128-bit pieces and use 5540 // shuffles to put them in place. 
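// For example, a v8i32 build_vector is split into two v4i32 build_vectors
// that are recombined with INSERT_SUBVECTOR nodes (matched as vinsertf128).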
5541 if (VT.is256BitVector()) { 5542 SmallVector<SDValue, 32> V; 5543 for (unsigned i = 0; i != NumElems; ++i) 5544 V.push_back(Op.getOperand(i)); 5545 5546 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2); 5547 5548 // Build both the lower and upper subvector. 5549 SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT, &V[0], NumElems/2); 5550 SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT, &V[NumElems / 2], 5551 NumElems/2); 5552 5553 // Recreate the wider vector with the lower and upper part. 5554 return Concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl); 5555 } 5556 5557 // Let legalizer expand 2-wide build_vectors. 5558 if (EVTBits == 64) { 5559 if (NumNonZero == 1) { 5560 // One half is zero or undef. 5561 unsigned Idx = CountTrailingZeros_32(NonZeros); 5562 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, 5563 Op.getOperand(Idx)); 5564 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG); 5565 } 5566 return SDValue(); 5567 } 5568 5569 // If element VT is < 32 bits, convert it to inserts into a zero vector. 5570 if (EVTBits == 8 && NumElems == 16) { 5571 SDValue V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG, 5572 Subtarget, *this); 5573 if (V.getNode()) return V; 5574 } 5575 5576 if (EVTBits == 16 && NumElems == 8) { 5577 SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG, 5578 Subtarget, *this); 5579 if (V.getNode()) return V; 5580 } 5581 5582 // If element VT is == 32 bits, turn it into a number of shuffles. 5583 SmallVector<SDValue, 8> V(NumElems); 5584 if (NumElems == 4 && NumZero > 0) { 5585 for (unsigned i = 0; i < 4; ++i) { 5586 bool isZero = !(NonZeros & (1 << i)); 5587 if (isZero) 5588 V[i] = getZeroVector(VT, Subtarget, DAG, dl); 5589 else 5590 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i)); 5591 } 5592 5593 for (unsigned i = 0; i < 2; ++i) { 5594 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) { 5595 default: break; 5596 case 0: 5597 V[i] = V[i*2]; // Must be a zero vector. 5598 break; 5599 case 1: 5600 V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]); 5601 break; 5602 case 2: 5603 V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]); 5604 break; 5605 case 3: 5606 V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]); 5607 break; 5608 } 5609 } 5610 5611 bool Reverse1 = (NonZeros & 0x3) == 2; 5612 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2; 5613 int MaskVec[] = { 5614 Reverse1 ? 1 : 0, 5615 Reverse1 ? 0 : 1, 5616 static_cast<int>(Reverse2 ? NumElems+1 : NumElems), 5617 static_cast<int>(Reverse2 ? NumElems : NumElems+1) 5618 }; 5619 return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]); 5620 } 5621 5622 if (Values.size() > 1 && VT.is128BitVector()) { 5623 // Check for a build vector of consecutive loads. 5624 for (unsigned i = 0; i < NumElems; ++i) 5625 V[i] = Op.getOperand(i); 5626 5627 // Check for elements which are consecutive loads. 5628 SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG); 5629 if (LD.getNode()) 5630 return LD; 5631 5632 // Check for a build vector from mostly shuffle plus few inserting. 5633 SDValue Sh = buildFromShuffleMostly(Op, DAG); 5634 if (Sh.getNode()) 5635 return Sh; 5636 5637 // For SSE 4.1, use insertps to put the high elements into the low element. 
5638 if (getSubtarget()->hasSSE41()) { 5639 SDValue Result; 5640 if (Op.getOperand(0).getOpcode() != ISD::UNDEF) 5641 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0)); 5642 else 5643 Result = DAG.getUNDEF(VT); 5644 5645 for (unsigned i = 1; i < NumElems; ++i) { 5646 if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue; 5647 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result, 5648 Op.getOperand(i), DAG.getIntPtrConstant(i)); 5649 } 5650 return Result; 5651 } 5652 5653 // Otherwise, expand into a number of unpckl*, start by extending each of 5654 // our (non-undef) elements to the full vector width with the element in the 5655 // bottom slot of the vector (which generates no code for SSE). 5656 for (unsigned i = 0; i < NumElems; ++i) { 5657 if (Op.getOperand(i).getOpcode() != ISD::UNDEF) 5658 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i)); 5659 else 5660 V[i] = DAG.getUNDEF(VT); 5661 } 5662 5663 // Next, we iteratively mix elements, e.g. for v4f32: 5664 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0> 5665 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1> 5666 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0> 5667 unsigned EltStride = NumElems >> 1; 5668 while (EltStride != 0) { 5669 for (unsigned i = 0; i < EltStride; ++i) { 5670 // If V[i+EltStride] is undef and this is the first round of mixing, 5671 // then it is safe to just drop this shuffle: V[i] is already in the 5672 // right place, the one element (since it's the first round) being 5673 // inserted as undef can be dropped. This isn't safe for successive 5674 // rounds because they will permute elements within both vectors. 5675 if (V[i+EltStride].getOpcode() == ISD::UNDEF && 5676 EltStride == NumElems/2) 5677 continue; 5678 5679 V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]); 5680 } 5681 EltStride >>= 1; 5682 } 5683 return V[0]; 5684 } 5685 return SDValue(); 5686} 5687 5688// LowerAVXCONCAT_VECTORS - 256-bit AVX can use the vinsertf128 instruction 5689// to create 256-bit vectors from two other 128-bit ones. 5690static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 5691 DebugLoc dl = Op.getDebugLoc(); 5692 MVT ResVT = Op.getValueType().getSimpleVT(); 5693 5694 assert(ResVT.is256BitVector() && "Value type must be 256-bit wide"); 5695 5696 SDValue V1 = Op.getOperand(0); 5697 SDValue V2 = Op.getOperand(1); 5698 unsigned NumElems = ResVT.getVectorNumElements(); 5699 5700 return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl); 5701} 5702 5703static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 5704 assert(Op.getNumOperands() == 2); 5705 5706 // 256-bit AVX can use the vinsertf128 instruction to create 256-bit vectors 5707 // from two other 128-bit ones. 5708 return LowerAVXCONCAT_VECTORS(Op, DAG); 5709} 5710 5711// Try to lower a shuffle node into a simple blend instruction. 5712static SDValue 5713LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp, 5714 const X86Subtarget *Subtarget, SelectionDAG &DAG) { 5715 SDValue V1 = SVOp->getOperand(0); 5716 SDValue V2 = SVOp->getOperand(1); 5717 DebugLoc dl = SVOp->getDebugLoc(); 5718 MVT VT = SVOp->getValueType(0).getSimpleVT(); 5719 MVT EltVT = VT.getVectorElementType(); 5720 unsigned NumElems = VT.getVectorNumElements(); 5721 5722 if (!Subtarget->hasSSE41() || EltVT == MVT::i8) 5723 return SDValue(); 5724 if (!Subtarget->hasInt256() && VT == MVT::v16i16) 5725 return SDValue(); 5726 5727 // Check the mask for BLEND and build the value. 
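// For example, the v4i32 shuffle mask <0, 5, 2, 7> takes elements 1 and 3
// from V2, so the blend immediate computed below is 0b1010.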
5728 unsigned MaskValue = 0; 5729 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise. 5730 unsigned NumLanes = (NumElems-1)/8 + 1; 5731 unsigned NumElemsInLane = NumElems / NumLanes; 5732 5733 // Blend for v16i16 should be symetric for the both lanes. 5734 for (unsigned i = 0; i < NumElemsInLane; ++i) { 5735 5736 int SndLaneEltIdx = (NumLanes == 2) ? 5737 SVOp->getMaskElt(i + NumElemsInLane) : -1; 5738 int EltIdx = SVOp->getMaskElt(i); 5739 5740 if ((EltIdx < 0 || EltIdx == (int)i) && 5741 (SndLaneEltIdx < 0 || SndLaneEltIdx == (int)(i + NumElemsInLane))) 5742 continue; 5743 5744 if (((unsigned)EltIdx == (i + NumElems)) && 5745 (SndLaneEltIdx < 0 || 5746 (unsigned)SndLaneEltIdx == i + NumElems + NumElemsInLane)) 5747 MaskValue |= (1<<i); 5748 else 5749 return SDValue(); 5750 } 5751 5752 // Convert i32 vectors to floating point if it is not AVX2. 5753 // AVX2 introduced VPBLENDD instruction for 128 and 256-bit vectors. 5754 MVT BlendVT = VT; 5755 if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) { 5756 BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()), 5757 NumElems); 5758 V1 = DAG.getNode(ISD::BITCAST, dl, VT, V1); 5759 V2 = DAG.getNode(ISD::BITCAST, dl, VT, V2); 5760 } 5761 5762 SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, V1, V2, 5763 DAG.getConstant(MaskValue, MVT::i32)); 5764 return DAG.getNode(ISD::BITCAST, dl, VT, Ret); 5765} 5766 5767// v8i16 shuffles - Prefer shuffles in the following order: 5768// 1. [all] pshuflw, pshufhw, optional move 5769// 2. [ssse3] 1 x pshufb 5770// 3. [ssse3] 2 x pshufb + 1 x por 5771// 4. [all] mov + pshuflw + pshufhw + N x (pextrw + pinsrw) 5772static SDValue 5773LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget, 5774 SelectionDAG &DAG) { 5775 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 5776 SDValue V1 = SVOp->getOperand(0); 5777 SDValue V2 = SVOp->getOperand(1); 5778 DebugLoc dl = SVOp->getDebugLoc(); 5779 SmallVector<int, 8> MaskVals; 5780 5781 // Determine if more than 1 of the words in each of the low and high quadwords 5782 // of the result come from the same quadword of one of the two inputs. Undef 5783 // mask values count as coming from any quadword, for better codegen. 5784 unsigned LoQuad[] = { 0, 0, 0, 0 }; 5785 unsigned HiQuad[] = { 0, 0, 0, 0 }; 5786 std::bitset<4> InputQuads; 5787 for (unsigned i = 0; i < 8; ++i) { 5788 unsigned *Quad = i < 4 ? LoQuad : HiQuad; 5789 int EltIdx = SVOp->getMaskElt(i); 5790 MaskVals.push_back(EltIdx); 5791 if (EltIdx < 0) { 5792 ++Quad[0]; 5793 ++Quad[1]; 5794 ++Quad[2]; 5795 ++Quad[3]; 5796 continue; 5797 } 5798 ++Quad[EltIdx / 4]; 5799 InputQuads.set(EltIdx / 4); 5800 } 5801 5802 int BestLoQuad = -1; 5803 unsigned MaxQuad = 1; 5804 for (unsigned i = 0; i < 4; ++i) { 5805 if (LoQuad[i] > MaxQuad) { 5806 BestLoQuad = i; 5807 MaxQuad = LoQuad[i]; 5808 } 5809 } 5810 5811 int BestHiQuad = -1; 5812 MaxQuad = 1; 5813 for (unsigned i = 0; i < 4; ++i) { 5814 if (HiQuad[i] > MaxQuad) { 5815 BestHiQuad = i; 5816 MaxQuad = HiQuad[i]; 5817 } 5818 } 5819 5820 // For SSSE3, If all 8 words of the result come from only 1 quadword of each 5821 // of the two input vectors, shuffle them into one input vector so only a 5822 // single pshufb instruction is necessary. If There are more than 2 input 5823 // quads, disable the next transformation since it does not help SSSE3. 
5824 bool V1Used = InputQuads[0] || InputQuads[1]; 5825 bool V2Used = InputQuads[2] || InputQuads[3]; 5826 if (Subtarget->hasSSSE3()) { 5827 if (InputQuads.count() == 2 && V1Used && V2Used) { 5828 BestLoQuad = InputQuads[0] ? 0 : 1; 5829 BestHiQuad = InputQuads[2] ? 2 : 3; 5830 } 5831 if (InputQuads.count() > 2) { 5832 BestLoQuad = -1; 5833 BestHiQuad = -1; 5834 } 5835 } 5836 5837 // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update 5838 // the shuffle mask. If a quad is scored as -1, that means that it contains 5839 // words from all 4 input quadwords. 5840 SDValue NewV; 5841 if (BestLoQuad >= 0 || BestHiQuad >= 0) { 5842 int MaskV[] = { 5843 BestLoQuad < 0 ? 0 : BestLoQuad, 5844 BestHiQuad < 0 ? 1 : BestHiQuad 5845 }; 5846 NewV = DAG.getVectorShuffle(MVT::v2i64, dl, 5847 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1), 5848 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]); 5849 NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV); 5850 5851 // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the 5852 // source words for the shuffle, to aid later transformations. 5853 bool AllWordsInNewV = true; 5854 bool InOrder[2] = { true, true }; 5855 for (unsigned i = 0; i != 8; ++i) { 5856 int idx = MaskVals[i]; 5857 if (idx != (int)i) 5858 InOrder[i/4] = false; 5859 if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad) 5860 continue; 5861 AllWordsInNewV = false; 5862 break; 5863 } 5864 5865 bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV; 5866 if (AllWordsInNewV) { 5867 for (int i = 0; i != 8; ++i) { 5868 int idx = MaskVals[i]; 5869 if (idx < 0) 5870 continue; 5871 idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4; 5872 if ((idx != i) && idx < 4) 5873 pshufhw = false; 5874 if ((idx != i) && idx > 3) 5875 pshuflw = false; 5876 } 5877 V1 = NewV; 5878 V2Used = false; 5879 BestLoQuad = 0; 5880 BestHiQuad = 1; 5881 } 5882 5883 // If we've eliminated the use of V2, and the new mask is a pshuflw or 5884 // pshufhw, that's as cheap as it gets. Return the new shuffle. 5885 if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) { 5886 unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW; 5887 unsigned TargetMask = 0; 5888 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, 5889 DAG.getUNDEF(MVT::v8i16), &MaskVals[0]); 5890 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode()); 5891 TargetMask = pshufhw ? getShufflePSHUFHWImmediate(SVOp): 5892 getShufflePSHUFLWImmediate(SVOp); 5893 V1 = NewV.getOperand(0); 5894 return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG); 5895 } 5896 } 5897 5898 // Promote splats to a larger type which usually leads to more efficient code. 5899 // FIXME: Is this true if pshufb is available? 5900 if (SVOp->isSplat()) 5901 return PromoteSplat(SVOp, DAG); 5902 5903 // If we have SSSE3, and all words of the result are from 1 input vector, 5904 // case 2 is generated, otherwise case 3 is generated. If no SSSE3 5905 // is present, fall back to case 4. 5906 if (Subtarget->hasSSSE3()) { 5907 SmallVector<SDValue,16> pshufbMask; 5908 5909 // If we have elements from both input vectors, set the high bit of the 5910 // shuffle mask element to zero out elements that come from V2 in the V1 5911 // mask, and elements that come from V1 in the V2 mask, so that the two 5912 // results can be OR'd together. 5913 bool TwoInputs = V1Used && V2Used; 5914 for (unsigned i = 0; i != 8; ++i) { 5915 int EltIdx = MaskVals[i] * 2; 5916 int Idx0 = (TwoInputs && (EltIdx >= 16)) ? 
0x80 : EltIdx; 5917 int Idx1 = (TwoInputs && (EltIdx >= 16)) ? 0x80 : EltIdx+1; 5918 pshufbMask.push_back(DAG.getConstant(Idx0, MVT::i8)); 5919 pshufbMask.push_back(DAG.getConstant(Idx1, MVT::i8)); 5920 } 5921 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V1); 5922 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1, 5923 DAG.getNode(ISD::BUILD_VECTOR, dl, 5924 MVT::v16i8, &pshufbMask[0], 16)); 5925 if (!TwoInputs) 5926 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 5927 5928 // Calculate the shuffle mask for the second input, shuffle it, and 5929 // OR it with the first shuffled input. 5930 pshufbMask.clear(); 5931 for (unsigned i = 0; i != 8; ++i) { 5932 int EltIdx = MaskVals[i] * 2; 5933 int Idx0 = (EltIdx < 16) ? 0x80 : EltIdx - 16; 5934 int Idx1 = (EltIdx < 16) ? 0x80 : EltIdx - 15; 5935 pshufbMask.push_back(DAG.getConstant(Idx0, MVT::i8)); 5936 pshufbMask.push_back(DAG.getConstant(Idx1, MVT::i8)); 5937 } 5938 V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V2); 5939 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2, 5940 DAG.getNode(ISD::BUILD_VECTOR, dl, 5941 MVT::v16i8, &pshufbMask[0], 16)); 5942 V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2); 5943 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 5944 } 5945 5946 // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order, 5947 // and update MaskVals with new element order. 5948 std::bitset<8> InOrder; 5949 if (BestLoQuad >= 0) { 5950 int MaskV[] = { -1, -1, -1, -1, 4, 5, 6, 7 }; 5951 for (int i = 0; i != 4; ++i) { 5952 int idx = MaskVals[i]; 5953 if (idx < 0) { 5954 InOrder.set(i); 5955 } else if ((idx / 4) == BestLoQuad) { 5956 MaskV[i] = idx & 3; 5957 InOrder.set(i); 5958 } 5959 } 5960 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16), 5961 &MaskV[0]); 5962 5963 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3()) { 5964 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode()); 5965 NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16, 5966 NewV.getOperand(0), 5967 getShufflePSHUFLWImmediate(SVOp), DAG); 5968 } 5969 } 5970 5971 // If BestHi >= 0, generate a pshufhw to put the high elements in order, 5972 // and update MaskVals with the new element order. 5973 if (BestHiQuad >= 0) { 5974 int MaskV[] = { 0, 1, 2, 3, -1, -1, -1, -1 }; 5975 for (unsigned i = 4; i != 8; ++i) { 5976 int idx = MaskVals[i]; 5977 if (idx < 0) { 5978 InOrder.set(i); 5979 } else if ((idx / 4) == BestHiQuad) { 5980 MaskV[i] = (idx & 3) + 4; 5981 InOrder.set(i); 5982 } 5983 } 5984 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16), 5985 &MaskV[0]); 5986 5987 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3()) { 5988 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode()); 5989 NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16, 5990 NewV.getOperand(0), 5991 getShufflePSHUFHWImmediate(SVOp), DAG); 5992 } 5993 } 5994 5995 // In case BestHi & BestLo were both -1, which means each quadword has a word 5996 // from each of the four input quadwords, calculate the InOrder bitvector now 5997 // before falling through to the insert/extract cleanup. 5998 if (BestLoQuad == -1 && BestHiQuad == -1) { 5999 NewV = V1; 6000 for (int i = 0; i != 8; ++i) 6001 if (MaskVals[i] < 0 || MaskVals[i] == i) 6002 InOrder.set(i); 6003 } 6004 6005 // The other elements are put in the right place using pextrw and pinsrw. 
6006 for (unsigned i = 0; i != 8; ++i) { 6007 if (InOrder[i]) 6008 continue; 6009 int EltIdx = MaskVals[i]; 6010 if (EltIdx < 0) 6011 continue; 6012 SDValue ExtOp = (EltIdx < 8) ? 6013 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1, 6014 DAG.getIntPtrConstant(EltIdx)) : 6015 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2, 6016 DAG.getIntPtrConstant(EltIdx - 8)); 6017 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp, 6018 DAG.getIntPtrConstant(i)); 6019 } 6020 return NewV; 6021} 6022 6023// v16i8 shuffles - Prefer shuffles in the following order: 6024// 1. [ssse3] 1 x pshufb 6025// 2. [ssse3] 2 x pshufb + 1 x por 6026// 3. [all] v8i16 shuffle + N x pextrw + rotate + pinsrw 6027static 6028SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp, 6029 SelectionDAG &DAG, 6030 const X86TargetLowering &TLI) { 6031 SDValue V1 = SVOp->getOperand(0); 6032 SDValue V2 = SVOp->getOperand(1); 6033 DebugLoc dl = SVOp->getDebugLoc(); 6034 ArrayRef<int> MaskVals = SVOp->getMask(); 6035 6036 // Promote splats to a larger type which usually leads to more efficient code. 6037 // FIXME: Is this true if pshufb is available? 6038 if (SVOp->isSplat()) 6039 return PromoteSplat(SVOp, DAG); 6040 6041 // If we have SSSE3, case 1 is generated when all result bytes come from 6042 // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is 6043 // present, fall back to case 3. 6044 6045 // If SSSE3, use 1 pshufb instruction per vector with elements in the result. 6046 if (TLI.getSubtarget()->hasSSSE3()) { 6047 SmallVector<SDValue,16> pshufbMask; 6048 6049 // If all result elements are from one input vector, then only translate 6050 // undef mask values to 0x80 (zero out result) in the pshufb mask. 6051 // 6052 // Otherwise, we have elements from both input vectors, and must zero out 6053 // elements that come from V2 in the first mask, and V1 in the second mask 6054 // so that we can OR them together. 6055 for (unsigned i = 0; i != 16; ++i) { 6056 int EltIdx = MaskVals[i]; 6057 if (EltIdx < 0 || EltIdx >= 16) 6058 EltIdx = 0x80; 6059 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8)); 6060 } 6061 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1, 6062 DAG.getNode(ISD::BUILD_VECTOR, dl, 6063 MVT::v16i8, &pshufbMask[0], 16)); 6064 6065 // As PSHUFB will zero elements with negative indices, it's safe to ignore 6066 // the 2nd operand if it's undefined or zero. 6067 if (V2.getOpcode() == ISD::UNDEF || 6068 ISD::isBuildVectorAllZeros(V2.getNode())) 6069 return V1; 6070 6071 // Calculate the shuffle mask for the second input, shuffle it, and 6072 // OR it with the first shuffled input. 6073 pshufbMask.clear(); 6074 for (unsigned i = 0; i != 16; ++i) { 6075 int EltIdx = MaskVals[i]; 6076 EltIdx = (EltIdx < 16) ? 0x80 : EltIdx - 16; 6077 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8)); 6078 } 6079 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2, 6080 DAG.getNode(ISD::BUILD_VECTOR, dl, 6081 MVT::v16i8, &pshufbMask[0], 16)); 6082 return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2); 6083 } 6084 6085 // No SSSE3 - Calculate in place words and then fix all out of place words 6086 // With 0-16 extracts & inserts. Worst case is 16 bytes out of order from 6087 // the 16 different words that comprise the two doublequadword input vectors. 
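// For example, if result word i needs source bytes (Elt0, Elt1) = (5, 2): the
// word holding byte 2 is extracted and shifted left by 8, the word holding
// byte 5 is extracted and shifted right by 8, and the two halves are OR'd
// together before being inserted with pinsrw.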
6088 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 6089 V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2); 6090 SDValue NewV = V1; 6091 for (int i = 0; i != 8; ++i) { 6092 int Elt0 = MaskVals[i*2]; 6093 int Elt1 = MaskVals[i*2+1]; 6094 6095 // This word of the result is all undef, skip it. 6096 if (Elt0 < 0 && Elt1 < 0) 6097 continue; 6098 6099 // This word of the result is already in the correct place, skip it. 6100 if ((Elt0 == i*2) && (Elt1 == i*2+1)) 6101 continue; 6102 6103 SDValue Elt0Src = Elt0 < 16 ? V1 : V2; 6104 SDValue Elt1Src = Elt1 < 16 ? V1 : V2; 6105 SDValue InsElt; 6106 6107 // If Elt0 and Elt1 are defined, are consecutive, and can be load 6108 // using a single extract together, load it and store it. 6109 if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) { 6110 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src, 6111 DAG.getIntPtrConstant(Elt1 / 2)); 6112 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt, 6113 DAG.getIntPtrConstant(i)); 6114 continue; 6115 } 6116 6117 // If Elt1 is defined, extract it from the appropriate source. If the 6118 // source byte is not also odd, shift the extracted word left 8 bits 6119 // otherwise clear the bottom 8 bits if we need to do an or. 6120 if (Elt1 >= 0) { 6121 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src, 6122 DAG.getIntPtrConstant(Elt1 / 2)); 6123 if ((Elt1 & 1) == 0) 6124 InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt, 6125 DAG.getConstant(8, 6126 TLI.getShiftAmountTy(InsElt.getValueType()))); 6127 else if (Elt0 >= 0) 6128 InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt, 6129 DAG.getConstant(0xFF00, MVT::i16)); 6130 } 6131 // If Elt0 is defined, extract it from the appropriate source. If the 6132 // source byte is not also even, shift the extracted word right 8 bits. If 6133 // Elt1 was also defined, OR the extracted values together before 6134 // inserting them in the result. 6135 if (Elt0 >= 0) { 6136 SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, 6137 Elt0Src, DAG.getIntPtrConstant(Elt0 / 2)); 6138 if ((Elt0 & 1) != 0) 6139 InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0, 6140 DAG.getConstant(8, 6141 TLI.getShiftAmountTy(InsElt0.getValueType()))); 6142 else if (Elt1 >= 0) 6143 InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0, 6144 DAG.getConstant(0x00FF, MVT::i16)); 6145 InsElt = Elt1 >= 0 ? DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0) 6146 : InsElt0; 6147 } 6148 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt, 6149 DAG.getIntPtrConstant(i)); 6150 } 6151 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV); 6152} 6153 6154// v32i8 shuffles - Translate to VPSHUFB if possible. 6155static 6156SDValue LowerVECTOR_SHUFFLEv32i8(ShuffleVectorSDNode *SVOp, 6157 const X86Subtarget *Subtarget, 6158 SelectionDAG &DAG) { 6159 MVT VT = SVOp->getValueType(0).getSimpleVT(); 6160 SDValue V1 = SVOp->getOperand(0); 6161 SDValue V2 = SVOp->getOperand(1); 6162 DebugLoc dl = SVOp->getDebugLoc(); 6163 SmallVector<int, 32> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end()); 6164 6165 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; 6166 bool V1IsAllZero = ISD::isBuildVectorAllZeros(V1.getNode()); 6167 bool V2IsAllZero = ISD::isBuildVectorAllZeros(V2.getNode()); 6168 6169 // VPSHUFB may be generated if 6170 // (1) one of input vector is undefined or zeroinitializer. 6171 // The mask value 0x80 puts 0 in the corresponding slot of the vector. 
6172 // And (2) the mask indexes don't cross the 128-bit lane. 6173 if (VT != MVT::v32i8 || !Subtarget->hasInt256() || 6174 (!V2IsUndef && !V2IsAllZero && !V1IsAllZero)) 6175 return SDValue(); 6176 6177 if (V1IsAllZero && !V2IsAllZero) { 6178 CommuteVectorShuffleMask(MaskVals, 32); 6179 V1 = V2; 6180 } 6181 SmallVector<SDValue, 32> pshufbMask; 6182 for (unsigned i = 0; i != 32; i++) { 6183 int EltIdx = MaskVals[i]; 6184 if (EltIdx < 0 || EltIdx >= 32) 6185 EltIdx = 0x80; 6186 else { 6187 if ((EltIdx >= 16 && i < 16) || (EltIdx < 16 && i >= 16)) 6188 // Cross lane is not allowed. 6189 return SDValue(); 6190 EltIdx &= 0xf; 6191 } 6192 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8)); 6193 } 6194 return DAG.getNode(X86ISD::PSHUFB, dl, MVT::v32i8, V1, 6195 DAG.getNode(ISD::BUILD_VECTOR, dl, 6196 MVT::v32i8, &pshufbMask[0], 32)); 6197} 6198 6199/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide 6200/// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be 6201/// done when every pair / quad of shuffle mask elements point to elements in 6202/// the right sequence. e.g. 6203/// vector_shuffle X, Y, <2, 3, | 10, 11, | 0, 1, | 14, 15> 6204static 6205SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp, 6206 SelectionDAG &DAG) { 6207 MVT VT = SVOp->getValueType(0).getSimpleVT(); 6208 DebugLoc dl = SVOp->getDebugLoc(); 6209 unsigned NumElems = VT.getVectorNumElements(); 6210 MVT NewVT; 6211 unsigned Scale; 6212 switch (VT.SimpleTy) { 6213 default: llvm_unreachable("Unexpected!"); 6214 case MVT::v4f32: NewVT = MVT::v2f64; Scale = 2; break; 6215 case MVT::v4i32: NewVT = MVT::v2i64; Scale = 2; break; 6216 case MVT::v8i16: NewVT = MVT::v4i32; Scale = 2; break; 6217 case MVT::v16i8: NewVT = MVT::v4i32; Scale = 4; break; 6218 case MVT::v16i16: NewVT = MVT::v8i32; Scale = 2; break; 6219 case MVT::v32i8: NewVT = MVT::v8i32; Scale = 4; break; 6220 } 6221 6222 SmallVector<int, 8> MaskVec; 6223 for (unsigned i = 0; i != NumElems; i += Scale) { 6224 int StartIdx = -1; 6225 for (unsigned j = 0; j != Scale; ++j) { 6226 int EltIdx = SVOp->getMaskElt(i+j); 6227 if (EltIdx < 0) 6228 continue; 6229 if (StartIdx < 0) 6230 StartIdx = (EltIdx / Scale); 6231 if (EltIdx != (int)(StartIdx*Scale + j)) 6232 return SDValue(); 6233 } 6234 MaskVec.push_back(StartIdx); 6235 } 6236 6237 SDValue V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(0)); 6238 SDValue V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(1)); 6239 return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]); 6240} 6241 6242/// getVZextMovL - Return a zero-extending vector move low node. 6243/// 6244static SDValue getVZextMovL(MVT VT, EVT OpVT, 6245 SDValue SrcOp, SelectionDAG &DAG, 6246 const X86Subtarget *Subtarget, DebugLoc dl) { 6247 if (VT == MVT::v2f64 || VT == MVT::v4f32) { 6248 LoadSDNode *LD = NULL; 6249 if (!isScalarLoadToVector(SrcOp.getNode(), &LD)) 6250 LD = dyn_cast<LoadSDNode>(SrcOp); 6251 if (!LD) { 6252 // movssrr and movsdrr do not clear top bits. Try to use movd, movq 6253 // instead. 6254 MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32; 6255 if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) && 6256 SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR && 6257 SrcOp.getOperand(0).getOpcode() == ISD::BITCAST && 6258 SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) { 6259 // PR2108 6260 OpVT = (OpVT == MVT::v2f64) ? 
MVT::v2i64 : MVT::v4i32; 6261 return DAG.getNode(ISD::BITCAST, dl, VT, 6262 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT, 6263 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 6264 OpVT, 6265 SrcOp.getOperand(0) 6266 .getOperand(0)))); 6267 } 6268 } 6269 } 6270 6271 return DAG.getNode(ISD::BITCAST, dl, VT, 6272 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT, 6273 DAG.getNode(ISD::BITCAST, dl, 6274 OpVT, SrcOp))); 6275} 6276 6277/// LowerVECTOR_SHUFFLE_256 - Handle all 256-bit wide vectors shuffles 6278/// which could not be matched by any known target speficic shuffle 6279static SDValue 6280LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { 6281 6282 SDValue NewOp = Compact8x32ShuffleNode(SVOp, DAG); 6283 if (NewOp.getNode()) 6284 return NewOp; 6285 6286 MVT VT = SVOp->getValueType(0).getSimpleVT(); 6287 6288 unsigned NumElems = VT.getVectorNumElements(); 6289 unsigned NumLaneElems = NumElems / 2; 6290 6291 DebugLoc dl = SVOp->getDebugLoc(); 6292 MVT EltVT = VT.getVectorElementType(); 6293 MVT NVT = MVT::getVectorVT(EltVT, NumLaneElems); 6294 SDValue Output[2]; 6295 6296 SmallVector<int, 16> Mask; 6297 for (unsigned l = 0; l < 2; ++l) { 6298 // Build a shuffle mask for the output, discovering on the fly which 6299 // input vectors to use as shuffle operands (recorded in InputUsed). 6300 // If building a suitable shuffle vector proves too hard, then bail 6301 // out with UseBuildVector set. 6302 bool UseBuildVector = false; 6303 int InputUsed[2] = { -1, -1 }; // Not yet discovered. 6304 unsigned LaneStart = l * NumLaneElems; 6305 for (unsigned i = 0; i != NumLaneElems; ++i) { 6306 // The mask element. This indexes into the input. 6307 int Idx = SVOp->getMaskElt(i+LaneStart); 6308 if (Idx < 0) { 6309 // the mask element does not index into any input vector. 6310 Mask.push_back(-1); 6311 continue; 6312 } 6313 6314 // The input vector this mask element indexes into. 6315 int Input = Idx / NumLaneElems; 6316 6317 // Turn the index into an offset from the start of the input vector. 6318 Idx -= Input * NumLaneElems; 6319 6320 // Find or create a shuffle vector operand to hold this input. 6321 unsigned OpNo; 6322 for (OpNo = 0; OpNo < array_lengthof(InputUsed); ++OpNo) { 6323 if (InputUsed[OpNo] == Input) 6324 // This input vector is already an operand. 6325 break; 6326 if (InputUsed[OpNo] < 0) { 6327 // Create a new operand for this input vector. 6328 InputUsed[OpNo] = Input; 6329 break; 6330 } 6331 } 6332 6333 if (OpNo >= array_lengthof(InputUsed)) { 6334 // More than two input vectors used! Give up on trying to create a 6335 // shuffle vector. Insert all elements into a BUILD_VECTOR instead. 6336 UseBuildVector = true; 6337 break; 6338 } 6339 6340 // Add the mask index for the new shuffle vector. 6341 Mask.push_back(Idx + OpNo * NumLaneElems); 6342 } 6343 6344 if (UseBuildVector) { 6345 SmallVector<SDValue, 16> SVOps; 6346 for (unsigned i = 0; i != NumLaneElems; ++i) { 6347 // The mask element. This indexes into the input. 6348 int Idx = SVOp->getMaskElt(i+LaneStart); 6349 if (Idx < 0) { 6350 SVOps.push_back(DAG.getUNDEF(EltVT)); 6351 continue; 6352 } 6353 6354 // The input vector this mask element indexes into. 6355 int Input = Idx / NumElems; 6356 6357 // Turn the index into an offset from the start of the input vector. 6358 Idx -= Input * NumElems; 6359 6360 // Extract the vector element by hand. 6361 SVOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 6362 SVOp->getOperand(Input), 6363 DAG.getIntPtrConstant(Idx))); 6364 } 6365 6366 // Construct the output using a BUILD_VECTOR. 
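      // This point is only reached when one 128-bit lane of the result needs
      // elements from more than two of the four 128-bit source halves, e.g. a
      // v8i32 low-lane mask of <0, 4, 8, 12> pulls one element from each half
      // of V1 and V2, so no single two-input 128-bit shuffle can produce it.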
6367 Output[l] = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, &SVOps[0], 6368 SVOps.size()); 6369 } else if (InputUsed[0] < 0) { 6370 // No input vectors were used! The result is undefined. 6371 Output[l] = DAG.getUNDEF(NVT); 6372 } else { 6373 SDValue Op0 = Extract128BitVector(SVOp->getOperand(InputUsed[0] / 2), 6374 (InputUsed[0] % 2) * NumLaneElems, 6375 DAG, dl); 6376 // If only one input was used, use an undefined vector for the other. 6377 SDValue Op1 = (InputUsed[1] < 0) ? DAG.getUNDEF(NVT) : 6378 Extract128BitVector(SVOp->getOperand(InputUsed[1] / 2), 6379 (InputUsed[1] % 2) * NumLaneElems, DAG, dl); 6380 // At least one input vector was used. Create a new shuffle vector. 6381 Output[l] = DAG.getVectorShuffle(NVT, dl, Op0, Op1, &Mask[0]); 6382 } 6383 6384 Mask.clear(); 6385 } 6386 6387 // Concatenate the result back 6388 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Output[0], Output[1]); 6389} 6390 6391/// LowerVECTOR_SHUFFLE_128v4 - Handle all 128-bit wide vectors with 6392/// 4 elements, and match them with several different shuffle types. 6393static SDValue 6394LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { 6395 SDValue V1 = SVOp->getOperand(0); 6396 SDValue V2 = SVOp->getOperand(1); 6397 DebugLoc dl = SVOp->getDebugLoc(); 6398 MVT VT = SVOp->getValueType(0).getSimpleVT(); 6399 6400 assert(VT.is128BitVector() && "Unsupported vector size"); 6401 6402 std::pair<int, int> Locs[4]; 6403 int Mask1[] = { -1, -1, -1, -1 }; 6404 SmallVector<int, 8> PermMask(SVOp->getMask().begin(), SVOp->getMask().end()); 6405 6406 unsigned NumHi = 0; 6407 unsigned NumLo = 0; 6408 for (unsigned i = 0; i != 4; ++i) { 6409 int Idx = PermMask[i]; 6410 if (Idx < 0) { 6411 Locs[i] = std::make_pair(-1, -1); 6412 } else { 6413 assert(Idx < 8 && "Invalid VECTOR_SHUFFLE index!"); 6414 if (Idx < 4) { 6415 Locs[i] = std::make_pair(0, NumLo); 6416 Mask1[NumLo] = Idx; 6417 NumLo++; 6418 } else { 6419 Locs[i] = std::make_pair(1, NumHi); 6420 if (2+NumHi < 4) 6421 Mask1[2+NumHi] = Idx; 6422 NumHi++; 6423 } 6424 } 6425 } 6426 6427 if (NumLo <= 2 && NumHi <= 2) { 6428 // If no more than two elements come from either vector. This can be 6429 // implemented with two shuffles. First shuffle gather the elements. 6430 // The second shuffle, which takes the first shuffle as both of its 6431 // vector operands, put the elements into the right order. 6432 V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]); 6433 6434 int Mask2[] = { -1, -1, -1, -1 }; 6435 6436 for (unsigned i = 0; i != 4; ++i) 6437 if (Locs[i].first != -1) { 6438 unsigned Idx = (i < 2) ? 0 : 4; 6439 Idx += Locs[i].first * 2 + Locs[i].second; 6440 Mask2[i] = Idx; 6441 } 6442 6443 return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]); 6444 } 6445 6446 if (NumLo == 3 || NumHi == 3) { 6447 // Otherwise, we must have three elements from one vector, call it X, and 6448 // one element from the other, call it Y. First, use a shufps to build an 6449 // intermediate vector with the one element from Y and the element from X 6450 // that will be in the same half in the final destination (the indexes don't 6451 // matter). Then, use a shufps to build the final vector, taking the half 6452 // containing the element from Y from the intermediate, and the other half 6453 // from X. 6454 if (NumHi == 3) { 6455 // Normalize it so the 3 elements come from V1. 6456 CommuteVectorShuffleMask(PermMask, 4); 6457 std::swap(V1, V2); 6458 } 6459 6460 // Find the element from V2. 
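    // E.g. (illustrative) for PermMask <0,1,6,3> the element from V2 is the
    // 6 at position 2, so HiIndex becomes 2: the first shufps builds
    // <V2[2], undef, V1[3], undef> and the second produces
    // <V1[0], V1[1], V2[2], V1[3]>, which matches the original mask.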
6461 unsigned HiIndex; 6462 for (HiIndex = 0; HiIndex < 3; ++HiIndex) { 6463 int Val = PermMask[HiIndex]; 6464 if (Val < 0) 6465 continue; 6466 if (Val >= 4) 6467 break; 6468 } 6469 6470 Mask1[0] = PermMask[HiIndex]; 6471 Mask1[1] = -1; 6472 Mask1[2] = PermMask[HiIndex^1]; 6473 Mask1[3] = -1; 6474 V2 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]); 6475 6476 if (HiIndex >= 2) { 6477 Mask1[0] = PermMask[0]; 6478 Mask1[1] = PermMask[1]; 6479 Mask1[2] = HiIndex & 1 ? 6 : 4; 6480 Mask1[3] = HiIndex & 1 ? 4 : 6; 6481 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]); 6482 } 6483 6484 Mask1[0] = HiIndex & 1 ? 2 : 0; 6485 Mask1[1] = HiIndex & 1 ? 0 : 2; 6486 Mask1[2] = PermMask[2]; 6487 Mask1[3] = PermMask[3]; 6488 if (Mask1[2] >= 0) 6489 Mask1[2] += 4; 6490 if (Mask1[3] >= 0) 6491 Mask1[3] += 4; 6492 return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]); 6493 } 6494 6495 // Break it into (shuffle shuffle_hi, shuffle_lo). 6496 int LoMask[] = { -1, -1, -1, -1 }; 6497 int HiMask[] = { -1, -1, -1, -1 }; 6498 6499 int *MaskPtr = LoMask; 6500 unsigned MaskIdx = 0; 6501 unsigned LoIdx = 0; 6502 unsigned HiIdx = 2; 6503 for (unsigned i = 0; i != 4; ++i) { 6504 if (i == 2) { 6505 MaskPtr = HiMask; 6506 MaskIdx = 1; 6507 LoIdx = 0; 6508 HiIdx = 2; 6509 } 6510 int Idx = PermMask[i]; 6511 if (Idx < 0) { 6512 Locs[i] = std::make_pair(-1, -1); 6513 } else if (Idx < 4) { 6514 Locs[i] = std::make_pair(MaskIdx, LoIdx); 6515 MaskPtr[LoIdx] = Idx; 6516 LoIdx++; 6517 } else { 6518 Locs[i] = std::make_pair(MaskIdx, HiIdx); 6519 MaskPtr[HiIdx] = Idx; 6520 HiIdx++; 6521 } 6522 } 6523 6524 SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]); 6525 SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]); 6526 int MaskOps[] = { -1, -1, -1, -1 }; 6527 for (unsigned i = 0; i != 4; ++i) 6528 if (Locs[i].first != -1) 6529 MaskOps[i] = Locs[i].first * 4 + Locs[i].second; 6530 return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]); 6531} 6532 6533static bool MayFoldVectorLoad(SDValue V) { 6534 while (V.hasOneUse() && V.getOpcode() == ISD::BITCAST) 6535 V = V.getOperand(0); 6536 6537 if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR) 6538 V = V.getOperand(0); 6539 if (V.hasOneUse() && V.getOpcode() == ISD::BUILD_VECTOR && 6540 V.getNumOperands() == 2 && V.getOperand(1).getOpcode() == ISD::UNDEF) 6541 // BUILD_VECTOR (load), undef 6542 V = V.getOperand(0); 6543 6544 return MayFoldLoad(V); 6545} 6546 6547static 6548SDValue getMOVDDup(SDValue &Op, DebugLoc &dl, SDValue V1, SelectionDAG &DAG) { 6549 EVT VT = Op.getValueType(); 6550 6551 // Canonizalize to v2f64. 
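  // E.g. a v2i64 splat of element 0 becomes
  // (v2i64 (bitcast (movddup (v2f64 (bitcast V1))))); MOVDDUP duplicates the
  // low 64 bits, so the <0,0> mask is preserved across the bitcasts.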
6552 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1); 6553 return DAG.getNode(ISD::BITCAST, dl, VT, 6554 getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64, 6555 V1, DAG)); 6556} 6557 6558static 6559SDValue getMOVLowToHigh(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, 6560 bool HasSSE2) { 6561 SDValue V1 = Op.getOperand(0); 6562 SDValue V2 = Op.getOperand(1); 6563 EVT VT = Op.getValueType(); 6564 6565 assert(VT != MVT::v2i64 && "unsupported shuffle type"); 6566 6567 if (HasSSE2 && VT == MVT::v2f64) 6568 return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG); 6569 6570 // v4f32 or v4i32: canonizalized to v4f32 (which is legal for SSE1) 6571 return DAG.getNode(ISD::BITCAST, dl, VT, 6572 getTargetShuffleNode(X86ISD::MOVLHPS, dl, MVT::v4f32, 6573 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V1), 6574 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V2), DAG)); 6575} 6576 6577static 6578SDValue getMOVHighToLow(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG) { 6579 SDValue V1 = Op.getOperand(0); 6580 SDValue V2 = Op.getOperand(1); 6581 EVT VT = Op.getValueType(); 6582 6583 assert((VT == MVT::v4i32 || VT == MVT::v4f32) && 6584 "unsupported shuffle type"); 6585 6586 if (V2.getOpcode() == ISD::UNDEF) 6587 V2 = V1; 6588 6589 // v4i32 or v4f32 6590 return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG); 6591} 6592 6593static 6594SDValue getMOVLP(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, bool HasSSE2) { 6595 SDValue V1 = Op.getOperand(0); 6596 SDValue V2 = Op.getOperand(1); 6597 EVT VT = Op.getValueType(); 6598 unsigned NumElems = VT.getVectorNumElements(); 6599 6600 // Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second 6601 // operand of these instructions is only memory, so check if there's a 6602 // potencial load folding here, otherwise use SHUFPS or MOVSD to match the 6603 // same masks. 6604 bool CanFoldLoad = false; 6605 6606 // Trivial case, when V2 comes from a load. 6607 if (MayFoldVectorLoad(V2)) 6608 CanFoldLoad = true; 6609 6610 // When V1 is a load, it can be folded later into a store in isel, example: 6611 // (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1) 6612 // turns into: 6613 // (MOVLPSmr addr:$src1, VR128:$src2) 6614 // So, recognize this potential and also use MOVLPS or MOVLPD 6615 else if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op)) 6616 CanFoldLoad = true; 6617 6618 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 6619 if (CanFoldLoad) { 6620 if (HasSSE2 && NumElems == 2) 6621 return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG); 6622 6623 if (NumElems == 4) 6624 // If we don't care about the second element, proceed to use movss. 6625 if (SVOp->getMaskElt(1) != -1) 6626 return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG); 6627 } 6628 6629 // movl and movlp will both match v2i64, but v2i64 is never matched by 6630 // movl earlier because we make it strict to avoid messing with the movlp load 6631 // folding logic (see the code above getMOVLP call). Match it here then, 6632 // this is horrible, but will stay like this until we move all shuffle 6633 // matching to x86 specific nodes. Note that for the 1st condition all 6634 // types are matched with movsd. 
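  // E.g. (illustrative) a v2i64 shuffle with mask <2,1> is selected as MOVSD
  // here, while a v4f32 mask of <4,1,2,3> that was not folded as MOVLPS above
  // falls through to MOVSS.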
6635 if (HasSSE2) { 6636 // FIXME: isMOVLMask should be checked and matched before getMOVLP, 6637 // as to remove this logic from here, as much as possible 6638 if (NumElems == 2 || !isMOVLMask(SVOp->getMask(), VT)) 6639 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG); 6640 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG); 6641 } 6642 6643 assert(VT != MVT::v4i32 && "unsupported shuffle type"); 6644 6645 // Invert the operand order and use SHUFPS to match it. 6646 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V2, V1, 6647 getShuffleSHUFImmediate(SVOp), DAG); 6648} 6649 6650// Reduce a vector shuffle to zext. 6651SDValue 6652X86TargetLowering::LowerVectorIntExtend(SDValue Op, SelectionDAG &DAG) const { 6653 // PMOVZX is only available from SSE41. 6654 if (!Subtarget->hasSSE41()) 6655 return SDValue(); 6656 6657 EVT VT = Op.getValueType(); 6658 6659 // Only AVX2 support 256-bit vector integer extending. 6660 if (!Subtarget->hasInt256() && VT.is256BitVector()) 6661 return SDValue(); 6662 6663 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 6664 DebugLoc DL = Op.getDebugLoc(); 6665 SDValue V1 = Op.getOperand(0); 6666 SDValue V2 = Op.getOperand(1); 6667 unsigned NumElems = VT.getVectorNumElements(); 6668 6669 // Extending is an unary operation and the element type of the source vector 6670 // won't be equal to or larger than i64. 6671 if (V2.getOpcode() != ISD::UNDEF || !VT.isInteger() || 6672 VT.getVectorElementType() == MVT::i64) 6673 return SDValue(); 6674 6675 // Find the expansion ratio, e.g. expanding from i8 to i32 has a ratio of 4. 6676 unsigned Shift = 1; // Start from 2, i.e. 1 << 1. 6677 while ((1U << Shift) < NumElems) { 6678 if (SVOp->getMaskElt(1U << Shift) == 1) 6679 break; 6680 Shift += 1; 6681 // The maximal ratio is 8, i.e. from i8 to i64. 6682 if (Shift > 3) 6683 return SDValue(); 6684 } 6685 6686 // Check the shuffle mask. 6687 unsigned Mask = (1U << Shift) - 1; 6688 for (unsigned i = 0; i != NumElems; ++i) { 6689 int EltIdx = SVOp->getMaskElt(i); 6690 if ((i & Mask) != 0 && EltIdx != -1) 6691 return SDValue(); 6692 if ((i & Mask) == 0 && (unsigned)EltIdx != (i >> Shift)) 6693 return SDValue(); 6694 } 6695 6696 LLVMContext *Context = DAG.getContext(); 6697 unsigned NBits = VT.getVectorElementType().getSizeInBits() << Shift; 6698 EVT NeVT = EVT::getIntegerVT(*Context, NBits); 6699 EVT NVT = EVT::getVectorVT(*Context, NeVT, NumElems >> Shift); 6700 6701 if (!isTypeLegal(NVT)) 6702 return SDValue(); 6703 6704 // Simplify the operand as it's prepared to be fed into shuffle. 6705 unsigned SignificantBits = NVT.getSizeInBits() >> Shift; 6706 if (V1.getOpcode() == ISD::BITCAST && 6707 V1.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR && 6708 V1.getOperand(0).getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT && 6709 V1.getOperand(0) 6710 .getOperand(0).getValueType().getSizeInBits() == SignificantBits) { 6711 // (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast x) 6712 SDValue V = V1.getOperand(0).getOperand(0).getOperand(0); 6713 ConstantSDNode *CIdx = 6714 dyn_cast<ConstantSDNode>(V1.getOperand(0).getOperand(0).getOperand(1)); 6715 // If it's foldable, i.e. normal load with single use, we will let code 6716 // selection to fold it. Otherwise, we will short the conversion sequence. 6717 if (CIdx && CIdx->getZExtValue() == 0 && 6718 (!ISD::isNormalLoad(V.getNode()) || !V.hasOneUse())) { 6719 if (V.getValueSizeInBits() > V1.getValueSizeInBits()) { 6720 // The "ext_vec_elt" node is wider than the result node. 
6721 // In this case we should extract subvector from V. 6722 // (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast (extract_subvector x)). 6723 unsigned Ratio = V.getValueSizeInBits() / V1.getValueSizeInBits(); 6724 EVT FullVT = V.getValueType(); 6725 EVT SubVecVT = EVT::getVectorVT(*Context, 6726 FullVT.getVectorElementType(), 6727 FullVT.getVectorNumElements()/Ratio); 6728 V = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, V, 6729 DAG.getIntPtrConstant(0)); 6730 } 6731 V1 = DAG.getNode(ISD::BITCAST, DL, V1.getValueType(), V); 6732 } 6733 } 6734 6735 return DAG.getNode(ISD::BITCAST, DL, VT, 6736 DAG.getNode(X86ISD::VZEXT, DL, NVT, V1)); 6737} 6738 6739SDValue 6740X86TargetLowering::NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG) const { 6741 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 6742 MVT VT = Op.getValueType().getSimpleVT(); 6743 DebugLoc dl = Op.getDebugLoc(); 6744 SDValue V1 = Op.getOperand(0); 6745 SDValue V2 = Op.getOperand(1); 6746 6747 if (isZeroShuffle(SVOp)) 6748 return getZeroVector(VT, Subtarget, DAG, dl); 6749 6750 // Handle splat operations 6751 if (SVOp->isSplat()) { 6752 // Use vbroadcast whenever the splat comes from a foldable load 6753 SDValue Broadcast = LowerVectorBroadcast(Op, DAG); 6754 if (Broadcast.getNode()) 6755 return Broadcast; 6756 } 6757 6758 // Check integer expanding shuffles. 6759 SDValue NewOp = LowerVectorIntExtend(Op, DAG); 6760 if (NewOp.getNode()) 6761 return NewOp; 6762 6763 // If the shuffle can be profitably rewritten as a narrower shuffle, then 6764 // do it! 6765 if (VT == MVT::v8i16 || VT == MVT::v16i8 || 6766 VT == MVT::v16i16 || VT == MVT::v32i8) { 6767 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG); 6768 if (NewOp.getNode()) 6769 return DAG.getNode(ISD::BITCAST, dl, VT, NewOp); 6770 } else if ((VT == MVT::v4i32 || 6771 (VT == MVT::v4f32 && Subtarget->hasSSE2()))) { 6772 // FIXME: Figure out a cleaner way to do this. 6773 // Try to make use of movq to zero out the top part. 6774 if (ISD::isBuildVectorAllZeros(V2.getNode())) { 6775 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG); 6776 if (NewOp.getNode()) { 6777 MVT NewVT = NewOp.getValueType().getSimpleVT(); 6778 if (isCommutedMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), 6779 NewVT, true, false)) 6780 return getVZextMovL(VT, NewVT, NewOp.getOperand(0), 6781 DAG, Subtarget, dl); 6782 } 6783 } else if (ISD::isBuildVectorAllZeros(V1.getNode())) { 6784 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG); 6785 if (NewOp.getNode()) { 6786 MVT NewVT = NewOp.getValueType().getSimpleVT(); 6787 if (isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), NewVT)) 6788 return getVZextMovL(VT, NewVT, NewOp.getOperand(1), 6789 DAG, Subtarget, dl); 6790 } 6791 } 6792 } 6793 return SDValue(); 6794} 6795 6796SDValue 6797X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const { 6798 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 6799 SDValue V1 = Op.getOperand(0); 6800 SDValue V2 = Op.getOperand(1); 6801 MVT VT = Op.getValueType().getSimpleVT(); 6802 DebugLoc dl = Op.getDebugLoc(); 6803 unsigned NumElems = VT.getVectorNumElements(); 6804 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF; 6805 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; 6806 bool V1IsSplat = false; 6807 bool V2IsSplat = false; 6808 bool HasSSE2 = Subtarget->hasSSE2(); 6809 bool HasFp256 = Subtarget->hasFp256(); 6810 bool HasInt256 = Subtarget->hasInt256(); 6811 MachineFunction &MF = DAG.getMachineFunction(); 6812 bool OptForSize = MF.getFunction()->getAttributes(). 
6813 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize); 6814 6815 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles"); 6816 6817 if (V1IsUndef && V2IsUndef) 6818 return DAG.getUNDEF(VT); 6819 6820 assert(!V1IsUndef && "Op 1 of shuffle should not be undef"); 6821 6822 // Vector shuffle lowering takes 3 steps: 6823 // 6824 // 1) Normalize the input vectors. Here splats, zeroed vectors, profitable 6825 // narrowing and commutation of operands should be handled. 6826 // 2) Matching of shuffles with known shuffle masks to x86 target specific 6827 // shuffle nodes. 6828 // 3) Rewriting of unmatched masks into new generic shuffle operations, 6829 // so the shuffle can be broken into other shuffles and the legalizer can 6830 // try the lowering again. 6831 // 6832 // The general idea is that no vector_shuffle operation should be left to 6833 // be matched during isel, all of them must be converted to a target specific 6834 // node here. 6835 6836 // Normalize the input vectors. Here splats, zeroed vectors, profitable 6837 // narrowing and commutation of operands should be handled. The actual code 6838 // doesn't include all of those, work in progress... 6839 SDValue NewOp = NormalizeVectorShuffle(Op, DAG); 6840 if (NewOp.getNode()) 6841 return NewOp; 6842 6843 SmallVector<int, 8> M(SVOp->getMask().begin(), SVOp->getMask().end()); 6844 6845 // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and 6846 // unpckh_undef). Only use pshufd if speed is more important than size. 6847 if (OptForSize && isUNPCKL_v_undef_Mask(M, VT, HasInt256)) 6848 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG); 6849 if (OptForSize && isUNPCKH_v_undef_Mask(M, VT, HasInt256)) 6850 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG); 6851 6852 if (isMOVDDUPMask(M, VT) && Subtarget->hasSSE3() && 6853 V2IsUndef && MayFoldVectorLoad(V1)) 6854 return getMOVDDup(Op, dl, V1, DAG); 6855 6856 if (isMOVHLPS_v_undef_Mask(M, VT)) 6857 return getMOVHighToLow(Op, dl, DAG); 6858 6859 // Use to match splats 6860 if (HasSSE2 && isUNPCKHMask(M, VT, HasInt256) && V2IsUndef && 6861 (VT == MVT::v2f64 || VT == MVT::v2i64)) 6862 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG); 6863 6864 if (isPSHUFDMask(M, VT)) { 6865 // The actual implementation will match the mask in the if above and then 6866 // during isel it can match several different instructions, not only pshufd 6867 // as its name says, sad but true, emulate the behavior for now... 6868 if (isMOVDDUPMask(M, VT) && ((VT == MVT::v4f32 || VT == MVT::v2i64))) 6869 return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG); 6870 6871 unsigned TargetMask = getShuffleSHUFImmediate(SVOp); 6872 6873 if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32)) 6874 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG); 6875 6876 if (HasFp256 && (VT == MVT::v4f32 || VT == MVT::v2f64)) 6877 return getTargetShuffleNode(X86ISD::VPERMILP, dl, VT, V1, TargetMask, 6878 DAG); 6879 6880 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V1, 6881 TargetMask, DAG); 6882 } 6883 6884 if (isPALIGNRMask(M, VT, Subtarget)) 6885 return getTargetShuffleNode(X86ISD::PALIGNR, dl, VT, V1, V2, 6886 getShufflePALIGNRImmediate(SVOp), 6887 DAG); 6888 6889 // Check if this can be converted into a logical shift. 
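  // E.g. shuffling (v4i32 X, zeroinitializer) with mask <1,2,3,4> is a
  // whole-vector right shift by one element (psrldq $4), and <4,0,1,2> is the
  // corresponding left shift (pslldq $4).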
6890 bool isLeft = false; 6891 unsigned ShAmt = 0; 6892 SDValue ShVal; 6893 bool isShift = HasSSE2 && isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt); 6894 if (isShift && ShVal.hasOneUse()) { 6895 // If the shifted value has multiple uses, it may be cheaper to use 6896 // v_set0 + movlhps or movhlps, etc. 6897 MVT EltVT = VT.getVectorElementType(); 6898 ShAmt *= EltVT.getSizeInBits(); 6899 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl); 6900 } 6901 6902 if (isMOVLMask(M, VT)) { 6903 if (ISD::isBuildVectorAllZeros(V1.getNode())) 6904 return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl); 6905 if (!isMOVLPMask(M, VT)) { 6906 if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64)) 6907 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG); 6908 6909 if (VT == MVT::v4i32 || VT == MVT::v4f32) 6910 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG); 6911 } 6912 } 6913 6914 // FIXME: fold these into legal mask. 6915 if (isMOVLHPSMask(M, VT) && !isUNPCKLMask(M, VT, HasInt256)) 6916 return getMOVLowToHigh(Op, dl, DAG, HasSSE2); 6917 6918 if (isMOVHLPSMask(M, VT)) 6919 return getMOVHighToLow(Op, dl, DAG); 6920 6921 if (V2IsUndef && isMOVSHDUPMask(M, VT, Subtarget)) 6922 return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG); 6923 6924 if (V2IsUndef && isMOVSLDUPMask(M, VT, Subtarget)) 6925 return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG); 6926 6927 if (isMOVLPMask(M, VT)) 6928 return getMOVLP(Op, dl, DAG, HasSSE2); 6929 6930 if (ShouldXformToMOVHLPS(M, VT) || 6931 ShouldXformToMOVLP(V1.getNode(), V2.getNode(), M, VT)) 6932 return CommuteVectorShuffle(SVOp, DAG); 6933 6934 if (isShift) { 6935 // No better options. Use a vshldq / vsrldq. 6936 MVT EltVT = VT.getVectorElementType(); 6937 ShAmt *= EltVT.getSizeInBits(); 6938 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl); 6939 } 6940 6941 bool Commuted = false; 6942 // FIXME: This should also accept a bitcast of a splat? Be careful, not 6943 // 1,1,1,1 -> v8i16 though. 6944 V1IsSplat = isSplatVector(V1.getNode()); 6945 V2IsSplat = isSplatVector(V2.getNode()); 6946 6947 // Canonicalize the splat or undef, if present, to be on the RHS. 6948 if (!V2IsUndef && V1IsSplat && !V2IsSplat) { 6949 CommuteVectorShuffleMask(M, NumElems); 6950 std::swap(V1, V2); 6951 std::swap(V1IsSplat, V2IsSplat); 6952 Commuted = true; 6953 } 6954 6955 if (isCommutedMOVLMask(M, VT, V2IsSplat, V2IsUndef)) { 6956 // Shuffling low element of v1 into undef, just return v1. 6957 if (V2IsUndef) 6958 return V1; 6959 // If V2 is a splat, the mask may be malformed such as <4,3,3,3>, which 6960 // the instruction selector will not match, so get a canonical MOVL with 6961 // swapped operands to undo the commute. 6962 return getMOVL(DAG, dl, VT, V2, V1); 6963 } 6964 6965 if (isUNPCKLMask(M, VT, HasInt256)) 6966 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG); 6967 6968 if (isUNPCKHMask(M, VT, HasInt256)) 6969 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG); 6970 6971 if (V2IsSplat) { 6972 // Normalize mask so all entries that point to V2 points to its first 6973 // element then try to match unpck{h|l} again. 
If match, return a 6974 // new vector_shuffle with the corrected mask.p 6975 SmallVector<int, 8> NewMask(M.begin(), M.end()); 6976 NormalizeMask(NewMask, NumElems); 6977 if (isUNPCKLMask(NewMask, VT, HasInt256, true)) 6978 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG); 6979 if (isUNPCKHMask(NewMask, VT, HasInt256, true)) 6980 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG); 6981 } 6982 6983 if (Commuted) { 6984 // Commute is back and try unpck* again. 6985 // FIXME: this seems wrong. 6986 CommuteVectorShuffleMask(M, NumElems); 6987 std::swap(V1, V2); 6988 std::swap(V1IsSplat, V2IsSplat); 6989 Commuted = false; 6990 6991 if (isUNPCKLMask(M, VT, HasInt256)) 6992 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG); 6993 6994 if (isUNPCKHMask(M, VT, HasInt256)) 6995 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG); 6996 } 6997 6998 // Normalize the node to match x86 shuffle ops if needed 6999 if (!V2IsUndef && (isSHUFPMask(M, VT, HasFp256, /* Commuted */ true))) 7000 return CommuteVectorShuffle(SVOp, DAG); 7001 7002 // The checks below are all present in isShuffleMaskLegal, but they are 7003 // inlined here right now to enable us to directly emit target specific 7004 // nodes, and remove one by one until they don't return Op anymore. 7005 7006 if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) && 7007 SVOp->getSplatIndex() == 0 && V2IsUndef) { 7008 if (VT == MVT::v2f64 || VT == MVT::v2i64) 7009 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG); 7010 } 7011 7012 if (isPSHUFHWMask(M, VT, HasInt256)) 7013 return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1, 7014 getShufflePSHUFHWImmediate(SVOp), 7015 DAG); 7016 7017 if (isPSHUFLWMask(M, VT, HasInt256)) 7018 return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1, 7019 getShufflePSHUFLWImmediate(SVOp), 7020 DAG); 7021 7022 if (isSHUFPMask(M, VT, HasFp256)) 7023 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V2, 7024 getShuffleSHUFImmediate(SVOp), DAG); 7025 7026 if (isUNPCKL_v_undef_Mask(M, VT, HasInt256)) 7027 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG); 7028 if (isUNPCKH_v_undef_Mask(M, VT, HasInt256)) 7029 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG); 7030 7031 //===--------------------------------------------------------------------===// 7032 // Generate target specific nodes for 128 or 256-bit shuffles only 7033 // supported in the AVX instruction set. 7034 // 7035 7036 // Handle VMOVDDUPY permutations 7037 if (V2IsUndef && isMOVDDUPYMask(M, VT, HasFp256)) 7038 return getTargetShuffleNode(X86ISD::MOVDDUP, dl, VT, V1, DAG); 7039 7040 // Handle VPERMILPS/D* permutations 7041 if (isVPERMILPMask(M, VT, HasFp256)) { 7042 if (HasInt256 && VT == MVT::v8i32) 7043 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, 7044 getShuffleSHUFImmediate(SVOp), DAG); 7045 return getTargetShuffleNode(X86ISD::VPERMILP, dl, VT, V1, 7046 getShuffleSHUFImmediate(SVOp), DAG); 7047 } 7048 7049 // Handle VPERM2F128/VPERM2I128 permutations 7050 if (isVPERM2X128Mask(M, VT, HasFp256)) 7051 return getTargetShuffleNode(X86ISD::VPERM2X128, dl, VT, V1, 7052 V2, getShuffleVPERM2X128Immediate(SVOp), DAG); 7053 7054 SDValue BlendOp = LowerVECTOR_SHUFFLEtoBlend(SVOp, Subtarget, DAG); 7055 if (BlendOp.getNode()) 7056 return BlendOp; 7057 7058 if (V2IsUndef && HasInt256 && (VT == MVT::v8i32 || VT == MVT::v8f32)) { 7059 SmallVector<SDValue, 8> permclMask; 7060 for (unsigned i = 0; i != 8; ++i) { 7061 permclMask.push_back(DAG.getConstant((M[i]>=0) ? 
M[i] : 0, MVT::i32)); 7062 } 7063 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, 7064 &permclMask[0], 8); 7065 // Bitcast is for VPERMPS since mask is v8i32 but node takes v8f32 7066 return DAG.getNode(X86ISD::VPERMV, dl, VT, 7067 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V1); 7068 } 7069 7070 if (V2IsUndef && HasInt256 && (VT == MVT::v4i64 || VT == MVT::v4f64)) 7071 return getTargetShuffleNode(X86ISD::VPERMI, dl, VT, V1, 7072 getShuffleCLImmediate(SVOp), DAG); 7073 7074 //===--------------------------------------------------------------------===// 7075 // Since no target specific shuffle was selected for this generic one, 7076 // lower it into other known shuffles. FIXME: this isn't true yet, but 7077 // this is the plan. 7078 // 7079 7080 // Handle v8i16 specifically since SSE can do byte extraction and insertion. 7081 if (VT == MVT::v8i16) { 7082 SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, Subtarget, DAG); 7083 if (NewOp.getNode()) 7084 return NewOp; 7085 } 7086 7087 if (VT == MVT::v16i8) { 7088 SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, DAG, *this); 7089 if (NewOp.getNode()) 7090 return NewOp; 7091 } 7092 7093 if (VT == MVT::v32i8) { 7094 SDValue NewOp = LowerVECTOR_SHUFFLEv32i8(SVOp, Subtarget, DAG); 7095 if (NewOp.getNode()) 7096 return NewOp; 7097 } 7098 7099 // Handle all 128-bit wide vectors with 4 elements, and match them with 7100 // several different shuffle types. 7101 if (NumElems == 4 && VT.is128BitVector()) 7102 return LowerVECTOR_SHUFFLE_128v4(SVOp, DAG); 7103 7104 // Handle general 256-bit shuffles 7105 if (VT.is256BitVector()) 7106 return LowerVECTOR_SHUFFLE_256(SVOp, DAG); 7107 7108 return SDValue(); 7109} 7110 7111static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) { 7112 MVT VT = Op.getValueType().getSimpleVT(); 7113 DebugLoc dl = Op.getDebugLoc(); 7114 7115 if (!Op.getOperand(0).getValueType().getSimpleVT().is128BitVector()) 7116 return SDValue(); 7117 7118 if (VT.getSizeInBits() == 8) { 7119 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32, 7120 Op.getOperand(0), Op.getOperand(1)); 7121 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract, 7122 DAG.getValueType(VT)); 7123 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert); 7124 } 7125 7126 if (VT.getSizeInBits() == 16) { 7127 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 7128 // If Idx is 0, it's cheaper to do a move instead of a pextrw. 7129 if (Idx == 0) 7130 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, 7131 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, 7132 DAG.getNode(ISD::BITCAST, dl, 7133 MVT::v4i32, 7134 Op.getOperand(0)), 7135 Op.getOperand(1))); 7136 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32, 7137 Op.getOperand(0), Op.getOperand(1)); 7138 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract, 7139 DAG.getValueType(VT)); 7140 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert); 7141 } 7142 7143 if (VT == MVT::f32) { 7144 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy 7145 // the result back to FR32 register. It's only worth matching if the 7146 // result has a single use which is a store or a bitcast to i32. And in 7147 // the case of a store, it's not worth it if the index is a constant 0, 7148 // because a MOVSSmr can be used instead, which is smaller and faster. 
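    // E.g. extracting element 2 of a v4f32 straight into a store can use
    // extractps $2 with a memory destination, whereas extracting element 0
    // for a store is better done as a plain movss.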
7149 if (!Op.hasOneUse()) 7150 return SDValue(); 7151 SDNode *User = *Op.getNode()->use_begin(); 7152 if ((User->getOpcode() != ISD::STORE || 7153 (isa<ConstantSDNode>(Op.getOperand(1)) && 7154 cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) && 7155 (User->getOpcode() != ISD::BITCAST || 7156 User->getValueType(0) != MVT::i32)) 7157 return SDValue(); 7158 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, 7159 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, 7160 Op.getOperand(0)), 7161 Op.getOperand(1)); 7162 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract); 7163 } 7164 7165 if (VT == MVT::i32 || VT == MVT::i64) { 7166 // ExtractPS/pextrq works with constant index. 7167 if (isa<ConstantSDNode>(Op.getOperand(1))) 7168 return Op; 7169 } 7170 return SDValue(); 7171} 7172 7173SDValue 7174X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, 7175 SelectionDAG &DAG) const { 7176 if (!isa<ConstantSDNode>(Op.getOperand(1))) 7177 return SDValue(); 7178 7179 SDValue Vec = Op.getOperand(0); 7180 MVT VecVT = Vec.getValueType().getSimpleVT(); 7181 7182 // If this is a 256-bit vector result, first extract the 128-bit vector and 7183 // then extract the element from the 128-bit vector. 7184 if (VecVT.is256BitVector()) { 7185 DebugLoc dl = Op.getNode()->getDebugLoc(); 7186 unsigned NumElems = VecVT.getVectorNumElements(); 7187 SDValue Idx = Op.getOperand(1); 7188 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue(); 7189 7190 // Get the 128-bit vector. 7191 Vec = Extract128BitVector(Vec, IdxVal, DAG, dl); 7192 7193 if (IdxVal >= NumElems/2) 7194 IdxVal -= NumElems/2; 7195 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec, 7196 DAG.getConstant(IdxVal, MVT::i32)); 7197 } 7198 7199 assert(VecVT.is128BitVector() && "Unexpected vector length"); 7200 7201 if (Subtarget->hasSSE41()) { 7202 SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG); 7203 if (Res.getNode()) 7204 return Res; 7205 } 7206 7207 MVT VT = Op.getValueType().getSimpleVT(); 7208 DebugLoc dl = Op.getDebugLoc(); 7209 // TODO: handle v16i8. 7210 if (VT.getSizeInBits() == 16) { 7211 SDValue Vec = Op.getOperand(0); 7212 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 7213 if (Idx == 0) 7214 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, 7215 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, 7216 DAG.getNode(ISD::BITCAST, dl, 7217 MVT::v4i32, Vec), 7218 Op.getOperand(1))); 7219 // Transform it so it match pextrw which produces a 32-bit result. 7220 MVT EltVT = MVT::i32; 7221 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT, 7222 Op.getOperand(0), Op.getOperand(1)); 7223 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract, 7224 DAG.getValueType(VT)); 7225 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert); 7226 } 7227 7228 if (VT.getSizeInBits() == 32) { 7229 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 7230 if (Idx == 0) 7231 return Op; 7232 7233 // SHUFPS the element to the lowest double word, then movss. 
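    // E.g. for Idx == 2 this builds the shuffle mask <2,-1,-1,-1>, so the
    // wanted element lands in lane 0 and the final extract at index 0 can be
    // selected as shufps + movss (or movd for integer elements).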
7234 int Mask[4] = { static_cast<int>(Idx), -1, -1, -1 }; 7235 MVT VVT = Op.getOperand(0).getValueType().getSimpleVT(); 7236 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0), 7237 DAG.getUNDEF(VVT), Mask); 7238 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec, 7239 DAG.getIntPtrConstant(0)); 7240 } 7241 7242 if (VT.getSizeInBits() == 64) { 7243 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b 7244 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught 7245 // to match extract_elt for f64. 7246 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 7247 if (Idx == 0) 7248 return Op; 7249 7250 // UNPCKHPD the element to the lowest double word, then movsd. 7251 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored 7252 // to a f64mem, the whole operation is folded into a single MOVHPDmr. 7253 int Mask[2] = { 1, -1 }; 7254 MVT VVT = Op.getOperand(0).getValueType().getSimpleVT(); 7255 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0), 7256 DAG.getUNDEF(VVT), Mask); 7257 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec, 7258 DAG.getIntPtrConstant(0)); 7259 } 7260 7261 return SDValue(); 7262} 7263 7264static SDValue LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) { 7265 MVT VT = Op.getValueType().getSimpleVT(); 7266 MVT EltVT = VT.getVectorElementType(); 7267 DebugLoc dl = Op.getDebugLoc(); 7268 7269 SDValue N0 = Op.getOperand(0); 7270 SDValue N1 = Op.getOperand(1); 7271 SDValue N2 = Op.getOperand(2); 7272 7273 if (!VT.is128BitVector()) 7274 return SDValue(); 7275 7276 if ((EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) && 7277 isa<ConstantSDNode>(N2)) { 7278 unsigned Opc; 7279 if (VT == MVT::v8i16) 7280 Opc = X86ISD::PINSRW; 7281 else if (VT == MVT::v16i8) 7282 Opc = X86ISD::PINSRB; 7283 else 7284 Opc = X86ISD::PINSRB; 7285 7286 // Transform it so it match pinsr{b,w} which expects a GR32 as its second 7287 // argument. 7288 if (N1.getValueType() != MVT::i32) 7289 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1); 7290 if (N2.getValueType() != MVT::i32) 7291 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue()); 7292 return DAG.getNode(Opc, dl, VT, N0, N1, N2); 7293 } 7294 7295 if (EltVT == MVT::f32 && isa<ConstantSDNode>(N2)) { 7296 // Bits [7:6] of the constant are the source select. This will always be 7297 // zero here. The DAG Combiner may combine an extract_elt index into these 7298 // bits. For example (insert (extract, 3), 2) could be matched by putting 7299 // the '3' into bits [7:6] of X86ISD::INSERTPS. 7300 // Bits [5:4] of the constant are the destination select. This is the 7301 // value of the incoming immediate. 7302 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may 7303 // combine either bitwise AND or insert of float 0.0 to set these bits. 7304 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue() << 4); 7305 // Create this as a scalar to vector.. 7306 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1); 7307 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2); 7308 } 7309 7310 if ((EltVT == MVT::i32 || EltVT == MVT::i64) && isa<ConstantSDNode>(N2)) { 7311 // PINSR* works with constant index. 
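    // E.g. inserting into lane 2 of a v4i32 with a constant index selects
    // directly to pinsrd $2 (pinsrq for v2i64), so the node can be returned
    // unchanged.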
7312 return Op; 7313 } 7314 return SDValue(); 7315} 7316 7317SDValue 7318X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const { 7319 MVT VT = Op.getValueType().getSimpleVT(); 7320 MVT EltVT = VT.getVectorElementType(); 7321 7322 DebugLoc dl = Op.getDebugLoc(); 7323 SDValue N0 = Op.getOperand(0); 7324 SDValue N1 = Op.getOperand(1); 7325 SDValue N2 = Op.getOperand(2); 7326 7327 // If this is a 256-bit vector result, first extract the 128-bit vector, 7328 // insert the element into the extracted half and then place it back. 7329 if (VT.is256BitVector()) { 7330 if (!isa<ConstantSDNode>(N2)) 7331 return SDValue(); 7332 7333 // Get the desired 128-bit vector half. 7334 unsigned NumElems = VT.getVectorNumElements(); 7335 unsigned IdxVal = cast<ConstantSDNode>(N2)->getZExtValue(); 7336 SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl); 7337 7338 // Insert the element into the desired half. 7339 bool Upper = IdxVal >= NumElems/2; 7340 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1, 7341 DAG.getConstant(Upper ? IdxVal-NumElems/2 : IdxVal, MVT::i32)); 7342 7343 // Insert the changed part back to the 256-bit vector 7344 return Insert128BitVector(N0, V, IdxVal, DAG, dl); 7345 } 7346 7347 if (Subtarget->hasSSE41()) 7348 return LowerINSERT_VECTOR_ELT_SSE4(Op, DAG); 7349 7350 if (EltVT == MVT::i8) 7351 return SDValue(); 7352 7353 if (EltVT.getSizeInBits() == 16 && isa<ConstantSDNode>(N2)) { 7354 // Transform it so it match pinsrw which expects a 16-bit value in a GR32 7355 // as its second argument. 7356 if (N1.getValueType() != MVT::i32) 7357 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1); 7358 if (N2.getValueType() != MVT::i32) 7359 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue()); 7360 return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2); 7361 } 7362 return SDValue(); 7363} 7364 7365static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) { 7366 LLVMContext *Context = DAG.getContext(); 7367 DebugLoc dl = Op.getDebugLoc(); 7368 MVT OpVT = Op.getValueType().getSimpleVT(); 7369 7370 // If this is a 256-bit vector result, first insert into a 128-bit 7371 // vector and then insert into the 256-bit vector. 7372 if (!OpVT.is128BitVector()) { 7373 // Insert into a 128-bit vector. 7374 EVT VT128 = EVT::getVectorVT(*Context, 7375 OpVT.getVectorElementType(), 7376 OpVT.getVectorNumElements() / 2); 7377 7378 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0)); 7379 7380 // Insert the 128-bit vector. 7381 return Insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl); 7382 } 7383 7384 if (OpVT == MVT::v1i64 && 7385 Op.getOperand(0).getValueType() == MVT::i64) 7386 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0)); 7387 7388 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0)); 7389 assert(OpVT.is128BitVector() && "Expected an SSE type!"); 7390 return DAG.getNode(ISD::BITCAST, dl, OpVT, 7391 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt)); 7392} 7393 7394// Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in 7395// a simple subregister reference or explicit instructions to grab 7396// upper bits of a vector. 
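// E.g. extracting the v4f32 subvector at index 4 from a v8f32 becomes a
// vextractf128 $1, while index 0 is just a reference to the low xmm
// subregister.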
7397static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget, 7398 SelectionDAG &DAG) { 7399 if (Subtarget->hasFp256()) { 7400 DebugLoc dl = Op.getNode()->getDebugLoc(); 7401 SDValue Vec = Op.getNode()->getOperand(0); 7402 SDValue Idx = Op.getNode()->getOperand(1); 7403 7404 if (Op.getNode()->getValueType(0).is128BitVector() && 7405 Vec.getNode()->getValueType(0).is256BitVector() && 7406 isa<ConstantSDNode>(Idx)) { 7407 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue(); 7408 return Extract128BitVector(Vec, IdxVal, DAG, dl); 7409 } 7410 } 7411 return SDValue(); 7412} 7413 7414// Lower a node with an INSERT_SUBVECTOR opcode. This may result in a 7415// simple superregister reference or explicit instructions to insert 7416// the upper bits of a vector. 7417static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget, 7418 SelectionDAG &DAG) { 7419 if (Subtarget->hasFp256()) { 7420 DebugLoc dl = Op.getNode()->getDebugLoc(); 7421 SDValue Vec = Op.getNode()->getOperand(0); 7422 SDValue SubVec = Op.getNode()->getOperand(1); 7423 SDValue Idx = Op.getNode()->getOperand(2); 7424 7425 if (Op.getNode()->getValueType(0).is256BitVector() && 7426 SubVec.getNode()->getValueType(0).is128BitVector() && 7427 isa<ConstantSDNode>(Idx)) { 7428 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue(); 7429 return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl); 7430 } 7431 } 7432 return SDValue(); 7433} 7434 7435// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 7436// their target countpart wrapped in the X86ISD::Wrapper node. Suppose N is 7437// one of the above mentioned nodes. It has to be wrapped because otherwise 7438// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 7439// be used to form addressing mode. These wrapped nodes will be selected 7440// into MOV32ri. 7441SDValue 7442X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const { 7443 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 7444 7445 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the 7446 // global base reg. 7447 unsigned char OpFlag = 0; 7448 unsigned WrapperKind = X86ISD::Wrapper; 7449 CodeModel::Model M = getTargetMachine().getCodeModel(); 7450 7451 if (Subtarget->isPICStyleRIPRel() && 7452 (M == CodeModel::Small || M == CodeModel::Kernel)) 7453 WrapperKind = X86ISD::WrapperRIP; 7454 else if (Subtarget->isPICStyleGOT()) 7455 OpFlag = X86II::MO_GOTOFF; 7456 else if (Subtarget->isPICStyleStubPIC()) 7457 OpFlag = X86II::MO_PIC_BASE_OFFSET; 7458 7459 SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(), 7460 CP->getAlignment(), 7461 CP->getOffset(), OpFlag); 7462 DebugLoc DL = CP->getDebugLoc(); 7463 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result); 7464 // With PIC, the address is actually $g + Offset. 7465 if (OpFlag) { 7466 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(), 7467 DAG.getNode(X86ISD::GlobalBaseReg, 7468 DebugLoc(), getPointerTy()), 7469 Result); 7470 } 7471 7472 return Result; 7473} 7474 7475SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { 7476 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 7477 7478 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the 7479 // global base reg. 
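  // E.g. with 32-bit GOT-style PIC the address is computed as
  // GlobalBaseReg + JTI@GOTOFF, while in the small/kernel code models on
  // x86-64 the jump table address is simply RIP-relative.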
7480 unsigned char OpFlag = 0; 7481 unsigned WrapperKind = X86ISD::Wrapper; 7482 CodeModel::Model M = getTargetMachine().getCodeModel(); 7483 7484 if (Subtarget->isPICStyleRIPRel() && 7485 (M == CodeModel::Small || M == CodeModel::Kernel)) 7486 WrapperKind = X86ISD::WrapperRIP; 7487 else if (Subtarget->isPICStyleGOT()) 7488 OpFlag = X86II::MO_GOTOFF; 7489 else if (Subtarget->isPICStyleStubPIC()) 7490 OpFlag = X86II::MO_PIC_BASE_OFFSET; 7491 7492 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(), 7493 OpFlag); 7494 DebugLoc DL = JT->getDebugLoc(); 7495 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result); 7496 7497 // With PIC, the address is actually $g + Offset. 7498 if (OpFlag) 7499 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(), 7500 DAG.getNode(X86ISD::GlobalBaseReg, 7501 DebugLoc(), getPointerTy()), 7502 Result); 7503 7504 return Result; 7505} 7506 7507SDValue 7508X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const { 7509 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol(); 7510 7511 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the 7512 // global base reg. 7513 unsigned char OpFlag = 0; 7514 unsigned WrapperKind = X86ISD::Wrapper; 7515 CodeModel::Model M = getTargetMachine().getCodeModel(); 7516 7517 if (Subtarget->isPICStyleRIPRel() && 7518 (M == CodeModel::Small || M == CodeModel::Kernel)) { 7519 if (Subtarget->isTargetDarwin() || Subtarget->isTargetELF()) 7520 OpFlag = X86II::MO_GOTPCREL; 7521 WrapperKind = X86ISD::WrapperRIP; 7522 } else if (Subtarget->isPICStyleGOT()) { 7523 OpFlag = X86II::MO_GOT; 7524 } else if (Subtarget->isPICStyleStubPIC()) { 7525 OpFlag = X86II::MO_DARWIN_NONLAZY_PIC_BASE; 7526 } else if (Subtarget->isPICStyleStubNoDynamic()) { 7527 OpFlag = X86II::MO_DARWIN_NONLAZY; 7528 } 7529 7530 SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag); 7531 7532 DebugLoc DL = Op.getDebugLoc(); 7533 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result); 7534 7535 // With PIC, the address is actually $g + Offset. 7536 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 7537 !Subtarget->is64Bit()) { 7538 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(), 7539 DAG.getNode(X86ISD::GlobalBaseReg, 7540 DebugLoc(), getPointerTy()), 7541 Result); 7542 } 7543 7544 // For symbols that require a load from a stub to get the address, emit the 7545 // load. 7546 if (isGlobalStubReference(OpFlag)) 7547 Result = DAG.getLoad(getPointerTy(), DL, DAG.getEntryNode(), Result, 7548 MachinePointerInfo::getGOT(), false, false, false, 0); 7549 7550 return Result; 7551} 7552 7553SDValue 7554X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const { 7555 // Create the TargetBlockAddressAddress node. 7556 unsigned char OpFlags = 7557 Subtarget->ClassifyBlockAddressReference(); 7558 CodeModel::Model M = getTargetMachine().getCodeModel(); 7559 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 7560 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset(); 7561 DebugLoc dl = Op.getDebugLoc(); 7562 SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(), Offset, 7563 OpFlags); 7564 7565 if (Subtarget->isPICStyleRIPRel() && 7566 (M == CodeModel::Small || M == CodeModel::Kernel)) 7567 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result); 7568 else 7569 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result); 7570 7571 // With PIC, the address is actually $g + Offset. 
7572 if (isGlobalRelativeToPICBase(OpFlags)) { 7573 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), 7574 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()), 7575 Result); 7576 } 7577 7578 return Result; 7579} 7580 7581SDValue 7582X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl, 7583 int64_t Offset, SelectionDAG &DAG) const { 7584 // Create the TargetGlobalAddress node, folding in the constant 7585 // offset if it is legal. 7586 unsigned char OpFlags = 7587 Subtarget->ClassifyGlobalReference(GV, getTargetMachine()); 7588 CodeModel::Model M = getTargetMachine().getCodeModel(); 7589 SDValue Result; 7590 if (OpFlags == X86II::MO_NO_FLAG && 7591 X86::isOffsetSuitableForCodeModel(Offset, M)) { 7592 // A direct static reference to a global. 7593 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset); 7594 Offset = 0; 7595 } else { 7596 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags); 7597 } 7598 7599 if (Subtarget->isPICStyleRIPRel() && 7600 (M == CodeModel::Small || M == CodeModel::Kernel)) 7601 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result); 7602 else 7603 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result); 7604 7605 // With PIC, the address is actually $g + Offset. 7606 if (isGlobalRelativeToPICBase(OpFlags)) { 7607 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), 7608 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()), 7609 Result); 7610 } 7611 7612 // For globals that require a load from a stub to get the address, emit the 7613 // load. 7614 if (isGlobalStubReference(OpFlags)) 7615 Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result, 7616 MachinePointerInfo::getGOT(), false, false, false, 0); 7617 7618 // If there was a non-zero offset that we didn't fold, create an explicit 7619 // addition for it. 7620 if (Offset != 0) 7621 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result, 7622 DAG.getConstant(Offset, getPointerTy())); 7623 7624 return Result; 7625} 7626 7627SDValue 7628X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const { 7629 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 7630 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset(); 7631 return LowerGlobalAddress(GV, Op.getDebugLoc(), Offset, DAG); 7632} 7633 7634static SDValue 7635GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA, 7636 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg, 7637 unsigned char OperandFlags, bool LocalDynamic = false) { 7638 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 7639 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 7640 DebugLoc dl = GA->getDebugLoc(); 7641 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, 7642 GA->getValueType(0), 7643 GA->getOffset(), 7644 OperandFlags); 7645 7646 X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR 7647 : X86ISD::TLSADDR; 7648 7649 if (InFlag) { 7650 SDValue Ops[] = { Chain, TGA, *InFlag }; 7651 Chain = DAG.getNode(CallType, dl, NodeTys, Ops, array_lengthof(Ops)); 7652 } else { 7653 SDValue Ops[] = { Chain, TGA }; 7654 Chain = DAG.getNode(CallType, dl, NodeTys, Ops, array_lengthof(Ops)); 7655 } 7656 7657 // TLSADDR will be codegen'ed as call. Inform MFI that function has calls. 
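  // For the 64-bit general-dynamic model the pseudo expands to roughly
  //   leaq x@tlsgd(%rip), %rdi
  //   call __tls_get_addr@PLT
  // with the result in RAX (EAX and ___tls_get_addr in 32-bit mode).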
7658 MFI->setAdjustsStack(true); 7659 7660 SDValue Flag = Chain.getValue(1); 7661 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag); 7662} 7663 7664// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit 7665static SDValue 7666LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG, 7667 const EVT PtrVT) { 7668 SDValue InFlag; 7669 DebugLoc dl = GA->getDebugLoc(); // ? function entry point might be better 7670 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX, 7671 DAG.getNode(X86ISD::GlobalBaseReg, 7672 DebugLoc(), PtrVT), InFlag); 7673 InFlag = Chain.getValue(1); 7674 7675 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD); 7676} 7677 7678// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit 7679static SDValue 7680LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG, 7681 const EVT PtrVT) { 7682 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, NULL, PtrVT, 7683 X86::RAX, X86II::MO_TLSGD); 7684} 7685 7686static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA, 7687 SelectionDAG &DAG, 7688 const EVT PtrVT, 7689 bool is64Bit) { 7690 DebugLoc dl = GA->getDebugLoc(); 7691 7692 // Get the start address of the TLS block for this module. 7693 X86MachineFunctionInfo* MFI = DAG.getMachineFunction() 7694 .getInfo<X86MachineFunctionInfo>(); 7695 MFI->incNumLocalDynamicTLSAccesses(); 7696 7697 SDValue Base; 7698 if (is64Bit) { 7699 Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, NULL, PtrVT, X86::RAX, 7700 X86II::MO_TLSLD, /*LocalDynamic=*/true); 7701 } else { 7702 SDValue InFlag; 7703 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX, 7704 DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), PtrVT), InFlag); 7705 InFlag = Chain.getValue(1); 7706 Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, 7707 X86II::MO_TLSLDM, /*LocalDynamic=*/true); 7708 } 7709 7710 // Note: the CleanupLocalDynamicTLSPass will remove redundant computations 7711 // of Base. 7712 7713 // Build x@dtpoff. 7714 unsigned char OperandFlags = X86II::MO_DTPOFF; 7715 unsigned WrapperKind = X86ISD::Wrapper; 7716 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, 7717 GA->getValueType(0), 7718 GA->getOffset(), OperandFlags); 7719 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA); 7720 7721 // Add x@dtpoff with the base. 7722 return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base); 7723} 7724 7725// Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model. 7726static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, 7727 const EVT PtrVT, TLSModel::Model model, 7728 bool is64Bit, bool isPIC) { 7729 DebugLoc dl = GA->getDebugLoc(); 7730 7731 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit). 7732 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(), 7733 is64Bit ? 257 : 256)); 7734 7735 SDValue ThreadPointer = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 7736 DAG.getIntPtrConstant(0), 7737 MachinePointerInfo(Ptr), 7738 false, false, false, 0); 7739 7740 unsigned char OperandFlags = 0; 7741 // Most TLS accesses are not RIP relative, even on x86-64. One exception is 7742 // initialexec. 7743 unsigned WrapperKind = X86ISD::Wrapper; 7744 if (model == TLSModel::LocalExec) { 7745 OperandFlags = is64Bit ? 
X86II::MO_TPOFF : X86II::MO_NTPOFF; 7746 } else if (model == TLSModel::InitialExec) { 7747 if (is64Bit) { 7748 OperandFlags = X86II::MO_GOTTPOFF; 7749 WrapperKind = X86ISD::WrapperRIP; 7750 } else { 7751 OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF; 7752 } 7753 } else { 7754 llvm_unreachable("Unexpected model"); 7755 } 7756 7757 // emit "addl x@ntpoff,%eax" (local exec) 7758 // or "addl x@indntpoff,%eax" (initial exec) 7759 // or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic) 7760 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, 7761 GA->getValueType(0), 7762 GA->getOffset(), OperandFlags); 7763 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA); 7764 7765 if (model == TLSModel::InitialExec) { 7766 if (isPIC && !is64Bit) { 7767 Offset = DAG.getNode(ISD::ADD, dl, PtrVT, 7768 DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), PtrVT), 7769 Offset); 7770 } 7771 7772 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset, 7773 MachinePointerInfo::getGOT(), false, false, false, 7774 0); 7775 } 7776 7777 // The address of the thread local variable is the add of the thread 7778 // pointer with the offset of the variable. 7779 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); 7780} 7781 7782SDValue 7783X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { 7784 7785 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 7786 const GlobalValue *GV = GA->getGlobal(); 7787 7788 if (Subtarget->isTargetELF()) { 7789 TLSModel::Model model = getTargetMachine().getTLSModel(GV); 7790 7791 switch (model) { 7792 case TLSModel::GeneralDynamic: 7793 if (Subtarget->is64Bit()) 7794 return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy()); 7795 return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy()); 7796 case TLSModel::LocalDynamic: 7797 return LowerToTLSLocalDynamicModel(GA, DAG, getPointerTy(), 7798 Subtarget->is64Bit()); 7799 case TLSModel::InitialExec: 7800 case TLSModel::LocalExec: 7801 return LowerToTLSExecModel(GA, DAG, getPointerTy(), model, 7802 Subtarget->is64Bit(), 7803 getTargetMachine().getRelocationModel() == Reloc::PIC_); 7804 } 7805 llvm_unreachable("Unknown TLS model."); 7806 } 7807 7808 if (Subtarget->isTargetDarwin()) { 7809 // Darwin only has one model of TLS. Lower to that. 7810 unsigned char OpFlag = 0; 7811 unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ? 7812 X86ISD::WrapperRIP : X86ISD::Wrapper; 7813 7814 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the 7815 // global base reg. 7816 bool PIC32 = (getTargetMachine().getRelocationModel() == Reloc::PIC_) && 7817 !Subtarget->is64Bit(); 7818 if (PIC32) 7819 OpFlag = X86II::MO_TLVP_PIC_BASE; 7820 else 7821 OpFlag = X86II::MO_TLVP; 7822 DebugLoc DL = Op.getDebugLoc(); 7823 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL, 7824 GA->getValueType(0), 7825 GA->getOffset(), OpFlag); 7826 SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result); 7827 7828 // With PIC32, the address is actually $g + Offset. 7829 if (PIC32) 7830 Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(), 7831 DAG.getNode(X86ISD::GlobalBaseReg, 7832 DebugLoc(), getPointerTy()), 7833 Offset); 7834 7835 // Lowering the machine isd will make sure everything is in the right 7836 // location. 
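    // As a sketch, on x86-64 Darwin the TLSCALL pseudo built below becomes
    // something like ('var' is a placeholder for the variable being accessed):
    //   movq  _var@TLVP(%rip), %rdi
    //   callq *(%rdi)            # address of 'var' comes back in %rax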
7837 SDValue Chain = DAG.getEntryNode(); 7838 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 7839 SDValue Args[] = { Chain, Offset }; 7840 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args, 2); 7841 7842 // TLSCALL will be codegen'ed as call. Inform MFI that function has calls. 7843 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 7844 MFI->setAdjustsStack(true); 7845 7846 // And our return value (tls address) is in the standard call return value 7847 // location. 7848 unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX; 7849 return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(), 7850 Chain.getValue(1)); 7851 } 7852 7853 if (Subtarget->isTargetWindows() || Subtarget->isTargetMingw()) { 7854 // Just use the implicit TLS architecture 7855 // Need to generate someting similar to: 7856 // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage 7857 // ; from TEB 7858 // mov ecx, dword [rel _tls_index]: Load index (from C runtime) 7859 // mov rcx, qword [rdx+rcx*8] 7860 // mov eax, .tls$:tlsvar 7861 // [rax+rcx] contains the address 7862 // Windows 64bit: gs:0x58 7863 // Windows 32bit: fs:__tls_array 7864 7865 // If GV is an alias then use the aliasee for determining 7866 // thread-localness. 7867 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV)) 7868 GV = GA->resolveAliasedGlobal(false); 7869 DebugLoc dl = GA->getDebugLoc(); 7870 SDValue Chain = DAG.getEntryNode(); 7871 7872 // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or 7873 // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly 7874 // use its literal value of 0x2C. 7875 Value *Ptr = Constant::getNullValue(Subtarget->is64Bit() 7876 ? Type::getInt8PtrTy(*DAG.getContext(), 7877 256) 7878 : Type::getInt32PtrTy(*DAG.getContext(), 7879 257)); 7880 7881 SDValue TlsArray = Subtarget->is64Bit() ? DAG.getIntPtrConstant(0x58) : 7882 (Subtarget->isTargetMingw() ? DAG.getIntPtrConstant(0x2C) : 7883 DAG.getExternalSymbol("_tls_array", getPointerTy())); 7884 7885 SDValue ThreadPointer = DAG.getLoad(getPointerTy(), dl, Chain, TlsArray, 7886 MachinePointerInfo(Ptr), 7887 false, false, false, 0); 7888 7889 // Load the _tls_index variable 7890 SDValue IDX = DAG.getExternalSymbol("_tls_index", getPointerTy()); 7891 if (Subtarget->is64Bit()) 7892 IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, getPointerTy(), Chain, 7893 IDX, MachinePointerInfo(), MVT::i32, 7894 false, false, 0); 7895 else 7896 IDX = DAG.getLoad(getPointerTy(), dl, Chain, IDX, MachinePointerInfo(), 7897 false, false, false, 0); 7898 7899 SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()), 7900 getPointerTy()); 7901 IDX = DAG.getNode(ISD::SHL, dl, getPointerTy(), IDX, Scale); 7902 7903 SDValue res = DAG.getNode(ISD::ADD, dl, getPointerTy(), ThreadPointer, IDX); 7904 res = DAG.getLoad(getPointerTy(), dl, Chain, res, MachinePointerInfo(), 7905 false, false, false, 0); 7906 7907 // Get the offset of start of .tls section 7908 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, 7909 GA->getValueType(0), 7910 GA->getOffset(), X86II::MO_SECREL); 7911 SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), TGA); 7912 7913 // The address of the thread local variable is the add of the thread 7914 // pointer with the offset of the variable. 
7915 return DAG.getNode(ISD::ADD, dl, getPointerTy(), res, Offset); 7916 } 7917 7918 llvm_unreachable("TLS not implemented for this target."); 7919} 7920 7921/// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values 7922/// and take a 2 x i32 value to shift plus a shift amount. 7923SDValue X86TargetLowering::LowerShiftParts(SDValue Op, SelectionDAG &DAG) const{ 7924 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 7925 EVT VT = Op.getValueType(); 7926 unsigned VTBits = VT.getSizeInBits(); 7927 DebugLoc dl = Op.getDebugLoc(); 7928 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS; 7929 SDValue ShOpLo = Op.getOperand(0); 7930 SDValue ShOpHi = Op.getOperand(1); 7931 SDValue ShAmt = Op.getOperand(2); 7932 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi, 7933 DAG.getConstant(VTBits - 1, MVT::i8)) 7934 : DAG.getConstant(0, VT); 7935 7936 SDValue Tmp2, Tmp3; 7937 if (Op.getOpcode() == ISD::SHL_PARTS) { 7938 Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt); 7939 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); 7940 } else { 7941 Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt); 7942 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, ShAmt); 7943 } 7944 7945 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt, 7946 DAG.getConstant(VTBits, MVT::i8)); 7947 SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32, 7948 AndNode, DAG.getConstant(0, MVT::i8)); 7949 7950 SDValue Hi, Lo; 7951 SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8); 7952 SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond }; 7953 SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond }; 7954 7955 if (Op.getOpcode() == ISD::SHL_PARTS) { 7956 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0, 4); 7957 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1, 4); 7958 } else { 7959 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0, 4); 7960 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1, 4); 7961 } 7962 7963 SDValue Ops[2] = { Lo, Hi }; 7964 return DAG.getMergeValues(Ops, array_lengthof(Ops), dl); 7965} 7966 7967SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op, 7968 SelectionDAG &DAG) const { 7969 EVT SrcVT = Op.getOperand(0).getValueType(); 7970 7971 if (SrcVT.isVector()) 7972 return SDValue(); 7973 7974 assert(SrcVT.getSimpleVT() <= MVT::i64 && SrcVT.getSimpleVT() >= MVT::i16 && 7975 "Unknown SINT_TO_FP to lower!"); 7976 7977 // These are really Legal; return the operand so the caller accepts it as 7978 // Legal. 
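  // (For instance, with SSE2 an i32 source is selected directly to CVTSI2SD /
  //  CVTSI2SS, and on x86-64 an i64 source can use the 64-bit form, so no
  //  expansion through memory is needed in these cases.)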
7979 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType())) 7980 return Op; 7981 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) && 7982 Subtarget->is64Bit()) { 7983 return Op; 7984 } 7985 7986 DebugLoc dl = Op.getDebugLoc(); 7987 unsigned Size = SrcVT.getSizeInBits()/8; 7988 MachineFunction &MF = DAG.getMachineFunction(); 7989 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false); 7990 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 7991 SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), 7992 StackSlot, 7993 MachinePointerInfo::getFixedStack(SSFI), 7994 false, false, 0); 7995 return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG); 7996} 7997 7998SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, 7999 SDValue StackSlot, 8000 SelectionDAG &DAG) const { 8001 // Build the FILD 8002 DebugLoc DL = Op.getDebugLoc(); 8003 SDVTList Tys; 8004 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType()); 8005 if (useSSE) 8006 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue); 8007 else 8008 Tys = DAG.getVTList(Op.getValueType(), MVT::Other); 8009 8010 unsigned ByteSize = SrcVT.getSizeInBits()/8; 8011 8012 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot); 8013 MachineMemOperand *MMO; 8014 if (FI) { 8015 int SSFI = FI->getIndex(); 8016 MMO = 8017 DAG.getMachineFunction() 8018 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 8019 MachineMemOperand::MOLoad, ByteSize, ByteSize); 8020 } else { 8021 MMO = cast<LoadSDNode>(StackSlot)->getMemOperand(); 8022 StackSlot = StackSlot.getOperand(1); 8023 } 8024 SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) }; 8025 SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG : 8026 X86ISD::FILD, DL, 8027 Tys, Ops, array_lengthof(Ops), 8028 SrcVT, MMO); 8029 8030 if (useSSE) { 8031 Chain = Result.getValue(1); 8032 SDValue InFlag = Result.getValue(2); 8033 8034 // FIXME: Currently the FST is flagged to the FILD_FLAG. This 8035 // shouldn't be necessary except that RFP cannot be live across 8036 // multiple blocks. When stackifier is fixed, they can be uncoupled. 8037 MachineFunction &MF = DAG.getMachineFunction(); 8038 unsigned SSFISize = Op.getValueType().getSizeInBits()/8; 8039 int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false); 8040 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 8041 Tys = DAG.getVTList(MVT::Other); 8042 SDValue Ops[] = { 8043 Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag 8044 }; 8045 MachineMemOperand *MMO = 8046 DAG.getMachineFunction() 8047 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 8048 MachineMemOperand::MOStore, SSFISize, SSFISize); 8049 8050 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys, 8051 Ops, array_lengthof(Ops), 8052 Op.getValueType(), MMO); 8053 Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot, 8054 MachinePointerInfo::getFixedStack(SSFI), 8055 false, false, false, 0); 8056 } 8057 8058 return Result; 8059} 8060 8061// LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion. 8062SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op, 8063 SelectionDAG &DAG) const { 8064 // This algorithm is not obvious. 
Here it is what we're trying to output: 8065 /* 8066 movq %rax, %xmm0 8067 punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U } 8068 subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 } 8069 #ifdef __SSE3__ 8070 haddpd %xmm0, %xmm0 8071 #else 8072 pshufd $0x4e, %xmm0, %xmm1 8073 addpd %xmm1, %xmm0 8074 #endif 8075 */ 8076 8077 DebugLoc dl = Op.getDebugLoc(); 8078 LLVMContext *Context = DAG.getContext(); 8079 8080 // Build some magic constants. 8081 const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 }; 8082 Constant *C0 = ConstantDataVector::get(*Context, CV0); 8083 SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16); 8084 8085 SmallVector<Constant*,2> CV1; 8086 CV1.push_back( 8087 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble, 8088 APInt(64, 0x4330000000000000ULL)))); 8089 CV1.push_back( 8090 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble, 8091 APInt(64, 0x4530000000000000ULL)))); 8092 Constant *C1 = ConstantVector::get(CV1); 8093 SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16); 8094 8095 // Load the 64-bit value into an XMM register. 8096 SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, 8097 Op.getOperand(0)); 8098 SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0, 8099 MachinePointerInfo::getConstantPool(), 8100 false, false, false, 16); 8101 SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32, 8102 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, XR1), 8103 CLod0); 8104 8105 SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1, 8106 MachinePointerInfo::getConstantPool(), 8107 false, false, false, 16); 8108 SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck1); 8109 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1); 8110 SDValue Result; 8111 8112 if (Subtarget->hasSSE3()) { 8113 // FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'. 8114 Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub); 8115 } else { 8116 SDValue S2F = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Sub); 8117 SDValue Shuffle = getTargetShuffleNode(X86ISD::PSHUFD, dl, MVT::v4i32, 8118 S2F, 0x4E, DAG); 8119 Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64, 8120 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Shuffle), 8121 Sub); 8122 } 8123 8124 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result, 8125 DAG.getIntPtrConstant(0)); 8126} 8127 8128// LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion. 8129SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op, 8130 SelectionDAG &DAG) const { 8131 DebugLoc dl = Op.getDebugLoc(); 8132 // FP constant to bias correct the final result. 8133 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), 8134 MVT::f64); 8135 8136 // Load the 32-bit value into an XMM register. 8137 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, 8138 Op.getOperand(0)); 8139 8140 // Zero out the upper parts of the register. 8141 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG); 8142 8143 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 8144 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load), 8145 DAG.getIntPtrConstant(0)); 8146 8147 // Or the load with the bias. 
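  // The trick: the bias constant is the double 2^52 (bit pattern
  // 0x4330000000000000), and OR'ing the zero-extended 32-bit input into its
  // low mantissa bits produces a double whose value is exactly 2^52 + x.
  // Subtracting the bias then yields x converted to double.  For example,
  // x = 7 gives the bit pattern 0x4330000000000007 == 4503599627370503.0,
  // and 4503599627370503.0 - 2^52 == 7.0.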
8148 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, 8149 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, 8150 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 8151 MVT::v2f64, Load)), 8152 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, 8153 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 8154 MVT::v2f64, Bias))); 8155 Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 8156 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or), 8157 DAG.getIntPtrConstant(0)); 8158 8159 // Subtract the bias. 8160 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias); 8161 8162 // Handle final rounding. 8163 EVT DestVT = Op.getValueType(); 8164 8165 if (DestVT.bitsLT(MVT::f64)) 8166 return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub, 8167 DAG.getIntPtrConstant(0)); 8168 if (DestVT.bitsGT(MVT::f64)) 8169 return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub); 8170 8171 // Handle final rounding. 8172 return Sub; 8173} 8174 8175SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op, 8176 SelectionDAG &DAG) const { 8177 SDValue N0 = Op.getOperand(0); 8178 EVT SVT = N0.getValueType(); 8179 DebugLoc dl = Op.getDebugLoc(); 8180 8181 assert((SVT == MVT::v4i8 || SVT == MVT::v4i16 || 8182 SVT == MVT::v8i8 || SVT == MVT::v8i16) && 8183 "Custom UINT_TO_FP is not supported!"); 8184 8185 EVT NVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, 8186 SVT.getVectorNumElements()); 8187 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), 8188 DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, N0)); 8189} 8190 8191SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op, 8192 SelectionDAG &DAG) const { 8193 SDValue N0 = Op.getOperand(0); 8194 DebugLoc dl = Op.getDebugLoc(); 8195 8196 if (Op.getValueType().isVector()) 8197 return lowerUINT_TO_FP_vec(Op, DAG); 8198 8199 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't 8200 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform 8201 // the optimization here. 8202 if (DAG.SignBitIsZero(N0)) 8203 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0); 8204 8205 EVT SrcVT = N0.getValueType(); 8206 EVT DstVT = Op.getValueType(); 8207 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64) 8208 return LowerUINT_TO_FP_i64(Op, DAG); 8209 if (SrcVT == MVT::i32 && X86ScalarSSEf64) 8210 return LowerUINT_TO_FP_i32(Op, DAG); 8211 if (Subtarget->is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32) 8212 return SDValue(); 8213 8214 // Make a 64-bit buffer, and use it to build an FILD. 8215 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64); 8216 if (SrcVT == MVT::i32) { 8217 SDValue WordOff = DAG.getConstant(4, getPointerTy()); 8218 SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl, 8219 getPointerTy(), StackSlot, WordOff); 8220 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), 8221 StackSlot, MachinePointerInfo(), 8222 false, false, 0); 8223 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32), 8224 OffsetSlot, MachinePointerInfo(), 8225 false, false, 0); 8226 SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG); 8227 return Fild; 8228 } 8229 8230 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP"); 8231 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), 8232 StackSlot, MachinePointerInfo(), 8233 false, false, 0); 8234 // For i64 source, we need to add the appropriate power of 2 if the input 8235 // was negative. 
This is the same as the optimization in
8236  // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
8237  // we must be careful to do the computation in x87 extended precision, not
8238  // in SSE. (The generic code can't know it's OK to do this, or how to.)
8239  int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
8240  MachineMemOperand *MMO =
8241    DAG.getMachineFunction()
8242    .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
8243                          MachineMemOperand::MOLoad, 8, 8);
8244
8245  SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
8246  SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
8247  SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
8248                                         array_lengthof(Ops), MVT::i64, MMO);
8249
8250  APInt FF(32, 0x5F800000ULL);
8251
8252  // Check whether the sign bit is set.
8253  SDValue SignSet = DAG.getSetCC(dl,
8254                                 getSetCCResultType(*DAG.getContext(), MVT::i64),
8255                                 Op.getOperand(0), DAG.getConstant(0, MVT::i64),
8256                                 ISD::SETLT);
8257
8258  // Build a 64-bit pair (0, FF) in the constant pool, with FF in the lo bits.
8259  SDValue FudgePtr = DAG.getConstantPool(
8260                             ConstantInt::get(*DAG.getContext(), FF.zext(64)),
8261                                         getPointerTy());
8262
8263  // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
8264  SDValue Zero = DAG.getIntPtrConstant(0);
8265  SDValue Four = DAG.getIntPtrConstant(4);
8266  SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet,
8267                               Zero, Four);
8268  FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset);
8269
8270  // Load the value out, extending it from f32 to f80.
8271  // FIXME: Avoid the extend by constructing the right constant pool?
8272  SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(),
8273                                 FudgePtr, MachinePointerInfo::getConstantPool(),
8274                                 MVT::f32, false, false, 4);
8275  // Extend everything to 80 bits to force it to be done on x87.
8276  SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
8277  return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0));
8278}
8279
8280std::pair<SDValue,SDValue>
8281X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
8282                                   bool IsSigned, bool IsReplace) const {
8283  DebugLoc DL = Op.getDebugLoc();
8284
8285  EVT DstTy = Op.getValueType();
8286
8287  if (!IsSigned && !isIntegerTypeFTOL(DstTy)) {
8288    assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
8289    DstTy = MVT::i64;
8290  }
8291
8292  assert(DstTy.getSimpleVT() <= MVT::i64 &&
8293         DstTy.getSimpleVT() >= MVT::i16 &&
8294         "Unknown FP_TO_INT to lower!");
8295
8296  // These are really Legal.
8297  if (DstTy == MVT::i32 &&
8298      isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
8299    return std::make_pair(SDValue(), SDValue());
8300  if (Subtarget->is64Bit() &&
8301      DstTy == MVT::i64 &&
8302      isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
8303    return std::make_pair(SDValue(), SDValue());
8304
8305  // We lower FP->int64 either into FISTP64 followed by a load from a temporary
8306  // stack slot, or into the FTOL runtime function.
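  // (On 32-bit Windows targets the FTOL path ends up calling the MSVC runtime
  //  helper _ftol2, which takes the value on the x87 stack and returns the
  //  truncated 64-bit integer in EDX:EAX; see the WIN_FTOL handling below.)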
8307 MachineFunction &MF = DAG.getMachineFunction(); 8308 unsigned MemSize = DstTy.getSizeInBits()/8; 8309 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false); 8310 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 8311 8312 unsigned Opc; 8313 if (!IsSigned && isIntegerTypeFTOL(DstTy)) 8314 Opc = X86ISD::WIN_FTOL; 8315 else 8316 switch (DstTy.getSimpleVT().SimpleTy) { 8317 default: llvm_unreachable("Invalid FP_TO_SINT to lower!"); 8318 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break; 8319 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break; 8320 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break; 8321 } 8322 8323 SDValue Chain = DAG.getEntryNode(); 8324 SDValue Value = Op.getOperand(0); 8325 EVT TheVT = Op.getOperand(0).getValueType(); 8326 // FIXME This causes a redundant load/store if the SSE-class value is already 8327 // in memory, such as if it is on the callstack. 8328 if (isScalarFPTypeInSSEReg(TheVT)) { 8329 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!"); 8330 Chain = DAG.getStore(Chain, DL, Value, StackSlot, 8331 MachinePointerInfo::getFixedStack(SSFI), 8332 false, false, 0); 8333 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other); 8334 SDValue Ops[] = { 8335 Chain, StackSlot, DAG.getValueType(TheVT) 8336 }; 8337 8338 MachineMemOperand *MMO = 8339 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 8340 MachineMemOperand::MOLoad, MemSize, MemSize); 8341 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, 8342 array_lengthof(Ops), DstTy, MMO); 8343 Chain = Value.getValue(1); 8344 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false); 8345 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 8346 } 8347 8348 MachineMemOperand *MMO = 8349 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 8350 MachineMemOperand::MOStore, MemSize, MemSize); 8351 8352 if (Opc != X86ISD::WIN_FTOL) { 8353 // Build the FP_TO_INT*_IN_MEM 8354 SDValue Ops[] = { Chain, Value, StackSlot }; 8355 SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other), 8356 Ops, array_lengthof(Ops), DstTy, 8357 MMO); 8358 return std::make_pair(FIST, StackSlot); 8359 } else { 8360 SDValue ftol = DAG.getNode(X86ISD::WIN_FTOL, DL, 8361 DAG.getVTList(MVT::Other, MVT::Glue), 8362 Chain, Value); 8363 SDValue eax = DAG.getCopyFromReg(ftol, DL, X86::EAX, 8364 MVT::i32, ftol.getValue(1)); 8365 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), DL, X86::EDX, 8366 MVT::i32, eax.getValue(2)); 8367 SDValue Ops[] = { eax, edx }; 8368 SDValue pair = IsReplace 8369 ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops, array_lengthof(Ops)) 8370 : DAG.getMergeValues(Ops, array_lengthof(Ops), DL); 8371 return std::make_pair(pair, SDValue()); 8372 } 8373} 8374 8375static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG, 8376 const X86Subtarget *Subtarget) { 8377 MVT VT = Op->getValueType(0).getSimpleVT(); 8378 SDValue In = Op->getOperand(0); 8379 MVT InVT = In.getValueType().getSimpleVT(); 8380 DebugLoc dl = Op->getDebugLoc(); 8381 8382 // Optimize vectors in AVX mode: 8383 // 8384 // v8i16 -> v8i32 8385 // Use vpunpcklwd for 4 lower elements v8i16 -> v4i32. 8386 // Use vpunpckhwd for 4 upper elements v8i16 -> v4i32. 8387 // Concat upper and lower parts. 8388 // 8389 // v4i32 -> v4i64 8390 // Use vpunpckldq for 4 lower elements v4i32 -> v2i64. 8391 // Use vpunpckhdq for 4 upper elements v4i32 -> v2i64. 8392 // Concat upper and lower parts. 
8393 // 8394 8395 if (((VT != MVT::v8i32) || (InVT != MVT::v8i16)) && 8396 ((VT != MVT::v4i64) || (InVT != MVT::v4i32))) 8397 return SDValue(); 8398 8399 if (Subtarget->hasInt256()) 8400 return DAG.getNode(X86ISD::VZEXT_MOVL, dl, VT, In); 8401 8402 SDValue ZeroVec = getZeroVector(InVT, Subtarget, DAG, dl); 8403 SDValue Undef = DAG.getUNDEF(InVT); 8404 bool NeedZero = Op.getOpcode() == ISD::ZERO_EXTEND; 8405 SDValue OpLo = getUnpackl(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef); 8406 SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef); 8407 8408 MVT HVT = MVT::getVectorVT(VT.getVectorElementType(), 8409 VT.getVectorNumElements()/2); 8410 8411 OpLo = DAG.getNode(ISD::BITCAST, dl, HVT, OpLo); 8412 OpHi = DAG.getNode(ISD::BITCAST, dl, HVT, OpHi); 8413 8414 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi); 8415} 8416 8417SDValue X86TargetLowering::LowerANY_EXTEND(SDValue Op, 8418 SelectionDAG &DAG) const { 8419 if (Subtarget->hasFp256()) { 8420 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget); 8421 if (Res.getNode()) 8422 return Res; 8423 } 8424 8425 return SDValue(); 8426} 8427SDValue X86TargetLowering::LowerZERO_EXTEND(SDValue Op, 8428 SelectionDAG &DAG) const { 8429 DebugLoc DL = Op.getDebugLoc(); 8430 MVT VT = Op.getValueType().getSimpleVT(); 8431 SDValue In = Op.getOperand(0); 8432 MVT SVT = In.getValueType().getSimpleVT(); 8433 8434 if (Subtarget->hasFp256()) { 8435 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget); 8436 if (Res.getNode()) 8437 return Res; 8438 } 8439 8440 if (!VT.is256BitVector() || !SVT.is128BitVector() || 8441 VT.getVectorNumElements() != SVT.getVectorNumElements()) 8442 return SDValue(); 8443 8444 assert(Subtarget->hasFp256() && "256-bit vector is observed without AVX!"); 8445 8446 // AVX2 has better support of integer extending. 8447 if (Subtarget->hasInt256()) 8448 return DAG.getNode(X86ISD::VZEXT, DL, VT, In); 8449 8450 SDValue Lo = DAG.getNode(X86ISD::VZEXT, DL, MVT::v4i32, In); 8451 static const int Mask[] = {4, 5, 6, 7, -1, -1, -1, -1}; 8452 SDValue Hi = DAG.getNode(X86ISD::VZEXT, DL, MVT::v4i32, 8453 DAG.getVectorShuffle(MVT::v8i16, DL, In, 8454 DAG.getUNDEF(MVT::v8i16), 8455 &Mask[0])); 8456 8457 return DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i32, Lo, Hi); 8458} 8459 8460SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { 8461 DebugLoc DL = Op.getDebugLoc(); 8462 MVT VT = Op.getValueType().getSimpleVT(); 8463 SDValue In = Op.getOperand(0); 8464 MVT SVT = In.getValueType().getSimpleVT(); 8465 8466 if ((VT == MVT::v4i32) && (SVT == MVT::v4i64)) { 8467 // On AVX2, v4i64 -> v4i32 becomes VPERMD. 8468 if (Subtarget->hasInt256()) { 8469 static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1}; 8470 In = DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, In); 8471 In = DAG.getVectorShuffle(MVT::v8i32, DL, In, DAG.getUNDEF(MVT::v8i32), 8472 ShufMask); 8473 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In, 8474 DAG.getIntPtrConstant(0)); 8475 } 8476 8477 // On AVX, v4i64 -> v4i32 becomes a sequence that uses PSHUFD and MOVLHPS. 
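    // Sketch of the dataflow below, writing the v4i64 input as <a, b, c, d>:
    //   OpLo = <a, b> reinterpreted as v4i32 = <a0, a1, b0, b1> (a0 = low half)
    //   OpHi = <c, d> reinterpreted as v4i32 = <c0, c1, d0, d1>
    //   PSHUFD {0,2,*,*}:  OpLo -> <a0, b0, ...>,  OpHi -> <c0, d0, ...>
    //   shuffle {0,1,4,5}: <a0, b0, c0, d0>, i.e. the truncated result.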
8478    SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
8479                               DAG.getIntPtrConstant(0));
8480    SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
8481                               DAG.getIntPtrConstant(2));
8482
8483    OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
8484    OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
8485
8486    // The PSHUFD mask:
8487    static const int ShufMask1[] = {0, 2, 0, 0};
8488    SDValue Undef = DAG.getUNDEF(VT);
8489    OpLo = DAG.getVectorShuffle(VT, DL, OpLo, Undef, ShufMask1);
8490    OpHi = DAG.getVectorShuffle(VT, DL, OpHi, Undef, ShufMask1);
8491
8492    // The MOVLHPS mask:
8493    static const int ShufMask2[] = {0, 1, 4, 5};
8494    return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask2);
8495  }
8496
8497  if ((VT == MVT::v8i16) && (SVT == MVT::v8i32)) {
8498    // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
8499    if (Subtarget->hasInt256()) {
8500      In = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, In);
8501
8502      SmallVector<SDValue,32> pshufbMask;
8503      for (unsigned i = 0; i < 2; ++i) {
8504        pshufbMask.push_back(DAG.getConstant(0x0, MVT::i8));
8505        pshufbMask.push_back(DAG.getConstant(0x1, MVT::i8));
8506        pshufbMask.push_back(DAG.getConstant(0x4, MVT::i8));
8507        pshufbMask.push_back(DAG.getConstant(0x5, MVT::i8));
8508        pshufbMask.push_back(DAG.getConstant(0x8, MVT::i8));
8509        pshufbMask.push_back(DAG.getConstant(0x9, MVT::i8));
8510        pshufbMask.push_back(DAG.getConstant(0xc, MVT::i8));
8511        pshufbMask.push_back(DAG.getConstant(0xd, MVT::i8));
8512        for (unsigned j = 0; j < 8; ++j)
8513          pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
8514      }
8515      SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8,
8516                               &pshufbMask[0], 32);
8517      In = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8, In, BV);
8518      In = DAG.getNode(ISD::BITCAST, DL, MVT::v4i64, In);
8519
8520      static const int ShufMask[] = {0, 2, -1, -1};
8521      In = DAG.getVectorShuffle(MVT::v4i64, DL, In, DAG.getUNDEF(MVT::v4i64),
8522                                &ShufMask[0]);
8523      In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
8524                       DAG.getIntPtrConstant(0));
8525      return DAG.getNode(ISD::BITCAST, DL, VT, In);
8526    }
8527
8528    SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
8529                               DAG.getIntPtrConstant(0));
8530
8531    SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
8532                               DAG.getIntPtrConstant(4));
8533
8534    OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpLo);
8535    OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpHi);
8536
8537    // The PSHUFB mask:
8538    static const int ShufMask1[] = {0,  1,  4,  5,  8,  9, 12, 13,
8539                                    -1, -1, -1, -1, -1, -1, -1, -1};
8540
8541    SDValue Undef = DAG.getUNDEF(MVT::v16i8);
8542    OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, Undef, ShufMask1);
8543    OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, Undef, ShufMask1);
8544
8545    OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
8546    OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
8547
8548    // The MOVLHPS mask:
8549    static const int ShufMask2[] = {0, 1, 4, 5};
8550    SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
8551    return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, res);
8552  }
8553
8554  // Handle truncation of V256 to V128 using shuffles.
8555 if (!VT.is128BitVector() || !SVT.is256BitVector()) 8556 return SDValue(); 8557 8558 assert(VT.getVectorNumElements() != SVT.getVectorNumElements() && 8559 "Invalid op"); 8560 assert(Subtarget->hasFp256() && "256-bit vector without AVX!"); 8561 8562 unsigned NumElems = VT.getVectorNumElements(); 8563 EVT NVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 8564 NumElems * 2); 8565 8566 SmallVector<int, 16> MaskVec(NumElems * 2, -1); 8567 // Prepare truncation shuffle mask 8568 for (unsigned i = 0; i != NumElems; ++i) 8569 MaskVec[i] = i * 2; 8570 SDValue V = DAG.getVectorShuffle(NVT, DL, 8571 DAG.getNode(ISD::BITCAST, DL, NVT, In), 8572 DAG.getUNDEF(NVT), &MaskVec[0]); 8573 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, 8574 DAG.getIntPtrConstant(0)); 8575} 8576 8577SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op, 8578 SelectionDAG &DAG) const { 8579 MVT VT = Op.getValueType().getSimpleVT(); 8580 if (VT.isVector()) { 8581 if (VT == MVT::v8i16) 8582 return DAG.getNode(ISD::TRUNCATE, Op.getDebugLoc(), VT, 8583 DAG.getNode(ISD::FP_TO_SINT, Op.getDebugLoc(), 8584 MVT::v8i32, Op.getOperand(0))); 8585 return SDValue(); 8586 } 8587 8588 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, 8589 /*IsSigned=*/ true, /*IsReplace=*/ false); 8590 SDValue FIST = Vals.first, StackSlot = Vals.second; 8591 // If FP_TO_INTHelper failed, the node is actually supposed to be Legal. 8592 if (FIST.getNode() == 0) return Op; 8593 8594 if (StackSlot.getNode()) 8595 // Load the result. 8596 return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(), 8597 FIST, StackSlot, MachinePointerInfo(), 8598 false, false, false, 0); 8599 8600 // The node is the result. 8601 return FIST; 8602} 8603 8604SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op, 8605 SelectionDAG &DAG) const { 8606 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, 8607 /*IsSigned=*/ false, /*IsReplace=*/ false); 8608 SDValue FIST = Vals.first, StackSlot = Vals.second; 8609 assert(FIST.getNode() && "Unexpected failure"); 8610 8611 if (StackSlot.getNode()) 8612 // Load the result. 8613 return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(), 8614 FIST, StackSlot, MachinePointerInfo(), 8615 false, false, false, 0); 8616 8617 // The node is the result. 8618 return FIST; 8619} 8620 8621static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) { 8622 DebugLoc DL = Op.getDebugLoc(); 8623 MVT VT = Op.getValueType().getSimpleVT(); 8624 SDValue In = Op.getOperand(0); 8625 MVT SVT = In.getValueType().getSimpleVT(); 8626 8627 assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!"); 8628 8629 return DAG.getNode(X86ISD::VFPEXT, DL, VT, 8630 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32, 8631 In, DAG.getUNDEF(SVT))); 8632} 8633 8634SDValue X86TargetLowering::LowerFABS(SDValue Op, SelectionDAG &DAG) const { 8635 LLVMContext *Context = DAG.getContext(); 8636 DebugLoc dl = Op.getDebugLoc(); 8637 MVT VT = Op.getValueType().getSimpleVT(); 8638 MVT EltVT = VT; 8639 unsigned NumElts = VT == MVT::f64 ? 
2 : 4; 8640 if (VT.isVector()) { 8641 EltVT = VT.getVectorElementType(); 8642 NumElts = VT.getVectorNumElements(); 8643 } 8644 Constant *C; 8645 if (EltVT == MVT::f64) 8646 C = ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble, 8647 APInt(64, ~(1ULL << 63)))); 8648 else 8649 C = ConstantFP::get(*Context, APFloat(APFloat::IEEEsingle, 8650 APInt(32, ~(1U << 31)))); 8651 C = ConstantVector::getSplat(NumElts, C); 8652 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy()); 8653 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 8654 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 8655 MachinePointerInfo::getConstantPool(), 8656 false, false, false, Alignment); 8657 if (VT.isVector()) { 8658 MVT ANDVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64; 8659 return DAG.getNode(ISD::BITCAST, dl, VT, 8660 DAG.getNode(ISD::AND, dl, ANDVT, 8661 DAG.getNode(ISD::BITCAST, dl, ANDVT, 8662 Op.getOperand(0)), 8663 DAG.getNode(ISD::BITCAST, dl, ANDVT, Mask))); 8664 } 8665 return DAG.getNode(X86ISD::FAND, dl, VT, Op.getOperand(0), Mask); 8666} 8667 8668SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) const { 8669 LLVMContext *Context = DAG.getContext(); 8670 DebugLoc dl = Op.getDebugLoc(); 8671 MVT VT = Op.getValueType().getSimpleVT(); 8672 MVT EltVT = VT; 8673 unsigned NumElts = VT == MVT::f64 ? 2 : 4; 8674 if (VT.isVector()) { 8675 EltVT = VT.getVectorElementType(); 8676 NumElts = VT.getVectorNumElements(); 8677 } 8678 Constant *C; 8679 if (EltVT == MVT::f64) 8680 C = ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble, 8681 APInt(64, 1ULL << 63))); 8682 else 8683 C = ConstantFP::get(*Context, APFloat(APFloat::IEEEsingle, 8684 APInt(32, 1U << 31))); 8685 C = ConstantVector::getSplat(NumElts, C); 8686 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy()); 8687 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 8688 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 8689 MachinePointerInfo::getConstantPool(), 8690 false, false, false, Alignment); 8691 if (VT.isVector()) { 8692 MVT XORVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64; 8693 return DAG.getNode(ISD::BITCAST, dl, VT, 8694 DAG.getNode(ISD::XOR, dl, XORVT, 8695 DAG.getNode(ISD::BITCAST, dl, XORVT, 8696 Op.getOperand(0)), 8697 DAG.getNode(ISD::BITCAST, dl, XORVT, Mask))); 8698 } 8699 8700 return DAG.getNode(X86ISD::FXOR, dl, VT, Op.getOperand(0), Mask); 8701} 8702 8703SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { 8704 LLVMContext *Context = DAG.getContext(); 8705 SDValue Op0 = Op.getOperand(0); 8706 SDValue Op1 = Op.getOperand(1); 8707 DebugLoc dl = Op.getDebugLoc(); 8708 MVT VT = Op.getValueType().getSimpleVT(); 8709 MVT SrcVT = Op1.getValueType().getSimpleVT(); 8710 8711 // If second operand is smaller, extend it first. 8712 if (SrcVT.bitsLT(VT)) { 8713 Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1); 8714 SrcVT = VT; 8715 } 8716 // And if it is bigger, shrink it first. 8717 if (SrcVT.bitsGT(VT)) { 8718 Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1)); 8719 SrcVT = VT; 8720 } 8721 8722 // At this point the operands and the result should have the same 8723 // type, and that won't be f80 since that is not custom lowered. 8724 8725 // First get the sign bit of second operand. 
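  // The plan is plain bit arithmetic: result = (Op0 & ~SIGNMASK) | (Op1 & SIGNMASK),
  // with SIGNMASK selecting only the sign bit of the type.  For example,
  // copysign(1.0, -2.0) keeps 1.0's magnitude, takes -2.0's sign bit, and
  // produces -1.0.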
8726 SmallVector<Constant*,4> CV; 8727 if (SrcVT == MVT::f64) { 8728 const fltSemantics &Sem = APFloat::IEEEdouble; 8729 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(64, 1ULL << 63)))); 8730 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(64, 0)))); 8731 } else { 8732 const fltSemantics &Sem = APFloat::IEEEsingle; 8733 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(32, 1U << 31)))); 8734 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(32, 0)))); 8735 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(32, 0)))); 8736 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(32, 0)))); 8737 } 8738 Constant *C = ConstantVector::get(CV); 8739 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 8740 SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx, 8741 MachinePointerInfo::getConstantPool(), 8742 false, false, false, 16); 8743 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1); 8744 8745 // Shift sign bit right or left if the two operands have different types. 8746 if (SrcVT.bitsGT(VT)) { 8747 // Op0 is MVT::f32, Op1 is MVT::f64. 8748 SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, SignBit); 8749 SignBit = DAG.getNode(X86ISD::FSRL, dl, MVT::v2f64, SignBit, 8750 DAG.getConstant(32, MVT::i32)); 8751 SignBit = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, SignBit); 8752 SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, SignBit, 8753 DAG.getIntPtrConstant(0)); 8754 } 8755 8756 // Clear first operand sign bit. 8757 CV.clear(); 8758 if (VT == MVT::f64) { 8759 const fltSemantics &Sem = APFloat::IEEEdouble; 8760 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, 8761 APInt(64, ~(1ULL << 63))))); 8762 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(64, 0)))); 8763 } else { 8764 const fltSemantics &Sem = APFloat::IEEEsingle; 8765 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, 8766 APInt(32, ~(1U << 31))))); 8767 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(32, 0)))); 8768 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(32, 0)))); 8769 CV.push_back(ConstantFP::get(*Context, APFloat(Sem, APInt(32, 0)))); 8770 } 8771 C = ConstantVector::get(CV); 8772 CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 8773 SDValue Mask2 = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 8774 MachinePointerInfo::getConstantPool(), 8775 false, false, false, 16); 8776 SDValue Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Mask2); 8777 8778 // Or the value with the sign bit. 8779 return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit); 8780} 8781 8782static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) { 8783 SDValue N0 = Op.getOperand(0); 8784 DebugLoc dl = Op.getDebugLoc(); 8785 MVT VT = Op.getValueType().getSimpleVT(); 8786 8787 // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1). 8788 SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0, 8789 DAG.getConstant(1, VT)); 8790 return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT)); 8791} 8792 8793// LowerVectorAllZeroTest - Check whether an OR'd tree is PTEST-able. 
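// For example, a source-level test such as
//   if ((v[0] | v[1] | v[2] | v[3]) == 0) ...
// on a 128-bit vector, which reaches us as an OR tree of EXTRACT_VECTOR_ELTs
// compared against zero, can be emitted as a single PTEST of v against itself.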
8794// 8795SDValue X86TargetLowering::LowerVectorAllZeroTest(SDValue Op, 8796 SelectionDAG &DAG) const { 8797 assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree."); 8798 8799 if (!Subtarget->hasSSE41()) 8800 return SDValue(); 8801 8802 if (!Op->hasOneUse()) 8803 return SDValue(); 8804 8805 SDNode *N = Op.getNode(); 8806 DebugLoc DL = N->getDebugLoc(); 8807 8808 SmallVector<SDValue, 8> Opnds; 8809 DenseMap<SDValue, unsigned> VecInMap; 8810 EVT VT = MVT::Other; 8811 8812 // Recognize a special case where a vector is casted into wide integer to 8813 // test all 0s. 8814 Opnds.push_back(N->getOperand(0)); 8815 Opnds.push_back(N->getOperand(1)); 8816 8817 for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) { 8818 SmallVector<SDValue, 8>::const_iterator I = Opnds.begin() + Slot; 8819 // BFS traverse all OR'd operands. 8820 if (I->getOpcode() == ISD::OR) { 8821 Opnds.push_back(I->getOperand(0)); 8822 Opnds.push_back(I->getOperand(1)); 8823 // Re-evaluate the number of nodes to be traversed. 8824 e += 2; // 2 more nodes (LHS and RHS) are pushed. 8825 continue; 8826 } 8827 8828 // Quit if a non-EXTRACT_VECTOR_ELT 8829 if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 8830 return SDValue(); 8831 8832 // Quit if without a constant index. 8833 SDValue Idx = I->getOperand(1); 8834 if (!isa<ConstantSDNode>(Idx)) 8835 return SDValue(); 8836 8837 SDValue ExtractedFromVec = I->getOperand(0); 8838 DenseMap<SDValue, unsigned>::iterator M = VecInMap.find(ExtractedFromVec); 8839 if (M == VecInMap.end()) { 8840 VT = ExtractedFromVec.getValueType(); 8841 // Quit if not 128/256-bit vector. 8842 if (!VT.is128BitVector() && !VT.is256BitVector()) 8843 return SDValue(); 8844 // Quit if not the same type. 8845 if (VecInMap.begin() != VecInMap.end() && 8846 VT != VecInMap.begin()->first.getValueType()) 8847 return SDValue(); 8848 M = VecInMap.insert(std::make_pair(ExtractedFromVec, 0)).first; 8849 } 8850 M->second |= 1U << cast<ConstantSDNode>(Idx)->getZExtValue(); 8851 } 8852 8853 assert((VT.is128BitVector() || VT.is256BitVector()) && 8854 "Not extracted from 128-/256-bit vector."); 8855 8856 unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U; 8857 SmallVector<SDValue, 8> VecIns; 8858 8859 for (DenseMap<SDValue, unsigned>::const_iterator 8860 I = VecInMap.begin(), E = VecInMap.end(); I != E; ++I) { 8861 // Quit if not all elements are used. 8862 if (I->second != FullMask) 8863 return SDValue(); 8864 VecIns.push_back(I->first); 8865 } 8866 8867 EVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64; 8868 8869 // Cast all vectors into TestVT for PTEST. 8870 for (unsigned i = 0, e = VecIns.size(); i < e; ++i) 8871 VecIns[i] = DAG.getNode(ISD::BITCAST, DL, TestVT, VecIns[i]); 8872 8873 // If more than one full vectors are evaluated, OR them first before PTEST. 8874 for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) { 8875 // Each iteration will OR 2 nodes and append the result until there is only 8876 // 1 node left, i.e. the final OR'd value of all vectors. 8877 SDValue LHS = VecIns[Slot]; 8878 SDValue RHS = VecIns[Slot + 1]; 8879 VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS)); 8880 } 8881 8882 return DAG.getNode(X86ISD::PTEST, DL, MVT::i32, 8883 VecIns.back(), VecIns.back()); 8884} 8885 8886/// Emit nodes that will be selected as "test Op0,Op0", or something 8887/// equivalent. 
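/// Where possible this reuses the EFLAGS produced by an arithmetic operation
/// instead of materializing a separate TEST; e.g. for
///   if ((a + b) != 0) ...
/// the flags set by the ADD itself can be consumed by the SETCC/branch.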
8888SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, 8889 SelectionDAG &DAG) const { 8890 DebugLoc dl = Op.getDebugLoc(); 8891 8892 // CF and OF aren't always set the way we want. Determine which 8893 // of these we need. 8894 bool NeedCF = false; 8895 bool NeedOF = false; 8896 switch (X86CC) { 8897 default: break; 8898 case X86::COND_A: case X86::COND_AE: 8899 case X86::COND_B: case X86::COND_BE: 8900 NeedCF = true; 8901 break; 8902 case X86::COND_G: case X86::COND_GE: 8903 case X86::COND_L: case X86::COND_LE: 8904 case X86::COND_O: case X86::COND_NO: 8905 NeedOF = true; 8906 break; 8907 } 8908 8909 // See if we can use the EFLAGS value from the operand instead of 8910 // doing a separate TEST. TEST always sets OF and CF to 0, so unless 8911 // we prove that the arithmetic won't overflow, we can't use OF or CF. 8912 if (Op.getResNo() != 0 || NeedOF || NeedCF) 8913 // Emit a CMP with 0, which is the TEST pattern. 8914 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op, 8915 DAG.getConstant(0, Op.getValueType())); 8916 8917 unsigned Opcode = 0; 8918 unsigned NumOperands = 0; 8919 8920 // Truncate operations may prevent the merge of the SETCC instruction 8921 // and the arithmetic intruction before it. Attempt to truncate the operands 8922 // of the arithmetic instruction and use a reduced bit-width instruction. 8923 bool NeedTruncation = false; 8924 SDValue ArithOp = Op; 8925 if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) { 8926 SDValue Arith = Op->getOperand(0); 8927 // Both the trunc and the arithmetic op need to have one user each. 8928 if (Arith->hasOneUse()) 8929 switch (Arith.getOpcode()) { 8930 default: break; 8931 case ISD::ADD: 8932 case ISD::SUB: 8933 case ISD::AND: 8934 case ISD::OR: 8935 case ISD::XOR: { 8936 NeedTruncation = true; 8937 ArithOp = Arith; 8938 } 8939 } 8940 } 8941 8942 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation 8943 // which may be the result of a CAST. We use the variable 'Op', which is the 8944 // non-casted variable when we check for possible users. 8945 switch (ArithOp.getOpcode()) { 8946 case ISD::ADD: 8947 // Due to an isel shortcoming, be conservative if this add is likely to be 8948 // selected as part of a load-modify-store instruction. When the root node 8949 // in a match is a store, isel doesn't know how to remap non-chain non-flag 8950 // uses of other nodes in the match, such as the ADD in this case. This 8951 // leads to the ADD being left around and reselected, with the result being 8952 // two adds in the output. Alas, even if none our users are stores, that 8953 // doesn't prove we're O.K. Ergo, if we have any parents that aren't 8954 // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require 8955 // climbing the DAG back to the root, and it doesn't seem to be worth the 8956 // effort. 8957 for (SDNode::use_iterator UI = Op.getNode()->use_begin(), 8958 UE = Op.getNode()->use_end(); UI != UE; ++UI) 8959 if (UI->getOpcode() != ISD::CopyToReg && 8960 UI->getOpcode() != ISD::SETCC && 8961 UI->getOpcode() != ISD::STORE) 8962 goto default_case; 8963 8964 if (ConstantSDNode *C = 8965 dyn_cast<ConstantSDNode>(ArithOp.getNode()->getOperand(1))) { 8966 // An add of one will be selected as an INC. 8967 if (C->getAPIntValue() == 1) { 8968 Opcode = X86ISD::INC; 8969 NumOperands = 1; 8970 break; 8971 } 8972 8973 // An add of negative one (subtract of one) will be selected as a DEC. 
8974 if (C->getAPIntValue().isAllOnesValue()) { 8975 Opcode = X86ISD::DEC; 8976 NumOperands = 1; 8977 break; 8978 } 8979 } 8980 8981 // Otherwise use a regular EFLAGS-setting add. 8982 Opcode = X86ISD::ADD; 8983 NumOperands = 2; 8984 break; 8985 case ISD::AND: { 8986 // If the primary and result isn't used, don't bother using X86ISD::AND, 8987 // because a TEST instruction will be better. 8988 bool NonFlagUse = false; 8989 for (SDNode::use_iterator UI = Op.getNode()->use_begin(), 8990 UE = Op.getNode()->use_end(); UI != UE; ++UI) { 8991 SDNode *User = *UI; 8992 unsigned UOpNo = UI.getOperandNo(); 8993 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) { 8994 // Look pass truncate. 8995 UOpNo = User->use_begin().getOperandNo(); 8996 User = *User->use_begin(); 8997 } 8998 8999 if (User->getOpcode() != ISD::BRCOND && 9000 User->getOpcode() != ISD::SETCC && 9001 !(User->getOpcode() == ISD::SELECT && UOpNo == 0)) { 9002 NonFlagUse = true; 9003 break; 9004 } 9005 } 9006 9007 if (!NonFlagUse) 9008 break; 9009 } 9010 // FALL THROUGH 9011 case ISD::SUB: 9012 case ISD::OR: 9013 case ISD::XOR: 9014 // Due to the ISEL shortcoming noted above, be conservative if this op is 9015 // likely to be selected as part of a load-modify-store instruction. 9016 for (SDNode::use_iterator UI = Op.getNode()->use_begin(), 9017 UE = Op.getNode()->use_end(); UI != UE; ++UI) 9018 if (UI->getOpcode() == ISD::STORE) 9019 goto default_case; 9020 9021 // Otherwise use a regular EFLAGS-setting instruction. 9022 switch (ArithOp.getOpcode()) { 9023 default: llvm_unreachable("unexpected operator!"); 9024 case ISD::SUB: Opcode = X86ISD::SUB; break; 9025 case ISD::XOR: Opcode = X86ISD::XOR; break; 9026 case ISD::AND: Opcode = X86ISD::AND; break; 9027 case ISD::OR: { 9028 if (!NeedTruncation && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) { 9029 SDValue EFLAGS = LowerVectorAllZeroTest(Op, DAG); 9030 if (EFLAGS.getNode()) 9031 return EFLAGS; 9032 } 9033 Opcode = X86ISD::OR; 9034 break; 9035 } 9036 } 9037 9038 NumOperands = 2; 9039 break; 9040 case X86ISD::ADD: 9041 case X86ISD::SUB: 9042 case X86ISD::INC: 9043 case X86ISD::DEC: 9044 case X86ISD::OR: 9045 case X86ISD::XOR: 9046 case X86ISD::AND: 9047 return SDValue(Op.getNode(), 1); 9048 default: 9049 default_case: 9050 break; 9051 } 9052 9053 // If we found that truncation is beneficial, perform the truncation and 9054 // update 'Op'. 9055 if (NeedTruncation) { 9056 EVT VT = Op.getValueType(); 9057 SDValue WideVal = Op->getOperand(0); 9058 EVT WideVT = WideVal.getValueType(); 9059 unsigned ConvertedOp = 0; 9060 // Use a target machine opcode to prevent further DAGCombine 9061 // optimizations that may separate the arithmetic operations 9062 // from the setcc node. 9063 switch (WideVal.getOpcode()) { 9064 default: break; 9065 case ISD::ADD: ConvertedOp = X86ISD::ADD; break; 9066 case ISD::SUB: ConvertedOp = X86ISD::SUB; break; 9067 case ISD::AND: ConvertedOp = X86ISD::AND; break; 9068 case ISD::OR: ConvertedOp = X86ISD::OR; break; 9069 case ISD::XOR: ConvertedOp = X86ISD::XOR; break; 9070 } 9071 9072 if (ConvertedOp) { 9073 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 9074 if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) { 9075 SDValue V0 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(0)); 9076 SDValue V1 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(1)); 9077 Op = DAG.getNode(ConvertedOp, dl, VT, V0, V1); 9078 } 9079 } 9080 } 9081 9082 if (Opcode == 0) 9083 // Emit a CMP with 0, which is the TEST pattern. 
9084 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op, 9085 DAG.getConstant(0, Op.getValueType())); 9086 9087 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); 9088 SmallVector<SDValue, 4> Ops; 9089 for (unsigned i = 0; i != NumOperands; ++i) 9090 Ops.push_back(Op.getOperand(i)); 9091 9092 SDValue New = DAG.getNode(Opcode, dl, VTs, &Ops[0], NumOperands); 9093 DAG.ReplaceAllUsesWith(Op, New); 9094 return SDValue(New.getNode(), 1); 9095} 9096 9097/// Emit nodes that will be selected as "cmp Op0,Op1", or something 9098/// equivalent. 9099SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC, 9100 SelectionDAG &DAG) const { 9101 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1)) 9102 if (C->getAPIntValue() == 0) 9103 return EmitTest(Op0, X86CC, DAG); 9104 9105 DebugLoc dl = Op0.getDebugLoc(); 9106 if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 || 9107 Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) { 9108 // Use SUB instead of CMP to enable CSE between SUB and CMP. 9109 SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32); 9110 SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs, 9111 Op0, Op1); 9112 return SDValue(Sub.getNode(), 1); 9113 } 9114 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1); 9115} 9116 9117/// Convert a comparison if required by the subtarget. 9118SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp, 9119 SelectionDAG &DAG) const { 9120 // If the subtarget does not support the FUCOMI instruction, floating-point 9121 // comparisons have to be converted. 9122 if (Subtarget->hasCMov() || 9123 Cmp.getOpcode() != X86ISD::CMP || 9124 !Cmp.getOperand(0).getValueType().isFloatingPoint() || 9125 !Cmp.getOperand(1).getValueType().isFloatingPoint()) 9126 return Cmp; 9127 9128 // The instruction selector will select an FUCOM instruction instead of 9129 // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence 9130 // build an SDNode sequence that transfers the result from FPSW into EFLAGS: 9131 // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8)))) 9132 DebugLoc dl = Cmp.getDebugLoc(); 9133 SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp); 9134 SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW); 9135 SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW, 9136 DAG.getConstant(8, MVT::i8)); 9137 SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl); 9138 return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl); 9139} 9140 9141static bool isAllOnes(SDValue V) { 9142 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V); 9143 return C && C->isAllOnesValue(); 9144} 9145 9146/// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node 9147/// if it's possible. 9148SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC, 9149 DebugLoc dl, SelectionDAG &DAG) const { 9150 SDValue Op0 = And.getOperand(0); 9151 SDValue Op1 = And.getOperand(1); 9152 if (Op0.getOpcode() == ISD::TRUNCATE) 9153 Op0 = Op0.getOperand(0); 9154 if (Op1.getOpcode() == ISD::TRUNCATE) 9155 Op1 = Op1.getOperand(0); 9156 9157 SDValue LHS, RHS; 9158 if (Op1.getOpcode() == ISD::SHL) 9159 std::swap(Op0, Op1); 9160 if (Op0.getOpcode() == ISD::SHL) { 9161 if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0))) 9162 if (And00C->getZExtValue() == 1) { 9163 // If we looked past a truncate, check that it's only truncating away 9164 // known zeros. 
9165 unsigned BitWidth = Op0.getValueSizeInBits();
9166 unsigned AndBitWidth = And.getValueSizeInBits();
9167 if (BitWidth > AndBitWidth) {
9168 APInt Zeros, Ones;
9169 DAG.ComputeMaskedBits(Op0, Zeros, Ones);
9170 if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
9171 return SDValue();
9172 }
9173 LHS = Op1;
9174 RHS = Op0.getOperand(1);
9175 }
9176 } else if (Op1.getOpcode() == ISD::Constant) {
9177 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
9178 uint64_t AndRHSVal = AndRHS->getZExtValue();
9179 SDValue AndLHS = Op0;
9180
9181 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
9182 LHS = AndLHS.getOperand(0);
9183 RHS = AndLHS.getOperand(1);
9184 }
9185
9186 // Use BT if the immediate can't be encoded in a TEST instruction.
9187 if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
9188 LHS = AndLHS;
9189 RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), LHS.getValueType());
9190 }
9191 }
9192
9193 if (LHS.getNode()) {
9194 // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT
9195 // instruction. Since the shift amount is in-range-or-undefined, we know
9196 // that doing a bittest on the i32 value is ok. We extend to i32 because
9197 // the encoding for the i16 version is larger than the i32 version.
9198 // Also promote i16 to i32 for performance / code size reasons.
9199 if (LHS.getValueType() == MVT::i8 ||
9200 LHS.getValueType() == MVT::i16)
9201 LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS);
9202
9203 // If the operand types disagree, extend the shift amount to match. Since
9204 // BT ignores high bits (like shifts) we can use anyextend.
9205 if (LHS.getValueType() != RHS.getValueType())
9206 RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS);
9207
9208 SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS);
9209 X86::CondCode Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
9210 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
9211 DAG.getConstant(Cond, MVT::i8), BT);
9212 }
9213
9214 return SDValue();
9215}
9216
9217// Lower256IntVSETCC - Break a 256-bit integer VSETCC into two new 128-bit
9218// ones, and then concatenate the result back.
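// For example, on a target without 256-bit integer compares a v8i32 VSETCC is
// lowered as two v4i32 compares whose results are rejoined with CONCAT_VECTORS.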
9219static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) { 9220 MVT VT = Op.getValueType().getSimpleVT(); 9221 9222 assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC && 9223 "Unsupported value type for operation"); 9224 9225 unsigned NumElems = VT.getVectorNumElements(); 9226 DebugLoc dl = Op.getDebugLoc(); 9227 SDValue CC = Op.getOperand(2); 9228 9229 // Extract the LHS vectors 9230 SDValue LHS = Op.getOperand(0); 9231 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl); 9232 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl); 9233 9234 // Extract the RHS vectors 9235 SDValue RHS = Op.getOperand(1); 9236 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl); 9237 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl); 9238 9239 // Issue the operation on the smaller types and concatenate the result back 9240 MVT EltVT = VT.getVectorElementType(); 9241 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); 9242 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, 9243 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC), 9244 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC)); 9245} 9246 9247static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget, 9248 SelectionDAG &DAG) { 9249 SDValue Cond; 9250 SDValue Op0 = Op.getOperand(0); 9251 SDValue Op1 = Op.getOperand(1); 9252 SDValue CC = Op.getOperand(2); 9253 MVT VT = Op.getValueType().getSimpleVT(); 9254 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 9255 bool isFP = Op.getOperand(1).getValueType().getSimpleVT().isFloatingPoint(); 9256 DebugLoc dl = Op.getDebugLoc(); 9257 9258 if (isFP) { 9259#ifndef NDEBUG 9260 MVT EltVT = Op0.getValueType().getVectorElementType().getSimpleVT(); 9261 assert(EltVT == MVT::f32 || EltVT == MVT::f64); 9262#endif 9263 9264 unsigned SSECC; 9265 bool Swap = false; 9266 9267 // SSE Condition code mapping: 9268 // 0 - EQ 9269 // 1 - LT 9270 // 2 - LE 9271 // 3 - UNORD 9272 // 4 - NEQ 9273 // 5 - NLT 9274 // 6 - NLE 9275 // 7 - ORD 9276 switch (SetCCOpcode) { 9277 default: llvm_unreachable("Unexpected SETCC condition"); 9278 case ISD::SETOEQ: 9279 case ISD::SETEQ: SSECC = 0; break; 9280 case ISD::SETOGT: 9281 case ISD::SETGT: Swap = true; // Fallthrough 9282 case ISD::SETLT: 9283 case ISD::SETOLT: SSECC = 1; break; 9284 case ISD::SETOGE: 9285 case ISD::SETGE: Swap = true; // Fallthrough 9286 case ISD::SETLE: 9287 case ISD::SETOLE: SSECC = 2; break; 9288 case ISD::SETUO: SSECC = 3; break; 9289 case ISD::SETUNE: 9290 case ISD::SETNE: SSECC = 4; break; 9291 case ISD::SETULE: Swap = true; // Fallthrough 9292 case ISD::SETUGE: SSECC = 5; break; 9293 case ISD::SETULT: Swap = true; // Fallthrough 9294 case ISD::SETUGT: SSECC = 6; break; 9295 case ISD::SETO: SSECC = 7; break; 9296 case ISD::SETUEQ: 9297 case ISD::SETONE: SSECC = 8; break; 9298 } 9299 if (Swap) 9300 std::swap(Op0, Op1); 9301 9302 // In the two special cases we can't handle, emit two comparisons. 9303 if (SSECC == 8) { 9304 unsigned CC0, CC1; 9305 unsigned CombineOpc; 9306 if (SetCCOpcode == ISD::SETUEQ) { 9307 CC0 = 3; CC1 = 0; CombineOpc = ISD::OR; 9308 } else { 9309 assert(SetCCOpcode == ISD::SETONE); 9310 CC0 = 7; CC1 = 4; CombineOpc = ISD::AND; 9311 } 9312 9313 SDValue Cmp0 = DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1, 9314 DAG.getConstant(CC0, MVT::i8)); 9315 SDValue Cmp1 = DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1, 9316 DAG.getConstant(CC1, MVT::i8)); 9317 return DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1); 9318 } 9319 // Handle all other FP comparisons here. 
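// (E.g. a SETOLT on v4f32 reaches this point with SSECC == 1 and is emitted
//  as a single CMPP node, i.e. a cmpltps instruction.)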
9320 return DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1, 9321 DAG.getConstant(SSECC, MVT::i8)); 9322 } 9323 9324 // Break 256-bit integer vector compare into smaller ones. 9325 if (VT.is256BitVector() && !Subtarget->hasInt256()) 9326 return Lower256IntVSETCC(Op, DAG); 9327 9328 // We are handling one of the integer comparisons here. Since SSE only has 9329 // GT and EQ comparisons for integer, swapping operands and multiple 9330 // operations may be required for some comparisons. 9331 unsigned Opc; 9332 bool Swap = false, Invert = false, FlipSigns = false; 9333 9334 switch (SetCCOpcode) { 9335 default: llvm_unreachable("Unexpected SETCC condition"); 9336 case ISD::SETNE: Invert = true; 9337 case ISD::SETEQ: Opc = X86ISD::PCMPEQ; break; 9338 case ISD::SETLT: Swap = true; 9339 case ISD::SETGT: Opc = X86ISD::PCMPGT; break; 9340 case ISD::SETGE: Swap = true; 9341 case ISD::SETLE: Opc = X86ISD::PCMPGT; Invert = true; break; 9342 case ISD::SETULT: Swap = true; 9343 case ISD::SETUGT: Opc = X86ISD::PCMPGT; FlipSigns = true; break; 9344 case ISD::SETUGE: Swap = true; 9345 case ISD::SETULE: Opc = X86ISD::PCMPGT; FlipSigns = true; Invert = true; break; 9346 } 9347 if (Swap) 9348 std::swap(Op0, Op1); 9349 9350 // Check that the operation in question is available (most are plain SSE2, 9351 // but PCMPGTQ and PCMPEQQ have different requirements). 9352 if (VT == MVT::v2i64) { 9353 if (Opc == X86ISD::PCMPGT && !Subtarget->hasSSE42()) { 9354 assert(Subtarget->hasSSE2() && "Don't know how to lower!"); 9355 9356 // First cast everything to the right type. 9357 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0); 9358 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1); 9359 9360 // Since SSE has no unsigned integer comparisons, we need to flip the sign 9361 // bits of the inputs before performing those operations. The lower 9362 // compare is always unsigned. 9363 SDValue SB; 9364 if (FlipSigns) { 9365 SB = DAG.getConstant(0x80000000U, MVT::v4i32); 9366 } else { 9367 SDValue Sign = DAG.getConstant(0x80000000U, MVT::i32); 9368 SDValue Zero = DAG.getConstant(0x00000000U, MVT::i32); 9369 SB = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, 9370 Sign, Zero, Sign, Zero); 9371 } 9372 Op0 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op0, SB); 9373 Op1 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op1, SB); 9374 9375 // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2)) 9376 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1); 9377 SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1); 9378 9379 // Create masks for only the low parts/high parts of the 64 bit integers. 9380 const int MaskHi[] = { 1, 1, 3, 3 }; 9381 const int MaskLo[] = { 0, 0, 2, 2 }; 9382 SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi); 9383 SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo); 9384 SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi); 9385 9386 SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo); 9387 Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi); 9388 9389 if (Invert) 9390 Result = DAG.getNOT(dl, Result, MVT::v4i32); 9391 9392 return DAG.getNode(ISD::BITCAST, dl, VT, Result); 9393 } 9394 9395 if (Opc == X86ISD::PCMPEQ && !Subtarget->hasSSE41()) { 9396 // If pcmpeqq is missing but pcmpeqd is available synthesize pcmpeqq with 9397 // pcmpeqd + pshufd + pand. 9398 assert(Subtarget->hasSSE2() && !FlipSigns && "Don't know how to lower!"); 9399 9400 // First cast everything to the right type. 
9401 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0); 9402 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1); 9403 9404 // Do the compare. 9405 SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1); 9406 9407 // Make sure the lower and upper halves are both all-ones. 9408 const int Mask[] = { 1, 0, 3, 2 }; 9409 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask); 9410 Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf); 9411 9412 if (Invert) 9413 Result = DAG.getNOT(dl, Result, MVT::v4i32); 9414 9415 return DAG.getNode(ISD::BITCAST, dl, VT, Result); 9416 } 9417 } 9418 9419 // Since SSE has no unsigned integer comparisons, we need to flip the sign 9420 // bits of the inputs before performing those operations. 9421 if (FlipSigns) { 9422 EVT EltVT = VT.getVectorElementType(); 9423 SDValue SB = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), VT); 9424 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SB); 9425 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SB); 9426 } 9427 9428 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 9429 9430 // If the logical-not of the result is required, perform that now. 9431 if (Invert) 9432 Result = DAG.getNOT(dl, Result, VT); 9433 9434 return Result; 9435} 9436 9437SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { 9438 9439 MVT VT = Op.getValueType().getSimpleVT(); 9440 9441 if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG); 9442 9443 assert(VT == MVT::i8 && "SetCC type must be 8-bit integer"); 9444 SDValue Op0 = Op.getOperand(0); 9445 SDValue Op1 = Op.getOperand(1); 9446 DebugLoc dl = Op.getDebugLoc(); 9447 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 9448 9449 // Optimize to BT if possible. 9450 // Lower (X & (1 << N)) == 0 to BT(X, N). 9451 // Lower ((X >>u N) & 1) != 0 to BT(X, N). 9452 // Lower ((X >>s N) & 1) != 0 to BT(X, N). 9453 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() && 9454 Op1.getOpcode() == ISD::Constant && 9455 cast<ConstantSDNode>(Op1)->isNullValue() && 9456 (CC == ISD::SETEQ || CC == ISD::SETNE)) { 9457 SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG); 9458 if (NewSetCC.getNode()) 9459 return NewSetCC; 9460 } 9461 9462 // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of 9463 // these. 9464 if (Op1.getOpcode() == ISD::Constant && 9465 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 || 9466 cast<ConstantSDNode>(Op1)->isNullValue()) && 9467 (CC == ISD::SETEQ || CC == ISD::SETNE)) { 9468 9469 // If the input is a setcc, then reuse the input setcc or use a new one with 9470 // the inverted condition. 9471 if (Op0.getOpcode() == X86ISD::SETCC) { 9472 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0); 9473 bool Invert = (CC == ISD::SETNE) ^ 9474 cast<ConstantSDNode>(Op1)->isNullValue(); 9475 if (!Invert) return Op0; 9476 9477 CCode = X86::GetOppositeBranchCondition(CCode); 9478 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 9479 DAG.getConstant(CCode, MVT::i8), Op0.getOperand(1)); 9480 } 9481 } 9482 9483 bool isFP = Op1.getValueType().getSimpleVT().isFloatingPoint(); 9484 unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG); 9485 if (X86CC == X86::COND_INVALID) 9486 return SDValue(); 9487 9488 SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, DAG); 9489 EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG); 9490 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 9491 DAG.getConstant(X86CC, MVT::i8), EFLAGS); 9492} 9493 9494// isX86LogicalCmp - Return true if opcode is a X86 logical comparison. 
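// Here a "logical comparison" is any node whose EFLAGS output can feed a
// conditional directly: an explicit CMP/COMI/UCOMI/SAHF, or the flags result
// (result number 1) of an arithmetic node such as X86ISD::ADD or X86ISD::SUB.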
9495static bool isX86LogicalCmp(SDValue Op) { 9496 unsigned Opc = Op.getNode()->getOpcode(); 9497 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI || 9498 Opc == X86ISD::SAHF) 9499 return true; 9500 if (Op.getResNo() == 1 && 9501 (Opc == X86ISD::ADD || 9502 Opc == X86ISD::SUB || 9503 Opc == X86ISD::ADC || 9504 Opc == X86ISD::SBB || 9505 Opc == X86ISD::SMUL || 9506 Opc == X86ISD::UMUL || 9507 Opc == X86ISD::INC || 9508 Opc == X86ISD::DEC || 9509 Opc == X86ISD::OR || 9510 Opc == X86ISD::XOR || 9511 Opc == X86ISD::AND)) 9512 return true; 9513 9514 if (Op.getResNo() == 2 && Opc == X86ISD::UMUL) 9515 return true; 9516 9517 return false; 9518} 9519 9520static bool isZero(SDValue V) { 9521 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V); 9522 return C && C->isNullValue(); 9523} 9524 9525static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) { 9526 if (V.getOpcode() != ISD::TRUNCATE) 9527 return false; 9528 9529 SDValue VOp0 = V.getOperand(0); 9530 unsigned InBits = VOp0.getValueSizeInBits(); 9531 unsigned Bits = V.getValueSizeInBits(); 9532 return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits)); 9533} 9534 9535SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 9536 bool addTest = true; 9537 SDValue Cond = Op.getOperand(0); 9538 SDValue Op1 = Op.getOperand(1); 9539 SDValue Op2 = Op.getOperand(2); 9540 DebugLoc DL = Op.getDebugLoc(); 9541 SDValue CC; 9542 9543 if (Cond.getOpcode() == ISD::SETCC) { 9544 SDValue NewCond = LowerSETCC(Cond, DAG); 9545 if (NewCond.getNode()) 9546 Cond = NewCond; 9547 } 9548 9549 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y 9550 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y 9551 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y 9552 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y 9553 if (Cond.getOpcode() == X86ISD::SETCC && 9554 Cond.getOperand(1).getOpcode() == X86ISD::CMP && 9555 isZero(Cond.getOperand(1).getOperand(1))) { 9556 SDValue Cmp = Cond.getOperand(1); 9557 9558 unsigned CondCode =cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue(); 9559 9560 if ((isAllOnes(Op1) || isAllOnes(Op2)) && 9561 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) { 9562 SDValue Y = isAllOnes(Op2) ? Op1 : Op2; 9563 9564 SDValue CmpOp0 = Cmp.getOperand(0); 9565 // Apply further optimizations for special cases 9566 // (select (x != 0), -1, 0) -> neg & sbb 9567 // (select (x == 0), 0, -1) -> neg & sbb 9568 if (ConstantSDNode *YC = dyn_cast<ConstantSDNode>(Y)) 9569 if (YC->isNullValue() && 9570 (isAllOnes(Op1) == (CondCode == X86::COND_NE))) { 9571 SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32); 9572 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, VTs, 9573 DAG.getConstant(0, CmpOp0.getValueType()), 9574 CmpOp0); 9575 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(), 9576 DAG.getConstant(X86::COND_B, MVT::i8), 9577 SDValue(Neg.getNode(), 1)); 9578 return Res; 9579 } 9580 9581 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, 9582 CmpOp0, DAG.getConstant(1, CmpOp0.getValueType())); 9583 Cmp = ConvertCmpIfNecessary(Cmp, DAG); 9584 9585 SDValue Res = // Res = 0 or -1. 
9586 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(), 9587 DAG.getConstant(X86::COND_B, MVT::i8), Cmp); 9588 9589 if (isAllOnes(Op1) != (CondCode == X86::COND_E)) 9590 Res = DAG.getNOT(DL, Res, Res.getValueType()); 9591 9592 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2); 9593 if (N2C == 0 || !N2C->isNullValue()) 9594 Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y); 9595 return Res; 9596 } 9597 } 9598 9599 // Look past (and (setcc_carry (cmp ...)), 1). 9600 if (Cond.getOpcode() == ISD::AND && 9601 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) { 9602 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 9603 if (C && C->getAPIntValue() == 1) 9604 Cond = Cond.getOperand(0); 9605 } 9606 9607 // If condition flag is set by a X86ISD::CMP, then use it as the condition 9608 // setting operand in place of the X86ISD::SETCC. 9609 unsigned CondOpcode = Cond.getOpcode(); 9610 if (CondOpcode == X86ISD::SETCC || 9611 CondOpcode == X86ISD::SETCC_CARRY) { 9612 CC = Cond.getOperand(0); 9613 9614 SDValue Cmp = Cond.getOperand(1); 9615 unsigned Opc = Cmp.getOpcode(); 9616 MVT VT = Op.getValueType().getSimpleVT(); 9617 9618 bool IllegalFPCMov = false; 9619 if (VT.isFloatingPoint() && !VT.isVector() && 9620 !isScalarFPTypeInSSEReg(VT)) // FPStack? 9621 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue()); 9622 9623 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) || 9624 Opc == X86ISD::BT) { // FIXME 9625 Cond = Cmp; 9626 addTest = false; 9627 } 9628 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO || 9629 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO || 9630 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) && 9631 Cond.getOperand(0).getValueType() != MVT::i8)) { 9632 SDValue LHS = Cond.getOperand(0); 9633 SDValue RHS = Cond.getOperand(1); 9634 unsigned X86Opcode; 9635 unsigned X86Cond; 9636 SDVTList VTs; 9637 switch (CondOpcode) { 9638 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break; 9639 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break; 9640 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break; 9641 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break; 9642 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break; 9643 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break; 9644 default: llvm_unreachable("unexpected overflowing operator"); 9645 } 9646 if (CondOpcode == ISD::UMULO) 9647 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(), 9648 MVT::i32); 9649 else 9650 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32); 9651 9652 SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS); 9653 9654 if (CondOpcode == ISD::UMULO) 9655 Cond = X86Op.getValue(2); 9656 else 9657 Cond = X86Op.getValue(1); 9658 9659 CC = DAG.getConstant(X86Cond, MVT::i8); 9660 addTest = false; 9661 } 9662 9663 if (addTest) { 9664 // Look pass the truncate if the high bits are known zero. 9665 if (isTruncWithZeroHighBitsInput(Cond, DAG)) 9666 Cond = Cond.getOperand(0); 9667 9668 // We know the result of AND is compared against zero. Try to match 9669 // it to BT. 
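// (E.g. "select ((x & (1 << n)) != 0), a, b" can then be lowered as a BT of x
//  by n followed by a CMOV on the carry flag, instead of materializing the
//  mask and using TEST.)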
9670 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) { 9671 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG); 9672 if (NewSetCC.getNode()) { 9673 CC = NewSetCC.getOperand(0); 9674 Cond = NewSetCC.getOperand(1); 9675 addTest = false; 9676 } 9677 } 9678 } 9679 9680 if (addTest) { 9681 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 9682 Cond = EmitTest(Cond, X86::COND_NE, DAG); 9683 } 9684 9685 // a < b ? -1 : 0 -> RES = ~setcc_carry 9686 // a < b ? 0 : -1 -> RES = setcc_carry 9687 // a >= b ? -1 : 0 -> RES = setcc_carry 9688 // a >= b ? 0 : -1 -> RES = ~setcc_carry 9689 if (Cond.getOpcode() == X86ISD::SUB) { 9690 Cond = ConvertCmpIfNecessary(Cond, DAG); 9691 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue(); 9692 9693 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) && 9694 (isAllOnes(Op1) || isAllOnes(Op2)) && (isZero(Op1) || isZero(Op2))) { 9695 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(), 9696 DAG.getConstant(X86::COND_B, MVT::i8), Cond); 9697 if (isAllOnes(Op1) != (CondCode == X86::COND_B)) 9698 return DAG.getNOT(DL, Res, Res.getValueType()); 9699 return Res; 9700 } 9701 } 9702 9703 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate 9704 // widen the cmov and push the truncate through. This avoids introducing a new 9705 // branch during isel and doesn't add any extensions. 9706 if (Op.getValueType() == MVT::i8 && 9707 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) { 9708 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0); 9709 if (T1.getValueType() == T2.getValueType() && 9710 // Blacklist CopyFromReg to avoid partial register stalls. 9711 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){ 9712 SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue); 9713 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VTs, T2, T1, CC, Cond); 9714 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov); 9715 } 9716 } 9717 9718 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if 9719 // condition is true. 
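// (In other words the operands of the node are (false-value, true-value,
//  condition code, EFLAGS), which is why Op2 is placed before Op1 in the
//  operand array below.)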
9720 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue); 9721 SDValue Ops[] = { Op2, Op1, CC, Cond }; 9722 return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops, array_lengthof(Ops)); 9723} 9724 9725SDValue X86TargetLowering::LowerSIGN_EXTEND(SDValue Op, 9726 SelectionDAG &DAG) const { 9727 MVT VT = Op->getValueType(0).getSimpleVT(); 9728 SDValue In = Op->getOperand(0); 9729 MVT InVT = In.getValueType().getSimpleVT(); 9730 DebugLoc dl = Op->getDebugLoc(); 9731 9732 if ((VT != MVT::v4i64 || InVT != MVT::v4i32) && 9733 (VT != MVT::v8i32 || InVT != MVT::v8i16)) 9734 return SDValue(); 9735 9736 if (Subtarget->hasInt256()) 9737 return DAG.getNode(X86ISD::VSEXT_MOVL, dl, VT, In); 9738 9739 // Optimize vectors in AVX mode 9740 // Sign extend v8i16 to v8i32 and 9741 // v4i32 to v4i64 9742 // 9743 // Divide input vector into two parts 9744 // for v4i32 the shuffle mask will be { 0, 1, -1, -1} {2, 3, -1, -1} 9745 // use vpmovsx instruction to extend v4i32 -> v2i64; v8i16 -> v4i32 9746 // concat the vectors to original VT 9747 9748 unsigned NumElems = InVT.getVectorNumElements(); 9749 SDValue Undef = DAG.getUNDEF(InVT); 9750 9751 SmallVector<int,8> ShufMask1(NumElems, -1); 9752 for (unsigned i = 0; i != NumElems/2; ++i) 9753 ShufMask1[i] = i; 9754 9755 SDValue OpLo = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask1[0]); 9756 9757 SmallVector<int,8> ShufMask2(NumElems, -1); 9758 for (unsigned i = 0; i != NumElems/2; ++i) 9759 ShufMask2[i] = i + NumElems/2; 9760 9761 SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask2[0]); 9762 9763 MVT HalfVT = MVT::getVectorVT(VT.getScalarType(), 9764 VT.getVectorNumElements()/2); 9765 9766 OpLo = DAG.getNode(X86ISD::VSEXT_MOVL, dl, HalfVT, OpLo); 9767 OpHi = DAG.getNode(X86ISD::VSEXT_MOVL, dl, HalfVT, OpHi); 9768 9769 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi); 9770} 9771 9772// isAndOrOfSingleUseSetCCs - Return true if node is an ISD::AND or 9773// ISD::OR of two X86ISD::SETCC nodes each of which has no other use apart 9774// from the AND / OR. 9775static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) { 9776 Opc = Op.getOpcode(); 9777 if (Opc != ISD::OR && Opc != ISD::AND) 9778 return false; 9779 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC && 9780 Op.getOperand(0).hasOneUse() && 9781 Op.getOperand(1).getOpcode() == X86ISD::SETCC && 9782 Op.getOperand(1).hasOneUse()); 9783} 9784 9785// isXor1OfSetCC - Return true if node is an ISD::XOR of a X86ISD::SETCC and 9786// 1 and that the SETCC node has a single use. 9787static bool isXor1OfSetCC(SDValue Op) { 9788 if (Op.getOpcode() != ISD::XOR) 9789 return false; 9790 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 9791 if (N1C && N1C->getAPIntValue() == 1) { 9792 return Op.getOperand(0).getOpcode() == X86ISD::SETCC && 9793 Op.getOperand(0).hasOneUse(); 9794 } 9795 return false; 9796} 9797 9798SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { 9799 bool addTest = true; 9800 SDValue Chain = Op.getOperand(0); 9801 SDValue Cond = Op.getOperand(1); 9802 SDValue Dest = Op.getOperand(2); 9803 DebugLoc dl = Op.getDebugLoc(); 9804 SDValue CC; 9805 bool Inverted = false; 9806 9807 if (Cond.getOpcode() == ISD::SETCC) { 9808 // Check for setcc([su]{add,sub,mul}o == 0). 
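// (Such DAGs typically originate from IR along the lines of
//    %s  = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//    %ov = extractvalue { i32, i1 } %s, 1
//  followed by a branch on the overflow bit being zero; rather than
//  materializing that setcc we branch on the inverted overflow condition.)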
9809 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ && 9810 isa<ConstantSDNode>(Cond.getOperand(1)) && 9811 cast<ConstantSDNode>(Cond.getOperand(1))->isNullValue() && 9812 Cond.getOperand(0).getResNo() == 1 && 9813 (Cond.getOperand(0).getOpcode() == ISD::SADDO || 9814 Cond.getOperand(0).getOpcode() == ISD::UADDO || 9815 Cond.getOperand(0).getOpcode() == ISD::SSUBO || 9816 Cond.getOperand(0).getOpcode() == ISD::USUBO || 9817 Cond.getOperand(0).getOpcode() == ISD::SMULO || 9818 Cond.getOperand(0).getOpcode() == ISD::UMULO)) { 9819 Inverted = true; 9820 Cond = Cond.getOperand(0); 9821 } else { 9822 SDValue NewCond = LowerSETCC(Cond, DAG); 9823 if (NewCond.getNode()) 9824 Cond = NewCond; 9825 } 9826 } 9827#if 0 9828 // FIXME: LowerXALUO doesn't handle these!! 9829 else if (Cond.getOpcode() == X86ISD::ADD || 9830 Cond.getOpcode() == X86ISD::SUB || 9831 Cond.getOpcode() == X86ISD::SMUL || 9832 Cond.getOpcode() == X86ISD::UMUL) 9833 Cond = LowerXALUO(Cond, DAG); 9834#endif 9835 9836 // Look pass (and (setcc_carry (cmp ...)), 1). 9837 if (Cond.getOpcode() == ISD::AND && 9838 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) { 9839 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 9840 if (C && C->getAPIntValue() == 1) 9841 Cond = Cond.getOperand(0); 9842 } 9843 9844 // If condition flag is set by a X86ISD::CMP, then use it as the condition 9845 // setting operand in place of the X86ISD::SETCC. 9846 unsigned CondOpcode = Cond.getOpcode(); 9847 if (CondOpcode == X86ISD::SETCC || 9848 CondOpcode == X86ISD::SETCC_CARRY) { 9849 CC = Cond.getOperand(0); 9850 9851 SDValue Cmp = Cond.getOperand(1); 9852 unsigned Opc = Cmp.getOpcode(); 9853 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp?? 9854 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) { 9855 Cond = Cmp; 9856 addTest = false; 9857 } else { 9858 switch (cast<ConstantSDNode>(CC)->getZExtValue()) { 9859 default: break; 9860 case X86::COND_O: 9861 case X86::COND_B: 9862 // These can only come from an arithmetic instruction with overflow, 9863 // e.g. SADDO, UADDO. 
9864 Cond = Cond.getNode()->getOperand(1); 9865 addTest = false; 9866 break; 9867 } 9868 } 9869 } 9870 CondOpcode = Cond.getOpcode(); 9871 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO || 9872 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO || 9873 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) && 9874 Cond.getOperand(0).getValueType() != MVT::i8)) { 9875 SDValue LHS = Cond.getOperand(0); 9876 SDValue RHS = Cond.getOperand(1); 9877 unsigned X86Opcode; 9878 unsigned X86Cond; 9879 SDVTList VTs; 9880 switch (CondOpcode) { 9881 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break; 9882 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break; 9883 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break; 9884 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break; 9885 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break; 9886 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break; 9887 default: llvm_unreachable("unexpected overflowing operator"); 9888 } 9889 if (Inverted) 9890 X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond); 9891 if (CondOpcode == ISD::UMULO) 9892 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(), 9893 MVT::i32); 9894 else 9895 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32); 9896 9897 SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS); 9898 9899 if (CondOpcode == ISD::UMULO) 9900 Cond = X86Op.getValue(2); 9901 else 9902 Cond = X86Op.getValue(1); 9903 9904 CC = DAG.getConstant(X86Cond, MVT::i8); 9905 addTest = false; 9906 } else { 9907 unsigned CondOpc; 9908 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) { 9909 SDValue Cmp = Cond.getOperand(0).getOperand(1); 9910 if (CondOpc == ISD::OR) { 9911 // Also, recognize the pattern generated by an FCMP_UNE. We can emit 9912 // two branches instead of an explicit OR instruction with a 9913 // separate test. 9914 if (Cmp == Cond.getOperand(1).getOperand(1) && 9915 isX86LogicalCmp(Cmp)) { 9916 CC = Cond.getOperand(0).getOperand(0); 9917 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 9918 Chain, Dest, CC, Cmp); 9919 CC = Cond.getOperand(1).getOperand(0); 9920 Cond = Cmp; 9921 addTest = false; 9922 } 9923 } else { // ISD::AND 9924 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit 9925 // two branches instead of an explicit AND instruction with a 9926 // separate test. However, we only do this if this block doesn't 9927 // have a fall-through edge, because this requires an explicit 9928 // jmp when the condition is false. 9929 if (Cmp == Cond.getOperand(1).getOperand(1) && 9930 isX86LogicalCmp(Cmp) && 9931 Op.getNode()->hasOneUse()) { 9932 X86::CondCode CCode = 9933 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0); 9934 CCode = X86::GetOppositeBranchCondition(CCode); 9935 CC = DAG.getConstant(CCode, MVT::i8); 9936 SDNode *User = *Op.getNode()->use_begin(); 9937 // Look for an unconditional branch following this conditional branch. 9938 // We need this because we need to reverse the successors in order 9939 // to implement FCMP_OEQ. 
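// (For an OEQ-style compare this ends up as two conditional jumps to the
//  false block, one on the "not equal" condition and one on "parity", with
//  the true block as the fall-through, instead of AND'ing two setcc results.)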
9940 if (User->getOpcode() == ISD::BR) { 9941 SDValue FalseBB = User->getOperand(1); 9942 SDNode *NewBR = 9943 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest); 9944 assert(NewBR == User); 9945 (void)NewBR; 9946 Dest = FalseBB; 9947 9948 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 9949 Chain, Dest, CC, Cmp); 9950 X86::CondCode CCode = 9951 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0); 9952 CCode = X86::GetOppositeBranchCondition(CCode); 9953 CC = DAG.getConstant(CCode, MVT::i8); 9954 Cond = Cmp; 9955 addTest = false; 9956 } 9957 } 9958 } 9959 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) { 9960 // Recognize for xorb (setcc), 1 patterns. The xor inverts the condition. 9961 // It should be transformed during dag combiner except when the condition 9962 // is set by a arithmetics with overflow node. 9963 X86::CondCode CCode = 9964 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0); 9965 CCode = X86::GetOppositeBranchCondition(CCode); 9966 CC = DAG.getConstant(CCode, MVT::i8); 9967 Cond = Cond.getOperand(0).getOperand(1); 9968 addTest = false; 9969 } else if (Cond.getOpcode() == ISD::SETCC && 9970 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) { 9971 // For FCMP_OEQ, we can emit 9972 // two branches instead of an explicit AND instruction with a 9973 // separate test. However, we only do this if this block doesn't 9974 // have a fall-through edge, because this requires an explicit 9975 // jmp when the condition is false. 9976 if (Op.getNode()->hasOneUse()) { 9977 SDNode *User = *Op.getNode()->use_begin(); 9978 // Look for an unconditional branch following this conditional branch. 9979 // We need this because we need to reverse the successors in order 9980 // to implement FCMP_OEQ. 9981 if (User->getOpcode() == ISD::BR) { 9982 SDValue FalseBB = User->getOperand(1); 9983 SDNode *NewBR = 9984 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest); 9985 assert(NewBR == User); 9986 (void)NewBR; 9987 Dest = FalseBB; 9988 9989 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32, 9990 Cond.getOperand(0), Cond.getOperand(1)); 9991 Cmp = ConvertCmpIfNecessary(Cmp, DAG); 9992 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 9993 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 9994 Chain, Dest, CC, Cmp); 9995 CC = DAG.getConstant(X86::COND_P, MVT::i8); 9996 Cond = Cmp; 9997 addTest = false; 9998 } 9999 } 10000 } else if (Cond.getOpcode() == ISD::SETCC && 10001 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) { 10002 // For FCMP_UNE, we can emit 10003 // two branches instead of an explicit AND instruction with a 10004 // separate test. However, we only do this if this block doesn't 10005 // have a fall-through edge, because this requires an explicit 10006 // jmp when the condition is false. 10007 if (Op.getNode()->hasOneUse()) { 10008 SDNode *User = *Op.getNode()->use_begin(); 10009 // Look for an unconditional branch following this conditional branch. 10010 // We need this because we need to reverse the successors in order 10011 // to implement FCMP_UNE. 
10012 if (User->getOpcode() == ISD::BR) { 10013 SDValue FalseBB = User->getOperand(1); 10014 SDNode *NewBR = 10015 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest); 10016 assert(NewBR == User); 10017 (void)NewBR; 10018 10019 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32, 10020 Cond.getOperand(0), Cond.getOperand(1)); 10021 Cmp = ConvertCmpIfNecessary(Cmp, DAG); 10022 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 10023 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 10024 Chain, Dest, CC, Cmp); 10025 CC = DAG.getConstant(X86::COND_NP, MVT::i8); 10026 Cond = Cmp; 10027 addTest = false; 10028 Dest = FalseBB; 10029 } 10030 } 10031 } 10032 } 10033 10034 if (addTest) { 10035 // Look pass the truncate if the high bits are known zero. 10036 if (isTruncWithZeroHighBitsInput(Cond, DAG)) 10037 Cond = Cond.getOperand(0); 10038 10039 // We know the result of AND is compared against zero. Try to match 10040 // it to BT. 10041 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) { 10042 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG); 10043 if (NewSetCC.getNode()) { 10044 CC = NewSetCC.getOperand(0); 10045 Cond = NewSetCC.getOperand(1); 10046 addTest = false; 10047 } 10048 } 10049 } 10050 10051 if (addTest) { 10052 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 10053 Cond = EmitTest(Cond, X86::COND_NE, DAG); 10054 } 10055 Cond = ConvertCmpIfNecessary(Cond, DAG); 10056 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 10057 Chain, Dest, CC, Cond); 10058} 10059 10060// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets. 10061// Calls to _alloca is needed to probe the stack when allocating more than 4k 10062// bytes in one go. Touching the stack at 4K increments is necessary to ensure 10063// that the guard pages used by the OS virtual memory manager are allocated in 10064// correct sequence. 10065SDValue 10066X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, 10067 SelectionDAG &DAG) const { 10068 assert((Subtarget->isTargetCygMing() || Subtarget->isTargetWindows() || 10069 getTargetMachine().Options.EnableSegmentedStacks) && 10070 "This should be used only on Windows targets or when segmented stacks " 10071 "are being used"); 10072 assert(!Subtarget->isTargetEnvMacho() && "Not implemented"); 10073 DebugLoc dl = Op.getDebugLoc(); 10074 10075 // Get the inputs. 10076 SDValue Chain = Op.getOperand(0); 10077 SDValue Size = Op.getOperand(1); 10078 // FIXME: Ensure alignment here 10079 10080 bool Is64Bit = Subtarget->is64Bit(); 10081 EVT SPTy = Is64Bit ? MVT::i64 : MVT::i32; 10082 10083 if (getTargetMachine().Options.EnableSegmentedStacks) { 10084 MachineFunction &MF = DAG.getMachineFunction(); 10085 MachineRegisterInfo &MRI = MF.getRegInfo(); 10086 10087 if (Is64Bit) { 10088 // The 64 bit implementation of segmented stacks needs to clobber both r10 10089 // r11. This makes it impossible to use it along with nested parameters. 10090 const Function *F = MF.getFunction(); 10091 10092 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end(); 10093 I != E; ++I) 10094 if (I->hasNestAttr()) 10095 report_fatal_error("Cannot use segmented stacks with functions that " 10096 "have nested arguments."); 10097 } 10098 10099 const TargetRegisterClass *AddrRegClass = 10100 getRegClassFor(Subtarget->is64Bit() ? 
MVT::i64:MVT::i32); 10101 unsigned Vreg = MRI.createVirtualRegister(AddrRegClass); 10102 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size); 10103 SDValue Value = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain, 10104 DAG.getRegister(Vreg, SPTy)); 10105 SDValue Ops1[2] = { Value, Chain }; 10106 return DAG.getMergeValues(Ops1, 2, dl); 10107 } else { 10108 SDValue Flag; 10109 unsigned Reg = (Subtarget->is64Bit() ? X86::RAX : X86::EAX); 10110 10111 Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag); 10112 Flag = Chain.getValue(1); 10113 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 10114 10115 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag); 10116 Flag = Chain.getValue(1); 10117 10118 Chain = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(), 10119 SPTy).getValue(1); 10120 10121 SDValue Ops1[2] = { Chain.getValue(0), Chain }; 10122 return DAG.getMergeValues(Ops1, 2, dl); 10123 } 10124} 10125 10126SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { 10127 MachineFunction &MF = DAG.getMachineFunction(); 10128 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 10129 10130 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 10131 DebugLoc DL = Op.getDebugLoc(); 10132 10133 if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) { 10134 // vastart just stores the address of the VarArgsFrameIndex slot into the 10135 // memory location argument. 10136 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 10137 getPointerTy()); 10138 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1), 10139 MachinePointerInfo(SV), false, false, 0); 10140 } 10141 10142 // __va_list_tag: 10143 // gp_offset (0 - 6 * 8) 10144 // fp_offset (48 - 48 + 8 * 16) 10145 // overflow_arg_area (point to parameters coming in memory). 10146 // reg_save_area 10147 SmallVector<SDValue, 8> MemOps; 10148 SDValue FIN = Op.getOperand(1); 10149 // Store gp_offset 10150 SDValue Store = DAG.getStore(Op.getOperand(0), DL, 10151 DAG.getConstant(FuncInfo->getVarArgsGPOffset(), 10152 MVT::i32), 10153 FIN, MachinePointerInfo(SV), false, false, 0); 10154 MemOps.push_back(Store); 10155 10156 // Store fp_offset 10157 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), 10158 FIN, DAG.getIntPtrConstant(4)); 10159 Store = DAG.getStore(Op.getOperand(0), DL, 10160 DAG.getConstant(FuncInfo->getVarArgsFPOffset(), 10161 MVT::i32), 10162 FIN, MachinePointerInfo(SV, 4), false, false, 0); 10163 MemOps.push_back(Store); 10164 10165 // Store ptr to overflow_arg_area 10166 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), 10167 FIN, DAG.getIntPtrConstant(4)); 10168 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 10169 getPointerTy()); 10170 Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, 10171 MachinePointerInfo(SV, 8), 10172 false, false, 0); 10173 MemOps.push_back(Store); 10174 10175 // Store ptr to reg_save_area. 
10176 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), 10177 FIN, DAG.getIntPtrConstant(8)); 10178 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), 10179 getPointerTy()); 10180 Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN, 10181 MachinePointerInfo(SV, 16), false, false, 0); 10182 MemOps.push_back(Store); 10183 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, 10184 &MemOps[0], MemOps.size()); 10185} 10186 10187SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { 10188 assert(Subtarget->is64Bit() && 10189 "LowerVAARG only handles 64-bit va_arg!"); 10190 assert((Subtarget->isTargetLinux() || 10191 Subtarget->isTargetDarwin()) && 10192 "Unhandled target in LowerVAARG"); 10193 assert(Op.getNode()->getNumOperands() == 4); 10194 SDValue Chain = Op.getOperand(0); 10195 SDValue SrcPtr = Op.getOperand(1); 10196 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 10197 unsigned Align = Op.getConstantOperandVal(3); 10198 DebugLoc dl = Op.getDebugLoc(); 10199 10200 EVT ArgVT = Op.getNode()->getValueType(0); 10201 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 10202 uint32_t ArgSize = getDataLayout()->getTypeAllocSize(ArgTy); 10203 uint8_t ArgMode; 10204 10205 // Decide which area this value should be read from. 10206 // TODO: Implement the AMD64 ABI in its entirety. This simple 10207 // selection mechanism works only for the basic types. 10208 if (ArgVT == MVT::f80) { 10209 llvm_unreachable("va_arg for f80 not yet implemented"); 10210 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) { 10211 ArgMode = 2; // Argument passed in XMM register. Use fp_offset. 10212 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) { 10213 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset. 10214 } else { 10215 llvm_unreachable("Unhandled argument type in LowerVAARG"); 10216 } 10217 10218 if (ArgMode == 2) { 10219 // Sanity Check: Make sure using fp_offset makes sense. 10220 assert(!getTargetMachine().Options.UseSoftFloat && 10221 !(DAG.getMachineFunction() 10222 .getFunction()->getAttributes() 10223 .hasAttribute(AttributeSet::FunctionIndex, 10224 Attribute::NoImplicitFloat)) && 10225 Subtarget->hasSSE1()); 10226 } 10227 10228 // Insert VAARG_64 node into the DAG 10229 // VAARG_64 returns two values: Variable Argument Address, Chain 10230 SmallVector<SDValue, 11> InstOps; 10231 InstOps.push_back(Chain); 10232 InstOps.push_back(SrcPtr); 10233 InstOps.push_back(DAG.getConstant(ArgSize, MVT::i32)); 10234 InstOps.push_back(DAG.getConstant(ArgMode, MVT::i8)); 10235 InstOps.push_back(DAG.getConstant(Align, MVT::i32)); 10236 SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other); 10237 SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl, 10238 VTs, &InstOps[0], InstOps.size(), 10239 MVT::i64, 10240 MachinePointerInfo(SV), 10241 /*Align=*/0, 10242 /*Volatile=*/false, 10243 /*ReadMem=*/true, 10244 /*WriteMem=*/true); 10245 Chain = VAARG.getValue(1); 10246 10247 // Load the next argument and return it 10248 return DAG.getLoad(ArgVT, dl, 10249 Chain, 10250 VAARG, 10251 MachinePointerInfo(), 10252 false, false, false, 0); 10253} 10254 10255static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget, 10256 SelectionDAG &DAG) { 10257 // X86-64 va_list is a struct { i32, i32, i8*, i8* }. 
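// That layout occupies 4 + 4 + 8 + 8 = 24 bytes with 8-byte alignment, which
// is why the memcpy emitted below copies exactly 24 bytes with alignment 8.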
10258 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!"); 10259 SDValue Chain = Op.getOperand(0); 10260 SDValue DstPtr = Op.getOperand(1); 10261 SDValue SrcPtr = Op.getOperand(2); 10262 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); 10263 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 10264 DebugLoc DL = Op.getDebugLoc(); 10265 10266 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, 10267 DAG.getIntPtrConstant(24), 8, /*isVolatile*/false, 10268 false, 10269 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV)); 10270} 10271 10272// getTargetVShiftNode - Handle vector element shifts where the shift amount 10273// may or may not be a constant. Takes immediate version of shift as input. 10274static SDValue getTargetVShiftNode(unsigned Opc, DebugLoc dl, EVT VT, 10275 SDValue SrcOp, SDValue ShAmt, 10276 SelectionDAG &DAG) { 10277 assert(ShAmt.getValueType() == MVT::i32 && "ShAmt is not i32"); 10278 10279 if (isa<ConstantSDNode>(ShAmt)) { 10280 // Constant may be a TargetConstant. Use a regular constant. 10281 uint32_t ShiftAmt = cast<ConstantSDNode>(ShAmt)->getZExtValue(); 10282 switch (Opc) { 10283 default: llvm_unreachable("Unknown target vector shift node"); 10284 case X86ISD::VSHLI: 10285 case X86ISD::VSRLI: 10286 case X86ISD::VSRAI: 10287 return DAG.getNode(Opc, dl, VT, SrcOp, 10288 DAG.getConstant(ShiftAmt, MVT::i32)); 10289 } 10290 } 10291 10292 // Change opcode to non-immediate version 10293 switch (Opc) { 10294 default: llvm_unreachable("Unknown target vector shift node"); 10295 case X86ISD::VSHLI: Opc = X86ISD::VSHL; break; 10296 case X86ISD::VSRLI: Opc = X86ISD::VSRL; break; 10297 case X86ISD::VSRAI: Opc = X86ISD::VSRA; break; 10298 } 10299 10300 // Need to build a vector containing shift amount 10301 // Shift amount is 32-bits, but SSE instructions read 64-bit, so fill with 0 10302 SDValue ShOps[4]; 10303 ShOps[0] = ShAmt; 10304 ShOps[1] = DAG.getConstant(0, MVT::i32); 10305 ShOps[2] = ShOps[3] = DAG.getUNDEF(MVT::i32); 10306 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, &ShOps[0], 4); 10307 10308 // The return type has to be a 128-bit type with the same element 10309 // type as the input type. 10310 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 10311 EVT ShVT = MVT::getVectorVT(EltVT, 128/EltVT.getSizeInBits()); 10312 10313 ShAmt = DAG.getNode(ISD::BITCAST, dl, ShVT, ShAmt); 10314 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt); 10315} 10316 10317static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) { 10318 DebugLoc dl = Op.getDebugLoc(); 10319 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 10320 switch (IntNo) { 10321 default: return SDValue(); // Don't custom lower most intrinsics. 10322 // Comparison intrinsics. 
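// (Each of these intrinsics returns an i32 in IR, e.g. @llvm.x86.sse.comieq.ss,
//  so the lowering below builds a COMI/UCOMI node, reads the resulting flags
//  with an X86ISD::SETCC, and zero-extends the i8 setcc result back to i32.)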
10323 case Intrinsic::x86_sse_comieq_ss: 10324 case Intrinsic::x86_sse_comilt_ss: 10325 case Intrinsic::x86_sse_comile_ss: 10326 case Intrinsic::x86_sse_comigt_ss: 10327 case Intrinsic::x86_sse_comige_ss: 10328 case Intrinsic::x86_sse_comineq_ss: 10329 case Intrinsic::x86_sse_ucomieq_ss: 10330 case Intrinsic::x86_sse_ucomilt_ss: 10331 case Intrinsic::x86_sse_ucomile_ss: 10332 case Intrinsic::x86_sse_ucomigt_ss: 10333 case Intrinsic::x86_sse_ucomige_ss: 10334 case Intrinsic::x86_sse_ucomineq_ss: 10335 case Intrinsic::x86_sse2_comieq_sd: 10336 case Intrinsic::x86_sse2_comilt_sd: 10337 case Intrinsic::x86_sse2_comile_sd: 10338 case Intrinsic::x86_sse2_comigt_sd: 10339 case Intrinsic::x86_sse2_comige_sd: 10340 case Intrinsic::x86_sse2_comineq_sd: 10341 case Intrinsic::x86_sse2_ucomieq_sd: 10342 case Intrinsic::x86_sse2_ucomilt_sd: 10343 case Intrinsic::x86_sse2_ucomile_sd: 10344 case Intrinsic::x86_sse2_ucomigt_sd: 10345 case Intrinsic::x86_sse2_ucomige_sd: 10346 case Intrinsic::x86_sse2_ucomineq_sd: { 10347 unsigned Opc; 10348 ISD::CondCode CC; 10349 switch (IntNo) { 10350 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 10351 case Intrinsic::x86_sse_comieq_ss: 10352 case Intrinsic::x86_sse2_comieq_sd: 10353 Opc = X86ISD::COMI; 10354 CC = ISD::SETEQ; 10355 break; 10356 case Intrinsic::x86_sse_comilt_ss: 10357 case Intrinsic::x86_sse2_comilt_sd: 10358 Opc = X86ISD::COMI; 10359 CC = ISD::SETLT; 10360 break; 10361 case Intrinsic::x86_sse_comile_ss: 10362 case Intrinsic::x86_sse2_comile_sd: 10363 Opc = X86ISD::COMI; 10364 CC = ISD::SETLE; 10365 break; 10366 case Intrinsic::x86_sse_comigt_ss: 10367 case Intrinsic::x86_sse2_comigt_sd: 10368 Opc = X86ISD::COMI; 10369 CC = ISD::SETGT; 10370 break; 10371 case Intrinsic::x86_sse_comige_ss: 10372 case Intrinsic::x86_sse2_comige_sd: 10373 Opc = X86ISD::COMI; 10374 CC = ISD::SETGE; 10375 break; 10376 case Intrinsic::x86_sse_comineq_ss: 10377 case Intrinsic::x86_sse2_comineq_sd: 10378 Opc = X86ISD::COMI; 10379 CC = ISD::SETNE; 10380 break; 10381 case Intrinsic::x86_sse_ucomieq_ss: 10382 case Intrinsic::x86_sse2_ucomieq_sd: 10383 Opc = X86ISD::UCOMI; 10384 CC = ISD::SETEQ; 10385 break; 10386 case Intrinsic::x86_sse_ucomilt_ss: 10387 case Intrinsic::x86_sse2_ucomilt_sd: 10388 Opc = X86ISD::UCOMI; 10389 CC = ISD::SETLT; 10390 break; 10391 case Intrinsic::x86_sse_ucomile_ss: 10392 case Intrinsic::x86_sse2_ucomile_sd: 10393 Opc = X86ISD::UCOMI; 10394 CC = ISD::SETLE; 10395 break; 10396 case Intrinsic::x86_sse_ucomigt_ss: 10397 case Intrinsic::x86_sse2_ucomigt_sd: 10398 Opc = X86ISD::UCOMI; 10399 CC = ISD::SETGT; 10400 break; 10401 case Intrinsic::x86_sse_ucomige_ss: 10402 case Intrinsic::x86_sse2_ucomige_sd: 10403 Opc = X86ISD::UCOMI; 10404 CC = ISD::SETGE; 10405 break; 10406 case Intrinsic::x86_sse_ucomineq_ss: 10407 case Intrinsic::x86_sse2_ucomineq_sd: 10408 Opc = X86ISD::UCOMI; 10409 CC = ISD::SETNE; 10410 break; 10411 } 10412 10413 SDValue LHS = Op.getOperand(1); 10414 SDValue RHS = Op.getOperand(2); 10415 unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG); 10416 assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!"); 10417 SDValue Cond = DAG.getNode(Opc, dl, MVT::i32, LHS, RHS); 10418 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 10419 DAG.getConstant(X86CC, MVT::i8), Cond); 10420 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); 10421 } 10422 10423 // Arithmetic intrinsics. 
10424 case Intrinsic::x86_sse2_pmulu_dq: 10425 case Intrinsic::x86_avx2_pmulu_dq: 10426 return DAG.getNode(X86ISD::PMULUDQ, dl, Op.getValueType(), 10427 Op.getOperand(1), Op.getOperand(2)); 10428 10429 // SSE2/AVX2 sub with unsigned saturation intrinsics 10430 case Intrinsic::x86_sse2_psubus_b: 10431 case Intrinsic::x86_sse2_psubus_w: 10432 case Intrinsic::x86_avx2_psubus_b: 10433 case Intrinsic::x86_avx2_psubus_w: 10434 return DAG.getNode(X86ISD::SUBUS, dl, Op.getValueType(), 10435 Op.getOperand(1), Op.getOperand(2)); 10436 10437 // SSE3/AVX horizontal add/sub intrinsics 10438 case Intrinsic::x86_sse3_hadd_ps: 10439 case Intrinsic::x86_sse3_hadd_pd: 10440 case Intrinsic::x86_avx_hadd_ps_256: 10441 case Intrinsic::x86_avx_hadd_pd_256: 10442 case Intrinsic::x86_sse3_hsub_ps: 10443 case Intrinsic::x86_sse3_hsub_pd: 10444 case Intrinsic::x86_avx_hsub_ps_256: 10445 case Intrinsic::x86_avx_hsub_pd_256: 10446 case Intrinsic::x86_ssse3_phadd_w_128: 10447 case Intrinsic::x86_ssse3_phadd_d_128: 10448 case Intrinsic::x86_avx2_phadd_w: 10449 case Intrinsic::x86_avx2_phadd_d: 10450 case Intrinsic::x86_ssse3_phsub_w_128: 10451 case Intrinsic::x86_ssse3_phsub_d_128: 10452 case Intrinsic::x86_avx2_phsub_w: 10453 case Intrinsic::x86_avx2_phsub_d: { 10454 unsigned Opcode; 10455 switch (IntNo) { 10456 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 10457 case Intrinsic::x86_sse3_hadd_ps: 10458 case Intrinsic::x86_sse3_hadd_pd: 10459 case Intrinsic::x86_avx_hadd_ps_256: 10460 case Intrinsic::x86_avx_hadd_pd_256: 10461 Opcode = X86ISD::FHADD; 10462 break; 10463 case Intrinsic::x86_sse3_hsub_ps: 10464 case Intrinsic::x86_sse3_hsub_pd: 10465 case Intrinsic::x86_avx_hsub_ps_256: 10466 case Intrinsic::x86_avx_hsub_pd_256: 10467 Opcode = X86ISD::FHSUB; 10468 break; 10469 case Intrinsic::x86_ssse3_phadd_w_128: 10470 case Intrinsic::x86_ssse3_phadd_d_128: 10471 case Intrinsic::x86_avx2_phadd_w: 10472 case Intrinsic::x86_avx2_phadd_d: 10473 Opcode = X86ISD::HADD; 10474 break; 10475 case Intrinsic::x86_ssse3_phsub_w_128: 10476 case Intrinsic::x86_ssse3_phsub_d_128: 10477 case Intrinsic::x86_avx2_phsub_w: 10478 case Intrinsic::x86_avx2_phsub_d: 10479 Opcode = X86ISD::HSUB; 10480 break; 10481 } 10482 return DAG.getNode(Opcode, dl, Op.getValueType(), 10483 Op.getOperand(1), Op.getOperand(2)); 10484 } 10485 10486 // SSE2/SSE41/AVX2 integer max/min intrinsics. 10487 case Intrinsic::x86_sse2_pmaxu_b: 10488 case Intrinsic::x86_sse41_pmaxuw: 10489 case Intrinsic::x86_sse41_pmaxud: 10490 case Intrinsic::x86_avx2_pmaxu_b: 10491 case Intrinsic::x86_avx2_pmaxu_w: 10492 case Intrinsic::x86_avx2_pmaxu_d: 10493 case Intrinsic::x86_sse2_pminu_b: 10494 case Intrinsic::x86_sse41_pminuw: 10495 case Intrinsic::x86_sse41_pminud: 10496 case Intrinsic::x86_avx2_pminu_b: 10497 case Intrinsic::x86_avx2_pminu_w: 10498 case Intrinsic::x86_avx2_pminu_d: 10499 case Intrinsic::x86_sse41_pmaxsb: 10500 case Intrinsic::x86_sse2_pmaxs_w: 10501 case Intrinsic::x86_sse41_pmaxsd: 10502 case Intrinsic::x86_avx2_pmaxs_b: 10503 case Intrinsic::x86_avx2_pmaxs_w: 10504 case Intrinsic::x86_avx2_pmaxs_d: 10505 case Intrinsic::x86_sse41_pminsb: 10506 case Intrinsic::x86_sse2_pmins_w: 10507 case Intrinsic::x86_sse41_pminsd: 10508 case Intrinsic::x86_avx2_pmins_b: 10509 case Intrinsic::x86_avx2_pmins_w: 10510 case Intrinsic::x86_avx2_pmins_d: { 10511 unsigned Opcode; 10512 switch (IntNo) { 10513 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
10514 case Intrinsic::x86_sse2_pmaxu_b: 10515 case Intrinsic::x86_sse41_pmaxuw: 10516 case Intrinsic::x86_sse41_pmaxud: 10517 case Intrinsic::x86_avx2_pmaxu_b: 10518 case Intrinsic::x86_avx2_pmaxu_w: 10519 case Intrinsic::x86_avx2_pmaxu_d: 10520 Opcode = X86ISD::UMAX; 10521 break; 10522 case Intrinsic::x86_sse2_pminu_b: 10523 case Intrinsic::x86_sse41_pminuw: 10524 case Intrinsic::x86_sse41_pminud: 10525 case Intrinsic::x86_avx2_pminu_b: 10526 case Intrinsic::x86_avx2_pminu_w: 10527 case Intrinsic::x86_avx2_pminu_d: 10528 Opcode = X86ISD::UMIN; 10529 break; 10530 case Intrinsic::x86_sse41_pmaxsb: 10531 case Intrinsic::x86_sse2_pmaxs_w: 10532 case Intrinsic::x86_sse41_pmaxsd: 10533 case Intrinsic::x86_avx2_pmaxs_b: 10534 case Intrinsic::x86_avx2_pmaxs_w: 10535 case Intrinsic::x86_avx2_pmaxs_d: 10536 Opcode = X86ISD::SMAX; 10537 break; 10538 case Intrinsic::x86_sse41_pminsb: 10539 case Intrinsic::x86_sse2_pmins_w: 10540 case Intrinsic::x86_sse41_pminsd: 10541 case Intrinsic::x86_avx2_pmins_b: 10542 case Intrinsic::x86_avx2_pmins_w: 10543 case Intrinsic::x86_avx2_pmins_d: 10544 Opcode = X86ISD::SMIN; 10545 break; 10546 } 10547 return DAG.getNode(Opcode, dl, Op.getValueType(), 10548 Op.getOperand(1), Op.getOperand(2)); 10549 } 10550 10551 // SSE/SSE2/AVX floating point max/min intrinsics. 10552 case Intrinsic::x86_sse_max_ps: 10553 case Intrinsic::x86_sse2_max_pd: 10554 case Intrinsic::x86_avx_max_ps_256: 10555 case Intrinsic::x86_avx_max_pd_256: 10556 case Intrinsic::x86_sse_min_ps: 10557 case Intrinsic::x86_sse2_min_pd: 10558 case Intrinsic::x86_avx_min_ps_256: 10559 case Intrinsic::x86_avx_min_pd_256: { 10560 unsigned Opcode; 10561 switch (IntNo) { 10562 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 10563 case Intrinsic::x86_sse_max_ps: 10564 case Intrinsic::x86_sse2_max_pd: 10565 case Intrinsic::x86_avx_max_ps_256: 10566 case Intrinsic::x86_avx_max_pd_256: 10567 Opcode = X86ISD::FMAX; 10568 break; 10569 case Intrinsic::x86_sse_min_ps: 10570 case Intrinsic::x86_sse2_min_pd: 10571 case Intrinsic::x86_avx_min_ps_256: 10572 case Intrinsic::x86_avx_min_pd_256: 10573 Opcode = X86ISD::FMIN; 10574 break; 10575 } 10576 return DAG.getNode(Opcode, dl, Op.getValueType(), 10577 Op.getOperand(1), Op.getOperand(2)); 10578 } 10579 10580 // AVX2 variable shift intrinsics 10581 case Intrinsic::x86_avx2_psllv_d: 10582 case Intrinsic::x86_avx2_psllv_q: 10583 case Intrinsic::x86_avx2_psllv_d_256: 10584 case Intrinsic::x86_avx2_psllv_q_256: 10585 case Intrinsic::x86_avx2_psrlv_d: 10586 case Intrinsic::x86_avx2_psrlv_q: 10587 case Intrinsic::x86_avx2_psrlv_d_256: 10588 case Intrinsic::x86_avx2_psrlv_q_256: 10589 case Intrinsic::x86_avx2_psrav_d: 10590 case Intrinsic::x86_avx2_psrav_d_256: { 10591 unsigned Opcode; 10592 switch (IntNo) { 10593 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
10594 case Intrinsic::x86_avx2_psllv_d:
10595 case Intrinsic::x86_avx2_psllv_q:
10596 case Intrinsic::x86_avx2_psllv_d_256:
10597 case Intrinsic::x86_avx2_psllv_q_256:
10598 Opcode = ISD::SHL;
10599 break;
10600 case Intrinsic::x86_avx2_psrlv_d:
10601 case Intrinsic::x86_avx2_psrlv_q:
10602 case Intrinsic::x86_avx2_psrlv_d_256:
10603 case Intrinsic::x86_avx2_psrlv_q_256:
10604 Opcode = ISD::SRL;
10605 break;
10606 case Intrinsic::x86_avx2_psrav_d:
10607 case Intrinsic::x86_avx2_psrav_d_256:
10608 Opcode = ISD::SRA;
10609 break;
10610 }
10611 return DAG.getNode(Opcode, dl, Op.getValueType(),
10612 Op.getOperand(1), Op.getOperand(2));
10613 }
10614
10615 case Intrinsic::x86_ssse3_pshuf_b_128:
10616 case Intrinsic::x86_avx2_pshuf_b:
10617 return DAG.getNode(X86ISD::PSHUFB, dl, Op.getValueType(),
10618 Op.getOperand(1), Op.getOperand(2));
10619
10620 case Intrinsic::x86_ssse3_psign_b_128:
10621 case Intrinsic::x86_ssse3_psign_w_128:
10622 case Intrinsic::x86_ssse3_psign_d_128:
10623 case Intrinsic::x86_avx2_psign_b:
10624 case Intrinsic::x86_avx2_psign_w:
10625 case Intrinsic::x86_avx2_psign_d:
10626 return DAG.getNode(X86ISD::PSIGN, dl, Op.getValueType(),
10627 Op.getOperand(1), Op.getOperand(2));
10628
10629 case Intrinsic::x86_sse41_insertps:
10630 return DAG.getNode(X86ISD::INSERTPS, dl, Op.getValueType(),
10631 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
10632
10633 case Intrinsic::x86_avx_vperm2f128_ps_256:
10634 case Intrinsic::x86_avx_vperm2f128_pd_256:
10635 case Intrinsic::x86_avx_vperm2f128_si_256:
10636 case Intrinsic::x86_avx2_vperm2i128:
10637 return DAG.getNode(X86ISD::VPERM2X128, dl, Op.getValueType(),
10638 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
10639
10640 case Intrinsic::x86_avx2_permd:
10641 case Intrinsic::x86_avx2_permps:
10642 // Operands intentionally swapped. Mask is last operand to intrinsic,
10643 // but second operand for node/instruction.
10644 return DAG.getNode(X86ISD::VPERMV, dl, Op.getValueType(),
10645 Op.getOperand(2), Op.getOperand(1));
10646
10647 case Intrinsic::x86_sse_sqrt_ps:
10648 case Intrinsic::x86_sse2_sqrt_pd:
10649 case Intrinsic::x86_avx_sqrt_ps_256:
10650 case Intrinsic::x86_avx_sqrt_pd_256:
10651 return DAG.getNode(ISD::FSQRT, dl, Op.getValueType(), Op.getOperand(1));
10652
10653 // ptest and testp intrinsics. The intrinsics these come from are designed to
10654 // return an integer value, not just an instruction, so lower them to the ptest
10655 // or testp pattern and a setcc for the result.
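// (For example @llvm.x86.sse41.ptestz becomes an X86ISD::PTEST node feeding an
//  X86ISD::SETCC on COND_E, i.e. "ZF == 1", which is then zero-extended to the
//  i32 value the intrinsic is defined to return.)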
10656 case Intrinsic::x86_sse41_ptestz: 10657 case Intrinsic::x86_sse41_ptestc: 10658 case Intrinsic::x86_sse41_ptestnzc: 10659 case Intrinsic::x86_avx_ptestz_256: 10660 case Intrinsic::x86_avx_ptestc_256: 10661 case Intrinsic::x86_avx_ptestnzc_256: 10662 case Intrinsic::x86_avx_vtestz_ps: 10663 case Intrinsic::x86_avx_vtestc_ps: 10664 case Intrinsic::x86_avx_vtestnzc_ps: 10665 case Intrinsic::x86_avx_vtestz_pd: 10666 case Intrinsic::x86_avx_vtestc_pd: 10667 case Intrinsic::x86_avx_vtestnzc_pd: 10668 case Intrinsic::x86_avx_vtestz_ps_256: 10669 case Intrinsic::x86_avx_vtestc_ps_256: 10670 case Intrinsic::x86_avx_vtestnzc_ps_256: 10671 case Intrinsic::x86_avx_vtestz_pd_256: 10672 case Intrinsic::x86_avx_vtestc_pd_256: 10673 case Intrinsic::x86_avx_vtestnzc_pd_256: { 10674 bool IsTestPacked = false; 10675 unsigned X86CC; 10676 switch (IntNo) { 10677 default: llvm_unreachable("Bad fallthrough in Intrinsic lowering."); 10678 case Intrinsic::x86_avx_vtestz_ps: 10679 case Intrinsic::x86_avx_vtestz_pd: 10680 case Intrinsic::x86_avx_vtestz_ps_256: 10681 case Intrinsic::x86_avx_vtestz_pd_256: 10682 IsTestPacked = true; // Fallthrough 10683 case Intrinsic::x86_sse41_ptestz: 10684 case Intrinsic::x86_avx_ptestz_256: 10685 // ZF = 1 10686 X86CC = X86::COND_E; 10687 break; 10688 case Intrinsic::x86_avx_vtestc_ps: 10689 case Intrinsic::x86_avx_vtestc_pd: 10690 case Intrinsic::x86_avx_vtestc_ps_256: 10691 case Intrinsic::x86_avx_vtestc_pd_256: 10692 IsTestPacked = true; // Fallthrough 10693 case Intrinsic::x86_sse41_ptestc: 10694 case Intrinsic::x86_avx_ptestc_256: 10695 // CF = 1 10696 X86CC = X86::COND_B; 10697 break; 10698 case Intrinsic::x86_avx_vtestnzc_ps: 10699 case Intrinsic::x86_avx_vtestnzc_pd: 10700 case Intrinsic::x86_avx_vtestnzc_ps_256: 10701 case Intrinsic::x86_avx_vtestnzc_pd_256: 10702 IsTestPacked = true; // Fallthrough 10703 case Intrinsic::x86_sse41_ptestnzc: 10704 case Intrinsic::x86_avx_ptestnzc_256: 10705 // ZF and CF = 0 10706 X86CC = X86::COND_A; 10707 break; 10708 } 10709 10710 SDValue LHS = Op.getOperand(1); 10711 SDValue RHS = Op.getOperand(2); 10712 unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST; 10713 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS); 10714 SDValue CC = DAG.getConstant(X86CC, MVT::i8); 10715 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test); 10716 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); 10717 } 10718 10719 // SSE/AVX shift intrinsics 10720 case Intrinsic::x86_sse2_psll_w: 10721 case Intrinsic::x86_sse2_psll_d: 10722 case Intrinsic::x86_sse2_psll_q: 10723 case Intrinsic::x86_avx2_psll_w: 10724 case Intrinsic::x86_avx2_psll_d: 10725 case Intrinsic::x86_avx2_psll_q: 10726 case Intrinsic::x86_sse2_psrl_w: 10727 case Intrinsic::x86_sse2_psrl_d: 10728 case Intrinsic::x86_sse2_psrl_q: 10729 case Intrinsic::x86_avx2_psrl_w: 10730 case Intrinsic::x86_avx2_psrl_d: 10731 case Intrinsic::x86_avx2_psrl_q: 10732 case Intrinsic::x86_sse2_psra_w: 10733 case Intrinsic::x86_sse2_psra_d: 10734 case Intrinsic::x86_avx2_psra_w: 10735 case Intrinsic::x86_avx2_psra_d: { 10736 unsigned Opcode; 10737 switch (IntNo) { 10738 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
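    // These are the "shift by a count held in an XMM register" forms: every
    // element is shifted by the scalar count in the low 64 bits of the second
    // operand, so they lower to X86ISD::VSHL/VSRL/VSRA rather than to the
    // per-lane ISD shift nodes.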
10739 case Intrinsic::x86_sse2_psll_w: 10740 case Intrinsic::x86_sse2_psll_d: 10741 case Intrinsic::x86_sse2_psll_q: 10742 case Intrinsic::x86_avx2_psll_w: 10743 case Intrinsic::x86_avx2_psll_d: 10744 case Intrinsic::x86_avx2_psll_q: 10745 Opcode = X86ISD::VSHL; 10746 break; 10747 case Intrinsic::x86_sse2_psrl_w: 10748 case Intrinsic::x86_sse2_psrl_d: 10749 case Intrinsic::x86_sse2_psrl_q: 10750 case Intrinsic::x86_avx2_psrl_w: 10751 case Intrinsic::x86_avx2_psrl_d: 10752 case Intrinsic::x86_avx2_psrl_q: 10753 Opcode = X86ISD::VSRL; 10754 break; 10755 case Intrinsic::x86_sse2_psra_w: 10756 case Intrinsic::x86_sse2_psra_d: 10757 case Intrinsic::x86_avx2_psra_w: 10758 case Intrinsic::x86_avx2_psra_d: 10759 Opcode = X86ISD::VSRA; 10760 break; 10761 } 10762 return DAG.getNode(Opcode, dl, Op.getValueType(), 10763 Op.getOperand(1), Op.getOperand(2)); 10764 } 10765 10766 // SSE/AVX immediate shift intrinsics 10767 case Intrinsic::x86_sse2_pslli_w: 10768 case Intrinsic::x86_sse2_pslli_d: 10769 case Intrinsic::x86_sse2_pslli_q: 10770 case Intrinsic::x86_avx2_pslli_w: 10771 case Intrinsic::x86_avx2_pslli_d: 10772 case Intrinsic::x86_avx2_pslli_q: 10773 case Intrinsic::x86_sse2_psrli_w: 10774 case Intrinsic::x86_sse2_psrli_d: 10775 case Intrinsic::x86_sse2_psrli_q: 10776 case Intrinsic::x86_avx2_psrli_w: 10777 case Intrinsic::x86_avx2_psrli_d: 10778 case Intrinsic::x86_avx2_psrli_q: 10779 case Intrinsic::x86_sse2_psrai_w: 10780 case Intrinsic::x86_sse2_psrai_d: 10781 case Intrinsic::x86_avx2_psrai_w: 10782 case Intrinsic::x86_avx2_psrai_d: { 10783 unsigned Opcode; 10784 switch (IntNo) { 10785 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 10786 case Intrinsic::x86_sse2_pslli_w: 10787 case Intrinsic::x86_sse2_pslli_d: 10788 case Intrinsic::x86_sse2_pslli_q: 10789 case Intrinsic::x86_avx2_pslli_w: 10790 case Intrinsic::x86_avx2_pslli_d: 10791 case Intrinsic::x86_avx2_pslli_q: 10792 Opcode = X86ISD::VSHLI; 10793 break; 10794 case Intrinsic::x86_sse2_psrli_w: 10795 case Intrinsic::x86_sse2_psrli_d: 10796 case Intrinsic::x86_sse2_psrli_q: 10797 case Intrinsic::x86_avx2_psrli_w: 10798 case Intrinsic::x86_avx2_psrli_d: 10799 case Intrinsic::x86_avx2_psrli_q: 10800 Opcode = X86ISD::VSRLI; 10801 break; 10802 case Intrinsic::x86_sse2_psrai_w: 10803 case Intrinsic::x86_sse2_psrai_d: 10804 case Intrinsic::x86_avx2_psrai_w: 10805 case Intrinsic::x86_avx2_psrai_d: 10806 Opcode = X86ISD::VSRAI; 10807 break; 10808 } 10809 return getTargetVShiftNode(Opcode, dl, Op.getValueType(), 10810 Op.getOperand(1), Op.getOperand(2), DAG); 10811 } 10812 10813 case Intrinsic::x86_sse42_pcmpistria128: 10814 case Intrinsic::x86_sse42_pcmpestria128: 10815 case Intrinsic::x86_sse42_pcmpistric128: 10816 case Intrinsic::x86_sse42_pcmpestric128: 10817 case Intrinsic::x86_sse42_pcmpistrio128: 10818 case Intrinsic::x86_sse42_pcmpestrio128: 10819 case Intrinsic::x86_sse42_pcmpistris128: 10820 case Intrinsic::x86_sse42_pcmpestris128: 10821 case Intrinsic::x86_sse42_pcmpistriz128: 10822 case Intrinsic::x86_sse42_pcmpestriz128: { 10823 unsigned Opcode; 10824 unsigned X86CC; 10825 switch (IntNo) { 10826 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
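    // Each of these string-compare intrinsics returns a predicate over the
    // EFLAGS produced by PCMPISTRI/PCMPESTRI, so lower to the corresponding
    // node and materialize the requested condition with a SETCC.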
10827 case Intrinsic::x86_sse42_pcmpistria128: 10828 Opcode = X86ISD::PCMPISTRI; 10829 X86CC = X86::COND_A; 10830 break; 10831 case Intrinsic::x86_sse42_pcmpestria128: 10832 Opcode = X86ISD::PCMPESTRI; 10833 X86CC = X86::COND_A; 10834 break; 10835 case Intrinsic::x86_sse42_pcmpistric128: 10836 Opcode = X86ISD::PCMPISTRI; 10837 X86CC = X86::COND_B; 10838 break; 10839 case Intrinsic::x86_sse42_pcmpestric128: 10840 Opcode = X86ISD::PCMPESTRI; 10841 X86CC = X86::COND_B; 10842 break; 10843 case Intrinsic::x86_sse42_pcmpistrio128: 10844 Opcode = X86ISD::PCMPISTRI; 10845 X86CC = X86::COND_O; 10846 break; 10847 case Intrinsic::x86_sse42_pcmpestrio128: 10848 Opcode = X86ISD::PCMPESTRI; 10849 X86CC = X86::COND_O; 10850 break; 10851 case Intrinsic::x86_sse42_pcmpistris128: 10852 Opcode = X86ISD::PCMPISTRI; 10853 X86CC = X86::COND_S; 10854 break; 10855 case Intrinsic::x86_sse42_pcmpestris128: 10856 Opcode = X86ISD::PCMPESTRI; 10857 X86CC = X86::COND_S; 10858 break; 10859 case Intrinsic::x86_sse42_pcmpistriz128: 10860 Opcode = X86ISD::PCMPISTRI; 10861 X86CC = X86::COND_E; 10862 break; 10863 case Intrinsic::x86_sse42_pcmpestriz128: 10864 Opcode = X86ISD::PCMPESTRI; 10865 X86CC = X86::COND_E; 10866 break; 10867 } 10868 SmallVector<SDValue, 5> NewOps; 10869 NewOps.append(Op->op_begin()+1, Op->op_end()); 10870 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); 10871 SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps.data(), NewOps.size()); 10872 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 10873 DAG.getConstant(X86CC, MVT::i8), 10874 SDValue(PCMP.getNode(), 1)); 10875 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); 10876 } 10877 10878 case Intrinsic::x86_sse42_pcmpistri128: 10879 case Intrinsic::x86_sse42_pcmpestri128: { 10880 unsigned Opcode; 10881 if (IntNo == Intrinsic::x86_sse42_pcmpistri128) 10882 Opcode = X86ISD::PCMPISTRI; 10883 else 10884 Opcode = X86ISD::PCMPESTRI; 10885 10886 SmallVector<SDValue, 5> NewOps; 10887 NewOps.append(Op->op_begin()+1, Op->op_end()); 10888 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); 10889 return DAG.getNode(Opcode, dl, VTs, NewOps.data(), NewOps.size()); 10890 } 10891 case Intrinsic::x86_fma_vfmadd_ps: 10892 case Intrinsic::x86_fma_vfmadd_pd: 10893 case Intrinsic::x86_fma_vfmsub_ps: 10894 case Intrinsic::x86_fma_vfmsub_pd: 10895 case Intrinsic::x86_fma_vfnmadd_ps: 10896 case Intrinsic::x86_fma_vfnmadd_pd: 10897 case Intrinsic::x86_fma_vfnmsub_ps: 10898 case Intrinsic::x86_fma_vfnmsub_pd: 10899 case Intrinsic::x86_fma_vfmaddsub_ps: 10900 case Intrinsic::x86_fma_vfmaddsub_pd: 10901 case Intrinsic::x86_fma_vfmsubadd_ps: 10902 case Intrinsic::x86_fma_vfmsubadd_pd: 10903 case Intrinsic::x86_fma_vfmadd_ps_256: 10904 case Intrinsic::x86_fma_vfmadd_pd_256: 10905 case Intrinsic::x86_fma_vfmsub_ps_256: 10906 case Intrinsic::x86_fma_vfmsub_pd_256: 10907 case Intrinsic::x86_fma_vfnmadd_ps_256: 10908 case Intrinsic::x86_fma_vfnmadd_pd_256: 10909 case Intrinsic::x86_fma_vfnmsub_ps_256: 10910 case Intrinsic::x86_fma_vfnmsub_pd_256: 10911 case Intrinsic::x86_fma_vfmaddsub_ps_256: 10912 case Intrinsic::x86_fma_vfmaddsub_pd_256: 10913 case Intrinsic::x86_fma_vfmsubadd_ps_256: 10914 case Intrinsic::x86_fma_vfmsubadd_pd_256: { 10915 unsigned Opc; 10916 switch (IntNo) { 10917 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
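    // The FMA intrinsics map one-to-one onto the FMA target nodes; only the
    // opcode differs, and the three operands are passed through unchanged.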
10918 case Intrinsic::x86_fma_vfmadd_ps: 10919 case Intrinsic::x86_fma_vfmadd_pd: 10920 case Intrinsic::x86_fma_vfmadd_ps_256: 10921 case Intrinsic::x86_fma_vfmadd_pd_256: 10922 Opc = X86ISD::FMADD; 10923 break; 10924 case Intrinsic::x86_fma_vfmsub_ps: 10925 case Intrinsic::x86_fma_vfmsub_pd: 10926 case Intrinsic::x86_fma_vfmsub_ps_256: 10927 case Intrinsic::x86_fma_vfmsub_pd_256: 10928 Opc = X86ISD::FMSUB; 10929 break; 10930 case Intrinsic::x86_fma_vfnmadd_ps: 10931 case Intrinsic::x86_fma_vfnmadd_pd: 10932 case Intrinsic::x86_fma_vfnmadd_ps_256: 10933 case Intrinsic::x86_fma_vfnmadd_pd_256: 10934 Opc = X86ISD::FNMADD; 10935 break; 10936 case Intrinsic::x86_fma_vfnmsub_ps: 10937 case Intrinsic::x86_fma_vfnmsub_pd: 10938 case Intrinsic::x86_fma_vfnmsub_ps_256: 10939 case Intrinsic::x86_fma_vfnmsub_pd_256: 10940 Opc = X86ISD::FNMSUB; 10941 break; 10942 case Intrinsic::x86_fma_vfmaddsub_ps: 10943 case Intrinsic::x86_fma_vfmaddsub_pd: 10944 case Intrinsic::x86_fma_vfmaddsub_ps_256: 10945 case Intrinsic::x86_fma_vfmaddsub_pd_256: 10946 Opc = X86ISD::FMADDSUB; 10947 break; 10948 case Intrinsic::x86_fma_vfmsubadd_ps: 10949 case Intrinsic::x86_fma_vfmsubadd_pd: 10950 case Intrinsic::x86_fma_vfmsubadd_ps_256: 10951 case Intrinsic::x86_fma_vfmsubadd_pd_256: 10952 Opc = X86ISD::FMSUBADD; 10953 break; 10954 } 10955 10956 return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1), 10957 Op.getOperand(2), Op.getOperand(3)); 10958 } 10959 } 10960} 10961 10962static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) { 10963 DebugLoc dl = Op.getDebugLoc(); 10964 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 10965 switch (IntNo) { 10966 default: return SDValue(); // Don't custom lower most intrinsics. 10967 10968 // RDRAND/RDSEED intrinsics. 10969 case Intrinsic::x86_rdrand_16: 10970 case Intrinsic::x86_rdrand_32: 10971 case Intrinsic::x86_rdrand_64: 10972 case Intrinsic::x86_rdseed_16: 10973 case Intrinsic::x86_rdseed_32: 10974 case Intrinsic::x86_rdseed_64: { 10975 unsigned Opcode = (IntNo == Intrinsic::x86_rdseed_16 || 10976 IntNo == Intrinsic::x86_rdseed_32 || 10977 IntNo == Intrinsic::x86_rdseed_64) ? X86ISD::RDSEED : 10978 X86ISD::RDRAND; 10979 // Emit the node with the right value type. 10980 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Glue, MVT::Other); 10981 SDValue Result = DAG.getNode(Opcode, dl, VTs, Op.getOperand(0)); 10982 10983 // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1. 10984 // Otherwise return the value from Rand, which is always 0, casted to i32. 10985 SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)), 10986 DAG.getConstant(1, Op->getValueType(1)), 10987 DAG.getConstant(X86::COND_B, MVT::i32), 10988 SDValue(Result.getNode(), 1) }; 10989 SDValue isValid = DAG.getNode(X86ISD::CMOV, dl, 10990 DAG.getVTList(Op->getValueType(1), MVT::Glue), 10991 Ops, array_lengthof(Ops)); 10992 10993 // Return { result, isValid, chain }. 10994 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid, 10995 SDValue(Result.getNode(), 2)); 10996 } 10997 10998 // XTEST intrinsics. 
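  // XTEST only sets EFLAGS (ZF is cleared while executing transactionally), so
  // the i32 result is produced with a SETCC on COND_NE and a zero extend.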
10999 case Intrinsic::x86_xtest: { 11000 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other); 11001 SDValue InTrans = DAG.getNode(X86ISD::XTEST, dl, VTs, Op.getOperand(0)); 11002 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 11003 DAG.getConstant(X86::COND_NE, MVT::i8), 11004 InTrans); 11005 SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC); 11006 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), 11007 Ret, SDValue(InTrans.getNode(), 1)); 11008 } 11009 } 11010} 11011 11012SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op, 11013 SelectionDAG &DAG) const { 11014 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 11015 MFI->setReturnAddressIsTaken(true); 11016 11017 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 11018 DebugLoc dl = Op.getDebugLoc(); 11019 EVT PtrVT = getPointerTy(); 11020 11021 if (Depth > 0) { 11022 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 11023 SDValue Offset = 11024 DAG.getConstant(RegInfo->getSlotSize(), PtrVT); 11025 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 11026 DAG.getNode(ISD::ADD, dl, PtrVT, 11027 FrameAddr, Offset), 11028 MachinePointerInfo(), false, false, false, 0); 11029 } 11030 11031 // Just load the return address. 11032 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG); 11033 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 11034 RetAddrFI, MachinePointerInfo(), false, false, false, 0); 11035} 11036 11037SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { 11038 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 11039 MFI->setFrameAddressIsTaken(true); 11040 11041 EVT VT = Op.getValueType(); 11042 DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful 11043 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 11044 unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction()); 11045 assert(((FrameReg == X86::RBP && VT == MVT::i64) || 11046 (FrameReg == X86::EBP && VT == MVT::i32)) && 11047 "Invalid Frame Register!"); 11048 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 11049 while (Depth--) 11050 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 11051 MachinePointerInfo(), 11052 false, false, false, 0); 11053 return FrameAddr; 11054} 11055 11056SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op, 11057 SelectionDAG &DAG) const { 11058 return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize()); 11059} 11060 11061SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { 11062 SDValue Chain = Op.getOperand(0); 11063 SDValue Offset = Op.getOperand(1); 11064 SDValue Handler = Op.getOperand(2); 11065 DebugLoc dl = Op.getDebugLoc(); 11066 11067 EVT PtrVT = getPointerTy(); 11068 unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction()); 11069 assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) || 11070 (FrameReg == X86::EBP && PtrVT == MVT::i32)) && 11071 "Invalid Frame Register!"); 11072 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT); 11073 unsigned StoreAddrReg = (PtrVT == MVT::i64) ? 
X86::RCX : X86::ECX; 11074 11075 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame, 11076 DAG.getIntPtrConstant(RegInfo->getSlotSize())); 11077 StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset); 11078 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(), 11079 false, false, 0); 11080 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr); 11081 11082 return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain, 11083 DAG.getRegister(StoreAddrReg, PtrVT)); 11084} 11085 11086SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op, 11087 SelectionDAG &DAG) const { 11088 DebugLoc DL = Op.getDebugLoc(); 11089 return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL, 11090 DAG.getVTList(MVT::i32, MVT::Other), 11091 Op.getOperand(0), Op.getOperand(1)); 11092} 11093 11094SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op, 11095 SelectionDAG &DAG) const { 11096 DebugLoc DL = Op.getDebugLoc(); 11097 return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other, 11098 Op.getOperand(0), Op.getOperand(1)); 11099} 11100 11101static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) { 11102 return Op.getOperand(0); 11103} 11104 11105SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, 11106 SelectionDAG &DAG) const { 11107 SDValue Root = Op.getOperand(0); 11108 SDValue Trmp = Op.getOperand(1); // trampoline 11109 SDValue FPtr = Op.getOperand(2); // nested function 11110 SDValue Nest = Op.getOperand(3); // 'nest' parameter value 11111 DebugLoc dl = Op.getDebugLoc(); 11112 11113 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 11114 const TargetRegisterInfo* TRI = getTargetMachine().getRegisterInfo(); 11115 11116 if (Subtarget->is64Bit()) { 11117 SDValue OutChains[6]; 11118 11119 // Large code-model. 11120 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode. 11121 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode. 11122 11123 const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7; 11124 const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7; 11125 11126 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix 11127 11128 // Load the pointer to the nested function into R11. 11129 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11 11130 SDValue Addr = Trmp; 11131 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), 11132 Addr, MachinePointerInfo(TrmpAddr), 11133 false, false, 0); 11134 11135 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 11136 DAG.getConstant(2, MVT::i64)); 11137 OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr, 11138 MachinePointerInfo(TrmpAddr, 2), 11139 false, false, 2); 11140 11141 // Load the 'nest' parameter value into R10. 11142 // R10 is specified in X86CallingConv.td 11143 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10 11144 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 11145 DAG.getConstant(10, MVT::i64)); 11146 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), 11147 Addr, MachinePointerInfo(TrmpAddr, 10), 11148 false, false, 0); 11149 11150 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 11151 DAG.getConstant(12, MVT::i64)); 11152 OutChains[3] = DAG.getStore(Root, dl, Nest, Addr, 11153 MachinePointerInfo(TrmpAddr, 12), 11154 false, false, 2); 11155 11156 // Jump to the nested function. 11157 OpCode = (JMP64r << 8) | REX_WB; // jmpq *... 
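    // Bytes 20-21 of the trampoline receive the REX.WB-prefixed jmp opcode and
    // byte 22 the ModRM byte encoding an indirect jump through r11.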
11158 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 11159 DAG.getConstant(20, MVT::i64)); 11160 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), 11161 Addr, MachinePointerInfo(TrmpAddr, 20), 11162 false, false, 0); 11163 11164 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11 11165 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 11166 DAG.getConstant(22, MVT::i64)); 11167 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr, 11168 MachinePointerInfo(TrmpAddr, 22), 11169 false, false, 0); 11170 11171 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 6); 11172 } else { 11173 const Function *Func = 11174 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue()); 11175 CallingConv::ID CC = Func->getCallingConv(); 11176 unsigned NestReg; 11177 11178 switch (CC) { 11179 default: 11180 llvm_unreachable("Unsupported calling convention"); 11181 case CallingConv::C: 11182 case CallingConv::X86_StdCall: { 11183 // Pass 'nest' parameter in ECX. 11184 // Must be kept in sync with X86CallingConv.td 11185 NestReg = X86::ECX; 11186 11187 // Check that ECX wasn't needed by an 'inreg' parameter. 11188 FunctionType *FTy = Func->getFunctionType(); 11189 const AttributeSet &Attrs = Func->getAttributes(); 11190 11191 if (!Attrs.isEmpty() && !Func->isVarArg()) { 11192 unsigned InRegCount = 0; 11193 unsigned Idx = 1; 11194 11195 for (FunctionType::param_iterator I = FTy->param_begin(), 11196 E = FTy->param_end(); I != E; ++I, ++Idx) 11197 if (Attrs.hasAttribute(Idx, Attribute::InReg)) 11198 // FIXME: should only count parameters that are lowered to integers. 11199 InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32; 11200 11201 if (InRegCount > 2) { 11202 report_fatal_error("Nest register in use - reduce number of inreg" 11203 " parameters!"); 11204 } 11205 } 11206 break; 11207 } 11208 case CallingConv::X86_FastCall: 11209 case CallingConv::X86_ThisCall: 11210 case CallingConv::Fast: 11211 // Pass 'nest' parameter in EAX. 11212 // Must be kept in sync with X86CallingConv.td 11213 NestReg = X86::EAX; 11214 break; 11215 } 11216 11217 SDValue OutChains[4]; 11218 SDValue Addr, Disp; 11219 11220 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 11221 DAG.getConstant(10, MVT::i32)); 11222 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr); 11223 11224 // This is storing the opcode for MOV32ri. 11225 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte. 11226 const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7; 11227 OutChains[0] = DAG.getStore(Root, dl, 11228 DAG.getConstant(MOV32ri|N86Reg, MVT::i8), 11229 Trmp, MachinePointerInfo(TrmpAddr), 11230 false, false, 0); 11231 11232 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 11233 DAG.getConstant(1, MVT::i32)); 11234 OutChains[1] = DAG.getStore(Root, dl, Nest, Addr, 11235 MachinePointerInfo(TrmpAddr, 1), 11236 false, false, 1); 11237 11238 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode. 
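    // 32-bit trampoline layout: mov $nest, %reg (opcode byte at offset 0,
    // imm32 at 1-4) followed by jmp rel32 (opcode at 5, displacement at 6-9),
    // with the displacement taken relative to the end of the 10-byte
    // trampoline.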
11239 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 11240 DAG.getConstant(5, MVT::i32)); 11241 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr, 11242 MachinePointerInfo(TrmpAddr, 5), 11243 false, false, 1); 11244 11245 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 11246 DAG.getConstant(6, MVT::i32)); 11247 OutChains[3] = DAG.getStore(Root, dl, Disp, Addr, 11248 MachinePointerInfo(TrmpAddr, 6), 11249 false, false, 1); 11250 11251 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 4); 11252 } 11253} 11254 11255SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op, 11256 SelectionDAG &DAG) const { 11257 /* 11258 The rounding mode is in bits 11:10 of FPSR, and has the following 11259 settings: 11260 00 Round to nearest 11261 01 Round to -inf 11262 10 Round to +inf 11263 11 Round to 0 11264 11265 FLT_ROUNDS, on the other hand, expects the following: 11266 -1 Undefined 11267 0 Round to 0 11268 1 Round to nearest 11269 2 Round to +inf 11270 3 Round to -inf 11271 11272 To perform the conversion, we do: 11273 (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3) 11274 */ 11275 11276 MachineFunction &MF = DAG.getMachineFunction(); 11277 const TargetMachine &TM = MF.getTarget(); 11278 const TargetFrameLowering &TFI = *TM.getFrameLowering(); 11279 unsigned StackAlignment = TFI.getStackAlignment(); 11280 EVT VT = Op.getValueType(); 11281 DebugLoc DL = Op.getDebugLoc(); 11282 11283 // Save FP Control Word to stack slot 11284 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false); 11285 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 11286 11287 MachineMemOperand *MMO = 11288 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 11289 MachineMemOperand::MOStore, 2, 2); 11290 11291 SDValue Ops[] = { DAG.getEntryNode(), StackSlot }; 11292 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL, 11293 DAG.getVTList(MVT::Other), 11294 Ops, array_lengthof(Ops), MVT::i16, 11295 MMO); 11296 11297 // Load FP Control Word from stack slot 11298 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot, 11299 MachinePointerInfo(), false, false, false, 0); 11300 11301 // Transform as necessary 11302 SDValue CWD1 = 11303 DAG.getNode(ISD::SRL, DL, MVT::i16, 11304 DAG.getNode(ISD::AND, DL, MVT::i16, 11305 CWD, DAG.getConstant(0x800, MVT::i16)), 11306 DAG.getConstant(11, MVT::i8)); 11307 SDValue CWD2 = 11308 DAG.getNode(ISD::SRL, DL, MVT::i16, 11309 DAG.getNode(ISD::AND, DL, MVT::i16, 11310 CWD, DAG.getConstant(0x400, MVT::i16)), 11311 DAG.getConstant(9, MVT::i8)); 11312 11313 SDValue RetVal = 11314 DAG.getNode(ISD::AND, DL, MVT::i16, 11315 DAG.getNode(ISD::ADD, DL, MVT::i16, 11316 DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2), 11317 DAG.getConstant(1, MVT::i16)), 11318 DAG.getConstant(3, MVT::i16)); 11319 11320 return DAG.getNode((VT.getSizeInBits() < 16 ? 11321 ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal); 11322} 11323 11324static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) { 11325 EVT VT = Op.getValueType(); 11326 EVT OpVT = VT; 11327 unsigned NumBits = VT.getSizeInBits(); 11328 DebugLoc dl = Op.getDebugLoc(); 11329 11330 Op = Op.getOperand(0); 11331 if (VT == MVT::i8) { 11332 // Zero extend to i32 since there is not an i8 bsr. 11333 OpVT = MVT::i32; 11334 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op); 11335 } 11336 11337 // Issue a bsr (scan bits in reverse) which also sets EFLAGS. 
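  // BSR returns the index of the highest set bit, so ctlz is bsr(x) xor
  // (NumBits-1). When the source is zero BSR leaves its result undefined and
  // sets ZF, so the CMOV below substitutes 2*NumBits-1, which the final XOR
  // folds to the required result of NumBits.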
11338 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 11339 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op); 11340 11341 // If src is zero (i.e. bsr sets ZF), returns NumBits. 11342 SDValue Ops[] = { 11343 Op, 11344 DAG.getConstant(NumBits+NumBits-1, OpVT), 11345 DAG.getConstant(X86::COND_E, MVT::i8), 11346 Op.getValue(1) 11347 }; 11348 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops, array_lengthof(Ops)); 11349 11350 // Finally xor with NumBits-1. 11351 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT)); 11352 11353 if (VT == MVT::i8) 11354 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op); 11355 return Op; 11356} 11357 11358static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) { 11359 EVT VT = Op.getValueType(); 11360 EVT OpVT = VT; 11361 unsigned NumBits = VT.getSizeInBits(); 11362 DebugLoc dl = Op.getDebugLoc(); 11363 11364 Op = Op.getOperand(0); 11365 if (VT == MVT::i8) { 11366 // Zero extend to i32 since there is not an i8 bsr. 11367 OpVT = MVT::i32; 11368 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op); 11369 } 11370 11371 // Issue a bsr (scan bits in reverse). 11372 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 11373 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op); 11374 11375 // And xor with NumBits-1. 11376 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT)); 11377 11378 if (VT == MVT::i8) 11379 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op); 11380 return Op; 11381} 11382 11383static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) { 11384 EVT VT = Op.getValueType(); 11385 unsigned NumBits = VT.getSizeInBits(); 11386 DebugLoc dl = Op.getDebugLoc(); 11387 Op = Op.getOperand(0); 11388 11389 // Issue a bsf (scan bits forward) which also sets EFLAGS. 11390 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 11391 Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op); 11392 11393 // If src is zero (i.e. bsf sets ZF), returns NumBits. 11394 SDValue Ops[] = { 11395 Op, 11396 DAG.getConstant(NumBits, VT), 11397 DAG.getConstant(X86::COND_E, MVT::i8), 11398 Op.getValue(1) 11399 }; 11400 return DAG.getNode(X86ISD::CMOV, dl, VT, Ops, array_lengthof(Ops)); 11401} 11402 11403// Lower256IntArith - Break a 256-bit integer operation into two new 128-bit 11404// ones, and then concatenate the result back. 
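// LowerADD, LowerSUB and LowerMUL below use this to split a 256-bit integer
// operation into two 128-bit halves when it cannot be performed natively.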
11405static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) { 11406 EVT VT = Op.getValueType(); 11407 11408 assert(VT.is256BitVector() && VT.isInteger() && 11409 "Unsupported value type for operation"); 11410 11411 unsigned NumElems = VT.getVectorNumElements(); 11412 DebugLoc dl = Op.getDebugLoc(); 11413 11414 // Extract the LHS vectors 11415 SDValue LHS = Op.getOperand(0); 11416 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl); 11417 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl); 11418 11419 // Extract the RHS vectors 11420 SDValue RHS = Op.getOperand(1); 11421 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl); 11422 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl); 11423 11424 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 11425 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); 11426 11427 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, 11428 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1), 11429 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2)); 11430} 11431 11432static SDValue LowerADD(SDValue Op, SelectionDAG &DAG) { 11433 assert(Op.getValueType().is256BitVector() && 11434 Op.getValueType().isInteger() && 11435 "Only handle AVX 256-bit vector integer operation"); 11436 return Lower256IntArith(Op, DAG); 11437} 11438 11439static SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) { 11440 assert(Op.getValueType().is256BitVector() && 11441 Op.getValueType().isInteger() && 11442 "Only handle AVX 256-bit vector integer operation"); 11443 return Lower256IntArith(Op, DAG); 11444} 11445 11446static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget, 11447 SelectionDAG &DAG) { 11448 DebugLoc dl = Op.getDebugLoc(); 11449 EVT VT = Op.getValueType(); 11450 11451 // Decompose 256-bit ops into smaller 128-bit ops. 11452 if (VT.is256BitVector() && !Subtarget->hasInt256()) 11453 return Lower256IntArith(Op, DAG); 11454 11455 SDValue A = Op.getOperand(0); 11456 SDValue B = Op.getOperand(1); 11457 11458 // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle. 11459 if (VT == MVT::v4i32) { 11460 assert(Subtarget->hasSSE2() && !Subtarget->hasSSE41() && 11461 "Should not custom lower when pmuldq is available!"); 11462 11463 // Extract the odd parts. 11464 const int UnpackMask[] = { 1, -1, 3, -1 }; 11465 SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask); 11466 SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask); 11467 11468 // Multiply the even parts. 11469 SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, A, B); 11470 // Now multiply odd parts. 11471 SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, Aodds, Bodds); 11472 11473 Evens = DAG.getNode(ISD::BITCAST, dl, VT, Evens); 11474 Odds = DAG.getNode(ISD::BITCAST, dl, VT, Odds); 11475 11476 // Merge the two vectors back together with a shuffle. This expands into 2 11477 // shuffles. 
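    // PMULUDQ produced the 64-bit products of lanes 0 and 2 in Evens and of
    // lanes 1 and 3 in Odds; after the bitcasts the low halves of those
    // products sit in even lanes, so { 0, 4, 2, 6 } interleaves them back into
    // the original lane order.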
11478 const int ShufMask[] = { 0, 4, 2, 6 }; 11479 return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask); 11480 } 11481 11482 assert((VT == MVT::v2i64 || VT == MVT::v4i64) && 11483 "Only know how to lower V2I64/V4I64 multiply"); 11484 11485 // Ahi = psrlqi(a, 32); 11486 // Bhi = psrlqi(b, 32); 11487 // 11488 // AloBlo = pmuludq(a, b); 11489 // AloBhi = pmuludq(a, Bhi); 11490 // AhiBlo = pmuludq(Ahi, b); 11491 11492 // AloBhi = psllqi(AloBhi, 32); 11493 // AhiBlo = psllqi(AhiBlo, 32); 11494 // return AloBlo + AloBhi + AhiBlo; 11495 11496 SDValue ShAmt = DAG.getConstant(32, MVT::i32); 11497 11498 SDValue Ahi = DAG.getNode(X86ISD::VSRLI, dl, VT, A, ShAmt); 11499 SDValue Bhi = DAG.getNode(X86ISD::VSRLI, dl, VT, B, ShAmt); 11500 11501 // Bit cast to 32-bit vectors for MULUDQ 11502 EVT MulVT = (VT == MVT::v2i64) ? MVT::v4i32 : MVT::v8i32; 11503 A = DAG.getNode(ISD::BITCAST, dl, MulVT, A); 11504 B = DAG.getNode(ISD::BITCAST, dl, MulVT, B); 11505 Ahi = DAG.getNode(ISD::BITCAST, dl, MulVT, Ahi); 11506 Bhi = DAG.getNode(ISD::BITCAST, dl, MulVT, Bhi); 11507 11508 SDValue AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B); 11509 SDValue AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi); 11510 SDValue AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B); 11511 11512 AloBhi = DAG.getNode(X86ISD::VSHLI, dl, VT, AloBhi, ShAmt); 11513 AhiBlo = DAG.getNode(X86ISD::VSHLI, dl, VT, AhiBlo, ShAmt); 11514 11515 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi); 11516 return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo); 11517} 11518 11519SDValue X86TargetLowering::LowerSDIV(SDValue Op, SelectionDAG &DAG) const { 11520 EVT VT = Op.getValueType(); 11521 EVT EltTy = VT.getVectorElementType(); 11522 unsigned NumElts = VT.getVectorNumElements(); 11523 SDValue N0 = Op.getOperand(0); 11524 DebugLoc dl = Op.getDebugLoc(); 11525 11526 // Lower sdiv X, pow2-const. 11527 BuildVectorSDNode *C = dyn_cast<BuildVectorSDNode>(Op.getOperand(1)); 11528 if (!C) 11529 return SDValue(); 11530 11531 APInt SplatValue, SplatUndef; 11532 unsigned MinSplatBits; 11533 bool HasAnyUndefs; 11534 if (!C->isConstantSplat(SplatValue, SplatUndef, MinSplatBits, HasAnyUndefs)) 11535 return SDValue(); 11536 11537 if ((SplatValue != 0) && 11538 (SplatValue.isPowerOf2() || (-SplatValue).isPowerOf2())) { 11539 unsigned lg2 = SplatValue.countTrailingZeros(); 11540 // Splat the sign bit. 11541 SDValue Sz = DAG.getConstant(EltTy.getSizeInBits()-1, MVT::i32); 11542 SDValue SGN = getTargetVShiftNode(X86ISD::VSRAI, dl, VT, N0, Sz, DAG); 11543 // Add (N0 < 0) ? abs2 - 1 : 0; 11544 SDValue Amt = DAG.getConstant(EltTy.getSizeInBits() - lg2, MVT::i32); 11545 SDValue SRL = getTargetVShiftNode(X86ISD::VSRLI, dl, VT, SGN, Amt, DAG); 11546 SDValue ADD = DAG.getNode(ISD::ADD, dl, VT, N0, SRL); 11547 SDValue Lg2Amt = DAG.getConstant(lg2, MVT::i32); 11548 SDValue SRA = getTargetVShiftNode(X86ISD::VSRAI, dl, VT, ADD, Lg2Amt, DAG); 11549 11550 // If we're dividing by a positive value, we're done. Otherwise, we must 11551 // negate the result. 
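    // SRA now holds N0 >> lg2 rounded toward zero: the bias added above is
    // (2^lg2 - 1) for negative elements and 0 otherwise, the usual correction
    // for signed division by a power of two.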
11552 if (SplatValue.isNonNegative()) 11553 return SRA; 11554 11555 SmallVector<SDValue, 16> V(NumElts, DAG.getConstant(0, EltTy)); 11556 SDValue Zero = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], NumElts); 11557 return DAG.getNode(ISD::SUB, dl, VT, Zero, SRA); 11558 } 11559 return SDValue(); 11560} 11561 11562static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG, 11563 const X86Subtarget *Subtarget) { 11564 EVT VT = Op.getValueType(); 11565 DebugLoc dl = Op.getDebugLoc(); 11566 SDValue R = Op.getOperand(0); 11567 SDValue Amt = Op.getOperand(1); 11568 11569 // Optimize shl/srl/sra with constant shift amount. 11570 if (isSplatVector(Amt.getNode())) { 11571 SDValue SclrAmt = Amt->getOperand(0); 11572 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(SclrAmt)) { 11573 uint64_t ShiftAmt = C->getZExtValue(); 11574 11575 if (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 || 11576 (Subtarget->hasInt256() && 11577 (VT == MVT::v4i64 || VT == MVT::v8i32 || VT == MVT::v16i16))) { 11578 if (Op.getOpcode() == ISD::SHL) 11579 return DAG.getNode(X86ISD::VSHLI, dl, VT, R, 11580 DAG.getConstant(ShiftAmt, MVT::i32)); 11581 if (Op.getOpcode() == ISD::SRL) 11582 return DAG.getNode(X86ISD::VSRLI, dl, VT, R, 11583 DAG.getConstant(ShiftAmt, MVT::i32)); 11584 if (Op.getOpcode() == ISD::SRA && VT != MVT::v2i64 && VT != MVT::v4i64) 11585 return DAG.getNode(X86ISD::VSRAI, dl, VT, R, 11586 DAG.getConstant(ShiftAmt, MVT::i32)); 11587 } 11588 11589 if (VT == MVT::v16i8) { 11590 if (Op.getOpcode() == ISD::SHL) { 11591 // Make a large shift. 11592 SDValue SHL = DAG.getNode(X86ISD::VSHLI, dl, MVT::v8i16, R, 11593 DAG.getConstant(ShiftAmt, MVT::i32)); 11594 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL); 11595 // Zero out the rightmost bits. 11596 SmallVector<SDValue, 16> V(16, 11597 DAG.getConstant(uint8_t(-1U << ShiftAmt), 11598 MVT::i8)); 11599 return DAG.getNode(ISD::AND, dl, VT, SHL, 11600 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 16)); 11601 } 11602 if (Op.getOpcode() == ISD::SRL) { 11603 // Make a large shift. 11604 SDValue SRL = DAG.getNode(X86ISD::VSRLI, dl, MVT::v8i16, R, 11605 DAG.getConstant(ShiftAmt, MVT::i32)); 11606 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL); 11607 // Zero out the leftmost bits. 11608 SmallVector<SDValue, 16> V(16, 11609 DAG.getConstant(uint8_t(-1U) >> ShiftAmt, 11610 MVT::i8)); 11611 return DAG.getNode(ISD::AND, dl, VT, SRL, 11612 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 16)); 11613 } 11614 if (Op.getOpcode() == ISD::SRA) { 11615 if (ShiftAmt == 7) { 11616 // R s>> 7 === R s< 0 11617 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl); 11618 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R); 11619 } 11620 11621 // R s>> a === ((R u>> a) ^ m) - m 11622 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt); 11623 SmallVector<SDValue, 16> V(16, DAG.getConstant(128 >> ShiftAmt, 11624 MVT::i8)); 11625 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 16); 11626 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask); 11627 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask); 11628 return Res; 11629 } 11630 llvm_unreachable("Unknown shift opcode."); 11631 } 11632 11633 if (Subtarget->hasInt256() && VT == MVT::v32i8) { 11634 if (Op.getOpcode() == ISD::SHL) { 11635 // Make a large shift. 11636 SDValue SHL = DAG.getNode(X86ISD::VSHLI, dl, MVT::v16i16, R, 11637 DAG.getConstant(ShiftAmt, MVT::i32)); 11638 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL); 11639 // Zero out the rightmost bits. 
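        // (There is no byte-granularity shift instruction, so the value was
        // shifted as v16i16 above; the per-byte mask clears the low bits
        // where bits leaked in from the lower byte of each word.)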
11640 SmallVector<SDValue, 32> V(32, 11641 DAG.getConstant(uint8_t(-1U << ShiftAmt), 11642 MVT::i8)); 11643 return DAG.getNode(ISD::AND, dl, VT, SHL, 11644 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 32)); 11645 } 11646 if (Op.getOpcode() == ISD::SRL) { 11647 // Make a large shift. 11648 SDValue SRL = DAG.getNode(X86ISD::VSRLI, dl, MVT::v16i16, R, 11649 DAG.getConstant(ShiftAmt, MVT::i32)); 11650 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL); 11651 // Zero out the leftmost bits. 11652 SmallVector<SDValue, 32> V(32, 11653 DAG.getConstant(uint8_t(-1U) >> ShiftAmt, 11654 MVT::i8)); 11655 return DAG.getNode(ISD::AND, dl, VT, SRL, 11656 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 32)); 11657 } 11658 if (Op.getOpcode() == ISD::SRA) { 11659 if (ShiftAmt == 7) { 11660 // R s>> 7 === R s< 0 11661 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl); 11662 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R); 11663 } 11664 11665 // R s>> a === ((R u>> a) ^ m) - m 11666 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt); 11667 SmallVector<SDValue, 32> V(32, DAG.getConstant(128 >> ShiftAmt, 11668 MVT::i8)); 11669 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 32); 11670 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask); 11671 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask); 11672 return Res; 11673 } 11674 llvm_unreachable("Unknown shift opcode."); 11675 } 11676 } 11677 } 11678 11679 // Special case in 32-bit mode, where i64 is expanded into high and low parts. 11680 if (!Subtarget->is64Bit() && 11681 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64)) && 11682 Amt.getOpcode() == ISD::BITCAST && 11683 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) { 11684 Amt = Amt.getOperand(0); 11685 unsigned Ratio = Amt.getValueType().getVectorNumElements() / 11686 VT.getVectorNumElements(); 11687 unsigned RatioInLog2 = Log2_32_Ceil(Ratio); 11688 uint64_t ShiftAmt = 0; 11689 for (unsigned i = 0; i != Ratio; ++i) { 11690 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Amt.getOperand(i)); 11691 if (C == 0) 11692 return SDValue(); 11693 // 6 == Log2(64) 11694 ShiftAmt |= C->getZExtValue() << (i * (1 << (6 - RatioInLog2))); 11695 } 11696 // Check remaining shift amounts. 
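    // In 32-bit mode the v2i64/v4i64 shift amount is built from i32 pieces, so
    // each 64-bit amount was reassembled from Ratio consecutive elements above;
    // every remaining group must reassemble to the same value for a single
    // immediate shift to be usable.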
11697 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) { 11698 uint64_t ShAmt = 0; 11699 for (unsigned j = 0; j != Ratio; ++j) { 11700 ConstantSDNode *C = 11701 dyn_cast<ConstantSDNode>(Amt.getOperand(i + j)); 11702 if (C == 0) 11703 return SDValue(); 11704 // 6 == Log2(64) 11705 ShAmt |= C->getZExtValue() << (j * (1 << (6 - RatioInLog2))); 11706 } 11707 if (ShAmt != ShiftAmt) 11708 return SDValue(); 11709 } 11710 switch (Op.getOpcode()) { 11711 default: 11712 llvm_unreachable("Unknown shift opcode!"); 11713 case ISD::SHL: 11714 return DAG.getNode(X86ISD::VSHLI, dl, VT, R, 11715 DAG.getConstant(ShiftAmt, MVT::i32)); 11716 case ISD::SRL: 11717 return DAG.getNode(X86ISD::VSRLI, dl, VT, R, 11718 DAG.getConstant(ShiftAmt, MVT::i32)); 11719 case ISD::SRA: 11720 return DAG.getNode(X86ISD::VSRAI, dl, VT, R, 11721 DAG.getConstant(ShiftAmt, MVT::i32)); 11722 } 11723 } 11724 11725 return SDValue(); 11726} 11727 11728static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG, 11729 const X86Subtarget* Subtarget) { 11730 EVT VT = Op.getValueType(); 11731 DebugLoc dl = Op.getDebugLoc(); 11732 SDValue R = Op.getOperand(0); 11733 SDValue Amt = Op.getOperand(1); 11734 11735 if ((VT == MVT::v2i64 && Op.getOpcode() != ISD::SRA) || 11736 VT == MVT::v4i32 || VT == MVT::v8i16 || 11737 (Subtarget->hasInt256() && 11738 ((VT == MVT::v4i64 && Op.getOpcode() != ISD::SRA) || 11739 VT == MVT::v8i32 || VT == MVT::v16i16))) { 11740 SDValue BaseShAmt; 11741 EVT EltVT = VT.getVectorElementType(); 11742 11743 if (Amt.getOpcode() == ISD::BUILD_VECTOR) { 11744 unsigned NumElts = VT.getVectorNumElements(); 11745 unsigned i, j; 11746 for (i = 0; i != NumElts; ++i) { 11747 if (Amt.getOperand(i).getOpcode() == ISD::UNDEF) 11748 continue; 11749 break; 11750 } 11751 for (j = i; j != NumElts; ++j) { 11752 SDValue Arg = Amt.getOperand(j); 11753 if (Arg.getOpcode() == ISD::UNDEF) continue; 11754 if (Arg != Amt.getOperand(i)) 11755 break; 11756 } 11757 if (i != NumElts && j == NumElts) 11758 BaseShAmt = Amt.getOperand(i); 11759 } else { 11760 if (Amt.getOpcode() == ISD::EXTRACT_SUBVECTOR) 11761 Amt = Amt.getOperand(0); 11762 if (Amt.getOpcode() == ISD::VECTOR_SHUFFLE && 11763 cast<ShuffleVectorSDNode>(Amt)->isSplat()) { 11764 SDValue InVec = Amt.getOperand(0); 11765 if (InVec.getOpcode() == ISD::BUILD_VECTOR) { 11766 unsigned NumElts = InVec.getValueType().getVectorNumElements(); 11767 unsigned i = 0; 11768 for (; i != NumElts; ++i) { 11769 SDValue Arg = InVec.getOperand(i); 11770 if (Arg.getOpcode() == ISD::UNDEF) continue; 11771 BaseShAmt = Arg; 11772 break; 11773 } 11774 } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) { 11775 if (ConstantSDNode *C = 11776 dyn_cast<ConstantSDNode>(InVec.getOperand(2))) { 11777 unsigned SplatIdx = 11778 cast<ShuffleVectorSDNode>(Amt)->getSplatIndex(); 11779 if (C->getZExtValue() == SplatIdx) 11780 BaseShAmt = InVec.getOperand(1); 11781 } 11782 } 11783 if (BaseShAmt.getNode() == 0) 11784 BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Amt, 11785 DAG.getIntPtrConstant(0)); 11786 } 11787 } 11788 11789 if (BaseShAmt.getNode()) { 11790 if (EltVT.bitsGT(MVT::i32)) 11791 BaseShAmt = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BaseShAmt); 11792 else if (EltVT.bitsLT(MVT::i32)) 11793 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt); 11794 11795 switch (Op.getOpcode()) { 11796 default: 11797 llvm_unreachable("Unknown shift opcode!"); 11798 case ISD::SHL: 11799 switch (VT.getSimpleVT().SimpleTy) { 11800 default: return SDValue(); 11801 case 
MVT::v2i64: 11802 case MVT::v4i32: 11803 case MVT::v8i16: 11804 case MVT::v4i64: 11805 case MVT::v8i32: 11806 case MVT::v16i16: 11807 return getTargetVShiftNode(X86ISD::VSHLI, dl, VT, R, BaseShAmt, DAG); 11808 } 11809 case ISD::SRA: 11810 switch (VT.getSimpleVT().SimpleTy) { 11811 default: return SDValue(); 11812 case MVT::v4i32: 11813 case MVT::v8i16: 11814 case MVT::v8i32: 11815 case MVT::v16i16: 11816 return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, R, BaseShAmt, DAG); 11817 } 11818 case ISD::SRL: 11819 switch (VT.getSimpleVT().SimpleTy) { 11820 default: return SDValue(); 11821 case MVT::v2i64: 11822 case MVT::v4i32: 11823 case MVT::v8i16: 11824 case MVT::v4i64: 11825 case MVT::v8i32: 11826 case MVT::v16i16: 11827 return getTargetVShiftNode(X86ISD::VSRLI, dl, VT, R, BaseShAmt, DAG); 11828 } 11829 } 11830 } 11831 } 11832 11833 // Special case in 32-bit mode, where i64 is expanded into high and low parts. 11834 if (!Subtarget->is64Bit() && 11835 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64)) && 11836 Amt.getOpcode() == ISD::BITCAST && 11837 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) { 11838 Amt = Amt.getOperand(0); 11839 unsigned Ratio = Amt.getValueType().getVectorNumElements() / 11840 VT.getVectorNumElements(); 11841 std::vector<SDValue> Vals(Ratio); 11842 for (unsigned i = 0; i != Ratio; ++i) 11843 Vals[i] = Amt.getOperand(i); 11844 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) { 11845 for (unsigned j = 0; j != Ratio; ++j) 11846 if (Vals[j] != Amt.getOperand(i + j)) 11847 return SDValue(); 11848 } 11849 switch (Op.getOpcode()) { 11850 default: 11851 llvm_unreachable("Unknown shift opcode!"); 11852 case ISD::SHL: 11853 return DAG.getNode(X86ISD::VSHL, dl, VT, R, Op.getOperand(1)); 11854 case ISD::SRL: 11855 return DAG.getNode(X86ISD::VSRL, dl, VT, R, Op.getOperand(1)); 11856 case ISD::SRA: 11857 return DAG.getNode(X86ISD::VSRA, dl, VT, R, Op.getOperand(1)); 11858 } 11859 } 11860 11861 return SDValue(); 11862} 11863 11864SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const { 11865 11866 EVT VT = Op.getValueType(); 11867 DebugLoc dl = Op.getDebugLoc(); 11868 SDValue R = Op.getOperand(0); 11869 SDValue Amt = Op.getOperand(1); 11870 SDValue V; 11871 11872 if (!Subtarget->hasSSE2()) 11873 return SDValue(); 11874 11875 V = LowerScalarImmediateShift(Op, DAG, Subtarget); 11876 if (V.getNode()) 11877 return V; 11878 11879 V = LowerScalarVariableShift(Op, DAG, Subtarget); 11880 if (V.getNode()) 11881 return V; 11882 11883 // AVX2 has VPSLLV/VPSRAV/VPSRLV. 11884 if (Subtarget->hasInt256()) { 11885 if (Op.getOpcode() == ISD::SRL && 11886 (VT == MVT::v2i64 || VT == MVT::v4i32 || 11887 VT == MVT::v4i64 || VT == MVT::v8i32)) 11888 return Op; 11889 if (Op.getOpcode() == ISD::SHL && 11890 (VT == MVT::v2i64 || VT == MVT::v4i32 || 11891 VT == MVT::v4i64 || VT == MVT::v8i32)) 11892 return Op; 11893 if (Op.getOpcode() == ISD::SRA && (VT == MVT::v4i32 || VT == MVT::v8i32)) 11894 return Op; 11895 } 11896 11897 // Lower SHL with variable shift amount. 
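  // For v4i32 the per-lane "1 << amt" is synthesized through the FP unit:
  // adding amt << 23 to 0x3f800000 (the bit pattern of 1.0f) places amt in the
  // exponent field, so converting back to integer yields 2^amt, which is then
  // multiplied by the operand.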
11898 if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) { 11899 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, VT)); 11900 11901 Op = DAG.getNode(ISD::ADD, dl, VT, Op, DAG.getConstant(0x3f800000U, VT)); 11902 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op); 11903 Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op); 11904 return DAG.getNode(ISD::MUL, dl, VT, Op, R); 11905 } 11906 if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) { 11907 assert(Subtarget->hasSSE2() && "Need SSE2 for pslli/pcmpeq."); 11908 11909 // a = a << 5; 11910 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(5, VT)); 11911 Op = DAG.getNode(ISD::BITCAST, dl, VT, Op); 11912 11913 // Turn 'a' into a mask suitable for VSELECT 11914 SDValue VSelM = DAG.getConstant(0x80, VT); 11915 SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op); 11916 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM); 11917 11918 SDValue CM1 = DAG.getConstant(0x0f, VT); 11919 SDValue CM2 = DAG.getConstant(0x3f, VT); 11920 11921 // r = VSELECT(r, psllw(r & (char16)15, 4), a); 11922 SDValue M = DAG.getNode(ISD::AND, dl, VT, R, CM1); 11923 M = getTargetVShiftNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 11924 DAG.getConstant(4, MVT::i32), DAG); 11925 M = DAG.getNode(ISD::BITCAST, dl, VT, M); 11926 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R); 11927 11928 // a += a 11929 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op); 11930 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op); 11931 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM); 11932 11933 // r = VSELECT(r, psllw(r & (char16)63, 2), a); 11934 M = DAG.getNode(ISD::AND, dl, VT, R, CM2); 11935 M = getTargetVShiftNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 11936 DAG.getConstant(2, MVT::i32), DAG); 11937 M = DAG.getNode(ISD::BITCAST, dl, VT, M); 11938 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R); 11939 11940 // a += a 11941 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op); 11942 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op); 11943 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM); 11944 11945 // return VSELECT(r, r+r, a); 11946 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, 11947 DAG.getNode(ISD::ADD, dl, VT, R, R), R); 11948 return R; 11949 } 11950 11951 // Decompose 256-bit shifts into smaller 128-bit shifts. 
11952 if (VT.is256BitVector()) { 11953 unsigned NumElems = VT.getVectorNumElements(); 11954 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 11955 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); 11956 11957 // Extract the two vectors 11958 SDValue V1 = Extract128BitVector(R, 0, DAG, dl); 11959 SDValue V2 = Extract128BitVector(R, NumElems/2, DAG, dl); 11960 11961 // Recreate the shift amount vectors 11962 SDValue Amt1, Amt2; 11963 if (Amt.getOpcode() == ISD::BUILD_VECTOR) { 11964 // Constant shift amount 11965 SmallVector<SDValue, 4> Amt1Csts; 11966 SmallVector<SDValue, 4> Amt2Csts; 11967 for (unsigned i = 0; i != NumElems/2; ++i) 11968 Amt1Csts.push_back(Amt->getOperand(i)); 11969 for (unsigned i = NumElems/2; i != NumElems; ++i) 11970 Amt2Csts.push_back(Amt->getOperand(i)); 11971 11972 Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, 11973 &Amt1Csts[0], NumElems/2); 11974 Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, 11975 &Amt2Csts[0], NumElems/2); 11976 } else { 11977 // Variable shift amount 11978 Amt1 = Extract128BitVector(Amt, 0, DAG, dl); 11979 Amt2 = Extract128BitVector(Amt, NumElems/2, DAG, dl); 11980 } 11981 11982 // Issue new vector shifts for the smaller types 11983 V1 = DAG.getNode(Op.getOpcode(), dl, NewVT, V1, Amt1); 11984 V2 = DAG.getNode(Op.getOpcode(), dl, NewVT, V2, Amt2); 11985 11986 // Concatenate the result back 11987 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2); 11988 } 11989 11990 return SDValue(); 11991} 11992 11993static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) { 11994 // Lower the "add/sub/mul with overflow" instruction into a regular instruction plus 11995 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering 11996 // looks for this combo and may remove the "setcc" instruction if the "setcc" 11997 // has only one use. 11998 SDNode *N = Op.getNode(); 11999 SDValue LHS = N->getOperand(0); 12000 SDValue RHS = N->getOperand(1); 12001 unsigned BaseOp = 0; 12002 unsigned Cond = 0; 12003 DebugLoc DL = Op.getDebugLoc(); 12004 switch (Op.getOpcode()) { 12005 default: llvm_unreachable("Unknown ovf instruction!"); 12006 case ISD::SADDO: 12007 // An add of one will be selected as an INC. Note that INC doesn't 12008 // set CF, so we can't do this for UADDO. 12009 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) 12010 if (C->isOne()) { 12011 BaseOp = X86ISD::INC; 12012 Cond = X86::COND_O; 12013 break; 12014 } 12015 BaseOp = X86ISD::ADD; 12016 Cond = X86::COND_O; 12017 break; 12018 case ISD::UADDO: 12019 BaseOp = X86ISD::ADD; 12020 Cond = X86::COND_B; 12021 break; 12022 case ISD::SSUBO: 12023 // A subtract of one will be selected as a DEC. Note that DEC doesn't 12024 // set CF, so we can't do this for USUBO.
12025 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) 12026 if (C->isOne()) { 12027 BaseOp = X86ISD::DEC; 12028 Cond = X86::COND_O; 12029 break; 12030 } 12031 BaseOp = X86ISD::SUB; 12032 Cond = X86::COND_O; 12033 break; 12034 case ISD::USUBO: 12035 BaseOp = X86ISD::SUB; 12036 Cond = X86::COND_B; 12037 break; 12038 case ISD::SMULO: 12039 BaseOp = X86ISD::SMUL; 12040 Cond = X86::COND_O; 12041 break; 12042 case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs 12043 SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0), 12044 MVT::i32); 12045 SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS); 12046 12047 SDValue SetCC = 12048 DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 12049 DAG.getConstant(X86::COND_O, MVT::i32), 12050 SDValue(Sum.getNode(), 2)); 12051 12052 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC); 12053 } 12054 } 12055 12056 // Also sets EFLAGS. 12057 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32); 12058 SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS); 12059 12060 SDValue SetCC = 12061 DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1), 12062 DAG.getConstant(Cond, MVT::i32), 12063 SDValue(Sum.getNode(), 1)); 12064 12065 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC); 12066} 12067 12068SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, 12069 SelectionDAG &DAG) const { 12070 DebugLoc dl = Op.getDebugLoc(); 12071 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 12072 EVT VT = Op.getValueType(); 12073 12074 if (!Subtarget->hasSSE2() || !VT.isVector()) 12075 return SDValue(); 12076 12077 unsigned BitsDiff = VT.getScalarType().getSizeInBits() - 12078 ExtraVT.getScalarType().getSizeInBits(); 12079 SDValue ShAmt = DAG.getConstant(BitsDiff, MVT::i32); 12080 12081 switch (VT.getSimpleVT().SimpleTy) { 12082 default: return SDValue(); 12083 case MVT::v8i32: 12084 case MVT::v16i16: 12085 if (!Subtarget->hasFp256()) 12086 return SDValue(); 12087 if (!Subtarget->hasInt256()) { 12088 // needs to be split 12089 unsigned NumElems = VT.getVectorNumElements(); 12090 12091 // Extract the LHS vectors 12092 SDValue LHS = Op.getOperand(0); 12093 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl); 12094 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl); 12095 12096 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 12097 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); 12098 12099 EVT ExtraEltVT = ExtraVT.getVectorElementType(); 12100 unsigned ExtraNumElems = ExtraVT.getVectorNumElements(); 12101 ExtraVT = EVT::getVectorVT(*DAG.getContext(), ExtraEltVT, 12102 ExtraNumElems/2); 12103 SDValue Extra = DAG.getValueType(ExtraVT); 12104 12105 LHS1 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, Extra); 12106 LHS2 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, Extra); 12107 12108 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, LHS1, LHS2); 12109 } 12110 // fall through 12111 case MVT::v4i32: 12112 case MVT::v8i16: { 12113 // (sext (vzext x)) -> (vsext x) 12114 SDValue Op0 = Op.getOperand(0); 12115 SDValue Op00 = Op0.getOperand(0); 12116 SDValue Tmp1; 12117 // Hopefully, this VECTOR_SHUFFLE is just a VZEXT. 
12118 if (Op0.getOpcode() == ISD::BITCAST && 12119 Op00.getOpcode() == ISD::VECTOR_SHUFFLE) 12120 Tmp1 = LowerVectorIntExtend(Op00, DAG); 12121 if (Tmp1.getNode()) { 12122 SDValue Tmp1Op0 = Tmp1.getOperand(0); 12123 assert(Tmp1Op0.getOpcode() == X86ISD::VZEXT && 12124 "This optimization is invalid without a VZEXT."); 12125 return DAG.getNode(X86ISD::VSEXT, dl, VT, Tmp1Op0.getOperand(0)); 12126 } 12127 12128 // If the above didn't work, then just use Shift-Left + Shift-Right. 12129 Tmp1 = getTargetVShiftNode(X86ISD::VSHLI, dl, VT, Op0, ShAmt, DAG); 12130 return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, Tmp1, ShAmt, DAG); 12131 } 12132 } 12133} 12134 12135static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget, 12136 SelectionDAG &DAG) { 12137 DebugLoc dl = Op.getDebugLoc(); 12138 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>( 12139 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()); 12140 SynchronizationScope FenceScope = static_cast<SynchronizationScope>( 12141 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue()); 12142 12143 // The only fence that needs an instruction is a sequentially-consistent 12144 // cross-thread fence. 12145 if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) { 12146 // Use mfence if we have SSE2 or we're on x86-64 (even if we asked for 12147 // no-sse2). There isn't any reason to disable it if the target processor 12148 // supports it. 12149 if (Subtarget->hasSSE2() || Subtarget->is64Bit()) 12150 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0)); 12151 12152 SDValue Chain = Op.getOperand(0); 12153 SDValue Zero = DAG.getConstant(0, MVT::i32); 12154 SDValue Ops[] = { 12155 DAG.getRegister(X86::ESP, MVT::i32), // Base 12156 DAG.getTargetConstant(1, MVT::i8), // Scale 12157 DAG.getRegister(0, MVT::i32), // Index 12158 DAG.getTargetConstant(0, MVT::i32), // Disp 12159 DAG.getRegister(0, MVT::i32), // Segment. 12160 Zero, 12161 Chain 12162 }; 12163 SDNode *Res = DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops); 12164 return SDValue(Res, 0); 12165 } 12166 12167 // MEMBARRIER is a compiler barrier; it codegens to a no-op. 
12168 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0)); 12169} 12170 12171static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget, 12172 SelectionDAG &DAG) { 12173 EVT T = Op.getValueType(); 12174 DebugLoc DL = Op.getDebugLoc(); 12175 unsigned Reg = 0; 12176 unsigned size = 0; 12177 switch(T.getSimpleVT().SimpleTy) { 12178 default: llvm_unreachable("Invalid value type!"); 12179 case MVT::i8: Reg = X86::AL; size = 1; break; 12180 case MVT::i16: Reg = X86::AX; size = 2; break; 12181 case MVT::i32: Reg = X86::EAX; size = 4; break; 12182 case MVT::i64: 12183 assert(Subtarget->is64Bit() && "Node not type legal!"); 12184 Reg = X86::RAX; size = 8; 12185 break; 12186 } 12187 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg, 12188 Op.getOperand(2), SDValue()); 12189 SDValue Ops[] = { cpIn.getValue(0), 12190 Op.getOperand(1), 12191 Op.getOperand(3), 12192 DAG.getTargetConstant(size, MVT::i8), 12193 cpIn.getValue(1) }; 12194 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 12195 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand(); 12196 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys, 12197 Ops, array_lengthof(Ops), T, MMO); 12198 SDValue cpOut = 12199 DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1)); 12200 return cpOut; 12201} 12202 12203static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget, 12204 SelectionDAG &DAG) { 12205 assert(Subtarget->is64Bit() && "Result not type legalized?"); 12206 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 12207 SDValue TheChain = Op.getOperand(0); 12208 DebugLoc dl = Op.getDebugLoc(); 12209 SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1); 12210 SDValue rax = DAG.getCopyFromReg(rd, dl, X86::RAX, MVT::i64, rd.getValue(1)); 12211 SDValue rdx = DAG.getCopyFromReg(rax.getValue(1), dl, X86::RDX, MVT::i64, 12212 rax.getValue(2)); 12213 SDValue Tmp = DAG.getNode(ISD::SHL, dl, MVT::i64, rdx, 12214 DAG.getConstant(32, MVT::i8)); 12215 SDValue Ops[] = { 12216 DAG.getNode(ISD::OR, dl, MVT::i64, rax, Tmp), 12217 rdx.getValue(1) 12218 }; 12219 return DAG.getMergeValues(Ops, array_lengthof(Ops), dl); 12220} 12221 12222SDValue X86TargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const { 12223 EVT SrcVT = Op.getOperand(0).getValueType(); 12224 EVT DstVT = Op.getValueType(); 12225 assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() && 12226 Subtarget->hasMMX() && "Unexpected custom BITCAST"); 12227 assert((DstVT == MVT::i64 || 12228 (DstVT.isVector() && DstVT.getSizeInBits()==64)) && 12229 "Unexpected custom BITCAST"); 12230 // i64 <=> MMX conversions are Legal. 12231 if (SrcVT==MVT::i64 && DstVT.isVector()) 12232 return Op; 12233 if (DstVT==MVT::i64 && SrcVT.isVector()) 12234 return Op; 12235 // MMX <=> MMX conversions are Legal. 12236 if (SrcVT.isVector() && DstVT.isVector()) 12237 return Op; 12238 // All other conversions need to be expanded. 
12239 return SDValue(); 12240} 12241 12242static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) { 12243 SDNode *Node = Op.getNode(); 12244 DebugLoc dl = Node->getDebugLoc(); 12245 EVT T = Node->getValueType(0); 12246 SDValue negOp = DAG.getNode(ISD::SUB, dl, T, 12247 DAG.getConstant(0, T), Node->getOperand(2)); 12248 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl, 12249 cast<AtomicSDNode>(Node)->getMemoryVT(), 12250 Node->getOperand(0), 12251 Node->getOperand(1), negOp, 12252 cast<AtomicSDNode>(Node)->getSrcValue(), 12253 cast<AtomicSDNode>(Node)->getAlignment(), 12254 cast<AtomicSDNode>(Node)->getOrdering(), 12255 cast<AtomicSDNode>(Node)->getSynchScope()); 12256} 12257 12258static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) { 12259 SDNode *Node = Op.getNode(); 12260 DebugLoc dl = Node->getDebugLoc(); 12261 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT(); 12262 12263 // Convert seq_cst store -> xchg 12264 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b) 12265 // FIXME: On 32-bit, store -> fist or movq would be more efficient 12266 // (The only way to get a 16-byte store is cmpxchg16b) 12267 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment. 12268 if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent || 12269 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 12270 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl, 12271 cast<AtomicSDNode>(Node)->getMemoryVT(), 12272 Node->getOperand(0), 12273 Node->getOperand(1), Node->getOperand(2), 12274 cast<AtomicSDNode>(Node)->getMemOperand(), 12275 cast<AtomicSDNode>(Node)->getOrdering(), 12276 cast<AtomicSDNode>(Node)->getSynchScope()); 12277 return Swap.getValue(1); 12278 } 12279 // Other atomic stores have a simple pattern. 12280 return Op; 12281} 12282 12283static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) { 12284 EVT VT = Op.getNode()->getValueType(0); 12285 12286 // Let legalize expand this if it isn't a legal type yet. 12287 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 12288 return SDValue(); 12289 12290 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 12291 12292 unsigned Opc; 12293 bool ExtraOp = false; 12294 switch (Op.getOpcode()) { 12295 default: llvm_unreachable("Invalid code"); 12296 case ISD::ADDC: Opc = X86ISD::ADD; break; 12297 case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break; 12298 case ISD::SUBC: Opc = X86ISD::SUB; break; 12299 case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break; 12300 } 12301 12302 if (!ExtraOp) 12303 return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0), 12304 Op.getOperand(1)); 12305 return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0), 12306 Op.getOperand(1), Op.getOperand(2)); 12307} 12308 12309SDValue X86TargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const { 12310 assert(Subtarget->isTargetDarwin() && Subtarget->is64Bit()); 12311 12312 // For MacOSX, we want to call an alternative entry point: __sincos_stret, 12313 // which returns the values as { float, float } (in XMM0) or 12314 // { double, double } (which is returned in XMM0, XMM1). 12315 DebugLoc dl = Op.getDebugLoc(); 12316 SDValue Arg = Op.getOperand(0); 12317 EVT ArgVT = Arg.getValueType(); 12318 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 12319 12320 ArgListTy Args; 12321 ArgListEntry Entry; 12322 12323 Entry.Node = Arg; 12324 Entry.Ty = ArgTy; 12325 Entry.isSExt = false; 12326 Entry.isZExt = false; 12327 Args.push_back(Entry); 12328 12329 bool isF64 = ArgVT == MVT::f64; 12330 // Only optimize x86_64 for now. 
i386 is a bit messy. For f32, 12331 // the small struct {f32, f32} is returned in (eax, edx). For f64, 12332 // the results are returned via SRet in memory. 12333 const char *LibcallName = isF64 ? "__sincos_stret" : "__sincosf_stret"; 12334 SDValue Callee = DAG.getExternalSymbol(LibcallName, getPointerTy()); 12335 12336 Type *RetTy = isF64 12337 ? (Type*)StructType::get(ArgTy, ArgTy, NULL) 12338 : (Type*)VectorType::get(ArgTy, 4); 12339 TargetLowering:: 12340 CallLoweringInfo CLI(DAG.getEntryNode(), RetTy, 12341 false, false, false, false, 0, 12342 CallingConv::C, /*isTailCall=*/false, 12343 /*doesNotRet=*/false, /*isReturnValueUsed=*/true, 12344 Callee, Args, DAG, dl); 12345 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 12346 12347 if (isF64) 12348 // Returned in xmm0 and xmm1. 12349 return CallResult.first; 12350 12351 // Returned in bits 0:31 and 32:63 of xmm0. 12352 SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT, 12353 CallResult.first, DAG.getIntPtrConstant(0)); 12354 SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT, 12355 CallResult.first, DAG.getIntPtrConstant(1)); 12356 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT); 12357 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal); 12358} 12359 12360/// LowerOperation - Provide custom lowering hooks for some operations. 12361/// 12362SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 12363 switch (Op.getOpcode()) { 12364 default: llvm_unreachable("Should not custom lower this!"); 12365 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op,DAG); 12366 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG); 12367 case ISD::ATOMIC_CMP_SWAP: return LowerCMP_SWAP(Op, Subtarget, DAG); 12368 case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG); 12369 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op,DAG); 12370 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 12371 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 12372 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 12373 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 12374 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 12375 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG); 12376 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG); 12377 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 12378 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 12379 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 12380 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 12381 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG); 12382 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 12383 case ISD::SHL_PARTS: 12384 case ISD::SRA_PARTS: 12385 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG); 12386 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 12387 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG); 12388 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG); 12389 case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, DAG); 12390 case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, DAG); 12391 case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, DAG); 12392 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 12393 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG); 12394 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG); 12395 case ISD::FABS: return LowerFABS(Op, DAG); 12396 case ISD::FNEG:
return LowerFNEG(Op, DAG); 12397 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 12398 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG); 12399 case ISD::SETCC: return LowerSETCC(Op, DAG); 12400 case ISD::SELECT: return LowerSELECT(Op, DAG); 12401 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 12402 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 12403 case ISD::VASTART: return LowerVASTART(Op, DAG); 12404 case ISD::VAARG: return LowerVAARG(Op, DAG); 12405 case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG); 12406 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 12407 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG); 12408 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 12409 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 12410 case ISD::FRAME_TO_ARGS_OFFSET: 12411 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG); 12412 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); 12413 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG); 12414 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG); 12415 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG); 12416 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG); 12417 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG); 12418 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 12419 case ISD::CTLZ: return LowerCTLZ(Op, DAG); 12420 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, DAG); 12421 case ISD::CTTZ: return LowerCTTZ(Op, DAG); 12422 case ISD::MUL: return LowerMUL(Op, Subtarget, DAG); 12423 case ISD::SRA: 12424 case ISD::SRL: 12425 case ISD::SHL: return LowerShift(Op, DAG); 12426 case ISD::SADDO: 12427 case ISD::UADDO: 12428 case ISD::SSUBO: 12429 case ISD::USUBO: 12430 case ISD::SMULO: 12431 case ISD::UMULO: return LowerXALUO(Op, DAG); 12432 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG); 12433 case ISD::BITCAST: return LowerBITCAST(Op, DAG); 12434 case ISD::ADDC: 12435 case ISD::ADDE: 12436 case ISD::SUBC: 12437 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); 12438 case ISD::ADD: return LowerADD(Op, DAG); 12439 case ISD::SUB: return LowerSUB(Op, DAG); 12440 case ISD::SDIV: return LowerSDIV(Op, DAG); 12441 case ISD::FSINCOS: return LowerFSINCOS(Op, DAG); 12442 } 12443} 12444 12445static void ReplaceATOMIC_LOAD(SDNode *Node, 12446 SmallVectorImpl<SDValue> &Results, 12447 SelectionDAG &DAG) { 12448 DebugLoc dl = Node->getDebugLoc(); 12449 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT(); 12450 12451 // Convert wide load -> cmpxchg8b/cmpxchg16b 12452 // FIXME: On 32-bit, load -> fild or movq would be more efficient 12453 // (The only way to get a 16-byte load is cmpxchg16b) 12454 // FIXME: 16-byte ATOMIC_CMP_SWAP isn't actually hooked up at the moment. 
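// Rough sketch of the replacement (illustrative, assuming a 32-bit target
// where i64 is not a legal type):
//   %v = load atomic i64* %p seq_cst, align 8
// is rewritten as the equivalent of
//   %v = cmpxchg i64* %p, i64 0, i64 0 seq_cst
// i.e. a LOCK CMPXCHG8B that swaps 0 for 0 when the location already holds
// 0 and otherwise fails, returning the current contents; either way the
// value it yields is exactly the atomic load result.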
12455 SDValue Zero = DAG.getConstant(0, VT); 12456 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, dl, VT, 12457 Node->getOperand(0), 12458 Node->getOperand(1), Zero, Zero, 12459 cast<AtomicSDNode>(Node)->getMemOperand(), 12460 cast<AtomicSDNode>(Node)->getOrdering(), 12461 cast<AtomicSDNode>(Node)->getSynchScope()); 12462 Results.push_back(Swap.getValue(0)); 12463 Results.push_back(Swap.getValue(1)); 12464} 12465 12466static void 12467ReplaceATOMIC_BINARY_64(SDNode *Node, SmallVectorImpl<SDValue>&Results, 12468 SelectionDAG &DAG, unsigned NewOp) { 12469 DebugLoc dl = Node->getDebugLoc(); 12470 assert (Node->getValueType(0) == MVT::i64 && 12471 "Only know how to expand i64 atomics"); 12472 12473 SDValue Chain = Node->getOperand(0); 12474 SDValue In1 = Node->getOperand(1); 12475 SDValue In2L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 12476 Node->getOperand(2), DAG.getIntPtrConstant(0)); 12477 SDValue In2H = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 12478 Node->getOperand(2), DAG.getIntPtrConstant(1)); 12479 SDValue Ops[] = { Chain, In1, In2L, In2H }; 12480 SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 12481 SDValue Result = 12482 DAG.getMemIntrinsicNode(NewOp, dl, Tys, Ops, array_lengthof(Ops), MVT::i64, 12483 cast<MemSDNode>(Node)->getMemOperand()); 12484 SDValue OpsF[] = { Result.getValue(0), Result.getValue(1)}; 12485 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2)); 12486 Results.push_back(Result.getValue(2)); 12487} 12488 12489/// ReplaceNodeResults - Replace a node with an illegal result type 12490/// with a new node built out of custom code. 12491void X86TargetLowering::ReplaceNodeResults(SDNode *N, 12492 SmallVectorImpl<SDValue>&Results, 12493 SelectionDAG &DAG) const { 12494 DebugLoc dl = N->getDebugLoc(); 12495 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 12496 switch (N->getOpcode()) { 12497 default: 12498 llvm_unreachable("Do not know how to custom type legalize this operation!"); 12499 case ISD::SIGN_EXTEND_INREG: 12500 case ISD::ADDC: 12501 case ISD::ADDE: 12502 case ISD::SUBC: 12503 case ISD::SUBE: 12504 // We don't want to expand or promote these. 12505 return; 12506 case ISD::FP_TO_SINT: 12507 case ISD::FP_TO_UINT: { 12508 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT; 12509 12510 if (!IsSigned && !isIntegerTypeFTOL(SDValue(N, 0).getValueType())) 12511 return; 12512 12513 std::pair<SDValue,SDValue> Vals = 12514 FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true); 12515 SDValue FIST = Vals.first, StackSlot = Vals.second; 12516 if (FIST.getNode() != 0) { 12517 EVT VT = N->getValueType(0); 12518 // Return a load from the stack slot. 
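// Sketch of the two shapes handled below (illustrative only): when the
// helper used an x87 FIST*-in-memory node, e.g. fptosi double %x to i64 on
// 32-bit x86 going through fistpll, StackSlot is the temporary the converted
// integer was stored to and the result is re-loaded from it; otherwise the
// helper already produced the value directly and FIST itself is pushed as
// the replacement result.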
12519 if (StackSlot.getNode() != 0) 12520 Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot, 12521 MachinePointerInfo(), 12522 false, false, false, 0)); 12523 else 12524 Results.push_back(FIST); 12525 } 12526 return; 12527 } 12528 case ISD::UINT_TO_FP: { 12529 assert(Subtarget->hasSSE2() && "Requires at least SSE2!"); 12530 if (N->getOperand(0).getValueType() != MVT::v2i32 || 12531 N->getValueType(0) != MVT::v2f32) 12532 return; 12533 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64, 12534 N->getOperand(0)); 12535 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), 12536 MVT::f64); 12537 SDValue VBias = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2f64, Bias, Bias); 12538 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn, 12539 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, VBias)); 12540 Or = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or); 12541 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias); 12542 Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub)); 12543 return; 12544 } 12545 case ISD::FP_ROUND: { 12546 if (!TLI.isTypeLegal(N->getOperand(0).getValueType())) 12547 return; 12548 SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0)); 12549 Results.push_back(V); 12550 return; 12551 } 12552 case ISD::READCYCLECOUNTER: { 12553 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 12554 SDValue TheChain = N->getOperand(0); 12555 SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1); 12556 SDValue eax = DAG.getCopyFromReg(rd, dl, X86::EAX, MVT::i32, 12557 rd.getValue(1)); 12558 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), dl, X86::EDX, MVT::i32, 12559 eax.getValue(2)); 12560 // Use a buildpair to merge the two 32-bit values into a 64-bit one. 12561 SDValue Ops[] = { eax, edx }; 12562 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops, 12563 array_lengthof(Ops))); 12564 Results.push_back(edx.getValue(1)); 12565 return; 12566 } 12567 case ISD::ATOMIC_CMP_SWAP: { 12568 EVT T = N->getValueType(0); 12569 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair"); 12570 bool Regs64bit = T == MVT::i128; 12571 EVT HalfT = Regs64bit ? MVT::i64 : MVT::i32; 12572 SDValue cpInL, cpInH; 12573 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2), 12574 DAG.getConstant(0, HalfT)); 12575 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2), 12576 DAG.getConstant(1, HalfT)); 12577 cpInL = DAG.getCopyToReg(N->getOperand(0), dl, 12578 Regs64bit ? X86::RAX : X86::EAX, 12579 cpInL, SDValue()); 12580 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl, 12581 Regs64bit ? X86::RDX : X86::EDX, 12582 cpInH, cpInL.getValue(1)); 12583 SDValue swapInL, swapInH; 12584 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3), 12585 DAG.getConstant(0, HalfT)); 12586 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3), 12587 DAG.getConstant(1, HalfT)); 12588 swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl, 12589 Regs64bit ? X86::RBX : X86::EBX, 12590 swapInL, cpInH.getValue(1)); 12591 swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl, 12592 Regs64bit ? X86::RCX : X86::ECX, 12593 swapInH, swapInL.getValue(1)); 12594 SDValue Ops[] = { swapInH.getValue(0), 12595 N->getOperand(1), 12596 swapInH.getValue(1) }; 12597 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 12598 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); 12599 unsigned Opcode = Regs64bit ? 
X86ISD::LCMPXCHG16_DAG : 12600 X86ISD::LCMPXCHG8_DAG; 12601 SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, 12602 Ops, array_lengthof(Ops), T, MMO); 12603 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl, 12604 Regs64bit ? X86::RAX : X86::EAX, 12605 HalfT, Result.getValue(1)); 12606 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl, 12607 Regs64bit ? X86::RDX : X86::EDX, 12608 HalfT, cpOutL.getValue(2)); 12609 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)}; 12610 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF, 2)); 12611 Results.push_back(cpOutH.getValue(1)); 12612 return; 12613 } 12614 case ISD::ATOMIC_LOAD_ADD: 12615 case ISD::ATOMIC_LOAD_AND: 12616 case ISD::ATOMIC_LOAD_NAND: 12617 case ISD::ATOMIC_LOAD_OR: 12618 case ISD::ATOMIC_LOAD_SUB: 12619 case ISD::ATOMIC_LOAD_XOR: 12620 case ISD::ATOMIC_LOAD_MAX: 12621 case ISD::ATOMIC_LOAD_MIN: 12622 case ISD::ATOMIC_LOAD_UMAX: 12623 case ISD::ATOMIC_LOAD_UMIN: 12624 case ISD::ATOMIC_SWAP: { 12625 unsigned Opc; 12626 switch (N->getOpcode()) { 12627 default: llvm_unreachable("Unexpected opcode"); 12628 case ISD::ATOMIC_LOAD_ADD: 12629 Opc = X86ISD::ATOMADD64_DAG; 12630 break; 12631 case ISD::ATOMIC_LOAD_AND: 12632 Opc = X86ISD::ATOMAND64_DAG; 12633 break; 12634 case ISD::ATOMIC_LOAD_NAND: 12635 Opc = X86ISD::ATOMNAND64_DAG; 12636 break; 12637 case ISD::ATOMIC_LOAD_OR: 12638 Opc = X86ISD::ATOMOR64_DAG; 12639 break; 12640 case ISD::ATOMIC_LOAD_SUB: 12641 Opc = X86ISD::ATOMSUB64_DAG; 12642 break; 12643 case ISD::ATOMIC_LOAD_XOR: 12644 Opc = X86ISD::ATOMXOR64_DAG; 12645 break; 12646 case ISD::ATOMIC_LOAD_MAX: 12647 Opc = X86ISD::ATOMMAX64_DAG; 12648 break; 12649 case ISD::ATOMIC_LOAD_MIN: 12650 Opc = X86ISD::ATOMMIN64_DAG; 12651 break; 12652 case ISD::ATOMIC_LOAD_UMAX: 12653 Opc = X86ISD::ATOMUMAX64_DAG; 12654 break; 12655 case ISD::ATOMIC_LOAD_UMIN: 12656 Opc = X86ISD::ATOMUMIN64_DAG; 12657 break; 12658 case ISD::ATOMIC_SWAP: 12659 Opc = X86ISD::ATOMSWAP64_DAG; 12660 break; 12661 } 12662 ReplaceATOMIC_BINARY_64(N, Results, DAG, Opc); 12663 return; 12664 } 12665 case ISD::ATOMIC_LOAD: 12666 ReplaceATOMIC_LOAD(N, Results, DAG); 12667 } 12668} 12669 12670const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { 12671 switch (Opcode) { 12672 default: return NULL; 12673 case X86ISD::BSF: return "X86ISD::BSF"; 12674 case X86ISD::BSR: return "X86ISD::BSR"; 12675 case X86ISD::SHLD: return "X86ISD::SHLD"; 12676 case X86ISD::SHRD: return "X86ISD::SHRD"; 12677 case X86ISD::FAND: return "X86ISD::FAND"; 12678 case X86ISD::FOR: return "X86ISD::FOR"; 12679 case X86ISD::FXOR: return "X86ISD::FXOR"; 12680 case X86ISD::FSRL: return "X86ISD::FSRL"; 12681 case X86ISD::FILD: return "X86ISD::FILD"; 12682 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG"; 12683 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM"; 12684 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM"; 12685 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM"; 12686 case X86ISD::FLD: return "X86ISD::FLD"; 12687 case X86ISD::FST: return "X86ISD::FST"; 12688 case X86ISD::CALL: return "X86ISD::CALL"; 12689 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG"; 12690 case X86ISD::BT: return "X86ISD::BT"; 12691 case X86ISD::CMP: return "X86ISD::CMP"; 12692 case X86ISD::COMI: return "X86ISD::COMI"; 12693 case X86ISD::UCOMI: return "X86ISD::UCOMI"; 12694 case X86ISD::SETCC: return "X86ISD::SETCC"; 12695 case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY"; 12696 case X86ISD::FSETCCsd: return 
"X86ISD::FSETCCsd"; 12697 case X86ISD::FSETCCss: return "X86ISD::FSETCCss"; 12698 case X86ISD::CMOV: return "X86ISD::CMOV"; 12699 case X86ISD::BRCOND: return "X86ISD::BRCOND"; 12700 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG"; 12701 case X86ISD::REP_STOS: return "X86ISD::REP_STOS"; 12702 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS"; 12703 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg"; 12704 case X86ISD::Wrapper: return "X86ISD::Wrapper"; 12705 case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP"; 12706 case X86ISD::PEXTRB: return "X86ISD::PEXTRB"; 12707 case X86ISD::PEXTRW: return "X86ISD::PEXTRW"; 12708 case X86ISD::INSERTPS: return "X86ISD::INSERTPS"; 12709 case X86ISD::PINSRB: return "X86ISD::PINSRB"; 12710 case X86ISD::PINSRW: return "X86ISD::PINSRW"; 12711 case X86ISD::PSHUFB: return "X86ISD::PSHUFB"; 12712 case X86ISD::ANDNP: return "X86ISD::ANDNP"; 12713 case X86ISD::PSIGN: return "X86ISD::PSIGN"; 12714 case X86ISD::BLENDV: return "X86ISD::BLENDV"; 12715 case X86ISD::BLENDI: return "X86ISD::BLENDI"; 12716 case X86ISD::SUBUS: return "X86ISD::SUBUS"; 12717 case X86ISD::HADD: return "X86ISD::HADD"; 12718 case X86ISD::HSUB: return "X86ISD::HSUB"; 12719 case X86ISD::FHADD: return "X86ISD::FHADD"; 12720 case X86ISD::FHSUB: return "X86ISD::FHSUB"; 12721 case X86ISD::UMAX: return "X86ISD::UMAX"; 12722 case X86ISD::UMIN: return "X86ISD::UMIN"; 12723 case X86ISD::SMAX: return "X86ISD::SMAX"; 12724 case X86ISD::SMIN: return "X86ISD::SMIN"; 12725 case X86ISD::FMAX: return "X86ISD::FMAX"; 12726 case X86ISD::FMIN: return "X86ISD::FMIN"; 12727 case X86ISD::FMAXC: return "X86ISD::FMAXC"; 12728 case X86ISD::FMINC: return "X86ISD::FMINC"; 12729 case X86ISD::FRSQRT: return "X86ISD::FRSQRT"; 12730 case X86ISD::FRCP: return "X86ISD::FRCP"; 12731 case X86ISD::TLSADDR: return "X86ISD::TLSADDR"; 12732 case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR"; 12733 case X86ISD::TLSCALL: return "X86ISD::TLSCALL"; 12734 case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP"; 12735 case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP"; 12736 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN"; 12737 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN"; 12738 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m"; 12739 case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r"; 12740 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG"; 12741 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG"; 12742 case X86ISD::ATOMADD64_DAG: return "X86ISD::ATOMADD64_DAG"; 12743 case X86ISD::ATOMSUB64_DAG: return "X86ISD::ATOMSUB64_DAG"; 12744 case X86ISD::ATOMOR64_DAG: return "X86ISD::ATOMOR64_DAG"; 12745 case X86ISD::ATOMXOR64_DAG: return "X86ISD::ATOMXOR64_DAG"; 12746 case X86ISD::ATOMAND64_DAG: return "X86ISD::ATOMAND64_DAG"; 12747 case X86ISD::ATOMNAND64_DAG: return "X86ISD::ATOMNAND64_DAG"; 12748 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL"; 12749 case X86ISD::VSEXT_MOVL: return "X86ISD::VSEXT_MOVL"; 12750 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD"; 12751 case X86ISD::VZEXT: return "X86ISD::VZEXT"; 12752 case X86ISD::VSEXT: return "X86ISD::VSEXT"; 12753 case X86ISD::VFPEXT: return "X86ISD::VFPEXT"; 12754 case X86ISD::VFPROUND: return "X86ISD::VFPROUND"; 12755 case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ"; 12756 case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ"; 12757 case X86ISD::VSHL: return "X86ISD::VSHL"; 12758 case X86ISD::VSRL: return "X86ISD::VSRL"; 12759 case X86ISD::VSRA: return "X86ISD::VSRA"; 12760 case X86ISD::VSHLI: return "X86ISD::VSHLI"; 12761 case 
X86ISD::VSRLI: return "X86ISD::VSRLI"; 12762 case X86ISD::VSRAI: return "X86ISD::VSRAI"; 12763 case X86ISD::CMPP: return "X86ISD::CMPP"; 12764 case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ"; 12765 case X86ISD::PCMPGT: return "X86ISD::PCMPGT"; 12766 case X86ISD::ADD: return "X86ISD::ADD"; 12767 case X86ISD::SUB: return "X86ISD::SUB"; 12768 case X86ISD::ADC: return "X86ISD::ADC"; 12769 case X86ISD::SBB: return "X86ISD::SBB"; 12770 case X86ISD::SMUL: return "X86ISD::SMUL"; 12771 case X86ISD::UMUL: return "X86ISD::UMUL"; 12772 case X86ISD::INC: return "X86ISD::INC"; 12773 case X86ISD::DEC: return "X86ISD::DEC"; 12774 case X86ISD::OR: return "X86ISD::OR"; 12775 case X86ISD::XOR: return "X86ISD::XOR"; 12776 case X86ISD::AND: return "X86ISD::AND"; 12777 case X86ISD::BLSI: return "X86ISD::BLSI"; 12778 case X86ISD::BLSMSK: return "X86ISD::BLSMSK"; 12779 case X86ISD::BLSR: return "X86ISD::BLSR"; 12780 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM"; 12781 case X86ISD::PTEST: return "X86ISD::PTEST"; 12782 case X86ISD::TESTP: return "X86ISD::TESTP"; 12783 case X86ISD::PALIGNR: return "X86ISD::PALIGNR"; 12784 case X86ISD::PSHUFD: return "X86ISD::PSHUFD"; 12785 case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW"; 12786 case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW"; 12787 case X86ISD::SHUFP: return "X86ISD::SHUFP"; 12788 case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS"; 12789 case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD"; 12790 case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS"; 12791 case X86ISD::MOVLPS: return "X86ISD::MOVLPS"; 12792 case X86ISD::MOVLPD: return "X86ISD::MOVLPD"; 12793 case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP"; 12794 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP"; 12795 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP"; 12796 case X86ISD::MOVSD: return "X86ISD::MOVSD"; 12797 case X86ISD::MOVSS: return "X86ISD::MOVSS"; 12798 case X86ISD::UNPCKL: return "X86ISD::UNPCKL"; 12799 case X86ISD::UNPCKH: return "X86ISD::UNPCKH"; 12800 case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST"; 12801 case X86ISD::VPERMILP: return "X86ISD::VPERMILP"; 12802 case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128"; 12803 case X86ISD::VPERMV: return "X86ISD::VPERMV"; 12804 case X86ISD::VPERMI: return "X86ISD::VPERMI"; 12805 case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ"; 12806 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS"; 12807 case X86ISD::VAARG_64: return "X86ISD::VAARG_64"; 12808 case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA"; 12809 case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER"; 12810 case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA"; 12811 case X86ISD::WIN_FTOL: return "X86ISD::WIN_FTOL"; 12812 case X86ISD::SAHF: return "X86ISD::SAHF"; 12813 case X86ISD::RDRAND: return "X86ISD::RDRAND"; 12814 case X86ISD::RDSEED: return "X86ISD::RDSEED"; 12815 case X86ISD::FMADD: return "X86ISD::FMADD"; 12816 case X86ISD::FMSUB: return "X86ISD::FMSUB"; 12817 case X86ISD::FNMADD: return "X86ISD::FNMADD"; 12818 case X86ISD::FNMSUB: return "X86ISD::FNMSUB"; 12819 case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB"; 12820 case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD"; 12821 case X86ISD::PCMPESTRI: return "X86ISD::PCMPESTRI"; 12822 case X86ISD::PCMPISTRI: return "X86ISD::PCMPISTRI"; 12823 case X86ISD::XTEST: return "X86ISD::XTEST"; 12824 } 12825} 12826 12827// isLegalAddressingMode - Return true if the addressing mode represented 12828// by AM is legal for this target, for a load/store of the specified type. 
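// A few concrete examples of what this hook accepts (illustrative only):
//   base + 4*index + 20       -> e.g. movl 20(%rdi,%rsi,4), %eax : legal
//   base + 9*index            -> rejected; scales 3/5/9 need the base
//                                register slot themselves (index + 8*index)
//   9*index + disp, no base   -> accepted, lea-style
//   displacement beyond imm32 -> rejected; it must fit in a sign-extended
//                                32-bit immediate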
12829bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM, 12830 Type *Ty) const { 12831 // X86 supports extremely general addressing modes. 12832 CodeModel::Model M = getTargetMachine().getCodeModel(); 12833 Reloc::Model R = getTargetMachine().getRelocationModel(); 12834 12835 // X86 allows a sign-extended 32-bit immediate field as a displacement. 12836 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != NULL)) 12837 return false; 12838 12839 if (AM.BaseGV) { 12840 unsigned GVFlags = 12841 Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine()); 12842 12843 // If a reference to this global requires an extra load, we can't fold it. 12844 if (isGlobalStubReference(GVFlags)) 12845 return false; 12846 12847 // If BaseGV requires a register for the PIC base, we cannot also have a 12848 // BaseReg specified. 12849 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags)) 12850 return false; 12851 12852 // If lower 4G is not available, then we must use rip-relative addressing. 12853 if ((M != CodeModel::Small || R != Reloc::Static) && 12854 Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1)) 12855 return false; 12856 } 12857 12858 switch (AM.Scale) { 12859 case 0: 12860 case 1: 12861 case 2: 12862 case 4: 12863 case 8: 12864 // These scales always work. 12865 break; 12866 case 3: 12867 case 5: 12868 case 9: 12869 // These scales are formed with basereg+scalereg. Only accept if there is 12870 // no basereg yet. 12871 if (AM.HasBaseReg) 12872 return false; 12873 break; 12874 default: // Other stuff never works. 12875 return false; 12876 } 12877 12878 return true; 12879} 12880 12881bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const { 12882 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) 12883 return false; 12884 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); 12885 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); 12886 return NumBits1 > NumBits2; 12887} 12888 12889bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const { 12890 return isInt<32>(Imm); 12891} 12892 12893bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const { 12894 // Can also use sub to handle negated immediates. 12895 return isInt<32>(Imm); 12896} 12897 12898bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { 12899 if (!VT1.isInteger() || !VT2.isInteger()) 12900 return false; 12901 unsigned NumBits1 = VT1.getSizeInBits(); 12902 unsigned NumBits2 = VT2.getSizeInBits(); 12903 return NumBits1 > NumBits2; 12904} 12905 12906bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const { 12907 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers. 12908 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit(); 12909} 12910 12911bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const { 12912 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers. 12913 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit(); 12914} 12915 12916bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const { 12917 EVT VT1 = Val.getValueType(); 12918 if (isZExtFree(VT1, VT2)) 12919 return true; 12920 12921 if (Val.getOpcode() != ISD::LOAD) 12922 return false; 12923 12924 if (!VT1.isSimple() || !VT1.isInteger() || 12925 !VT2.isSimple() || !VT2.isInteger()) 12926 return false; 12927 12928 switch (VT1.getSimpleVT().SimpleTy) { 12929 default: break; 12930 case MVT::i8: 12931 case MVT::i16: 12932 case MVT::i32: 12933 // X86 has 8, 16, and 32-bit zero-extending loads. 
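// e.g. (illustrative) the pair
//   %b = load i8* %p
//   %w = zext i8 %b to i32
// folds into a single movzbl, and a 32-bit load feeding a zext to i64 is
// free on x86-64 because writing a 32-bit register already clears the upper
// half; hence the unconditional 'true' below for these narrow source types.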
12934 return true; 12935 } 12936 12937 return false; 12938} 12939 12940bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const { 12941 // i16 instructions are longer (0x66 prefix) and potentially slower. 12942 return !(VT1 == MVT::i32 && VT2 == MVT::i16); 12943} 12944 12945/// isShuffleMaskLegal - Targets can use this to indicate that they only 12946/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 12947/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 12948/// are assumed to be legal. 12949bool 12950X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, 12951 EVT VT) const { 12952 // Very little shuffling can be done for 64-bit vectors right now. 12953 if (VT.getSizeInBits() == 64) 12954 return false; 12955 12956 // FIXME: pshufb, blends, shifts. 12957 return (VT.getVectorNumElements() == 2 || 12958 ShuffleVectorSDNode::isSplatMask(&M[0], VT) || 12959 isMOVLMask(M, VT) || 12960 isSHUFPMask(M, VT, Subtarget->hasFp256()) || 12961 isPSHUFDMask(M, VT) || 12962 isPSHUFHWMask(M, VT, Subtarget->hasInt256()) || 12963 isPSHUFLWMask(M, VT, Subtarget->hasInt256()) || 12964 isPALIGNRMask(M, VT, Subtarget) || 12965 isUNPCKLMask(M, VT, Subtarget->hasInt256()) || 12966 isUNPCKHMask(M, VT, Subtarget->hasInt256()) || 12967 isUNPCKL_v_undef_Mask(M, VT, Subtarget->hasInt256()) || 12968 isUNPCKH_v_undef_Mask(M, VT, Subtarget->hasInt256())); 12969} 12970 12971bool 12972X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask, 12973 EVT VT) const { 12974 unsigned NumElts = VT.getVectorNumElements(); 12975 // FIXME: This collection of masks seems suspect. 12976 if (NumElts == 2) 12977 return true; 12978 if (NumElts == 4 && VT.is128BitVector()) { 12979 return (isMOVLMask(Mask, VT) || 12980 isCommutedMOVLMask(Mask, VT, true) || 12981 isSHUFPMask(Mask, VT, Subtarget->hasFp256()) || 12982 isSHUFPMask(Mask, VT, Subtarget->hasFp256(), /* Commuted */ true)); 12983 } 12984 return false; 12985} 12986 12987//===----------------------------------------------------------------------===// 12988// X86 Scheduler Hooks 12989//===----------------------------------------------------------------------===// 12990 12991/// Utility function to emit xbegin specifying the start of an RTM region. 12992static MachineBasicBlock *EmitXBegin(MachineInstr *MI, MachineBasicBlock *MBB, 12993 const TargetInstrInfo *TII) { 12994 DebugLoc DL = MI->getDebugLoc(); 12995 12996 const BasicBlock *BB = MBB->getBasicBlock(); 12997 MachineFunction::iterator I = MBB; 12998 ++I; 12999 13000 // For the v = xbegin(), we generate 13001 // 13002 // thisMBB: 13003 // xbegin sinkMBB 13004 // 13005 // mainMBB: 13006 // eax = -1 13007 // 13008 // sinkMBB: 13009 // v = eax 13010 13011 MachineBasicBlock *thisMBB = MBB; 13012 MachineFunction *MF = MBB->getParent(); 13013 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 13014 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 13015 MF->insert(I, mainMBB); 13016 MF->insert(I, sinkMBB); 13017 13018 // Transfer the remainder of BB and its successor edges to sinkMBB. 
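// (Note: the splice/transferSuccessorsAndUpdatePHIs pattern below is the
// standard way the custom inserters in this file split a block around the
// pseudo instruction: everything after MI moves into sinkMBB, the original
// block keeps what precedes MI, and the new blocks are threaded in between.)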
13019 sinkMBB->splice(sinkMBB->begin(), MBB, 13020 llvm::next(MachineBasicBlock::iterator(MI)), MBB->end()); 13021 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 13022 13023 // thisMBB: 13024 // xbegin sinkMBB 13025 // # fallthrough to mainMBB 13026 // # abortion to sinkMBB 13027 BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(sinkMBB); 13028 thisMBB->addSuccessor(mainMBB); 13029 thisMBB->addSuccessor(sinkMBB); 13030 13031 // mainMBB: 13032 // EAX = -1 13033 BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), X86::EAX).addImm(-1); 13034 mainMBB->addSuccessor(sinkMBB); 13035 13036 // sinkMBB: 13037 // EAX is live into the sinkMBB 13038 sinkMBB->addLiveIn(X86::EAX); 13039 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 13040 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg()) 13041 .addReg(X86::EAX); 13042 13043 MI->eraseFromParent(); 13044 return sinkMBB; 13045} 13046 13047// Get CMPXCHG opcode for the specified data type. 13048static unsigned getCmpXChgOpcode(EVT VT) { 13049 switch (VT.getSimpleVT().SimpleTy) { 13050 case MVT::i8: return X86::LCMPXCHG8; 13051 case MVT::i16: return X86::LCMPXCHG16; 13052 case MVT::i32: return X86::LCMPXCHG32; 13053 case MVT::i64: return X86::LCMPXCHG64; 13054 default: 13055 break; 13056 } 13057 llvm_unreachable("Invalid operand size!"); 13058} 13059 13060// Get LOAD opcode for the specified data type. 13061static unsigned getLoadOpcode(EVT VT) { 13062 switch (VT.getSimpleVT().SimpleTy) { 13063 case MVT::i8: return X86::MOV8rm; 13064 case MVT::i16: return X86::MOV16rm; 13065 case MVT::i32: return X86::MOV32rm; 13066 case MVT::i64: return X86::MOV64rm; 13067 default: 13068 break; 13069 } 13070 llvm_unreachable("Invalid operand size!"); 13071} 13072 13073// Get opcode of the non-atomic one from the specified atomic instruction. 13074static unsigned getNonAtomicOpcode(unsigned Opc) { 13075 switch (Opc) { 13076 case X86::ATOMAND8: return X86::AND8rr; 13077 case X86::ATOMAND16: return X86::AND16rr; 13078 case X86::ATOMAND32: return X86::AND32rr; 13079 case X86::ATOMAND64: return X86::AND64rr; 13080 case X86::ATOMOR8: return X86::OR8rr; 13081 case X86::ATOMOR16: return X86::OR16rr; 13082 case X86::ATOMOR32: return X86::OR32rr; 13083 case X86::ATOMOR64: return X86::OR64rr; 13084 case X86::ATOMXOR8: return X86::XOR8rr; 13085 case X86::ATOMXOR16: return X86::XOR16rr; 13086 case X86::ATOMXOR32: return X86::XOR32rr; 13087 case X86::ATOMXOR64: return X86::XOR64rr; 13088 } 13089 llvm_unreachable("Unhandled atomic-load-op opcode!"); 13090} 13091 13092// Get opcode of the non-atomic one from the specified atomic instruction with 13093// extra opcode. 
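// For example (illustrative): the min/max flavours map to a compare plus a
// conditional move, and NAND maps to AND plus a NOT, e.g.
//   ATOMMAX32  -> ExtraOpc = CMP32rr, returns CMOVL32rr
//   ATOMNAND32 -> ExtraOpc = NOT32r,  returns AND32rr
// The caller pairs the returned opcode with ExtraOpc inside the cmpxchg
// retry loop built by EmitAtomicLoadArith below.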
13094static unsigned getNonAtomicOpcodeWithExtraOpc(unsigned Opc, 13095 unsigned &ExtraOpc) { 13096 switch (Opc) { 13097 case X86::ATOMNAND8: ExtraOpc = X86::NOT8r; return X86::AND8rr; 13098 case X86::ATOMNAND16: ExtraOpc = X86::NOT16r; return X86::AND16rr; 13099 case X86::ATOMNAND32: ExtraOpc = X86::NOT32r; return X86::AND32rr; 13100 case X86::ATOMNAND64: ExtraOpc = X86::NOT64r; return X86::AND64rr; 13101 case X86::ATOMMAX8: ExtraOpc = X86::CMP8rr; return X86::CMOVL32rr; 13102 case X86::ATOMMAX16: ExtraOpc = X86::CMP16rr; return X86::CMOVL16rr; 13103 case X86::ATOMMAX32: ExtraOpc = X86::CMP32rr; return X86::CMOVL32rr; 13104 case X86::ATOMMAX64: ExtraOpc = X86::CMP64rr; return X86::CMOVL64rr; 13105 case X86::ATOMMIN8: ExtraOpc = X86::CMP8rr; return X86::CMOVG32rr; 13106 case X86::ATOMMIN16: ExtraOpc = X86::CMP16rr; return X86::CMOVG16rr; 13107 case X86::ATOMMIN32: ExtraOpc = X86::CMP32rr; return X86::CMOVG32rr; 13108 case X86::ATOMMIN64: ExtraOpc = X86::CMP64rr; return X86::CMOVG64rr; 13109 case X86::ATOMUMAX8: ExtraOpc = X86::CMP8rr; return X86::CMOVB32rr; 13110 case X86::ATOMUMAX16: ExtraOpc = X86::CMP16rr; return X86::CMOVB16rr; 13111 case X86::ATOMUMAX32: ExtraOpc = X86::CMP32rr; return X86::CMOVB32rr; 13112 case X86::ATOMUMAX64: ExtraOpc = X86::CMP64rr; return X86::CMOVB64rr; 13113 case X86::ATOMUMIN8: ExtraOpc = X86::CMP8rr; return X86::CMOVA32rr; 13114 case X86::ATOMUMIN16: ExtraOpc = X86::CMP16rr; return X86::CMOVA16rr; 13115 case X86::ATOMUMIN32: ExtraOpc = X86::CMP32rr; return X86::CMOVA32rr; 13116 case X86::ATOMUMIN64: ExtraOpc = X86::CMP64rr; return X86::CMOVA64rr; 13117 } 13118 llvm_unreachable("Unhandled atomic-load-op opcode!"); 13119} 13120 13121// Get opcode of the non-atomic one from the specified atomic instruction for 13122// 64-bit data type on 32-bit target. 13123static unsigned getNonAtomic6432Opcode(unsigned Opc, unsigned &HiOpc) { 13124 switch (Opc) { 13125 case X86::ATOMAND6432: HiOpc = X86::AND32rr; return X86::AND32rr; 13126 case X86::ATOMOR6432: HiOpc = X86::OR32rr; return X86::OR32rr; 13127 case X86::ATOMXOR6432: HiOpc = X86::XOR32rr; return X86::XOR32rr; 13128 case X86::ATOMADD6432: HiOpc = X86::ADC32rr; return X86::ADD32rr; 13129 case X86::ATOMSUB6432: HiOpc = X86::SBB32rr; return X86::SUB32rr; 13130 case X86::ATOMSWAP6432: HiOpc = X86::MOV32rr; return X86::MOV32rr; 13131 case X86::ATOMMAX6432: HiOpc = X86::SETLr; return X86::SETLr; 13132 case X86::ATOMMIN6432: HiOpc = X86::SETGr; return X86::SETGr; 13133 case X86::ATOMUMAX6432: HiOpc = X86::SETBr; return X86::SETBr; 13134 case X86::ATOMUMIN6432: HiOpc = X86::SETAr; return X86::SETAr; 13135 } 13136 llvm_unreachable("Unhandled atomic-load-op opcode!"); 13137} 13138 13139// Get opcode of the non-atomic one from the specified atomic instruction for 13140// 64-bit data type on 32-bit target with extra opcode. 13141static unsigned getNonAtomic6432OpcodeWithExtraOpc(unsigned Opc, 13142 unsigned &HiOpc, 13143 unsigned &ExtraOpc) { 13144 switch (Opc) { 13145 case X86::ATOMNAND6432: 13146 ExtraOpc = X86::NOT32r; 13147 HiOpc = X86::AND32rr; 13148 return X86::AND32rr; 13149 } 13150 llvm_unreachable("Unhandled atomic-load-op opcode!"); 13151} 13152 13153// Get pseudo CMOV opcode from the specified data type. 
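// These pseudos cover targets without native CMOV (e.g. i486). When
// !Subtarget->hasCMov(), the inserters below emit something like
//   %t2 = CMOV_GR32 %src, %acc, COND_L
// and rely on EmitLoweredSelect to expand it into an explicit
// branch-over-copy diamond later; with CMOV available the real CMOVcc
// instructions are used instead.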
13154static unsigned getPseudoCMOVOpc(EVT VT) { 13155 switch (VT.getSimpleVT().SimpleTy) { 13156 case MVT::i8: return X86::CMOV_GR8; 13157 case MVT::i16: return X86::CMOV_GR16; 13158 case MVT::i32: return X86::CMOV_GR32; 13159 default: 13160 break; 13161 } 13162 llvm_unreachable("Unknown CMOV opcode!"); 13163} 13164 13165// EmitAtomicLoadArith - emit the code sequence for pseudo atomic instructions. 13166// They will be translated into a spin-loop or compare-exchange loop from 13167// 13168// ... 13169// dst = atomic-fetch-op MI.addr, MI.val 13170// ... 13171// 13172// to 13173// 13174// ... 13175// t1 = LOAD MI.addr 13176// loop: 13177// t4 = phi(t1, t3 / loop) 13178// t2 = OP MI.val, t4 13179// EAX = t4 13180// LCMPXCHG [MI.addr], t2, [EAX is implicitly used & defined] 13181// t3 = EAX 13182// JNE loop 13183// sink: 13184// dst = t3 13185// ... 13186MachineBasicBlock * 13187X86TargetLowering::EmitAtomicLoadArith(MachineInstr *MI, 13188 MachineBasicBlock *MBB) const { 13189 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 13190 DebugLoc DL = MI->getDebugLoc(); 13191 13192 MachineFunction *MF = MBB->getParent(); 13193 MachineRegisterInfo &MRI = MF->getRegInfo(); 13194 13195 const BasicBlock *BB = MBB->getBasicBlock(); 13196 MachineFunction::iterator I = MBB; 13197 ++I; 13198 13199 assert(MI->getNumOperands() <= X86::AddrNumOperands + 4 && 13200 "Unexpected number of operands"); 13201 13202 assert(MI->hasOneMemOperand() && 13203 "Expected atomic-load-op to have one memoperand"); 13204 13205 // Memory Reference 13206 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 13207 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 13208 13209 unsigned DstReg, SrcReg; 13210 unsigned MemOpndSlot; 13211 13212 unsigned CurOp = 0; 13213 13214 DstReg = MI->getOperand(CurOp++).getReg(); 13215 MemOpndSlot = CurOp; 13216 CurOp += X86::AddrNumOperands; 13217 SrcReg = MI->getOperand(CurOp++).getReg(); 13218 13219 const TargetRegisterClass *RC = MRI.getRegClass(DstReg); 13220 MVT::SimpleValueType VT = *RC->vt_begin(); 13221 unsigned t1 = MRI.createVirtualRegister(RC); 13222 unsigned t2 = MRI.createVirtualRegister(RC); 13223 unsigned t3 = MRI.createVirtualRegister(RC); 13224 unsigned t4 = MRI.createVirtualRegister(RC); 13225 unsigned PhyReg = getX86SubSuperRegister(X86::EAX, VT); 13226 13227 unsigned LCMPXCHGOpc = getCmpXChgOpcode(VT); 13228 unsigned LOADOpc = getLoadOpcode(VT); 13229 13230 // For the atomic load-arith operator, we generate 13231 // 13232 // thisMBB: 13233 // t1 = LOAD [MI.addr] 13234 // mainMBB: 13235 // t4 = phi(t1 / thisMBB, t3 / mainMBB) 13236 // t1 = OP MI.val, EAX 13237 // EAX = t4 13238 // LCMPXCHG [MI.addr], t1, [EAX is implicitly used & defined] 13239 // t3 = EAX 13240 // JNE mainMBB 13241 // sinkMBB: 13242 // dst = t3 13243 13244 MachineBasicBlock *thisMBB = MBB; 13245 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 13246 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 13247 MF->insert(I, mainMBB); 13248 MF->insert(I, sinkMBB); 13249 13250 MachineInstrBuilder MIB; 13251 13252 // Transfer the remainder of BB and its successor edges to sinkMBB. 
13253 sinkMBB->splice(sinkMBB->begin(), MBB, 13254 llvm::next(MachineBasicBlock::iterator(MI)), MBB->end()); 13255 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 13256 13257 // thisMBB: 13258 MIB = BuildMI(thisMBB, DL, TII->get(LOADOpc), t1); 13259 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { 13260 MachineOperand NewMO = MI->getOperand(MemOpndSlot + i); 13261 if (NewMO.isReg()) 13262 NewMO.setIsKill(false); 13263 MIB.addOperand(NewMO); 13264 } 13265 for (MachineInstr::mmo_iterator MMOI = MMOBegin; MMOI != MMOEnd; ++MMOI) { 13266 unsigned flags = (*MMOI)->getFlags(); 13267 flags = (flags & ~MachineMemOperand::MOStore) | MachineMemOperand::MOLoad; 13268 MachineMemOperand *MMO = 13269 MF->getMachineMemOperand((*MMOI)->getPointerInfo(), flags, 13270 (*MMOI)->getSize(), 13271 (*MMOI)->getBaseAlignment(), 13272 (*MMOI)->getTBAAInfo(), 13273 (*MMOI)->getRanges()); 13274 MIB.addMemOperand(MMO); 13275 } 13276 13277 thisMBB->addSuccessor(mainMBB); 13278 13279 // mainMBB: 13280 MachineBasicBlock *origMainMBB = mainMBB; 13281 13282 // Add a PHI. 13283 MachineInstr *Phi = BuildMI(mainMBB, DL, TII->get(X86::PHI), t4) 13284 .addReg(t1).addMBB(thisMBB).addReg(t3).addMBB(mainMBB); 13285 13286 unsigned Opc = MI->getOpcode(); 13287 switch (Opc) { 13288 default: 13289 llvm_unreachable("Unhandled atomic-load-op opcode!"); 13290 case X86::ATOMAND8: 13291 case X86::ATOMAND16: 13292 case X86::ATOMAND32: 13293 case X86::ATOMAND64: 13294 case X86::ATOMOR8: 13295 case X86::ATOMOR16: 13296 case X86::ATOMOR32: 13297 case X86::ATOMOR64: 13298 case X86::ATOMXOR8: 13299 case X86::ATOMXOR16: 13300 case X86::ATOMXOR32: 13301 case X86::ATOMXOR64: { 13302 unsigned ARITHOpc = getNonAtomicOpcode(Opc); 13303 BuildMI(mainMBB, DL, TII->get(ARITHOpc), t2).addReg(SrcReg) 13304 .addReg(t4); 13305 break; 13306 } 13307 case X86::ATOMNAND8: 13308 case X86::ATOMNAND16: 13309 case X86::ATOMNAND32: 13310 case X86::ATOMNAND64: { 13311 unsigned Tmp = MRI.createVirtualRegister(RC); 13312 unsigned NOTOpc; 13313 unsigned ANDOpc = getNonAtomicOpcodeWithExtraOpc(Opc, NOTOpc); 13314 BuildMI(mainMBB, DL, TII->get(ANDOpc), Tmp).addReg(SrcReg) 13315 .addReg(t4); 13316 BuildMI(mainMBB, DL, TII->get(NOTOpc), t2).addReg(Tmp); 13317 break; 13318 } 13319 case X86::ATOMMAX8: 13320 case X86::ATOMMAX16: 13321 case X86::ATOMMAX32: 13322 case X86::ATOMMAX64: 13323 case X86::ATOMMIN8: 13324 case X86::ATOMMIN16: 13325 case X86::ATOMMIN32: 13326 case X86::ATOMMIN64: 13327 case X86::ATOMUMAX8: 13328 case X86::ATOMUMAX16: 13329 case X86::ATOMUMAX32: 13330 case X86::ATOMUMAX64: 13331 case X86::ATOMUMIN8: 13332 case X86::ATOMUMIN16: 13333 case X86::ATOMUMIN32: 13334 case X86::ATOMUMIN64: { 13335 unsigned CMPOpc; 13336 unsigned CMOVOpc = getNonAtomicOpcodeWithExtraOpc(Opc, CMPOpc); 13337 13338 BuildMI(mainMBB, DL, TII->get(CMPOpc)) 13339 .addReg(SrcReg) 13340 .addReg(t4); 13341 13342 if (Subtarget->hasCMov()) { 13343 if (VT != MVT::i8) { 13344 // Native support 13345 BuildMI(mainMBB, DL, TII->get(CMOVOpc), t2) 13346 .addReg(SrcReg) 13347 .addReg(t4); 13348 } else { 13349 // Promote i8 to i32 to use CMOV32 13350 const TargetRegisterInfo* TRI = getTargetMachine().getRegisterInfo(); 13351 const TargetRegisterClass *RC32 = 13352 TRI->getSubClassWithSubReg(getRegClassFor(MVT::i32), X86::sub_8bit); 13353 unsigned SrcReg32 = MRI.createVirtualRegister(RC32); 13354 unsigned AccReg32 = MRI.createVirtualRegister(RC32); 13355 unsigned Tmp = MRI.createVirtualRegister(RC32); 13356 13357 unsigned Undef = MRI.createVirtualRegister(RC32); 13358 BuildMI(mainMBB, DL, 
TII->get(TargetOpcode::IMPLICIT_DEF), Undef); 13359 13360 BuildMI(mainMBB, DL, TII->get(TargetOpcode::INSERT_SUBREG), SrcReg32) 13361 .addReg(Undef) 13362 .addReg(SrcReg) 13363 .addImm(X86::sub_8bit); 13364 BuildMI(mainMBB, DL, TII->get(TargetOpcode::INSERT_SUBREG), AccReg32) 13365 .addReg(Undef) 13366 .addReg(t4) 13367 .addImm(X86::sub_8bit); 13368 13369 BuildMI(mainMBB, DL, TII->get(CMOVOpc), Tmp) 13370 .addReg(SrcReg32) 13371 .addReg(AccReg32); 13372 13373 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), t2) 13374 .addReg(Tmp, 0, X86::sub_8bit); 13375 } 13376 } else { 13377 // Use pseudo select and lower them. 13378 assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) && 13379 "Invalid atomic-load-op transformation!"); 13380 unsigned SelOpc = getPseudoCMOVOpc(VT); 13381 X86::CondCode CC = X86::getCondFromCMovOpc(CMOVOpc); 13382 assert(CC != X86::COND_INVALID && "Invalid atomic-load-op transformation!"); 13383 MIB = BuildMI(mainMBB, DL, TII->get(SelOpc), t2) 13384 .addReg(SrcReg).addReg(t4) 13385 .addImm(CC); 13386 mainMBB = EmitLoweredSelect(MIB, mainMBB); 13387 // Replace the original PHI node as mainMBB is changed after CMOV 13388 // lowering. 13389 BuildMI(*origMainMBB, Phi, DL, TII->get(X86::PHI), t4) 13390 .addReg(t1).addMBB(thisMBB).addReg(t3).addMBB(mainMBB); 13391 Phi->eraseFromParent(); 13392 } 13393 break; 13394 } 13395 } 13396 13397 // Copy PhyReg back from virtual register. 13398 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), PhyReg) 13399 .addReg(t4); 13400 13401 MIB = BuildMI(mainMBB, DL, TII->get(LCMPXCHGOpc)); 13402 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { 13403 MachineOperand NewMO = MI->getOperand(MemOpndSlot + i); 13404 if (NewMO.isReg()) 13405 NewMO.setIsKill(false); 13406 MIB.addOperand(NewMO); 13407 } 13408 MIB.addReg(t2); 13409 MIB.setMemRefs(MMOBegin, MMOEnd); 13410 13411 // Copy PhyReg back to virtual register. 13412 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), t3) 13413 .addReg(PhyReg); 13414 13415 BuildMI(mainMBB, DL, TII->get(X86::JNE_4)).addMBB(origMainMBB); 13416 13417 mainMBB->addSuccessor(origMainMBB); 13418 mainMBB->addSuccessor(sinkMBB); 13419 13420 // sinkMBB: 13421 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 13422 TII->get(TargetOpcode::COPY), DstReg) 13423 .addReg(t3); 13424 13425 MI->eraseFromParent(); 13426 return sinkMBB; 13427} 13428 13429// EmitAtomicLoadArith6432 - emit the code sequence for pseudo atomic 13430// instructions. They will be translated into a spin-loop or compare-exchange 13431// loop from 13432// 13433// ... 13434// dst = atomic-fetch-op MI.addr, MI.val 13435// ... 13436// 13437// to 13438// 13439// ... 13440// t1L = LOAD [MI.addr + 0] 13441// t1H = LOAD [MI.addr + 4] 13442// loop: 13443// t4L = phi(t1L, t3L / loop) 13444// t4H = phi(t1H, t3H / loop) 13445// t2L = OP MI.val.lo, t4L 13446// t2H = OP MI.val.hi, t4H 13447// EAX = t4L 13448// EDX = t4H 13449// EBX = t2L 13450// ECX = t2H 13451// LCMPXCHG8B [MI.addr], [ECX:EBX & EDX:EAX are implicitly used and EDX:EAX is implicitly defined] 13452// t3L = EAX 13453// t3H = EDX 13454// JNE loop 13455// sink: 13456// dstL = t3L 13457// dstH = t3H 13458// ... 
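// Concrete example (the assembly is only a rough sketch): on i386,
//   %old = atomicrmw add i64* %p, i64 %v seq_cst
// becomes the loop described above, approximately
//   movl (%p), %eax ; movl 4(%p), %edx
// .Lretry:
//   movl %eax, %ebx ; addl lo(%v), %ebx
//   movl %edx, %ecx ; adcl hi(%v), %ecx
//   lock cmpxchg8b (%p)
//   jne .Lretry
// leaving the original value %old in EDX:EAX.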
13459MachineBasicBlock * 13460X86TargetLowering::EmitAtomicLoadArith6432(MachineInstr *MI, 13461 MachineBasicBlock *MBB) const { 13462 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 13463 DebugLoc DL = MI->getDebugLoc(); 13464 13465 MachineFunction *MF = MBB->getParent(); 13466 MachineRegisterInfo &MRI = MF->getRegInfo(); 13467 13468 const BasicBlock *BB = MBB->getBasicBlock(); 13469 MachineFunction::iterator I = MBB; 13470 ++I; 13471 13472 assert(MI->getNumOperands() <= X86::AddrNumOperands + 7 && 13473 "Unexpected number of operands"); 13474 13475 assert(MI->hasOneMemOperand() && 13476 "Expected atomic-load-op32 to have one memoperand"); 13477 13478 // Memory Reference 13479 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 13480 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 13481 13482 unsigned DstLoReg, DstHiReg; 13483 unsigned SrcLoReg, SrcHiReg; 13484 unsigned MemOpndSlot; 13485 13486 unsigned CurOp = 0; 13487 13488 DstLoReg = MI->getOperand(CurOp++).getReg(); 13489 DstHiReg = MI->getOperand(CurOp++).getReg(); 13490 MemOpndSlot = CurOp; 13491 CurOp += X86::AddrNumOperands; 13492 SrcLoReg = MI->getOperand(CurOp++).getReg(); 13493 SrcHiReg = MI->getOperand(CurOp++).getReg(); 13494 13495 const TargetRegisterClass *RC = &X86::GR32RegClass; 13496 const TargetRegisterClass *RC8 = &X86::GR8RegClass; 13497 13498 unsigned t1L = MRI.createVirtualRegister(RC); 13499 unsigned t1H = MRI.createVirtualRegister(RC); 13500 unsigned t2L = MRI.createVirtualRegister(RC); 13501 unsigned t2H = MRI.createVirtualRegister(RC); 13502 unsigned t3L = MRI.createVirtualRegister(RC); 13503 unsigned t3H = MRI.createVirtualRegister(RC); 13504 unsigned t4L = MRI.createVirtualRegister(RC); 13505 unsigned t4H = MRI.createVirtualRegister(RC); 13506 13507 unsigned LCMPXCHGOpc = X86::LCMPXCHG8B; 13508 unsigned LOADOpc = X86::MOV32rm; 13509 13510 // For the atomic load-arith operator, we generate 13511 // 13512 // thisMBB: 13513 // t1L = LOAD [MI.addr + 0] 13514 // t1H = LOAD [MI.addr + 4] 13515 // mainMBB: 13516 // t4L = phi(t1L / thisMBB, t3L / mainMBB) 13517 // t4H = phi(t1H / thisMBB, t3H / mainMBB) 13518 // t2L = OP MI.val.lo, t4L 13519 // t2H = OP MI.val.hi, t4H 13520 // EBX = t2L 13521 // ECX = t2H 13522 // LCMPXCHG8B [MI.addr], [ECX:EBX & EDX:EAX are implicitly used and EDX:EAX is implicitly defined] 13523 // t3L = EAX 13524 // t3H = EDX 13525 // JNE loop 13526 // sinkMBB: 13527 // dstL = t3L 13528 // dstH = t3H 13529 13530 MachineBasicBlock *thisMBB = MBB; 13531 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 13532 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 13533 MF->insert(I, mainMBB); 13534 MF->insert(I, sinkMBB); 13535 13536 MachineInstrBuilder MIB; 13537 13538 // Transfer the remainder of BB and its successor edges to sinkMBB. 
13539 sinkMBB->splice(sinkMBB->begin(), MBB, 13540 llvm::next(MachineBasicBlock::iterator(MI)), MBB->end()); 13541 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 13542 13543 // thisMBB: 13544 // Lo 13545 MIB = BuildMI(thisMBB, DL, TII->get(LOADOpc), t1L); 13546 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { 13547 MachineOperand NewMO = MI->getOperand(MemOpndSlot + i); 13548 if (NewMO.isReg()) 13549 NewMO.setIsKill(false); 13550 MIB.addOperand(NewMO); 13551 } 13552 for (MachineInstr::mmo_iterator MMOI = MMOBegin; MMOI != MMOEnd; ++MMOI) { 13553 unsigned flags = (*MMOI)->getFlags(); 13554 flags = (flags & ~MachineMemOperand::MOStore) | MachineMemOperand::MOLoad; 13555 MachineMemOperand *MMO = 13556 MF->getMachineMemOperand((*MMOI)->getPointerInfo(), flags, 13557 (*MMOI)->getSize(), 13558 (*MMOI)->getBaseAlignment(), 13559 (*MMOI)->getTBAAInfo(), 13560 (*MMOI)->getRanges()); 13561 MIB.addMemOperand(MMO); 13562 }; 13563 MachineInstr *LowMI = MIB; 13564 13565 // Hi 13566 MIB = BuildMI(thisMBB, DL, TII->get(LOADOpc), t1H); 13567 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { 13568 if (i == X86::AddrDisp) { 13569 MIB.addDisp(MI->getOperand(MemOpndSlot + i), 4); // 4 == sizeof(i32) 13570 } else { 13571 MachineOperand NewMO = MI->getOperand(MemOpndSlot + i); 13572 if (NewMO.isReg()) 13573 NewMO.setIsKill(false); 13574 MIB.addOperand(NewMO); 13575 } 13576 } 13577 MIB.setMemRefs(LowMI->memoperands_begin(), LowMI->memoperands_end()); 13578 13579 thisMBB->addSuccessor(mainMBB); 13580 13581 // mainMBB: 13582 MachineBasicBlock *origMainMBB = mainMBB; 13583 13584 // Add PHIs. 13585 MachineInstr *PhiL = BuildMI(mainMBB, DL, TII->get(X86::PHI), t4L) 13586 .addReg(t1L).addMBB(thisMBB).addReg(t3L).addMBB(mainMBB); 13587 MachineInstr *PhiH = BuildMI(mainMBB, DL, TII->get(X86::PHI), t4H) 13588 .addReg(t1H).addMBB(thisMBB).addReg(t3H).addMBB(mainMBB); 13589 13590 unsigned Opc = MI->getOpcode(); 13591 switch (Opc) { 13592 default: 13593 llvm_unreachable("Unhandled atomic-load-op6432 opcode!"); 13594 case X86::ATOMAND6432: 13595 case X86::ATOMOR6432: 13596 case X86::ATOMXOR6432: 13597 case X86::ATOMADD6432: 13598 case X86::ATOMSUB6432: { 13599 unsigned HiOpc; 13600 unsigned LoOpc = getNonAtomic6432Opcode(Opc, HiOpc); 13601 BuildMI(mainMBB, DL, TII->get(LoOpc), t2L).addReg(t4L) 13602 .addReg(SrcLoReg); 13603 BuildMI(mainMBB, DL, TII->get(HiOpc), t2H).addReg(t4H) 13604 .addReg(SrcHiReg); 13605 break; 13606 } 13607 case X86::ATOMNAND6432: { 13608 unsigned HiOpc, NOTOpc; 13609 unsigned LoOpc = getNonAtomic6432OpcodeWithExtraOpc(Opc, HiOpc, NOTOpc); 13610 unsigned TmpL = MRI.createVirtualRegister(RC); 13611 unsigned TmpH = MRI.createVirtualRegister(RC); 13612 BuildMI(mainMBB, DL, TII->get(LoOpc), TmpL).addReg(SrcLoReg) 13613 .addReg(t4L); 13614 BuildMI(mainMBB, DL, TII->get(HiOpc), TmpH).addReg(SrcHiReg) 13615 .addReg(t4H); 13616 BuildMI(mainMBB, DL, TII->get(NOTOpc), t2L).addReg(TmpL); 13617 BuildMI(mainMBB, DL, TII->get(NOTOpc), t2H).addReg(TmpH); 13618 break; 13619 } 13620 case X86::ATOMMAX6432: 13621 case X86::ATOMMIN6432: 13622 case X86::ATOMUMAX6432: 13623 case X86::ATOMUMIN6432: { 13624 unsigned HiOpc; 13625 unsigned LoOpc = getNonAtomic6432Opcode(Opc, HiOpc); 13626 unsigned cL = MRI.createVirtualRegister(RC8); 13627 unsigned cH = MRI.createVirtualRegister(RC8); 13628 unsigned cL32 = MRI.createVirtualRegister(RC); 13629 unsigned cH32 = MRI.createVirtualRegister(RC); 13630 unsigned cc = MRI.createVirtualRegister(RC); 13631 // cl := cmp src_lo, lo 13632 BuildMI(mainMBB, DL, TII->get(X86::CMP32rr)) 
13633 .addReg(SrcLoReg).addReg(t4L); 13634 BuildMI(mainMBB, DL, TII->get(LoOpc), cL); 13635 BuildMI(mainMBB, DL, TII->get(X86::MOVZX32rr8), cL32).addReg(cL); 13636 // ch := cmp src_hi, hi 13637 BuildMI(mainMBB, DL, TII->get(X86::CMP32rr)) 13638 .addReg(SrcHiReg).addReg(t4H); 13639 BuildMI(mainMBB, DL, TII->get(HiOpc), cH); 13640 BuildMI(mainMBB, DL, TII->get(X86::MOVZX32rr8), cH32).addReg(cH); 13641 // cc := if (src_hi == hi) ? cl : ch; 13642 if (Subtarget->hasCMov()) { 13643 BuildMI(mainMBB, DL, TII->get(X86::CMOVE32rr), cc) 13644 .addReg(cH32).addReg(cL32); 13645 } else { 13646 MIB = BuildMI(mainMBB, DL, TII->get(X86::CMOV_GR32), cc) 13647 .addReg(cH32).addReg(cL32) 13648 .addImm(X86::COND_E); 13649 mainMBB = EmitLoweredSelect(MIB, mainMBB); 13650 } 13651 BuildMI(mainMBB, DL, TII->get(X86::TEST32rr)).addReg(cc).addReg(cc); 13652 if (Subtarget->hasCMov()) { 13653 BuildMI(mainMBB, DL, TII->get(X86::CMOVNE32rr), t2L) 13654 .addReg(SrcLoReg).addReg(t4L); 13655 BuildMI(mainMBB, DL, TII->get(X86::CMOVNE32rr), t2H) 13656 .addReg(SrcHiReg).addReg(t4H); 13657 } else { 13658 MIB = BuildMI(mainMBB, DL, TII->get(X86::CMOV_GR32), t2L) 13659 .addReg(SrcLoReg).addReg(t4L) 13660 .addImm(X86::COND_NE); 13661 mainMBB = EmitLoweredSelect(MIB, mainMBB); 13662 // As the lowered CMOV won't clobber EFLAGS, we could reuse it for the 13663 // 2nd CMOV lowering. 13664 mainMBB->addLiveIn(X86::EFLAGS); 13665 MIB = BuildMI(mainMBB, DL, TII->get(X86::CMOV_GR32), t2H) 13666 .addReg(SrcHiReg).addReg(t4H) 13667 .addImm(X86::COND_NE); 13668 mainMBB = EmitLoweredSelect(MIB, mainMBB); 13669 // Replace the original PHI node as mainMBB is changed after CMOV 13670 // lowering. 13671 BuildMI(*origMainMBB, PhiL, DL, TII->get(X86::PHI), t4L) 13672 .addReg(t1L).addMBB(thisMBB).addReg(t3L).addMBB(mainMBB); 13673 BuildMI(*origMainMBB, PhiH, DL, TII->get(X86::PHI), t4H) 13674 .addReg(t1H).addMBB(thisMBB).addReg(t3H).addMBB(mainMBB); 13675 PhiL->eraseFromParent(); 13676 PhiH->eraseFromParent(); 13677 } 13678 break; 13679 } 13680 case X86::ATOMSWAP6432: { 13681 unsigned HiOpc; 13682 unsigned LoOpc = getNonAtomic6432Opcode(Opc, HiOpc); 13683 BuildMI(mainMBB, DL, TII->get(LoOpc), t2L).addReg(SrcLoReg); 13684 BuildMI(mainMBB, DL, TII->get(HiOpc), t2H).addReg(SrcHiReg); 13685 break; 13686 } 13687 } 13688 13689 // Copy EDX:EAX back from HiReg:LoReg 13690 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::EAX).addReg(t4L); 13691 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::EDX).addReg(t4H); 13692 // Copy ECX:EBX from t1H:t1L 13693 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::EBX).addReg(t2L); 13694 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::ECX).addReg(t2H); 13695 13696 MIB = BuildMI(mainMBB, DL, TII->get(LCMPXCHGOpc)); 13697 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { 13698 MachineOperand NewMO = MI->getOperand(MemOpndSlot + i); 13699 if (NewMO.isReg()) 13700 NewMO.setIsKill(false); 13701 MIB.addOperand(NewMO); 13702 } 13703 MIB.setMemRefs(MMOBegin, MMOEnd); 13704 13705 // Copy EDX:EAX back to t3H:t3L 13706 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), t3L).addReg(X86::EAX); 13707 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), t3H).addReg(X86::EDX); 13708 13709 BuildMI(mainMBB, DL, TII->get(X86::JNE_4)).addMBB(origMainMBB); 13710 13711 mainMBB->addSuccessor(origMainMBB); 13712 mainMBB->addSuccessor(sinkMBB); 13713 13714 // sinkMBB: 13715 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 13716 TII->get(TargetOpcode::COPY), DstLoReg) 13717 .addReg(t3L); 13718 BuildMI(*sinkMBB, 
sinkMBB->begin(), DL, 13719 TII->get(TargetOpcode::COPY), DstHiReg) 13720 .addReg(t3H); 13721 13722 MI->eraseFromParent(); 13723 return sinkMBB; 13724} 13725 13726// FIXME: When we get size specific XMM0 registers, i.e. XMM0_V16I8 13727// or XMM0_V32I8 in AVX all of this code can be replaced with that 13728// in the .td file. 13729static MachineBasicBlock *EmitPCMPSTRM(MachineInstr *MI, MachineBasicBlock *BB, 13730 const TargetInstrInfo *TII) { 13731 unsigned Opc; 13732 switch (MI->getOpcode()) { 13733 default: llvm_unreachable("illegal opcode!"); 13734 case X86::PCMPISTRM128REG: Opc = X86::PCMPISTRM128rr; break; 13735 case X86::VPCMPISTRM128REG: Opc = X86::VPCMPISTRM128rr; break; 13736 case X86::PCMPISTRM128MEM: Opc = X86::PCMPISTRM128rm; break; 13737 case X86::VPCMPISTRM128MEM: Opc = X86::VPCMPISTRM128rm; break; 13738 case X86::PCMPESTRM128REG: Opc = X86::PCMPESTRM128rr; break; 13739 case X86::VPCMPESTRM128REG: Opc = X86::VPCMPESTRM128rr; break; 13740 case X86::PCMPESTRM128MEM: Opc = X86::PCMPESTRM128rm; break; 13741 case X86::VPCMPESTRM128MEM: Opc = X86::VPCMPESTRM128rm; break; 13742 } 13743 13744 DebugLoc dl = MI->getDebugLoc(); 13745 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc)); 13746 13747 unsigned NumArgs = MI->getNumOperands(); 13748 for (unsigned i = 1; i < NumArgs; ++i) { 13749 MachineOperand &Op = MI->getOperand(i); 13750 if (!(Op.isReg() && Op.isImplicit())) 13751 MIB.addOperand(Op); 13752 } 13753 if (MI->hasOneMemOperand()) 13754 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); 13755 13756 BuildMI(*BB, MI, dl, 13757 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg()) 13758 .addReg(X86::XMM0); 13759 13760 MI->eraseFromParent(); 13761 return BB; 13762} 13763 13764// FIXME: Custom handling because TableGen doesn't support multiple implicit 13765// defs in an instruction pattern 13766static MachineBasicBlock *EmitPCMPSTRI(MachineInstr *MI, MachineBasicBlock *BB, 13767 const TargetInstrInfo *TII) { 13768 unsigned Opc; 13769 switch (MI->getOpcode()) { 13770 default: llvm_unreachable("illegal opcode!"); 13771 case X86::PCMPISTRIREG: Opc = X86::PCMPISTRIrr; break; 13772 case X86::VPCMPISTRIREG: Opc = X86::VPCMPISTRIrr; break; 13773 case X86::PCMPISTRIMEM: Opc = X86::PCMPISTRIrm; break; 13774 case X86::VPCMPISTRIMEM: Opc = X86::VPCMPISTRIrm; break; 13775 case X86::PCMPESTRIREG: Opc = X86::PCMPESTRIrr; break; 13776 case X86::VPCMPESTRIREG: Opc = X86::VPCMPESTRIrr; break; 13777 case X86::PCMPESTRIMEM: Opc = X86::PCMPESTRIrm; break; 13778 case X86::VPCMPESTRIMEM: Opc = X86::VPCMPESTRIrm; break; 13779 } 13780 13781 DebugLoc dl = MI->getDebugLoc(); 13782 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc)); 13783 13784 unsigned NumArgs = MI->getNumOperands(); // remove the results 13785 for (unsigned i = 1; i < NumArgs; ++i) { 13786 MachineOperand &Op = MI->getOperand(i); 13787 if (!(Op.isReg() && Op.isImplicit())) 13788 MIB.addOperand(Op); 13789 } 13790 if (MI->hasOneMemOperand()) 13791 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); 13792 13793 BuildMI(*BB, MI, dl, 13794 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg()) 13795 .addReg(X86::ECX); 13796 13797 MI->eraseFromParent(); 13798 return BB; 13799} 13800 13801static MachineBasicBlock * EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB, 13802 const TargetInstrInfo *TII, 13803 const X86Subtarget* Subtarget) { 13804 DebugLoc dl = MI->getDebugLoc(); 13805 13806 // Address into RAX/EAX, other two args into ECX, EDX. 13807 unsigned MemOpc = Subtarget->is64Bit() ? 
X86::LEA64r : X86::LEA32r; 13808 unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX; 13809 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg); 13810 for (int i = 0; i < X86::AddrNumOperands; ++i) 13811 MIB.addOperand(MI->getOperand(i)); 13812 13813 unsigned ValOps = X86::AddrNumOperands; 13814 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX) 13815 .addReg(MI->getOperand(ValOps).getReg()); 13816 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX) 13817 .addReg(MI->getOperand(ValOps+1).getReg()); 13818 13819 // The instruction doesn't actually take any operands though. 13820 BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr)); 13821 13822 MI->eraseFromParent(); // The pseudo is gone now. 13823 return BB; 13824} 13825 13826MachineBasicBlock * 13827X86TargetLowering::EmitVAARG64WithCustomInserter( 13828 MachineInstr *MI, 13829 MachineBasicBlock *MBB) const { 13830 // Emit va_arg instruction on X86-64. 13831 13832 // Operands to this pseudo-instruction: 13833 // 0 ) Output : destination address (reg) 13834 // 1-5) Input : va_list address (addr, i64mem) 13835 // 6 ) ArgSize : Size (in bytes) of vararg type 13836 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset 13837 // 8 ) Align : Alignment of type 13838 // 9 ) EFLAGS (implicit-def) 13839 13840 assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!"); 13841 assert(X86::AddrNumOperands == 5 && "VAARG_64 assumes 5 address operands"); 13842 13843 unsigned DestReg = MI->getOperand(0).getReg(); 13844 MachineOperand &Base = MI->getOperand(1); 13845 MachineOperand &Scale = MI->getOperand(2); 13846 MachineOperand &Index = MI->getOperand(3); 13847 MachineOperand &Disp = MI->getOperand(4); 13848 MachineOperand &Segment = MI->getOperand(5); 13849 unsigned ArgSize = MI->getOperand(6).getImm(); 13850 unsigned ArgMode = MI->getOperand(7).getImm(); 13851 unsigned Align = MI->getOperand(8).getImm(); 13852 13853 // Memory Reference 13854 assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand"); 13855 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 13856 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 13857 13858 // Machine Information 13859 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 13860 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 13861 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64); 13862 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32); 13863 DebugLoc DL = MI->getDebugLoc(); 13864 13865 // struct va_list { 13866 // i32 gp_offset 13867 // i32 fp_offset 13868 // i64 overflow_area (address) 13869 // i64 reg_save_area (address) 13870 // } 13871 // sizeof(va_list) = 24 13872 // alignment(va_list) = 8 13873 13874 unsigned TotalNumIntRegs = 6; 13875 unsigned TotalNumXMMRegs = 8; 13876 bool UseGPOffset = (ArgMode == 1); 13877 bool UseFPOffset = (ArgMode == 2); 13878 unsigned MaxOffset = TotalNumIntRegs * 8 + 13879 (UseFPOffset ? 
TotalNumXMMRegs * 16 : 0); 13880 13881 /* Align ArgSize to a multiple of 8 */ 13882 unsigned ArgSizeA8 = (ArgSize + 7) & ~7; 13883 bool NeedsAlign = (Align > 8); 13884 13885 MachineBasicBlock *thisMBB = MBB; 13886 MachineBasicBlock *overflowMBB; 13887 MachineBasicBlock *offsetMBB; 13888 MachineBasicBlock *endMBB; 13889 13890 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB 13891 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB 13892 unsigned OffsetReg = 0; 13893 13894 if (!UseGPOffset && !UseFPOffset) { 13895 // If we only pull from the overflow region, we don't create a branch. 13896 // We don't need to alter control flow. 13897 OffsetDestReg = 0; // unused 13898 OverflowDestReg = DestReg; 13899 13900 offsetMBB = NULL; 13901 overflowMBB = thisMBB; 13902 endMBB = thisMBB; 13903 } else { 13904 // First emit code to check if gp_offset (or fp_offset) is below the bound. 13905 // If so, pull the argument from reg_save_area. (branch to offsetMBB) 13906 // If not, pull from overflow_area. (branch to overflowMBB) 13907 // 13908 // thisMBB 13909 // | . 13910 // | . 13911 // offsetMBB overflowMBB 13912 // | . 13913 // | . 13914 // endMBB 13915 13916 // Registers for the PHI in endMBB 13917 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass); 13918 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass); 13919 13920 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 13921 MachineFunction *MF = MBB->getParent(); 13922 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB); 13923 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB); 13924 endMBB = MF->CreateMachineBasicBlock(LLVM_BB); 13925 13926 MachineFunction::iterator MBBIter = MBB; 13927 ++MBBIter; 13928 13929 // Insert the new basic blocks 13930 MF->insert(MBBIter, offsetMBB); 13931 MF->insert(MBBIter, overflowMBB); 13932 MF->insert(MBBIter, endMBB); 13933 13934 // Transfer the remainder of MBB and its successor edges to endMBB. 13935 endMBB->splice(endMBB->begin(), thisMBB, 13936 llvm::next(MachineBasicBlock::iterator(MI)), 13937 thisMBB->end()); 13938 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB); 13939 13940 // Make offsetMBB and overflowMBB successors of thisMBB 13941 thisMBB->addSuccessor(offsetMBB); 13942 thisMBB->addSuccessor(overflowMBB); 13943 13944 // endMBB is a successor of both offsetMBB and overflowMBB 13945 offsetMBB->addSuccessor(endMBB); 13946 overflowMBB->addSuccessor(endMBB); 13947 13948 // Load the offset value into a register 13949 OffsetReg = MRI.createVirtualRegister(OffsetRegClass); 13950 BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg) 13951 .addOperand(Base) 13952 .addOperand(Scale) 13953 .addOperand(Index) 13954 .addDisp(Disp, UseFPOffset ? 4 : 0) 13955 .addOperand(Segment) 13956 .setMemRefs(MMOBegin, MMOEnd); 13957 13958 // Check if there is enough room left to pull this argument. 13959 BuildMI(thisMBB, DL, TII->get(X86::CMP32ri)) 13960 .addReg(OffsetReg) 13961 .addImm(MaxOffset + 8 - ArgSizeA8); 13962 13963 // Branch to "overflowMBB" if offset >= max 13964 // Fall through to "offsetMBB" otherwise 13965 BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE))) 13966 .addMBB(overflowMBB); 13967 } 13968 13969 // In offsetMBB, emit code to use the reg_save_area. 13970 if (offsetMBB) { 13971 assert(OffsetReg != 0); 13972 13973 // Read the reg_save_area address. 
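    // (Per the va_list layout sketched above: i32 gp_offset, i32 fp_offset,
    //  i64 overflow_area, i64 reg_save_area. The reg_save_area field therefore
    //  sits at offset 16, which is why the load below uses a displacement of 16.)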
13974 unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass); 13975 BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg) 13976 .addOperand(Base) 13977 .addOperand(Scale) 13978 .addOperand(Index) 13979 .addDisp(Disp, 16) 13980 .addOperand(Segment) 13981 .setMemRefs(MMOBegin, MMOEnd); 13982 13983 // Zero-extend the offset 13984 unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass); 13985 BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64) 13986 .addImm(0) 13987 .addReg(OffsetReg) 13988 .addImm(X86::sub_32bit); 13989 13990 // Add the offset to the reg_save_area to get the final address. 13991 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg) 13992 .addReg(OffsetReg64) 13993 .addReg(RegSaveReg); 13994 13995 // Compute the offset for the next argument 13996 unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass); 13997 BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg) 13998 .addReg(OffsetReg) 13999 .addImm(UseFPOffset ? 16 : 8); 14000 14001 // Store it back into the va_list. 14002 BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr)) 14003 .addOperand(Base) 14004 .addOperand(Scale) 14005 .addOperand(Index) 14006 .addDisp(Disp, UseFPOffset ? 4 : 0) 14007 .addOperand(Segment) 14008 .addReg(NextOffsetReg) 14009 .setMemRefs(MMOBegin, MMOEnd); 14010 14011 // Jump to endMBB 14012 BuildMI(offsetMBB, DL, TII->get(X86::JMP_4)) 14013 .addMBB(endMBB); 14014 } 14015 14016 // 14017 // Emit code to use overflow area 14018 // 14019 14020 // Load the overflow_area address into a register. 14021 unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass); 14022 BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg) 14023 .addOperand(Base) 14024 .addOperand(Scale) 14025 .addOperand(Index) 14026 .addDisp(Disp, 8) 14027 .addOperand(Segment) 14028 .setMemRefs(MMOBegin, MMOEnd); 14029 14030 // If we need to align it, do so. Otherwise, just copy the address 14031 // to OverflowDestReg. 14032 if (NeedsAlign) { 14033 // Align the overflow address 14034 assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2"); 14035 unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass); 14036 14037 // aligned_addr = (addr + (align-1)) & ~(align-1) 14038 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg) 14039 .addReg(OverflowAddrReg) 14040 .addImm(Align-1); 14041 14042 BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg) 14043 .addReg(TmpReg) 14044 .addImm(~(uint64_t)(Align-1)); 14045 } else { 14046 BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg) 14047 .addReg(OverflowAddrReg); 14048 } 14049 14050 // Compute the next overflow address after this argument. 14051 // (the overflow address should be kept 8-byte aligned) 14052 unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass); 14053 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg) 14054 .addReg(OverflowDestReg) 14055 .addImm(ArgSizeA8); 14056 14057 // Store the new overflow address. 14058 BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr)) 14059 .addOperand(Base) 14060 .addOperand(Scale) 14061 .addOperand(Index) 14062 .addDisp(Disp, 8) 14063 .addOperand(Segment) 14064 .addReg(NextAddrReg) 14065 .setMemRefs(MMOBegin, MMOEnd); 14066 14067 // If we branched, emit the PHI to the front of endMBB. 
14068 if (offsetMBB) { 14069 BuildMI(*endMBB, endMBB->begin(), DL, 14070 TII->get(X86::PHI), DestReg) 14071 .addReg(OffsetDestReg).addMBB(offsetMBB) 14072 .addReg(OverflowDestReg).addMBB(overflowMBB); 14073 } 14074 14075 // Erase the pseudo instruction 14076 MI->eraseFromParent(); 14077 14078 return endMBB; 14079} 14080 14081MachineBasicBlock * 14082X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter( 14083 MachineInstr *MI, 14084 MachineBasicBlock *MBB) const { 14085 // Emit code to save XMM registers to the stack. The ABI says that the 14086 // number of registers to save is given in %al, so it's theoretically 14087 // possible to do an indirect jump trick to avoid saving all of them, 14088 // however this code takes a simpler approach and just executes all 14089 // of the stores if %al is non-zero. It's less code, and it's probably 14090 // easier on the hardware branch predictor, and stores aren't all that 14091 // expensive anyway. 14092 14093 // Create the new basic blocks. One block contains all the XMM stores, 14094 // and one block is the final destination regardless of whether any 14095 // stores were performed. 14096 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 14097 MachineFunction *F = MBB->getParent(); 14098 MachineFunction::iterator MBBIter = MBB; 14099 ++MBBIter; 14100 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB); 14101 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB); 14102 F->insert(MBBIter, XMMSaveMBB); 14103 F->insert(MBBIter, EndMBB); 14104 14105 // Transfer the remainder of MBB and its successor edges to EndMBB. 14106 EndMBB->splice(EndMBB->begin(), MBB, 14107 llvm::next(MachineBasicBlock::iterator(MI)), 14108 MBB->end()); 14109 EndMBB->transferSuccessorsAndUpdatePHIs(MBB); 14110 14111 // The original block will now fall through to the XMM save block. 14112 MBB->addSuccessor(XMMSaveMBB); 14113 // The XMMSaveMBB will fall through to the end block. 14114 XMMSaveMBB->addSuccessor(EndMBB); 14115 14116 // Now add the instructions. 14117 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 14118 DebugLoc DL = MI->getDebugLoc(); 14119 14120 unsigned CountReg = MI->getOperand(0).getReg(); 14121 int64_t RegSaveFrameIndex = MI->getOperand(1).getImm(); 14122 int64_t VarArgsFPOffset = MI->getOperand(2).getImm(); 14123 14124 if (!Subtarget->isTargetWin64()) { 14125 // If %al is 0, branch around the XMM save block. 14126 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg); 14127 BuildMI(MBB, DL, TII->get(X86::JE_4)).addMBB(EndMBB); 14128 MBB->addSuccessor(EndMBB); 14129 } 14130 14131 unsigned MOVOpc = Subtarget->hasFp256() ? X86::VMOVAPSmr : X86::MOVAPSmr; 14132 // In the XMM save block, save all the XMM argument registers. 14133 for (int i = 3, e = MI->getNumOperands(); i != e; ++i) { 14134 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset; 14135 MachineMemOperand *MMO = 14136 F->getMachineMemOperand( 14137 MachinePointerInfo::getFixedStack(RegSaveFrameIndex, Offset), 14138 MachineMemOperand::MOStore, 14139 /*Size=*/16, /*Align=*/16); 14140 BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc)) 14141 .addFrameIndex(RegSaveFrameIndex) 14142 .addImm(/*Scale=*/1) 14143 .addReg(/*IndexReg=*/0) 14144 .addImm(/*Disp=*/Offset) 14145 .addReg(/*Segment=*/0) 14146 .addReg(MI->getOperand(i).getReg()) 14147 .addMemOperand(MMO); 14148 } 14149 14150 MI->eraseFromParent(); // The pseudo instruction is gone now. 
14151 14152 return EndMBB; 14153} 14154 14155// The EFLAGS operand of SelectItr might be missing a kill marker 14156// because there were multiple uses of EFLAGS, and ISel didn't know 14157// which to mark. Figure out whether SelectItr should have had a 14158// kill marker, and set it if it should. Returns the correct kill 14159// marker value. 14160static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr, 14161 MachineBasicBlock* BB, 14162 const TargetRegisterInfo* TRI) { 14163 // Scan forward through BB for a use/def of EFLAGS. 14164 MachineBasicBlock::iterator miI(llvm::next(SelectItr)); 14165 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) { 14166 const MachineInstr& mi = *miI; 14167 if (mi.readsRegister(X86::EFLAGS)) 14168 return false; 14169 if (mi.definesRegister(X86::EFLAGS)) 14170 break; // Should have kill-flag - update below. 14171 } 14172 14173 // If we hit the end of the block, check whether EFLAGS is live into a 14174 // successor. 14175 if (miI == BB->end()) { 14176 for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(), 14177 sEnd = BB->succ_end(); 14178 sItr != sEnd; ++sItr) { 14179 MachineBasicBlock* succ = *sItr; 14180 if (succ->isLiveIn(X86::EFLAGS)) 14181 return false; 14182 } 14183 } 14184 14185 // We found a def, or hit the end of the basic block and EFLAGS wasn't live 14186 // out. SelectMI should have a kill flag on EFLAGS. 14187 SelectItr->addRegisterKilled(X86::EFLAGS, TRI); 14188 return true; 14189} 14190 14191MachineBasicBlock * 14192X86TargetLowering::EmitLoweredSelect(MachineInstr *MI, 14193 MachineBasicBlock *BB) const { 14194 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 14195 DebugLoc DL = MI->getDebugLoc(); 14196 14197 // To "insert" a SELECT_CC instruction, we actually have to insert the 14198 // diamond control-flow pattern. The incoming instruction knows the 14199 // destination vreg to set, the condition code register to branch on, the 14200 // true/false values to select between, and a branch opcode to use. 14201 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 14202 MachineFunction::iterator It = BB; 14203 ++It; 14204 14205 // thisMBB: 14206 // ... 14207 // TrueVal = ... 14208 // cmpTY ccX, r1, r2 14209 // bCC copy1MBB 14210 // fallthrough --> copy0MBB 14211 MachineBasicBlock *thisMBB = BB; 14212 MachineFunction *F = BB->getParent(); 14213 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 14214 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 14215 F->insert(It, copy0MBB); 14216 F->insert(It, sinkMBB); 14217 14218 // If the EFLAGS register isn't dead in the terminator, then claim that it's 14219 // live into the sink and copy blocks. 14220 const TargetRegisterInfo* TRI = getTargetMachine().getRegisterInfo(); 14221 if (!MI->killsRegister(X86::EFLAGS) && 14222 !checkAndUpdateEFLAGSKill(MI, BB, TRI)) { 14223 copy0MBB->addLiveIn(X86::EFLAGS); 14224 sinkMBB->addLiveIn(X86::EFLAGS); 14225 } 14226 14227 // Transfer the remainder of BB and its successor edges to sinkMBB. 14228 sinkMBB->splice(sinkMBB->begin(), BB, 14229 llvm::next(MachineBasicBlock::iterator(MI)), 14230 BB->end()); 14231 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 14232 14233 // Add the true and fallthrough blocks as its successors. 14234 BB->addSuccessor(copy0MBB); 14235 BB->addSuccessor(sinkMBB); 14236 14237 // Create the conditional branch instruction. 
14238 unsigned Opc = 14239 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm()); 14240 BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB); 14241 14242 // copy0MBB: 14243 // %FalseValue = ... 14244 // # fallthrough to sinkMBB 14245 copy0MBB->addSuccessor(sinkMBB); 14246 14247 // sinkMBB: 14248 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 14249 // ... 14250 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 14251 TII->get(X86::PHI), MI->getOperand(0).getReg()) 14252 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 14253 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 14254 14255 MI->eraseFromParent(); // The pseudo instruction is gone now. 14256 return sinkMBB; 14257} 14258 14259MachineBasicBlock * 14260X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI, MachineBasicBlock *BB, 14261 bool Is64Bit) const { 14262 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 14263 DebugLoc DL = MI->getDebugLoc(); 14264 MachineFunction *MF = BB->getParent(); 14265 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 14266 14267 assert(getTargetMachine().Options.EnableSegmentedStacks); 14268 14269 unsigned TlsReg = Is64Bit ? X86::FS : X86::GS; 14270 unsigned TlsOffset = Is64Bit ? 0x70 : 0x30; 14271 14272 // BB: 14273 // ... [Till the alloca] 14274 // If stacklet is not large enough, jump to mallocMBB 14275 // 14276 // bumpMBB: 14277 // Allocate by subtracting from RSP 14278 // Jump to continueMBB 14279 // 14280 // mallocMBB: 14281 // Allocate by call to runtime 14282 // 14283 // continueMBB: 14284 // ... 14285 // [rest of original BB] 14286 // 14287 14288 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB); 14289 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB); 14290 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB); 14291 14292 MachineRegisterInfo &MRI = MF->getRegInfo(); 14293 const TargetRegisterClass *AddrRegClass = 14294 getRegClassFor(Is64Bit ? MVT::i64:MVT::i32); 14295 14296 unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass), 14297 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass), 14298 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass), 14299 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass), 14300 sizeVReg = MI->getOperand(1).getReg(), 14301 physSPReg = Is64Bit ? X86::RSP : X86::ESP; 14302 14303 MachineFunction::iterator MBBIter = BB; 14304 ++MBBIter; 14305 14306 MF->insert(MBBIter, bumpMBB); 14307 MF->insert(MBBIter, mallocMBB); 14308 MF->insert(MBBIter, continueMBB); 14309 14310 continueMBB->splice(continueMBB->begin(), BB, llvm::next 14311 (MachineBasicBlock::iterator(MI)), BB->end()); 14312 continueMBB->transferSuccessorsAndUpdatePHIs(BB); 14313 14314 // Add code to the main basic block to check if the stack limit has been hit, 14315 // and if so, jump to mallocMBB otherwise to bumpMBB. 14316 BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg); 14317 BuildMI(BB, DL, TII->get(Is64Bit ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg) 14318 .addReg(tmpSPVReg).addReg(sizeVReg); 14319 BuildMI(BB, DL, TII->get(Is64Bit ? X86::CMP64mr:X86::CMP32mr)) 14320 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg) 14321 .addReg(SPLimitVReg); 14322 BuildMI(BB, DL, TII->get(X86::JG_4)).addMBB(mallocMBB); 14323 14324 // bumpMBB simply decreases the stack pointer, since we know the current 14325 // stacklet has enough space. 
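  // (SPLimitVReg already holds SP minus the allocation size, computed in BB
  //  above, so the copy below both commits the new stack pointer and gives
  //  bumpSPPtrVReg the address of the newly allocated block for the PHI in
  //  continueMBB.)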
14326 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg) 14327 .addReg(SPLimitVReg); 14328 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg) 14329 .addReg(SPLimitVReg); 14330 BuildMI(bumpMBB, DL, TII->get(X86::JMP_4)).addMBB(continueMBB); 14331 14332 // Calls into a routine in libgcc to allocate more space from the heap. 14333 const uint32_t *RegMask = 14334 getTargetMachine().getRegisterInfo()->getCallPreservedMask(CallingConv::C); 14335 if (Is64Bit) { 14336 BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI) 14337 .addReg(sizeVReg); 14338 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32)) 14339 .addExternalSymbol("__morestack_allocate_stack_space") 14340 .addRegMask(RegMask) 14341 .addReg(X86::RDI, RegState::Implicit) 14342 .addReg(X86::RAX, RegState::ImplicitDefine); 14343 } else { 14344 BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg) 14345 .addImm(12); 14346 BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg); 14347 BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32)) 14348 .addExternalSymbol("__morestack_allocate_stack_space") 14349 .addRegMask(RegMask) 14350 .addReg(X86::EAX, RegState::ImplicitDefine); 14351 } 14352 14353 if (!Is64Bit) 14354 BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg) 14355 .addImm(16); 14356 14357 BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg) 14358 .addReg(Is64Bit ? X86::RAX : X86::EAX); 14359 BuildMI(mallocMBB, DL, TII->get(X86::JMP_4)).addMBB(continueMBB); 14360 14361 // Set up the CFG correctly. 14362 BB->addSuccessor(bumpMBB); 14363 BB->addSuccessor(mallocMBB); 14364 mallocMBB->addSuccessor(continueMBB); 14365 bumpMBB->addSuccessor(continueMBB); 14366 14367 // Take care of the PHI nodes. 14368 BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI), 14369 MI->getOperand(0).getReg()) 14370 .addReg(mallocPtrVReg).addMBB(mallocMBB) 14371 .addReg(bumpSPPtrVReg).addMBB(bumpMBB); 14372 14373 // Delete the original pseudo instruction. 14374 MI->eraseFromParent(); 14375 14376 // And we're done. 14377 return continueMBB; 14378} 14379 14380MachineBasicBlock * 14381X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI, 14382 MachineBasicBlock *BB) const { 14383 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 14384 DebugLoc DL = MI->getDebugLoc(); 14385 14386 assert(!Subtarget->isTargetEnvMacho()); 14387 14388 // The lowering is pretty easy: we're just emitting the call to _alloca. The 14389 // non-trivial part is impdef of ESP. 14390 14391 if (Subtarget->isTargetWin64()) { 14392 if (Subtarget->isTargetCygMing()) { 14393 // ___chkstk(Mingw64): 14394 // Clobbers R10, R11, RAX and EFLAGS. 14395 // Updates RSP. 14396 BuildMI(*BB, MI, DL, TII->get(X86::W64ALLOCA)) 14397 .addExternalSymbol("___chkstk") 14398 .addReg(X86::RAX, RegState::Implicit) 14399 .addReg(X86::RSP, RegState::Implicit) 14400 .addReg(X86::RAX, RegState::Define | RegState::Implicit) 14401 .addReg(X86::RSP, RegState::Define | RegState::Implicit) 14402 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit); 14403 } else { 14404 // __chkstk(MSVCRT): does not update stack pointer. 14405 // Clobbers R10, R11 and EFLAGS. 14406 // FIXME: RAX(allocated size) might be reused and not killed. 14407 BuildMI(*BB, MI, DL, TII->get(X86::W64ALLOCA)) 14408 .addExternalSymbol("__chkstk") 14409 .addReg(X86::RAX, RegState::Implicit) 14410 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit); 14411 // RAX has the offset to subtracted from RSP. 
14412 BuildMI(*BB, MI, DL, TII->get(X86::SUB64rr), X86::RSP) 14413 .addReg(X86::RSP) 14414 .addReg(X86::RAX); 14415 } 14416 } else { 14417 const char *StackProbeSymbol = 14418 Subtarget->isTargetWindows() ? "_chkstk" : "_alloca"; 14419 14420 BuildMI(*BB, MI, DL, TII->get(X86::CALLpcrel32)) 14421 .addExternalSymbol(StackProbeSymbol) 14422 .addReg(X86::EAX, RegState::Implicit) 14423 .addReg(X86::ESP, RegState::Implicit) 14424 .addReg(X86::EAX, RegState::Define | RegState::Implicit) 14425 .addReg(X86::ESP, RegState::Define | RegState::Implicit) 14426 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit); 14427 } 14428 14429 MI->eraseFromParent(); // The pseudo instruction is gone now. 14430 return BB; 14431} 14432 14433MachineBasicBlock * 14434X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI, 14435 MachineBasicBlock *BB) const { 14436 // This is pretty easy. We're taking the value that we received from 14437 // our load from the relocation, sticking it in either RDI (x86-64) 14438 // or EAX and doing an indirect call. The return value will then 14439 // be in the normal return register. 14440 const X86InstrInfo *TII 14441 = static_cast<const X86InstrInfo*>(getTargetMachine().getInstrInfo()); 14442 DebugLoc DL = MI->getDebugLoc(); 14443 MachineFunction *F = BB->getParent(); 14444 14445 assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?"); 14446 assert(MI->getOperand(3).isGlobal() && "This should be a global"); 14447 14448 // Get a register mask for the lowered call. 14449 // FIXME: The 32-bit calls have non-standard calling conventions. Use a 14450 // proper register mask. 14451 const uint32_t *RegMask = 14452 getTargetMachine().getRegisterInfo()->getCallPreservedMask(CallingConv::C); 14453 if (Subtarget->is64Bit()) { 14454 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, 14455 TII->get(X86::MOV64rm), X86::RDI) 14456 .addReg(X86::RIP) 14457 .addImm(0).addReg(0) 14458 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, 14459 MI->getOperand(3).getTargetFlags()) 14460 .addReg(0); 14461 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m)); 14462 addDirectMem(MIB, X86::RDI); 14463 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask); 14464 } else if (getTargetMachine().getRelocationModel() != Reloc::PIC_) { 14465 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, 14466 TII->get(X86::MOV32rm), X86::EAX) 14467 .addReg(0) 14468 .addImm(0).addReg(0) 14469 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, 14470 MI->getOperand(3).getTargetFlags()) 14471 .addReg(0); 14472 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m)); 14473 addDirectMem(MIB, X86::EAX); 14474 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask); 14475 } else { 14476 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, 14477 TII->get(X86::MOV32rm), X86::EAX) 14478 .addReg(TII->getGlobalBaseReg(F)) 14479 .addImm(0).addReg(0) 14480 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, 14481 MI->getOperand(3).getTargetFlags()) 14482 .addReg(0); 14483 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m)); 14484 addDirectMem(MIB, X86::EAX); 14485 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask); 14486 } 14487 14488 MI->eraseFromParent(); // The pseudo instruction is gone now. 
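  // (Net effect: the MOV above loads the relocation-provided descriptor
  //  address into RDI, or EAX for the 32-bit variants, and the CALL then goes
  //  indirectly through that address; the variable's address comes back in the
  //  normal return register, as noted in the header comment.)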
14489 return BB; 14490} 14491 14492MachineBasicBlock * 14493X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI, 14494 MachineBasicBlock *MBB) const { 14495 DebugLoc DL = MI->getDebugLoc(); 14496 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 14497 14498 MachineFunction *MF = MBB->getParent(); 14499 MachineRegisterInfo &MRI = MF->getRegInfo(); 14500 14501 const BasicBlock *BB = MBB->getBasicBlock(); 14502 MachineFunction::iterator I = MBB; 14503 ++I; 14504 14505 // Memory Reference 14506 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 14507 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 14508 14509 unsigned DstReg; 14510 unsigned MemOpndSlot = 0; 14511 14512 unsigned CurOp = 0; 14513 14514 DstReg = MI->getOperand(CurOp++).getReg(); 14515 const TargetRegisterClass *RC = MRI.getRegClass(DstReg); 14516 assert(RC->hasType(MVT::i32) && "Invalid destination!"); 14517 unsigned mainDstReg = MRI.createVirtualRegister(RC); 14518 unsigned restoreDstReg = MRI.createVirtualRegister(RC); 14519 14520 MemOpndSlot = CurOp; 14521 14522 MVT PVT = getPointerTy(); 14523 assert((PVT == MVT::i64 || PVT == MVT::i32) && 14524 "Invalid Pointer Size!"); 14525 14526 // For v = setjmp(buf), we generate 14527 // 14528 // thisMBB: 14529 // buf[LabelOffset] = restoreMBB 14530 // SjLjSetup restoreMBB 14531 // 14532 // mainMBB: 14533 // v_main = 0 14534 // 14535 // sinkMBB: 14536 // v = phi(main, restore) 14537 // 14538 // restoreMBB: 14539 // v_restore = 1 14540 14541 MachineBasicBlock *thisMBB = MBB; 14542 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 14543 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 14544 MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB); 14545 MF->insert(I, mainMBB); 14546 MF->insert(I, sinkMBB); 14547 MF->push_back(restoreMBB); 14548 14549 MachineInstrBuilder MIB; 14550 14551 // Transfer the remainder of BB and its successor edges to sinkMBB. 14552 sinkMBB->splice(sinkMBB->begin(), MBB, 14553 llvm::next(MachineBasicBlock::iterator(MI)), MBB->end()); 14554 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 14555 14556 // thisMBB: 14557 unsigned PtrStoreOpc = 0; 14558 unsigned LabelReg = 0; 14559 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 14560 Reloc::Model RM = getTargetMachine().getRelocationModel(); 14561 bool UseImmLabel = (getTargetMachine().getCodeModel() == CodeModel::Small) && 14562 (RM == Reloc::Static || RM == Reloc::DynamicNoPIC); 14563 14564 // Prepare IP either in reg or imm. 14565 if (!UseImmLabel) { 14566 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr; 14567 const TargetRegisterClass *PtrRC = getRegClassFor(PVT); 14568 LabelReg = MRI.createVirtualRegister(PtrRC); 14569 if (Subtarget->is64Bit()) { 14570 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg) 14571 .addReg(X86::RIP) 14572 .addImm(0) 14573 .addReg(0) 14574 .addMBB(restoreMBB) 14575 .addReg(0); 14576 } else { 14577 const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII); 14578 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg) 14579 .addReg(XII->getGlobalBaseReg(MF)) 14580 .addImm(0) 14581 .addReg(0) 14582 .addMBB(restoreMBB, Subtarget->ClassifyBlockAddressReference()) 14583 .addReg(0); 14584 } 14585 } else 14586 PtrStoreOpc = (PVT == MVT::i64) ? 
X86::MOV64mi32 : X86::MOV32mi; 14587 // Store IP 14588 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc)); 14589 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { 14590 if (i == X86::AddrDisp) 14591 MIB.addDisp(MI->getOperand(MemOpndSlot + i), LabelOffset); 14592 else 14593 MIB.addOperand(MI->getOperand(MemOpndSlot + i)); 14594 } 14595 if (!UseImmLabel) 14596 MIB.addReg(LabelReg); 14597 else 14598 MIB.addMBB(restoreMBB); 14599 MIB.setMemRefs(MMOBegin, MMOEnd); 14600 // Setup 14601 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup)) 14602 .addMBB(restoreMBB); 14603 MIB.addRegMask(RegInfo->getNoPreservedMask()); 14604 thisMBB->addSuccessor(mainMBB); 14605 thisMBB->addSuccessor(restoreMBB); 14606 14607 // mainMBB: 14608 // EAX = 0 14609 BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg); 14610 mainMBB->addSuccessor(sinkMBB); 14611 14612 // sinkMBB: 14613 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 14614 TII->get(X86::PHI), DstReg) 14615 .addReg(mainDstReg).addMBB(mainMBB) 14616 .addReg(restoreDstReg).addMBB(restoreMBB); 14617 14618 // restoreMBB: 14619 BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1); 14620 BuildMI(restoreMBB, DL, TII->get(X86::JMP_4)).addMBB(sinkMBB); 14621 restoreMBB->addSuccessor(sinkMBB); 14622 14623 MI->eraseFromParent(); 14624 return sinkMBB; 14625} 14626 14627MachineBasicBlock * 14628X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI, 14629 MachineBasicBlock *MBB) const { 14630 DebugLoc DL = MI->getDebugLoc(); 14631 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 14632 14633 MachineFunction *MF = MBB->getParent(); 14634 MachineRegisterInfo &MRI = MF->getRegInfo(); 14635 14636 // Memory Reference 14637 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 14638 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 14639 14640 MVT PVT = getPointerTy(); 14641 assert((PVT == MVT::i64 || PVT == MVT::i32) && 14642 "Invalid Pointer Size!"); 14643 14644 const TargetRegisterClass *RC = 14645 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass; 14646 unsigned Tmp = MRI.createVirtualRegister(RC); 14647 // Since FP is only updated here but NOT referenced, it's treated as GPR. 14648 unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP; 14649 unsigned SP = RegInfo->getStackRegister(); 14650 14651 MachineInstrBuilder MIB; 14652 14653 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 14654 const int64_t SPOffset = 2 * PVT.getStoreSize(); 14655 14656 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm; 14657 unsigned IJmpOpc = (PVT == MVT::i64) ? 
X86::JMP64r : X86::JMP32r; 14658 14659 // Reload FP 14660 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP); 14661 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) 14662 MIB.addOperand(MI->getOperand(i)); 14663 MIB.setMemRefs(MMOBegin, MMOEnd); 14664 // Reload IP 14665 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp); 14666 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { 14667 if (i == X86::AddrDisp) 14668 MIB.addDisp(MI->getOperand(i), LabelOffset); 14669 else 14670 MIB.addOperand(MI->getOperand(i)); 14671 } 14672 MIB.setMemRefs(MMOBegin, MMOEnd); 14673 // Reload SP 14674 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), SP); 14675 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { 14676 if (i == X86::AddrDisp) 14677 MIB.addDisp(MI->getOperand(i), SPOffset); 14678 else 14679 MIB.addOperand(MI->getOperand(i)); 14680 } 14681 MIB.setMemRefs(MMOBegin, MMOEnd); 14682 // Jump 14683 BuildMI(*MBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp); 14684 14685 MI->eraseFromParent(); 14686 return MBB; 14687} 14688 14689MachineBasicBlock * 14690X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 14691 MachineBasicBlock *BB) const { 14692 switch (MI->getOpcode()) { 14693 default: llvm_unreachable("Unexpected instr type to insert"); 14694 case X86::TAILJMPd64: 14695 case X86::TAILJMPr64: 14696 case X86::TAILJMPm64: 14697 llvm_unreachable("TAILJMP64 would not be touched here."); 14698 case X86::TCRETURNdi64: 14699 case X86::TCRETURNri64: 14700 case X86::TCRETURNmi64: 14701 return BB; 14702 case X86::WIN_ALLOCA: 14703 return EmitLoweredWinAlloca(MI, BB); 14704 case X86::SEG_ALLOCA_32: 14705 return EmitLoweredSegAlloca(MI, BB, false); 14706 case X86::SEG_ALLOCA_64: 14707 return EmitLoweredSegAlloca(MI, BB, true); 14708 case X86::TLSCall_32: 14709 case X86::TLSCall_64: 14710 return EmitLoweredTLSCall(MI, BB); 14711 case X86::CMOV_GR8: 14712 case X86::CMOV_FR32: 14713 case X86::CMOV_FR64: 14714 case X86::CMOV_V4F32: 14715 case X86::CMOV_V2F64: 14716 case X86::CMOV_V2I64: 14717 case X86::CMOV_V8F32: 14718 case X86::CMOV_V4F64: 14719 case X86::CMOV_V4I64: 14720 case X86::CMOV_GR16: 14721 case X86::CMOV_GR32: 14722 case X86::CMOV_RFP32: 14723 case X86::CMOV_RFP64: 14724 case X86::CMOV_RFP80: 14725 return EmitLoweredSelect(MI, BB); 14726 14727 case X86::FP32_TO_INT16_IN_MEM: 14728 case X86::FP32_TO_INT32_IN_MEM: 14729 case X86::FP32_TO_INT64_IN_MEM: 14730 case X86::FP64_TO_INT16_IN_MEM: 14731 case X86::FP64_TO_INT32_IN_MEM: 14732 case X86::FP64_TO_INT64_IN_MEM: 14733 case X86::FP80_TO_INT16_IN_MEM: 14734 case X86::FP80_TO_INT32_IN_MEM: 14735 case X86::FP80_TO_INT64_IN_MEM: { 14736 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 14737 DebugLoc DL = MI->getDebugLoc(); 14738 14739 // Change the floating point control register to use "round towards zero" 14740 // mode when truncating to an integer value. 14741 MachineFunction *F = BB->getParent(); 14742 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false); 14743 addFrameReference(BuildMI(*BB, MI, DL, 14744 TII->get(X86::FNSTCW16m)), CWFrameIdx); 14745 14746 // Load the old value of the high byte of the control word... 14747 unsigned OldCW = 14748 F->getRegInfo().createVirtualRegister(&X86::GR16RegClass); 14749 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW), 14750 CWFrameIdx); 14751 14752 // Set the high part to be round to zero... 14753 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx) 14754 .addImm(0xC7F); 14755 14756 // Reload the modified control word now... 
14757 addFrameReference(BuildMI(*BB, MI, DL, 14758 TII->get(X86::FLDCW16m)), CWFrameIdx); 14759 14760 // Restore the memory image of control word to original value 14761 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx) 14762 .addReg(OldCW); 14763 14764 // Get the X86 opcode to use. 14765 unsigned Opc; 14766 switch (MI->getOpcode()) { 14767 default: llvm_unreachable("illegal opcode!"); 14768 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break; 14769 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break; 14770 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break; 14771 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break; 14772 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break; 14773 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break; 14774 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break; 14775 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break; 14776 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break; 14777 } 14778 14779 X86AddressMode AM; 14780 MachineOperand &Op = MI->getOperand(0); 14781 if (Op.isReg()) { 14782 AM.BaseType = X86AddressMode::RegBase; 14783 AM.Base.Reg = Op.getReg(); 14784 } else { 14785 AM.BaseType = X86AddressMode::FrameIndexBase; 14786 AM.Base.FrameIndex = Op.getIndex(); 14787 } 14788 Op = MI->getOperand(1); 14789 if (Op.isImm()) 14790 AM.Scale = Op.getImm(); 14791 Op = MI->getOperand(2); 14792 if (Op.isImm()) 14793 AM.IndexReg = Op.getImm(); 14794 Op = MI->getOperand(3); 14795 if (Op.isGlobal()) { 14796 AM.GV = Op.getGlobal(); 14797 } else { 14798 AM.Disp = Op.getImm(); 14799 } 14800 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM) 14801 .addReg(MI->getOperand(X86::AddrNumOperands).getReg()); 14802 14803 // Reload the original control word now. 14804 addFrameReference(BuildMI(*BB, MI, DL, 14805 TII->get(X86::FLDCW16m)), CWFrameIdx); 14806 14807 MI->eraseFromParent(); // The pseudo instruction is gone now. 14808 return BB; 14809 } 14810 // String/text processing lowering. 14811 case X86::PCMPISTRM128REG: 14812 case X86::VPCMPISTRM128REG: 14813 case X86::PCMPISTRM128MEM: 14814 case X86::VPCMPISTRM128MEM: 14815 case X86::PCMPESTRM128REG: 14816 case X86::VPCMPESTRM128REG: 14817 case X86::PCMPESTRM128MEM: 14818 case X86::VPCMPESTRM128MEM: 14819 assert(Subtarget->hasSSE42() && 14820 "Target must have SSE4.2 or AVX features enabled"); 14821 return EmitPCMPSTRM(MI, BB, getTargetMachine().getInstrInfo()); 14822 14823 // String/text processing lowering. 14824 case X86::PCMPISTRIREG: 14825 case X86::VPCMPISTRIREG: 14826 case X86::PCMPISTRIMEM: 14827 case X86::VPCMPISTRIMEM: 14828 case X86::PCMPESTRIREG: 14829 case X86::VPCMPESTRIREG: 14830 case X86::PCMPESTRIMEM: 14831 case X86::VPCMPESTRIMEM: 14832 assert(Subtarget->hasSSE42() && 14833 "Target must have SSE4.2 or AVX features enabled"); 14834 return EmitPCMPSTRI(MI, BB, getTargetMachine().getInstrInfo()); 14835 14836 // Thread synchronization. 14837 case X86::MONITOR: 14838 return EmitMonitor(MI, BB, getTargetMachine().getInstrInfo(), Subtarget); 14839 14840 // xbegin 14841 case X86::XBEGIN: 14842 return EmitXBegin(MI, BB, getTargetMachine().getInstrInfo()); 14843 14844 // Atomic Lowering. 
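  // (The ATOM* pseudos below have no direct machine encoding; EmitAtomicLoadArith
  //  and EmitAtomicLoadArith6432 expand them into compare-and-swap loops, in the
  //  general shape of the 6432 emitter earlier in this file: load the old value,
  //  compute the non-atomic result in a loop block, try to LCMPXCHG it back, and
  //  branch back into the loop while the exchange keeps failing.)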
14845 case X86::ATOMAND8: 14846 case X86::ATOMAND16: 14847 case X86::ATOMAND32: 14848 case X86::ATOMAND64: 14849 // Fall through 14850 case X86::ATOMOR8: 14851 case X86::ATOMOR16: 14852 case X86::ATOMOR32: 14853 case X86::ATOMOR64: 14854 // Fall through 14855 case X86::ATOMXOR16: 14856 case X86::ATOMXOR8: 14857 case X86::ATOMXOR32: 14858 case X86::ATOMXOR64: 14859 // Fall through 14860 case X86::ATOMNAND8: 14861 case X86::ATOMNAND16: 14862 case X86::ATOMNAND32: 14863 case X86::ATOMNAND64: 14864 // Fall through 14865 case X86::ATOMMAX8: 14866 case X86::ATOMMAX16: 14867 case X86::ATOMMAX32: 14868 case X86::ATOMMAX64: 14869 // Fall through 14870 case X86::ATOMMIN8: 14871 case X86::ATOMMIN16: 14872 case X86::ATOMMIN32: 14873 case X86::ATOMMIN64: 14874 // Fall through 14875 case X86::ATOMUMAX8: 14876 case X86::ATOMUMAX16: 14877 case X86::ATOMUMAX32: 14878 case X86::ATOMUMAX64: 14879 // Fall through 14880 case X86::ATOMUMIN8: 14881 case X86::ATOMUMIN16: 14882 case X86::ATOMUMIN32: 14883 case X86::ATOMUMIN64: 14884 return EmitAtomicLoadArith(MI, BB); 14885 14886 // This group does 64-bit operations on a 32-bit host. 14887 case X86::ATOMAND6432: 14888 case X86::ATOMOR6432: 14889 case X86::ATOMXOR6432: 14890 case X86::ATOMNAND6432: 14891 case X86::ATOMADD6432: 14892 case X86::ATOMSUB6432: 14893 case X86::ATOMMAX6432: 14894 case X86::ATOMMIN6432: 14895 case X86::ATOMUMAX6432: 14896 case X86::ATOMUMIN6432: 14897 case X86::ATOMSWAP6432: 14898 return EmitAtomicLoadArith6432(MI, BB); 14899 14900 case X86::VASTART_SAVE_XMM_REGS: 14901 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB); 14902 14903 case X86::VAARG_64: 14904 return EmitVAARG64WithCustomInserter(MI, BB); 14905 14906 case X86::EH_SjLj_SetJmp32: 14907 case X86::EH_SjLj_SetJmp64: 14908 return emitEHSjLjSetJmp(MI, BB); 14909 14910 case X86::EH_SjLj_LongJmp32: 14911 case X86::EH_SjLj_LongJmp64: 14912 return emitEHSjLjLongJmp(MI, BB); 14913 } 14914} 14915 14916//===----------------------------------------------------------------------===// 14917// X86 Optimization Hooks 14918//===----------------------------------------------------------------------===// 14919 14920void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 14921 APInt &KnownZero, 14922 APInt &KnownOne, 14923 const SelectionDAG &DAG, 14924 unsigned Depth) const { 14925 unsigned BitWidth = KnownZero.getBitWidth(); 14926 unsigned Opc = Op.getOpcode(); 14927 assert((Opc >= ISD::BUILTIN_OP_END || 14928 Opc == ISD::INTRINSIC_WO_CHAIN || 14929 Opc == ISD::INTRINSIC_W_CHAIN || 14930 Opc == ISD::INTRINSIC_VOID) && 14931 "Should use MaskedValueIsZero if you don't know whether Op" 14932 " is a target node!"); 14933 14934 KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything. 14935 switch (Opc) { 14936 default: break; 14937 case X86ISD::ADD: 14938 case X86ISD::SUB: 14939 case X86ISD::ADC: 14940 case X86ISD::SBB: 14941 case X86ISD::SMUL: 14942 case X86ISD::UMUL: 14943 case X86ISD::INC: 14944 case X86ISD::DEC: 14945 case X86ISD::OR: 14946 case X86ISD::XOR: 14947 case X86ISD::AND: 14948 // These nodes' second result is a boolean. 
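    // (Only result 1, the flag result: result 0 is the ordinary arithmetic
    //  value, about which nothing is known, hence the break below. For the
    //  boolean result we fall through to the SETCC case, which marks every bit
    //  above bit 0 as known zero.)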
14949 if (Op.getResNo() == 0) 14950 break; 14951 // Fallthrough 14952 case X86ISD::SETCC: 14953 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1); 14954 break; 14955 case ISD::INTRINSIC_WO_CHAIN: { 14956 unsigned IntId = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 14957 unsigned NumLoBits = 0; 14958 switch (IntId) { 14959 default: break; 14960 case Intrinsic::x86_sse_movmsk_ps: 14961 case Intrinsic::x86_avx_movmsk_ps_256: 14962 case Intrinsic::x86_sse2_movmsk_pd: 14963 case Intrinsic::x86_avx_movmsk_pd_256: 14964 case Intrinsic::x86_mmx_pmovmskb: 14965 case Intrinsic::x86_sse2_pmovmskb_128: 14966 case Intrinsic::x86_avx2_pmovmskb: { 14967 // High bits of movmskp{s|d}, pmovmskb are known zero. 14968 switch (IntId) { 14969 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 14970 case Intrinsic::x86_sse_movmsk_ps: NumLoBits = 4; break; 14971 case Intrinsic::x86_avx_movmsk_ps_256: NumLoBits = 8; break; 14972 case Intrinsic::x86_sse2_movmsk_pd: NumLoBits = 2; break; 14973 case Intrinsic::x86_avx_movmsk_pd_256: NumLoBits = 4; break; 14974 case Intrinsic::x86_mmx_pmovmskb: NumLoBits = 8; break; 14975 case Intrinsic::x86_sse2_pmovmskb_128: NumLoBits = 16; break; 14976 case Intrinsic::x86_avx2_pmovmskb: NumLoBits = 32; break; 14977 } 14978 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - NumLoBits); 14979 break; 14980 } 14981 } 14982 break; 14983 } 14984 } 14985} 14986 14987unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op, 14988 unsigned Depth) const { 14989 // SETCC_CARRY sets the dest to ~0 for true or 0 for false. 14990 if (Op.getOpcode() == X86ISD::SETCC_CARRY) 14991 return Op.getValueType().getScalarType().getSizeInBits(); 14992 14993 // Fallback case. 14994 return 1; 14995} 14996 14997/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the 14998/// node is a GlobalAddress + offset. 
14999bool X86TargetLowering::isGAPlusOffset(SDNode *N, 15000 const GlobalValue* &GA, 15001 int64_t &Offset) const { 15002 if (N->getOpcode() == X86ISD::Wrapper) { 15003 if (isa<GlobalAddressSDNode>(N->getOperand(0))) { 15004 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal(); 15005 Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset(); 15006 return true; 15007 } 15008 } 15009 return TargetLowering::isGAPlusOffset(N, GA, Offset); 15010} 15011 15012/// isShuffleHigh128VectorInsertLow - Checks whether the shuffle node is the 15013/// same as extracting the high 128-bit part of 256-bit vector and then 15014/// inserting the result into the low part of a new 256-bit vector 15015static bool isShuffleHigh128VectorInsertLow(ShuffleVectorSDNode *SVOp) { 15016 EVT VT = SVOp->getValueType(0); 15017 unsigned NumElems = VT.getVectorNumElements(); 15018 15019 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u> 15020 for (unsigned i = 0, j = NumElems/2; i != NumElems/2; ++i, ++j) 15021 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) || 15022 SVOp->getMaskElt(j) >= 0) 15023 return false; 15024 15025 return true; 15026} 15027 15028/// isShuffleLow128VectorInsertHigh - Checks whether the shuffle node is the 15029/// same as extracting the low 128-bit part of 256-bit vector and then 15030/// inserting the result into the high part of a new 256-bit vector 15031static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) { 15032 EVT VT = SVOp->getValueType(0); 15033 unsigned NumElems = VT.getVectorNumElements(); 15034 15035 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1> 15036 for (unsigned i = NumElems/2, j = 0; i != NumElems; ++i, ++j) 15037 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) || 15038 SVOp->getMaskElt(j) >= 0) 15039 return false; 15040 15041 return true; 15042} 15043 15044/// PerformShuffleCombine256 - Performs shuffle combines for 256-bit vectors. 15045static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG, 15046 TargetLowering::DAGCombinerInfo &DCI, 15047 const X86Subtarget* Subtarget) { 15048 DebugLoc dl = N->getDebugLoc(); 15049 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 15050 SDValue V1 = SVOp->getOperand(0); 15051 SDValue V2 = SVOp->getOperand(1); 15052 EVT VT = SVOp->getValueType(0); 15053 unsigned NumElems = VT.getVectorNumElements(); 15054 15055 if (V1.getOpcode() == ISD::CONCAT_VECTORS && 15056 V2.getOpcode() == ISD::CONCAT_VECTORS) { 15057 // 15058 // 0,0,0,... 15059 // | 15060 // V UNDEF BUILD_VECTOR UNDEF 15061 // \ / \ / 15062 // CONCAT_VECTOR CONCAT_VECTOR 15063 // \ / 15064 // \ / 15065 // RESULT: V + zero extended 15066 // 15067 if (V2.getOperand(0).getOpcode() != ISD::BUILD_VECTOR || 15068 V2.getOperand(1).getOpcode() != ISD::UNDEF || 15069 V1.getOperand(1).getOpcode() != ISD::UNDEF) 15070 return SDValue(); 15071 15072 if (!ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode())) 15073 return SDValue(); 15074 15075 // To match the shuffle mask, the first half of the mask should 15076 // be exactly the first vector, and all the rest a splat with the 15077 // first element of the second one. 15078 for (unsigned i = 0; i != NumElems/2; ++i) 15079 if (!isUndefOrEqual(SVOp->getMaskElt(i), i) || 15080 !isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems)) 15081 return SDValue(); 15082 15083 // If V1 is coming from a vector load then just fold to a VZEXT_LOAD. 
15084 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(V1.getOperand(0))) { 15085 if (Ld->hasNUsesOfValue(1, 0)) { 15086 SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other); 15087 SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() }; 15088 SDValue ResNode = 15089 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, 15090 array_lengthof(Ops), 15091 Ld->getMemoryVT(), 15092 Ld->getPointerInfo(), 15093 Ld->getAlignment(), 15094 false/*isVolatile*/, true/*ReadMem*/, 15095 false/*WriteMem*/); 15096 15097 // Make sure the newly-created LOAD is in the same position as Ld in 15098 // terms of dependency. We create a TokenFactor for Ld and ResNode, 15099 // and update uses of Ld's output chain to use the TokenFactor. 15100 if (Ld->hasAnyUseOfValue(1)) { 15101 SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 15102 SDValue(Ld, 1), SDValue(ResNode.getNode(), 1)); 15103 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewChain); 15104 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(Ld, 1), 15105 SDValue(ResNode.getNode(), 1)); 15106 } 15107 15108 return DAG.getNode(ISD::BITCAST, dl, VT, ResNode); 15109 } 15110 } 15111 15112 // Emit a zeroed vector and insert the desired subvector on its 15113 // first half. 15114 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl); 15115 SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), 0, DAG, dl); 15116 return DCI.CombineTo(N, InsV); 15117 } 15118 15119 //===--------------------------------------------------------------------===// 15120 // Combine some shuffles into subvector extracts and inserts: 15121 // 15122 15123 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u> 15124 if (isShuffleHigh128VectorInsertLow(SVOp)) { 15125 SDValue V = Extract128BitVector(V1, NumElems/2, DAG, dl); 15126 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, 0, DAG, dl); 15127 return DCI.CombineTo(N, InsV); 15128 } 15129 15130 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1> 15131 if (isShuffleLow128VectorInsertHigh(SVOp)) { 15132 SDValue V = Extract128BitVector(V1, 0, DAG, dl); 15133 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, NumElems/2, DAG, dl); 15134 return DCI.CombineTo(N, InsV); 15135 } 15136 15137 return SDValue(); 15138} 15139 15140/// PerformShuffleCombine - Performs several different shuffle combines. 15141static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG, 15142 TargetLowering::DAGCombinerInfo &DCI, 15143 const X86Subtarget *Subtarget) { 15144 DebugLoc dl = N->getDebugLoc(); 15145 EVT VT = N->getValueType(0); 15146 15147 // Don't create instructions with illegal types after legalize types has run. 15148 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 15149 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType())) 15150 return SDValue(); 15151 15152 // Combine 256-bit vector shuffles. This is only profitable when in AVX mode 15153 if (Subtarget->hasFp256() && VT.is256BitVector() && 15154 N->getOpcode() == ISD::VECTOR_SHUFFLE) 15155 return PerformShuffleCombine256(N, DAG, DCI, Subtarget); 15156 15157 // Only handle 128 wide vector from here on. 15158 if (!VT.is128BitVector()) 15159 return SDValue(); 15160 15161 // Combine a vector_shuffle that is equal to build_vector load1, load2, load3, 15162 // load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are 15163 // consecutive, non-overlapping, and in the right order. 
15164 SmallVector<SDValue, 16> Elts; 15165 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) 15166 Elts.push_back(getShuffleScalarElt(N, i, DAG, 0)); 15167 15168 return EltsFromConsecutiveLoads(VT, Elts, dl, DAG); 15169} 15170 15171/// PerformTruncateCombine - Converts truncate operation to 15172/// a sequence of vector shuffle operations. 15173/// It is possible when we truncate 256-bit vector to 128-bit vector 15174static SDValue PerformTruncateCombine(SDNode *N, SelectionDAG &DAG, 15175 TargetLowering::DAGCombinerInfo &DCI, 15176 const X86Subtarget *Subtarget) { 15177 return SDValue(); 15178} 15179 15180/// XFormVExtractWithShuffleIntoLoad - Check if a vector extract from a target 15181/// specific shuffle of a load can be folded into a single element load. 15182/// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but 15183/// shuffles have been customed lowered so we need to handle those here. 15184static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG, 15185 TargetLowering::DAGCombinerInfo &DCI) { 15186 if (DCI.isBeforeLegalizeOps()) 15187 return SDValue(); 15188 15189 SDValue InVec = N->getOperand(0); 15190 SDValue EltNo = N->getOperand(1); 15191 15192 if (!isa<ConstantSDNode>(EltNo)) 15193 return SDValue(); 15194 15195 EVT VT = InVec.getValueType(); 15196 15197 bool HasShuffleIntoBitcast = false; 15198 if (InVec.getOpcode() == ISD::BITCAST) { 15199 // Don't duplicate a load with other uses. 15200 if (!InVec.hasOneUse()) 15201 return SDValue(); 15202 EVT BCVT = InVec.getOperand(0).getValueType(); 15203 if (BCVT.getVectorNumElements() != VT.getVectorNumElements()) 15204 return SDValue(); 15205 InVec = InVec.getOperand(0); 15206 HasShuffleIntoBitcast = true; 15207 } 15208 15209 if (!isTargetShuffle(InVec.getOpcode())) 15210 return SDValue(); 15211 15212 // Don't duplicate a load with other uses. 15213 if (!InVec.hasOneUse()) 15214 return SDValue(); 15215 15216 SmallVector<int, 16> ShuffleMask; 15217 bool UnaryShuffle; 15218 if (!getTargetShuffleMask(InVec.getNode(), VT.getSimpleVT(), ShuffleMask, 15219 UnaryShuffle)) 15220 return SDValue(); 15221 15222 // Select the input vector, guarding against out of range extract vector. 15223 unsigned NumElems = VT.getVectorNumElements(); 15224 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue(); 15225 int Idx = (Elt > (int)NumElems) ? -1 : ShuffleMask[Elt]; 15226 SDValue LdNode = (Idx < (int)NumElems) ? InVec.getOperand(0) 15227 : InVec.getOperand(1); 15228 15229 // If inputs to shuffle are the same for both ops, then allow 2 uses 15230 unsigned AllowedUses = InVec.getOperand(0) == InVec.getOperand(1) ? 2 : 1; 15231 15232 if (LdNode.getOpcode() == ISD::BITCAST) { 15233 // Don't duplicate a load with other uses. 15234 if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0)) 15235 return SDValue(); 15236 15237 AllowedUses = 1; // only allow 1 load use if we have a bitcast 15238 LdNode = LdNode.getOperand(0); 15239 } 15240 15241 if (!ISD::isNormalLoad(LdNode.getNode())) 15242 return SDValue(); 15243 15244 LoadSDNode *LN0 = cast<LoadSDNode>(LdNode); 15245 15246 if (!LN0 ||!LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile()) 15247 return SDValue(); 15248 15249 if (HasShuffleIntoBitcast) { 15250 // If there's a bitcast before the shuffle, check if the load type and 15251 // alignment is valid. 
15252 unsigned Align = LN0->getAlignment(); 15253 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 15254 unsigned NewAlign = TLI.getDataLayout()-> 15255 getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext())); 15256 15257 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VT)) 15258 return SDValue(); 15259 } 15260 15261 // All checks match so transform back to vector_shuffle so that DAG combiner 15262 // can finish the job 15263 DebugLoc dl = N->getDebugLoc(); 15264 15265 // Create shuffle node taking into account the case that its a unary shuffle 15266 SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(VT) : InVec.getOperand(1); 15267 Shuffle = DAG.getVectorShuffle(InVec.getValueType(), dl, 15268 InVec.getOperand(0), Shuffle, 15269 &ShuffleMask[0]); 15270 Shuffle = DAG.getNode(ISD::BITCAST, dl, VT, Shuffle); 15271 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle, 15272 EltNo); 15273} 15274 15275/// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index 15276/// generation and convert it from being a bunch of shuffles and extracts 15277/// to a simple store and scalar loads to extract the elements. 15278static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG, 15279 TargetLowering::DAGCombinerInfo &DCI) { 15280 SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI); 15281 if (NewOp.getNode()) 15282 return NewOp; 15283 15284 SDValue InputVector = N->getOperand(0); 15285 // Detect whether we are trying to convert from mmx to i32 and the bitcast 15286 // from mmx to v2i32 has a single usage. 15287 if (InputVector.getNode()->getOpcode() == llvm::ISD::BITCAST && 15288 InputVector.getNode()->getOperand(0).getValueType() == MVT::x86mmx && 15289 InputVector.hasOneUse() && N->getValueType(0) == MVT::i32) 15290 return DAG.getNode(X86ISD::MMX_MOVD2W, InputVector.getDebugLoc(), 15291 N->getValueType(0), 15292 InputVector.getNode()->getOperand(0)); 15293 15294 // Only operate on vectors of 4 elements, where the alternative shuffling 15295 // gets to be more expensive. 15296 if (InputVector.getValueType() != MVT::v4i32) 15297 return SDValue(); 15298 15299 // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a 15300 // single use which is a sign-extend or zero-extend, and all elements are 15301 // used. 15302 SmallVector<SDNode *, 4> Uses; 15303 unsigned ExtractedElements = 0; 15304 for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(), 15305 UE = InputVector.getNode()->use_end(); UI != UE; ++UI) { 15306 if (UI.getUse().getResNo() != InputVector.getResNo()) 15307 return SDValue(); 15308 15309 SDNode *Extract = *UI; 15310 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 15311 return SDValue(); 15312 15313 if (Extract->getValueType(0) != MVT::i32) 15314 return SDValue(); 15315 if (!Extract->hasOneUse()) 15316 return SDValue(); 15317 if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND && 15318 Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND) 15319 return SDValue(); 15320 if (!isa<ConstantSDNode>(Extract->getOperand(1))) 15321 return SDValue(); 15322 15323 // Record which element was extracted. 15324 ExtractedElements |= 15325 1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue(); 15326 15327 Uses.push_back(Extract); 15328 } 15329 15330 // If not all the elements were used, this may not be worthwhile. 15331 if (ExtractedElements != 15) 15332 return SDValue(); 15333 15334 // Ok, we've now decided to do the transformation. 
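  // Rough sketch of the result for v4i32: spill the vector to a stack temporary,
  // then rewrite each extract as an i32 load from slot + 4*index; the sign/zero
  // extend users can typically fold into those scalar loads.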
15335 DebugLoc dl = InputVector.getDebugLoc(); 15336 15337 // Store the value to a temporary stack slot. 15338 SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType()); 15339 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr, 15340 MachinePointerInfo(), false, false, 0); 15341 15342 // Replace each use (extract) with a load of the appropriate element. 15343 for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(), 15344 UE = Uses.end(); UI != UE; ++UI) { 15345 SDNode *Extract = *UI; 15346 15347 // cOMpute the element's address. 15348 SDValue Idx = Extract->getOperand(1); 15349 unsigned EltSize = 15350 InputVector.getValueType().getVectorElementType().getSizeInBits()/8; 15351 uint64_t Offset = EltSize * cast<ConstantSDNode>(Idx)->getZExtValue(); 15352 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 15353 SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy()); 15354 15355 SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), 15356 StackPtr, OffsetVal); 15357 15358 // Load the scalar. 15359 SDValue LoadScalar = DAG.getLoad(Extract->getValueType(0), dl, Ch, 15360 ScalarAddr, MachinePointerInfo(), 15361 false, false, false, 0); 15362 15363 // Replace the exact with the load. 15364 DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), LoadScalar); 15365 } 15366 15367 // The replacement was made in place; don't return anything. 15368 return SDValue(); 15369} 15370 15371/// \brief Matches a VSELECT onto min/max or return 0 if the node doesn't match. 15372static unsigned matchIntegerMINMAX(SDValue Cond, EVT VT, SDValue LHS, 15373 SDValue RHS, SelectionDAG &DAG, 15374 const X86Subtarget *Subtarget) { 15375 if (!VT.isVector()) 15376 return 0; 15377 15378 switch (VT.getSimpleVT().SimpleTy) { 15379 default: return 0; 15380 case MVT::v32i8: 15381 case MVT::v16i16: 15382 case MVT::v8i32: 15383 if (!Subtarget->hasAVX2()) 15384 return 0; 15385 case MVT::v16i8: 15386 case MVT::v8i16: 15387 case MVT::v4i32: 15388 if (!Subtarget->hasSSE2()) 15389 return 0; 15390 } 15391 15392 // SSE2 has only a small subset of the operations. 15393 bool hasUnsigned = Subtarget->hasSSE41() || 15394 (Subtarget->hasSSE2() && VT == MVT::v16i8); 15395 bool hasSigned = Subtarget->hasSSE41() || 15396 (Subtarget->hasSSE2() && VT == MVT::v8i16); 15397 15398 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); 15399 15400 // Check for x CC y ? x : y. 15401 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) && 15402 DAG.isEqualTo(RHS, Cond.getOperand(1))) { 15403 switch (CC) { 15404 default: break; 15405 case ISD::SETULT: 15406 case ISD::SETULE: 15407 return hasUnsigned ? X86ISD::UMIN : 0; 15408 case ISD::SETUGT: 15409 case ISD::SETUGE: 15410 return hasUnsigned ? X86ISD::UMAX : 0; 15411 case ISD::SETLT: 15412 case ISD::SETLE: 15413 return hasSigned ? X86ISD::SMIN : 0; 15414 case ISD::SETGT: 15415 case ISD::SETGE: 15416 return hasSigned ? X86ISD::SMAX : 0; 15417 } 15418 // Check for x CC y ? y : x -- a min/max with reversed arms. 15419 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) && 15420 DAG.isEqualTo(RHS, Cond.getOperand(0))) { 15421 switch (CC) { 15422 default: break; 15423 case ISD::SETULT: 15424 case ISD::SETULE: 15425 return hasUnsigned ? X86ISD::UMAX : 0; 15426 case ISD::SETUGT: 15427 case ISD::SETUGE: 15428 return hasUnsigned ? X86ISD::UMIN : 0; 15429 case ISD::SETLT: 15430 case ISD::SETLE: 15431 return hasSigned ? X86ISD::SMAX : 0; 15432 case ISD::SETGT: 15433 case ISD::SETGE: 15434 return hasSigned ? 
X86ISD::SMIN : 0; 15435 } 15436 } 15437 15438 return 0; 15439} 15440 15441/// PerformSELECTCombine - Do target-specific dag combines on SELECT and VSELECT 15442/// nodes. 15443static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, 15444 TargetLowering::DAGCombinerInfo &DCI, 15445 const X86Subtarget *Subtarget) { 15446 DebugLoc DL = N->getDebugLoc(); 15447 SDValue Cond = N->getOperand(0); 15448 // Get the LHS/RHS of the select. 15449 SDValue LHS = N->getOperand(1); 15450 SDValue RHS = N->getOperand(2); 15451 EVT VT = LHS.getValueType(); 15452 15453 // If we have SSE[12] support, try to form min/max nodes. SSE min/max 15454 // instructions match the semantics of the common C idiom x<y?x:y but not 15455 // x<=y?x:y, because of how they handle negative zero (which can be 15456 // ignored in unsafe-math mode). 15457 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() && 15458 VT != MVT::f80 && DAG.getTargetLoweringInfo().isTypeLegal(VT) && 15459 (Subtarget->hasSSE2() || 15460 (Subtarget->hasSSE1() && VT.getScalarType() == MVT::f32))) { 15461 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); 15462 15463 unsigned Opcode = 0; 15464 // Check for x CC y ? x : y. 15465 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) && 15466 DAG.isEqualTo(RHS, Cond.getOperand(1))) { 15467 switch (CC) { 15468 default: break; 15469 case ISD::SETULT: 15470 // Converting this to a min would handle NaNs incorrectly, and swapping 15471 // the operands would cause it to handle comparisons between positive 15472 // and negative zero incorrectly. 15473 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) { 15474 if (!DAG.getTarget().Options.UnsafeFPMath && 15475 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 15476 break; 15477 std::swap(LHS, RHS); 15478 } 15479 Opcode = X86ISD::FMIN; 15480 break; 15481 case ISD::SETOLE: 15482 // Converting this to a min would handle comparisons between positive 15483 // and negative zero incorrectly. 15484 if (!DAG.getTarget().Options.UnsafeFPMath && 15485 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) 15486 break; 15487 Opcode = X86ISD::FMIN; 15488 break; 15489 case ISD::SETULE: 15490 // Converting this to a min would handle both negative zeros and NaNs 15491 // incorrectly, but we can swap the operands to fix both. 15492 std::swap(LHS, RHS); 15493 case ISD::SETOLT: 15494 case ISD::SETLT: 15495 case ISD::SETLE: 15496 Opcode = X86ISD::FMIN; 15497 break; 15498 15499 case ISD::SETOGE: 15500 // Converting this to a max would handle comparisons between positive 15501 // and negative zero incorrectly. 15502 if (!DAG.getTarget().Options.UnsafeFPMath && 15503 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) 15504 break; 15505 Opcode = X86ISD::FMAX; 15506 break; 15507 case ISD::SETUGT: 15508 // Converting this to a max would handle NaNs incorrectly, and swapping 15509 // the operands would cause it to handle comparisons between positive 15510 // and negative zero incorrectly. 15511 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) { 15512 if (!DAG.getTarget().Options.UnsafeFPMath && 15513 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 15514 break; 15515 std::swap(LHS, RHS); 15516 } 15517 Opcode = X86ISD::FMAX; 15518 break; 15519 case ISD::SETUGE: 15520 // Converting this to a max would handle both negative zeros and NaNs 15521 // incorrectly, but we can swap the operands to fix both. 
15522 std::swap(LHS, RHS); 15523 case ISD::SETOGT: 15524 case ISD::SETGT: 15525 case ISD::SETGE: 15526 Opcode = X86ISD::FMAX; 15527 break; 15528 } 15529 // Check for x CC y ? y : x -- a min/max with reversed arms. 15530 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) && 15531 DAG.isEqualTo(RHS, Cond.getOperand(0))) { 15532 switch (CC) { 15533 default: break; 15534 case ISD::SETOGE: 15535 // Converting this to a min would handle comparisons between positive 15536 // and negative zero incorrectly, and swapping the operands would 15537 // cause it to handle NaNs incorrectly. 15538 if (!DAG.getTarget().Options.UnsafeFPMath && 15539 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) { 15540 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 15541 break; 15542 std::swap(LHS, RHS); 15543 } 15544 Opcode = X86ISD::FMIN; 15545 break; 15546 case ISD::SETUGT: 15547 // Converting this to a min would handle NaNs incorrectly. 15548 if (!DAG.getTarget().Options.UnsafeFPMath && 15549 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))) 15550 break; 15551 Opcode = X86ISD::FMIN; 15552 break; 15553 case ISD::SETUGE: 15554 // Converting this to a min would handle both negative zeros and NaNs 15555 // incorrectly, but we can swap the operands to fix both. 15556 std::swap(LHS, RHS); 15557 case ISD::SETOGT: 15558 case ISD::SETGT: 15559 case ISD::SETGE: 15560 Opcode = X86ISD::FMIN; 15561 break; 15562 15563 case ISD::SETULT: 15564 // Converting this to a max would handle NaNs incorrectly. 15565 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 15566 break; 15567 Opcode = X86ISD::FMAX; 15568 break; 15569 case ISD::SETOLE: 15570 // Converting this to a max would handle comparisons between positive 15571 // and negative zero incorrectly, and swapping the operands would 15572 // cause it to handle NaNs incorrectly. 15573 if (!DAG.getTarget().Options.UnsafeFPMath && 15574 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) { 15575 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 15576 break; 15577 std::swap(LHS, RHS); 15578 } 15579 Opcode = X86ISD::FMAX; 15580 break; 15581 case ISD::SETULE: 15582 // Converting this to a max would handle both negative zeros and NaNs 15583 // incorrectly, but we can swap the operands to fix both. 15584 std::swap(LHS, RHS); 15585 case ISD::SETOLT: 15586 case ISD::SETLT: 15587 case ISD::SETLE: 15588 Opcode = X86ISD::FMAX; 15589 break; 15590 } 15591 } 15592 15593 if (Opcode) 15594 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS); 15595 } 15596 15597 // If this is a select between two integer constants, try to do some 15598 // optimizations. 15599 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) { 15600 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS)) 15601 // Don't do this for crazy integer types. 15602 if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) { 15603 // If this is efficiently invertible, canonicalize the LHSC/RHSC values 15604 // so that TrueC (the true value) is larger than FalseC. 15605 bool NeedsCondInvert = false; 15606 15607 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) && 15608 // Efficiently invertible. 15609 (Cond.getOpcode() == ISD::SETCC || // setcc -> invertible. 15610 (Cond.getOpcode() == ISD::XOR && // xor(X, C) -> invertible. 15611 isa<ConstantSDNode>(Cond.getOperand(1))))) { 15612 NeedsCondInvert = true; 15613 std::swap(TrueC, FalseC); 15614 } 15615 15616 // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0. 
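        // e.g. (select C, 32, 0) becomes (shl (zext C), 5), with the condition
        // xor'ed with 1 first when TrueC/FalseC had to be swapped above.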
15617 if (FalseC->getAPIntValue() == 0 && 15618 TrueC->getAPIntValue().isPowerOf2()) { 15619 if (NeedsCondInvert) // Invert the condition if needed. 15620 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond, 15621 DAG.getConstant(1, Cond.getValueType())); 15622 15623 // Zero extend the condition if needed. 15624 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond); 15625 15626 unsigned ShAmt = TrueC->getAPIntValue().logBase2(); 15627 return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond, 15628 DAG.getConstant(ShAmt, MVT::i8)); 15629 } 15630 15631 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)+cst. 15632 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) { 15633 if (NeedsCondInvert) // Invert the condition if needed. 15634 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond, 15635 DAG.getConstant(1, Cond.getValueType())); 15636 15637 // Zero extend the condition if needed. 15638 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, 15639 FalseC->getValueType(0), Cond); 15640 return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 15641 SDValue(FalseC, 0)); 15642 } 15643 15644 // Optimize cases that will turn into an LEA instruction. This requires 15645 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9). 15646 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) { 15647 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue(); 15648 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff; 15649 15650 bool isFastMultiplier = false; 15651 if (Diff < 10) { 15652 switch ((unsigned char)Diff) { 15653 default: break; 15654 case 1: // result = add base, cond 15655 case 2: // result = lea base( , cond*2) 15656 case 3: // result = lea base(cond, cond*2) 15657 case 4: // result = lea base( , cond*4) 15658 case 5: // result = lea base(cond, cond*4) 15659 case 8: // result = lea base( , cond*8) 15660 case 9: // result = lea base(cond, cond*8) 15661 isFastMultiplier = true; 15662 break; 15663 } 15664 } 15665 15666 if (isFastMultiplier) { 15667 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue(); 15668 if (NeedsCondInvert) // Invert the condition if needed. 15669 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond, 15670 DAG.getConstant(1, Cond.getValueType())); 15671 15672 // Zero extend the condition if needed. 15673 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0), 15674 Cond); 15675 // Scale the condition by the difference. 15676 if (Diff != 1) 15677 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond, 15678 DAG.getConstant(Diff, Cond.getValueType())); 15679 15680 // Add the base if non-zero. 15681 if (FalseC->getAPIntValue() != 0) 15682 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 15683 SDValue(FalseC, 0)); 15684 return Cond; 15685 } 15686 } 15687 } 15688 } 15689 15690 // Canonicalize max and min: 15691 // (x > y) ? x : y -> (x >= y) ? x : y 15692 // (x < y) ? x : y -> (x <= y) ? x : y 15693 // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates 15694 // the need for an extra compare 15695 // against zero. e.g. 15696 // (x - y) > 0 : (x - y) ? 0 -> (x - y) >= 0 : (x - y) ? 
0 15697 // subl %esi, %edi 15698 // testl %edi, %edi 15699 // movl $0, %eax 15700 // cmovgl %edi, %eax 15701 // => 15702 // xorl %eax, %eax 15703 // subl %esi, $edi 15704 // cmovsl %eax, %edi 15705 if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC && 15706 DAG.isEqualTo(LHS, Cond.getOperand(0)) && 15707 DAG.isEqualTo(RHS, Cond.getOperand(1))) { 15708 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); 15709 switch (CC) { 15710 default: break; 15711 case ISD::SETLT: 15712 case ISD::SETGT: { 15713 ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE; 15714 Cond = DAG.getSetCC(Cond.getDebugLoc(), Cond.getValueType(), 15715 Cond.getOperand(0), Cond.getOperand(1), NewCC); 15716 return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS); 15717 } 15718 } 15719 } 15720 15721 // Match VSELECTs into subs with unsigned saturation. 15722 if (!DCI.isBeforeLegalize() && 15723 N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC && 15724 // psubus is available in SSE2 and AVX2 for i8 and i16 vectors. 15725 ((Subtarget->hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) || 15726 (Subtarget->hasAVX2() && (VT == MVT::v32i8 || VT == MVT::v16i16)))) { 15727 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); 15728 15729 // Check if one of the arms of the VSELECT is a zero vector. If it's on the 15730 // left side invert the predicate to simplify logic below. 15731 SDValue Other; 15732 if (ISD::isBuildVectorAllZeros(LHS.getNode())) { 15733 Other = RHS; 15734 CC = ISD::getSetCCInverse(CC, true); 15735 } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) { 15736 Other = LHS; 15737 } 15738 15739 if (Other.getNode() && Other->getNumOperands() == 2 && 15740 DAG.isEqualTo(Other->getOperand(0), Cond.getOperand(0))) { 15741 SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1); 15742 SDValue CondRHS = Cond->getOperand(1); 15743 15744 // Look for a general sub with unsigned saturation first. 15745 // x >= y ? x-y : 0 --> subus x, y 15746 // x > y ? x-y : 0 --> subus x, y 15747 if ((CC == ISD::SETUGE || CC == ISD::SETUGT) && 15748 Other->getOpcode() == ISD::SUB && DAG.isEqualTo(OpRHS, CondRHS)) 15749 return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, OpRHS); 15750 15751 // If the RHS is a constant we have to reverse the const canonicalization. 15752 // x > C-1 ? x+-C : 0 --> subus x, C 15753 if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD && 15754 isSplatVector(CondRHS.getNode()) && isSplatVector(OpRHS.getNode())) { 15755 APInt A = cast<ConstantSDNode>(OpRHS.getOperand(0))->getAPIntValue(); 15756 if (CondRHS.getConstantOperandVal(0) == -A-1) 15757 return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, 15758 DAG.getConstant(-A, VT)); 15759 } 15760 15761 // Another special case: If C was a sign bit, the sub has been 15762 // canonicalized into a xor. 15763 // FIXME: Would it be better to use ComputeMaskedBits to determine whether 15764 // it's safe to decanonicalize the xor? 15765 // x s< 0 ? x^C : 0 --> subus x, C 15766 if (CC == ISD::SETLT && Other->getOpcode() == ISD::XOR && 15767 ISD::isBuildVectorAllZeros(CondRHS.getNode()) && 15768 isSplatVector(OpRHS.getNode())) { 15769 APInt A = cast<ConstantSDNode>(OpRHS.getOperand(0))->getAPIntValue(); 15770 if (A.isSignBit()) 15771 return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, OpRHS); 15772 } 15773 } 15774 } 15775 15776 // Try to match a min/max vector operation. 
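  // e.g. (vselect (setcc x, y, setult), x, y) on v4i32 becomes X86ISD::UMIN
  // (pminud) when SSE4.1 is available; see matchIntegerMINMAX above.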
15777 if (!DCI.isBeforeLegalize() && 15778 N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC) 15779 if (unsigned Op = matchIntegerMINMAX(Cond, VT, LHS, RHS, DAG, Subtarget)) 15780 return DAG.getNode(Op, DL, N->getValueType(0), LHS, RHS); 15781 15782 // Simplify vector selection if the selector will be produced by CMPP*/PCMP*. 15783 if (!DCI.isBeforeLegalize() && N->getOpcode() == ISD::VSELECT && 15784 Cond.getOpcode() == ISD::SETCC) { 15785 15786 assert(Cond.getValueType().isVector() && 15787 "vector select expects a vector selector!"); 15788 15789 EVT IntVT = Cond.getValueType(); 15790 bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode()); 15791 bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode()); 15792 15793 if (!TValIsAllOnes && !FValIsAllZeros) { 15794 // Try invert the condition if true value is not all 1s and false value 15795 // is not all 0s. 15796 bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode()); 15797 bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode()); 15798 15799 if (TValIsAllZeros || FValIsAllOnes) { 15800 SDValue CC = Cond.getOperand(2); 15801 ISD::CondCode NewCC = 15802 ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(), 15803 Cond.getOperand(0).getValueType().isInteger()); 15804 Cond = DAG.getSetCC(DL, IntVT, Cond.getOperand(0), Cond.getOperand(1), NewCC); 15805 std::swap(LHS, RHS); 15806 TValIsAllOnes = FValIsAllOnes; 15807 FValIsAllZeros = TValIsAllZeros; 15808 } 15809 } 15810 15811 if (TValIsAllOnes || FValIsAllZeros) { 15812 SDValue Ret; 15813 15814 if (TValIsAllOnes && FValIsAllZeros) 15815 Ret = Cond; 15816 else if (TValIsAllOnes) 15817 Ret = DAG.getNode(ISD::OR, DL, IntVT, Cond, 15818 DAG.getNode(ISD::BITCAST, DL, IntVT, RHS)); 15819 else if (FValIsAllZeros) 15820 Ret = DAG.getNode(ISD::AND, DL, IntVT, Cond, 15821 DAG.getNode(ISD::BITCAST, DL, IntVT, LHS)); 15822 15823 return DAG.getNode(ISD::BITCAST, DL, VT, Ret); 15824 } 15825 } 15826 15827 // If we know that this node is legal then we know that it is going to be 15828 // matched by one of the SSE/AVX BLEND instructions. These instructions only 15829 // depend on the highest bit in each word. Try to use SimplifyDemandedBits 15830 // to simplify previous instructions. 15831 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 15832 if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() && 15833 !DCI.isBeforeLegalize() && TLI.isOperationLegal(ISD::VSELECT, VT)) { 15834 unsigned BitWidth = Cond.getValueType().getScalarType().getSizeInBits(); 15835 15836 // Don't optimize vector selects that map to mask-registers. 15837 if (BitWidth == 1) 15838 return SDValue(); 15839 15840 assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size"); 15841 APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 1); 15842 15843 APInt KnownZero, KnownOne; 15844 TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(), 15845 DCI.isBeforeLegalizeOps()); 15846 if (TLO.ShrinkDemandedConstant(Cond, DemandedMask) || 15847 TLI.SimplifyDemandedBits(Cond, DemandedMask, KnownZero, KnownOne, TLO)) 15848 DCI.CommitTargetLoweringOpt(TLO); 15849 } 15850 15851 return SDValue(); 15852} 15853 15854// Check whether a boolean test is testing a boolean value generated by 15855// X86ISD::SETCC. If so, return the operand of that SETCC and proper condition 15856// code. 
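// The value returned is the EFLAGS operand of that SETCC, which the caller can
// feed directly to its own CMOV or BRCOND.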
15857// 15858// Simplify the following patterns: 15859// (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or 15860// (Op (CMP (SETCC Cond EFLAGS) 0) NEQ) 15861// to (Op EFLAGS Cond) 15862// 15863// (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or 15864// (Op (CMP (SETCC Cond EFLAGS) 1) NEQ) 15865// to (Op EFLAGS !Cond) 15866// 15867// where Op could be BRCOND or CMOV. 15868// 15869static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) { 15870 // Quit if not CMP and SUB with its value result used. 15871 if (Cmp.getOpcode() != X86ISD::CMP && 15872 (Cmp.getOpcode() != X86ISD::SUB || Cmp.getNode()->hasAnyUseOfValue(0))) 15873 return SDValue(); 15874 15875 // Quit if not used as a boolean value. 15876 if (CC != X86::COND_E && CC != X86::COND_NE) 15877 return SDValue(); 15878 15879 // Check CMP operands. One of them should be 0 or 1 and the other should be 15880 // an SetCC or extended from it. 15881 SDValue Op1 = Cmp.getOperand(0); 15882 SDValue Op2 = Cmp.getOperand(1); 15883 15884 SDValue SetCC; 15885 const ConstantSDNode* C = 0; 15886 bool needOppositeCond = (CC == X86::COND_E); 15887 bool checkAgainstTrue = false; // Is it a comparison against 1? 15888 15889 if ((C = dyn_cast<ConstantSDNode>(Op1))) 15890 SetCC = Op2; 15891 else if ((C = dyn_cast<ConstantSDNode>(Op2))) 15892 SetCC = Op1; 15893 else // Quit if all operands are not constants. 15894 return SDValue(); 15895 15896 if (C->getZExtValue() == 1) { 15897 needOppositeCond = !needOppositeCond; 15898 checkAgainstTrue = true; 15899 } else if (C->getZExtValue() != 0) 15900 // Quit if the constant is neither 0 or 1. 15901 return SDValue(); 15902 15903 bool truncatedToBoolWithAnd = false; 15904 // Skip (zext $x), (trunc $x), or (and $x, 1) node. 15905 while (SetCC.getOpcode() == ISD::ZERO_EXTEND || 15906 SetCC.getOpcode() == ISD::TRUNCATE || 15907 SetCC.getOpcode() == ISD::AND) { 15908 if (SetCC.getOpcode() == ISD::AND) { 15909 int OpIdx = -1; 15910 ConstantSDNode *CS; 15911 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(0))) && 15912 CS->getZExtValue() == 1) 15913 OpIdx = 1; 15914 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(1))) && 15915 CS->getZExtValue() == 1) 15916 OpIdx = 0; 15917 if (OpIdx == -1) 15918 break; 15919 SetCC = SetCC.getOperand(OpIdx); 15920 truncatedToBoolWithAnd = true; 15921 } else 15922 SetCC = SetCC.getOperand(0); 15923 } 15924 15925 switch (SetCC.getOpcode()) { 15926 case X86ISD::SETCC_CARRY: 15927 // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to 15928 // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1, 15929 // i.e. it's a comparison against true but the result of SETCC_CARRY is not 15930 // truncated to i1 using 'and'. 15931 if (checkAgainstTrue && !truncatedToBoolWithAnd) 15932 break; 15933 assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B && 15934 "Invalid use of SETCC_CARRY!"); 15935 // FALL THROUGH 15936 case X86ISD::SETCC: 15937 // Set the condition code or opposite one if necessary. 15938 CC = X86::CondCode(SetCC.getConstantOperandVal(0)); 15939 if (needOppositeCond) 15940 CC = X86::GetOppositeBranchCondition(CC); 15941 return SetCC.getOperand(1); 15942 case X86ISD::CMOV: { 15943 // Check whether false/true value has canonical one, i.e. 0 or 1. 15944 ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0)); 15945 ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1)); 15946 // Quit if true value is not a constant. 15947 if (!TVal) 15948 return SDValue(); 15949 // Quit if false value is not a constant. 
15950 if (!FVal) { 15951 SDValue Op = SetCC.getOperand(0); 15952 // Skip 'zext' or 'trunc' node. 15953 if (Op.getOpcode() == ISD::ZERO_EXTEND || 15954 Op.getOpcode() == ISD::TRUNCATE) 15955 Op = Op.getOperand(0); 15956 // A special case for rdrand/rdseed, where 0 is set if false cond is 15957 // found. 15958 if ((Op.getOpcode() != X86ISD::RDRAND && 15959 Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0) 15960 return SDValue(); 15961 } 15962 // Quit if false value is not the constant 0 or 1. 15963 bool FValIsFalse = true; 15964 if (FVal && FVal->getZExtValue() != 0) { 15965 if (FVal->getZExtValue() != 1) 15966 return SDValue(); 15967 // If FVal is 1, opposite cond is needed. 15968 needOppositeCond = !needOppositeCond; 15969 FValIsFalse = false; 15970 } 15971 // Quit if TVal is not the constant opposite of FVal. 15972 if (FValIsFalse && TVal->getZExtValue() != 1) 15973 return SDValue(); 15974 if (!FValIsFalse && TVal->getZExtValue() != 0) 15975 return SDValue(); 15976 CC = X86::CondCode(SetCC.getConstantOperandVal(2)); 15977 if (needOppositeCond) 15978 CC = X86::GetOppositeBranchCondition(CC); 15979 return SetCC.getOperand(3); 15980 } 15981 } 15982 15983 return SDValue(); 15984} 15985 15986/// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL] 15987static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG, 15988 TargetLowering::DAGCombinerInfo &DCI, 15989 const X86Subtarget *Subtarget) { 15990 DebugLoc DL = N->getDebugLoc(); 15991 15992 // If the flag operand isn't dead, don't touch this CMOV. 15993 if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty()) 15994 return SDValue(); 15995 15996 SDValue FalseOp = N->getOperand(0); 15997 SDValue TrueOp = N->getOperand(1); 15998 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2); 15999 SDValue Cond = N->getOperand(3); 16000 16001 if (CC == X86::COND_E || CC == X86::COND_NE) { 16002 switch (Cond.getOpcode()) { 16003 default: break; 16004 case X86ISD::BSR: 16005 case X86ISD::BSF: 16006 // If operand of BSR / BSF are proven never zero, then ZF cannot be set. 16007 if (DAG.isKnownNeverZero(Cond.getOperand(0))) 16008 return (CC == X86::COND_E) ? FalseOp : TrueOp; 16009 } 16010 } 16011 16012 SDValue Flags; 16013 16014 Flags = checkBoolTestSetCCCombine(Cond, CC); 16015 if (Flags.getNode() && 16016 // Extra check as FCMOV only supports a subset of X86 cond. 16017 (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC))) { 16018 SDValue Ops[] = { FalseOp, TrueOp, 16019 DAG.getConstant(CC, MVT::i8), Flags }; 16020 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), 16021 Ops, array_lengthof(Ops)); 16022 } 16023 16024 // If this is a select between two integer constants, try to do some 16025 // optimizations. Note that the operands are ordered the opposite of SELECT 16026 // operands. 16027 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) { 16028 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) { 16029 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is 16030 // larger than FalseC (the false value). 16031 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) { 16032 CC = X86::GetOppositeBranchCondition(CC); 16033 std::swap(TrueC, FalseC); 16034 std::swap(TrueOp, FalseOp); 16035 } 16036 16037 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0. 16038 // This is efficient for any integer data type (including i8/i16) and 16039 // shift amount. 
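      // e.g. with CC == COND_NE this becomes: setne %al; movzbl %al, %eax;
      // shll $3, %eax -- no constant materialization or cmov needed.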
16040 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) { 16041 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 16042 DAG.getConstant(CC, MVT::i8), Cond); 16043 16044 // Zero extend the condition if needed. 16045 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond); 16046 16047 unsigned ShAmt = TrueC->getAPIntValue().logBase2(); 16048 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond, 16049 DAG.getConstant(ShAmt, MVT::i8)); 16050 if (N->getNumValues() == 2) // Dead flag value? 16051 return DCI.CombineTo(N, Cond, SDValue()); 16052 return Cond; 16053 } 16054 16055 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)+cst. This is efficient 16056 // for any integer data type, including i8/i16. 16057 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) { 16058 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 16059 DAG.getConstant(CC, MVT::i8), Cond); 16060 16061 // Zero extend the condition if needed. 16062 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, 16063 FalseC->getValueType(0), Cond); 16064 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 16065 SDValue(FalseC, 0)); 16066 16067 if (N->getNumValues() == 2) // Dead flag value? 16068 return DCI.CombineTo(N, Cond, SDValue()); 16069 return Cond; 16070 } 16071 16072 // Optimize cases that will turn into an LEA instruction. This requires 16073 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9). 16074 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) { 16075 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue(); 16076 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff; 16077 16078 bool isFastMultiplier = false; 16079 if (Diff < 10) { 16080 switch ((unsigned char)Diff) { 16081 default: break; 16082 case 1: // result = add base, cond 16083 case 2: // result = lea base( , cond*2) 16084 case 3: // result = lea base(cond, cond*2) 16085 case 4: // result = lea base( , cond*4) 16086 case 5: // result = lea base(cond, cond*4) 16087 case 8: // result = lea base( , cond*8) 16088 case 9: // result = lea base(cond, cond*8) 16089 isFastMultiplier = true; 16090 break; 16091 } 16092 } 16093 16094 if (isFastMultiplier) { 16095 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue(); 16096 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 16097 DAG.getConstant(CC, MVT::i8), Cond); 16098 // Zero extend the condition if needed. 16099 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0), 16100 Cond); 16101 // Scale the condition by the difference. 16102 if (Diff != 1) 16103 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond, 16104 DAG.getConstant(Diff, Cond.getValueType())); 16105 16106 // Add the base if non-zero. 16107 if (FalseC->getAPIntValue() != 0) 16108 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 16109 SDValue(FalseC, 0)); 16110 if (N->getNumValues() == 2) // Dead flag value? 16111 return DCI.CombineTo(N, Cond, SDValue()); 16112 return Cond; 16113 } 16114 } 16115 } 16116 } 16117 16118 // Handle these cases: 16119 // (select (x != c), e, c) -> select (x != c), e, x), 16120 // (select (x == c), c, e) -> select (x == c), x, e) 16121 // where the c is an integer constant, and the "select" is the combination 16122 // of CMOV and CMP. 16123 // 16124 // The rationale for this change is that the conditional-move from a constant 16125 // needs two instructions, however, conditional-move from a register needs 16126 // only one instruction. 
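  // For example, a conditional move of the constant 42 needs
  //   movl $42, %ecx; cmovnel %ecx, %eax
  // whereas a conditional move from an existing register is the single
  //   cmovnel %edi, %eax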
16127 // 16128 // CAVEAT: By replacing a constant with a symbolic value, it may obscure 16129 // some instruction-combining opportunities. This opt needs to be 16130 // postponed as late as possible. 16131 // 16132 if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) { 16133 // the DCI.xxxx conditions are provided to postpone the optimization as 16134 // late as possible. 16135 16136 ConstantSDNode *CmpAgainst = 0; 16137 if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) && 16138 (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) && 16139 !isa<ConstantSDNode>(Cond.getOperand(0))) { 16140 16141 if (CC == X86::COND_NE && 16142 CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) { 16143 CC = X86::GetOppositeBranchCondition(CC); 16144 std::swap(TrueOp, FalseOp); 16145 } 16146 16147 if (CC == X86::COND_E && 16148 CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) { 16149 SDValue Ops[] = { FalseOp, Cond.getOperand(0), 16150 DAG.getConstant(CC, MVT::i8), Cond }; 16151 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList (), Ops, 16152 array_lengthof(Ops)); 16153 } 16154 } 16155 } 16156 16157 return SDValue(); 16158} 16159 16160/// PerformMulCombine - Optimize a single multiply with constant into two 16161/// in order to implement it with two cheaper instructions, e.g. 16162/// LEA + SHL, LEA + LEA. 16163static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG, 16164 TargetLowering::DAGCombinerInfo &DCI) { 16165 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 16166 return SDValue(); 16167 16168 EVT VT = N->getValueType(0); 16169 if (VT != MVT::i64) 16170 return SDValue(); 16171 16172 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 16173 if (!C) 16174 return SDValue(); 16175 uint64_t MulAmt = C->getZExtValue(); 16176 if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9) 16177 return SDValue(); 16178 16179 uint64_t MulAmt1 = 0; 16180 uint64_t MulAmt2 = 0; 16181 if ((MulAmt % 9) == 0) { 16182 MulAmt1 = 9; 16183 MulAmt2 = MulAmt / 9; 16184 } else if ((MulAmt % 5) == 0) { 16185 MulAmt1 = 5; 16186 MulAmt2 = MulAmt / 5; 16187 } else if ((MulAmt % 3) == 0) { 16188 MulAmt1 = 3; 16189 MulAmt2 = MulAmt / 3; 16190 } 16191 if (MulAmt2 && 16192 (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){ 16193 DebugLoc DL = N->getDebugLoc(); 16194 16195 if (isPowerOf2_64(MulAmt2) && 16196 !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD)) 16197 // If second multiplifer is pow2, issue it first. We want the multiply by 16198 // 3, 5, or 9 to be folded into the addressing mode unless the lone use 16199 // is an add. 16200 std::swap(MulAmt1, MulAmt2); 16201 16202 SDValue NewMul; 16203 if (isPowerOf2_64(MulAmt1)) 16204 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0), 16205 DAG.getConstant(Log2_64(MulAmt1), MVT::i8)); 16206 else 16207 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0), 16208 DAG.getConstant(MulAmt1, VT)); 16209 16210 if (isPowerOf2_64(MulAmt2)) 16211 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul, 16212 DAG.getConstant(Log2_64(MulAmt2), MVT::i8)); 16213 else 16214 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul, 16215 DAG.getConstant(MulAmt2, VT)); 16216 16217 // Do not add new nodes to DAG combiner worklist. 
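    // At this point NewMul is, e.g., (x*9)*5 for a multiply by 45 (two LEAs)
    // or ((x<<3)*5) for a multiply by 40 (SHL + LEA).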
16218 DCI.CombineTo(N, NewMul, false); 16219 } 16220 return SDValue(); 16221} 16222 16223static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) { 16224 SDValue N0 = N->getOperand(0); 16225 SDValue N1 = N->getOperand(1); 16226 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 16227 EVT VT = N0.getValueType(); 16228 16229 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2)) 16230 // since the result of setcc_c is all zero's or all ones. 16231 if (VT.isInteger() && !VT.isVector() && 16232 N1C && N0.getOpcode() == ISD::AND && 16233 N0.getOperand(1).getOpcode() == ISD::Constant) { 16234 SDValue N00 = N0.getOperand(0); 16235 if (N00.getOpcode() == X86ISD::SETCC_CARRY || 16236 ((N00.getOpcode() == ISD::ANY_EXTEND || 16237 N00.getOpcode() == ISD::ZERO_EXTEND) && 16238 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY)) { 16239 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 16240 APInt ShAmt = N1C->getAPIntValue(); 16241 Mask = Mask.shl(ShAmt); 16242 if (Mask != 0) 16243 return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, 16244 N00, DAG.getConstant(Mask, VT)); 16245 } 16246 } 16247 16248 // Hardware support for vector shifts is sparse which makes us scalarize the 16249 // vector operations in many cases. Also, on sandybridge ADD is faster than 16250 // shl. 16251 // (shl V, 1) -> add V,V 16252 if (isSplatVector(N1.getNode())) { 16253 assert(N0.getValueType().isVector() && "Invalid vector shift type"); 16254 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(0)); 16255 // We shift all of the values by one. In many cases we do not have 16256 // hardware support for this operation. This is better expressed as an ADD 16257 // of two values. 16258 if (N1C && (1 == N1C->getZExtValue())) { 16259 return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N0, N0); 16260 } 16261 } 16262 16263 return SDValue(); 16264} 16265 16266/// PerformShiftCombine - Combine shifts. 16267static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG, 16268 TargetLowering::DAGCombinerInfo &DCI, 16269 const X86Subtarget *Subtarget) { 16270 if (N->getOpcode() == ISD::SHL) { 16271 SDValue V = PerformSHLCombine(N, DAG); 16272 if (V.getNode()) return V; 16273 } 16274 16275 return SDValue(); 16276} 16277 16278// CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..)) 16279// where both setccs reference the same FP CMP, and rewrite for CMPEQSS 16280// and friends. Likewise for OR -> CMPNEQSS. 16281static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG, 16282 TargetLowering::DAGCombinerInfo &DCI, 16283 const X86Subtarget *Subtarget) { 16284 unsigned opcode; 16285 16286 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but 16287 // we're requiring SSE2 for both. 16288 if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) { 16289 SDValue N0 = N->getOperand(0); 16290 SDValue N1 = N->getOperand(1); 16291 SDValue CMP0 = N0->getOperand(1); 16292 SDValue CMP1 = N1->getOperand(1); 16293 DebugLoc DL = N->getDebugLoc(); 16294 16295 // The SETCCs should both refer to the same CMP. 
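    // e.g. (and (setcc eq), (setcc np)) where both setccs test the very same
    // X86ISD::CMP of two floats; if they test different CMP nodes, the single
    // CMPEQSS/CMPNEQSS rewrite below does not apply.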
16296 if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1) 16297 return SDValue(); 16298 16299 SDValue CMP00 = CMP0->getOperand(0); 16300 SDValue CMP01 = CMP0->getOperand(1); 16301 EVT VT = CMP00.getValueType(); 16302 16303 if (VT == MVT::f32 || VT == MVT::f64) { 16304 bool ExpectingFlags = false; 16305 // Check for any users that want flags: 16306 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end(); 16307 !ExpectingFlags && UI != UE; ++UI) 16308 switch (UI->getOpcode()) { 16309 default: 16310 case ISD::BR_CC: 16311 case ISD::BRCOND: 16312 case ISD::SELECT: 16313 ExpectingFlags = true; 16314 break; 16315 case ISD::CopyToReg: 16316 case ISD::SIGN_EXTEND: 16317 case ISD::ZERO_EXTEND: 16318 case ISD::ANY_EXTEND: 16319 break; 16320 } 16321 16322 if (!ExpectingFlags) { 16323 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0); 16324 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0); 16325 16326 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) { 16327 X86::CondCode tmp = cc0; 16328 cc0 = cc1; 16329 cc1 = tmp; 16330 } 16331 16332 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) || 16333 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) { 16334 bool is64BitFP = (CMP00.getValueType() == MVT::f64); 16335 X86ISD::NodeType NTOperator = is64BitFP ? 16336 X86ISD::FSETCCsd : X86ISD::FSETCCss; 16337 // FIXME: need symbolic constants for these magic numbers. 16338 // See X86ATTInstPrinter.cpp:printSSECC(). 16339 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4; 16340 SDValue OnesOrZeroesF = DAG.getNode(NTOperator, DL, MVT::f32, CMP00, CMP01, 16341 DAG.getConstant(x86cc, MVT::i8)); 16342 SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, MVT::i32, 16343 OnesOrZeroesF); 16344 SDValue ANDed = DAG.getNode(ISD::AND, DL, MVT::i32, OnesOrZeroesI, 16345 DAG.getConstant(1, MVT::i32)); 16346 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ANDed); 16347 return OneBitOfTruth; 16348 } 16349 } 16350 } 16351 } 16352 return SDValue(); 16353} 16354 16355/// CanFoldXORWithAllOnes - Test whether the XOR operand is a AllOnes vector 16356/// so it can be folded inside ANDNP. 16357static bool CanFoldXORWithAllOnes(const SDNode *N) { 16358 EVT VT = N->getValueType(0); 16359 16360 // Match direct AllOnes for 128 and 256-bit vectors 16361 if (ISD::isBuildVectorAllOnes(N)) 16362 return true; 16363 16364 // Look through a bit convert. 16365 if (N->getOpcode() == ISD::BITCAST) 16366 N = N->getOperand(0).getNode(); 16367 16368 // Sometimes the operand may come from a insert_subvector building a 256-bit 16369 // allones vector 16370 if (VT.is256BitVector() && 16371 N->getOpcode() == ISD::INSERT_SUBVECTOR) { 16372 SDValue V1 = N->getOperand(0); 16373 SDValue V2 = N->getOperand(1); 16374 16375 if (V1.getOpcode() == ISD::INSERT_SUBVECTOR && 16376 V1.getOperand(0).getOpcode() == ISD::UNDEF && 16377 ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) && 16378 ISD::isBuildVectorAllOnes(V2.getNode())) 16379 return true; 16380 } 16381 16382 return false; 16383} 16384 16385// On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized 16386// register. In most cases we actually compare or select YMM-sized registers 16387// and mixing the two types creates horrible code. This method optimizes 16388// some of the transition sequences. 
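// For example, (sign_extend v8i32 (xor (trunc v8i16 X), (trunc v8i16 Y))), where
// X and Y are the original v8i32 values, is rewritten to perform the xor on the
// v8i32 inputs followed by an in-register sign extend, avoiding the YMM/XMM
// round trip.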
16389static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG, 16390 TargetLowering::DAGCombinerInfo &DCI, 16391 const X86Subtarget *Subtarget) { 16392 EVT VT = N->getValueType(0); 16393 if (!VT.is256BitVector()) 16394 return SDValue(); 16395 16396 assert((N->getOpcode() == ISD::ANY_EXTEND || 16397 N->getOpcode() == ISD::ZERO_EXTEND || 16398 N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node"); 16399 16400 SDValue Narrow = N->getOperand(0); 16401 EVT NarrowVT = Narrow->getValueType(0); 16402 if (!NarrowVT.is128BitVector()) 16403 return SDValue(); 16404 16405 if (Narrow->getOpcode() != ISD::XOR && 16406 Narrow->getOpcode() != ISD::AND && 16407 Narrow->getOpcode() != ISD::OR) 16408 return SDValue(); 16409 16410 SDValue N0 = Narrow->getOperand(0); 16411 SDValue N1 = Narrow->getOperand(1); 16412 DebugLoc DL = Narrow->getDebugLoc(); 16413 16414 // The Left side has to be a trunc. 16415 if (N0.getOpcode() != ISD::TRUNCATE) 16416 return SDValue(); 16417 16418 // The type of the truncated inputs. 16419 EVT WideVT = N0->getOperand(0)->getValueType(0); 16420 if (WideVT != VT) 16421 return SDValue(); 16422 16423 // The right side has to be a 'trunc' or a constant vector. 16424 bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE; 16425 bool RHSConst = (isSplatVector(N1.getNode()) && 16426 isa<ConstantSDNode>(N1->getOperand(0))); 16427 if (!RHSTrunc && !RHSConst) 16428 return SDValue(); 16429 16430 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 16431 16432 if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), WideVT)) 16433 return SDValue(); 16434 16435 // Set N0 and N1 to hold the inputs to the new wide operation. 16436 N0 = N0->getOperand(0); 16437 if (RHSConst) { 16438 N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT.getScalarType(), 16439 N1->getOperand(0)); 16440 SmallVector<SDValue, 8> C(WideVT.getVectorNumElements(), N1); 16441 N1 = DAG.getNode(ISD::BUILD_VECTOR, DL, WideVT, &C[0], C.size()); 16442 } else if (RHSTrunc) { 16443 N1 = N1->getOperand(0); 16444 } 16445 16446 // Generate the wide operation. 
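  // e.g. a zero_extend of (and (trunc X), (trunc Y)) becomes (and X, Y) in the
  // wide type, masked below so only the original narrow bits survive.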
16447 SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, WideVT, N0, N1); 16448 unsigned Opcode = N->getOpcode(); 16449 switch (Opcode) { 16450 case ISD::ANY_EXTEND: 16451 return Op; 16452 case ISD::ZERO_EXTEND: { 16453 unsigned InBits = NarrowVT.getScalarType().getSizeInBits(); 16454 APInt Mask = APInt::getAllOnesValue(InBits); 16455 Mask = Mask.zext(VT.getScalarType().getSizeInBits()); 16456 return DAG.getNode(ISD::AND, DL, VT, 16457 Op, DAG.getConstant(Mask, VT)); 16458 } 16459 case ISD::SIGN_EXTEND: 16460 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, 16461 Op, DAG.getValueType(NarrowVT)); 16462 default: 16463 llvm_unreachable("Unexpected opcode"); 16464 } 16465} 16466 16467static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG, 16468 TargetLowering::DAGCombinerInfo &DCI, 16469 const X86Subtarget *Subtarget) { 16470 EVT VT = N->getValueType(0); 16471 if (DCI.isBeforeLegalizeOps()) 16472 return SDValue(); 16473 16474 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget); 16475 if (R.getNode()) 16476 return R; 16477 16478 // Create BLSI, and BLSR instructions 16479 // BLSI is X & (-X) 16480 // BLSR is X & (X-1) 16481 if (Subtarget->hasBMI() && (VT == MVT::i32 || VT == MVT::i64)) { 16482 SDValue N0 = N->getOperand(0); 16483 SDValue N1 = N->getOperand(1); 16484 DebugLoc DL = N->getDebugLoc(); 16485 16486 // Check LHS for neg 16487 if (N0.getOpcode() == ISD::SUB && N0.getOperand(1) == N1 && 16488 isZero(N0.getOperand(0))) 16489 return DAG.getNode(X86ISD::BLSI, DL, VT, N1); 16490 16491 // Check RHS for neg 16492 if (N1.getOpcode() == ISD::SUB && N1.getOperand(1) == N0 && 16493 isZero(N1.getOperand(0))) 16494 return DAG.getNode(X86ISD::BLSI, DL, VT, N0); 16495 16496 // Check LHS for X-1 16497 if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1 && 16498 isAllOnes(N0.getOperand(1))) 16499 return DAG.getNode(X86ISD::BLSR, DL, VT, N1); 16500 16501 // Check RHS for X-1 16502 if (N1.getOpcode() == ISD::ADD && N1.getOperand(0) == N0 && 16503 isAllOnes(N1.getOperand(1))) 16504 return DAG.getNode(X86ISD::BLSR, DL, VT, N0); 16505 16506 return SDValue(); 16507 } 16508 16509 // Want to form ANDNP nodes: 16510 // 1) In the hopes of then easily combining them with OR and AND nodes 16511 // to form PBLEND/PSIGN. 
16512 // 2) To match ANDN packed intrinsics 16513 if (VT != MVT::v2i64 && VT != MVT::v4i64) 16514 return SDValue(); 16515 16516 SDValue N0 = N->getOperand(0); 16517 SDValue N1 = N->getOperand(1); 16518 DebugLoc DL = N->getDebugLoc(); 16519 16520 // Check LHS for vnot 16521 if (N0.getOpcode() == ISD::XOR && 16522 //ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode())) 16523 CanFoldXORWithAllOnes(N0.getOperand(1).getNode())) 16524 return DAG.getNode(X86ISD::ANDNP, DL, VT, N0.getOperand(0), N1); 16525 16526 // Check RHS for vnot 16527 if (N1.getOpcode() == ISD::XOR && 16528 //ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode())) 16529 CanFoldXORWithAllOnes(N1.getOperand(1).getNode())) 16530 return DAG.getNode(X86ISD::ANDNP, DL, VT, N1.getOperand(0), N0); 16531 16532 return SDValue(); 16533} 16534 16535static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG, 16536 TargetLowering::DAGCombinerInfo &DCI, 16537 const X86Subtarget *Subtarget) { 16538 EVT VT = N->getValueType(0); 16539 if (DCI.isBeforeLegalizeOps()) 16540 return SDValue(); 16541 16542 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget); 16543 if (R.getNode()) 16544 return R; 16545 16546 SDValue N0 = N->getOperand(0); 16547 SDValue N1 = N->getOperand(1); 16548 16549 // look for psign/blend 16550 if (VT == MVT::v2i64 || VT == MVT::v4i64) { 16551 if (!Subtarget->hasSSSE3() || 16552 (VT == MVT::v4i64 && !Subtarget->hasInt256())) 16553 return SDValue(); 16554 16555 // Canonicalize pandn to RHS 16556 if (N0.getOpcode() == X86ISD::ANDNP) 16557 std::swap(N0, N1); 16558 // or (and (m, y), (pandn m, x)) 16559 if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) { 16560 SDValue Mask = N1.getOperand(0); 16561 SDValue X = N1.getOperand(1); 16562 SDValue Y; 16563 if (N0.getOperand(0) == Mask) 16564 Y = N0.getOperand(1); 16565 if (N0.getOperand(1) == Mask) 16566 Y = N0.getOperand(0); 16567 16568 // Check to see if the mask appeared in both the AND and ANDNP and 16569 if (!Y.getNode()) 16570 return SDValue(); 16571 16572 // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them. 16573 // Look through mask bitcast. 16574 if (Mask.getOpcode() == ISD::BITCAST) 16575 Mask = Mask.getOperand(0); 16576 if (X.getOpcode() == ISD::BITCAST) 16577 X = X.getOperand(0); 16578 if (Y.getOpcode() == ISD::BITCAST) 16579 Y = Y.getOperand(0); 16580 16581 EVT MaskVT = Mask.getValueType(); 16582 16583 // Validate that the Mask operand is a vector sra node. 16584 // FIXME: what to do for bytes, since there is a psignb/pblendvb, but 16585 // there is no psrai.b 16586 unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits(); 16587 unsigned SraAmt = ~0; 16588 if (Mask.getOpcode() == ISD::SRA) { 16589 SDValue Amt = Mask.getOperand(1); 16590 if (isSplatVector(Amt.getNode())) { 16591 SDValue SclrAmt = Amt->getOperand(0); 16592 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(SclrAmt)) 16593 SraAmt = C->getZExtValue(); 16594 } 16595 } else if (Mask.getOpcode() == X86ISD::VSRAI) { 16596 SDValue SraC = Mask.getOperand(1); 16597 SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue(); 16598 } 16599 if ((SraAmt + 1) != EltBits) 16600 return SDValue(); 16601 16602 DebugLoc DL = N->getDebugLoc(); 16603 16604 // Now we know we at least have a plendvb with the mask val. See if 16605 // we can form a psignb/w/d. 
16606 // psign = x.type == y.type == mask.type && y = sub(0, x); 16607 if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X && 16608 ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) && 16609 X.getValueType() == MaskVT && Y.getValueType() == MaskVT) { 16610 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) && 16611 "Unsupported VT for PSIGN"); 16612 Mask = DAG.getNode(X86ISD::PSIGN, DL, MaskVT, X, Mask.getOperand(0)); 16613 return DAG.getNode(ISD::BITCAST, DL, VT, Mask); 16614 } 16615 // PBLENDVB only available on SSE 4.1 16616 if (!Subtarget->hasSSE41()) 16617 return SDValue(); 16618 16619 EVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8; 16620 16621 X = DAG.getNode(ISD::BITCAST, DL, BlendVT, X); 16622 Y = DAG.getNode(ISD::BITCAST, DL, BlendVT, Y); 16623 Mask = DAG.getNode(ISD::BITCAST, DL, BlendVT, Mask); 16624 Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X); 16625 return DAG.getNode(ISD::BITCAST, DL, VT, Mask); 16626 } 16627 } 16628 16629 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64) 16630 return SDValue(); 16631 16632 // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c) 16633 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL) 16634 std::swap(N0, N1); 16635 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL) 16636 return SDValue(); 16637 if (!N0.hasOneUse() || !N1.hasOneUse()) 16638 return SDValue(); 16639 16640 SDValue ShAmt0 = N0.getOperand(1); 16641 if (ShAmt0.getValueType() != MVT::i8) 16642 return SDValue(); 16643 SDValue ShAmt1 = N1.getOperand(1); 16644 if (ShAmt1.getValueType() != MVT::i8) 16645 return SDValue(); 16646 if (ShAmt0.getOpcode() == ISD::TRUNCATE) 16647 ShAmt0 = ShAmt0.getOperand(0); 16648 if (ShAmt1.getOpcode() == ISD::TRUNCATE) 16649 ShAmt1 = ShAmt1.getOperand(0); 16650 16651 DebugLoc DL = N->getDebugLoc(); 16652 unsigned Opc = X86ISD::SHLD; 16653 SDValue Op0 = N0.getOperand(0); 16654 SDValue Op1 = N1.getOperand(0); 16655 if (ShAmt0.getOpcode() == ISD::SUB) { 16656 Opc = X86ISD::SHRD; 16657 std::swap(Op0, Op1); 16658 std::swap(ShAmt0, ShAmt1); 16659 } 16660 16661 unsigned Bits = VT.getSizeInBits(); 16662 if (ShAmt1.getOpcode() == ISD::SUB) { 16663 SDValue Sum = ShAmt1.getOperand(0); 16664 if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) { 16665 SDValue ShAmt1Op1 = ShAmt1.getOperand(1); 16666 if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE) 16667 ShAmt1Op1 = ShAmt1Op1.getOperand(0); 16668 if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0) 16669 return DAG.getNode(Opc, DL, VT, 16670 Op0, Op1, 16671 DAG.getNode(ISD::TRUNCATE, DL, 16672 MVT::i8, ShAmt0)); 16673 } 16674 } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) { 16675 ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0); 16676 if (ShAmt0C && 16677 ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits) 16678 return DAG.getNode(Opc, DL, VT, 16679 N0.getOperand(0), N1.getOperand(0), 16680 DAG.getNode(ISD::TRUNCATE, DL, 16681 MVT::i8, ShAmt0)); 16682 } 16683 16684 return SDValue(); 16685} 16686 16687// Generate NEG and CMOV for integer abs. 16688static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) { 16689 EVT VT = N->getValueType(0); 16690 16691 // Since X86 does not have CMOV for 8-bit integer, we don't convert 16692 // 8-bit integer abs to NEG and CMOV. 
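  // For wider types the XOR(ADD(X, SRA(X, width-1)), SRA(X, width-1)) form
  // matched below becomes roughly: movl %edi, %eax; negl %eax; cmovll %edi, %eax.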
16693 if (VT.isInteger() && VT.getSizeInBits() == 8) 16694 return SDValue(); 16695 16696 SDValue N0 = N->getOperand(0); 16697 SDValue N1 = N->getOperand(1); 16698 DebugLoc DL = N->getDebugLoc(); 16699 16700 // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1) 16701 // and change it to SUB and CMOV. 16702 if (VT.isInteger() && N->getOpcode() == ISD::XOR && 16703 N0.getOpcode() == ISD::ADD && 16704 N0.getOperand(1) == N1 && 16705 N1.getOpcode() == ISD::SRA && 16706 N1.getOperand(0) == N0.getOperand(0)) 16707 if (ConstantSDNode *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1))) 16708 if (Y1C->getAPIntValue() == VT.getSizeInBits()-1) { 16709 // Generate SUB & CMOV. 16710 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32), 16711 DAG.getConstant(0, VT), N0.getOperand(0)); 16712 16713 SDValue Ops[] = { N0.getOperand(0), Neg, 16714 DAG.getConstant(X86::COND_GE, MVT::i8), 16715 SDValue(Neg.getNode(), 1) }; 16716 return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue), 16717 Ops, array_lengthof(Ops)); 16718 } 16719 return SDValue(); 16720} 16721 16722// PerformXorCombine - Attempts to turn XOR nodes into BLSMSK nodes 16723static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG, 16724 TargetLowering::DAGCombinerInfo &DCI, 16725 const X86Subtarget *Subtarget) { 16726 EVT VT = N->getValueType(0); 16727 if (DCI.isBeforeLegalizeOps()) 16728 return SDValue(); 16729 16730 if (Subtarget->hasCMov()) { 16731 SDValue RV = performIntegerAbsCombine(N, DAG); 16732 if (RV.getNode()) 16733 return RV; 16734 } 16735 16736 // Try forming BMI if it is available. 16737 if (!Subtarget->hasBMI()) 16738 return SDValue(); 16739 16740 if (VT != MVT::i32 && VT != MVT::i64) 16741 return SDValue(); 16742 16743 assert(Subtarget->hasBMI() && "Creating BLSMSK requires BMI instructions"); 16744 16745 // Create BLSMSK instructions by finding X ^ (X-1) 16746 SDValue N0 = N->getOperand(0); 16747 SDValue N1 = N->getOperand(1); 16748 DebugLoc DL = N->getDebugLoc(); 16749 16750 if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1 && 16751 isAllOnes(N0.getOperand(1))) 16752 return DAG.getNode(X86ISD::BLSMSK, DL, VT, N1); 16753 16754 if (N1.getOpcode() == ISD::ADD && N1.getOperand(0) == N0 && 16755 isAllOnes(N1.getOperand(1))) 16756 return DAG.getNode(X86ISD::BLSMSK, DL, VT, N0); 16757 16758 return SDValue(); 16759} 16760 16761/// PerformLOADCombine - Do target-specific dag combines on LOAD nodes. 16762static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG, 16763 TargetLowering::DAGCombinerInfo &DCI, 16764 const X86Subtarget *Subtarget) { 16765 LoadSDNode *Ld = cast<LoadSDNode>(N); 16766 EVT RegVT = Ld->getValueType(0); 16767 EVT MemVT = Ld->getMemoryVT(); 16768 DebugLoc dl = Ld->getDebugLoc(); 16769 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 16770 unsigned RegSz = RegVT.getSizeInBits(); 16771 16772 // On Sandybridge unaligned 256bit loads are inefficient. 
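  // Without AVX2, split such a load into two 16-byte halves and reassemble them
  // with insert_subvector; e.g. an unaligned v8f32 load is rebuilt from two
  // 128-bit loads joined by vinsertf128.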
16773 ISD::LoadExtType Ext = Ld->getExtensionType(); 16774 unsigned Alignment = Ld->getAlignment(); 16775 bool IsAligned = Alignment == 0 || Alignment >= MemVT.getSizeInBits()/8; 16776 if (RegVT.is256BitVector() && !Subtarget->hasInt256() && 16777 !DCI.isBeforeLegalizeOps() && !IsAligned && Ext == ISD::NON_EXTLOAD) { 16778 unsigned NumElems = RegVT.getVectorNumElements(); 16779 if (NumElems < 2) 16780 return SDValue(); 16781 16782 SDValue Ptr = Ld->getBasePtr(); 16783 SDValue Increment = DAG.getConstant(16, TLI.getPointerTy()); 16784 16785 EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(), 16786 NumElems/2); 16787 SDValue Load1 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr, 16788 Ld->getPointerInfo(), Ld->isVolatile(), 16789 Ld->isNonTemporal(), Ld->isInvariant(), 16790 Alignment); 16791 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 16792 SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr, 16793 Ld->getPointerInfo(), Ld->isVolatile(), 16794 Ld->isNonTemporal(), Ld->isInvariant(), 16795 std::min(16U, Alignment)); 16796 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 16797 Load1.getValue(1), 16798 Load2.getValue(1)); 16799 16800 SDValue NewVec = DAG.getUNDEF(RegVT); 16801 NewVec = Insert128BitVector(NewVec, Load1, 0, DAG, dl); 16802 NewVec = Insert128BitVector(NewVec, Load2, NumElems/2, DAG, dl); 16803 return DCI.CombineTo(N, NewVec, TF, true); 16804 } 16805 16806 // If this is a vector EXT Load then attempt to optimize it using a 16807 // shuffle. If SSSE3 is not available we may emit an illegal shuffle but the 16808 // expansion is still better than scalar code. 16809 // We generate X86ISD::VSEXT for SEXTLOADs if it's available, otherwise we'll 16810 // emit a shuffle and a arithmetic shift. 16811 // TODO: It is possible to support ZExt by zeroing the undef values 16812 // during the shuffle phase or after the shuffle. 16813 if (RegVT.isVector() && RegVT.isInteger() && Subtarget->hasSSE2() && 16814 (Ext == ISD::EXTLOAD || Ext == ISD::SEXTLOAD)) { 16815 assert(MemVT != RegVT && "Cannot extend to the same type"); 16816 assert(MemVT.isVector() && "Must load a vector from memory"); 16817 16818 unsigned NumElems = RegVT.getVectorNumElements(); 16819 unsigned MemSz = MemVT.getSizeInBits(); 16820 assert(RegSz > MemSz && "Register size must be greater than the mem size"); 16821 16822 if (Ext == ISD::SEXTLOAD && RegSz == 256 && !Subtarget->hasInt256()) 16823 return SDValue(); 16824 16825 // All sizes must be a power of two. 16826 if (!isPowerOf2_32(RegSz * MemSz * NumElems)) 16827 return SDValue(); 16828 16829 // Attempt to load the original value using scalar loads. 16830 // Find the largest scalar type that divides the total loaded size. 16831 MVT SclrLoadTy = MVT::i8; 16832 for (unsigned tp = MVT::FIRST_INTEGER_VALUETYPE; 16833 tp < MVT::LAST_INTEGER_VALUETYPE; ++tp) { 16834 MVT Tp = (MVT::SimpleValueType)tp; 16835 if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) { 16836 SclrLoadTy = Tp; 16837 } 16838 } 16839 16840 // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64. 16841 if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 && 16842 (64 <= MemSz)) 16843 SclrLoadTy = MVT::f64; 16844 16845 // Calculate the number of scalar loads that we need to perform 16846 // in order to load our vector from memory. 
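    // For example, 64 bits of memory read with a legal i64 (or f64) scalar
    // type takes a single load, while 128 bits would take two.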
16847 unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits(); 16848 if (Ext == ISD::SEXTLOAD && NumLoads > 1) 16849 return SDValue(); 16850 16851 unsigned loadRegZize = RegSz; 16852 if (Ext == ISD::SEXTLOAD && RegSz == 256) 16853 loadRegZize /= 2; 16854 16855 // Represent our vector as a sequence of elements which are the 16856 // largest scalar that we can load. 16857 EVT LoadUnitVecVT = EVT::getVectorVT(*DAG.getContext(), SclrLoadTy, 16858 loadRegZize/SclrLoadTy.getSizeInBits()); 16859 16860 // Represent the data using the same element type that is stored in 16861 // memory. In practice, we ''widen'' MemVT. 16862 EVT WideVecVT = 16863 EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(), 16864 loadRegZize/MemVT.getScalarType().getSizeInBits()); 16865 16866 assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() && 16867 "Invalid vector type"); 16868 16869 // We can't shuffle using an illegal type. 16870 if (!TLI.isTypeLegal(WideVecVT)) 16871 return SDValue(); 16872 16873 SmallVector<SDValue, 8> Chains; 16874 SDValue Ptr = Ld->getBasePtr(); 16875 SDValue Increment = DAG.getConstant(SclrLoadTy.getSizeInBits()/8, 16876 TLI.getPointerTy()); 16877 SDValue Res = DAG.getUNDEF(LoadUnitVecVT); 16878 16879 for (unsigned i = 0; i < NumLoads; ++i) { 16880 // Perform a single load. 16881 SDValue ScalarLoad = DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), 16882 Ptr, Ld->getPointerInfo(), 16883 Ld->isVolatile(), Ld->isNonTemporal(), 16884 Ld->isInvariant(), Ld->getAlignment()); 16885 Chains.push_back(ScalarLoad.getValue(1)); 16886 // Create the first element type using SCALAR_TO_VECTOR in order to avoid 16887 // another round of DAGCombining. 16888 if (i == 0) 16889 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad); 16890 else 16891 Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res, 16892 ScalarLoad, DAG.getIntPtrConstant(i)); 16893 16894 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 16895 } 16896 16897 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], 16898 Chains.size()); 16899 16900 // Bitcast the loaded value to a vector of the original element type, in 16901 // the size of the target vector type. 16902 SDValue SlicedVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Res); 16903 unsigned SizeRatio = RegSz/MemSz; 16904 16905 if (Ext == ISD::SEXTLOAD) { 16906 // If we have SSE4.1 we can directly emit a VSEXT node. 16907 if (Subtarget->hasSSE41()) { 16908 SDValue Sext = DAG.getNode(X86ISD::VSEXT, dl, RegVT, SlicedVec); 16909 return DCI.CombineTo(N, Sext, TF, true); 16910 } 16911 16912 // Otherwise we'll shuffle the small elements in the high bits of the 16913 // larger type and perform an arithmetic shift. If the shift is not legal 16914 // it's better to scalarize. 16915 if (!TLI.isOperationLegalOrCustom(ISD::SRA, RegVT)) 16916 return SDValue(); 16917 16918 // Redistribute the loaded elements into the different locations. 16919 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); 16920 for (unsigned i = 0; i != NumElems; ++i) 16921 ShuffleVec[i*SizeRatio + SizeRatio-1] = i; 16922 16923 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec, 16924 DAG.getUNDEF(WideVecVT), 16925 &ShuffleVec[0]); 16926 16927 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff); 16928 16929 // Build the arithmetic shift. 
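      // The shuffle above placed each loaded element in the top of its wider
      // lane, so shifting right arithmetically by
      // (wide-elt-bits - narrow-elt-bits) sign-extends it in place;
      // e.g. widening i16 elements to i32 shifts by 16.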
16930 unsigned Amt = RegVT.getVectorElementType().getSizeInBits() - 16931 MemVT.getVectorElementType().getSizeInBits(); 16932 Shuff = DAG.getNode(ISD::SRA, dl, RegVT, Shuff, 16933 DAG.getConstant(Amt, RegVT)); 16934 16935 return DCI.CombineTo(N, Shuff, TF, true); 16936 } 16937 16938 // Redistribute the loaded elements into the different locations. 16939 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); 16940 for (unsigned i = 0; i != NumElems; ++i) 16941 ShuffleVec[i*SizeRatio] = i; 16942 16943 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec, 16944 DAG.getUNDEF(WideVecVT), 16945 &ShuffleVec[0]); 16946 16947 // Bitcast to the requested type. 16948 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff); 16949 // Replace the original load with the new sequence 16950 // and return the new chain. 16951 return DCI.CombineTo(N, Shuff, TF, true); 16952 } 16953 16954 return SDValue(); 16955} 16956 16957/// PerformSTORECombine - Do target-specific dag combines on STORE nodes. 16958static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG, 16959 const X86Subtarget *Subtarget) { 16960 StoreSDNode *St = cast<StoreSDNode>(N); 16961 EVT VT = St->getValue().getValueType(); 16962 EVT StVT = St->getMemoryVT(); 16963 DebugLoc dl = St->getDebugLoc(); 16964 SDValue StoredVal = St->getOperand(1); 16965 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 16966 16967 // If we are saving a concatenation of two XMM registers, perform two stores. 16968 // On Sandy Bridge, 256-bit memory operations are executed by two 16969 // 128-bit ports. However, on Haswell it is better to issue a single 256-bit 16970 // memory operation. 16971 unsigned Alignment = St->getAlignment(); 16972 bool IsAligned = Alignment == 0 || Alignment >= VT.getSizeInBits()/8; 16973 if (VT.is256BitVector() && !Subtarget->hasInt256() && 16974 StVT == VT && !IsAligned) { 16975 unsigned NumElems = VT.getVectorNumElements(); 16976 if (NumElems < 2) 16977 return SDValue(); 16978 16979 SDValue Value0 = Extract128BitVector(StoredVal, 0, DAG, dl); 16980 SDValue Value1 = Extract128BitVector(StoredVal, NumElems/2, DAG, dl); 16981 16982 SDValue Stride = DAG.getConstant(16, TLI.getPointerTy()); 16983 SDValue Ptr0 = St->getBasePtr(); 16984 SDValue Ptr1 = DAG.getNode(ISD::ADD, dl, Ptr0.getValueType(), Ptr0, Stride); 16985 16986 SDValue Ch0 = DAG.getStore(St->getChain(), dl, Value0, Ptr0, 16987 St->getPointerInfo(), St->isVolatile(), 16988 St->isNonTemporal(), Alignment); 16989 SDValue Ch1 = DAG.getStore(St->getChain(), dl, Value1, Ptr1, 16990 St->getPointerInfo(), St->isVolatile(), 16991 St->isNonTemporal(), 16992 std::min(16U, Alignment)); 16993 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1); 16994 } 16995 16996 // Optimize trunc store (of multiple scalars) to shuffle and store. 16997 // First, pack all of the elements in one place. Next, store to memory 16998 // in fewer chunks. 16999 if (St->isTruncatingStore() && VT.isVector()) { 17000 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 17001 unsigned NumElems = VT.getVectorNumElements(); 17002 assert(StVT != VT && "Cannot truncate to the same type"); 17003 unsigned FromSz = VT.getVectorElementType().getSizeInBits(); 17004 unsigned ToSz = StVT.getVectorElementType().getSizeInBits(); 17005 17006 // From, To sizes and ElemCount must be pow of two 17007 if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue(); 17008 // We are going to use the original vector elt for storing. 
17009 // Accumulated smaller vector elements must be a multiple of the store size. 17010 if (0 != (NumElems * FromSz) % ToSz) return SDValue(); 17011 17012 unsigned SizeRatio = FromSz / ToSz; 17013 17014 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits()); 17015 17016 // Create a type on which we perform the shuffle 17017 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), 17018 StVT.getScalarType(), NumElems*SizeRatio); 17019 17020 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits()); 17021 17022 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, St->getValue()); 17023 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); 17024 for (unsigned i = 0; i != NumElems; ++i) 17025 ShuffleVec[i] = i * SizeRatio; 17026 17027 // Can't shuffle using an illegal type. 17028 if (!TLI.isTypeLegal(WideVecVT)) 17029 return SDValue(); 17030 17031 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec, 17032 DAG.getUNDEF(WideVecVT), 17033 &ShuffleVec[0]); 17034 // At this point all of the data is stored at the bottom of the 17035 // register. We now need to save it to mem. 17036 17037 // Find the largest store unit 17038 MVT StoreType = MVT::i8; 17039 for (unsigned tp = MVT::FIRST_INTEGER_VALUETYPE; 17040 tp < MVT::LAST_INTEGER_VALUETYPE; ++tp) { 17041 MVT Tp = (MVT::SimpleValueType)tp; 17042 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz) 17043 StoreType = Tp; 17044 } 17045 17046 // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64. 17047 if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 && 17048 (64 <= NumElems * ToSz)) 17049 StoreType = MVT::f64; 17050 17051 // Bitcast the original vector into a vector of store-size units 17052 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(), 17053 StoreType, VT.getSizeInBits()/StoreType.getSizeInBits()); 17054 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits()); 17055 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, dl, StoreVecVT, Shuff); 17056 SmallVector<SDValue, 8> Chains; 17057 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8, 17058 TLI.getPointerTy()); 17059 SDValue Ptr = St->getBasePtr(); 17060 17061 // Perform one or more big stores into memory. 17062 for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) { 17063 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, 17064 StoreType, ShuffWide, 17065 DAG.getIntPtrConstant(i)); 17066 SDValue Ch = DAG.getStore(St->getChain(), dl, SubVec, Ptr, 17067 St->getPointerInfo(), St->isVolatile(), 17068 St->isNonTemporal(), St->getAlignment()); 17069 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 17070 Chains.push_back(Ch); 17071 } 17072 17073 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], 17074 Chains.size()); 17075 } 17076 17077 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering 17078 // the FP state in cases where an emms may be missing. 17079 // A preferable solution to the general problem is to figure out the right 17080 // places to insert EMMS. This qualifies as a quick hack. 17081 17082 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode. 17083 if (VT.getSizeInBits() != 64) 17084 return SDValue(); 17085 17086 const Function *F = DAG.getMachineFunction().getFunction(); 17087 bool NoImplicitFloatOps = F->getAttributes(). 
17088 hasAttribute(AttributeSet::FunctionIndex, Attribute::NoImplicitFloat); 17089 bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps 17090 && Subtarget->hasSSE2(); 17091 if ((VT.isVector() || 17092 (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) && 17093 isa<LoadSDNode>(St->getValue()) && 17094 !cast<LoadSDNode>(St->getValue())->isVolatile() && 17095 St->getChain().hasOneUse() && !St->isVolatile()) { 17096 SDNode* LdVal = St->getValue().getNode(); 17097 LoadSDNode *Ld = 0; 17098 int TokenFactorIndex = -1; 17099 SmallVector<SDValue, 8> Ops; 17100 SDNode* ChainVal = St->getChain().getNode(); 17101 // Must be a store of a load. We currently handle two cases: the load 17102 // is a direct child, and it's under an intervening TokenFactor. It is 17103 // possible to dig deeper under nested TokenFactors. 17104 if (ChainVal == LdVal) 17105 Ld = cast<LoadSDNode>(St->getChain()); 17106 else if (St->getValue().hasOneUse() && 17107 ChainVal->getOpcode() == ISD::TokenFactor) { 17108 for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) { 17109 if (ChainVal->getOperand(i).getNode() == LdVal) { 17110 TokenFactorIndex = i; 17111 Ld = cast<LoadSDNode>(St->getValue()); 17112 } else 17113 Ops.push_back(ChainVal->getOperand(i)); 17114 } 17115 } 17116 17117 if (!Ld || !ISD::isNormalLoad(Ld)) 17118 return SDValue(); 17119 17120 // If this is not the MMX case, i.e. we are just turning i64 load/store 17121 // into f64 load/store, avoid the transformation if there are multiple 17122 // uses of the loaded value. 17123 if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0)) 17124 return SDValue(); 17125 17126 DebugLoc LdDL = Ld->getDebugLoc(); 17127 DebugLoc StDL = N->getDebugLoc(); 17128 // If we are a 64-bit capable x86, lower to a single movq load/store pair. 17129 // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store 17130 // pair instead. 17131 if (Subtarget->is64Bit() || F64IsLegal) { 17132 EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64; 17133 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(), 17134 Ld->getPointerInfo(), Ld->isVolatile(), 17135 Ld->isNonTemporal(), Ld->isInvariant(), 17136 Ld->getAlignment()); 17137 SDValue NewChain = NewLd.getValue(1); 17138 if (TokenFactorIndex != -1) { 17139 Ops.push_back(NewChain); 17140 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0], 17141 Ops.size()); 17142 } 17143 return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(), 17144 St->getPointerInfo(), 17145 St->isVolatile(), St->isNonTemporal(), 17146 St->getAlignment()); 17147 } 17148 17149 // Otherwise, lower to two pairs of 32-bit loads / stores. 
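    // That is, move the 64-bit value as two i32 halves: load from the base
    // address and base+4, then store the halves back at the same offsets of
    // the destination, stitching the chains together with TokenFactors.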
17150 SDValue LoAddr = Ld->getBasePtr(); 17151 SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr, 17152 DAG.getConstant(4, MVT::i32)); 17153 17154 SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr, 17155 Ld->getPointerInfo(), 17156 Ld->isVolatile(), Ld->isNonTemporal(), 17157 Ld->isInvariant(), Ld->getAlignment()); 17158 SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr, 17159 Ld->getPointerInfo().getWithOffset(4), 17160 Ld->isVolatile(), Ld->isNonTemporal(), 17161 Ld->isInvariant(), 17162 MinAlign(Ld->getAlignment(), 4)); 17163 17164 SDValue NewChain = LoLd.getValue(1); 17165 if (TokenFactorIndex != -1) { 17166 Ops.push_back(LoLd); 17167 Ops.push_back(HiLd); 17168 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0], 17169 Ops.size()); 17170 } 17171 17172 LoAddr = St->getBasePtr(); 17173 HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr, 17174 DAG.getConstant(4, MVT::i32)); 17175 17176 SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr, 17177 St->getPointerInfo(), 17178 St->isVolatile(), St->isNonTemporal(), 17179 St->getAlignment()); 17180 SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr, 17181 St->getPointerInfo().getWithOffset(4), 17182 St->isVolatile(), 17183 St->isNonTemporal(), 17184 MinAlign(St->getAlignment(), 4)); 17185 return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt); 17186 } 17187 return SDValue(); 17188} 17189 17190/// isHorizontalBinOp - Return 'true' if this vector operation is "horizontal" 17191/// and return the operands for the horizontal operation in LHS and RHS. A 17192/// horizontal operation performs the binary operation on successive elements 17193/// of its first operand, then on successive elements of its second operand, 17194/// returning the resulting values in a vector. For example, if 17195/// A = < float a0, float a1, float a2, float a3 > 17196/// and 17197/// B = < float b0, float b1, float b2, float b3 > 17198/// then the result of doing a horizontal operation on A and B is 17199/// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >. 17200/// In short, LHS and RHS are inspected to see if LHS op RHS is of the form 17201/// A horizontal-op B, for some already available A and B, and if so then LHS is 17202/// set to A, RHS to B, and the routine returns 'true'. 17203/// Note that the binary operation should have the property that if one of the 17204/// operands is UNDEF then the result is UNDEF. 17205static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) { 17206 // Look for the following pattern: if 17207 // A = < float a0, float a1, float a2, float a3 > 17208 // B = < float b0, float b1, float b2, float b3 > 17209 // and 17210 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6> 17211 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7> 17212 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 > 17213 // which is A horizontal-op B. 17214 17215 // At least one of the operands should be a vector shuffle. 17216 if (LHS.getOpcode() != ISD::VECTOR_SHUFFLE && 17217 RHS.getOpcode() != ISD::VECTOR_SHUFFLE) 17218 return false; 17219 17220 EVT VT = LHS.getValueType(); 17221 17222 assert((VT.is128BitVector() || VT.is256BitVector()) && 17223 "Unsupported vector type for horizontal add/sub"); 17224 17225 // Handle 128 and 256-bit vector lengths. AVX defines horizontal add/sub to 17226 // operate independently on 128-bit lanes. 
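  // For example, for v8f32 the expected result of A horizontal-op B is
  //   < a0 op a1, a2 op a3, b0 op b1, b2 op b3,
  //     a4 op a5, a6 op a7, b4 op b5, b6 op b7 >,
  // i.e. the A/B interleaving repeats within each 128-bit lane.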
17227 unsigned NumElts = VT.getVectorNumElements(); 17228 unsigned NumLanes = VT.getSizeInBits()/128; 17229 unsigned NumLaneElts = NumElts / NumLanes; 17230 assert((NumLaneElts % 2 == 0) && 17231 "Vector type should have an even number of elements in each lane"); 17232 unsigned HalfLaneElts = NumLaneElts/2; 17233 17234 // View LHS in the form 17235 // LHS = VECTOR_SHUFFLE A, B, LMask 17236 // If LHS is not a shuffle then pretend it is the shuffle 17237 // LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1> 17238 // NOTE: in what follows a default initialized SDValue represents an UNDEF of 17239 // type VT. 17240 SDValue A, B; 17241 SmallVector<int, 16> LMask(NumElts); 17242 if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) { 17243 if (LHS.getOperand(0).getOpcode() != ISD::UNDEF) 17244 A = LHS.getOperand(0); 17245 if (LHS.getOperand(1).getOpcode() != ISD::UNDEF) 17246 B = LHS.getOperand(1); 17247 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask(); 17248 std::copy(Mask.begin(), Mask.end(), LMask.begin()); 17249 } else { 17250 if (LHS.getOpcode() != ISD::UNDEF) 17251 A = LHS; 17252 for (unsigned i = 0; i != NumElts; ++i) 17253 LMask[i] = i; 17254 } 17255 17256 // Likewise, view RHS in the form 17257 // RHS = VECTOR_SHUFFLE C, D, RMask 17258 SDValue C, D; 17259 SmallVector<int, 16> RMask(NumElts); 17260 if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) { 17261 if (RHS.getOperand(0).getOpcode() != ISD::UNDEF) 17262 C = RHS.getOperand(0); 17263 if (RHS.getOperand(1).getOpcode() != ISD::UNDEF) 17264 D = RHS.getOperand(1); 17265 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask(); 17266 std::copy(Mask.begin(), Mask.end(), RMask.begin()); 17267 } else { 17268 if (RHS.getOpcode() != ISD::UNDEF) 17269 C = RHS; 17270 for (unsigned i = 0; i != NumElts; ++i) 17271 RMask[i] = i; 17272 } 17273 17274 // Check that the shuffles are both shuffling the same vectors. 17275 if (!(A == C && B == D) && !(A == D && B == C)) 17276 return false; 17277 17278 // If everything is UNDEF then bail out: it would be better to fold to UNDEF. 17279 if (!A.getNode() && !B.getNode()) 17280 return false; 17281 17282 // If A and B occur in reverse order in RHS, then "swap" them (which means 17283 // rewriting the mask). 17284 if (A != C) 17285 CommuteVectorShuffleMask(RMask, NumElts); 17286 17287 // At this point LHS and RHS are equivalent to 17288 // LHS = VECTOR_SHUFFLE A, B, LMask 17289 // RHS = VECTOR_SHUFFLE A, B, RMask 17290 // Check that the masks correspond to performing a horizontal operation. 17291 for (unsigned i = 0; i != NumElts; ++i) { 17292 int LIdx = LMask[i], RIdx = RMask[i]; 17293 17294 // Ignore any UNDEF components. 17295 if (LIdx < 0 || RIdx < 0 || 17296 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) || 17297 (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts))) 17298 continue; 17299 17300 // Check that successive elements are being operated on. If not, this is 17301 // not a horizontal operation. 17302 unsigned Src = (i/HalfLaneElts) % 2; // each lane is split between srcs 17303 unsigned LaneStart = (i/NumLaneElts) * NumLaneElts; 17304 int Index = 2*(i%HalfLaneElts) + NumElts*Src + LaneStart; 17305 if (!(LIdx == Index && RIdx == Index + 1) && 17306 !(IsCommutative && LIdx == Index + 1 && RIdx == Index)) 17307 return false; 17308 } 17309 17310 LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it. 17311 RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it. 
17312 return true; 17313} 17314 17315/// PerformFADDCombine - Do target-specific dag combines on floating point adds. 17316static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG, 17317 const X86Subtarget *Subtarget) { 17318 EVT VT = N->getValueType(0); 17319 SDValue LHS = N->getOperand(0); 17320 SDValue RHS = N->getOperand(1); 17321 17322 // Try to synthesize horizontal adds from adds of shuffles. 17323 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) || 17324 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) && 17325 isHorizontalBinOp(LHS, RHS, true)) 17326 return DAG.getNode(X86ISD::FHADD, N->getDebugLoc(), VT, LHS, RHS); 17327 return SDValue(); 17328} 17329 17330/// PerformFSUBCombine - Do target-specific dag combines on floating point subs. 17331static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG, 17332 const X86Subtarget *Subtarget) { 17333 EVT VT = N->getValueType(0); 17334 SDValue LHS = N->getOperand(0); 17335 SDValue RHS = N->getOperand(1); 17336 17337 // Try to synthesize horizontal subs from subs of shuffles. 17338 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) || 17339 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) && 17340 isHorizontalBinOp(LHS, RHS, false)) 17341 return DAG.getNode(X86ISD::FHSUB, N->getDebugLoc(), VT, LHS, RHS); 17342 return SDValue(); 17343} 17344 17345/// PerformFORCombine - Do target-specific dag combines on X86ISD::FOR and 17346/// X86ISD::FXOR nodes. 17347static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) { 17348 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR); 17349 // F[X]OR(0.0, x) -> x 17350 // F[X]OR(x, 0.0) -> x 17351 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) 17352 if (C->getValueAPF().isPosZero()) 17353 return N->getOperand(1); 17354 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1))) 17355 if (C->getValueAPF().isPosZero()) 17356 return N->getOperand(0); 17357 return SDValue(); 17358} 17359 17360/// PerformFMinFMaxCombine - Do target-specific dag combines on X86ISD::FMIN and 17361/// X86ISD::FMAX nodes. 17362static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) { 17363 assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX); 17364 17365 // Only perform optimizations if UnsafeMath is used. 17366 if (!DAG.getTarget().Options.UnsafeFPMath) 17367 return SDValue(); 17368 17369 // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes 17370 // into FMINC and FMAXC, which are Commutative operations. 17371 unsigned NewOp = 0; 17372 switch (N->getOpcode()) { 17373 default: llvm_unreachable("unknown opcode"); 17374 case X86ISD::FMIN: NewOp = X86ISD::FMINC; break; 17375 case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break; 17376 } 17377 17378 return DAG.getNode(NewOp, N->getDebugLoc(), N->getValueType(0), 17379 N->getOperand(0), N->getOperand(1)); 17380} 17381 17382/// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes. 
17383static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) { 17384 // FAND(0.0, x) -> 0.0 17385 // FAND(x, 0.0) -> 0.0 17386 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) 17387 if (C->getValueAPF().isPosZero()) 17388 return N->getOperand(0); 17389 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1))) 17390 if (C->getValueAPF().isPosZero()) 17391 return N->getOperand(1); 17392 return SDValue(); 17393} 17394 17395static SDValue PerformBTCombine(SDNode *N, 17396 SelectionDAG &DAG, 17397 TargetLowering::DAGCombinerInfo &DCI) { 17398 // BT ignores high bits in the bit index operand. 17399 SDValue Op1 = N->getOperand(1); 17400 if (Op1.hasOneUse()) { 17401 unsigned BitWidth = Op1.getValueSizeInBits(); 17402 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth)); 17403 APInt KnownZero, KnownOne; 17404 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), 17405 !DCI.isBeforeLegalizeOps()); 17406 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 17407 if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) || 17408 TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO)) 17409 DCI.CommitTargetLoweringOpt(TLO); 17410 } 17411 return SDValue(); 17412} 17413 17414static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) { 17415 SDValue Op = N->getOperand(0); 17416 if (Op.getOpcode() == ISD::BITCAST) 17417 Op = Op.getOperand(0); 17418 EVT VT = N->getValueType(0), OpVT = Op.getValueType(); 17419 if (Op.getOpcode() == X86ISD::VZEXT_LOAD && 17420 VT.getVectorElementType().getSizeInBits() == 17421 OpVT.getVectorElementType().getSizeInBits()) { 17422 return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op); 17423 } 17424 return SDValue(); 17425} 17426 17427static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG, 17428 const X86Subtarget *Subtarget) { 17429 EVT VT = N->getValueType(0); 17430 if (!VT.isVector()) 17431 return SDValue(); 17432 17433 SDValue N0 = N->getOperand(0); 17434 SDValue N1 = N->getOperand(1); 17435 EVT ExtraVT = cast<VTSDNode>(N1)->getVT(); 17436 DebugLoc dl = N->getDebugLoc(); 17437 17438 // The SIGN_EXTEND_INREG to v4i64 is expensive operation on the 17439 // both SSE and AVX2 since there is no sign-extended shift right 17440 // operation on a vector with 64-bit elements. 17441 //(sext_in_reg (v4i64 anyext (v4i32 x )), ExtraVT) -> 17442 // (v4i64 sext (v4i32 sext_in_reg (v4i32 x , ExtraVT))) 17443 if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND || 17444 N0.getOpcode() == ISD::SIGN_EXTEND)) { 17445 SDValue N00 = N0.getOperand(0); 17446 17447 // EXTLOAD has a better solution on AVX2, 17448 // it may be replaced with X86ISD::VSEXT node. 
17449 if (N00.getOpcode() == ISD::LOAD && Subtarget->hasInt256()) 17450 if (!ISD::isNormalLoad(N00.getNode())) 17451 return SDValue(); 17452 17453 if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) { 17454 SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, 17455 N00, N1); 17456 return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp); 17457 } 17458 } 17459 return SDValue(); 17460} 17461 17462static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG, 17463 TargetLowering::DAGCombinerInfo &DCI, 17464 const X86Subtarget *Subtarget) { 17465 if (!DCI.isBeforeLegalizeOps()) 17466 return SDValue(); 17467 17468 if (!Subtarget->hasFp256()) 17469 return SDValue(); 17470 17471 EVT VT = N->getValueType(0); 17472 if (VT.isVector() && VT.getSizeInBits() == 256) { 17473 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget); 17474 if (R.getNode()) 17475 return R; 17476 } 17477 17478 return SDValue(); 17479} 17480 17481static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG, 17482 const X86Subtarget* Subtarget) { 17483 DebugLoc dl = N->getDebugLoc(); 17484 EVT VT = N->getValueType(0); 17485 17486 // Let legalize expand this if it isn't a legal type yet. 17487 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 17488 return SDValue(); 17489 17490 EVT ScalarVT = VT.getScalarType(); 17491 if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || 17492 (!Subtarget->hasFMA() && !Subtarget->hasFMA4())) 17493 return SDValue(); 17494 17495 SDValue A = N->getOperand(0); 17496 SDValue B = N->getOperand(1); 17497 SDValue C = N->getOperand(2); 17498 17499 bool NegA = (A.getOpcode() == ISD::FNEG); 17500 bool NegB = (B.getOpcode() == ISD::FNEG); 17501 bool NegC = (C.getOpcode() == ISD::FNEG); 17502 17503 // Negative multiplication when NegA xor NegB 17504 bool NegMul = (NegA != NegB); 17505 if (NegA) 17506 A = A.getOperand(0); 17507 if (NegB) 17508 B = B.getOperand(0); 17509 if (NegC) 17510 C = C.getOperand(0); 17511 17512 unsigned Opcode; 17513 if (!NegMul) 17514 Opcode = (!NegC) ? X86ISD::FMADD : X86ISD::FMSUB; 17515 else 17516 Opcode = (!NegC) ? X86ISD::FNMADD : X86ISD::FNMSUB; 17517 17518 return DAG.getNode(Opcode, dl, VT, A, B, C); 17519} 17520 17521static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG, 17522 TargetLowering::DAGCombinerInfo &DCI, 17523 const X86Subtarget *Subtarget) { 17524 // (i32 zext (and (i8 x86isd::setcc_carry), 1)) -> 17525 // (and (i32 x86isd::setcc_carry), 1) 17526 // This eliminates the zext. This transformation is necessary because 17527 // ISD::SETCC is always legalized to i8. 
17528 DebugLoc dl = N->getDebugLoc(); 17529 SDValue N0 = N->getOperand(0); 17530 EVT VT = N->getValueType(0); 17531 17532 if (N0.getOpcode() == ISD::AND && 17533 N0.hasOneUse() && 17534 N0.getOperand(0).hasOneUse()) { 17535 SDValue N00 = N0.getOperand(0); 17536 if (N00.getOpcode() == X86ISD::SETCC_CARRY) { 17537 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 17538 if (!C || C->getZExtValue() != 1) 17539 return SDValue(); 17540 return DAG.getNode(ISD::AND, dl, VT, 17541 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT, 17542 N00.getOperand(0), N00.getOperand(1)), 17543 DAG.getConstant(1, VT)); 17544 } 17545 } 17546 17547 if (VT.is256BitVector()) { 17548 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget); 17549 if (R.getNode()) 17550 return R; 17551 } 17552 17553 return SDValue(); 17554} 17555 17556// Optimize x == -y --> x+y == 0 17557// x != -y --> x+y != 0 17558static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG) { 17559 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); 17560 SDValue LHS = N->getOperand(0); 17561 SDValue RHS = N->getOperand(1); 17562 17563 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB) 17564 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(LHS.getOperand(0))) 17565 if (C->getAPIntValue() == 0 && LHS.hasOneUse()) { 17566 SDValue addV = DAG.getNode(ISD::ADD, N->getDebugLoc(), 17567 LHS.getValueType(), RHS, LHS.getOperand(1)); 17568 return DAG.getSetCC(N->getDebugLoc(), N->getValueType(0), 17569 addV, DAG.getConstant(0, addV.getValueType()), CC); 17570 } 17571 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB) 17572 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS.getOperand(0))) 17573 if (C->getAPIntValue() == 0 && RHS.hasOneUse()) { 17574 SDValue addV = DAG.getNode(ISD::ADD, N->getDebugLoc(), 17575 RHS.getValueType(), LHS, RHS.getOperand(1)); 17576 return DAG.getSetCC(N->getDebugLoc(), N->getValueType(0), 17577 addV, DAG.getConstant(0, addV.getValueType()), CC); 17578 } 17579 return SDValue(); 17580} 17581 17582// Helper function of PerformSETCCCombine. It is to materialize "setb reg" 17583// as "sbb reg,reg", since it can be extended without zext and produces 17584// an all-ones bit which is more useful than 0/1 in some cases. 17585static SDValue MaterializeSETB(DebugLoc DL, SDValue EFLAGS, SelectionDAG &DAG) { 17586 return DAG.getNode(ISD::AND, DL, MVT::i8, 17587 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8, 17588 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS), 17589 DAG.getConstant(1, MVT::i8)); 17590} 17591 17592// Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT 17593static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG, 17594 TargetLowering::DAGCombinerInfo &DCI, 17595 const X86Subtarget *Subtarget) { 17596 DebugLoc DL = N->getDebugLoc(); 17597 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0)); 17598 SDValue EFLAGS = N->getOperand(1); 17599 17600 if (CC == X86::COND_A) { 17601 // Try to convert COND_A into COND_B in an attempt to facilitate 17602 // materializing "setb reg". 17603 // 17604 // Do not flip "e > c", where "c" is a constant, because Cmp instruction 17605 // cannot take an immediate as its first operand. 
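    // For example, flags produced by SUB(a, b) tested with COND_A (a above b,
    // unsigned) are equivalent to SUB(b, a) tested with COND_B, and the
    // latter can be materialized as "setb"/"sbb" below.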
17606 // 17607 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() && 17608 EFLAGS.getValueType().isInteger() && 17609 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) { 17610 SDValue NewSub = DAG.getNode(X86ISD::SUB, EFLAGS.getDebugLoc(), 17611 EFLAGS.getNode()->getVTList(), 17612 EFLAGS.getOperand(1), EFLAGS.getOperand(0)); 17613 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo()); 17614 return MaterializeSETB(DL, NewEFLAGS, DAG); 17615 } 17616 } 17617 17618 // Materialize "setb reg" as "sbb reg,reg", since it can be extended without 17619 // a zext and produces an all-ones bit which is more useful than 0/1 in some 17620 // cases. 17621 if (CC == X86::COND_B) 17622 return MaterializeSETB(DL, EFLAGS, DAG); 17623 17624 SDValue Flags; 17625 17626 Flags = checkBoolTestSetCCCombine(EFLAGS, CC); 17627 if (Flags.getNode()) { 17628 SDValue Cond = DAG.getConstant(CC, MVT::i8); 17629 return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags); 17630 } 17631 17632 return SDValue(); 17633} 17634 17635// Optimize branch condition evaluation. 17636// 17637static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG, 17638 TargetLowering::DAGCombinerInfo &DCI, 17639 const X86Subtarget *Subtarget) { 17640 DebugLoc DL = N->getDebugLoc(); 17641 SDValue Chain = N->getOperand(0); 17642 SDValue Dest = N->getOperand(1); 17643 SDValue EFLAGS = N->getOperand(3); 17644 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2)); 17645 17646 SDValue Flags; 17647 17648 Flags = checkBoolTestSetCCCombine(EFLAGS, CC); 17649 if (Flags.getNode()) { 17650 SDValue Cond = DAG.getConstant(CC, MVT::i8); 17651 return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), Chain, Dest, Cond, 17652 Flags); 17653 } 17654 17655 return SDValue(); 17656} 17657 17658static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG, 17659 const X86TargetLowering *XTLI) { 17660 SDValue Op0 = N->getOperand(0); 17661 EVT InVT = Op0->getValueType(0); 17662 17663 // SINT_TO_FP(v4i8) -> SINT_TO_FP(SEXT(v4i8 to v4i32)) 17664 if (InVT == MVT::v8i8 || InVT == MVT::v4i8) { 17665 DebugLoc dl = N->getDebugLoc(); 17666 MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32; 17667 SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0); 17668 return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P); 17669 } 17670 17671 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have 17672 // a 32-bit target where SSE doesn't support i64->FP operations. 17673 if (Op0.getOpcode() == ISD::LOAD) { 17674 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode()); 17675 EVT VT = Ld->getValueType(0); 17676 if (!Ld->isVolatile() && !N->getValueType(0).isVector() && 17677 ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() && 17678 !XTLI->getSubtarget()->is64Bit() && 17679 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 17680 SDValue FILDChain = XTLI->BuildFILD(SDValue(N, 0), Ld->getValueType(0), 17681 Ld->getChain(), Op0, DAG); 17682 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1)); 17683 return FILDChain; 17684 } 17685 } 17686 return SDValue(); 17687} 17688 17689// Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS 17690static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG, 17691 X86TargetLowering::DAGCombinerInfo &DCI) { 17692 // If the LHS and RHS of the ADC node are zero, then it can't overflow and 17693 // the result is either zero or one (depending on the input carry bit). 17694 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1. 
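  // Roughly: (adc 0, 0, carry-in) is rewritten below as
  //   (and (setcc_carry COND_B, carry-in), 1)
  // with the node's own carry output replaced by a constant 0.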
17695 if (X86::isZeroNode(N->getOperand(0)) && 17696 X86::isZeroNode(N->getOperand(1)) && 17697 // We don't have a good way to replace an EFLAGS use, so only do this when 17698 // dead right now. 17699 SDValue(N, 1).use_empty()) { 17700 DebugLoc DL = N->getDebugLoc(); 17701 EVT VT = N->getValueType(0); 17702 SDValue CarryOut = DAG.getConstant(0, N->getValueType(1)); 17703 SDValue Res1 = DAG.getNode(ISD::AND, DL, VT, 17704 DAG.getNode(X86ISD::SETCC_CARRY, DL, VT, 17705 DAG.getConstant(X86::COND_B,MVT::i8), 17706 N->getOperand(2)), 17707 DAG.getConstant(1, VT)); 17708 return DCI.CombineTo(N, Res1, CarryOut); 17709 } 17710 17711 return SDValue(); 17712} 17713 17714// fold (add Y, (sete X, 0)) -> adc 0, Y 17715// (add Y, (setne X, 0)) -> sbb -1, Y 17716// (sub (sete X, 0), Y) -> sbb 0, Y 17717// (sub (setne X, 0), Y) -> adc -1, Y 17718static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) { 17719 DebugLoc DL = N->getDebugLoc(); 17720 17721 // Look through ZExts. 17722 SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0); 17723 if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse()) 17724 return SDValue(); 17725 17726 SDValue SetCC = Ext.getOperand(0); 17727 if (SetCC.getOpcode() != X86ISD::SETCC || !SetCC.hasOneUse()) 17728 return SDValue(); 17729 17730 X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0); 17731 if (CC != X86::COND_E && CC != X86::COND_NE) 17732 return SDValue(); 17733 17734 SDValue Cmp = SetCC.getOperand(1); 17735 if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() || 17736 !X86::isZeroNode(Cmp.getOperand(1)) || 17737 !Cmp.getOperand(0).getValueType().isInteger()) 17738 return SDValue(); 17739 17740 SDValue CmpOp0 = Cmp.getOperand(0); 17741 SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0, 17742 DAG.getConstant(1, CmpOp0.getValueType())); 17743 17744 SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1); 17745 if (CC == X86::COND_NE) 17746 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB, 17747 DL, OtherVal.getValueType(), OtherVal, 17748 DAG.getConstant(-1ULL, OtherVal.getValueType()), NewCmp); 17749 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC, 17750 DL, OtherVal.getValueType(), OtherVal, 17751 DAG.getConstant(0, OtherVal.getValueType()), NewCmp); 17752} 17753 17754/// PerformADDCombine - Do target-specific dag combines on integer adds. 17755static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG, 17756 const X86Subtarget *Subtarget) { 17757 EVT VT = N->getValueType(0); 17758 SDValue Op0 = N->getOperand(0); 17759 SDValue Op1 = N->getOperand(1); 17760 17761 // Try to synthesize horizontal adds from adds of shuffles. 17762 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) || 17763 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) && 17764 isHorizontalBinOp(Op0, Op1, true)) 17765 return DAG.getNode(X86ISD::HADD, N->getDebugLoc(), VT, Op0, Op1); 17766 17767 return OptimizeConditionalInDecrement(N, DAG); 17768} 17769 17770static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG, 17771 const X86Subtarget *Subtarget) { 17772 SDValue Op0 = N->getOperand(0); 17773 SDValue Op1 = N->getOperand(1); 17774 17775 // X86 can't encode an immediate LHS of a sub. See if we can push the 17776 // negation into a preceding instruction. 17777 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) { 17778 // If the RHS of the sub is a XOR with one use and a constant, invert the 17779 // immediate. 
Then add one to the LHS of the sub so we can turn 17780 // X-Y -> X+~Y+1, saving one register. 17781 if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR && 17782 isa<ConstantSDNode>(Op1.getOperand(1))) { 17783 APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue(); 17784 EVT VT = Op0.getValueType(); 17785 SDValue NewXor = DAG.getNode(ISD::XOR, Op1.getDebugLoc(), VT, 17786 Op1.getOperand(0), 17787 DAG.getConstant(~XorC, VT)); 17788 return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, NewXor, 17789 DAG.getConstant(C->getAPIntValue()+1, VT)); 17790 } 17791 } 17792 17793 // Try to synthesize horizontal adds from adds of shuffles. 17794 EVT VT = N->getValueType(0); 17795 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) || 17796 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) && 17797 isHorizontalBinOp(Op0, Op1, true)) 17798 return DAG.getNode(X86ISD::HSUB, N->getDebugLoc(), VT, Op0, Op1); 17799 17800 return OptimizeConditionalInDecrement(N, DAG); 17801} 17802 17803/// performVZEXTCombine - Performs build vector combines 17804static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG, 17805 TargetLowering::DAGCombinerInfo &DCI, 17806 const X86Subtarget *Subtarget) { 17807 // (vzext (bitcast (vzext (x)) -> (vzext x) 17808 SDValue In = N->getOperand(0); 17809 while (In.getOpcode() == ISD::BITCAST) 17810 In = In.getOperand(0); 17811 17812 if (In.getOpcode() != X86ISD::VZEXT) 17813 return SDValue(); 17814 17815 return DAG.getNode(X86ISD::VZEXT, N->getDebugLoc(), N->getValueType(0), 17816 In.getOperand(0)); 17817} 17818 17819SDValue X86TargetLowering::PerformDAGCombine(SDNode *N, 17820 DAGCombinerInfo &DCI) const { 17821 SelectionDAG &DAG = DCI.DAG; 17822 switch (N->getOpcode()) { 17823 default: break; 17824 case ISD::EXTRACT_VECTOR_ELT: 17825 return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, DCI); 17826 case ISD::VSELECT: 17827 case ISD::SELECT: return PerformSELECTCombine(N, DAG, DCI, Subtarget); 17828 case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI, Subtarget); 17829 case ISD::ADD: return PerformAddCombine(N, DAG, Subtarget); 17830 case ISD::SUB: return PerformSubCombine(N, DAG, Subtarget); 17831 case X86ISD::ADC: return PerformADCCombine(N, DAG, DCI); 17832 case ISD::MUL: return PerformMulCombine(N, DAG, DCI); 17833 case ISD::SHL: 17834 case ISD::SRA: 17835 case ISD::SRL: return PerformShiftCombine(N, DAG, DCI, Subtarget); 17836 case ISD::AND: return PerformAndCombine(N, DAG, DCI, Subtarget); 17837 case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget); 17838 case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget); 17839 case ISD::LOAD: return PerformLOADCombine(N, DAG, DCI, Subtarget); 17840 case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget); 17841 case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, this); 17842 case ISD::FADD: return PerformFADDCombine(N, DAG, Subtarget); 17843 case ISD::FSUB: return PerformFSUBCombine(N, DAG, Subtarget); 17844 case X86ISD::FXOR: 17845 case X86ISD::FOR: return PerformFORCombine(N, DAG); 17846 case X86ISD::FMIN: 17847 case X86ISD::FMAX: return PerformFMinFMaxCombine(N, DAG); 17848 case X86ISD::FAND: return PerformFANDCombine(N, DAG); 17849 case X86ISD::BT: return PerformBTCombine(N, DAG, DCI); 17850 case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG); 17851 case ISD::ANY_EXTEND: 17852 case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG, DCI, Subtarget); 17853 case ISD::SIGN_EXTEND: return PerformSExtCombine(N, DAG, DCI, Subtarget); 17854 case 
ISD::SIGN_EXTEND_INREG: return PerformSIGN_EXTEND_INREGCombine(N, DAG, Subtarget); 17855 case ISD::TRUNCATE: return PerformTruncateCombine(N, DAG,DCI,Subtarget); 17856 case ISD::SETCC: return PerformISDSETCCCombine(N, DAG); 17857 case X86ISD::SETCC: return PerformSETCCCombine(N, DAG, DCI, Subtarget); 17858 case X86ISD::BRCOND: return PerformBrCondCombine(N, DAG, DCI, Subtarget); 17859 case X86ISD::VZEXT: return performVZEXTCombine(N, DAG, DCI, Subtarget); 17860 case X86ISD::SHUFP: // Handle all target specific shuffles 17861 case X86ISD::PALIGNR: 17862 case X86ISD::UNPCKH: 17863 case X86ISD::UNPCKL: 17864 case X86ISD::MOVHLPS: 17865 case X86ISD::MOVLHPS: 17866 case X86ISD::PSHUFD: 17867 case X86ISD::PSHUFHW: 17868 case X86ISD::PSHUFLW: 17869 case X86ISD::MOVSS: 17870 case X86ISD::MOVSD: 17871 case X86ISD::VPERMILP: 17872 case X86ISD::VPERM2X128: 17873 case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI,Subtarget); 17874 case ISD::FMA: return PerformFMACombine(N, DAG, Subtarget); 17875 } 17876 17877 return SDValue(); 17878} 17879 17880/// isTypeDesirableForOp - Return true if the target has native support for 17881/// the specified value type and it is 'desirable' to use the type for the 17882/// given node type. e.g. On x86 i16 is legal, but undesirable since i16 17883/// instruction encodings are longer and some i16 instructions are slow. 17884bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const { 17885 if (!isTypeLegal(VT)) 17886 return false; 17887 if (VT != MVT::i16) 17888 return true; 17889 17890 switch (Opc) { 17891 default: 17892 return true; 17893 case ISD::LOAD: 17894 case ISD::SIGN_EXTEND: 17895 case ISD::ZERO_EXTEND: 17896 case ISD::ANY_EXTEND: 17897 case ISD::SHL: 17898 case ISD::SRL: 17899 case ISD::SUB: 17900 case ISD::ADD: 17901 case ISD::MUL: 17902 case ISD::AND: 17903 case ISD::OR: 17904 case ISD::XOR: 17905 return false; 17906 } 17907} 17908 17909/// IsDesirableToPromoteOp - This method query the target whether it is 17910/// beneficial for dag combiner to promote the specified node. If true, it 17911/// should return the desired promotion type by reference. 17912bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const { 17913 EVT VT = Op.getValueType(); 17914 if (VT != MVT::i16) 17915 return false; 17916 17917 bool Promote = false; 17918 bool Commute = false; 17919 switch (Op.getOpcode()) { 17920 default: break; 17921 case ISD::LOAD: { 17922 LoadSDNode *LD = cast<LoadSDNode>(Op); 17923 // If the non-extending load has a single use and it's not live out, then it 17924 // might be folded. 17925 if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&& 17926 Op.hasOneUse()*/) { 17927 for (SDNode::use_iterator UI = Op.getNode()->use_begin(), 17928 UE = Op.getNode()->use_end(); UI != UE; ++UI) { 17929 // The only case where we'd want to promote LOAD (rather then it being 17930 // promoted as an operand is when it's only use is liveout. 17931 if (UI->getOpcode() != ISD::CopyToReg) 17932 return false; 17933 } 17934 } 17935 Promote = true; 17936 break; 17937 } 17938 case ISD::SIGN_EXTEND: 17939 case ISD::ZERO_EXTEND: 17940 case ISD::ANY_EXTEND: 17941 Promote = true; 17942 break; 17943 case ISD::SHL: 17944 case ISD::SRL: { 17945 SDValue N0 = Op.getOperand(0); 17946 // Look out for (store (shl (load), x)). 
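    // Promoting such a shift to i32 would keep the load from being folded
    // into the shift and the shift from being folded into the store, so
    // leave it at i16 in that case.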
17947 if (MayFoldLoad(N0) && MayFoldIntoStore(Op)) 17948 return false; 17949 Promote = true; 17950 break; 17951 } 17952 case ISD::ADD: 17953 case ISD::MUL: 17954 case ISD::AND: 17955 case ISD::OR: 17956 case ISD::XOR: 17957 Commute = true; 17958 // fallthrough 17959 case ISD::SUB: { 17960 SDValue N0 = Op.getOperand(0); 17961 SDValue N1 = Op.getOperand(1); 17962 if (!Commute && MayFoldLoad(N1)) 17963 return false; 17964 // Avoid disabling potential load folding opportunities. 17965 if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op))) 17966 return false; 17967 if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op))) 17968 return false; 17969 Promote = true; 17970 } 17971 } 17972 17973 PVT = MVT::i32; 17974 return Promote; 17975} 17976 17977//===----------------------------------------------------------------------===// 17978// X86 Inline Assembly Support 17979//===----------------------------------------------------------------------===// 17980 17981namespace { 17982 // Helper to match a string separated by whitespace. 17983 bool matchAsmImpl(StringRef s, ArrayRef<const StringRef *> args) { 17984 s = s.substr(s.find_first_not_of(" \t")); // Skip leading whitespace. 17985 17986 for (unsigned i = 0, e = args.size(); i != e; ++i) { 17987 StringRef piece(*args[i]); 17988 if (!s.startswith(piece)) // Check if the piece matches. 17989 return false; 17990 17991 s = s.substr(piece.size()); 17992 StringRef::size_type pos = s.find_first_not_of(" \t"); 17993 if (pos == 0) // We matched a prefix. 17994 return false; 17995 17996 s = s.substr(pos); 17997 } 17998 17999 return s.empty(); 18000 } 18001 const VariadicFunction1<bool, StringRef, StringRef, matchAsmImpl> matchAsm={}; 18002} 18003 18004bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const { 18005 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue()); 18006 18007 std::string AsmStr = IA->getAsmString(); 18008 18009 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType()); 18010 if (!Ty || Ty->getBitWidth() % 16 != 0) 18011 return false; 18012 18013 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a" 18014 SmallVector<StringRef, 4> AsmPieces; 18015 SplitString(AsmStr, AsmPieces, ";\n"); 18016 18017 switch (AsmPieces.size()) { 18018 default: return false; 18019 case 1: 18020 // FIXME: this should verify that we are targeting a 486 or better. If not, 18021 // we will turn this bswap into something that will be lowered to logical 18022 // ops instead of emitting the bswap asm. For now, we don't support 486 or 18023 // lower so don't worry about this. 18024 // bswap $0 18025 if (matchAsm(AsmPieces[0], "bswap", "$0") || 18026 matchAsm(AsmPieces[0], "bswapl", "$0") || 18027 matchAsm(AsmPieces[0], "bswapq", "$0") || 18028 matchAsm(AsmPieces[0], "bswap", "${0:q}") || 18029 matchAsm(AsmPieces[0], "bswapl", "${0:q}") || 18030 matchAsm(AsmPieces[0], "bswapq", "${0:q}")) { 18031 // No need to check constraints, nothing other than the equivalent of 18032 // "=r,0" would be valid here. 
18033 return IntrinsicLowering::LowerToByteSwap(CI); 18034 } 18035 18036 // rorw $$8, ${0:w} --> llvm.bswap.i16 18037 if (CI->getType()->isIntegerTy(16) && 18038 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 && 18039 (matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") || 18040 matchAsm(AsmPieces[0], "rolw", "$$8,", "${0:w}"))) { 18041 AsmPieces.clear(); 18042 const std::string &ConstraintsStr = IA->getConstraintString(); 18043 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ","); 18044 array_pod_sort(AsmPieces.begin(), AsmPieces.end()); 18045 if (AsmPieces.size() == 4 && 18046 AsmPieces[0] == "~{cc}" && 18047 AsmPieces[1] == "~{dirflag}" && 18048 AsmPieces[2] == "~{flags}" && 18049 AsmPieces[3] == "~{fpsr}") 18050 return IntrinsicLowering::LowerToByteSwap(CI); 18051 } 18052 break; 18053 case 3: 18054 if (CI->getType()->isIntegerTy(32) && 18055 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 && 18056 matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") && 18057 matchAsm(AsmPieces[1], "rorl", "$$16,", "$0") && 18058 matchAsm(AsmPieces[2], "rorw", "$$8,", "${0:w}")) { 18059 AsmPieces.clear(); 18060 const std::string &ConstraintsStr = IA->getConstraintString(); 18061 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ","); 18062 array_pod_sort(AsmPieces.begin(), AsmPieces.end()); 18063 if (AsmPieces.size() == 4 && 18064 AsmPieces[0] == "~{cc}" && 18065 AsmPieces[1] == "~{dirflag}" && 18066 AsmPieces[2] == "~{flags}" && 18067 AsmPieces[3] == "~{fpsr}") 18068 return IntrinsicLowering::LowerToByteSwap(CI); 18069 } 18070 18071 if (CI->getType()->isIntegerTy(64)) { 18072 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints(); 18073 if (Constraints.size() >= 2 && 18074 Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" && 18075 Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") { 18076 // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64 18077 if (matchAsm(AsmPieces[0], "bswap", "%eax") && 18078 matchAsm(AsmPieces[1], "bswap", "%edx") && 18079 matchAsm(AsmPieces[2], "xchgl", "%eax,", "%edx")) 18080 return IntrinsicLowering::LowerToByteSwap(CI); 18081 } 18082 } 18083 break; 18084 } 18085 return false; 18086} 18087 18088/// getConstraintType - Given a constraint letter, return the type of 18089/// constraint it is for this target. 18090X86TargetLowering::ConstraintType 18091X86TargetLowering::getConstraintType(const std::string &Constraint) const { 18092 if (Constraint.size() == 1) { 18093 switch (Constraint[0]) { 18094 case 'R': 18095 case 'q': 18096 case 'Q': 18097 case 'f': 18098 case 't': 18099 case 'u': 18100 case 'y': 18101 case 'x': 18102 case 'Y': 18103 case 'l': 18104 return C_RegisterClass; 18105 case 'a': 18106 case 'b': 18107 case 'c': 18108 case 'd': 18109 case 'S': 18110 case 'D': 18111 case 'A': 18112 return C_Register; 18113 case 'I': 18114 case 'J': 18115 case 'K': 18116 case 'L': 18117 case 'M': 18118 case 'N': 18119 case 'G': 18120 case 'C': 18121 case 'e': 18122 case 'Z': 18123 return C_Other; 18124 default: 18125 break; 18126 } 18127 } 18128 return TargetLowering::getConstraintType(Constraint); 18129} 18130 18131/// Examine constraint type and operand type and determine a weight value. 18132/// This object must already have been set up with the operand type 18133/// and the current alternative constraint selected. 
18134TargetLowering::ConstraintWeight 18135 X86TargetLowering::getSingleConstraintMatchWeight( 18136 AsmOperandInfo &info, const char *constraint) const { 18137 ConstraintWeight weight = CW_Invalid; 18138 Value *CallOperandVal = info.CallOperandVal; 18139 // If we don't have a value, we can't do a match, 18140 // but allow it at the lowest weight. 18141 if (CallOperandVal == NULL) 18142 return CW_Default; 18143 Type *type = CallOperandVal->getType(); 18144 // Look at the constraint type. 18145 switch (*constraint) { 18146 default: 18147 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 18148 case 'R': 18149 case 'q': 18150 case 'Q': 18151 case 'a': 18152 case 'b': 18153 case 'c': 18154 case 'd': 18155 case 'S': 18156 case 'D': 18157 case 'A': 18158 if (CallOperandVal->getType()->isIntegerTy()) 18159 weight = CW_SpecificReg; 18160 break; 18161 case 'f': 18162 case 't': 18163 case 'u': 18164 if (type->isFloatingPointTy()) 18165 weight = CW_SpecificReg; 18166 break; 18167 case 'y': 18168 if (type->isX86_MMXTy() && Subtarget->hasMMX()) 18169 weight = CW_SpecificReg; 18170 break; 18171 case 'x': 18172 case 'Y': 18173 if (((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasSSE1()) || 18174 ((type->getPrimitiveSizeInBits() == 256) && Subtarget->hasFp256())) 18175 weight = CW_Register; 18176 break; 18177 case 'I': 18178 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) { 18179 if (C->getZExtValue() <= 31) 18180 weight = CW_Constant; 18181 } 18182 break; 18183 case 'J': 18184 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 18185 if (C->getZExtValue() <= 63) 18186 weight = CW_Constant; 18187 } 18188 break; 18189 case 'K': 18190 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 18191 if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f)) 18192 weight = CW_Constant; 18193 } 18194 break; 18195 case 'L': 18196 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 18197 if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff)) 18198 weight = CW_Constant; 18199 } 18200 break; 18201 case 'M': 18202 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 18203 if (C->getZExtValue() <= 3) 18204 weight = CW_Constant; 18205 } 18206 break; 18207 case 'N': 18208 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 18209 if (C->getZExtValue() <= 0xff) 18210 weight = CW_Constant; 18211 } 18212 break; 18213 case 'G': 18214 case 'C': 18215 if (dyn_cast<ConstantFP>(CallOperandVal)) { 18216 weight = CW_Constant; 18217 } 18218 break; 18219 case 'e': 18220 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 18221 if ((C->getSExtValue() >= -0x80000000LL) && 18222 (C->getSExtValue() <= 0x7fffffffLL)) 18223 weight = CW_Constant; 18224 } 18225 break; 18226 case 'Z': 18227 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 18228 if (C->getZExtValue() <= 0xffffffff) 18229 weight = CW_Constant; 18230 } 18231 break; 18232 } 18233 return weight; 18234} 18235 18236/// LowerXConstraint - try to replace an X constraint, which matches anything, 18237/// with another that has more specific requirements based on the type of the 18238/// corresponding operand. 18239const char *X86TargetLowering:: 18240LowerXConstraint(EVT ConstraintVT) const { 18241 // FP X constraints get lowered to SSE1/2 registers if available, otherwise 18242 // 'f' like normal targets. 
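  // For example, an "X" constraint on a float or double operand is narrowed
  // to "Y" (an SSE register) when SSE2 is available, to "x" with only SSE1,
  // and otherwise falls through to the generic handling.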
/// LowerXConstraint - try to replace an X constraint, which matches anything,
/// with another that has more specific requirements based on the type of the
/// corresponding operand.
const char *X86TargetLowering::
LowerXConstraint(EVT ConstraintVT) const {
  // FP X constraints get lowered to SSE1/2 registers if available, otherwise
  // 'f' like normal targets.
  if (ConstraintVT.isFloatingPoint()) {
    if (Subtarget->hasSSE2())
      return "Y";
    if (Subtarget->hasSSE1())
      return "x";
  }

  return TargetLowering::LowerXConstraint(ConstraintVT);
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue>&Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result(0, 0);

  // Only support length 1 constraints for now.
  if (Constraint.length() > 1) return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 31) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'J':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 63) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'K':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (isInt<8>(C->getSExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'N':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 255) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'e': {
    // 32-bit signed value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getSExtValue())) {
        // Widen to 64 bits here to get it sign extended.
        Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
        break;
      }
      // FIXME gcc accepts some relocatable values here too, but only in certain
      // memory models; it's complicated.
    }
    return;
  }
  case 'Z': {
    // 32-bit unsigned value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getZExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    // FIXME gcc accepts some relocatable values here too, but only in certain
    // memory models; it's complicated.
    return;
  }
  case 'i': {
    // Literal immediates are always ok.
    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
      // Widen to 64 bits here to get it sign extended.
      Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64);
      break;
    }

    // In any sort of PIC mode addresses need to be computed at runtime by
    // adding in a register or some sort of table lookup.  These can't
    // be used as immediates.
    if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC())
      return;

    // If we are in non-pic codegen mode, we allow the address of a global (with
    // an optional displacement) to be used with 'i'.
    GlobalAddressSDNode *GA = 0;
    int64_t Offset = 0;

    // Match either (GA), (GA+C), (GA+C1+C2), etc.
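    // For example, an address expressed as (add (add GA, 40), 8) is peeled one
    // operand at a time below, accumulating Offset = 48 and leaving GA as the
    // underlying GlobalAddressSDNode; a SUB of a constant contributes a
    // negative displacement instead.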
    while (1) {
      if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
        Offset += GA->getOffset();
        break;
      } else if (Op.getOpcode() == ISD::ADD) {
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
          Offset += C->getZExtValue();
          Op = Op.getOperand(0);
          continue;
        }
      } else if (Op.getOpcode() == ISD::SUB) {
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
          Offset += -C->getZExtValue();
          Op = Op.getOperand(0);
          continue;
        }
      }

      // Otherwise, this isn't something we can handle, reject it.
      return;
    }

    const GlobalValue *GV = GA->getGlobal();
    // If we require an extra load to get this address, as in PIC mode, we
    // can't accept it.
    if (isGlobalStubReference(Subtarget->ClassifyGlobalReference(GV,
                                                        getTargetMachine())))
      return;

    Result = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(),
                                        GA->getValueType(0), Offset);
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                EVT VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    // TODO: Slight differences here in allocation order and leaving
    // RIP in the class. Do they matter any more here than they do
    // in the normal allocation?
    case 'q':   // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
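      // For example, "=q"(int) gets GR32 here in 64-bit mode; in 32-bit mode
      // it falls through to the EAX/EBX/ECX/EDX-constrained classes under 'Q'.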
      if (Subtarget->is64Bit()) {
        if (VT == MVT::i32 || VT == MVT::f32)
          return std::make_pair(0U, &X86::GR32RegClass);
        if (VT == MVT::i16)
          return std::make_pair(0U, &X86::GR16RegClass);
        if (VT == MVT::i8 || VT == MVT::i1)
          return std::make_pair(0U, &X86::GR8RegClass);
        if (VT == MVT::i64 || VT == MVT::f64)
          return std::make_pair(0U, &X86::GR64RegClass);
        break;
      }
      // 32-bit fallthrough
    case 'Q':   // Q_REGS
      if (VT == MVT::i32 || VT == MVT::f32)
        return std::make_pair(0U, &X86::GR32_ABCDRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_ABCDRegClass);
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
      if (VT == MVT::i64)
        return std::make_pair(0U, &X86::GR64_ABCDRegClass);
      break;
    case 'r':   // GENERAL_REGS
    case 'l':   // INDEX_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8RegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16RegClass);
      if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit())
        return std::make_pair(0U, &X86::GR32RegClass);
      return std::make_pair(0U, &X86::GR64RegClass);
    case 'R':   // LEGACY_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_NOREXRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_NOREXRegClass);
      if (VT == MVT::i32 || !Subtarget->is64Bit())
        return std::make_pair(0U, &X86::GR32_NOREXRegClass);
      return std::make_pair(0U, &X86::GR64_NOREXRegClass);
    case 'f':   // FP Stack registers.
      // If SSE is enabled for this VT, use f80 to ensure the isel moves the
      // value to the correct fpstack register class.
      if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP32RegClass);
      if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP64RegClass);
      return std::make_pair(0U, &X86::RFP80RegClass);
    case 'y':   // MMX_REGS if MMX allowed.
      if (!Subtarget->hasMMX()) break;
      return std::make_pair(0U, &X86::VR64RegClass);
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (!Subtarget->hasSSE2()) break;
      // FALL THROUGH.
    case 'x':   // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
      if (!Subtarget->hasSSE1()) break;

      switch (VT.getSimpleVT().SimpleTy) {
      default: break;
      // Scalar SSE types.
      case MVT::f32:
      case MVT::i32:
        return std::make_pair(0U, &X86::FR32RegClass);
      case MVT::f64:
      case MVT::i64:
        return std::make_pair(0U, &X86::FR64RegClass);
      // Vector types.
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        return std::make_pair(0U, &X86::VR128RegClass);
      // AVX types.
      case MVT::v32i8:
      case MVT::v16i16:
      case MVT::v8i32:
      case MVT::v4i64:
      case MVT::v8f32:
      case MVT::v4f64:
        return std::make_pair(0U, &X86::VR256RegClass);
      }
      break;
    }
  }

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // Not found as a standard register?
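  // These are register-like constraints that the generic lookup above does
  // not resolve: e.g. "{st(3)}", plain "{st}", "{flags}", and the
  // two-register "A" constraint are handled below.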
  if (Res.second == 0) {
    // Map st(0) .. st(7) to ST0 .. ST7.
    if (Constraint.size() == 7 && Constraint[0] == '{' &&
        tolower(Constraint[1]) == 's' &&
        tolower(Constraint[2]) == 't' &&
        Constraint[3] == '(' &&
        (Constraint[4] >= '0' && Constraint[4] <= '7') &&
        Constraint[5] == ')' &&
        Constraint[6] == '}') {

      Res.first = X86::ST0+Constraint[4]-'0';
      Res.second = &X86::RFP80RegClass;
      return Res;
    }

    // GCC allows "st(0)" to be called just plain "st".
    if (StringRef("{st}").equals_lower(Constraint)) {
      Res.first = X86::ST0;
      Res.second = &X86::RFP80RegClass;
      return Res;
    }

    // flags -> EFLAGS
    if (StringRef("{flags}").equals_lower(Constraint)) {
      Res.first = X86::EFLAGS;
      Res.second = &X86::CCRRegClass;
      return Res;
    }

    // 'A' means EAX + EDX.
    if (Constraint == "A") {
      Res.first = X86::EAX;
      Res.second = &X86::GR32_ADRegClass;
      return Res;
    }
    return Res;
  }

  // Otherwise, check to see if this is a register class of the wrong value
  // type.  For example, we want to map "{ax},i32" -> {eax}; we don't want it to
  // turn into {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res;   // Correct type already, nothing to do.

  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp".  If we
  // really want an 8-bit or 32-bit register, map to the appropriate register
  // class and return the appropriate register.
  if (Res.second == &X86::GR16RegClass) {
    if (VT == MVT::i8 || VT == MVT::i1) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::AL; break;
      case X86::DX: DestReg = X86::DL; break;
      case X86::CX: DestReg = X86::CL; break;
      case X86::BX: DestReg = X86::BL; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR8RegClass;
      }
    } else if (VT == MVT::i32 || VT == MVT::f32) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::EAX; break;
      case X86::DX: DestReg = X86::EDX; break;
      case X86::CX: DestReg = X86::ECX; break;
      case X86::BX: DestReg = X86::EBX; break;
      case X86::SI: DestReg = X86::ESI; break;
      case X86::DI: DestReg = X86::EDI; break;
      case X86::BP: DestReg = X86::EBP; break;
      case X86::SP: DestReg = X86::ESP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR32RegClass;
      }
    } else if (VT == MVT::i64 || VT == MVT::f64) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::RAX; break;
      case X86::DX: DestReg = X86::RDX; break;
      case X86::CX: DestReg = X86::RCX; break;
      case X86::BX: DestReg = X86::RBX; break;
      case X86::SI: DestReg = X86::RSI; break;
      case X86::DI: DestReg = X86::RDI; break;
      case X86::BP: DestReg = X86::RBP; break;
      case X86::SP: DestReg = X86::RSP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR64RegClass;
      }
    }
  } else if (Res.second == &X86::FR32RegClass ||
             Res.second == &X86::FR64RegClass ||
             Res.second == &X86::VR128RegClass) {
    // Handle references to XMM physical registers that got mapped into the
    // wrong class.  This can happen with constraints like {xmm0} where the
    // target independent register mapper will just pick the first match it can
    // find, ignoring the required type.

    if (VT == MVT::f32 || VT == MVT::i32)
      Res.second = &X86::FR32RegClass;
    else if (VT == MVT::f64 || VT == MVT::i64)
      Res.second = &X86::FR64RegClass;
    else if (X86::VR128RegClass.hasType(VT))
      Res.second = &X86::VR128RegClass;
    else if (X86::VR256RegClass.hasType(VT))
      Res.second = &X86::VR256RegClass;
  }

  return Res;
}