X86ISelLowering.cpp revision b14a5f5f951bb327c28e61198e8ff7193ce3b599
1//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file defines the interfaces that X86 uses to lower LLVM code into a 11// selection DAG. 12// 13//===----------------------------------------------------------------------===// 14 15#define DEBUG_TYPE "x86-isel" 16#include "X86ISelLowering.h" 17#include "X86.h" 18#include "X86InstrBuilder.h" 19#include "X86TargetMachine.h" 20#include "X86TargetObjectFile.h" 21#include "Utils/X86ShuffleDecode.h" 22#include "llvm/CallingConv.h" 23#include "llvm/Constants.h" 24#include "llvm/DerivedTypes.h" 25#include "llvm/GlobalAlias.h" 26#include "llvm/GlobalVariable.h" 27#include "llvm/Function.h" 28#include "llvm/Instructions.h" 29#include "llvm/Intrinsics.h" 30#include "llvm/LLVMContext.h" 31#include "llvm/CodeGen/IntrinsicLowering.h" 32#include "llvm/CodeGen/MachineFrameInfo.h" 33#include "llvm/CodeGen/MachineFunction.h" 34#include "llvm/CodeGen/MachineInstrBuilder.h" 35#include "llvm/CodeGen/MachineJumpTableInfo.h" 36#include "llvm/CodeGen/MachineModuleInfo.h" 37#include "llvm/CodeGen/MachineRegisterInfo.h" 38#include "llvm/MC/MCAsmInfo.h" 39#include "llvm/MC/MCContext.h" 40#include "llvm/MC/MCExpr.h" 41#include "llvm/MC/MCSymbol.h" 42#include "llvm/ADT/SmallSet.h" 43#include "llvm/ADT/Statistic.h" 44#include "llvm/ADT/StringExtras.h" 45#include "llvm/ADT/VariadicFunction.h" 46#include "llvm/Support/CallSite.h" 47#include "llvm/Support/Debug.h" 48#include "llvm/Support/ErrorHandling.h" 49#include "llvm/Support/MathExtras.h" 50#include "llvm/Target/TargetOptions.h" 51#include <bitset> 52#include <cctype> 53using namespace llvm; 54 55STATISTIC(NumTailCalls, "Number of tail calls"); 56 57// Forward declarations. 58static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, 59 SDValue V2); 60 61/// Generate a DAG to grab 128-bits from a vector > 128 bits. This 62/// sets things up to match to an AVX VEXTRACTF128 instruction or a 63/// simple subregister reference. Idx is an index in the 128 bits we 64/// want. It need not be aligned to a 128-bit bounday. That makes 65/// lowering EXTRACT_VECTOR_ELT operations easier. 66static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal, 67 SelectionDAG &DAG, DebugLoc dl) { 68 EVT VT = Vec.getValueType(); 69 assert(VT.is256BitVector() && "Unexpected vector size!"); 70 EVT ElVT = VT.getVectorElementType(); 71 unsigned Factor = VT.getSizeInBits()/128; 72 EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT, 73 VT.getVectorNumElements()/Factor); 74 75 // Extract from UNDEF is UNDEF. 76 if (Vec.getOpcode() == ISD::UNDEF) 77 return DAG.getUNDEF(ResultVT); 78 79 // Extract the relevant 128 bits. Generate an EXTRACT_SUBVECTOR 80 // we can match to VEXTRACTF128. 81 unsigned ElemsPerChunk = 128 / ElVT.getSizeInBits(); 82 83 // This is the index of the first element of the 128-bit chunk 84 // we want. 85 unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / 128) 86 * ElemsPerChunk); 87 88 SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal); 89 SDValue Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, 90 VecIdx); 91 92 return Result; 93} 94 95/// Generate a DAG to put 128-bits into a vector > 128 bits. 
This 96/// sets things up to match to an AVX VINSERTF128 instruction or a 97/// simple superregister reference. Idx is an index in the 128 bits 98/// we want. It need not be aligned to a 128-bit bounday. That makes 99/// lowering INSERT_VECTOR_ELT operations easier. 100static SDValue Insert128BitVector(SDValue Result, SDValue Vec, 101 unsigned IdxVal, SelectionDAG &DAG, 102 DebugLoc dl) { 103 // Inserting UNDEF is Result 104 if (Vec.getOpcode() == ISD::UNDEF) 105 return Result; 106 107 EVT VT = Vec.getValueType(); 108 assert(VT.is128BitVector() && "Unexpected vector size!"); 109 110 EVT ElVT = VT.getVectorElementType(); 111 EVT ResultVT = Result.getValueType(); 112 113 // Insert the relevant 128 bits. 114 unsigned ElemsPerChunk = 128/ElVT.getSizeInBits(); 115 116 // This is the index of the first element of the 128-bit chunk 117 // we want. 118 unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/128) 119 * ElemsPerChunk); 120 121 SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal); 122 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, 123 VecIdx); 124} 125 126/// Concat two 128-bit vectors into a 256 bit vector using VINSERTF128 127/// instructions. This is used because creating CONCAT_VECTOR nodes of 128/// BUILD_VECTORS returns a larger BUILD_VECTOR while we're trying to lower 129/// large BUILD_VECTORS. 130static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT, 131 unsigned NumElems, SelectionDAG &DAG, 132 DebugLoc dl) { 133 SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl); 134 return Insert128BitVector(V, V2, NumElems/2, DAG, dl); 135} 136 137static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) { 138 const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>(); 139 bool is64Bit = Subtarget->is64Bit(); 140 141 if (Subtarget->isTargetEnvMacho()) { 142 if (is64Bit) 143 return new X86_64MachoTargetObjectFile(); 144 return new TargetLoweringObjectFileMachO(); 145 } 146 147 if (Subtarget->isTargetLinux()) 148 return new X86LinuxTargetObjectFile(); 149 if (Subtarget->isTargetELF()) 150 return new TargetLoweringObjectFileELF(); 151 if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho()) 152 return new TargetLoweringObjectFileCOFF(); 153 llvm_unreachable("unknown subtarget type"); 154} 155 156X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) 157 : TargetLowering(TM, createTLOF(TM)) { 158 Subtarget = &TM.getSubtarget<X86Subtarget>(); 159 X86ScalarSSEf64 = Subtarget->hasSSE2(); 160 X86ScalarSSEf32 = Subtarget->hasSSE1(); 161 162 RegInfo = TM.getRegisterInfo(); 163 TD = getDataLayout(); 164 165 // Set up the TargetLowering object. 166 static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }; 167 168 // X86 is weird, it always uses i8 for shift amounts and setcc results. 169 setBooleanContents(ZeroOrOneBooleanContent); 170 // X86-SSE is even stranger. It uses -1 or 0 for vector masks. 171 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); 172 173 // For 64-bit since we have so many registers use the ILP scheduler, for 174 // 32-bit code use the register pressure specific scheduling. 175 // For Atom, always use ILP scheduling. 
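  // (Both the Atom and 64-bit branches below currently resolve to Sched::ILP;
  // only 32-bit non-Atom targets fall back to Sched::RegPressure.)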
176 if (Subtarget->isAtom()) 177 setSchedulingPreference(Sched::ILP); 178 else if (Subtarget->is64Bit()) 179 setSchedulingPreference(Sched::ILP); 180 else 181 setSchedulingPreference(Sched::RegPressure); 182 setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister()); 183 184 // Bypass i32 with i8 on Atom when compiling with O2 185 if (Subtarget->hasSlowDivide() && TM.getOptLevel() >= CodeGenOpt::Default) 186 addBypassSlowDiv(32, 8); 187 188 if (Subtarget->isTargetWindows() && !Subtarget->isTargetCygMing()) { 189 // Setup Windows compiler runtime calls. 190 setLibcallName(RTLIB::SDIV_I64, "_alldiv"); 191 setLibcallName(RTLIB::UDIV_I64, "_aulldiv"); 192 setLibcallName(RTLIB::SREM_I64, "_allrem"); 193 setLibcallName(RTLIB::UREM_I64, "_aullrem"); 194 setLibcallName(RTLIB::MUL_I64, "_allmul"); 195 setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall); 196 setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall); 197 setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall); 198 setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall); 199 setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall); 200 201 // The _ftol2 runtime function has an unusual calling conv, which 202 // is modeled by a special pseudo-instruction. 203 setLibcallName(RTLIB::FPTOUINT_F64_I64, 0); 204 setLibcallName(RTLIB::FPTOUINT_F32_I64, 0); 205 setLibcallName(RTLIB::FPTOUINT_F64_I32, 0); 206 setLibcallName(RTLIB::FPTOUINT_F32_I32, 0); 207 } 208 209 if (Subtarget->isTargetDarwin()) { 210 // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp. 211 setUseUnderscoreSetJmp(false); 212 setUseUnderscoreLongJmp(false); 213 } else if (Subtarget->isTargetMingw()) { 214 // MS runtime is weird: it exports _setjmp, but longjmp! 215 setUseUnderscoreSetJmp(true); 216 setUseUnderscoreLongJmp(false); 217 } else { 218 setUseUnderscoreSetJmp(true); 219 setUseUnderscoreLongJmp(true); 220 } 221 222 // Set up the register classes. 223 addRegisterClass(MVT::i8, &X86::GR8RegClass); 224 addRegisterClass(MVT::i16, &X86::GR16RegClass); 225 addRegisterClass(MVT::i32, &X86::GR32RegClass); 226 if (Subtarget->is64Bit()) 227 addRegisterClass(MVT::i64, &X86::GR64RegClass); 228 229 setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote); 230 231 // We don't accept any truncstore of integer registers. 232 setTruncStoreAction(MVT::i64, MVT::i32, Expand); 233 setTruncStoreAction(MVT::i64, MVT::i16, Expand); 234 setTruncStoreAction(MVT::i64, MVT::i8 , Expand); 235 setTruncStoreAction(MVT::i32, MVT::i16, Expand); 236 setTruncStoreAction(MVT::i32, MVT::i8 , Expand); 237 setTruncStoreAction(MVT::i16, MVT::i8, Expand); 238 239 // SETOEQ and SETUNE require checking two conditions. 240 setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand); 241 setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand); 242 setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand); 243 setCondCodeAction(ISD::SETUNE, MVT::f32, Expand); 244 setCondCodeAction(ISD::SETUNE, MVT::f64, Expand); 245 setCondCodeAction(ISD::SETUNE, MVT::f80, Expand); 246 247 // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this 248 // operation. 
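  // This is safe because a zero-extended i1/i8/i16 value always lands in the
  // non-negative range of i32, so SINT_TO_FP on the widened value produces
  // the same result as the original unsigned conversion.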
249 setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote); 250 setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote); 251 setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote); 252 253 if (Subtarget->is64Bit()) { 254 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote); 255 setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom); 256 } else if (!TM.Options.UseSoftFloat) { 257 // We have an algorithm for SSE2->double, and we turn this into a 258 // 64-bit FILD followed by conditional FADD for other targets. 259 setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom); 260 // We have an algorithm for SSE2, and we turn this into a 64-bit 261 // FILD for other targets. 262 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Custom); 263 } 264 265 // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have 266 // this operation. 267 setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote); 268 setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote); 269 270 if (!TM.Options.UseSoftFloat) { 271 // SSE has no i16 to fp conversion, only i32 272 if (X86ScalarSSEf32) { 273 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote); 274 // f32 and f64 cases are Legal, f80 case is not 275 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom); 276 } else { 277 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Custom); 278 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom); 279 } 280 } else { 281 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote); 282 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Promote); 283 } 284 285 // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64 286 // are Legal, f80 is custom lowered. 287 setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom); 288 setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom); 289 290 // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTS's, as X86 doesn't have 291 // this operation. 292 setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote); 293 setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote); 294 295 if (X86ScalarSSEf32) { 296 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote); 297 // f32 and f64 cases are Legal, f80 case is not 298 setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom); 299 } else { 300 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Custom); 301 setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom); 302 } 303 304 // Handle FP_TO_UINT by promoting the destination to a larger signed 305 // conversion. 306 setOperationAction(ISD::FP_TO_UINT , MVT::i1 , Promote); 307 setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote); 308 setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote); 309 310 if (Subtarget->is64Bit()) { 311 setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Expand); 312 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote); 313 } else if (!TM.Options.UseSoftFloat) { 314 // Since AVX is a superset of SSE3, only check for SSE here. 315 if (Subtarget->hasSSE1() && !Subtarget->hasSSE3()) 316 // Expand FP_TO_UINT into a select. 317 // FIXME: We would like to use a Custom expander here eventually to do 318 // the optimal thing for SSE vs. the default expansion in the legalizer. 319 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Expand); 320 else 321 // With SSE3 we can use fisttpll to convert to a signed i64; without 322 // SSE, we're stuck with a fistpll. 
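  // The custom lowering goes through a signed FP->i64 conversion and keeps
  // the low 32 bits; this is correct because every unsigned i32 value is
  // representable in the non-negative range of an i64.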
323 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Custom); 324 } 325 326 if (isTargetFTOL()) { 327 // Use the _ftol2 runtime function, which has a pseudo-instruction 328 // to handle its weird calling convention. 329 setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Custom); 330 } 331 332 // TODO: when we have SSE, these could be more efficient, by using movd/movq. 333 if (!X86ScalarSSEf64) { 334 setOperationAction(ISD::BITCAST , MVT::f32 , Expand); 335 setOperationAction(ISD::BITCAST , MVT::i32 , Expand); 336 if (Subtarget->is64Bit()) { 337 setOperationAction(ISD::BITCAST , MVT::f64 , Expand); 338 // Without SSE, i64->f64 goes through memory. 339 setOperationAction(ISD::BITCAST , MVT::i64 , Expand); 340 } 341 } 342 343 // Scalar integer divide and remainder are lowered to use operations that 344 // produce two results, to match the available instructions. This exposes 345 // the two-result form to trivial CSE, which is able to combine x/y and x%y 346 // into a single instruction. 347 // 348 // Scalar integer multiply-high is also lowered to use two-result 349 // operations, to match the available instructions. However, plain multiply 350 // (low) operations are left as Legal, as there are single-result 351 // instructions for this in x86. Using the two-result multiply instructions 352 // when both high and low results are needed must be arranged by dagcombine. 353 for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) { 354 MVT VT = IntVTs[i]; 355 setOperationAction(ISD::MULHS, VT, Expand); 356 setOperationAction(ISD::MULHU, VT, Expand); 357 setOperationAction(ISD::SDIV, VT, Expand); 358 setOperationAction(ISD::UDIV, VT, Expand); 359 setOperationAction(ISD::SREM, VT, Expand); 360 setOperationAction(ISD::UREM, VT, Expand); 361 362 // Add/Sub overflow ops with MVT::Glues are lowered to EFLAGS dependences. 363 setOperationAction(ISD::ADDC, VT, Custom); 364 setOperationAction(ISD::ADDE, VT, Custom); 365 setOperationAction(ISD::SUBC, VT, Custom); 366 setOperationAction(ISD::SUBE, VT, Custom); 367 } 368 369 setOperationAction(ISD::BR_JT , MVT::Other, Expand); 370 setOperationAction(ISD::BRCOND , MVT::Other, Custom); 371 setOperationAction(ISD::BR_CC , MVT::Other, Expand); 372 setOperationAction(ISD::SELECT_CC , MVT::Other, Expand); 373 if (Subtarget->is64Bit()) 374 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal); 375 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Legal); 376 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal); 377 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand); 378 setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand); 379 setOperationAction(ISD::FREM , MVT::f32 , Expand); 380 setOperationAction(ISD::FREM , MVT::f64 , Expand); 381 setOperationAction(ISD::FREM , MVT::f80 , Expand); 382 setOperationAction(ISD::FLT_ROUNDS_ , MVT::i32 , Custom); 383 384 // Promote the i8 variants and force them on up to i32 which has a shorter 385 // encoding. 
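  // (There is no 8-bit form of BSF/TZCNT, and the 32-bit form also avoids
  // the 16-bit operand-size prefix.)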
386 setOperationAction(ISD::CTTZ , MVT::i8 , Promote); 387 AddPromotedToType (ISD::CTTZ , MVT::i8 , MVT::i32); 388 setOperationAction(ISD::CTTZ_ZERO_UNDEF , MVT::i8 , Promote); 389 AddPromotedToType (ISD::CTTZ_ZERO_UNDEF , MVT::i8 , MVT::i32); 390 if (Subtarget->hasBMI()) { 391 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16 , Expand); 392 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32 , Expand); 393 if (Subtarget->is64Bit()) 394 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand); 395 } else { 396 setOperationAction(ISD::CTTZ , MVT::i16 , Custom); 397 setOperationAction(ISD::CTTZ , MVT::i32 , Custom); 398 if (Subtarget->is64Bit()) 399 setOperationAction(ISD::CTTZ , MVT::i64 , Custom); 400 } 401 402 if (Subtarget->hasLZCNT()) { 403 // When promoting the i8 variants, force them to i32 for a shorter 404 // encoding. 405 setOperationAction(ISD::CTLZ , MVT::i8 , Promote); 406 AddPromotedToType (ISD::CTLZ , MVT::i8 , MVT::i32); 407 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Promote); 408 AddPromotedToType (ISD::CTLZ_ZERO_UNDEF, MVT::i8 , MVT::i32); 409 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Expand); 410 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Expand); 411 if (Subtarget->is64Bit()) 412 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand); 413 } else { 414 setOperationAction(ISD::CTLZ , MVT::i8 , Custom); 415 setOperationAction(ISD::CTLZ , MVT::i16 , Custom); 416 setOperationAction(ISD::CTLZ , MVT::i32 , Custom); 417 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Custom); 418 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Custom); 419 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Custom); 420 if (Subtarget->is64Bit()) { 421 setOperationAction(ISD::CTLZ , MVT::i64 , Custom); 422 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom); 423 } 424 } 425 426 if (Subtarget->hasPOPCNT()) { 427 setOperationAction(ISD::CTPOP , MVT::i8 , Promote); 428 } else { 429 setOperationAction(ISD::CTPOP , MVT::i8 , Expand); 430 setOperationAction(ISD::CTPOP , MVT::i16 , Expand); 431 setOperationAction(ISD::CTPOP , MVT::i32 , Expand); 432 if (Subtarget->is64Bit()) 433 setOperationAction(ISD::CTPOP , MVT::i64 , Expand); 434 } 435 436 setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom); 437 setOperationAction(ISD::BSWAP , MVT::i16 , Expand); 438 439 // These should be promoted to a larger select which is supported. 440 setOperationAction(ISD::SELECT , MVT::i1 , Promote); 441 // X86 wants to expand cmov itself. 
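  // The custom lowering turns these selects into X86ISD::CMOV nodes; on
  // subtargets without CMOV the resulting pseudo is later expanded into a
  // branch sequence.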
  setOperationAction(ISD::SELECT , MVT::i8 , Custom);
  setOperationAction(ISD::SELECT , MVT::i16 , Custom);
  setOperationAction(ISD::SELECT , MVT::i32 , Custom);
  setOperationAction(ISD::SELECT , MVT::f32 , Custom);
  setOperationAction(ISD::SELECT , MVT::f64 , Custom);
  setOperationAction(ISD::SELECT , MVT::f80 , Custom);
  setOperationAction(ISD::SETCC , MVT::i8 , Custom);
  setOperationAction(ISD::SETCC , MVT::i16 , Custom);
  setOperationAction(ISD::SETCC , MVT::i32 , Custom);
  setOperationAction(ISD::SETCC , MVT::f32 , Custom);
  setOperationAction(ISD::SETCC , MVT::f64 , Custom);
  setOperationAction(ISD::SETCC , MVT::f80 , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT , MVT::i64 , Custom);
    setOperationAction(ISD::SETCC , MVT::i64 , Custom);
  }
  setOperationAction(ISD::EH_RETURN , MVT::Other, Custom);
  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling but a light-weight setjmp/longjmp replacement to
  // support continuations, user-level threading, etc. As a result, no other
  // SjLj exception interfaces are implemented, so please don't build your
  // own exception handling based on them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // Darwin ABI issue.
  setOperationAction(ISD::ConstantPool , MVT::i32 , Custom);
  setOperationAction(ISD::JumpTable , MVT::i32 , Custom);
  setOperationAction(ISD::GlobalAddress , MVT::i32 , Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32 , Custom);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::ExternalSymbol , MVT::i32 , Custom);
  setOperationAction(ISD::BlockAddress , MVT::i32 , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool , MVT::i64 , Custom);
    setOperationAction(ISD::JumpTable , MVT::i64 , Custom);
    setOperationAction(ISD::GlobalAddress , MVT::i64 , Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64 , Custom);
    setOperationAction(ISD::BlockAddress , MVT::i64 , Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS , MVT::i32 , Custom);
  setOperationAction(ISD::SRA_PARTS , MVT::i32 , Custom);
  setOperationAction(ISD::SRL_PARTS , MVT::i32 , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS , MVT::i64 , Custom);
    setOperationAction(ISD::SRA_PARTS , MVT::i64 , Custom);
    setOperationAction(ISD::SRL_PARTS , MVT::i64 , Custom);
  }

  if (Subtarget->hasSSE1())
    setOperationAction(ISD::PREFETCH , MVT::Other, Legal);

  setOperationAction(ISD::MEMBARRIER , MVT::Other, Custom);
  setOperationAction(ISD::ATOMIC_FENCE , MVT::Other, Custom);

  // On X86 and X86-64, atomic operations are lowered to locked instructions.
  // Locked instructions, in turn, have implicit fence semantics (all memory
  // operations are flushed before issuing the locked instruction, and they
  // are not buffered), so we can fold away the common pattern of
  // fence-atomic-fence.
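  // For example, the IR sequence "fence seq_cst; atomicrmw add; fence
  // seq_cst" needs no explicit MFENCE instructions, because the
  // LOCK-prefixed RMW already provides the required ordering.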
505 setShouldFoldAtomicFences(true); 506 507 // Expand certain atomics 508 for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) { 509 MVT VT = IntVTs[i]; 510 setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Custom); 511 setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom); 512 setOperationAction(ISD::ATOMIC_STORE, VT, Custom); 513 } 514 515 if (!Subtarget->is64Bit()) { 516 setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom); 517 setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom); 518 setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom); 519 setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom); 520 setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Custom); 521 setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Custom); 522 setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Custom); 523 setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Custom); 524 setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i64, Custom); 525 setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i64, Custom); 526 setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i64, Custom); 527 setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i64, Custom); 528 } 529 530 if (Subtarget->hasCmpxchg16b()) { 531 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i128, Custom); 532 } 533 534 // FIXME - use subtarget debug flags 535 if (!Subtarget->isTargetDarwin() && 536 !Subtarget->isTargetELF() && 537 !Subtarget->isTargetCygMing()) { 538 setOperationAction(ISD::EH_LABEL, MVT::Other, Expand); 539 } 540 541 setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand); 542 setOperationAction(ISD::EHSELECTION, MVT::i64, Expand); 543 setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand); 544 setOperationAction(ISD::EHSELECTION, MVT::i32, Expand); 545 if (Subtarget->is64Bit()) { 546 setExceptionPointerRegister(X86::RAX); 547 setExceptionSelectorRegister(X86::RDX); 548 } else { 549 setExceptionPointerRegister(X86::EAX); 550 setExceptionSelectorRegister(X86::EDX); 551 } 552 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom); 553 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom); 554 555 setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom); 556 setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom); 557 558 setOperationAction(ISD::TRAP, MVT::Other, Legal); 559 setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal); 560 561 // VASTART needs to be custom lowered to use the VarArgsFrameIndex 562 setOperationAction(ISD::VASTART , MVT::Other, Custom); 563 setOperationAction(ISD::VAEND , MVT::Other, Expand); 564 if (Subtarget->is64Bit()) { 565 setOperationAction(ISD::VAARG , MVT::Other, Custom); 566 setOperationAction(ISD::VACOPY , MVT::Other, Custom); 567 } else { 568 setOperationAction(ISD::VAARG , MVT::Other, Expand); 569 setOperationAction(ISD::VACOPY , MVT::Other, Expand); 570 } 571 572 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); 573 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); 574 575 if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho()) 576 setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ? 577 MVT::i64 : MVT::i32, Custom); 578 else if (TM.Options.EnableSegmentedStacks) 579 setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ? 580 MVT::i64 : MVT::i32, Custom); 581 else 582 setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ? 583 MVT::i64 : MVT::i32, Expand); 584 585 if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) { 586 // f32 and f64 use SSE. 587 // Set up the FP register classes. 
588 addRegisterClass(MVT::f32, &X86::FR32RegClass); 589 addRegisterClass(MVT::f64, &X86::FR64RegClass); 590 591 // Use ANDPD to simulate FABS. 592 setOperationAction(ISD::FABS , MVT::f64, Custom); 593 setOperationAction(ISD::FABS , MVT::f32, Custom); 594 595 // Use XORP to simulate FNEG. 596 setOperationAction(ISD::FNEG , MVT::f64, Custom); 597 setOperationAction(ISD::FNEG , MVT::f32, Custom); 598 599 // Use ANDPD and ORPD to simulate FCOPYSIGN. 600 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom); 601 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); 602 603 // Lower this to FGETSIGNx86 plus an AND. 604 setOperationAction(ISD::FGETSIGN, MVT::i64, Custom); 605 setOperationAction(ISD::FGETSIGN, MVT::i32, Custom); 606 607 // We don't support sin/cos/fmod 608 setOperationAction(ISD::FSIN , MVT::f64, Expand); 609 setOperationAction(ISD::FCOS , MVT::f64, Expand); 610 setOperationAction(ISD::FSIN , MVT::f32, Expand); 611 setOperationAction(ISD::FCOS , MVT::f32, Expand); 612 613 // Expand FP immediates into loads from the stack, except for the special 614 // cases we handle. 615 addLegalFPImmediate(APFloat(+0.0)); // xorpd 616 addLegalFPImmediate(APFloat(+0.0f)); // xorps 617 } else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) { 618 // Use SSE for f32, x87 for f64. 619 // Set up the FP register classes. 620 addRegisterClass(MVT::f32, &X86::FR32RegClass); 621 addRegisterClass(MVT::f64, &X86::RFP64RegClass); 622 623 // Use ANDPS to simulate FABS. 624 setOperationAction(ISD::FABS , MVT::f32, Custom); 625 626 // Use XORP to simulate FNEG. 627 setOperationAction(ISD::FNEG , MVT::f32, Custom); 628 629 setOperationAction(ISD::UNDEF, MVT::f64, Expand); 630 631 // Use ANDPS and ORPS to simulate FCOPYSIGN. 632 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 633 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); 634 635 // We don't support sin/cos/fmod 636 setOperationAction(ISD::FSIN , MVT::f32, Expand); 637 setOperationAction(ISD::FCOS , MVT::f32, Expand); 638 639 // Special cases we handle for FP constants. 640 addLegalFPImmediate(APFloat(+0.0f)); // xorps 641 addLegalFPImmediate(APFloat(+0.0)); // FLD0 642 addLegalFPImmediate(APFloat(+1.0)); // FLD1 643 addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS 644 addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS 645 646 if (!TM.Options.UnsafeFPMath) { 647 setOperationAction(ISD::FSIN , MVT::f64 , Expand); 648 setOperationAction(ISD::FCOS , MVT::f64 , Expand); 649 } 650 } else if (!TM.Options.UseSoftFloat) { 651 // f32 and f64 in x87. 652 // Set up the FP register classes. 
653 addRegisterClass(MVT::f64, &X86::RFP64RegClass); 654 addRegisterClass(MVT::f32, &X86::RFP32RegClass); 655 656 setOperationAction(ISD::UNDEF, MVT::f64, Expand); 657 setOperationAction(ISD::UNDEF, MVT::f32, Expand); 658 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 659 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); 660 661 if (!TM.Options.UnsafeFPMath) { 662 setOperationAction(ISD::FSIN , MVT::f32 , Expand); 663 setOperationAction(ISD::FSIN , MVT::f64 , Expand); 664 setOperationAction(ISD::FCOS , MVT::f32 , Expand); 665 setOperationAction(ISD::FCOS , MVT::f64 , Expand); 666 } 667 addLegalFPImmediate(APFloat(+0.0)); // FLD0 668 addLegalFPImmediate(APFloat(+1.0)); // FLD1 669 addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS 670 addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS 671 addLegalFPImmediate(APFloat(+0.0f)); // FLD0 672 addLegalFPImmediate(APFloat(+1.0f)); // FLD1 673 addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS 674 addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS 675 } 676 677 // We don't support FMA. 678 setOperationAction(ISD::FMA, MVT::f64, Expand); 679 setOperationAction(ISD::FMA, MVT::f32, Expand); 680 681 // Long double always uses X87. 682 if (!TM.Options.UseSoftFloat) { 683 addRegisterClass(MVT::f80, &X86::RFP80RegClass); 684 setOperationAction(ISD::UNDEF, MVT::f80, Expand); 685 setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand); 686 { 687 APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended); 688 addLegalFPImmediate(TmpFlt); // FLD0 689 TmpFlt.changeSign(); 690 addLegalFPImmediate(TmpFlt); // FLD0/FCHS 691 692 bool ignored; 693 APFloat TmpFlt2(+1.0); 694 TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven, 695 &ignored); 696 addLegalFPImmediate(TmpFlt2); // FLD1 697 TmpFlt2.changeSign(); 698 addLegalFPImmediate(TmpFlt2); // FLD1/FCHS 699 } 700 701 if (!TM.Options.UnsafeFPMath) { 702 setOperationAction(ISD::FSIN , MVT::f80 , Expand); 703 setOperationAction(ISD::FCOS , MVT::f80 , Expand); 704 } 705 706 setOperationAction(ISD::FFLOOR, MVT::f80, Expand); 707 setOperationAction(ISD::FCEIL, MVT::f80, Expand); 708 setOperationAction(ISD::FTRUNC, MVT::f80, Expand); 709 setOperationAction(ISD::FRINT, MVT::f80, Expand); 710 setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand); 711 setOperationAction(ISD::FMA, MVT::f80, Expand); 712 } 713 714 // Always use a library call for pow. 715 setOperationAction(ISD::FPOW , MVT::f32 , Expand); 716 setOperationAction(ISD::FPOW , MVT::f64 , Expand); 717 setOperationAction(ISD::FPOW , MVT::f80 , Expand); 718 719 setOperationAction(ISD::FLOG, MVT::f80, Expand); 720 setOperationAction(ISD::FLOG2, MVT::f80, Expand); 721 setOperationAction(ISD::FLOG10, MVT::f80, Expand); 722 setOperationAction(ISD::FEXP, MVT::f80, Expand); 723 setOperationAction(ISD::FEXP2, MVT::f80, Expand); 724 725 // First set operation action for all vector types to either promote 726 // (for widening) or expand (for scalarization). Then we will selectively 727 // turn on ones that can be effectively codegen'd. 
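  // Everything starts out Expand here; the feature-gated blocks below (MMX,
  // SSE1, SSE2, SSE4.1, AVX, AVX2) then re-enable the operations each ISA
  // can actually handle as Legal or Custom.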
728 for (int VT = MVT::FIRST_VECTOR_VALUETYPE; 729 VT <= MVT::LAST_VECTOR_VALUETYPE; ++VT) { 730 setOperationAction(ISD::ADD , (MVT::SimpleValueType)VT, Expand); 731 setOperationAction(ISD::SUB , (MVT::SimpleValueType)VT, Expand); 732 setOperationAction(ISD::FADD, (MVT::SimpleValueType)VT, Expand); 733 setOperationAction(ISD::FNEG, (MVT::SimpleValueType)VT, Expand); 734 setOperationAction(ISD::FSUB, (MVT::SimpleValueType)VT, Expand); 735 setOperationAction(ISD::MUL , (MVT::SimpleValueType)VT, Expand); 736 setOperationAction(ISD::FMUL, (MVT::SimpleValueType)VT, Expand); 737 setOperationAction(ISD::SDIV, (MVT::SimpleValueType)VT, Expand); 738 setOperationAction(ISD::UDIV, (MVT::SimpleValueType)VT, Expand); 739 setOperationAction(ISD::FDIV, (MVT::SimpleValueType)VT, Expand); 740 setOperationAction(ISD::SREM, (MVT::SimpleValueType)VT, Expand); 741 setOperationAction(ISD::UREM, (MVT::SimpleValueType)VT, Expand); 742 setOperationAction(ISD::LOAD, (MVT::SimpleValueType)VT, Expand); 743 setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::SimpleValueType)VT, Expand); 744 setOperationAction(ISD::EXTRACT_VECTOR_ELT,(MVT::SimpleValueType)VT,Expand); 745 setOperationAction(ISD::INSERT_VECTOR_ELT,(MVT::SimpleValueType)VT, Expand); 746 setOperationAction(ISD::EXTRACT_SUBVECTOR,(MVT::SimpleValueType)VT,Expand); 747 setOperationAction(ISD::INSERT_SUBVECTOR,(MVT::SimpleValueType)VT,Expand); 748 setOperationAction(ISD::FABS, (MVT::SimpleValueType)VT, Expand); 749 setOperationAction(ISD::FSIN, (MVT::SimpleValueType)VT, Expand); 750 setOperationAction(ISD::FCOS, (MVT::SimpleValueType)VT, Expand); 751 setOperationAction(ISD::FREM, (MVT::SimpleValueType)VT, Expand); 752 setOperationAction(ISD::FMA, (MVT::SimpleValueType)VT, Expand); 753 setOperationAction(ISD::FPOWI, (MVT::SimpleValueType)VT, Expand); 754 setOperationAction(ISD::FSQRT, (MVT::SimpleValueType)VT, Expand); 755 setOperationAction(ISD::FCOPYSIGN, (MVT::SimpleValueType)VT, Expand); 756 setOperationAction(ISD::FFLOOR, (MVT::SimpleValueType)VT, Expand); 757 setOperationAction(ISD::SMUL_LOHI, (MVT::SimpleValueType)VT, Expand); 758 setOperationAction(ISD::UMUL_LOHI, (MVT::SimpleValueType)VT, Expand); 759 setOperationAction(ISD::SDIVREM, (MVT::SimpleValueType)VT, Expand); 760 setOperationAction(ISD::UDIVREM, (MVT::SimpleValueType)VT, Expand); 761 setOperationAction(ISD::FPOW, (MVT::SimpleValueType)VT, Expand); 762 setOperationAction(ISD::CTPOP, (MVT::SimpleValueType)VT, Expand); 763 setOperationAction(ISD::CTTZ, (MVT::SimpleValueType)VT, Expand); 764 setOperationAction(ISD::CTTZ_ZERO_UNDEF, (MVT::SimpleValueType)VT, Expand); 765 setOperationAction(ISD::CTLZ, (MVT::SimpleValueType)VT, Expand); 766 setOperationAction(ISD::CTLZ_ZERO_UNDEF, (MVT::SimpleValueType)VT, Expand); 767 setOperationAction(ISD::SHL, (MVT::SimpleValueType)VT, Expand); 768 setOperationAction(ISD::SRA, (MVT::SimpleValueType)VT, Expand); 769 setOperationAction(ISD::SRL, (MVT::SimpleValueType)VT, Expand); 770 setOperationAction(ISD::ROTL, (MVT::SimpleValueType)VT, Expand); 771 setOperationAction(ISD::ROTR, (MVT::SimpleValueType)VT, Expand); 772 setOperationAction(ISD::BSWAP, (MVT::SimpleValueType)VT, Expand); 773 setOperationAction(ISD::SETCC, (MVT::SimpleValueType)VT, Expand); 774 setOperationAction(ISD::FLOG, (MVT::SimpleValueType)VT, Expand); 775 setOperationAction(ISD::FLOG2, (MVT::SimpleValueType)VT, Expand); 776 setOperationAction(ISD::FLOG10, (MVT::SimpleValueType)VT, Expand); 777 setOperationAction(ISD::FEXP, (MVT::SimpleValueType)VT, Expand); 778 setOperationAction(ISD::FEXP2, 
(MVT::SimpleValueType)VT, Expand); 779 setOperationAction(ISD::FP_TO_UINT, (MVT::SimpleValueType)VT, Expand); 780 setOperationAction(ISD::FP_TO_SINT, (MVT::SimpleValueType)VT, Expand); 781 setOperationAction(ISD::UINT_TO_FP, (MVT::SimpleValueType)VT, Expand); 782 setOperationAction(ISD::SINT_TO_FP, (MVT::SimpleValueType)VT, Expand); 783 setOperationAction(ISD::SIGN_EXTEND_INREG, (MVT::SimpleValueType)VT,Expand); 784 setOperationAction(ISD::TRUNCATE, (MVT::SimpleValueType)VT, Expand); 785 setOperationAction(ISD::SIGN_EXTEND, (MVT::SimpleValueType)VT, Expand); 786 setOperationAction(ISD::ZERO_EXTEND, (MVT::SimpleValueType)VT, Expand); 787 setOperationAction(ISD::ANY_EXTEND, (MVT::SimpleValueType)VT, Expand); 788 setOperationAction(ISD::VSELECT, (MVT::SimpleValueType)VT, Expand); 789 for (int InnerVT = MVT::FIRST_VECTOR_VALUETYPE; 790 InnerVT <= MVT::LAST_VECTOR_VALUETYPE; ++InnerVT) 791 setTruncStoreAction((MVT::SimpleValueType)VT, 792 (MVT::SimpleValueType)InnerVT, Expand); 793 setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT, Expand); 794 setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT, Expand); 795 setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT, Expand); 796 } 797 798 // FIXME: In order to prevent SSE instructions being expanded to MMX ones 799 // with -msoft-float, disable use of MMX as well. 800 if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) { 801 addRegisterClass(MVT::x86mmx, &X86::VR64RegClass); 802 // No operations on x86mmx supported, everything uses intrinsics. 803 } 804 805 // MMX-sized vectors (other than x86mmx) are expected to be expanded 806 // into smaller operations. 807 setOperationAction(ISD::MULHS, MVT::v8i8, Expand); 808 setOperationAction(ISD::MULHS, MVT::v4i16, Expand); 809 setOperationAction(ISD::MULHS, MVT::v2i32, Expand); 810 setOperationAction(ISD::MULHS, MVT::v1i64, Expand); 811 setOperationAction(ISD::AND, MVT::v8i8, Expand); 812 setOperationAction(ISD::AND, MVT::v4i16, Expand); 813 setOperationAction(ISD::AND, MVT::v2i32, Expand); 814 setOperationAction(ISD::AND, MVT::v1i64, Expand); 815 setOperationAction(ISD::OR, MVT::v8i8, Expand); 816 setOperationAction(ISD::OR, MVT::v4i16, Expand); 817 setOperationAction(ISD::OR, MVT::v2i32, Expand); 818 setOperationAction(ISD::OR, MVT::v1i64, Expand); 819 setOperationAction(ISD::XOR, MVT::v8i8, Expand); 820 setOperationAction(ISD::XOR, MVT::v4i16, Expand); 821 setOperationAction(ISD::XOR, MVT::v2i32, Expand); 822 setOperationAction(ISD::XOR, MVT::v1i64, Expand); 823 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Expand); 824 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Expand); 825 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Expand); 826 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Expand); 827 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand); 828 setOperationAction(ISD::SELECT, MVT::v8i8, Expand); 829 setOperationAction(ISD::SELECT, MVT::v4i16, Expand); 830 setOperationAction(ISD::SELECT, MVT::v2i32, Expand); 831 setOperationAction(ISD::SELECT, MVT::v1i64, Expand); 832 setOperationAction(ISD::BITCAST, MVT::v8i8, Expand); 833 setOperationAction(ISD::BITCAST, MVT::v4i16, Expand); 834 setOperationAction(ISD::BITCAST, MVT::v2i32, Expand); 835 setOperationAction(ISD::BITCAST, MVT::v1i64, Expand); 836 837 if (!TM.Options.UseSoftFloat && Subtarget->hasSSE1()) { 838 addRegisterClass(MVT::v4f32, &X86::VR128RegClass); 839 840 setOperationAction(ISD::FADD, MVT::v4f32, Legal); 841 setOperationAction(ISD::FSUB, MVT::v4f32, Legal); 842 
setOperationAction(ISD::FMUL, MVT::v4f32, Legal); 843 setOperationAction(ISD::FDIV, MVT::v4f32, Legal); 844 setOperationAction(ISD::FSQRT, MVT::v4f32, Legal); 845 setOperationAction(ISD::FNEG, MVT::v4f32, Custom); 846 setOperationAction(ISD::FABS, MVT::v4f32, Custom); 847 setOperationAction(ISD::LOAD, MVT::v4f32, Legal); 848 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom); 849 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom); 850 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom); 851 setOperationAction(ISD::SELECT, MVT::v4f32, Custom); 852 } 853 854 if (!TM.Options.UseSoftFloat && Subtarget->hasSSE2()) { 855 addRegisterClass(MVT::v2f64, &X86::VR128RegClass); 856 857 // FIXME: Unfortunately -soft-float and -no-implicit-float means XMM 858 // registers cannot be used even for integer operations. 859 addRegisterClass(MVT::v16i8, &X86::VR128RegClass); 860 addRegisterClass(MVT::v8i16, &X86::VR128RegClass); 861 addRegisterClass(MVT::v4i32, &X86::VR128RegClass); 862 addRegisterClass(MVT::v2i64, &X86::VR128RegClass); 863 864 setOperationAction(ISD::ADD, MVT::v16i8, Legal); 865 setOperationAction(ISD::ADD, MVT::v8i16, Legal); 866 setOperationAction(ISD::ADD, MVT::v4i32, Legal); 867 setOperationAction(ISD::ADD, MVT::v2i64, Legal); 868 setOperationAction(ISD::MUL, MVT::v2i64, Custom); 869 setOperationAction(ISD::SUB, MVT::v16i8, Legal); 870 setOperationAction(ISD::SUB, MVT::v8i16, Legal); 871 setOperationAction(ISD::SUB, MVT::v4i32, Legal); 872 setOperationAction(ISD::SUB, MVT::v2i64, Legal); 873 setOperationAction(ISD::MUL, MVT::v8i16, Legal); 874 setOperationAction(ISD::FADD, MVT::v2f64, Legal); 875 setOperationAction(ISD::FSUB, MVT::v2f64, Legal); 876 setOperationAction(ISD::FMUL, MVT::v2f64, Legal); 877 setOperationAction(ISD::FDIV, MVT::v2f64, Legal); 878 setOperationAction(ISD::FSQRT, MVT::v2f64, Legal); 879 setOperationAction(ISD::FNEG, MVT::v2f64, Custom); 880 setOperationAction(ISD::FABS, MVT::v2f64, Custom); 881 882 setOperationAction(ISD::SETCC, MVT::v2i64, Custom); 883 setOperationAction(ISD::SETCC, MVT::v16i8, Custom); 884 setOperationAction(ISD::SETCC, MVT::v8i16, Custom); 885 setOperationAction(ISD::SETCC, MVT::v4i32, Custom); 886 887 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom); 888 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom); 889 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom); 890 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom); 891 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom); 892 893 // Custom lower build_vector, vector_shuffle, and extract_vector_elt. 
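  // (Within this range only v16i8, v8i16 and v4i32 pass the checks below;
  // v2i64 and v2f64 get explicit entries right after the loop, and v4f32 was
  // already handled in the SSE1 block above.)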
894 for (int i = MVT::v16i8; i != MVT::v2i64; ++i) { 895 MVT VT = (MVT::SimpleValueType)i; 896 // Do not attempt to custom lower non-power-of-2 vectors 897 if (!isPowerOf2_32(VT.getVectorNumElements())) 898 continue; 899 // Do not attempt to custom lower non-128-bit vectors 900 if (!VT.is128BitVector()) 901 continue; 902 setOperationAction(ISD::BUILD_VECTOR, VT, Custom); 903 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); 904 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); 905 } 906 907 setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom); 908 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom); 909 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom); 910 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom); 911 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom); 912 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom); 913 914 if (Subtarget->is64Bit()) { 915 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom); 916 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom); 917 } 918 919 // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64. 920 for (int i = MVT::v16i8; i != MVT::v2i64; ++i) { 921 MVT VT = (MVT::SimpleValueType)i; 922 923 // Do not attempt to promote non-128-bit vectors 924 if (!VT.is128BitVector()) 925 continue; 926 927 setOperationAction(ISD::AND, VT, Promote); 928 AddPromotedToType (ISD::AND, VT, MVT::v2i64); 929 setOperationAction(ISD::OR, VT, Promote); 930 AddPromotedToType (ISD::OR, VT, MVT::v2i64); 931 setOperationAction(ISD::XOR, VT, Promote); 932 AddPromotedToType (ISD::XOR, VT, MVT::v2i64); 933 setOperationAction(ISD::LOAD, VT, Promote); 934 AddPromotedToType (ISD::LOAD, VT, MVT::v2i64); 935 setOperationAction(ISD::SELECT, VT, Promote); 936 AddPromotedToType (ISD::SELECT, VT, MVT::v2i64); 937 } 938 939 setTruncStoreAction(MVT::f64, MVT::f32, Expand); 940 941 // Custom lower v2i64 and v2f64 selects. 942 setOperationAction(ISD::LOAD, MVT::v2f64, Legal); 943 setOperationAction(ISD::LOAD, MVT::v2i64, Legal); 944 setOperationAction(ISD::SELECT, MVT::v2f64, Custom); 945 setOperationAction(ISD::SELECT, MVT::v2i64, Custom); 946 947 setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal); 948 setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal); 949 950 setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom); 951 setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom); 952 // As there is no 64-bit GPR available, we need build a special custom 953 // sequence to convert from v2i32 to v2f32. 
    if (!Subtarget->is64Bit())
      setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);

    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);

    setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, Legal);
  }

  if (Subtarget->hasSSE41()) {
    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FRINT, MVT::f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);

    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);
    setOperationAction(ISD::VSELECT, MVT::v2i64, Legal);
    setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
    setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);

    // i8 and i16 vectors are custom, because the source register and source
    // memory operand types are not the same width. f32 vectors are custom
    // since the immediate controlling the insert encodes additional
    // information.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);

    // FIXME: these should be Legal but that's only for the case where
    // the index is constant. For now custom expand to deal with that.
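    // (PINSRQ/PEXTRQ only accept an immediate lane index, so a variable
    // index cannot be matched directly to the instruction.)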
1003 if (Subtarget->is64Bit()) { 1004 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom); 1005 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom); 1006 } 1007 } 1008 1009 if (Subtarget->hasSSE2()) { 1010 setOperationAction(ISD::SRL, MVT::v8i16, Custom); 1011 setOperationAction(ISD::SRL, MVT::v16i8, Custom); 1012 1013 setOperationAction(ISD::SHL, MVT::v8i16, Custom); 1014 setOperationAction(ISD::SHL, MVT::v16i8, Custom); 1015 1016 setOperationAction(ISD::SRA, MVT::v8i16, Custom); 1017 setOperationAction(ISD::SRA, MVT::v16i8, Custom); 1018 1019 if (Subtarget->hasAVX2()) { 1020 setOperationAction(ISD::SRL, MVT::v2i64, Legal); 1021 setOperationAction(ISD::SRL, MVT::v4i32, Legal); 1022 1023 setOperationAction(ISD::SHL, MVT::v2i64, Legal); 1024 setOperationAction(ISD::SHL, MVT::v4i32, Legal); 1025 1026 setOperationAction(ISD::SRA, MVT::v4i32, Legal); 1027 } else { 1028 setOperationAction(ISD::SRL, MVT::v2i64, Custom); 1029 setOperationAction(ISD::SRL, MVT::v4i32, Custom); 1030 1031 setOperationAction(ISD::SHL, MVT::v2i64, Custom); 1032 setOperationAction(ISD::SHL, MVT::v4i32, Custom); 1033 1034 setOperationAction(ISD::SRA, MVT::v4i32, Custom); 1035 } 1036 } 1037 1038 if (!TM.Options.UseSoftFloat && Subtarget->hasAVX()) { 1039 addRegisterClass(MVT::v32i8, &X86::VR256RegClass); 1040 addRegisterClass(MVT::v16i16, &X86::VR256RegClass); 1041 addRegisterClass(MVT::v8i32, &X86::VR256RegClass); 1042 addRegisterClass(MVT::v8f32, &X86::VR256RegClass); 1043 addRegisterClass(MVT::v4i64, &X86::VR256RegClass); 1044 addRegisterClass(MVT::v4f64, &X86::VR256RegClass); 1045 1046 setOperationAction(ISD::LOAD, MVT::v8f32, Legal); 1047 setOperationAction(ISD::LOAD, MVT::v4f64, Legal); 1048 setOperationAction(ISD::LOAD, MVT::v4i64, Legal); 1049 1050 setOperationAction(ISD::FADD, MVT::v8f32, Legal); 1051 setOperationAction(ISD::FSUB, MVT::v8f32, Legal); 1052 setOperationAction(ISD::FMUL, MVT::v8f32, Legal); 1053 setOperationAction(ISD::FDIV, MVT::v8f32, Legal); 1054 setOperationAction(ISD::FSQRT, MVT::v8f32, Legal); 1055 setOperationAction(ISD::FFLOOR, MVT::v8f32, Legal); 1056 setOperationAction(ISD::FNEG, MVT::v8f32, Custom); 1057 setOperationAction(ISD::FABS, MVT::v8f32, Custom); 1058 1059 setOperationAction(ISD::FADD, MVT::v4f64, Legal); 1060 setOperationAction(ISD::FSUB, MVT::v4f64, Legal); 1061 setOperationAction(ISD::FMUL, MVT::v4f64, Legal); 1062 setOperationAction(ISD::FDIV, MVT::v4f64, Legal); 1063 setOperationAction(ISD::FSQRT, MVT::v4f64, Legal); 1064 setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal); 1065 setOperationAction(ISD::FNEG, MVT::v4f64, Custom); 1066 setOperationAction(ISD::FABS, MVT::v4f64, Custom); 1067 1068 setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom); 1069 1070 setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom); 1071 1072 setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal); 1073 setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal); 1074 setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal); 1075 1076 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom); 1077 setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom); 1078 setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom); 1079 1080 setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, Legal); 1081 1082 setOperationAction(ISD::SRL, MVT::v16i16, Custom); 1083 setOperationAction(ISD::SRL, MVT::v32i8, Custom); 1084 1085 setOperationAction(ISD::SHL, MVT::v16i16, Custom); 1086 setOperationAction(ISD::SHL, MVT::v32i8, Custom); 1087 1088 setOperationAction(ISD::SRA, MVT::v16i16, Custom); 1089 
setOperationAction(ISD::SRA, MVT::v32i8, Custom); 1090 1091 setOperationAction(ISD::SETCC, MVT::v32i8, Custom); 1092 setOperationAction(ISD::SETCC, MVT::v16i16, Custom); 1093 setOperationAction(ISD::SETCC, MVT::v8i32, Custom); 1094 setOperationAction(ISD::SETCC, MVT::v4i64, Custom); 1095 1096 setOperationAction(ISD::SELECT, MVT::v4f64, Custom); 1097 setOperationAction(ISD::SELECT, MVT::v4i64, Custom); 1098 setOperationAction(ISD::SELECT, MVT::v8f32, Custom); 1099 1100 setOperationAction(ISD::VSELECT, MVT::v4f64, Legal); 1101 setOperationAction(ISD::VSELECT, MVT::v4i64, Legal); 1102 setOperationAction(ISD::VSELECT, MVT::v8i32, Legal); 1103 setOperationAction(ISD::VSELECT, MVT::v8f32, Legal); 1104 1105 if (Subtarget->hasFMA() || Subtarget->hasFMA4()) { 1106 setOperationAction(ISD::FMA, MVT::v8f32, Custom); 1107 setOperationAction(ISD::FMA, MVT::v4f64, Custom); 1108 setOperationAction(ISD::FMA, MVT::v4f32, Custom); 1109 setOperationAction(ISD::FMA, MVT::v2f64, Custom); 1110 setOperationAction(ISD::FMA, MVT::f32, Custom); 1111 setOperationAction(ISD::FMA, MVT::f64, Custom); 1112 } 1113 1114 if (Subtarget->hasAVX2()) { 1115 setOperationAction(ISD::ADD, MVT::v4i64, Legal); 1116 setOperationAction(ISD::ADD, MVT::v8i32, Legal); 1117 setOperationAction(ISD::ADD, MVT::v16i16, Legal); 1118 setOperationAction(ISD::ADD, MVT::v32i8, Legal); 1119 1120 setOperationAction(ISD::SUB, MVT::v4i64, Legal); 1121 setOperationAction(ISD::SUB, MVT::v8i32, Legal); 1122 setOperationAction(ISD::SUB, MVT::v16i16, Legal); 1123 setOperationAction(ISD::SUB, MVT::v32i8, Legal); 1124 1125 setOperationAction(ISD::MUL, MVT::v4i64, Custom); 1126 setOperationAction(ISD::MUL, MVT::v8i32, Legal); 1127 setOperationAction(ISD::MUL, MVT::v16i16, Legal); 1128 // Don't lower v32i8 because there is no 128-bit byte mul 1129 1130 setOperationAction(ISD::VSELECT, MVT::v32i8, Legal); 1131 1132 setOperationAction(ISD::SRL, MVT::v4i64, Legal); 1133 setOperationAction(ISD::SRL, MVT::v8i32, Legal); 1134 1135 setOperationAction(ISD::SHL, MVT::v4i64, Legal); 1136 setOperationAction(ISD::SHL, MVT::v8i32, Legal); 1137 1138 setOperationAction(ISD::SRA, MVT::v8i32, Legal); 1139 } else { 1140 setOperationAction(ISD::ADD, MVT::v4i64, Custom); 1141 setOperationAction(ISD::ADD, MVT::v8i32, Custom); 1142 setOperationAction(ISD::ADD, MVT::v16i16, Custom); 1143 setOperationAction(ISD::ADD, MVT::v32i8, Custom); 1144 1145 setOperationAction(ISD::SUB, MVT::v4i64, Custom); 1146 setOperationAction(ISD::SUB, MVT::v8i32, Custom); 1147 setOperationAction(ISD::SUB, MVT::v16i16, Custom); 1148 setOperationAction(ISD::SUB, MVT::v32i8, Custom); 1149 1150 setOperationAction(ISD::MUL, MVT::v4i64, Custom); 1151 setOperationAction(ISD::MUL, MVT::v8i32, Custom); 1152 setOperationAction(ISD::MUL, MVT::v16i16, Custom); 1153 // Don't lower v32i8 because there is no 128-bit byte mul 1154 1155 setOperationAction(ISD::SRL, MVT::v4i64, Custom); 1156 setOperationAction(ISD::SRL, MVT::v8i32, Custom); 1157 1158 setOperationAction(ISD::SHL, MVT::v4i64, Custom); 1159 setOperationAction(ISD::SHL, MVT::v8i32, Custom); 1160 1161 setOperationAction(ISD::SRA, MVT::v8i32, Custom); 1162 } 1163 1164 // Custom lower several nodes for 256-bit types. 1165 for (int i = MVT::FIRST_VECTOR_VALUETYPE; 1166 i <= MVT::LAST_VECTOR_VALUETYPE; ++i) { 1167 MVT VT = (MVT::SimpleValueType)i; 1168 1169 // Extract subvector is special because the value type 1170 // (result) is 128-bit but the source is 256-bit wide. 
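    // That is, the operation action is keyed on the 128-bit result type even
    // though the operand being extracted from is 256 bits wide.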
1171 if (VT.is128BitVector()) 1172 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); 1173 1174 // Do not attempt to custom lower other non-256-bit vectors 1175 if (!VT.is256BitVector()) 1176 continue; 1177 1178 setOperationAction(ISD::BUILD_VECTOR, VT, Custom); 1179 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); 1180 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); 1181 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); 1182 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom); 1183 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); 1184 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); 1185 } 1186 1187 // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64. 1188 for (int i = MVT::v32i8; i != MVT::v4i64; ++i) { 1189 MVT VT = (MVT::SimpleValueType)i; 1190 1191 // Do not attempt to promote non-256-bit vectors 1192 if (!VT.is256BitVector()) 1193 continue; 1194 1195 setOperationAction(ISD::AND, VT, Promote); 1196 AddPromotedToType (ISD::AND, VT, MVT::v4i64); 1197 setOperationAction(ISD::OR, VT, Promote); 1198 AddPromotedToType (ISD::OR, VT, MVT::v4i64); 1199 setOperationAction(ISD::XOR, VT, Promote); 1200 AddPromotedToType (ISD::XOR, VT, MVT::v4i64); 1201 setOperationAction(ISD::LOAD, VT, Promote); 1202 AddPromotedToType (ISD::LOAD, VT, MVT::v4i64); 1203 setOperationAction(ISD::SELECT, VT, Promote); 1204 AddPromotedToType (ISD::SELECT, VT, MVT::v4i64); 1205 } 1206 } 1207 1208 // SIGN_EXTEND_INREGs are evaluated by the extend type. Handle the expansion 1209 // of this type with custom code. 1210 for (int VT = MVT::FIRST_VECTOR_VALUETYPE; 1211 VT != MVT::LAST_VECTOR_VALUETYPE; VT++) { 1212 setOperationAction(ISD::SIGN_EXTEND_INREG, (MVT::SimpleValueType)VT, 1213 Custom); 1214 } 1215 1216 // We want to custom lower some of our intrinsics. 1217 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); 1218 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom); 1219 1220 1221 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't 1222 // handle type legalization for these operations here. 1223 // 1224 // FIXME: We really should do custom legalization for addition and 1225 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better 1226 // than generic legalization for 64-bit multiplication-with-overflow, though. 1227 for (unsigned i = 0, e = 3+Subtarget->is64Bit(); i != e; ++i) { 1228 // Add/Sub/Mul with overflow operations are custom lowered. 1229 MVT VT = IntVTs[i]; 1230 setOperationAction(ISD::SADDO, VT, Custom); 1231 setOperationAction(ISD::UADDO, VT, Custom); 1232 setOperationAction(ISD::SSUBO, VT, Custom); 1233 setOperationAction(ISD::USUBO, VT, Custom); 1234 setOperationAction(ISD::SMULO, VT, Custom); 1235 setOperationAction(ISD::UMULO, VT, Custom); 1236 } 1237 1238 // There are no 8-bit 3-address imul/mul instructions 1239 setOperationAction(ISD::SMULO, MVT::i8, Expand); 1240 setOperationAction(ISD::UMULO, MVT::i8, Expand); 1241 1242 if (!Subtarget->is64Bit()) { 1243 // These libcalls are not available in 32-bit. 
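    // With the libcall names cleared, no runtime call is emitted for these;
    // 128-bit shifts are expanded by the legalizer instead.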
1244 setLibcallName(RTLIB::SHL_I128, 0); 1245 setLibcallName(RTLIB::SRL_I128, 0); 1246 setLibcallName(RTLIB::SRA_I128, 0); 1247 } 1248 1249 // We have target-specific dag combine patterns for the following nodes: 1250 setTargetDAGCombine(ISD::VECTOR_SHUFFLE); 1251 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT); 1252 setTargetDAGCombine(ISD::VSELECT); 1253 setTargetDAGCombine(ISD::SELECT); 1254 setTargetDAGCombine(ISD::SHL); 1255 setTargetDAGCombine(ISD::SRA); 1256 setTargetDAGCombine(ISD::SRL); 1257 setTargetDAGCombine(ISD::OR); 1258 setTargetDAGCombine(ISD::AND); 1259 setTargetDAGCombine(ISD::ADD); 1260 setTargetDAGCombine(ISD::FADD); 1261 setTargetDAGCombine(ISD::FSUB); 1262 setTargetDAGCombine(ISD::FMA); 1263 setTargetDAGCombine(ISD::SUB); 1264 setTargetDAGCombine(ISD::LOAD); 1265 setTargetDAGCombine(ISD::STORE); 1266 setTargetDAGCombine(ISD::ZERO_EXTEND); 1267 setTargetDAGCombine(ISD::ANY_EXTEND); 1268 setTargetDAGCombine(ISD::SIGN_EXTEND); 1269 setTargetDAGCombine(ISD::TRUNCATE); 1270 setTargetDAGCombine(ISD::SINT_TO_FP); 1271 setTargetDAGCombine(ISD::SETCC); 1272 if (Subtarget->is64Bit()) 1273 setTargetDAGCombine(ISD::MUL); 1274 setTargetDAGCombine(ISD::XOR); 1275 1276 computeRegisterProperties(); 1277 1278 // On Darwin, -Os means optimize for size without hurting performance, 1279 // do not reduce the limit. 1280 maxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores 1281 maxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 16 : 8; 1282 maxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores 1283 maxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 8 : 4; 1284 maxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores 1285 maxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4; 1286 setPrefLoopAlignment(4); // 2^4 bytes. 1287 benefitFromCodePlacementOpt = true; 1288 1289 // Predictable cmov don't hurt on atom because it's in-order. 1290 predictableSelectIsExpensive = !Subtarget->isAtom(); 1291 1292 setPrefFunctionAlignment(4); // 2^4 bytes. 1293} 1294 1295 1296EVT X86TargetLowering::getSetCCResultType(EVT VT) const { 1297 if (!VT.isVector()) return MVT::i8; 1298 return VT.changeVectorElementTypeToInteger(); 1299} 1300 1301 1302/// getMaxByValAlign - Helper for getByValTypeAlignment to determine 1303/// the desired ByVal argument alignment. 1304static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) { 1305 if (MaxAlign == 16) 1306 return; 1307 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) { 1308 if (VTy->getBitWidth() == 128) 1309 MaxAlign = 16; 1310 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) { 1311 unsigned EltAlign = 0; 1312 getMaxByValAlign(ATy->getElementType(), EltAlign); 1313 if (EltAlign > MaxAlign) 1314 MaxAlign = EltAlign; 1315 } else if (StructType *STy = dyn_cast<StructType>(Ty)) { 1316 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 1317 unsigned EltAlign = 0; 1318 getMaxByValAlign(STy->getElementType(i), EltAlign); 1319 if (EltAlign > MaxAlign) 1320 MaxAlign = EltAlign; 1321 if (MaxAlign == 16) 1322 break; 1323 } 1324 } 1325} 1326 1327/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate 1328/// function arguments in the caller parameter area. For X86, aggregates 1329/// that contain SSE vectors are placed at 16-byte boundaries while the rest 1330/// are at 4-byte boundaries. 1331unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const { 1332 if (Subtarget->is64Bit()) { 1333 // Max of 8 and alignment of type. 
1334 unsigned TyAlign = TD->getABITypeAlignment(Ty); 1335 if (TyAlign > 8) 1336 return TyAlign; 1337 return 8; 1338 } 1339 1340 unsigned Align = 4; 1341 if (Subtarget->hasSSE1()) 1342 getMaxByValAlign(Ty, Align); 1343 return Align; 1344} 1345 1346/// getOptimalMemOpType - Returns the target specific optimal type for load 1347/// and store operations as a result of memset, memcpy, and memmove 1348/// lowering. If DstAlign is zero, the destination alignment can satisfy any 1349/// constraint and does not need to be checked. Similarly, if SrcAlign is zero 1350/// there is no need to check it against an alignment requirement, 1351/// probably because the source does not need to be loaded. If 1352/// 'IsZeroVal' is true, that means it's safe to return a 1353/// non-scalar-integer type, e.g. empty string source, constant, or loaded 1354/// from memory. 'MemcpyStrSrc' indicates whether the memcpy source is 1355/// constant so it does not need to be loaded. 1356/// It returns EVT::Other if the type should be determined using generic 1357/// target-independent logic. 1358EVT 1359X86TargetLowering::getOptimalMemOpType(uint64_t Size, 1360 unsigned DstAlign, unsigned SrcAlign, 1361 bool IsZeroVal, 1362 bool MemcpyStrSrc, 1363 MachineFunction &MF) const { 1364 // FIXME: This turns off use of xmm stores for memset/memcpy on targets like 1365 // linux. This is because the stack realignment code can't handle certain 1366 // cases like PR2962. This should be removed when PR2962 is fixed. 1367 const Function *F = MF.getFunction(); 1368 if (IsZeroVal && 1369 !F->getFnAttributes().hasAttribute(Attributes::NoImplicitFloat)) { 1370 if (Size >= 16 && 1371 (Subtarget->isUnalignedMemAccessFast() || 1372 ((DstAlign == 0 || DstAlign >= 16) && 1373 (SrcAlign == 0 || SrcAlign >= 16))) && 1374 Subtarget->getStackAlignment() >= 16) { 1375 if (Subtarget->getStackAlignment() >= 32) { 1376 if (Subtarget->hasAVX2()) 1377 return MVT::v8i32; 1378 if (Subtarget->hasAVX()) 1379 return MVT::v8f32; 1380 } 1381 if (Subtarget->hasSSE2()) 1382 return MVT::v4i32; 1383 if (Subtarget->hasSSE1()) 1384 return MVT::v4f32; 1385 } else if (!MemcpyStrSrc && Size >= 8 && 1386 !Subtarget->is64Bit() && 1387 Subtarget->getStackAlignment() >= 8 && 1388 Subtarget->hasSSE2()) { 1389 // Do not use f64 to lower memcpy if the source is a string constant. It's 1390 // better to use i32 to avoid the loads. 1391 return MVT::f64; 1392 } 1393 } 1394 if (Subtarget->is64Bit() && Size >= 8) 1395 return MVT::i64; 1396 return MVT::i32; 1397} 1398 1399/// getJumpTableEncoding - Return the entry encoding for a jump table in the 1400/// current function. The returned value is a member of the 1401/// MachineJumpTableInfo::JTEntryKind enum. 1402unsigned X86TargetLowering::getJumpTableEncoding() const { 1403 // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF 1404 // symbol. 1405 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 1406 Subtarget->isPICStyleGOT()) 1407 return MachineJumpTableInfo::EK_Custom32; 1408 1409 // Otherwise, use the normal jump table encoding heuristics. 1410 return TargetLowering::getJumpTableEncoding(); 1411} 1412 1413const MCExpr * 1414X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI, 1415 const MachineBasicBlock *MBB, 1416 unsigned uid, MCContext &Ctx) const { 1417 assert(getTargetMachine().getRelocationModel() == Reloc::PIC_ && 1418 Subtarget->isPICStyleGOT()); 1419 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF 1420 // entries.
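// Each such entry is then printed roughly as (the label name is illustrative):
//   .long .LBB0_2@GOTOFF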
1421 return MCSymbolRefExpr::Create(MBB->getSymbol(), 1422 MCSymbolRefExpr::VK_GOTOFF, Ctx); 1423} 1424 1425/// getPICJumpTableRelocBase - Returns the relocation base for the given PIC 1426/// jumptable. 1427SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table, 1428 SelectionDAG &DAG) const { 1429 if (!Subtarget->is64Bit()) 1430 // This doesn't have DebugLoc associated with it, but is not really the 1431 // same as a Register. 1432 return DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), getPointerTy()); 1433 return Table; 1434} 1435 1436/// getPICJumpTableRelocBaseExpr - This returns the relocation base for the 1437/// given PIC jumptable, the same as getPICJumpTableRelocBase, but as an 1438/// MCExpr. 1439const MCExpr *X86TargetLowering:: 1440getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, 1441 MCContext &Ctx) const { 1442 // X86-64 uses RIP relative addressing based on the jump table label. 1443 if (Subtarget->isPICStyleRIPRel()) 1444 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); 1445 1446 // Otherwise, the reference is relative to the PIC base. 1447 return MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx); 1448} 1449 1450// FIXME: Why is this routine here? Move it to RegInfo! 1451std::pair<const TargetRegisterClass*, uint8_t> 1452X86TargetLowering::findRepresentativeClass(EVT VT) const { 1453 const TargetRegisterClass *RRC = 0; 1454 uint8_t Cost = 1; 1455 switch (VT.getSimpleVT().SimpleTy) { 1456 default: 1457 return TargetLowering::findRepresentativeClass(VT); 1458 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64: 1459 RRC = Subtarget->is64Bit() ? 1460 (const TargetRegisterClass*)&X86::GR64RegClass : 1461 (const TargetRegisterClass*)&X86::GR32RegClass; 1462 break; 1463 case MVT::x86mmx: 1464 RRC = &X86::VR64RegClass; 1465 break; 1466 case MVT::f32: case MVT::f64: 1467 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64: 1468 case MVT::v4f32: case MVT::v2f64: 1469 case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32: 1470 case MVT::v4f64: 1471 RRC = &X86::VR128RegClass; 1472 break; 1473 } 1474 return std::make_pair(RRC, Cost); 1475} 1476 1477bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace, 1478 unsigned &Offset) const { 1479 if (!Subtarget->isTargetLinux()) 1480 return false; 1481 1482 if (Subtarget->is64Bit()) { 1483 // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs: 1484 Offset = 0x28; 1485 if (getTargetMachine().getCodeModel() == CodeModel::Kernel) 1486 AddressSpace = 256; 1487 else 1488 AddressSpace = 257; 1489 } else { 1490 // %gs:0x14 on i386 1491 Offset = 0x14; 1492 AddressSpace = 256; 1493 } 1494 return true; 1495} 1496 1497 1498//===----------------------------------------------------------------------===// 1499// Return Value Calling Convention Implementation 1500//===----------------------------------------------------------------------===// 1501 1502#include "X86GenCallingConv.inc" 1503 1504bool 1505X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv, 1506 MachineFunction &MF, bool isVarArg, 1507 const SmallVectorImpl<ISD::OutputArg> &Outs, 1508 LLVMContext &Context) const { 1509 SmallVector<CCValAssign, 16> RVLocs; 1510 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 1511 RVLocs, Context); 1512 return CCInfo.CheckReturn(Outs, RetCC_X86); 1513} 1514 1515SDValue 1516X86TargetLowering::LowerReturn(SDValue Chain, 1517 CallingConv::ID CallConv, bool isVarArg, 1518 const SmallVectorImpl<ISD::OutputArg> &Outs, 1519 const
SmallVectorImpl<SDValue> &OutVals, 1520 DebugLoc dl, SelectionDAG &DAG) const { 1521 MachineFunction &MF = DAG.getMachineFunction(); 1522 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1523 1524 SmallVector<CCValAssign, 16> RVLocs; 1525 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 1526 RVLocs, *DAG.getContext()); 1527 CCInfo.AnalyzeReturn(Outs, RetCC_X86); 1528 1529 // Add the regs to the liveout set for the function. 1530 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); 1531 for (unsigned i = 0; i != RVLocs.size(); ++i) 1532 if (RVLocs[i].isRegLoc() && !MRI.isLiveOut(RVLocs[i].getLocReg())) 1533 MRI.addLiveOut(RVLocs[i].getLocReg()); 1534 1535 SDValue Flag; 1536 1537 SmallVector<SDValue, 6> RetOps; 1538 RetOps.push_back(Chain); // Operand #0 = Chain (updated below) 1539 // Operand #1 = Bytes To Pop 1540 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), 1541 MVT::i16)); 1542 1543 // Copy the result values into the output registers. 1544 for (unsigned i = 0; i != RVLocs.size(); ++i) { 1545 CCValAssign &VA = RVLocs[i]; 1546 assert(VA.isRegLoc() && "Can only return in registers!"); 1547 SDValue ValToCopy = OutVals[i]; 1548 EVT ValVT = ValToCopy.getValueType(); 1549 1550 // Promote values to the appropriate types 1551 if (VA.getLocInfo() == CCValAssign::SExt) 1552 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy); 1553 else if (VA.getLocInfo() == CCValAssign::ZExt) 1554 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy); 1555 else if (VA.getLocInfo() == CCValAssign::AExt) 1556 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy); 1557 else if (VA.getLocInfo() == CCValAssign::BCvt) 1558 ValToCopy = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), ValToCopy); 1559 1560 // If this is x86-64, and we disabled SSE, we can't return FP values, 1561 // or SSE or MMX vectors. 1562 if ((ValVT == MVT::f32 || ValVT == MVT::f64 || 1563 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) && 1564 (Subtarget->is64Bit() && !Subtarget->hasSSE1())) { 1565 report_fatal_error("SSE register return with SSE disabled"); 1566 } 1567 // Likewise we can't return F64 values with SSE1 only. gcc does so, but 1568 // llvm-gcc has never done it right and no one has noticed, so this 1569 // should be OK for now. 1570 if (ValVT == MVT::f64 && 1571 (Subtarget->is64Bit() && !Subtarget->hasSSE2())) 1572 report_fatal_error("SSE2 register return with SSE2 disabled"); 1573 1574 // Returns in ST0/ST1 are handled specially: these are pushed as operands to 1575 // the RET instruction and handled by the FP Stackifier. 1576 if (VA.getLocReg() == X86::ST0 || 1577 VA.getLocReg() == X86::ST1) { 1578 // If this is a copy from an xmm register to ST(0), use an FPExtend to 1579 // change the value to the FP stack register class. 1580 if (isScalarFPTypeInSSEReg(VA.getValVT())) 1581 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy); 1582 RetOps.push_back(ValToCopy); 1583 // Don't emit a copytoreg. 1584 continue; 1585 } 1586 1587 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64 1588 // which is returned in RAX / RDX. 
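// A sketch of the rewrite done below for such a value:
//   x86mmx --bitcast--> i64 --scalar_to_vector--> v2i64 (re-bitcast to v4f32
//   when SSE2 is unavailable), so the copy into the XMM reg uses a legal type.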
1589 if (Subtarget->is64Bit()) { 1590 if (ValVT == MVT::x86mmx) { 1591 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) { 1592 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy); 1593 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, 1594 ValToCopy); 1595 // If we don't have SSE2 available, convert to v4f32 so the generated 1596 // register is legal. 1597 if (!Subtarget->hasSSE2()) 1598 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,ValToCopy); 1599 } 1600 } 1601 } 1602 1603 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag); 1604 Flag = Chain.getValue(1); 1605 } 1606 1607 // The x86-64 ABI for returning structs by value requires that we copy 1608 // the sret argument into %rax for the return. We saved the argument into 1609 // a virtual register in the entry block, so now we copy the value out 1610 // and into %rax. 1611 if (Subtarget->is64Bit() && 1612 DAG.getMachineFunction().getFunction()->hasStructRetAttr()) { 1613 MachineFunction &MF = DAG.getMachineFunction(); 1614 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1615 unsigned Reg = FuncInfo->getSRetReturnReg(); 1616 assert(Reg && 1617 "SRetReturnReg should have been set in LowerFormalArguments()."); 1618 SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy()); 1619 1620 Chain = DAG.getCopyToReg(Chain, dl, X86::RAX, Val, Flag); 1621 Flag = Chain.getValue(1); 1622 1623 // RAX now acts like a return value. 1624 MRI.addLiveOut(X86::RAX); 1625 } 1626 1627 RetOps[0] = Chain; // Update chain. 1628 1629 // Add the flag if we have it. 1630 if (Flag.getNode()) 1631 RetOps.push_back(Flag); 1632 1633 return DAG.getNode(X86ISD::RET_FLAG, dl, 1634 MVT::Other, &RetOps[0], RetOps.size()); 1635} 1636 1637bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { 1638 if (N->getNumValues() != 1) 1639 return false; 1640 if (!N->hasNUsesOfValue(1, 0)) 1641 return false; 1642 1643 SDValue TCChain = Chain; 1644 SDNode *Copy = *N->use_begin(); 1645 if (Copy->getOpcode() == ISD::CopyToReg) { 1646 // If the copy has a glue operand, we conservatively assume it isn't safe to 1647 // perform a tail call. 1648 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue) 1649 return false; 1650 TCChain = Copy->getOperand(0); 1651 } else if (Copy->getOpcode() != ISD::FP_EXTEND) 1652 return false; 1653 1654 bool HasRet = false; 1655 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); 1656 UI != UE; ++UI) { 1657 if (UI->getOpcode() != X86ISD::RET_FLAG) 1658 return false; 1659 HasRet = true; 1660 } 1661 1662 if (!HasRet) 1663 return false; 1664 1665 Chain = TCChain; 1666 return true; 1667} 1668 1669EVT 1670X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT, 1671 ISD::NodeType ExtendKind) const { 1672 MVT ReturnMVT; 1673 // TODO: Is this also valid on 32-bit? 1674 if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND) 1675 ReturnMVT = MVT::i8; 1676 else 1677 ReturnMVT = MVT::i32; 1678 1679 EVT MinVT = getRegisterType(Context, ReturnMVT); 1680 return VT.bitsLT(MinVT) ? MinVT : VT; 1681} 1682 1683/// LowerCallResult - Lower the result values of a call into the 1684/// appropriate copies out of appropriate physical registers. 
1685/// 1686SDValue 1687X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 1688 CallingConv::ID CallConv, bool isVarArg, 1689 const SmallVectorImpl<ISD::InputArg> &Ins, 1690 DebugLoc dl, SelectionDAG &DAG, 1691 SmallVectorImpl<SDValue> &InVals) const { 1692 1693 // Assign locations to each value returned by this call. 1694 SmallVector<CCValAssign, 16> RVLocs; 1695 bool Is64Bit = Subtarget->is64Bit(); 1696 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 1697 getTargetMachine(), RVLocs, *DAG.getContext()); 1698 CCInfo.AnalyzeCallResult(Ins, RetCC_X86); 1699 1700 // Copy all of the result registers out of their specified physreg. 1701 for (unsigned i = 0; i != RVLocs.size(); ++i) { 1702 CCValAssign &VA = RVLocs[i]; 1703 EVT CopyVT = VA.getValVT(); 1704 1705 // If this is x86-64, and we disabled SSE, we can't return FP values 1706 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) && 1707 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) { 1708 report_fatal_error("SSE register return with SSE disabled"); 1709 } 1710 1711 SDValue Val; 1712 1713 // If this is a call to a function that returns an fp value on the floating 1714 // point stack, we must guarantee the value is popped from the stack, so 1715 // a CopyFromReg is not good enough - the copy instruction may be eliminated 1716 // if the return value is not used. We use the FpPOP_RETVAL instruction 1717 // instead. 1718 if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) { 1719 // If we prefer to use the value in xmm registers, copy it out as f80 and 1720 // use a truncate to move it from fp stack reg to xmm reg. 1721 if (isScalarFPTypeInSSEReg(VA.getValVT())) CopyVT = MVT::f80; 1722 SDValue Ops[] = { Chain, InFlag }; 1723 Chain = SDValue(DAG.getMachineNode(X86::FpPOP_RETVAL, dl, CopyVT, 1724 MVT::Other, MVT::Glue, Ops, 2), 1); 1725 Val = Chain.getValue(0); 1726 1727 // Round the f80 to the right size, which also moves it to the appropriate 1728 // xmm register. 1729 if (CopyVT != VA.getValVT()) 1730 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val, 1731 // This truncation won't change the value. 1732 DAG.getIntPtrConstant(1)); 1733 } else { 1734 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), 1735 CopyVT, InFlag).getValue(1); 1736 Val = Chain.getValue(0); 1737 } 1738 InFlag = Chain.getValue(2); 1739 InVals.push_back(Val); 1740 } 1741 1742 return Chain; 1743} 1744 1745 1746//===----------------------------------------------------------------------===// 1747// C & StdCall & Fast Calling Convention implementation 1748//===----------------------------------------------------------------------===// 1749// StdCall calling convention seems to be standard for many Windows' API 1750// routines and around. It differs from C calling convention just a little: 1751// callee should clean up the stack, not caller. Symbols should be also 1752// decorated in some fancy way :) It doesn't support any vector arguments. 1753// For info on fast calling convention see Fast Calling Convention (tail call) 1754// implementation LowerX86_32FastCCCallTo. 1755 1756/// CallIsStructReturn - Determines whether a call uses struct return 1757/// semantics. 
1758enum StructReturnType { 1759 NotStructReturn, 1760 RegStructReturn, 1761 StackStructReturn 1762}; 1763static StructReturnType 1764callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) { 1765 if (Outs.empty()) 1766 return NotStructReturn; 1767 1768 const ISD::ArgFlagsTy &Flags = Outs[0].Flags; 1769 if (!Flags.isSRet()) 1770 return NotStructReturn; 1771 if (Flags.isInReg()) 1772 return RegStructReturn; 1773 return StackStructReturn; 1774} 1775 1776/// ArgsAreStructReturn - Determines whether a function uses struct 1777/// return semantics. 1778static StructReturnType 1779argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) { 1780 if (Ins.empty()) 1781 return NotStructReturn; 1782 1783 const ISD::ArgFlagsTy &Flags = Ins[0].Flags; 1784 if (!Flags.isSRet()) 1785 return NotStructReturn; 1786 if (Flags.isInReg()) 1787 return RegStructReturn; 1788 return StackStructReturn; 1789} 1790 1791/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 1792/// by "Src" to address "Dst" with size and alignment information specified by 1793/// the specific parameter attribute. The copy will be passed as a byval 1794/// function parameter. 1795static SDValue 1796CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, 1797 ISD::ArgFlagsTy Flags, SelectionDAG &DAG, 1798 DebugLoc dl) { 1799 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32); 1800 1801 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 1802 /*isVolatile*/false, /*AlwaysInline=*/true, 1803 MachinePointerInfo(), MachinePointerInfo()); 1804} 1805 1806/// IsTailCallConvention - Return true if the calling convention is one that 1807/// supports tail call optimization. 1808static bool IsTailCallConvention(CallingConv::ID CC) { 1809 return (CC == CallingConv::Fast || CC == CallingConv::GHC); 1810} 1811 1812bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { 1813 if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls) 1814 return false; 1815 1816 CallSite CS(CI); 1817 CallingConv::ID CalleeCC = CS.getCallingConv(); 1818 if (!IsTailCallConvention(CalleeCC) && CalleeCC != CallingConv::C) 1819 return false; 1820 1821 return true; 1822} 1823 1824/// FuncIsMadeTailCallSafe - Return true if the function is being made into 1825/// a tailcall target by changing its ABI. 1826static bool FuncIsMadeTailCallSafe(CallingConv::ID CC, 1827 bool GuaranteedTailCallOpt) { 1828 return GuaranteedTailCallOpt && IsTailCallConvention(CC); 1829} 1830 1831SDValue 1832X86TargetLowering::LowerMemArgument(SDValue Chain, 1833 CallingConv::ID CallConv, 1834 const SmallVectorImpl<ISD::InputArg> &Ins, 1835 DebugLoc dl, SelectionDAG &DAG, 1836 const CCValAssign &VA, 1837 MachineFrameInfo *MFI, 1838 unsigned i) const { 1839 // Create the nodes corresponding to a load from this parameter slot. 1840 ISD::ArgFlagsTy Flags = Ins[i].Flags; 1841 bool AlwaysUseMutable = FuncIsMadeTailCallSafe(CallConv, 1842 getTargetMachine().Options.GuaranteedTailCallOpt); 1843 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal(); 1844 EVT ValVT; 1845 1846 // If value is passed by pointer we have address passed instead of the value 1847 // itself. 1848 if (VA.getLocInfo() == CCValAssign::Indirect) 1849 ValVT = VA.getLocVT(); 1850 else 1851 ValVT = VA.getValVT(); 1852 1853 // FIXME: For now, all byval parameter objects are marked mutable. This can be 1854 // changed with more analysis. 1855 // In case of tail call optimization mark all arguments mutable. 
Since they 1856 // could be overwritten by lowering of arguments in case of a tail call. 1857 if (Flags.isByVal()) { 1858 unsigned Bytes = Flags.getByValSize(); 1859 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects. 1860 int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable); 1861 return DAG.getFrameIndex(FI, getPointerTy()); 1862 } else { 1863 int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8, 1864 VA.getLocMemOffset(), isImmutable); 1865 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 1866 return DAG.getLoad(ValVT, dl, Chain, FIN, 1867 MachinePointerInfo::getFixedStack(FI), 1868 false, false, false, 0); 1869 } 1870} 1871 1872SDValue 1873X86TargetLowering::LowerFormalArguments(SDValue Chain, 1874 CallingConv::ID CallConv, 1875 bool isVarArg, 1876 const SmallVectorImpl<ISD::InputArg> &Ins, 1877 DebugLoc dl, 1878 SelectionDAG &DAG, 1879 SmallVectorImpl<SDValue> &InVals) 1880 const { 1881 MachineFunction &MF = DAG.getMachineFunction(); 1882 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1883 1884 const Function* Fn = MF.getFunction(); 1885 if (Fn->hasExternalLinkage() && 1886 Subtarget->isTargetCygMing() && 1887 Fn->getName() == "main") 1888 FuncInfo->setForceFramePointer(true); 1889 1890 MachineFrameInfo *MFI = MF.getFrameInfo(); 1891 bool Is64Bit = Subtarget->is64Bit(); 1892 bool IsWindows = Subtarget->isTargetWindows(); 1893 bool IsWin64 = Subtarget->isTargetWin64(); 1894 1895 assert(!(isVarArg && IsTailCallConvention(CallConv)) && 1896 "Var args not supported with calling convention fastcc or ghc"); 1897 1898 // Assign locations to all of the incoming arguments. 1899 SmallVector<CCValAssign, 16> ArgLocs; 1900 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 1901 ArgLocs, *DAG.getContext()); 1902 1903 // Allocate shadow area for Win64 1904 if (IsWin64) { 1905 CCInfo.AllocateStack(32, 8); 1906 } 1907 1908 CCInfo.AnalyzeFormalArguments(Ins, CC_X86); 1909 1910 unsigned LastVal = ~0U; 1911 SDValue ArgValue; 1912 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1913 CCValAssign &VA = ArgLocs[i]; 1914 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later 1915 // places. 1916 assert(VA.getValNo() != LastVal && 1917 "Don't support value assigned to multiple locs yet"); 1918 (void)LastVal; 1919 LastVal = VA.getValNo(); 1920 1921 if (VA.isRegLoc()) { 1922 EVT RegVT = VA.getLocVT(); 1923 const TargetRegisterClass *RC; 1924 if (RegVT == MVT::i32) 1925 RC = &X86::GR32RegClass; 1926 else if (Is64Bit && RegVT == MVT::i64) 1927 RC = &X86::GR64RegClass; 1928 else if (RegVT == MVT::f32) 1929 RC = &X86::FR32RegClass; 1930 else if (RegVT == MVT::f64) 1931 RC = &X86::FR64RegClass; 1932 else if (RegVT.is256BitVector()) 1933 RC = &X86::VR256RegClass; 1934 else if (RegVT.is128BitVector()) 1935 RC = &X86::VR128RegClass; 1936 else if (RegVT == MVT::x86mmx) 1937 RC = &X86::VR64RegClass; 1938 else 1939 llvm_unreachable("Unknown argument type!"); 1940 1941 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 1942 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 1943 1944 // If this is an 8 or 16-bit value, it is really passed promoted to 32 1945 // bits. Insert an assert[sz]ext to capture this, then truncate to the 1946 // right size. 
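// As a rough illustration (not the exact nodes emitted), an i8 argument that
// arrives sign-extended in a 32-bit register becomes:
//   t1 = CopyFromReg <reg>:i32
//   t2 = AssertSext t1, i8    ; records that the upper bits are sign bits
//   t3 = truncate t2 to i8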
1947 if (VA.getLocInfo() == CCValAssign::SExt) 1948 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, 1949 DAG.getValueType(VA.getValVT())); 1950 else if (VA.getLocInfo() == CCValAssign::ZExt) 1951 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, 1952 DAG.getValueType(VA.getValVT())); 1953 else if (VA.getLocInfo() == CCValAssign::BCvt) 1954 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue); 1955 1956 if (VA.isExtInLoc()) { 1957 // Handle MMX values passed in XMM regs. 1958 if (RegVT.isVector()) { 1959 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), 1960 ArgValue); 1961 } else 1962 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 1963 } 1964 } else { 1965 assert(VA.isMemLoc()); 1966 ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i); 1967 } 1968 1969 // If value is passed via pointer - do a load. 1970 if (VA.getLocInfo() == CCValAssign::Indirect) 1971 ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, 1972 MachinePointerInfo(), false, false, false, 0); 1973 1974 InVals.push_back(ArgValue); 1975 } 1976 1977 // The x86-64 ABI for returning structs by value requires that we copy 1978 // the sret argument into %rax for the return. Save the argument into 1979 // a virtual register so that we can access it from the return points. 1980 if (Is64Bit && MF.getFunction()->hasStructRetAttr()) { 1981 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1982 unsigned Reg = FuncInfo->getSRetReturnReg(); 1983 if (!Reg) { 1984 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64)); 1985 FuncInfo->setSRetReturnReg(Reg); 1986 } 1987 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]); 1988 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain); 1989 } 1990 1991 unsigned StackSize = CCInfo.getNextStackOffset(); 1992 // Align stack specially for tail calls. 1993 if (FuncIsMadeTailCallSafe(CallConv, 1994 MF.getTarget().Options.GuaranteedTailCallOpt)) 1995 StackSize = GetAlignedArgumentStackSize(StackSize, DAG); 1996 1997 // If the function takes variable number of arguments, make a frame index for 1998 // the start of the first vararg value... for expansion of llvm.va_start. 1999 if (isVarArg) { 2000 if (Is64Bit || (CallConv != CallingConv::X86_FastCall && 2001 CallConv != CallingConv::X86_ThisCall)) { 2002 FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize,true)); 2003 } 2004 if (Is64Bit) { 2005 unsigned TotalNumIntRegs = 0, TotalNumXMMRegs = 0; 2006 2007 // FIXME: We should really autogenerate these arrays 2008 static const uint16_t GPR64ArgRegsWin64[] = { 2009 X86::RCX, X86::RDX, X86::R8, X86::R9 2010 }; 2011 static const uint16_t GPR64ArgRegs64Bit[] = { 2012 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9 2013 }; 2014 static const uint16_t XMMArgRegs64Bit[] = { 2015 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 2016 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 2017 }; 2018 const uint16_t *GPR64ArgRegs; 2019 unsigned NumXMMRegs = 0; 2020 2021 if (IsWin64) { 2022 // The XMM registers which might contain var arg parameters are shadowed 2023 // in their paired GPR. So we only need to save the GPR to their home 2024 // slots. 
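// (For reference: the pairings are RCX/XMM0, RDX/XMM1, R8/XMM2 and R9/XMM3,
// so the four 8-byte home slots cover a varargs value whether it arrived in a
// GPR or in an XMM register.)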
2025 TotalNumIntRegs = 4; 2026 GPR64ArgRegs = GPR64ArgRegsWin64; 2027 } else { 2028 TotalNumIntRegs = 6; TotalNumXMMRegs = 8; 2029 GPR64ArgRegs = GPR64ArgRegs64Bit; 2030 2031 NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs64Bit, 2032 TotalNumXMMRegs); 2033 } 2034 unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs, 2035 TotalNumIntRegs); 2036 2037 bool NoImplicitFloatOps = Fn->getFnAttributes(). 2038 hasAttribute(Attributes::NoImplicitFloat); 2039 assert(!(NumXMMRegs && !Subtarget->hasSSE1()) && 2040 "SSE register cannot be used when SSE is disabled!"); 2041 assert(!(NumXMMRegs && MF.getTarget().Options.UseSoftFloat && 2042 NoImplicitFloatOps) && 2043 "SSE register cannot be used when SSE is disabled!"); 2044 if (MF.getTarget().Options.UseSoftFloat || NoImplicitFloatOps || 2045 !Subtarget->hasSSE1()) 2046 // Kernel mode asks for SSE to be disabled, so don't push them 2047 // on the stack. 2048 TotalNumXMMRegs = 0; 2049 2050 if (IsWin64) { 2051 const TargetFrameLowering &TFI = *getTargetMachine().getFrameLowering(); 2052 // Get to the caller-allocated home save location. Add 8 to account 2053 // for the return address. 2054 int HomeOffset = TFI.getOffsetOfLocalArea() + 8; 2055 FuncInfo->setRegSaveFrameIndex( 2056 MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false)); 2057 // Fixup to set vararg frame on shadow area (4 x i64). 2058 if (NumIntRegs < 4) 2059 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex()); 2060 } else { 2061 // For X86-64, if there are vararg parameters that are passed via 2062 // registers, then we must store them to their spots on the stack so 2063 // they may be loaded by dereferencing the result of va_next. 2064 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8); 2065 FuncInfo->setVarArgsFPOffset(TotalNumIntRegs * 8 + NumXMMRegs * 16); 2066 FuncInfo->setRegSaveFrameIndex( 2067 MFI->CreateStackObject(TotalNumIntRegs * 8 + TotalNumXMMRegs * 16, 16, 2068 false)); 2069 } 2070 2071 // Store the integer parameter registers. 2072 SmallVector<SDValue, 8> MemOps; 2073 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), 2074 getPointerTy()); 2075 unsigned Offset = FuncInfo->getVarArgsGPOffset(); 2076 for (; NumIntRegs != TotalNumIntRegs; ++NumIntRegs) { 2077 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN, 2078 DAG.getIntPtrConstant(Offset)); 2079 unsigned VReg = MF.addLiveIn(GPR64ArgRegs[NumIntRegs], 2080 &X86::GR64RegClass); 2081 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 2082 SDValue Store = 2083 DAG.getStore(Val.getValue(1), dl, Val, FIN, 2084 MachinePointerInfo::getFixedStack( 2085 FuncInfo->getRegSaveFrameIndex(), Offset), 2086 false, false, 0); 2087 MemOps.push_back(Store); 2088 Offset += 8; 2089 } 2090 2091 if (TotalNumXMMRegs != 0 && NumXMMRegs != TotalNumXMMRegs) { 2092 // Now store the XMM (fp + vector) parameter registers.
2093 SmallVector<SDValue, 11> SaveXMMOps; 2094 SaveXMMOps.push_back(Chain); 2095 2096 unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass); 2097 SDValue ALVal = DAG.getCopyFromReg(DAG.getEntryNode(), dl, AL, MVT::i8); 2098 SaveXMMOps.push_back(ALVal); 2099 2100 SaveXMMOps.push_back(DAG.getIntPtrConstant( 2101 FuncInfo->getRegSaveFrameIndex())); 2102 SaveXMMOps.push_back(DAG.getIntPtrConstant( 2103 FuncInfo->getVarArgsFPOffset())); 2104 2105 for (; NumXMMRegs != TotalNumXMMRegs; ++NumXMMRegs) { 2106 unsigned VReg = MF.addLiveIn(XMMArgRegs64Bit[NumXMMRegs], 2107 &X86::VR128RegClass); 2108 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::v4f32); 2109 SaveXMMOps.push_back(Val); 2110 } 2111 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl, 2112 MVT::Other, 2113 &SaveXMMOps[0], SaveXMMOps.size())); 2114 } 2115 2116 if (!MemOps.empty()) 2117 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2118 &MemOps[0], MemOps.size()); 2119 } 2120 } 2121 2122 // Some CCs need callee pop. 2123 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg, 2124 MF.getTarget().Options.GuaranteedTailCallOpt)) { 2125 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything. 2126 } else { 2127 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing. 2128 // If this is an sret function, the return should pop the hidden pointer. 2129 if (!Is64Bit && !IsTailCallConvention(CallConv) && !IsWindows && 2130 argsAreStructReturn(Ins) == StackStructReturn) 2131 FuncInfo->setBytesToPopOnReturn(4); 2132 } 2133 2134 if (!Is64Bit) { 2135 // RegSaveFrameIndex is X86-64 only. 2136 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA); 2137 if (CallConv == CallingConv::X86_FastCall || 2138 CallConv == CallingConv::X86_ThisCall) 2139 // fastcc functions can't have varargs. 2140 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA); 2141 } 2142 2143 FuncInfo->setArgumentStackSize(StackSize); 2144 2145 return Chain; 2146} 2147 2148SDValue 2149X86TargetLowering::LowerMemOpCallTo(SDValue Chain, 2150 SDValue StackPtr, SDValue Arg, 2151 DebugLoc dl, SelectionDAG &DAG, 2152 const CCValAssign &VA, 2153 ISD::ArgFlagsTy Flags) const { 2154 unsigned LocMemOffset = VA.getLocMemOffset(); 2155 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 2156 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 2157 if (Flags.isByVal()) 2158 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl); 2159 2160 return DAG.getStore(Chain, dl, Arg, PtrOff, 2161 MachinePointerInfo::getStack(LocMemOffset), 2162 false, false, 0); 2163} 2164 2165/// EmitTailCallLoadRetAddr - Emit a load of return address if tail call 2166/// optimization is performed and it is required. 2167SDValue 2168X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG, 2169 SDValue &OutRetAddr, SDValue Chain, 2170 bool IsTailCall, bool Is64Bit, 2171 int FPDiff, DebugLoc dl) const { 2172 // Adjust the Return address stack slot. 2173 EVT VT = getPointerTy(); 2174 OutRetAddr = getReturnAddressFrameIndex(DAG); 2175 2176 // Load the "old" Return address. 2177 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(), 2178 false, false, false, 0); 2179 return SDValue(OutRetAddr.getNode(), 1); 2180} 2181 2182/// EmitTailCallStoreRetAddr - Emit a store of the return address if tail call 2183/// optimization is performed and it is required (FPDiff!=0). 
2184static SDValue 2185EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF, 2186 SDValue Chain, SDValue RetAddrFrIdx, EVT PtrVT, 2187 unsigned SlotSize, int FPDiff, DebugLoc dl) { 2188 // Store the return address to the appropriate stack slot. 2189 if (!FPDiff) return Chain; 2190 // Calculate the new stack slot for the return address. 2191 int NewReturnAddrFI = 2192 MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize, false); 2193 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT); 2194 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx, 2195 MachinePointerInfo::getFixedStack(NewReturnAddrFI), 2196 false, false, 0); 2197 return Chain; 2198} 2199 2200SDValue 2201X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, 2202 SmallVectorImpl<SDValue> &InVals) const { 2203 SelectionDAG &DAG = CLI.DAG; 2204 DebugLoc &dl = CLI.DL; 2205 SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; 2206 SmallVector<SDValue, 32> &OutVals = CLI.OutVals; 2207 SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; 2208 SDValue Chain = CLI.Chain; 2209 SDValue Callee = CLI.Callee; 2210 CallingConv::ID CallConv = CLI.CallConv; 2211 bool &isTailCall = CLI.IsTailCall; 2212 bool isVarArg = CLI.IsVarArg; 2213 2214 MachineFunction &MF = DAG.getMachineFunction(); 2215 bool Is64Bit = Subtarget->is64Bit(); 2216 bool IsWin64 = Subtarget->isTargetWin64(); 2217 bool IsWindows = Subtarget->isTargetWindows(); 2218 StructReturnType SR = callIsStructReturn(Outs); 2219 bool IsSibcall = false; 2220 2221 if (MF.getTarget().Options.DisableTailCalls) 2222 isTailCall = false; 2223 2224 if (isTailCall) { 2225 // Check if it's really possible to do a tail call. 2226 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, 2227 isVarArg, SR != NotStructReturn, 2228 MF.getFunction()->hasStructRetAttr(), CLI.RetTy, 2229 Outs, OutVals, Ins, DAG); 2230 2231 // Sibcalls are automatically detected tailcalls which do not require 2232 // ABI changes. 2233 if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall) 2234 IsSibcall = true; 2235 2236 if (isTailCall) 2237 ++NumTailCalls; 2238 } 2239 2240 assert(!(isVarArg && IsTailCallConvention(CallConv)) && 2241 "Var args not supported with calling convention fastcc or ghc"); 2242 2243 // Analyze operands of the call, assigning locations to each operand. 2244 SmallVector<CCValAssign, 16> ArgLocs; 2245 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 2246 ArgLocs, *DAG.getContext()); 2247 2248 // Allocate shadow area for Win64 2249 if (IsWin64) { 2250 CCInfo.AllocateStack(32, 8); 2251 } 2252 2253 CCInfo.AnalyzeCallOperands(Outs, CC_X86); 2254 2255 // Get a count of how many bytes are to be pushed on the stack. 2256 unsigned NumBytes = CCInfo.getNextStackOffset(); 2257 if (IsSibcall) 2258 // This is a sibcall. The memory operands are available in caller's 2259 // own caller's stack. 2260 NumBytes = 0; 2261 else if (getTargetMachine().Options.GuaranteedTailCallOpt && 2262 IsTailCallConvention(CallConv)) 2263 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG); 2264 2265 int FPDiff = 0; 2266 if (isTailCall && !IsSibcall) { 2267 // Lower arguments at fp - stackoffset + fpdiff. 2268 X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>(); 2269 unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn(); 2270 2271 FPDiff = NumBytesCallerPushed - NumBytes; 2272 2273 // Set the delta of movement of the returnaddr stackslot. 2274 // But only set if delta is greater than previous delta. 
2275 if (FPDiff < X86Info->getTCReturnAddrDelta()) 2276 X86Info->setTCReturnAddrDelta(FPDiff); 2277 } 2278 2279 if (!IsSibcall) 2280 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true)); 2281 2282 SDValue RetAddrFrIdx; 2283 // Load return address for tail calls. 2284 if (isTailCall && FPDiff) 2285 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall, 2286 Is64Bit, FPDiff, dl); 2287 2288 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 2289 SmallVector<SDValue, 8> MemOpChains; 2290 SDValue StackPtr; 2291 2292 // Walk the register/memloc assignments, inserting copies/loads. In the case 2293 // of tail call optimization arguments are handled later. 2294 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2295 CCValAssign &VA = ArgLocs[i]; 2296 EVT RegVT = VA.getLocVT(); 2297 SDValue Arg = OutVals[i]; 2298 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2299 bool isByVal = Flags.isByVal(); 2300 2301 // Promote the value if needed. 2302 switch (VA.getLocInfo()) { 2303 default: llvm_unreachable("Unknown loc info!"); 2304 case CCValAssign::Full: break; 2305 case CCValAssign::SExt: 2306 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg); 2307 break; 2308 case CCValAssign::ZExt: 2309 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg); 2310 break; 2311 case CCValAssign::AExt: 2312 if (RegVT.is128BitVector()) { 2313 // Special case: passing MMX values in XMM registers. 2314 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg); 2315 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg); 2316 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg); 2317 } else 2318 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg); 2319 break; 2320 case CCValAssign::BCvt: 2321 Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg); 2322 break; 2323 case CCValAssign::Indirect: { 2324 // Store the argument. 2325 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT()); 2326 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex(); 2327 Chain = DAG.getStore(Chain, dl, Arg, SpillSlot, 2328 MachinePointerInfo::getFixedStack(FI), 2329 false, false, 0); 2330 Arg = SpillSlot; 2331 break; 2332 } 2333 } 2334 2335 if (VA.isRegLoc()) { 2336 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 2337 if (isVarArg && IsWin64) { 2338 // The Win64 ABI requires an argument XMM reg to be copied to the 2339 // corresponding shadow reg if the callee is a varargs function. 2340 unsigned ShadowReg = 0; 2341 switch (VA.getLocReg()) { 2342 case X86::XMM0: ShadowReg = X86::RCX; break; 2343 case X86::XMM1: ShadowReg = X86::RDX; break; 2344 case X86::XMM2: ShadowReg = X86::R8; break; 2345 case X86::XMM3: ShadowReg = X86::R9; break; 2346 } 2347 if (ShadowReg) 2348 RegsToPass.push_back(std::make_pair(ShadowReg, Arg)); 2349 } 2350 } else if (!IsSibcall && (!isTailCall || isByVal)) { 2351 assert(VA.isMemLoc()); 2352 if (StackPtr.getNode() == 0) 2353 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(), 2354 getPointerTy()); 2355 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg, 2356 dl, DAG, VA, Flags)); 2357 } 2358 } 2359 2360 if (!MemOpChains.empty()) 2361 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2362 &MemOpChains[0], MemOpChains.size()); 2363 2364 if (Subtarget->isPICStyleGOT()) { 2365 // ELF / PIC requires the GOT pointer to be in the EBX register before 2366 // making function calls via the PLT.
2367 if (!isTailCall) { 2368 RegsToPass.push_back(std::make_pair(unsigned(X86::EBX), 2369 DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), getPointerTy()))); 2370 } else { 2371 // If we are tail calling and generating PIC/GOT style code, load the 2372 // address of the callee into ECX. The value in ecx is used as the target of 2373 // the tail jump. This is done to circumvent the ebx/callee-saved problem 2374 // for tail calls on PIC/GOT architectures. Normally we would just put the 2375 // address of GOT into ebx and then call target@PLT. But for tail calls 2376 // ebx would be restored (since ebx is callee saved) before jumping to the 2377 // target@PLT. 2378 2379 // Note: The actual moving to ECX is done further down. 2380 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee); 2381 if (G && !G->getGlobal()->hasHiddenVisibility() && 2382 !G->getGlobal()->hasProtectedVisibility()) 2383 Callee = LowerGlobalAddress(Callee, DAG); 2384 else if (isa<ExternalSymbolSDNode>(Callee)) 2385 Callee = LowerExternalSymbol(Callee, DAG); 2386 } 2387 } 2388 2389 if (Is64Bit && isVarArg && !IsWin64) { 2390 // From AMD64 ABI document: 2391 // For calls that may call functions that use varargs or stdargs 2392 // (prototype-less calls or calls to functions containing ellipsis (...) in 2393 // the declaration) %al is used as a hidden argument to specify the number 2394 // of SSE registers used. The contents of %al do not need to match exactly 2395 // the number of registers, but must be an upper bound on the number of SSE 2396 // registers used and is in the range 0 - 8 inclusive. 2397 2398 // Count the number of XMM registers allocated. 2399 static const uint16_t XMMArgRegs[] = { 2400 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 2401 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 2402 }; 2403 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8); 2404 assert((Subtarget->hasSSE1() || !NumXMMRegs) 2405 && "SSE registers cannot be used when SSE is disabled"); 2406 2407 RegsToPass.push_back(std::make_pair(unsigned(X86::AL), 2408 DAG.getConstant(NumXMMRegs, MVT::i8))); 2409 } 2410 2411 // For tail calls lower the arguments to the 'real' stack slot. 2412 if (isTailCall) { 2413 // Force all the incoming stack arguments to be loaded from the stack 2414 // before any new outgoing arguments are stored to the stack, because the 2415 // outgoing stack slots may alias the incoming argument stack slots, and 2416 // the alias isn't otherwise explicit. This is slightly more conservative 2417 // than necessary, because it means that each store effectively depends 2418 // on every argument instead of just those arguments it would clobber. 2419 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain); 2420 2421 SmallVector<SDValue, 8> MemOpChains2; 2422 SDValue FIN; 2423 int FI = 0; 2424 if (getTargetMachine().Options.GuaranteedTailCallOpt) { 2425 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2426 CCValAssign &VA = ArgLocs[i]; 2427 if (VA.isRegLoc()) 2428 continue; 2429 assert(VA.isMemLoc()); 2430 SDValue Arg = OutVals[i]; 2431 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2432 // Create frame index. 2433 int32_t Offset = VA.getLocMemOffset()+FPDiff; 2434 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8; 2435 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true); 2436 FIN = DAG.getFrameIndex(FI, getPointerTy()); 2437 2438 if (Flags.isByVal()) { 2439 // Copy relative to framepointer.
2440 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset()); 2441 if (StackPtr.getNode() == 0) 2442 StackPtr = DAG.getCopyFromReg(Chain, dl, 2443 RegInfo->getStackRegister(), 2444 getPointerTy()); 2445 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source); 2446 2447 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN, 2448 ArgChain, 2449 Flags, DAG, dl)); 2450 } else { 2451 // Store relative to framepointer. 2452 MemOpChains2.push_back( 2453 DAG.getStore(ArgChain, dl, Arg, FIN, 2454 MachinePointerInfo::getFixedStack(FI), 2455 false, false, 0)); 2456 } 2457 } 2458 } 2459 2460 if (!MemOpChains2.empty()) 2461 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2462 &MemOpChains2[0], MemOpChains2.size()); 2463 2464 // Store the return address to the appropriate stack slot. 2465 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx, 2466 getPointerTy(), RegInfo->getSlotSize(), 2467 FPDiff, dl); 2468 } 2469 2470 // Build a sequence of copy-to-reg nodes chained together with token chain 2471 // and flag operands which copy the outgoing args into registers. 2472 SDValue InFlag; 2473 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 2474 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 2475 RegsToPass[i].second, InFlag); 2476 InFlag = Chain.getValue(1); 2477 } 2478 2479 if (getTargetMachine().getCodeModel() == CodeModel::Large) { 2480 assert(Is64Bit && "Large code model is only legal in 64-bit mode."); 2481 // In the 64-bit large code model, we have to make all calls 2482 // through a register, since the call instruction's 32-bit 2483 // pc-relative offset may not be large enough to hold the whole 2484 // address. 2485 } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 2486 // If the callee is a GlobalAddress node (quite common, every direct call 2487 // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack 2488 // it. 2489 2490 // We should use an extra load for direct calls to dllimported functions in 2491 // non-JIT mode. 2492 const GlobalValue *GV = G->getGlobal(); 2493 if (!GV->hasDLLImportLinkage()) { 2494 unsigned char OpFlags = 0; 2495 bool ExtraLoad = false; 2496 unsigned WrapperKind = ISD::DELETED_NODE; 2497 2498 // On ELF targets, in both X86-64 and X86-32 mode, direct calls to 2499 // external symbols must go through the PLT in PIC mode. If the symbol 2500 // has hidden or protected visibility, or if it is static or local, then 2501 // we don't need to use the PLT - we can directly call it. 2502 if (Subtarget->isTargetELF() && 2503 getTargetMachine().getRelocationModel() == Reloc::PIC_ && 2504 GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) { 2505 OpFlags = X86II::MO_PLT; 2506 } else if (Subtarget->isPICStyleStubAny() && 2507 (GV->isDeclaration() || GV->isWeakForLinker()) && 2508 (!Subtarget->getTargetTriple().isMacOSX() || 2509 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) { 2510 // PC-relative references to external symbols should go through $stub, 2511 // unless we're building with the leopard linker or later, which 2512 // automatically synthesizes these stubs. 2513 OpFlags = X86II::MO_DARWIN_STUB; 2514 } else if (Subtarget->isPICStyleRIPRel() && 2515 isa<Function>(GV) && 2516 cast<Function>(GV)->getFnAttributes(). 2517 hasAttribute(Attributes::NonLazyBind)) { 2518 // If the function is marked as non-lazy, generate an indirect call 2519 // which loads from the GOT directly.
This avoids runtime overhead 2520 // at the cost of eager binding (and one extra byte of encoding). 2521 OpFlags = X86II::MO_GOTPCREL; 2522 WrapperKind = X86ISD::WrapperRIP; 2523 ExtraLoad = true; 2524 } 2525 2526 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 2527 G->getOffset(), OpFlags); 2528 2529 // Add a wrapper if needed. 2530 if (WrapperKind != ISD::DELETED_NODE) 2531 Callee = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Callee); 2532 // Add extra indirection if needed. 2533 if (ExtraLoad) 2534 Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee, 2535 MachinePointerInfo::getGOT(), 2536 false, false, false, 0); 2537 } 2538 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 2539 unsigned char OpFlags = 0; 2540 2541 // On ELF targets, in either X86-64 or X86-32 mode, direct calls to 2542 // external symbols should go through the PLT. 2543 if (Subtarget->isTargetELF() && 2544 getTargetMachine().getRelocationModel() == Reloc::PIC_) { 2545 OpFlags = X86II::MO_PLT; 2546 } else if (Subtarget->isPICStyleStubAny() && 2547 (!Subtarget->getTargetTriple().isMacOSX() || 2548 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) { 2549 // PC-relative references to external symbols should go through $stub, 2550 // unless we're building with the leopard linker or later, which 2551 // automatically synthesizes these stubs. 2552 OpFlags = X86II::MO_DARWIN_STUB; 2553 } 2554 2555 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(), 2556 OpFlags); 2557 } 2558 2559 // Returns a chain & a flag for retval copy to use. 2560 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 2561 SmallVector<SDValue, 8> Ops; 2562 2563 if (!IsSibcall && isTailCall) { 2564 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 2565 DAG.getIntPtrConstant(0, true), InFlag); 2566 InFlag = Chain.getValue(1); 2567 } 2568 2569 Ops.push_back(Chain); 2570 Ops.push_back(Callee); 2571 2572 if (isTailCall) 2573 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32)); 2574 2575 // Add argument registers to the end of the list so that they are known live 2576 // into the call. 2577 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 2578 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 2579 RegsToPass[i].second.getValueType())); 2580 2581 // Add a register mask operand representing the call-preserved registers. 2582 const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); 2583 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv); 2584 assert(Mask && "Missing call preserved mask for calling convention"); 2585 Ops.push_back(DAG.getRegisterMask(Mask)); 2586 2587 if (InFlag.getNode()) 2588 Ops.push_back(InFlag); 2589 2590 if (isTailCall) { 2591 // We used to do: 2592 //// If this is the first return lowered for this function, add the regs 2593 //// to the liveout set for the function. 2594 // This isn't right, although it's probably harmless on x86; liveouts 2595 // should be computed from returns not tail calls. Consider a void 2596 // function making a tail call to a function returning int. 2597 return DAG.getNode(X86ISD::TC_RETURN, dl, 2598 NodeTys, &Ops[0], Ops.size()); 2599 } 2600 2601 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, &Ops[0], Ops.size()); 2602 InFlag = Chain.getValue(1); 2603 2604 // Create the CALLSEQ_END node. 
2605 unsigned NumBytesForCalleeToPush; 2606 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg, 2607 getTargetMachine().Options.GuaranteedTailCallOpt)) 2608 NumBytesForCalleeToPush = NumBytes; // Callee pops everything 2609 else if (!Is64Bit && !IsTailCallConvention(CallConv) && !IsWindows && 2610 SR == StackStructReturn) 2611 // If this is a call to a struct-return function, the callee 2612 // pops the hidden struct pointer, so we have to push it back. 2613 // This is common for Darwin/X86, Linux & Mingw32 targets. 2614 // For MSVC Win32 targets, the caller pops the hidden struct pointer. 2615 NumBytesForCalleeToPush = 4; 2616 else 2617 NumBytesForCalleeToPush = 0; // Callee pops nothing. 2618 2619 // Returns a flag for retval copy to use. 2620 if (!IsSibcall) { 2621 Chain = DAG.getCALLSEQ_END(Chain, 2622 DAG.getIntPtrConstant(NumBytes, true), 2623 DAG.getIntPtrConstant(NumBytesForCalleeToPush, 2624 true), 2625 InFlag); 2626 InFlag = Chain.getValue(1); 2627 } 2628 2629 // Handle result values, copying them out of physregs into vregs that we 2630 // return. 2631 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, 2632 Ins, dl, DAG, InVals); 2633} 2634 2635 2636//===----------------------------------------------------------------------===// 2637// Fast Calling Convention (tail call) implementation 2638//===----------------------------------------------------------------------===// 2639 2640// Like StdCall, the callee cleans up the arguments, except that ECX is 2641// reserved for storing the address of the tail-called function. Only 2 registers are 2642// free for argument passing (inreg). Tail call optimization is performed 2643// provided: 2644// * tailcallopt is enabled 2645// * caller/callee are fastcc 2646// On X86_64 architecture with GOT-style position independent code only local 2647// (within module) calls are supported at the moment. 2648// To keep the stack aligned according to the platform ABI, the function 2649// GetAlignedArgumentStackSize ensures that the argument delta is always a multiple 2650// of the stack alignment. (Dynamic linkers need this - Darwin's dyld for example.) 2651// If a tail-called callee has more arguments than the caller, the 2652// caller needs to make sure that there is room to move the RETADDR to. This is 2653// achieved by reserving an area the size of the argument delta right after the 2654// original RETADDR, but before the saved framepointer or the spilled registers, 2655// e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4) 2656// stack layout: 2657// arg1 2658// arg2 2659// RETADDR 2660// [ new RETADDR 2661// move area ] 2662// (possible EBP) 2663// ESI 2664// EDI 2665// local1 .. 2666 2667/// GetAlignedArgumentStackSize - Round the stack size up to e.g. 16n + 12 2668/// bytes, as needed for a 16-byte stack alignment requirement. 2669unsigned 2670X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize, 2671 SelectionDAG& DAG) const { 2672 MachineFunction &MF = DAG.getMachineFunction(); 2673 const TargetMachine &TM = MF.getTarget(); 2674 const TargetFrameLowering &TFI = *TM.getFrameLowering(); 2675 unsigned StackAlignment = TFI.getStackAlignment(); 2676 uint64_t AlignMask = StackAlignment - 1; 2677 int64_t Offset = StackSize; 2678 unsigned SlotSize = RegInfo->getSlotSize(); 2679 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) { 2680 // The misaligned part is at most StackAlignment - SlotSize (the 12 above), so just add the difference. 2681 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask)); 2682 } else { 2683 // Mask out the lower bits and add StackAlignment once plus the 12 bytes.
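// A worked example, assuming StackAlignment == 16 and SlotSize == 4:
//   Offset = 30  ->  (30 & ~15) + 16 + 12  =  16 + 28  =  44, i.e. 16n + 12.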
2684 Offset = ((~AlignMask) & Offset) + StackAlignment + 2685 (StackAlignment-SlotSize); 2686 } 2687 return Offset; 2688} 2689 2690/// MatchingStackOffset - Return true if the given stack call argument is 2691/// already available in the same position (relatively) of the caller's 2692/// incoming argument stack. 2693static 2694bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, 2695 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI, 2696 const X86InstrInfo *TII) { 2697 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8; 2698 int FI = INT_MAX; 2699 if (Arg.getOpcode() == ISD::CopyFromReg) { 2700 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg(); 2701 if (!TargetRegisterInfo::isVirtualRegister(VR)) 2702 return false; 2703 MachineInstr *Def = MRI->getVRegDef(VR); 2704 if (!Def) 2705 return false; 2706 if (!Flags.isByVal()) { 2707 if (!TII->isLoadFromStackSlot(Def, FI)) 2708 return false; 2709 } else { 2710 unsigned Opcode = Def->getOpcode(); 2711 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r) && 2712 Def->getOperand(1).isFI()) { 2713 FI = Def->getOperand(1).getIndex(); 2714 Bytes = Flags.getByValSize(); 2715 } else 2716 return false; 2717 } 2718 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) { 2719 if (Flags.isByVal()) 2720 // ByVal argument is passed in as a pointer but it's now being 2721 // dereferenced. e.g. 2722 // define @foo(%struct.X* %A) { 2723 // tail call @bar(%struct.X* byval %A) 2724 // } 2725 return false; 2726 SDValue Ptr = Ld->getBasePtr(); 2727 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr); 2728 if (!FINode) 2729 return false; 2730 FI = FINode->getIndex(); 2731 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) { 2732 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg); 2733 FI = FINode->getIndex(); 2734 Bytes = Flags.getByValSize(); 2735 } else 2736 return false; 2737 2738 assert(FI != INT_MAX); 2739 if (!MFI->isFixedObjectIndex(FI)) 2740 return false; 2741 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI); 2742} 2743 2744/// IsEligibleForTailCallOptimization - Check whether the call is eligible 2745/// for tail call optimization. Targets which want to do tail call 2746/// optimization should implement this function. 2747bool 2748X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 2749 CallingConv::ID CalleeCC, 2750 bool isVarArg, 2751 bool isCalleeStructRet, 2752 bool isCallerStructRet, 2753 Type *RetTy, 2754 const SmallVectorImpl<ISD::OutputArg> &Outs, 2755 const SmallVectorImpl<SDValue> &OutVals, 2756 const SmallVectorImpl<ISD::InputArg> &Ins, 2757 SelectionDAG& DAG) const { 2758 if (!IsTailCallConvention(CalleeCC) && 2759 CalleeCC != CallingConv::C) 2760 return false; 2761 2762 // If -tailcallopt is specified, make fastcc functions tail-callable. 2763 const MachineFunction &MF = DAG.getMachineFunction(); 2764 const Function *CallerF = DAG.getMachineFunction().getFunction(); 2765 2766 // If the function return type is x86_fp80 and the callee return type is not, 2767 // then the FP_EXTEND of the call result is not a nop. It's not safe to 2768 // perform a tailcall optimization here. 
2769 if (CallerF->getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty()) 2770 return false; 2771 2772 CallingConv::ID CallerCC = CallerF->getCallingConv(); 2773 bool CCMatch = CallerCC == CalleeCC; 2774 2775 if (getTargetMachine().Options.GuaranteedTailCallOpt) { 2776 if (IsTailCallConvention(CalleeCC) && CCMatch) 2777 return true; 2778 return false; 2779 } 2780 2781 // Look for obvious safe cases to perform tail call optimization that do not 2782 // require ABI changes. This is what gcc calls sibcall. 2783 2784 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to 2785 // emit a special epilogue. 2786 if (RegInfo->needsStackRealignment(MF)) 2787 return false; 2788 2789 // Also avoid sibcall optimization if either caller or callee uses struct 2790 // return semantics. 2791 if (isCalleeStructRet || isCallerStructRet) 2792 return false; 2793 2794 // An stdcall caller is expected to clean up its arguments; the callee 2795 // isn't going to do that. 2796 if (!CCMatch && CallerCC==CallingConv::X86_StdCall) 2797 return false; 2798 2799 // Do not sibcall optimize vararg calls unless all arguments are passed via 2800 // registers. 2801 if (isVarArg && !Outs.empty()) { 2802 2803 // Optimizing for varargs on Win64 is unlikely to be safe without 2804 // additional testing. 2805 if (Subtarget->isTargetWin64()) 2806 return false; 2807 2808 SmallVector<CCValAssign, 16> ArgLocs; 2809 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), 2810 getTargetMachine(), ArgLocs, *DAG.getContext()); 2811 2812 CCInfo.AnalyzeCallOperands(Outs, CC_X86); 2813 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) 2814 if (!ArgLocs[i].isRegLoc()) 2815 return false; 2816 } 2817 2818 // If the call result is in ST0 / ST1, it needs to be popped off the x87 2819 // stack. Therefore, if it's not used by the call it is not safe to optimize 2820 // this into a sibcall. 2821 bool Unused = false; 2822 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { 2823 if (!Ins[i].Used) { 2824 Unused = true; 2825 break; 2826 } 2827 } 2828 if (Unused) { 2829 SmallVector<CCValAssign, 16> RVLocs; 2830 CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), 2831 getTargetMachine(), RVLocs, *DAG.getContext()); 2832 CCInfo.AnalyzeCallResult(Ins, RetCC_X86); 2833 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 2834 CCValAssign &VA = RVLocs[i]; 2835 if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) 2836 return false; 2837 } 2838 } 2839 2840 // If the calling conventions do not match, then we'd better make sure the 2841 // results are returned in the same way as what the caller expects. 
2842 if (!CCMatch) { 2843 SmallVector<CCValAssign, 16> RVLocs1; 2844 CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), 2845 getTargetMachine(), RVLocs1, *DAG.getContext()); 2846 CCInfo1.AnalyzeCallResult(Ins, RetCC_X86); 2847 2848 SmallVector<CCValAssign, 16> RVLocs2; 2849 CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), 2850 getTargetMachine(), RVLocs2, *DAG.getContext()); 2851 CCInfo2.AnalyzeCallResult(Ins, RetCC_X86); 2852 2853 if (RVLocs1.size() != RVLocs2.size()) 2854 return false; 2855 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) { 2856 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc()) 2857 return false; 2858 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo()) 2859 return false; 2860 if (RVLocs1[i].isRegLoc()) { 2861 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg()) 2862 return false; 2863 } else { 2864 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset()) 2865 return false; 2866 } 2867 } 2868 } 2869 2870 // If the callee takes no arguments then go on to check the results of the 2871 // call. 2872 if (!Outs.empty()) { 2873 // Check if stack adjustment is needed. For now, do not do this if any 2874 // argument is passed on the stack. 2875 SmallVector<CCValAssign, 16> ArgLocs; 2876 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), 2877 getTargetMachine(), ArgLocs, *DAG.getContext()); 2878 2879 // Allocate shadow area for Win64 2880 if (Subtarget->isTargetWin64()) { 2881 CCInfo.AllocateStack(32, 8); 2882 } 2883 2884 CCInfo.AnalyzeCallOperands(Outs, CC_X86); 2885 if (CCInfo.getNextStackOffset()) { 2886 MachineFunction &MF = DAG.getMachineFunction(); 2887 if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn()) 2888 return false; 2889 2890 // Check if the arguments are already laid out in the right way as 2891 // the caller's fixed stack objects. 2892 MachineFrameInfo *MFI = MF.getFrameInfo(); 2893 const MachineRegisterInfo *MRI = &MF.getRegInfo(); 2894 const X86InstrInfo *TII = 2895 ((const X86TargetMachine&)getTargetMachine()).getInstrInfo(); 2896 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2897 CCValAssign &VA = ArgLocs[i]; 2898 SDValue Arg = OutVals[i]; 2899 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2900 if (VA.getLocInfo() == CCValAssign::Indirect) 2901 return false; 2902 if (!VA.isRegLoc()) { 2903 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, 2904 MFI, MRI, TII)) 2905 return false; 2906 } 2907 } 2908 } 2909 2910 // If the tailcall address may be in a register, then make sure it's 2911 // possible to register allocate for it. In 32-bit, the call address can 2912 // only target EAX, EDX, or ECX since the tail call must be scheduled after 2913 // callee-saved registers are restored. These happen to be the same 2914 // registers used to pass 'inreg' arguments so watch out for those. 
2915 if (!Subtarget->is64Bit() && 2916 !isa<GlobalAddressSDNode>(Callee) && 2917 !isa<ExternalSymbolSDNode>(Callee)) { 2918 unsigned NumInRegs = 0; 2919 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2920 CCValAssign &VA = ArgLocs[i]; 2921 if (!VA.isRegLoc()) 2922 continue; 2923 unsigned Reg = VA.getLocReg(); 2924 switch (Reg) { 2925 default: break; 2926 case X86::EAX: case X86::EDX: case X86::ECX: 2927 if (++NumInRegs == 3) 2928 return false; 2929 break; 2930 } 2931 } 2932 } 2933 } 2934 2935 return true; 2936} 2937 2938FastISel * 2939X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo, 2940 const TargetLibraryInfo *libInfo) const { 2941 return X86::createFastISel(funcInfo, libInfo); 2942} 2943 2944 2945//===----------------------------------------------------------------------===// 2946// Other Lowering Hooks 2947//===----------------------------------------------------------------------===// 2948 2949static bool MayFoldLoad(SDValue Op) { 2950 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode()); 2951} 2952 2953static bool MayFoldIntoStore(SDValue Op) { 2954 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin()); 2955} 2956 2957static bool isTargetShuffle(unsigned Opcode) { 2958 switch(Opcode) { 2959 default: return false; 2960 case X86ISD::PSHUFD: 2961 case X86ISD::PSHUFHW: 2962 case X86ISD::PSHUFLW: 2963 case X86ISD::SHUFP: 2964 case X86ISD::PALIGN: 2965 case X86ISD::MOVLHPS: 2966 case X86ISD::MOVLHPD: 2967 case X86ISD::MOVHLPS: 2968 case X86ISD::MOVLPS: 2969 case X86ISD::MOVLPD: 2970 case X86ISD::MOVSHDUP: 2971 case X86ISD::MOVSLDUP: 2972 case X86ISD::MOVDDUP: 2973 case X86ISD::MOVSS: 2974 case X86ISD::MOVSD: 2975 case X86ISD::UNPCKL: 2976 case X86ISD::UNPCKH: 2977 case X86ISD::VPERMILP: 2978 case X86ISD::VPERM2X128: 2979 case X86ISD::VPERMI: 2980 return true; 2981 } 2982} 2983 2984static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, 2985 SDValue V1, SelectionDAG &DAG) { 2986 switch(Opc) { 2987 default: llvm_unreachable("Unknown x86 shuffle node"); 2988 case X86ISD::MOVSHDUP: 2989 case X86ISD::MOVSLDUP: 2990 case X86ISD::MOVDDUP: 2991 return DAG.getNode(Opc, dl, VT, V1); 2992 } 2993} 2994 2995static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, 2996 SDValue V1, unsigned TargetMask, 2997 SelectionDAG &DAG) { 2998 switch(Opc) { 2999 default: llvm_unreachable("Unknown x86 shuffle node"); 3000 case X86ISD::PSHUFD: 3001 case X86ISD::PSHUFHW: 3002 case X86ISD::PSHUFLW: 3003 case X86ISD::VPERMILP: 3004 case X86ISD::VPERMI: 3005 return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8)); 3006 } 3007} 3008 3009static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, 3010 SDValue V1, SDValue V2, unsigned TargetMask, 3011 SelectionDAG &DAG) { 3012 switch(Opc) { 3013 default: llvm_unreachable("Unknown x86 shuffle node"); 3014 case X86ISD::PALIGN: 3015 case X86ISD::SHUFP: 3016 case X86ISD::VPERM2X128: 3017 return DAG.getNode(Opc, dl, VT, V1, V2, 3018 DAG.getConstant(TargetMask, MVT::i8)); 3019 } 3020} 3021 3022static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, 3023 SDValue V1, SDValue V2, SelectionDAG &DAG) { 3024 switch(Opc) { 3025 default: llvm_unreachable("Unknown x86 shuffle node"); 3026 case X86ISD::MOVLHPS: 3027 case X86ISD::MOVLHPD: 3028 case X86ISD::MOVHLPS: 3029 case X86ISD::MOVLPS: 3030 case X86ISD::MOVLPD: 3031 case X86ISD::MOVSS: 3032 case X86ISD::MOVSD: 3033 case X86ISD::UNPCKL: 3034 case X86ISD::UNPCKH: 3035 return DAG.getNode(Opc, dl, VT, V1, V2); 3036 } 
3037} 3038 3039SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const { 3040 MachineFunction &MF = DAG.getMachineFunction(); 3041 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 3042 int ReturnAddrIndex = FuncInfo->getRAIndex(); 3043 3044 if (ReturnAddrIndex == 0) { 3045 // Set up a frame object for the return address. 3046 unsigned SlotSize = RegInfo->getSlotSize(); 3047 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize, -SlotSize, 3048 false); 3049 FuncInfo->setRAIndex(ReturnAddrIndex); 3050 } 3051 3052 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy()); 3053} 3054 3055 3056bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M, 3057 bool hasSymbolicDisplacement) { 3058 // Offset should fit into 32 bit immediate field. 3059 if (!isInt<32>(Offset)) 3060 return false; 3061 3062 // If we don't have a symbolic displacement - we don't have any extra 3063 // restrictions. 3064 if (!hasSymbolicDisplacement) 3065 return true; 3066 3067 // FIXME: Some tweaks might be needed for medium code model. 3068 if (M != CodeModel::Small && M != CodeModel::Kernel) 3069 return false; 3070 3071 // For small code model we assume that latest object is 16MB before end of 31 3072 // bits boundary. We may also accept pretty large negative constants knowing 3073 // that all objects are in the positive half of address space. 3074 if (M == CodeModel::Small && Offset < 16*1024*1024) 3075 return true; 3076 3077 // For kernel code model we know that all object resist in the negative half 3078 // of 32bits address space. We may not accept negative offsets, since they may 3079 // be just off and we may accept pretty large positive ones. 3080 if (M == CodeModel::Kernel && Offset > 0) 3081 return true; 3082 3083 return false; 3084} 3085 3086/// isCalleePop - Determines whether the callee is required to pop its 3087/// own arguments. Callee pop is necessary to support tail calls. 3088bool X86::isCalleePop(CallingConv::ID CallingConv, 3089 bool is64Bit, bool IsVarArg, bool TailCallOpt) { 3090 if (IsVarArg) 3091 return false; 3092 3093 switch (CallingConv) { 3094 default: 3095 return false; 3096 case CallingConv::X86_StdCall: 3097 return !is64Bit; 3098 case CallingConv::X86_FastCall: 3099 return !is64Bit; 3100 case CallingConv::X86_ThisCall: 3101 return !is64Bit; 3102 case CallingConv::Fast: 3103 return TailCallOpt; 3104 case CallingConv::GHC: 3105 return TailCallOpt; 3106 } 3107} 3108 3109/// TranslateX86CC - do a one to one translation of a ISD::CondCode to the X86 3110/// specific condition code, returning the condition code and the LHS/RHS of the 3111/// comparison to make. 3112static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP, 3113 SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) { 3114 if (!isFP) { 3115 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) { 3116 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) { 3117 // X > -1 -> X == 0, jump !sign. 3118 RHS = DAG.getConstant(0, RHS.getValueType()); 3119 return X86::COND_NS; 3120 } 3121 if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) { 3122 // X < 0 -> X == 0, jump on sign. 
3123 return X86::COND_S; 3124 } 3125 if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) { 3126 // X < 1 -> X <= 0 3127 RHS = DAG.getConstant(0, RHS.getValueType()); 3128 return X86::COND_LE; 3129 } 3130 } 3131 3132 switch (SetCCOpcode) { 3133 default: llvm_unreachable("Invalid integer condition!"); 3134 case ISD::SETEQ: return X86::COND_E; 3135 case ISD::SETGT: return X86::COND_G; 3136 case ISD::SETGE: return X86::COND_GE; 3137 case ISD::SETLT: return X86::COND_L; 3138 case ISD::SETLE: return X86::COND_LE; 3139 case ISD::SETNE: return X86::COND_NE; 3140 case ISD::SETULT: return X86::COND_B; 3141 case ISD::SETUGT: return X86::COND_A; 3142 case ISD::SETULE: return X86::COND_BE; 3143 case ISD::SETUGE: return X86::COND_AE; 3144 } 3145 } 3146 3147 // First determine if it is required or is profitable to flip the operands. 3148 3149 // If LHS is a foldable load, but RHS is not, flip the condition. 3150 if (ISD::isNON_EXTLoad(LHS.getNode()) && 3151 !ISD::isNON_EXTLoad(RHS.getNode())) { 3152 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode); 3153 std::swap(LHS, RHS); 3154 } 3155 3156 switch (SetCCOpcode) { 3157 default: break; 3158 case ISD::SETOLT: 3159 case ISD::SETOLE: 3160 case ISD::SETUGT: 3161 case ISD::SETUGE: 3162 std::swap(LHS, RHS); 3163 break; 3164 } 3165 3166 // On a floating point condition, the flags are set as follows: 3167 // ZF PF CF op 3168 // 0 | 0 | 0 | X > Y 3169 // 0 | 0 | 1 | X < Y 3170 // 1 | 0 | 0 | X == Y 3171 // 1 | 1 | 1 | unordered 3172 switch (SetCCOpcode) { 3173 default: llvm_unreachable("Condcode should be pre-legalized away"); 3174 case ISD::SETUEQ: 3175 case ISD::SETEQ: return X86::COND_E; 3176 case ISD::SETOLT: // flipped 3177 case ISD::SETOGT: 3178 case ISD::SETGT: return X86::COND_A; 3179 case ISD::SETOLE: // flipped 3180 case ISD::SETOGE: 3181 case ISD::SETGE: return X86::COND_AE; 3182 case ISD::SETUGT: // flipped 3183 case ISD::SETULT: 3184 case ISD::SETLT: return X86::COND_B; 3185 case ISD::SETUGE: // flipped 3186 case ISD::SETULE: 3187 case ISD::SETLE: return X86::COND_BE; 3188 case ISD::SETONE: 3189 case ISD::SETNE: return X86::COND_NE; 3190 case ISD::SETUO: return X86::COND_P; 3191 case ISD::SETO: return X86::COND_NP; 3192 case ISD::SETOEQ: 3193 case ISD::SETUNE: return X86::COND_INVALID; 3194 } 3195} 3196 3197/// hasFPCMov - is there a floating point cmov for the specific X86 condition 3198/// code. Current x86 isa includes the following FP cmov instructions: 3199/// fcmovb, fcomvbe, fcomve, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu. 3200static bool hasFPCMov(unsigned X86CC) { 3201 switch (X86CC) { 3202 default: 3203 return false; 3204 case X86::COND_B: 3205 case X86::COND_BE: 3206 case X86::COND_E: 3207 case X86::COND_P: 3208 case X86::COND_A: 3209 case X86::COND_AE: 3210 case X86::COND_NE: 3211 case X86::COND_NP: 3212 return true; 3213 } 3214} 3215 3216/// isFPImmLegal - Returns true if the target can instruction select the 3217/// specified FP immediate natively. If false, the legalizer will 3218/// materialize the FP immediate as a load from a constant pool. 3219bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { 3220 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) { 3221 if (Imm.bitwiseIsEqual(LegalFPImmediates[i])) 3222 return true; 3223 } 3224 return false; 3225} 3226 3227/// isUndefOrInRange - Return true if Val is undef or if its value falls within 3228/// the specified range (L, H]. 
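/// Note that, as implemented below, the interval is half-open:
/// Low <= Val < Hi.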
3229static bool isUndefOrInRange(int Val, int Low, int Hi) { 3230 return (Val < 0) || (Val >= Low && Val < Hi); 3231} 3232 3233/// isUndefOrEqual - Val is either less than zero (undef) or equal to the 3234/// specified value. 3235static bool isUndefOrEqual(int Val, int CmpVal) { 3236 if (Val < 0 || Val == CmpVal) 3237 return true; 3238 return false; 3239} 3240 3241/// isSequentialOrUndefInRange - Return true if every element in Mask, beginning 3242/// from position Pos and ending in Pos+Size, falls within the specified 3243/// sequential range (L, L+Pos]. or is undef. 3244static bool isSequentialOrUndefInRange(ArrayRef<int> Mask, 3245 unsigned Pos, unsigned Size, int Low) { 3246 for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low) 3247 if (!isUndefOrEqual(Mask[i], Low)) 3248 return false; 3249 return true; 3250} 3251 3252/// isPSHUFDMask - Return true if the node specifies a shuffle of elements that 3253/// is suitable for input to PSHUFD or PSHUFW. That is, it doesn't reference 3254/// the second operand. 3255static bool isPSHUFDMask(ArrayRef<int> Mask, EVT VT) { 3256 if (VT == MVT::v4f32 || VT == MVT::v4i32 ) 3257 return (Mask[0] < 4 && Mask[1] < 4 && Mask[2] < 4 && Mask[3] < 4); 3258 if (VT == MVT::v2f64 || VT == MVT::v2i64) 3259 return (Mask[0] < 2 && Mask[1] < 2); 3260 return false; 3261} 3262 3263/// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that 3264/// is suitable for input to PSHUFHW. 3265static bool isPSHUFHWMask(ArrayRef<int> Mask, EVT VT, bool HasAVX2) { 3266 if (VT != MVT::v8i16 && (!HasAVX2 || VT != MVT::v16i16)) 3267 return false; 3268 3269 // Lower quadword copied in order or undef. 3270 if (!isSequentialOrUndefInRange(Mask, 0, 4, 0)) 3271 return false; 3272 3273 // Upper quadword shuffled. 3274 for (unsigned i = 4; i != 8; ++i) 3275 if (!isUndefOrInRange(Mask[i], 4, 8)) 3276 return false; 3277 3278 if (VT == MVT::v16i16) { 3279 // Lower quadword copied in order or undef. 3280 if (!isSequentialOrUndefInRange(Mask, 8, 4, 8)) 3281 return false; 3282 3283 // Upper quadword shuffled. 3284 for (unsigned i = 12; i != 16; ++i) 3285 if (!isUndefOrInRange(Mask[i], 12, 16)) 3286 return false; 3287 } 3288 3289 return true; 3290} 3291 3292/// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that 3293/// is suitable for input to PSHUFLW. 3294static bool isPSHUFLWMask(ArrayRef<int> Mask, EVT VT, bool HasAVX2) { 3295 if (VT != MVT::v8i16 && (!HasAVX2 || VT != MVT::v16i16)) 3296 return false; 3297 3298 // Upper quadword copied in order. 3299 if (!isSequentialOrUndefInRange(Mask, 4, 4, 4)) 3300 return false; 3301 3302 // Lower quadword shuffled. 3303 for (unsigned i = 0; i != 4; ++i) 3304 if (!isUndefOrInRange(Mask[i], 0, 4)) 3305 return false; 3306 3307 if (VT == MVT::v16i16) { 3308 // Upper quadword copied in order. 3309 if (!isSequentialOrUndefInRange(Mask, 12, 4, 12)) 3310 return false; 3311 3312 // Lower quadword shuffled. 3313 for (unsigned i = 8; i != 12; ++i) 3314 if (!isUndefOrInRange(Mask[i], 8, 12)) 3315 return false; 3316 } 3317 3318 return true; 3319} 3320 3321/// isPALIGNRMask - Return true if the node specifies a shuffle of elements that 3322/// is suitable for input to PALIGNR. 
3323static bool isPALIGNRMask(ArrayRef<int> Mask, EVT VT, 3324 const X86Subtarget *Subtarget) { 3325 if ((VT.getSizeInBits() == 128 && !Subtarget->hasSSSE3()) || 3326 (VT.getSizeInBits() == 256 && !Subtarget->hasAVX2())) 3327 return false; 3328 3329 unsigned NumElts = VT.getVectorNumElements(); 3330 unsigned NumLanes = VT.getSizeInBits()/128; 3331 unsigned NumLaneElts = NumElts/NumLanes; 3332 3333 // Do not handle 64-bit element shuffles with palignr. 3334 if (NumLaneElts == 2) 3335 return false; 3336 3337 for (unsigned l = 0; l != NumElts; l+=NumLaneElts) { 3338 unsigned i; 3339 for (i = 0; i != NumLaneElts; ++i) { 3340 if (Mask[i+l] >= 0) 3341 break; 3342 } 3343 3344 // Lane is all undef, go to next lane 3345 if (i == NumLaneElts) 3346 continue; 3347 3348 int Start = Mask[i+l]; 3349 3350 // Make sure its in this lane in one of the sources 3351 if (!isUndefOrInRange(Start, l, l+NumLaneElts) && 3352 !isUndefOrInRange(Start, l+NumElts, l+NumElts+NumLaneElts)) 3353 return false; 3354 3355 // If not lane 0, then we must match lane 0 3356 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Start, Mask[i]+l)) 3357 return false; 3358 3359 // Correct second source to be contiguous with first source 3360 if (Start >= (int)NumElts) 3361 Start -= NumElts - NumLaneElts; 3362 3363 // Make sure we're shifting in the right direction. 3364 if (Start <= (int)(i+l)) 3365 return false; 3366 3367 Start -= i; 3368 3369 // Check the rest of the elements to see if they are consecutive. 3370 for (++i; i != NumLaneElts; ++i) { 3371 int Idx = Mask[i+l]; 3372 3373 // Make sure its in this lane 3374 if (!isUndefOrInRange(Idx, l, l+NumLaneElts) && 3375 !isUndefOrInRange(Idx, l+NumElts, l+NumElts+NumLaneElts)) 3376 return false; 3377 3378 // If not lane 0, then we must match lane 0 3379 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Idx, Mask[i]+l)) 3380 return false; 3381 3382 if (Idx >= (int)NumElts) 3383 Idx -= NumElts - NumLaneElts; 3384 3385 if (!isUndefOrEqual(Idx, Start+i)) 3386 return false; 3387 3388 } 3389 } 3390 3391 return true; 3392} 3393 3394/// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming 3395/// the two vector operands have swapped position. 3396static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask, 3397 unsigned NumElems) { 3398 for (unsigned i = 0; i != NumElems; ++i) { 3399 int idx = Mask[i]; 3400 if (idx < 0) 3401 continue; 3402 else if (idx < (int)NumElems) 3403 Mask[i] = idx + NumElems; 3404 else 3405 Mask[i] = idx - NumElems; 3406 } 3407} 3408 3409/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand 3410/// specifies a shuffle of elements that is suitable for input to 128/256-bit 3411/// SHUFPS and SHUFPD. If Commuted is true, then it checks for sources to be 3412/// reverse of what x86 shuffles want. 3413static bool isSHUFPMask(ArrayRef<int> Mask, EVT VT, bool HasAVX, 3414 bool Commuted = false) { 3415 if (!HasAVX && VT.getSizeInBits() == 256) 3416 return false; 3417 3418 unsigned NumElems = VT.getVectorNumElements(); 3419 unsigned NumLanes = VT.getSizeInBits()/128; 3420 unsigned NumLaneElems = NumElems/NumLanes; 3421 3422 if (NumLaneElems != 2 && NumLaneElems != 4) 3423 return false; 3424 3425 // VSHUFPSY divides the resulting vector into 4 chunks. 3426 // The sources are also splitted into 4 chunks, and each destination 3427 // chunk must come from a different source chunk. 
3428 // 3429 // SRC1 => X7 X6 X5 X4 X3 X2 X1 X0 3430 // SRC2 => Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y9 3431 // 3432 // DST => Y7..Y4, Y7..Y4, X7..X4, X7..X4, 3433 // Y3..Y0, Y3..Y0, X3..X0, X3..X0 3434 // 3435 // VSHUFPDY divides the resulting vector into 4 chunks. 3436 // The sources are also splitted into 4 chunks, and each destination 3437 // chunk must come from a different source chunk. 3438 // 3439 // SRC1 => X3 X2 X1 X0 3440 // SRC2 => Y3 Y2 Y1 Y0 3441 // 3442 // DST => Y3..Y2, X3..X2, Y1..Y0, X1..X0 3443 // 3444 unsigned HalfLaneElems = NumLaneElems/2; 3445 for (unsigned l = 0; l != NumElems; l += NumLaneElems) { 3446 for (unsigned i = 0; i != NumLaneElems; ++i) { 3447 int Idx = Mask[i+l]; 3448 unsigned RngStart = l + ((Commuted == (i<HalfLaneElems)) ? NumElems : 0); 3449 if (!isUndefOrInRange(Idx, RngStart, RngStart+NumLaneElems)) 3450 return false; 3451 // For VSHUFPSY, the mask of the second half must be the same as the 3452 // first but with the appropriate offsets. This works in the same way as 3453 // VPERMILPS works with masks. 3454 if (NumElems != 8 || l == 0 || Mask[i] < 0) 3455 continue; 3456 if (!isUndefOrEqual(Idx, Mask[i]+l)) 3457 return false; 3458 } 3459 } 3460 3461 return true; 3462} 3463 3464/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand 3465/// specifies a shuffle of elements that is suitable for input to MOVHLPS. 3466static bool isMOVHLPSMask(ArrayRef<int> Mask, EVT VT) { 3467 if (!VT.is128BitVector()) 3468 return false; 3469 3470 unsigned NumElems = VT.getVectorNumElements(); 3471 3472 if (NumElems != 4) 3473 return false; 3474 3475 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3 3476 return isUndefOrEqual(Mask[0], 6) && 3477 isUndefOrEqual(Mask[1], 7) && 3478 isUndefOrEqual(Mask[2], 2) && 3479 isUndefOrEqual(Mask[3], 3); 3480} 3481 3482/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form 3483/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef, 3484/// <2, 3, 2, 3> 3485static bool isMOVHLPS_v_undef_Mask(ArrayRef<int> Mask, EVT VT) { 3486 if (!VT.is128BitVector()) 3487 return false; 3488 3489 unsigned NumElems = VT.getVectorNumElements(); 3490 3491 if (NumElems != 4) 3492 return false; 3493 3494 return isUndefOrEqual(Mask[0], 2) && 3495 isUndefOrEqual(Mask[1], 3) && 3496 isUndefOrEqual(Mask[2], 2) && 3497 isUndefOrEqual(Mask[3], 3); 3498} 3499 3500/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand 3501/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}. 3502static bool isMOVLPMask(ArrayRef<int> Mask, EVT VT) { 3503 if (!VT.is128BitVector()) 3504 return false; 3505 3506 unsigned NumElems = VT.getVectorNumElements(); 3507 3508 if (NumElems != 2 && NumElems != 4) 3509 return false; 3510 3511 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 3512 if (!isUndefOrEqual(Mask[i], i + NumElems)) 3513 return false; 3514 3515 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i) 3516 if (!isUndefOrEqual(Mask[i], i)) 3517 return false; 3518 3519 return true; 3520} 3521 3522/// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand 3523/// specifies a shuffle of elements that is suitable for input to MOVLHPS. 
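/// For example, on v4f32 the matching mask (modulo undef elements) is
/// <0, 1, 4, 5>: the low half of the result comes from V1 and the high half
/// from the low half of V2.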
3524static bool isMOVLHPSMask(ArrayRef<int> Mask, EVT VT) { 3525 if (!VT.is128BitVector()) 3526 return false; 3527 3528 unsigned NumElems = VT.getVectorNumElements(); 3529 3530 if (NumElems != 2 && NumElems != 4) 3531 return false; 3532 3533 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 3534 if (!isUndefOrEqual(Mask[i], i)) 3535 return false; 3536 3537 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 3538 if (!isUndefOrEqual(Mask[i + e], i + NumElems)) 3539 return false; 3540 3541 return true; 3542} 3543 3544// 3545// Some special combinations that can be optimized. 3546// 3547static 3548SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp, 3549 SelectionDAG &DAG) { 3550 EVT VT = SVOp->getValueType(0); 3551 DebugLoc dl = SVOp->getDebugLoc(); 3552 3553 if (VT != MVT::v8i32 && VT != MVT::v8f32) 3554 return SDValue(); 3555 3556 ArrayRef<int> Mask = SVOp->getMask(); 3557 3558 // These are the special masks that may be optimized. 3559 static const int MaskToOptimizeEven[] = {0, 8, 2, 10, 4, 12, 6, 14}; 3560 static const int MaskToOptimizeOdd[] = {1, 9, 3, 11, 5, 13, 7, 15}; 3561 bool MatchEvenMask = true; 3562 bool MatchOddMask = true; 3563 for (int i=0; i<8; ++i) { 3564 if (!isUndefOrEqual(Mask[i], MaskToOptimizeEven[i])) 3565 MatchEvenMask = false; 3566 if (!isUndefOrEqual(Mask[i], MaskToOptimizeOdd[i])) 3567 MatchOddMask = false; 3568 } 3569 3570 if (!MatchEvenMask && !MatchOddMask) 3571 return SDValue(); 3572 3573 SDValue UndefNode = DAG.getNode(ISD::UNDEF, dl, VT); 3574 3575 SDValue Op0 = SVOp->getOperand(0); 3576 SDValue Op1 = SVOp->getOperand(1); 3577 3578 if (MatchEvenMask) { 3579 // Shift the second operand right to 32 bits. 3580 static const int ShiftRightMask[] = {-1, 0, -1, 2, -1, 4, -1, 6 }; 3581 Op1 = DAG.getVectorShuffle(VT, dl, Op1, UndefNode, ShiftRightMask); 3582 } else { 3583 // Shift the first operand left to 32 bits. 3584 static const int ShiftLeftMask[] = {1, -1, 3, -1, 5, -1, 7, -1 }; 3585 Op0 = DAG.getVectorShuffle(VT, dl, Op0, UndefNode, ShiftLeftMask); 3586 } 3587 static const int BlendMask[] = {0, 9, 2, 11, 4, 13, 6, 15}; 3588 return DAG.getVectorShuffle(VT, dl, Op0, Op1, BlendMask); 3589} 3590 3591/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand 3592/// specifies a shuffle of elements that is suitable for input to UNPCKL. 3593static bool isUNPCKLMask(ArrayRef<int> Mask, EVT VT, 3594 bool HasAVX2, bool V2IsSplat = false) { 3595 unsigned NumElts = VT.getVectorNumElements(); 3596 3597 assert((VT.is128BitVector() || VT.is256BitVector()) && 3598 "Unsupported vector type for unpckh"); 3599 3600 if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8 && 3601 (!HasAVX2 || (NumElts != 16 && NumElts != 32))) 3602 return false; 3603 3604 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 3605 // independently on 128-bit lanes. 
3606 unsigned NumLanes = VT.getSizeInBits()/128; 3607 unsigned NumLaneElts = NumElts/NumLanes; 3608 3609 for (unsigned l = 0; l != NumLanes; ++l) { 3610 for (unsigned i = l*NumLaneElts, j = l*NumLaneElts; 3611 i != (l+1)*NumLaneElts; 3612 i += 2, ++j) { 3613 int BitI = Mask[i]; 3614 int BitI1 = Mask[i+1]; 3615 if (!isUndefOrEqual(BitI, j)) 3616 return false; 3617 if (V2IsSplat) { 3618 if (!isUndefOrEqual(BitI1, NumElts)) 3619 return false; 3620 } else { 3621 if (!isUndefOrEqual(BitI1, j + NumElts)) 3622 return false; 3623 } 3624 } 3625 } 3626 3627 return true; 3628} 3629 3630/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand 3631/// specifies a shuffle of elements that is suitable for input to UNPCKH. 3632static bool isUNPCKHMask(ArrayRef<int> Mask, EVT VT, 3633 bool HasAVX2, bool V2IsSplat = false) { 3634 unsigned NumElts = VT.getVectorNumElements(); 3635 3636 assert((VT.is128BitVector() || VT.is256BitVector()) && 3637 "Unsupported vector type for unpckh"); 3638 3639 if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8 && 3640 (!HasAVX2 || (NumElts != 16 && NumElts != 32))) 3641 return false; 3642 3643 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 3644 // independently on 128-bit lanes. 3645 unsigned NumLanes = VT.getSizeInBits()/128; 3646 unsigned NumLaneElts = NumElts/NumLanes; 3647 3648 for (unsigned l = 0; l != NumLanes; ++l) { 3649 for (unsigned i = l*NumLaneElts, j = (l*NumLaneElts)+NumLaneElts/2; 3650 i != (l+1)*NumLaneElts; i += 2, ++j) { 3651 int BitI = Mask[i]; 3652 int BitI1 = Mask[i+1]; 3653 if (!isUndefOrEqual(BitI, j)) 3654 return false; 3655 if (V2IsSplat) { 3656 if (isUndefOrEqual(BitI1, NumElts)) 3657 return false; 3658 } else { 3659 if (!isUndefOrEqual(BitI1, j+NumElts)) 3660 return false; 3661 } 3662 } 3663 } 3664 return true; 3665} 3666 3667/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form 3668/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef, 3669/// <0, 0, 1, 1> 3670static bool isUNPCKL_v_undef_Mask(ArrayRef<int> Mask, EVT VT, 3671 bool HasAVX2) { 3672 unsigned NumElts = VT.getVectorNumElements(); 3673 3674 assert((VT.is128BitVector() || VT.is256BitVector()) && 3675 "Unsupported vector type for unpckh"); 3676 3677 if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8 && 3678 (!HasAVX2 || (NumElts != 16 && NumElts != 32))) 3679 return false; 3680 3681 // For 256-bit i64/f64, use MOVDDUPY instead, so reject the matching pattern 3682 // FIXME: Need a better way to get rid of this, there's no latency difference 3683 // between UNPCKLPD and MOVDDUP, the later should always be checked first and 3684 // the former later. We should also remove the "_undef" special mask. 3685 if (NumElts == 4 && VT.getSizeInBits() == 256) 3686 return false; 3687 3688 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 3689 // independently on 128-bit lanes. 3690 unsigned NumLanes = VT.getSizeInBits()/128; 3691 unsigned NumLaneElts = NumElts/NumLanes; 3692 3693 for (unsigned l = 0; l != NumLanes; ++l) { 3694 for (unsigned i = l*NumLaneElts, j = l*NumLaneElts; 3695 i != (l+1)*NumLaneElts; 3696 i += 2, ++j) { 3697 int BitI = Mask[i]; 3698 int BitI1 = Mask[i+1]; 3699 3700 if (!isUndefOrEqual(BitI, j)) 3701 return false; 3702 if (!isUndefOrEqual(BitI1, j)) 3703 return false; 3704 } 3705 } 3706 3707 return true; 3708} 3709 3710/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form 3711/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. 
vector_shuffle v, undef, 3712/// <2, 2, 3, 3> 3713static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, EVT VT, bool HasAVX2) { 3714 unsigned NumElts = VT.getVectorNumElements(); 3715 3716 assert((VT.is128BitVector() || VT.is256BitVector()) && 3717 "Unsupported vector type for unpckh"); 3718 3719 if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8 && 3720 (!HasAVX2 || (NumElts != 16 && NumElts != 32))) 3721 return false; 3722 3723 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 3724 // independently on 128-bit lanes. 3725 unsigned NumLanes = VT.getSizeInBits()/128; 3726 unsigned NumLaneElts = NumElts/NumLanes; 3727 3728 for (unsigned l = 0; l != NumLanes; ++l) { 3729 for (unsigned i = l*NumLaneElts, j = (l*NumLaneElts)+NumLaneElts/2; 3730 i != (l+1)*NumLaneElts; i += 2, ++j) { 3731 int BitI = Mask[i]; 3732 int BitI1 = Mask[i+1]; 3733 if (!isUndefOrEqual(BitI, j)) 3734 return false; 3735 if (!isUndefOrEqual(BitI1, j)) 3736 return false; 3737 } 3738 } 3739 return true; 3740} 3741 3742/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand 3743/// specifies a shuffle of elements that is suitable for input to MOVSS, 3744/// MOVSD, and MOVD, i.e. setting the lowest element. 3745static bool isMOVLMask(ArrayRef<int> Mask, EVT VT) { 3746 if (VT.getVectorElementType().getSizeInBits() < 32) 3747 return false; 3748 if (!VT.is128BitVector()) 3749 return false; 3750 3751 unsigned NumElts = VT.getVectorNumElements(); 3752 3753 if (!isUndefOrEqual(Mask[0], NumElts)) 3754 return false; 3755 3756 for (unsigned i = 1; i != NumElts; ++i) 3757 if (!isUndefOrEqual(Mask[i], i)) 3758 return false; 3759 3760 return true; 3761} 3762 3763/// isVPERM2X128Mask - Match 256-bit shuffles where the elements are considered 3764/// as permutations between 128-bit chunks or halves. As an example: this 3765/// shuffle bellow: 3766/// vector_shuffle <4, 5, 6, 7, 12, 13, 14, 15> 3767/// The first half comes from the second half of V1 and the second half from the 3768/// the second half of V2. 3769static bool isVPERM2X128Mask(ArrayRef<int> Mask, EVT VT, bool HasAVX) { 3770 if (!HasAVX || !VT.is256BitVector()) 3771 return false; 3772 3773 // The shuffle result is divided into half A and half B. In total the two 3774 // sources have 4 halves, namely: C, D, E, F. The final values of A and 3775 // B must come from C, D, E or F. 3776 unsigned HalfSize = VT.getVectorNumElements()/2; 3777 bool MatchA = false, MatchB = false; 3778 3779 // Check if A comes from one of C, D, E, F. 3780 for (unsigned Half = 0; Half != 4; ++Half) { 3781 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, Half*HalfSize)) { 3782 MatchA = true; 3783 break; 3784 } 3785 } 3786 3787 // Check if B comes from one of C, D, E, F. 3788 for (unsigned Half = 0; Half != 4; ++Half) { 3789 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, Half*HalfSize)) { 3790 MatchB = true; 3791 break; 3792 } 3793 } 3794 3795 return MatchA && MatchB; 3796} 3797 3798/// getShuffleVPERM2X128Immediate - Return the appropriate immediate to shuffle 3799/// the specified VECTOR_MASK mask with VPERM2F128/VPERM2I128 instructions. 
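/// For example, for the v8i32 shuffle <4, 5, 6, 7, 12, 13, 14, 15> shown
/// above, HalfSize is 4, so FstHalf = 4/4 = 1 and SndHalf = 12/4 = 3,
/// giving the immediate 1 | (3 << 4) = 0x31.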
3800static unsigned getShuffleVPERM2X128Immediate(ShuffleVectorSDNode *SVOp) { 3801 EVT VT = SVOp->getValueType(0); 3802 3803 unsigned HalfSize = VT.getVectorNumElements()/2; 3804 3805 unsigned FstHalf = 0, SndHalf = 0; 3806 for (unsigned i = 0; i < HalfSize; ++i) { 3807 if (SVOp->getMaskElt(i) > 0) { 3808 FstHalf = SVOp->getMaskElt(i)/HalfSize; 3809 break; 3810 } 3811 } 3812 for (unsigned i = HalfSize; i < HalfSize*2; ++i) { 3813 if (SVOp->getMaskElt(i) > 0) { 3814 SndHalf = SVOp->getMaskElt(i)/HalfSize; 3815 break; 3816 } 3817 } 3818 3819 return (FstHalf | (SndHalf << 4)); 3820} 3821 3822/// isVPERMILPMask - Return true if the specified VECTOR_SHUFFLE operand 3823/// specifies a shuffle of elements that is suitable for input to VPERMILPD*. 3824/// Note that VPERMIL mask matching is different depending whether theunderlying 3825/// type is 32 or 64. In the VPERMILPS the high half of the mask should point 3826/// to the same elements of the low, but to the higher half of the source. 3827/// In VPERMILPD the two lanes could be shuffled independently of each other 3828/// with the same restriction that lanes can't be crossed. Also handles PSHUFDY. 3829static bool isVPERMILPMask(ArrayRef<int> Mask, EVT VT, bool HasAVX) { 3830 if (!HasAVX) 3831 return false; 3832 3833 unsigned NumElts = VT.getVectorNumElements(); 3834 // Only match 256-bit with 32/64-bit types 3835 if (VT.getSizeInBits() != 256 || (NumElts != 4 && NumElts != 8)) 3836 return false; 3837 3838 unsigned NumLanes = VT.getSizeInBits()/128; 3839 unsigned LaneSize = NumElts/NumLanes; 3840 for (unsigned l = 0; l != NumElts; l += LaneSize) { 3841 for (unsigned i = 0; i != LaneSize; ++i) { 3842 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize)) 3843 return false; 3844 if (NumElts != 8 || l == 0) 3845 continue; 3846 // VPERMILPS handling 3847 if (Mask[i] < 0) 3848 continue; 3849 if (!isUndefOrEqual(Mask[i+l], Mask[i]+l)) 3850 return false; 3851 } 3852 } 3853 3854 return true; 3855} 3856 3857/// isCommutedMOVLMask - Returns true if the shuffle mask is except the reverse 3858/// of what x86 movss want. X86 movs requires the lowest element to be lowest 3859/// element of vector 2 and the other elements to come from vector 1 in order. 3860static bool isCommutedMOVLMask(ArrayRef<int> Mask, EVT VT, 3861 bool V2IsSplat = false, bool V2IsUndef = false) { 3862 if (!VT.is128BitVector()) 3863 return false; 3864 3865 unsigned NumOps = VT.getVectorNumElements(); 3866 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16) 3867 return false; 3868 3869 if (!isUndefOrEqual(Mask[0], 0)) 3870 return false; 3871 3872 for (unsigned i = 1; i != NumOps; ++i) 3873 if (!(isUndefOrEqual(Mask[i], i+NumOps) || 3874 (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) || 3875 (V2IsSplat && isUndefOrEqual(Mask[i], NumOps)))) 3876 return false; 3877 3878 return true; 3879} 3880 3881/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand 3882/// specifies a shuffle of elements that is suitable for input to MOVSHDUP. 
3883/// Masks to match: <1, 1, 3, 3> or <1, 1, 3, 3, 5, 5, 7, 7> 3884static bool isMOVSHDUPMask(ArrayRef<int> Mask, EVT VT, 3885 const X86Subtarget *Subtarget) { 3886 if (!Subtarget->hasSSE3()) 3887 return false; 3888 3889 unsigned NumElems = VT.getVectorNumElements(); 3890 3891 if ((VT.getSizeInBits() == 128 && NumElems != 4) || 3892 (VT.getSizeInBits() == 256 && NumElems != 8)) 3893 return false; 3894 3895 // "i+1" is the value the indexed mask element must have 3896 for (unsigned i = 0; i != NumElems; i += 2) 3897 if (!isUndefOrEqual(Mask[i], i+1) || 3898 !isUndefOrEqual(Mask[i+1], i+1)) 3899 return false; 3900 3901 return true; 3902} 3903 3904/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand 3905/// specifies a shuffle of elements that is suitable for input to MOVSLDUP. 3906/// Masks to match: <0, 0, 2, 2> or <0, 0, 2, 2, 4, 4, 6, 6> 3907static bool isMOVSLDUPMask(ArrayRef<int> Mask, EVT VT, 3908 const X86Subtarget *Subtarget) { 3909 if (!Subtarget->hasSSE3()) 3910 return false; 3911 3912 unsigned NumElems = VT.getVectorNumElements(); 3913 3914 if ((VT.getSizeInBits() == 128 && NumElems != 4) || 3915 (VT.getSizeInBits() == 256 && NumElems != 8)) 3916 return false; 3917 3918 // "i" is the value the indexed mask element must have 3919 for (unsigned i = 0; i != NumElems; i += 2) 3920 if (!isUndefOrEqual(Mask[i], i) || 3921 !isUndefOrEqual(Mask[i+1], i)) 3922 return false; 3923 3924 return true; 3925} 3926 3927/// isMOVDDUPYMask - Return true if the specified VECTOR_SHUFFLE operand 3928/// specifies a shuffle of elements that is suitable for input to 256-bit 3929/// version of MOVDDUP. 3930static bool isMOVDDUPYMask(ArrayRef<int> Mask, EVT VT, bool HasAVX) { 3931 if (!HasAVX || !VT.is256BitVector()) 3932 return false; 3933 3934 unsigned NumElts = VT.getVectorNumElements(); 3935 if (NumElts != 4) 3936 return false; 3937 3938 for (unsigned i = 0; i != NumElts/2; ++i) 3939 if (!isUndefOrEqual(Mask[i], 0)) 3940 return false; 3941 for (unsigned i = NumElts/2; i != NumElts; ++i) 3942 if (!isUndefOrEqual(Mask[i], NumElts/2)) 3943 return false; 3944 return true; 3945} 3946 3947/// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand 3948/// specifies a shuffle of elements that is suitable for input to 128-bit 3949/// version of MOVDDUP. 3950static bool isMOVDDUPMask(ArrayRef<int> Mask, EVT VT) { 3951 if (!VT.is128BitVector()) 3952 return false; 3953 3954 unsigned e = VT.getVectorNumElements() / 2; 3955 for (unsigned i = 0; i != e; ++i) 3956 if (!isUndefOrEqual(Mask[i], i)) 3957 return false; 3958 for (unsigned i = 0; i != e; ++i) 3959 if (!isUndefOrEqual(Mask[e+i], i)) 3960 return false; 3961 return true; 3962} 3963 3964/// isVEXTRACTF128Index - Return true if the specified 3965/// EXTRACT_SUBVECTOR operand specifies a vector extract that is 3966/// suitable for input to VEXTRACTF128. 3967bool X86::isVEXTRACTF128Index(SDNode *N) { 3968 if (!isa<ConstantSDNode>(N->getOperand(1).getNode())) 3969 return false; 3970 3971 // The index should be aligned on a 128-bit boundary. 
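  // For example, extracting a 128-bit v4i32 from a v8i32 at index 4 is
  // suitable (4 * 32 == 128), whereas index 2 is not (2 * 32 == 64).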
3972 uint64_t Index = 3973 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue(); 3974 3975 unsigned VL = N->getValueType(0).getVectorNumElements(); 3976 unsigned VBits = N->getValueType(0).getSizeInBits(); 3977 unsigned ElSize = VBits / VL; 3978 bool Result = (Index * ElSize) % 128 == 0; 3979 3980 return Result; 3981} 3982 3983/// isVINSERTF128Index - Return true if the specified INSERT_SUBVECTOR 3984/// operand specifies a subvector insert that is suitable for input to 3985/// VINSERTF128. 3986bool X86::isVINSERTF128Index(SDNode *N) { 3987 if (!isa<ConstantSDNode>(N->getOperand(2).getNode())) 3988 return false; 3989 3990 // The index should be aligned on a 128-bit boundary. 3991 uint64_t Index = 3992 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue(); 3993 3994 unsigned VL = N->getValueType(0).getVectorNumElements(); 3995 unsigned VBits = N->getValueType(0).getSizeInBits(); 3996 unsigned ElSize = VBits / VL; 3997 bool Result = (Index * ElSize) % 128 == 0; 3998 3999 return Result; 4000} 4001 4002/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle 4003/// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions. 4004/// Handles 128-bit and 256-bit. 4005static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) { 4006 EVT VT = N->getValueType(0); 4007 4008 assert((VT.is128BitVector() || VT.is256BitVector()) && 4009 "Unsupported vector type for PSHUF/SHUFP"); 4010 4011 // Handle 128 and 256-bit vector lengths. AVX defines PSHUF/SHUFP to operate 4012 // independently on 128-bit lanes. 4013 unsigned NumElts = VT.getVectorNumElements(); 4014 unsigned NumLanes = VT.getSizeInBits()/128; 4015 unsigned NumLaneElts = NumElts/NumLanes; 4016 4017 assert((NumLaneElts == 2 || NumLaneElts == 4) && 4018 "Only supports 2 or 4 elements per lane"); 4019 4020 unsigned Shift = (NumLaneElts == 4) ? 1 : 0; 4021 unsigned Mask = 0; 4022 for (unsigned i = 0; i != NumElts; ++i) { 4023 int Elt = N->getMaskElt(i); 4024 if (Elt < 0) continue; 4025 Elt &= NumLaneElts - 1; 4026 unsigned ShAmt = (i << Shift) % 8; 4027 Mask |= Elt << ShAmt; 4028 } 4029 4030 return Mask; 4031} 4032 4033/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle 4034/// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction. 4035static unsigned getShufflePSHUFHWImmediate(ShuffleVectorSDNode *N) { 4036 EVT VT = N->getValueType(0); 4037 4038 assert((VT == MVT::v8i16 || VT == MVT::v16i16) && 4039 "Unsupported vector type for PSHUFHW"); 4040 4041 unsigned NumElts = VT.getVectorNumElements(); 4042 4043 unsigned Mask = 0; 4044 for (unsigned l = 0; l != NumElts; l += 8) { 4045 // 8 nodes per lane, but we only care about the last 4. 4046 for (unsigned i = 0; i < 4; ++i) { 4047 int Elt = N->getMaskElt(l+i+4); 4048 if (Elt < 0) continue; 4049 Elt &= 0x3; // only 2-bits. 4050 Mask |= Elt << (i * 2); 4051 } 4052 } 4053 4054 return Mask; 4055} 4056 4057/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle 4058/// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction. 4059static unsigned getShufflePSHUFLWImmediate(ShuffleVectorSDNode *N) { 4060 EVT VT = N->getValueType(0); 4061 4062 assert((VT == MVT::v8i16 || VT == MVT::v16i16) && 4063 "Unsupported vector type for PSHUFHW"); 4064 4065 unsigned NumElts = VT.getVectorNumElements(); 4066 4067 unsigned Mask = 0; 4068 for (unsigned l = 0; l != NumElts; l += 8) { 4069 // 8 nodes per lane, but we only care about the first 4. 
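    // For example, the v8i16 mask <3, 2, 1, 0, 4, 5, 6, 7> (reverse the low
    // quadword) encodes as 3 | (2 << 2) | (1 << 4) | (0 << 6) == 0x1B.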
4070 for (unsigned i = 0; i < 4; ++i) { 4071 int Elt = N->getMaskElt(l+i); 4072 if (Elt < 0) continue; 4073 Elt &= 0x3; // only 2-bits 4074 Mask |= Elt << (i * 2); 4075 } 4076 } 4077 4078 return Mask; 4079} 4080 4081/// getShufflePALIGNRImmediate - Return the appropriate immediate to shuffle 4082/// the specified VECTOR_SHUFFLE mask with the PALIGNR instruction. 4083static unsigned getShufflePALIGNRImmediate(ShuffleVectorSDNode *SVOp) { 4084 EVT VT = SVOp->getValueType(0); 4085 unsigned EltSize = VT.getVectorElementType().getSizeInBits() >> 3; 4086 4087 unsigned NumElts = VT.getVectorNumElements(); 4088 unsigned NumLanes = VT.getSizeInBits()/128; 4089 unsigned NumLaneElts = NumElts/NumLanes; 4090 4091 int Val = 0; 4092 unsigned i; 4093 for (i = 0; i != NumElts; ++i) { 4094 Val = SVOp->getMaskElt(i); 4095 if (Val >= 0) 4096 break; 4097 } 4098 if (Val >= (int)NumElts) 4099 Val -= NumElts - NumLaneElts; 4100 4101 assert(Val - i > 0 && "PALIGNR imm should be positive"); 4102 return (Val - i) * EltSize; 4103} 4104 4105/// getExtractVEXTRACTF128Immediate - Return the appropriate immediate 4106/// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128 4107/// instructions. 4108unsigned X86::getExtractVEXTRACTF128Immediate(SDNode *N) { 4109 if (!isa<ConstantSDNode>(N->getOperand(1).getNode())) 4110 llvm_unreachable("Illegal extract subvector for VEXTRACTF128"); 4111 4112 uint64_t Index = 4113 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue(); 4114 4115 EVT VecVT = N->getOperand(0).getValueType(); 4116 EVT ElVT = VecVT.getVectorElementType(); 4117 4118 unsigned NumElemsPerChunk = 128 / ElVT.getSizeInBits(); 4119 return Index / NumElemsPerChunk; 4120} 4121 4122/// getInsertVINSERTF128Immediate - Return the appropriate immediate 4123/// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128 4124/// instructions. 4125unsigned X86::getInsertVINSERTF128Immediate(SDNode *N) { 4126 if (!isa<ConstantSDNode>(N->getOperand(2).getNode())) 4127 llvm_unreachable("Illegal insert subvector for VINSERTF128"); 4128 4129 uint64_t Index = 4130 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue(); 4131 4132 EVT VecVT = N->getValueType(0); 4133 EVT ElVT = VecVT.getVectorElementType(); 4134 4135 unsigned NumElemsPerChunk = 128 / ElVT.getSizeInBits(); 4136 return Index / NumElemsPerChunk; 4137} 4138 4139/// getShuffleCLImmediate - Return the appropriate immediate to shuffle 4140/// the specified VECTOR_SHUFFLE mask with VPERMQ and VPERMPD instructions. 4141/// Handles 256-bit. 4142static unsigned getShuffleCLImmediate(ShuffleVectorSDNode *N) { 4143 EVT VT = N->getValueType(0); 4144 4145 unsigned NumElts = VT.getVectorNumElements(); 4146 4147 assert((VT.is256BitVector() && NumElts == 4) && 4148 "Unsupported vector type for VPERMQ/VPERMPD"); 4149 4150 unsigned Mask = 0; 4151 for (unsigned i = 0; i != NumElts; ++i) { 4152 int Elt = N->getMaskElt(i); 4153 if (Elt < 0) 4154 continue; 4155 Mask |= Elt << (i*2); 4156 } 4157 4158 return Mask; 4159} 4160/// isZeroNode - Returns true if Elt is a constant zero or a floating point 4161/// constant +0.0. 4162bool X86::isZeroNode(SDValue Elt) { 4163 return ((isa<ConstantSDNode>(Elt) && 4164 cast<ConstantSDNode>(Elt)->isNullValue()) || 4165 (isa<ConstantFPSDNode>(Elt) && 4166 cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero())); 4167} 4168 4169/// CommuteVectorShuffle - Swap vector_shuffle operands as well as values in 4170/// their permute mask. 
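/// For example, commuting the v4i32 shuffle of V1, V2 with mask <0, 5, 2, 7>
/// yields the equivalent shuffle of V2, V1 with mask <4, 1, 6, 3>.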
4171static SDValue CommuteVectorShuffle(ShuffleVectorSDNode *SVOp, 4172 SelectionDAG &DAG) { 4173 EVT VT = SVOp->getValueType(0); 4174 unsigned NumElems = VT.getVectorNumElements(); 4175 SmallVector<int, 8> MaskVec; 4176 4177 for (unsigned i = 0; i != NumElems; ++i) { 4178 int Idx = SVOp->getMaskElt(i); 4179 if (Idx >= 0) { 4180 if (Idx < (int)NumElems) 4181 Idx += NumElems; 4182 else 4183 Idx -= NumElems; 4184 } 4185 MaskVec.push_back(Idx); 4186 } 4187 return DAG.getVectorShuffle(VT, SVOp->getDebugLoc(), SVOp->getOperand(1), 4188 SVOp->getOperand(0), &MaskVec[0]); 4189} 4190 4191/// ShouldXformToMOVHLPS - Return true if the node should be transformed to 4192/// match movhlps. The lower half elements should come from upper half of 4193/// V1 (and in order), and the upper half elements should come from the upper 4194/// half of V2 (and in order). 4195static bool ShouldXformToMOVHLPS(ArrayRef<int> Mask, EVT VT) { 4196 if (!VT.is128BitVector()) 4197 return false; 4198 if (VT.getVectorNumElements() != 4) 4199 return false; 4200 for (unsigned i = 0, e = 2; i != e; ++i) 4201 if (!isUndefOrEqual(Mask[i], i+2)) 4202 return false; 4203 for (unsigned i = 2; i != 4; ++i) 4204 if (!isUndefOrEqual(Mask[i], i+4)) 4205 return false; 4206 return true; 4207} 4208 4209/// isScalarLoadToVector - Returns true if the node is a scalar load that 4210/// is promoted to a vector. It also returns the LoadSDNode by reference if 4211/// required. 4212static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = NULL) { 4213 if (N->getOpcode() != ISD::SCALAR_TO_VECTOR) 4214 return false; 4215 N = N->getOperand(0).getNode(); 4216 if (!ISD::isNON_EXTLoad(N)) 4217 return false; 4218 if (LD) 4219 *LD = cast<LoadSDNode>(N); 4220 return true; 4221} 4222 4223// Test whether the given value is a vector value which will be legalized 4224// into a load. 4225static bool WillBeConstantPoolLoad(SDNode *N) { 4226 if (N->getOpcode() != ISD::BUILD_VECTOR) 4227 return false; 4228 4229 // Check for any non-constant elements. 4230 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) 4231 switch (N->getOperand(i).getNode()->getOpcode()) { 4232 case ISD::UNDEF: 4233 case ISD::ConstantFP: 4234 case ISD::Constant: 4235 break; 4236 default: 4237 return false; 4238 } 4239 4240 // Vectors of all-zeros and all-ones are materialized with special 4241 // instructions rather than being loaded. 4242 return !ISD::isBuildVectorAllZeros(N) && 4243 !ISD::isBuildVectorAllOnes(N); 4244} 4245 4246/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to 4247/// match movlp{s|d}. The lower half elements should come from lower half of 4248/// V1 (and in order), and the upper half elements should come from the upper 4249/// half of V2 (and in order). And since V1 will become the source of the 4250/// MOVLP, it must be either a vector load or a scalar load to vector. 4251static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2, 4252 ArrayRef<int> Mask, EVT VT) { 4253 if (!VT.is128BitVector()) 4254 return false; 4255 4256 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1)) 4257 return false; 4258 // Is V2 is a vector load, don't do this transformation. We will try to use 4259 // load folding shufps op. 
4260 if (ISD::isNON_EXTLoad(V2) || WillBeConstantPoolLoad(V2)) 4261 return false; 4262 4263 unsigned NumElems = VT.getVectorNumElements(); 4264 4265 if (NumElems != 2 && NumElems != 4) 4266 return false; 4267 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 4268 if (!isUndefOrEqual(Mask[i], i)) 4269 return false; 4270 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i) 4271 if (!isUndefOrEqual(Mask[i], i+NumElems)) 4272 return false; 4273 return true; 4274} 4275 4276/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are 4277/// all the same. 4278static bool isSplatVector(SDNode *N) { 4279 if (N->getOpcode() != ISD::BUILD_VECTOR) 4280 return false; 4281 4282 SDValue SplatValue = N->getOperand(0); 4283 for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i) 4284 if (N->getOperand(i) != SplatValue) 4285 return false; 4286 return true; 4287} 4288 4289/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved 4290/// to an zero vector. 4291/// FIXME: move to dag combiner / method on ShuffleVectorSDNode 4292static bool isZeroShuffle(ShuffleVectorSDNode *N) { 4293 SDValue V1 = N->getOperand(0); 4294 SDValue V2 = N->getOperand(1); 4295 unsigned NumElems = N->getValueType(0).getVectorNumElements(); 4296 for (unsigned i = 0; i != NumElems; ++i) { 4297 int Idx = N->getMaskElt(i); 4298 if (Idx >= (int)NumElems) { 4299 unsigned Opc = V2.getOpcode(); 4300 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode())) 4301 continue; 4302 if (Opc != ISD::BUILD_VECTOR || 4303 !X86::isZeroNode(V2.getOperand(Idx-NumElems))) 4304 return false; 4305 } else if (Idx >= 0) { 4306 unsigned Opc = V1.getOpcode(); 4307 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode())) 4308 continue; 4309 if (Opc != ISD::BUILD_VECTOR || 4310 !X86::isZeroNode(V1.getOperand(Idx))) 4311 return false; 4312 } 4313 } 4314 return true; 4315} 4316 4317/// getZeroVector - Returns a vector of specified type with all zero elements. 4318/// 4319static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget, 4320 SelectionDAG &DAG, DebugLoc dl) { 4321 assert(VT.isVector() && "Expected a vector type"); 4322 unsigned Size = VT.getSizeInBits(); 4323 4324 // Always build SSE zero vectors as <4 x i32> bitcasted 4325 // to their dest type. This ensures they get CSE'd. 4326 SDValue Vec; 4327 if (Size == 128) { // SSE 4328 if (Subtarget->hasSSE2()) { // SSE2 4329 SDValue Cst = DAG.getTargetConstant(0, MVT::i32); 4330 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); 4331 } else { // SSE1 4332 SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32); 4333 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst); 4334 } 4335 } else if (Size == 256) { // AVX 4336 if (Subtarget->hasAVX2()) { // AVX2 4337 SDValue Cst = DAG.getTargetConstant(0, MVT::i32); 4338 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; 4339 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops, 8); 4340 } else { 4341 // 256-bit logic and arithmetic instructions in AVX are all 4342 // floating-point, no support for integer ops. Emit fp zeroed vectors. 4343 SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32); 4344 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; 4345 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops, 8); 4346 } 4347 } else 4348 llvm_unreachable("Unexpected vector type"); 4349 4350 return DAG.getNode(ISD::BITCAST, dl, VT, Vec); 4351} 4352 4353/// getOnesVector - Returns a vector of specified type with all bits set. 
4354/// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with 4355/// no AVX2 supprt, use two <4 x i32> inserted in a <8 x i32> appropriately. 4356/// Then bitcast to their original type, ensuring they get CSE'd. 4357static SDValue getOnesVector(EVT VT, bool HasAVX2, SelectionDAG &DAG, 4358 DebugLoc dl) { 4359 assert(VT.isVector() && "Expected a vector type"); 4360 unsigned Size = VT.getSizeInBits(); 4361 4362 SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32); 4363 SDValue Vec; 4364 if (Size == 256) { 4365 if (HasAVX2) { // AVX2 4366 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; 4367 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops, 8); 4368 } else { // AVX 4369 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); 4370 Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl); 4371 } 4372 } else if (Size == 128) { 4373 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); 4374 } else 4375 llvm_unreachable("Unexpected vector type"); 4376 4377 return DAG.getNode(ISD::BITCAST, dl, VT, Vec); 4378} 4379 4380/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements 4381/// that point to V2 points to its first element. 4382static void NormalizeMask(SmallVectorImpl<int> &Mask, unsigned NumElems) { 4383 for (unsigned i = 0; i != NumElems; ++i) { 4384 if (Mask[i] > (int)NumElems) { 4385 Mask[i] = NumElems; 4386 } 4387 } 4388} 4389 4390/// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd 4391/// operation of specified width. 4392static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, 4393 SDValue V2) { 4394 unsigned NumElems = VT.getVectorNumElements(); 4395 SmallVector<int, 8> Mask; 4396 Mask.push_back(NumElems); 4397 for (unsigned i = 1; i != NumElems; ++i) 4398 Mask.push_back(i); 4399 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]); 4400} 4401 4402/// getUnpackl - Returns a vector_shuffle node for an unpackl operation. 4403static SDValue getUnpackl(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, 4404 SDValue V2) { 4405 unsigned NumElems = VT.getVectorNumElements(); 4406 SmallVector<int, 8> Mask; 4407 for (unsigned i = 0, e = NumElems/2; i != e; ++i) { 4408 Mask.push_back(i); 4409 Mask.push_back(i + NumElems); 4410 } 4411 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]); 4412} 4413 4414/// getUnpackh - Returns a vector_shuffle node for an unpackh operation. 4415static SDValue getUnpackh(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, 4416 SDValue V2) { 4417 unsigned NumElems = VT.getVectorNumElements(); 4418 SmallVector<int, 8> Mask; 4419 for (unsigned i = 0, Half = NumElems/2; i != Half; ++i) { 4420 Mask.push_back(i + Half); 4421 Mask.push_back(i + NumElems + Half); 4422 } 4423 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]); 4424} 4425 4426// PromoteSplati8i16 - All i16 and i8 vector types can't be used directly by 4427// a generic shuffle instruction because the target has no such instructions. 4428// Generate shuffles which repeat i16 and i8 several times until they can be 4429// represented by v4f32 and then be manipulated by target suported shuffles. 
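// For example, splatting element 5 of a v8i16: a single unpckh of V with
// itself leaves two adjacent copies of that element in 32-bit position 1, so
// after a bitcast to v4f32 the splat is just the <1, 1, 1, 1> shuffle emitted
// by getLegalSplat.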
4430static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) { 4431 EVT VT = V.getValueType(); 4432 int NumElems = VT.getVectorNumElements(); 4433 DebugLoc dl = V.getDebugLoc(); 4434 4435 while (NumElems > 4) { 4436 if (EltNo < NumElems/2) { 4437 V = getUnpackl(DAG, dl, VT, V, V); 4438 } else { 4439 V = getUnpackh(DAG, dl, VT, V, V); 4440 EltNo -= NumElems/2; 4441 } 4442 NumElems >>= 1; 4443 } 4444 return V; 4445} 4446 4447/// getLegalSplat - Generate a legal splat with supported x86 shuffles 4448static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) { 4449 EVT VT = V.getValueType(); 4450 DebugLoc dl = V.getDebugLoc(); 4451 unsigned Size = VT.getSizeInBits(); 4452 4453 if (Size == 128) { 4454 V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V); 4455 int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo }; 4456 V = DAG.getVectorShuffle(MVT::v4f32, dl, V, DAG.getUNDEF(MVT::v4f32), 4457 &SplatMask[0]); 4458 } else if (Size == 256) { 4459 // To use VPERMILPS to splat scalars, the second half of indicies must 4460 // refer to the higher part, which is a duplication of the lower one, 4461 // because VPERMILPS can only handle in-lane permutations. 4462 int SplatMask[8] = { EltNo, EltNo, EltNo, EltNo, 4463 EltNo+4, EltNo+4, EltNo+4, EltNo+4 }; 4464 4465 V = DAG.getNode(ISD::BITCAST, dl, MVT::v8f32, V); 4466 V = DAG.getVectorShuffle(MVT::v8f32, dl, V, DAG.getUNDEF(MVT::v8f32), 4467 &SplatMask[0]); 4468 } else 4469 llvm_unreachable("Vector size not supported"); 4470 4471 return DAG.getNode(ISD::BITCAST, dl, VT, V); 4472} 4473 4474/// PromoteSplat - Splat is promoted to target supported vector shuffles. 4475static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) { 4476 EVT SrcVT = SV->getValueType(0); 4477 SDValue V1 = SV->getOperand(0); 4478 DebugLoc dl = SV->getDebugLoc(); 4479 4480 int EltNo = SV->getSplatIndex(); 4481 int NumElems = SrcVT.getVectorNumElements(); 4482 unsigned Size = SrcVT.getSizeInBits(); 4483 4484 assert(((Size == 128 && NumElems > 4) || Size == 256) && 4485 "Unknown how to promote splat for type"); 4486 4487 // Extract the 128-bit part containing the splat element and update 4488 // the splat element index when it refers to the higher register. 4489 if (Size == 256) { 4490 V1 = Extract128BitVector(V1, EltNo, DAG, dl); 4491 if (EltNo >= NumElems/2) 4492 EltNo -= NumElems/2; 4493 } 4494 4495 // All i16 and i8 vector types can't be used directly by a generic shuffle 4496 // instruction because the target has no such instruction. Generate shuffles 4497 // which repeat i16 and i8 several times until they fit in i32, and then can 4498 // be manipulated by target suported shuffles. 4499 EVT EltVT = SrcVT.getVectorElementType(); 4500 if (EltVT == MVT::i8 || EltVT == MVT::i16) 4501 V1 = PromoteSplati8i16(V1, DAG, EltNo); 4502 4503 // Recreate the 256-bit vector and place the same 128-bit vector 4504 // into the low and high part. This is necessary because we want 4505 // to use VPERM* to shuffle the vectors 4506 if (Size == 256) { 4507 V1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT, V1, V1); 4508 } 4509 4510 return getLegalSplat(DAG, V1, EltNo); 4511} 4512 4513/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified 4514/// vector of zero or undef vector. This produces a shuffle where the low 4515/// element of V2 is swizzled into the zero/undef vector, landing at element 4516/// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3). 
4517static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx, 4518 bool IsZero, 4519 const X86Subtarget *Subtarget, 4520 SelectionDAG &DAG) { 4521 EVT VT = V2.getValueType(); 4522 SDValue V1 = IsZero 4523 ? getZeroVector(VT, Subtarget, DAG, V2.getDebugLoc()) : DAG.getUNDEF(VT); 4524 unsigned NumElems = VT.getVectorNumElements(); 4525 SmallVector<int, 16> MaskVec; 4526 for (unsigned i = 0; i != NumElems; ++i) 4527 // If this is the insertion idx, put the low elt of V2 here. 4528 MaskVec.push_back(i == Idx ? NumElems : i); 4529 return DAG.getVectorShuffle(VT, V2.getDebugLoc(), V1, V2, &MaskVec[0]); 4530} 4531 4532/// getTargetShuffleMask - Calculates the shuffle mask corresponding to the 4533/// target specific opcode. Returns true if the Mask could be calculated. 4534/// Sets IsUnary to true if only uses one source. 4535static bool getTargetShuffleMask(SDNode *N, MVT VT, 4536 SmallVectorImpl<int> &Mask, bool &IsUnary) { 4537 unsigned NumElems = VT.getVectorNumElements(); 4538 SDValue ImmN; 4539 4540 IsUnary = false; 4541 switch(N->getOpcode()) { 4542 case X86ISD::SHUFP: 4543 ImmN = N->getOperand(N->getNumOperands()-1); 4544 DecodeSHUFPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); 4545 break; 4546 case X86ISD::UNPCKH: 4547 DecodeUNPCKHMask(VT, Mask); 4548 break; 4549 case X86ISD::UNPCKL: 4550 DecodeUNPCKLMask(VT, Mask); 4551 break; 4552 case X86ISD::MOVHLPS: 4553 DecodeMOVHLPSMask(NumElems, Mask); 4554 break; 4555 case X86ISD::MOVLHPS: 4556 DecodeMOVLHPSMask(NumElems, Mask); 4557 break; 4558 case X86ISD::PSHUFD: 4559 case X86ISD::VPERMILP: 4560 ImmN = N->getOperand(N->getNumOperands()-1); 4561 DecodePSHUFMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); 4562 IsUnary = true; 4563 break; 4564 case X86ISD::PSHUFHW: 4565 ImmN = N->getOperand(N->getNumOperands()-1); 4566 DecodePSHUFHWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); 4567 IsUnary = true; 4568 break; 4569 case X86ISD::PSHUFLW: 4570 ImmN = N->getOperand(N->getNumOperands()-1); 4571 DecodePSHUFLWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); 4572 IsUnary = true; 4573 break; 4574 case X86ISD::VPERMI: 4575 ImmN = N->getOperand(N->getNumOperands()-1); 4576 DecodeVPERMMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); 4577 IsUnary = true; 4578 break; 4579 case X86ISD::MOVSS: 4580 case X86ISD::MOVSD: { 4581 // The index 0 always comes from the first element of the second source, 4582 // this is why MOVSS and MOVSD are used in the first place. The other 4583 // elements come from the other positions of the first source vector 4584 Mask.push_back(NumElems); 4585 for (unsigned i = 1; i != NumElems; ++i) { 4586 Mask.push_back(i); 4587 } 4588 break; 4589 } 4590 case X86ISD::VPERM2X128: 4591 ImmN = N->getOperand(N->getNumOperands()-1); 4592 DecodeVPERM2X128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); 4593 if (Mask.empty()) return false; 4594 break; 4595 case X86ISD::MOVDDUP: 4596 case X86ISD::MOVLHPD: 4597 case X86ISD::MOVLPD: 4598 case X86ISD::MOVLPS: 4599 case X86ISD::MOVSHDUP: 4600 case X86ISD::MOVSLDUP: 4601 case X86ISD::PALIGN: 4602 // Not yet implemented 4603 return false; 4604 default: llvm_unreachable("unknown target shuffle node"); 4605 } 4606 4607 return true; 4608} 4609 4610/// getShuffleScalarElt - Returns the scalar element that will make up the ith 4611/// element of the result of the vector shuffle. 
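/// For example, for (vector_shuffle (build_vector a,b,c,d), undef, <2,2,3,3>)
/// and Index == 1, the mask maps index 1 to element 2 of the first operand,
/// so the returned scalar is c.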
4612static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
4613                                   unsigned Depth) {
4614  if (Depth == 6)
4615    return SDValue();  // Limit search depth.
4616
4617  SDValue V = SDValue(N, 0);
4618  EVT VT = V.getValueType();
4619  unsigned Opcode = V.getOpcode();
4620
4621  // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
4622  if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
4623    int Elt = SV->getMaskElt(Index);
4624
4625    if (Elt < 0)
4626      return DAG.getUNDEF(VT.getVectorElementType());
4627
4628    unsigned NumElems = VT.getVectorNumElements();
4629    SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
4630                                         : SV->getOperand(1);
4631    return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
4632  }
4633
4634  // Recurse into target specific vector shuffles to find scalars.
4635  if (isTargetShuffle(Opcode)) {
4636    MVT ShufVT = V.getValueType().getSimpleVT();
4637    unsigned NumElems = ShufVT.getVectorNumElements();
4638    SmallVector<int, 16> ShuffleMask;
4639    bool IsUnary;
4640
4641    if (!getTargetShuffleMask(N, ShufVT, ShuffleMask, IsUnary))
4642      return SDValue();
4643
4644    int Elt = ShuffleMask[Index];
4645    if (Elt < 0)
4646      return DAG.getUNDEF(ShufVT.getVectorElementType());
4647
4648    SDValue NewV = (Elt < (int)NumElems) ? N->getOperand(0)
4649                                         : N->getOperand(1);
4650    return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
4651                               Depth+1);
4652  }
4653
4654  // Actual nodes that may contain scalar elements
4655  if (Opcode == ISD::BITCAST) {
4656    V = V.getOperand(0);
4657    EVT SrcVT = V.getValueType();
4658    unsigned NumElems = VT.getVectorNumElements();
4659
4660    if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
4661      return SDValue();
4662  }
4663
4664  if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
4665    return (Index == 0) ? V.getOperand(0)
4666                        : DAG.getUNDEF(VT.getVectorElementType());
4667
4668  if (V.getOpcode() == ISD::BUILD_VECTOR)
4669    return V.getOperand(Index);
4670
4671  return SDValue();
4672}
4673
4674/// getNumOfConsecutiveZeros - Return the number of consecutive elements of a
4675/// vector shuffle operation that resolve to a zero (or undef) scalar. The
4676/// search can start from either end of the mask, left or right.
4677static
4678unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp, unsigned NumElems,
4679                                  bool ZerosFromLeft, SelectionDAG &DAG) {
4680  unsigned i;
4681  for (i = 0; i != NumElems; ++i) {
4682    unsigned Index = ZerosFromLeft ? i : NumElems-i-1;
4683    SDValue Elt = getShuffleScalarElt(SVOp, Index, DAG, 0);
4684    if (!(Elt.getNode() &&
4685         (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt))))
4686      break;
4687  }
4688
4689  return i;
4690}
4691
4692/// isShuffleMaskConsecutive - Check if the shuffle mask indices [MaskI, MaskE)
4693/// correspond consecutively to elements from one of the vector operands,
4694/// starting from its index OpIdx. Also sets OpNum to the source operand used.
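/// For example, with NumElems == 4, a mask of <2, 3, u, u>, MaskI == 0,
/// MaskE == 2 and OpIdx == 2, entries 0 and 1 refer to elements 2 and 3 of the
/// first operand, so this returns true with OpNum == 0.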
4695static
4696bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp,
4697                              unsigned MaskI, unsigned MaskE, unsigned OpIdx,
4698                              unsigned NumElems, unsigned &OpNum) {
4699  bool SeenV1 = false;
4700  bool SeenV2 = false;
4701
4702  for (unsigned i = MaskI; i != MaskE; ++i, ++OpIdx) {
4703    int Idx = SVOp->getMaskElt(i);
4704    // Ignore undef indices
4705    if (Idx < 0)
4706      continue;
4707
4708    if (Idx < (int)NumElems)
4709      SeenV1 = true;
4710    else
4711      SeenV2 = true;
4712
4713    // Only accept consecutive elements from the same vector
4714    if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2))
4715      return false;
4716  }
4717
4718  OpNum = SeenV1 ? 0 : 1;
4719  return true;
4720}
4721
4722/// isVectorShiftRight - Returns true if the shuffle can be implemented as a
4723/// logical right shift of a vector.
4724static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
4725                               bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
4726  unsigned NumElems = SVOp->getValueType(0).getVectorNumElements();
4727  unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems,
4728              false /* check zeros from right */, DAG);
4729  unsigned OpSrc;
4730
4731  if (!NumZeros)
4732    return false;
4733
4734  // Considering the elements in the mask that are not consecutive zeros,
4735  // check if they consecutively come from only one of the source vectors.
4736  //
4737  //               V1 = {X, A, B, C}     0
4738  //                         \  \  \    /
4739  //   vector_shuffle V1, V2 <1, 2, 3, X>
4740  //
4741  if (!isShuffleMaskConsecutive(SVOp,
4742            0,                 // Mask Start Index
4743            NumElems-NumZeros, // Mask End Index (exclusive)
4744            NumZeros,          // Where to start looking in the src vector
4745            NumElems,          // Number of elements in vector
4746            OpSrc))            // Which source operand?
4747    return false;
4748
4749  isLeft = false;
4750  ShAmt = NumZeros;
4751  ShVal = SVOp->getOperand(OpSrc);
4752  return true;
4753}
4754
4755/// isVectorShiftLeft - Returns true if the shuffle can be implemented as a
4756/// logical left shift of a vector.
4757static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
4758                              bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
4759  unsigned NumElems = SVOp->getValueType(0).getVectorNumElements();
4760  unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems,
4761              true /* check zeros from left */, DAG);
4762  unsigned OpSrc;
4763
4764  if (!NumZeros)
4765    return false;
4766
4767  // Considering the elements in the mask that are not consecutive zeros,
4768  // check if they consecutively come from only one of the source vectors.
4769  //
4770  //                           0    { A, B, X, X } = V2
4771  //                          / \    /  /
4772  //   vector_shuffle V1, V2 <X, X, 4, 5>
4773  //
4774  if (!isShuffleMaskConsecutive(SVOp,
4775            NumZeros,     // Mask Start Index
4776            NumElems,     // Mask End Index (exclusive)
4777            0,            // Where to start looking in the src vector
4778            NumElems,     // Number of elements in vector
4779            OpSrc))       // Which source operand?
4780    return false;
4781
4782  isLeft = true;
4783  ShAmt = NumZeros;
4784  ShVal = SVOp->getOperand(OpSrc);
4785  return true;
4786}
4787
4788/// isVectorShift - Returns true if the shuffle can be implemented as a
4789/// logical left or right shift of a vector.
4790static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
4791                          bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
4792  // Although the logic below supports any bitwidth size, there are no
4793  // shift instructions which handle more than 128-bit vectors.
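  // For example, a <4 x i32> shuffle of V1 with an all-zeros V2 and mask
  // <4, 0, 1, 2> produces <0, V1[0], V1[1], V1[2]>, i.e. a logical left shift
  // of V1 by one element, which getVShift later emits as a VSHLDQ (pslldq).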
4794 if (!SVOp->getValueType(0).is128BitVector()) 4795 return false; 4796 4797 if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) || 4798 isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt)) 4799 return true; 4800 4801 return false; 4802} 4803 4804/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8. 4805/// 4806static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros, 4807 unsigned NumNonZero, unsigned NumZero, 4808 SelectionDAG &DAG, 4809 const X86Subtarget* Subtarget, 4810 const TargetLowering &TLI) { 4811 if (NumNonZero > 8) 4812 return SDValue(); 4813 4814 DebugLoc dl = Op.getDebugLoc(); 4815 SDValue V(0, 0); 4816 bool First = true; 4817 for (unsigned i = 0; i < 16; ++i) { 4818 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0; 4819 if (ThisIsNonZero && First) { 4820 if (NumZero) 4821 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl); 4822 else 4823 V = DAG.getUNDEF(MVT::v8i16); 4824 First = false; 4825 } 4826 4827 if ((i & 1) != 0) { 4828 SDValue ThisElt(0, 0), LastElt(0, 0); 4829 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0; 4830 if (LastIsNonZero) { 4831 LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl, 4832 MVT::i16, Op.getOperand(i-1)); 4833 } 4834 if (ThisIsNonZero) { 4835 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i)); 4836 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16, 4837 ThisElt, DAG.getConstant(8, MVT::i8)); 4838 if (LastIsNonZero) 4839 ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt); 4840 } else 4841 ThisElt = LastElt; 4842 4843 if (ThisElt.getNode()) 4844 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt, 4845 DAG.getIntPtrConstant(i/2)); 4846 } 4847 } 4848 4849 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V); 4850} 4851 4852/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16. 4853/// 4854static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros, 4855 unsigned NumNonZero, unsigned NumZero, 4856 SelectionDAG &DAG, 4857 const X86Subtarget* Subtarget, 4858 const TargetLowering &TLI) { 4859 if (NumNonZero > 4) 4860 return SDValue(); 4861 4862 DebugLoc dl = Op.getDebugLoc(); 4863 SDValue V(0, 0); 4864 bool First = true; 4865 for (unsigned i = 0; i < 8; ++i) { 4866 bool isNonZero = (NonZeros & (1 << i)) != 0; 4867 if (isNonZero) { 4868 if (First) { 4869 if (NumZero) 4870 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl); 4871 else 4872 V = DAG.getUNDEF(MVT::v8i16); 4873 First = false; 4874 } 4875 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, 4876 MVT::v8i16, V, Op.getOperand(i), 4877 DAG.getIntPtrConstant(i)); 4878 } 4879 } 4880 4881 return V; 4882} 4883 4884/// getVShift - Return a vector logical shift node. 4885/// 4886static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, 4887 unsigned NumBits, SelectionDAG &DAG, 4888 const TargetLowering &TLI, DebugLoc dl) { 4889 assert(VT.is128BitVector() && "Unknown type for VShift"); 4890 EVT ShVT = MVT::v2i64; 4891 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ; 4892 SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp); 4893 return DAG.getNode(ISD::BITCAST, dl, VT, 4894 DAG.getNode(Opc, dl, ShVT, SrcOp, 4895 DAG.getConstant(NumBits, 4896 TLI.getShiftAmountTy(SrcOp.getValueType())))); 4897} 4898 4899SDValue 4900X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl, 4901 SelectionDAG &DAG) const { 4902 4903 // Check if the scalar load can be widened into a vector load. And if 4904 // the address is "base + cst" see if the cst can be "absorbed" into 4905 // the shuffle mask. 
4906 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) { 4907 SDValue Ptr = LD->getBasePtr(); 4908 if (!ISD::isNormalLoad(LD) || LD->isVolatile()) 4909 return SDValue(); 4910 EVT PVT = LD->getValueType(0); 4911 if (PVT != MVT::i32 && PVT != MVT::f32) 4912 return SDValue(); 4913 4914 int FI = -1; 4915 int64_t Offset = 0; 4916 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) { 4917 FI = FINode->getIndex(); 4918 Offset = 0; 4919 } else if (DAG.isBaseWithConstantOffset(Ptr) && 4920 isa<FrameIndexSDNode>(Ptr.getOperand(0))) { 4921 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 4922 Offset = Ptr.getConstantOperandVal(1); 4923 Ptr = Ptr.getOperand(0); 4924 } else { 4925 return SDValue(); 4926 } 4927 4928 // FIXME: 256-bit vector instructions don't require a strict alignment, 4929 // improve this code to support it better. 4930 unsigned RequiredAlign = VT.getSizeInBits()/8; 4931 SDValue Chain = LD->getChain(); 4932 // Make sure the stack object alignment is at least 16 or 32. 4933 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 4934 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) { 4935 if (MFI->isFixedObjectIndex(FI)) { 4936 // Can't change the alignment. FIXME: It's possible to compute 4937 // the exact stack offset and reference FI + adjust offset instead. 4938 // If someone *really* cares about this. That's the way to implement it. 4939 return SDValue(); 4940 } else { 4941 MFI->setObjectAlignment(FI, RequiredAlign); 4942 } 4943 } 4944 4945 // (Offset % 16 or 32) must be multiple of 4. Then address is then 4946 // Ptr + (Offset & ~15). 4947 if (Offset < 0) 4948 return SDValue(); 4949 if ((Offset % RequiredAlign) & 3) 4950 return SDValue(); 4951 int64_t StartOffset = Offset & ~(RequiredAlign-1); 4952 if (StartOffset) 4953 Ptr = DAG.getNode(ISD::ADD, Ptr.getDebugLoc(), Ptr.getValueType(), 4954 Ptr,DAG.getConstant(StartOffset, Ptr.getValueType())); 4955 4956 int EltNo = (Offset - StartOffset) >> 2; 4957 unsigned NumElems = VT.getVectorNumElements(); 4958 4959 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems); 4960 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr, 4961 LD->getPointerInfo().getWithOffset(StartOffset), 4962 false, false, false, 0); 4963 4964 SmallVector<int, 8> Mask; 4965 for (unsigned i = 0; i != NumElems; ++i) 4966 Mask.push_back(EltNo); 4967 4968 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]); 4969 } 4970 4971 return SDValue(); 4972} 4973 4974/// EltsFromConsecutiveLoads - Given the initializing elements 'Elts' of a 4975/// vector of type 'VT', see if the elements can be replaced by a single large 4976/// load which has the same value as a build_vector whose operands are 'elts'. 4977/// 4978/// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a 4979/// 4980/// FIXME: we'd also like to handle the case where the last elements are zero 4981/// rather than undef via VZEXT_LOAD, but we do not detect that case today. 4982/// There's even a handy isZeroNode for that purpose. 4983static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts, 4984 DebugLoc &DL, SelectionDAG &DAG) { 4985 EVT EltVT = VT.getVectorElementType(); 4986 unsigned NumElems = Elts.size(); 4987 4988 LoadSDNode *LDBase = NULL; 4989 unsigned LastLoadedElt = -1U; 4990 4991 // For each element in the initializer, see if we've found a load or an undef. 4992 // If we don't find an initial load element, or later load elements are 4993 // non-consecutive, bail out. 
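  // For example, <load i32 *a, load i32 *a+4, load i32 *a+8, load i32 *a+12>
  // for v4i32 passes these checks with LastLoadedElt == 3 and is turned into a
  // single wide load of the whole vector below.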
4994 for (unsigned i = 0; i < NumElems; ++i) { 4995 SDValue Elt = Elts[i]; 4996 4997 if (!Elt.getNode() || 4998 (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode()))) 4999 return SDValue(); 5000 if (!LDBase) { 5001 if (Elt.getNode()->getOpcode() == ISD::UNDEF) 5002 return SDValue(); 5003 LDBase = cast<LoadSDNode>(Elt.getNode()); 5004 LastLoadedElt = i; 5005 continue; 5006 } 5007 if (Elt.getOpcode() == ISD::UNDEF) 5008 continue; 5009 5010 LoadSDNode *LD = cast<LoadSDNode>(Elt); 5011 if (!DAG.isConsecutiveLoad(LD, LDBase, EltVT.getSizeInBits()/8, i)) 5012 return SDValue(); 5013 LastLoadedElt = i; 5014 } 5015 5016 // If we have found an entire vector of loads and undefs, then return a large 5017 // load of the entire vector width starting at the base pointer. If we found 5018 // consecutive loads for the low half, generate a vzext_load node. 5019 if (LastLoadedElt == NumElems - 1) { 5020 if (DAG.InferPtrAlignment(LDBase->getBasePtr()) >= 16) 5021 return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(), 5022 LDBase->getPointerInfo(), 5023 LDBase->isVolatile(), LDBase->isNonTemporal(), 5024 LDBase->isInvariant(), 0); 5025 return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(), 5026 LDBase->getPointerInfo(), 5027 LDBase->isVolatile(), LDBase->isNonTemporal(), 5028 LDBase->isInvariant(), LDBase->getAlignment()); 5029 } 5030 if (NumElems == 4 && LastLoadedElt == 1 && 5031 DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) { 5032 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other); 5033 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() }; 5034 SDValue ResNode = 5035 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, 2, MVT::i64, 5036 LDBase->getPointerInfo(), 5037 LDBase->getAlignment(), 5038 false/*isVolatile*/, true/*ReadMem*/, 5039 false/*WriteMem*/); 5040 5041 // Make sure the newly-created LOAD is in the same position as LDBase in 5042 // terms of dependency. We create a TokenFactor for LDBase and ResNode, and 5043 // update uses of LDBase's output chain to use the TokenFactor. 5044 if (LDBase->hasAnyUseOfValue(1)) { 5045 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, 5046 SDValue(LDBase, 1), SDValue(ResNode.getNode(), 1)); 5047 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain); 5048 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1), 5049 SDValue(ResNode.getNode(), 1)); 5050 } 5051 5052 return DAG.getNode(ISD::BITCAST, DL, VT, ResNode); 5053 } 5054 return SDValue(); 5055} 5056 5057/// LowerVectorBroadcast - Attempt to use the vbroadcast instruction 5058/// to generate a splat value for the following cases: 5059/// 1. A splat BUILD_VECTOR which uses a single scalar load, or a constant. 5060/// 2. A splat shuffle which uses a scalar_to_vector node which comes from 5061/// a scalar load, or a constant. 5062/// The VBROADCAST node is returned when a pattern is found, 5063/// or SDValue() otherwise. 5064SDValue 5065X86TargetLowering::LowerVectorBroadcast(SDValue Op, SelectionDAG &DAG) const { 5066 if (!Subtarget->hasAVX()) 5067 return SDValue(); 5068 5069 EVT VT = Op.getValueType(); 5070 DebugLoc dl = Op.getDebugLoc(); 5071 5072 assert((VT.is128BitVector() || VT.is256BitVector()) && 5073 "Unsupported vector type for broadcast."); 5074 5075 SDValue Ld; 5076 bool ConstSplatVal; 5077 5078 switch (Op.getOpcode()) { 5079 default: 5080 // Unknown pattern found. 5081 return SDValue(); 5082 5083 case ISD::BUILD_VECTOR: { 5084 // The BUILD_VECTOR node must be a splat. 
5085 if (!isSplatVector(Op.getNode())) 5086 return SDValue(); 5087 5088 Ld = Op.getOperand(0); 5089 ConstSplatVal = (Ld.getOpcode() == ISD::Constant || 5090 Ld.getOpcode() == ISD::ConstantFP); 5091 5092 // The suspected load node has several users. Make sure that all 5093 // of its users are from the BUILD_VECTOR node. 5094 // Constants may have multiple users. 5095 if (!ConstSplatVal && !Ld->hasNUsesOfValue(VT.getVectorNumElements(), 0)) 5096 return SDValue(); 5097 break; 5098 } 5099 5100 case ISD::VECTOR_SHUFFLE: { 5101 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 5102 5103 // Shuffles must have a splat mask where the first element is 5104 // broadcasted. 5105 if ((!SVOp->isSplat()) || SVOp->getMaskElt(0) != 0) 5106 return SDValue(); 5107 5108 SDValue Sc = Op.getOperand(0); 5109 if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR && 5110 Sc.getOpcode() != ISD::BUILD_VECTOR) { 5111 5112 if (!Subtarget->hasAVX2()) 5113 return SDValue(); 5114 5115 // Use the register form of the broadcast instruction available on AVX2. 5116 if (VT.is256BitVector()) 5117 Sc = Extract128BitVector(Sc, 0, DAG, dl); 5118 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc); 5119 } 5120 5121 Ld = Sc.getOperand(0); 5122 ConstSplatVal = (Ld.getOpcode() == ISD::Constant || 5123 Ld.getOpcode() == ISD::ConstantFP); 5124 5125 // The scalar_to_vector node and the suspected 5126 // load node must have exactly one user. 5127 // Constants may have multiple users. 5128 if (!ConstSplatVal && (!Sc.hasOneUse() || !Ld.hasOneUse())) 5129 return SDValue(); 5130 break; 5131 } 5132 } 5133 5134 bool Is256 = VT.is256BitVector(); 5135 5136 // Handle the broadcasting a single constant scalar from the constant pool 5137 // into a vector. On Sandybridge it is still better to load a constant vector 5138 // from the constant pool and not to broadcast it from a scalar. 5139 if (ConstSplatVal && Subtarget->hasAVX2()) { 5140 EVT CVT = Ld.getValueType(); 5141 assert(!CVT.isVector() && "Must not broadcast a vector type"); 5142 unsigned ScalarSize = CVT.getSizeInBits(); 5143 5144 if (ScalarSize == 32 || (Is256 && ScalarSize == 64)) { 5145 const Constant *C = 0; 5146 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld)) 5147 C = CI->getConstantIntValue(); 5148 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld)) 5149 C = CF->getConstantFPValue(); 5150 5151 assert(C && "Invalid constant type"); 5152 5153 SDValue CP = DAG.getConstantPool(C, getPointerTy()); 5154 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment(); 5155 Ld = DAG.getLoad(CVT, dl, DAG.getEntryNode(), CP, 5156 MachinePointerInfo::getConstantPool(), 5157 false, false, false, Alignment); 5158 5159 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); 5160 } 5161 } 5162 5163 bool IsLoad = ISD::isNormalLoad(Ld.getNode()); 5164 unsigned ScalarSize = Ld.getValueType().getSizeInBits(); 5165 5166 // Handle AVX2 in-register broadcasts. 5167 if (!IsLoad && Subtarget->hasAVX2() && 5168 (ScalarSize == 32 || (Is256 && ScalarSize == 64))) 5169 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); 5170 5171 // The scalar source must be a normal load. 
5172 if (!IsLoad) 5173 return SDValue(); 5174 5175 if (ScalarSize == 32 || (Is256 && ScalarSize == 64)) 5176 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); 5177 5178 // The integer check is needed for the 64-bit into 128-bit so it doesn't match 5179 // double since there is no vbroadcastsd xmm 5180 if (Subtarget->hasAVX2() && Ld.getValueType().isInteger()) { 5181 if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64) 5182 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); 5183 } 5184 5185 // Unsupported broadcast. 5186 return SDValue(); 5187} 5188 5189SDValue 5190X86TargetLowering::buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) const { 5191 EVT VT = Op.getValueType(); 5192 5193 // Skip if insert_vec_elt is not supported. 5194 if (!isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT)) 5195 return SDValue(); 5196 5197 DebugLoc DL = Op.getDebugLoc(); 5198 unsigned NumElems = Op.getNumOperands(); 5199 5200 SDValue VecIn1; 5201 SDValue VecIn2; 5202 SmallVector<unsigned, 4> InsertIndices; 5203 SmallVector<int, 8> Mask(NumElems, -1); 5204 5205 for (unsigned i = 0; i != NumElems; ++i) { 5206 unsigned Opc = Op.getOperand(i).getOpcode(); 5207 5208 if (Opc == ISD::UNDEF) 5209 continue; 5210 5211 if (Opc != ISD::EXTRACT_VECTOR_ELT) { 5212 // Quit if more than 1 elements need inserting. 5213 if (InsertIndices.size() > 1) 5214 return SDValue(); 5215 5216 InsertIndices.push_back(i); 5217 continue; 5218 } 5219 5220 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0); 5221 SDValue ExtIdx = Op.getOperand(i).getOperand(1); 5222 5223 // Quit if extracted from vector of different type. 5224 if (ExtractedFromVec.getValueType() != VT) 5225 return SDValue(); 5226 5227 // Quit if non-constant index. 5228 if (!isa<ConstantSDNode>(ExtIdx)) 5229 return SDValue(); 5230 5231 if (VecIn1.getNode() == 0) 5232 VecIn1 = ExtractedFromVec; 5233 else if (VecIn1 != ExtractedFromVec) { 5234 if (VecIn2.getNode() == 0) 5235 VecIn2 = ExtractedFromVec; 5236 else if (VecIn2 != ExtractedFromVec) 5237 // Quit if more than 2 vectors to shuffle 5238 return SDValue(); 5239 } 5240 5241 unsigned Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue(); 5242 5243 if (ExtractedFromVec == VecIn1) 5244 Mask[i] = Idx; 5245 else if (ExtractedFromVec == VecIn2) 5246 Mask[i] = Idx + NumElems; 5247 } 5248 5249 if (VecIn1.getNode() == 0) 5250 return SDValue(); 5251 5252 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT); 5253 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, &Mask[0]); 5254 for (unsigned i = 0, e = InsertIndices.size(); i != e; ++i) { 5255 unsigned Idx = InsertIndices[i]; 5256 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx), 5257 DAG.getIntPtrConstant(Idx)); 5258 } 5259 5260 return NV; 5261} 5262 5263SDValue 5264X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { 5265 DebugLoc dl = Op.getDebugLoc(); 5266 5267 EVT VT = Op.getValueType(); 5268 EVT ExtVT = VT.getVectorElementType(); 5269 unsigned NumElems = Op.getNumOperands(); 5270 5271 // Vectors containing all zeros can be matched by pxor and xorps later 5272 if (ISD::isBuildVectorAllZeros(Op.getNode())) { 5273 // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd 5274 // and 2) ensure that i64 scalars are eliminated on x86-32 hosts. 
5275 if (VT == MVT::v4i32 || VT == MVT::v8i32) 5276 return Op; 5277 5278 return getZeroVector(VT, Subtarget, DAG, dl); 5279 } 5280 5281 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width 5282 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use 5283 // vpcmpeqd on 256-bit vectors. 5284 if (ISD::isBuildVectorAllOnes(Op.getNode())) { 5285 if (VT == MVT::v4i32 || (VT == MVT::v8i32 && Subtarget->hasAVX2())) 5286 return Op; 5287 5288 return getOnesVector(VT, Subtarget->hasAVX2(), DAG, dl); 5289 } 5290 5291 SDValue Broadcast = LowerVectorBroadcast(Op, DAG); 5292 if (Broadcast.getNode()) 5293 return Broadcast; 5294 5295 unsigned EVTBits = ExtVT.getSizeInBits(); 5296 5297 unsigned NumZero = 0; 5298 unsigned NumNonZero = 0; 5299 unsigned NonZeros = 0; 5300 bool IsAllConstants = true; 5301 SmallSet<SDValue, 8> Values; 5302 for (unsigned i = 0; i < NumElems; ++i) { 5303 SDValue Elt = Op.getOperand(i); 5304 if (Elt.getOpcode() == ISD::UNDEF) 5305 continue; 5306 Values.insert(Elt); 5307 if (Elt.getOpcode() != ISD::Constant && 5308 Elt.getOpcode() != ISD::ConstantFP) 5309 IsAllConstants = false; 5310 if (X86::isZeroNode(Elt)) 5311 NumZero++; 5312 else { 5313 NonZeros |= (1 << i); 5314 NumNonZero++; 5315 } 5316 } 5317 5318 // All undef vector. Return an UNDEF. All zero vectors were handled above. 5319 if (NumNonZero == 0) 5320 return DAG.getUNDEF(VT); 5321 5322 // Special case for single non-zero, non-undef, element. 5323 if (NumNonZero == 1) { 5324 unsigned Idx = CountTrailingZeros_32(NonZeros); 5325 SDValue Item = Op.getOperand(Idx); 5326 5327 // If this is an insertion of an i64 value on x86-32, and if the top bits of 5328 // the value are obviously zero, truncate the value to i32 and do the 5329 // insertion that way. Only do this if the value is non-constant or if the 5330 // value is a constant being inserted into element 0. It is cheaper to do 5331 // a constant pool load than it is to do a movd + shuffle. 5332 if (ExtVT == MVT::i64 && !Subtarget->is64Bit() && 5333 (!IsAllConstants || Idx == 0)) { 5334 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) { 5335 // Handle SSE only. 5336 assert(VT == MVT::v2i64 && "Expected an SSE value type!"); 5337 EVT VecVT = MVT::v4i32; 5338 unsigned VecElts = 4; 5339 5340 // Truncate the value (which may itself be a constant) to i32, and 5341 // convert it to a vector with movd (S2V+shuffle to zero extend). 5342 Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item); 5343 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item); 5344 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG); 5345 5346 // Now we have our 32-bit value zero extended in the low element of 5347 // a vector. If Idx != 0, swizzle it into place. 5348 if (Idx != 0) { 5349 SmallVector<int, 4> Mask; 5350 Mask.push_back(Idx); 5351 for (unsigned i = 1; i != VecElts; ++i) 5352 Mask.push_back(i); 5353 Item = DAG.getVectorShuffle(VecVT, dl, Item, DAG.getUNDEF(VecVT), 5354 &Mask[0]); 5355 } 5356 return DAG.getNode(ISD::BITCAST, dl, VT, Item); 5357 } 5358 } 5359 5360 // If we have a constant or non-constant insertion into the low element of 5361 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into 5362 // the rest of the elements. This will be matched as movd/movq/movss/movsd 5363 // depending on what the source datatype is. 
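    // For example, a v4f32 (build_vector x, 0, 0, 0) with one non-constant,
    // non-zero element x becomes a SCALAR_TO_VECTOR of x followed by a zeroing
    // shuffle, which is matched as a movss against a zero vector.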
5364 if (Idx == 0) { 5365 if (NumZero == 0) 5366 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); 5367 5368 if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 || 5369 (ExtVT == MVT::i64 && Subtarget->is64Bit())) { 5370 if (VT.is256BitVector()) { 5371 SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl); 5372 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec, 5373 Item, DAG.getIntPtrConstant(0)); 5374 } 5375 assert(VT.is128BitVector() && "Expected an SSE value type!"); 5376 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); 5377 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector. 5378 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG); 5379 } 5380 5381 if (ExtVT == MVT::i16 || ExtVT == MVT::i8) { 5382 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item); 5383 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item); 5384 if (VT.is256BitVector()) { 5385 SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl); 5386 Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl); 5387 } else { 5388 assert(VT.is128BitVector() && "Expected an SSE value type!"); 5389 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG); 5390 } 5391 return DAG.getNode(ISD::BITCAST, dl, VT, Item); 5392 } 5393 } 5394 5395 // Is it a vector logical left shift? 5396 if (NumElems == 2 && Idx == 1 && 5397 X86::isZeroNode(Op.getOperand(0)) && 5398 !X86::isZeroNode(Op.getOperand(1))) { 5399 unsigned NumBits = VT.getSizeInBits(); 5400 return getVShift(true, VT, 5401 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 5402 VT, Op.getOperand(1)), 5403 NumBits/2, DAG, *this, dl); 5404 } 5405 5406 if (IsAllConstants) // Otherwise, it's better to do a constpool load. 5407 return SDValue(); 5408 5409 // Otherwise, if this is a vector with i32 or f32 elements, and the element 5410 // is a non-constant being inserted into an element other than the low one, 5411 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka 5412 // movd/movss) to move this into the low element, then shuffle it into 5413 // place. 5414 if (EVTBits == 32) { 5415 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); 5416 5417 // Turn it into a shuffle of zero and zero-extended scalar to vector. 5418 Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, Subtarget, DAG); 5419 SmallVector<int, 8> MaskVec; 5420 for (unsigned i = 0; i != NumElems; ++i) 5421 MaskVec.push_back(i == Idx ? 0 : 1); 5422 return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]); 5423 } 5424 } 5425 5426 // Splat is obviously ok. Let legalizer expand it to a shuffle. 5427 if (Values.size() == 1) { 5428 if (EVTBits == 32) { 5429 // Instead of a shuffle like this: 5430 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0> 5431 // Check if it's possible to issue this instead. 5432 // shuffle (vload ptr)), undef, <1, 1, 1, 1> 5433 unsigned Idx = CountTrailingZeros_32(NonZeros); 5434 SDValue Item = Op.getOperand(Idx); 5435 if (Op.getNode()->isOnlyUserOf(Item.getNode())) 5436 return LowerAsSplatVectorLoad(Item, VT, dl, DAG); 5437 } 5438 return SDValue(); 5439 } 5440 5441 // A vector full of immediates; various special cases are already 5442 // handled, so this is best done with a single constant-pool load. 5443 if (IsAllConstants) 5444 return SDValue(); 5445 5446 // For AVX-length vectors, build the individual 128-bit pieces and use 5447 // shuffles to put them in place. 
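  // For example, a v8i32 build_vector is split into two v4i32 build_vectors
  // that are lowered separately and then recombined with Concat128BitVectors
  // (i.e. vinsertf128).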
5448 if (VT.is256BitVector()) { 5449 SmallVector<SDValue, 32> V; 5450 for (unsigned i = 0; i != NumElems; ++i) 5451 V.push_back(Op.getOperand(i)); 5452 5453 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2); 5454 5455 // Build both the lower and upper subvector. 5456 SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT, &V[0], NumElems/2); 5457 SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT, &V[NumElems / 2], 5458 NumElems/2); 5459 5460 // Recreate the wider vector with the lower and upper part. 5461 return Concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl); 5462 } 5463 5464 // Let legalizer expand 2-wide build_vectors. 5465 if (EVTBits == 64) { 5466 if (NumNonZero == 1) { 5467 // One half is zero or undef. 5468 unsigned Idx = CountTrailingZeros_32(NonZeros); 5469 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, 5470 Op.getOperand(Idx)); 5471 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG); 5472 } 5473 return SDValue(); 5474 } 5475 5476 // If element VT is < 32 bits, convert it to inserts into a zero vector. 5477 if (EVTBits == 8 && NumElems == 16) { 5478 SDValue V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG, 5479 Subtarget, *this); 5480 if (V.getNode()) return V; 5481 } 5482 5483 if (EVTBits == 16 && NumElems == 8) { 5484 SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG, 5485 Subtarget, *this); 5486 if (V.getNode()) return V; 5487 } 5488 5489 // If element VT is == 32 bits, turn it into a number of shuffles. 5490 SmallVector<SDValue, 8> V(NumElems); 5491 if (NumElems == 4 && NumZero > 0) { 5492 for (unsigned i = 0; i < 4; ++i) { 5493 bool isZero = !(NonZeros & (1 << i)); 5494 if (isZero) 5495 V[i] = getZeroVector(VT, Subtarget, DAG, dl); 5496 else 5497 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i)); 5498 } 5499 5500 for (unsigned i = 0; i < 2; ++i) { 5501 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) { 5502 default: break; 5503 case 0: 5504 V[i] = V[i*2]; // Must be a zero vector. 5505 break; 5506 case 1: 5507 V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]); 5508 break; 5509 case 2: 5510 V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]); 5511 break; 5512 case 3: 5513 V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]); 5514 break; 5515 } 5516 } 5517 5518 bool Reverse1 = (NonZeros & 0x3) == 2; 5519 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2; 5520 int MaskVec[] = { 5521 Reverse1 ? 1 : 0, 5522 Reverse1 ? 0 : 1, 5523 static_cast<int>(Reverse2 ? NumElems+1 : NumElems), 5524 static_cast<int>(Reverse2 ? NumElems : NumElems+1) 5525 }; 5526 return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]); 5527 } 5528 5529 if (Values.size() > 1 && VT.is128BitVector()) { 5530 // Check for a build vector of consecutive loads. 5531 for (unsigned i = 0; i < NumElems; ++i) 5532 V[i] = Op.getOperand(i); 5533 5534 // Check for elements which are consecutive loads. 5535 SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG); 5536 if (LD.getNode()) 5537 return LD; 5538 5539 // Check for a build vector from mostly shuffle plus few inserting. 5540 SDValue Sh = buildFromShuffleMostly(Op, DAG); 5541 if (Sh.getNode()) 5542 return Sh; 5543 5544 // For SSE 4.1, use insertps to put the high elements into the low element. 
5545 if (getSubtarget()->hasSSE41()) { 5546 SDValue Result; 5547 if (Op.getOperand(0).getOpcode() != ISD::UNDEF) 5548 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0)); 5549 else 5550 Result = DAG.getUNDEF(VT); 5551 5552 for (unsigned i = 1; i < NumElems; ++i) { 5553 if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue; 5554 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result, 5555 Op.getOperand(i), DAG.getIntPtrConstant(i)); 5556 } 5557 return Result; 5558 } 5559 5560 // Otherwise, expand into a number of unpckl*, start by extending each of 5561 // our (non-undef) elements to the full vector width with the element in the 5562 // bottom slot of the vector (which generates no code for SSE). 5563 for (unsigned i = 0; i < NumElems; ++i) { 5564 if (Op.getOperand(i).getOpcode() != ISD::UNDEF) 5565 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i)); 5566 else 5567 V[i] = DAG.getUNDEF(VT); 5568 } 5569 5570 // Next, we iteratively mix elements, e.g. for v4f32: 5571 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0> 5572 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1> 5573 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0> 5574 unsigned EltStride = NumElems >> 1; 5575 while (EltStride != 0) { 5576 for (unsigned i = 0; i < EltStride; ++i) { 5577 // If V[i+EltStride] is undef and this is the first round of mixing, 5578 // then it is safe to just drop this shuffle: V[i] is already in the 5579 // right place, the one element (since it's the first round) being 5580 // inserted as undef can be dropped. This isn't safe for successive 5581 // rounds because they will permute elements within both vectors. 5582 if (V[i+EltStride].getOpcode() == ISD::UNDEF && 5583 EltStride == NumElems/2) 5584 continue; 5585 5586 V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]); 5587 } 5588 EltStride >>= 1; 5589 } 5590 return V[0]; 5591 } 5592 return SDValue(); 5593} 5594 5595// LowerAVXCONCAT_VECTORS - 256-bit AVX can use the vinsertf128 instruction 5596// to create 256-bit vectors from two other 128-bit ones. 5597static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 5598 DebugLoc dl = Op.getDebugLoc(); 5599 EVT ResVT = Op.getValueType(); 5600 5601 assert(ResVT.is256BitVector() && "Value type must be 256-bit wide"); 5602 5603 SDValue V1 = Op.getOperand(0); 5604 SDValue V2 = Op.getOperand(1); 5605 unsigned NumElems = ResVT.getVectorNumElements(); 5606 5607 return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl); 5608} 5609 5610static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 5611 assert(Op.getNumOperands() == 2); 5612 5613 // 256-bit AVX can use the vinsertf128 instruction to create 256-bit vectors 5614 // from two other 128-bit ones. 5615 return LowerAVXCONCAT_VECTORS(Op, DAG); 5616} 5617 5618// Try to lower a shuffle node into a simple blend instruction. 
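// The mask value built below has bit i set when result element i is taken from
// V1 (or is undef) and left clear when it is taken from V2; e.g. a v4f32
// shuffle mask of <0, 5, 2, 7> yields MaskVals == 0b0101.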
5619static SDValue 5620LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp, 5621 const X86Subtarget *Subtarget, SelectionDAG &DAG) { 5622 SDValue V1 = SVOp->getOperand(0); 5623 SDValue V2 = SVOp->getOperand(1); 5624 DebugLoc dl = SVOp->getDebugLoc(); 5625 MVT VT = SVOp->getValueType(0).getSimpleVT(); 5626 unsigned NumElems = VT.getVectorNumElements(); 5627 5628 if (!Subtarget->hasSSE41()) 5629 return SDValue(); 5630 5631 unsigned ISDNo = 0; 5632 MVT OpTy; 5633 5634 switch (VT.SimpleTy) { 5635 default: return SDValue(); 5636 case MVT::v8i16: 5637 ISDNo = X86ISD::BLENDPW; 5638 OpTy = MVT::v8i16; 5639 break; 5640 case MVT::v4i32: 5641 case MVT::v4f32: 5642 ISDNo = X86ISD::BLENDPS; 5643 OpTy = MVT::v4f32; 5644 break; 5645 case MVT::v2i64: 5646 case MVT::v2f64: 5647 ISDNo = X86ISD::BLENDPD; 5648 OpTy = MVT::v2f64; 5649 break; 5650 case MVT::v8i32: 5651 case MVT::v8f32: 5652 if (!Subtarget->hasAVX()) 5653 return SDValue(); 5654 ISDNo = X86ISD::BLENDPS; 5655 OpTy = MVT::v8f32; 5656 break; 5657 case MVT::v4i64: 5658 case MVT::v4f64: 5659 if (!Subtarget->hasAVX()) 5660 return SDValue(); 5661 ISDNo = X86ISD::BLENDPD; 5662 OpTy = MVT::v4f64; 5663 break; 5664 } 5665 assert(ISDNo && "Invalid Op Number"); 5666 5667 unsigned MaskVals = 0; 5668 5669 for (unsigned i = 0; i != NumElems; ++i) { 5670 int EltIdx = SVOp->getMaskElt(i); 5671 if (EltIdx == (int)i || EltIdx < 0) 5672 MaskVals |= (1<<i); 5673 else if (EltIdx == (int)(i + NumElems)) 5674 continue; // Bit is set to zero; 5675 else 5676 return SDValue(); 5677 } 5678 5679 V1 = DAG.getNode(ISD::BITCAST, dl, OpTy, V1); 5680 V2 = DAG.getNode(ISD::BITCAST, dl, OpTy, V2); 5681 SDValue Ret = DAG.getNode(ISDNo, dl, OpTy, V1, V2, 5682 DAG.getConstant(MaskVals, MVT::i32)); 5683 return DAG.getNode(ISD::BITCAST, dl, VT, Ret); 5684} 5685 5686// v8i16 shuffles - Prefer shuffles in the following order: 5687// 1. [all] pshuflw, pshufhw, optional move 5688// 2. [ssse3] 1 x pshufb 5689// 3. [ssse3] 2 x pshufb + 1 x por 5690// 4. [all] mov + pshuflw + pshufhw + N x (pextrw + pinsrw) 5691static SDValue 5692LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget, 5693 SelectionDAG &DAG) { 5694 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 5695 SDValue V1 = SVOp->getOperand(0); 5696 SDValue V2 = SVOp->getOperand(1); 5697 DebugLoc dl = SVOp->getDebugLoc(); 5698 SmallVector<int, 8> MaskVals; 5699 5700 // Determine if more than 1 of the words in each of the low and high quadwords 5701 // of the result come from the same quadword of one of the two inputs. Undef 5702 // mask values count as coming from any quadword, for better codegen. 5703 unsigned LoQuad[] = { 0, 0, 0, 0 }; 5704 unsigned HiQuad[] = { 0, 0, 0, 0 }; 5705 std::bitset<4> InputQuads; 5706 for (unsigned i = 0; i < 8; ++i) { 5707 unsigned *Quad = i < 4 ? 
LoQuad : HiQuad; 5708 int EltIdx = SVOp->getMaskElt(i); 5709 MaskVals.push_back(EltIdx); 5710 if (EltIdx < 0) { 5711 ++Quad[0]; 5712 ++Quad[1]; 5713 ++Quad[2]; 5714 ++Quad[3]; 5715 continue; 5716 } 5717 ++Quad[EltIdx / 4]; 5718 InputQuads.set(EltIdx / 4); 5719 } 5720 5721 int BestLoQuad = -1; 5722 unsigned MaxQuad = 1; 5723 for (unsigned i = 0; i < 4; ++i) { 5724 if (LoQuad[i] > MaxQuad) { 5725 BestLoQuad = i; 5726 MaxQuad = LoQuad[i]; 5727 } 5728 } 5729 5730 int BestHiQuad = -1; 5731 MaxQuad = 1; 5732 for (unsigned i = 0; i < 4; ++i) { 5733 if (HiQuad[i] > MaxQuad) { 5734 BestHiQuad = i; 5735 MaxQuad = HiQuad[i]; 5736 } 5737 } 5738 5739 // For SSSE3, If all 8 words of the result come from only 1 quadword of each 5740 // of the two input vectors, shuffle them into one input vector so only a 5741 // single pshufb instruction is necessary. If There are more than 2 input 5742 // quads, disable the next transformation since it does not help SSSE3. 5743 bool V1Used = InputQuads[0] || InputQuads[1]; 5744 bool V2Used = InputQuads[2] || InputQuads[3]; 5745 if (Subtarget->hasSSSE3()) { 5746 if (InputQuads.count() == 2 && V1Used && V2Used) { 5747 BestLoQuad = InputQuads[0] ? 0 : 1; 5748 BestHiQuad = InputQuads[2] ? 2 : 3; 5749 } 5750 if (InputQuads.count() > 2) { 5751 BestLoQuad = -1; 5752 BestHiQuad = -1; 5753 } 5754 } 5755 5756 // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update 5757 // the shuffle mask. If a quad is scored as -1, that means that it contains 5758 // words from all 4 input quadwords. 5759 SDValue NewV; 5760 if (BestLoQuad >= 0 || BestHiQuad >= 0) { 5761 int MaskV[] = { 5762 BestLoQuad < 0 ? 0 : BestLoQuad, 5763 BestHiQuad < 0 ? 1 : BestHiQuad 5764 }; 5765 NewV = DAG.getVectorShuffle(MVT::v2i64, dl, 5766 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1), 5767 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]); 5768 NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV); 5769 5770 // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the 5771 // source words for the shuffle, to aid later transformations. 5772 bool AllWordsInNewV = true; 5773 bool InOrder[2] = { true, true }; 5774 for (unsigned i = 0; i != 8; ++i) { 5775 int idx = MaskVals[i]; 5776 if (idx != (int)i) 5777 InOrder[i/4] = false; 5778 if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad) 5779 continue; 5780 AllWordsInNewV = false; 5781 break; 5782 } 5783 5784 bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV; 5785 if (AllWordsInNewV) { 5786 for (int i = 0; i != 8; ++i) { 5787 int idx = MaskVals[i]; 5788 if (idx < 0) 5789 continue; 5790 idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4; 5791 if ((idx != i) && idx < 4) 5792 pshufhw = false; 5793 if ((idx != i) && idx > 3) 5794 pshuflw = false; 5795 } 5796 V1 = NewV; 5797 V2Used = false; 5798 BestLoQuad = 0; 5799 BestHiQuad = 1; 5800 } 5801 5802 // If we've eliminated the use of V2, and the new mask is a pshuflw or 5803 // pshufhw, that's as cheap as it gets. Return the new shuffle. 5804 if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) { 5805 unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW; 5806 unsigned TargetMask = 0; 5807 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, 5808 DAG.getUNDEF(MVT::v8i16), &MaskVals[0]); 5809 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode()); 5810 TargetMask = pshufhw ? 
getShufflePSHUFHWImmediate(SVOp): 5811 getShufflePSHUFLWImmediate(SVOp); 5812 V1 = NewV.getOperand(0); 5813 return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG); 5814 } 5815 } 5816 5817 // If we have SSSE3, and all words of the result are from 1 input vector, 5818 // case 2 is generated, otherwise case 3 is generated. If no SSSE3 5819 // is present, fall back to case 4. 5820 if (Subtarget->hasSSSE3()) { 5821 SmallVector<SDValue,16> pshufbMask; 5822 5823 // If we have elements from both input vectors, set the high bit of the 5824 // shuffle mask element to zero out elements that come from V2 in the V1 5825 // mask, and elements that come from V1 in the V2 mask, so that the two 5826 // results can be OR'd together. 5827 bool TwoInputs = V1Used && V2Used; 5828 for (unsigned i = 0; i != 8; ++i) { 5829 int EltIdx = MaskVals[i] * 2; 5830 int Idx0 = (TwoInputs && (EltIdx >= 16)) ? 0x80 : EltIdx; 5831 int Idx1 = (TwoInputs && (EltIdx >= 16)) ? 0x80 : EltIdx+1; 5832 pshufbMask.push_back(DAG.getConstant(Idx0, MVT::i8)); 5833 pshufbMask.push_back(DAG.getConstant(Idx1, MVT::i8)); 5834 } 5835 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V1); 5836 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1, 5837 DAG.getNode(ISD::BUILD_VECTOR, dl, 5838 MVT::v16i8, &pshufbMask[0], 16)); 5839 if (!TwoInputs) 5840 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 5841 5842 // Calculate the shuffle mask for the second input, shuffle it, and 5843 // OR it with the first shuffled input. 5844 pshufbMask.clear(); 5845 for (unsigned i = 0; i != 8; ++i) { 5846 int EltIdx = MaskVals[i] * 2; 5847 int Idx0 = (EltIdx < 16) ? 0x80 : EltIdx - 16; 5848 int Idx1 = (EltIdx < 16) ? 0x80 : EltIdx - 15; 5849 pshufbMask.push_back(DAG.getConstant(Idx0, MVT::i8)); 5850 pshufbMask.push_back(DAG.getConstant(Idx1, MVT::i8)); 5851 } 5852 V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V2); 5853 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2, 5854 DAG.getNode(ISD::BUILD_VECTOR, dl, 5855 MVT::v16i8, &pshufbMask[0], 16)); 5856 V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2); 5857 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 5858 } 5859 5860 // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order, 5861 // and update MaskVals with new element order. 5862 std::bitset<8> InOrder; 5863 if (BestLoQuad >= 0) { 5864 int MaskV[] = { -1, -1, -1, -1, 4, 5, 6, 7 }; 5865 for (int i = 0; i != 4; ++i) { 5866 int idx = MaskVals[i]; 5867 if (idx < 0) { 5868 InOrder.set(i); 5869 } else if ((idx / 4) == BestLoQuad) { 5870 MaskV[i] = idx & 3; 5871 InOrder.set(i); 5872 } 5873 } 5874 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16), 5875 &MaskV[0]); 5876 5877 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3()) { 5878 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode()); 5879 NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16, 5880 NewV.getOperand(0), 5881 getShufflePSHUFLWImmediate(SVOp), DAG); 5882 } 5883 } 5884 5885 // If BestHi >= 0, generate a pshufhw to put the high elements in order, 5886 // and update MaskVals with the new element order. 
5887 if (BestHiQuad >= 0) { 5888 int MaskV[] = { 0, 1, 2, 3, -1, -1, -1, -1 }; 5889 for (unsigned i = 4; i != 8; ++i) { 5890 int idx = MaskVals[i]; 5891 if (idx < 0) { 5892 InOrder.set(i); 5893 } else if ((idx / 4) == BestHiQuad) { 5894 MaskV[i] = (idx & 3) + 4; 5895 InOrder.set(i); 5896 } 5897 } 5898 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16), 5899 &MaskV[0]); 5900 5901 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3()) { 5902 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode()); 5903 NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16, 5904 NewV.getOperand(0), 5905 getShufflePSHUFHWImmediate(SVOp), DAG); 5906 } 5907 } 5908 5909 // In case BestHi & BestLo were both -1, which means each quadword has a word 5910 // from each of the four input quadwords, calculate the InOrder bitvector now 5911 // before falling through to the insert/extract cleanup. 5912 if (BestLoQuad == -1 && BestHiQuad == -1) { 5913 NewV = V1; 5914 for (int i = 0; i != 8; ++i) 5915 if (MaskVals[i] < 0 || MaskVals[i] == i) 5916 InOrder.set(i); 5917 } 5918 5919 // The other elements are put in the right place using pextrw and pinsrw. 5920 for (unsigned i = 0; i != 8; ++i) { 5921 if (InOrder[i]) 5922 continue; 5923 int EltIdx = MaskVals[i]; 5924 if (EltIdx < 0) 5925 continue; 5926 SDValue ExtOp = (EltIdx < 8) ? 5927 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1, 5928 DAG.getIntPtrConstant(EltIdx)) : 5929 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2, 5930 DAG.getIntPtrConstant(EltIdx - 8)); 5931 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp, 5932 DAG.getIntPtrConstant(i)); 5933 } 5934 return NewV; 5935} 5936 5937// v16i8 shuffles - Prefer shuffles in the following order: 5938// 1. [ssse3] 1 x pshufb 5939// 2. [ssse3] 2 x pshufb + 1 x por 5940// 3. [all] v8i16 shuffle + N x pextrw + rotate + pinsrw 5941static 5942SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp, 5943 SelectionDAG &DAG, 5944 const X86TargetLowering &TLI) { 5945 SDValue V1 = SVOp->getOperand(0); 5946 SDValue V2 = SVOp->getOperand(1); 5947 DebugLoc dl = SVOp->getDebugLoc(); 5948 ArrayRef<int> MaskVals = SVOp->getMask(); 5949 5950 // If we have SSSE3, case 1 is generated when all result bytes come from 5951 // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is 5952 // present, fall back to case 3. 5953 5954 // If SSSE3, use 1 pshufb instruction per vector with elements in the result. 5955 if (TLI.getSubtarget()->hasSSSE3()) { 5956 SmallVector<SDValue,16> pshufbMask; 5957 5958 // If all result elements are from one input vector, then only translate 5959 // undef mask values to 0x80 (zero out result) in the pshufb mask. 5960 // 5961 // Otherwise, we have elements from both input vectors, and must zero out 5962 // elements that come from V2 in the first mask, and V1 in the second mask 5963 // so that we can OR them together. 5964 for (unsigned i = 0; i != 16; ++i) { 5965 int EltIdx = MaskVals[i]; 5966 if (EltIdx < 0 || EltIdx >= 16) 5967 EltIdx = 0x80; 5968 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8)); 5969 } 5970 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1, 5971 DAG.getNode(ISD::BUILD_VECTOR, dl, 5972 MVT::v16i8, &pshufbMask[0], 16)); 5973 5974 // As PSHUFB will zero elements with negative indices, it's safe to ignore 5975 // the 2nd operand if it's undefined or zero. 
5976 if (V2.getOpcode() == ISD::UNDEF || 5977 ISD::isBuildVectorAllZeros(V2.getNode())) 5978 return V1; 5979 5980 // Calculate the shuffle mask for the second input, shuffle it, and 5981 // OR it with the first shuffled input. 5982 pshufbMask.clear(); 5983 for (unsigned i = 0; i != 16; ++i) { 5984 int EltIdx = MaskVals[i]; 5985 EltIdx = (EltIdx < 16) ? 0x80 : EltIdx - 16; 5986 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8)); 5987 } 5988 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2, 5989 DAG.getNode(ISD::BUILD_VECTOR, dl, 5990 MVT::v16i8, &pshufbMask[0], 16)); 5991 return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2); 5992 } 5993 5994 // No SSSE3 - Calculate in place words and then fix all out of place words 5995 // With 0-16 extracts & inserts. Worst case is 16 bytes out of order from 5996 // the 16 different words that comprise the two doublequadword input vectors. 5997 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 5998 V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2); 5999 SDValue NewV = V1; 6000 for (int i = 0; i != 8; ++i) { 6001 int Elt0 = MaskVals[i*2]; 6002 int Elt1 = MaskVals[i*2+1]; 6003 6004 // This word of the result is all undef, skip it. 6005 if (Elt0 < 0 && Elt1 < 0) 6006 continue; 6007 6008 // This word of the result is already in the correct place, skip it. 6009 if ((Elt0 == i*2) && (Elt1 == i*2+1)) 6010 continue; 6011 6012 SDValue Elt0Src = Elt0 < 16 ? V1 : V2; 6013 SDValue Elt1Src = Elt1 < 16 ? V1 : V2; 6014 SDValue InsElt; 6015 6016 // If Elt0 and Elt1 are defined, are consecutive, and can be load 6017 // using a single extract together, load it and store it. 6018 if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) { 6019 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src, 6020 DAG.getIntPtrConstant(Elt1 / 2)); 6021 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt, 6022 DAG.getIntPtrConstant(i)); 6023 continue; 6024 } 6025 6026 // If Elt1 is defined, extract it from the appropriate source. If the 6027 // source byte is not also odd, shift the extracted word left 8 bits 6028 // otherwise clear the bottom 8 bits if we need to do an or. 6029 if (Elt1 >= 0) { 6030 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src, 6031 DAG.getIntPtrConstant(Elt1 / 2)); 6032 if ((Elt1 & 1) == 0) 6033 InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt, 6034 DAG.getConstant(8, 6035 TLI.getShiftAmountTy(InsElt.getValueType()))); 6036 else if (Elt0 >= 0) 6037 InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt, 6038 DAG.getConstant(0xFF00, MVT::i16)); 6039 } 6040 // If Elt0 is defined, extract it from the appropriate source. If the 6041 // source byte is not also even, shift the extracted word right 8 bits. If 6042 // Elt1 was also defined, OR the extracted values together before 6043 // inserting them in the result. 6044 if (Elt0 >= 0) { 6045 SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, 6046 Elt0Src, DAG.getIntPtrConstant(Elt0 / 2)); 6047 if ((Elt0 & 1) != 0) 6048 InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0, 6049 DAG.getConstant(8, 6050 TLI.getShiftAmountTy(InsElt0.getValueType()))); 6051 else if (Elt1 >= 0) 6052 InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0, 6053 DAG.getConstant(0x00FF, MVT::i16)); 6054 InsElt = Elt1 >= 0 ? 
DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0) 6055 : InsElt0; 6056 } 6057 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt, 6058 DAG.getIntPtrConstant(i)); 6059 } 6060 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV); 6061} 6062 6063// v32i8 shuffles - Translate to VPSHUFB if possible. 6064static 6065SDValue LowerVECTOR_SHUFFLEv32i8(ShuffleVectorSDNode *SVOp, 6066 const X86Subtarget *Subtarget, 6067 SelectionDAG &DAG) { 6068 EVT VT = SVOp->getValueType(0); 6069 SDValue V1 = SVOp->getOperand(0); 6070 SDValue V2 = SVOp->getOperand(1); 6071 DebugLoc dl = SVOp->getDebugLoc(); 6072 SmallVector<int, 32> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end()); 6073 6074 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; 6075 bool V1IsAllZero = ISD::isBuildVectorAllZeros(V1.getNode()); 6076 bool V2IsAllZero = ISD::isBuildVectorAllZeros(V2.getNode()); 6077 6078 // VPSHUFB may be generated if 6079 // (1) one of input vector is undefined or zeroinitializer. 6080 // The mask value 0x80 puts 0 in the corresponding slot of the vector. 6081 // And (2) the mask indexes don't cross the 128-bit lane. 6082 if (VT != MVT::v32i8 || !Subtarget->hasAVX2() || 6083 (!V2IsUndef && !V2IsAllZero && !V1IsAllZero)) 6084 return SDValue(); 6085 6086 if (V1IsAllZero && !V2IsAllZero) { 6087 CommuteVectorShuffleMask(MaskVals, 32); 6088 V1 = V2; 6089 } 6090 SmallVector<SDValue, 32> pshufbMask; 6091 for (unsigned i = 0; i != 32; i++) { 6092 int EltIdx = MaskVals[i]; 6093 if (EltIdx < 0 || EltIdx >= 32) 6094 EltIdx = 0x80; 6095 else { 6096 if ((EltIdx >= 16 && i < 16) || (EltIdx < 16 && i >= 16)) 6097 // Cross lane is not allowed. 6098 return SDValue(); 6099 EltIdx &= 0xf; 6100 } 6101 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8)); 6102 } 6103 return DAG.getNode(X86ISD::PSHUFB, dl, MVT::v32i8, V1, 6104 DAG.getNode(ISD::BUILD_VECTOR, dl, 6105 MVT::v32i8, &pshufbMask[0], 32)); 6106} 6107 6108/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide 6109/// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be 6110/// done when every pair / quad of shuffle mask elements point to elements in 6111/// the right sequence. e.g. 
6112/// vector_shuffle X, Y, <2, 3, | 10, 11, | 0, 1, | 14, 15> 6113static 6114SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp, 6115 SelectionDAG &DAG, DebugLoc dl) { 6116 MVT VT = SVOp->getValueType(0).getSimpleVT(); 6117 unsigned NumElems = VT.getVectorNumElements(); 6118 MVT NewVT; 6119 unsigned Scale; 6120 switch (VT.SimpleTy) { 6121 default: llvm_unreachable("Unexpected!"); 6122 case MVT::v4f32: NewVT = MVT::v2f64; Scale = 2; break; 6123 case MVT::v4i32: NewVT = MVT::v2i64; Scale = 2; break; 6124 case MVT::v8i16: NewVT = MVT::v4i32; Scale = 2; break; 6125 case MVT::v16i8: NewVT = MVT::v4i32; Scale = 4; break; 6126 case MVT::v16i16: NewVT = MVT::v8i32; Scale = 2; break; 6127 case MVT::v32i8: NewVT = MVT::v8i32; Scale = 4; break; 6128 } 6129 6130 SmallVector<int, 8> MaskVec; 6131 for (unsigned i = 0; i != NumElems; i += Scale) { 6132 int StartIdx = -1; 6133 for (unsigned j = 0; j != Scale; ++j) { 6134 int EltIdx = SVOp->getMaskElt(i+j); 6135 if (EltIdx < 0) 6136 continue; 6137 if (StartIdx < 0) 6138 StartIdx = (EltIdx / Scale); 6139 if (EltIdx != (int)(StartIdx*Scale + j)) 6140 return SDValue(); 6141 } 6142 MaskVec.push_back(StartIdx); 6143 } 6144 6145 SDValue V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(0)); 6146 SDValue V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(1)); 6147 return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]); 6148} 6149 6150/// getVZextMovL - Return a zero-extending vector move low node. 6151/// 6152static SDValue getVZextMovL(EVT VT, EVT OpVT, 6153 SDValue SrcOp, SelectionDAG &DAG, 6154 const X86Subtarget *Subtarget, DebugLoc dl) { 6155 if (VT == MVT::v2f64 || VT == MVT::v4f32) { 6156 LoadSDNode *LD = NULL; 6157 if (!isScalarLoadToVector(SrcOp.getNode(), &LD)) 6158 LD = dyn_cast<LoadSDNode>(SrcOp); 6159 if (!LD) { 6160 // movssrr and movsdrr do not clear top bits. Try to use movd, movq 6161 // instead. 6162 MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32; 6163 if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) && 6164 SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR && 6165 SrcOp.getOperand(0).getOpcode() == ISD::BITCAST && 6166 SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) { 6167 // PR2108 6168 OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32; 6169 return DAG.getNode(ISD::BITCAST, dl, VT, 6170 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT, 6171 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 6172 OpVT, 6173 SrcOp.getOperand(0) 6174 .getOperand(0)))); 6175 } 6176 } 6177 } 6178 6179 return DAG.getNode(ISD::BITCAST, dl, VT, 6180 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT, 6181 DAG.getNode(ISD::BITCAST, dl, 6182 OpVT, SrcOp))); 6183} 6184 6185/// LowerVECTOR_SHUFFLE_256 - Handle all 256-bit wide vectors shuffles 6186/// which could not be matched by any known target speficic shuffle 6187static SDValue 6188LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { 6189 6190 SDValue NewOp = Compact8x32ShuffleNode(SVOp, DAG); 6191 if (NewOp.getNode()) 6192 return NewOp; 6193 6194 EVT VT = SVOp->getValueType(0); 6195 6196 unsigned NumElems = VT.getVectorNumElements(); 6197 unsigned NumLaneElems = NumElems / 2; 6198 6199 DebugLoc dl = SVOp->getDebugLoc(); 6200 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 6201 EVT NVT = MVT::getVectorVT(EltVT, NumLaneElems); 6202 SDValue Output[2]; 6203 6204 SmallVector<int, 16> Mask; 6205 for (unsigned l = 0; l < 2; ++l) { 6206 // Build a shuffle mask for the output, discovering on the fly which 6207 // input vectors to use as shuffle operands (recorded in InputUsed). 
6208 // If building a suitable shuffle vector proves too hard, then bail 6209 // out with UseBuildVector set. 6210 bool UseBuildVector = false; 6211 int InputUsed[2] = { -1, -1 }; // Not yet discovered. 6212 unsigned LaneStart = l * NumLaneElems; 6213 for (unsigned i = 0; i != NumLaneElems; ++i) { 6214 // The mask element. This indexes into the input. 6215 int Idx = SVOp->getMaskElt(i+LaneStart); 6216 if (Idx < 0) { 6217 // the mask element does not index into any input vector. 6218 Mask.push_back(-1); 6219 continue; 6220 } 6221 6222 // The input vector this mask element indexes into. 6223 int Input = Idx / NumLaneElems; 6224 6225 // Turn the index into an offset from the start of the input vector. 6226 Idx -= Input * NumLaneElems; 6227 6228 // Find or create a shuffle vector operand to hold this input. 6229 unsigned OpNo; 6230 for (OpNo = 0; OpNo < array_lengthof(InputUsed); ++OpNo) { 6231 if (InputUsed[OpNo] == Input) 6232 // This input vector is already an operand. 6233 break; 6234 if (InputUsed[OpNo] < 0) { 6235 // Create a new operand for this input vector. 6236 InputUsed[OpNo] = Input; 6237 break; 6238 } 6239 } 6240 6241 if (OpNo >= array_lengthof(InputUsed)) { 6242 // More than two input vectors used! Give up on trying to create a 6243 // shuffle vector. Insert all elements into a BUILD_VECTOR instead. 6244 UseBuildVector = true; 6245 break; 6246 } 6247 6248 // Add the mask index for the new shuffle vector. 6249 Mask.push_back(Idx + OpNo * NumLaneElems); 6250 } 6251 6252 if (UseBuildVector) { 6253 SmallVector<SDValue, 16> SVOps; 6254 for (unsigned i = 0; i != NumLaneElems; ++i) { 6255 // The mask element. This indexes into the input. 6256 int Idx = SVOp->getMaskElt(i+LaneStart); 6257 if (Idx < 0) { 6258 SVOps.push_back(DAG.getUNDEF(EltVT)); 6259 continue; 6260 } 6261 6262 // The input vector this mask element indexes into. 6263 int Input = Idx / NumElems; 6264 6265 // Turn the index into an offset from the start of the input vector. 6266 Idx -= Input * NumElems; 6267 6268 // Extract the vector element by hand. 6269 SVOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 6270 SVOp->getOperand(Input), 6271 DAG.getIntPtrConstant(Idx))); 6272 } 6273 6274 // Construct the output using a BUILD_VECTOR. 6275 Output[l] = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, &SVOps[0], 6276 SVOps.size()); 6277 } else if (InputUsed[0] < 0) { 6278 // No input vectors were used! The result is undefined. 6279 Output[l] = DAG.getUNDEF(NVT); 6280 } else { 6281 SDValue Op0 = Extract128BitVector(SVOp->getOperand(InputUsed[0] / 2), 6282 (InputUsed[0] % 2) * NumLaneElems, 6283 DAG, dl); 6284 // If only one input was used, use an undefined vector for the other. 6285 SDValue Op1 = (InputUsed[1] < 0) ? DAG.getUNDEF(NVT) : 6286 Extract128BitVector(SVOp->getOperand(InputUsed[1] / 2), 6287 (InputUsed[1] % 2) * NumLaneElems, DAG, dl); 6288 // At least one input vector was used. Create a new shuffle vector. 6289 Output[l] = DAG.getVectorShuffle(NVT, dl, Op0, Op1, &Mask[0]); 6290 } 6291 6292 Mask.clear(); 6293 } 6294 6295 // Concatenate the result back 6296 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Output[0], Output[1]); 6297} 6298 6299/// LowerVECTOR_SHUFFLE_128v4 - Handle all 128-bit wide vectors with 6300/// 4 elements, and match them with several different shuffle types. 
6301static SDValue 6302LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { 6303 SDValue V1 = SVOp->getOperand(0); 6304 SDValue V2 = SVOp->getOperand(1); 6305 DebugLoc dl = SVOp->getDebugLoc(); 6306 EVT VT = SVOp->getValueType(0); 6307 6308 assert(VT.is128BitVector() && "Unsupported vector size"); 6309 6310 std::pair<int, int> Locs[4]; 6311 int Mask1[] = { -1, -1, -1, -1 }; 6312 SmallVector<int, 8> PermMask(SVOp->getMask().begin(), SVOp->getMask().end()); 6313 6314 unsigned NumHi = 0; 6315 unsigned NumLo = 0; 6316 for (unsigned i = 0; i != 4; ++i) { 6317 int Idx = PermMask[i]; 6318 if (Idx < 0) { 6319 Locs[i] = std::make_pair(-1, -1); 6320 } else { 6321 assert(Idx < 8 && "Invalid VECTOR_SHUFFLE index!"); 6322 if (Idx < 4) { 6323 Locs[i] = std::make_pair(0, NumLo); 6324 Mask1[NumLo] = Idx; 6325 NumLo++; 6326 } else { 6327 Locs[i] = std::make_pair(1, NumHi); 6328 if (2+NumHi < 4) 6329 Mask1[2+NumHi] = Idx; 6330 NumHi++; 6331 } 6332 } 6333 } 6334 6335 if (NumLo <= 2 && NumHi <= 2) { 6336 // If no more than two elements come from either vector. This can be 6337 // implemented with two shuffles. First shuffle gather the elements. 6338 // The second shuffle, which takes the first shuffle as both of its 6339 // vector operands, put the elements into the right order. 6340 V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]); 6341 6342 int Mask2[] = { -1, -1, -1, -1 }; 6343 6344 for (unsigned i = 0; i != 4; ++i) 6345 if (Locs[i].first != -1) { 6346 unsigned Idx = (i < 2) ? 0 : 4; 6347 Idx += Locs[i].first * 2 + Locs[i].second; 6348 Mask2[i] = Idx; 6349 } 6350 6351 return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]); 6352 } 6353 6354 if (NumLo == 3 || NumHi == 3) { 6355 // Otherwise, we must have three elements from one vector, call it X, and 6356 // one element from the other, call it Y. First, use a shufps to build an 6357 // intermediate vector with the one element from Y and the element from X 6358 // that will be in the same half in the final destination (the indexes don't 6359 // matter). Then, use a shufps to build the final vector, taking the half 6360 // containing the element from Y from the intermediate, and the other half 6361 // from X. 6362 if (NumHi == 3) { 6363 // Normalize it so the 3 elements come from V1. 6364 CommuteVectorShuffleMask(PermMask, 4); 6365 std::swap(V1, V2); 6366 } 6367 6368 // Find the element from V2. 6369 unsigned HiIndex; 6370 for (HiIndex = 0; HiIndex < 3; ++HiIndex) { 6371 int Val = PermMask[HiIndex]; 6372 if (Val < 0) 6373 continue; 6374 if (Val >= 4) 6375 break; 6376 } 6377 6378 Mask1[0] = PermMask[HiIndex]; 6379 Mask1[1] = -1; 6380 Mask1[2] = PermMask[HiIndex^1]; 6381 Mask1[3] = -1; 6382 V2 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]); 6383 6384 if (HiIndex >= 2) { 6385 Mask1[0] = PermMask[0]; 6386 Mask1[1] = PermMask[1]; 6387 Mask1[2] = HiIndex & 1 ? 6 : 4; 6388 Mask1[3] = HiIndex & 1 ? 4 : 6; 6389 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]); 6390 } 6391 6392 Mask1[0] = HiIndex & 1 ? 2 : 0; 6393 Mask1[1] = HiIndex & 1 ? 0 : 2; 6394 Mask1[2] = PermMask[2]; 6395 Mask1[3] = PermMask[3]; 6396 if (Mask1[2] >= 0) 6397 Mask1[2] += 4; 6398 if (Mask1[3] >= 0) 6399 Mask1[3] += 4; 6400 return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]); 6401 } 6402 6403 // Break it into (shuffle shuffle_hi, shuffle_lo). 
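  // Illustrative trace of the bookkeeping below (a sketch only; a mask this
  // simple would normally have been matched by earlier lowering): for
  // PermMask <1, 0, 3, 2> the loop builds LoMask = <1, 0, -1, -1> and
  // HiMask = <3, 2, -1, -1>, and the final shuffle recombines them with
  // MaskOps = <0, 1, 4, 5>, i.e. low half from LoShuffle, high half from
  // HiShuffle.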
6404 int LoMask[] = { -1, -1, -1, -1 }; 6405 int HiMask[] = { -1, -1, -1, -1 }; 6406 6407 int *MaskPtr = LoMask; 6408 unsigned MaskIdx = 0; 6409 unsigned LoIdx = 0; 6410 unsigned HiIdx = 2; 6411 for (unsigned i = 0; i != 4; ++i) { 6412 if (i == 2) { 6413 MaskPtr = HiMask; 6414 MaskIdx = 1; 6415 LoIdx = 0; 6416 HiIdx = 2; 6417 } 6418 int Idx = PermMask[i]; 6419 if (Idx < 0) { 6420 Locs[i] = std::make_pair(-1, -1); 6421 } else if (Idx < 4) { 6422 Locs[i] = std::make_pair(MaskIdx, LoIdx); 6423 MaskPtr[LoIdx] = Idx; 6424 LoIdx++; 6425 } else { 6426 Locs[i] = std::make_pair(MaskIdx, HiIdx); 6427 MaskPtr[HiIdx] = Idx; 6428 HiIdx++; 6429 } 6430 } 6431 6432 SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]); 6433 SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]); 6434 int MaskOps[] = { -1, -1, -1, -1 }; 6435 for (unsigned i = 0; i != 4; ++i) 6436 if (Locs[i].first != -1) 6437 MaskOps[i] = Locs[i].first * 4 + Locs[i].second; 6438 return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]); 6439} 6440 6441static bool MayFoldVectorLoad(SDValue V) { 6442 while (V.hasOneUse() && V.getOpcode() == ISD::BITCAST) 6443 V = V.getOperand(0); 6444 6445 if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR) 6446 V = V.getOperand(0); 6447 if (V.hasOneUse() && V.getOpcode() == ISD::BUILD_VECTOR && 6448 V.getNumOperands() == 2 && V.getOperand(1).getOpcode() == ISD::UNDEF) 6449 // BUILD_VECTOR (load), undef 6450 V = V.getOperand(0); 6451 6452 return MayFoldLoad(V); 6453} 6454 6455// FIXME: the version above should always be used. Since there's 6456// a bug where several vector shuffles can't be folded because the 6457// DAG is not updated during lowering and a node claims to have two 6458// uses while it only has one, use this version, and let isel match 6459// another instruction if the load really happens to have more than 6460// one use. Remove this version after this bug get fixed. 6461// rdar://8434668, PR8156 6462static bool RelaxedMayFoldVectorLoad(SDValue V) { 6463 if (V.hasOneUse() && V.getOpcode() == ISD::BITCAST) 6464 V = V.getOperand(0); 6465 if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR) 6466 V = V.getOperand(0); 6467 if (ISD::isNormalLoad(V.getNode())) 6468 return true; 6469 return false; 6470} 6471 6472static 6473SDValue getMOVDDup(SDValue &Op, DebugLoc &dl, SDValue V1, SelectionDAG &DAG) { 6474 EVT VT = Op.getValueType(); 6475 6476 // Canonizalize to v2f64. 
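  // Sketch of the intent: a splat-of-lane-0 mask such as <0, 0> on v2i64 is
  // rewritten as (bitcast VT (MOVDDUP (bitcast v2f64 V1))), which lets
  // instruction selection fold a 64-bit load into movddup when one is
  // available.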
6477 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1); 6478 return DAG.getNode(ISD::BITCAST, dl, VT, 6479 getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64, 6480 V1, DAG)); 6481} 6482 6483static 6484SDValue getMOVLowToHigh(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, 6485 bool HasSSE2) { 6486 SDValue V1 = Op.getOperand(0); 6487 SDValue V2 = Op.getOperand(1); 6488 EVT VT = Op.getValueType(); 6489 6490 assert(VT != MVT::v2i64 && "unsupported shuffle type"); 6491 6492 if (HasSSE2 && VT == MVT::v2f64) 6493 return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG); 6494 6495 // v4f32 or v4i32: canonizalized to v4f32 (which is legal for SSE1) 6496 return DAG.getNode(ISD::BITCAST, dl, VT, 6497 getTargetShuffleNode(X86ISD::MOVLHPS, dl, MVT::v4f32, 6498 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V1), 6499 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V2), DAG)); 6500} 6501 6502static 6503SDValue getMOVHighToLow(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG) { 6504 SDValue V1 = Op.getOperand(0); 6505 SDValue V2 = Op.getOperand(1); 6506 EVT VT = Op.getValueType(); 6507 6508 assert((VT == MVT::v4i32 || VT == MVT::v4f32) && 6509 "unsupported shuffle type"); 6510 6511 if (V2.getOpcode() == ISD::UNDEF) 6512 V2 = V1; 6513 6514 // v4i32 or v4f32 6515 return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG); 6516} 6517 6518static 6519SDValue getMOVLP(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, bool HasSSE2) { 6520 SDValue V1 = Op.getOperand(0); 6521 SDValue V2 = Op.getOperand(1); 6522 EVT VT = Op.getValueType(); 6523 unsigned NumElems = VT.getVectorNumElements(); 6524 6525 // Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second 6526 // operand of these instructions is only memory, so check if there's a 6527 // potencial load folding here, otherwise use SHUFPS or MOVSD to match the 6528 // same masks. 6529 bool CanFoldLoad = false; 6530 6531 // Trivial case, when V2 comes from a load. 6532 if (MayFoldVectorLoad(V2)) 6533 CanFoldLoad = true; 6534 6535 // When V1 is a load, it can be folded later into a store in isel, example: 6536 // (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1) 6537 // turns into: 6538 // (MOVLPSmr addr:$src1, VR128:$src2) 6539 // So, recognize this potential and also use MOVLPS or MOVLPD 6540 else if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op)) 6541 CanFoldLoad = true; 6542 6543 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 6544 if (CanFoldLoad) { 6545 if (HasSSE2 && NumElems == 2) 6546 return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG); 6547 6548 if (NumElems == 4) 6549 // If we don't care about the second element, proceed to use movss. 6550 if (SVOp->getMaskElt(1) != -1) 6551 return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG); 6552 } 6553 6554 // movl and movlp will both match v2i64, but v2i64 is never matched by 6555 // movl earlier because we make it strict to avoid messing with the movlp load 6556 // folding logic (see the code above getMOVLP call). Match it here then, 6557 // this is horrible, but will stay like this until we move all shuffle 6558 // matching to x86 specific nodes. Note that for the 1st condition all 6559 // types are matched with movsd. 
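  // Rough example of the distinction below: for v4f32/v4i32 a MOVL mask looks
  // like <4, 1, 2, 3> (low element from V2, the rest from V1 in order) and is
  // emitted as MOVSS, while the two-element types, or masks that are not a
  // strict MOVL, are emitted as MOVSD.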
6560 if (HasSSE2) { 6561 // FIXME: isMOVLMask should be checked and matched before getMOVLP, 6562 // as to remove this logic from here, as much as possible 6563 if (NumElems == 2 || !isMOVLMask(SVOp->getMask(), VT)) 6564 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG); 6565 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG); 6566 } 6567 6568 assert(VT != MVT::v4i32 && "unsupported shuffle type"); 6569 6570 // Invert the operand order and use SHUFPS to match it. 6571 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V2, V1, 6572 getShuffleSHUFImmediate(SVOp), DAG); 6573} 6574 6575// Reduce a vector shuffle to zext. 6576SDValue 6577X86TargetLowering::lowerVectorIntExtend(SDValue Op, SelectionDAG &DAG) const { 6578 // PMOVZX is only available from SSE41. 6579 if (!Subtarget->hasSSE41()) 6580 return SDValue(); 6581 6582 EVT VT = Op.getValueType(); 6583 6584 // Only AVX2 support 256-bit vector integer extending. 6585 if (!Subtarget->hasAVX2() && VT.is256BitVector()) 6586 return SDValue(); 6587 6588 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 6589 DebugLoc DL = Op.getDebugLoc(); 6590 SDValue V1 = Op.getOperand(0); 6591 SDValue V2 = Op.getOperand(1); 6592 unsigned NumElems = VT.getVectorNumElements(); 6593 6594 // Extending is an unary operation and the element type of the source vector 6595 // won't be equal to or larger than i64. 6596 if (V2.getOpcode() != ISD::UNDEF || !VT.isInteger() || 6597 VT.getVectorElementType() == MVT::i64) 6598 return SDValue(); 6599 6600 // Find the expansion ratio, e.g. expanding from i8 to i32 has a ratio of 4. 6601 unsigned Shift = 1; // Start from 2, i.e. 1 << 1. 6602 while ((1U << Shift) < NumElems) { 6603 if (SVOp->getMaskElt(1U << Shift) == 1) 6604 break; 6605 Shift += 1; 6606 // The maximal ratio is 8, i.e. from i8 to i64. 6607 if (Shift > 3) 6608 return SDValue(); 6609 } 6610 6611 // Check the shuffle mask. 6612 unsigned Mask = (1U << Shift) - 1; 6613 for (unsigned i = 0; i != NumElems; ++i) { 6614 int EltIdx = SVOp->getMaskElt(i); 6615 if ((i & Mask) != 0 && EltIdx != -1) 6616 return SDValue(); 6617 if ((i & Mask) == 0 && (unsigned)EltIdx != (i >> Shift)) 6618 return SDValue(); 6619 } 6620 6621 unsigned NBits = VT.getVectorElementType().getSizeInBits() << Shift; 6622 EVT NeVT = EVT::getIntegerVT(*DAG.getContext(), NBits); 6623 EVT NVT = EVT::getVectorVT(*DAG.getContext(), NeVT, NumElems >> Shift); 6624 6625 if (!isTypeLegal(NVT)) 6626 return SDValue(); 6627 6628 // Simplify the operand as it's prepared to be fed into shuffle. 6629 unsigned SignificantBits = NVT.getSizeInBits() >> Shift; 6630 if (V1.getOpcode() == ISD::BITCAST && 6631 V1.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR && 6632 V1.getOperand(0).getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT && 6633 V1.getOperand(0) 6634 .getOperand(0).getValueType().getSizeInBits() == SignificantBits) { 6635 // (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast x) 6636 SDValue V = V1.getOperand(0).getOperand(0).getOperand(0); 6637 ConstantSDNode *CIdx = 6638 dyn_cast<ConstantSDNode>(V1.getOperand(0).getOperand(0).getOperand(1)); 6639 // If it's foldable, i.e. normal load with single use, we will let code 6640 // selection to fold it. Otherwise, we will short the conversion sequence. 
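  // Sketch: (v16i8 bitcast (v2i64 scalar_to_vector (i64 extract_vector_elt X, 0)))
  // collapses to (v16i8 bitcast X) here, but only when X is not a single-use
  // normal load; in the load case the longer chain is kept so instruction
  // selection can still fold the load.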
6641 if (CIdx && CIdx->getZExtValue() == 0 && 6642 (!ISD::isNormalLoad(V.getNode()) || !V.hasOneUse())) 6643 V1 = DAG.getNode(ISD::BITCAST, DL, V1.getValueType(), V); 6644 } 6645 6646 return DAG.getNode(ISD::BITCAST, DL, VT, 6647 DAG.getNode(X86ISD::VZEXT, DL, NVT, V1)); 6648} 6649 6650SDValue 6651X86TargetLowering::NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG) const { 6652 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 6653 EVT VT = Op.getValueType(); 6654 DebugLoc dl = Op.getDebugLoc(); 6655 SDValue V1 = Op.getOperand(0); 6656 SDValue V2 = Op.getOperand(1); 6657 6658 if (isZeroShuffle(SVOp)) 6659 return getZeroVector(VT, Subtarget, DAG, dl); 6660 6661 // Handle splat operations 6662 if (SVOp->isSplat()) { 6663 unsigned NumElem = VT.getVectorNumElements(); 6664 int Size = VT.getSizeInBits(); 6665 6666 // Use vbroadcast whenever the splat comes from a foldable load 6667 SDValue Broadcast = LowerVectorBroadcast(Op, DAG); 6668 if (Broadcast.getNode()) 6669 return Broadcast; 6670 6671 // Handle splats by matching through known shuffle masks 6672 if ((Size == 128 && NumElem <= 4) || 6673 (Size == 256 && NumElem < 8)) 6674 return SDValue(); 6675 6676 // All remaning splats are promoted to target supported vector shuffles. 6677 return PromoteSplat(SVOp, DAG); 6678 } 6679 6680 // Check integer expanding shuffles. 6681 SDValue NewOp = lowerVectorIntExtend(Op, DAG); 6682 if (NewOp.getNode()) 6683 return NewOp; 6684 6685 // If the shuffle can be profitably rewritten as a narrower shuffle, then 6686 // do it! 6687 if (VT == MVT::v8i16 || VT == MVT::v16i8 || 6688 VT == MVT::v16i16 || VT == MVT::v32i8) { 6689 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl); 6690 if (NewOp.getNode()) 6691 return DAG.getNode(ISD::BITCAST, dl, VT, NewOp); 6692 } else if ((VT == MVT::v4i32 || 6693 (VT == MVT::v4f32 && Subtarget->hasSSE2()))) { 6694 // FIXME: Figure out a cleaner way to do this. 6695 // Try to make use of movq to zero out the top part. 6696 if (ISD::isBuildVectorAllZeros(V2.getNode())) { 6697 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl); 6698 if (NewOp.getNode()) { 6699 EVT NewVT = NewOp.getValueType(); 6700 if (isCommutedMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), 6701 NewVT, true, false)) 6702 return getVZextMovL(VT, NewVT, NewOp.getOperand(0), 6703 DAG, Subtarget, dl); 6704 } 6705 } else if (ISD::isBuildVectorAllZeros(V1.getNode())) { 6706 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl); 6707 if (NewOp.getNode()) { 6708 EVT NewVT = NewOp.getValueType(); 6709 if (isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), NewVT)) 6710 return getVZextMovL(VT, NewVT, NewOp.getOperand(1), 6711 DAG, Subtarget, dl); 6712 } 6713 } 6714 } 6715 return SDValue(); 6716} 6717 6718SDValue 6719X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const { 6720 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 6721 SDValue V1 = Op.getOperand(0); 6722 SDValue V2 = Op.getOperand(1); 6723 EVT VT = Op.getValueType(); 6724 DebugLoc dl = Op.getDebugLoc(); 6725 unsigned NumElems = VT.getVectorNumElements(); 6726 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF; 6727 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; 6728 bool V1IsSplat = false; 6729 bool V2IsSplat = false; 6730 bool HasSSE2 = Subtarget->hasSSE2(); 6731 bool HasAVX = Subtarget->hasAVX(); 6732 bool HasAVX2 = Subtarget->hasAVX2(); 6733 MachineFunction &MF = DAG.getMachineFunction(); 6734 bool OptForSize = MF.getFunction()->getFnAttributes(). 
6735 hasAttribute(Attributes::OptimizeForSize); 6736 6737 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles"); 6738 6739 if (V1IsUndef && V2IsUndef) 6740 return DAG.getUNDEF(VT); 6741 6742 assert(!V1IsUndef && "Op 1 of shuffle should not be undef"); 6743 6744 // Vector shuffle lowering takes 3 steps: 6745 // 6746 // 1) Normalize the input vectors. Here splats, zeroed vectors, profitable 6747 // narrowing and commutation of operands should be handled. 6748 // 2) Matching of shuffles with known shuffle masks to x86 target specific 6749 // shuffle nodes. 6750 // 3) Rewriting of unmatched masks into new generic shuffle operations, 6751 // so the shuffle can be broken into other shuffles and the legalizer can 6752 // try the lowering again. 6753 // 6754 // The general idea is that no vector_shuffle operation should be left to 6755 // be matched during isel, all of them must be converted to a target specific 6756 // node here. 6757 6758 // Normalize the input vectors. Here splats, zeroed vectors, profitable 6759 // narrowing and commutation of operands should be handled. The actual code 6760 // doesn't include all of those, work in progress... 6761 SDValue NewOp = NormalizeVectorShuffle(Op, DAG); 6762 if (NewOp.getNode()) 6763 return NewOp; 6764 6765 SmallVector<int, 8> M(SVOp->getMask().begin(), SVOp->getMask().end()); 6766 6767 // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and 6768 // unpckh_undef). Only use pshufd if speed is more important than size. 6769 if (OptForSize && isUNPCKL_v_undef_Mask(M, VT, HasAVX2)) 6770 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG); 6771 if (OptForSize && isUNPCKH_v_undef_Mask(M, VT, HasAVX2)) 6772 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG); 6773 6774 if (isMOVDDUPMask(M, VT) && Subtarget->hasSSE3() && 6775 V2IsUndef && RelaxedMayFoldVectorLoad(V1)) 6776 return getMOVDDup(Op, dl, V1, DAG); 6777 6778 if (isMOVHLPS_v_undef_Mask(M, VT)) 6779 return getMOVHighToLow(Op, dl, DAG); 6780 6781 // Use to match splats 6782 if (HasSSE2 && isUNPCKHMask(M, VT, HasAVX2) && V2IsUndef && 6783 (VT == MVT::v2f64 || VT == MVT::v2i64)) 6784 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG); 6785 6786 if (isPSHUFDMask(M, VT)) { 6787 // The actual implementation will match the mask in the if above and then 6788 // during isel it can match several different instructions, not only pshufd 6789 // as its name says, sad but true, emulate the behavior for now... 6790 if (isMOVDDUPMask(M, VT) && ((VT == MVT::v4f32 || VT == MVT::v2i64))) 6791 return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG); 6792 6793 unsigned TargetMask = getShuffleSHUFImmediate(SVOp); 6794 6795 if (HasAVX && (VT == MVT::v4f32 || VT == MVT::v2f64)) 6796 return getTargetShuffleNode(X86ISD::VPERMILP, dl, VT, V1, TargetMask, DAG); 6797 6798 if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32)) 6799 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG); 6800 6801 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V1, 6802 TargetMask, DAG); 6803 } 6804 6805 // Check if this can be converted into a logical shift. 6806 bool isLeft = false; 6807 unsigned ShAmt = 0; 6808 SDValue ShVal; 6809 bool isShift = HasSSE2 && isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt); 6810 if (isShift && ShVal.hasOneUse()) { 6811 // If the shifted value has multiple uses, it may be cheaper to use 6812 // v_set0 + movlhps or movhlps, etc. 
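    // For illustration (one case only): a v4i32 shuffle of (V1, zero) with
    // mask <4, 0, 1, 2> is a whole-vector shift left by one element; ShAmt is
    // converted from elements to bits below (1 * 32 here) and getVShift emits
    // the vshldq/vsrldq-style byte shift.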
6813 EVT EltVT = VT.getVectorElementType(); 6814 ShAmt *= EltVT.getSizeInBits(); 6815 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl); 6816 } 6817 6818 if (isMOVLMask(M, VT)) { 6819 if (ISD::isBuildVectorAllZeros(V1.getNode())) 6820 return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl); 6821 if (!isMOVLPMask(M, VT)) { 6822 if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64)) 6823 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG); 6824 6825 if (VT == MVT::v4i32 || VT == MVT::v4f32) 6826 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG); 6827 } 6828 } 6829 6830 // FIXME: fold these into legal mask. 6831 if (isMOVLHPSMask(M, VT) && !isUNPCKLMask(M, VT, HasAVX2)) 6832 return getMOVLowToHigh(Op, dl, DAG, HasSSE2); 6833 6834 if (isMOVHLPSMask(M, VT)) 6835 return getMOVHighToLow(Op, dl, DAG); 6836 6837 if (V2IsUndef && isMOVSHDUPMask(M, VT, Subtarget)) 6838 return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG); 6839 6840 if (V2IsUndef && isMOVSLDUPMask(M, VT, Subtarget)) 6841 return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG); 6842 6843 if (isMOVLPMask(M, VT)) 6844 return getMOVLP(Op, dl, DAG, HasSSE2); 6845 6846 if (ShouldXformToMOVHLPS(M, VT) || 6847 ShouldXformToMOVLP(V1.getNode(), V2.getNode(), M, VT)) 6848 return CommuteVectorShuffle(SVOp, DAG); 6849 6850 if (isShift) { 6851 // No better options. Use a vshldq / vsrldq. 6852 EVT EltVT = VT.getVectorElementType(); 6853 ShAmt *= EltVT.getSizeInBits(); 6854 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl); 6855 } 6856 6857 bool Commuted = false; 6858 // FIXME: This should also accept a bitcast of a splat? Be careful, not 6859 // 1,1,1,1 -> v8i16 though. 6860 V1IsSplat = isSplatVector(V1.getNode()); 6861 V2IsSplat = isSplatVector(V2.getNode()); 6862 6863 // Canonicalize the splat or undef, if present, to be on the RHS. 6864 if (!V2IsUndef && V1IsSplat && !V2IsSplat) { 6865 CommuteVectorShuffleMask(M, NumElems); 6866 std::swap(V1, V2); 6867 std::swap(V1IsSplat, V2IsSplat); 6868 Commuted = true; 6869 } 6870 6871 if (isCommutedMOVLMask(M, VT, V2IsSplat, V2IsUndef)) { 6872 // Shuffling low element of v1 into undef, just return v1. 6873 if (V2IsUndef) 6874 return V1; 6875 // If V2 is a splat, the mask may be malformed such as <4,3,3,3>, which 6876 // the instruction selector will not match, so get a canonical MOVL with 6877 // swapped operands to undo the commute. 6878 return getMOVL(DAG, dl, VT, V2, V1); 6879 } 6880 6881 if (isUNPCKLMask(M, VT, HasAVX2)) 6882 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG); 6883 6884 if (isUNPCKHMask(M, VT, HasAVX2)) 6885 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG); 6886 6887 if (V2IsSplat) { 6888 // Normalize mask so all entries that point to V2 points to its first 6889 // element then try to match unpck{h|l} again. If match, return a 6890 // new vector_shuffle with the corrected mask.p 6891 SmallVector<int, 8> NewMask(M.begin(), M.end()); 6892 NormalizeMask(NewMask, NumElems); 6893 if (isUNPCKLMask(NewMask, VT, HasAVX2, true)) 6894 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG); 6895 if (isUNPCKHMask(NewMask, VT, HasAVX2, true)) 6896 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG); 6897 } 6898 6899 if (Commuted) { 6900 // Commute is back and try unpck* again. 6901 // FIXME: this seems wrong. 
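    // Roughly: commuting flips each mask index across the operand boundary, so
    // e.g. a v4i32 mask <4, 0, 5, 1> becomes <0, 4, 1, 5> with V1/V2 swapped,
    // which is then recognized as the UNPCKL interleave pattern below.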
6902 CommuteVectorShuffleMask(M, NumElems); 6903 std::swap(V1, V2); 6904 std::swap(V1IsSplat, V2IsSplat); 6905 Commuted = false; 6906 6907 if (isUNPCKLMask(M, VT, HasAVX2)) 6908 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG); 6909 6910 if (isUNPCKHMask(M, VT, HasAVX2)) 6911 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG); 6912 } 6913 6914 // Normalize the node to match x86 shuffle ops if needed 6915 if (!V2IsUndef && (isSHUFPMask(M, VT, HasAVX, /* Commuted */ true))) 6916 return CommuteVectorShuffle(SVOp, DAG); 6917 6918 // The checks below are all present in isShuffleMaskLegal, but they are 6919 // inlined here right now to enable us to directly emit target specific 6920 // nodes, and remove one by one until they don't return Op anymore. 6921 6922 if (isPALIGNRMask(M, VT, Subtarget)) 6923 return getTargetShuffleNode(X86ISD::PALIGN, dl, VT, V1, V2, 6924 getShufflePALIGNRImmediate(SVOp), 6925 DAG); 6926 6927 if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) && 6928 SVOp->getSplatIndex() == 0 && V2IsUndef) { 6929 if (VT == MVT::v2f64 || VT == MVT::v2i64) 6930 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG); 6931 } 6932 6933 if (isPSHUFHWMask(M, VT, HasAVX2)) 6934 return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1, 6935 getShufflePSHUFHWImmediate(SVOp), 6936 DAG); 6937 6938 if (isPSHUFLWMask(M, VT, HasAVX2)) 6939 return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1, 6940 getShufflePSHUFLWImmediate(SVOp), 6941 DAG); 6942 6943 if (isSHUFPMask(M, VT, HasAVX)) 6944 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V2, 6945 getShuffleSHUFImmediate(SVOp), DAG); 6946 6947 if (isUNPCKL_v_undef_Mask(M, VT, HasAVX2)) 6948 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG); 6949 if (isUNPCKH_v_undef_Mask(M, VT, HasAVX2)) 6950 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG); 6951 6952 //===--------------------------------------------------------------------===// 6953 // Generate target specific nodes for 128 or 256-bit shuffles only 6954 // supported in the AVX instruction set. 6955 // 6956 6957 // Handle VMOVDDUPY permutations 6958 if (V2IsUndef && isMOVDDUPYMask(M, VT, HasAVX)) 6959 return getTargetShuffleNode(X86ISD::MOVDDUP, dl, VT, V1, DAG); 6960 6961 // Handle VPERMILPS/D* permutations 6962 if (isVPERMILPMask(M, VT, HasAVX)) { 6963 if (HasAVX2 && VT == MVT::v8i32) 6964 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, 6965 getShuffleSHUFImmediate(SVOp), DAG); 6966 return getTargetShuffleNode(X86ISD::VPERMILP, dl, VT, V1, 6967 getShuffleSHUFImmediate(SVOp), DAG); 6968 } 6969 6970 // Handle VPERM2F128/VPERM2I128 permutations 6971 if (isVPERM2X128Mask(M, VT, HasAVX)) 6972 return getTargetShuffleNode(X86ISD::VPERM2X128, dl, VT, V1, 6973 V2, getShuffleVPERM2X128Immediate(SVOp), DAG); 6974 6975 SDValue BlendOp = LowerVECTOR_SHUFFLEtoBlend(SVOp, Subtarget, DAG); 6976 if (BlendOp.getNode()) 6977 return BlendOp; 6978 6979 if (V2IsUndef && HasAVX2 && (VT == MVT::v8i32 || VT == MVT::v8f32)) { 6980 SmallVector<SDValue, 8> permclMask; 6981 for (unsigned i = 0; i != 8; ++i) { 6982 permclMask.push_back(DAG.getConstant((M[i]>=0) ? 
M[i] : 0, MVT::i32)); 6983 } 6984 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, 6985 &permclMask[0], 8); 6986 // Bitcast is for VPERMPS since mask is v8i32 but node takes v8f32 6987 return DAG.getNode(X86ISD::VPERMV, dl, VT, 6988 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V1); 6989 } 6990 6991 if (V2IsUndef && HasAVX2 && (VT == MVT::v4i64 || VT == MVT::v4f64)) 6992 return getTargetShuffleNode(X86ISD::VPERMI, dl, VT, V1, 6993 getShuffleCLImmediate(SVOp), DAG); 6994 6995 6996 //===--------------------------------------------------------------------===// 6997 // Since no target specific shuffle was selected for this generic one, 6998 // lower it into other known shuffles. FIXME: this isn't true yet, but 6999 // this is the plan. 7000 // 7001 7002 // Handle v8i16 specifically since SSE can do byte extraction and insertion. 7003 if (VT == MVT::v8i16) { 7004 SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, Subtarget, DAG); 7005 if (NewOp.getNode()) 7006 return NewOp; 7007 } 7008 7009 if (VT == MVT::v16i8) { 7010 SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, DAG, *this); 7011 if (NewOp.getNode()) 7012 return NewOp; 7013 } 7014 7015 if (VT == MVT::v32i8) { 7016 SDValue NewOp = LowerVECTOR_SHUFFLEv32i8(SVOp, Subtarget, DAG); 7017 if (NewOp.getNode()) 7018 return NewOp; 7019 } 7020 7021 // Handle all 128-bit wide vectors with 4 elements, and match them with 7022 // several different shuffle types. 7023 if (NumElems == 4 && VT.is128BitVector()) 7024 return LowerVECTOR_SHUFFLE_128v4(SVOp, DAG); 7025 7026 // Handle general 256-bit shuffles 7027 if (VT.is256BitVector()) 7028 return LowerVECTOR_SHUFFLE_256(SVOp, DAG); 7029 7030 return SDValue(); 7031} 7032 7033SDValue 7034X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, 7035 SelectionDAG &DAG) const { 7036 EVT VT = Op.getValueType(); 7037 DebugLoc dl = Op.getDebugLoc(); 7038 7039 if (!Op.getOperand(0).getValueType().is128BitVector()) 7040 return SDValue(); 7041 7042 if (VT.getSizeInBits() == 8) { 7043 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32, 7044 Op.getOperand(0), Op.getOperand(1)); 7045 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract, 7046 DAG.getValueType(VT)); 7047 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert); 7048 } 7049 7050 if (VT.getSizeInBits() == 16) { 7051 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 7052 // If Idx is 0, it's cheaper to do a move instead of a pextrw. 7053 if (Idx == 0) 7054 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, 7055 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, 7056 DAG.getNode(ISD::BITCAST, dl, 7057 MVT::v4i32, 7058 Op.getOperand(0)), 7059 Op.getOperand(1))); 7060 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32, 7061 Op.getOperand(0), Op.getOperand(1)); 7062 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract, 7063 DAG.getValueType(VT)); 7064 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert); 7065 } 7066 7067 if (VT == MVT::f32) { 7068 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy 7069 // the result back to FR32 register. It's only worth matching if the 7070 // result has a single use which is a store or a bitcast to i32. And in 7071 // the case of a store, it's not worth it if the index is a constant 0, 7072 // because a MOVSSmr can be used instead, which is smaller and faster. 
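    // Sketch of the profitable case: (store (f32 (extract_vector_elt V, 1)))
    // can become a single extractps-to-memory, and a bitcast-to-i32 user can
    // take the GPR32 result directly; anything else returns SDValue() below
    // and is left to the generic extract lowering.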
7073 if (!Op.hasOneUse()) 7074 return SDValue(); 7075 SDNode *User = *Op.getNode()->use_begin(); 7076 if ((User->getOpcode() != ISD::STORE || 7077 (isa<ConstantSDNode>(Op.getOperand(1)) && 7078 cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) && 7079 (User->getOpcode() != ISD::BITCAST || 7080 User->getValueType(0) != MVT::i32)) 7081 return SDValue(); 7082 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, 7083 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, 7084 Op.getOperand(0)), 7085 Op.getOperand(1)); 7086 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract); 7087 } 7088 7089 if (VT == MVT::i32 || VT == MVT::i64) { 7090 // ExtractPS/pextrq works with constant index. 7091 if (isa<ConstantSDNode>(Op.getOperand(1))) 7092 return Op; 7093 } 7094 return SDValue(); 7095} 7096 7097 7098SDValue 7099X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, 7100 SelectionDAG &DAG) const { 7101 if (!isa<ConstantSDNode>(Op.getOperand(1))) 7102 return SDValue(); 7103 7104 SDValue Vec = Op.getOperand(0); 7105 EVT VecVT = Vec.getValueType(); 7106 7107 // If this is a 256-bit vector result, first extract the 128-bit vector and 7108 // then extract the element from the 128-bit vector. 7109 if (VecVT.is256BitVector()) { 7110 DebugLoc dl = Op.getNode()->getDebugLoc(); 7111 unsigned NumElems = VecVT.getVectorNumElements(); 7112 SDValue Idx = Op.getOperand(1); 7113 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue(); 7114 7115 // Get the 128-bit vector. 7116 Vec = Extract128BitVector(Vec, IdxVal, DAG, dl); 7117 7118 if (IdxVal >= NumElems/2) 7119 IdxVal -= NumElems/2; 7120 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec, 7121 DAG.getConstant(IdxVal, MVT::i32)); 7122 } 7123 7124 assert(VecVT.is128BitVector() && "Unexpected vector length"); 7125 7126 if (Subtarget->hasSSE41()) { 7127 SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG); 7128 if (Res.getNode()) 7129 return Res; 7130 } 7131 7132 EVT VT = Op.getValueType(); 7133 DebugLoc dl = Op.getDebugLoc(); 7134 // TODO: handle v16i8. 7135 if (VT.getSizeInBits() == 16) { 7136 SDValue Vec = Op.getOperand(0); 7137 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 7138 if (Idx == 0) 7139 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, 7140 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, 7141 DAG.getNode(ISD::BITCAST, dl, 7142 MVT::v4i32, Vec), 7143 Op.getOperand(1))); 7144 // Transform it so it match pextrw which produces a 32-bit result. 7145 EVT EltVT = MVT::i32; 7146 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT, 7147 Op.getOperand(0), Op.getOperand(1)); 7148 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract, 7149 DAG.getValueType(VT)); 7150 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert); 7151 } 7152 7153 if (VT.getSizeInBits() == 32) { 7154 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 7155 if (Idx == 0) 7156 return Op; 7157 7158 // SHUFPS the element to the lowest double word, then movss. 7159 int Mask[4] = { static_cast<int>(Idx), -1, -1, -1 }; 7160 EVT VVT = Op.getOperand(0).getValueType(); 7161 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0), 7162 DAG.getUNDEF(VVT), Mask); 7163 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec, 7164 DAG.getIntPtrConstant(0)); 7165 } 7166 7167 if (VT.getSizeInBits() == 64) { 7168 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b 7169 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught 7170 // to match extract_elt for f64. 
7171 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 7172 if (Idx == 0) 7173 return Op; 7174 7175 // UNPCKHPD the element to the lowest double word, then movsd. 7176 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored 7177 // to a f64mem, the whole operation is folded into a single MOVHPDmr. 7178 int Mask[2] = { 1, -1 }; 7179 EVT VVT = Op.getOperand(0).getValueType(); 7180 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0), 7181 DAG.getUNDEF(VVT), Mask); 7182 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec, 7183 DAG.getIntPtrConstant(0)); 7184 } 7185 7186 return SDValue(); 7187} 7188 7189SDValue 7190X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, 7191 SelectionDAG &DAG) const { 7192 EVT VT = Op.getValueType(); 7193 EVT EltVT = VT.getVectorElementType(); 7194 DebugLoc dl = Op.getDebugLoc(); 7195 7196 SDValue N0 = Op.getOperand(0); 7197 SDValue N1 = Op.getOperand(1); 7198 SDValue N2 = Op.getOperand(2); 7199 7200 if (!VT.is128BitVector()) 7201 return SDValue(); 7202 7203 if ((EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) && 7204 isa<ConstantSDNode>(N2)) { 7205 unsigned Opc; 7206 if (VT == MVT::v8i16) 7207 Opc = X86ISD::PINSRW; 7208 else if (VT == MVT::v16i8) 7209 Opc = X86ISD::PINSRB; 7210 else 7211 Opc = X86ISD::PINSRB; 7212 7213 // Transform it so it match pinsr{b,w} which expects a GR32 as its second 7214 // argument. 7215 if (N1.getValueType() != MVT::i32) 7216 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1); 7217 if (N2.getValueType() != MVT::i32) 7218 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue()); 7219 return DAG.getNode(Opc, dl, VT, N0, N1, N2); 7220 } 7221 7222 if (EltVT == MVT::f32 && isa<ConstantSDNode>(N2)) { 7223 // Bits [7:6] of the constant are the source select. This will always be 7224 // zero here. The DAG Combiner may combine an extract_elt index into these 7225 // bits. For example (insert (extract, 3), 2) could be matched by putting 7226 // the '3' into bits [7:6] of X86ISD::INSERTPS. 7227 // Bits [5:4] of the constant are the destination select. This is the 7228 // value of the incoming immediate. 7229 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may 7230 // combine either bitwise AND or insert of float 0.0 to set these bits. 7231 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue() << 4); 7232 // Create this as a scalar to vector.. 7233 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1); 7234 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2); 7235 } 7236 7237 if ((EltVT == MVT::i32 || EltVT == MVT::i64) && isa<ConstantSDNode>(N2)) { 7238 // PINSR* works with constant index. 7239 return Op; 7240 } 7241 return SDValue(); 7242} 7243 7244SDValue 7245X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const { 7246 EVT VT = Op.getValueType(); 7247 EVT EltVT = VT.getVectorElementType(); 7248 7249 DebugLoc dl = Op.getDebugLoc(); 7250 SDValue N0 = Op.getOperand(0); 7251 SDValue N1 = Op.getOperand(1); 7252 SDValue N2 = Op.getOperand(2); 7253 7254 // If this is a 256-bit vector result, first extract the 128-bit vector, 7255 // insert the element into the extracted half and then place it back. 7256 if (VT.is256BitVector()) { 7257 if (!isa<ConstantSDNode>(N2)) 7258 return SDValue(); 7259 7260 // Get the desired 128-bit vector half. 
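    // Worked example (sketch): inserting into element 5 of a v8f32 extracts
    // the upper v4f32 half, performs the insert at index 5 - 4 = 1 within that
    // half, and then writes the half back over elements 4-7 of the original
    // vector.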
7261 unsigned NumElems = VT.getVectorNumElements(); 7262 unsigned IdxVal = cast<ConstantSDNode>(N2)->getZExtValue(); 7263 SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl); 7264 7265 // Insert the element into the desired half. 7266 bool Upper = IdxVal >= NumElems/2; 7267 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1, 7268 DAG.getConstant(Upper ? IdxVal-NumElems/2 : IdxVal, MVT::i32)); 7269 7270 // Insert the changed part back to the 256-bit vector 7271 return Insert128BitVector(N0, V, IdxVal, DAG, dl); 7272 } 7273 7274 if (Subtarget->hasSSE41()) 7275 return LowerINSERT_VECTOR_ELT_SSE4(Op, DAG); 7276 7277 if (EltVT == MVT::i8) 7278 return SDValue(); 7279 7280 if (EltVT.getSizeInBits() == 16 && isa<ConstantSDNode>(N2)) { 7281 // Transform it so it match pinsrw which expects a 16-bit value in a GR32 7282 // as its second argument. 7283 if (N1.getValueType() != MVT::i32) 7284 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1); 7285 if (N2.getValueType() != MVT::i32) 7286 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue()); 7287 return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2); 7288 } 7289 return SDValue(); 7290} 7291 7292static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) { 7293 LLVMContext *Context = DAG.getContext(); 7294 DebugLoc dl = Op.getDebugLoc(); 7295 EVT OpVT = Op.getValueType(); 7296 7297 // If this is a 256-bit vector result, first insert into a 128-bit 7298 // vector and then insert into the 256-bit vector. 7299 if (!OpVT.is128BitVector()) { 7300 // Insert into a 128-bit vector. 7301 EVT VT128 = EVT::getVectorVT(*Context, 7302 OpVT.getVectorElementType(), 7303 OpVT.getVectorNumElements() / 2); 7304 7305 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0)); 7306 7307 // Insert the 128-bit vector. 7308 return Insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl); 7309 } 7310 7311 if (OpVT == MVT::v1i64 && 7312 Op.getOperand(0).getValueType() == MVT::i64) 7313 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0)); 7314 7315 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0)); 7316 assert(OpVT.is128BitVector() && "Expected an SSE type!"); 7317 return DAG.getNode(ISD::BITCAST, dl, OpVT, 7318 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt)); 7319} 7320 7321// Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in 7322// a simple subregister reference or explicit instructions to grab 7323// upper bits of a vector. 7324static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget, 7325 SelectionDAG &DAG) { 7326 if (Subtarget->hasAVX()) { 7327 DebugLoc dl = Op.getNode()->getDebugLoc(); 7328 SDValue Vec = Op.getNode()->getOperand(0); 7329 SDValue Idx = Op.getNode()->getOperand(1); 7330 7331 if (Op.getNode()->getValueType(0).is128BitVector() && 7332 Vec.getNode()->getValueType(0).is256BitVector() && 7333 isa<ConstantSDNode>(Idx)) { 7334 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue(); 7335 return Extract128BitVector(Vec, IdxVal, DAG, dl); 7336 } 7337 } 7338 return SDValue(); 7339} 7340 7341// Lower a node with an INSERT_SUBVECTOR opcode. This may result in a 7342// simple superregister reference or explicit instructions to insert 7343// the upper bits of a vector. 
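// For example (a sketch, assuming AVX): (insert_subvector (v8f32 X), (v4f32 Y), 4)
// can be emitted as a single vinsertf128 with immediate 1, while an insert at
// index 0 is just a write of the low half.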
7344static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget, 7345 SelectionDAG &DAG) { 7346 if (Subtarget->hasAVX()) { 7347 DebugLoc dl = Op.getNode()->getDebugLoc(); 7348 SDValue Vec = Op.getNode()->getOperand(0); 7349 SDValue SubVec = Op.getNode()->getOperand(1); 7350 SDValue Idx = Op.getNode()->getOperand(2); 7351 7352 if (Op.getNode()->getValueType(0).is256BitVector() && 7353 SubVec.getNode()->getValueType(0).is128BitVector() && 7354 isa<ConstantSDNode>(Idx)) { 7355 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue(); 7356 return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl); 7357 } 7358 } 7359 return SDValue(); 7360} 7361 7362// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 7363// their target countpart wrapped in the X86ISD::Wrapper node. Suppose N is 7364// one of the above mentioned nodes. It has to be wrapped because otherwise 7365// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 7366// be used to form addressing mode. These wrapped nodes will be selected 7367// into MOV32ri. 7368SDValue 7369X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const { 7370 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 7371 7372 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the 7373 // global base reg. 7374 unsigned char OpFlag = 0; 7375 unsigned WrapperKind = X86ISD::Wrapper; 7376 CodeModel::Model M = getTargetMachine().getCodeModel(); 7377 7378 if (Subtarget->isPICStyleRIPRel() && 7379 (M == CodeModel::Small || M == CodeModel::Kernel)) 7380 WrapperKind = X86ISD::WrapperRIP; 7381 else if (Subtarget->isPICStyleGOT()) 7382 OpFlag = X86II::MO_GOTOFF; 7383 else if (Subtarget->isPICStyleStubPIC()) 7384 OpFlag = X86II::MO_PIC_BASE_OFFSET; 7385 7386 SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(), 7387 CP->getAlignment(), 7388 CP->getOffset(), OpFlag); 7389 DebugLoc DL = CP->getDebugLoc(); 7390 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result); 7391 // With PIC, the address is actually $g + Offset. 7392 if (OpFlag) { 7393 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(), 7394 DAG.getNode(X86ISD::GlobalBaseReg, 7395 DebugLoc(), getPointerTy()), 7396 Result); 7397 } 7398 7399 return Result; 7400} 7401 7402SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { 7403 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 7404 7405 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the 7406 // global base reg. 7407 unsigned char OpFlag = 0; 7408 unsigned WrapperKind = X86ISD::Wrapper; 7409 CodeModel::Model M = getTargetMachine().getCodeModel(); 7410 7411 if (Subtarget->isPICStyleRIPRel() && 7412 (M == CodeModel::Small || M == CodeModel::Kernel)) 7413 WrapperKind = X86ISD::WrapperRIP; 7414 else if (Subtarget->isPICStyleGOT()) 7415 OpFlag = X86II::MO_GOTOFF; 7416 else if (Subtarget->isPICStyleStubPIC()) 7417 OpFlag = X86II::MO_PIC_BASE_OFFSET; 7418 7419 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(), 7420 OpFlag); 7421 DebugLoc DL = JT->getDebugLoc(); 7422 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result); 7423 7424 // With PIC, the address is actually $g + Offset. 
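  // Sketch of the cases: with 32-bit GOT-style PIC the address is computed as
  // GlobalBaseReg + JT@GOTOFF, and Darwin stub PIC adds the pic base the same
  // way; RIP-relative targets (small/kernel code models) skip this add, since
  // OpFlag stays zero and the WrapperRIP node already encodes the PC-relative
  // form.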
7425 if (OpFlag) 7426 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(), 7427 DAG.getNode(X86ISD::GlobalBaseReg, 7428 DebugLoc(), getPointerTy()), 7429 Result); 7430 7431 return Result; 7432} 7433 7434SDValue 7435X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const { 7436 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol(); 7437 7438 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the 7439 // global base reg. 7440 unsigned char OpFlag = 0; 7441 unsigned WrapperKind = X86ISD::Wrapper; 7442 CodeModel::Model M = getTargetMachine().getCodeModel(); 7443 7444 if (Subtarget->isPICStyleRIPRel() && 7445 (M == CodeModel::Small || M == CodeModel::Kernel)) { 7446 if (Subtarget->isTargetDarwin() || Subtarget->isTargetELF()) 7447 OpFlag = X86II::MO_GOTPCREL; 7448 WrapperKind = X86ISD::WrapperRIP; 7449 } else if (Subtarget->isPICStyleGOT()) { 7450 OpFlag = X86II::MO_GOT; 7451 } else if (Subtarget->isPICStyleStubPIC()) { 7452 OpFlag = X86II::MO_DARWIN_NONLAZY_PIC_BASE; 7453 } else if (Subtarget->isPICStyleStubNoDynamic()) { 7454 OpFlag = X86II::MO_DARWIN_NONLAZY; 7455 } 7456 7457 SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag); 7458 7459 DebugLoc DL = Op.getDebugLoc(); 7460 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result); 7461 7462 7463 // With PIC, the address is actually $g + Offset. 7464 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 7465 !Subtarget->is64Bit()) { 7466 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(), 7467 DAG.getNode(X86ISD::GlobalBaseReg, 7468 DebugLoc(), getPointerTy()), 7469 Result); 7470 } 7471 7472 // For symbols that require a load from a stub to get the address, emit the 7473 // load. 7474 if (isGlobalStubReference(OpFlag)) 7475 Result = DAG.getLoad(getPointerTy(), DL, DAG.getEntryNode(), Result, 7476 MachinePointerInfo::getGOT(), false, false, false, 0); 7477 7478 return Result; 7479} 7480 7481SDValue 7482X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const { 7483 // Create the TargetBlockAddressAddress node. 7484 unsigned char OpFlags = 7485 Subtarget->ClassifyBlockAddressReference(); 7486 CodeModel::Model M = getTargetMachine().getCodeModel(); 7487 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 7488 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset(); 7489 DebugLoc dl = Op.getDebugLoc(); 7490 SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(), Offset, 7491 OpFlags); 7492 7493 if (Subtarget->isPICStyleRIPRel() && 7494 (M == CodeModel::Small || M == CodeModel::Kernel)) 7495 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result); 7496 else 7497 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result); 7498 7499 // With PIC, the address is actually $g + Offset. 7500 if (isGlobalRelativeToPICBase(OpFlags)) { 7501 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), 7502 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()), 7503 Result); 7504 } 7505 7506 return Result; 7507} 7508 7509SDValue 7510X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl, 7511 int64_t Offset, 7512 SelectionDAG &DAG) const { 7513 // Create the TargetGlobalAddress node, folding in the constant 7514 // offset if it is legal. 
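  // e.g. (a sketch): a direct static reference to gv+8 under the small code
  // model becomes a single TargetGlobalAddress(gv, +8); if the reference needs
  // a GOT/stub flag instead, the offset is left out here and added back with
  // an explicit ADD after the wrapper (and possible stub load) below.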
7515 unsigned char OpFlags = 7516 Subtarget->ClassifyGlobalReference(GV, getTargetMachine()); 7517 CodeModel::Model M = getTargetMachine().getCodeModel(); 7518 SDValue Result; 7519 if (OpFlags == X86II::MO_NO_FLAG && 7520 X86::isOffsetSuitableForCodeModel(Offset, M)) { 7521 // A direct static reference to a global. 7522 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset); 7523 Offset = 0; 7524 } else { 7525 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags); 7526 } 7527 7528 if (Subtarget->isPICStyleRIPRel() && 7529 (M == CodeModel::Small || M == CodeModel::Kernel)) 7530 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result); 7531 else 7532 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result); 7533 7534 // With PIC, the address is actually $g + Offset. 7535 if (isGlobalRelativeToPICBase(OpFlags)) { 7536 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), 7537 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()), 7538 Result); 7539 } 7540 7541 // For globals that require a load from a stub to get the address, emit the 7542 // load. 7543 if (isGlobalStubReference(OpFlags)) 7544 Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result, 7545 MachinePointerInfo::getGOT(), false, false, false, 0); 7546 7547 // If there was a non-zero offset that we didn't fold, create an explicit 7548 // addition for it. 7549 if (Offset != 0) 7550 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result, 7551 DAG.getConstant(Offset, getPointerTy())); 7552 7553 return Result; 7554} 7555 7556SDValue 7557X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const { 7558 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 7559 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset(); 7560 return LowerGlobalAddress(GV, Op.getDebugLoc(), Offset, DAG); 7561} 7562 7563static SDValue 7564GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA, 7565 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg, 7566 unsigned char OperandFlags, bool LocalDynamic = false) { 7567 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 7568 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 7569 DebugLoc dl = GA->getDebugLoc(); 7570 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, 7571 GA->getValueType(0), 7572 GA->getOffset(), 7573 OperandFlags); 7574 7575 X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR 7576 : X86ISD::TLSADDR; 7577 7578 if (InFlag) { 7579 SDValue Ops[] = { Chain, TGA, *InFlag }; 7580 Chain = DAG.getNode(CallType, dl, NodeTys, Ops, 3); 7581 } else { 7582 SDValue Ops[] = { Chain, TGA }; 7583 Chain = DAG.getNode(CallType, dl, NodeTys, Ops, 2); 7584 } 7585 7586 // TLSADDR will be codegen'ed as call. Inform MFI that function has calls. 7587 MFI->setAdjustsStack(true); 7588 7589 SDValue Flag = Chain.getValue(1); 7590 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag); 7591} 7592 7593// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit 7594static SDValue 7595LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG, 7596 const EVT PtrVT) { 7597 SDValue InFlag; 7598 DebugLoc dl = GA->getDebugLoc(); // ? 
function entry point might be better 7599 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX, 7600 DAG.getNode(X86ISD::GlobalBaseReg, 7601 DebugLoc(), PtrVT), InFlag); 7602 InFlag = Chain.getValue(1); 7603 7604 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD); 7605} 7606 7607// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit 7608static SDValue 7609LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG, 7610 const EVT PtrVT) { 7611 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, NULL, PtrVT, 7612 X86::RAX, X86II::MO_TLSGD); 7613} 7614 7615static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA, 7616 SelectionDAG &DAG, 7617 const EVT PtrVT, 7618 bool is64Bit) { 7619 DebugLoc dl = GA->getDebugLoc(); 7620 7621 // Get the start address of the TLS block for this module. 7622 X86MachineFunctionInfo* MFI = DAG.getMachineFunction() 7623 .getInfo<X86MachineFunctionInfo>(); 7624 MFI->incNumLocalDynamicTLSAccesses(); 7625 7626 SDValue Base; 7627 if (is64Bit) { 7628 Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, NULL, PtrVT, X86::RAX, 7629 X86II::MO_TLSLD, /*LocalDynamic=*/true); 7630 } else { 7631 SDValue InFlag; 7632 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX, 7633 DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), PtrVT), InFlag); 7634 InFlag = Chain.getValue(1); 7635 Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, 7636 X86II::MO_TLSLDM, /*LocalDynamic=*/true); 7637 } 7638 7639 // Note: the CleanupLocalDynamicTLSPass will remove redundant computations 7640 // of Base. 7641 7642 // Build x@dtpoff. 7643 unsigned char OperandFlags = X86II::MO_DTPOFF; 7644 unsigned WrapperKind = X86ISD::Wrapper; 7645 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, 7646 GA->getValueType(0), 7647 GA->getOffset(), OperandFlags); 7648 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA); 7649 7650 // Add x@dtpoff with the base. 7651 return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base); 7652} 7653 7654// Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model. 7655static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, 7656 const EVT PtrVT, TLSModel::Model model, 7657 bool is64Bit, bool isPIC) { 7658 DebugLoc dl = GA->getDebugLoc(); 7659 7660 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit). 7661 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(), 7662 is64Bit ? 257 : 256)); 7663 7664 SDValue ThreadPointer = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 7665 DAG.getIntPtrConstant(0), 7666 MachinePointerInfo(Ptr), 7667 false, false, false, 0); 7668 7669 unsigned char OperandFlags = 0; 7670 // Most TLS accesses are not RIP relative, even on x86-64. One exception is 7671 // initialexec. 7672 unsigned WrapperKind = X86ISD::Wrapper; 7673 if (model == TLSModel::LocalExec) { 7674 OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF; 7675 } else if (model == TLSModel::InitialExec) { 7676 if (is64Bit) { 7677 OperandFlags = X86II::MO_GOTTPOFF; 7678 WrapperKind = X86ISD::WrapperRIP; 7679 } else { 7680 OperandFlags = isPIC ? 
X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF; 7681 } 7682 } else { 7683 llvm_unreachable("Unexpected model"); 7684 } 7685 7686 // emit "addl x@ntpoff,%eax" (local exec) 7687 // or "addl x@indntpoff,%eax" (initial exec) 7688 // or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic) 7689 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, 7690 GA->getValueType(0), 7691 GA->getOffset(), OperandFlags); 7692 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA); 7693 7694 if (model == TLSModel::InitialExec) { 7695 if (isPIC && !is64Bit) { 7696 Offset = DAG.getNode(ISD::ADD, dl, PtrVT, 7697 DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), PtrVT), 7698 Offset); 7699 } 7700 7701 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset, 7702 MachinePointerInfo::getGOT(), false, false, false, 7703 0); 7704 } 7705 7706 // The address of the thread local variable is the add of the thread 7707 // pointer with the offset of the variable. 7708 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); 7709} 7710 7711SDValue 7712X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { 7713 7714 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 7715 const GlobalValue *GV = GA->getGlobal(); 7716 7717 if (Subtarget->isTargetELF()) { 7718 TLSModel::Model model = getTargetMachine().getTLSModel(GV); 7719 7720 switch (model) { 7721 case TLSModel::GeneralDynamic: 7722 if (Subtarget->is64Bit()) 7723 return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy()); 7724 return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy()); 7725 case TLSModel::LocalDynamic: 7726 return LowerToTLSLocalDynamicModel(GA, DAG, getPointerTy(), 7727 Subtarget->is64Bit()); 7728 case TLSModel::InitialExec: 7729 case TLSModel::LocalExec: 7730 return LowerToTLSExecModel(GA, DAG, getPointerTy(), model, 7731 Subtarget->is64Bit(), 7732 getTargetMachine().getRelocationModel() == Reloc::PIC_); 7733 } 7734 llvm_unreachable("Unknown TLS model."); 7735 } 7736 7737 if (Subtarget->isTargetDarwin()) { 7738 // Darwin only has one model of TLS. Lower to that. 7739 unsigned char OpFlag = 0; 7740 unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ? 7741 X86ISD::WrapperRIP : X86ISD::Wrapper; 7742 7743 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the 7744 // global base reg. 7745 bool PIC32 = (getTargetMachine().getRelocationModel() == Reloc::PIC_) && 7746 !Subtarget->is64Bit(); 7747 if (PIC32) 7748 OpFlag = X86II::MO_TLVP_PIC_BASE; 7749 else 7750 OpFlag = X86II::MO_TLVP; 7751 DebugLoc DL = Op.getDebugLoc(); 7752 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL, 7753 GA->getValueType(0), 7754 GA->getOffset(), OpFlag); 7755 SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result); 7756 7757 // With PIC32, the address is actually $g + Offset. 7758 if (PIC32) 7759 Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(), 7760 DAG.getNode(X86ISD::GlobalBaseReg, 7761 DebugLoc(), getPointerTy()), 7762 Offset); 7763 7764 // Lowering the machine isd will make sure everything is in the right 7765 // location. 7766 SDValue Chain = DAG.getEntryNode(); 7767 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 7768 SDValue Args[] = { Chain, Offset }; 7769 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args, 2); 7770 7771 // TLSCALL will be codegen'ed as call. Inform MFI that function has calls. 
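    // (Illustrative only: on x86-64 Darwin the TLSCALL just built ends up as
    // the usual thread-local-variable descriptor call,
    //    movq _var@TLVP(%rip), %rdi
    //    callq *(%rdi)
    // with the variable's address returned in %rax, which is what the
    // CopyFromReg at the end of this block reads.)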
7772 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 7773 MFI->setAdjustsStack(true); 7774 7775 // And our return value (tls address) is in the standard call return value 7776 // location. 7777 unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX; 7778 return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(), 7779 Chain.getValue(1)); 7780 } 7781 7782 if (Subtarget->isTargetWindows()) { 7783 // Just use the implicit TLS architecture 7784 // Need to generate something similar to: 7785 // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage 7786 // ; from TEB 7787 // mov ecx, dword [rel _tls_index]; Load index (from C runtime) 7788 // mov rcx, qword [rdx+rcx*8] 7789 // mov eax, .tls$:tlsvar 7790 // [rax+rcx] contains the address 7791 // Windows 64bit: gs:0x58 7792 // Windows 32bit: fs:__tls_array 7793 7794 // If GV is an alias then use the aliasee for determining 7795 // thread-localness. 7796 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV)) 7797 GV = GA->resolveAliasedGlobal(false); 7798 DebugLoc dl = GA->getDebugLoc(); 7799 SDValue Chain = DAG.getEntryNode(); 7800 7801 // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or 7802 // %gs:0x58 (64-bit). 7803 Value *Ptr = Constant::getNullValue(Subtarget->is64Bit() 7804 ? Type::getInt8PtrTy(*DAG.getContext(), 7805 256) 7806 : Type::getInt32PtrTy(*DAG.getContext(), 7807 257)); 7808 7809 SDValue ThreadPointer = DAG.getLoad(getPointerTy(), dl, Chain, 7810 Subtarget->is64Bit() 7811 ? DAG.getIntPtrConstant(0x58) 7812 : DAG.getExternalSymbol("_tls_array", 7813 getPointerTy()), 7814 MachinePointerInfo(Ptr), 7815 false, false, false, 0); 7816 7817 // Load the _tls_index variable 7818 SDValue IDX = DAG.getExternalSymbol("_tls_index", getPointerTy()); 7819 if (Subtarget->is64Bit()) 7820 IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, getPointerTy(), Chain, 7821 IDX, MachinePointerInfo(), MVT::i32, 7822 false, false, 0); 7823 else 7824 IDX = DAG.getLoad(getPointerTy(), dl, Chain, IDX, MachinePointerInfo(), 7825 false, false, false, 0); 7826 7827 SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()), 7828 getPointerTy()); 7829 IDX = DAG.getNode(ISD::SHL, dl, getPointerTy(), IDX, Scale); 7830 7831 SDValue res = DAG.getNode(ISD::ADD, dl, getPointerTy(), ThreadPointer, IDX); 7832 res = DAG.getLoad(getPointerTy(), dl, Chain, res, MachinePointerInfo(), 7833 false, false, false, 0); 7834 7835 // Get the offset of the start of the .tls section 7836 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, 7837 GA->getValueType(0), 7838 GA->getOffset(), X86II::MO_SECREL); 7839 SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), TGA); 7840 7841 // The address of the thread local variable is the add of the thread 7842 // pointer with the offset of the variable. 7843 return DAG.getNode(ISD::ADD, dl, getPointerTy(), res, Offset); 7844 } 7845 7846 llvm_unreachable("TLS not implemented for this target."); 7847} 7848 7849 7850/// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values 7851/// and take a 2 x i32 value to shift plus a shift amount.
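/// A rough sketch of the result for SHL_PARTS on a 32-bit target (hardware
/// shift amounts are masked to 5 bits, which is what makes this legal):
///   hi = (amt & 32) ? (lo << (amt & 31)) : SHLD(hi, lo, amt)
///   lo = (amt & 32) ? 0                  : (lo << amt)
/// with the selection done by CMOVs on the flags of (amt & 32); the
/// SRA/SRL_PARTS cases are the mirror image using SHRD.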
7852SDValue X86TargetLowering::LowerShiftParts(SDValue Op, SelectionDAG &DAG) const{ 7853 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 7854 EVT VT = Op.getValueType(); 7855 unsigned VTBits = VT.getSizeInBits(); 7856 DebugLoc dl = Op.getDebugLoc(); 7857 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS; 7858 SDValue ShOpLo = Op.getOperand(0); 7859 SDValue ShOpHi = Op.getOperand(1); 7860 SDValue ShAmt = Op.getOperand(2); 7861 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi, 7862 DAG.getConstant(VTBits - 1, MVT::i8)) 7863 : DAG.getConstant(0, VT); 7864 7865 SDValue Tmp2, Tmp3; 7866 if (Op.getOpcode() == ISD::SHL_PARTS) { 7867 Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt); 7868 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); 7869 } else { 7870 Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt); 7871 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, ShAmt); 7872 } 7873 7874 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt, 7875 DAG.getConstant(VTBits, MVT::i8)); 7876 SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32, 7877 AndNode, DAG.getConstant(0, MVT::i8)); 7878 7879 SDValue Hi, Lo; 7880 SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8); 7881 SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond }; 7882 SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond }; 7883 7884 if (Op.getOpcode() == ISD::SHL_PARTS) { 7885 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0, 4); 7886 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1, 4); 7887 } else { 7888 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0, 4); 7889 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1, 4); 7890 } 7891 7892 SDValue Ops[2] = { Lo, Hi }; 7893 return DAG.getMergeValues(Ops, 2, dl); 7894} 7895 7896SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op, 7897 SelectionDAG &DAG) const { 7898 EVT SrcVT = Op.getOperand(0).getValueType(); 7899 7900 if (SrcVT.isVector()) 7901 return SDValue(); 7902 7903 assert(SrcVT.getSimpleVT() <= MVT::i64 && SrcVT.getSimpleVT() >= MVT::i16 && 7904 "Unknown SINT_TO_FP to lower!"); 7905 7906 // These are really Legal; return the operand so the caller accepts it as 7907 // Legal. 
7908 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType())) 7909 return Op; 7910 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) && 7911 Subtarget->is64Bit()) { 7912 return Op; 7913 } 7914 7915 DebugLoc dl = Op.getDebugLoc(); 7916 unsigned Size = SrcVT.getSizeInBits()/8; 7917 MachineFunction &MF = DAG.getMachineFunction(); 7918 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false); 7919 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 7920 SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), 7921 StackSlot, 7922 MachinePointerInfo::getFixedStack(SSFI), 7923 false, false, 0); 7924 return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG); 7925} 7926 7927SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, 7928 SDValue StackSlot, 7929 SelectionDAG &DAG) const { 7930 // Build the FILD 7931 DebugLoc DL = Op.getDebugLoc(); 7932 SDVTList Tys; 7933 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType()); 7934 if (useSSE) 7935 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue); 7936 else 7937 Tys = DAG.getVTList(Op.getValueType(), MVT::Other); 7938 7939 unsigned ByteSize = SrcVT.getSizeInBits()/8; 7940 7941 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot); 7942 MachineMemOperand *MMO; 7943 if (FI) { 7944 int SSFI = FI->getIndex(); 7945 MMO = 7946 DAG.getMachineFunction() 7947 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 7948 MachineMemOperand::MOLoad, ByteSize, ByteSize); 7949 } else { 7950 MMO = cast<LoadSDNode>(StackSlot)->getMemOperand(); 7951 StackSlot = StackSlot.getOperand(1); 7952 } 7953 SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) }; 7954 SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG : 7955 X86ISD::FILD, DL, 7956 Tys, Ops, array_lengthof(Ops), 7957 SrcVT, MMO); 7958 7959 if (useSSE) { 7960 Chain = Result.getValue(1); 7961 SDValue InFlag = Result.getValue(2); 7962 7963 // FIXME: Currently the FST is flagged to the FILD_FLAG. This 7964 // shouldn't be necessary except that RFP cannot be live across 7965 // multiple blocks. When stackifier is fixed, they can be uncoupled. 7966 MachineFunction &MF = DAG.getMachineFunction(); 7967 unsigned SSFISize = Op.getValueType().getSizeInBits()/8; 7968 int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false); 7969 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 7970 Tys = DAG.getVTList(MVT::Other); 7971 SDValue Ops[] = { 7972 Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag 7973 }; 7974 MachineMemOperand *MMO = 7975 DAG.getMachineFunction() 7976 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 7977 MachineMemOperand::MOStore, SSFISize, SSFISize); 7978 7979 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys, 7980 Ops, array_lengthof(Ops), 7981 Op.getValueType(), MMO); 7982 Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot, 7983 MachinePointerInfo::getFixedStack(SSFI), 7984 false, false, false, 0); 7985 } 7986 7987 return Result; 7988} 7989 7990// LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion. 7991SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op, 7992 SelectionDAG &DAG) const { 7993 // This algorithm is not obvious. 
Here it is what we're trying to output: 7994 /* 7995 movq %rax, %xmm0 7996 punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U } 7997 subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 } 7998 #ifdef __SSE3__ 7999 haddpd %xmm0, %xmm0 8000 #else 8001 pshufd $0x4e, %xmm0, %xmm1 8002 addpd %xmm1, %xmm0 8003 #endif 8004 */ 8005 8006 DebugLoc dl = Op.getDebugLoc(); 8007 LLVMContext *Context = DAG.getContext(); 8008 8009 // Build some magic constants. 8010 const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 }; 8011 Constant *C0 = ConstantDataVector::get(*Context, CV0); 8012 SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16); 8013 8014 SmallVector<Constant*,2> CV1; 8015 CV1.push_back( 8016 ConstantFP::get(*Context, APFloat(APInt(64, 0x4330000000000000ULL)))); 8017 CV1.push_back( 8018 ConstantFP::get(*Context, APFloat(APInt(64, 0x4530000000000000ULL)))); 8019 Constant *C1 = ConstantVector::get(CV1); 8020 SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16); 8021 8022 // Load the 64-bit value into an XMM register. 8023 SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, 8024 Op.getOperand(0)); 8025 SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0, 8026 MachinePointerInfo::getConstantPool(), 8027 false, false, false, 16); 8028 SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32, 8029 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, XR1), 8030 CLod0); 8031 8032 SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1, 8033 MachinePointerInfo::getConstantPool(), 8034 false, false, false, 16); 8035 SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck1); 8036 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1); 8037 SDValue Result; 8038 8039 if (Subtarget->hasSSE3()) { 8040 // FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'. 8041 Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub); 8042 } else { 8043 SDValue S2F = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Sub); 8044 SDValue Shuffle = getTargetShuffleNode(X86ISD::PSHUFD, dl, MVT::v4i32, 8045 S2F, 0x4E, DAG); 8046 Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64, 8047 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Shuffle), 8048 Sub); 8049 } 8050 8051 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result, 8052 DAG.getIntPtrConstant(0)); 8053} 8054 8055// LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion. 8056SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op, 8057 SelectionDAG &DAG) const { 8058 DebugLoc dl = Op.getDebugLoc(); 8059 // FP constant to bias correct the final result. 8060 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), 8061 MVT::f64); 8062 8063 // Load the 32-bit value into an XMM register. 8064 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, 8065 Op.getOperand(0)); 8066 8067 // Zero out the upper parts of the register. 8068 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG); 8069 8070 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 8071 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load), 8072 DAG.getIntPtrConstant(0)); 8073 8074 // Or the load with the bias. 
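  // (This is the classic 2^52 trick: 0x4330000000000000 is the bit pattern of
  //  the double 2^52, whose 52-bit mantissa field can hold the 32-bit value
  //  exactly, so bitcast(bits(2^52) | zext(x)) - 2^52 == (double)x with no
  //  rounding error.)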
8075 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, 8076 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, 8077 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 8078 MVT::v2f64, Load)), 8079 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, 8080 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 8081 MVT::v2f64, Bias))); 8082 Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 8083 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or), 8084 DAG.getIntPtrConstant(0)); 8085 8086 // Subtract the bias. 8087 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias); 8088 8089 // Handle final rounding. 8090 EVT DestVT = Op.getValueType(); 8091 8092 if (DestVT.bitsLT(MVT::f64)) 8093 return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub, 8094 DAG.getIntPtrConstant(0)); 8095 if (DestVT.bitsGT(MVT::f64)) 8096 return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub); 8097 8098 // Handle final rounding. 8099 return Sub; 8100} 8101 8102SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op, 8103 SelectionDAG &DAG) const { 8104 SDValue N0 = Op.getOperand(0); 8105 EVT SVT = N0.getValueType(); 8106 DebugLoc dl = Op.getDebugLoc(); 8107 8108 assert((SVT == MVT::v4i8 || SVT == MVT::v4i16 || 8109 SVT == MVT::v8i8 || SVT == MVT::v8i16) && 8110 "Custom UINT_TO_FP is not supported!"); 8111 8112 EVT NVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, SVT.getVectorNumElements()); 8113 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), 8114 DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, N0)); 8115} 8116 8117SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op, 8118 SelectionDAG &DAG) const { 8119 SDValue N0 = Op.getOperand(0); 8120 DebugLoc dl = Op.getDebugLoc(); 8121 8122 if (Op.getValueType().isVector()) 8123 return lowerUINT_TO_FP_vec(Op, DAG); 8124 8125 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't 8126 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform 8127 // the optimization here. 8128 if (DAG.SignBitIsZero(N0)) 8129 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0); 8130 8131 EVT SrcVT = N0.getValueType(); 8132 EVT DstVT = Op.getValueType(); 8133 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64) 8134 return LowerUINT_TO_FP_i64(Op, DAG); 8135 if (SrcVT == MVT::i32 && X86ScalarSSEf64) 8136 return LowerUINT_TO_FP_i32(Op, DAG); 8137 if (Subtarget->is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32) 8138 return SDValue(); 8139 8140 // Make a 64-bit buffer, and use it to build an FILD. 8141 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64); 8142 if (SrcVT == MVT::i32) { 8143 SDValue WordOff = DAG.getConstant(4, getPointerTy()); 8144 SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl, 8145 getPointerTy(), StackSlot, WordOff); 8146 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), 8147 StackSlot, MachinePointerInfo(), 8148 false, false, 0); 8149 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32), 8150 OffsetSlot, MachinePointerInfo(), 8151 false, false, 0); 8152 SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG); 8153 return Fild; 8154 } 8155 8156 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP"); 8157 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), 8158 StackSlot, MachinePointerInfo(), 8159 false, false, 0); 8160 // For i64 source, we need to add the appropriate power of 2 if the input 8161 // was negative. 
This is the same as the optimization in 8162 // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here, 8163 // we must be careful to do the computation in x87 extended precision, not 8164 // in SSE. (The generic code can't know it's OK to do this, or how to.) 8165 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex(); 8166 MachineMemOperand *MMO = 8167 DAG.getMachineFunction() 8168 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 8169 MachineMemOperand::MOLoad, 8, 8); 8170 8171 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other); 8172 SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) }; 8173 SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops, 3, 8174 MVT::i64, MMO); 8175 8176 APInt FF(32, 0x5F800000ULL); 8177 8178 // Check whether the sign bit is set. 8179 SDValue SignSet = DAG.getSetCC(dl, getSetCCResultType(MVT::i64), 8180 Op.getOperand(0), DAG.getConstant(0, MVT::i64), 8181 ISD::SETLT); 8182 8183 // Build a 64-bit pair (0, FF) in the constant pool, with FF in the lo bits. 8184 SDValue FudgePtr = DAG.getConstantPool( 8185 ConstantInt::get(*DAG.getContext(), FF.zext(64)), 8186 getPointerTy()); 8187 8188 // Get a pointer to FF if the sign bit was set, or to 0 otherwise. 8189 SDValue Zero = DAG.getIntPtrConstant(0); 8190 SDValue Four = DAG.getIntPtrConstant(4); 8191 SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet, 8192 Zero, Four); 8193 FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset); 8194 8195 // Load the value out, extending it from f32 to f80. 8196 // FIXME: Avoid the extend by constructing the right constant pool? 8197 SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(), 8198 FudgePtr, MachinePointerInfo::getConstantPool(), 8199 MVT::f32, false, false, 4); 8200 // Extend everything to 80 bits to force it to be done on x87. 8201 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge); 8202 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0)); 8203} 8204 8205std::pair<SDValue,SDValue> X86TargetLowering:: 8206FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned, bool IsReplace) const { 8207 DebugLoc DL = Op.getDebugLoc(); 8208 8209 EVT DstTy = Op.getValueType(); 8210 8211 if (!IsSigned && !isIntegerTypeFTOL(DstTy)) { 8212 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT"); 8213 DstTy = MVT::i64; 8214 } 8215 8216 assert(DstTy.getSimpleVT() <= MVT::i64 && 8217 DstTy.getSimpleVT() >= MVT::i16 && 8218 "Unknown FP_TO_INT to lower!"); 8219 8220 // These are really Legal. 8221 if (DstTy == MVT::i32 && 8222 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) 8223 return std::make_pair(SDValue(), SDValue()); 8224 if (Subtarget->is64Bit() && 8225 DstTy == MVT::i64 && 8226 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) 8227 return std::make_pair(SDValue(), SDValue()); 8228 8229 // We lower FP->int64 either into FISTP64 followed by a load from a temporary 8230 // stack slot, or into the FTOL runtime function.
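  // A rough sketch of the FIST path (an SSE-resident value is spilled first
  // so the x87 unit can see it):
  //   movsd %xmm0, (tmp)   ; only if the value lives in an SSE register
  //   fldl  (tmp)
  //   fistpll (slot)       ; FP_TO_INT64_IN_MEM, rounding forced to truncate
  //   ... the caller reloads (slot)
  // The WIN_FTOL path instead calls the FTOL runtime routine and reads the
  // result back from EDX:EAX via the CopyFromReg pair further down.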
8231 MachineFunction &MF = DAG.getMachineFunction(); 8232 unsigned MemSize = DstTy.getSizeInBits()/8; 8233 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false); 8234 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 8235 8236 unsigned Opc; 8237 if (!IsSigned && isIntegerTypeFTOL(DstTy)) 8238 Opc = X86ISD::WIN_FTOL; 8239 else 8240 switch (DstTy.getSimpleVT().SimpleTy) { 8241 default: llvm_unreachable("Invalid FP_TO_SINT to lower!"); 8242 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break; 8243 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break; 8244 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break; 8245 } 8246 8247 SDValue Chain = DAG.getEntryNode(); 8248 SDValue Value = Op.getOperand(0); 8249 EVT TheVT = Op.getOperand(0).getValueType(); 8250 // FIXME This causes a redundant load/store if the SSE-class value is already 8251 // in memory, such as if it is on the callstack. 8252 if (isScalarFPTypeInSSEReg(TheVT)) { 8253 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!"); 8254 Chain = DAG.getStore(Chain, DL, Value, StackSlot, 8255 MachinePointerInfo::getFixedStack(SSFI), 8256 false, false, 0); 8257 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other); 8258 SDValue Ops[] = { 8259 Chain, StackSlot, DAG.getValueType(TheVT) 8260 }; 8261 8262 MachineMemOperand *MMO = 8263 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 8264 MachineMemOperand::MOLoad, MemSize, MemSize); 8265 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, 3, 8266 DstTy, MMO); 8267 Chain = Value.getValue(1); 8268 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false); 8269 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 8270 } 8271 8272 MachineMemOperand *MMO = 8273 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 8274 MachineMemOperand::MOStore, MemSize, MemSize); 8275 8276 if (Opc != X86ISD::WIN_FTOL) { 8277 // Build the FP_TO_INT*_IN_MEM 8278 SDValue Ops[] = { Chain, Value, StackSlot }; 8279 SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other), 8280 Ops, 3, DstTy, MMO); 8281 return std::make_pair(FIST, StackSlot); 8282 } else { 8283 SDValue ftol = DAG.getNode(X86ISD::WIN_FTOL, DL, 8284 DAG.getVTList(MVT::Other, MVT::Glue), 8285 Chain, Value); 8286 SDValue eax = DAG.getCopyFromReg(ftol, DL, X86::EAX, 8287 MVT::i32, ftol.getValue(1)); 8288 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), DL, X86::EDX, 8289 MVT::i32, eax.getValue(2)); 8290 SDValue Ops[] = { eax, edx }; 8291 SDValue pair = IsReplace 8292 ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops, 2) 8293 : DAG.getMergeValues(Ops, 2, DL); 8294 return std::make_pair(pair, SDValue()); 8295 } 8296} 8297 8298SDValue X86TargetLowering::lowerZERO_EXTEND(SDValue Op, SelectionDAG &DAG) const { 8299 DebugLoc DL = Op.getDebugLoc(); 8300 EVT VT = Op.getValueType(); 8301 SDValue In = Op.getOperand(0); 8302 EVT SVT = In.getValueType(); 8303 8304 if (!VT.is256BitVector() || !SVT.is128BitVector() || 8305 VT.getVectorNumElements() != SVT.getVectorNumElements()) 8306 return SDValue(); 8307 8308 assert(Subtarget->hasAVX() && "256-bit vector is observed without AVX!"); 8309 8310 // AVX2 has better support of integer extending. 
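  // Without AVX2 the 256-bit result is built in two halves (sketched for the
  // v8i16 -> v8i32 case handled below): the low four elements are
  // zero-extended directly, the high four are shuffled down into the low
  // 128 bits and then zero-extended, and the halves are concatenated.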
8311 if (Subtarget->hasAVX2()) 8312 return DAG.getNode(X86ISD::VZEXT, DL, VT, In); 8313 8314 SDValue Lo = DAG.getNode(X86ISD::VZEXT, DL, MVT::v4i32, In); 8315 static const int Mask[] = {4, 5, 6, 7, -1, -1, -1, -1}; 8316 SDValue Hi = DAG.getNode(X86ISD::VZEXT, DL, MVT::v4i32, 8317 DAG.getVectorShuffle(MVT::v8i16, DL, In, DAG.getUNDEF(MVT::v8i16), &Mask[0])); 8318 8319 return DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i32, Lo, Hi); 8320} 8321 8322SDValue X86TargetLowering::lowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { 8323 DebugLoc DL = Op.getDebugLoc(); 8324 EVT VT = Op.getValueType(); 8325 EVT SVT = Op.getOperand(0).getValueType(); 8326 8327 if (!VT.is128BitVector() || !SVT.is256BitVector() || 8328 VT.getVectorNumElements() != SVT.getVectorNumElements()) 8329 return SDValue(); 8330 8331 assert(Subtarget->hasAVX() && "256-bit vector is observed without AVX!"); 8332 8333 unsigned NumElems = VT.getVectorNumElements(); 8334 EVT NVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 8335 NumElems * 2); 8336 8337 SDValue In = Op.getOperand(0); 8338 SmallVector<int, 16> MaskVec(NumElems * 2, -1); 8339 // Prepare truncation shuffle mask 8340 for (unsigned i = 0; i != NumElems; ++i) 8341 MaskVec[i] = i * 2; 8342 SDValue V = DAG.getVectorShuffle(NVT, DL, 8343 DAG.getNode(ISD::BITCAST, DL, NVT, In), 8344 DAG.getUNDEF(NVT), &MaskVec[0]); 8345 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, 8346 DAG.getIntPtrConstant(0)); 8347} 8348 8349SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op, 8350 SelectionDAG &DAG) const { 8351 if (Op.getValueType().isVector()) { 8352 if (Op.getValueType() == MVT::v8i16) 8353 return DAG.getNode(ISD::TRUNCATE, Op.getDebugLoc(), Op.getValueType(), 8354 DAG.getNode(ISD::FP_TO_SINT, Op.getDebugLoc(), 8355 MVT::v8i32, Op.getOperand(0))); 8356 return SDValue(); 8357 } 8358 8359 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, 8360 /*IsSigned=*/ true, /*IsReplace=*/ false); 8361 SDValue FIST = Vals.first, StackSlot = Vals.second; 8362 // If FP_TO_INTHelper failed, the node is actually supposed to be Legal. 8363 if (FIST.getNode() == 0) return Op; 8364 8365 if (StackSlot.getNode()) 8366 // Load the result. 8367 return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(), 8368 FIST, StackSlot, MachinePointerInfo(), 8369 false, false, false, 0); 8370 8371 // The node is the result. 8372 return FIST; 8373} 8374 8375SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op, 8376 SelectionDAG &DAG) const { 8377 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, 8378 /*IsSigned=*/ false, /*IsReplace=*/ false); 8379 SDValue FIST = Vals.first, StackSlot = Vals.second; 8380 assert(FIST.getNode() && "Unexpected failure"); 8381 8382 if (StackSlot.getNode()) 8383 // Load the result. 8384 return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(), 8385 FIST, StackSlot, MachinePointerInfo(), 8386 false, false, false, 0); 8387 8388 // The node is the result. 
8389 return FIST; 8390} 8391 8392SDValue X86TargetLowering::lowerFP_EXTEND(SDValue Op, 8393 SelectionDAG &DAG) const { 8394 DebugLoc DL = Op.getDebugLoc(); 8395 EVT VT = Op.getValueType(); 8396 SDValue In = Op.getOperand(0); 8397 EVT SVT = In.getValueType(); 8398 8399 assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!"); 8400 8401 return DAG.getNode(X86ISD::VFPEXT, DL, VT, 8402 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32, 8403 In, DAG.getUNDEF(SVT))); 8404} 8405 8406SDValue X86TargetLowering::LowerFABS(SDValue Op, SelectionDAG &DAG) const { 8407 LLVMContext *Context = DAG.getContext(); 8408 DebugLoc dl = Op.getDebugLoc(); 8409 EVT VT = Op.getValueType(); 8410 EVT EltVT = VT; 8411 unsigned NumElts = VT == MVT::f64 ? 2 : 4; 8412 if (VT.isVector()) { 8413 EltVT = VT.getVectorElementType(); 8414 NumElts = VT.getVectorNumElements(); 8415 } 8416 Constant *C; 8417 if (EltVT == MVT::f64) 8418 C = ConstantFP::get(*Context, APFloat(APInt(64, ~(1ULL << 63)))); 8419 else 8420 C = ConstantFP::get(*Context, APFloat(APInt(32, ~(1U << 31)))); 8421 C = ConstantVector::getSplat(NumElts, C); 8422 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy()); 8423 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 8424 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 8425 MachinePointerInfo::getConstantPool(), 8426 false, false, false, Alignment); 8427 if (VT.isVector()) { 8428 MVT ANDVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64; 8429 return DAG.getNode(ISD::BITCAST, dl, VT, 8430 DAG.getNode(ISD::AND, dl, ANDVT, 8431 DAG.getNode(ISD::BITCAST, dl, ANDVT, 8432 Op.getOperand(0)), 8433 DAG.getNode(ISD::BITCAST, dl, ANDVT, Mask))); 8434 } 8435 return DAG.getNode(X86ISD::FAND, dl, VT, Op.getOperand(0), Mask); 8436} 8437 8438SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) const { 8439 LLVMContext *Context = DAG.getContext(); 8440 DebugLoc dl = Op.getDebugLoc(); 8441 EVT VT = Op.getValueType(); 8442 EVT EltVT = VT; 8443 unsigned NumElts = VT == MVT::f64 ? 2 : 4; 8444 if (VT.isVector()) { 8445 EltVT = VT.getVectorElementType(); 8446 NumElts = VT.getVectorNumElements(); 8447 } 8448 Constant *C; 8449 if (EltVT == MVT::f64) 8450 C = ConstantFP::get(*Context, APFloat(APInt(64, 1ULL << 63))); 8451 else 8452 C = ConstantFP::get(*Context, APFloat(APInt(32, 1U << 31))); 8453 C = ConstantVector::getSplat(NumElts, C); 8454 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy()); 8455 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 8456 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 8457 MachinePointerInfo::getConstantPool(), 8458 false, false, false, Alignment); 8459 if (VT.isVector()) { 8460 MVT XORVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64; 8461 return DAG.getNode(ISD::BITCAST, dl, VT, 8462 DAG.getNode(ISD::XOR, dl, XORVT, 8463 DAG.getNode(ISD::BITCAST, dl, XORVT, 8464 Op.getOperand(0)), 8465 DAG.getNode(ISD::BITCAST, dl, XORVT, Mask))); 8466 } 8467 8468 return DAG.getNode(X86ISD::FXOR, dl, VT, Op.getOperand(0), Mask); 8469} 8470 8471SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { 8472 LLVMContext *Context = DAG.getContext(); 8473 SDValue Op0 = Op.getOperand(0); 8474 SDValue Op1 = Op.getOperand(1); 8475 DebugLoc dl = Op.getDebugLoc(); 8476 EVT VT = Op.getValueType(); 8477 EVT SrcVT = Op1.getValueType(); 8478 8479 // If second operand is smaller, extend it first. 
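  // (Overall sketch: copysign is lowered with constant-pool masks as
  //    result = (Op0 & ~signbit) | (Op1 & signbit)
  //  the FP_EXTEND/FP_ROUND below merely bring Op1 to Op0's width first.)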
8480 if (SrcVT.bitsLT(VT)) { 8481 Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1); 8482 SrcVT = VT; 8483 } 8484 // And if it is bigger, shrink it first. 8485 if (SrcVT.bitsGT(VT)) { 8486 Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1)); 8487 SrcVT = VT; 8488 } 8489 8490 // At this point the operands and the result should have the same 8491 // type, and that won't be f80 since that is not custom lowered. 8492 8493 // First get the sign bit of second operand. 8494 SmallVector<Constant*,4> CV; 8495 if (SrcVT == MVT::f64) { 8496 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 1ULL << 63)))); 8497 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 0)))); 8498 } else { 8499 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 1U << 31)))); 8500 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 8501 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 8502 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 8503 } 8504 Constant *C = ConstantVector::get(CV); 8505 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 8506 SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx, 8507 MachinePointerInfo::getConstantPool(), 8508 false, false, false, 16); 8509 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1); 8510 8511 // Shift sign bit right or left if the two operands have different types. 8512 if (SrcVT.bitsGT(VT)) { 8513 // Op0 is MVT::f32, Op1 is MVT::f64. 8514 SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, SignBit); 8515 SignBit = DAG.getNode(X86ISD::FSRL, dl, MVT::v2f64, SignBit, 8516 DAG.getConstant(32, MVT::i32)); 8517 SignBit = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, SignBit); 8518 SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, SignBit, 8519 DAG.getIntPtrConstant(0)); 8520 } 8521 8522 // Clear first operand sign bit. 8523 CV.clear(); 8524 if (VT == MVT::f64) { 8525 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, ~(1ULL << 63))))); 8526 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 0)))); 8527 } else { 8528 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, ~(1U << 31))))); 8529 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 8530 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 8531 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 8532 } 8533 C = ConstantVector::get(CV); 8534 CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 8535 SDValue Mask2 = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 8536 MachinePointerInfo::getConstantPool(), 8537 false, false, false, 16); 8538 SDValue Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Mask2); 8539 8540 // Or the value with the sign bit. 8541 return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit); 8542} 8543 8544static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) { 8545 SDValue N0 = Op.getOperand(0); 8546 DebugLoc dl = Op.getDebugLoc(); 8547 EVT VT = Op.getValueType(); 8548 8549 // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1). 8550 SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0, 8551 DAG.getConstant(1, VT)); 8552 return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT)); 8553} 8554 8555// LowerVectorAllZeroTest - Check whether an OR'd tree is PTEST-able. 
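// Loosely, the tree being recognized is an is-all-zero test written as
//   (or (extractelt %v, 0), (or (extractelt %v, 1), ...))
// where the extracts cover every lane of one or more full vectors; such a
// tree collapses to a single PTEST of the (OR of the) source vectors against
// itself, with the caller consuming the resulting EFLAGS.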
8556// 8557SDValue X86TargetLowering::LowerVectorAllZeroTest(SDValue Op, SelectionDAG &DAG) const { 8558 assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree."); 8559 8560 if (!Subtarget->hasSSE41()) 8561 return SDValue(); 8562 8563 if (!Op->hasOneUse()) 8564 return SDValue(); 8565 8566 SDNode *N = Op.getNode(); 8567 DebugLoc DL = N->getDebugLoc(); 8568 8569 SmallVector<SDValue, 8> Opnds; 8570 DenseMap<SDValue, unsigned> VecInMap; 8571 EVT VT = MVT::Other; 8572 8573 // Recognize a special case where a vector is casted into wide integer to 8574 // test all 0s. 8575 Opnds.push_back(N->getOperand(0)); 8576 Opnds.push_back(N->getOperand(1)); 8577 8578 for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) { 8579 SmallVector<SDValue, 8>::const_iterator I = Opnds.begin() + Slot; 8580 // BFS traverse all OR'd operands. 8581 if (I->getOpcode() == ISD::OR) { 8582 Opnds.push_back(I->getOperand(0)); 8583 Opnds.push_back(I->getOperand(1)); 8584 // Re-evaluate the number of nodes to be traversed. 8585 e += 2; // 2 more nodes (LHS and RHS) are pushed. 8586 continue; 8587 } 8588 8589 // Quit if a non-EXTRACT_VECTOR_ELT 8590 if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 8591 return SDValue(); 8592 8593 // Quit if without a constant index. 8594 SDValue Idx = I->getOperand(1); 8595 if (!isa<ConstantSDNode>(Idx)) 8596 return SDValue(); 8597 8598 SDValue ExtractedFromVec = I->getOperand(0); 8599 DenseMap<SDValue, unsigned>::iterator M = VecInMap.find(ExtractedFromVec); 8600 if (M == VecInMap.end()) { 8601 VT = ExtractedFromVec.getValueType(); 8602 // Quit if not 128/256-bit vector. 8603 if (!VT.is128BitVector() && !VT.is256BitVector()) 8604 return SDValue(); 8605 // Quit if not the same type. 8606 if (VecInMap.begin() != VecInMap.end() && 8607 VT != VecInMap.begin()->first.getValueType()) 8608 return SDValue(); 8609 M = VecInMap.insert(std::make_pair(ExtractedFromVec, 0)).first; 8610 } 8611 M->second |= 1U << cast<ConstantSDNode>(Idx)->getZExtValue(); 8612 } 8613 8614 assert((VT.is128BitVector() || VT.is256BitVector()) && 8615 "Not extracted from 128-/256-bit vector."); 8616 8617 unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U; 8618 SmallVector<SDValue, 8> VecIns; 8619 8620 for (DenseMap<SDValue, unsigned>::const_iterator 8621 I = VecInMap.begin(), E = VecInMap.end(); I != E; ++I) { 8622 // Quit if not all elements are used. 8623 if (I->second != FullMask) 8624 return SDValue(); 8625 VecIns.push_back(I->first); 8626 } 8627 8628 EVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64; 8629 8630 // Cast all vectors into TestVT for PTEST. 8631 for (unsigned i = 0, e = VecIns.size(); i < e; ++i) 8632 VecIns[i] = DAG.getNode(ISD::BITCAST, DL, TestVT, VecIns[i]); 8633 8634 // If more than one full vectors are evaluated, OR them first before PTEST. 8635 for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) { 8636 // Each iteration will OR 2 nodes and append the result until there is only 8637 // 1 node left, i.e. the final OR'd value of all vectors. 8638 SDValue LHS = VecIns[Slot]; 8639 SDValue RHS = VecIns[Slot + 1]; 8640 VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS)); 8641 } 8642 8643 return DAG.getNode(X86ISD::PTEST, DL, MVT::i32, 8644 VecIns.back(), VecIns.back()); 8645} 8646 8647/// Emit nodes that will be selected as "test Op0,Op0", or something 8648/// equivalent. 
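/// For example (illustrative only), when the operand is an arithmetic node
/// such as (add x, y) whose other users do not force it into a
/// load-modify-store pattern, no separate TEST is emitted; the node is
/// re-issued in its EFLAGS-producing X86 form (or its existing flag result
/// is reused) and the flags output feeds the SETCC directly.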
8649SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, 8650 SelectionDAG &DAG) const { 8651 DebugLoc dl = Op.getDebugLoc(); 8652 8653 // CF and OF aren't always set the way we want. Determine which 8654 // of these we need. 8655 bool NeedCF = false; 8656 bool NeedOF = false; 8657 switch (X86CC) { 8658 default: break; 8659 case X86::COND_A: case X86::COND_AE: 8660 case X86::COND_B: case X86::COND_BE: 8661 NeedCF = true; 8662 break; 8663 case X86::COND_G: case X86::COND_GE: 8664 case X86::COND_L: case X86::COND_LE: 8665 case X86::COND_O: case X86::COND_NO: 8666 NeedOF = true; 8667 break; 8668 } 8669 8670 // See if we can use the EFLAGS value from the operand instead of 8671 // doing a separate TEST. TEST always sets OF and CF to 0, so unless 8672 // we prove that the arithmetic won't overflow, we can't use OF or CF. 8673 if (Op.getResNo() != 0 || NeedOF || NeedCF) 8674 // Emit a CMP with 0, which is the TEST pattern. 8675 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op, 8676 DAG.getConstant(0, Op.getValueType())); 8677 8678 unsigned Opcode = 0; 8679 unsigned NumOperands = 0; 8680 8681 // Truncate operations may prevent the merge of the SETCC instruction 8682 // and the arithmetic instruction before it. Attempt to truncate the operands 8683 // of the arithmetic instruction and use a reduced bit-width instruction. 8684 bool NeedTruncation = false; 8685 SDValue ArithOp = Op; 8686 if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) { 8687 SDValue Arith = Op->getOperand(0); 8688 // Both the trunc and the arithmetic op need to have one user each. 8689 if (Arith->hasOneUse()) 8690 switch (Arith.getOpcode()) { 8691 default: break; 8692 case ISD::ADD: 8693 case ISD::SUB: 8694 case ISD::AND: 8695 case ISD::OR: 8696 case ISD::XOR: { 8697 NeedTruncation = true; 8698 ArithOp = Arith; 8699 } 8700 } 8701 } 8702 8703 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation 8704 // which may be the result of a CAST. We use the variable 'Op', which is the 8705 // non-casted variable, when we check for possible users. 8706 switch (ArithOp.getOpcode()) { 8707 case ISD::ADD: 8708 // Due to an isel shortcoming, be conservative if this add is likely to be 8709 // selected as part of a load-modify-store instruction. When the root node 8710 // in a match is a store, isel doesn't know how to remap non-chain non-flag 8711 // uses of other nodes in the match, such as the ADD in this case. This 8712 // leads to the ADD being left around and reselected, with the result being 8713 // two adds in the output. Alas, even if none of our users are stores, that 8714 // doesn't prove we're O.K. Ergo, if we have any parents that aren't 8715 // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require 8716 // climbing the DAG back to the root, and it doesn't seem to be worth the 8717 // effort. 8718 for (SDNode::use_iterator UI = Op.getNode()->use_begin(), 8719 UE = Op.getNode()->use_end(); UI != UE; ++UI) 8720 if (UI->getOpcode() != ISD::CopyToReg && 8721 UI->getOpcode() != ISD::SETCC && 8722 UI->getOpcode() != ISD::STORE) 8723 goto default_case; 8724 8725 if (ConstantSDNode *C = 8726 dyn_cast<ConstantSDNode>(ArithOp.getNode()->getOperand(1))) { 8727 // An add of one will be selected as an INC. 8728 if (C->getAPIntValue() == 1) { 8729 Opcode = X86ISD::INC; 8730 NumOperands = 1; 8731 break; 8732 } 8733 8734 // An add of negative one (subtract of one) will be selected as a DEC.
8735 if (C->getAPIntValue().isAllOnesValue()) { 8736 Opcode = X86ISD::DEC; 8737 NumOperands = 1; 8738 break; 8739 } 8740 } 8741 8742 // Otherwise use a regular EFLAGS-setting add. 8743 Opcode = X86ISD::ADD; 8744 NumOperands = 2; 8745 break; 8746 case ISD::AND: { 8747 // If the primary 'and' result isn't used, don't bother using X86ISD::AND, 8748 // because a TEST instruction will be better. 8749 bool NonFlagUse = false; 8750 for (SDNode::use_iterator UI = Op.getNode()->use_begin(), 8751 UE = Op.getNode()->use_end(); UI != UE; ++UI) { 8752 SDNode *User = *UI; 8753 unsigned UOpNo = UI.getOperandNo(); 8754 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) { 8755 // Look past the truncate. 8756 UOpNo = User->use_begin().getOperandNo(); 8757 User = *User->use_begin(); 8758 } 8759 8760 if (User->getOpcode() != ISD::BRCOND && 8761 User->getOpcode() != ISD::SETCC && 8762 !(User->getOpcode() == ISD::SELECT && UOpNo == 0)) { 8763 NonFlagUse = true; 8764 break; 8765 } 8766 } 8767 8768 if (!NonFlagUse) 8769 break; 8770 } 8771 // FALL THROUGH 8772 case ISD::SUB: 8773 case ISD::OR: 8774 case ISD::XOR: 8775 // Due to the ISEL shortcoming noted above, be conservative if this op is 8776 // likely to be selected as part of a load-modify-store instruction. 8777 for (SDNode::use_iterator UI = Op.getNode()->use_begin(), 8778 UE = Op.getNode()->use_end(); UI != UE; ++UI) 8779 if (UI->getOpcode() == ISD::STORE) 8780 goto default_case; 8781 8782 // Otherwise use a regular EFLAGS-setting instruction. 8783 switch (ArithOp.getOpcode()) { 8784 default: llvm_unreachable("unexpected operator!"); 8785 case ISD::SUB: Opcode = X86ISD::SUB; break; 8786 case ISD::XOR: Opcode = X86ISD::XOR; break; 8787 case ISD::AND: Opcode = X86ISD::AND; break; 8788 case ISD::OR: { 8789 if (!NeedTruncation && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) { 8790 SDValue EFLAGS = LowerVectorAllZeroTest(Op, DAG); 8791 if (EFLAGS.getNode()) 8792 return EFLAGS; 8793 } 8794 Opcode = X86ISD::OR; 8795 break; 8796 } 8797 } 8798 8799 NumOperands = 2; 8800 break; 8801 case X86ISD::ADD: 8802 case X86ISD::SUB: 8803 case X86ISD::INC: 8804 case X86ISD::DEC: 8805 case X86ISD::OR: 8806 case X86ISD::XOR: 8807 case X86ISD::AND: 8808 return SDValue(Op.getNode(), 1); 8809 default: 8810 default_case: 8811 break; 8812 } 8813 8814 // If we found that truncation is beneficial, perform the truncation and 8815 // update 'Op'. 8816 if (NeedTruncation) { 8817 EVT VT = Op.getValueType(); 8818 SDValue WideVal = Op->getOperand(0); 8819 EVT WideVT = WideVal.getValueType(); 8820 unsigned ConvertedOp = 0; 8821 // Use a target machine opcode to prevent further DAGCombine 8822 // optimizations that may separate the arithmetic operations 8823 // from the setcc node. 8824 switch (WideVal.getOpcode()) { 8825 default: break; 8826 case ISD::ADD: ConvertedOp = X86ISD::ADD; break; 8827 case ISD::SUB: ConvertedOp = X86ISD::SUB; break; 8828 case ISD::AND: ConvertedOp = X86ISD::AND; break; 8829 case ISD::OR: ConvertedOp = X86ISD::OR; break; 8830 case ISD::XOR: ConvertedOp = X86ISD::XOR; break; 8831 } 8832 8833 if (ConvertedOp) { 8834 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 8835 if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) { 8836 SDValue V0 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(0)); 8837 SDValue V1 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(1)); 8838 Op = DAG.getNode(ConvertedOp, dl, VT, V0, V1); 8839 } 8840 } 8841 } 8842 8843 if (Opcode == 0) 8844 // Emit a CMP with 0, which is the TEST pattern.
8845 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op, 8846 DAG.getConstant(0, Op.getValueType())); 8847 8848 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); 8849 SmallVector<SDValue, 4> Ops; 8850 for (unsigned i = 0; i != NumOperands; ++i) 8851 Ops.push_back(Op.getOperand(i)); 8852 8853 SDValue New = DAG.getNode(Opcode, dl, VTs, &Ops[0], NumOperands); 8854 DAG.ReplaceAllUsesWith(Op, New); 8855 return SDValue(New.getNode(), 1); 8856} 8857 8858/// Emit nodes that will be selected as "cmp Op0,Op1", or something 8859/// equivalent. 8860SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC, 8861 SelectionDAG &DAG) const { 8862 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1)) 8863 if (C->getAPIntValue() == 0) 8864 return EmitTest(Op0, X86CC, DAG); 8865 8866 DebugLoc dl = Op0.getDebugLoc(); 8867 if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 || 8868 Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) { 8869 // Use SUB instead of CMP to enable CSE between SUB and CMP. 8870 SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32); 8871 SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs, 8872 Op0, Op1); 8873 return SDValue(Sub.getNode(), 1); 8874 } 8875 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1); 8876} 8877 8878/// Convert a comparison if required by the subtarget. 8879SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp, 8880 SelectionDAG &DAG) const { 8881 // If the subtarget does not support the FUCOMI instruction, floating-point 8882 // comparisons have to be converted. 8883 if (Subtarget->hasCMov() || 8884 Cmp.getOpcode() != X86ISD::CMP || 8885 !Cmp.getOperand(0).getValueType().isFloatingPoint() || 8886 !Cmp.getOperand(1).getValueType().isFloatingPoint()) 8887 return Cmp; 8888 8889 // The instruction selector will select an FUCOM instruction instead of 8890 // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence 8891 // build an SDNode sequence that transfers the result from FPSW into EFLAGS: 8892 // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8)))) 8893 DebugLoc dl = Cmp.getDebugLoc(); 8894 SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp); 8895 SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW); 8896 SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW, 8897 DAG.getConstant(8, MVT::i8)); 8898 SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl); 8899 return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl); 8900} 8901 8902/// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node 8903/// if it's possible. 8904SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC, 8905 DebugLoc dl, SelectionDAG &DAG) const { 8906 SDValue Op0 = And.getOperand(0); 8907 SDValue Op1 = And.getOperand(1); 8908 if (Op0.getOpcode() == ISD::TRUNCATE) 8909 Op0 = Op0.getOperand(0); 8910 if (Op1.getOpcode() == ISD::TRUNCATE) 8911 Op1 = Op1.getOperand(0); 8912 8913 SDValue LHS, RHS; 8914 if (Op1.getOpcode() == ISD::SHL) 8915 std::swap(Op0, Op1); 8916 if (Op0.getOpcode() == ISD::SHL) { 8917 if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0))) 8918 if (And00C->getZExtValue() == 1) { 8919 // If we looked past a truncate, check that it's only truncating away 8920 // known zeros. 
8921 unsigned BitWidth = Op0.getValueSizeInBits(); 8922 unsigned AndBitWidth = And.getValueSizeInBits(); 8923 if (BitWidth > AndBitWidth) { 8924 APInt Zeros, Ones; 8925 DAG.ComputeMaskedBits(Op0, Zeros, Ones); 8926 if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth) 8927 return SDValue(); 8928 } 8929 LHS = Op1; 8930 RHS = Op0.getOperand(1); 8931 } 8932 } else if (Op1.getOpcode() == ISD::Constant) { 8933 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1); 8934 uint64_t AndRHSVal = AndRHS->getZExtValue(); 8935 SDValue AndLHS = Op0; 8936 8937 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) { 8938 LHS = AndLHS.getOperand(0); 8939 RHS = AndLHS.getOperand(1); 8940 } 8941 8942 // Use BT if the immediate can't be encoded in a TEST instruction. 8943 if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) { 8944 LHS = AndLHS; 8945 RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), LHS.getValueType()); 8946 } 8947 } 8948 8949 if (LHS.getNode()) { 8950 // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT 8951 // instruction. Since the shift amount is in-range-or-undefined, we know 8952 // that doing a bittest on the i32 value is ok. We extend to i32 because 8953 // the encoding for the i16 version is larger than the i32 version. 8954 // Also promote i16 to i32 for performance / code size reason. 8955 if (LHS.getValueType() == MVT::i8 || 8956 LHS.getValueType() == MVT::i16) 8957 LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS); 8958 8959 // If the operand types disagree, extend the shift amount to match. Since 8960 // BT ignores high bits (like shifts) we can use anyextend. 8961 if (LHS.getValueType() != RHS.getValueType()) 8962 RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS); 8963 8964 SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS); 8965 unsigned Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B; 8966 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 8967 DAG.getConstant(Cond, MVT::i8), BT); 8968 } 8969 8970 return SDValue(); 8971} 8972 8973SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { 8974 8975 if (Op.getValueType().isVector()) return LowerVSETCC(Op, DAG); 8976 8977 assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer"); 8978 SDValue Op0 = Op.getOperand(0); 8979 SDValue Op1 = Op.getOperand(1); 8980 DebugLoc dl = Op.getDebugLoc(); 8981 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 8982 8983 // Optimize to BT if possible. 8984 // Lower (X & (1 << N)) == 0 to BT(X, N). 8985 // Lower ((X >>u N) & 1) != 0 to BT(X, N). 8986 // Lower ((X >>s N) & 1) != 0 to BT(X, N). 8987 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() && 8988 Op1.getOpcode() == ISD::Constant && 8989 cast<ConstantSDNode>(Op1)->isNullValue() && 8990 (CC == ISD::SETEQ || CC == ISD::SETNE)) { 8991 SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG); 8992 if (NewSetCC.getNode()) 8993 return NewSetCC; 8994 } 8995 8996 // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of 8997 // these. 8998 if (Op1.getOpcode() == ISD::Constant && 8999 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 || 9000 cast<ConstantSDNode>(Op1)->isNullValue()) && 9001 (CC == ISD::SETEQ || CC == ISD::SETNE)) { 9002 9003 // If the input is a setcc, then reuse the input setcc or use a new one with 9004 // the inverted condition. 
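    // Sketch of the two EQ cases (the NE cases just flip which side inverts):
    //   (setcc (x86_setcc cc, flags), 1, eq) -> (x86_setcc cc,  flags)
    //   (setcc (x86_setcc cc, flags), 0, eq) -> (x86_setcc !cc, flags)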
9005 if (Op0.getOpcode() == X86ISD::SETCC) { 9006 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0); 9007 bool Invert = (CC == ISD::SETNE) ^ 9008 cast<ConstantSDNode>(Op1)->isNullValue(); 9009 if (!Invert) return Op0; 9010 9011 CCode = X86::GetOppositeBranchCondition(CCode); 9012 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 9013 DAG.getConstant(CCode, MVT::i8), Op0.getOperand(1)); 9014 } 9015 } 9016 9017 bool isFP = Op1.getValueType().isFloatingPoint(); 9018 unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG); 9019 if (X86CC == X86::COND_INVALID) 9020 return SDValue(); 9021 9022 SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, DAG); 9023 EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG); 9024 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 9025 DAG.getConstant(X86CC, MVT::i8), EFLAGS); 9026} 9027 9028// Lower256IntVSETCC - Break a VSETCC 256-bit integer VSETCC into two new 128 9029// ones, and then concatenate the result back. 9030static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) { 9031 EVT VT = Op.getValueType(); 9032 9033 assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC && 9034 "Unsupported value type for operation"); 9035 9036 unsigned NumElems = VT.getVectorNumElements(); 9037 DebugLoc dl = Op.getDebugLoc(); 9038 SDValue CC = Op.getOperand(2); 9039 9040 // Extract the LHS vectors 9041 SDValue LHS = Op.getOperand(0); 9042 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl); 9043 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl); 9044 9045 // Extract the RHS vectors 9046 SDValue RHS = Op.getOperand(1); 9047 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl); 9048 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl); 9049 9050 // Issue the operation on the smaller types and concatenate the result back 9051 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 9052 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); 9053 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, 9054 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC), 9055 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC)); 9056} 9057 9058 9059SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) const { 9060 SDValue Cond; 9061 SDValue Op0 = Op.getOperand(0); 9062 SDValue Op1 = Op.getOperand(1); 9063 SDValue CC = Op.getOperand(2); 9064 EVT VT = Op.getValueType(); 9065 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 9066 bool isFP = Op.getOperand(1).getValueType().isFloatingPoint(); 9067 DebugLoc dl = Op.getDebugLoc(); 9068 9069 if (isFP) { 9070#ifndef NDEBUG 9071 EVT EltVT = Op0.getValueType().getVectorElementType(); 9072 assert(EltVT == MVT::f32 || EltVT == MVT::f64); 9073#endif 9074 9075 unsigned SSECC; 9076 bool Swap = false; 9077 9078 // SSE Condition code mapping: 9079 // 0 - EQ 9080 // 1 - LT 9081 // 2 - LE 9082 // 3 - UNORD 9083 // 4 - NEQ 9084 // 5 - NLT 9085 // 6 - NLE 9086 // 7 - ORD 9087 switch (SetCCOpcode) { 9088 default: llvm_unreachable("Unexpected SETCC condition"); 9089 case ISD::SETOEQ: 9090 case ISD::SETEQ: SSECC = 0; break; 9091 case ISD::SETOGT: 9092 case ISD::SETGT: Swap = true; // Fallthrough 9093 case ISD::SETLT: 9094 case ISD::SETOLT: SSECC = 1; break; 9095 case ISD::SETOGE: 9096 case ISD::SETGE: Swap = true; // Fallthrough 9097 case ISD::SETLE: 9098 case ISD::SETOLE: SSECC = 2; break; 9099 case ISD::SETUO: SSECC = 3; break; 9100 case ISD::SETUNE: 9101 case ISD::SETNE: SSECC = 4; break; 9102 case ISD::SETULE: Swap = true; // Fallthrough 9103 case ISD::SETUGE: SSECC = 5; break; 9104 case ISD::SETULT: Swap = true; // 
Fallthrough 9105 case ISD::SETUGT: SSECC = 6; break; 9106 case ISD::SETO: SSECC = 7; break; 9107 case ISD::SETUEQ: 9108 case ISD::SETONE: SSECC = 8; break; 9109 } 9110 if (Swap) 9111 std::swap(Op0, Op1); 9112 9113 // In the two special cases we can't handle, emit two comparisons. 9114 if (SSECC == 8) { 9115 unsigned CC0, CC1; 9116 unsigned CombineOpc; 9117 if (SetCCOpcode == ISD::SETUEQ) { 9118 CC0 = 3; CC1 = 0; CombineOpc = ISD::OR; 9119 } else { 9120 assert(SetCCOpcode == ISD::SETONE); 9121 CC0 = 7; CC1 = 4; CombineOpc = ISD::AND; 9122 } 9123 9124 SDValue Cmp0 = DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1, 9125 DAG.getConstant(CC0, MVT::i8)); 9126 SDValue Cmp1 = DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1, 9127 DAG.getConstant(CC1, MVT::i8)); 9128 return DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1); 9129 } 9130 // Handle all other FP comparisons here. 9131 return DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1, 9132 DAG.getConstant(SSECC, MVT::i8)); 9133 } 9134 9135 // Break 256-bit integer vector compare into smaller ones. 9136 if (VT.is256BitVector() && !Subtarget->hasAVX2()) 9137 return Lower256IntVSETCC(Op, DAG); 9138 9139 // We are handling one of the integer comparisons here. Since SSE only has 9140 // GT and EQ comparisons for integer, swapping operands and multiple 9141 // operations may be required for some comparisons. 9142 unsigned Opc; 9143 bool Swap = false, Invert = false, FlipSigns = false; 9144 9145 switch (SetCCOpcode) { 9146 default: llvm_unreachable("Unexpected SETCC condition"); 9147 case ISD::SETNE: Invert = true; 9148 case ISD::SETEQ: Opc = X86ISD::PCMPEQ; break; 9149 case ISD::SETLT: Swap = true; 9150 case ISD::SETGT: Opc = X86ISD::PCMPGT; break; 9151 case ISD::SETGE: Swap = true; 9152 case ISD::SETLE: Opc = X86ISD::PCMPGT; Invert = true; break; 9153 case ISD::SETULT: Swap = true; 9154 case ISD::SETUGT: Opc = X86ISD::PCMPGT; FlipSigns = true; break; 9155 case ISD::SETUGE: Swap = true; 9156 case ISD::SETULE: Opc = X86ISD::PCMPGT; FlipSigns = true; Invert = true; break; 9157 } 9158 if (Swap) 9159 std::swap(Op0, Op1); 9160 9161 // Check that the operation in question is available (most are plain SSE2, 9162 // but PCMPGTQ and PCMPEQQ have different requirements). 9163 if (VT == MVT::v2i64) { 9164 if (Opc == X86ISD::PCMPGT && !Subtarget->hasSSE42()) 9165 return SDValue(); 9166 if (Opc == X86ISD::PCMPEQ && !Subtarget->hasSSE41()) 9167 return SDValue(); 9168 } 9169 9170 // Since SSE has no unsigned integer comparisons, we need to flip the sign 9171 // bits of the inputs before performing those operations. 9172 if (FlipSigns) { 9173 EVT EltVT = VT.getVectorElementType(); 9174 SDValue SignBit = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), 9175 EltVT); 9176 std::vector<SDValue> SignBits(VT.getVectorNumElements(), SignBit); 9177 SDValue SignVec = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &SignBits[0], 9178 SignBits.size()); 9179 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SignVec); 9180 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SignVec); 9181 } 9182 9183 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 9184 9185 // If the logical-not of the result is required, perform that now. 9186 if (Invert) 9187 Result = DAG.getNOT(dl, Result, VT); 9188 9189 return Result; 9190} 9191 9192// isX86LogicalCmp - Return true if opcode is a X86 logical comparison. 
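// That is, a node which (possibly as a secondary result) produces an EFLAGS
// value that SETCC, CMOV and BRCOND can consume directly without an extra
// TEST or CMP.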
9193static bool isX86LogicalCmp(SDValue Op) { 9194 unsigned Opc = Op.getNode()->getOpcode(); 9195 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI || 9196 Opc == X86ISD::SAHF) 9197 return true; 9198 if (Op.getResNo() == 1 && 9199 (Opc == X86ISD::ADD || 9200 Opc == X86ISD::SUB || 9201 Opc == X86ISD::ADC || 9202 Opc == X86ISD::SBB || 9203 Opc == X86ISD::SMUL || 9204 Opc == X86ISD::UMUL || 9205 Opc == X86ISD::INC || 9206 Opc == X86ISD::DEC || 9207 Opc == X86ISD::OR || 9208 Opc == X86ISD::XOR || 9209 Opc == X86ISD::AND)) 9210 return true; 9211 9212 if (Op.getResNo() == 2 && Opc == X86ISD::UMUL) 9213 return true; 9214 9215 return false; 9216} 9217 9218static bool isZero(SDValue V) { 9219 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V); 9220 return C && C->isNullValue(); 9221} 9222 9223static bool isAllOnes(SDValue V) { 9224 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V); 9225 return C && C->isAllOnesValue(); 9226} 9227 9228static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) { 9229 if (V.getOpcode() != ISD::TRUNCATE) 9230 return false; 9231 9232 SDValue VOp0 = V.getOperand(0); 9233 unsigned InBits = VOp0.getValueSizeInBits(); 9234 unsigned Bits = V.getValueSizeInBits(); 9235 return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits)); 9236} 9237 9238SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 9239 bool addTest = true; 9240 SDValue Cond = Op.getOperand(0); 9241 SDValue Op1 = Op.getOperand(1); 9242 SDValue Op2 = Op.getOperand(2); 9243 DebugLoc DL = Op.getDebugLoc(); 9244 SDValue CC; 9245 9246 if (Cond.getOpcode() == ISD::SETCC) { 9247 SDValue NewCond = LowerSETCC(Cond, DAG); 9248 if (NewCond.getNode()) 9249 Cond = NewCond; 9250 } 9251 9252 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y 9253 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y 9254 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y 9255 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y 9256 if (Cond.getOpcode() == X86ISD::SETCC && 9257 Cond.getOperand(1).getOpcode() == X86ISD::CMP && 9258 isZero(Cond.getOperand(1).getOperand(1))) { 9259 SDValue Cmp = Cond.getOperand(1); 9260 9261 unsigned CondCode =cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue(); 9262 9263 if ((isAllOnes(Op1) || isAllOnes(Op2)) && 9264 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) { 9265 SDValue Y = isAllOnes(Op2) ? Op1 : Op2; 9266 9267 SDValue CmpOp0 = Cmp.getOperand(0); 9268 // Apply further optimizations for special cases 9269 // (select (x != 0), -1, 0) -> neg & sbb 9270 // (select (x == 0), 0, -1) -> neg & sbb 9271 if (ConstantSDNode *YC = dyn_cast<ConstantSDNode>(Y)) 9272 if (YC->isNullValue() && 9273 (isAllOnes(Op1) == (CondCode == X86::COND_NE))) { 9274 SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32); 9275 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, VTs, 9276 DAG.getConstant(0, CmpOp0.getValueType()), 9277 CmpOp0); 9278 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(), 9279 DAG.getConstant(X86::COND_B, MVT::i8), 9280 SDValue(Neg.getNode(), 1)); 9281 return Res; 9282 } 9283 9284 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, 9285 CmpOp0, DAG.getConstant(1, CmpOp0.getValueType())); 9286 Cmp = ConvertCmpIfNecessary(Cmp, DAG); 9287 9288 SDValue Res = // Res = 0 or -1. 
9289 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(), 9290 DAG.getConstant(X86::COND_B, MVT::i8), Cmp); 9291 9292 if (isAllOnes(Op1) != (CondCode == X86::COND_E)) 9293 Res = DAG.getNOT(DL, Res, Res.getValueType()); 9294 9295 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2); 9296 if (N2C == 0 || !N2C->isNullValue()) 9297 Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y); 9298 return Res; 9299 } 9300 } 9301 9302 // Look past (and (setcc_carry (cmp ...)), 1). 9303 if (Cond.getOpcode() == ISD::AND && 9304 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) { 9305 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 9306 if (C && C->getAPIntValue() == 1) 9307 Cond = Cond.getOperand(0); 9308 } 9309 9310 // If condition flag is set by a X86ISD::CMP, then use it as the condition 9311 // setting operand in place of the X86ISD::SETCC. 9312 unsigned CondOpcode = Cond.getOpcode(); 9313 if (CondOpcode == X86ISD::SETCC || 9314 CondOpcode == X86ISD::SETCC_CARRY) { 9315 CC = Cond.getOperand(0); 9316 9317 SDValue Cmp = Cond.getOperand(1); 9318 unsigned Opc = Cmp.getOpcode(); 9319 EVT VT = Op.getValueType(); 9320 9321 bool IllegalFPCMov = false; 9322 if (VT.isFloatingPoint() && !VT.isVector() && 9323 !isScalarFPTypeInSSEReg(VT)) // FPStack? 9324 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue()); 9325 9326 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) || 9327 Opc == X86ISD::BT) { // FIXME 9328 Cond = Cmp; 9329 addTest = false; 9330 } 9331 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO || 9332 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO || 9333 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) && 9334 Cond.getOperand(0).getValueType() != MVT::i8)) { 9335 SDValue LHS = Cond.getOperand(0); 9336 SDValue RHS = Cond.getOperand(1); 9337 unsigned X86Opcode; 9338 unsigned X86Cond; 9339 SDVTList VTs; 9340 switch (CondOpcode) { 9341 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break; 9342 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break; 9343 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break; 9344 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break; 9345 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break; 9346 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break; 9347 default: llvm_unreachable("unexpected overflowing operator"); 9348 } 9349 if (CondOpcode == ISD::UMULO) 9350 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(), 9351 MVT::i32); 9352 else 9353 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32); 9354 9355 SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS); 9356 9357 if (CondOpcode == ISD::UMULO) 9358 Cond = X86Op.getValue(2); 9359 else 9360 Cond = X86Op.getValue(1); 9361 9362 CC = DAG.getConstant(X86Cond, MVT::i8); 9363 addTest = false; 9364 } 9365 9366 if (addTest) { 9367 // Look pass the truncate if the high bits are known zero. 9368 if (isTruncWithZeroHighBitsInput(Cond, DAG)) 9369 Cond = Cond.getOperand(0); 9370 9371 // We know the result of AND is compared against zero. Try to match 9372 // it to BT. 
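// For example, a select on ((x >> n) & 1) becomes BT x, n, and the CMOV then
// keys off the carry flag, avoiding a shift + TEST sequence.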
9373 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) { 9374 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG); 9375 if (NewSetCC.getNode()) { 9376 CC = NewSetCC.getOperand(0); 9377 Cond = NewSetCC.getOperand(1); 9378 addTest = false; 9379 } 9380 } 9381 } 9382 9383 if (addTest) { 9384 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 9385 Cond = EmitTest(Cond, X86::COND_NE, DAG); 9386 } 9387 9388 // a < b ? -1 : 0 -> RES = ~setcc_carry 9389 // a < b ? 0 : -1 -> RES = setcc_carry 9390 // a >= b ? -1 : 0 -> RES = setcc_carry 9391 // a >= b ? 0 : -1 -> RES = ~setcc_carry 9392 if (Cond.getOpcode() == X86ISD::SUB) { 9393 Cond = ConvertCmpIfNecessary(Cond, DAG); 9394 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue(); 9395 9396 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) && 9397 (isAllOnes(Op1) || isAllOnes(Op2)) && (isZero(Op1) || isZero(Op2))) { 9398 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(), 9399 DAG.getConstant(X86::COND_B, MVT::i8), Cond); 9400 if (isAllOnes(Op1) != (CondCode == X86::COND_B)) 9401 return DAG.getNOT(DL, Res, Res.getValueType()); 9402 return Res; 9403 } 9404 } 9405 9406 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate 9407 // widen the cmov and push the truncate through. This avoids introducing a new 9408 // branch during isel and doesn't add any extensions. 9409 if (Op.getValueType() == MVT::i8 && 9410 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) { 9411 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0); 9412 if (T1.getValueType() == T2.getValueType() && 9413 // Blacklist CopyFromReg to avoid partial register stalls. 9414 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){ 9415 SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue); 9416 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VTs, T2, T1, CC, Cond); 9417 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov); 9418 } 9419 } 9420 9421 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if 9422 // condition is true. 9423 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue); 9424 SDValue Ops[] = { Op2, Op1, CC, Cond }; 9425 return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops, array_lengthof(Ops)); 9426} 9427 9428// isAndOrOfSingleUseSetCCs - Return true if node is an ISD::AND or 9429// ISD::OR of two X86ISD::SETCC nodes each of which has no other use apart 9430// from the AND / OR. 9431static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) { 9432 Opc = Op.getOpcode(); 9433 if (Opc != ISD::OR && Opc != ISD::AND) 9434 return false; 9435 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC && 9436 Op.getOperand(0).hasOneUse() && 9437 Op.getOperand(1).getOpcode() == X86ISD::SETCC && 9438 Op.getOperand(1).hasOneUse()); 9439} 9440 9441// isXor1OfSetCC - Return true if node is an ISD::XOR of a X86ISD::SETCC and 9442// 1 and that the SETCC node has a single use. 
9443static bool isXor1OfSetCC(SDValue Op) { 9444 if (Op.getOpcode() != ISD::XOR) 9445 return false; 9446 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 9447 if (N1C && N1C->getAPIntValue() == 1) { 9448 return Op.getOperand(0).getOpcode() == X86ISD::SETCC && 9449 Op.getOperand(0).hasOneUse(); 9450 } 9451 return false; 9452} 9453 9454SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { 9455 bool addTest = true; 9456 SDValue Chain = Op.getOperand(0); 9457 SDValue Cond = Op.getOperand(1); 9458 SDValue Dest = Op.getOperand(2); 9459 DebugLoc dl = Op.getDebugLoc(); 9460 SDValue CC; 9461 bool Inverted = false; 9462 9463 if (Cond.getOpcode() == ISD::SETCC) { 9464 // Check for setcc([su]{add,sub,mul}o == 0). 9465 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ && 9466 isa<ConstantSDNode>(Cond.getOperand(1)) && 9467 cast<ConstantSDNode>(Cond.getOperand(1))->isNullValue() && 9468 Cond.getOperand(0).getResNo() == 1 && 9469 (Cond.getOperand(0).getOpcode() == ISD::SADDO || 9470 Cond.getOperand(0).getOpcode() == ISD::UADDO || 9471 Cond.getOperand(0).getOpcode() == ISD::SSUBO || 9472 Cond.getOperand(0).getOpcode() == ISD::USUBO || 9473 Cond.getOperand(0).getOpcode() == ISD::SMULO || 9474 Cond.getOperand(0).getOpcode() == ISD::UMULO)) { 9475 Inverted = true; 9476 Cond = Cond.getOperand(0); 9477 } else { 9478 SDValue NewCond = LowerSETCC(Cond, DAG); 9479 if (NewCond.getNode()) 9480 Cond = NewCond; 9481 } 9482 } 9483#if 0 9484 // FIXME: LowerXALUO doesn't handle these!! 9485 else if (Cond.getOpcode() == X86ISD::ADD || 9486 Cond.getOpcode() == X86ISD::SUB || 9487 Cond.getOpcode() == X86ISD::SMUL || 9488 Cond.getOpcode() == X86ISD::UMUL) 9489 Cond = LowerXALUO(Cond, DAG); 9490#endif 9491 9492 // Look pass (and (setcc_carry (cmp ...)), 1). 9493 if (Cond.getOpcode() == ISD::AND && 9494 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) { 9495 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 9496 if (C && C->getAPIntValue() == 1) 9497 Cond = Cond.getOperand(0); 9498 } 9499 9500 // If condition flag is set by a X86ISD::CMP, then use it as the condition 9501 // setting operand in place of the X86ISD::SETCC. 9502 unsigned CondOpcode = Cond.getOpcode(); 9503 if (CondOpcode == X86ISD::SETCC || 9504 CondOpcode == X86ISD::SETCC_CARRY) { 9505 CC = Cond.getOperand(0); 9506 9507 SDValue Cmp = Cond.getOperand(1); 9508 unsigned Opc = Cmp.getOpcode(); 9509 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp?? 9510 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) { 9511 Cond = Cmp; 9512 addTest = false; 9513 } else { 9514 switch (cast<ConstantSDNode>(CC)->getZExtValue()) { 9515 default: break; 9516 case X86::COND_O: 9517 case X86::COND_B: 9518 // These can only come from an arithmetic instruction with overflow, 9519 // e.g. SADDO, UADDO. 
9520 Cond = Cond.getNode()->getOperand(1); 9521 addTest = false; 9522 break; 9523 } 9524 } 9525 } 9526 CondOpcode = Cond.getOpcode(); 9527 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO || 9528 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO || 9529 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) && 9530 Cond.getOperand(0).getValueType() != MVT::i8)) { 9531 SDValue LHS = Cond.getOperand(0); 9532 SDValue RHS = Cond.getOperand(1); 9533 unsigned X86Opcode; 9534 unsigned X86Cond; 9535 SDVTList VTs; 9536 switch (CondOpcode) { 9537 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break; 9538 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break; 9539 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break; 9540 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break; 9541 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break; 9542 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break; 9543 default: llvm_unreachable("unexpected overflowing operator"); 9544 } 9545 if (Inverted) 9546 X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond); 9547 if (CondOpcode == ISD::UMULO) 9548 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(), 9549 MVT::i32); 9550 else 9551 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32); 9552 9553 SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS); 9554 9555 if (CondOpcode == ISD::UMULO) 9556 Cond = X86Op.getValue(2); 9557 else 9558 Cond = X86Op.getValue(1); 9559 9560 CC = DAG.getConstant(X86Cond, MVT::i8); 9561 addTest = false; 9562 } else { 9563 unsigned CondOpc; 9564 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) { 9565 SDValue Cmp = Cond.getOperand(0).getOperand(1); 9566 if (CondOpc == ISD::OR) { 9567 // Also, recognize the pattern generated by an FCMP_UNE. We can emit 9568 // two branches instead of an explicit OR instruction with a 9569 // separate test. 9570 if (Cmp == Cond.getOperand(1).getOperand(1) && 9571 isX86LogicalCmp(Cmp)) { 9572 CC = Cond.getOperand(0).getOperand(0); 9573 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 9574 Chain, Dest, CC, Cmp); 9575 CC = Cond.getOperand(1).getOperand(0); 9576 Cond = Cmp; 9577 addTest = false; 9578 } 9579 } else { // ISD::AND 9580 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit 9581 // two branches instead of an explicit AND instruction with a 9582 // separate test. However, we only do this if this block doesn't 9583 // have a fall-through edge, because this requires an explicit 9584 // jmp when the condition is false. 9585 if (Cmp == Cond.getOperand(1).getOperand(1) && 9586 isX86LogicalCmp(Cmp) && 9587 Op.getNode()->hasOneUse()) { 9588 X86::CondCode CCode = 9589 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0); 9590 CCode = X86::GetOppositeBranchCondition(CCode); 9591 CC = DAG.getConstant(CCode, MVT::i8); 9592 SDNode *User = *Op.getNode()->use_begin(); 9593 // Look for an unconditional branch following this conditional branch. 9594 // We need this because we need to reverse the successors in order 9595 // to implement FCMP_OEQ. 
9596 if (User->getOpcode() == ISD::BR) { 9597 SDValue FalseBB = User->getOperand(1); 9598 SDNode *NewBR = 9599 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest); 9600 assert(NewBR == User); 9601 (void)NewBR; 9602 Dest = FalseBB; 9603 9604 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 9605 Chain, Dest, CC, Cmp); 9606 X86::CondCode CCode = 9607 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0); 9608 CCode = X86::GetOppositeBranchCondition(CCode); 9609 CC = DAG.getConstant(CCode, MVT::i8); 9610 Cond = Cmp; 9611 addTest = false; 9612 } 9613 } 9614 } 9615 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) { 9616 // Recognize for xorb (setcc), 1 patterns. The xor inverts the condition. 9617 // It should be transformed during dag combiner except when the condition 9618 // is set by a arithmetics with overflow node. 9619 X86::CondCode CCode = 9620 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0); 9621 CCode = X86::GetOppositeBranchCondition(CCode); 9622 CC = DAG.getConstant(CCode, MVT::i8); 9623 Cond = Cond.getOperand(0).getOperand(1); 9624 addTest = false; 9625 } else if (Cond.getOpcode() == ISD::SETCC && 9626 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) { 9627 // For FCMP_OEQ, we can emit 9628 // two branches instead of an explicit AND instruction with a 9629 // separate test. However, we only do this if this block doesn't 9630 // have a fall-through edge, because this requires an explicit 9631 // jmp when the condition is false. 9632 if (Op.getNode()->hasOneUse()) { 9633 SDNode *User = *Op.getNode()->use_begin(); 9634 // Look for an unconditional branch following this conditional branch. 9635 // We need this because we need to reverse the successors in order 9636 // to implement FCMP_OEQ. 9637 if (User->getOpcode() == ISD::BR) { 9638 SDValue FalseBB = User->getOperand(1); 9639 SDNode *NewBR = 9640 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest); 9641 assert(NewBR == User); 9642 (void)NewBR; 9643 Dest = FalseBB; 9644 9645 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32, 9646 Cond.getOperand(0), Cond.getOperand(1)); 9647 Cmp = ConvertCmpIfNecessary(Cmp, DAG); 9648 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 9649 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 9650 Chain, Dest, CC, Cmp); 9651 CC = DAG.getConstant(X86::COND_P, MVT::i8); 9652 Cond = Cmp; 9653 addTest = false; 9654 } 9655 } 9656 } else if (Cond.getOpcode() == ISD::SETCC && 9657 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) { 9658 // For FCMP_UNE, we can emit 9659 // two branches instead of an explicit AND instruction with a 9660 // separate test. However, we only do this if this block doesn't 9661 // have a fall-through edge, because this requires an explicit 9662 // jmp when the condition is false. 9663 if (Op.getNode()->hasOneUse()) { 9664 SDNode *User = *Op.getNode()->use_begin(); 9665 // Look for an unconditional branch following this conditional branch. 9666 // We need this because we need to reverse the successors in order 9667 // to implement FCMP_UNE. 
9668 if (User->getOpcode() == ISD::BR) { 9669 SDValue FalseBB = User->getOperand(1); 9670 SDNode *NewBR = 9671 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest); 9672 assert(NewBR == User); 9673 (void)NewBR; 9674 9675 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32, 9676 Cond.getOperand(0), Cond.getOperand(1)); 9677 Cmp = ConvertCmpIfNecessary(Cmp, DAG); 9678 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 9679 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 9680 Chain, Dest, CC, Cmp); 9681 CC = DAG.getConstant(X86::COND_NP, MVT::i8); 9682 Cond = Cmp; 9683 addTest = false; 9684 Dest = FalseBB; 9685 } 9686 } 9687 } 9688 } 9689 9690 if (addTest) { 9691 // Look pass the truncate if the high bits are known zero. 9692 if (isTruncWithZeroHighBitsInput(Cond, DAG)) 9693 Cond = Cond.getOperand(0); 9694 9695 // We know the result of AND is compared against zero. Try to match 9696 // it to BT. 9697 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) { 9698 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG); 9699 if (NewSetCC.getNode()) { 9700 CC = NewSetCC.getOperand(0); 9701 Cond = NewSetCC.getOperand(1); 9702 addTest = false; 9703 } 9704 } 9705 } 9706 9707 if (addTest) { 9708 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 9709 Cond = EmitTest(Cond, X86::COND_NE, DAG); 9710 } 9711 Cond = ConvertCmpIfNecessary(Cond, DAG); 9712 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 9713 Chain, Dest, CC, Cond); 9714} 9715 9716 9717// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets. 9718// Calls to _alloca is needed to probe the stack when allocating more than 4k 9719// bytes in one go. Touching the stack at 4K increments is necessary to ensure 9720// that the guard pages used by the OS virtual memory manager are allocated in 9721// correct sequence. 9722SDValue 9723X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, 9724 SelectionDAG &DAG) const { 9725 assert((Subtarget->isTargetCygMing() || Subtarget->isTargetWindows() || 9726 getTargetMachine().Options.EnableSegmentedStacks) && 9727 "This should be used only on Windows targets or when segmented stacks " 9728 "are being used"); 9729 assert(!Subtarget->isTargetEnvMacho() && "Not implemented"); 9730 DebugLoc dl = Op.getDebugLoc(); 9731 9732 // Get the inputs. 9733 SDValue Chain = Op.getOperand(0); 9734 SDValue Size = Op.getOperand(1); 9735 // FIXME: Ensure alignment here 9736 9737 bool Is64Bit = Subtarget->is64Bit(); 9738 EVT SPTy = Is64Bit ? MVT::i64 : MVT::i32; 9739 9740 if (getTargetMachine().Options.EnableSegmentedStacks) { 9741 MachineFunction &MF = DAG.getMachineFunction(); 9742 MachineRegisterInfo &MRI = MF.getRegInfo(); 9743 9744 if (Is64Bit) { 9745 // The 64 bit implementation of segmented stacks needs to clobber both r10 9746 // r11. This makes it impossible to use it along with nested parameters. 9747 const Function *F = MF.getFunction(); 9748 9749 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end(); 9750 I != E; ++I) 9751 if (I->hasNestAttr()) 9752 report_fatal_error("Cannot use segmented stacks with functions that " 9753 "have nested arguments."); 9754 } 9755 9756 const TargetRegisterClass *AddrRegClass = 9757 getRegClassFor(Subtarget->is64Bit() ? 
MVT::i64:MVT::i32); 9758 unsigned Vreg = MRI.createVirtualRegister(AddrRegClass); 9759 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size); 9760 SDValue Value = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain, 9761 DAG.getRegister(Vreg, SPTy)); 9762 SDValue Ops1[2] = { Value, Chain }; 9763 return DAG.getMergeValues(Ops1, 2, dl); 9764 } else { 9765 SDValue Flag; 9766 unsigned Reg = (Subtarget->is64Bit() ? X86::RAX : X86::EAX); 9767 9768 Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag); 9769 Flag = Chain.getValue(1); 9770 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 9771 9772 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag); 9773 Flag = Chain.getValue(1); 9774 9775 Chain = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(), 9776 SPTy).getValue(1); 9777 9778 SDValue Ops1[2] = { Chain.getValue(0), Chain }; 9779 return DAG.getMergeValues(Ops1, 2, dl); 9780 } 9781} 9782 9783SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { 9784 MachineFunction &MF = DAG.getMachineFunction(); 9785 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 9786 9787 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 9788 DebugLoc DL = Op.getDebugLoc(); 9789 9790 if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) { 9791 // vastart just stores the address of the VarArgsFrameIndex slot into the 9792 // memory location argument. 9793 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 9794 getPointerTy()); 9795 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1), 9796 MachinePointerInfo(SV), false, false, 0); 9797 } 9798 9799 // __va_list_tag: 9800 // gp_offset (0 - 6 * 8) 9801 // fp_offset (48 - 48 + 8 * 16) 9802 // overflow_arg_area (point to parameters coming in memory). 9803 // reg_save_area 9804 SmallVector<SDValue, 8> MemOps; 9805 SDValue FIN = Op.getOperand(1); 9806 // Store gp_offset 9807 SDValue Store = DAG.getStore(Op.getOperand(0), DL, 9808 DAG.getConstant(FuncInfo->getVarArgsGPOffset(), 9809 MVT::i32), 9810 FIN, MachinePointerInfo(SV), false, false, 0); 9811 MemOps.push_back(Store); 9812 9813 // Store fp_offset 9814 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), 9815 FIN, DAG.getIntPtrConstant(4)); 9816 Store = DAG.getStore(Op.getOperand(0), DL, 9817 DAG.getConstant(FuncInfo->getVarArgsFPOffset(), 9818 MVT::i32), 9819 FIN, MachinePointerInfo(SV, 4), false, false, 0); 9820 MemOps.push_back(Store); 9821 9822 // Store ptr to overflow_arg_area 9823 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), 9824 FIN, DAG.getIntPtrConstant(4)); 9825 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 9826 getPointerTy()); 9827 Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, 9828 MachinePointerInfo(SV, 8), 9829 false, false, 0); 9830 MemOps.push_back(Store); 9831 9832 // Store ptr to reg_save_area. 
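// reg_save_area is the last field of __va_list_tag, at byte offset 16
// (after the two i32 offsets and the i8* overflow_arg_area pointer).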
9833 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), 9834 FIN, DAG.getIntPtrConstant(8)); 9835 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), 9836 getPointerTy()); 9837 Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN, 9838 MachinePointerInfo(SV, 16), false, false, 0); 9839 MemOps.push_back(Store); 9840 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, 9841 &MemOps[0], MemOps.size()); 9842} 9843 9844SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { 9845 assert(Subtarget->is64Bit() && 9846 "LowerVAARG only handles 64-bit va_arg!"); 9847 assert((Subtarget->isTargetLinux() || 9848 Subtarget->isTargetDarwin()) && 9849 "Unhandled target in LowerVAARG"); 9850 assert(Op.getNode()->getNumOperands() == 4); 9851 SDValue Chain = Op.getOperand(0); 9852 SDValue SrcPtr = Op.getOperand(1); 9853 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 9854 unsigned Align = Op.getConstantOperandVal(3); 9855 DebugLoc dl = Op.getDebugLoc(); 9856 9857 EVT ArgVT = Op.getNode()->getValueType(0); 9858 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 9859 uint32_t ArgSize = getDataLayout()->getTypeAllocSize(ArgTy); 9860 uint8_t ArgMode; 9861 9862 // Decide which area this value should be read from. 9863 // TODO: Implement the AMD64 ABI in its entirety. This simple 9864 // selection mechanism works only for the basic types. 9865 if (ArgVT == MVT::f80) { 9866 llvm_unreachable("va_arg for f80 not yet implemented"); 9867 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) { 9868 ArgMode = 2; // Argument passed in XMM register. Use fp_offset. 9869 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) { 9870 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset. 9871 } else { 9872 llvm_unreachable("Unhandled argument type in LowerVAARG"); 9873 } 9874 9875 if (ArgMode == 2) { 9876 // Sanity Check: Make sure using fp_offset makes sense. 9877 assert(!getTargetMachine().Options.UseSoftFloat && 9878 !(DAG.getMachineFunction() 9879 .getFunction()->getFnAttributes() 9880 .hasAttribute(Attributes::NoImplicitFloat)) && 9881 Subtarget->hasSSE1()); 9882 } 9883 9884 // Insert VAARG_64 node into the DAG 9885 // VAARG_64 returns two values: Variable Argument Address, Chain 9886 SmallVector<SDValue, 11> InstOps; 9887 InstOps.push_back(Chain); 9888 InstOps.push_back(SrcPtr); 9889 InstOps.push_back(DAG.getConstant(ArgSize, MVT::i32)); 9890 InstOps.push_back(DAG.getConstant(ArgMode, MVT::i8)); 9891 InstOps.push_back(DAG.getConstant(Align, MVT::i32)); 9892 SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other); 9893 SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl, 9894 VTs, &InstOps[0], InstOps.size(), 9895 MVT::i64, 9896 MachinePointerInfo(SV), 9897 /*Align=*/0, 9898 /*Volatile=*/false, 9899 /*ReadMem=*/true, 9900 /*WriteMem=*/true); 9901 Chain = VAARG.getValue(1); 9902 9903 // Load the next argument and return it 9904 return DAG.getLoad(ArgVT, dl, 9905 Chain, 9906 VAARG, 9907 MachinePointerInfo(), 9908 false, false, false, 0); 9909} 9910 9911static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget, 9912 SelectionDAG &DAG) { 9913 // X86-64 va_list is a struct { i32, i32, i8*, i8* }. 
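// va_copy therefore reduces to copying those 24 bytes (4 + 4 + 8 + 8) with
// 8-byte alignment, which is exactly the memcpy emitted below.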
9914 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!"); 9915 SDValue Chain = Op.getOperand(0); 9916 SDValue DstPtr = Op.getOperand(1); 9917 SDValue SrcPtr = Op.getOperand(2); 9918 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); 9919 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 9920 DebugLoc DL = Op.getDebugLoc(); 9921 9922 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, 9923 DAG.getIntPtrConstant(24), 8, /*isVolatile*/false, 9924 false, 9925 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV)); 9926} 9927 9928// getTargetVShiftNOde - Handle vector element shifts where the shift amount 9929// may or may not be a constant. Takes immediate version of shift as input. 9930static SDValue getTargetVShiftNode(unsigned Opc, DebugLoc dl, EVT VT, 9931 SDValue SrcOp, SDValue ShAmt, 9932 SelectionDAG &DAG) { 9933 assert(ShAmt.getValueType() == MVT::i32 && "ShAmt is not i32"); 9934 9935 if (isa<ConstantSDNode>(ShAmt)) { 9936 // Constant may be a TargetConstant. Use a regular constant. 9937 uint32_t ShiftAmt = cast<ConstantSDNode>(ShAmt)->getZExtValue(); 9938 switch (Opc) { 9939 default: llvm_unreachable("Unknown target vector shift node"); 9940 case X86ISD::VSHLI: 9941 case X86ISD::VSRLI: 9942 case X86ISD::VSRAI: 9943 return DAG.getNode(Opc, dl, VT, SrcOp, 9944 DAG.getConstant(ShiftAmt, MVT::i32)); 9945 } 9946 } 9947 9948 // Change opcode to non-immediate version 9949 switch (Opc) { 9950 default: llvm_unreachable("Unknown target vector shift node"); 9951 case X86ISD::VSHLI: Opc = X86ISD::VSHL; break; 9952 case X86ISD::VSRLI: Opc = X86ISD::VSRL; break; 9953 case X86ISD::VSRAI: Opc = X86ISD::VSRA; break; 9954 } 9955 9956 // Need to build a vector containing shift amount 9957 // Shift amount is 32-bits, but SSE instructions read 64-bit, so fill with 0 9958 SDValue ShOps[4]; 9959 ShOps[0] = ShAmt; 9960 ShOps[1] = DAG.getConstant(0, MVT::i32); 9961 ShOps[2] = ShOps[3] = DAG.getUNDEF(MVT::i32); 9962 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, &ShOps[0], 4); 9963 9964 // The return type has to be a 128-bit type with the same element 9965 // type as the input type. 9966 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 9967 EVT ShVT = MVT::getVectorVT(EltVT, 128/EltVT.getSizeInBits()); 9968 9969 ShAmt = DAG.getNode(ISD::BITCAST, dl, ShVT, ShAmt); 9970 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt); 9971} 9972 9973static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) { 9974 DebugLoc dl = Op.getDebugLoc(); 9975 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 9976 switch (IntNo) { 9977 default: return SDValue(); // Don't custom lower most intrinsics. 9978 // Comparison intrinsics. 
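// The comis*/ucomis* intrinsics compare the low scalar element and set
// EFLAGS, but at the IR level they return an i32 0/1, so they are lowered
// to (U)COMI + SETCC + ZERO_EXTEND.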
9979 case Intrinsic::x86_sse_comieq_ss: 9980 case Intrinsic::x86_sse_comilt_ss: 9981 case Intrinsic::x86_sse_comile_ss: 9982 case Intrinsic::x86_sse_comigt_ss: 9983 case Intrinsic::x86_sse_comige_ss: 9984 case Intrinsic::x86_sse_comineq_ss: 9985 case Intrinsic::x86_sse_ucomieq_ss: 9986 case Intrinsic::x86_sse_ucomilt_ss: 9987 case Intrinsic::x86_sse_ucomile_ss: 9988 case Intrinsic::x86_sse_ucomigt_ss: 9989 case Intrinsic::x86_sse_ucomige_ss: 9990 case Intrinsic::x86_sse_ucomineq_ss: 9991 case Intrinsic::x86_sse2_comieq_sd: 9992 case Intrinsic::x86_sse2_comilt_sd: 9993 case Intrinsic::x86_sse2_comile_sd: 9994 case Intrinsic::x86_sse2_comigt_sd: 9995 case Intrinsic::x86_sse2_comige_sd: 9996 case Intrinsic::x86_sse2_comineq_sd: 9997 case Intrinsic::x86_sse2_ucomieq_sd: 9998 case Intrinsic::x86_sse2_ucomilt_sd: 9999 case Intrinsic::x86_sse2_ucomile_sd: 10000 case Intrinsic::x86_sse2_ucomigt_sd: 10001 case Intrinsic::x86_sse2_ucomige_sd: 10002 case Intrinsic::x86_sse2_ucomineq_sd: { 10003 unsigned Opc; 10004 ISD::CondCode CC; 10005 switch (IntNo) { 10006 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 10007 case Intrinsic::x86_sse_comieq_ss: 10008 case Intrinsic::x86_sse2_comieq_sd: 10009 Opc = X86ISD::COMI; 10010 CC = ISD::SETEQ; 10011 break; 10012 case Intrinsic::x86_sse_comilt_ss: 10013 case Intrinsic::x86_sse2_comilt_sd: 10014 Opc = X86ISD::COMI; 10015 CC = ISD::SETLT; 10016 break; 10017 case Intrinsic::x86_sse_comile_ss: 10018 case Intrinsic::x86_sse2_comile_sd: 10019 Opc = X86ISD::COMI; 10020 CC = ISD::SETLE; 10021 break; 10022 case Intrinsic::x86_sse_comigt_ss: 10023 case Intrinsic::x86_sse2_comigt_sd: 10024 Opc = X86ISD::COMI; 10025 CC = ISD::SETGT; 10026 break; 10027 case Intrinsic::x86_sse_comige_ss: 10028 case Intrinsic::x86_sse2_comige_sd: 10029 Opc = X86ISD::COMI; 10030 CC = ISD::SETGE; 10031 break; 10032 case Intrinsic::x86_sse_comineq_ss: 10033 case Intrinsic::x86_sse2_comineq_sd: 10034 Opc = X86ISD::COMI; 10035 CC = ISD::SETNE; 10036 break; 10037 case Intrinsic::x86_sse_ucomieq_ss: 10038 case Intrinsic::x86_sse2_ucomieq_sd: 10039 Opc = X86ISD::UCOMI; 10040 CC = ISD::SETEQ; 10041 break; 10042 case Intrinsic::x86_sse_ucomilt_ss: 10043 case Intrinsic::x86_sse2_ucomilt_sd: 10044 Opc = X86ISD::UCOMI; 10045 CC = ISD::SETLT; 10046 break; 10047 case Intrinsic::x86_sse_ucomile_ss: 10048 case Intrinsic::x86_sse2_ucomile_sd: 10049 Opc = X86ISD::UCOMI; 10050 CC = ISD::SETLE; 10051 break; 10052 case Intrinsic::x86_sse_ucomigt_ss: 10053 case Intrinsic::x86_sse2_ucomigt_sd: 10054 Opc = X86ISD::UCOMI; 10055 CC = ISD::SETGT; 10056 break; 10057 case Intrinsic::x86_sse_ucomige_ss: 10058 case Intrinsic::x86_sse2_ucomige_sd: 10059 Opc = X86ISD::UCOMI; 10060 CC = ISD::SETGE; 10061 break; 10062 case Intrinsic::x86_sse_ucomineq_ss: 10063 case Intrinsic::x86_sse2_ucomineq_sd: 10064 Opc = X86ISD::UCOMI; 10065 CC = ISD::SETNE; 10066 break; 10067 } 10068 10069 SDValue LHS = Op.getOperand(1); 10070 SDValue RHS = Op.getOperand(2); 10071 unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG); 10072 assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!"); 10073 SDValue Cond = DAG.getNode(Opc, dl, MVT::i32, LHS, RHS); 10074 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 10075 DAG.getConstant(X86CC, MVT::i8), Cond); 10076 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); 10077 } 10078 10079 // Arithmetic intrinsics. 
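// pmuludq multiplies the unsigned low 32 bits of each 64-bit lane and
// produces full 64-bit products; it maps directly to X86ISD::PMULUDQ.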
10080 case Intrinsic::x86_sse2_pmulu_dq: 10081 case Intrinsic::x86_avx2_pmulu_dq: 10082 return DAG.getNode(X86ISD::PMULUDQ, dl, Op.getValueType(), 10083 Op.getOperand(1), Op.getOperand(2)); 10084 10085 // SSE3/AVX horizontal add/sub intrinsics 10086 case Intrinsic::x86_sse3_hadd_ps: 10087 case Intrinsic::x86_sse3_hadd_pd: 10088 case Intrinsic::x86_avx_hadd_ps_256: 10089 case Intrinsic::x86_avx_hadd_pd_256: 10090 case Intrinsic::x86_sse3_hsub_ps: 10091 case Intrinsic::x86_sse3_hsub_pd: 10092 case Intrinsic::x86_avx_hsub_ps_256: 10093 case Intrinsic::x86_avx_hsub_pd_256: 10094 case Intrinsic::x86_ssse3_phadd_w_128: 10095 case Intrinsic::x86_ssse3_phadd_d_128: 10096 case Intrinsic::x86_avx2_phadd_w: 10097 case Intrinsic::x86_avx2_phadd_d: 10098 case Intrinsic::x86_ssse3_phsub_w_128: 10099 case Intrinsic::x86_ssse3_phsub_d_128: 10100 case Intrinsic::x86_avx2_phsub_w: 10101 case Intrinsic::x86_avx2_phsub_d: { 10102 unsigned Opcode; 10103 switch (IntNo) { 10104 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 10105 case Intrinsic::x86_sse3_hadd_ps: 10106 case Intrinsic::x86_sse3_hadd_pd: 10107 case Intrinsic::x86_avx_hadd_ps_256: 10108 case Intrinsic::x86_avx_hadd_pd_256: 10109 Opcode = X86ISD::FHADD; 10110 break; 10111 case Intrinsic::x86_sse3_hsub_ps: 10112 case Intrinsic::x86_sse3_hsub_pd: 10113 case Intrinsic::x86_avx_hsub_ps_256: 10114 case Intrinsic::x86_avx_hsub_pd_256: 10115 Opcode = X86ISD::FHSUB; 10116 break; 10117 case Intrinsic::x86_ssse3_phadd_w_128: 10118 case Intrinsic::x86_ssse3_phadd_d_128: 10119 case Intrinsic::x86_avx2_phadd_w: 10120 case Intrinsic::x86_avx2_phadd_d: 10121 Opcode = X86ISD::HADD; 10122 break; 10123 case Intrinsic::x86_ssse3_phsub_w_128: 10124 case Intrinsic::x86_ssse3_phsub_d_128: 10125 case Intrinsic::x86_avx2_phsub_w: 10126 case Intrinsic::x86_avx2_phsub_d: 10127 Opcode = X86ISD::HSUB; 10128 break; 10129 } 10130 return DAG.getNode(Opcode, dl, Op.getValueType(), 10131 Op.getOperand(1), Op.getOperand(2)); 10132 } 10133 10134 // AVX2 variable shift intrinsics 10135 case Intrinsic::x86_avx2_psllv_d: 10136 case Intrinsic::x86_avx2_psllv_q: 10137 case Intrinsic::x86_avx2_psllv_d_256: 10138 case Intrinsic::x86_avx2_psllv_q_256: 10139 case Intrinsic::x86_avx2_psrlv_d: 10140 case Intrinsic::x86_avx2_psrlv_q: 10141 case Intrinsic::x86_avx2_psrlv_d_256: 10142 case Intrinsic::x86_avx2_psrlv_q_256: 10143 case Intrinsic::x86_avx2_psrav_d: 10144 case Intrinsic::x86_avx2_psrav_d_256: { 10145 unsigned Opcode; 10146 switch (IntNo) { 10147 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
10148 case Intrinsic::x86_avx2_psllv_d: 10149 case Intrinsic::x86_avx2_psllv_q: 10150 case Intrinsic::x86_avx2_psllv_d_256: 10151 case Intrinsic::x86_avx2_psllv_q_256: 10152 Opcode = ISD::SHL; 10153 break; 10154 case Intrinsic::x86_avx2_psrlv_d: 10155 case Intrinsic::x86_avx2_psrlv_q: 10156 case Intrinsic::x86_avx2_psrlv_d_256: 10157 case Intrinsic::x86_avx2_psrlv_q_256: 10158 Opcode = ISD::SRL; 10159 break; 10160 case Intrinsic::x86_avx2_psrav_d: 10161 case Intrinsic::x86_avx2_psrav_d_256: 10162 Opcode = ISD::SRA; 10163 break; 10164 } 10165 return DAG.getNode(Opcode, dl, Op.getValueType(), 10166 Op.getOperand(1), Op.getOperand(2)); 10167 } 10168 10169 case Intrinsic::x86_ssse3_pshuf_b_128: 10170 case Intrinsic::x86_avx2_pshuf_b: 10171 return DAG.getNode(X86ISD::PSHUFB, dl, Op.getValueType(), 10172 Op.getOperand(1), Op.getOperand(2)); 10173 10174 case Intrinsic::x86_ssse3_psign_b_128: 10175 case Intrinsic::x86_ssse3_psign_w_128: 10176 case Intrinsic::x86_ssse3_psign_d_128: 10177 case Intrinsic::x86_avx2_psign_b: 10178 case Intrinsic::x86_avx2_psign_w: 10179 case Intrinsic::x86_avx2_psign_d: 10180 return DAG.getNode(X86ISD::PSIGN, dl, Op.getValueType(), 10181 Op.getOperand(1), Op.getOperand(2)); 10182 10183 case Intrinsic::x86_sse41_insertps: 10184 return DAG.getNode(X86ISD::INSERTPS, dl, Op.getValueType(), 10185 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 10186 10187 case Intrinsic::x86_avx_vperm2f128_ps_256: 10188 case Intrinsic::x86_avx_vperm2f128_pd_256: 10189 case Intrinsic::x86_avx_vperm2f128_si_256: 10190 case Intrinsic::x86_avx2_vperm2i128: 10191 return DAG.getNode(X86ISD::VPERM2X128, dl, Op.getValueType(), 10192 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 10193 10194 case Intrinsic::x86_avx2_permd: 10195 case Intrinsic::x86_avx2_permps: 10196 // Operands intentionally swapped. Mask is last operand to intrinsic, 10197 // but second operand for node/intruction. 10198 return DAG.getNode(X86ISD::VPERMV, dl, Op.getValueType(), 10199 Op.getOperand(2), Op.getOperand(1)); 10200 10201 // ptest and testp intrinsics. The intrinsic these come from are designed to 10202 // return an integer value, not just an instruction so lower it to the ptest 10203 // or testp pattern and a setcc for the result. 
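// PTEST/VTESTP* set ZF from the AND of the two operands and CF from the
// ANDN, so 'testz' maps to ZF=1 (COND_E), 'testc' to CF=1 (COND_B) and
// 'testnzc' to ZF=0 && CF=0 (COND_A).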
10204 case Intrinsic::x86_sse41_ptestz: 10205 case Intrinsic::x86_sse41_ptestc: 10206 case Intrinsic::x86_sse41_ptestnzc: 10207 case Intrinsic::x86_avx_ptestz_256: 10208 case Intrinsic::x86_avx_ptestc_256: 10209 case Intrinsic::x86_avx_ptestnzc_256: 10210 case Intrinsic::x86_avx_vtestz_ps: 10211 case Intrinsic::x86_avx_vtestc_ps: 10212 case Intrinsic::x86_avx_vtestnzc_ps: 10213 case Intrinsic::x86_avx_vtestz_pd: 10214 case Intrinsic::x86_avx_vtestc_pd: 10215 case Intrinsic::x86_avx_vtestnzc_pd: 10216 case Intrinsic::x86_avx_vtestz_ps_256: 10217 case Intrinsic::x86_avx_vtestc_ps_256: 10218 case Intrinsic::x86_avx_vtestnzc_ps_256: 10219 case Intrinsic::x86_avx_vtestz_pd_256: 10220 case Intrinsic::x86_avx_vtestc_pd_256: 10221 case Intrinsic::x86_avx_vtestnzc_pd_256: { 10222 bool IsTestPacked = false; 10223 unsigned X86CC; 10224 switch (IntNo) { 10225 default: llvm_unreachable("Bad fallthrough in Intrinsic lowering."); 10226 case Intrinsic::x86_avx_vtestz_ps: 10227 case Intrinsic::x86_avx_vtestz_pd: 10228 case Intrinsic::x86_avx_vtestz_ps_256: 10229 case Intrinsic::x86_avx_vtestz_pd_256: 10230 IsTestPacked = true; // Fallthrough 10231 case Intrinsic::x86_sse41_ptestz: 10232 case Intrinsic::x86_avx_ptestz_256: 10233 // ZF = 1 10234 X86CC = X86::COND_E; 10235 break; 10236 case Intrinsic::x86_avx_vtestc_ps: 10237 case Intrinsic::x86_avx_vtestc_pd: 10238 case Intrinsic::x86_avx_vtestc_ps_256: 10239 case Intrinsic::x86_avx_vtestc_pd_256: 10240 IsTestPacked = true; // Fallthrough 10241 case Intrinsic::x86_sse41_ptestc: 10242 case Intrinsic::x86_avx_ptestc_256: 10243 // CF = 1 10244 X86CC = X86::COND_B; 10245 break; 10246 case Intrinsic::x86_avx_vtestnzc_ps: 10247 case Intrinsic::x86_avx_vtestnzc_pd: 10248 case Intrinsic::x86_avx_vtestnzc_ps_256: 10249 case Intrinsic::x86_avx_vtestnzc_pd_256: 10250 IsTestPacked = true; // Fallthrough 10251 case Intrinsic::x86_sse41_ptestnzc: 10252 case Intrinsic::x86_avx_ptestnzc_256: 10253 // ZF and CF = 0 10254 X86CC = X86::COND_A; 10255 break; 10256 } 10257 10258 SDValue LHS = Op.getOperand(1); 10259 SDValue RHS = Op.getOperand(2); 10260 unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST; 10261 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS); 10262 SDValue CC = DAG.getConstant(X86CC, MVT::i8); 10263 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test); 10264 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); 10265 } 10266 10267 // SSE/AVX shift intrinsics 10268 case Intrinsic::x86_sse2_psll_w: 10269 case Intrinsic::x86_sse2_psll_d: 10270 case Intrinsic::x86_sse2_psll_q: 10271 case Intrinsic::x86_avx2_psll_w: 10272 case Intrinsic::x86_avx2_psll_d: 10273 case Intrinsic::x86_avx2_psll_q: 10274 case Intrinsic::x86_sse2_psrl_w: 10275 case Intrinsic::x86_sse2_psrl_d: 10276 case Intrinsic::x86_sse2_psrl_q: 10277 case Intrinsic::x86_avx2_psrl_w: 10278 case Intrinsic::x86_avx2_psrl_d: 10279 case Intrinsic::x86_avx2_psrl_q: 10280 case Intrinsic::x86_sse2_psra_w: 10281 case Intrinsic::x86_sse2_psra_d: 10282 case Intrinsic::x86_avx2_psra_w: 10283 case Intrinsic::x86_avx2_psra_d: { 10284 unsigned Opcode; 10285 switch (IntNo) { 10286 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
10287 case Intrinsic::x86_sse2_psll_w: 10288 case Intrinsic::x86_sse2_psll_d: 10289 case Intrinsic::x86_sse2_psll_q: 10290 case Intrinsic::x86_avx2_psll_w: 10291 case Intrinsic::x86_avx2_psll_d: 10292 case Intrinsic::x86_avx2_psll_q: 10293 Opcode = X86ISD::VSHL; 10294 break; 10295 case Intrinsic::x86_sse2_psrl_w: 10296 case Intrinsic::x86_sse2_psrl_d: 10297 case Intrinsic::x86_sse2_psrl_q: 10298 case Intrinsic::x86_avx2_psrl_w: 10299 case Intrinsic::x86_avx2_psrl_d: 10300 case Intrinsic::x86_avx2_psrl_q: 10301 Opcode = X86ISD::VSRL; 10302 break; 10303 case Intrinsic::x86_sse2_psra_w: 10304 case Intrinsic::x86_sse2_psra_d: 10305 case Intrinsic::x86_avx2_psra_w: 10306 case Intrinsic::x86_avx2_psra_d: 10307 Opcode = X86ISD::VSRA; 10308 break; 10309 } 10310 return DAG.getNode(Opcode, dl, Op.getValueType(), 10311 Op.getOperand(1), Op.getOperand(2)); 10312 } 10313 10314 // SSE/AVX immediate shift intrinsics 10315 case Intrinsic::x86_sse2_pslli_w: 10316 case Intrinsic::x86_sse2_pslli_d: 10317 case Intrinsic::x86_sse2_pslli_q: 10318 case Intrinsic::x86_avx2_pslli_w: 10319 case Intrinsic::x86_avx2_pslli_d: 10320 case Intrinsic::x86_avx2_pslli_q: 10321 case Intrinsic::x86_sse2_psrli_w: 10322 case Intrinsic::x86_sse2_psrli_d: 10323 case Intrinsic::x86_sse2_psrli_q: 10324 case Intrinsic::x86_avx2_psrli_w: 10325 case Intrinsic::x86_avx2_psrli_d: 10326 case Intrinsic::x86_avx2_psrli_q: 10327 case Intrinsic::x86_sse2_psrai_w: 10328 case Intrinsic::x86_sse2_psrai_d: 10329 case Intrinsic::x86_avx2_psrai_w: 10330 case Intrinsic::x86_avx2_psrai_d: { 10331 unsigned Opcode; 10332 switch (IntNo) { 10333 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 10334 case Intrinsic::x86_sse2_pslli_w: 10335 case Intrinsic::x86_sse2_pslli_d: 10336 case Intrinsic::x86_sse2_pslli_q: 10337 case Intrinsic::x86_avx2_pslli_w: 10338 case Intrinsic::x86_avx2_pslli_d: 10339 case Intrinsic::x86_avx2_pslli_q: 10340 Opcode = X86ISD::VSHLI; 10341 break; 10342 case Intrinsic::x86_sse2_psrli_w: 10343 case Intrinsic::x86_sse2_psrli_d: 10344 case Intrinsic::x86_sse2_psrli_q: 10345 case Intrinsic::x86_avx2_psrli_w: 10346 case Intrinsic::x86_avx2_psrli_d: 10347 case Intrinsic::x86_avx2_psrli_q: 10348 Opcode = X86ISD::VSRLI; 10349 break; 10350 case Intrinsic::x86_sse2_psrai_w: 10351 case Intrinsic::x86_sse2_psrai_d: 10352 case Intrinsic::x86_avx2_psrai_w: 10353 case Intrinsic::x86_avx2_psrai_d: 10354 Opcode = X86ISD::VSRAI; 10355 break; 10356 } 10357 return getTargetVShiftNode(Opcode, dl, Op.getValueType(), 10358 Op.getOperand(1), Op.getOperand(2), DAG); 10359 } 10360 10361 case Intrinsic::x86_sse42_pcmpistria128: 10362 case Intrinsic::x86_sse42_pcmpestria128: 10363 case Intrinsic::x86_sse42_pcmpistric128: 10364 case Intrinsic::x86_sse42_pcmpestric128: 10365 case Intrinsic::x86_sse42_pcmpistrio128: 10366 case Intrinsic::x86_sse42_pcmpestrio128: 10367 case Intrinsic::x86_sse42_pcmpistris128: 10368 case Intrinsic::x86_sse42_pcmpestris128: 10369 case Intrinsic::x86_sse42_pcmpistriz128: 10370 case Intrinsic::x86_sse42_pcmpestriz128: { 10371 unsigned Opcode; 10372 unsigned X86CC; 10373 switch (IntNo) { 10374 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
10375 case Intrinsic::x86_sse42_pcmpistria128: 10376 Opcode = X86ISD::PCMPISTRI; 10377 X86CC = X86::COND_A; 10378 break; 10379 case Intrinsic::x86_sse42_pcmpestria128: 10380 Opcode = X86ISD::PCMPESTRI; 10381 X86CC = X86::COND_A; 10382 break; 10383 case Intrinsic::x86_sse42_pcmpistric128: 10384 Opcode = X86ISD::PCMPISTRI; 10385 X86CC = X86::COND_B; 10386 break; 10387 case Intrinsic::x86_sse42_pcmpestric128: 10388 Opcode = X86ISD::PCMPESTRI; 10389 X86CC = X86::COND_B; 10390 break; 10391 case Intrinsic::x86_sse42_pcmpistrio128: 10392 Opcode = X86ISD::PCMPISTRI; 10393 X86CC = X86::COND_O; 10394 break; 10395 case Intrinsic::x86_sse42_pcmpestrio128: 10396 Opcode = X86ISD::PCMPESTRI; 10397 X86CC = X86::COND_O; 10398 break; 10399 case Intrinsic::x86_sse42_pcmpistris128: 10400 Opcode = X86ISD::PCMPISTRI; 10401 X86CC = X86::COND_S; 10402 break; 10403 case Intrinsic::x86_sse42_pcmpestris128: 10404 Opcode = X86ISD::PCMPESTRI; 10405 X86CC = X86::COND_S; 10406 break; 10407 case Intrinsic::x86_sse42_pcmpistriz128: 10408 Opcode = X86ISD::PCMPISTRI; 10409 X86CC = X86::COND_E; 10410 break; 10411 case Intrinsic::x86_sse42_pcmpestriz128: 10412 Opcode = X86ISD::PCMPESTRI; 10413 X86CC = X86::COND_E; 10414 break; 10415 } 10416 SmallVector<SDValue, 5> NewOps; 10417 NewOps.append(Op->op_begin()+1, Op->op_end()); 10418 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); 10419 SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps.data(), NewOps.size()); 10420 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 10421 DAG.getConstant(X86CC, MVT::i8), 10422 SDValue(PCMP.getNode(), 1)); 10423 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); 10424 } 10425 10426 case Intrinsic::x86_sse42_pcmpistri128: 10427 case Intrinsic::x86_sse42_pcmpestri128: { 10428 unsigned Opcode; 10429 if (IntNo == Intrinsic::x86_sse42_pcmpistri128) 10430 Opcode = X86ISD::PCMPISTRI; 10431 else 10432 Opcode = X86ISD::PCMPESTRI; 10433 10434 SmallVector<SDValue, 5> NewOps; 10435 NewOps.append(Op->op_begin()+1, Op->op_end()); 10436 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); 10437 return DAG.getNode(Opcode, dl, VTs, NewOps.data(), NewOps.size()); 10438 } 10439 case Intrinsic::x86_fma_vfmadd_ps: 10440 case Intrinsic::x86_fma_vfmadd_pd: 10441 case Intrinsic::x86_fma_vfmsub_ps: 10442 case Intrinsic::x86_fma_vfmsub_pd: 10443 case Intrinsic::x86_fma_vfnmadd_ps: 10444 case Intrinsic::x86_fma_vfnmadd_pd: 10445 case Intrinsic::x86_fma_vfnmsub_ps: 10446 case Intrinsic::x86_fma_vfnmsub_pd: 10447 case Intrinsic::x86_fma_vfmaddsub_ps: 10448 case Intrinsic::x86_fma_vfmaddsub_pd: 10449 case Intrinsic::x86_fma_vfmsubadd_ps: 10450 case Intrinsic::x86_fma_vfmsubadd_pd: 10451 case Intrinsic::x86_fma_vfmadd_ps_256: 10452 case Intrinsic::x86_fma_vfmadd_pd_256: 10453 case Intrinsic::x86_fma_vfmsub_ps_256: 10454 case Intrinsic::x86_fma_vfmsub_pd_256: 10455 case Intrinsic::x86_fma_vfnmadd_ps_256: 10456 case Intrinsic::x86_fma_vfnmadd_pd_256: 10457 case Intrinsic::x86_fma_vfnmsub_ps_256: 10458 case Intrinsic::x86_fma_vfnmsub_pd_256: 10459 case Intrinsic::x86_fma_vfmaddsub_ps_256: 10460 case Intrinsic::x86_fma_vfmaddsub_pd_256: 10461 case Intrinsic::x86_fma_vfmsubadd_ps_256: 10462 case Intrinsic::x86_fma_vfmsubadd_pd_256: { 10463 unsigned Opc; 10464 switch (IntNo) { 10465 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
10466 case Intrinsic::x86_fma_vfmadd_ps: 10467 case Intrinsic::x86_fma_vfmadd_pd: 10468 case Intrinsic::x86_fma_vfmadd_ps_256: 10469 case Intrinsic::x86_fma_vfmadd_pd_256: 10470 Opc = X86ISD::FMADD; 10471 break; 10472 case Intrinsic::x86_fma_vfmsub_ps: 10473 case Intrinsic::x86_fma_vfmsub_pd: 10474 case Intrinsic::x86_fma_vfmsub_ps_256: 10475 case Intrinsic::x86_fma_vfmsub_pd_256: 10476 Opc = X86ISD::FMSUB; 10477 break; 10478 case Intrinsic::x86_fma_vfnmadd_ps: 10479 case Intrinsic::x86_fma_vfnmadd_pd: 10480 case Intrinsic::x86_fma_vfnmadd_ps_256: 10481 case Intrinsic::x86_fma_vfnmadd_pd_256: 10482 Opc = X86ISD::FNMADD; 10483 break; 10484 case Intrinsic::x86_fma_vfnmsub_ps: 10485 case Intrinsic::x86_fma_vfnmsub_pd: 10486 case Intrinsic::x86_fma_vfnmsub_ps_256: 10487 case Intrinsic::x86_fma_vfnmsub_pd_256: 10488 Opc = X86ISD::FNMSUB; 10489 break; 10490 case Intrinsic::x86_fma_vfmaddsub_ps: 10491 case Intrinsic::x86_fma_vfmaddsub_pd: 10492 case Intrinsic::x86_fma_vfmaddsub_ps_256: 10493 case Intrinsic::x86_fma_vfmaddsub_pd_256: 10494 Opc = X86ISD::FMADDSUB; 10495 break; 10496 case Intrinsic::x86_fma_vfmsubadd_ps: 10497 case Intrinsic::x86_fma_vfmsubadd_pd: 10498 case Intrinsic::x86_fma_vfmsubadd_ps_256: 10499 case Intrinsic::x86_fma_vfmsubadd_pd_256: 10500 Opc = X86ISD::FMSUBADD; 10501 break; 10502 } 10503 10504 return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1), 10505 Op.getOperand(2), Op.getOperand(3)); 10506 } 10507 } 10508} 10509 10510static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) { 10511 DebugLoc dl = Op.getDebugLoc(); 10512 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 10513 switch (IntNo) { 10514 default: return SDValue(); // Don't custom lower most intrinsics. 10515 10516 // RDRAND intrinsics. 10517 case Intrinsic::x86_rdrand_16: 10518 case Intrinsic::x86_rdrand_32: 10519 case Intrinsic::x86_rdrand_64: { 10520 // Emit the node with the right value type. 10521 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Glue, MVT::Other); 10522 SDValue Result = DAG.getNode(X86ISD::RDRAND, dl, VTs, Op.getOperand(0)); 10523 10524 // If the value returned by RDRAND was valid (CF=1), return 1. Otherwise 10525 // return the value from Rand, which is always 0, casted to i32. 10526 SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)), 10527 DAG.getConstant(1, Op->getValueType(1)), 10528 DAG.getConstant(X86::COND_B, MVT::i32), 10529 SDValue(Result.getNode(), 1) }; 10530 SDValue isValid = DAG.getNode(X86ISD::CMOV, dl, 10531 DAG.getVTList(Op->getValueType(1), MVT::Glue), 10532 Ops, 4); 10533 10534 // Return { result, isValid, chain }. 10535 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid, 10536 SDValue(Result.getNode(), 2)); 10537 } 10538 } 10539} 10540 10541SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op, 10542 SelectionDAG &DAG) const { 10543 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 10544 MFI->setReturnAddressIsTaken(true); 10545 10546 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 10547 DebugLoc dl = Op.getDebugLoc(); 10548 EVT PtrVT = getPointerTy(); 10549 10550 if (Depth > 0) { 10551 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 10552 SDValue Offset = 10553 DAG.getConstant(RegInfo->getSlotSize(), PtrVT); 10554 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 10555 DAG.getNode(ISD::ADD, dl, PtrVT, 10556 FrameAddr, Offset), 10557 MachinePointerInfo(), false, false, false, 0); 10558 } 10559 10560 // Just load the return address. 
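// Depth 0: the current function's return address lives at a fixed frame
// index, so a single load from that slot is enough.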
10561 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG); 10562 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 10563 RetAddrFI, MachinePointerInfo(), false, false, false, 0); 10564} 10565 10566SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { 10567 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 10568 MFI->setFrameAddressIsTaken(true); 10569 10570 EVT VT = Op.getValueType(); 10571 DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful 10572 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 10573 unsigned FrameReg = Subtarget->is64Bit() ? X86::RBP : X86::EBP; 10574 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 10575 while (Depth--) 10576 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 10577 MachinePointerInfo(), 10578 false, false, false, 0); 10579 return FrameAddr; 10580} 10581 10582SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op, 10583 SelectionDAG &DAG) const { 10584 return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize()); 10585} 10586 10587SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { 10588 SDValue Chain = Op.getOperand(0); 10589 SDValue Offset = Op.getOperand(1); 10590 SDValue Handler = Op.getOperand(2); 10591 DebugLoc dl = Op.getDebugLoc(); 10592 10593 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, 10594 Subtarget->is64Bit() ? X86::RBP : X86::EBP, 10595 getPointerTy()); 10596 unsigned StoreAddrReg = (Subtarget->is64Bit() ? X86::RCX : X86::ECX); 10597 10598 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Frame, 10599 DAG.getIntPtrConstant(RegInfo->getSlotSize())); 10600 StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StoreAddr, Offset); 10601 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(), 10602 false, false, 0); 10603 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr); 10604 10605 return DAG.getNode(X86ISD::EH_RETURN, dl, 10606 MVT::Other, 10607 Chain, DAG.getRegister(StoreAddrReg, getPointerTy())); 10608} 10609 10610SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op, 10611 SelectionDAG &DAG) const { 10612 DebugLoc DL = Op.getDebugLoc(); 10613 return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL, 10614 DAG.getVTList(MVT::i32, MVT::Other), 10615 Op.getOperand(0), Op.getOperand(1)); 10616} 10617 10618SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op, 10619 SelectionDAG &DAG) const { 10620 DebugLoc DL = Op.getDebugLoc(); 10621 return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other, 10622 Op.getOperand(0), Op.getOperand(1)); 10623} 10624 10625static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) { 10626 return Op.getOperand(0); 10627} 10628 10629SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, 10630 SelectionDAG &DAG) const { 10631 SDValue Root = Op.getOperand(0); 10632 SDValue Trmp = Op.getOperand(1); // trampoline 10633 SDValue FPtr = Op.getOperand(2); // nested function 10634 SDValue Nest = Op.getOperand(3); // 'nest' parameter value 10635 DebugLoc dl = Op.getDebugLoc(); 10636 10637 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 10638 const TargetRegisterInfo* TRI = getTargetMachine().getRegisterInfo(); 10639 10640 if (Subtarget->is64Bit()) { 10641 SDValue OutChains[6]; 10642 10643 // Large code-model. 10644 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode. 10645 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode. 
10646 10647 const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7; 10648 const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7; 10649 10650 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix 10651 10652 // Load the pointer to the nested function into R11. 10653 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11 10654 SDValue Addr = Trmp; 10655 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), 10656 Addr, MachinePointerInfo(TrmpAddr), 10657 false, false, 0); 10658 10659 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 10660 DAG.getConstant(2, MVT::i64)); 10661 OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr, 10662 MachinePointerInfo(TrmpAddr, 2), 10663 false, false, 2); 10664 10665 // Load the 'nest' parameter value into R10. 10666 // R10 is specified in X86CallingConv.td 10667 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10 10668 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 10669 DAG.getConstant(10, MVT::i64)); 10670 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), 10671 Addr, MachinePointerInfo(TrmpAddr, 10), 10672 false, false, 0); 10673 10674 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 10675 DAG.getConstant(12, MVT::i64)); 10676 OutChains[3] = DAG.getStore(Root, dl, Nest, Addr, 10677 MachinePointerInfo(TrmpAddr, 12), 10678 false, false, 2); 10679 10680 // Jump to the nested function. 10681 OpCode = (JMP64r << 8) | REX_WB; // jmpq *... 10682 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 10683 DAG.getConstant(20, MVT::i64)); 10684 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), 10685 Addr, MachinePointerInfo(TrmpAddr, 20), 10686 false, false, 0); 10687 10688 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11 10689 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 10690 DAG.getConstant(22, MVT::i64)); 10691 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr, 10692 MachinePointerInfo(TrmpAddr, 22), 10693 false, false, 0); 10694 10695 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 6); 10696 } else { 10697 const Function *Func = 10698 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue()); 10699 CallingConv::ID CC = Func->getCallingConv(); 10700 unsigned NestReg; 10701 10702 switch (CC) { 10703 default: 10704 llvm_unreachable("Unsupported calling convention"); 10705 case CallingConv::C: 10706 case CallingConv::X86_StdCall: { 10707 // Pass 'nest' parameter in ECX. 10708 // Must be kept in sync with X86CallingConv.td 10709 NestReg = X86::ECX; 10710 10711 // Check that ECX wasn't needed by an 'inreg' parameter. 10712 FunctionType *FTy = Func->getFunctionType(); 10713 const AttrListPtr &Attrs = Func->getAttributes(); 10714 10715 if (!Attrs.isEmpty() && !Func->isVarArg()) { 10716 unsigned InRegCount = 0; 10717 unsigned Idx = 1; 10718 10719 for (FunctionType::param_iterator I = FTy->param_begin(), 10720 E = FTy->param_end(); I != E; ++I, ++Idx) 10721 if (Attrs.getParamAttributes(Idx).hasAttribute(Attributes::InReg)) 10722 // FIXME: should only count parameters that are lowered to integers. 10723 InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32; 10724 10725 if (InRegCount > 2) { 10726 report_fatal_error("Nest register in use - reduce number of inreg" 10727 " parameters!"); 10728 } 10729 } 10730 break; 10731 } 10732 case CallingConv::X86_FastCall: 10733 case CallingConv::X86_ThisCall: 10734 case CallingConv::Fast: 10735 // Pass 'nest' parameter in EAX. 
10736 // Must be kept in sync with X86CallingConv.td 10737 NestReg = X86::EAX; 10738 break; 10739 } 10740 10741 SDValue OutChains[4]; 10742 SDValue Addr, Disp; 10743 10744 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 10745 DAG.getConstant(10, MVT::i32)); 10746 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr); 10747 10748 // This is storing the opcode for MOV32ri. 10749 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte. 10750 const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7; 10751 OutChains[0] = DAG.getStore(Root, dl, 10752 DAG.getConstant(MOV32ri|N86Reg, MVT::i8), 10753 Trmp, MachinePointerInfo(TrmpAddr), 10754 false, false, 0); 10755 10756 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 10757 DAG.getConstant(1, MVT::i32)); 10758 OutChains[1] = DAG.getStore(Root, dl, Nest, Addr, 10759 MachinePointerInfo(TrmpAddr, 1), 10760 false, false, 1); 10761 10762 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode. 10763 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 10764 DAG.getConstant(5, MVT::i32)); 10765 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr, 10766 MachinePointerInfo(TrmpAddr, 5), 10767 false, false, 1); 10768 10769 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 10770 DAG.getConstant(6, MVT::i32)); 10771 OutChains[3] = DAG.getStore(Root, dl, Disp, Addr, 10772 MachinePointerInfo(TrmpAddr, 6), 10773 false, false, 1); 10774 10775 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 4); 10776 } 10777} 10778 10779SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op, 10780 SelectionDAG &DAG) const { 10781 /* 10782 The rounding mode is in bits 11:10 of FPSR, and has the following 10783 settings: 10784 00 Round to nearest 10785 01 Round to -inf 10786 10 Round to +inf 10787 11 Round to 0 10788 10789 FLT_ROUNDS, on the other hand, expects the following: 10790 -1 Undefined 10791 0 Round to 0 10792 1 Round to nearest 10793 2 Round to +inf 10794 3 Round to -inf 10795 10796 To perform the conversion, we do: 10797 (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3) 10798 */ 10799 10800 MachineFunction &MF = DAG.getMachineFunction(); 10801 const TargetMachine &TM = MF.getTarget(); 10802 const TargetFrameLowering &TFI = *TM.getFrameLowering(); 10803 unsigned StackAlignment = TFI.getStackAlignment(); 10804 EVT VT = Op.getValueType(); 10805 DebugLoc DL = Op.getDebugLoc(); 10806 10807 // Save FP Control Word to stack slot 10808 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false); 10809 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 10810 10811 10812 MachineMemOperand *MMO = 10813 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 10814 MachineMemOperand::MOStore, 2, 2); 10815 10816 SDValue Ops[] = { DAG.getEntryNode(), StackSlot }; 10817 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL, 10818 DAG.getVTList(MVT::Other), 10819 Ops, 2, MVT::i16, MMO); 10820 10821 // Load FP Control Word from stack slot 10822 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot, 10823 MachinePointerInfo(), false, false, false, 0); 10824 10825 // Transform as necessary 10826 SDValue CWD1 = 10827 DAG.getNode(ISD::SRL, DL, MVT::i16, 10828 DAG.getNode(ISD::AND, DL, MVT::i16, 10829 CWD, DAG.getConstant(0x800, MVT::i16)), 10830 DAG.getConstant(11, MVT::i8)); 10831 SDValue CWD2 = 10832 DAG.getNode(ISD::SRL, DL, MVT::i16, 10833 DAG.getNode(ISD::AND, DL, MVT::i16, 10834 CWD, DAG.getConstant(0x400, MVT::i16)), 10835 DAG.getConstant(9, MVT::i8)); 10836 10837 SDValue 
RetVal = 10838 DAG.getNode(ISD::AND, DL, MVT::i16, 10839 DAG.getNode(ISD::ADD, DL, MVT::i16, 10840 DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2), 10841 DAG.getConstant(1, MVT::i16)), 10842 DAG.getConstant(3, MVT::i16)); 10843 10844 10845 return DAG.getNode((VT.getSizeInBits() < 16 ? 10846 ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal); 10847} 10848 10849static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) { 10850 EVT VT = Op.getValueType(); 10851 EVT OpVT = VT; 10852 unsigned NumBits = VT.getSizeInBits(); 10853 DebugLoc dl = Op.getDebugLoc(); 10854 10855 Op = Op.getOperand(0); 10856 if (VT == MVT::i8) { 10857 // Zero extend to i32 since there is not an i8 bsr. 10858 OpVT = MVT::i32; 10859 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op); 10860 } 10861 10862 // Issue a bsr (scan bits in reverse) which also sets EFLAGS. 10863 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 10864 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op); 10865 10866 // If src is zero (i.e. bsr sets ZF), returns NumBits. 10867 SDValue Ops[] = { 10868 Op, 10869 DAG.getConstant(NumBits+NumBits-1, OpVT), 10870 DAG.getConstant(X86::COND_E, MVT::i8), 10871 Op.getValue(1) 10872 }; 10873 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops, array_lengthof(Ops)); 10874 10875 // Finally xor with NumBits-1. 10876 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT)); 10877 10878 if (VT == MVT::i8) 10879 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op); 10880 return Op; 10881} 10882 10883static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) { 10884 EVT VT = Op.getValueType(); 10885 EVT OpVT = VT; 10886 unsigned NumBits = VT.getSizeInBits(); 10887 DebugLoc dl = Op.getDebugLoc(); 10888 10889 Op = Op.getOperand(0); 10890 if (VT == MVT::i8) { 10891 // Zero extend to i32 since there is not an i8 bsr. 10892 OpVT = MVT::i32; 10893 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op); 10894 } 10895 10896 // Issue a bsr (scan bits in reverse). 10897 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 10898 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op); 10899 10900 // And xor with NumBits-1. 10901 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT)); 10902 10903 if (VT == MVT::i8) 10904 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op); 10905 return Op; 10906} 10907 10908static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) { 10909 EVT VT = Op.getValueType(); 10910 unsigned NumBits = VT.getSizeInBits(); 10911 DebugLoc dl = Op.getDebugLoc(); 10912 Op = Op.getOperand(0); 10913 10914 // Issue a bsf (scan bits forward) which also sets EFLAGS. 10915 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 10916 Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op); 10917 10918 // If src is zero (i.e. bsf sets ZF), returns NumBits. 10919 SDValue Ops[] = { 10920 Op, 10921 DAG.getConstant(NumBits, VT), 10922 DAG.getConstant(X86::COND_E, MVT::i8), 10923 Op.getValue(1) 10924 }; 10925 return DAG.getNode(X86ISD::CMOV, dl, VT, Ops, array_lengthof(Ops)); 10926} 10927 10928// Lower256IntArith - Break a 256-bit integer operation into two new 128-bit 10929// ones, and then concatenate the result back. 
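// A scalar model of the strategy implemented below: a lane-wise 256-bit
// integer operation is the same operation applied to the two 128-bit halves,
// concatenated back together. Illustrative sketch only, shown for a v4i64
// add; the plain arrays stand in for the vector values.
static void addV4i64Sketch(const unsigned long long A[4],
                           const unsigned long long B[4],
                           unsigned long long R[4]) {
  // Low half  (Extract128BitVector(.., 0))
  R[0] = A[0] + B[0];
  R[1] = A[1] + B[1];
  // High half (Extract128BitVector(.., NumElems/2))
  R[2] = A[2] + B[2];
  R[3] = A[3] + B[3];
  // CONCAT_VECTORS then reassembles R[0..3] into the 256-bit result.
}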
10930static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) { 10931 EVT VT = Op.getValueType(); 10932 10933 assert(VT.is256BitVector() && VT.isInteger() && 10934 "Unsupported value type for operation"); 10935 10936 unsigned NumElems = VT.getVectorNumElements(); 10937 DebugLoc dl = Op.getDebugLoc(); 10938 10939 // Extract the LHS vectors 10940 SDValue LHS = Op.getOperand(0); 10941 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl); 10942 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl); 10943 10944 // Extract the RHS vectors 10945 SDValue RHS = Op.getOperand(1); 10946 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl); 10947 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl); 10948 10949 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 10950 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); 10951 10952 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, 10953 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1), 10954 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2)); 10955} 10956 10957static SDValue LowerADD(SDValue Op, SelectionDAG &DAG) { 10958 assert(Op.getValueType().is256BitVector() && 10959 Op.getValueType().isInteger() && 10960 "Only handle AVX 256-bit vector integer operation"); 10961 return Lower256IntArith(Op, DAG); 10962} 10963 10964static SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) { 10965 assert(Op.getValueType().is256BitVector() && 10966 Op.getValueType().isInteger() && 10967 "Only handle AVX 256-bit vector integer operation"); 10968 return Lower256IntArith(Op, DAG); 10969} 10970 10971static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget, 10972 SelectionDAG &DAG) { 10973 EVT VT = Op.getValueType(); 10974 10975 // Decompose 256-bit ops into smaller 128-bit ops. 10976 if (VT.is256BitVector() && !Subtarget->hasAVX2()) 10977 return Lower256IntArith(Op, DAG); 10978 10979 assert((VT == MVT::v2i64 || VT == MVT::v4i64) && 10980 "Only know how to lower V2I64/V4I64 multiply"); 10981 10982 DebugLoc dl = Op.getDebugLoc(); 10983 10984 // Ahi = psrlqi(a, 32); 10985 // Bhi = psrlqi(b, 32); 10986 // 10987 // AloBlo = pmuludq(a, b); 10988 // AloBhi = pmuludq(a, Bhi); 10989 // AhiBlo = pmuludq(Ahi, b); 10990 10991 // AloBhi = psllqi(AloBhi, 32); 10992 // AhiBlo = psllqi(AhiBlo, 32); 10993 // return AloBlo + AloBhi + AhiBlo; 10994 10995 SDValue A = Op.getOperand(0); 10996 SDValue B = Op.getOperand(1); 10997 10998 SDValue ShAmt = DAG.getConstant(32, MVT::i32); 10999 11000 SDValue Ahi = DAG.getNode(X86ISD::VSRLI, dl, VT, A, ShAmt); 11001 SDValue Bhi = DAG.getNode(X86ISD::VSRLI, dl, VT, B, ShAmt); 11002 11003 // Bit cast to 32-bit vectors for MULUDQ 11004 EVT MulVT = (VT == MVT::v2i64) ? 
MVT::v4i32 : MVT::v8i32; 11005 A = DAG.getNode(ISD::BITCAST, dl, MulVT, A); 11006 B = DAG.getNode(ISD::BITCAST, dl, MulVT, B); 11007 Ahi = DAG.getNode(ISD::BITCAST, dl, MulVT, Ahi); 11008 Bhi = DAG.getNode(ISD::BITCAST, dl, MulVT, Bhi); 11009 11010 SDValue AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B); 11011 SDValue AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi); 11012 SDValue AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B); 11013 11014 AloBhi = DAG.getNode(X86ISD::VSHLI, dl, VT, AloBhi, ShAmt); 11015 AhiBlo = DAG.getNode(X86ISD::VSHLI, dl, VT, AhiBlo, ShAmt); 11016 11017 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi); 11018 return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo); 11019} 11020 11021SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const { 11022 11023 EVT VT = Op.getValueType(); 11024 DebugLoc dl = Op.getDebugLoc(); 11025 SDValue R = Op.getOperand(0); 11026 SDValue Amt = Op.getOperand(1); 11027 LLVMContext *Context = DAG.getContext(); 11028 11029 if (!Subtarget->hasSSE2()) 11030 return SDValue(); 11031 11032 // Optimize shl/srl/sra with constant shift amount. 11033 if (isSplatVector(Amt.getNode())) { 11034 SDValue SclrAmt = Amt->getOperand(0); 11035 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(SclrAmt)) { 11036 uint64_t ShiftAmt = C->getZExtValue(); 11037 11038 if (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 || 11039 (Subtarget->hasAVX2() && 11040 (VT == MVT::v4i64 || VT == MVT::v8i32 || VT == MVT::v16i16))) { 11041 if (Op.getOpcode() == ISD::SHL) 11042 return DAG.getNode(X86ISD::VSHLI, dl, VT, R, 11043 DAG.getConstant(ShiftAmt, MVT::i32)); 11044 if (Op.getOpcode() == ISD::SRL) 11045 return DAG.getNode(X86ISD::VSRLI, dl, VT, R, 11046 DAG.getConstant(ShiftAmt, MVT::i32)); 11047 if (Op.getOpcode() == ISD::SRA && VT != MVT::v2i64 && VT != MVT::v4i64) 11048 return DAG.getNode(X86ISD::VSRAI, dl, VT, R, 11049 DAG.getConstant(ShiftAmt, MVT::i32)); 11050 } 11051 11052 if (VT == MVT::v16i8) { 11053 if (Op.getOpcode() == ISD::SHL) { 11054 // Make a large shift. 11055 SDValue SHL = DAG.getNode(X86ISD::VSHLI, dl, MVT::v8i16, R, 11056 DAG.getConstant(ShiftAmt, MVT::i32)); 11057 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL); 11058 // Zero out the rightmost bits. 11059 SmallVector<SDValue, 16> V(16, 11060 DAG.getConstant(uint8_t(-1U << ShiftAmt), 11061 MVT::i8)); 11062 return DAG.getNode(ISD::AND, dl, VT, SHL, 11063 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 16)); 11064 } 11065 if (Op.getOpcode() == ISD::SRL) { 11066 // Make a large shift. 11067 SDValue SRL = DAG.getNode(X86ISD::VSRLI, dl, MVT::v8i16, R, 11068 DAG.getConstant(ShiftAmt, MVT::i32)); 11069 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL); 11070 // Zero out the leftmost bits. 
11071 SmallVector<SDValue, 16> V(16, 11072 DAG.getConstant(uint8_t(-1U) >> ShiftAmt, 11073 MVT::i8)); 11074 return DAG.getNode(ISD::AND, dl, VT, SRL, 11075 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 16)); 11076 } 11077 if (Op.getOpcode() == ISD::SRA) { 11078 if (ShiftAmt == 7) { 11079 // R s>> 7 === R s< 0 11080 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl); 11081 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R); 11082 } 11083 11084 // R s>> a === ((R u>> a) ^ m) - m 11085 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt); 11086 SmallVector<SDValue, 16> V(16, DAG.getConstant(128 >> ShiftAmt, 11087 MVT::i8)); 11088 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 16); 11089 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask); 11090 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask); 11091 return Res; 11092 } 11093 llvm_unreachable("Unknown shift opcode."); 11094 } 11095 11096 if (Subtarget->hasAVX2() && VT == MVT::v32i8) { 11097 if (Op.getOpcode() == ISD::SHL) { 11098 // Make a large shift. 11099 SDValue SHL = DAG.getNode(X86ISD::VSHLI, dl, MVT::v16i16, R, 11100 DAG.getConstant(ShiftAmt, MVT::i32)); 11101 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL); 11102 // Zero out the rightmost bits. 11103 SmallVector<SDValue, 32> V(32, 11104 DAG.getConstant(uint8_t(-1U << ShiftAmt), 11105 MVT::i8)); 11106 return DAG.getNode(ISD::AND, dl, VT, SHL, 11107 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 32)); 11108 } 11109 if (Op.getOpcode() == ISD::SRL) { 11110 // Make a large shift. 11111 SDValue SRL = DAG.getNode(X86ISD::VSRLI, dl, MVT::v16i16, R, 11112 DAG.getConstant(ShiftAmt, MVT::i32)); 11113 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL); 11114 // Zero out the leftmost bits. 11115 SmallVector<SDValue, 32> V(32, 11116 DAG.getConstant(uint8_t(-1U) >> ShiftAmt, 11117 MVT::i8)); 11118 return DAG.getNode(ISD::AND, dl, VT, SRL, 11119 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 32)); 11120 } 11121 if (Op.getOpcode() == ISD::SRA) { 11122 if (ShiftAmt == 7) { 11123 // R s>> 7 === R s< 0 11124 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl); 11125 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R); 11126 } 11127 11128 // R s>> a === ((R u>> a) ^ m) - m 11129 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt); 11130 SmallVector<SDValue, 32> V(32, DAG.getConstant(128 >> ShiftAmt, 11131 MVT::i8)); 11132 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 32); 11133 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask); 11134 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask); 11135 return Res; 11136 } 11137 llvm_unreachable("Unknown shift opcode."); 11138 } 11139 } 11140 } 11141 11142 // Lower SHL with variable shift amount. 
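// The v4i32 case below uses a well-known trick: for 0 <= Amt <= 31,
// (Amt << 23) + 0x3f800000 is the IEEE-754 bit pattern of the float 2^Amt,
// so converting that float back to an integer and multiplying performs a
// per-lane variable left shift without a variable-shift instruction. A
// scalar model of one lane, illustrative only; assumes <cstring> for the
// bit cast via std::memcpy.
static unsigned variableShlSketch(unsigned R, unsigned Amt) {
  unsigned Bits = (Amt << 23) + 0x3f800000u; // bit pattern of (float)2^Amt
  float PowerOfTwo;
  std::memcpy(&PowerOfTwo, &Bits, sizeof(PowerOfTwo));
  return R * (unsigned)PowerOfTwo;           // R * 2^Amt == R << Amt
}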
11143 if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) { 11144 Op = DAG.getNode(X86ISD::VSHLI, dl, VT, Op.getOperand(1), 11145 DAG.getConstant(23, MVT::i32)); 11146 11147 const uint32_t CV[] = { 0x3f800000U, 0x3f800000U, 0x3f800000U, 0x3f800000U}; 11148 Constant *C = ConstantDataVector::get(*Context, CV); 11149 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 11150 SDValue Addend = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 11151 MachinePointerInfo::getConstantPool(), 11152 false, false, false, 16); 11153 11154 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Addend); 11155 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op); 11156 Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op); 11157 return DAG.getNode(ISD::MUL, dl, VT, Op, R); 11158 } 11159 if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) { 11160 assert(Subtarget->hasSSE2() && "Need SSE2 for pslli/pcmpeq."); 11161 11162 // a = a << 5; 11163 Op = DAG.getNode(X86ISD::VSHLI, dl, MVT::v8i16, Op.getOperand(1), 11164 DAG.getConstant(5, MVT::i32)); 11165 Op = DAG.getNode(ISD::BITCAST, dl, VT, Op); 11166 11167 // Turn 'a' into a mask suitable for VSELECT 11168 SDValue VSelM = DAG.getConstant(0x80, VT); 11169 SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op); 11170 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM); 11171 11172 SDValue CM1 = DAG.getConstant(0x0f, VT); 11173 SDValue CM2 = DAG.getConstant(0x3f, VT); 11174 11175 // r = VSELECT(r, psllw(r & (char16)15, 4), a); 11176 SDValue M = DAG.getNode(ISD::AND, dl, VT, R, CM1); 11177 M = getTargetVShiftNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 11178 DAG.getConstant(4, MVT::i32), DAG); 11179 M = DAG.getNode(ISD::BITCAST, dl, VT, M); 11180 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R); 11181 11182 // a += a 11183 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op); 11184 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op); 11185 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM); 11186 11187 // r = VSELECT(r, psllw(r & (char16)63, 2), a); 11188 M = DAG.getNode(ISD::AND, dl, VT, R, CM2); 11189 M = getTargetVShiftNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 11190 DAG.getConstant(2, MVT::i32), DAG); 11191 M = DAG.getNode(ISD::BITCAST, dl, VT, M); 11192 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R); 11193 11194 // a += a 11195 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op); 11196 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op); 11197 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM); 11198 11199 // return VSELECT(r, r+r, a); 11200 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, 11201 DAG.getNode(ISD::ADD, dl, VT, R, R), R); 11202 return R; 11203 } 11204 11205 // Decompose 256-bit shifts into smaller 128-bit shifts. 
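// For reference, a scalar model of the v16i8 SHL sequence above: the 3-bit
// shift amount is applied one bit at a time (shift by 4, then 2, then 1),
// each step taken only when the corresponding amount bit is set. That is
// exactly what the VSELECT-on-sign-bit rounds implement once 'a << 5' parks
// the current amount bit in bit 7 of every byte. Illustrative sketch only.
static unsigned char byteShlSketch(unsigned char R, unsigned char Amt) {
  if (Amt & 4) R = (unsigned char)(R << 4); // round 1: psllw by 4 (masked)
  if (Amt & 2) R = (unsigned char)(R << 2); // round 2: psllw by 2 (masked)
  if (Amt & 1) R = (unsigned char)(R + R);  // round 3: r + r
  return R;
}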
11206 if (VT.is256BitVector()) { 11207 unsigned NumElems = VT.getVectorNumElements(); 11208 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 11209 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); 11210 11211 // Extract the two vectors 11212 SDValue V1 = Extract128BitVector(R, 0, DAG, dl); 11213 SDValue V2 = Extract128BitVector(R, NumElems/2, DAG, dl); 11214 11215 // Recreate the shift amount vectors 11216 SDValue Amt1, Amt2; 11217 if (Amt.getOpcode() == ISD::BUILD_VECTOR) { 11218 // Constant shift amount 11219 SmallVector<SDValue, 4> Amt1Csts; 11220 SmallVector<SDValue, 4> Amt2Csts; 11221 for (unsigned i = 0; i != NumElems/2; ++i) 11222 Amt1Csts.push_back(Amt->getOperand(i)); 11223 for (unsigned i = NumElems/2; i != NumElems; ++i) 11224 Amt2Csts.push_back(Amt->getOperand(i)); 11225 11226 Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, 11227 &Amt1Csts[0], NumElems/2); 11228 Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, 11229 &Amt2Csts[0], NumElems/2); 11230 } else { 11231 // Variable shift amount 11232 Amt1 = Extract128BitVector(Amt, 0, DAG, dl); 11233 Amt2 = Extract128BitVector(Amt, NumElems/2, DAG, dl); 11234 } 11235 11236 // Issue new vector shifts for the smaller types 11237 V1 = DAG.getNode(Op.getOpcode(), dl, NewVT, V1, Amt1); 11238 V2 = DAG.getNode(Op.getOpcode(), dl, NewVT, V2, Amt2); 11239 11240 // Concatenate the result back 11241 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2); 11242 } 11243 11244 return SDValue(); 11245} 11246 11247static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) { 11248 // Lower the "add/sub/mul with overflow" instruction into a regular ins plus 11249 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering 11250 // looks for this combo and may remove the "setcc" instruction if the "setcc" 11251 // has only one use. 11252 SDNode *N = Op.getNode(); 11253 SDValue LHS = N->getOperand(0); 11254 SDValue RHS = N->getOperand(1); 11255 unsigned BaseOp = 0; 11256 unsigned Cond = 0; 11257 DebugLoc DL = Op.getDebugLoc(); 11258 switch (Op.getOpcode()) { 11259 default: llvm_unreachable("Unknown ovf instruction!"); 11260 case ISD::SADDO: 11261 // A subtract of one will be selected as a INC. Note that INC doesn't 11262 // set CF, so we can't do this for UADDO. 11263 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) 11264 if (C->isOne()) { 11265 BaseOp = X86ISD::INC; 11266 Cond = X86::COND_O; 11267 break; 11268 } 11269 BaseOp = X86ISD::ADD; 11270 Cond = X86::COND_O; 11271 break; 11272 case ISD::UADDO: 11273 BaseOp = X86ISD::ADD; 11274 Cond = X86::COND_B; 11275 break; 11276 case ISD::SSUBO: 11277 // A subtract of one will be selected as a DEC. Note that DEC doesn't 11278 // set CF, so we can't do this for USUBO. 
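// For reference, a source-level model of what this lowering produces, as a
// sketch using the GCC/Clang overflow builtins: the arithmetic instruction
// itself sets the flags, and a SETCC on OF (signed) or CF (unsigned)
// extracts the overflow bit, e.g. the unsigned case compiles to add + setb.
static bool uaddOverflowSketch(unsigned A, unsigned B, unsigned *Sum) {
  return __builtin_uadd_overflow(A, B, Sum);  // ADD + SETB  (X86::COND_B)
}
static bool saddOverflowSketch(int A, int B, int *Sum) {
  return __builtin_sadd_overflow(A, B, Sum);  // ADD + SETO  (X86::COND_O)
}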
11279 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) 11280 if (C->isOne()) { 11281 BaseOp = X86ISD::DEC; 11282 Cond = X86::COND_O; 11283 break; 11284 } 11285 BaseOp = X86ISD::SUB; 11286 Cond = X86::COND_O; 11287 break; 11288 case ISD::USUBO: 11289 BaseOp = X86ISD::SUB; 11290 Cond = X86::COND_B; 11291 break; 11292 case ISD::SMULO: 11293 BaseOp = X86ISD::SMUL; 11294 Cond = X86::COND_O; 11295 break; 11296 case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs 11297 SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0), 11298 MVT::i32); 11299 SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS); 11300 11301 SDValue SetCC = 11302 DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 11303 DAG.getConstant(X86::COND_O, MVT::i32), 11304 SDValue(Sum.getNode(), 2)); 11305 11306 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC); 11307 } 11308 } 11309 11310 // Also sets EFLAGS. 11311 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32); 11312 SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS); 11313 11314 SDValue SetCC = 11315 DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1), 11316 DAG.getConstant(Cond, MVT::i32), 11317 SDValue(Sum.getNode(), 1)); 11318 11319 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC); 11320} 11321 11322SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, 11323 SelectionDAG &DAG) const { 11324 DebugLoc dl = Op.getDebugLoc(); 11325 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 11326 EVT VT = Op.getValueType(); 11327 11328 if (!Subtarget->hasSSE2() || !VT.isVector()) 11329 return SDValue(); 11330 11331 unsigned BitsDiff = VT.getScalarType().getSizeInBits() - 11332 ExtraVT.getScalarType().getSizeInBits(); 11333 SDValue ShAmt = DAG.getConstant(BitsDiff, MVT::i32); 11334 11335 switch (VT.getSimpleVT().SimpleTy) { 11336 default: return SDValue(); 11337 case MVT::v8i32: 11338 case MVT::v16i16: 11339 if (!Subtarget->hasAVX()) 11340 return SDValue(); 11341 if (!Subtarget->hasAVX2()) { 11342 // needs to be split 11343 unsigned NumElems = VT.getVectorNumElements(); 11344 11345 // Extract the LHS vectors 11346 SDValue LHS = Op.getOperand(0); 11347 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl); 11348 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl); 11349 11350 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 11351 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); 11352 11353 EVT ExtraEltVT = ExtraVT.getVectorElementType(); 11354 unsigned ExtraNumElems = ExtraVT.getVectorNumElements(); 11355 ExtraVT = EVT::getVectorVT(*DAG.getContext(), ExtraEltVT, 11356 ExtraNumElems/2); 11357 SDValue Extra = DAG.getValueType(ExtraVT); 11358 11359 LHS1 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, Extra); 11360 LHS2 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, Extra); 11361 11362 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, LHS1, LHS2); 11363 } 11364 // fall through 11365 case MVT::v4i32: 11366 case MVT::v8i16: { 11367 SDValue Tmp1 = getTargetVShiftNode(X86ISD::VSHLI, dl, VT, 11368 Op.getOperand(0), ShAmt, DAG); 11369 return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, Tmp1, ShAmt, DAG); 11370 } 11371 } 11372} 11373 11374 11375static SDValue LowerMEMBARRIER(SDValue Op, const X86Subtarget *Subtarget, 11376 SelectionDAG &DAG) { 11377 DebugLoc dl = Op.getDebugLoc(); 11378 11379 // Go ahead and emit the fence on x86-64 even if we asked for no-sse2. 11380 // There isn't any reason to disable it if the target processor supports it. 
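// The branch below handles the remaining case (32-bit, no SSE2): there is no
// MFENCE, but any LOCKed read-modify-write is a full barrier, so the lowering
// emits a harmless "lock orl $0, (%esp)" via X86::OR32mrLocked. A sketch of
// the same idiom, illustrative only; assumes 32-bit x86 and GCC/Clang
// inline-assembly syntax.
#if defined(__i386__)
static inline void fenceWithoutSSE2Sketch() {
  __asm__ __volatile__("lock; orl $0, (%%esp)" ::: "memory", "cc");
}
#endif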
11381 if (!Subtarget->hasSSE2() && !Subtarget->is64Bit()) { 11382 SDValue Chain = Op.getOperand(0); 11383 SDValue Zero = DAG.getConstant(0, MVT::i32); 11384 SDValue Ops[] = { 11385 DAG.getRegister(X86::ESP, MVT::i32), // Base 11386 DAG.getTargetConstant(1, MVT::i8), // Scale 11387 DAG.getRegister(0, MVT::i32), // Index 11388 DAG.getTargetConstant(0, MVT::i32), // Disp 11389 DAG.getRegister(0, MVT::i32), // Segment. 11390 Zero, 11391 Chain 11392 }; 11393 SDNode *Res = 11394 DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops, 11395 array_lengthof(Ops)); 11396 return SDValue(Res, 0); 11397 } 11398 11399 unsigned isDev = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue(); 11400 if (!isDev) 11401 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0)); 11402 11403 unsigned Op1 = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 11404 unsigned Op2 = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); 11405 unsigned Op3 = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue(); 11406 unsigned Op4 = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); 11407 11408 // def : Pat<(membarrier (i8 0), (i8 0), (i8 0), (i8 1), (i8 1)), (SFENCE)>; 11409 if (!Op1 && !Op2 && !Op3 && Op4) 11410 return DAG.getNode(X86ISD::SFENCE, dl, MVT::Other, Op.getOperand(0)); 11411 11412 // def : Pat<(membarrier (i8 1), (i8 0), (i8 0), (i8 0), (i8 1)), (LFENCE)>; 11413 if (Op1 && !Op2 && !Op3 && !Op4) 11414 return DAG.getNode(X86ISD::LFENCE, dl, MVT::Other, Op.getOperand(0)); 11415 11416 // def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm), (i8 1)), 11417 // (MFENCE)>; 11418 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0)); 11419} 11420 11421static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget, 11422 SelectionDAG &DAG) { 11423 DebugLoc dl = Op.getDebugLoc(); 11424 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>( 11425 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()); 11426 SynchronizationScope FenceScope = static_cast<SynchronizationScope>( 11427 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue()); 11428 11429 // The only fence that needs an instruction is a sequentially-consistent 11430 // cross-thread fence. 11431 if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) { 11432 // Use mfence if we have SSE2 or we're on x86-64 (even if we asked for 11433 // no-sse2). There isn't any reason to disable it if the target processor 11434 // supports it. 11435 if (Subtarget->hasSSE2() || Subtarget->is64Bit()) 11436 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0)); 11437 11438 SDValue Chain = Op.getOperand(0); 11439 SDValue Zero = DAG.getConstant(0, MVT::i32); 11440 SDValue Ops[] = { 11441 DAG.getRegister(X86::ESP, MVT::i32), // Base 11442 DAG.getTargetConstant(1, MVT::i8), // Scale 11443 DAG.getRegister(0, MVT::i32), // Index 11444 DAG.getTargetConstant(0, MVT::i32), // Disp 11445 DAG.getRegister(0, MVT::i32), // Segment. 11446 Zero, 11447 Chain 11448 }; 11449 SDNode *Res = 11450 DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops, 11451 array_lengthof(Ops)); 11452 return SDValue(Res, 0); 11453 } 11454 11455 // MEMBARRIER is a compiler barrier; it codegens to a no-op. 
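// A source-level view of the policy above, as a sketch (assumes <atomic>):
// only a sequentially-consistent cross-thread fence becomes an instruction
// on x86; weaker thread fences and all signal fences rely on x86's strong
// ordering and take the MEMBARRIER no-op path below.
static void fenceLoweringSketch() {
  std::atomic_thread_fence(std::memory_order_seq_cst); // MFENCE (or a locked op)
  std::atomic_thread_fence(std::memory_order_release); // no instruction on x86
  std::atomic_signal_fence(std::memory_order_seq_cst); // compiler barrier only
}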
11456 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0)); 11457} 11458 11459 11460static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget, 11461 SelectionDAG &DAG) { 11462 EVT T = Op.getValueType(); 11463 DebugLoc DL = Op.getDebugLoc(); 11464 unsigned Reg = 0; 11465 unsigned size = 0; 11466 switch(T.getSimpleVT().SimpleTy) { 11467 default: llvm_unreachable("Invalid value type!"); 11468 case MVT::i8: Reg = X86::AL; size = 1; break; 11469 case MVT::i16: Reg = X86::AX; size = 2; break; 11470 case MVT::i32: Reg = X86::EAX; size = 4; break; 11471 case MVT::i64: 11472 assert(Subtarget->is64Bit() && "Node not type legal!"); 11473 Reg = X86::RAX; size = 8; 11474 break; 11475 } 11476 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg, 11477 Op.getOperand(2), SDValue()); 11478 SDValue Ops[] = { cpIn.getValue(0), 11479 Op.getOperand(1), 11480 Op.getOperand(3), 11481 DAG.getTargetConstant(size, MVT::i8), 11482 cpIn.getValue(1) }; 11483 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 11484 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand(); 11485 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys, 11486 Ops, 5, T, MMO); 11487 SDValue cpOut = 11488 DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1)); 11489 return cpOut; 11490} 11491 11492static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget, 11493 SelectionDAG &DAG) { 11494 assert(Subtarget->is64Bit() && "Result not type legalized?"); 11495 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 11496 SDValue TheChain = Op.getOperand(0); 11497 DebugLoc dl = Op.getDebugLoc(); 11498 SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1); 11499 SDValue rax = DAG.getCopyFromReg(rd, dl, X86::RAX, MVT::i64, rd.getValue(1)); 11500 SDValue rdx = DAG.getCopyFromReg(rax.getValue(1), dl, X86::RDX, MVT::i64, 11501 rax.getValue(2)); 11502 SDValue Tmp = DAG.getNode(ISD::SHL, dl, MVT::i64, rdx, 11503 DAG.getConstant(32, MVT::i8)); 11504 SDValue Ops[] = { 11505 DAG.getNode(ISD::OR, dl, MVT::i64, rax, Tmp), 11506 rdx.getValue(1) 11507 }; 11508 return DAG.getMergeValues(Ops, 2, dl); 11509} 11510 11511SDValue X86TargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const { 11512 EVT SrcVT = Op.getOperand(0).getValueType(); 11513 EVT DstVT = Op.getValueType(); 11514 assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() && 11515 Subtarget->hasMMX() && "Unexpected custom BITCAST"); 11516 assert((DstVT == MVT::i64 || 11517 (DstVT.isVector() && DstVT.getSizeInBits()==64)) && 11518 "Unexpected custom BITCAST"); 11519 // i64 <=> MMX conversions are Legal. 11520 if (SrcVT==MVT::i64 && DstVT.isVector()) 11521 return Op; 11522 if (DstVT==MVT::i64 && SrcVT.isVector()) 11523 return Op; 11524 // MMX <=> MMX conversions are Legal. 11525 if (SrcVT.isVector() && DstVT.isVector()) 11526 return Op; 11527 // All other conversions need to be expanded. 
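// For reference, the READCYCLECOUNTER lowering above reassembles RDTSC's
// EDX:EAX result pair exactly the way a hand-written reader does. A sketch,
// assuming GCC/Clang inline-assembly syntax.
static inline unsigned long long readTSCSketch() {
  unsigned Lo, Hi;
  __asm__ __volatile__("rdtsc" : "=a"(Lo), "=d"(Hi));
  return ((unsigned long long)Hi << 32) | Lo;  // the SHL-by-32 + OR above
}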
11528 return SDValue(); 11529} 11530 11531static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) { 11532 SDNode *Node = Op.getNode(); 11533 DebugLoc dl = Node->getDebugLoc(); 11534 EVT T = Node->getValueType(0); 11535 SDValue negOp = DAG.getNode(ISD::SUB, dl, T, 11536 DAG.getConstant(0, T), Node->getOperand(2)); 11537 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl, 11538 cast<AtomicSDNode>(Node)->getMemoryVT(), 11539 Node->getOperand(0), 11540 Node->getOperand(1), negOp, 11541 cast<AtomicSDNode>(Node)->getSrcValue(), 11542 cast<AtomicSDNode>(Node)->getAlignment(), 11543 cast<AtomicSDNode>(Node)->getOrdering(), 11544 cast<AtomicSDNode>(Node)->getSynchScope()); 11545} 11546 11547static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) { 11548 SDNode *Node = Op.getNode(); 11549 DebugLoc dl = Node->getDebugLoc(); 11550 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT(); 11551 11552 // Convert seq_cst store -> xchg 11553 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b) 11554 // FIXME: On 32-bit, store -> fist or movq would be more efficient 11555 // (The only way to get a 16-byte store is cmpxchg16b) 11556 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment. 11557 if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent || 11558 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 11559 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl, 11560 cast<AtomicSDNode>(Node)->getMemoryVT(), 11561 Node->getOperand(0), 11562 Node->getOperand(1), Node->getOperand(2), 11563 cast<AtomicSDNode>(Node)->getMemOperand(), 11564 cast<AtomicSDNode>(Node)->getOrdering(), 11565 cast<AtomicSDNode>(Node)->getSynchScope()); 11566 return Swap.getValue(1); 11567 } 11568 // Other atomic stores have a simple pattern. 11569 return Op; 11570} 11571 11572static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) { 11573 EVT VT = Op.getNode()->getValueType(0); 11574 11575 // Let legalize expand this if it isn't a legal type yet. 11576 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 11577 return SDValue(); 11578 11579 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 11580 11581 unsigned Opc; 11582 bool ExtraOp = false; 11583 switch (Op.getOpcode()) { 11584 default: llvm_unreachable("Invalid code"); 11585 case ISD::ADDC: Opc = X86ISD::ADD; break; 11586 case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break; 11587 case ISD::SUBC: Opc = X86ISD::SUB; break; 11588 case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break; 11589 } 11590 11591 if (!ExtraOp) 11592 return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0), 11593 Op.getOperand(1)); 11594 return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0), 11595 Op.getOperand(1), Op.getOperand(2)); 11596} 11597 11598/// LowerOperation - Provide custom lowering hooks for some operations. 
11599/// 11600SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 11601 switch (Op.getOpcode()) { 11602 default: llvm_unreachable("Should not custom lower this!"); 11603 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op,DAG); 11604 case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, Subtarget, DAG); 11605 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG); 11606 case ISD::ATOMIC_CMP_SWAP: return LowerCMP_SWAP(Op, Subtarget, DAG); 11607 case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG); 11608 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op,DAG); 11609 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 11610 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 11611 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 11612 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 11613 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 11614 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG); 11615 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG); 11616 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 11617 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 11618 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 11619 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 11620 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG); 11621 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 11622 case ISD::SHL_PARTS: 11623 case ISD::SRA_PARTS: 11624 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG); 11625 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 11626 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG); 11627 case ISD::TRUNCATE: return lowerTRUNCATE(Op, DAG); 11628 case ISD::ZERO_EXTEND: return lowerZERO_EXTEND(Op, DAG); 11629 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 11630 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG); 11631 case ISD::FP_EXTEND: return lowerFP_EXTEND(Op, DAG); 11632 case ISD::FABS: return LowerFABS(Op, DAG); 11633 case ISD::FNEG: return LowerFNEG(Op, DAG); 11634 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 11635 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG); 11636 case ISD::SETCC: return LowerSETCC(Op, DAG); 11637 case ISD::SELECT: return LowerSELECT(Op, DAG); 11638 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 11639 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 11640 case ISD::VASTART: return LowerVASTART(Op, DAG); 11641 case ISD::VAARG: return LowerVAARG(Op, DAG); 11642 case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG); 11643 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 11644 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG); 11645 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 11646 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 11647 case ISD::FRAME_TO_ARGS_OFFSET: 11648 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG); 11649 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); 11650 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG); 11651 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG); 11652 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG); 11653 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG); 11654 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG); 11655 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 11656 case ISD::CTLZ: 
return LowerCTLZ(Op, DAG); 11657 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, DAG); 11658 case ISD::CTTZ: return LowerCTTZ(Op, DAG); 11659 case ISD::MUL: return LowerMUL(Op, Subtarget, DAG); 11660 case ISD::SRA: 11661 case ISD::SRL: 11662 case ISD::SHL: return LowerShift(Op, DAG); 11663 case ISD::SADDO: 11664 case ISD::UADDO: 11665 case ISD::SSUBO: 11666 case ISD::USUBO: 11667 case ISD::SMULO: 11668 case ISD::UMULO: return LowerXALUO(Op, DAG); 11669 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG); 11670 case ISD::BITCAST: return LowerBITCAST(Op, DAG); 11671 case ISD::ADDC: 11672 case ISD::ADDE: 11673 case ISD::SUBC: 11674 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); 11675 case ISD::ADD: return LowerADD(Op, DAG); 11676 case ISD::SUB: return LowerSUB(Op, DAG); 11677 } 11678} 11679 11680static void ReplaceATOMIC_LOAD(SDNode *Node, 11681 SmallVectorImpl<SDValue> &Results, 11682 SelectionDAG &DAG) { 11683 DebugLoc dl = Node->getDebugLoc(); 11684 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT(); 11685 11686 // Convert wide load -> cmpxchg8b/cmpxchg16b 11687 // FIXME: On 32-bit, load -> fild or movq would be more efficient 11688 // (The only way to get a 16-byte load is cmpxchg16b) 11689 // FIXME: 16-byte ATOMIC_CMP_SWAP isn't actually hooked up at the moment. 11690 SDValue Zero = DAG.getConstant(0, VT); 11691 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, dl, VT, 11692 Node->getOperand(0), 11693 Node->getOperand(1), Zero, Zero, 11694 cast<AtomicSDNode>(Node)->getMemOperand(), 11695 cast<AtomicSDNode>(Node)->getOrdering(), 11696 cast<AtomicSDNode>(Node)->getSynchScope()); 11697 Results.push_back(Swap.getValue(0)); 11698 Results.push_back(Swap.getValue(1)); 11699} 11700 11701static void 11702ReplaceATOMIC_BINARY_64(SDNode *Node, SmallVectorImpl<SDValue>&Results, 11703 SelectionDAG &DAG, unsigned NewOp) { 11704 DebugLoc dl = Node->getDebugLoc(); 11705 assert (Node->getValueType(0) == MVT::i64 && 11706 "Only know how to expand i64 atomics"); 11707 11708 SDValue Chain = Node->getOperand(0); 11709 SDValue In1 = Node->getOperand(1); 11710 SDValue In2L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 11711 Node->getOperand(2), DAG.getIntPtrConstant(0)); 11712 SDValue In2H = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 11713 Node->getOperand(2), DAG.getIntPtrConstant(1)); 11714 SDValue Ops[] = { Chain, In1, In2L, In2H }; 11715 SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 11716 SDValue Result = 11717 DAG.getMemIntrinsicNode(NewOp, dl, Tys, Ops, 4, MVT::i64, 11718 cast<MemSDNode>(Node)->getMemOperand()); 11719 SDValue OpsF[] = { Result.getValue(0), Result.getValue(1)}; 11720 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2)); 11721 Results.push_back(Result.getValue(2)); 11722} 11723 11724/// ReplaceNodeResults - Replace a node with an illegal result type 11725/// with a new node built out of custom code. 11726void X86TargetLowering::ReplaceNodeResults(SDNode *N, 11727 SmallVectorImpl<SDValue>&Results, 11728 SelectionDAG &DAG) const { 11729 DebugLoc dl = N->getDebugLoc(); 11730 switch (N->getOpcode()) { 11731 default: 11732 llvm_unreachable("Do not know how to custom type legalize this operation!"); 11733 case ISD::SIGN_EXTEND_INREG: 11734 case ISD::ADDC: 11735 case ISD::ADDE: 11736 case ISD::SUBC: 11737 case ISD::SUBE: 11738 // We don't want to expand or promote these. 
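// The UINT_TO_FP case below relies on a classic bias trick: for a 32-bit
// value N, OR-ing N into the low bits of the double 2^52 (bit pattern
// 0x4330000000000000) yields exactly 2^52 + N, so subtracting 2^52 converts
// N to double without any int-to-fp instruction. A scalar model of one lane,
// illustrative only; assumes <cstring> for the bit cast.
static double uintToDoubleSketch(unsigned N) {
  unsigned long long Bits = 0x4330000000000000ULL | N; // bits of 2^52 + N
  double D;
  std::memcpy(&D, &Bits, sizeof(D));
  return D - 4503599627370496.0;                       // subtract 2^52
}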
11739 return; 11740 case ISD::FP_TO_SINT: 11741 case ISD::FP_TO_UINT: { 11742 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT; 11743 11744 if (!IsSigned && !isIntegerTypeFTOL(SDValue(N, 0).getValueType())) 11745 return; 11746 11747 std::pair<SDValue,SDValue> Vals = 11748 FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true); 11749 SDValue FIST = Vals.first, StackSlot = Vals.second; 11750 if (FIST.getNode() != 0) { 11751 EVT VT = N->getValueType(0); 11752 // Return a load from the stack slot. 11753 if (StackSlot.getNode() != 0) 11754 Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot, 11755 MachinePointerInfo(), 11756 false, false, false, 0)); 11757 else 11758 Results.push_back(FIST); 11759 } 11760 return; 11761 } 11762 case ISD::UINT_TO_FP: { 11763 if (N->getOperand(0).getValueType() != MVT::v2i32 && 11764 N->getValueType(0) != MVT::v2f32) 11765 return; 11766 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64, 11767 N->getOperand(0)); 11768 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), 11769 MVT::f64); 11770 SDValue VBias = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2f64, Bias, Bias); 11771 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn, 11772 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, VBias)); 11773 Or = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or); 11774 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias); 11775 Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub)); 11776 return; 11777 } 11778 case ISD::FP_ROUND: { 11779 SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0)); 11780 Results.push_back(V); 11781 return; 11782 } 11783 case ISD::READCYCLECOUNTER: { 11784 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 11785 SDValue TheChain = N->getOperand(0); 11786 SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1); 11787 SDValue eax = DAG.getCopyFromReg(rd, dl, X86::EAX, MVT::i32, 11788 rd.getValue(1)); 11789 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), dl, X86::EDX, MVT::i32, 11790 eax.getValue(2)); 11791 // Use a buildpair to merge the two 32-bit values into a 64-bit one. 11792 SDValue Ops[] = { eax, edx }; 11793 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops, 2)); 11794 Results.push_back(edx.getValue(1)); 11795 return; 11796 } 11797 case ISD::ATOMIC_CMP_SWAP: { 11798 EVT T = N->getValueType(0); 11799 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair"); 11800 bool Regs64bit = T == MVT::i128; 11801 EVT HalfT = Regs64bit ? MVT::i64 : MVT::i32; 11802 SDValue cpInL, cpInH; 11803 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2), 11804 DAG.getConstant(0, HalfT)); 11805 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2), 11806 DAG.getConstant(1, HalfT)); 11807 cpInL = DAG.getCopyToReg(N->getOperand(0), dl, 11808 Regs64bit ? X86::RAX : X86::EAX, 11809 cpInL, SDValue()); 11810 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl, 11811 Regs64bit ? X86::RDX : X86::EDX, 11812 cpInH, cpInL.getValue(1)); 11813 SDValue swapInL, swapInH; 11814 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3), 11815 DAG.getConstant(0, HalfT)); 11816 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3), 11817 DAG.getConstant(1, HalfT)); 11818 swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl, 11819 Regs64bit ? X86::RBX : X86::EBX, 11820 swapInL, cpInH.getValue(1)); 11821 swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl, 11822 Regs64bit ? 
X86::RCX : X86::ECX, 11823 swapInH, swapInL.getValue(1)); 11824 SDValue Ops[] = { swapInH.getValue(0), 11825 N->getOperand(1), 11826 swapInH.getValue(1) }; 11827 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 11828 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); 11829 unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_DAG : 11830 X86ISD::LCMPXCHG8_DAG; 11831 SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, 11832 Ops, 3, T, MMO); 11833 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl, 11834 Regs64bit ? X86::RAX : X86::EAX, 11835 HalfT, Result.getValue(1)); 11836 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl, 11837 Regs64bit ? X86::RDX : X86::EDX, 11838 HalfT, cpOutL.getValue(2)); 11839 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)}; 11840 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF, 2)); 11841 Results.push_back(cpOutH.getValue(1)); 11842 return; 11843 } 11844 case ISD::ATOMIC_LOAD_ADD: 11845 case ISD::ATOMIC_LOAD_AND: 11846 case ISD::ATOMIC_LOAD_NAND: 11847 case ISD::ATOMIC_LOAD_OR: 11848 case ISD::ATOMIC_LOAD_SUB: 11849 case ISD::ATOMIC_LOAD_XOR: 11850 case ISD::ATOMIC_LOAD_MAX: 11851 case ISD::ATOMIC_LOAD_MIN: 11852 case ISD::ATOMIC_LOAD_UMAX: 11853 case ISD::ATOMIC_LOAD_UMIN: 11854 case ISD::ATOMIC_SWAP: { 11855 unsigned Opc; 11856 switch (N->getOpcode()) { 11857 default: llvm_unreachable("Unexpected opcode"); 11858 case ISD::ATOMIC_LOAD_ADD: 11859 Opc = X86ISD::ATOMADD64_DAG; 11860 break; 11861 case ISD::ATOMIC_LOAD_AND: 11862 Opc = X86ISD::ATOMAND64_DAG; 11863 break; 11864 case ISD::ATOMIC_LOAD_NAND: 11865 Opc = X86ISD::ATOMNAND64_DAG; 11866 break; 11867 case ISD::ATOMIC_LOAD_OR: 11868 Opc = X86ISD::ATOMOR64_DAG; 11869 break; 11870 case ISD::ATOMIC_LOAD_SUB: 11871 Opc = X86ISD::ATOMSUB64_DAG; 11872 break; 11873 case ISD::ATOMIC_LOAD_XOR: 11874 Opc = X86ISD::ATOMXOR64_DAG; 11875 break; 11876 case ISD::ATOMIC_LOAD_MAX: 11877 Opc = X86ISD::ATOMMAX64_DAG; 11878 break; 11879 case ISD::ATOMIC_LOAD_MIN: 11880 Opc = X86ISD::ATOMMIN64_DAG; 11881 break; 11882 case ISD::ATOMIC_LOAD_UMAX: 11883 Opc = X86ISD::ATOMUMAX64_DAG; 11884 break; 11885 case ISD::ATOMIC_LOAD_UMIN: 11886 Opc = X86ISD::ATOMUMIN64_DAG; 11887 break; 11888 case ISD::ATOMIC_SWAP: 11889 Opc = X86ISD::ATOMSWAP64_DAG; 11890 break; 11891 } 11892 ReplaceATOMIC_BINARY_64(N, Results, DAG, Opc); 11893 return; 11894 } 11895 case ISD::ATOMIC_LOAD: 11896 ReplaceATOMIC_LOAD(N, Results, DAG); 11897 } 11898} 11899 11900const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { 11901 switch (Opcode) { 11902 default: return NULL; 11903 case X86ISD::BSF: return "X86ISD::BSF"; 11904 case X86ISD::BSR: return "X86ISD::BSR"; 11905 case X86ISD::SHLD: return "X86ISD::SHLD"; 11906 case X86ISD::SHRD: return "X86ISD::SHRD"; 11907 case X86ISD::FAND: return "X86ISD::FAND"; 11908 case X86ISD::FOR: return "X86ISD::FOR"; 11909 case X86ISD::FXOR: return "X86ISD::FXOR"; 11910 case X86ISD::FSRL: return "X86ISD::FSRL"; 11911 case X86ISD::FILD: return "X86ISD::FILD"; 11912 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG"; 11913 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM"; 11914 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM"; 11915 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM"; 11916 case X86ISD::FLD: return "X86ISD::FLD"; 11917 case X86ISD::FST: return "X86ISD::FST"; 11918 case X86ISD::CALL: return "X86ISD::CALL"; 11919 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG"; 11920 case X86ISD::BT: return 
"X86ISD::BT"; 11921 case X86ISD::CMP: return "X86ISD::CMP"; 11922 case X86ISD::COMI: return "X86ISD::COMI"; 11923 case X86ISD::UCOMI: return "X86ISD::UCOMI"; 11924 case X86ISD::SETCC: return "X86ISD::SETCC"; 11925 case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY"; 11926 case X86ISD::FSETCCsd: return "X86ISD::FSETCCsd"; 11927 case X86ISD::FSETCCss: return "X86ISD::FSETCCss"; 11928 case X86ISD::CMOV: return "X86ISD::CMOV"; 11929 case X86ISD::BRCOND: return "X86ISD::BRCOND"; 11930 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG"; 11931 case X86ISD::REP_STOS: return "X86ISD::REP_STOS"; 11932 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS"; 11933 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg"; 11934 case X86ISD::Wrapper: return "X86ISD::Wrapper"; 11935 case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP"; 11936 case X86ISD::PEXTRB: return "X86ISD::PEXTRB"; 11937 case X86ISD::PEXTRW: return "X86ISD::PEXTRW"; 11938 case X86ISD::INSERTPS: return "X86ISD::INSERTPS"; 11939 case X86ISD::PINSRB: return "X86ISD::PINSRB"; 11940 case X86ISD::PINSRW: return "X86ISD::PINSRW"; 11941 case X86ISD::PSHUFB: return "X86ISD::PSHUFB"; 11942 case X86ISD::ANDNP: return "X86ISD::ANDNP"; 11943 case X86ISD::PSIGN: return "X86ISD::PSIGN"; 11944 case X86ISD::BLENDV: return "X86ISD::BLENDV"; 11945 case X86ISD::BLENDPW: return "X86ISD::BLENDPW"; 11946 case X86ISD::BLENDPS: return "X86ISD::BLENDPS"; 11947 case X86ISD::BLENDPD: return "X86ISD::BLENDPD"; 11948 case X86ISD::HADD: return "X86ISD::HADD"; 11949 case X86ISD::HSUB: return "X86ISD::HSUB"; 11950 case X86ISD::FHADD: return "X86ISD::FHADD"; 11951 case X86ISD::FHSUB: return "X86ISD::FHSUB"; 11952 case X86ISD::FMAX: return "X86ISD::FMAX"; 11953 case X86ISD::FMIN: return "X86ISD::FMIN"; 11954 case X86ISD::FMAXC: return "X86ISD::FMAXC"; 11955 case X86ISD::FMINC: return "X86ISD::FMINC"; 11956 case X86ISD::FRSQRT: return "X86ISD::FRSQRT"; 11957 case X86ISD::FRCP: return "X86ISD::FRCP"; 11958 case X86ISD::TLSADDR: return "X86ISD::TLSADDR"; 11959 case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR"; 11960 case X86ISD::TLSCALL: return "X86ISD::TLSCALL"; 11961 case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP"; 11962 case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP"; 11963 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN"; 11964 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN"; 11965 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m"; 11966 case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r"; 11967 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG"; 11968 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG"; 11969 case X86ISD::ATOMADD64_DAG: return "X86ISD::ATOMADD64_DAG"; 11970 case X86ISD::ATOMSUB64_DAG: return "X86ISD::ATOMSUB64_DAG"; 11971 case X86ISD::ATOMOR64_DAG: return "X86ISD::ATOMOR64_DAG"; 11972 case X86ISD::ATOMXOR64_DAG: return "X86ISD::ATOMXOR64_DAG"; 11973 case X86ISD::ATOMAND64_DAG: return "X86ISD::ATOMAND64_DAG"; 11974 case X86ISD::ATOMNAND64_DAG: return "X86ISD::ATOMNAND64_DAG"; 11975 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL"; 11976 case X86ISD::VSEXT_MOVL: return "X86ISD::VSEXT_MOVL"; 11977 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD"; 11978 case X86ISD::VZEXT: return "X86ISD::VZEXT"; 11979 case X86ISD::VSEXT: return "X86ISD::VSEXT"; 11980 case X86ISD::VFPEXT: return "X86ISD::VFPEXT"; 11981 case X86ISD::VFPROUND: return "X86ISD::VFPROUND"; 11982 case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ"; 11983 case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ"; 11984 case X86ISD::VSHL: return 
"X86ISD::VSHL"; 11985 case X86ISD::VSRL: return "X86ISD::VSRL"; 11986 case X86ISD::VSRA: return "X86ISD::VSRA"; 11987 case X86ISD::VSHLI: return "X86ISD::VSHLI"; 11988 case X86ISD::VSRLI: return "X86ISD::VSRLI"; 11989 case X86ISD::VSRAI: return "X86ISD::VSRAI"; 11990 case X86ISD::CMPP: return "X86ISD::CMPP"; 11991 case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ"; 11992 case X86ISD::PCMPGT: return "X86ISD::PCMPGT"; 11993 case X86ISD::ADD: return "X86ISD::ADD"; 11994 case X86ISD::SUB: return "X86ISD::SUB"; 11995 case X86ISD::ADC: return "X86ISD::ADC"; 11996 case X86ISD::SBB: return "X86ISD::SBB"; 11997 case X86ISD::SMUL: return "X86ISD::SMUL"; 11998 case X86ISD::UMUL: return "X86ISD::UMUL"; 11999 case X86ISD::INC: return "X86ISD::INC"; 12000 case X86ISD::DEC: return "X86ISD::DEC"; 12001 case X86ISD::OR: return "X86ISD::OR"; 12002 case X86ISD::XOR: return "X86ISD::XOR"; 12003 case X86ISD::AND: return "X86ISD::AND"; 12004 case X86ISD::ANDN: return "X86ISD::ANDN"; 12005 case X86ISD::BLSI: return "X86ISD::BLSI"; 12006 case X86ISD::BLSMSK: return "X86ISD::BLSMSK"; 12007 case X86ISD::BLSR: return "X86ISD::BLSR"; 12008 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM"; 12009 case X86ISD::PTEST: return "X86ISD::PTEST"; 12010 case X86ISD::TESTP: return "X86ISD::TESTP"; 12011 case X86ISD::PALIGN: return "X86ISD::PALIGN"; 12012 case X86ISD::PSHUFD: return "X86ISD::PSHUFD"; 12013 case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW"; 12014 case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW"; 12015 case X86ISD::SHUFP: return "X86ISD::SHUFP"; 12016 case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS"; 12017 case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD"; 12018 case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS"; 12019 case X86ISD::MOVLPS: return "X86ISD::MOVLPS"; 12020 case X86ISD::MOVLPD: return "X86ISD::MOVLPD"; 12021 case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP"; 12022 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP"; 12023 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP"; 12024 case X86ISD::MOVSD: return "X86ISD::MOVSD"; 12025 case X86ISD::MOVSS: return "X86ISD::MOVSS"; 12026 case X86ISD::UNPCKL: return "X86ISD::UNPCKL"; 12027 case X86ISD::UNPCKH: return "X86ISD::UNPCKH"; 12028 case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST"; 12029 case X86ISD::VPERMILP: return "X86ISD::VPERMILP"; 12030 case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128"; 12031 case X86ISD::VPERMV: return "X86ISD::VPERMV"; 12032 case X86ISD::VPERMI: return "X86ISD::VPERMI"; 12033 case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ"; 12034 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS"; 12035 case X86ISD::VAARG_64: return "X86ISD::VAARG_64"; 12036 case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA"; 12037 case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER"; 12038 case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA"; 12039 case X86ISD::WIN_FTOL: return "X86ISD::WIN_FTOL"; 12040 case X86ISD::SAHF: return "X86ISD::SAHF"; 12041 case X86ISD::RDRAND: return "X86ISD::RDRAND"; 12042 case X86ISD::FMADD: return "X86ISD::FMADD"; 12043 case X86ISD::FMSUB: return "X86ISD::FMSUB"; 12044 case X86ISD::FNMADD: return "X86ISD::FNMADD"; 12045 case X86ISD::FNMSUB: return "X86ISD::FNMSUB"; 12046 case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB"; 12047 case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD"; 12048 } 12049} 12050 12051// isLegalAddressingMode - Return true if the addressing mode represented 12052// by AM is legal for this target, for a load/store of the specified type. 
12053bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM, 12054 Type *Ty) const { 12055 // X86 supports extremely general addressing modes. 12056 CodeModel::Model M = getTargetMachine().getCodeModel(); 12057 Reloc::Model R = getTargetMachine().getRelocationModel(); 12058 12059 // X86 allows a sign-extended 32-bit immediate field as a displacement. 12060 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != NULL)) 12061 return false; 12062 12063 if (AM.BaseGV) { 12064 unsigned GVFlags = 12065 Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine()); 12066 12067 // If a reference to this global requires an extra load, we can't fold it. 12068 if (isGlobalStubReference(GVFlags)) 12069 return false; 12070 12071 // If BaseGV requires a register for the PIC base, we cannot also have a 12072 // BaseReg specified. 12073 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags)) 12074 return false; 12075 12076 // If lower 4G is not available, then we must use rip-relative addressing. 12077 if ((M != CodeModel::Small || R != Reloc::Static) && 12078 Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1)) 12079 return false; 12080 } 12081 12082 switch (AM.Scale) { 12083 case 0: 12084 case 1: 12085 case 2: 12086 case 4: 12087 case 8: 12088 // These scales always work. 12089 break; 12090 case 3: 12091 case 5: 12092 case 9: 12093 // These scales are formed with basereg+scalereg. Only accept if there is 12094 // no basereg yet. 12095 if (AM.HasBaseReg) 12096 return false; 12097 break; 12098 default: // Other stuff never works. 12099 return false; 12100 } 12101 12102 return true; 12103} 12104 12105 12106bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const { 12107 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) 12108 return false; 12109 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); 12110 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); 12111 if (NumBits1 <= NumBits2) 12112 return false; 12113 return true; 12114} 12115 12116bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const { 12117 return Imm == (int32_t)Imm; 12118} 12119 12120bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const { 12121 // Can also use sub to handle negated immediates. 12122 return Imm == (int32_t)Imm; 12123} 12124 12125bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { 12126 if (!VT1.isInteger() || !VT2.isInteger()) 12127 return false; 12128 unsigned NumBits1 = VT1.getSizeInBits(); 12129 unsigned NumBits2 = VT2.getSizeInBits(); 12130 if (NumBits1 <= NumBits2) 12131 return false; 12132 return true; 12133} 12134 12135bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const { 12136 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers. 12137 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit(); 12138} 12139 12140bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const { 12141 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers. 12142 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit(); 12143} 12144 12145bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const { 12146 // i16 instructions are longer (0x66 prefix) and potentially slower. 12147 return !(VT1 == MVT::i32 && VT2 == MVT::i16); 12148} 12149 12150/// isShuffleMaskLegal - Targets can use this to indicate that they only 12151/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 12152/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 12153/// are assumed to be legal. 
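/// Added note (illustrative, not in the original source): a v4i32 splat mask
/// such as <0,0,0,0> is legal here because it matches a single PSHUFD, while
/// an arbitrary mask is only legal if one of the specific mask predicates
/// checked below accepts it.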
12154bool 12155X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, 12156 EVT VT) const { 12157 // Very little shuffling can be done for 64-bit vectors right now. 12158 if (VT.getSizeInBits() == 64) 12159 return false; 12160 12161 // FIXME: pshufb, blends, shifts. 12162 return (VT.getVectorNumElements() == 2 || 12163 ShuffleVectorSDNode::isSplatMask(&M[0], VT) || 12164 isMOVLMask(M, VT) || 12165 isSHUFPMask(M, VT, Subtarget->hasAVX()) || 12166 isPSHUFDMask(M, VT) || 12167 isPSHUFHWMask(M, VT, Subtarget->hasAVX2()) || 12168 isPSHUFLWMask(M, VT, Subtarget->hasAVX2()) || 12169 isPALIGNRMask(M, VT, Subtarget) || 12170 isUNPCKLMask(M, VT, Subtarget->hasAVX2()) || 12171 isUNPCKHMask(M, VT, Subtarget->hasAVX2()) || 12172 isUNPCKL_v_undef_Mask(M, VT, Subtarget->hasAVX2()) || 12173 isUNPCKH_v_undef_Mask(M, VT, Subtarget->hasAVX2())); 12174} 12175 12176bool 12177X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask, 12178 EVT VT) const { 12179 unsigned NumElts = VT.getVectorNumElements(); 12180 // FIXME: This collection of masks seems suspect. 12181 if (NumElts == 2) 12182 return true; 12183 if (NumElts == 4 && VT.is128BitVector()) { 12184 return (isMOVLMask(Mask, VT) || 12185 isCommutedMOVLMask(Mask, VT, true) || 12186 isSHUFPMask(Mask, VT, Subtarget->hasAVX()) || 12187 isSHUFPMask(Mask, VT, Subtarget->hasAVX(), /* Commuted */ true)); 12188 } 12189 return false; 12190} 12191 12192//===----------------------------------------------------------------------===// 12193// X86 Scheduler Hooks 12194//===----------------------------------------------------------------------===// 12195 12196// private utility function 12197 12198/// Utility function to emit xbegin specifying the start of an RTM region. 12199MachineBasicBlock * 12200X86TargetLowering::EmitXBegin(MachineInstr *MI, MachineBasicBlock *MBB) const { 12201 DebugLoc DL = MI->getDebugLoc(); 12202 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 12203 12204 const BasicBlock *BB = MBB->getBasicBlock(); 12205 MachineFunction::iterator I = MBB; 12206 ++I; 12207 12208 // For the v = xbegin(), we generate 12209 // 12210 // thisMBB: 12211 // xbegin sinkMBB 12212 // 12213 // mainMBB: 12214 // eax = -1 12215 // 12216 // sinkMBB: 12217 // v = eax 12218 12219 MachineBasicBlock *thisMBB = MBB; 12220 MachineFunction *MF = MBB->getParent(); 12221 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 12222 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 12223 MF->insert(I, mainMBB); 12224 MF->insert(I, sinkMBB); 12225 12226 // Transfer the remainder of BB and its successor edges to sinkMBB. 
12227 sinkMBB->splice(sinkMBB->begin(), MBB, 12228 llvm::next(MachineBasicBlock::iterator(MI)), MBB->end()); 12229 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 12230 12231 // thisMBB: 12232 // xbegin sinkMBB 12233 // # fallthrough to mainMBB 12234 // # abortion to sinkMBB 12235 BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(sinkMBB); 12236 thisMBB->addSuccessor(mainMBB); 12237 thisMBB->addSuccessor(sinkMBB); 12238 12239 // mainMBB: 12240 // EAX = -1 12241 BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), X86::EAX).addImm(-1); 12242 mainMBB->addSuccessor(sinkMBB); 12243 12244 // sinkMBB: 12245 // EAX is live into the sinkMBB 12246 sinkMBB->addLiveIn(X86::EAX); 12247 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 12248 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg()) 12249 .addReg(X86::EAX); 12250 12251 MI->eraseFromParent(); 12252 return sinkMBB; 12253} 12254 12255// Get CMPXCHG opcode for the specified data type. 12256static unsigned getCmpXChgOpcode(EVT VT) { 12257 switch (VT.getSimpleVT().SimpleTy) { 12258 case MVT::i8: return X86::LCMPXCHG8; 12259 case MVT::i16: return X86::LCMPXCHG16; 12260 case MVT::i32: return X86::LCMPXCHG32; 12261 case MVT::i64: return X86::LCMPXCHG64; 12262 default: 12263 break; 12264 } 12265 llvm_unreachable("Invalid operand size!"); 12266} 12267 12268// Get LOAD opcode for the specified data type. 12269static unsigned getLoadOpcode(EVT VT) { 12270 switch (VT.getSimpleVT().SimpleTy) { 12271 case MVT::i8: return X86::MOV8rm; 12272 case MVT::i16: return X86::MOV16rm; 12273 case MVT::i32: return X86::MOV32rm; 12274 case MVT::i64: return X86::MOV64rm; 12275 default: 12276 break; 12277 } 12278 llvm_unreachable("Invalid operand size!"); 12279} 12280 12281// Get opcode of the non-atomic one from the specified atomic instruction. 12282static unsigned getNonAtomicOpcode(unsigned Opc) { 12283 switch (Opc) { 12284 case X86::ATOMAND8: return X86::AND8rr; 12285 case X86::ATOMAND16: return X86::AND16rr; 12286 case X86::ATOMAND32: return X86::AND32rr; 12287 case X86::ATOMAND64: return X86::AND64rr; 12288 case X86::ATOMOR8: return X86::OR8rr; 12289 case X86::ATOMOR16: return X86::OR16rr; 12290 case X86::ATOMOR32: return X86::OR32rr; 12291 case X86::ATOMOR64: return X86::OR64rr; 12292 case X86::ATOMXOR8: return X86::XOR8rr; 12293 case X86::ATOMXOR16: return X86::XOR16rr; 12294 case X86::ATOMXOR32: return X86::XOR32rr; 12295 case X86::ATOMXOR64: return X86::XOR64rr; 12296 } 12297 llvm_unreachable("Unhandled atomic-load-op opcode!"); 12298} 12299 12300// Get opcode of the non-atomic one from the specified atomic instruction with 12301// extra opcode. 
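// Added example (read off the table below): ATOMNAND32 becomes AND32rr
// followed by NOT32r, while ATOMMAX32 becomes CMP32rr to set EFLAGS plus
// CMOVL32rr to keep the larger value.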
12302static unsigned getNonAtomicOpcodeWithExtraOpc(unsigned Opc, 12303 unsigned &ExtraOpc) { 12304 switch (Opc) { 12305 case X86::ATOMNAND8: ExtraOpc = X86::NOT8r; return X86::AND8rr; 12306 case X86::ATOMNAND16: ExtraOpc = X86::NOT16r; return X86::AND16rr; 12307 case X86::ATOMNAND32: ExtraOpc = X86::NOT32r; return X86::AND32rr; 12308 case X86::ATOMNAND64: ExtraOpc = X86::NOT64r; return X86::AND64rr; 12309 case X86::ATOMMAX8: ExtraOpc = X86::CMP8rr; return X86::CMOVL32rr; 12310 case X86::ATOMMAX16: ExtraOpc = X86::CMP16rr; return X86::CMOVL16rr; 12311 case X86::ATOMMAX32: ExtraOpc = X86::CMP32rr; return X86::CMOVL32rr; 12312 case X86::ATOMMAX64: ExtraOpc = X86::CMP64rr; return X86::CMOVL64rr; 12313 case X86::ATOMMIN8: ExtraOpc = X86::CMP8rr; return X86::CMOVG32rr; 12314 case X86::ATOMMIN16: ExtraOpc = X86::CMP16rr; return X86::CMOVG16rr; 12315 case X86::ATOMMIN32: ExtraOpc = X86::CMP32rr; return X86::CMOVG32rr; 12316 case X86::ATOMMIN64: ExtraOpc = X86::CMP64rr; return X86::CMOVG64rr; 12317 case X86::ATOMUMAX8: ExtraOpc = X86::CMP8rr; return X86::CMOVB32rr; 12318 case X86::ATOMUMAX16: ExtraOpc = X86::CMP16rr; return X86::CMOVB16rr; 12319 case X86::ATOMUMAX32: ExtraOpc = X86::CMP32rr; return X86::CMOVB32rr; 12320 case X86::ATOMUMAX64: ExtraOpc = X86::CMP64rr; return X86::CMOVB64rr; 12321 case X86::ATOMUMIN8: ExtraOpc = X86::CMP8rr; return X86::CMOVA32rr; 12322 case X86::ATOMUMIN16: ExtraOpc = X86::CMP16rr; return X86::CMOVA16rr; 12323 case X86::ATOMUMIN32: ExtraOpc = X86::CMP32rr; return X86::CMOVA32rr; 12324 case X86::ATOMUMIN64: ExtraOpc = X86::CMP64rr; return X86::CMOVA64rr; 12325 } 12326 llvm_unreachable("Unhandled atomic-load-op opcode!"); 12327} 12328 12329// Get opcode of the non-atomic one from the specified atomic instruction for 12330// 64-bit data type on 32-bit target. 12331static unsigned getNonAtomic6432Opcode(unsigned Opc, unsigned &HiOpc) { 12332 switch (Opc) { 12333 case X86::ATOMAND6432: HiOpc = X86::AND32rr; return X86::AND32rr; 12334 case X86::ATOMOR6432: HiOpc = X86::OR32rr; return X86::OR32rr; 12335 case X86::ATOMXOR6432: HiOpc = X86::XOR32rr; return X86::XOR32rr; 12336 case X86::ATOMADD6432: HiOpc = X86::ADC32rr; return X86::ADD32rr; 12337 case X86::ATOMSUB6432: HiOpc = X86::SBB32rr; return X86::SUB32rr; 12338 case X86::ATOMSWAP6432: HiOpc = X86::MOV32rr; return X86::MOV32rr; 12339 case X86::ATOMMAX6432: HiOpc = X86::SETLr; return X86::SETLr; 12340 case X86::ATOMMIN6432: HiOpc = X86::SETGr; return X86::SETGr; 12341 case X86::ATOMUMAX6432: HiOpc = X86::SETBr; return X86::SETBr; 12342 case X86::ATOMUMIN6432: HiOpc = X86::SETAr; return X86::SETAr; 12343 } 12344 llvm_unreachable("Unhandled atomic-load-op opcode!"); 12345} 12346 12347// Get opcode of the non-atomic one from the specified atomic instruction for 12348// 64-bit data type on 32-bit target with extra opcode. 12349static unsigned getNonAtomic6432OpcodeWithExtraOpc(unsigned Opc, 12350 unsigned &HiOpc, 12351 unsigned &ExtraOpc) { 12352 switch (Opc) { 12353 case X86::ATOMNAND6432: 12354 ExtraOpc = X86::NOT32r; 12355 HiOpc = X86::AND32rr; 12356 return X86::AND32rr; 12357 } 12358 llvm_unreachable("Unhandled atomic-load-op opcode!"); 12359} 12360 12361// Get pseudo CMOV opcode from the specified data type. 
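// Note (added for exposition): no i64 entry is needed here because the
// no-CMOV fallback below is only reachable on 32-bit targets, where 64-bit
// atomics are handled by the separate 6432 expansion instead.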
12362static unsigned getPseudoCMOVOpc(EVT VT) { 12363 switch (VT.getSimpleVT().SimpleTy) { 12364 case MVT::i8: return X86::CMOV_GR8; 12365 case MVT::i16: return X86::CMOV_GR16; 12366 case MVT::i32: return X86::CMOV_GR32; 12367 default: 12368 break; 12369 } 12370 llvm_unreachable("Unknown CMOV opcode!"); 12371} 12372 12373// EmitAtomicLoadArith - emit the code sequence for pseudo atomic instructions. 12374// They will be translated into a spin-loop or compare-exchange loop from 12375// 12376// ... 12377// dst = atomic-fetch-op MI.addr, MI.val 12378// ... 12379// 12380// to 12381// 12382// ... 12383// EAX = LOAD MI.addr 12384// loop: 12385// t1 = OP MI.val, EAX 12386// LCMPXCHG [MI.addr], t1, [EAX is implicitly used & defined] 12387// JNE loop 12388// sink: 12389// dst = EAX 12390// ... 12391MachineBasicBlock * 12392X86TargetLowering::EmitAtomicLoadArith(MachineInstr *MI, 12393 MachineBasicBlock *MBB) const { 12394 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 12395 DebugLoc DL = MI->getDebugLoc(); 12396 12397 MachineFunction *MF = MBB->getParent(); 12398 MachineRegisterInfo &MRI = MF->getRegInfo(); 12399 12400 const BasicBlock *BB = MBB->getBasicBlock(); 12401 MachineFunction::iterator I = MBB; 12402 ++I; 12403 12404 assert(MI->getNumOperands() <= X86::AddrNumOperands + 2 && 12405 "Unexpected number of operands"); 12406 12407 assert(MI->hasOneMemOperand() && 12408 "Expected atomic-load-op to have one memoperand"); 12409 12410 // Memory Reference 12411 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 12412 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 12413 12414 unsigned DstReg, SrcReg; 12415 unsigned MemOpndSlot; 12416 12417 unsigned CurOp = 0; 12418 12419 DstReg = MI->getOperand(CurOp++).getReg(); 12420 MemOpndSlot = CurOp; 12421 CurOp += X86::AddrNumOperands; 12422 SrcReg = MI->getOperand(CurOp++).getReg(); 12423 12424 const TargetRegisterClass *RC = MRI.getRegClass(DstReg); 12425 MVT::SimpleValueType VT = *RC->vt_begin(); 12426 unsigned AccPhyReg = getX86SubSuperRegister(X86::EAX, VT); 12427 12428 unsigned LCMPXCHGOpc = getCmpXChgOpcode(VT); 12429 unsigned LOADOpc = getLoadOpcode(VT); 12430 12431 // For the atomic load-arith operator, we generate 12432 // 12433 // thisMBB: 12434 // EAX = LOAD [MI.addr] 12435 // mainMBB: 12436 // t1 = OP MI.val, EAX 12437 // LCMPXCHG [MI.addr], t1, [EAX is implicitly used & defined] 12438 // JNE mainMBB 12439 // sinkMBB: 12440 12441 MachineBasicBlock *thisMBB = MBB; 12442 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 12443 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 12444 MF->insert(I, mainMBB); 12445 MF->insert(I, sinkMBB); 12446 12447 MachineInstrBuilder MIB; 12448 12449 // Transfer the remainder of BB and its successor edges to sinkMBB. 12450 sinkMBB->splice(sinkMBB->begin(), MBB, 12451 llvm::next(MachineBasicBlock::iterator(MI)), MBB->end()); 12452 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 12453 12454 // thisMBB: 12455 MIB = BuildMI(thisMBB, DL, TII->get(LOADOpc), AccPhyReg); 12456 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) 12457 MIB.addOperand(MI->getOperand(MemOpndSlot + i)); 12458 MIB.setMemRefs(MMOBegin, MMOEnd); 12459 12460 thisMBB->addSuccessor(mainMBB); 12461 12462 // mainMBB: 12463 MachineBasicBlock *origMainMBB = mainMBB; 12464 mainMBB->addLiveIn(AccPhyReg); 12465 12466 // Copy AccPhyReg as it is used more than once. 
12467 unsigned AccReg = MRI.createVirtualRegister(RC); 12468 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), AccReg) 12469 .addReg(AccPhyReg); 12470 12471 unsigned t1 = MRI.createVirtualRegister(RC); 12472 unsigned Opc = MI->getOpcode(); 12473 switch (Opc) { 12474 default: 12475 llvm_unreachable("Unhandled atomic-load-op opcode!"); 12476 case X86::ATOMAND8: 12477 case X86::ATOMAND16: 12478 case X86::ATOMAND32: 12479 case X86::ATOMAND64: 12480 case X86::ATOMOR8: 12481 case X86::ATOMOR16: 12482 case X86::ATOMOR32: 12483 case X86::ATOMOR64: 12484 case X86::ATOMXOR8: 12485 case X86::ATOMXOR16: 12486 case X86::ATOMXOR32: 12487 case X86::ATOMXOR64: { 12488 unsigned ARITHOpc = getNonAtomicOpcode(Opc); 12489 BuildMI(mainMBB, DL, TII->get(ARITHOpc), t1).addReg(SrcReg) 12490 .addReg(AccReg); 12491 break; 12492 } 12493 case X86::ATOMNAND8: 12494 case X86::ATOMNAND16: 12495 case X86::ATOMNAND32: 12496 case X86::ATOMNAND64: { 12497 unsigned t2 = MRI.createVirtualRegister(RC); 12498 unsigned NOTOpc; 12499 unsigned ANDOpc = getNonAtomicOpcodeWithExtraOpc(Opc, NOTOpc); 12500 BuildMI(mainMBB, DL, TII->get(ANDOpc), t2).addReg(SrcReg) 12501 .addReg(AccReg); 12502 BuildMI(mainMBB, DL, TII->get(NOTOpc), t1).addReg(t2); 12503 break; 12504 } 12505 case X86::ATOMMAX8: 12506 case X86::ATOMMAX16: 12507 case X86::ATOMMAX32: 12508 case X86::ATOMMAX64: 12509 case X86::ATOMMIN8: 12510 case X86::ATOMMIN16: 12511 case X86::ATOMMIN32: 12512 case X86::ATOMMIN64: 12513 case X86::ATOMUMAX8: 12514 case X86::ATOMUMAX16: 12515 case X86::ATOMUMAX32: 12516 case X86::ATOMUMAX64: 12517 case X86::ATOMUMIN8: 12518 case X86::ATOMUMIN16: 12519 case X86::ATOMUMIN32: 12520 case X86::ATOMUMIN64: { 12521 unsigned CMPOpc; 12522 unsigned CMOVOpc = getNonAtomicOpcodeWithExtraOpc(Opc, CMPOpc); 12523 12524 BuildMI(mainMBB, DL, TII->get(CMPOpc)) 12525 .addReg(SrcReg) 12526 .addReg(AccReg); 12527 12528 if (Subtarget->hasCMov()) { 12529 if (VT != MVT::i8) { 12530 // Native support 12531 BuildMI(mainMBB, DL, TII->get(CMOVOpc), t1) 12532 .addReg(SrcReg) 12533 .addReg(AccReg); 12534 } else { 12535 // Promote i8 to i32 to use CMOV32 12536 const TargetRegisterClass *RC32 = getRegClassFor(MVT::i32); 12537 unsigned SrcReg32 = MRI.createVirtualRegister(RC32); 12538 unsigned AccReg32 = MRI.createVirtualRegister(RC32); 12539 unsigned t2 = MRI.createVirtualRegister(RC32); 12540 12541 unsigned Undef = MRI.createVirtualRegister(RC32); 12542 BuildMI(mainMBB, DL, TII->get(TargetOpcode::IMPLICIT_DEF), Undef); 12543 12544 BuildMI(mainMBB, DL, TII->get(TargetOpcode::INSERT_SUBREG), SrcReg32) 12545 .addReg(Undef) 12546 .addReg(SrcReg) 12547 .addImm(X86::sub_8bit); 12548 BuildMI(mainMBB, DL, TII->get(TargetOpcode::INSERT_SUBREG), AccReg32) 12549 .addReg(Undef) 12550 .addReg(AccReg) 12551 .addImm(X86::sub_8bit); 12552 12553 BuildMI(mainMBB, DL, TII->get(CMOVOpc), t2) 12554 .addReg(SrcReg32) 12555 .addReg(AccReg32); 12556 12557 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), t1) 12558 .addReg(t2, 0, X86::sub_8bit); 12559 } 12560 } else { 12561 // Use pseudo select and lower them. 
12562 assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) && 12563 "Invalid atomic-load-op transformation!"); 12564 unsigned SelOpc = getPseudoCMOVOpc(VT); 12565 X86::CondCode CC = X86::getCondFromCMovOpc(CMOVOpc); 12566 assert(CC != X86::COND_INVALID && "Invalid atomic-load-op transformation!"); 12567 MIB = BuildMI(mainMBB, DL, TII->get(SelOpc), t1) 12568 .addReg(SrcReg).addReg(AccReg) 12569 .addImm(CC); 12570 mainMBB = EmitLoweredSelect(MIB, mainMBB); 12571 } 12572 break; 12573 } 12574 } 12575 12576 // Copy AccPhyReg back from virtual register. 12577 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), AccPhyReg) 12578 .addReg(AccReg); 12579 12580 MIB = BuildMI(mainMBB, DL, TII->get(LCMPXCHGOpc)); 12581 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) 12582 MIB.addOperand(MI->getOperand(MemOpndSlot + i)); 12583 MIB.addReg(t1); 12584 MIB.setMemRefs(MMOBegin, MMOEnd); 12585 12586 BuildMI(mainMBB, DL, TII->get(X86::JNE_4)).addMBB(origMainMBB); 12587 12588 mainMBB->addSuccessor(origMainMBB); 12589 mainMBB->addSuccessor(sinkMBB); 12590 12591 // sinkMBB: 12592 sinkMBB->addLiveIn(AccPhyReg); 12593 12594 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 12595 TII->get(TargetOpcode::COPY), DstReg) 12596 .addReg(AccPhyReg); 12597 12598 MI->eraseFromParent(); 12599 return sinkMBB; 12600} 12601 12602// EmitAtomicLoadArith6432 - emit the code sequence for pseudo atomic 12603// instructions. They will be translated into a spin-loop or compare-exchange 12604// loop from 12605// 12606// ... 12607// dst = atomic-fetch-op MI.addr, MI.val 12608// ... 12609// 12610// to 12611// 12612// ... 12613// EAX = LOAD [MI.addr + 0] 12614// EDX = LOAD [MI.addr + 4] 12615// loop: 12616// EBX = OP MI.val.lo, EAX 12617// ECX = OP MI.val.hi, EDX 12618// LCMPXCHG8B [MI.addr], [ECX:EBX & EDX:EAX are implicitly used and EDX:EAX is implicitly defined] 12619// JNE loop 12620// sink: 12621// dst = EDX:EAX 12622// ... 
12623MachineBasicBlock * 12624X86TargetLowering::EmitAtomicLoadArith6432(MachineInstr *MI, 12625 MachineBasicBlock *MBB) const { 12626 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 12627 DebugLoc DL = MI->getDebugLoc(); 12628 12629 MachineFunction *MF = MBB->getParent(); 12630 MachineRegisterInfo &MRI = MF->getRegInfo(); 12631 12632 const BasicBlock *BB = MBB->getBasicBlock(); 12633 MachineFunction::iterator I = MBB; 12634 ++I; 12635 12636 assert(MI->getNumOperands() <= X86::AddrNumOperands + 4 && 12637 "Unexpected number of operands"); 12638 12639 assert(MI->hasOneMemOperand() && 12640 "Expected atomic-load-op32 to have one memoperand"); 12641 12642 // Memory Reference 12643 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 12644 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 12645 12646 unsigned DstLoReg, DstHiReg; 12647 unsigned SrcLoReg, SrcHiReg; 12648 unsigned MemOpndSlot; 12649 12650 unsigned CurOp = 0; 12651 12652 DstLoReg = MI->getOperand(CurOp++).getReg(); 12653 DstHiReg = MI->getOperand(CurOp++).getReg(); 12654 MemOpndSlot = CurOp; 12655 CurOp += X86::AddrNumOperands; 12656 SrcLoReg = MI->getOperand(CurOp++).getReg(); 12657 SrcHiReg = MI->getOperand(CurOp++).getReg(); 12658 12659 const TargetRegisterClass *RC = &X86::GR32RegClass; 12660 const TargetRegisterClass *RC8 = &X86::GR8RegClass; 12661 12662 unsigned LCMPXCHGOpc = X86::LCMPXCHG8B; 12663 unsigned LOADOpc = X86::MOV32rm; 12664 12665 // For the atomic load-arith operator, we generate 12666 // 12667 // thisMBB: 12668 // EAX = LOAD [MI.addr + 0] 12669 // EDX = LOAD [MI.addr + 4] 12670 // mainMBB: 12671 // EBX = OP MI.vallo, EAX 12672 // ECX = OP MI.valhi, EDX 12673 // LCMPXCHG8B [MI.addr], [ECX:EBX & EDX:EAX are implicitly used and EDX:EAX is implicitly defined] 12674 // JNE mainMBB 12675 // sinkMBB: 12676 12677 MachineBasicBlock *thisMBB = MBB; 12678 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 12679 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 12680 MF->insert(I, mainMBB); 12681 MF->insert(I, sinkMBB); 12682 12683 MachineInstrBuilder MIB; 12684 12685 // Transfer the remainder of BB and its successor edges to sinkMBB. 12686 sinkMBB->splice(sinkMBB->begin(), MBB, 12687 llvm::next(MachineBasicBlock::iterator(MI)), MBB->end()); 12688 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 12689 12690 // thisMBB: 12691 // Lo 12692 MIB = BuildMI(thisMBB, DL, TII->get(LOADOpc), X86::EAX); 12693 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) 12694 MIB.addOperand(MI->getOperand(MemOpndSlot + i)); 12695 MIB.setMemRefs(MMOBegin, MMOEnd); 12696 // Hi 12697 MIB = BuildMI(thisMBB, DL, TII->get(LOADOpc), X86::EDX); 12698 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { 12699 if (i == X86::AddrDisp) 12700 MIB.addDisp(MI->getOperand(MemOpndSlot + i), 4); // 4 == sizeof(i32) 12701 else 12702 MIB.addOperand(MI->getOperand(MemOpndSlot + i)); 12703 } 12704 MIB.setMemRefs(MMOBegin, MMOEnd); 12705 12706 thisMBB->addSuccessor(mainMBB); 12707 12708 // mainMBB: 12709 MachineBasicBlock *origMainMBB = mainMBB; 12710 mainMBB->addLiveIn(X86::EAX); 12711 mainMBB->addLiveIn(X86::EDX); 12712 12713 // Copy EDX:EAX as they are used more than once. 
12714 unsigned LoReg = MRI.createVirtualRegister(RC); 12715 unsigned HiReg = MRI.createVirtualRegister(RC); 12716 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), LoReg).addReg(X86::EAX); 12717 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), HiReg).addReg(X86::EDX); 12718 12719 unsigned t1L = MRI.createVirtualRegister(RC); 12720 unsigned t1H = MRI.createVirtualRegister(RC); 12721 12722 unsigned Opc = MI->getOpcode(); 12723 switch (Opc) { 12724 default: 12725 llvm_unreachable("Unhandled atomic-load-op6432 opcode!"); 12726 case X86::ATOMAND6432: 12727 case X86::ATOMOR6432: 12728 case X86::ATOMXOR6432: 12729 case X86::ATOMADD6432: 12730 case X86::ATOMSUB6432: { 12731 unsigned HiOpc; 12732 unsigned LoOpc = getNonAtomic6432Opcode(Opc, HiOpc); 12733 BuildMI(mainMBB, DL, TII->get(LoOpc), t1L).addReg(SrcLoReg).addReg(LoReg); 12734 BuildMI(mainMBB, DL, TII->get(HiOpc), t1H).addReg(SrcHiReg).addReg(HiReg); 12735 break; 12736 } 12737 case X86::ATOMNAND6432: { 12738 unsigned HiOpc, NOTOpc; 12739 unsigned LoOpc = getNonAtomic6432OpcodeWithExtraOpc(Opc, HiOpc, NOTOpc); 12740 unsigned t2L = MRI.createVirtualRegister(RC); 12741 unsigned t2H = MRI.createVirtualRegister(RC); 12742 BuildMI(mainMBB, DL, TII->get(LoOpc), t2L).addReg(SrcLoReg).addReg(LoReg); 12743 BuildMI(mainMBB, DL, TII->get(HiOpc), t2H).addReg(SrcHiReg).addReg(HiReg); 12744 BuildMI(mainMBB, DL, TII->get(NOTOpc), t1L).addReg(t2L); 12745 BuildMI(mainMBB, DL, TII->get(NOTOpc), t1H).addReg(t2H); 12746 break; 12747 } 12748 case X86::ATOMMAX6432: 12749 case X86::ATOMMIN6432: 12750 case X86::ATOMUMAX6432: 12751 case X86::ATOMUMIN6432: { 12752 unsigned HiOpc; 12753 unsigned LoOpc = getNonAtomic6432Opcode(Opc, HiOpc); 12754 unsigned cL = MRI.createVirtualRegister(RC8); 12755 unsigned cH = MRI.createVirtualRegister(RC8); 12756 unsigned cL32 = MRI.createVirtualRegister(RC); 12757 unsigned cH32 = MRI.createVirtualRegister(RC); 12758 unsigned cc = MRI.createVirtualRegister(RC); 12759 // cl := cmp src_lo, lo 12760 BuildMI(mainMBB, DL, TII->get(X86::CMP32rr)) 12761 .addReg(SrcLoReg).addReg(LoReg); 12762 BuildMI(mainMBB, DL, TII->get(LoOpc), cL); 12763 BuildMI(mainMBB, DL, TII->get(X86::MOVZX32rr8), cL32).addReg(cL); 12764 // ch := cmp src_hi, hi 12765 BuildMI(mainMBB, DL, TII->get(X86::CMP32rr)) 12766 .addReg(SrcHiReg).addReg(HiReg); 12767 BuildMI(mainMBB, DL, TII->get(HiOpc), cH); 12768 BuildMI(mainMBB, DL, TII->get(X86::MOVZX32rr8), cH32).addReg(cH); 12769 // cc := if (src_hi == hi) ? 
cl : ch; 12770 if (Subtarget->hasCMov()) { 12771 BuildMI(mainMBB, DL, TII->get(X86::CMOVE32rr), cc) 12772 .addReg(cH32).addReg(cL32); 12773 } else { 12774 MIB = BuildMI(mainMBB, DL, TII->get(X86::CMOV_GR32), cc) 12775 .addReg(cH32).addReg(cL32) 12776 .addImm(X86::COND_E); 12777 mainMBB = EmitLoweredSelect(MIB, mainMBB); 12778 } 12779 BuildMI(mainMBB, DL, TII->get(X86::TEST32rr)).addReg(cc).addReg(cc); 12780 if (Subtarget->hasCMov()) { 12781 BuildMI(mainMBB, DL, TII->get(X86::CMOVNE32rr), t1L) 12782 .addReg(SrcLoReg).addReg(LoReg); 12783 BuildMI(mainMBB, DL, TII->get(X86::CMOVNE32rr), t1H) 12784 .addReg(SrcHiReg).addReg(HiReg); 12785 } else { 12786 MIB = BuildMI(mainMBB, DL, TII->get(X86::CMOV_GR32), t1L) 12787 .addReg(SrcLoReg).addReg(LoReg) 12788 .addImm(X86::COND_NE); 12789 mainMBB = EmitLoweredSelect(MIB, mainMBB); 12790 MIB = BuildMI(mainMBB, DL, TII->get(X86::CMOV_GR32), t1H) 12791 .addReg(SrcHiReg).addReg(HiReg) 12792 .addImm(X86::COND_NE); 12793 mainMBB = EmitLoweredSelect(MIB, mainMBB); 12794 } 12795 break; 12796 } 12797 case X86::ATOMSWAP6432: { 12798 unsigned HiOpc; 12799 unsigned LoOpc = getNonAtomic6432Opcode(Opc, HiOpc); 12800 BuildMI(mainMBB, DL, TII->get(LoOpc), t1L).addReg(SrcLoReg); 12801 BuildMI(mainMBB, DL, TII->get(HiOpc), t1H).addReg(SrcHiReg); 12802 break; 12803 } 12804 } 12805 12806 // Copy EDX:EAX back from HiReg:LoReg 12807 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::EAX).addReg(LoReg); 12808 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::EDX).addReg(HiReg); 12809 // Copy ECX:EBX from t1H:t1L 12810 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::EBX).addReg(t1L); 12811 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::ECX).addReg(t1H); 12812 12813 MIB = BuildMI(mainMBB, DL, TII->get(LCMPXCHGOpc)); 12814 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) 12815 MIB.addOperand(MI->getOperand(MemOpndSlot + i)); 12816 MIB.setMemRefs(MMOBegin, MMOEnd); 12817 12818 BuildMI(mainMBB, DL, TII->get(X86::JNE_4)).addMBB(origMainMBB); 12819 12820 mainMBB->addSuccessor(origMainMBB); 12821 mainMBB->addSuccessor(sinkMBB); 12822 12823 // sinkMBB: 12824 sinkMBB->addLiveIn(X86::EAX); 12825 sinkMBB->addLiveIn(X86::EDX); 12826 12827 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 12828 TII->get(TargetOpcode::COPY), DstLoReg) 12829 .addReg(X86::EAX); 12830 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 12831 TII->get(TargetOpcode::COPY), DstHiReg) 12832 .addReg(X86::EDX); 12833 12834 MI->eraseFromParent(); 12835 return sinkMBB; 12836} 12837 12838// FIXME: When we get size specific XMM0 registers, i.e. XMM0_V16I8 12839// or XMM0_V32I8 in AVX all of this code can be replaced with that 12840// in the .td file. 12841MachineBasicBlock * 12842X86TargetLowering::EmitPCMP(MachineInstr *MI, MachineBasicBlock *BB, 12843 unsigned numArgs, bool memArg) const { 12844 assert(Subtarget->hasSSE42() && 12845 "Target must have SSE4.2 or AVX features enabled"); 12846 12847 DebugLoc dl = MI->getDebugLoc(); 12848 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 12849 unsigned Opc; 12850 if (!Subtarget->hasAVX()) { 12851 if (memArg) 12852 Opc = numArgs == 3 ? X86::PCMPISTRM128rm : X86::PCMPESTRM128rm; 12853 else 12854 Opc = numArgs == 3 ? X86::PCMPISTRM128rr : X86::PCMPESTRM128rr; 12855 } else { 12856 if (memArg) 12857 Opc = numArgs == 3 ? X86::VPCMPISTRM128rm : X86::VPCMPESTRM128rm; 12858 else 12859 Opc = numArgs == 3 ? 
X86::VPCMPISTRM128rr : X86::VPCMPESTRM128rr; 12860 } 12861 12862 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc)); 12863 for (unsigned i = 0; i < numArgs; ++i) { 12864 MachineOperand &Op = MI->getOperand(i+1); 12865 if (!(Op.isReg() && Op.isImplicit())) 12866 MIB.addOperand(Op); 12867 } 12868 BuildMI(*BB, MI, dl, 12869 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg()) 12870 .addReg(X86::XMM0); 12871 12872 MI->eraseFromParent(); 12873 return BB; 12874} 12875 12876MachineBasicBlock * 12877X86TargetLowering::EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB) const { 12878 DebugLoc dl = MI->getDebugLoc(); 12879 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 12880 12881 // Address into RAX/EAX, other two args into ECX, EDX. 12882 unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r; 12883 unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX; 12884 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg); 12885 for (int i = 0; i < X86::AddrNumOperands; ++i) 12886 MIB.addOperand(MI->getOperand(i)); 12887 12888 unsigned ValOps = X86::AddrNumOperands; 12889 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX) 12890 .addReg(MI->getOperand(ValOps).getReg()); 12891 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX) 12892 .addReg(MI->getOperand(ValOps+1).getReg()); 12893 12894 // The instruction doesn't actually take any operands though. 12895 BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr)); 12896 12897 MI->eraseFromParent(); // The pseudo is gone now. 12898 return BB; 12899} 12900 12901MachineBasicBlock * 12902X86TargetLowering::EmitVAARG64WithCustomInserter( 12903 MachineInstr *MI, 12904 MachineBasicBlock *MBB) const { 12905 // Emit va_arg instruction on X86-64. 12906 12907 // Operands to this pseudo-instruction: 12908 // 0 ) Output : destination address (reg) 12909 // 1-5) Input : va_list address (addr, i64mem) 12910 // 6 ) ArgSize : Size (in bytes) of vararg type 12911 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset 12912 // 8 ) Align : Alignment of type 12913 // 9 ) EFLAGS (implicit-def) 12914 12915 assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!"); 12916 assert(X86::AddrNumOperands == 5 && "VAARG_64 assumes 5 address operands"); 12917 12918 unsigned DestReg = MI->getOperand(0).getReg(); 12919 MachineOperand &Base = MI->getOperand(1); 12920 MachineOperand &Scale = MI->getOperand(2); 12921 MachineOperand &Index = MI->getOperand(3); 12922 MachineOperand &Disp = MI->getOperand(4); 12923 MachineOperand &Segment = MI->getOperand(5); 12924 unsigned ArgSize = MI->getOperand(6).getImm(); 12925 unsigned ArgMode = MI->getOperand(7).getImm(); 12926 unsigned Align = MI->getOperand(8).getImm(); 12927 12928 // Memory Reference 12929 assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand"); 12930 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 12931 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 12932 12933 // Machine Information 12934 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 12935 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 12936 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64); 12937 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32); 12938 DebugLoc DL = MI->getDebugLoc(); 12939 12940 // struct va_list { 12941 // i32 gp_offset 12942 // i32 fp_offset 12943 // i64 overflow_area (address) 12944 // i64 reg_save_area (address) 12945 // } 12946 // 
sizeof(va_list) = 24 12947 // alignment(va_list) = 8 12948 12949 unsigned TotalNumIntRegs = 6; 12950 unsigned TotalNumXMMRegs = 8; 12951 bool UseGPOffset = (ArgMode == 1); 12952 bool UseFPOffset = (ArgMode == 2); 12953 unsigned MaxOffset = TotalNumIntRegs * 8 + 12954 (UseFPOffset ? TotalNumXMMRegs * 16 : 0); 12955 12956 /* Align ArgSize to a multiple of 8 */ 12957 unsigned ArgSizeA8 = (ArgSize + 7) & ~7; 12958 bool NeedsAlign = (Align > 8); 12959 12960 MachineBasicBlock *thisMBB = MBB; 12961 MachineBasicBlock *overflowMBB; 12962 MachineBasicBlock *offsetMBB; 12963 MachineBasicBlock *endMBB; 12964 12965 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB 12966 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB 12967 unsigned OffsetReg = 0; 12968 12969 if (!UseGPOffset && !UseFPOffset) { 12970 // If we only pull from the overflow region, we don't create a branch. 12971 // We don't need to alter control flow. 12972 OffsetDestReg = 0; // unused 12973 OverflowDestReg = DestReg; 12974 12975 offsetMBB = NULL; 12976 overflowMBB = thisMBB; 12977 endMBB = thisMBB; 12978 } else { 12979 // First emit code to check if gp_offset (or fp_offset) is below the bound. 12980 // If so, pull the argument from reg_save_area. (branch to offsetMBB) 12981 // If not, pull from overflow_area. (branch to overflowMBB) 12982 // 12983 // thisMBB 12984 // | . 12985 // | . 12986 // offsetMBB overflowMBB 12987 // | . 12988 // | . 12989 // endMBB 12990 12991 // Registers for the PHI in endMBB 12992 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass); 12993 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass); 12994 12995 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 12996 MachineFunction *MF = MBB->getParent(); 12997 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB); 12998 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB); 12999 endMBB = MF->CreateMachineBasicBlock(LLVM_BB); 13000 13001 MachineFunction::iterator MBBIter = MBB; 13002 ++MBBIter; 13003 13004 // Insert the new basic blocks 13005 MF->insert(MBBIter, offsetMBB); 13006 MF->insert(MBBIter, overflowMBB); 13007 MF->insert(MBBIter, endMBB); 13008 13009 // Transfer the remainder of MBB and its successor edges to endMBB. 13010 endMBB->splice(endMBB->begin(), thisMBB, 13011 llvm::next(MachineBasicBlock::iterator(MI)), 13012 thisMBB->end()); 13013 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB); 13014 13015 // Make offsetMBB and overflowMBB successors of thisMBB 13016 thisMBB->addSuccessor(offsetMBB); 13017 thisMBB->addSuccessor(overflowMBB); 13018 13019 // endMBB is a successor of both offsetMBB and overflowMBB 13020 offsetMBB->addSuccessor(endMBB); 13021 overflowMBB->addSuccessor(endMBB); 13022 13023 // Load the offset value into a register 13024 OffsetReg = MRI.createVirtualRegister(OffsetRegClass); 13025 BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg) 13026 .addOperand(Base) 13027 .addOperand(Scale) 13028 .addOperand(Index) 13029 .addDisp(Disp, UseFPOffset ? 4 : 0) 13030 .addOperand(Segment) 13031 .setMemRefs(MMOBegin, MMOEnd); 13032 13033 // Check if there is enough room left to pull this argument. 13034 BuildMI(thisMBB, DL, TII->get(X86::CMP32ri)) 13035 .addReg(OffsetReg) 13036 .addImm(MaxOffset + 8 - ArgSizeA8); 13037 13038 // Branch to "overflowMBB" if offset >= max 13039 // Fall through to "offsetMBB" otherwise 13040 BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE))) 13041 .addMBB(overflowMBB); 13042 } 13043 13044 // In offsetMBB, emit code to use the reg_save_area. 
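// (Added sketch of the steps below: the argument address is computed as
// roughly reg_save_area + zext(offset), and the stored offset is then
// advanced by 8 bytes for a GP register slot or 16 bytes for an XMM slot.)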
13045 if (offsetMBB) { 13046 assert(OffsetReg != 0); 13047 13048 // Read the reg_save_area address. 13049 unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass); 13050 BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg) 13051 .addOperand(Base) 13052 .addOperand(Scale) 13053 .addOperand(Index) 13054 .addDisp(Disp, 16) 13055 .addOperand(Segment) 13056 .setMemRefs(MMOBegin, MMOEnd); 13057 13058 // Zero-extend the offset 13059 unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass); 13060 BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64) 13061 .addImm(0) 13062 .addReg(OffsetReg) 13063 .addImm(X86::sub_32bit); 13064 13065 // Add the offset to the reg_save_area to get the final address. 13066 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg) 13067 .addReg(OffsetReg64) 13068 .addReg(RegSaveReg); 13069 13070 // Compute the offset for the next argument 13071 unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass); 13072 BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg) 13073 .addReg(OffsetReg) 13074 .addImm(UseFPOffset ? 16 : 8); 13075 13076 // Store it back into the va_list. 13077 BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr)) 13078 .addOperand(Base) 13079 .addOperand(Scale) 13080 .addOperand(Index) 13081 .addDisp(Disp, UseFPOffset ? 4 : 0) 13082 .addOperand(Segment) 13083 .addReg(NextOffsetReg) 13084 .setMemRefs(MMOBegin, MMOEnd); 13085 13086 // Jump to endMBB 13087 BuildMI(offsetMBB, DL, TII->get(X86::JMP_4)) 13088 .addMBB(endMBB); 13089 } 13090 13091 // 13092 // Emit code to use overflow area 13093 // 13094 13095 // Load the overflow_area address into a register. 13096 unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass); 13097 BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg) 13098 .addOperand(Base) 13099 .addOperand(Scale) 13100 .addOperand(Index) 13101 .addDisp(Disp, 8) 13102 .addOperand(Segment) 13103 .setMemRefs(MMOBegin, MMOEnd); 13104 13105 // If we need to align it, do so. Otherwise, just copy the address 13106 // to OverflowDestReg. 13107 if (NeedsAlign) { 13108 // Align the overflow address 13109 assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2"); 13110 unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass); 13111 13112 // aligned_addr = (addr + (align-1)) & ~(align-1) 13113 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg) 13114 .addReg(OverflowAddrReg) 13115 .addImm(Align-1); 13116 13117 BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg) 13118 .addReg(TmpReg) 13119 .addImm(~(uint64_t)(Align-1)); 13120 } else { 13121 BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg) 13122 .addReg(OverflowAddrReg); 13123 } 13124 13125 // Compute the next overflow address after this argument. 13126 // (the overflow address should be kept 8-byte aligned) 13127 unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass); 13128 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg) 13129 .addReg(OverflowDestReg) 13130 .addImm(ArgSizeA8); 13131 13132 // Store the new overflow address. 13133 BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr)) 13134 .addOperand(Base) 13135 .addOperand(Scale) 13136 .addOperand(Index) 13137 .addDisp(Disp, 8) 13138 .addOperand(Segment) 13139 .addReg(NextAddrReg) 13140 .setMemRefs(MMOBegin, MMOEnd); 13141 13142 // If we branched, emit the PHI to the front of endMBB. 
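// (Added note: the PHI merges OffsetDestReg from offsetMBB with
// OverflowDestReg from overflowMBB into the final DestReg.)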
13143 if (offsetMBB) { 13144 BuildMI(*endMBB, endMBB->begin(), DL, 13145 TII->get(X86::PHI), DestReg) 13146 .addReg(OffsetDestReg).addMBB(offsetMBB) 13147 .addReg(OverflowDestReg).addMBB(overflowMBB); 13148 } 13149 13150 // Erase the pseudo instruction 13151 MI->eraseFromParent(); 13152 13153 return endMBB; 13154} 13155 13156MachineBasicBlock * 13157X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter( 13158 MachineInstr *MI, 13159 MachineBasicBlock *MBB) const { 13160 // Emit code to save XMM registers to the stack. The ABI says that the 13161 // number of registers to save is given in %al, so it's theoretically 13162 // possible to do an indirect jump trick to avoid saving all of them, 13163 // however this code takes a simpler approach and just executes all 13164 // of the stores if %al is non-zero. It's less code, and it's probably 13165 // easier on the hardware branch predictor, and stores aren't all that 13166 // expensive anyway. 13167 13168 // Create the new basic blocks. One block contains all the XMM stores, 13169 // and one block is the final destination regardless of whether any 13170 // stores were performed. 13171 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 13172 MachineFunction *F = MBB->getParent(); 13173 MachineFunction::iterator MBBIter = MBB; 13174 ++MBBIter; 13175 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB); 13176 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB); 13177 F->insert(MBBIter, XMMSaveMBB); 13178 F->insert(MBBIter, EndMBB); 13179 13180 // Transfer the remainder of MBB and its successor edges to EndMBB. 13181 EndMBB->splice(EndMBB->begin(), MBB, 13182 llvm::next(MachineBasicBlock::iterator(MI)), 13183 MBB->end()); 13184 EndMBB->transferSuccessorsAndUpdatePHIs(MBB); 13185 13186 // The original block will now fall through to the XMM save block. 13187 MBB->addSuccessor(XMMSaveMBB); 13188 // The XMMSaveMBB will fall through to the end block. 13189 XMMSaveMBB->addSuccessor(EndMBB); 13190 13191 // Now add the instructions. 13192 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 13193 DebugLoc DL = MI->getDebugLoc(); 13194 13195 unsigned CountReg = MI->getOperand(0).getReg(); 13196 int64_t RegSaveFrameIndex = MI->getOperand(1).getImm(); 13197 int64_t VarArgsFPOffset = MI->getOperand(2).getImm(); 13198 13199 if (!Subtarget->isTargetWin64()) { 13200 // If %al is 0, branch around the XMM save block. 13201 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg); 13202 BuildMI(MBB, DL, TII->get(X86::JE_4)).addMBB(EndMBB); 13203 MBB->addSuccessor(EndMBB); 13204 } 13205 13206 unsigned MOVOpc = Subtarget->hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr; 13207 // In the XMM save block, save all the XMM argument registers. 13208 for (int i = 3, e = MI->getNumOperands(); i != e; ++i) { 13209 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset; 13210 MachineMemOperand *MMO = 13211 F->getMachineMemOperand( 13212 MachinePointerInfo::getFixedStack(RegSaveFrameIndex, Offset), 13213 MachineMemOperand::MOStore, 13214 /*Size=*/16, /*Align=*/16); 13215 BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc)) 13216 .addFrameIndex(RegSaveFrameIndex) 13217 .addImm(/*Scale=*/1) 13218 .addReg(/*IndexReg=*/0) 13219 .addImm(/*Disp=*/Offset) 13220 .addReg(/*Segment=*/0) 13221 .addReg(MI->getOperand(i).getReg()) 13222 .addMemOperand(MMO); 13223 } 13224 13225 MI->eraseFromParent(); // The pseudo instruction is gone now. 
13226 13227 return EndMBB; 13228} 13229 13230// The EFLAGS operand of SelectItr might be missing a kill marker 13231// because there were multiple uses of EFLAGS, and ISel didn't know 13232// which to mark. Figure out whether SelectItr should have had a 13233// kill marker, and set it if it should. Returns the correct kill 13234// marker value. 13235static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr, 13236 MachineBasicBlock* BB, 13237 const TargetRegisterInfo* TRI) { 13238 // Scan forward through BB for a use/def of EFLAGS. 13239 MachineBasicBlock::iterator miI(llvm::next(SelectItr)); 13240 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) { 13241 const MachineInstr& mi = *miI; 13242 if (mi.readsRegister(X86::EFLAGS)) 13243 return false; 13244 if (mi.definesRegister(X86::EFLAGS)) 13245 break; // Should have kill-flag - update below. 13246 } 13247 13248 // If we hit the end of the block, check whether EFLAGS is live into a 13249 // successor. 13250 if (miI == BB->end()) { 13251 for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(), 13252 sEnd = BB->succ_end(); 13253 sItr != sEnd; ++sItr) { 13254 MachineBasicBlock* succ = *sItr; 13255 if (succ->isLiveIn(X86::EFLAGS)) 13256 return false; 13257 } 13258 } 13259 13260 // We found a def, or hit the end of the basic block and EFLAGS wasn't live 13261 // out. SelectMI should have a kill flag on EFLAGS. 13262 SelectItr->addRegisterKilled(X86::EFLAGS, TRI); 13263 return true; 13264} 13265 13266MachineBasicBlock * 13267X86TargetLowering::EmitLoweredSelect(MachineInstr *MI, 13268 MachineBasicBlock *BB) const { 13269 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 13270 DebugLoc DL = MI->getDebugLoc(); 13271 13272 // To "insert" a SELECT_CC instruction, we actually have to insert the 13273 // diamond control-flow pattern. The incoming instruction knows the 13274 // destination vreg to set, the condition code register to branch on, the 13275 // true/false values to select between, and a branch opcode to use. 13276 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 13277 MachineFunction::iterator It = BB; 13278 ++It; 13279 13280 // thisMBB: 13281 // ... 13282 // TrueVal = ... 13283 // cmpTY ccX, r1, r2 13284 // bCC copy1MBB 13285 // fallthrough --> copy0MBB 13286 MachineBasicBlock *thisMBB = BB; 13287 MachineFunction *F = BB->getParent(); 13288 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 13289 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 13290 F->insert(It, copy0MBB); 13291 F->insert(It, sinkMBB); 13292 13293 // If the EFLAGS register isn't dead in the terminator, then claim that it's 13294 // live into the sink and copy blocks. 13295 const TargetRegisterInfo* TRI = getTargetMachine().getRegisterInfo(); 13296 if (!MI->killsRegister(X86::EFLAGS) && 13297 !checkAndUpdateEFLAGSKill(MI, BB, TRI)) { 13298 copy0MBB->addLiveIn(X86::EFLAGS); 13299 sinkMBB->addLiveIn(X86::EFLAGS); 13300 } 13301 13302 // Transfer the remainder of BB and its successor edges to sinkMBB. 13303 sinkMBB->splice(sinkMBB->begin(), BB, 13304 llvm::next(MachineBasicBlock::iterator(MI)), 13305 BB->end()); 13306 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 13307 13308 // Add the true and fallthrough blocks as its successors. 13309 BB->addSuccessor(copy0MBB); 13310 BB->addSuccessor(sinkMBB); 13311 13312 // Create the conditional branch instruction. 
13313 unsigned Opc = 13314 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm()); 13315 BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB); 13316 13317 // copy0MBB: 13318 // %FalseValue = ... 13319 // # fallthrough to sinkMBB 13320 copy0MBB->addSuccessor(sinkMBB); 13321 13322 // sinkMBB: 13323 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 13324 // ... 13325 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 13326 TII->get(X86::PHI), MI->getOperand(0).getReg()) 13327 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 13328 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 13329 13330 MI->eraseFromParent(); // The pseudo instruction is gone now. 13331 return sinkMBB; 13332} 13333 13334MachineBasicBlock * 13335X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI, MachineBasicBlock *BB, 13336 bool Is64Bit) const { 13337 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 13338 DebugLoc DL = MI->getDebugLoc(); 13339 MachineFunction *MF = BB->getParent(); 13340 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 13341 13342 assert(getTargetMachine().Options.EnableSegmentedStacks); 13343 13344 unsigned TlsReg = Is64Bit ? X86::FS : X86::GS; 13345 unsigned TlsOffset = Is64Bit ? 0x70 : 0x30; 13346 13347 // BB: 13348 // ... [Till the alloca] 13349 // If stacklet is not large enough, jump to mallocMBB 13350 // 13351 // bumpMBB: 13352 // Allocate by subtracting from RSP 13353 // Jump to continueMBB 13354 // 13355 // mallocMBB: 13356 // Allocate by call to runtime 13357 // 13358 // continueMBB: 13359 // ... 13360 // [rest of original BB] 13361 // 13362 13363 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB); 13364 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB); 13365 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB); 13366 13367 MachineRegisterInfo &MRI = MF->getRegInfo(); 13368 const TargetRegisterClass *AddrRegClass = 13369 getRegClassFor(Is64Bit ? MVT::i64:MVT::i32); 13370 13371 unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass), 13372 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass), 13373 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass), 13374 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass), 13375 sizeVReg = MI->getOperand(1).getReg(), 13376 physSPReg = Is64Bit ? X86::RSP : X86::ESP; 13377 13378 MachineFunction::iterator MBBIter = BB; 13379 ++MBBIter; 13380 13381 MF->insert(MBBIter, bumpMBB); 13382 MF->insert(MBBIter, mallocMBB); 13383 MF->insert(MBBIter, continueMBB); 13384 13385 continueMBB->splice(continueMBB->begin(), BB, llvm::next 13386 (MachineBasicBlock::iterator(MI)), BB->end()); 13387 continueMBB->transferSuccessorsAndUpdatePHIs(BB); 13388 13389 // Add code to the main basic block to check if the stack limit has been hit, 13390 // and if so, jump to mallocMBB otherwise to bumpMBB. 13391 BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg); 13392 BuildMI(BB, DL, TII->get(Is64Bit ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg) 13393 .addReg(tmpSPVReg).addReg(sizeVReg); 13394 BuildMI(BB, DL, TII->get(Is64Bit ? X86::CMP64mr:X86::CMP32mr)) 13395 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg) 13396 .addReg(SPLimitVReg); 13397 BuildMI(BB, DL, TII->get(X86::JG_4)).addMBB(mallocMBB); 13398 13399 // bumpMBB simply decreases the stack pointer, since we know the current 13400 // stacklet has enough space. 
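// (Added note: SPLimitVReg already holds the old stack pointer minus the
// requested size from the SUB above, so the "bump" is just copying that
// value back into the physical stack pointer.)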
13401 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg) 13402 .addReg(SPLimitVReg); 13403 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg) 13404 .addReg(SPLimitVReg); 13405 BuildMI(bumpMBB, DL, TII->get(X86::JMP_4)).addMBB(continueMBB); 13406 13407 // Calls into a routine in libgcc to allocate more space from the heap. 13408 const uint32_t *RegMask = 13409 getTargetMachine().getRegisterInfo()->getCallPreservedMask(CallingConv::C); 13410 if (Is64Bit) { 13411 BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI) 13412 .addReg(sizeVReg); 13413 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32)) 13414 .addExternalSymbol("__morestack_allocate_stack_space") 13415 .addRegMask(RegMask) 13416 .addReg(X86::RDI, RegState::Implicit) 13417 .addReg(X86::RAX, RegState::ImplicitDefine); 13418 } else { 13419 BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg) 13420 .addImm(12); 13421 BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg); 13422 BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32)) 13423 .addExternalSymbol("__morestack_allocate_stack_space") 13424 .addRegMask(RegMask) 13425 .addReg(X86::EAX, RegState::ImplicitDefine); 13426 } 13427 13428 if (!Is64Bit) 13429 BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg) 13430 .addImm(16); 13431 13432 BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg) 13433 .addReg(Is64Bit ? X86::RAX : X86::EAX); 13434 BuildMI(mallocMBB, DL, TII->get(X86::JMP_4)).addMBB(continueMBB); 13435 13436 // Set up the CFG correctly. 13437 BB->addSuccessor(bumpMBB); 13438 BB->addSuccessor(mallocMBB); 13439 mallocMBB->addSuccessor(continueMBB); 13440 bumpMBB->addSuccessor(continueMBB); 13441 13442 // Take care of the PHI nodes. 13443 BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI), 13444 MI->getOperand(0).getReg()) 13445 .addReg(mallocPtrVReg).addMBB(mallocMBB) 13446 .addReg(bumpSPPtrVReg).addMBB(bumpMBB); 13447 13448 // Delete the original pseudo instruction. 13449 MI->eraseFromParent(); 13450 13451 // And we're done. 13452 return continueMBB; 13453} 13454 13455 MachineBasicBlock * 13456 X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI, 13457 MachineBasicBlock *BB) const { 13458 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 13459 DebugLoc DL = MI->getDebugLoc(); 13460 13461 assert(!Subtarget->isTargetEnvMacho()); 13462 13463 // The lowering is pretty easy: we're just emitting the call to _alloca. The 13464 // non-trivial part is impdef of ESP. 13465 13466 if (Subtarget->isTargetWin64()) { 13467 if (Subtarget->isTargetCygMing()) { 13468 // ___chkstk(Mingw64): 13469 // Clobbers R10, R11, RAX and EFLAGS. 13470 // Updates RSP. 13471 BuildMI(*BB, MI, DL, TII->get(X86::W64ALLOCA)) 13472 .addExternalSymbol("___chkstk") 13473 .addReg(X86::RAX, RegState::Implicit) 13474 .addReg(X86::RSP, RegState::Implicit) 13475 .addReg(X86::RAX, RegState::Define | RegState::Implicit) 13476 .addReg(X86::RSP, RegState::Define | RegState::Implicit) 13477 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit); 13478 } else { 13479 // __chkstk(MSVCRT): does not update stack pointer. 13480 // Clobbers R10, R11 and EFLAGS. 13481 // FIXME: RAX(allocated size) might be reused and not killed. 13482 BuildMI(*BB, MI, DL, TII->get(X86::W64ALLOCA)) 13483 .addExternalSymbol("__chkstk") 13484 .addReg(X86::RAX, RegState::Implicit) 13485 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit); 13486 // RAX has the offset to be subtracted from RSP.
13487 BuildMI(*BB, MI, DL, TII->get(X86::SUB64rr), X86::RSP) 13488 .addReg(X86::RSP) 13489 .addReg(X86::RAX); 13490 } 13491 } else { 13492 const char *StackProbeSymbol = 13493 Subtarget->isTargetWindows() ? "_chkstk" : "_alloca"; 13494 13495 BuildMI(*BB, MI, DL, TII->get(X86::CALLpcrel32)) 13496 .addExternalSymbol(StackProbeSymbol) 13497 .addReg(X86::EAX, RegState::Implicit) 13498 .addReg(X86::ESP, RegState::Implicit) 13499 .addReg(X86::EAX, RegState::Define | RegState::Implicit) 13500 .addReg(X86::ESP, RegState::Define | RegState::Implicit) 13501 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit); 13502 } 13503 13504 MI->eraseFromParent(); // The pseudo instruction is gone now. 13505 return BB; 13506} 13507 13508MachineBasicBlock * 13509X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI, 13510 MachineBasicBlock *BB) const { 13511 // This is pretty easy. We're taking the value that we received from 13512 // our load from the relocation, sticking it in either RDI (x86-64) 13513 // or EAX and doing an indirect call. The return value will then 13514 // be in the normal return register. 13515 const X86InstrInfo *TII 13516 = static_cast<const X86InstrInfo*>(getTargetMachine().getInstrInfo()); 13517 DebugLoc DL = MI->getDebugLoc(); 13518 MachineFunction *F = BB->getParent(); 13519 13520 assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?"); 13521 assert(MI->getOperand(3).isGlobal() && "This should be a global"); 13522 13523 // Get a register mask for the lowered call. 13524 // FIXME: The 32-bit calls have non-standard calling conventions. Use a 13525 // proper register mask. 13526 const uint32_t *RegMask = 13527 getTargetMachine().getRegisterInfo()->getCallPreservedMask(CallingConv::C); 13528 if (Subtarget->is64Bit()) { 13529 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, 13530 TII->get(X86::MOV64rm), X86::RDI) 13531 .addReg(X86::RIP) 13532 .addImm(0).addReg(0) 13533 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, 13534 MI->getOperand(3).getTargetFlags()) 13535 .addReg(0); 13536 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m)); 13537 addDirectMem(MIB, X86::RDI); 13538 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask); 13539 } else if (getTargetMachine().getRelocationModel() != Reloc::PIC_) { 13540 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, 13541 TII->get(X86::MOV32rm), X86::EAX) 13542 .addReg(0) 13543 .addImm(0).addReg(0) 13544 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, 13545 MI->getOperand(3).getTargetFlags()) 13546 .addReg(0); 13547 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m)); 13548 addDirectMem(MIB, X86::EAX); 13549 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask); 13550 } else { 13551 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, 13552 TII->get(X86::MOV32rm), X86::EAX) 13553 .addReg(TII->getGlobalBaseReg(F)) 13554 .addImm(0).addReg(0) 13555 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, 13556 MI->getOperand(3).getTargetFlags()) 13557 .addReg(0); 13558 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m)); 13559 addDirectMem(MIB, X86::EAX); 13560 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask); 13561 } 13562 13563 MI->eraseFromParent(); // The pseudo instruction is gone now. 
13564 return BB; 13565} 13566 13567MachineBasicBlock * 13568X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI, 13569 MachineBasicBlock *MBB) const { 13570 DebugLoc DL = MI->getDebugLoc(); 13571 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 13572 13573 MachineFunction *MF = MBB->getParent(); 13574 MachineRegisterInfo &MRI = MF->getRegInfo(); 13575 13576 const BasicBlock *BB = MBB->getBasicBlock(); 13577 MachineFunction::iterator I = MBB; 13578 ++I; 13579 13580 // Memory Reference 13581 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 13582 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 13583 13584 unsigned DstReg; 13585 unsigned MemOpndSlot = 0; 13586 13587 unsigned CurOp = 0; 13588 13589 DstReg = MI->getOperand(CurOp++).getReg(); 13590 const TargetRegisterClass *RC = MRI.getRegClass(DstReg); 13591 assert(RC->hasType(MVT::i32) && "Invalid destination!"); 13592 unsigned mainDstReg = MRI.createVirtualRegister(RC); 13593 unsigned restoreDstReg = MRI.createVirtualRegister(RC); 13594 13595 MemOpndSlot = CurOp; 13596 13597 MVT PVT = getPointerTy(); 13598 assert((PVT == MVT::i64 || PVT == MVT::i32) && 13599 "Invalid Pointer Size!"); 13600 13601 // For v = setjmp(buf), we generate 13602 // 13603 // thisMBB: 13604 // buf[LabelOffset] = restoreMBB 13605 // SjLjSetup restoreMBB 13606 // 13607 // mainMBB: 13608 // v_main = 0 13609 // 13610 // sinkMBB: 13611 // v = phi(main, restore) 13612 // 13613 // restoreMBB: 13614 // v_restore = 1 13615 13616 MachineBasicBlock *thisMBB = MBB; 13617 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 13618 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 13619 MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB); 13620 MF->insert(I, mainMBB); 13621 MF->insert(I, sinkMBB); 13622 MF->push_back(restoreMBB); 13623 13624 MachineInstrBuilder MIB; 13625 13626 // Transfer the remainder of BB and its successor edges to sinkMBB. 13627 sinkMBB->splice(sinkMBB->begin(), MBB, 13628 llvm::next(MachineBasicBlock::iterator(MI)), MBB->end()); 13629 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 13630 13631 // thisMBB: 13632 unsigned PtrStoreOpc = 0; 13633 unsigned LabelReg = 0; 13634 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 13635 Reloc::Model RM = getTargetMachine().getRelocationModel(); 13636 bool UseImmLabel = (getTargetMachine().getCodeModel() == CodeModel::Small) && 13637 (RM == Reloc::Static || RM == Reloc::DynamicNoPIC); 13638 13639 // Prepare IP either in reg or imm. 13640 if (!UseImmLabel) { 13641 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr; 13642 const TargetRegisterClass *PtrRC = getRegClassFor(PVT); 13643 LabelReg = MRI.createVirtualRegister(PtrRC); 13644 if (Subtarget->is64Bit()) { 13645 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg) 13646 .addReg(X86::RIP) 13647 .addImm(0) 13648 .addReg(0) 13649 .addMBB(restoreMBB) 13650 .addReg(0); 13651 } else { 13652 const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII); 13653 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg) 13654 .addReg(XII->getGlobalBaseReg(MF)) 13655 .addImm(0) 13656 .addReg(0) 13657 .addMBB(restoreMBB, Subtarget->ClassifyBlockAddressReference()) 13658 .addReg(0); 13659 } 13660 } else 13661 PtrStoreOpc = (PVT == MVT::i64) ? 
X86::MOV64mi32 : X86::MOV32mi; 13662 // Store IP 13663 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc)); 13664 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { 13665 if (i == X86::AddrDisp) 13666 MIB.addDisp(MI->getOperand(MemOpndSlot + i), LabelOffset); 13667 else 13668 MIB.addOperand(MI->getOperand(MemOpndSlot + i)); 13669 } 13670 if (!UseImmLabel) 13671 MIB.addReg(LabelReg); 13672 else 13673 MIB.addMBB(restoreMBB); 13674 MIB.setMemRefs(MMOBegin, MMOEnd); 13675 // Setup 13676 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup)) 13677 .addMBB(restoreMBB); 13678 MIB.addRegMask(RegInfo->getNoPreservedMask()); 13679 thisMBB->addSuccessor(mainMBB); 13680 thisMBB->addSuccessor(restoreMBB); 13681 13682 // mainMBB: 13683 // EAX = 0 13684 BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg); 13685 mainMBB->addSuccessor(sinkMBB); 13686 13687 // sinkMBB: 13688 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 13689 TII->get(X86::PHI), DstReg) 13690 .addReg(mainDstReg).addMBB(mainMBB) 13691 .addReg(restoreDstReg).addMBB(restoreMBB); 13692 13693 // restoreMBB: 13694 BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1); 13695 BuildMI(restoreMBB, DL, TII->get(X86::JMP_4)).addMBB(sinkMBB); 13696 restoreMBB->addSuccessor(sinkMBB); 13697 13698 MI->eraseFromParent(); 13699 return sinkMBB; 13700} 13701 13702MachineBasicBlock * 13703X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI, 13704 MachineBasicBlock *MBB) const { 13705 DebugLoc DL = MI->getDebugLoc(); 13706 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 13707 13708 MachineFunction *MF = MBB->getParent(); 13709 MachineRegisterInfo &MRI = MF->getRegInfo(); 13710 13711 // Memory Reference 13712 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 13713 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 13714 13715 MVT PVT = getPointerTy(); 13716 assert((PVT == MVT::i64 || PVT == MVT::i32) && 13717 "Invalid Pointer Size!"); 13718 13719 const TargetRegisterClass *RC = 13720 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass; 13721 unsigned Tmp = MRI.createVirtualRegister(RC); 13722 // Since FP is only updated here but NOT referenced, it's treated as GPR. 13723 unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP; 13724 unsigned SP = RegInfo->getStackRegister(); 13725 13726 MachineInstrBuilder MIB; 13727 13728 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 13729 const int64_t SPOffset = 2 * PVT.getStoreSize(); 13730 13731 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm; 13732 unsigned IJmpOpc = (PVT == MVT::i64) ? 
X86::JMP64r : X86::JMP32r; 13733 13734 // Reload FP 13735 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP); 13736 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) 13737 MIB.addOperand(MI->getOperand(i)); 13738 MIB.setMemRefs(MMOBegin, MMOEnd); 13739 // Reload IP 13740 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp); 13741 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { 13742 if (i == X86::AddrDisp) 13743 MIB.addDisp(MI->getOperand(i), LabelOffset); 13744 else 13745 MIB.addOperand(MI->getOperand(i)); 13746 } 13747 MIB.setMemRefs(MMOBegin, MMOEnd); 13748 // Reload SP 13749 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), SP); 13750 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { 13751 if (i == X86::AddrDisp) 13752 MIB.addDisp(MI->getOperand(i), SPOffset); 13753 else 13754 MIB.addOperand(MI->getOperand(i)); 13755 } 13756 MIB.setMemRefs(MMOBegin, MMOEnd); 13757 // Jump 13758 BuildMI(*MBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp); 13759 13760 MI->eraseFromParent(); 13761 return MBB; 13762} 13763 13764MachineBasicBlock * 13765X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 13766 MachineBasicBlock *BB) const { 13767 switch (MI->getOpcode()) { 13768 default: llvm_unreachable("Unexpected instr type to insert"); 13769 case X86::TAILJMPd64: 13770 case X86::TAILJMPr64: 13771 case X86::TAILJMPm64: 13772 llvm_unreachable("TAILJMP64 would not be touched here."); 13773 case X86::TCRETURNdi64: 13774 case X86::TCRETURNri64: 13775 case X86::TCRETURNmi64: 13776 return BB; 13777 case X86::WIN_ALLOCA: 13778 return EmitLoweredWinAlloca(MI, BB); 13779 case X86::SEG_ALLOCA_32: 13780 return EmitLoweredSegAlloca(MI, BB, false); 13781 case X86::SEG_ALLOCA_64: 13782 return EmitLoweredSegAlloca(MI, BB, true); 13783 case X86::TLSCall_32: 13784 case X86::TLSCall_64: 13785 return EmitLoweredTLSCall(MI, BB); 13786 case X86::CMOV_GR8: 13787 case X86::CMOV_FR32: 13788 case X86::CMOV_FR64: 13789 case X86::CMOV_V4F32: 13790 case X86::CMOV_V2F64: 13791 case X86::CMOV_V2I64: 13792 case X86::CMOV_V8F32: 13793 case X86::CMOV_V4F64: 13794 case X86::CMOV_V4I64: 13795 case X86::CMOV_GR16: 13796 case X86::CMOV_GR32: 13797 case X86::CMOV_RFP32: 13798 case X86::CMOV_RFP64: 13799 case X86::CMOV_RFP80: 13800 return EmitLoweredSelect(MI, BB); 13801 13802 case X86::FP32_TO_INT16_IN_MEM: 13803 case X86::FP32_TO_INT32_IN_MEM: 13804 case X86::FP32_TO_INT64_IN_MEM: 13805 case X86::FP64_TO_INT16_IN_MEM: 13806 case X86::FP64_TO_INT32_IN_MEM: 13807 case X86::FP64_TO_INT64_IN_MEM: 13808 case X86::FP80_TO_INT16_IN_MEM: 13809 case X86::FP80_TO_INT32_IN_MEM: 13810 case X86::FP80_TO_INT64_IN_MEM: { 13811 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 13812 DebugLoc DL = MI->getDebugLoc(); 13813 13814 // Change the floating point control register to use "round towards zero" 13815 // mode when truncating to an integer value. 13816 MachineFunction *F = BB->getParent(); 13817 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false); 13818 addFrameReference(BuildMI(*BB, MI, DL, 13819 TII->get(X86::FNSTCW16m)), CWFrameIdx); 13820 13821 // Load the old value of the high byte of the control word... 13822 unsigned OldCW = 13823 F->getRegInfo().createVirtualRegister(&X86::GR16RegClass); 13824 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW), 13825 CWFrameIdx); 13826 13827 // Set the high part to be round to zero... 13828 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx) 13829 .addImm(0xC7F); 13830 13831 // Reload the modified control word now... 
13832 addFrameReference(BuildMI(*BB, MI, DL, 13833 TII->get(X86::FLDCW16m)), CWFrameIdx); 13834 13835 // Restore the memory image of control word to original value 13836 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx) 13837 .addReg(OldCW); 13838 13839 // Get the X86 opcode to use. 13840 unsigned Opc; 13841 switch (MI->getOpcode()) { 13842 default: llvm_unreachable("illegal opcode!"); 13843 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break; 13844 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break; 13845 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break; 13846 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break; 13847 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break; 13848 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break; 13849 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break; 13850 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break; 13851 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break; 13852 } 13853 13854 X86AddressMode AM; 13855 MachineOperand &Op = MI->getOperand(0); 13856 if (Op.isReg()) { 13857 AM.BaseType = X86AddressMode::RegBase; 13858 AM.Base.Reg = Op.getReg(); 13859 } else { 13860 AM.BaseType = X86AddressMode::FrameIndexBase; 13861 AM.Base.FrameIndex = Op.getIndex(); 13862 } 13863 Op = MI->getOperand(1); 13864 if (Op.isImm()) 13865 AM.Scale = Op.getImm(); 13866 Op = MI->getOperand(2); 13867 if (Op.isImm()) 13868 AM.IndexReg = Op.getImm(); 13869 Op = MI->getOperand(3); 13870 if (Op.isGlobal()) { 13871 AM.GV = Op.getGlobal(); 13872 } else { 13873 AM.Disp = Op.getImm(); 13874 } 13875 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM) 13876 .addReg(MI->getOperand(X86::AddrNumOperands).getReg()); 13877 13878 // Reload the original control word now. 13879 addFrameReference(BuildMI(*BB, MI, DL, 13880 TII->get(X86::FLDCW16m)), CWFrameIdx); 13881 13882 MI->eraseFromParent(); // The pseudo instruction is gone now. 13883 return BB; 13884 } 13885 // String/text processing lowering. 13886 case X86::PCMPISTRM128REG: 13887 case X86::VPCMPISTRM128REG: 13888 case X86::PCMPISTRM128MEM: 13889 case X86::VPCMPISTRM128MEM: 13890 case X86::PCMPESTRM128REG: 13891 case X86::VPCMPESTRM128REG: 13892 case X86::PCMPESTRM128MEM: 13893 case X86::VPCMPESTRM128MEM: { 13894 unsigned NumArgs; 13895 bool MemArg; 13896 switch (MI->getOpcode()) { 13897 default: llvm_unreachable("illegal opcode!"); 13898 case X86::PCMPISTRM128REG: 13899 case X86::VPCMPISTRM128REG: 13900 NumArgs = 3; MemArg = false; break; 13901 case X86::PCMPISTRM128MEM: 13902 case X86::VPCMPISTRM128MEM: 13903 NumArgs = 3; MemArg = true; break; 13904 case X86::PCMPESTRM128REG: 13905 case X86::VPCMPESTRM128REG: 13906 NumArgs = 5; MemArg = false; break; 13907 case X86::PCMPESTRM128MEM: 13908 case X86::VPCMPESTRM128MEM: 13909 NumArgs = 5; MemArg = true; break; 13910 } 13911 return EmitPCMP(MI, BB, NumArgs, MemArg); 13912 } 13913 13914 // Thread synchronization. 13915 case X86::MONITOR: 13916 return EmitMonitor(MI, BB); 13917 13918 // xbegin 13919 case X86::XBEGIN: 13920 return EmitXBegin(MI, BB); 13921 13922 // Atomic Lowering. 
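// Each of these read-modify-write pseudos is expanded by EmitAtomicLoadArith below.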
13923 case X86::ATOMAND8: 13924 case X86::ATOMAND16: 13925 case X86::ATOMAND32: 13926 case X86::ATOMAND64: 13927 // Fall through 13928 case X86::ATOMOR8: 13929 case X86::ATOMOR16: 13930 case X86::ATOMOR32: 13931 case X86::ATOMOR64: 13932 // Fall through 13933 case X86::ATOMXOR16: 13934 case X86::ATOMXOR8: 13935 case X86::ATOMXOR32: 13936 case X86::ATOMXOR64: 13937 // Fall through 13938 case X86::ATOMNAND8: 13939 case X86::ATOMNAND16: 13940 case X86::ATOMNAND32: 13941 case X86::ATOMNAND64: 13942 // Fall through 13943 case X86::ATOMMAX8: 13944 case X86::ATOMMAX16: 13945 case X86::ATOMMAX32: 13946 case X86::ATOMMAX64: 13947 // Fall through 13948 case X86::ATOMMIN8: 13949 case X86::ATOMMIN16: 13950 case X86::ATOMMIN32: 13951 case X86::ATOMMIN64: 13952 // Fall through 13953 case X86::ATOMUMAX8: 13954 case X86::ATOMUMAX16: 13955 case X86::ATOMUMAX32: 13956 case X86::ATOMUMAX64: 13957 // Fall through 13958 case X86::ATOMUMIN8: 13959 case X86::ATOMUMIN16: 13960 case X86::ATOMUMIN32: 13961 case X86::ATOMUMIN64: 13962 return EmitAtomicLoadArith(MI, BB); 13963 13964 // This group does 64-bit operations on a 32-bit host. 13965 case X86::ATOMAND6432: 13966 case X86::ATOMOR6432: 13967 case X86::ATOMXOR6432: 13968 case X86::ATOMNAND6432: 13969 case X86::ATOMADD6432: 13970 case X86::ATOMSUB6432: 13971 case X86::ATOMMAX6432: 13972 case X86::ATOMMIN6432: 13973 case X86::ATOMUMAX6432: 13974 case X86::ATOMUMIN6432: 13975 case X86::ATOMSWAP6432: 13976 return EmitAtomicLoadArith6432(MI, BB); 13977 13978 case X86::VASTART_SAVE_XMM_REGS: 13979 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB); 13980 13981 case X86::VAARG_64: 13982 return EmitVAARG64WithCustomInserter(MI, BB); 13983 13984 case X86::EH_SjLj_SetJmp32: 13985 case X86::EH_SjLj_SetJmp64: 13986 return emitEHSjLjSetJmp(MI, BB); 13987 13988 case X86::EH_SjLj_LongJmp32: 13989 case X86::EH_SjLj_LongJmp64: 13990 return emitEHSjLjLongJmp(MI, BB); 13991 } 13992} 13993 13994//===----------------------------------------------------------------------===// 13995// X86 Optimization Hooks 13996//===----------------------------------------------------------------------===// 13997 13998void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 13999 APInt &KnownZero, 14000 APInt &KnownOne, 14001 const SelectionDAG &DAG, 14002 unsigned Depth) const { 14003 unsigned BitWidth = KnownZero.getBitWidth(); 14004 unsigned Opc = Op.getOpcode(); 14005 assert((Opc >= ISD::BUILTIN_OP_END || 14006 Opc == ISD::INTRINSIC_WO_CHAIN || 14007 Opc == ISD::INTRINSIC_W_CHAIN || 14008 Opc == ISD::INTRINSIC_VOID) && 14009 "Should use MaskedValueIsZero if you don't know whether Op" 14010 " is a target node!"); 14011 14012 KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything. 14013 switch (Opc) { 14014 default: break; 14015 case X86ISD::ADD: 14016 case X86ISD::SUB: 14017 case X86ISD::ADC: 14018 case X86ISD::SBB: 14019 case X86ISD::SMUL: 14020 case X86ISD::UMUL: 14021 case X86ISD::INC: 14022 case X86ISD::DEC: 14023 case X86ISD::OR: 14024 case X86ISD::XOR: 14025 case X86ISD::AND: 14026 // These nodes' second result is a boolean. 
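// Result 0 is the ordinary arithmetic value; the known-bits fact below only applies to the boolean result.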
14027 if (Op.getResNo() == 0) 14028 break; 14029 // Fallthrough 14030 case X86ISD::SETCC: 14031 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1); 14032 break; 14033 case ISD::INTRINSIC_WO_CHAIN: { 14034 unsigned IntId = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 14035 unsigned NumLoBits = 0; 14036 switch (IntId) { 14037 default: break; 14038 case Intrinsic::x86_sse_movmsk_ps: 14039 case Intrinsic::x86_avx_movmsk_ps_256: 14040 case Intrinsic::x86_sse2_movmsk_pd: 14041 case Intrinsic::x86_avx_movmsk_pd_256: 14042 case Intrinsic::x86_mmx_pmovmskb: 14043 case Intrinsic::x86_sse2_pmovmskb_128: 14044 case Intrinsic::x86_avx2_pmovmskb: { 14045 // High bits of movmskp{s|d}, pmovmskb are known zero. 14046 switch (IntId) { 14047 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 14048 case Intrinsic::x86_sse_movmsk_ps: NumLoBits = 4; break; 14049 case Intrinsic::x86_avx_movmsk_ps_256: NumLoBits = 8; break; 14050 case Intrinsic::x86_sse2_movmsk_pd: NumLoBits = 2; break; 14051 case Intrinsic::x86_avx_movmsk_pd_256: NumLoBits = 4; break; 14052 case Intrinsic::x86_mmx_pmovmskb: NumLoBits = 8; break; 14053 case Intrinsic::x86_sse2_pmovmskb_128: NumLoBits = 16; break; 14054 case Intrinsic::x86_avx2_pmovmskb: NumLoBits = 32; break; 14055 } 14056 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - NumLoBits); 14057 break; 14058 } 14059 } 14060 break; 14061 } 14062 } 14063} 14064 14065unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op, 14066 unsigned Depth) const { 14067 // SETCC_CARRY sets the dest to ~0 for true or 0 for false. 14068 if (Op.getOpcode() == X86ISD::SETCC_CARRY) 14069 return Op.getValueType().getScalarType().getSizeInBits(); 14070 14071 // Fallback case. 14072 return 1; 14073} 14074 14075/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the 14076/// node is a GlobalAddress + offset. 
14077bool X86TargetLowering::isGAPlusOffset(SDNode *N, 14078 const GlobalValue* &GA, 14079 int64_t &Offset) const { 14080 if (N->getOpcode() == X86ISD::Wrapper) { 14081 if (isa<GlobalAddressSDNode>(N->getOperand(0))) { 14082 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal(); 14083 Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset(); 14084 return true; 14085 } 14086 } 14087 return TargetLowering::isGAPlusOffset(N, GA, Offset); 14088} 14089 14090/// isShuffleHigh128VectorInsertLow - Checks whether the shuffle node is the 14091/// same as extracting the high 128-bit part of 256-bit vector and then 14092/// inserting the result into the low part of a new 256-bit vector 14093static bool isShuffleHigh128VectorInsertLow(ShuffleVectorSDNode *SVOp) { 14094 EVT VT = SVOp->getValueType(0); 14095 unsigned NumElems = VT.getVectorNumElements(); 14096 14097 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u> 14098 for (unsigned i = 0, j = NumElems/2; i != NumElems/2; ++i, ++j) 14099 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) || 14100 SVOp->getMaskElt(j) >= 0) 14101 return false; 14102 14103 return true; 14104} 14105 14106/// isShuffleLow128VectorInsertHigh - Checks whether the shuffle node is the 14107/// same as extracting the low 128-bit part of 256-bit vector and then 14108/// inserting the result into the high part of a new 256-bit vector 14109static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) { 14110 EVT VT = SVOp->getValueType(0); 14111 unsigned NumElems = VT.getVectorNumElements(); 14112 14113 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1> 14114 for (unsigned i = NumElems/2, j = 0; i != NumElems; ++i, ++j) 14115 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) || 14116 SVOp->getMaskElt(j) >= 0) 14117 return false; 14118 14119 return true; 14120} 14121 14122/// PerformShuffleCombine256 - Performs shuffle combines for 256-bit vectors. 14123static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG, 14124 TargetLowering::DAGCombinerInfo &DCI, 14125 const X86Subtarget* Subtarget) { 14126 DebugLoc dl = N->getDebugLoc(); 14127 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 14128 SDValue V1 = SVOp->getOperand(0); 14129 SDValue V2 = SVOp->getOperand(1); 14130 EVT VT = SVOp->getValueType(0); 14131 unsigned NumElems = VT.getVectorNumElements(); 14132 14133 if (V1.getOpcode() == ISD::CONCAT_VECTORS && 14134 V2.getOpcode() == ISD::CONCAT_VECTORS) { 14135 // 14136 // 0,0,0,... 14137 // | 14138 // V UNDEF BUILD_VECTOR UNDEF 14139 // \ / \ / 14140 // CONCAT_VECTOR CONCAT_VECTOR 14141 // \ / 14142 // \ / 14143 // RESULT: V + zero extended 14144 // 14145 if (V2.getOperand(0).getOpcode() != ISD::BUILD_VECTOR || 14146 V2.getOperand(1).getOpcode() != ISD::UNDEF || 14147 V1.getOperand(1).getOpcode() != ISD::UNDEF) 14148 return SDValue(); 14149 14150 if (!ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode())) 14151 return SDValue(); 14152 14153 // To match the shuffle mask, the first half of the mask should 14154 // be exactly the first vector, and all the rest a splat with the 14155 // first element of the second one. 14156 for (unsigned i = 0; i != NumElems/2; ++i) 14157 if (!isUndefOrEqual(SVOp->getMaskElt(i), i) || 14158 !isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems)) 14159 return SDValue(); 14160 14161 // If V1 is coming from a vector load then just fold to a VZEXT_LOAD. 
14162 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(V1.getOperand(0))) { 14163 if (Ld->hasNUsesOfValue(1, 0)) { 14164 SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other); 14165 SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() }; 14166 SDValue ResNode = 14167 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, 2, 14168 Ld->getMemoryVT(), 14169 Ld->getPointerInfo(), 14170 Ld->getAlignment(), 14171 false/*isVolatile*/, true/*ReadMem*/, 14172 false/*WriteMem*/); 14173 return DAG.getNode(ISD::BITCAST, dl, VT, ResNode); 14174 } 14175 } 14176 14177 // Emit a zeroed vector and insert the desired subvector on its 14178 // first half. 14179 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl); 14180 SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), 0, DAG, dl); 14181 return DCI.CombineTo(N, InsV); 14182 } 14183 14184 //===--------------------------------------------------------------------===// 14185 // Combine some shuffles into subvector extracts and inserts: 14186 // 14187 14188 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u> 14189 if (isShuffleHigh128VectorInsertLow(SVOp)) { 14190 SDValue V = Extract128BitVector(V1, NumElems/2, DAG, dl); 14191 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, 0, DAG, dl); 14192 return DCI.CombineTo(N, InsV); 14193 } 14194 14195 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1> 14196 if (isShuffleLow128VectorInsertHigh(SVOp)) { 14197 SDValue V = Extract128BitVector(V1, 0, DAG, dl); 14198 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, NumElems/2, DAG, dl); 14199 return DCI.CombineTo(N, InsV); 14200 } 14201 14202 return SDValue(); 14203} 14204 14205/// PerformShuffleCombine - Performs several different shuffle combines. 14206static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG, 14207 TargetLowering::DAGCombinerInfo &DCI, 14208 const X86Subtarget *Subtarget) { 14209 DebugLoc dl = N->getDebugLoc(); 14210 EVT VT = N->getValueType(0); 14211 14212 // Don't create instructions with illegal types after legalize types has run. 14213 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 14214 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType())) 14215 return SDValue(); 14216 14217 // Combine 256-bit vector shuffles. This is only profitable when in AVX mode 14218 if (Subtarget->hasAVX() && VT.is256BitVector() && 14219 N->getOpcode() == ISD::VECTOR_SHUFFLE) 14220 return PerformShuffleCombine256(N, DAG, DCI, Subtarget); 14221 14222 // Only handle 128 wide vector from here on. 14223 if (!VT.is128BitVector()) 14224 return SDValue(); 14225 14226 // Combine a vector_shuffle that is equal to build_vector load1, load2, load3, 14227 // load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are 14228 // consecutive, non-overlapping, and in the right order. 14229 SmallVector<SDValue, 16> Elts; 14230 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) 14231 Elts.push_back(getShuffleScalarElt(N, i, DAG, 0)); 14232 14233 return EltsFromConsecutiveLoads(VT, Elts, dl, DAG); 14234} 14235 14236 14237/// PerformTruncateCombine - Converts truncate operation to 14238/// a sequence of vector shuffle operations. 
14239/// It is possible when we truncate 256-bit vector to 128-bit vector 14240static SDValue PerformTruncateCombine(SDNode *N, SelectionDAG &DAG, 14241 TargetLowering::DAGCombinerInfo &DCI, 14242 const X86Subtarget *Subtarget) { 14243 if (!DCI.isBeforeLegalizeOps()) 14244 return SDValue(); 14245 14246 if (!Subtarget->hasAVX()) 14247 return SDValue(); 14248 14249 EVT VT = N->getValueType(0); 14250 SDValue Op = N->getOperand(0); 14251 EVT OpVT = Op.getValueType(); 14252 DebugLoc dl = N->getDebugLoc(); 14253 14254 if ((VT == MVT::v4i32) && (OpVT == MVT::v4i64)) { 14255 14256 if (Subtarget->hasAVX2()) { 14257 // AVX2: v4i64 -> v4i32 14258 14259 // VPERMD 14260 static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1}; 14261 14262 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v8i32, Op); 14263 Op = DAG.getVectorShuffle(MVT::v8i32, dl, Op, DAG.getUNDEF(MVT::v8i32), 14264 ShufMask); 14265 14266 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Op, 14267 DAG.getIntPtrConstant(0)); 14268 } 14269 14270 // AVX: v4i64 -> v4i32 14271 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Op, 14272 DAG.getIntPtrConstant(0)); 14273 14274 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Op, 14275 DAG.getIntPtrConstant(2)); 14276 14277 OpLo = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, OpLo); 14278 OpHi = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, OpHi); 14279 14280 // PSHUFD 14281 static const int ShufMask1[] = {0, 2, 0, 0}; 14282 14283 SDValue Undef = DAG.getUNDEF(VT); 14284 OpLo = DAG.getVectorShuffle(VT, dl, OpLo, Undef, ShufMask1); 14285 OpHi = DAG.getVectorShuffle(VT, dl, OpHi, Undef, ShufMask1); 14286 14287 // MOVLHPS 14288 static const int ShufMask2[] = {0, 1, 4, 5}; 14289 14290 return DAG.getVectorShuffle(VT, dl, OpLo, OpHi, ShufMask2); 14291 } 14292 14293 if ((VT == MVT::v8i16) && (OpVT == MVT::v8i32)) { 14294 14295 if (Subtarget->hasAVX2()) { 14296 // AVX2: v8i32 -> v8i16 14297 14298 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v32i8, Op); 14299 14300 // PSHUFB 14301 SmallVector<SDValue,32> pshufbMask; 14302 for (unsigned i = 0; i < 2; ++i) { 14303 pshufbMask.push_back(DAG.getConstant(0x0, MVT::i8)); 14304 pshufbMask.push_back(DAG.getConstant(0x1, MVT::i8)); 14305 pshufbMask.push_back(DAG.getConstant(0x4, MVT::i8)); 14306 pshufbMask.push_back(DAG.getConstant(0x5, MVT::i8)); 14307 pshufbMask.push_back(DAG.getConstant(0x8, MVT::i8)); 14308 pshufbMask.push_back(DAG.getConstant(0x9, MVT::i8)); 14309 pshufbMask.push_back(DAG.getConstant(0xc, MVT::i8)); 14310 pshufbMask.push_back(DAG.getConstant(0xd, MVT::i8)); 14311 for (unsigned j = 0; j < 8; ++j) 14312 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); 14313 } 14314 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v32i8, 14315 &pshufbMask[0], 32); 14316 Op = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v32i8, Op, BV); 14317 14318 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4i64, Op); 14319 14320 static const int ShufMask[] = {0, 2, -1, -1}; 14321 Op = DAG.getVectorShuffle(MVT::v4i64, dl, Op, DAG.getUNDEF(MVT::v4i64), 14322 &ShufMask[0]); 14323 14324 Op = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Op, 14325 DAG.getIntPtrConstant(0)); 14326 14327 return DAG.getNode(ISD::BITCAST, dl, VT, Op); 14328 } 14329 14330 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i32, Op, 14331 DAG.getIntPtrConstant(0)); 14332 14333 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i32, Op, 14334 DAG.getIntPtrConstant(4)); 14335 14336 OpLo = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLo); 14337 OpHi = 
DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpHi); 14338 14339 // PSHUFB 14340 static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13, 14341 -1, -1, -1, -1, -1, -1, -1, -1}; 14342 14343 SDValue Undef = DAG.getUNDEF(MVT::v16i8); 14344 OpLo = DAG.getVectorShuffle(MVT::v16i8, dl, OpLo, Undef, ShufMask1); 14345 OpHi = DAG.getVectorShuffle(MVT::v16i8, dl, OpHi, Undef, ShufMask1); 14346 14347 OpLo = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, OpLo); 14348 OpHi = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, OpHi); 14349 14350 // MOVLHPS 14351 static const int ShufMask2[] = {0, 1, 4, 5}; 14352 14353 SDValue res = DAG.getVectorShuffle(MVT::v4i32, dl, OpLo, OpHi, ShufMask2); 14354 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, res); 14355 } 14356 14357 return SDValue(); 14358} 14359 14360/// XFormVExtractWithShuffleIntoLoad - Check if a vector extract from a target 14361/// specific shuffle of a load can be folded into a single element load. 14362/// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but 14363/// shuffles have been custom lowered so we need to handle those here. 14364static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG, 14365 TargetLowering::DAGCombinerInfo &DCI) { 14366 if (DCI.isBeforeLegalizeOps()) 14367 return SDValue(); 14368 14369 SDValue InVec = N->getOperand(0); 14370 SDValue EltNo = N->getOperand(1); 14371 14372 if (!isa<ConstantSDNode>(EltNo)) 14373 return SDValue(); 14374 14375 EVT VT = InVec.getValueType(); 14376 14377 bool HasShuffleIntoBitcast = false; 14378 if (InVec.getOpcode() == ISD::BITCAST) { 14379 // Don't duplicate a load with other uses. 14380 if (!InVec.hasOneUse()) 14381 return SDValue(); 14382 EVT BCVT = InVec.getOperand(0).getValueType(); 14383 if (BCVT.getVectorNumElements() != VT.getVectorNumElements()) 14384 return SDValue(); 14385 InVec = InVec.getOperand(0); 14386 HasShuffleIntoBitcast = true; 14387 } 14388 14389 if (!isTargetShuffle(InVec.getOpcode())) 14390 return SDValue(); 14391 14392 // Don't duplicate a load with other uses. 14393 if (!InVec.hasOneUse()) 14394 return SDValue(); 14395 14396 SmallVector<int, 16> ShuffleMask; 14397 bool UnaryShuffle; 14398 if (!getTargetShuffleMask(InVec.getNode(), VT.getSimpleVT(), ShuffleMask, 14399 UnaryShuffle)) 14400 return SDValue(); 14401 14402 // Select the input vector, guarding against out of range extract vector. 14403 unsigned NumElems = VT.getVectorNumElements(); 14404 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue(); 14405 int Idx = (Elt > (int)NumElems) ? -1 : ShuffleMask[Elt]; 14406 SDValue LdNode = (Idx < (int)NumElems) ? InVec.getOperand(0) 14407 : InVec.getOperand(1); 14408 14409 // If inputs to shuffle are the same for both ops, then allow 2 uses 14410 unsigned AllowedUses = InVec.getOperand(0) == InVec.getOperand(1) ? 2 : 1; 14411 14412 if (LdNode.getOpcode() == ISD::BITCAST) { 14413 // Don't duplicate a load with other uses. 14414 if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0)) 14415 return SDValue(); 14416 14417 AllowedUses = 1; // only allow 1 load use if we have a bitcast 14418 LdNode = LdNode.getOperand(0); 14419 } 14420 14421 if (!ISD::isNormalLoad(LdNode.getNode())) 14422 return SDValue(); 14423 14424 LoadSDNode *LN0 = cast<LoadSDNode>(LdNode); 14425 14426 if (!LN0 ||!LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile()) 14427 return SDValue(); 14428 14429 if (HasShuffleIntoBitcast) { 14430 // If there's a bitcast before the shuffle, check if the load type and 14431 // alignment are valid.
14432 unsigned Align = LN0->getAlignment(); 14433 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 14434 unsigned NewAlign = TLI.getDataLayout()-> 14435 getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext())); 14436 14437 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VT)) 14438 return SDValue(); 14439 } 14440 14441 // All checks match so transform back to vector_shuffle so that DAG combiner 14442 // can finish the job. 14443 DebugLoc dl = N->getDebugLoc(); 14444 14445 // Create shuffle node taking into account the case that it's a unary shuffle 14446 SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(VT) : InVec.getOperand(1); 14447 Shuffle = DAG.getVectorShuffle(InVec.getValueType(), dl, 14448 InVec.getOperand(0), Shuffle, 14449 &ShuffleMask[0]); 14450 Shuffle = DAG.getNode(ISD::BITCAST, dl, VT, Shuffle); 14451 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle, 14452 EltNo); 14453} 14454 14455/// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index 14456/// generation and convert it from being a bunch of shuffles and extracts 14457/// to a simple store and scalar loads to extract the elements. 14458static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG, 14459 TargetLowering::DAGCombinerInfo &DCI) { 14460 SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI); 14461 if (NewOp.getNode()) 14462 return NewOp; 14463 14464 SDValue InputVector = N->getOperand(0); 14465 // Detect whether we are trying to convert from mmx to i32 and the bitcast 14466 // from mmx to v2i32 has a single usage. 14467 if (InputVector.getNode()->getOpcode() == llvm::ISD::BITCAST && 14468 InputVector.getNode()->getOperand(0).getValueType() == MVT::x86mmx && 14469 InputVector.hasOneUse() && N->getValueType(0) == MVT::i32) 14470 return DAG.getNode(X86ISD::MMX_MOVD2W, InputVector.getDebugLoc(), 14471 N->getValueType(0), 14472 InputVector.getNode()->getOperand(0)); 14473 14474 // Only operate on vectors of 4 elements, where the alternative shuffling 14475 // gets to be more expensive. 14476 if (InputVector.getValueType() != MVT::v4i32) 14477 return SDValue(); 14478 14479 // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a 14480 // single use which is a sign-extend or zero-extend, and all elements are 14481 // used. 14482 SmallVector<SDNode *, 4> Uses; 14483 unsigned ExtractedElements = 0; 14484 for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(), 14485 UE = InputVector.getNode()->use_end(); UI != UE; ++UI) { 14486 if (UI.getUse().getResNo() != InputVector.getResNo()) 14487 return SDValue(); 14488 14489 SDNode *Extract = *UI; 14490 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 14491 return SDValue(); 14492 14493 if (Extract->getValueType(0) != MVT::i32) 14494 return SDValue(); 14495 if (!Extract->hasOneUse()) 14496 return SDValue(); 14497 if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND && 14498 Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND) 14499 return SDValue(); 14500 if (!isa<ConstantSDNode>(Extract->getOperand(1))) 14501 return SDValue(); 14502 14503 // Record which element was extracted. 14504 ExtractedElements |= 14505 1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue(); 14506 14507 Uses.push_back(Extract); 14508 } 14509 14510 // If not all the elements were used, this may not be worthwhile. 14511 if (ExtractedElements != 15) 14512 return SDValue(); 14513 14514 // Ok, we've now decided to do the transformation.
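// The transformation: spill the vector to a stack slot, then rewrite each extract as a scalar load from the corresponding element offset in that slot.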
14515 DebugLoc dl = InputVector.getDebugLoc(); 14516 14517 // Store the value to a temporary stack slot. 14518 SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType()); 14519 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr, 14520 MachinePointerInfo(), false, false, 0); 14521 14522 // Replace each use (extract) with a load of the appropriate element. 14523 for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(), 14524 UE = Uses.end(); UI != UE; ++UI) { 14525 SDNode *Extract = *UI; 14526 14527 // Compute the element's address. 14528 SDValue Idx = Extract->getOperand(1); 14529 unsigned EltSize = 14530 InputVector.getValueType().getVectorElementType().getSizeInBits()/8; 14531 uint64_t Offset = EltSize * cast<ConstantSDNode>(Idx)->getZExtValue(); 14532 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 14533 SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy()); 14534 14535 SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), 14536 StackPtr, OffsetVal); 14537 14538 // Load the scalar. 14539 SDValue LoadScalar = DAG.getLoad(Extract->getValueType(0), dl, Ch, 14540 ScalarAddr, MachinePointerInfo(), 14541 false, false, false, 0); 14542 14543 // Replace the extract with the load. 14544 DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), LoadScalar); 14545 } 14546 14547 // The replacement was made in place; don't return anything. 14548 return SDValue(); 14549} 14550 14551/// PerformSELECTCombine - Do target-specific dag combines on SELECT and VSELECT 14552/// nodes. 14553static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, 14554 TargetLowering::DAGCombinerInfo &DCI, 14555 const X86Subtarget *Subtarget) { 14556 DebugLoc DL = N->getDebugLoc(); 14557 SDValue Cond = N->getOperand(0); 14558 // Get the LHS/RHS of the select. 14559 SDValue LHS = N->getOperand(1); 14560 SDValue RHS = N->getOperand(2); 14561 EVT VT = LHS.getValueType(); 14562 14563 // If we have SSE[12] support, try to form min/max nodes. SSE min/max 14564 // instructions match the semantics of the common C idiom x<y?x:y but not 14565 // x<=y?x:y, because of how they handle negative zero (which can be 14566 // ignored in unsafe-math mode). 14567 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() && 14568 VT != MVT::f80 && DAG.getTargetLoweringInfo().isTypeLegal(VT) && 14569 (Subtarget->hasSSE2() || 14570 (Subtarget->hasSSE1() && VT.getScalarType() == MVT::f32))) { 14571 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); 14572 14573 unsigned Opcode = 0; 14574 // Check for x CC y ? x : y. 14575 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) && 14576 DAG.isEqualTo(RHS, Cond.getOperand(1))) { 14577 switch (CC) { 14578 default: break; 14579 case ISD::SETULT: 14580 // Converting this to a min would handle NaNs incorrectly, and swapping 14581 // the operands would cause it to handle comparisons between positive 14582 // and negative zero incorrectly. 14583 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) { 14584 if (!DAG.getTarget().Options.UnsafeFPMath && 14585 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 14586 break; 14587 std::swap(LHS, RHS); 14588 } 14589 Opcode = X86ISD::FMIN; 14590 break; 14591 case ISD::SETOLE: 14592 // Converting this to a min would handle comparisons between positive 14593 // and negative zero incorrectly.
14594 if (!DAG.getTarget().Options.UnsafeFPMath && 14595 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) 14596 break; 14597 Opcode = X86ISD::FMIN; 14598 break; 14599 case ISD::SETULE: 14600 // Converting this to a min would handle both negative zeros and NaNs 14601 // incorrectly, but we can swap the operands to fix both. 14602 std::swap(LHS, RHS); 14603 case ISD::SETOLT: 14604 case ISD::SETLT: 14605 case ISD::SETLE: 14606 Opcode = X86ISD::FMIN; 14607 break; 14608 14609 case ISD::SETOGE: 14610 // Converting this to a max would handle comparisons between positive 14611 // and negative zero incorrectly. 14612 if (!DAG.getTarget().Options.UnsafeFPMath && 14613 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) 14614 break; 14615 Opcode = X86ISD::FMAX; 14616 break; 14617 case ISD::SETUGT: 14618 // Converting this to a max would handle NaNs incorrectly, and swapping 14619 // the operands would cause it to handle comparisons between positive 14620 // and negative zero incorrectly. 14621 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) { 14622 if (!DAG.getTarget().Options.UnsafeFPMath && 14623 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 14624 break; 14625 std::swap(LHS, RHS); 14626 } 14627 Opcode = X86ISD::FMAX; 14628 break; 14629 case ISD::SETUGE: 14630 // Converting this to a max would handle both negative zeros and NaNs 14631 // incorrectly, but we can swap the operands to fix both. 14632 std::swap(LHS, RHS); 14633 case ISD::SETOGT: 14634 case ISD::SETGT: 14635 case ISD::SETGE: 14636 Opcode = X86ISD::FMAX; 14637 break; 14638 } 14639 // Check for x CC y ? y : x -- a min/max with reversed arms. 14640 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) && 14641 DAG.isEqualTo(RHS, Cond.getOperand(0))) { 14642 switch (CC) { 14643 default: break; 14644 case ISD::SETOGE: 14645 // Converting this to a min would handle comparisons between positive 14646 // and negative zero incorrectly, and swapping the operands would 14647 // cause it to handle NaNs incorrectly. 14648 if (!DAG.getTarget().Options.UnsafeFPMath && 14649 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) { 14650 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 14651 break; 14652 std::swap(LHS, RHS); 14653 } 14654 Opcode = X86ISD::FMIN; 14655 break; 14656 case ISD::SETUGT: 14657 // Converting this to a min would handle NaNs incorrectly. 14658 if (!DAG.getTarget().Options.UnsafeFPMath && 14659 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))) 14660 break; 14661 Opcode = X86ISD::FMIN; 14662 break; 14663 case ISD::SETUGE: 14664 // Converting this to a min would handle both negative zeros and NaNs 14665 // incorrectly, but we can swap the operands to fix both. 14666 std::swap(LHS, RHS); 14667 case ISD::SETOGT: 14668 case ISD::SETGT: 14669 case ISD::SETGE: 14670 Opcode = X86ISD::FMIN; 14671 break; 14672 14673 case ISD::SETULT: 14674 // Converting this to a max would handle NaNs incorrectly. 14675 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 14676 break; 14677 Opcode = X86ISD::FMAX; 14678 break; 14679 case ISD::SETOLE: 14680 // Converting this to a max would handle comparisons between positive 14681 // and negative zero incorrectly, and swapping the operands would 14682 // cause it to handle NaNs incorrectly. 
14683 if (!DAG.getTarget().Options.UnsafeFPMath && 14684 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) { 14685 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 14686 break; 14687 std::swap(LHS, RHS); 14688 } 14689 Opcode = X86ISD::FMAX; 14690 break; 14691 case ISD::SETULE: 14692 // Converting this to a max would handle both negative zeros and NaNs 14693 // incorrectly, but we can swap the operands to fix both. 14694 std::swap(LHS, RHS); 14695 case ISD::SETOLT: 14696 case ISD::SETLT: 14697 case ISD::SETLE: 14698 Opcode = X86ISD::FMAX; 14699 break; 14700 } 14701 } 14702 14703 if (Opcode) 14704 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS); 14705 } 14706 14707 // If this is a select between two integer constants, try to do some 14708 // optimizations. 14709 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) { 14710 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS)) 14711 // Don't do this for crazy integer types. 14712 if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) { 14713 // If this is efficiently invertible, canonicalize the LHSC/RHSC values 14714 // so that TrueC (the true value) is larger than FalseC. 14715 bool NeedsCondInvert = false; 14716 14717 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) && 14718 // Efficiently invertible. 14719 (Cond.getOpcode() == ISD::SETCC || // setcc -> invertible. 14720 (Cond.getOpcode() == ISD::XOR && // xor(X, C) -> invertible. 14721 isa<ConstantSDNode>(Cond.getOperand(1))))) { 14722 NeedsCondInvert = true; 14723 std::swap(TrueC, FalseC); 14724 } 14725 14726 // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0. 14727 if (FalseC->getAPIntValue() == 0 && 14728 TrueC->getAPIntValue().isPowerOf2()) { 14729 if (NeedsCondInvert) // Invert the condition if needed. 14730 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond, 14731 DAG.getConstant(1, Cond.getValueType())); 14732 14733 // Zero extend the condition if needed. 14734 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond); 14735 14736 unsigned ShAmt = TrueC->getAPIntValue().logBase2(); 14737 return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond, 14738 DAG.getConstant(ShAmt, MVT::i8)); 14739 } 14740 14741 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)+cst. 14742 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) { 14743 if (NeedsCondInvert) // Invert the condition if needed. 14744 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond, 14745 DAG.getConstant(1, Cond.getValueType())); 14746 14747 // Zero extend the condition if needed. 14748 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, 14749 FalseC->getValueType(0), Cond); 14750 return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 14751 SDValue(FalseC, 0)); 14752 } 14753 14754 // Optimize cases that will turn into an LEA instruction. This requires 14755 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9). 
14756 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) { 14757 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue(); 14758 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff; 14759 14760 bool isFastMultiplier = false; 14761 if (Diff < 10) { 14762 switch ((unsigned char)Diff) { 14763 default: break; 14764 case 1: // result = add base, cond 14765 case 2: // result = lea base( , cond*2) 14766 case 3: // result = lea base(cond, cond*2) 14767 case 4: // result = lea base( , cond*4) 14768 case 5: // result = lea base(cond, cond*4) 14769 case 8: // result = lea base( , cond*8) 14770 case 9: // result = lea base(cond, cond*8) 14771 isFastMultiplier = true; 14772 break; 14773 } 14774 } 14775 14776 if (isFastMultiplier) { 14777 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue(); 14778 if (NeedsCondInvert) // Invert the condition if needed. 14779 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond, 14780 DAG.getConstant(1, Cond.getValueType())); 14781 14782 // Zero extend the condition if needed. 14783 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0), 14784 Cond); 14785 // Scale the condition by the difference. 14786 if (Diff != 1) 14787 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond, 14788 DAG.getConstant(Diff, Cond.getValueType())); 14789 14790 // Add the base if non-zero. 14791 if (FalseC->getAPIntValue() != 0) 14792 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 14793 SDValue(FalseC, 0)); 14794 return Cond; 14795 } 14796 } 14797 } 14798 } 14799 14800 // Canonicalize max and min: 14801 // (x > y) ? x : y -> (x >= y) ? x : y 14802 // (x < y) ? x : y -> (x <= y) ? x : y 14803 // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates 14804 // the need for an extra compare 14805 // against zero. e.g. 14806 // (x - y) > 0 : (x - y) ? 0 -> (x - y) >= 0 : (x - y) ? 0 14807 // subl %esi, %edi 14808 // testl %edi, %edi 14809 // movl $0, %eax 14810 // cmovgl %edi, %eax 14811 // => 14812 // xorl %eax, %eax 14813 // subl %esi, $edi 14814 // cmovsl %eax, %edi 14815 if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC && 14816 DAG.isEqualTo(LHS, Cond.getOperand(0)) && 14817 DAG.isEqualTo(RHS, Cond.getOperand(1))) { 14818 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); 14819 switch (CC) { 14820 default: break; 14821 case ISD::SETLT: 14822 case ISD::SETGT: { 14823 ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE; 14824 Cond = DAG.getSetCC(Cond.getDebugLoc(), Cond.getValueType(), 14825 Cond.getOperand(0), Cond.getOperand(1), NewCC); 14826 return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS); 14827 } 14828 } 14829 } 14830 14831 // If we know that this node is legal then we know that it is going to be 14832 // matched by one of the SSE/AVX BLEND instructions. These instructions only 14833 // depend on the highest bit in each word. Try to use SimplifyDemandedBits 14834 // to simplify previous instructions. 14835 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 14836 if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() && 14837 !DCI.isBeforeLegalize() && TLI.isOperationLegal(ISD::VSELECT, VT)) { 14838 unsigned BitWidth = Cond.getValueType().getScalarType().getSizeInBits(); 14839 14840 // Don't optimize vector selects that map to mask-registers. 
14841 if (BitWidth == 1) 14842 return SDValue(); 14843 14844 assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size"); 14845 APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 1); 14846 14847 APInt KnownZero, KnownOne; 14848 TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(), 14849 DCI.isBeforeLegalizeOps()); 14850 if (TLO.ShrinkDemandedConstant(Cond, DemandedMask) || 14851 TLI.SimplifyDemandedBits(Cond, DemandedMask, KnownZero, KnownOne, TLO)) 14852 DCI.CommitTargetLoweringOpt(TLO); 14853 } 14854 14855 return SDValue(); 14856} 14857 14858// Check whether a boolean test is testing a boolean value generated by 14859// X86ISD::SETCC. If so, return the operand of that SETCC and proper condition 14860// code. 14861// 14862// Simplify the following patterns: 14863// (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or 14864// (Op (CMP (SETCC Cond EFLAGS) 0) NEQ) 14865// to (Op EFLAGS Cond) 14866// 14867// (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or 14868// (Op (CMP (SETCC Cond EFLAGS) 1) NEQ) 14869// to (Op EFLAGS !Cond) 14870// 14871// where Op could be BRCOND or CMOV. 14872// 14873static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) { 14874 // Quit if not CMP and SUB with its value result used. 14875 if (Cmp.getOpcode() != X86ISD::CMP && 14876 (Cmp.getOpcode() != X86ISD::SUB || Cmp.getNode()->hasAnyUseOfValue(0))) 14877 return SDValue(); 14878 14879 // Quit if not used as a boolean value. 14880 if (CC != X86::COND_E && CC != X86::COND_NE) 14881 return SDValue(); 14882 14883 // Check CMP operands. One of them should be 0 or 1 and the other should be 14884 // an SetCC or extended from it. 14885 SDValue Op1 = Cmp.getOperand(0); 14886 SDValue Op2 = Cmp.getOperand(1); 14887 14888 SDValue SetCC; 14889 const ConstantSDNode* C = 0; 14890 bool needOppositeCond = (CC == X86::COND_E); 14891 14892 if ((C = dyn_cast<ConstantSDNode>(Op1))) 14893 SetCC = Op2; 14894 else if ((C = dyn_cast<ConstantSDNode>(Op2))) 14895 SetCC = Op1; 14896 else // Quit if all operands are not constants. 14897 return SDValue(); 14898 14899 if (C->getZExtValue() == 1) 14900 needOppositeCond = !needOppositeCond; 14901 else if (C->getZExtValue() != 0) 14902 // Quit if the constant is neither 0 or 1. 14903 return SDValue(); 14904 14905 // Skip 'zext' node. 14906 if (SetCC.getOpcode() == ISD::ZERO_EXTEND) 14907 SetCC = SetCC.getOperand(0); 14908 14909 switch (SetCC.getOpcode()) { 14910 case X86ISD::SETCC: 14911 // Set the condition code or opposite one if necessary. 14912 CC = X86::CondCode(SetCC.getConstantOperandVal(0)); 14913 if (needOppositeCond) 14914 CC = X86::GetOppositeBranchCondition(CC); 14915 return SetCC.getOperand(1); 14916 case X86ISD::CMOV: { 14917 // Check whether false/true value has canonical one, i.e. 0 or 1. 14918 ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0)); 14919 ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1)); 14920 // Quit if true value is not a constant. 14921 if (!TVal) 14922 return SDValue(); 14923 // Quit if false value is not a constant. 14924 if (!FVal) { 14925 // A special case for rdrand, where 0 is set if false cond is found. 14926 SDValue Op = SetCC.getOperand(0); 14927 if (Op.getOpcode() != X86ISD::RDRAND) 14928 return SDValue(); 14929 } 14930 // Quit if false value is not the constant 0 or 1. 14931 bool FValIsFalse = true; 14932 if (FVal && FVal->getZExtValue() != 0) { 14933 if (FVal->getZExtValue() != 1) 14934 return SDValue(); 14935 // If FVal is 1, opposite cond is needed. 
14936 needOppositeCond = !needOppositeCond; 14937 FValIsFalse = false; 14938 } 14939 // Quit if TVal is not the constant opposite of FVal. 14940 if (FValIsFalse && TVal->getZExtValue() != 1) 14941 return SDValue(); 14942 if (!FValIsFalse && TVal->getZExtValue() != 0) 14943 return SDValue(); 14944 CC = X86::CondCode(SetCC.getConstantOperandVal(2)); 14945 if (needOppositeCond) 14946 CC = X86::GetOppositeBranchCondition(CC); 14947 return SetCC.getOperand(3); 14948 } 14949 } 14950 14951 return SDValue(); 14952} 14953 14954/// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL] 14955static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG, 14956 TargetLowering::DAGCombinerInfo &DCI, 14957 const X86Subtarget *Subtarget) { 14958 DebugLoc DL = N->getDebugLoc(); 14959 14960 // If the flag operand isn't dead, don't touch this CMOV. 14961 if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty()) 14962 return SDValue(); 14963 14964 SDValue FalseOp = N->getOperand(0); 14965 SDValue TrueOp = N->getOperand(1); 14966 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2); 14967 SDValue Cond = N->getOperand(3); 14968 14969 if (CC == X86::COND_E || CC == X86::COND_NE) { 14970 switch (Cond.getOpcode()) { 14971 default: break; 14972 case X86ISD::BSR: 14973 case X86ISD::BSF: 14974 // If operand of BSR / BSF are proven never zero, then ZF cannot be set. 14975 if (DAG.isKnownNeverZero(Cond.getOperand(0))) 14976 return (CC == X86::COND_E) ? FalseOp : TrueOp; 14977 } 14978 } 14979 14980 SDValue Flags; 14981 14982 Flags = checkBoolTestSetCCCombine(Cond, CC); 14983 if (Flags.getNode() && 14984 // Extra check as FCMOV only supports a subset of X86 cond. 14985 (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC))) { 14986 SDValue Ops[] = { FalseOp, TrueOp, 14987 DAG.getConstant(CC, MVT::i8), Flags }; 14988 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), 14989 Ops, array_lengthof(Ops)); 14990 } 14991 14992 // If this is a select between two integer constants, try to do some 14993 // optimizations. Note that the operands are ordered the opposite of SELECT 14994 // operands. 14995 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) { 14996 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) { 14997 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is 14998 // larger than FalseC (the false value). 14999 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) { 15000 CC = X86::GetOppositeBranchCondition(CC); 15001 std::swap(TrueC, FalseC); 15002 std::swap(TrueOp, FalseOp); 15003 } 15004 15005 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0. 15006 // This is efficient for any integer data type (including i8/i16) and 15007 // shift amount. 15008 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) { 15009 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 15010 DAG.getConstant(CC, MVT::i8), Cond); 15011 15012 // Zero extend the condition if needed. 15013 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond); 15014 15015 unsigned ShAmt = TrueC->getAPIntValue().logBase2(); 15016 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond, 15017 DAG.getConstant(ShAmt, MVT::i8)); 15018 if (N->getNumValues() == 2) // Dead flag value? 15019 return DCI.CombineTo(N, Cond, SDValue()); 15020 return Cond; 15021 } 15022 15023 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)+cst. This is efficient 15024 // for any integer data type, including i8/i16. 
15025 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) { 15026 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 15027 DAG.getConstant(CC, MVT::i8), Cond); 15028 15029 // Zero extend the condition if needed. 15030 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, 15031 FalseC->getValueType(0), Cond); 15032 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 15033 SDValue(FalseC, 0)); 15034 15035 if (N->getNumValues() == 2) // Dead flag value? 15036 return DCI.CombineTo(N, Cond, SDValue()); 15037 return Cond; 15038 } 15039 15040 // Optimize cases that will turn into an LEA instruction. This requires 15041 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9). 15042 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) { 15043 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue(); 15044 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff; 15045 15046 bool isFastMultiplier = false; 15047 if (Diff < 10) { 15048 switch ((unsigned char)Diff) { 15049 default: break; 15050 case 1: // result = add base, cond 15051 case 2: // result = lea base( , cond*2) 15052 case 3: // result = lea base(cond, cond*2) 15053 case 4: // result = lea base( , cond*4) 15054 case 5: // result = lea base(cond, cond*4) 15055 case 8: // result = lea base( , cond*8) 15056 case 9: // result = lea base(cond, cond*8) 15057 isFastMultiplier = true; 15058 break; 15059 } 15060 } 15061 15062 if (isFastMultiplier) { 15063 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue(); 15064 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 15065 DAG.getConstant(CC, MVT::i8), Cond); 15066 // Zero extend the condition if needed. 15067 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0), 15068 Cond); 15069 // Scale the condition by the difference. 15070 if (Diff != 1) 15071 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond, 15072 DAG.getConstant(Diff, Cond.getValueType())); 15073 15074 // Add the base if non-zero. 15075 if (FalseC->getAPIntValue() != 0) 15076 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 15077 SDValue(FalseC, 0)); 15078 if (N->getNumValues() == 2) // Dead flag value? 15079 return DCI.CombineTo(N, Cond, SDValue()); 15080 return Cond; 15081 } 15082 } 15083 } 15084 } 15085 15086 // Handle these cases: 15087 // (select (x != c), e, c) -> select (x != c), e, x), 15088 // (select (x == c), c, e) -> select (x == c), x, e) 15089 // where the c is an integer constant, and the "select" is the combination 15090 // of CMOV and CMP. 15091 // 15092 // The rationale for this change is that the conditional-move from a constant 15093 // needs two instructions, however, conditional-move from a register needs 15094 // only one instruction. 15095 // 15096 // CAVEAT: By replacing a constant with a symbolic value, it may obscure 15097 // some instruction-combining opportunities. This opt needs to be 15098 // postponed as late as possible. 15099 // 15100 if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) { 15101 // the DCI.xxxx conditions are provided to postpone the optimization as 15102 // late as possible. 
15103 15104 ConstantSDNode *CmpAgainst = 0; 15105 if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) && 15106 (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) && 15107 dyn_cast<ConstantSDNode>(Cond.getOperand(0)) == 0) { 15108 15109 if (CC == X86::COND_NE && 15110 CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) { 15111 CC = X86::GetOppositeBranchCondition(CC); 15112 std::swap(TrueOp, FalseOp); 15113 } 15114 15115 if (CC == X86::COND_E && 15116 CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) { 15117 SDValue Ops[] = { FalseOp, Cond.getOperand(0), 15118 DAG.getConstant(CC, MVT::i8), Cond }; 15119 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList (), Ops, 15120 array_lengthof(Ops)); 15121 } 15122 } 15123 } 15124 15125 return SDValue(); 15126} 15127 15128 15129/// PerformMulCombine - Optimize a single multiply with constant into two 15130/// in order to implement it with two cheaper instructions, e.g. 15131/// LEA + SHL, LEA + LEA. 15132static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG, 15133 TargetLowering::DAGCombinerInfo &DCI) { 15134 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 15135 return SDValue(); 15136 15137 EVT VT = N->getValueType(0); 15138 if (VT != MVT::i64) 15139 return SDValue(); 15140 15141 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 15142 if (!C) 15143 return SDValue(); 15144 uint64_t MulAmt = C->getZExtValue(); 15145 if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9) 15146 return SDValue(); 15147 15148 uint64_t MulAmt1 = 0; 15149 uint64_t MulAmt2 = 0; 15150 if ((MulAmt % 9) == 0) { 15151 MulAmt1 = 9; 15152 MulAmt2 = MulAmt / 9; 15153 } else if ((MulAmt % 5) == 0) { 15154 MulAmt1 = 5; 15155 MulAmt2 = MulAmt / 5; 15156 } else if ((MulAmt % 3) == 0) { 15157 MulAmt1 = 3; 15158 MulAmt2 = MulAmt / 3; 15159 } 15160 if (MulAmt2 && 15161 (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){ 15162 DebugLoc DL = N->getDebugLoc(); 15163 15164 if (isPowerOf2_64(MulAmt2) && 15165 !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD)) 15166 // If second multiplifer is pow2, issue it first. We want the multiply by 15167 // 3, 5, or 9 to be folded into the addressing mode unless the lone use 15168 // is an add. 15169 std::swap(MulAmt1, MulAmt2); 15170 15171 SDValue NewMul; 15172 if (isPowerOf2_64(MulAmt1)) 15173 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0), 15174 DAG.getConstant(Log2_64(MulAmt1), MVT::i8)); 15175 else 15176 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0), 15177 DAG.getConstant(MulAmt1, VT)); 15178 15179 if (isPowerOf2_64(MulAmt2)) 15180 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul, 15181 DAG.getConstant(Log2_64(MulAmt2), MVT::i8)); 15182 else 15183 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul, 15184 DAG.getConstant(MulAmt2, VT)); 15185 15186 // Do not add new nodes to DAG combiner worklist. 15187 DCI.CombineTo(N, NewMul, false); 15188 } 15189 return SDValue(); 15190} 15191 15192static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) { 15193 SDValue N0 = N->getOperand(0); 15194 SDValue N1 = N->getOperand(1); 15195 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 15196 EVT VT = N0.getValueType(); 15197 15198 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2)) 15199 // since the result of setcc_c is all zero's or all ones. 
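  // For example (sketch): since setcc_c is all zeros or all ones,
  //   (shl (and setcc_c, 1), 3)  -->  (and setcc_c, 8)
  // i.e. the shift disappears and the mask constant is simply pre-shifted.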
15200 if (VT.isInteger() && !VT.isVector() && 15201 N1C && N0.getOpcode() == ISD::AND && 15202 N0.getOperand(1).getOpcode() == ISD::Constant) { 15203 SDValue N00 = N0.getOperand(0); 15204 if (N00.getOpcode() == X86ISD::SETCC_CARRY || 15205 ((N00.getOpcode() == ISD::ANY_EXTEND || 15206 N00.getOpcode() == ISD::ZERO_EXTEND) && 15207 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY)) { 15208 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 15209 APInt ShAmt = N1C->getAPIntValue(); 15210 Mask = Mask.shl(ShAmt); 15211 if (Mask != 0) 15212 return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, 15213 N00, DAG.getConstant(Mask, VT)); 15214 } 15215 } 15216 15217 15218 // Hardware support for vector shifts is sparse which makes us scalarize the 15219 // vector operations in many cases. Also, on sandybridge ADD is faster than 15220 // shl. 15221 // (shl V, 1) -> add V,V 15222 if (isSplatVector(N1.getNode())) { 15223 assert(N0.getValueType().isVector() && "Invalid vector shift type"); 15224 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(0)); 15225 // We shift all of the values by one. In many cases we do not have 15226 // hardware support for this operation. This is better expressed as an ADD 15227 // of two values. 15228 if (N1C && (1 == N1C->getZExtValue())) { 15229 return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N0, N0); 15230 } 15231 } 15232 15233 return SDValue(); 15234} 15235 15236/// PerformShiftCombine - Transforms vector shift nodes to use vector shifts 15237/// when possible. 15238static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG, 15239 TargetLowering::DAGCombinerInfo &DCI, 15240 const X86Subtarget *Subtarget) { 15241 EVT VT = N->getValueType(0); 15242 if (N->getOpcode() == ISD::SHL) { 15243 SDValue V = PerformSHLCombine(N, DAG); 15244 if (V.getNode()) return V; 15245 } 15246 15247 // On X86 with SSE2 support, we can transform this to a vector shift if 15248 // all elements are shifted by the same amount. We can't do this in legalize 15249 // because the a constant vector is typically transformed to a constant pool 15250 // so we have no knowledge of the shift amount. 15251 if (!Subtarget->hasSSE2()) 15252 return SDValue(); 15253 15254 if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 && 15255 (!Subtarget->hasAVX2() || 15256 (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16))) 15257 return SDValue(); 15258 15259 SDValue ShAmtOp = N->getOperand(1); 15260 EVT EltVT = VT.getVectorElementType(); 15261 DebugLoc DL = N->getDebugLoc(); 15262 SDValue BaseShAmt = SDValue(); 15263 if (ShAmtOp.getOpcode() == ISD::BUILD_VECTOR) { 15264 unsigned NumElts = VT.getVectorNumElements(); 15265 unsigned i = 0; 15266 for (; i != NumElts; ++i) { 15267 SDValue Arg = ShAmtOp.getOperand(i); 15268 if (Arg.getOpcode() == ISD::UNDEF) continue; 15269 BaseShAmt = Arg; 15270 break; 15271 } 15272 // Handle the case where the build_vector is all undef 15273 // FIXME: Should DAG allow this? 
15274 if (i == NumElts) 15275 return SDValue(); 15276 15277 for (; i != NumElts; ++i) { 15278 SDValue Arg = ShAmtOp.getOperand(i); 15279 if (Arg.getOpcode() == ISD::UNDEF) continue; 15280 if (Arg != BaseShAmt) { 15281 return SDValue(); 15282 } 15283 } 15284 } else if (ShAmtOp.getOpcode() == ISD::VECTOR_SHUFFLE && 15285 cast<ShuffleVectorSDNode>(ShAmtOp)->isSplat()) { 15286 SDValue InVec = ShAmtOp.getOperand(0); 15287 if (InVec.getOpcode() == ISD::BUILD_VECTOR) { 15288 unsigned NumElts = InVec.getValueType().getVectorNumElements(); 15289 unsigned i = 0; 15290 for (; i != NumElts; ++i) { 15291 SDValue Arg = InVec.getOperand(i); 15292 if (Arg.getOpcode() == ISD::UNDEF) continue; 15293 BaseShAmt = Arg; 15294 break; 15295 } 15296 } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) { 15297 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(InVec.getOperand(2))) { 15298 unsigned SplatIdx= cast<ShuffleVectorSDNode>(ShAmtOp)->getSplatIndex(); 15299 if (C->getZExtValue() == SplatIdx) 15300 BaseShAmt = InVec.getOperand(1); 15301 } 15302 } 15303 if (BaseShAmt.getNode() == 0) { 15304 // Don't create instructions with illegal types after legalize 15305 // types has run. 15306 if (!DAG.getTargetLoweringInfo().isTypeLegal(EltVT) && 15307 !DCI.isBeforeLegalize()) 15308 return SDValue(); 15309 15310 BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, ShAmtOp, 15311 DAG.getIntPtrConstant(0)); 15312 } 15313 } else 15314 return SDValue(); 15315 15316 // The shift amount is an i32. 15317 if (EltVT.bitsGT(MVT::i32)) 15318 BaseShAmt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, BaseShAmt); 15319 else if (EltVT.bitsLT(MVT::i32)) 15320 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, BaseShAmt); 15321 15322 // The shift amount is identical so we can do a vector shift. 15323 SDValue ValOp = N->getOperand(0); 15324 switch (N->getOpcode()) { 15325 default: 15326 llvm_unreachable("Unknown shift opcode!"); 15327 case ISD::SHL: 15328 switch (VT.getSimpleVT().SimpleTy) { 15329 default: return SDValue(); 15330 case MVT::v2i64: 15331 case MVT::v4i32: 15332 case MVT::v8i16: 15333 case MVT::v4i64: 15334 case MVT::v8i32: 15335 case MVT::v16i16: 15336 return getTargetVShiftNode(X86ISD::VSHLI, DL, VT, ValOp, BaseShAmt, DAG); 15337 } 15338 case ISD::SRA: 15339 switch (VT.getSimpleVT().SimpleTy) { 15340 default: return SDValue(); 15341 case MVT::v4i32: 15342 case MVT::v8i16: 15343 case MVT::v8i32: 15344 case MVT::v16i16: 15345 return getTargetVShiftNode(X86ISD::VSRAI, DL, VT, ValOp, BaseShAmt, DAG); 15346 } 15347 case ISD::SRL: 15348 switch (VT.getSimpleVT().SimpleTy) { 15349 default: return SDValue(); 15350 case MVT::v2i64: 15351 case MVT::v4i32: 15352 case MVT::v8i16: 15353 case MVT::v4i64: 15354 case MVT::v8i32: 15355 case MVT::v16i16: 15356 return getTargetVShiftNode(X86ISD::VSRLI, DL, VT, ValOp, BaseShAmt, DAG); 15357 } 15358 } 15359} 15360 15361 15362// CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..)) 15363// where both setccs reference the same FP CMP, and rewrite for CMPEQSS 15364// and friends. Likewise for OR -> CMPNEQSS. 15365static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG, 15366 TargetLowering::DAGCombinerInfo &DCI, 15367 const X86Subtarget *Subtarget) { 15368 unsigned opcode; 15369 15370 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but 15371 // we're requiring SSE2 for both. 
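  // Rough source-level sketch of the pattern this targets: "a == b" on
  // floats is normally lowered as UCOMISS plus SETE and SETNP combined with
  // an AND (to exclude the unordered case); when that i1 result is only
  // extended or copied to a register (rather than feeding a branch or
  // select), the combine below rewrites it as a single CMPEQSS, moves the
  // all-ones/all-zeros lane to a GPR and keeps only bit 0.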
15372 if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) { 15373 SDValue N0 = N->getOperand(0); 15374 SDValue N1 = N->getOperand(1); 15375 SDValue CMP0 = N0->getOperand(1); 15376 SDValue CMP1 = N1->getOperand(1); 15377 DebugLoc DL = N->getDebugLoc(); 15378 15379 // The SETCCs should both refer to the same CMP. 15380 if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1) 15381 return SDValue(); 15382 15383 SDValue CMP00 = CMP0->getOperand(0); 15384 SDValue CMP01 = CMP0->getOperand(1); 15385 EVT VT = CMP00.getValueType(); 15386 15387 if (VT == MVT::f32 || VT == MVT::f64) { 15388 bool ExpectingFlags = false; 15389 // Check for any users that want flags: 15390 for (SDNode::use_iterator UI = N->use_begin(), 15391 UE = N->use_end(); 15392 !ExpectingFlags && UI != UE; ++UI) 15393 switch (UI->getOpcode()) { 15394 default: 15395 case ISD::BR_CC: 15396 case ISD::BRCOND: 15397 case ISD::SELECT: 15398 ExpectingFlags = true; 15399 break; 15400 case ISD::CopyToReg: 15401 case ISD::SIGN_EXTEND: 15402 case ISD::ZERO_EXTEND: 15403 case ISD::ANY_EXTEND: 15404 break; 15405 } 15406 15407 if (!ExpectingFlags) { 15408 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0); 15409 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0); 15410 15411 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) { 15412 X86::CondCode tmp = cc0; 15413 cc0 = cc1; 15414 cc1 = tmp; 15415 } 15416 15417 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) || 15418 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) { 15419 bool is64BitFP = (CMP00.getValueType() == MVT::f64); 15420 X86ISD::NodeType NTOperator = is64BitFP ? 15421 X86ISD::FSETCCsd : X86ISD::FSETCCss; 15422 // FIXME: need symbolic constants for these magic numbers. 15423 // See X86ATTInstPrinter.cpp:printSSECC(). 15424 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4; 15425 SDValue OnesOrZeroesF = DAG.getNode(NTOperator, DL, MVT::f32, CMP00, CMP01, 15426 DAG.getConstant(x86cc, MVT::i8)); 15427 SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, MVT::i32, 15428 OnesOrZeroesF); 15429 SDValue ANDed = DAG.getNode(ISD::AND, DL, MVT::i32, OnesOrZeroesI, 15430 DAG.getConstant(1, MVT::i32)); 15431 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ANDed); 15432 return OneBitOfTruth; 15433 } 15434 } 15435 } 15436 } 15437 return SDValue(); 15438} 15439 15440/// CanFoldXORWithAllOnes - Test whether the XOR operand is a AllOnes vector 15441/// so it can be folded inside ANDNP. 15442static bool CanFoldXORWithAllOnes(const SDNode *N) { 15443 EVT VT = N->getValueType(0); 15444 15445 // Match direct AllOnes for 128 and 256-bit vectors 15446 if (ISD::isBuildVectorAllOnes(N)) 15447 return true; 15448 15449 // Look through a bit convert. 
15450 if (N->getOpcode() == ISD::BITCAST) 15451 N = N->getOperand(0).getNode(); 15452 15453 // Sometimes the operand may come from a insert_subvector building a 256-bit 15454 // allones vector 15455 if (VT.is256BitVector() && 15456 N->getOpcode() == ISD::INSERT_SUBVECTOR) { 15457 SDValue V1 = N->getOperand(0); 15458 SDValue V2 = N->getOperand(1); 15459 15460 if (V1.getOpcode() == ISD::INSERT_SUBVECTOR && 15461 V1.getOperand(0).getOpcode() == ISD::UNDEF && 15462 ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) && 15463 ISD::isBuildVectorAllOnes(V2.getNode())) 15464 return true; 15465 } 15466 15467 return false; 15468} 15469 15470static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG, 15471 TargetLowering::DAGCombinerInfo &DCI, 15472 const X86Subtarget *Subtarget) { 15473 if (DCI.isBeforeLegalizeOps()) 15474 return SDValue(); 15475 15476 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget); 15477 if (R.getNode()) 15478 return R; 15479 15480 EVT VT = N->getValueType(0); 15481 15482 // Create ANDN, BLSI, and BLSR instructions 15483 // BLSI is X & (-X) 15484 // BLSR is X & (X-1) 15485 if (Subtarget->hasBMI() && (VT == MVT::i32 || VT == MVT::i64)) { 15486 SDValue N0 = N->getOperand(0); 15487 SDValue N1 = N->getOperand(1); 15488 DebugLoc DL = N->getDebugLoc(); 15489 15490 // Check LHS for not 15491 if (N0.getOpcode() == ISD::XOR && isAllOnes(N0.getOperand(1))) 15492 return DAG.getNode(X86ISD::ANDN, DL, VT, N0.getOperand(0), N1); 15493 // Check RHS for not 15494 if (N1.getOpcode() == ISD::XOR && isAllOnes(N1.getOperand(1))) 15495 return DAG.getNode(X86ISD::ANDN, DL, VT, N1.getOperand(0), N0); 15496 15497 // Check LHS for neg 15498 if (N0.getOpcode() == ISD::SUB && N0.getOperand(1) == N1 && 15499 isZero(N0.getOperand(0))) 15500 return DAG.getNode(X86ISD::BLSI, DL, VT, N1); 15501 15502 // Check RHS for neg 15503 if (N1.getOpcode() == ISD::SUB && N1.getOperand(1) == N0 && 15504 isZero(N1.getOperand(0))) 15505 return DAG.getNode(X86ISD::BLSI, DL, VT, N0); 15506 15507 // Check LHS for X-1 15508 if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1 && 15509 isAllOnes(N0.getOperand(1))) 15510 return DAG.getNode(X86ISD::BLSR, DL, VT, N1); 15511 15512 // Check RHS for X-1 15513 if (N1.getOpcode() == ISD::ADD && N1.getOperand(0) == N0 && 15514 isAllOnes(N1.getOperand(1))) 15515 return DAG.getNode(X86ISD::BLSR, DL, VT, N0); 15516 15517 return SDValue(); 15518 } 15519 15520 // Want to form ANDNP nodes: 15521 // 1) In the hopes of then easily combining them with OR and AND nodes 15522 // to form PBLEND/PSIGN. 
15523 // 2) To match ANDN packed intrinsics 15524 if (VT != MVT::v2i64 && VT != MVT::v4i64) 15525 return SDValue(); 15526 15527 SDValue N0 = N->getOperand(0); 15528 SDValue N1 = N->getOperand(1); 15529 DebugLoc DL = N->getDebugLoc(); 15530 15531 // Check LHS for vnot 15532 if (N0.getOpcode() == ISD::XOR && 15533 //ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode())) 15534 CanFoldXORWithAllOnes(N0.getOperand(1).getNode())) 15535 return DAG.getNode(X86ISD::ANDNP, DL, VT, N0.getOperand(0), N1); 15536 15537 // Check RHS for vnot 15538 if (N1.getOpcode() == ISD::XOR && 15539 //ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode())) 15540 CanFoldXORWithAllOnes(N1.getOperand(1).getNode())) 15541 return DAG.getNode(X86ISD::ANDNP, DL, VT, N1.getOperand(0), N0); 15542 15543 return SDValue(); 15544} 15545 15546static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG, 15547 TargetLowering::DAGCombinerInfo &DCI, 15548 const X86Subtarget *Subtarget) { 15549 if (DCI.isBeforeLegalizeOps()) 15550 return SDValue(); 15551 15552 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget); 15553 if (R.getNode()) 15554 return R; 15555 15556 EVT VT = N->getValueType(0); 15557 15558 SDValue N0 = N->getOperand(0); 15559 SDValue N1 = N->getOperand(1); 15560 15561 // look for psign/blend 15562 if (VT == MVT::v2i64 || VT == MVT::v4i64) { 15563 if (!Subtarget->hasSSSE3() || 15564 (VT == MVT::v4i64 && !Subtarget->hasAVX2())) 15565 return SDValue(); 15566 15567 // Canonicalize pandn to RHS 15568 if (N0.getOpcode() == X86ISD::ANDNP) 15569 std::swap(N0, N1); 15570 // or (and (m, y), (pandn m, x)) 15571 if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) { 15572 SDValue Mask = N1.getOperand(0); 15573 SDValue X = N1.getOperand(1); 15574 SDValue Y; 15575 if (N0.getOperand(0) == Mask) 15576 Y = N0.getOperand(1); 15577 if (N0.getOperand(1) == Mask) 15578 Y = N0.getOperand(0); 15579 15580 // Check to see if the mask appeared in both the AND and ANDNP and 15581 if (!Y.getNode()) 15582 return SDValue(); 15583 15584 // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them. 15585 // Look through mask bitcast. 15586 if (Mask.getOpcode() == ISD::BITCAST) 15587 Mask = Mask.getOperand(0); 15588 if (X.getOpcode() == ISD::BITCAST) 15589 X = X.getOperand(0); 15590 if (Y.getOpcode() == ISD::BITCAST) 15591 Y = Y.getOperand(0); 15592 15593 EVT MaskVT = Mask.getValueType(); 15594 15595 // Validate that the Mask operand is a vector sra node. 15596 // FIXME: what to do for bytes, since there is a psignb/pblendvb, but 15597 // there is no psrai.b 15598 if (Mask.getOpcode() != X86ISD::VSRAI) 15599 return SDValue(); 15600 15601 // Check that the SRA is all signbits. 15602 SDValue SraC = Mask.getOperand(1); 15603 unsigned SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue(); 15604 unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits(); 15605 if ((SraAmt + 1) != EltBits) 15606 return SDValue(); 15607 15608 DebugLoc DL = N->getDebugLoc(); 15609 15610 // Now we know we at least have a plendvb with the mask val. See if 15611 // we can form a psignb/w/d. 
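      // (For context: PSIGNB/W/D negates each element of its first operand
      // where the corresponding control element is negative, zeroes it where
      // the control is zero, and passes it through where the control is
      // positive.)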
15612 // psign = x.type == y.type == mask.type && y = sub(0, x); 15613 if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X && 15614 ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) && 15615 X.getValueType() == MaskVT && Y.getValueType() == MaskVT) { 15616 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) && 15617 "Unsupported VT for PSIGN"); 15618 Mask = DAG.getNode(X86ISD::PSIGN, DL, MaskVT, X, Mask.getOperand(0)); 15619 return DAG.getNode(ISD::BITCAST, DL, VT, Mask); 15620 } 15621 // PBLENDVB only available on SSE 4.1 15622 if (!Subtarget->hasSSE41()) 15623 return SDValue(); 15624 15625 EVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8; 15626 15627 X = DAG.getNode(ISD::BITCAST, DL, BlendVT, X); 15628 Y = DAG.getNode(ISD::BITCAST, DL, BlendVT, Y); 15629 Mask = DAG.getNode(ISD::BITCAST, DL, BlendVT, Mask); 15630 Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X); 15631 return DAG.getNode(ISD::BITCAST, DL, VT, Mask); 15632 } 15633 } 15634 15635 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64) 15636 return SDValue(); 15637 15638 // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c) 15639 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL) 15640 std::swap(N0, N1); 15641 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL) 15642 return SDValue(); 15643 if (!N0.hasOneUse() || !N1.hasOneUse()) 15644 return SDValue(); 15645 15646 SDValue ShAmt0 = N0.getOperand(1); 15647 if (ShAmt0.getValueType() != MVT::i8) 15648 return SDValue(); 15649 SDValue ShAmt1 = N1.getOperand(1); 15650 if (ShAmt1.getValueType() != MVT::i8) 15651 return SDValue(); 15652 if (ShAmt0.getOpcode() == ISD::TRUNCATE) 15653 ShAmt0 = ShAmt0.getOperand(0); 15654 if (ShAmt1.getOpcode() == ISD::TRUNCATE) 15655 ShAmt1 = ShAmt1.getOperand(0); 15656 15657 DebugLoc DL = N->getDebugLoc(); 15658 unsigned Opc = X86ISD::SHLD; 15659 SDValue Op0 = N0.getOperand(0); 15660 SDValue Op1 = N1.getOperand(0); 15661 if (ShAmt0.getOpcode() == ISD::SUB) { 15662 Opc = X86ISD::SHRD; 15663 std::swap(Op0, Op1); 15664 std::swap(ShAmt0, ShAmt1); 15665 } 15666 15667 unsigned Bits = VT.getSizeInBits(); 15668 if (ShAmt1.getOpcode() == ISD::SUB) { 15669 SDValue Sum = ShAmt1.getOperand(0); 15670 if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) { 15671 SDValue ShAmt1Op1 = ShAmt1.getOperand(1); 15672 if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE) 15673 ShAmt1Op1 = ShAmt1Op1.getOperand(0); 15674 if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0) 15675 return DAG.getNode(Opc, DL, VT, 15676 Op0, Op1, 15677 DAG.getNode(ISD::TRUNCATE, DL, 15678 MVT::i8, ShAmt0)); 15679 } 15680 } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) { 15681 ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0); 15682 if (ShAmt0C && 15683 ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits) 15684 return DAG.getNode(Opc, DL, VT, 15685 N0.getOperand(0), N1.getOperand(0), 15686 DAG.getNode(ISD::TRUNCATE, DL, 15687 MVT::i8, ShAmt0)); 15688 } 15689 15690 return SDValue(); 15691} 15692 15693// Generate NEG and CMOV for integer abs. 15694static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) { 15695 EVT VT = N->getValueType(0); 15696 15697 // Since X86 does not have CMOV for 8-bit integer, we don't convert 15698 // 8-bit integer abs to NEG and CMOV. 
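  // The pattern handled below is the usual branchless abs, e.g. for i32:
  //   t = sra x, 31 ; a = add x, t ; r = xor a, t
  // which (roughly) becomes a NEG that sets the flags followed by a CMOV
  // selecting between x and 0-x, instead of the shift/add/xor chain.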
15699 if (VT.isInteger() && VT.getSizeInBits() == 8) 15700 return SDValue(); 15701 15702 SDValue N0 = N->getOperand(0); 15703 SDValue N1 = N->getOperand(1); 15704 DebugLoc DL = N->getDebugLoc(); 15705 15706 // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1) 15707 // and change it to SUB and CMOV. 15708 if (VT.isInteger() && N->getOpcode() == ISD::XOR && 15709 N0.getOpcode() == ISD::ADD && 15710 N0.getOperand(1) == N1 && 15711 N1.getOpcode() == ISD::SRA && 15712 N1.getOperand(0) == N0.getOperand(0)) 15713 if (ConstantSDNode *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1))) 15714 if (Y1C->getAPIntValue() == VT.getSizeInBits()-1) { 15715 // Generate SUB & CMOV. 15716 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32), 15717 DAG.getConstant(0, VT), N0.getOperand(0)); 15718 15719 SDValue Ops[] = { N0.getOperand(0), Neg, 15720 DAG.getConstant(X86::COND_GE, MVT::i8), 15721 SDValue(Neg.getNode(), 1) }; 15722 return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue), 15723 Ops, array_lengthof(Ops)); 15724 } 15725 return SDValue(); 15726} 15727 15728// PerformXorCombine - Attempts to turn XOR nodes into BLSMSK nodes 15729static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG, 15730 TargetLowering::DAGCombinerInfo &DCI, 15731 const X86Subtarget *Subtarget) { 15732 if (DCI.isBeforeLegalizeOps()) 15733 return SDValue(); 15734 15735 if (Subtarget->hasCMov()) { 15736 SDValue RV = performIntegerAbsCombine(N, DAG); 15737 if (RV.getNode()) 15738 return RV; 15739 } 15740 15741 // Try forming BMI if it is available. 15742 if (!Subtarget->hasBMI()) 15743 return SDValue(); 15744 15745 EVT VT = N->getValueType(0); 15746 15747 if (VT != MVT::i32 && VT != MVT::i64) 15748 return SDValue(); 15749 15750 assert(Subtarget->hasBMI() && "Creating BLSMSK requires BMI instructions"); 15751 15752 // Create BLSMSK instructions by finding X ^ (X-1) 15753 SDValue N0 = N->getOperand(0); 15754 SDValue N1 = N->getOperand(1); 15755 DebugLoc DL = N->getDebugLoc(); 15756 15757 if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1 && 15758 isAllOnes(N0.getOperand(1))) 15759 return DAG.getNode(X86ISD::BLSMSK, DL, VT, N1); 15760 15761 if (N1.getOpcode() == ISD::ADD && N1.getOperand(0) == N0 && 15762 isAllOnes(N1.getOperand(1))) 15763 return DAG.getNode(X86ISD::BLSMSK, DL, VT, N0); 15764 15765 return SDValue(); 15766} 15767 15768/// PerformLOADCombine - Do target-specific dag combines on LOAD nodes. 15769static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG, 15770 TargetLowering::DAGCombinerInfo &DCI, 15771 const X86Subtarget *Subtarget) { 15772 LoadSDNode *Ld = cast<LoadSDNode>(N); 15773 EVT RegVT = Ld->getValueType(0); 15774 EVT MemVT = Ld->getMemoryVT(); 15775 DebugLoc dl = Ld->getDebugLoc(); 15776 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 15777 15778 ISD::LoadExtType Ext = Ld->getExtensionType(); 15779 15780 // If this is a vector EXT Load then attempt to optimize it using a 15781 // shuffle. We need SSSE3 shuffles. 15782 // TODO: It is possible to support ZExt by zeroing the undef values 15783 // during the shuffle phase or after the shuffle. 
15784 if (RegVT.isVector() && RegVT.isInteger() && 15785 Ext == ISD::EXTLOAD && Subtarget->hasSSSE3()) { 15786 assert(MemVT != RegVT && "Cannot extend to the same type"); 15787 assert(MemVT.isVector() && "Must load a vector from memory"); 15788 15789 unsigned NumElems = RegVT.getVectorNumElements(); 15790 unsigned RegSz = RegVT.getSizeInBits(); 15791 unsigned MemSz = MemVT.getSizeInBits(); 15792 assert(RegSz > MemSz && "Register size must be greater than the mem size"); 15793 15794 // All sizes must be a power of two. 15795 if (!isPowerOf2_32(RegSz * MemSz * NumElems)) 15796 return SDValue(); 15797 15798 // Attempt to load the original value using scalar loads. 15799 // Find the largest scalar type that divides the total loaded size. 15800 MVT SclrLoadTy = MVT::i8; 15801 for (unsigned tp = MVT::FIRST_INTEGER_VALUETYPE; 15802 tp < MVT::LAST_INTEGER_VALUETYPE; ++tp) { 15803 MVT Tp = (MVT::SimpleValueType)tp; 15804 if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) { 15805 SclrLoadTy = Tp; 15806 } 15807 } 15808 15809 // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64. 15810 if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 && 15811 (64 <= MemSz)) 15812 SclrLoadTy = MVT::f64; 15813 15814 // Calculate the number of scalar loads that we need to perform 15815 // in order to load our vector from memory. 15816 unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits(); 15817 15818 // Represent our vector as a sequence of elements which are the 15819 // largest scalar that we can load. 15820 EVT LoadUnitVecVT = EVT::getVectorVT(*DAG.getContext(), SclrLoadTy, 15821 RegSz/SclrLoadTy.getSizeInBits()); 15822 15823 // Represent the data using the same element type that is stored in 15824 // memory. In practice, we ''widen'' MemVT. 15825 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(), 15826 RegSz/MemVT.getScalarType().getSizeInBits()); 15827 15828 assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() && 15829 "Invalid vector type"); 15830 15831 // We can't shuffle using an illegal type. 15832 if (!TLI.isTypeLegal(WideVecVT)) 15833 return SDValue(); 15834 15835 SmallVector<SDValue, 8> Chains; 15836 SDValue Ptr = Ld->getBasePtr(); 15837 SDValue Increment = DAG.getConstant(SclrLoadTy.getSizeInBits()/8, 15838 TLI.getPointerTy()); 15839 SDValue Res = DAG.getUNDEF(LoadUnitVecVT); 15840 15841 for (unsigned i = 0; i < NumLoads; ++i) { 15842 // Perform a single load. 15843 SDValue ScalarLoad = DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), 15844 Ptr, Ld->getPointerInfo(), 15845 Ld->isVolatile(), Ld->isNonTemporal(), 15846 Ld->isInvariant(), Ld->getAlignment()); 15847 Chains.push_back(ScalarLoad.getValue(1)); 15848 // Create the first element type using SCALAR_TO_VECTOR in order to avoid 15849 // another round of DAGCombining. 15850 if (i == 0) 15851 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad); 15852 else 15853 Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res, 15854 ScalarLoad, DAG.getIntPtrConstant(i)); 15855 15856 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 15857 } 15858 15859 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], 15860 Chains.size()); 15861 15862 // Bitcast the loaded value to a vector of the original element type, in 15863 // the size of the target vector type. 
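    // Worked example (illustrative): for an extending load of <4 x i8> into
    // <4 x i32>, RegSz = 128 and MemSz = 32, so SizeRatio is 4, WideVecVT is
    // v16i8 and the shuffle mask built below is
    //   <0,-1,-1,-1, 1,-1,-1,-1, 2,-1,-1,-1, 3,-1,-1,-1>
    // placing each loaded byte in the low byte of its i32 lane with the
    // remaining bytes undef, i.e. an any-extend of the loaded elements.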
15864 SDValue SlicedVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Res); 15865 unsigned SizeRatio = RegSz/MemSz; 15866 15867 // Redistribute the loaded elements into the different locations. 15868 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); 15869 for (unsigned i = 0; i != NumElems; ++i) 15870 ShuffleVec[i*SizeRatio] = i; 15871 15872 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec, 15873 DAG.getUNDEF(WideVecVT), 15874 &ShuffleVec[0]); 15875 15876 // Bitcast to the requested type. 15877 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff); 15878 // Replace the original load with the new sequence 15879 // and return the new chain. 15880 return DCI.CombineTo(N, Shuff, TF, true); 15881 } 15882 15883 return SDValue(); 15884} 15885 15886/// PerformSTORECombine - Do target-specific dag combines on STORE nodes. 15887static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG, 15888 const X86Subtarget *Subtarget) { 15889 StoreSDNode *St = cast<StoreSDNode>(N); 15890 EVT VT = St->getValue().getValueType(); 15891 EVT StVT = St->getMemoryVT(); 15892 DebugLoc dl = St->getDebugLoc(); 15893 SDValue StoredVal = St->getOperand(1); 15894 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 15895 15896 // If we are saving a concatenation of two XMM registers, perform two stores. 15897 // On Sandy Bridge, 256-bit memory operations are executed by two 15898 // 128-bit ports. However, on Haswell it is better to issue a single 256-bit 15899 // memory operation. 15900 if (VT.is256BitVector() && !Subtarget->hasAVX2() && 15901 StoredVal.getNode()->getOpcode() == ISD::CONCAT_VECTORS && 15902 StoredVal.getNumOperands() == 2) { 15903 SDValue Value0 = StoredVal.getOperand(0); 15904 SDValue Value1 = StoredVal.getOperand(1); 15905 15906 SDValue Stride = DAG.getConstant(16, TLI.getPointerTy()); 15907 SDValue Ptr0 = St->getBasePtr(); 15908 SDValue Ptr1 = DAG.getNode(ISD::ADD, dl, Ptr0.getValueType(), Ptr0, Stride); 15909 15910 SDValue Ch0 = DAG.getStore(St->getChain(), dl, Value0, Ptr0, 15911 St->getPointerInfo(), St->isVolatile(), 15912 St->isNonTemporal(), St->getAlignment()); 15913 SDValue Ch1 = DAG.getStore(St->getChain(), dl, Value1, Ptr1, 15914 St->getPointerInfo(), St->isVolatile(), 15915 St->isNonTemporal(), St->getAlignment()); 15916 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1); 15917 } 15918 15919 // Optimize trunc store (of multiple scalars) to shuffle and store. 15920 // First, pack all of the elements in one place. Next, store to memory 15921 // in fewer chunks. 15922 if (St->isTruncatingStore() && VT.isVector()) { 15923 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 15924 unsigned NumElems = VT.getVectorNumElements(); 15925 assert(StVT != VT && "Cannot truncate to the same type"); 15926 unsigned FromSz = VT.getVectorElementType().getSizeInBits(); 15927 unsigned ToSz = StVT.getVectorElementType().getSizeInBits(); 15928 15929 // From, To sizes and ElemCount must be pow of two 15930 if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue(); 15931 // We are going to use the original vector elt for storing. 15932 // Accumulated smaller vector elements must be a multiple of the store size. 
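    // Illustrative example (not from the original comments): a truncating
    // store of <8 x i16> as <8 x i8> has FromSz = 16, ToSz = 8 and
    // SizeRatio = 2; the value is bitcast to v16i8, shuffled so the eight
    // low bytes are packed into positions 0..7, and then written with a
    // single 64-bit store instead of eight one-byte stores.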
15933 if (0 != (NumElems * FromSz) % ToSz) return SDValue(); 15934 15935 unsigned SizeRatio = FromSz / ToSz; 15936 15937 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits()); 15938 15939 // Create a type on which we perform the shuffle 15940 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), 15941 StVT.getScalarType(), NumElems*SizeRatio); 15942 15943 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits()); 15944 15945 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, St->getValue()); 15946 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); 15947 for (unsigned i = 0; i != NumElems; ++i) 15948 ShuffleVec[i] = i * SizeRatio; 15949 15950 // Can't shuffle using an illegal type. 15951 if (!TLI.isTypeLegal(WideVecVT)) 15952 return SDValue(); 15953 15954 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec, 15955 DAG.getUNDEF(WideVecVT), 15956 &ShuffleVec[0]); 15957 // At this point all of the data is stored at the bottom of the 15958 // register. We now need to save it to mem. 15959 15960 // Find the largest store unit 15961 MVT StoreType = MVT::i8; 15962 for (unsigned tp = MVT::FIRST_INTEGER_VALUETYPE; 15963 tp < MVT::LAST_INTEGER_VALUETYPE; ++tp) { 15964 MVT Tp = (MVT::SimpleValueType)tp; 15965 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz) 15966 StoreType = Tp; 15967 } 15968 15969 // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64. 15970 if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 && 15971 (64 <= NumElems * ToSz)) 15972 StoreType = MVT::f64; 15973 15974 // Bitcast the original vector into a vector of store-size units 15975 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(), 15976 StoreType, VT.getSizeInBits()/StoreType.getSizeInBits()); 15977 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits()); 15978 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, dl, StoreVecVT, Shuff); 15979 SmallVector<SDValue, 8> Chains; 15980 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8, 15981 TLI.getPointerTy()); 15982 SDValue Ptr = St->getBasePtr(); 15983 15984 // Perform one or more big stores into memory. 15985 for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) { 15986 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, 15987 StoreType, ShuffWide, 15988 DAG.getIntPtrConstant(i)); 15989 SDValue Ch = DAG.getStore(St->getChain(), dl, SubVec, Ptr, 15990 St->getPointerInfo(), St->isVolatile(), 15991 St->isNonTemporal(), St->getAlignment()); 15992 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 15993 Chains.push_back(Ch); 15994 } 15995 15996 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], 15997 Chains.size()); 15998 } 15999 16000 16001 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering 16002 // the FP state in cases where an emms may be missing. 16003 // A preferable solution to the general problem is to figure out the right 16004 // places to insert EMMS. This qualifies as a quick hack. 16005 16006 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode. 16007 if (VT.getSizeInBits() != 64) 16008 return SDValue(); 16009 16010 const Function *F = DAG.getMachineFunction().getFunction(); 16011 bool NoImplicitFloatOps = F->getFnAttributes(). 
16012 hasAttribute(Attributes::NoImplicitFloat); 16013 bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps 16014 && Subtarget->hasSSE2(); 16015 if ((VT.isVector() || 16016 (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) && 16017 isa<LoadSDNode>(St->getValue()) && 16018 !cast<LoadSDNode>(St->getValue())->isVolatile() && 16019 St->getChain().hasOneUse() && !St->isVolatile()) { 16020 SDNode* LdVal = St->getValue().getNode(); 16021 LoadSDNode *Ld = 0; 16022 int TokenFactorIndex = -1; 16023 SmallVector<SDValue, 8> Ops; 16024 SDNode* ChainVal = St->getChain().getNode(); 16025 // Must be a store of a load. We currently handle two cases: the load 16026 // is a direct child, and it's under an intervening TokenFactor. It is 16027 // possible to dig deeper under nested TokenFactors. 16028 if (ChainVal == LdVal) 16029 Ld = cast<LoadSDNode>(St->getChain()); 16030 else if (St->getValue().hasOneUse() && 16031 ChainVal->getOpcode() == ISD::TokenFactor) { 16032 for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) { 16033 if (ChainVal->getOperand(i).getNode() == LdVal) { 16034 TokenFactorIndex = i; 16035 Ld = cast<LoadSDNode>(St->getValue()); 16036 } else 16037 Ops.push_back(ChainVal->getOperand(i)); 16038 } 16039 } 16040 16041 if (!Ld || !ISD::isNormalLoad(Ld)) 16042 return SDValue(); 16043 16044 // If this is not the MMX case, i.e. we are just turning i64 load/store 16045 // into f64 load/store, avoid the transformation if there are multiple 16046 // uses of the loaded value. 16047 if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0)) 16048 return SDValue(); 16049 16050 DebugLoc LdDL = Ld->getDebugLoc(); 16051 DebugLoc StDL = N->getDebugLoc(); 16052 // If we are a 64-bit capable x86, lower to a single movq load/store pair. 16053 // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store 16054 // pair instead. 16055 if (Subtarget->is64Bit() || F64IsLegal) { 16056 EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64; 16057 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(), 16058 Ld->getPointerInfo(), Ld->isVolatile(), 16059 Ld->isNonTemporal(), Ld->isInvariant(), 16060 Ld->getAlignment()); 16061 SDValue NewChain = NewLd.getValue(1); 16062 if (TokenFactorIndex != -1) { 16063 Ops.push_back(NewChain); 16064 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0], 16065 Ops.size()); 16066 } 16067 return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(), 16068 St->getPointerInfo(), 16069 St->isVolatile(), St->isNonTemporal(), 16070 St->getAlignment()); 16071 } 16072 16073 // Otherwise, lower to two pairs of 32-bit loads / stores. 
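    // E.g. (sketch) copying a 64-bit value "*q = *p" on such a target then
    // becomes two i32 load/store pairs at offsets 0 and 4, staying in GPRs
    // rather than touching an MMX register and the x87 state.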
16074 SDValue LoAddr = Ld->getBasePtr(); 16075 SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr, 16076 DAG.getConstant(4, MVT::i32)); 16077 16078 SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr, 16079 Ld->getPointerInfo(), 16080 Ld->isVolatile(), Ld->isNonTemporal(), 16081 Ld->isInvariant(), Ld->getAlignment()); 16082 SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr, 16083 Ld->getPointerInfo().getWithOffset(4), 16084 Ld->isVolatile(), Ld->isNonTemporal(), 16085 Ld->isInvariant(), 16086 MinAlign(Ld->getAlignment(), 4)); 16087 16088 SDValue NewChain = LoLd.getValue(1); 16089 if (TokenFactorIndex != -1) { 16090 Ops.push_back(LoLd); 16091 Ops.push_back(HiLd); 16092 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0], 16093 Ops.size()); 16094 } 16095 16096 LoAddr = St->getBasePtr(); 16097 HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr, 16098 DAG.getConstant(4, MVT::i32)); 16099 16100 SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr, 16101 St->getPointerInfo(), 16102 St->isVolatile(), St->isNonTemporal(), 16103 St->getAlignment()); 16104 SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr, 16105 St->getPointerInfo().getWithOffset(4), 16106 St->isVolatile(), 16107 St->isNonTemporal(), 16108 MinAlign(St->getAlignment(), 4)); 16109 return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt); 16110 } 16111 return SDValue(); 16112} 16113 16114/// isHorizontalBinOp - Return 'true' if this vector operation is "horizontal" 16115/// and return the operands for the horizontal operation in LHS and RHS. A 16116/// horizontal operation performs the binary operation on successive elements 16117/// of its first operand, then on successive elements of its second operand, 16118/// returning the resulting values in a vector. For example, if 16119/// A = < float a0, float a1, float a2, float a3 > 16120/// and 16121/// B = < float b0, float b1, float b2, float b3 > 16122/// then the result of doing a horizontal operation on A and B is 16123/// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >. 16124/// In short, LHS and RHS are inspected to see if LHS op RHS is of the form 16125/// A horizontal-op B, for some already available A and B, and if so then LHS is 16126/// set to A, RHS to B, and the routine returns 'true'. 16127/// Note that the binary operation should have the property that if one of the 16128/// operands is UNDEF then the result is UNDEF. 16129static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) { 16130 // Look for the following pattern: if 16131 // A = < float a0, float a1, float a2, float a3 > 16132 // B = < float b0, float b1, float b2, float b3 > 16133 // and 16134 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6> 16135 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7> 16136 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 > 16137 // which is A horizontal-op B. 16138 16139 // At least one of the operands should be a vector shuffle. 16140 if (LHS.getOpcode() != ISD::VECTOR_SHUFFLE && 16141 RHS.getOpcode() != ISD::VECTOR_SHUFFLE) 16142 return false; 16143 16144 EVT VT = LHS.getValueType(); 16145 16146 assert((VT.is128BitVector() || VT.is256BitVector()) && 16147 "Unsupported vector type for horizontal add/sub"); 16148 16149 // Handle 128 and 256-bit vector lengths. AVX defines horizontal add/sub to 16150 // operate independently on 128-bit lanes. 
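  // Concretely (for illustration), with 256-bit v8f32 inputs A and B a
  // horizontal add produces
  //   < a0+a1, a2+a3, b0+b1, b2+b3 | a4+a5, a6+a7, b4+b5, b6+b7 >
  // so the "successive elements" check further down is performed per
  // 128-bit lane.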
16151 unsigned NumElts = VT.getVectorNumElements(); 16152 unsigned NumLanes = VT.getSizeInBits()/128; 16153 unsigned NumLaneElts = NumElts / NumLanes; 16154 assert((NumLaneElts % 2 == 0) && 16155 "Vector type should have an even number of elements in each lane"); 16156 unsigned HalfLaneElts = NumLaneElts/2; 16157 16158 // View LHS in the form 16159 // LHS = VECTOR_SHUFFLE A, B, LMask 16160 // If LHS is not a shuffle then pretend it is the shuffle 16161 // LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1> 16162 // NOTE: in what follows a default initialized SDValue represents an UNDEF of 16163 // type VT. 16164 SDValue A, B; 16165 SmallVector<int, 16> LMask(NumElts); 16166 if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) { 16167 if (LHS.getOperand(0).getOpcode() != ISD::UNDEF) 16168 A = LHS.getOperand(0); 16169 if (LHS.getOperand(1).getOpcode() != ISD::UNDEF) 16170 B = LHS.getOperand(1); 16171 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask(); 16172 std::copy(Mask.begin(), Mask.end(), LMask.begin()); 16173 } else { 16174 if (LHS.getOpcode() != ISD::UNDEF) 16175 A = LHS; 16176 for (unsigned i = 0; i != NumElts; ++i) 16177 LMask[i] = i; 16178 } 16179 16180 // Likewise, view RHS in the form 16181 // RHS = VECTOR_SHUFFLE C, D, RMask 16182 SDValue C, D; 16183 SmallVector<int, 16> RMask(NumElts); 16184 if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) { 16185 if (RHS.getOperand(0).getOpcode() != ISD::UNDEF) 16186 C = RHS.getOperand(0); 16187 if (RHS.getOperand(1).getOpcode() != ISD::UNDEF) 16188 D = RHS.getOperand(1); 16189 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask(); 16190 std::copy(Mask.begin(), Mask.end(), RMask.begin()); 16191 } else { 16192 if (RHS.getOpcode() != ISD::UNDEF) 16193 C = RHS; 16194 for (unsigned i = 0; i != NumElts; ++i) 16195 RMask[i] = i; 16196 } 16197 16198 // Check that the shuffles are both shuffling the same vectors. 16199 if (!(A == C && B == D) && !(A == D && B == C)) 16200 return false; 16201 16202 // If everything is UNDEF then bail out: it would be better to fold to UNDEF. 16203 if (!A.getNode() && !B.getNode()) 16204 return false; 16205 16206 // If A and B occur in reverse order in RHS, then "swap" them (which means 16207 // rewriting the mask). 16208 if (A != C) 16209 CommuteVectorShuffleMask(RMask, NumElts); 16210 16211 // At this point LHS and RHS are equivalent to 16212 // LHS = VECTOR_SHUFFLE A, B, LMask 16213 // RHS = VECTOR_SHUFFLE A, B, RMask 16214 // Check that the masks correspond to performing a horizontal operation. 16215 for (unsigned i = 0; i != NumElts; ++i) { 16216 int LIdx = LMask[i], RIdx = RMask[i]; 16217 16218 // Ignore any UNDEF components. 16219 if (LIdx < 0 || RIdx < 0 || 16220 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) || 16221 (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts))) 16222 continue; 16223 16224 // Check that successive elements are being operated on. If not, this is 16225 // not a horizontal operation. 16226 unsigned Src = (i/HalfLaneElts) % 2; // each lane is split between srcs 16227 unsigned LaneStart = (i/NumLaneElts) * NumLaneElts; 16228 int Index = 2*(i%HalfLaneElts) + NumElts*Src + LaneStart; 16229 if (!(LIdx == Index && RIdx == Index + 1) && 16230 !(IsCommutative && LIdx == Index + 1 && RIdx == Index)) 16231 return false; 16232 } 16233 16234 LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it. 16235 RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it. 
16236 return true; 16237} 16238 16239/// PerformFADDCombine - Do target-specific dag combines on floating point adds. 16240static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG, 16241 const X86Subtarget *Subtarget) { 16242 EVT VT = N->getValueType(0); 16243 SDValue LHS = N->getOperand(0); 16244 SDValue RHS = N->getOperand(1); 16245 16246 // Try to synthesize horizontal adds from adds of shuffles. 16247 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) || 16248 (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) && 16249 isHorizontalBinOp(LHS, RHS, true)) 16250 return DAG.getNode(X86ISD::FHADD, N->getDebugLoc(), VT, LHS, RHS); 16251 return SDValue(); 16252} 16253 16254/// PerformFSUBCombine - Do target-specific dag combines on floating point subs. 16255static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG, 16256 const X86Subtarget *Subtarget) { 16257 EVT VT = N->getValueType(0); 16258 SDValue LHS = N->getOperand(0); 16259 SDValue RHS = N->getOperand(1); 16260 16261 // Try to synthesize horizontal subs from subs of shuffles. 16262 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) || 16263 (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) && 16264 isHorizontalBinOp(LHS, RHS, false)) 16265 return DAG.getNode(X86ISD::FHSUB, N->getDebugLoc(), VT, LHS, RHS); 16266 return SDValue(); 16267} 16268 16269/// PerformFORCombine - Do target-specific dag combines on X86ISD::FOR and 16270/// X86ISD::FXOR nodes. 16271static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) { 16272 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR); 16273 // F[X]OR(0.0, x) -> x 16274 // F[X]OR(x, 0.0) -> x 16275 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) 16276 if (C->getValueAPF().isPosZero()) 16277 return N->getOperand(1); 16278 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1))) 16279 if (C->getValueAPF().isPosZero()) 16280 return N->getOperand(0); 16281 return SDValue(); 16282} 16283 16284/// PerformFMinFMaxCombine - Do target-specific dag combines on X86ISD::FMIN and 16285/// X86ISD::FMAX nodes. 16286static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) { 16287 assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX); 16288 16289 // Only perform optimizations if UnsafeMath is used. 16290 if (!DAG.getTarget().Options.UnsafeFPMath) 16291 return SDValue(); 16292 16293 // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes 16294 // into FMINC and FMAXC, which are Commutative operations. 16295 unsigned NewOp = 0; 16296 switch (N->getOpcode()) { 16297 default: llvm_unreachable("unknown opcode"); 16298 case X86ISD::FMIN: NewOp = X86ISD::FMINC; break; 16299 case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break; 16300 } 16301 16302 return DAG.getNode(NewOp, N->getDebugLoc(), N->getValueType(0), 16303 N->getOperand(0), N->getOperand(1)); 16304} 16305 16306 16307/// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes. 
16308static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) { 16309 // FAND(0.0, x) -> 0.0 16310 // FAND(x, 0.0) -> 0.0 16311 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) 16312 if (C->getValueAPF().isPosZero()) 16313 return N->getOperand(0); 16314 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1))) 16315 if (C->getValueAPF().isPosZero()) 16316 return N->getOperand(1); 16317 return SDValue(); 16318} 16319 16320static SDValue PerformBTCombine(SDNode *N, 16321 SelectionDAG &DAG, 16322 TargetLowering::DAGCombinerInfo &DCI) { 16323 // BT ignores high bits in the bit index operand. 16324 SDValue Op1 = N->getOperand(1); 16325 if (Op1.hasOneUse()) { 16326 unsigned BitWidth = Op1.getValueSizeInBits(); 16327 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth)); 16328 APInt KnownZero, KnownOne; 16329 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), 16330 !DCI.isBeforeLegalizeOps()); 16331 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 16332 if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) || 16333 TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO)) 16334 DCI.CommitTargetLoweringOpt(TLO); 16335 } 16336 return SDValue(); 16337} 16338 16339static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) { 16340 SDValue Op = N->getOperand(0); 16341 if (Op.getOpcode() == ISD::BITCAST) 16342 Op = Op.getOperand(0); 16343 EVT VT = N->getValueType(0), OpVT = Op.getValueType(); 16344 if (Op.getOpcode() == X86ISD::VZEXT_LOAD && 16345 VT.getVectorElementType().getSizeInBits() == 16346 OpVT.getVectorElementType().getSizeInBits()) { 16347 return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op); 16348 } 16349 return SDValue(); 16350} 16351 16352static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG, 16353 TargetLowering::DAGCombinerInfo &DCI, 16354 const X86Subtarget *Subtarget) { 16355 if (!DCI.isBeforeLegalizeOps()) 16356 return SDValue(); 16357 16358 if (!Subtarget->hasAVX()) 16359 return SDValue(); 16360 16361 EVT VT = N->getValueType(0); 16362 SDValue Op = N->getOperand(0); 16363 EVT OpVT = Op.getValueType(); 16364 DebugLoc dl = N->getDebugLoc(); 16365 16366 if ((VT == MVT::v4i64 && OpVT == MVT::v4i32) || 16367 (VT == MVT::v8i32 && OpVT == MVT::v8i16)) { 16368 16369 if (Subtarget->hasAVX2()) 16370 return DAG.getNode(X86ISD::VSEXT_MOVL, dl, VT, Op); 16371 16372 // Optimize vectors in AVX mode 16373 // Sign extend v8i16 to v8i32 and 16374 // v4i32 to v4i64 16375 // 16376 // Divide input vector into two parts 16377 // for v4i32 the shuffle mask will be { 0, 1, -1, -1} {2, 3, -1, -1} 16378 // use vpmovsx instruction to extend v4i32 -> v2i64; v8i16 -> v4i32 16379 // concat the vectors to original VT 16380 16381 unsigned NumElems = OpVT.getVectorNumElements(); 16382 SDValue Undef = DAG.getUNDEF(OpVT); 16383 16384 SmallVector<int,8> ShufMask1(NumElems, -1); 16385 for (unsigned i = 0; i != NumElems/2; ++i) 16386 ShufMask1[i] = i; 16387 16388 SDValue OpLo = DAG.getVectorShuffle(OpVT, dl, Op, Undef, &ShufMask1[0]); 16389 16390 SmallVector<int,8> ShufMask2(NumElems, -1); 16391 for (unsigned i = 0; i != NumElems/2; ++i) 16392 ShufMask2[i] = i + NumElems/2; 16393 16394 SDValue OpHi = DAG.getVectorShuffle(OpVT, dl, Op, Undef, &ShufMask2[0]); 16395 16396 EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(), 16397 VT.getVectorNumElements()/2); 16398 16399 OpLo = DAG.getNode(X86ISD::VSEXT_MOVL, dl, HalfVT, OpLo); 16400 OpHi = DAG.getNode(X86ISD::VSEXT_MOVL, dl, HalfVT, OpHi); 
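    // At this point (sketch, for the v8i16 -> v8i32 case): OpLo and OpHi
    // each hold four source elements sign-extended to v4i32 (vpmovsxwd),
    // and the CONCAT_VECTORS below stitches them into the 256-bit result.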
16401 16402 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi); 16403 } 16404 return SDValue(); 16405} 16406 16407static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG, 16408 const X86Subtarget* Subtarget) { 16409 DebugLoc dl = N->getDebugLoc(); 16410 EVT VT = N->getValueType(0); 16411 16412 // Let legalize expand this if it isn't a legal type yet. 16413 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 16414 return SDValue(); 16415 16416 EVT ScalarVT = VT.getScalarType(); 16417 if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || 16418 (!Subtarget->hasFMA() && !Subtarget->hasFMA4())) 16419 return SDValue(); 16420 16421 SDValue A = N->getOperand(0); 16422 SDValue B = N->getOperand(1); 16423 SDValue C = N->getOperand(2); 16424 16425 bool NegA = (A.getOpcode() == ISD::FNEG); 16426 bool NegB = (B.getOpcode() == ISD::FNEG); 16427 bool NegC = (C.getOpcode() == ISD::FNEG); 16428 16429 // Negative multiplication when NegA xor NegB 16430 bool NegMul = (NegA != NegB); 16431 if (NegA) 16432 A = A.getOperand(0); 16433 if (NegB) 16434 B = B.getOperand(0); 16435 if (NegC) 16436 C = C.getOperand(0); 16437 16438 unsigned Opcode; 16439 if (!NegMul) 16440 Opcode = (!NegC) ? X86ISD::FMADD : X86ISD::FMSUB; 16441 else 16442 Opcode = (!NegC) ? X86ISD::FNMADD : X86ISD::FNMSUB; 16443 16444 return DAG.getNode(Opcode, dl, VT, A, B, C); 16445} 16446 16447static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG, 16448 TargetLowering::DAGCombinerInfo &DCI, 16449 const X86Subtarget *Subtarget) { 16450 // (i32 zext (and (i8 x86isd::setcc_carry), 1)) -> 16451 // (and (i32 x86isd::setcc_carry), 1) 16452 // This eliminates the zext. This transformation is necessary because 16453 // ISD::SETCC is always legalized to i8. 16454 DebugLoc dl = N->getDebugLoc(); 16455 SDValue N0 = N->getOperand(0); 16456 EVT VT = N->getValueType(0); 16457 EVT OpVT = N0.getValueType(); 16458 16459 if (N0.getOpcode() == ISD::AND && 16460 N0.hasOneUse() && 16461 N0.getOperand(0).hasOneUse()) { 16462 SDValue N00 = N0.getOperand(0); 16463 if (N00.getOpcode() != X86ISD::SETCC_CARRY) 16464 return SDValue(); 16465 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 16466 if (!C || C->getZExtValue() != 1) 16467 return SDValue(); 16468 return DAG.getNode(ISD::AND, dl, VT, 16469 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT, 16470 N00.getOperand(0), N00.getOperand(1)), 16471 DAG.getConstant(1, VT)); 16472 } 16473 16474 // Optimize vectors in AVX mode: 16475 // 16476 // v8i16 -> v8i32 16477 // Use vpunpcklwd for 4 lower elements v8i16 -> v4i32. 16478 // Use vpunpckhwd for 4 upper elements v8i16 -> v4i32. 16479 // Concat upper and lower parts. 16480 // 16481 // v4i32 -> v4i64 16482 // Use vpunpckldq for 4 lower elements v4i32 -> v2i64. 16483 // Use vpunpckhdq for 4 upper elements v4i32 -> v2i64. 16484 // Concat upper and lower parts. 
16485 // 16486 if (!DCI.isBeforeLegalizeOps()) 16487 return SDValue(); 16488 16489 if (!Subtarget->hasAVX()) 16490 return SDValue(); 16491 16492 if (((VT == MVT::v8i32) && (OpVT == MVT::v8i16)) || 16493 ((VT == MVT::v4i64) && (OpVT == MVT::v4i32))) { 16494 16495 if (Subtarget->hasAVX2()) 16496 return DAG.getNode(X86ISD::VZEXT_MOVL, dl, VT, N0); 16497 16498 SDValue ZeroVec = getZeroVector(OpVT, Subtarget, DAG, dl); 16499 SDValue OpLo = getUnpackl(DAG, dl, OpVT, N0, ZeroVec); 16500 SDValue OpHi = getUnpackh(DAG, dl, OpVT, N0, ZeroVec); 16501 16502 EVT HVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 16503 VT.getVectorNumElements()/2); 16504 16505 OpLo = DAG.getNode(ISD::BITCAST, dl, HVT, OpLo); 16506 OpHi = DAG.getNode(ISD::BITCAST, dl, HVT, OpHi); 16507 16508 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi); 16509 } 16510 16511 return SDValue(); 16512} 16513 16514// Optimize x == -y --> x+y == 0 16515// x != -y --> x+y != 0 16516static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG) { 16517 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); 16518 SDValue LHS = N->getOperand(0); 16519 SDValue RHS = N->getOperand(1); 16520 16521 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB) 16522 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(LHS.getOperand(0))) 16523 if (C->getAPIntValue() == 0 && LHS.hasOneUse()) { 16524 SDValue addV = DAG.getNode(ISD::ADD, N->getDebugLoc(), 16525 LHS.getValueType(), RHS, LHS.getOperand(1)); 16526 return DAG.getSetCC(N->getDebugLoc(), N->getValueType(0), 16527 addV, DAG.getConstant(0, addV.getValueType()), CC); 16528 } 16529 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB) 16530 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS.getOperand(0))) 16531 if (C->getAPIntValue() == 0 && RHS.hasOneUse()) { 16532 SDValue addV = DAG.getNode(ISD::ADD, N->getDebugLoc(), 16533 RHS.getValueType(), LHS, RHS.getOperand(1)); 16534 return DAG.getSetCC(N->getDebugLoc(), N->getValueType(0), 16535 addV, DAG.getConstant(0, addV.getValueType()), CC); 16536 } 16537 return SDValue(); 16538} 16539 16540// Helper function of PerformSETCCCombine. It is to materialize "setb reg" 16541// as "sbb reg,reg", since it can be extended without zext and produces 16542// an all-ones bit which is more useful than 0/1 in some cases. 16543static SDValue MaterializeSETB(DebugLoc DL, SDValue EFLAGS, SelectionDAG &DAG) { 16544 return DAG.getNode(ISD::AND, DL, MVT::i8, 16545 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8, 16546 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS), 16547 DAG.getConstant(1, MVT::i8)); 16548} 16549 16550// Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT 16551static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG, 16552 TargetLowering::DAGCombinerInfo &DCI, 16553 const X86Subtarget *Subtarget) { 16554 DebugLoc DL = N->getDebugLoc(); 16555 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0)); 16556 SDValue EFLAGS = N->getOperand(1); 16557 16558 if (CC == X86::COND_A) { 16559 // Try to convert COND_A into COND_B in an attempt to facilitate 16560 // materializing "setb reg". 16561 // 16562 // Do not flip "e > c", where "c" is a constant, because Cmp instruction 16563 // cannot take an immediate as its first operand. 
16564 // 16565 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() && 16566 EFLAGS.getValueType().isInteger() && 16567 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) { 16568 SDValue NewSub = DAG.getNode(X86ISD::SUB, EFLAGS.getDebugLoc(), 16569 EFLAGS.getNode()->getVTList(), 16570 EFLAGS.getOperand(1), EFLAGS.getOperand(0)); 16571 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo()); 16572 return MaterializeSETB(DL, NewEFLAGS, DAG); 16573 } 16574 } 16575 16576 // Materialize "setb reg" as "sbb reg,reg", since it can be extended without 16577 // a zext and produces an all-ones bit which is more useful than 0/1 in some 16578 // cases. 16579 if (CC == X86::COND_B) 16580 return MaterializeSETB(DL, EFLAGS, DAG); 16581 16582 SDValue Flags; 16583 16584 Flags = checkBoolTestSetCCCombine(EFLAGS, CC); 16585 if (Flags.getNode()) { 16586 SDValue Cond = DAG.getConstant(CC, MVT::i8); 16587 return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags); 16588 } 16589 16590 return SDValue(); 16591} 16592 16593// Optimize branch condition evaluation. 16594// 16595static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG, 16596 TargetLowering::DAGCombinerInfo &DCI, 16597 const X86Subtarget *Subtarget) { 16598 DebugLoc DL = N->getDebugLoc(); 16599 SDValue Chain = N->getOperand(0); 16600 SDValue Dest = N->getOperand(1); 16601 SDValue EFLAGS = N->getOperand(3); 16602 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2)); 16603 16604 SDValue Flags; 16605 16606 Flags = checkBoolTestSetCCCombine(EFLAGS, CC); 16607 if (Flags.getNode()) { 16608 SDValue Cond = DAG.getConstant(CC, MVT::i8); 16609 return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), Chain, Dest, Cond, 16610 Flags); 16611 } 16612 16613 return SDValue(); 16614} 16615 16616static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG, 16617 const X86TargetLowering *XTLI) { 16618 SDValue Op0 = N->getOperand(0); 16619 EVT InVT = Op0->getValueType(0); 16620 16621 // SINT_TO_FP(v4i8) -> SINT_TO_FP(SEXT(v4i8 to v4i32)) 16622 if (InVT == MVT::v8i8 || InVT == MVT::v4i8) { 16623 DebugLoc dl = N->getDebugLoc(); 16624 MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32; 16625 SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0); 16626 return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P); 16627 } 16628 16629 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have 16630 // a 32-bit target where SSE doesn't support i64->FP operations. 16631 if (Op0.getOpcode() == ISD::LOAD) { 16632 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode()); 16633 EVT VT = Ld->getValueType(0); 16634 if (!Ld->isVolatile() && !N->getValueType(0).isVector() && 16635 ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() && 16636 !XTLI->getSubtarget()->is64Bit() && 16637 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 16638 SDValue FILDChain = XTLI->BuildFILD(SDValue(N, 0), Ld->getValueType(0), 16639 Ld->getChain(), Op0, DAG); 16640 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1)); 16641 return FILDChain; 16642 } 16643 } 16644 return SDValue(); 16645} 16646 16647// Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS 16648static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG, 16649 X86TargetLowering::DAGCombinerInfo &DCI) { 16650 // If the LHS and RHS of the ADC node are zero, then it can't overflow and 16651 // the result is either zero or one (depending on the input carry bit). 16652 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1. 
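  // In instruction terms (illustrative): "adc $0, $0" just materializes the
  // carry bit, and SETCC_CARRY is selected as "sbb %reg, %reg" (all zeros or
  // all ones), so the AND with 1 built below yields the same 0/1 result.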
16653 if (X86::isZeroNode(N->getOperand(0)) && 16654 X86::isZeroNode(N->getOperand(1)) && 16655 // We don't have a good way to replace an EFLAGS use, so only do this when 16656 // dead right now. 16657 SDValue(N, 1).use_empty()) { 16658 DebugLoc DL = N->getDebugLoc(); 16659 EVT VT = N->getValueType(0); 16660 SDValue CarryOut = DAG.getConstant(0, N->getValueType(1)); 16661 SDValue Res1 = DAG.getNode(ISD::AND, DL, VT, 16662 DAG.getNode(X86ISD::SETCC_CARRY, DL, VT, 16663 DAG.getConstant(X86::COND_B,MVT::i8), 16664 N->getOperand(2)), 16665 DAG.getConstant(1, VT)); 16666 return DCI.CombineTo(N, Res1, CarryOut); 16667 } 16668 16669 return SDValue(); 16670} 16671 16672// fold (add Y, (sete X, 0)) -> adc 0, Y 16673// (add Y, (setne X, 0)) -> sbb -1, Y 16674// (sub (sete X, 0), Y) -> sbb 0, Y 16675// (sub (setne X, 0), Y) -> adc -1, Y 16676static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) { 16677 DebugLoc DL = N->getDebugLoc(); 16678 16679 // Look through ZExts. 16680 SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0); 16681 if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse()) 16682 return SDValue(); 16683 16684 SDValue SetCC = Ext.getOperand(0); 16685 if (SetCC.getOpcode() != X86ISD::SETCC || !SetCC.hasOneUse()) 16686 return SDValue(); 16687 16688 X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0); 16689 if (CC != X86::COND_E && CC != X86::COND_NE) 16690 return SDValue(); 16691 16692 SDValue Cmp = SetCC.getOperand(1); 16693 if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() || 16694 !X86::isZeroNode(Cmp.getOperand(1)) || 16695 !Cmp.getOperand(0).getValueType().isInteger()) 16696 return SDValue(); 16697 16698 SDValue CmpOp0 = Cmp.getOperand(0); 16699 SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0, 16700 DAG.getConstant(1, CmpOp0.getValueType())); 16701 16702 SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1); 16703 if (CC == X86::COND_NE) 16704 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB, 16705 DL, OtherVal.getValueType(), OtherVal, 16706 DAG.getConstant(-1ULL, OtherVal.getValueType()), NewCmp); 16707 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC, 16708 DL, OtherVal.getValueType(), OtherVal, 16709 DAG.getConstant(0, OtherVal.getValueType()), NewCmp); 16710} 16711 16712/// PerformADDCombine - Do target-specific dag combines on integer adds. 16713static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG, 16714 const X86Subtarget *Subtarget) { 16715 EVT VT = N->getValueType(0); 16716 SDValue Op0 = N->getOperand(0); 16717 SDValue Op1 = N->getOperand(1); 16718 16719 // Try to synthesize horizontal adds from adds of shuffles. 16720 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) || 16721 (Subtarget->hasAVX2() && (VT == MVT::v16i16 || VT == MVT::v8i32))) && 16722 isHorizontalBinOp(Op0, Op1, true)) 16723 return DAG.getNode(X86ISD::HADD, N->getDebugLoc(), VT, Op0, Op1); 16724 16725 return OptimizeConditionalInDecrement(N, DAG); 16726} 16727 16728static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG, 16729 const X86Subtarget *Subtarget) { 16730 SDValue Op0 = N->getOperand(0); 16731 SDValue Op1 = N->getOperand(1); 16732 16733 // X86 can't encode an immediate LHS of a sub. See if we can push the 16734 // negation into a preceding instruction. 16735 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) { 16736 // If the RHS of the sub is a XOR with one use and a constant, invert the 16737 // immediate. 
Then add one to the LHS of the sub so we can turn 16738 // X-Y -> X+~Y+1, saving one register. 16739 if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR && 16740 isa<ConstantSDNode>(Op1.getOperand(1))) { 16741 APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue(); 16742 EVT VT = Op0.getValueType(); 16743 SDValue NewXor = DAG.getNode(ISD::XOR, Op1.getDebugLoc(), VT, 16744 Op1.getOperand(0), 16745 DAG.getConstant(~XorC, VT)); 16746 return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, NewXor, 16747 DAG.getConstant(C->getAPIntValue()+1, VT)); 16748 } 16749 } 16750 16751 // Try to synthesize horizontal subs from subs of shuffles. 16752 EVT VT = N->getValueType(0); 16753 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) || 16754 (Subtarget->hasAVX2() && (VT == MVT::v16i16 || VT == MVT::v8i32))) && 16755 isHorizontalBinOp(Op0, Op1, true)) 16756 return DAG.getNode(X86ISD::HSUB, N->getDebugLoc(), VT, Op0, Op1); 16757 16758 return OptimizeConditionalInDecrement(N, DAG); 16759} 16760 16761/// performVZEXTCombine - Performs VZEXT combines. 16762static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG, 16763 TargetLowering::DAGCombinerInfo &DCI, 16764 const X86Subtarget *Subtarget) { 16765 // (vzext (bitcast (vzext x))) -> (vzext x) 16766 SDValue In = N->getOperand(0); 16767 while (In.getOpcode() == ISD::BITCAST) 16768 In = In.getOperand(0); 16769 16770 if (In.getOpcode() != X86ISD::VZEXT) 16771 return SDValue(); 16772 16773 return DAG.getNode(X86ISD::VZEXT, N->getDebugLoc(), N->getValueType(0), In.getOperand(0)); 16774} 16775 16776SDValue X86TargetLowering::PerformDAGCombine(SDNode *N, 16777 DAGCombinerInfo &DCI) const { 16778 SelectionDAG &DAG = DCI.DAG; 16779 switch (N->getOpcode()) { 16780 default: break; 16781 case ISD::EXTRACT_VECTOR_ELT: 16782 return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, DCI); 16783 case ISD::VSELECT: 16784 case ISD::SELECT: return PerformSELECTCombine(N, DAG, DCI, Subtarget); 16785 case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI, Subtarget); 16786 case ISD::ADD: return PerformAddCombine(N, DAG, Subtarget); 16787 case ISD::SUB: return PerformSubCombine(N, DAG, Subtarget); 16788 case X86ISD::ADC: return PerformADCCombine(N, DAG, DCI); 16789 case ISD::MUL: return PerformMulCombine(N, DAG, DCI); 16790 case ISD::SHL: 16791 case ISD::SRA: 16792 case ISD::SRL: return PerformShiftCombine(N, DAG, DCI, Subtarget); 16793 case ISD::AND: return PerformAndCombine(N, DAG, DCI, Subtarget); 16794 case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget); 16795 case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget); 16796 case ISD::LOAD: return PerformLOADCombine(N, DAG, DCI, Subtarget); 16797 case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget); 16798 case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, this); 16799 case ISD::FADD: return PerformFADDCombine(N, DAG, Subtarget); 16800 case ISD::FSUB: return PerformFSUBCombine(N, DAG, Subtarget); 16801 case X86ISD::FXOR: 16802 case X86ISD::FOR: return PerformFORCombine(N, DAG); 16803 case X86ISD::FMIN: 16804 case X86ISD::FMAX: return PerformFMinFMaxCombine(N, DAG); 16805 case X86ISD::FAND: return PerformFANDCombine(N, DAG); 16806 case X86ISD::BT: return PerformBTCombine(N, DAG, DCI); 16807 case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG); 16808 case ISD::ANY_EXTEND: 16809 case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG, DCI, Subtarget); 16810 case ISD::SIGN_EXTEND: return PerformSExtCombine(N, DAG, DCI, Subtarget); 16811 case
ISD::TRUNCATE: return PerformTruncateCombine(N, DAG,DCI,Subtarget); 16812 case ISD::SETCC: return PerformISDSETCCCombine(N, DAG); 16813 case X86ISD::SETCC: return PerformSETCCCombine(N, DAG, DCI, Subtarget); 16814 case X86ISD::BRCOND: return PerformBrCondCombine(N, DAG, DCI, Subtarget); 16815 case X86ISD::VZEXT: return performVZEXTCombine(N, DAG, DCI, Subtarget); 16816 case X86ISD::SHUFP: // Handle all target specific shuffles 16817 case X86ISD::PALIGN: 16818 case X86ISD::UNPCKH: 16819 case X86ISD::UNPCKL: 16820 case X86ISD::MOVHLPS: 16821 case X86ISD::MOVLHPS: 16822 case X86ISD::PSHUFD: 16823 case X86ISD::PSHUFHW: 16824 case X86ISD::PSHUFLW: 16825 case X86ISD::MOVSS: 16826 case X86ISD::MOVSD: 16827 case X86ISD::VPERMILP: 16828 case X86ISD::VPERM2X128: 16829 case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI,Subtarget); 16830 case ISD::FMA: return PerformFMACombine(N, DAG, Subtarget); 16831 } 16832 16833 return SDValue(); 16834} 16835 16836/// isTypeDesirableForOp - Return true if the target has native support for 16837/// the specified value type and it is 'desirable' to use the type for the 16838/// given node type. e.g. On x86 i16 is legal, but undesirable since i16 16839/// instruction encodings are longer and some i16 instructions are slow. 16840bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const { 16841 if (!isTypeLegal(VT)) 16842 return false; 16843 if (VT != MVT::i16) 16844 return true; 16845 16846 switch (Opc) { 16847 default: 16848 return true; 16849 case ISD::LOAD: 16850 case ISD::SIGN_EXTEND: 16851 case ISD::ZERO_EXTEND: 16852 case ISD::ANY_EXTEND: 16853 case ISD::SHL: 16854 case ISD::SRL: 16855 case ISD::SUB: 16856 case ISD::ADD: 16857 case ISD::MUL: 16858 case ISD::AND: 16859 case ISD::OR: 16860 case ISD::XOR: 16861 return false; 16862 } 16863} 16864 16865/// IsDesirableToPromoteOp - This method queries the target whether it is 16866/// beneficial for the DAG combiner to promote the specified node. If true, it 16867/// should return the desired promotion type by reference. 16868bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const { 16869 EVT VT = Op.getValueType(); 16870 if (VT != MVT::i16) 16871 return false; 16872 16873 bool Promote = false; 16874 bool Commute = false; 16875 switch (Op.getOpcode()) { 16876 default: break; 16877 case ISD::LOAD: { 16878 LoadSDNode *LD = cast<LoadSDNode>(Op); 16879 // If the non-extending load has a single use and it's not live out, then it 16880 // might be folded. 16881 if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&& 16882 Op.hasOneUse()*/) { 16883 for (SDNode::use_iterator UI = Op.getNode()->use_begin(), 16884 UE = Op.getNode()->use_end(); UI != UE; ++UI) { 16885 // The only case where we'd want to promote LOAD (rather than it being 16886 // promoted as an operand) is when its only use is live out. 16887 if (UI->getOpcode() != ISD::CopyToReg) 16888 return false; 16889 } 16890 } 16891 Promote = true; 16892 break; 16893 } 16894 case ISD::SIGN_EXTEND: 16895 case ISD::ZERO_EXTEND: 16896 case ISD::ANY_EXTEND: 16897 Promote = true; 16898 break; 16899 case ISD::SHL: 16900 case ISD::SRL: { 16901 SDValue N0 = Op.getOperand(0); 16902 // Look out for (store (shl (load), x)).
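    // In that pattern both the load and the shift can typically be folded into
    // a single read-modify-write shift on memory (e.g. "shlw $imm, (mem)");
    // promoting the operation to i32 would block that folding, so decline.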
16903 if (MayFoldLoad(N0) && MayFoldIntoStore(Op)) 16904 return false; 16905 Promote = true; 16906 break; 16907 } 16908 case ISD::ADD: 16909 case ISD::MUL: 16910 case ISD::AND: 16911 case ISD::OR: 16912 case ISD::XOR: 16913 Commute = true; 16914 // fallthrough 16915 case ISD::SUB: { 16916 SDValue N0 = Op.getOperand(0); 16917 SDValue N1 = Op.getOperand(1); 16918 if (!Commute && MayFoldLoad(N1)) 16919 return false; 16920 // Avoid disabling potential load folding opportunities. 16921 if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op))) 16922 return false; 16923 if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op))) 16924 return false; 16925 Promote = true; 16926 } 16927 } 16928 16929 PVT = MVT::i32; 16930 return Promote; 16931} 16932 16933//===----------------------------------------------------------------------===// 16934// X86 Inline Assembly Support 16935//===----------------------------------------------------------------------===// 16936 16937namespace { 16938 // Helper to match a string separated by whitespace. 16939 bool matchAsmImpl(StringRef s, ArrayRef<const StringRef *> args) { 16940 s = s.substr(s.find_first_not_of(" \t")); // Skip leading whitespace. 16941 16942 for (unsigned i = 0, e = args.size(); i != e; ++i) { 16943 StringRef piece(*args[i]); 16944 if (!s.startswith(piece)) // Check if the piece matches. 16945 return false; 16946 16947 s = s.substr(piece.size()); 16948 StringRef::size_type pos = s.find_first_not_of(" \t"); 16949 if (pos == 0) // We matched a prefix. 16950 return false; 16951 16952 s = s.substr(pos); 16953 } 16954 16955 return s.empty(); 16956 } 16957 const VariadicFunction1<bool, StringRef, StringRef, matchAsmImpl> matchAsm={}; 16958} 16959 16960bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const { 16961 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue()); 16962 16963 std::string AsmStr = IA->getAsmString(); 16964 16965 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType()); 16966 if (!Ty || Ty->getBitWidth() % 16 != 0) 16967 return false; 16968 16969 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a" 16970 SmallVector<StringRef, 4> AsmPieces; 16971 SplitString(AsmStr, AsmPieces, ";\n"); 16972 16973 switch (AsmPieces.size()) { 16974 default: return false; 16975 case 1: 16976 // FIXME: this should verify that we are targeting a 486 or better. If not, 16977 // we will turn this bswap into something that will be lowered to logical 16978 // ops instead of emitting the bswap asm. For now, we don't support 486 or 16979 // lower so don't worry about this. 16980 // bswap $0 16981 if (matchAsm(AsmPieces[0], "bswap", "$0") || 16982 matchAsm(AsmPieces[0], "bswapl", "$0") || 16983 matchAsm(AsmPieces[0], "bswapq", "$0") || 16984 matchAsm(AsmPieces[0], "bswap", "${0:q}") || 16985 matchAsm(AsmPieces[0], "bswapl", "${0:q}") || 16986 matchAsm(AsmPieces[0], "bswapq", "${0:q}")) { 16987 // No need to check constraints, nothing other than the equivalent of 16988 // "=r,0" would be valid here. 
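      // LowerToByteSwap rewrites the call to use the llvm.bswap intrinsic on
      // the call's result type, e.g. i32 inline asm "bswap $0" becomes a call
      // to llvm.bswap.i32, which the backend then selects to a plain BSWAP.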
16989 return IntrinsicLowering::LowerToByteSwap(CI); 16990 } 16991 16992 // rorw $$8, ${0:w} --> llvm.bswap.i16 16993 if (CI->getType()->isIntegerTy(16) && 16994 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 && 16995 (matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") || 16996 matchAsm(AsmPieces[0], "rolw", "$$8,", "${0:w}"))) { 16997 AsmPieces.clear(); 16998 const std::string &ConstraintsStr = IA->getConstraintString(); 16999 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ","); 17000 std::sort(AsmPieces.begin(), AsmPieces.end()); 17001 if (AsmPieces.size() == 4 && 17002 AsmPieces[0] == "~{cc}" && 17003 AsmPieces[1] == "~{dirflag}" && 17004 AsmPieces[2] == "~{flags}" && 17005 AsmPieces[3] == "~{fpsr}") 17006 return IntrinsicLowering::LowerToByteSwap(CI); 17007 } 17008 break; 17009 case 3: 17010 if (CI->getType()->isIntegerTy(32) && 17011 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 && 17012 matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") && 17013 matchAsm(AsmPieces[1], "rorl", "$$16,", "$0") && 17014 matchAsm(AsmPieces[2], "rorw", "$$8,", "${0:w}")) { 17015 AsmPieces.clear(); 17016 const std::string &ConstraintsStr = IA->getConstraintString(); 17017 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ","); 17018 std::sort(AsmPieces.begin(), AsmPieces.end()); 17019 if (AsmPieces.size() == 4 && 17020 AsmPieces[0] == "~{cc}" && 17021 AsmPieces[1] == "~{dirflag}" && 17022 AsmPieces[2] == "~{flags}" && 17023 AsmPieces[3] == "~{fpsr}") 17024 return IntrinsicLowering::LowerToByteSwap(CI); 17025 } 17026 17027 if (CI->getType()->isIntegerTy(64)) { 17028 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints(); 17029 if (Constraints.size() >= 2 && 17030 Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" && 17031 Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") { 17032 // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64 17033 if (matchAsm(AsmPieces[0], "bswap", "%eax") && 17034 matchAsm(AsmPieces[1], "bswap", "%edx") && 17035 matchAsm(AsmPieces[2], "xchgl", "%eax,", "%edx")) 17036 return IntrinsicLowering::LowerToByteSwap(CI); 17037 } 17038 } 17039 break; 17040 } 17041 return false; 17042} 17043 17044 17045 17046/// getConstraintType - Given a constraint letter, return the type of 17047/// constraint it is for this target. 17048X86TargetLowering::ConstraintType 17049X86TargetLowering::getConstraintType(const std::string &Constraint) const { 17050 if (Constraint.size() == 1) { 17051 switch (Constraint[0]) { 17052 case 'R': 17053 case 'q': 17054 case 'Q': 17055 case 'f': 17056 case 't': 17057 case 'u': 17058 case 'y': 17059 case 'x': 17060 case 'Y': 17061 case 'l': 17062 return C_RegisterClass; 17063 case 'a': 17064 case 'b': 17065 case 'c': 17066 case 'd': 17067 case 'S': 17068 case 'D': 17069 case 'A': 17070 return C_Register; 17071 case 'I': 17072 case 'J': 17073 case 'K': 17074 case 'L': 17075 case 'M': 17076 case 'N': 17077 case 'G': 17078 case 'C': 17079 case 'e': 17080 case 'Z': 17081 return C_Other; 17082 default: 17083 break; 17084 } 17085 } 17086 return TargetLowering::getConstraintType(Constraint); 17087} 17088 17089/// Examine constraint type and operand type and determine a weight value. 17090/// This object must already have been set up with the operand type 17091/// and the current alternative constraint selected. 
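/// For instance (illustrative, not exhaustive): an integer operand matched
/// against a specific-register letter such as 'a' scores CW_SpecificReg,
/// while a constant fitting the range of 'I' (0..31) scores CW_Constant.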
17092TargetLowering::ConstraintWeight 17093 X86TargetLowering::getSingleConstraintMatchWeight( 17094 AsmOperandInfo &info, const char *constraint) const { 17095 ConstraintWeight weight = CW_Invalid; 17096 Value *CallOperandVal = info.CallOperandVal; 17097 // If we don't have a value, we can't do a match, 17098 // but allow it at the lowest weight. 17099 if (CallOperandVal == NULL) 17100 return CW_Default; 17101 Type *type = CallOperandVal->getType(); 17102 // Look at the constraint type. 17103 switch (*constraint) { 17104 default: 17105 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 17106 case 'R': 17107 case 'q': 17108 case 'Q': 17109 case 'a': 17110 case 'b': 17111 case 'c': 17112 case 'd': 17113 case 'S': 17114 case 'D': 17115 case 'A': 17116 if (CallOperandVal->getType()->isIntegerTy()) 17117 weight = CW_SpecificReg; 17118 break; 17119 case 'f': 17120 case 't': 17121 case 'u': 17122 if (type->isFloatingPointTy()) 17123 weight = CW_SpecificReg; 17124 break; 17125 case 'y': 17126 if (type->isX86_MMXTy() && Subtarget->hasMMX()) 17127 weight = CW_SpecificReg; 17128 break; 17129 case 'x': 17130 case 'Y': 17131 if (((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasSSE1()) || 17132 ((type->getPrimitiveSizeInBits() == 256) && Subtarget->hasAVX())) 17133 weight = CW_Register; 17134 break; 17135 case 'I': 17136 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) { 17137 if (C->getZExtValue() <= 31) 17138 weight = CW_Constant; 17139 } 17140 break; 17141 case 'J': 17142 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 17143 if (C->getZExtValue() <= 63) 17144 weight = CW_Constant; 17145 } 17146 break; 17147 case 'K': 17148 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 17149 if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f)) 17150 weight = CW_Constant; 17151 } 17152 break; 17153 case 'L': 17154 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 17155 if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff)) 17156 weight = CW_Constant; 17157 } 17158 break; 17159 case 'M': 17160 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 17161 if (C->getZExtValue() <= 3) 17162 weight = CW_Constant; 17163 } 17164 break; 17165 case 'N': 17166 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 17167 if (C->getZExtValue() <= 0xff) 17168 weight = CW_Constant; 17169 } 17170 break; 17171 case 'G': 17172 case 'C': 17173 if (dyn_cast<ConstantFP>(CallOperandVal)) { 17174 weight = CW_Constant; 17175 } 17176 break; 17177 case 'e': 17178 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 17179 if ((C->getSExtValue() >= -0x80000000LL) && 17180 (C->getSExtValue() <= 0x7fffffffLL)) 17181 weight = CW_Constant; 17182 } 17183 break; 17184 case 'Z': 17185 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 17186 if (C->getZExtValue() <= 0xffffffff) 17187 weight = CW_Constant; 17188 } 17189 break; 17190 } 17191 return weight; 17192} 17193 17194/// LowerXConstraint - try to replace an X constraint, which matches anything, 17195/// with another that has more specific requirements based on the type of the 17196/// corresponding operand. 17197const char *X86TargetLowering:: 17198LowerXConstraint(EVT ConstraintVT) const { 17199 // FP X constraints get lowered to SSE1/2 registers if available, otherwise 17200 // 'f' like normal targets. 
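  // For example, an "X" constraint on a double operand is rewritten to "Y"
  // (an SSE2 register) when SSE2 is available, or to "x" with only SSE1;
  // non-FP types simply fall back to the default handling.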
17201 if (ConstraintVT.isFloatingPoint()) { 17202 if (Subtarget->hasSSE2()) 17203 return "Y"; 17204 if (Subtarget->hasSSE1()) 17205 return "x"; 17206 } 17207 17208 return TargetLowering::LowerXConstraint(ConstraintVT); 17209} 17210 17211/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 17212/// vector. If it is invalid, don't add anything to Ops. 17213void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op, 17214 std::string &Constraint, 17215 std::vector<SDValue>&Ops, 17216 SelectionDAG &DAG) const { 17217 SDValue Result(0, 0); 17218 17219 // Only support length 1 constraints for now. 17220 if (Constraint.length() > 1) return; 17221 17222 char ConstraintLetter = Constraint[0]; 17223 switch (ConstraintLetter) { 17224 default: break; 17225 case 'I': 17226 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 17227 if (C->getZExtValue() <= 31) { 17228 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType()); 17229 break; 17230 } 17231 } 17232 return; 17233 case 'J': 17234 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 17235 if (C->getZExtValue() <= 63) { 17236 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType()); 17237 break; 17238 } 17239 } 17240 return; 17241 case 'K': 17242 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 17243 if (isInt<8>(C->getSExtValue())) { 17244 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType()); 17245 break; 17246 } 17247 } 17248 return; 17249 case 'N': 17250 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 17251 if (C->getZExtValue() <= 255) { 17252 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType()); 17253 break; 17254 } 17255 } 17256 return; 17257 case 'e': { 17258 // 32-bit signed value 17259 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 17260 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()), 17261 C->getSExtValue())) { 17262 // Widen to 64 bits here to get it sign extended. 17263 Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64); 17264 break; 17265 } 17266 // FIXME gcc accepts some relocatable values here too, but only in certain 17267 // memory models; it's complicated. 17268 } 17269 return; 17270 } 17271 case 'Z': { 17272 // 32-bit unsigned value 17273 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 17274 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()), 17275 C->getZExtValue())) { 17276 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType()); 17277 break; 17278 } 17279 } 17280 // FIXME gcc accepts some relocatable values here too, but only in certain 17281 // memory models; it's complicated. 17282 return; 17283 } 17284 case 'i': { 17285 // Literal immediates are always ok. 17286 if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) { 17287 // Widen to 64 bits here to get it sign extended. 17288 Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64); 17289 break; 17290 } 17291 17292 // In any sort of PIC mode addresses need to be computed at runtime by 17293 // adding in a register or some sort of table lookup. These can't 17294 // be used as immediates. 17295 if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC()) 17296 return; 17297 17298 // If we are in non-pic codegen mode, we allow the address of a global (with 17299 // an optional displacement) to be used with 'i'. 17300 GlobalAddressSDNode *GA = 0; 17301 int64_t Offset = 0; 17302 17303 // Match either (GA), (GA+C), (GA+C1+C2), etc. 
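    // The loop walks down a chain of ADD/SUB nodes with constant right-hand
    // sides, accumulating them into Offset until it reaches the GlobalAddress
    // itself; e.g. ((GA + 4) + 8) yields GA with Offset == 12.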
17304 while (1) { 17305 if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) { 17306 Offset += GA->getOffset(); 17307 break; 17308 } else if (Op.getOpcode() == ISD::ADD) { 17309 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 17310 Offset += C->getZExtValue(); 17311 Op = Op.getOperand(0); 17312 continue; 17313 } 17314 } else if (Op.getOpcode() == ISD::SUB) { 17315 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 17316 Offset += -C->getZExtValue(); 17317 Op = Op.getOperand(0); 17318 continue; 17319 } 17320 } 17321 17322 // Otherwise, this isn't something we can handle, reject it. 17323 return; 17324 } 17325 17326 const GlobalValue *GV = GA->getGlobal(); 17327 // If we require an extra load to get this address, as in PIC mode, we 17328 // can't accept it. 17329 if (isGlobalStubReference(Subtarget->ClassifyGlobalReference(GV, 17330 getTargetMachine()))) 17331 return; 17332 17333 Result = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(), 17334 GA->getValueType(0), Offset); 17335 break; 17336 } 17337 } 17338 17339 if (Result.getNode()) { 17340 Ops.push_back(Result); 17341 return; 17342 } 17343 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 17344} 17345 17346std::pair<unsigned, const TargetRegisterClass*> 17347X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, 17348 EVT VT) const { 17349 // First, see if this is a constraint that directly corresponds to an LLVM 17350 // register class. 17351 if (Constraint.size() == 1) { 17352 // GCC Constraint Letters 17353 switch (Constraint[0]) { 17354 default: break; 17355 // TODO: Slight differences here in allocation order and leaving 17356 // RIP in the class. Do they matter any more here than they do 17357 // in the normal allocation? 17358 case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode. 
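      // In 64-bit mode any general-purpose register satisfies 'q', so hand
      // back the full GR8/GR16/GR32/GR64 classes; in 32-bit mode fall through
      // to 'Q' below and restrict the choice to the ABCD registers.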
17359 if (Subtarget->is64Bit()) { 17360 if (VT == MVT::i32 || VT == MVT::f32) 17361 return std::make_pair(0U, &X86::GR32RegClass); 17362 if (VT == MVT::i16) 17363 return std::make_pair(0U, &X86::GR16RegClass); 17364 if (VT == MVT::i8 || VT == MVT::i1) 17365 return std::make_pair(0U, &X86::GR8RegClass); 17366 if (VT == MVT::i64 || VT == MVT::f64) 17367 return std::make_pair(0U, &X86::GR64RegClass); 17368 break; 17369 } 17370 // 32-bit fallthrough 17371 case 'Q': // Q_REGS 17372 if (VT == MVT::i32 || VT == MVT::f32) 17373 return std::make_pair(0U, &X86::GR32_ABCDRegClass); 17374 if (VT == MVT::i16) 17375 return std::make_pair(0U, &X86::GR16_ABCDRegClass); 17376 if (VT == MVT::i8 || VT == MVT::i1) 17377 return std::make_pair(0U, &X86::GR8_ABCD_LRegClass); 17378 if (VT == MVT::i64) 17379 return std::make_pair(0U, &X86::GR64_ABCDRegClass); 17380 break; 17381 case 'r': // GENERAL_REGS 17382 case 'l': // INDEX_REGS 17383 if (VT == MVT::i8 || VT == MVT::i1) 17384 return std::make_pair(0U, &X86::GR8RegClass); 17385 if (VT == MVT::i16) 17386 return std::make_pair(0U, &X86::GR16RegClass); 17387 if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit()) 17388 return std::make_pair(0U, &X86::GR32RegClass); 17389 return std::make_pair(0U, &X86::GR64RegClass); 17390 case 'R': // LEGACY_REGS 17391 if (VT == MVT::i8 || VT == MVT::i1) 17392 return std::make_pair(0U, &X86::GR8_NOREXRegClass); 17393 if (VT == MVT::i16) 17394 return std::make_pair(0U, &X86::GR16_NOREXRegClass); 17395 if (VT == MVT::i32 || !Subtarget->is64Bit()) 17396 return std::make_pair(0U, &X86::GR32_NOREXRegClass); 17397 return std::make_pair(0U, &X86::GR64_NOREXRegClass); 17398 case 'f': // FP Stack registers. 17399 // If SSE is enabled for this VT, use f80 to ensure the isel moves the 17400 // value to the correct fpstack register class. 17401 if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT)) 17402 return std::make_pair(0U, &X86::RFP32RegClass); 17403 if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT)) 17404 return std::make_pair(0U, &X86::RFP64RegClass); 17405 return std::make_pair(0U, &X86::RFP80RegClass); 17406 case 'y': // MMX_REGS if MMX allowed. 17407 if (!Subtarget->hasMMX()) break; 17408 return std::make_pair(0U, &X86::VR64RegClass); 17409 case 'Y': // SSE_REGS if SSE2 allowed 17410 if (!Subtarget->hasSSE2()) break; 17411 // FALL THROUGH. 17412 case 'x': // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed 17413 if (!Subtarget->hasSSE1()) break; 17414 17415 switch (VT.getSimpleVT().SimpleTy) { 17416 default: break; 17417 // Scalar SSE types. 17418 case MVT::f32: 17419 case MVT::i32: 17420 return std::make_pair(0U, &X86::FR32RegClass); 17421 case MVT::f64: 17422 case MVT::i64: 17423 return std::make_pair(0U, &X86::FR64RegClass); 17424 // Vector types. 17425 case MVT::v16i8: 17426 case MVT::v8i16: 17427 case MVT::v4i32: 17428 case MVT::v2i64: 17429 case MVT::v4f32: 17430 case MVT::v2f64: 17431 return std::make_pair(0U, &X86::VR128RegClass); 17432 // AVX types. 17433 case MVT::v32i8: 17434 case MVT::v16i16: 17435 case MVT::v8i32: 17436 case MVT::v4i64: 17437 case MVT::v8f32: 17438 case MVT::v4f64: 17439 return std::make_pair(0U, &X86::VR256RegClass); 17440 } 17441 break; 17442 } 17443 } 17444 17445 // Use the default implementation in TargetLowering to convert the register 17446 // constraint into a member of a register class. 17447 std::pair<unsigned, const TargetRegisterClass*> Res; 17448 Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 17449 17450 // Not found as a standard register? 
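  // The fallback below covers constraint spellings that are not plain
  // register names: "{st(0)}".."{st(7)}", the bare "{st}", "{flags}" for
  // EFLAGS, and "A" for the EAX:EDX register pair.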
17451 if (Res.second == 0) { 17452 // Map st(0) -> st(7) -> ST0 17453 if (Constraint.size() == 7 && Constraint[0] == '{' && 17454 tolower(Constraint[1]) == 's' && 17455 tolower(Constraint[2]) == 't' && 17456 Constraint[3] == '(' && 17457 (Constraint[4] >= '0' && Constraint[4] <= '7') && 17458 Constraint[5] == ')' && 17459 Constraint[6] == '}') { 17460 17461 Res.first = X86::ST0+Constraint[4]-'0'; 17462 Res.second = &X86::RFP80RegClass; 17463 return Res; 17464 } 17465 17466 // GCC allows "st(0)" to be called just plain "st". 17467 if (StringRef("{st}").equals_lower(Constraint)) { 17468 Res.first = X86::ST0; 17469 Res.second = &X86::RFP80RegClass; 17470 return Res; 17471 } 17472 17473 // flags -> EFLAGS 17474 if (StringRef("{flags}").equals_lower(Constraint)) { 17475 Res.first = X86::EFLAGS; 17476 Res.second = &X86::CCRRegClass; 17477 return Res; 17478 } 17479 17480 // 'A' means EAX + EDX. 17481 if (Constraint == "A") { 17482 Res.first = X86::EAX; 17483 Res.second = &X86::GR32_ADRegClass; 17484 return Res; 17485 } 17486 return Res; 17487 } 17488 17489 // Otherwise, check to see if this is a register class of the wrong value 17490 // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to 17491 // turn into {ax},{dx}. 17492 if (Res.second->hasType(VT)) 17493 return Res; // Correct type already, nothing to do. 17494 17495 // All of the single-register GCC register classes map their values onto 17496 // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we 17497 // really want an 8-bit or 32-bit register, map to the appropriate register 17498 // class and return the appropriate register. 17499 if (Res.second == &X86::GR16RegClass) { 17500 if (VT == MVT::i8) { 17501 unsigned DestReg = 0; 17502 switch (Res.first) { 17503 default: break; 17504 case X86::AX: DestReg = X86::AL; break; 17505 case X86::DX: DestReg = X86::DL; break; 17506 case X86::CX: DestReg = X86::CL; break; 17507 case X86::BX: DestReg = X86::BL; break; 17508 } 17509 if (DestReg) { 17510 Res.first = DestReg; 17511 Res.second = &X86::GR8RegClass; 17512 } 17513 } else if (VT == MVT::i32) { 17514 unsigned DestReg = 0; 17515 switch (Res.first) { 17516 default: break; 17517 case X86::AX: DestReg = X86::EAX; break; 17518 case X86::DX: DestReg = X86::EDX; break; 17519 case X86::CX: DestReg = X86::ECX; break; 17520 case X86::BX: DestReg = X86::EBX; break; 17521 case X86::SI: DestReg = X86::ESI; break; 17522 case X86::DI: DestReg = X86::EDI; break; 17523 case X86::BP: DestReg = X86::EBP; break; 17524 case X86::SP: DestReg = X86::ESP; break; 17525 } 17526 if (DestReg) { 17527 Res.first = DestReg; 17528 Res.second = &X86::GR32RegClass; 17529 } 17530 } else if (VT == MVT::i64) { 17531 unsigned DestReg = 0; 17532 switch (Res.first) { 17533 default: break; 17534 case X86::AX: DestReg = X86::RAX; break; 17535 case X86::DX: DestReg = X86::RDX; break; 17536 case X86::CX: DestReg = X86::RCX; break; 17537 case X86::BX: DestReg = X86::RBX; break; 17538 case X86::SI: DestReg = X86::RSI; break; 17539 case X86::DI: DestReg = X86::RDI; break; 17540 case X86::BP: DestReg = X86::RBP; break; 17541 case X86::SP: DestReg = X86::RSP; break; 17542 } 17543 if (DestReg) { 17544 Res.first = DestReg; 17545 Res.second = &X86::GR64RegClass; 17546 } 17547 } 17548 } else if (Res.second == &X86::FR32RegClass || 17549 Res.second == &X86::FR64RegClass || 17550 Res.second == &X86::VR128RegClass) { 17551 // Handle references to XMM physical registers that got mapped into the 17552 // wrong class. 
This can happen with constraints like {xmm0} where the 17553 // target independent register mapper will just pick the first match it can 17554 // find, ignoring the required type. 17555 17556 if (VT == MVT::f32 || VT == MVT::i32) 17557 Res.second = &X86::FR32RegClass; 17558 else if (VT == MVT::f64 || VT == MVT::i64) 17559 Res.second = &X86::FR64RegClass; 17560 else if (X86::VR128RegClass.hasType(VT)) 17561 Res.second = &X86::VR128RegClass; 17562 else if (X86::VR256RegClass.hasType(VT)) 17563 Res.second = &X86::VR256RegClass; 17564 } 17565 17566 return Res; 17567} 17568 17569//===----------------------------------------------------------------------===// 17570// 17571// X86 cost model. 17572// 17573//===----------------------------------------------------------------------===// 17574 17575struct X86CostTblEntry { 17576 int ISD; 17577 MVT Type; 17578 unsigned Cost; 17579}; 17580 17581static int 17582FindInTable(const X86CostTblEntry *Tbl, unsigned len, int ISD, MVT Ty) { 17583 for (unsigned int i = 0; i < len; ++i) 17584 if (Tbl[i].ISD == ISD && Tbl[i].Type == Ty) 17585 return i; 17586 17587 // Could not find an entry. 17588 return -1; 17589} 17590 17591struct X86TypeConversionCostTblEntry { 17592 int ISD; 17593 MVT Dst; 17594 MVT Src; 17595 unsigned Cost; 17596}; 17597 17598static int 17599FindInConvertTable(const X86TypeConversionCostTblEntry *Tbl, unsigned len, 17600 int ISD, MVT Dst, MVT Src) { 17601 for (unsigned int i = 0; i < len; ++i) 17602 if (Tbl[i].ISD == ISD && Tbl[i].Src == Src && Tbl[i].Dst == Dst) 17603 return i; 17604 17605 // Could not find an entry. 17606 return -1; 17607} 17608 17609unsigned 17610X86VectorTargetTransformInfo::getArithmeticInstrCost(unsigned Opcode, 17611 Type *Ty) const { 17612 // Legalize the type. 17613 std::pair<unsigned, MVT> LT = getTypeLegalizationCost(Ty); 17614 17615 int ISD = InstructionOpcodeToISD(Opcode); 17616 assert(ISD && "Invalid opcode"); 17617 17618 const X86Subtarget &ST = TLI->getTargetMachine().getSubtarget<X86Subtarget>(); 17619 17620 static const X86CostTblEntry AVX1CostTable[] = { 17621 // We don't have to scalarize unsupported ops. We can issue two half-sized 17622 // operations and we only need to extract the upper YMM half. 17623 // Two ops + 1 extract + 1 insert = 4. 17624 { ISD::MUL, MVT::v8i32, 4 }, 17625 { ISD::SUB, MVT::v8i32, 4 }, 17626 { ISD::ADD, MVT::v8i32, 4 }, 17627 { ISD::MUL, MVT::v4i64, 4 }, 17628 { ISD::SUB, MVT::v4i64, 4 }, 17629 { ISD::ADD, MVT::v4i64, 4 }, 17630 }; 17631 17632 // Look for AVX1 lowering tricks. 17633 if (ST.hasAVX()) { 17634 int Idx = FindInTable(AVX1CostTable, array_lengthof(AVX1CostTable), ISD, 17635 LT.second); 17636 if (Idx != -1) 17637 return LT.first * AVX1CostTable[Idx].Cost; 17638 } 17639 // Fallback to the default implementation. 17640 return VectorTargetTransformImpl::getArithmeticInstrCost(Opcode, Ty); 17641} 17642 17643unsigned 17644X86VectorTargetTransformInfo::getVectorInstrCost(unsigned Opcode, Type *Val, 17645 unsigned Index) const { 17646 assert(Val->isVectorTy() && "This must be a vector type"); 17647 17648 if (Index != -1U) { 17649 // Legalize the type. 17650 std::pair<unsigned, MVT> LT = getTypeLegalizationCost(Val); 17651 17652 // This type is legalized to a scalar type. 17653 if (!LT.second.isVector()) 17654 return 0; 17655 17656 // The type may be split. Normalize the index to the new type. 17657 unsigned Width = LT.second.getVectorNumElements(); 17658 Index = Index % Width; 17659 17660 // Floating point scalars are already located in index #0. 
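    // e.g. extracting element 0 of a <4 x float> needs no shuffle at all; the
    // scalar already sits in the low lane of the XMM register, so the extract
    // is modeled as free (cost 0) by the check below.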
17661 if (Val->getScalarType()->isFloatingPointTy() && Index == 0) 17662 return 0; 17663 } 17664 17665 return VectorTargetTransformImpl::getVectorInstrCost(Opcode, Val, Index); 17666} 17667 17668unsigned X86VectorTargetTransformInfo::getCmpSelInstrCost(unsigned Opcode, 17669 Type *ValTy, 17670 Type *CondTy) const { 17671 // Legalize the type. 17672 std::pair<unsigned, MVT> LT = getTypeLegalizationCost(ValTy); 17673 17674 MVT MTy = LT.second; 17675 17676 int ISD = InstructionOpcodeToISD(Opcode); 17677 assert(ISD && "Invalid opcode"); 17678 17679 const X86Subtarget &ST = 17680 TLI->getTargetMachine().getSubtarget<X86Subtarget>(); 17681 17682 static const X86CostTblEntry SSE42CostTbl[] = { 17683 { ISD::SETCC, MVT::v2f64, 1 }, 17684 { ISD::SETCC, MVT::v4f32, 1 }, 17685 { ISD::SETCC, MVT::v2i64, 1 }, 17686 { ISD::SETCC, MVT::v4i32, 1 }, 17687 { ISD::SETCC, MVT::v8i16, 1 }, 17688 { ISD::SETCC, MVT::v16i8, 1 }, 17689 }; 17690 17691 static const X86CostTblEntry AVX1CostTbl[] = { 17692 { ISD::SETCC, MVT::v4f64, 1 }, 17693 { ISD::SETCC, MVT::v8f32, 1 }, 17694 // AVX1 does not support 8-wide integer compare. 17695 { ISD::SETCC, MVT::v4i64, 4 }, 17696 { ISD::SETCC, MVT::v8i32, 4 }, 17697 { ISD::SETCC, MVT::v16i16, 4 }, 17698 { ISD::SETCC, MVT::v32i8, 4 }, 17699 }; 17700 17701 static const X86CostTblEntry AVX2CostTbl[] = { 17702 { ISD::SETCC, MVT::v4i64, 1 }, 17703 { ISD::SETCC, MVT::v8i32, 1 }, 17704 { ISD::SETCC, MVT::v16i16, 1 }, 17705 { ISD::SETCC, MVT::v32i8, 1 }, 17706 }; 17707 17708 if (ST.hasSSE42()) { 17709 int Idx = FindInTable(SSE42CostTbl, array_lengthof(SSE42CostTbl), ISD, MTy); 17710 if (Idx != -1) 17711 return LT.first * SSE42CostTbl[Idx].Cost; 17712 } 17713 17714 if (ST.hasAVX()) { 17715 int Idx = FindInTable(AVX1CostTbl, array_lengthof(AVX1CostTbl), ISD, MTy); 17716 if (Idx != -1) 17717 return LT.first * AVX1CostTbl[Idx].Cost; 17718 } 17719 17720 if (ST.hasAVX2()) { 17721 int Idx = FindInTable(AVX2CostTbl, array_lengthof(AVX2CostTbl), ISD, MTy); 17722 if (Idx != -1) 17723 return LT.first * AVX2CostTbl[Idx].Cost; 17724 } 17725 17726 return VectorTargetTransformImpl::getCmpSelInstrCost(Opcode, ValTy, CondTy); 17727} 17728 17729unsigned X86VectorTargetTransformInfo::getCastInstrCost(unsigned Opcode, 17730 Type *Dst, 17731 Type *Src) const { 17732 int ISD = InstructionOpcodeToISD(Opcode); 17733 assert(ISD && "Invalid opcode"); 17734 17735 EVT SrcTy = TLI->getValueType(Src); 17736 EVT DstTy = TLI->getValueType(Dst); 17737 17738 if (!SrcTy.isSimple() || !DstTy.isSimple()) 17739 return VectorTargetTransformImpl::getCastInstrCost(Opcode, Dst, Src); 17740 17741 const X86Subtarget &ST = TLI->getTargetMachine().getSubtarget<X86Subtarget>(); 17742 17743 static const X86TypeConversionCostTblEntry AVXConversionTbl[] = { 17744 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 }, 17745 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 }, 17746 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 }, 17747 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 }, 17748 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, 17749 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 1 }, 17750 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 1 }, 17751 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 1 }, 17752 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 1 }, 17753 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 1 }, 17754 { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 1 }, 17755 { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 1 }, 17756 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 6 }, 17757 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 9 }, 17758 { ISD::TRUNCATE, MVT::v8i32, 
MVT::v8i64, 3 }, 17759 }; 17760 17761 if (ST.hasAVX()) { 17762 int Idx = FindInConvertTable(AVXConversionTbl, 17763 array_lengthof(AVXConversionTbl), 17764 ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT()); 17765 if (Idx != -1) 17766 return AVXConversionTbl[Idx].Cost; 17767 } 17768 17769 return VectorTargetTransformImpl::getCastInstrCost(Opcode, Dst, Src); 17770} 17771 17772