X86ISelLowering.cpp revision a24262a0f5c54f59e3362dd8a050589c508c9923
1//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file defines the interfaces that X86 uses to lower LLVM code into a 11// selection DAG. 12// 13//===----------------------------------------------------------------------===// 14 15#define DEBUG_TYPE "x86-isel" 16#include "X86ISelLowering.h" 17#include "X86.h" 18#include "X86InstrBuilder.h" 19#include "X86TargetMachine.h" 20#include "X86TargetObjectFile.h" 21#include "Utils/X86ShuffleDecode.h" 22#include "llvm/CallingConv.h" 23#include "llvm/Constants.h" 24#include "llvm/DerivedTypes.h" 25#include "llvm/GlobalAlias.h" 26#include "llvm/GlobalVariable.h" 27#include "llvm/Function.h" 28#include "llvm/Instructions.h" 29#include "llvm/Intrinsics.h" 30#include "llvm/LLVMContext.h" 31#include "llvm/CodeGen/IntrinsicLowering.h" 32#include "llvm/CodeGen/MachineFrameInfo.h" 33#include "llvm/CodeGen/MachineFunction.h" 34#include "llvm/CodeGen/MachineInstrBuilder.h" 35#include "llvm/CodeGen/MachineJumpTableInfo.h" 36#include "llvm/CodeGen/MachineModuleInfo.h" 37#include "llvm/CodeGen/MachineRegisterInfo.h" 38#include "llvm/MC/MCAsmInfo.h" 39#include "llvm/MC/MCContext.h" 40#include "llvm/MC/MCExpr.h" 41#include "llvm/MC/MCSymbol.h" 42#include "llvm/ADT/SmallSet.h" 43#include "llvm/ADT/Statistic.h" 44#include "llvm/ADT/StringExtras.h" 45#include "llvm/ADT/VariadicFunction.h" 46#include "llvm/Support/CallSite.h" 47#include "llvm/Support/Debug.h" 48#include "llvm/Support/ErrorHandling.h" 49#include "llvm/Support/MathExtras.h" 50#include "llvm/Target/TargetOptions.h" 51#include <bitset> 52#include <cctype> 53using namespace llvm; 54 55STATISTIC(NumTailCalls, "Number of tail calls"); 56 57// Forward declarations. 58static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, 59 SDValue V2); 60 61/// Generate a DAG to grab 128-bits from a vector > 128 bits. This 62/// sets things up to match to an AVX VEXTRACTF128 instruction or a 63/// simple subregister reference. Idx is an index in the 128 bits we 64/// want. It need not be aligned to a 128-bit bounday. That makes 65/// lowering EXTRACT_VECTOR_ELT operations easier. 66static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal, 67 SelectionDAG &DAG, DebugLoc dl) { 68 EVT VT = Vec.getValueType(); 69 assert(VT.is256BitVector() && "Unexpected vector size!"); 70 EVT ElVT = VT.getVectorElementType(); 71 unsigned Factor = VT.getSizeInBits()/128; 72 EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT, 73 VT.getVectorNumElements()/Factor); 74 75 // Extract from UNDEF is UNDEF. 76 if (Vec.getOpcode() == ISD::UNDEF) 77 return DAG.getUNDEF(ResultVT); 78 79 // Extract the relevant 128 bits. Generate an EXTRACT_SUBVECTOR 80 // we can match to VEXTRACTF128. 81 unsigned ElemsPerChunk = 128 / ElVT.getSizeInBits(); 82 83 // This is the index of the first element of the 128-bit chunk 84 // we want. 85 unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / 128) 86 * ElemsPerChunk); 87 88 SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal); 89 SDValue Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, 90 VecIdx); 91 92 return Result; 93} 94 95/// Generate a DAG to put 128-bits into a vector > 128 bits. 
This 96/// sets things up to match to an AVX VINSERTF128 instruction or a 97/// simple superregister reference. Idx is an index in the 128 bits 98/// we want. It need not be aligned to a 128-bit bounday. That makes 99/// lowering INSERT_VECTOR_ELT operations easier. 100static SDValue Insert128BitVector(SDValue Result, SDValue Vec, 101 unsigned IdxVal, SelectionDAG &DAG, 102 DebugLoc dl) { 103 // Inserting UNDEF is Result 104 if (Vec.getOpcode() == ISD::UNDEF) 105 return Result; 106 107 EVT VT = Vec.getValueType(); 108 assert(VT.is128BitVector() && "Unexpected vector size!"); 109 110 EVT ElVT = VT.getVectorElementType(); 111 EVT ResultVT = Result.getValueType(); 112 113 // Insert the relevant 128 bits. 114 unsigned ElemsPerChunk = 128/ElVT.getSizeInBits(); 115 116 // This is the index of the first element of the 128-bit chunk 117 // we want. 118 unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/128) 119 * ElemsPerChunk); 120 121 SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal); 122 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, 123 VecIdx); 124} 125 126/// Concat two 128-bit vectors into a 256 bit vector using VINSERTF128 127/// instructions. This is used because creating CONCAT_VECTOR nodes of 128/// BUILD_VECTORS returns a larger BUILD_VECTOR while we're trying to lower 129/// large BUILD_VECTORS. 130static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT, 131 unsigned NumElems, SelectionDAG &DAG, 132 DebugLoc dl) { 133 SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl); 134 return Insert128BitVector(V, V2, NumElems/2, DAG, dl); 135} 136 137static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) { 138 const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>(); 139 bool is64Bit = Subtarget->is64Bit(); 140 141 if (Subtarget->isTargetEnvMacho()) { 142 if (is64Bit) 143 return new X86_64MachoTargetObjectFile(); 144 return new TargetLoweringObjectFileMachO(); 145 } 146 147 if (Subtarget->isTargetLinux()) 148 return new X86LinuxTargetObjectFile(); 149 if (Subtarget->isTargetELF()) 150 return new TargetLoweringObjectFileELF(); 151 if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho()) 152 return new TargetLoweringObjectFileCOFF(); 153 llvm_unreachable("unknown subtarget type"); 154} 155 156X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) 157 : TargetLowering(TM, createTLOF(TM)) { 158 Subtarget = &TM.getSubtarget<X86Subtarget>(); 159 X86ScalarSSEf64 = Subtarget->hasSSE2(); 160 X86ScalarSSEf32 = Subtarget->hasSSE1(); 161 X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP; 162 163 RegInfo = TM.getRegisterInfo(); 164 TD = getDataLayout(); 165 166 // Set up the TargetLowering object. 167 static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }; 168 169 // X86 is weird, it always uses i8 for shift amounts and setcc results. 170 setBooleanContents(ZeroOrOneBooleanContent); 171 // X86-SSE is even stranger. It uses -1 or 0 for vector masks. 172 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); 173 174 // For 64-bit since we have so many registers use the ILP scheduler, for 175 // 32-bit code use the register pressure specific scheduling. 176 // For Atom, always use ILP scheduling. 
177 if (Subtarget->isAtom()) 178 setSchedulingPreference(Sched::ILP); 179 else if (Subtarget->is64Bit()) 180 setSchedulingPreference(Sched::ILP); 181 else 182 setSchedulingPreference(Sched::RegPressure); 183 setStackPointerRegisterToSaveRestore(X86StackPtr); 184 185 // Bypass i32 with i8 on Atom when compiling with O2 186 if (Subtarget->hasSlowDivide() && TM.getOptLevel() >= CodeGenOpt::Default) 187 addBypassSlowDiv(32, 8); 188 189 if (Subtarget->isTargetWindows() && !Subtarget->isTargetCygMing()) { 190 // Setup Windows compiler runtime calls. 191 setLibcallName(RTLIB::SDIV_I64, "_alldiv"); 192 setLibcallName(RTLIB::UDIV_I64, "_aulldiv"); 193 setLibcallName(RTLIB::SREM_I64, "_allrem"); 194 setLibcallName(RTLIB::UREM_I64, "_aullrem"); 195 setLibcallName(RTLIB::MUL_I64, "_allmul"); 196 setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall); 197 setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall); 198 setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall); 199 setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall); 200 setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall); 201 202 // The _ftol2 runtime function has an unusual calling conv, which 203 // is modeled by a special pseudo-instruction. 204 setLibcallName(RTLIB::FPTOUINT_F64_I64, 0); 205 setLibcallName(RTLIB::FPTOUINT_F32_I64, 0); 206 setLibcallName(RTLIB::FPTOUINT_F64_I32, 0); 207 setLibcallName(RTLIB::FPTOUINT_F32_I32, 0); 208 } 209 210 if (Subtarget->isTargetDarwin()) { 211 // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp. 212 setUseUnderscoreSetJmp(false); 213 setUseUnderscoreLongJmp(false); 214 } else if (Subtarget->isTargetMingw()) { 215 // MS runtime is weird: it exports _setjmp, but longjmp! 216 setUseUnderscoreSetJmp(true); 217 setUseUnderscoreLongJmp(false); 218 } else { 219 setUseUnderscoreSetJmp(true); 220 setUseUnderscoreLongJmp(true); 221 } 222 223 // Set up the register classes. 224 addRegisterClass(MVT::i8, &X86::GR8RegClass); 225 addRegisterClass(MVT::i16, &X86::GR16RegClass); 226 addRegisterClass(MVT::i32, &X86::GR32RegClass); 227 if (Subtarget->is64Bit()) 228 addRegisterClass(MVT::i64, &X86::GR64RegClass); 229 230 setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote); 231 232 // We don't accept any truncstore of integer registers. 233 setTruncStoreAction(MVT::i64, MVT::i32, Expand); 234 setTruncStoreAction(MVT::i64, MVT::i16, Expand); 235 setTruncStoreAction(MVT::i64, MVT::i8 , Expand); 236 setTruncStoreAction(MVT::i32, MVT::i16, Expand); 237 setTruncStoreAction(MVT::i32, MVT::i8 , Expand); 238 setTruncStoreAction(MVT::i16, MVT::i8, Expand); 239 240 // SETOEQ and SETUNE require checking two conditions. 241 setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand); 242 setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand); 243 setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand); 244 setCondCodeAction(ISD::SETUNE, MVT::f32, Expand); 245 setCondCodeAction(ISD::SETUNE, MVT::f64, Expand); 246 setCondCodeAction(ISD::SETUNE, MVT::f80, Expand); 247 248 // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this 249 // operation. 
250 setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote); 251 setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote); 252 setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote); 253 254 if (Subtarget->is64Bit()) { 255 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote); 256 setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom); 257 } else if (!TM.Options.UseSoftFloat) { 258 // We have an algorithm for SSE2->double, and we turn this into a 259 // 64-bit FILD followed by conditional FADD for other targets. 260 setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom); 261 // We have an algorithm for SSE2, and we turn this into a 64-bit 262 // FILD for other targets. 263 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Custom); 264 } 265 266 // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have 267 // this operation. 268 setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote); 269 setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote); 270 271 if (!TM.Options.UseSoftFloat) { 272 // SSE has no i16 to fp conversion, only i32 273 if (X86ScalarSSEf32) { 274 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote); 275 // f32 and f64 cases are Legal, f80 case is not 276 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom); 277 } else { 278 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Custom); 279 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom); 280 } 281 } else { 282 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote); 283 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Promote); 284 } 285 286 // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64 287 // are Legal, f80 is custom lowered. 288 setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom); 289 setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom); 290 291 // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTS's, as X86 doesn't have 292 // this operation. 293 setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote); 294 setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote); 295 296 if (X86ScalarSSEf32) { 297 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote); 298 // f32 and f64 cases are Legal, f80 case is not 299 setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom); 300 } else { 301 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Custom); 302 setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom); 303 } 304 305 // Handle FP_TO_UINT by promoting the destination to a larger signed 306 // conversion. 307 setOperationAction(ISD::FP_TO_UINT , MVT::i1 , Promote); 308 setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote); 309 setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote); 310 311 if (Subtarget->is64Bit()) { 312 setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Expand); 313 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote); 314 } else if (!TM.Options.UseSoftFloat) { 315 // Since AVX is a superset of SSE3, only check for SSE here. 316 if (Subtarget->hasSSE1() && !Subtarget->hasSSE3()) 317 // Expand FP_TO_UINT into a select. 318 // FIXME: We would like to use a Custom expander here eventually to do 319 // the optimal thing for SSE vs. the default expansion in the legalizer. 320 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Expand); 321 else 322 // With SSE3 we can use fisttpll to convert to a signed i64; without 323 // SSE, we're stuck with a fistpll. 
324 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Custom); 325 } 326 327 if (isTargetFTOL()) { 328 // Use the _ftol2 runtime function, which has a pseudo-instruction 329 // to handle its weird calling convention. 330 setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Custom); 331 } 332 333 // TODO: when we have SSE, these could be more efficient, by using movd/movq. 334 if (!X86ScalarSSEf64) { 335 setOperationAction(ISD::BITCAST , MVT::f32 , Expand); 336 setOperationAction(ISD::BITCAST , MVT::i32 , Expand); 337 if (Subtarget->is64Bit()) { 338 setOperationAction(ISD::BITCAST , MVT::f64 , Expand); 339 // Without SSE, i64->f64 goes through memory. 340 setOperationAction(ISD::BITCAST , MVT::i64 , Expand); 341 } 342 } 343 344 // Scalar integer divide and remainder are lowered to use operations that 345 // produce two results, to match the available instructions. This exposes 346 // the two-result form to trivial CSE, which is able to combine x/y and x%y 347 // into a single instruction. 348 // 349 // Scalar integer multiply-high is also lowered to use two-result 350 // operations, to match the available instructions. However, plain multiply 351 // (low) operations are left as Legal, as there are single-result 352 // instructions for this in x86. Using the two-result multiply instructions 353 // when both high and low results are needed must be arranged by dagcombine. 354 for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) { 355 MVT VT = IntVTs[i]; 356 setOperationAction(ISD::MULHS, VT, Expand); 357 setOperationAction(ISD::MULHU, VT, Expand); 358 setOperationAction(ISD::SDIV, VT, Expand); 359 setOperationAction(ISD::UDIV, VT, Expand); 360 setOperationAction(ISD::SREM, VT, Expand); 361 setOperationAction(ISD::UREM, VT, Expand); 362 363 // Add/Sub overflow ops with MVT::Glues are lowered to EFLAGS dependences. 364 setOperationAction(ISD::ADDC, VT, Custom); 365 setOperationAction(ISD::ADDE, VT, Custom); 366 setOperationAction(ISD::SUBC, VT, Custom); 367 setOperationAction(ISD::SUBE, VT, Custom); 368 } 369 370 setOperationAction(ISD::BR_JT , MVT::Other, Expand); 371 setOperationAction(ISD::BRCOND , MVT::Other, Custom); 372 setOperationAction(ISD::BR_CC , MVT::Other, Expand); 373 setOperationAction(ISD::SELECT_CC , MVT::Other, Expand); 374 if (Subtarget->is64Bit()) 375 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal); 376 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Legal); 377 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal); 378 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand); 379 setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand); 380 setOperationAction(ISD::FREM , MVT::f32 , Expand); 381 setOperationAction(ISD::FREM , MVT::f64 , Expand); 382 setOperationAction(ISD::FREM , MVT::f80 , Expand); 383 setOperationAction(ISD::FLT_ROUNDS_ , MVT::i32 , Custom); 384 385 // Promote the i8 variants and force them on up to i32 which has a shorter 386 // encoding. 
387 setOperationAction(ISD::CTTZ , MVT::i8 , Promote); 388 AddPromotedToType (ISD::CTTZ , MVT::i8 , MVT::i32); 389 setOperationAction(ISD::CTTZ_ZERO_UNDEF , MVT::i8 , Promote); 390 AddPromotedToType (ISD::CTTZ_ZERO_UNDEF , MVT::i8 , MVT::i32); 391 if (Subtarget->hasBMI()) { 392 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16 , Expand); 393 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32 , Expand); 394 if (Subtarget->is64Bit()) 395 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand); 396 } else { 397 setOperationAction(ISD::CTTZ , MVT::i16 , Custom); 398 setOperationAction(ISD::CTTZ , MVT::i32 , Custom); 399 if (Subtarget->is64Bit()) 400 setOperationAction(ISD::CTTZ , MVT::i64 , Custom); 401 } 402 403 if (Subtarget->hasLZCNT()) { 404 // When promoting the i8 variants, force them to i32 for a shorter 405 // encoding. 406 setOperationAction(ISD::CTLZ , MVT::i8 , Promote); 407 AddPromotedToType (ISD::CTLZ , MVT::i8 , MVT::i32); 408 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Promote); 409 AddPromotedToType (ISD::CTLZ_ZERO_UNDEF, MVT::i8 , MVT::i32); 410 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Expand); 411 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Expand); 412 if (Subtarget->is64Bit()) 413 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand); 414 } else { 415 setOperationAction(ISD::CTLZ , MVT::i8 , Custom); 416 setOperationAction(ISD::CTLZ , MVT::i16 , Custom); 417 setOperationAction(ISD::CTLZ , MVT::i32 , Custom); 418 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Custom); 419 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Custom); 420 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Custom); 421 if (Subtarget->is64Bit()) { 422 setOperationAction(ISD::CTLZ , MVT::i64 , Custom); 423 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom); 424 } 425 } 426 427 if (Subtarget->hasPOPCNT()) { 428 setOperationAction(ISD::CTPOP , MVT::i8 , Promote); 429 } else { 430 setOperationAction(ISD::CTPOP , MVT::i8 , Expand); 431 setOperationAction(ISD::CTPOP , MVT::i16 , Expand); 432 setOperationAction(ISD::CTPOP , MVT::i32 , Expand); 433 if (Subtarget->is64Bit()) 434 setOperationAction(ISD::CTPOP , MVT::i64 , Expand); 435 } 436 437 setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom); 438 setOperationAction(ISD::BSWAP , MVT::i16 , Expand); 439 440 // These should be promoted to a larger select which is supported. 441 setOperationAction(ISD::SELECT , MVT::i1 , Promote); 442 // X86 wants to expand cmov itself. 
443 setOperationAction(ISD::SELECT , MVT::i8 , Custom); 444 setOperationAction(ISD::SELECT , MVT::i16 , Custom); 445 setOperationAction(ISD::SELECT , MVT::i32 , Custom); 446 setOperationAction(ISD::SELECT , MVT::f32 , Custom); 447 setOperationAction(ISD::SELECT , MVT::f64 , Custom); 448 setOperationAction(ISD::SELECT , MVT::f80 , Custom); 449 setOperationAction(ISD::SETCC , MVT::i8 , Custom); 450 setOperationAction(ISD::SETCC , MVT::i16 , Custom); 451 setOperationAction(ISD::SETCC , MVT::i32 , Custom); 452 setOperationAction(ISD::SETCC , MVT::f32 , Custom); 453 setOperationAction(ISD::SETCC , MVT::f64 , Custom); 454 setOperationAction(ISD::SETCC , MVT::f80 , Custom); 455 if (Subtarget->is64Bit()) { 456 setOperationAction(ISD::SELECT , MVT::i64 , Custom); 457 setOperationAction(ISD::SETCC , MVT::i64 , Custom); 458 } 459 setOperationAction(ISD::EH_RETURN , MVT::Other, Custom); 460 // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intened to support 461 // SjLj exception handling but a light-weight setjmp/longjmp replacement to 462 // support continuation, user-level threading, and etc.. As a result, no 463 // other SjLj exception interfaces are implemented and please don't build 464 // your own exception handling based on them. 465 // LLVM/Clang supports zero-cost DWARF exception handling. 466 setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom); 467 setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom); 468 469 // Darwin ABI issue. 470 setOperationAction(ISD::ConstantPool , MVT::i32 , Custom); 471 setOperationAction(ISD::JumpTable , MVT::i32 , Custom); 472 setOperationAction(ISD::GlobalAddress , MVT::i32 , Custom); 473 setOperationAction(ISD::GlobalTLSAddress, MVT::i32 , Custom); 474 if (Subtarget->is64Bit()) 475 setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom); 476 setOperationAction(ISD::ExternalSymbol , MVT::i32 , Custom); 477 setOperationAction(ISD::BlockAddress , MVT::i32 , Custom); 478 if (Subtarget->is64Bit()) { 479 setOperationAction(ISD::ConstantPool , MVT::i64 , Custom); 480 setOperationAction(ISD::JumpTable , MVT::i64 , Custom); 481 setOperationAction(ISD::GlobalAddress , MVT::i64 , Custom); 482 setOperationAction(ISD::ExternalSymbol, MVT::i64 , Custom); 483 setOperationAction(ISD::BlockAddress , MVT::i64 , Custom); 484 } 485 // 64-bit addm sub, shl, sra, srl (iff 32-bit x86) 486 setOperationAction(ISD::SHL_PARTS , MVT::i32 , Custom); 487 setOperationAction(ISD::SRA_PARTS , MVT::i32 , Custom); 488 setOperationAction(ISD::SRL_PARTS , MVT::i32 , Custom); 489 if (Subtarget->is64Bit()) { 490 setOperationAction(ISD::SHL_PARTS , MVT::i64 , Custom); 491 setOperationAction(ISD::SRA_PARTS , MVT::i64 , Custom); 492 setOperationAction(ISD::SRL_PARTS , MVT::i64 , Custom); 493 } 494 495 if (Subtarget->hasSSE1()) 496 setOperationAction(ISD::PREFETCH , MVT::Other, Legal); 497 498 setOperationAction(ISD::MEMBARRIER , MVT::Other, Custom); 499 setOperationAction(ISD::ATOMIC_FENCE , MVT::Other, Custom); 500 501 // On X86 and X86-64, atomic operations are lowered to locked instructions. 502 // Locked instructions, in turn, have implicit fence semantics (all memory 503 // operations are flushed before issuing the locked instruction, and they 504 // are not buffered), so we can fold away the common pattern of 505 // fence-atomic-fence. 
506 setShouldFoldAtomicFences(true); 507 508 // Expand certain atomics 509 for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) { 510 MVT VT = IntVTs[i]; 511 setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Custom); 512 setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom); 513 setOperationAction(ISD::ATOMIC_STORE, VT, Custom); 514 } 515 516 if (!Subtarget->is64Bit()) { 517 setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom); 518 setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom); 519 setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom); 520 setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom); 521 setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Custom); 522 setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Custom); 523 setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Custom); 524 setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Custom); 525 setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i64, Custom); 526 setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i64, Custom); 527 setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i64, Custom); 528 setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i64, Custom); 529 } 530 531 if (Subtarget->hasCmpxchg16b()) { 532 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i128, Custom); 533 } 534 535 // FIXME - use subtarget debug flags 536 if (!Subtarget->isTargetDarwin() && 537 !Subtarget->isTargetELF() && 538 !Subtarget->isTargetCygMing()) { 539 setOperationAction(ISD::EH_LABEL, MVT::Other, Expand); 540 } 541 542 setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand); 543 setOperationAction(ISD::EHSELECTION, MVT::i64, Expand); 544 setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand); 545 setOperationAction(ISD::EHSELECTION, MVT::i32, Expand); 546 if (Subtarget->is64Bit()) { 547 setExceptionPointerRegister(X86::RAX); 548 setExceptionSelectorRegister(X86::RDX); 549 } else { 550 setExceptionPointerRegister(X86::EAX); 551 setExceptionSelectorRegister(X86::EDX); 552 } 553 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom); 554 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom); 555 556 setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom); 557 setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom); 558 559 setOperationAction(ISD::TRAP, MVT::Other, Legal); 560 setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal); 561 562 // VASTART needs to be custom lowered to use the VarArgsFrameIndex 563 setOperationAction(ISD::VASTART , MVT::Other, Custom); 564 setOperationAction(ISD::VAEND , MVT::Other, Expand); 565 if (Subtarget->is64Bit()) { 566 setOperationAction(ISD::VAARG , MVT::Other, Custom); 567 setOperationAction(ISD::VACOPY , MVT::Other, Custom); 568 } else { 569 setOperationAction(ISD::VAARG , MVT::Other, Expand); 570 setOperationAction(ISD::VACOPY , MVT::Other, Expand); 571 } 572 573 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); 574 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); 575 576 if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho()) 577 setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ? 578 MVT::i64 : MVT::i32, Custom); 579 else if (TM.Options.EnableSegmentedStacks) 580 setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ? 581 MVT::i64 : MVT::i32, Custom); 582 else 583 setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ? 584 MVT::i64 : MVT::i32, Expand); 585 586 if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) { 587 // f32 and f64 use SSE. 588 // Set up the FP register classes. 
589 addRegisterClass(MVT::f32, &X86::FR32RegClass); 590 addRegisterClass(MVT::f64, &X86::FR64RegClass); 591 592 // Use ANDPD to simulate FABS. 593 setOperationAction(ISD::FABS , MVT::f64, Custom); 594 setOperationAction(ISD::FABS , MVT::f32, Custom); 595 596 // Use XORP to simulate FNEG. 597 setOperationAction(ISD::FNEG , MVT::f64, Custom); 598 setOperationAction(ISD::FNEG , MVT::f32, Custom); 599 600 // Use ANDPD and ORPD to simulate FCOPYSIGN. 601 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom); 602 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); 603 604 // Lower this to FGETSIGNx86 plus an AND. 605 setOperationAction(ISD::FGETSIGN, MVT::i64, Custom); 606 setOperationAction(ISD::FGETSIGN, MVT::i32, Custom); 607 608 // We don't support sin/cos/fmod 609 setOperationAction(ISD::FSIN , MVT::f64, Expand); 610 setOperationAction(ISD::FCOS , MVT::f64, Expand); 611 setOperationAction(ISD::FSIN , MVT::f32, Expand); 612 setOperationAction(ISD::FCOS , MVT::f32, Expand); 613 614 // Expand FP immediates into loads from the stack, except for the special 615 // cases we handle. 616 addLegalFPImmediate(APFloat(+0.0)); // xorpd 617 addLegalFPImmediate(APFloat(+0.0f)); // xorps 618 } else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) { 619 // Use SSE for f32, x87 for f64. 620 // Set up the FP register classes. 621 addRegisterClass(MVT::f32, &X86::FR32RegClass); 622 addRegisterClass(MVT::f64, &X86::RFP64RegClass); 623 624 // Use ANDPS to simulate FABS. 625 setOperationAction(ISD::FABS , MVT::f32, Custom); 626 627 // Use XORP to simulate FNEG. 628 setOperationAction(ISD::FNEG , MVT::f32, Custom); 629 630 setOperationAction(ISD::UNDEF, MVT::f64, Expand); 631 632 // Use ANDPS and ORPS to simulate FCOPYSIGN. 633 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 634 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); 635 636 // We don't support sin/cos/fmod 637 setOperationAction(ISD::FSIN , MVT::f32, Expand); 638 setOperationAction(ISD::FCOS , MVT::f32, Expand); 639 640 // Special cases we handle for FP constants. 641 addLegalFPImmediate(APFloat(+0.0f)); // xorps 642 addLegalFPImmediate(APFloat(+0.0)); // FLD0 643 addLegalFPImmediate(APFloat(+1.0)); // FLD1 644 addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS 645 addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS 646 647 if (!TM.Options.UnsafeFPMath) { 648 setOperationAction(ISD::FSIN , MVT::f64 , Expand); 649 setOperationAction(ISD::FCOS , MVT::f64 , Expand); 650 } 651 } else if (!TM.Options.UseSoftFloat) { 652 // f32 and f64 in x87. 653 // Set up the FP register classes. 
654 addRegisterClass(MVT::f64, &X86::RFP64RegClass); 655 addRegisterClass(MVT::f32, &X86::RFP32RegClass); 656 657 setOperationAction(ISD::UNDEF, MVT::f64, Expand); 658 setOperationAction(ISD::UNDEF, MVT::f32, Expand); 659 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 660 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); 661 662 if (!TM.Options.UnsafeFPMath) { 663 setOperationAction(ISD::FSIN , MVT::f32 , Expand); 664 setOperationAction(ISD::FSIN , MVT::f64 , Expand); 665 setOperationAction(ISD::FCOS , MVT::f32 , Expand); 666 setOperationAction(ISD::FCOS , MVT::f64 , Expand); 667 } 668 addLegalFPImmediate(APFloat(+0.0)); // FLD0 669 addLegalFPImmediate(APFloat(+1.0)); // FLD1 670 addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS 671 addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS 672 addLegalFPImmediate(APFloat(+0.0f)); // FLD0 673 addLegalFPImmediate(APFloat(+1.0f)); // FLD1 674 addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS 675 addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS 676 } 677 678 // We don't support FMA. 679 setOperationAction(ISD::FMA, MVT::f64, Expand); 680 setOperationAction(ISD::FMA, MVT::f32, Expand); 681 682 // Long double always uses X87. 683 if (!TM.Options.UseSoftFloat) { 684 addRegisterClass(MVT::f80, &X86::RFP80RegClass); 685 setOperationAction(ISD::UNDEF, MVT::f80, Expand); 686 setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand); 687 { 688 APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended); 689 addLegalFPImmediate(TmpFlt); // FLD0 690 TmpFlt.changeSign(); 691 addLegalFPImmediate(TmpFlt); // FLD0/FCHS 692 693 bool ignored; 694 APFloat TmpFlt2(+1.0); 695 TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven, 696 &ignored); 697 addLegalFPImmediate(TmpFlt2); // FLD1 698 TmpFlt2.changeSign(); 699 addLegalFPImmediate(TmpFlt2); // FLD1/FCHS 700 } 701 702 if (!TM.Options.UnsafeFPMath) { 703 setOperationAction(ISD::FSIN , MVT::f80 , Expand); 704 setOperationAction(ISD::FCOS , MVT::f80 , Expand); 705 } 706 707 setOperationAction(ISD::FFLOOR, MVT::f80, Expand); 708 setOperationAction(ISD::FCEIL, MVT::f80, Expand); 709 setOperationAction(ISD::FTRUNC, MVT::f80, Expand); 710 setOperationAction(ISD::FRINT, MVT::f80, Expand); 711 setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand); 712 setOperationAction(ISD::FMA, MVT::f80, Expand); 713 } 714 715 // Always use a library call for pow. 716 setOperationAction(ISD::FPOW , MVT::f32 , Expand); 717 setOperationAction(ISD::FPOW , MVT::f64 , Expand); 718 setOperationAction(ISD::FPOW , MVT::f80 , Expand); 719 720 setOperationAction(ISD::FLOG, MVT::f80, Expand); 721 setOperationAction(ISD::FLOG2, MVT::f80, Expand); 722 setOperationAction(ISD::FLOG10, MVT::f80, Expand); 723 setOperationAction(ISD::FEXP, MVT::f80, Expand); 724 setOperationAction(ISD::FEXP2, MVT::f80, Expand); 725 726 // First set operation action for all vector types to either promote 727 // (for widening) or expand (for scalarization). Then we will selectively 728 // turn on ones that can be effectively codegen'd. 
729 for (int VT = MVT::FIRST_VECTOR_VALUETYPE; 730 VT <= MVT::LAST_VECTOR_VALUETYPE; ++VT) { 731 setOperationAction(ISD::ADD , (MVT::SimpleValueType)VT, Expand); 732 setOperationAction(ISD::SUB , (MVT::SimpleValueType)VT, Expand); 733 setOperationAction(ISD::FADD, (MVT::SimpleValueType)VT, Expand); 734 setOperationAction(ISD::FNEG, (MVT::SimpleValueType)VT, Expand); 735 setOperationAction(ISD::FSUB, (MVT::SimpleValueType)VT, Expand); 736 setOperationAction(ISD::MUL , (MVT::SimpleValueType)VT, Expand); 737 setOperationAction(ISD::FMUL, (MVT::SimpleValueType)VT, Expand); 738 setOperationAction(ISD::SDIV, (MVT::SimpleValueType)VT, Expand); 739 setOperationAction(ISD::UDIV, (MVT::SimpleValueType)VT, Expand); 740 setOperationAction(ISD::FDIV, (MVT::SimpleValueType)VT, Expand); 741 setOperationAction(ISD::SREM, (MVT::SimpleValueType)VT, Expand); 742 setOperationAction(ISD::UREM, (MVT::SimpleValueType)VT, Expand); 743 setOperationAction(ISD::LOAD, (MVT::SimpleValueType)VT, Expand); 744 setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::SimpleValueType)VT, Expand); 745 setOperationAction(ISD::EXTRACT_VECTOR_ELT,(MVT::SimpleValueType)VT,Expand); 746 setOperationAction(ISD::INSERT_VECTOR_ELT,(MVT::SimpleValueType)VT, Expand); 747 setOperationAction(ISD::EXTRACT_SUBVECTOR,(MVT::SimpleValueType)VT,Expand); 748 setOperationAction(ISD::INSERT_SUBVECTOR,(MVT::SimpleValueType)VT,Expand); 749 setOperationAction(ISD::FABS, (MVT::SimpleValueType)VT, Expand); 750 setOperationAction(ISD::FSIN, (MVT::SimpleValueType)VT, Expand); 751 setOperationAction(ISD::FCOS, (MVT::SimpleValueType)VT, Expand); 752 setOperationAction(ISD::FREM, (MVT::SimpleValueType)VT, Expand); 753 setOperationAction(ISD::FMA, (MVT::SimpleValueType)VT, Expand); 754 setOperationAction(ISD::FPOWI, (MVT::SimpleValueType)VT, Expand); 755 setOperationAction(ISD::FSQRT, (MVT::SimpleValueType)VT, Expand); 756 setOperationAction(ISD::FCOPYSIGN, (MVT::SimpleValueType)VT, Expand); 757 setOperationAction(ISD::FFLOOR, (MVT::SimpleValueType)VT, Expand); 758 setOperationAction(ISD::SMUL_LOHI, (MVT::SimpleValueType)VT, Expand); 759 setOperationAction(ISD::UMUL_LOHI, (MVT::SimpleValueType)VT, Expand); 760 setOperationAction(ISD::SDIVREM, (MVT::SimpleValueType)VT, Expand); 761 setOperationAction(ISD::UDIVREM, (MVT::SimpleValueType)VT, Expand); 762 setOperationAction(ISD::FPOW, (MVT::SimpleValueType)VT, Expand); 763 setOperationAction(ISD::CTPOP, (MVT::SimpleValueType)VT, Expand); 764 setOperationAction(ISD::CTTZ, (MVT::SimpleValueType)VT, Expand); 765 setOperationAction(ISD::CTTZ_ZERO_UNDEF, (MVT::SimpleValueType)VT, Expand); 766 setOperationAction(ISD::CTLZ, (MVT::SimpleValueType)VT, Expand); 767 setOperationAction(ISD::CTLZ_ZERO_UNDEF, (MVT::SimpleValueType)VT, Expand); 768 setOperationAction(ISD::SHL, (MVT::SimpleValueType)VT, Expand); 769 setOperationAction(ISD::SRA, (MVT::SimpleValueType)VT, Expand); 770 setOperationAction(ISD::SRL, (MVT::SimpleValueType)VT, Expand); 771 setOperationAction(ISD::ROTL, (MVT::SimpleValueType)VT, Expand); 772 setOperationAction(ISD::ROTR, (MVT::SimpleValueType)VT, Expand); 773 setOperationAction(ISD::BSWAP, (MVT::SimpleValueType)VT, Expand); 774 setOperationAction(ISD::SETCC, (MVT::SimpleValueType)VT, Expand); 775 setOperationAction(ISD::FLOG, (MVT::SimpleValueType)VT, Expand); 776 setOperationAction(ISD::FLOG2, (MVT::SimpleValueType)VT, Expand); 777 setOperationAction(ISD::FLOG10, (MVT::SimpleValueType)VT, Expand); 778 setOperationAction(ISD::FEXP, (MVT::SimpleValueType)VT, Expand); 779 setOperationAction(ISD::FEXP2, 
(MVT::SimpleValueType)VT, Expand); 780 setOperationAction(ISD::FP_TO_UINT, (MVT::SimpleValueType)VT, Expand); 781 setOperationAction(ISD::FP_TO_SINT, (MVT::SimpleValueType)VT, Expand); 782 setOperationAction(ISD::UINT_TO_FP, (MVT::SimpleValueType)VT, Expand); 783 setOperationAction(ISD::SINT_TO_FP, (MVT::SimpleValueType)VT, Expand); 784 setOperationAction(ISD::SIGN_EXTEND_INREG, (MVT::SimpleValueType)VT,Expand); 785 setOperationAction(ISD::TRUNCATE, (MVT::SimpleValueType)VT, Expand); 786 setOperationAction(ISD::SIGN_EXTEND, (MVT::SimpleValueType)VT, Expand); 787 setOperationAction(ISD::ZERO_EXTEND, (MVT::SimpleValueType)VT, Expand); 788 setOperationAction(ISD::ANY_EXTEND, (MVT::SimpleValueType)VT, Expand); 789 setOperationAction(ISD::VSELECT, (MVT::SimpleValueType)VT, Expand); 790 for (int InnerVT = MVT::FIRST_VECTOR_VALUETYPE; 791 InnerVT <= MVT::LAST_VECTOR_VALUETYPE; ++InnerVT) 792 setTruncStoreAction((MVT::SimpleValueType)VT, 793 (MVT::SimpleValueType)InnerVT, Expand); 794 setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT, Expand); 795 setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT, Expand); 796 setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT, Expand); 797 } 798 799 // FIXME: In order to prevent SSE instructions being expanded to MMX ones 800 // with -msoft-float, disable use of MMX as well. 801 if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) { 802 addRegisterClass(MVT::x86mmx, &X86::VR64RegClass); 803 // No operations on x86mmx supported, everything uses intrinsics. 804 } 805 806 // MMX-sized vectors (other than x86mmx) are expected to be expanded 807 // into smaller operations. 808 setOperationAction(ISD::MULHS, MVT::v8i8, Expand); 809 setOperationAction(ISD::MULHS, MVT::v4i16, Expand); 810 setOperationAction(ISD::MULHS, MVT::v2i32, Expand); 811 setOperationAction(ISD::MULHS, MVT::v1i64, Expand); 812 setOperationAction(ISD::AND, MVT::v8i8, Expand); 813 setOperationAction(ISD::AND, MVT::v4i16, Expand); 814 setOperationAction(ISD::AND, MVT::v2i32, Expand); 815 setOperationAction(ISD::AND, MVT::v1i64, Expand); 816 setOperationAction(ISD::OR, MVT::v8i8, Expand); 817 setOperationAction(ISD::OR, MVT::v4i16, Expand); 818 setOperationAction(ISD::OR, MVT::v2i32, Expand); 819 setOperationAction(ISD::OR, MVT::v1i64, Expand); 820 setOperationAction(ISD::XOR, MVT::v8i8, Expand); 821 setOperationAction(ISD::XOR, MVT::v4i16, Expand); 822 setOperationAction(ISD::XOR, MVT::v2i32, Expand); 823 setOperationAction(ISD::XOR, MVT::v1i64, Expand); 824 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Expand); 825 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Expand); 826 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Expand); 827 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Expand); 828 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand); 829 setOperationAction(ISD::SELECT, MVT::v8i8, Expand); 830 setOperationAction(ISD::SELECT, MVT::v4i16, Expand); 831 setOperationAction(ISD::SELECT, MVT::v2i32, Expand); 832 setOperationAction(ISD::SELECT, MVT::v1i64, Expand); 833 setOperationAction(ISD::BITCAST, MVT::v8i8, Expand); 834 setOperationAction(ISD::BITCAST, MVT::v4i16, Expand); 835 setOperationAction(ISD::BITCAST, MVT::v2i32, Expand); 836 setOperationAction(ISD::BITCAST, MVT::v1i64, Expand); 837 838 if (!TM.Options.UseSoftFloat && Subtarget->hasSSE1()) { 839 addRegisterClass(MVT::v4f32, &X86::VR128RegClass); 840 841 setOperationAction(ISD::FADD, MVT::v4f32, Legal); 842 setOperationAction(ISD::FSUB, MVT::v4f32, Legal); 843 
setOperationAction(ISD::FMUL, MVT::v4f32, Legal); 844 setOperationAction(ISD::FDIV, MVT::v4f32, Legal); 845 setOperationAction(ISD::FSQRT, MVT::v4f32, Legal); 846 setOperationAction(ISD::FNEG, MVT::v4f32, Custom); 847 setOperationAction(ISD::FABS, MVT::v4f32, Custom); 848 setOperationAction(ISD::LOAD, MVT::v4f32, Legal); 849 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom); 850 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom); 851 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom); 852 setOperationAction(ISD::SELECT, MVT::v4f32, Custom); 853 } 854 855 if (!TM.Options.UseSoftFloat && Subtarget->hasSSE2()) { 856 addRegisterClass(MVT::v2f64, &X86::VR128RegClass); 857 858 // FIXME: Unfortunately -soft-float and -no-implicit-float means XMM 859 // registers cannot be used even for integer operations. 860 addRegisterClass(MVT::v16i8, &X86::VR128RegClass); 861 addRegisterClass(MVT::v8i16, &X86::VR128RegClass); 862 addRegisterClass(MVT::v4i32, &X86::VR128RegClass); 863 addRegisterClass(MVT::v2i64, &X86::VR128RegClass); 864 865 setOperationAction(ISD::ADD, MVT::v16i8, Legal); 866 setOperationAction(ISD::ADD, MVT::v8i16, Legal); 867 setOperationAction(ISD::ADD, MVT::v4i32, Legal); 868 setOperationAction(ISD::ADD, MVT::v2i64, Legal); 869 setOperationAction(ISD::MUL, MVT::v2i64, Custom); 870 setOperationAction(ISD::SUB, MVT::v16i8, Legal); 871 setOperationAction(ISD::SUB, MVT::v8i16, Legal); 872 setOperationAction(ISD::SUB, MVT::v4i32, Legal); 873 setOperationAction(ISD::SUB, MVT::v2i64, Legal); 874 setOperationAction(ISD::MUL, MVT::v8i16, Legal); 875 setOperationAction(ISD::FADD, MVT::v2f64, Legal); 876 setOperationAction(ISD::FSUB, MVT::v2f64, Legal); 877 setOperationAction(ISD::FMUL, MVT::v2f64, Legal); 878 setOperationAction(ISD::FDIV, MVT::v2f64, Legal); 879 setOperationAction(ISD::FSQRT, MVT::v2f64, Legal); 880 setOperationAction(ISD::FNEG, MVT::v2f64, Custom); 881 setOperationAction(ISD::FABS, MVT::v2f64, Custom); 882 883 setOperationAction(ISD::SETCC, MVT::v2i64, Custom); 884 setOperationAction(ISD::SETCC, MVT::v16i8, Custom); 885 setOperationAction(ISD::SETCC, MVT::v8i16, Custom); 886 setOperationAction(ISD::SETCC, MVT::v4i32, Custom); 887 888 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom); 889 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom); 890 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom); 891 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom); 892 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom); 893 894 // Custom lower build_vector, vector_shuffle, and extract_vector_elt. 
895 for (int i = MVT::v16i8; i != MVT::v2i64; ++i) { 896 MVT VT = (MVT::SimpleValueType)i; 897 // Do not attempt to custom lower non-power-of-2 vectors 898 if (!isPowerOf2_32(VT.getVectorNumElements())) 899 continue; 900 // Do not attempt to custom lower non-128-bit vectors 901 if (!VT.is128BitVector()) 902 continue; 903 setOperationAction(ISD::BUILD_VECTOR, VT, Custom); 904 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); 905 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); 906 } 907 908 setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom); 909 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom); 910 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom); 911 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom); 912 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom); 913 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom); 914 915 if (Subtarget->is64Bit()) { 916 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom); 917 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom); 918 } 919 920 // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64. 921 for (int i = MVT::v16i8; i != MVT::v2i64; ++i) { 922 MVT VT = (MVT::SimpleValueType)i; 923 924 // Do not attempt to promote non-128-bit vectors 925 if (!VT.is128BitVector()) 926 continue; 927 928 setOperationAction(ISD::AND, VT, Promote); 929 AddPromotedToType (ISD::AND, VT, MVT::v2i64); 930 setOperationAction(ISD::OR, VT, Promote); 931 AddPromotedToType (ISD::OR, VT, MVT::v2i64); 932 setOperationAction(ISD::XOR, VT, Promote); 933 AddPromotedToType (ISD::XOR, VT, MVT::v2i64); 934 setOperationAction(ISD::LOAD, VT, Promote); 935 AddPromotedToType (ISD::LOAD, VT, MVT::v2i64); 936 setOperationAction(ISD::SELECT, VT, Promote); 937 AddPromotedToType (ISD::SELECT, VT, MVT::v2i64); 938 } 939 940 setTruncStoreAction(MVT::f64, MVT::f32, Expand); 941 942 // Custom lower v2i64 and v2f64 selects. 943 setOperationAction(ISD::LOAD, MVT::v2f64, Legal); 944 setOperationAction(ISD::LOAD, MVT::v2i64, Legal); 945 setOperationAction(ISD::SELECT, MVT::v2f64, Custom); 946 setOperationAction(ISD::SELECT, MVT::v2i64, Custom); 947 948 setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal); 949 setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal); 950 951 setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom); 952 setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom); 953 // As there is no 64-bit GPR available, we need build a special custom 954 // sequence to convert from v2i32 to v2f32. 
955 if (!Subtarget->is64Bit()) 956 setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom); 957 958 setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom); 959 setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom); 960 961 setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, Legal); 962 } 963 964 if (Subtarget->hasSSE41()) { 965 setOperationAction(ISD::FFLOOR, MVT::f32, Legal); 966 setOperationAction(ISD::FCEIL, MVT::f32, Legal); 967 setOperationAction(ISD::FTRUNC, MVT::f32, Legal); 968 setOperationAction(ISD::FRINT, MVT::f32, Legal); 969 setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal); 970 setOperationAction(ISD::FFLOOR, MVT::f64, Legal); 971 setOperationAction(ISD::FCEIL, MVT::f64, Legal); 972 setOperationAction(ISD::FTRUNC, MVT::f64, Legal); 973 setOperationAction(ISD::FRINT, MVT::f64, Legal); 974 setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal); 975 976 setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal); 977 setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal); 978 979 // FIXME: Do we need to handle scalar-to-vector here? 980 setOperationAction(ISD::MUL, MVT::v4i32, Legal); 981 982 setOperationAction(ISD::VSELECT, MVT::v2f64, Legal); 983 setOperationAction(ISD::VSELECT, MVT::v2i64, Legal); 984 setOperationAction(ISD::VSELECT, MVT::v16i8, Legal); 985 setOperationAction(ISD::VSELECT, MVT::v4i32, Legal); 986 setOperationAction(ISD::VSELECT, MVT::v4f32, Legal); 987 988 // i8 and i16 vectors are custom , because the source register and source 989 // source memory operand types are not the same width. f32 vectors are 990 // custom since the immediate controlling the insert encodes additional 991 // information. 992 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom); 993 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom); 994 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom); 995 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom); 996 997 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom); 998 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom); 999 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom); 1000 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom); 1001 1002 // FIXME: these should be Legal but thats only for the case where 1003 // the index is constant. For now custom expand to deal with that. 
1004 if (Subtarget->is64Bit()) { 1005 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom); 1006 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom); 1007 } 1008 } 1009 1010 if (Subtarget->hasSSE2()) { 1011 setOperationAction(ISD::SRL, MVT::v8i16, Custom); 1012 setOperationAction(ISD::SRL, MVT::v16i8, Custom); 1013 1014 setOperationAction(ISD::SHL, MVT::v8i16, Custom); 1015 setOperationAction(ISD::SHL, MVT::v16i8, Custom); 1016 1017 setOperationAction(ISD::SRA, MVT::v8i16, Custom); 1018 setOperationAction(ISD::SRA, MVT::v16i8, Custom); 1019 1020 if (Subtarget->hasAVX2()) { 1021 setOperationAction(ISD::SRL, MVT::v2i64, Legal); 1022 setOperationAction(ISD::SRL, MVT::v4i32, Legal); 1023 1024 setOperationAction(ISD::SHL, MVT::v2i64, Legal); 1025 setOperationAction(ISD::SHL, MVT::v4i32, Legal); 1026 1027 setOperationAction(ISD::SRA, MVT::v4i32, Legal); 1028 } else { 1029 setOperationAction(ISD::SRL, MVT::v2i64, Custom); 1030 setOperationAction(ISD::SRL, MVT::v4i32, Custom); 1031 1032 setOperationAction(ISD::SHL, MVT::v2i64, Custom); 1033 setOperationAction(ISD::SHL, MVT::v4i32, Custom); 1034 1035 setOperationAction(ISD::SRA, MVT::v4i32, Custom); 1036 } 1037 } 1038 1039 if (!TM.Options.UseSoftFloat && Subtarget->hasAVX()) { 1040 addRegisterClass(MVT::v32i8, &X86::VR256RegClass); 1041 addRegisterClass(MVT::v16i16, &X86::VR256RegClass); 1042 addRegisterClass(MVT::v8i32, &X86::VR256RegClass); 1043 addRegisterClass(MVT::v8f32, &X86::VR256RegClass); 1044 addRegisterClass(MVT::v4i64, &X86::VR256RegClass); 1045 addRegisterClass(MVT::v4f64, &X86::VR256RegClass); 1046 1047 setOperationAction(ISD::LOAD, MVT::v8f32, Legal); 1048 setOperationAction(ISD::LOAD, MVT::v4f64, Legal); 1049 setOperationAction(ISD::LOAD, MVT::v4i64, Legal); 1050 1051 setOperationAction(ISD::FADD, MVT::v8f32, Legal); 1052 setOperationAction(ISD::FSUB, MVT::v8f32, Legal); 1053 setOperationAction(ISD::FMUL, MVT::v8f32, Legal); 1054 setOperationAction(ISD::FDIV, MVT::v8f32, Legal); 1055 setOperationAction(ISD::FSQRT, MVT::v8f32, Legal); 1056 setOperationAction(ISD::FFLOOR, MVT::v8f32, Legal); 1057 setOperationAction(ISD::FNEG, MVT::v8f32, Custom); 1058 setOperationAction(ISD::FABS, MVT::v8f32, Custom); 1059 1060 setOperationAction(ISD::FADD, MVT::v4f64, Legal); 1061 setOperationAction(ISD::FSUB, MVT::v4f64, Legal); 1062 setOperationAction(ISD::FMUL, MVT::v4f64, Legal); 1063 setOperationAction(ISD::FDIV, MVT::v4f64, Legal); 1064 setOperationAction(ISD::FSQRT, MVT::v4f64, Legal); 1065 setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal); 1066 setOperationAction(ISD::FNEG, MVT::v4f64, Custom); 1067 setOperationAction(ISD::FABS, MVT::v4f64, Custom); 1068 1069 setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom); 1070 1071 setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom); 1072 1073 setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal); 1074 setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal); 1075 setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal); 1076 1077 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom); 1078 setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom); 1079 setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom); 1080 1081 setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, Legal); 1082 1083 setOperationAction(ISD::SRL, MVT::v16i16, Custom); 1084 setOperationAction(ISD::SRL, MVT::v32i8, Custom); 1085 1086 setOperationAction(ISD::SHL, MVT::v16i16, Custom); 1087 setOperationAction(ISD::SHL, MVT::v32i8, Custom); 1088 1089 setOperationAction(ISD::SRA, MVT::v16i16, Custom); 1090 
setOperationAction(ISD::SRA, MVT::v32i8, Custom); 1091 1092 setOperationAction(ISD::SETCC, MVT::v32i8, Custom); 1093 setOperationAction(ISD::SETCC, MVT::v16i16, Custom); 1094 setOperationAction(ISD::SETCC, MVT::v8i32, Custom); 1095 setOperationAction(ISD::SETCC, MVT::v4i64, Custom); 1096 1097 setOperationAction(ISD::SELECT, MVT::v4f64, Custom); 1098 setOperationAction(ISD::SELECT, MVT::v4i64, Custom); 1099 setOperationAction(ISD::SELECT, MVT::v8f32, Custom); 1100 1101 setOperationAction(ISD::VSELECT, MVT::v4f64, Legal); 1102 setOperationAction(ISD::VSELECT, MVT::v4i64, Legal); 1103 setOperationAction(ISD::VSELECT, MVT::v8i32, Legal); 1104 setOperationAction(ISD::VSELECT, MVT::v8f32, Legal); 1105 1106 if (Subtarget->hasFMA() || Subtarget->hasFMA4()) { 1107 setOperationAction(ISD::FMA, MVT::v8f32, Custom); 1108 setOperationAction(ISD::FMA, MVT::v4f64, Custom); 1109 setOperationAction(ISD::FMA, MVT::v4f32, Custom); 1110 setOperationAction(ISD::FMA, MVT::v2f64, Custom); 1111 setOperationAction(ISD::FMA, MVT::f32, Custom); 1112 setOperationAction(ISD::FMA, MVT::f64, Custom); 1113 } 1114 1115 if (Subtarget->hasAVX2()) { 1116 setOperationAction(ISD::ADD, MVT::v4i64, Legal); 1117 setOperationAction(ISD::ADD, MVT::v8i32, Legal); 1118 setOperationAction(ISD::ADD, MVT::v16i16, Legal); 1119 setOperationAction(ISD::ADD, MVT::v32i8, Legal); 1120 1121 setOperationAction(ISD::SUB, MVT::v4i64, Legal); 1122 setOperationAction(ISD::SUB, MVT::v8i32, Legal); 1123 setOperationAction(ISD::SUB, MVT::v16i16, Legal); 1124 setOperationAction(ISD::SUB, MVT::v32i8, Legal); 1125 1126 setOperationAction(ISD::MUL, MVT::v4i64, Custom); 1127 setOperationAction(ISD::MUL, MVT::v8i32, Legal); 1128 setOperationAction(ISD::MUL, MVT::v16i16, Legal); 1129 // Don't lower v32i8 because there is no 128-bit byte mul 1130 1131 setOperationAction(ISD::VSELECT, MVT::v32i8, Legal); 1132 1133 setOperationAction(ISD::SRL, MVT::v4i64, Legal); 1134 setOperationAction(ISD::SRL, MVT::v8i32, Legal); 1135 1136 setOperationAction(ISD::SHL, MVT::v4i64, Legal); 1137 setOperationAction(ISD::SHL, MVT::v8i32, Legal); 1138 1139 setOperationAction(ISD::SRA, MVT::v8i32, Legal); 1140 } else { 1141 setOperationAction(ISD::ADD, MVT::v4i64, Custom); 1142 setOperationAction(ISD::ADD, MVT::v8i32, Custom); 1143 setOperationAction(ISD::ADD, MVT::v16i16, Custom); 1144 setOperationAction(ISD::ADD, MVT::v32i8, Custom); 1145 1146 setOperationAction(ISD::SUB, MVT::v4i64, Custom); 1147 setOperationAction(ISD::SUB, MVT::v8i32, Custom); 1148 setOperationAction(ISD::SUB, MVT::v16i16, Custom); 1149 setOperationAction(ISD::SUB, MVT::v32i8, Custom); 1150 1151 setOperationAction(ISD::MUL, MVT::v4i64, Custom); 1152 setOperationAction(ISD::MUL, MVT::v8i32, Custom); 1153 setOperationAction(ISD::MUL, MVT::v16i16, Custom); 1154 // Don't lower v32i8 because there is no 128-bit byte mul 1155 1156 setOperationAction(ISD::SRL, MVT::v4i64, Custom); 1157 setOperationAction(ISD::SRL, MVT::v8i32, Custom); 1158 1159 setOperationAction(ISD::SHL, MVT::v4i64, Custom); 1160 setOperationAction(ISD::SHL, MVT::v8i32, Custom); 1161 1162 setOperationAction(ISD::SRA, MVT::v8i32, Custom); 1163 } 1164 1165 // Custom lower several nodes for 256-bit types. 1166 for (int i = MVT::FIRST_VECTOR_VALUETYPE; 1167 i <= MVT::LAST_VECTOR_VALUETYPE; ++i) { 1168 MVT VT = (MVT::SimpleValueType)i; 1169 1170 // Extract subvector is special because the value type 1171 // (result) is 128-bit but the source is 256-bit wide. 
1172 if (VT.is128BitVector()) 1173 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); 1174 1175 // Do not attempt to custom lower other non-256-bit vectors 1176 if (!VT.is256BitVector()) 1177 continue; 1178 1179 setOperationAction(ISD::BUILD_VECTOR, VT, Custom); 1180 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); 1181 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); 1182 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); 1183 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom); 1184 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); 1185 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); 1186 } 1187 1188 // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64. 1189 for (int i = MVT::v32i8; i != MVT::v4i64; ++i) { 1190 MVT VT = (MVT::SimpleValueType)i; 1191 1192 // Do not attempt to promote non-256-bit vectors 1193 if (!VT.is256BitVector()) 1194 continue; 1195 1196 setOperationAction(ISD::AND, VT, Promote); 1197 AddPromotedToType (ISD::AND, VT, MVT::v4i64); 1198 setOperationAction(ISD::OR, VT, Promote); 1199 AddPromotedToType (ISD::OR, VT, MVT::v4i64); 1200 setOperationAction(ISD::XOR, VT, Promote); 1201 AddPromotedToType (ISD::XOR, VT, MVT::v4i64); 1202 setOperationAction(ISD::LOAD, VT, Promote); 1203 AddPromotedToType (ISD::LOAD, VT, MVT::v4i64); 1204 setOperationAction(ISD::SELECT, VT, Promote); 1205 AddPromotedToType (ISD::SELECT, VT, MVT::v4i64); 1206 } 1207 } 1208 1209 // SIGN_EXTEND_INREGs are evaluated by the extend type. Handle the expansion 1210 // of this type with custom code. 1211 for (int VT = MVT::FIRST_VECTOR_VALUETYPE; 1212 VT != MVT::LAST_VECTOR_VALUETYPE; VT++) { 1213 setOperationAction(ISD::SIGN_EXTEND_INREG, (MVT::SimpleValueType)VT, 1214 Custom); 1215 } 1216 1217 // We want to custom lower some of our intrinsics. 1218 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); 1219 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom); 1220 1221 1222 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't 1223 // handle type legalization for these operations here. 1224 // 1225 // FIXME: We really should do custom legalization for addition and 1226 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better 1227 // than generic legalization for 64-bit multiplication-with-overflow, though. 1228 for (unsigned i = 0, e = 3+Subtarget->is64Bit(); i != e; ++i) { 1229 // Add/Sub/Mul with overflow operations are custom lowered. 1230 MVT VT = IntVTs[i]; 1231 setOperationAction(ISD::SADDO, VT, Custom); 1232 setOperationAction(ISD::UADDO, VT, Custom); 1233 setOperationAction(ISD::SSUBO, VT, Custom); 1234 setOperationAction(ISD::USUBO, VT, Custom); 1235 setOperationAction(ISD::SMULO, VT, Custom); 1236 setOperationAction(ISD::UMULO, VT, Custom); 1237 } 1238 1239 // There are no 8-bit 3-address imul/mul instructions 1240 setOperationAction(ISD::SMULO, MVT::i8, Expand); 1241 setOperationAction(ISD::UMULO, MVT::i8, Expand); 1242 1243 if (!Subtarget->is64Bit()) { 1244 // These libcalls are not available in 32-bit. 
1245 setLibcallName(RTLIB::SHL_I128, 0); 1246 setLibcallName(RTLIB::SRL_I128, 0); 1247 setLibcallName(RTLIB::SRA_I128, 0); 1248 } 1249 1250 // We have target-specific dag combine patterns for the following nodes: 1251 setTargetDAGCombine(ISD::VECTOR_SHUFFLE); 1252 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT); 1253 setTargetDAGCombine(ISD::VSELECT); 1254 setTargetDAGCombine(ISD::SELECT); 1255 setTargetDAGCombine(ISD::SHL); 1256 setTargetDAGCombine(ISD::SRA); 1257 setTargetDAGCombine(ISD::SRL); 1258 setTargetDAGCombine(ISD::OR); 1259 setTargetDAGCombine(ISD::AND); 1260 setTargetDAGCombine(ISD::ADD); 1261 setTargetDAGCombine(ISD::FADD); 1262 setTargetDAGCombine(ISD::FSUB); 1263 setTargetDAGCombine(ISD::FMA); 1264 setTargetDAGCombine(ISD::SUB); 1265 setTargetDAGCombine(ISD::LOAD); 1266 setTargetDAGCombine(ISD::STORE); 1267 setTargetDAGCombine(ISD::ZERO_EXTEND); 1268 setTargetDAGCombine(ISD::ANY_EXTEND); 1269 setTargetDAGCombine(ISD::SIGN_EXTEND); 1270 setTargetDAGCombine(ISD::TRUNCATE); 1271 setTargetDAGCombine(ISD::SINT_TO_FP); 1272 setTargetDAGCombine(ISD::SETCC); 1273 if (Subtarget->is64Bit()) 1274 setTargetDAGCombine(ISD::MUL); 1275 setTargetDAGCombine(ISD::XOR); 1276 1277 computeRegisterProperties(); 1278 1279 // On Darwin, -Os means optimize for size without hurting performance, 1280 // do not reduce the limit. 1281 maxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores 1282 maxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 16 : 8; 1283 maxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores 1284 maxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 8 : 4; 1285 maxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores 1286 maxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4; 1287 setPrefLoopAlignment(4); // 2^4 bytes. 1288 benefitFromCodePlacementOpt = true; 1289 1290 // Predictable cmov don't hurt on atom because it's in-order. 1291 predictableSelectIsExpensive = !Subtarget->isAtom(); 1292 1293 setPrefFunctionAlignment(4); // 2^4 bytes. 1294} 1295 1296 1297EVT X86TargetLowering::getSetCCResultType(EVT VT) const { 1298 if (!VT.isVector()) return MVT::i8; 1299 return VT.changeVectorElementTypeToInteger(); 1300} 1301 1302 1303/// getMaxByValAlign - Helper for getByValTypeAlignment to determine 1304/// the desired ByVal argument alignment. 1305static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) { 1306 if (MaxAlign == 16) 1307 return; 1308 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) { 1309 if (VTy->getBitWidth() == 128) 1310 MaxAlign = 16; 1311 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) { 1312 unsigned EltAlign = 0; 1313 getMaxByValAlign(ATy->getElementType(), EltAlign); 1314 if (EltAlign > MaxAlign) 1315 MaxAlign = EltAlign; 1316 } else if (StructType *STy = dyn_cast<StructType>(Ty)) { 1317 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 1318 unsigned EltAlign = 0; 1319 getMaxByValAlign(STy->getElementType(i), EltAlign); 1320 if (EltAlign > MaxAlign) 1321 MaxAlign = EltAlign; 1322 if (MaxAlign == 16) 1323 break; 1324 } 1325 } 1326} 1327 1328/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate 1329/// function arguments in the caller parameter area. For X86, aggregates 1330/// that contain SSE vectors are placed at 16-byte boundaries while the rest 1331/// are at 4-byte boundaries. 1332unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const { 1333 if (Subtarget->is64Bit()) { 1334 // Max of 8 and alignment of type. 
    unsigned TyAlign = TD->getABITypeAlignment(Ty);
    if (TyAlign > 8)
      return TyAlign;
    return 8;
  }

  unsigned Align = 4;
  if (Subtarget->hasSSE1())
    getMaxByValAlign(Ty, Align);
  return Align;
}

/// getOptimalMemOpType - Returns the target specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
/// lowering. If DstAlign is zero, the destination alignment can satisfy any
/// constraint. Similarly, if SrcAlign is zero there is no need to check it
/// against an alignment requirement, probably because the source does not
/// need to be loaded. If 'IsZeroVal' is true, it is safe to return a
/// non-scalar-integer type, e.g. empty string source, constant, or loaded
/// from memory. 'MemcpyStrSrc' indicates whether the memcpy source is
/// constant so it does not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
EVT
X86TargetLowering::getOptimalMemOpType(uint64_t Size,
                                       unsigned DstAlign, unsigned SrcAlign,
                                       bool IsZeroVal,
                                       bool MemcpyStrSrc,
                                       MachineFunction &MF) const {
  // FIXME: This turns off use of xmm stores for memset/memcpy on targets like
  // linux. This is because the stack realignment code can't handle certain
  // cases like PR2962. This should be removed when PR2962 is fixed.
  const Function *F = MF.getFunction();
  if (IsZeroVal &&
      !F->getFnAttributes().hasAttribute(Attributes::NoImplicitFloat)) {
    if (Size >= 16 &&
        (Subtarget->isUnalignedMemAccessFast() ||
         ((DstAlign == 0 || DstAlign >= 16) &&
          (SrcAlign == 0 || SrcAlign >= 16))) &&
        Subtarget->getStackAlignment() >= 16) {
      if (Subtarget->getStackAlignment() >= 32) {
        if (Subtarget->hasAVX2())
          return MVT::v8i32;
        if (Subtarget->hasAVX())
          return MVT::v8f32;
      }
      if (Subtarget->hasSSE2())
        return MVT::v4i32;
      if (Subtarget->hasSSE1())
        return MVT::v4f32;
    } else if (!MemcpyStrSrc && Size >= 8 &&
               !Subtarget->is64Bit() &&
               Subtarget->getStackAlignment() >= 8 &&
               Subtarget->hasSSE2()) {
      // Do not use f64 to lower memcpy if the source is a string constant.
      // It's better to use i32 to avoid the loads.
      return MVT::f64;
    }
  }
  if (Subtarget->is64Bit() && Size >= 8)
    return MVT::i64;
  return MVT::i32;
}

/// getJumpTableEncoding - Return the entry encoding for a jump table in the
/// current function. The returned value is a member of the
/// MachineJumpTableInfo::JTEntryKind enum.
unsigned X86TargetLowering::getJumpTableEncoding() const {
  // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
  // symbol.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT())
    return MachineJumpTableInfo::EK_Custom32;

  // Otherwise, use the normal jump table encoding heuristics.
  return TargetLowering::getJumpTableEncoding();
}

const MCExpr *
X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                                             const MachineBasicBlock *MBB,
                                             unsigned uid, MCContext &Ctx) const {
  assert(getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
         Subtarget->isPICStyleGOT());
  // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
  // entries.
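  // For illustration, such an entry is printed roughly as
  //   .long .LBB7_2@GOTOFF
  // i.e. the basic block's address relative to the GOT base rather than an
  // absolute address, which keeps the table position-independent.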
1422 return MCSymbolRefExpr::Create(MBB->getSymbol(), 1423 MCSymbolRefExpr::VK_GOTOFF, Ctx); 1424} 1425 1426/// getPICJumpTableRelocaBase - Returns relocation base for the given PIC 1427/// jumptable. 1428SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table, 1429 SelectionDAG &DAG) const { 1430 if (!Subtarget->is64Bit()) 1431 // This doesn't have DebugLoc associated with it, but is not really the 1432 // same as a Register. 1433 return DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), getPointerTy()); 1434 return Table; 1435} 1436 1437/// getPICJumpTableRelocBaseExpr - This returns the relocation base for the 1438/// given PIC jumptable, the same as getPICJumpTableRelocBase, but as an 1439/// MCExpr. 1440const MCExpr *X86TargetLowering:: 1441getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, 1442 MCContext &Ctx) const { 1443 // X86-64 uses RIP relative addressing based on the jump table label. 1444 if (Subtarget->isPICStyleRIPRel()) 1445 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); 1446 1447 // Otherwise, the reference is relative to the PIC base. 1448 return MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx); 1449} 1450 1451// FIXME: Why this routine is here? Move to RegInfo! 1452std::pair<const TargetRegisterClass*, uint8_t> 1453X86TargetLowering::findRepresentativeClass(EVT VT) const{ 1454 const TargetRegisterClass *RRC = 0; 1455 uint8_t Cost = 1; 1456 switch (VT.getSimpleVT().SimpleTy) { 1457 default: 1458 return TargetLowering::findRepresentativeClass(VT); 1459 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64: 1460 RRC = Subtarget->is64Bit() ? 1461 (const TargetRegisterClass*)&X86::GR64RegClass : 1462 (const TargetRegisterClass*)&X86::GR32RegClass; 1463 break; 1464 case MVT::x86mmx: 1465 RRC = &X86::VR64RegClass; 1466 break; 1467 case MVT::f32: case MVT::f64: 1468 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64: 1469 case MVT::v4f32: case MVT::v2f64: 1470 case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32: 1471 case MVT::v4f64: 1472 RRC = &X86::VR128RegClass; 1473 break; 1474 } 1475 return std::make_pair(RRC, Cost); 1476} 1477 1478bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace, 1479 unsigned &Offset) const { 1480 if (!Subtarget->isTargetLinux()) 1481 return false; 1482 1483 if (Subtarget->is64Bit()) { 1484 // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs: 1485 Offset = 0x28; 1486 if (getTargetMachine().getCodeModel() == CodeModel::Kernel) 1487 AddressSpace = 256; 1488 else 1489 AddressSpace = 257; 1490 } else { 1491 // %gs:0x14 on i386 1492 Offset = 0x14; 1493 AddressSpace = 256; 1494 } 1495 return true; 1496} 1497 1498 1499//===----------------------------------------------------------------------===// 1500// Return Value Calling Convention Implementation 1501//===----------------------------------------------------------------------===// 1502 1503#include "X86GenCallingConv.inc" 1504 1505bool 1506X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv, 1507 MachineFunction &MF, bool isVarArg, 1508 const SmallVectorImpl<ISD::OutputArg> &Outs, 1509 LLVMContext &Context) const { 1510 SmallVector<CCValAssign, 16> RVLocs; 1511 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 1512 RVLocs, Context); 1513 return CCInfo.CheckReturn(Outs, RetCC_X86); 1514} 1515 1516SDValue 1517X86TargetLowering::LowerReturn(SDValue Chain, 1518 CallingConv::ID CallConv, bool isVarArg, 1519 const SmallVectorImpl<ISD::OutputArg> &Outs, 1520 const 
SmallVectorImpl<SDValue> &OutVals, 1521 DebugLoc dl, SelectionDAG &DAG) const { 1522 MachineFunction &MF = DAG.getMachineFunction(); 1523 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1524 1525 SmallVector<CCValAssign, 16> RVLocs; 1526 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 1527 RVLocs, *DAG.getContext()); 1528 CCInfo.AnalyzeReturn(Outs, RetCC_X86); 1529 1530 // Add the regs to the liveout set for the function. 1531 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); 1532 for (unsigned i = 0; i != RVLocs.size(); ++i) 1533 if (RVLocs[i].isRegLoc() && !MRI.isLiveOut(RVLocs[i].getLocReg())) 1534 MRI.addLiveOut(RVLocs[i].getLocReg()); 1535 1536 SDValue Flag; 1537 1538 SmallVector<SDValue, 6> RetOps; 1539 RetOps.push_back(Chain); // Operand #0 = Chain (updated below) 1540 // Operand #1 = Bytes To Pop 1541 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), 1542 MVT::i16)); 1543 1544 // Copy the result values into the output registers. 1545 for (unsigned i = 0; i != RVLocs.size(); ++i) { 1546 CCValAssign &VA = RVLocs[i]; 1547 assert(VA.isRegLoc() && "Can only return in registers!"); 1548 SDValue ValToCopy = OutVals[i]; 1549 EVT ValVT = ValToCopy.getValueType(); 1550 1551 // Promote values to the appropriate types 1552 if (VA.getLocInfo() == CCValAssign::SExt) 1553 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy); 1554 else if (VA.getLocInfo() == CCValAssign::ZExt) 1555 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy); 1556 else if (VA.getLocInfo() == CCValAssign::AExt) 1557 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy); 1558 else if (VA.getLocInfo() == CCValAssign::BCvt) 1559 ValToCopy = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), ValToCopy); 1560 1561 // If this is x86-64, and we disabled SSE, we can't return FP values, 1562 // or SSE or MMX vectors. 1563 if ((ValVT == MVT::f32 || ValVT == MVT::f64 || 1564 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) && 1565 (Subtarget->is64Bit() && !Subtarget->hasSSE1())) { 1566 report_fatal_error("SSE register return with SSE disabled"); 1567 } 1568 // Likewise we can't return F64 values with SSE1 only. gcc does so, but 1569 // llvm-gcc has never done it right and no one has noticed, so this 1570 // should be OK for now. 1571 if (ValVT == MVT::f64 && 1572 (Subtarget->is64Bit() && !Subtarget->hasSSE2())) 1573 report_fatal_error("SSE2 register return with SSE2 disabled"); 1574 1575 // Returns in ST0/ST1 are handled specially: these are pushed as operands to 1576 // the RET instruction and handled by the FP Stackifier. 1577 if (VA.getLocReg() == X86::ST0 || 1578 VA.getLocReg() == X86::ST1) { 1579 // If this is a copy from an xmm register to ST(0), use an FPExtend to 1580 // change the value to the FP stack register class. 1581 if (isScalarFPTypeInSSEReg(VA.getValVT())) 1582 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy); 1583 RetOps.push_back(ValToCopy); 1584 // Don't emit a copytoreg. 1585 continue; 1586 } 1587 1588 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64 1589 // which is returned in RAX / RDX. 
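    // The block below bitcasts an x86mmx value to i64 and widens it to v2i64
    // (or to v4f32 when SSE2 is unavailable) so that the copy into XMM0/XMM1
    // uses a legal register class.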
1590 if (Subtarget->is64Bit()) { 1591 if (ValVT == MVT::x86mmx) { 1592 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) { 1593 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy); 1594 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, 1595 ValToCopy); 1596 // If we don't have SSE2 available, convert to v4f32 so the generated 1597 // register is legal. 1598 if (!Subtarget->hasSSE2()) 1599 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,ValToCopy); 1600 } 1601 } 1602 } 1603 1604 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag); 1605 Flag = Chain.getValue(1); 1606 } 1607 1608 // The x86-64 ABI for returning structs by value requires that we copy 1609 // the sret argument into %rax for the return. We saved the argument into 1610 // a virtual register in the entry block, so now we copy the value out 1611 // and into %rax. 1612 if (Subtarget->is64Bit() && 1613 DAG.getMachineFunction().getFunction()->hasStructRetAttr()) { 1614 MachineFunction &MF = DAG.getMachineFunction(); 1615 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1616 unsigned Reg = FuncInfo->getSRetReturnReg(); 1617 assert(Reg && 1618 "SRetReturnReg should have been set in LowerFormalArguments()."); 1619 SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy()); 1620 1621 Chain = DAG.getCopyToReg(Chain, dl, X86::RAX, Val, Flag); 1622 Flag = Chain.getValue(1); 1623 1624 // RAX now acts like a return value. 1625 MRI.addLiveOut(X86::RAX); 1626 } 1627 1628 RetOps[0] = Chain; // Update chain. 1629 1630 // Add the flag if we have it. 1631 if (Flag.getNode()) 1632 RetOps.push_back(Flag); 1633 1634 return DAG.getNode(X86ISD::RET_FLAG, dl, 1635 MVT::Other, &RetOps[0], RetOps.size()); 1636} 1637 1638bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { 1639 if (N->getNumValues() != 1) 1640 return false; 1641 if (!N->hasNUsesOfValue(1, 0)) 1642 return false; 1643 1644 SDValue TCChain = Chain; 1645 SDNode *Copy = *N->use_begin(); 1646 if (Copy->getOpcode() == ISD::CopyToReg) { 1647 // If the copy has a glue operand, we conservatively assume it isn't safe to 1648 // perform a tail call. 1649 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue) 1650 return false; 1651 TCChain = Copy->getOperand(0); 1652 } else if (Copy->getOpcode() != ISD::FP_EXTEND) 1653 return false; 1654 1655 bool HasRet = false; 1656 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); 1657 UI != UE; ++UI) { 1658 if (UI->getOpcode() != X86ISD::RET_FLAG) 1659 return false; 1660 HasRet = true; 1661 } 1662 1663 if (!HasRet) 1664 return false; 1665 1666 Chain = TCChain; 1667 return true; 1668} 1669 1670EVT 1671X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT, 1672 ISD::NodeType ExtendKind) const { 1673 MVT ReturnMVT; 1674 // TODO: Is this also valid on 32-bit? 1675 if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND) 1676 ReturnMVT = MVT::i8; 1677 else 1678 ReturnMVT = MVT::i32; 1679 1680 EVT MinVT = getRegisterType(Context, ReturnMVT); 1681 return VT.bitsLT(MinVT) ? MinVT : VT; 1682} 1683 1684/// LowerCallResult - Lower the result values of a call into the 1685/// appropriate copies out of appropriate physical registers. 
1686/// 1687SDValue 1688X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 1689 CallingConv::ID CallConv, bool isVarArg, 1690 const SmallVectorImpl<ISD::InputArg> &Ins, 1691 DebugLoc dl, SelectionDAG &DAG, 1692 SmallVectorImpl<SDValue> &InVals) const { 1693 1694 // Assign locations to each value returned by this call. 1695 SmallVector<CCValAssign, 16> RVLocs; 1696 bool Is64Bit = Subtarget->is64Bit(); 1697 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 1698 getTargetMachine(), RVLocs, *DAG.getContext()); 1699 CCInfo.AnalyzeCallResult(Ins, RetCC_X86); 1700 1701 // Copy all of the result registers out of their specified physreg. 1702 for (unsigned i = 0; i != RVLocs.size(); ++i) { 1703 CCValAssign &VA = RVLocs[i]; 1704 EVT CopyVT = VA.getValVT(); 1705 1706 // If this is x86-64, and we disabled SSE, we can't return FP values 1707 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) && 1708 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) { 1709 report_fatal_error("SSE register return with SSE disabled"); 1710 } 1711 1712 SDValue Val; 1713 1714 // If this is a call to a function that returns an fp value on the floating 1715 // point stack, we must guarantee the value is popped from the stack, so 1716 // a CopyFromReg is not good enough - the copy instruction may be eliminated 1717 // if the return value is not used. We use the FpPOP_RETVAL instruction 1718 // instead. 1719 if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) { 1720 // If we prefer to use the value in xmm registers, copy it out as f80 and 1721 // use a truncate to move it from fp stack reg to xmm reg. 1722 if (isScalarFPTypeInSSEReg(VA.getValVT())) CopyVT = MVT::f80; 1723 SDValue Ops[] = { Chain, InFlag }; 1724 Chain = SDValue(DAG.getMachineNode(X86::FpPOP_RETVAL, dl, CopyVT, 1725 MVT::Other, MVT::Glue, Ops, 2), 1); 1726 Val = Chain.getValue(0); 1727 1728 // Round the f80 to the right size, which also moves it to the appropriate 1729 // xmm register. 1730 if (CopyVT != VA.getValVT()) 1731 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val, 1732 // This truncation won't change the value. 1733 DAG.getIntPtrConstant(1)); 1734 } else { 1735 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), 1736 CopyVT, InFlag).getValue(1); 1737 Val = Chain.getValue(0); 1738 } 1739 InFlag = Chain.getValue(2); 1740 InVals.push_back(Val); 1741 } 1742 1743 return Chain; 1744} 1745 1746 1747//===----------------------------------------------------------------------===// 1748// C & StdCall & Fast Calling Convention implementation 1749//===----------------------------------------------------------------------===// 1750// StdCall calling convention seems to be standard for many Windows' API 1751// routines and around. It differs from C calling convention just a little: 1752// callee should clean up the stack, not caller. Symbols should be also 1753// decorated in some fancy way :) It doesn't support any vector arguments. 1754// For info on fast calling convention see Fast Calling Convention (tail call) 1755// implementation LowerX86_32FastCCCallTo. 1756 1757/// CallIsStructReturn - Determines whether a call uses struct return 1758/// semantics. 
1759enum StructReturnType { 1760 NotStructReturn, 1761 RegStructReturn, 1762 StackStructReturn 1763}; 1764static StructReturnType 1765callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) { 1766 if (Outs.empty()) 1767 return NotStructReturn; 1768 1769 const ISD::ArgFlagsTy &Flags = Outs[0].Flags; 1770 if (!Flags.isSRet()) 1771 return NotStructReturn; 1772 if (Flags.isInReg()) 1773 return RegStructReturn; 1774 return StackStructReturn; 1775} 1776 1777/// ArgsAreStructReturn - Determines whether a function uses struct 1778/// return semantics. 1779static StructReturnType 1780argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) { 1781 if (Ins.empty()) 1782 return NotStructReturn; 1783 1784 const ISD::ArgFlagsTy &Flags = Ins[0].Flags; 1785 if (!Flags.isSRet()) 1786 return NotStructReturn; 1787 if (Flags.isInReg()) 1788 return RegStructReturn; 1789 return StackStructReturn; 1790} 1791 1792/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 1793/// by "Src" to address "Dst" with size and alignment information specified by 1794/// the specific parameter attribute. The copy will be passed as a byval 1795/// function parameter. 1796static SDValue 1797CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, 1798 ISD::ArgFlagsTy Flags, SelectionDAG &DAG, 1799 DebugLoc dl) { 1800 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32); 1801 1802 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 1803 /*isVolatile*/false, /*AlwaysInline=*/true, 1804 MachinePointerInfo(), MachinePointerInfo()); 1805} 1806 1807/// IsTailCallConvention - Return true if the calling convention is one that 1808/// supports tail call optimization. 1809static bool IsTailCallConvention(CallingConv::ID CC) { 1810 return (CC == CallingConv::Fast || CC == CallingConv::GHC); 1811} 1812 1813bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { 1814 if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls) 1815 return false; 1816 1817 CallSite CS(CI); 1818 CallingConv::ID CalleeCC = CS.getCallingConv(); 1819 if (!IsTailCallConvention(CalleeCC) && CalleeCC != CallingConv::C) 1820 return false; 1821 1822 return true; 1823} 1824 1825/// FuncIsMadeTailCallSafe - Return true if the function is being made into 1826/// a tailcall target by changing its ABI. 1827static bool FuncIsMadeTailCallSafe(CallingConv::ID CC, 1828 bool GuaranteedTailCallOpt) { 1829 return GuaranteedTailCallOpt && IsTailCallConvention(CC); 1830} 1831 1832SDValue 1833X86TargetLowering::LowerMemArgument(SDValue Chain, 1834 CallingConv::ID CallConv, 1835 const SmallVectorImpl<ISD::InputArg> &Ins, 1836 DebugLoc dl, SelectionDAG &DAG, 1837 const CCValAssign &VA, 1838 MachineFrameInfo *MFI, 1839 unsigned i) const { 1840 // Create the nodes corresponding to a load from this parameter slot. 1841 ISD::ArgFlagsTy Flags = Ins[i].Flags; 1842 bool AlwaysUseMutable = FuncIsMadeTailCallSafe(CallConv, 1843 getTargetMachine().Options.GuaranteedTailCallOpt); 1844 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal(); 1845 EVT ValVT; 1846 1847 // If value is passed by pointer we have address passed instead of the value 1848 // itself. 1849 if (VA.getLocInfo() == CCValAssign::Indirect) 1850 ValVT = VA.getLocVT(); 1851 else 1852 ValVT = VA.getValVT(); 1853 1854 // FIXME: For now, all byval parameter objects are marked mutable. This can be 1855 // changed with more analysis. 1856 // In case of tail call optimization mark all arguments mutable. 
Since they 1857 // could be overwritten by lowering of arguments in case of a tail call. 1858 if (Flags.isByVal()) { 1859 unsigned Bytes = Flags.getByValSize(); 1860 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects. 1861 int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable); 1862 return DAG.getFrameIndex(FI, getPointerTy()); 1863 } else { 1864 int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8, 1865 VA.getLocMemOffset(), isImmutable); 1866 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 1867 return DAG.getLoad(ValVT, dl, Chain, FIN, 1868 MachinePointerInfo::getFixedStack(FI), 1869 false, false, false, 0); 1870 } 1871} 1872 1873SDValue 1874X86TargetLowering::LowerFormalArguments(SDValue Chain, 1875 CallingConv::ID CallConv, 1876 bool isVarArg, 1877 const SmallVectorImpl<ISD::InputArg> &Ins, 1878 DebugLoc dl, 1879 SelectionDAG &DAG, 1880 SmallVectorImpl<SDValue> &InVals) 1881 const { 1882 MachineFunction &MF = DAG.getMachineFunction(); 1883 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1884 1885 const Function* Fn = MF.getFunction(); 1886 if (Fn->hasExternalLinkage() && 1887 Subtarget->isTargetCygMing() && 1888 Fn->getName() == "main") 1889 FuncInfo->setForceFramePointer(true); 1890 1891 MachineFrameInfo *MFI = MF.getFrameInfo(); 1892 bool Is64Bit = Subtarget->is64Bit(); 1893 bool IsWindows = Subtarget->isTargetWindows(); 1894 bool IsWin64 = Subtarget->isTargetWin64(); 1895 1896 assert(!(isVarArg && IsTailCallConvention(CallConv)) && 1897 "Var args not supported with calling convention fastcc or ghc"); 1898 1899 // Assign locations to all of the incoming arguments. 1900 SmallVector<CCValAssign, 16> ArgLocs; 1901 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 1902 ArgLocs, *DAG.getContext()); 1903 1904 // Allocate shadow area for Win64 1905 if (IsWin64) { 1906 CCInfo.AllocateStack(32, 8); 1907 } 1908 1909 CCInfo.AnalyzeFormalArguments(Ins, CC_X86); 1910 1911 unsigned LastVal = ~0U; 1912 SDValue ArgValue; 1913 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1914 CCValAssign &VA = ArgLocs[i]; 1915 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later 1916 // places. 1917 assert(VA.getValNo() != LastVal && 1918 "Don't support value assigned to multiple locs yet"); 1919 (void)LastVal; 1920 LastVal = VA.getValNo(); 1921 1922 if (VA.isRegLoc()) { 1923 EVT RegVT = VA.getLocVT(); 1924 const TargetRegisterClass *RC; 1925 if (RegVT == MVT::i32) 1926 RC = &X86::GR32RegClass; 1927 else if (Is64Bit && RegVT == MVT::i64) 1928 RC = &X86::GR64RegClass; 1929 else if (RegVT == MVT::f32) 1930 RC = &X86::FR32RegClass; 1931 else if (RegVT == MVT::f64) 1932 RC = &X86::FR64RegClass; 1933 else if (RegVT.is256BitVector()) 1934 RC = &X86::VR256RegClass; 1935 else if (RegVT.is128BitVector()) 1936 RC = &X86::VR128RegClass; 1937 else if (RegVT == MVT::x86mmx) 1938 RC = &X86::VR64RegClass; 1939 else 1940 llvm_unreachable("Unknown argument type!"); 1941 1942 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 1943 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 1944 1945 // If this is an 8 or 16-bit value, it is really passed promoted to 32 1946 // bits. Insert an assert[sz]ext to capture this, then truncate to the 1947 // right size. 
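      // For instance, an i8 argument that arrives sign-extended in a 32-bit
      // register becomes (truncate (AssertSext reg, i8)), telling later
      // passes that the upper 24 bits already hold the sign.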
1948 if (VA.getLocInfo() == CCValAssign::SExt) 1949 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, 1950 DAG.getValueType(VA.getValVT())); 1951 else if (VA.getLocInfo() == CCValAssign::ZExt) 1952 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, 1953 DAG.getValueType(VA.getValVT())); 1954 else if (VA.getLocInfo() == CCValAssign::BCvt) 1955 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue); 1956 1957 if (VA.isExtInLoc()) { 1958 // Handle MMX values passed in XMM regs. 1959 if (RegVT.isVector()) { 1960 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), 1961 ArgValue); 1962 } else 1963 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 1964 } 1965 } else { 1966 assert(VA.isMemLoc()); 1967 ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i); 1968 } 1969 1970 // If value is passed via pointer - do a load. 1971 if (VA.getLocInfo() == CCValAssign::Indirect) 1972 ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, 1973 MachinePointerInfo(), false, false, false, 0); 1974 1975 InVals.push_back(ArgValue); 1976 } 1977 1978 // The x86-64 ABI for returning structs by value requires that we copy 1979 // the sret argument into %rax for the return. Save the argument into 1980 // a virtual register so that we can access it from the return points. 1981 if (Is64Bit && MF.getFunction()->hasStructRetAttr()) { 1982 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1983 unsigned Reg = FuncInfo->getSRetReturnReg(); 1984 if (!Reg) { 1985 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64)); 1986 FuncInfo->setSRetReturnReg(Reg); 1987 } 1988 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]); 1989 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain); 1990 } 1991 1992 unsigned StackSize = CCInfo.getNextStackOffset(); 1993 // Align stack specially for tail calls. 1994 if (FuncIsMadeTailCallSafe(CallConv, 1995 MF.getTarget().Options.GuaranteedTailCallOpt)) 1996 StackSize = GetAlignedArgumentStackSize(StackSize, DAG); 1997 1998 // If the function takes variable number of arguments, make a frame index for 1999 // the start of the first vararg value... for expansion of llvm.va_start. 2000 if (isVarArg) { 2001 if (Is64Bit || (CallConv != CallingConv::X86_FastCall && 2002 CallConv != CallingConv::X86_ThisCall)) { 2003 FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize,true)); 2004 } 2005 if (Is64Bit) { 2006 unsigned TotalNumIntRegs = 0, TotalNumXMMRegs = 0; 2007 2008 // FIXME: We should really autogenerate these arrays 2009 static const uint16_t GPR64ArgRegsWin64[] = { 2010 X86::RCX, X86::RDX, X86::R8, X86::R9 2011 }; 2012 static const uint16_t GPR64ArgRegs64Bit[] = { 2013 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9 2014 }; 2015 static const uint16_t XMMArgRegs64Bit[] = { 2016 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 2017 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 2018 }; 2019 const uint16_t *GPR64ArgRegs; 2020 unsigned NumXMMRegs = 0; 2021 2022 if (IsWin64) { 2023 // The XMM registers which might contain var arg parameters are shadowed 2024 // in their paired GPR. So we only need to save the GPR to their home 2025 // slots. 
2026 TotalNumIntRegs = 4; 2027 GPR64ArgRegs = GPR64ArgRegsWin64; 2028 } else { 2029 TotalNumIntRegs = 6; TotalNumXMMRegs = 8; 2030 GPR64ArgRegs = GPR64ArgRegs64Bit; 2031 2032 NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs64Bit, 2033 TotalNumXMMRegs); 2034 } 2035 unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs, 2036 TotalNumIntRegs); 2037 2038 bool NoImplicitFloatOps = Fn->getFnAttributes(). 2039 hasAttribute(Attributes::NoImplicitFloat); 2040 assert(!(NumXMMRegs && !Subtarget->hasSSE1()) && 2041 "SSE register cannot be used when SSE is disabled!"); 2042 assert(!(NumXMMRegs && MF.getTarget().Options.UseSoftFloat && 2043 NoImplicitFloatOps) && 2044 "SSE register cannot be used when SSE is disabled!"); 2045 if (MF.getTarget().Options.UseSoftFloat || NoImplicitFloatOps || 2046 !Subtarget->hasSSE1()) 2047 // Kernel mode asks for SSE to be disabled, so don't push them 2048 // on the stack. 2049 TotalNumXMMRegs = 0; 2050 2051 if (IsWin64) { 2052 const TargetFrameLowering &TFI = *getTargetMachine().getFrameLowering(); 2053 // Get to the caller-allocated home save location. Add 8 to account 2054 // for the return address. 2055 int HomeOffset = TFI.getOffsetOfLocalArea() + 8; 2056 FuncInfo->setRegSaveFrameIndex( 2057 MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false)); 2058 // Fixup to set vararg frame on shadow area (4 x i64). 2059 if (NumIntRegs < 4) 2060 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex()); 2061 } else { 2062 // For X86-64, if there are vararg parameters that are passed via 2063 // registers, then we must store them to their spots on the stack so 2064 // they may be loaded by deferencing the result of va_next. 2065 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8); 2066 FuncInfo->setVarArgsFPOffset(TotalNumIntRegs * 8 + NumXMMRegs * 16); 2067 FuncInfo->setRegSaveFrameIndex( 2068 MFI->CreateStackObject(TotalNumIntRegs * 8 + TotalNumXMMRegs * 16, 16, 2069 false)); 2070 } 2071 2072 // Store the integer parameter registers. 2073 SmallVector<SDValue, 8> MemOps; 2074 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), 2075 getPointerTy()); 2076 unsigned Offset = FuncInfo->getVarArgsGPOffset(); 2077 for (; NumIntRegs != TotalNumIntRegs; ++NumIntRegs) { 2078 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN, 2079 DAG.getIntPtrConstant(Offset)); 2080 unsigned VReg = MF.addLiveIn(GPR64ArgRegs[NumIntRegs], 2081 &X86::GR64RegClass); 2082 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 2083 SDValue Store = 2084 DAG.getStore(Val.getValue(1), dl, Val, FIN, 2085 MachinePointerInfo::getFixedStack( 2086 FuncInfo->getRegSaveFrameIndex(), Offset), 2087 false, false, 0); 2088 MemOps.push_back(Store); 2089 Offset += 8; 2090 } 2091 2092 if (TotalNumXMMRegs != 0 && NumXMMRegs != TotalNumXMMRegs) { 2093 // Now store the XMM (fp + vector) parameter registers. 
2094 SmallVector<SDValue, 11> SaveXMMOps; 2095 SaveXMMOps.push_back(Chain); 2096 2097 unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass); 2098 SDValue ALVal = DAG.getCopyFromReg(DAG.getEntryNode(), dl, AL, MVT::i8); 2099 SaveXMMOps.push_back(ALVal); 2100 2101 SaveXMMOps.push_back(DAG.getIntPtrConstant( 2102 FuncInfo->getRegSaveFrameIndex())); 2103 SaveXMMOps.push_back(DAG.getIntPtrConstant( 2104 FuncInfo->getVarArgsFPOffset())); 2105 2106 for (; NumXMMRegs != TotalNumXMMRegs; ++NumXMMRegs) { 2107 unsigned VReg = MF.addLiveIn(XMMArgRegs64Bit[NumXMMRegs], 2108 &X86::VR128RegClass); 2109 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::v4f32); 2110 SaveXMMOps.push_back(Val); 2111 } 2112 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl, 2113 MVT::Other, 2114 &SaveXMMOps[0], SaveXMMOps.size())); 2115 } 2116 2117 if (!MemOps.empty()) 2118 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2119 &MemOps[0], MemOps.size()); 2120 } 2121 } 2122 2123 // Some CCs need callee pop. 2124 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg, 2125 MF.getTarget().Options.GuaranteedTailCallOpt)) { 2126 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything. 2127 } else { 2128 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing. 2129 // If this is an sret function, the return should pop the hidden pointer. 2130 if (!Is64Bit && !IsTailCallConvention(CallConv) && !IsWindows && 2131 argsAreStructReturn(Ins) == StackStructReturn) 2132 FuncInfo->setBytesToPopOnReturn(4); 2133 } 2134 2135 if (!Is64Bit) { 2136 // RegSaveFrameIndex is X86-64 only. 2137 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA); 2138 if (CallConv == CallingConv::X86_FastCall || 2139 CallConv == CallingConv::X86_ThisCall) 2140 // fastcc functions can't have varargs. 2141 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA); 2142 } 2143 2144 FuncInfo->setArgumentStackSize(StackSize); 2145 2146 return Chain; 2147} 2148 2149SDValue 2150X86TargetLowering::LowerMemOpCallTo(SDValue Chain, 2151 SDValue StackPtr, SDValue Arg, 2152 DebugLoc dl, SelectionDAG &DAG, 2153 const CCValAssign &VA, 2154 ISD::ArgFlagsTy Flags) const { 2155 unsigned LocMemOffset = VA.getLocMemOffset(); 2156 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 2157 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 2158 if (Flags.isByVal()) 2159 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl); 2160 2161 return DAG.getStore(Chain, dl, Arg, PtrOff, 2162 MachinePointerInfo::getStack(LocMemOffset), 2163 false, false, 0); 2164} 2165 2166/// EmitTailCallLoadRetAddr - Emit a load of return address if tail call 2167/// optimization is performed and it is required. 2168SDValue 2169X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG, 2170 SDValue &OutRetAddr, SDValue Chain, 2171 bool IsTailCall, bool Is64Bit, 2172 int FPDiff, DebugLoc dl) const { 2173 // Adjust the Return address stack slot. 2174 EVT VT = getPointerTy(); 2175 OutRetAddr = getReturnAddressFrameIndex(DAG); 2176 2177 // Load the "old" Return address. 2178 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(), 2179 false, false, false, 0); 2180 return SDValue(OutRetAddr.getNode(), 1); 2181} 2182 2183/// EmitTailCallStoreRetAddr - Emit a store of the return address if tail call 2184/// optimization is performed and it is required (FPDiff!=0). 
2185static SDValue 2186EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF, 2187 SDValue Chain, SDValue RetAddrFrIdx, EVT PtrVT, 2188 unsigned SlotSize, int FPDiff, DebugLoc dl) { 2189 // Store the return address to the appropriate stack slot. 2190 if (!FPDiff) return Chain; 2191 // Calculate the new stack slot for the return address. 2192 int NewReturnAddrFI = 2193 MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize, false); 2194 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT); 2195 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx, 2196 MachinePointerInfo::getFixedStack(NewReturnAddrFI), 2197 false, false, 0); 2198 return Chain; 2199} 2200 2201SDValue 2202X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, 2203 SmallVectorImpl<SDValue> &InVals) const { 2204 SelectionDAG &DAG = CLI.DAG; 2205 DebugLoc &dl = CLI.DL; 2206 SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; 2207 SmallVector<SDValue, 32> &OutVals = CLI.OutVals; 2208 SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; 2209 SDValue Chain = CLI.Chain; 2210 SDValue Callee = CLI.Callee; 2211 CallingConv::ID CallConv = CLI.CallConv; 2212 bool &isTailCall = CLI.IsTailCall; 2213 bool isVarArg = CLI.IsVarArg; 2214 2215 MachineFunction &MF = DAG.getMachineFunction(); 2216 bool Is64Bit = Subtarget->is64Bit(); 2217 bool IsWin64 = Subtarget->isTargetWin64(); 2218 bool IsWindows = Subtarget->isTargetWindows(); 2219 StructReturnType SR = callIsStructReturn(Outs); 2220 bool IsSibcall = false; 2221 2222 if (MF.getTarget().Options.DisableTailCalls) 2223 isTailCall = false; 2224 2225 if (isTailCall) { 2226 // Check if it's really possible to do a tail call. 2227 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, 2228 isVarArg, SR != NotStructReturn, 2229 MF.getFunction()->hasStructRetAttr(), CLI.RetTy, 2230 Outs, OutVals, Ins, DAG); 2231 2232 // Sibcalls are automatically detected tailcalls which do not require 2233 // ABI changes. 2234 if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall) 2235 IsSibcall = true; 2236 2237 if (isTailCall) 2238 ++NumTailCalls; 2239 } 2240 2241 assert(!(isVarArg && IsTailCallConvention(CallConv)) && 2242 "Var args not supported with calling convention fastcc or ghc"); 2243 2244 // Analyze operands of the call, assigning locations to each operand. 2245 SmallVector<CCValAssign, 16> ArgLocs; 2246 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 2247 ArgLocs, *DAG.getContext()); 2248 2249 // Allocate shadow area for Win64 2250 if (IsWin64) { 2251 CCInfo.AllocateStack(32, 8); 2252 } 2253 2254 CCInfo.AnalyzeCallOperands(Outs, CC_X86); 2255 2256 // Get a count of how many bytes are to be pushed on the stack. 2257 unsigned NumBytes = CCInfo.getNextStackOffset(); 2258 if (IsSibcall) 2259 // This is a sibcall. The memory operands are available in caller's 2260 // own caller's stack. 2261 NumBytes = 0; 2262 else if (getTargetMachine().Options.GuaranteedTailCallOpt && 2263 IsTailCallConvention(CallConv)) 2264 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG); 2265 2266 int FPDiff = 0; 2267 if (isTailCall && !IsSibcall) { 2268 // Lower arguments at fp - stackoffset + fpdiff. 2269 X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>(); 2270 unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn(); 2271 2272 FPDiff = NumBytesCallerPushed - NumBytes; 2273 2274 // Set the delta of movement of the returnaddr stackslot. 2275 // But only set if delta is greater than previous delta. 
2276 if (FPDiff < X86Info->getTCReturnAddrDelta()) 2277 X86Info->setTCReturnAddrDelta(FPDiff); 2278 } 2279 2280 if (!IsSibcall) 2281 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true)); 2282 2283 SDValue RetAddrFrIdx; 2284 // Load return address for tail calls. 2285 if (isTailCall && FPDiff) 2286 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall, 2287 Is64Bit, FPDiff, dl); 2288 2289 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 2290 SmallVector<SDValue, 8> MemOpChains; 2291 SDValue StackPtr; 2292 2293 // Walk the register/memloc assignments, inserting copies/loads. In the case 2294 // of tail call optimization arguments are handle later. 2295 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2296 CCValAssign &VA = ArgLocs[i]; 2297 EVT RegVT = VA.getLocVT(); 2298 SDValue Arg = OutVals[i]; 2299 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2300 bool isByVal = Flags.isByVal(); 2301 2302 // Promote the value if needed. 2303 switch (VA.getLocInfo()) { 2304 default: llvm_unreachable("Unknown loc info!"); 2305 case CCValAssign::Full: break; 2306 case CCValAssign::SExt: 2307 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg); 2308 break; 2309 case CCValAssign::ZExt: 2310 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg); 2311 break; 2312 case CCValAssign::AExt: 2313 if (RegVT.is128BitVector()) { 2314 // Special case: passing MMX values in XMM registers. 2315 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg); 2316 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg); 2317 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg); 2318 } else 2319 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg); 2320 break; 2321 case CCValAssign::BCvt: 2322 Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg); 2323 break; 2324 case CCValAssign::Indirect: { 2325 // Store the argument. 2326 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT()); 2327 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex(); 2328 Chain = DAG.getStore(Chain, dl, Arg, SpillSlot, 2329 MachinePointerInfo::getFixedStack(FI), 2330 false, false, 0); 2331 Arg = SpillSlot; 2332 break; 2333 } 2334 } 2335 2336 if (VA.isRegLoc()) { 2337 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 2338 if (isVarArg && IsWin64) { 2339 // Win64 ABI requires argument XMM reg to be copied to the corresponding 2340 // shadow reg if callee is a varargs function. 2341 unsigned ShadowReg = 0; 2342 switch (VA.getLocReg()) { 2343 case X86::XMM0: ShadowReg = X86::RCX; break; 2344 case X86::XMM1: ShadowReg = X86::RDX; break; 2345 case X86::XMM2: ShadowReg = X86::R8; break; 2346 case X86::XMM3: ShadowReg = X86::R9; break; 2347 } 2348 if (ShadowReg) 2349 RegsToPass.push_back(std::make_pair(ShadowReg, Arg)); 2350 } 2351 } else if (!IsSibcall && (!isTailCall || isByVal)) { 2352 assert(VA.isMemLoc()); 2353 if (StackPtr.getNode() == 0) 2354 StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr, getPointerTy()); 2355 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg, 2356 dl, DAG, VA, Flags)); 2357 } 2358 } 2359 2360 if (!MemOpChains.empty()) 2361 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2362 &MemOpChains[0], MemOpChains.size()); 2363 2364 if (Subtarget->isPICStyleGOT()) { 2365 // ELF / PIC requires GOT in the EBX register before function calls via PLT 2366 // GOT pointer. 
    if (!isTailCall) {
      RegsToPass.push_back(std::make_pair(unsigned(X86::EBX),
               DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), getPointerTy())));
    } else {
      // If we are tail calling and generating PIC/GOT style code, load the
      // address of the callee into ECX. The value in ECX is used as the
      // target of the tail jump. This is done to circumvent the
      // ebx/callee-saved problem for tail calls on PIC/GOT architectures.
      // Normally we would just put the address of the GOT into ebx and then
      // call target@PLT. But for tail calls ebx would be restored (since ebx
      // is callee saved) before jumping to the target@PLT.

      // Note: The actual moving to ECX is done further down.
      GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
      if (G && !G->getGlobal()->hasHiddenVisibility() &&
          !G->getGlobal()->hasProtectedVisibility())
        Callee = LowerGlobalAddress(Callee, DAG);
      else if (isa<ExternalSymbolSDNode>(Callee))
        Callee = LowerExternalSymbol(Callee, DAG);
    }
  }

  if (Is64Bit && isVarArg && !IsWin64) {
    // From the AMD64 ABI document:
    // For calls that may call functions that use varargs or stdargs
    // (prototype-less calls or calls to functions containing ellipsis (...) in
    // the declaration) %al is used as a hidden argument to specify the number
    // of SSE registers used. The contents of %al do not need to match exactly
    // the number of registers, but must be an upper bound on the number of
    // SSE registers used, and is in the range 0 - 8 inclusive.

    // Count the number of XMM registers allocated.
    static const uint16_t XMMArgRegs[] = {
      X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
      X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
    };
    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
    assert((Subtarget->hasSSE1() || !NumXMMRegs)
           && "SSE registers cannot be used when SSE is disabled");

    RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
                                        DAG.getConstant(NumXMMRegs, MVT::i8)));
  }

  // For tail calls lower the arguments to the 'real' stack slot.
  if (isTailCall) {
    // Force all the incoming stack arguments to be loaded from the stack
    // before any new outgoing arguments are stored to the stack, because the
    // outgoing stack slots may alias the incoming argument stack slots, and
    // the alias isn't otherwise explicit. This is slightly more conservative
    // than necessary, because it means that each store effectively depends
    // on every argument instead of just those arguments it would clobber.
    SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);

    SmallVector<SDValue, 8> MemOpChains2;
    SDValue FIN;
    int FI = 0;
    if (getTargetMachine().Options.GuaranteedTailCallOpt) {
      for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
        CCValAssign &VA = ArgLocs[i];
        if (VA.isRegLoc())
          continue;
        assert(VA.isMemLoc());
        SDValue Arg = OutVals[i];
        ISD::ArgFlagsTy Flags = Outs[i].Flags;
        // Create frame index.
        int32_t Offset = VA.getLocMemOffset()+FPDiff;
        uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
        FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
        FIN = DAG.getFrameIndex(FI, getPointerTy());

        if (Flags.isByVal()) {
          // Copy relative to framepointer.
          SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
          if (StackPtr.getNode() == 0)
            StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr,
                                          getPointerTy());
          Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source);

          MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
                                                           ArgChain,
                                                           Flags, DAG, dl));
        } else {
          // Store relative to framepointer.
          MemOpChains2.push_back(
            DAG.getStore(ArgChain, dl, Arg, FIN,
                         MachinePointerInfo::getFixedStack(FI),
                         false, false, 0));
        }
      }
    }

    if (!MemOpChains2.empty())
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                          &MemOpChains2[0], MemOpChains2.size());

    // Store the return address to the appropriate stack slot.
    Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
                                     getPointerTy(), RegInfo->getSlotSize(),
                                     FPDiff, dl);
  }

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  if (getTargetMachine().getCodeModel() == CodeModel::Large) {
    assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
    // In the 64-bit large code model, we have to make all calls
    // through a register, since the call instruction's 32-bit
    // pc-relative offset may not be large enough to hold the whole
    // address.
  } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    // If the callee is a GlobalAddress node (quite common, every direct call
    // is), turn it into a TargetGlobalAddress node so that legalize doesn't
    // hack it.

    // We should use an extra load for direct calls to dllimported functions
    // in non-JIT mode.
    const GlobalValue *GV = G->getGlobal();
    if (!GV->hasDLLImportLinkage()) {
      unsigned char OpFlags = 0;
      bool ExtraLoad = false;
      unsigned WrapperKind = ISD::DELETED_NODE;

      // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
      // external symbols must go through the PLT in PIC mode. If the symbol
      // has hidden or protected visibility, or if it is static or local, then
      // we don't need to use the PLT - we can directly call it.
      if (Subtarget->isTargetELF() &&
          getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
          GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
        OpFlags = X86II::MO_PLT;
      } else if (Subtarget->isPICStyleStubAny() &&
                 (GV->isDeclaration() || GV->isWeakForLinker()) &&
                 (!Subtarget->getTargetTriple().isMacOSX() ||
                  Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
        // PC-relative references to external symbols should go through $stub,
        // unless we're building with the Leopard linker or later, which
        // automatically synthesizes these stubs.
        OpFlags = X86II::MO_DARWIN_STUB;
      } else if (Subtarget->isPICStyleRIPRel() &&
                 isa<Function>(GV) &&
                 cast<Function>(GV)->getFnAttributes().
                   hasAttribute(Attributes::NonLazyBind)) {
        // If the function is marked as non-lazy, generate an indirect call
        // which loads from the GOT directly. This avoids runtime overhead
        // at the cost of eager binding (and one extra byte of encoding).
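        // e.g. a call to a function declared with the nonlazybind attribute
        // ends up as "call *foo@GOTPCREL(%rip)" rather than "call foo@PLT"
        // (AT&T syntax, shown for illustration only).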
2520 OpFlags = X86II::MO_GOTPCREL; 2521 WrapperKind = X86ISD::WrapperRIP; 2522 ExtraLoad = true; 2523 } 2524 2525 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 2526 G->getOffset(), OpFlags); 2527 2528 // Add a wrapper if needed. 2529 if (WrapperKind != ISD::DELETED_NODE) 2530 Callee = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Callee); 2531 // Add extra indirection if needed. 2532 if (ExtraLoad) 2533 Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee, 2534 MachinePointerInfo::getGOT(), 2535 false, false, false, 0); 2536 } 2537 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 2538 unsigned char OpFlags = 0; 2539 2540 // On ELF targets, in either X86-64 or X86-32 mode, direct calls to 2541 // external symbols should go through the PLT. 2542 if (Subtarget->isTargetELF() && 2543 getTargetMachine().getRelocationModel() == Reloc::PIC_) { 2544 OpFlags = X86II::MO_PLT; 2545 } else if (Subtarget->isPICStyleStubAny() && 2546 (!Subtarget->getTargetTriple().isMacOSX() || 2547 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) { 2548 // PC-relative references to external symbols should go through $stub, 2549 // unless we're building with the leopard linker or later, which 2550 // automatically synthesizes these stubs. 2551 OpFlags = X86II::MO_DARWIN_STUB; 2552 } 2553 2554 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(), 2555 OpFlags); 2556 } 2557 2558 // Returns a chain & a flag for retval copy to use. 2559 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 2560 SmallVector<SDValue, 8> Ops; 2561 2562 if (!IsSibcall && isTailCall) { 2563 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 2564 DAG.getIntPtrConstant(0, true), InFlag); 2565 InFlag = Chain.getValue(1); 2566 } 2567 2568 Ops.push_back(Chain); 2569 Ops.push_back(Callee); 2570 2571 if (isTailCall) 2572 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32)); 2573 2574 // Add argument registers to the end of the list so that they are known live 2575 // into the call. 2576 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 2577 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 2578 RegsToPass[i].second.getValueType())); 2579 2580 // Add a register mask operand representing the call-preserved registers. 2581 const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); 2582 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv); 2583 assert(Mask && "Missing call preserved mask for calling convention"); 2584 Ops.push_back(DAG.getRegisterMask(Mask)); 2585 2586 if (InFlag.getNode()) 2587 Ops.push_back(InFlag); 2588 2589 if (isTailCall) { 2590 // We used to do: 2591 //// If this is the first return lowered for this function, add the regs 2592 //// to the liveout set for the function. 2593 // This isn't right, although it's probably harmless on x86; liveouts 2594 // should be computed from returns not tail calls. Consider a void 2595 // function making a tail call to a function returning int. 2596 return DAG.getNode(X86ISD::TC_RETURN, dl, 2597 NodeTys, &Ops[0], Ops.size()); 2598 } 2599 2600 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, &Ops[0], Ops.size()); 2601 InFlag = Chain.getValue(1); 2602 2603 // Create the CALLSEQ_END node. 
  unsigned NumBytesForCalleeToPush;
  if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
                       getTargetMachine().Options.GuaranteedTailCallOpt))
    NumBytesForCalleeToPush = NumBytes;    // Callee pops everything
  else if (!Is64Bit && !IsTailCallConvention(CallConv) && !IsWindows &&
           SR == StackStructReturn)
    // If this is a call to a struct-return function, the callee
    // pops the hidden struct pointer, so we have to push it back.
    // This is common for Darwin/X86, Linux & Mingw32 targets.
    // For MSVC Win32 targets, the caller pops the hidden struct pointer.
    NumBytesForCalleeToPush = 4;
  else
    NumBytesForCalleeToPush = 0;  // Callee pops nothing.

  // Returns a flag for retval copy to use.
  if (!IsSibcall) {
    Chain = DAG.getCALLSEQ_END(Chain,
                               DAG.getIntPtrConstant(NumBytes, true),
                               DAG.getIntPtrConstant(NumBytesForCalleeToPush,
                                                     true),
                               InFlag);
    InFlag = Chain.getValue(1);
  }

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
                         Ins, dl, DAG, InVals);
}


//===----------------------------------------------------------------------===//
//                Fast Calling Convention (tail call) implementation
//===----------------------------------------------------------------------===//

//  Like stdcall, the callee cleans up the arguments; the convention differs
//  in that ECX is reserved for storing the address of the tail-called
//  function. Only 2 registers are free for argument passing (inreg). Tail
//  call optimization is performed provided:
//                * tailcallopt is enabled
//                * caller/callee are fastcc
//  On X86_64 architecture with GOT-style position independent code only local
//  (within module) calls are supported at the moment.
//  To keep the stack aligned according to the platform ABI, the function
//  GetAlignedArgumentStackSize ensures that the argument delta is always a
//  multiple of the stack alignment. (Dynamic linkers need this - Darwin's
//  dyld, for example.)
//  If a tail-called callee has more arguments than the caller, the caller
//  needs to make sure that there is room to move the RETADDR to. This is
//  achieved by reserving an area the size of the argument delta right after
//  the original RETADDR, but before the saved framepointer or the spilled
//  registers, e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4):
//  stack layout:
//    arg1
//    arg2
//    RETADDR
//    [ new RETADDR
//      move area ]
//    (possible EBP)
//    ESI
//    EDI
//    local1 ..

/// GetAlignedArgumentStackSize - Round up the stack argument size so that it
/// has, e.g., the form 16n + 12 for a 16-byte alignment requirement, i.e. so
/// that the arguments plus the return-address slot add up to a multiple of
/// the stack alignment.
unsigned
X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
                                               SelectionDAG& DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  const TargetMachine &TM = MF.getTarget();
  const TargetFrameLowering &TFI = *TM.getFrameLowering();
  unsigned StackAlignment = TFI.getStackAlignment();
  uint64_t AlignMask = StackAlignment - 1;
  int64_t Offset = StackSize;
  unsigned SlotSize = RegInfo->getSlotSize();
  if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
    // Number smaller than 12 so just add the difference.
    Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
  } else {
    // Mask out lower bits, add stackalignment once plus the 12 bytes.
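    // e.g. with a 16-byte stack alignment and 4-byte slots (the "12 bytes"
    // above), an Offset of 30 has low bits 14 > 12, so it is rounded down to
    // 16 and then bumped by 16 + 12, giving 44 = 16*2 + 12.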
2683 Offset = ((~AlignMask) & Offset) + StackAlignment + 2684 (StackAlignment-SlotSize); 2685 } 2686 return Offset; 2687} 2688 2689/// MatchingStackOffset - Return true if the given stack call argument is 2690/// already available in the same position (relatively) of the caller's 2691/// incoming argument stack. 2692static 2693bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, 2694 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI, 2695 const X86InstrInfo *TII) { 2696 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8; 2697 int FI = INT_MAX; 2698 if (Arg.getOpcode() == ISD::CopyFromReg) { 2699 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg(); 2700 if (!TargetRegisterInfo::isVirtualRegister(VR)) 2701 return false; 2702 MachineInstr *Def = MRI->getVRegDef(VR); 2703 if (!Def) 2704 return false; 2705 if (!Flags.isByVal()) { 2706 if (!TII->isLoadFromStackSlot(Def, FI)) 2707 return false; 2708 } else { 2709 unsigned Opcode = Def->getOpcode(); 2710 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r) && 2711 Def->getOperand(1).isFI()) { 2712 FI = Def->getOperand(1).getIndex(); 2713 Bytes = Flags.getByValSize(); 2714 } else 2715 return false; 2716 } 2717 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) { 2718 if (Flags.isByVal()) 2719 // ByVal argument is passed in as a pointer but it's now being 2720 // dereferenced. e.g. 2721 // define @foo(%struct.X* %A) { 2722 // tail call @bar(%struct.X* byval %A) 2723 // } 2724 return false; 2725 SDValue Ptr = Ld->getBasePtr(); 2726 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr); 2727 if (!FINode) 2728 return false; 2729 FI = FINode->getIndex(); 2730 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) { 2731 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg); 2732 FI = FINode->getIndex(); 2733 Bytes = Flags.getByValSize(); 2734 } else 2735 return false; 2736 2737 assert(FI != INT_MAX); 2738 if (!MFI->isFixedObjectIndex(FI)) 2739 return false; 2740 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI); 2741} 2742 2743/// IsEligibleForTailCallOptimization - Check whether the call is eligible 2744/// for tail call optimization. Targets which want to do tail call 2745/// optimization should implement this function. 2746bool 2747X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 2748 CallingConv::ID CalleeCC, 2749 bool isVarArg, 2750 bool isCalleeStructRet, 2751 bool isCallerStructRet, 2752 Type *RetTy, 2753 const SmallVectorImpl<ISD::OutputArg> &Outs, 2754 const SmallVectorImpl<SDValue> &OutVals, 2755 const SmallVectorImpl<ISD::InputArg> &Ins, 2756 SelectionDAG& DAG) const { 2757 if (!IsTailCallConvention(CalleeCC) && 2758 CalleeCC != CallingConv::C) 2759 return false; 2760 2761 // If -tailcallopt is specified, make fastcc functions tail-callable. 2762 const MachineFunction &MF = DAG.getMachineFunction(); 2763 const Function *CallerF = DAG.getMachineFunction().getFunction(); 2764 2765 // If the function return type is x86_fp80 and the callee return type is not, 2766 // then the FP_EXTEND of the call result is not a nop. It's not safe to 2767 // perform a tailcall optimization here. 
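  // e.g. "long double f() { return g(); }" where g returns double: extending
  // g's result to fp80 is real work, so f cannot simply jump to g.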
2768 if (CallerF->getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty()) 2769 return false; 2770 2771 CallingConv::ID CallerCC = CallerF->getCallingConv(); 2772 bool CCMatch = CallerCC == CalleeCC; 2773 2774 if (getTargetMachine().Options.GuaranteedTailCallOpt) { 2775 if (IsTailCallConvention(CalleeCC) && CCMatch) 2776 return true; 2777 return false; 2778 } 2779 2780 // Look for obvious safe cases to perform tail call optimization that do not 2781 // require ABI changes. This is what gcc calls sibcall. 2782 2783 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to 2784 // emit a special epilogue. 2785 if (RegInfo->needsStackRealignment(MF)) 2786 return false; 2787 2788 // Also avoid sibcall optimization if either caller or callee uses struct 2789 // return semantics. 2790 if (isCalleeStructRet || isCallerStructRet) 2791 return false; 2792 2793 // An stdcall caller is expected to clean up its arguments; the callee 2794 // isn't going to do that. 2795 if (!CCMatch && CallerCC==CallingConv::X86_StdCall) 2796 return false; 2797 2798 // Do not sibcall optimize vararg calls unless all arguments are passed via 2799 // registers. 2800 if (isVarArg && !Outs.empty()) { 2801 2802 // Optimizing for varargs on Win64 is unlikely to be safe without 2803 // additional testing. 2804 if (Subtarget->isTargetWin64()) 2805 return false; 2806 2807 SmallVector<CCValAssign, 16> ArgLocs; 2808 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), 2809 getTargetMachine(), ArgLocs, *DAG.getContext()); 2810 2811 CCInfo.AnalyzeCallOperands(Outs, CC_X86); 2812 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) 2813 if (!ArgLocs[i].isRegLoc()) 2814 return false; 2815 } 2816 2817 // If the call result is in ST0 / ST1, it needs to be popped off the x87 2818 // stack. Therefore, if it's not used by the call it is not safe to optimize 2819 // this into a sibcall. 2820 bool Unused = false; 2821 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { 2822 if (!Ins[i].Used) { 2823 Unused = true; 2824 break; 2825 } 2826 } 2827 if (Unused) { 2828 SmallVector<CCValAssign, 16> RVLocs; 2829 CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), 2830 getTargetMachine(), RVLocs, *DAG.getContext()); 2831 CCInfo.AnalyzeCallResult(Ins, RetCC_X86); 2832 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 2833 CCValAssign &VA = RVLocs[i]; 2834 if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) 2835 return false; 2836 } 2837 } 2838 2839 // If the calling conventions do not match, then we'd better make sure the 2840 // results are returned in the same way as what the caller expects. 
2841 if (!CCMatch) { 2842 SmallVector<CCValAssign, 16> RVLocs1; 2843 CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), 2844 getTargetMachine(), RVLocs1, *DAG.getContext()); 2845 CCInfo1.AnalyzeCallResult(Ins, RetCC_X86); 2846 2847 SmallVector<CCValAssign, 16> RVLocs2; 2848 CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), 2849 getTargetMachine(), RVLocs2, *DAG.getContext()); 2850 CCInfo2.AnalyzeCallResult(Ins, RetCC_X86); 2851 2852 if (RVLocs1.size() != RVLocs2.size()) 2853 return false; 2854 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) { 2855 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc()) 2856 return false; 2857 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo()) 2858 return false; 2859 if (RVLocs1[i].isRegLoc()) { 2860 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg()) 2861 return false; 2862 } else { 2863 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset()) 2864 return false; 2865 } 2866 } 2867 } 2868 2869 // If the callee takes no arguments then go on to check the results of the 2870 // call. 2871 if (!Outs.empty()) { 2872 // Check if stack adjustment is needed. For now, do not do this if any 2873 // argument is passed on the stack. 2874 SmallVector<CCValAssign, 16> ArgLocs; 2875 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), 2876 getTargetMachine(), ArgLocs, *DAG.getContext()); 2877 2878 // Allocate shadow area for Win64 2879 if (Subtarget->isTargetWin64()) { 2880 CCInfo.AllocateStack(32, 8); 2881 } 2882 2883 CCInfo.AnalyzeCallOperands(Outs, CC_X86); 2884 if (CCInfo.getNextStackOffset()) { 2885 MachineFunction &MF = DAG.getMachineFunction(); 2886 if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn()) 2887 return false; 2888 2889 // Check if the arguments are already laid out in the right way as 2890 // the caller's fixed stack objects. 2891 MachineFrameInfo *MFI = MF.getFrameInfo(); 2892 const MachineRegisterInfo *MRI = &MF.getRegInfo(); 2893 const X86InstrInfo *TII = 2894 ((const X86TargetMachine&)getTargetMachine()).getInstrInfo(); 2895 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2896 CCValAssign &VA = ArgLocs[i]; 2897 SDValue Arg = OutVals[i]; 2898 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2899 if (VA.getLocInfo() == CCValAssign::Indirect) 2900 return false; 2901 if (!VA.isRegLoc()) { 2902 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, 2903 MFI, MRI, TII)) 2904 return false; 2905 } 2906 } 2907 } 2908 2909 // If the tailcall address may be in a register, then make sure it's 2910 // possible to register allocate for it. In 32-bit, the call address can 2911 // only target EAX, EDX, or ECX since the tail call must be scheduled after 2912 // callee-saved registers are restored. These happen to be the same 2913 // registers used to pass 'inreg' arguments so watch out for those. 
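    // e.g. a 32-bit indirect tail call whose arguments already occupy all of
    // EAX, ECX and EDX would leave no register free for the call target, so
    // such a call is rejected below.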
2914 if (!Subtarget->is64Bit() && 2915 !isa<GlobalAddressSDNode>(Callee) && 2916 !isa<ExternalSymbolSDNode>(Callee)) { 2917 unsigned NumInRegs = 0; 2918 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2919 CCValAssign &VA = ArgLocs[i]; 2920 if (!VA.isRegLoc()) 2921 continue; 2922 unsigned Reg = VA.getLocReg(); 2923 switch (Reg) { 2924 default: break; 2925 case X86::EAX: case X86::EDX: case X86::ECX: 2926 if (++NumInRegs == 3) 2927 return false; 2928 break; 2929 } 2930 } 2931 } 2932 } 2933 2934 return true; 2935} 2936 2937FastISel * 2938X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo, 2939 const TargetLibraryInfo *libInfo) const { 2940 return X86::createFastISel(funcInfo, libInfo); 2941} 2942 2943 2944//===----------------------------------------------------------------------===// 2945// Other Lowering Hooks 2946//===----------------------------------------------------------------------===// 2947 2948static bool MayFoldLoad(SDValue Op) { 2949 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode()); 2950} 2951 2952static bool MayFoldIntoStore(SDValue Op) { 2953 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin()); 2954} 2955 2956static bool isTargetShuffle(unsigned Opcode) { 2957 switch(Opcode) { 2958 default: return false; 2959 case X86ISD::PSHUFD: 2960 case X86ISD::PSHUFHW: 2961 case X86ISD::PSHUFLW: 2962 case X86ISD::SHUFP: 2963 case X86ISD::PALIGN: 2964 case X86ISD::MOVLHPS: 2965 case X86ISD::MOVLHPD: 2966 case X86ISD::MOVHLPS: 2967 case X86ISD::MOVLPS: 2968 case X86ISD::MOVLPD: 2969 case X86ISD::MOVSHDUP: 2970 case X86ISD::MOVSLDUP: 2971 case X86ISD::MOVDDUP: 2972 case X86ISD::MOVSS: 2973 case X86ISD::MOVSD: 2974 case X86ISD::UNPCKL: 2975 case X86ISD::UNPCKH: 2976 case X86ISD::VPERMILP: 2977 case X86ISD::VPERM2X128: 2978 case X86ISD::VPERMI: 2979 return true; 2980 } 2981} 2982 2983static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, 2984 SDValue V1, SelectionDAG &DAG) { 2985 switch(Opc) { 2986 default: llvm_unreachable("Unknown x86 shuffle node"); 2987 case X86ISD::MOVSHDUP: 2988 case X86ISD::MOVSLDUP: 2989 case X86ISD::MOVDDUP: 2990 return DAG.getNode(Opc, dl, VT, V1); 2991 } 2992} 2993 2994static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, 2995 SDValue V1, unsigned TargetMask, 2996 SelectionDAG &DAG) { 2997 switch(Opc) { 2998 default: llvm_unreachable("Unknown x86 shuffle node"); 2999 case X86ISD::PSHUFD: 3000 case X86ISD::PSHUFHW: 3001 case X86ISD::PSHUFLW: 3002 case X86ISD::VPERMILP: 3003 case X86ISD::VPERMI: 3004 return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8)); 3005 } 3006} 3007 3008static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, 3009 SDValue V1, SDValue V2, unsigned TargetMask, 3010 SelectionDAG &DAG) { 3011 switch(Opc) { 3012 default: llvm_unreachable("Unknown x86 shuffle node"); 3013 case X86ISD::PALIGN: 3014 case X86ISD::SHUFP: 3015 case X86ISD::VPERM2X128: 3016 return DAG.getNode(Opc, dl, VT, V1, V2, 3017 DAG.getConstant(TargetMask, MVT::i8)); 3018 } 3019} 3020 3021static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, 3022 SDValue V1, SDValue V2, SelectionDAG &DAG) { 3023 switch(Opc) { 3024 default: llvm_unreachable("Unknown x86 shuffle node"); 3025 case X86ISD::MOVLHPS: 3026 case X86ISD::MOVLHPD: 3027 case X86ISD::MOVHLPS: 3028 case X86ISD::MOVLPS: 3029 case X86ISD::MOVLPD: 3030 case X86ISD::MOVSS: 3031 case X86ISD::MOVSD: 3032 case X86ISD::UNPCKL: 3033 case X86ISD::UNPCKH: 3034 return DAG.getNode(Opc, dl, VT, V1, V2); 3035 } 
3036} 3037 3038SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const { 3039 MachineFunction &MF = DAG.getMachineFunction(); 3040 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 3041 int ReturnAddrIndex = FuncInfo->getRAIndex(); 3042 3043 if (ReturnAddrIndex == 0) { 3044 // Set up a frame object for the return address. 3045 unsigned SlotSize = RegInfo->getSlotSize(); 3046 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize, -SlotSize, 3047 false); 3048 FuncInfo->setRAIndex(ReturnAddrIndex); 3049 } 3050 3051 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy()); 3052} 3053 3054 3055bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M, 3056 bool hasSymbolicDisplacement) { 3057 // Offset should fit into 32 bit immediate field. 3058 if (!isInt<32>(Offset)) 3059 return false; 3060 3061 // If we don't have a symbolic displacement - we don't have any extra 3062 // restrictions. 3063 if (!hasSymbolicDisplacement) 3064 return true; 3065 3066 // FIXME: Some tweaks might be needed for medium code model. 3067 if (M != CodeModel::Small && M != CodeModel::Kernel) 3068 return false; 3069 3070 // For small code model we assume that latest object is 16MB before end of 31 3071 // bits boundary. We may also accept pretty large negative constants knowing 3072 // that all objects are in the positive half of address space. 3073 if (M == CodeModel::Small && Offset < 16*1024*1024) 3074 return true; 3075 3076 // For kernel code model we know that all object resist in the negative half 3077 // of 32bits address space. We may not accept negative offsets, since they may 3078 // be just off and we may accept pretty large positive ones. 3079 if (M == CodeModel::Kernel && Offset > 0) 3080 return true; 3081 3082 return false; 3083} 3084 3085/// isCalleePop - Determines whether the callee is required to pop its 3086/// own arguments. Callee pop is necessary to support tail calls. 3087bool X86::isCalleePop(CallingConv::ID CallingConv, 3088 bool is64Bit, bool IsVarArg, bool TailCallOpt) { 3089 if (IsVarArg) 3090 return false; 3091 3092 switch (CallingConv) { 3093 default: 3094 return false; 3095 case CallingConv::X86_StdCall: 3096 return !is64Bit; 3097 case CallingConv::X86_FastCall: 3098 return !is64Bit; 3099 case CallingConv::X86_ThisCall: 3100 return !is64Bit; 3101 case CallingConv::Fast: 3102 return TailCallOpt; 3103 case CallingConv::GHC: 3104 return TailCallOpt; 3105 } 3106} 3107 3108/// TranslateX86CC - do a one to one translation of a ISD::CondCode to the X86 3109/// specific condition code, returning the condition code and the LHS/RHS of the 3110/// comparison to make. 3111static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP, 3112 SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) { 3113 if (!isFP) { 3114 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) { 3115 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) { 3116 // X > -1 -> X == 0, jump !sign. 3117 RHS = DAG.getConstant(0, RHS.getValueType()); 3118 return X86::COND_NS; 3119 } 3120 if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) { 3121 // X < 0 -> X == 0, jump on sign. 
3122 return X86::COND_S; 3123 } 3124 if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) { 3125 // X < 1 -> X <= 0 3126 RHS = DAG.getConstant(0, RHS.getValueType()); 3127 return X86::COND_LE; 3128 } 3129 } 3130 3131 switch (SetCCOpcode) { 3132 default: llvm_unreachable("Invalid integer condition!"); 3133 case ISD::SETEQ: return X86::COND_E; 3134 case ISD::SETGT: return X86::COND_G; 3135 case ISD::SETGE: return X86::COND_GE; 3136 case ISD::SETLT: return X86::COND_L; 3137 case ISD::SETLE: return X86::COND_LE; 3138 case ISD::SETNE: return X86::COND_NE; 3139 case ISD::SETULT: return X86::COND_B; 3140 case ISD::SETUGT: return X86::COND_A; 3141 case ISD::SETULE: return X86::COND_BE; 3142 case ISD::SETUGE: return X86::COND_AE; 3143 } 3144 } 3145 3146 // First determine if it is required or is profitable to flip the operands. 3147 3148 // If LHS is a foldable load, but RHS is not, flip the condition. 3149 if (ISD::isNON_EXTLoad(LHS.getNode()) && 3150 !ISD::isNON_EXTLoad(RHS.getNode())) { 3151 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode); 3152 std::swap(LHS, RHS); 3153 } 3154 3155 switch (SetCCOpcode) { 3156 default: break; 3157 case ISD::SETOLT: 3158 case ISD::SETOLE: 3159 case ISD::SETUGT: 3160 case ISD::SETUGE: 3161 std::swap(LHS, RHS); 3162 break; 3163 } 3164 3165 // On a floating point condition, the flags are set as follows: 3166 // ZF PF CF op 3167 // 0 | 0 | 0 | X > Y 3168 // 0 | 0 | 1 | X < Y 3169 // 1 | 0 | 0 | X == Y 3170 // 1 | 1 | 1 | unordered 3171 switch (SetCCOpcode) { 3172 default: llvm_unreachable("Condcode should be pre-legalized away"); 3173 case ISD::SETUEQ: 3174 case ISD::SETEQ: return X86::COND_E; 3175 case ISD::SETOLT: // flipped 3176 case ISD::SETOGT: 3177 case ISD::SETGT: return X86::COND_A; 3178 case ISD::SETOLE: // flipped 3179 case ISD::SETOGE: 3180 case ISD::SETGE: return X86::COND_AE; 3181 case ISD::SETUGT: // flipped 3182 case ISD::SETULT: 3183 case ISD::SETLT: return X86::COND_B; 3184 case ISD::SETUGE: // flipped 3185 case ISD::SETULE: 3186 case ISD::SETLE: return X86::COND_BE; 3187 case ISD::SETONE: 3188 case ISD::SETNE: return X86::COND_NE; 3189 case ISD::SETUO: return X86::COND_P; 3190 case ISD::SETO: return X86::COND_NP; 3191 case ISD::SETOEQ: 3192 case ISD::SETUNE: return X86::COND_INVALID; 3193 } 3194} 3195 3196/// hasFPCMov - is there a floating point cmov for the specific X86 condition 3197/// code. Current x86 isa includes the following FP cmov instructions: 3198/// fcmovb, fcomvbe, fcomve, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu. 3199static bool hasFPCMov(unsigned X86CC) { 3200 switch (X86CC) { 3201 default: 3202 return false; 3203 case X86::COND_B: 3204 case X86::COND_BE: 3205 case X86::COND_E: 3206 case X86::COND_P: 3207 case X86::COND_A: 3208 case X86::COND_AE: 3209 case X86::COND_NE: 3210 case X86::COND_NP: 3211 return true; 3212 } 3213} 3214 3215/// isFPImmLegal - Returns true if the target can instruction select the 3216/// specified FP immediate natively. If false, the legalizer will 3217/// materialize the FP immediate as a load from a constant pool. 3218bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { 3219 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) { 3220 if (Imm.bitwiseIsEqual(LegalFPImmediates[i])) 3221 return true; 3222 } 3223 return false; 3224} 3225 3226/// isUndefOrInRange - Return true if Val is undef or if its value falls within 3227/// the specified range (L, H]. 
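/// e.g. isUndefOrInRange(-1, 4, 8) and isUndefOrInRange(5, 4, 8) are true,
/// while isUndefOrInRange(2, 4, 8) is false.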
3228static bool isUndefOrInRange(int Val, int Low, int Hi) { 3229 return (Val < 0) || (Val >= Low && Val < Hi); 3230} 3231 3232/// isUndefOrEqual - Val is either less than zero (undef) or equal to the 3233/// specified value. 3234static bool isUndefOrEqual(int Val, int CmpVal) { 3235 if (Val < 0 || Val == CmpVal) 3236 return true; 3237 return false; 3238} 3239 3240/// isSequentialOrUndefInRange - Return true if every element in Mask, beginning 3241/// from position Pos and ending in Pos+Size, falls within the specified 3242/// sequential range (L, L+Pos]. or is undef. 3243static bool isSequentialOrUndefInRange(ArrayRef<int> Mask, 3244 unsigned Pos, unsigned Size, int Low) { 3245 for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low) 3246 if (!isUndefOrEqual(Mask[i], Low)) 3247 return false; 3248 return true; 3249} 3250 3251/// isPSHUFDMask - Return true if the node specifies a shuffle of elements that 3252/// is suitable for input to PSHUFD or PSHUFW. That is, it doesn't reference 3253/// the second operand. 3254static bool isPSHUFDMask(ArrayRef<int> Mask, EVT VT) { 3255 if (VT == MVT::v4f32 || VT == MVT::v4i32 ) 3256 return (Mask[0] < 4 && Mask[1] < 4 && Mask[2] < 4 && Mask[3] < 4); 3257 if (VT == MVT::v2f64 || VT == MVT::v2i64) 3258 return (Mask[0] < 2 && Mask[1] < 2); 3259 return false; 3260} 3261 3262/// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that 3263/// is suitable for input to PSHUFHW. 3264static bool isPSHUFHWMask(ArrayRef<int> Mask, EVT VT, bool HasAVX2) { 3265 if (VT != MVT::v8i16 && (!HasAVX2 || VT != MVT::v16i16)) 3266 return false; 3267 3268 // Lower quadword copied in order or undef. 3269 if (!isSequentialOrUndefInRange(Mask, 0, 4, 0)) 3270 return false; 3271 3272 // Upper quadword shuffled. 3273 for (unsigned i = 4; i != 8; ++i) 3274 if (!isUndefOrInRange(Mask[i], 4, 8)) 3275 return false; 3276 3277 if (VT == MVT::v16i16) { 3278 // Lower quadword copied in order or undef. 3279 if (!isSequentialOrUndefInRange(Mask, 8, 4, 8)) 3280 return false; 3281 3282 // Upper quadword shuffled. 3283 for (unsigned i = 12; i != 16; ++i) 3284 if (!isUndefOrInRange(Mask[i], 12, 16)) 3285 return false; 3286 } 3287 3288 return true; 3289} 3290 3291/// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that 3292/// is suitable for input to PSHUFLW. 3293static bool isPSHUFLWMask(ArrayRef<int> Mask, EVT VT, bool HasAVX2) { 3294 if (VT != MVT::v8i16 && (!HasAVX2 || VT != MVT::v16i16)) 3295 return false; 3296 3297 // Upper quadword copied in order. 3298 if (!isSequentialOrUndefInRange(Mask, 4, 4, 4)) 3299 return false; 3300 3301 // Lower quadword shuffled. 3302 for (unsigned i = 0; i != 4; ++i) 3303 if (!isUndefOrInRange(Mask[i], 0, 4)) 3304 return false; 3305 3306 if (VT == MVT::v16i16) { 3307 // Upper quadword copied in order. 3308 if (!isSequentialOrUndefInRange(Mask, 12, 4, 12)) 3309 return false; 3310 3311 // Lower quadword shuffled. 3312 for (unsigned i = 8; i != 12; ++i) 3313 if (!isUndefOrInRange(Mask[i], 8, 12)) 3314 return false; 3315 } 3316 3317 return true; 3318} 3319 3320/// isPALIGNRMask - Return true if the node specifies a shuffle of elements that 3321/// is suitable for input to PALIGNR. 
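/// e.g. for v8i16 the mask <1,2,3,4,5,6,7,8> (a rotate by one element across
/// the concatenated sources) is accepted; getShufflePALIGNRImmediate encodes
/// it as a byte shift of 2.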
3322static bool isPALIGNRMask(ArrayRef<int> Mask, EVT VT, 3323 const X86Subtarget *Subtarget) { 3324 if ((VT.getSizeInBits() == 128 && !Subtarget->hasSSSE3()) || 3325 (VT.getSizeInBits() == 256 && !Subtarget->hasAVX2())) 3326 return false; 3327 3328 unsigned NumElts = VT.getVectorNumElements(); 3329 unsigned NumLanes = VT.getSizeInBits()/128; 3330 unsigned NumLaneElts = NumElts/NumLanes; 3331 3332 // Do not handle 64-bit element shuffles with palignr. 3333 if (NumLaneElts == 2) 3334 return false; 3335 3336 for (unsigned l = 0; l != NumElts; l+=NumLaneElts) { 3337 unsigned i; 3338 for (i = 0; i != NumLaneElts; ++i) { 3339 if (Mask[i+l] >= 0) 3340 break; 3341 } 3342 3343 // Lane is all undef, go to next lane 3344 if (i == NumLaneElts) 3345 continue; 3346 3347 int Start = Mask[i+l]; 3348 3349 // Make sure its in this lane in one of the sources 3350 if (!isUndefOrInRange(Start, l, l+NumLaneElts) && 3351 !isUndefOrInRange(Start, l+NumElts, l+NumElts+NumLaneElts)) 3352 return false; 3353 3354 // If not lane 0, then we must match lane 0 3355 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Start, Mask[i]+l)) 3356 return false; 3357 3358 // Correct second source to be contiguous with first source 3359 if (Start >= (int)NumElts) 3360 Start -= NumElts - NumLaneElts; 3361 3362 // Make sure we're shifting in the right direction. 3363 if (Start <= (int)(i+l)) 3364 return false; 3365 3366 Start -= i; 3367 3368 // Check the rest of the elements to see if they are consecutive. 3369 for (++i; i != NumLaneElts; ++i) { 3370 int Idx = Mask[i+l]; 3371 3372 // Make sure its in this lane 3373 if (!isUndefOrInRange(Idx, l, l+NumLaneElts) && 3374 !isUndefOrInRange(Idx, l+NumElts, l+NumElts+NumLaneElts)) 3375 return false; 3376 3377 // If not lane 0, then we must match lane 0 3378 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Idx, Mask[i]+l)) 3379 return false; 3380 3381 if (Idx >= (int)NumElts) 3382 Idx -= NumElts - NumLaneElts; 3383 3384 if (!isUndefOrEqual(Idx, Start+i)) 3385 return false; 3386 3387 } 3388 } 3389 3390 return true; 3391} 3392 3393/// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming 3394/// the two vector operands have swapped position. 3395static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask, 3396 unsigned NumElems) { 3397 for (unsigned i = 0; i != NumElems; ++i) { 3398 int idx = Mask[i]; 3399 if (idx < 0) 3400 continue; 3401 else if (idx < (int)NumElems) 3402 Mask[i] = idx + NumElems; 3403 else 3404 Mask[i] = idx - NumElems; 3405 } 3406} 3407 3408/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand 3409/// specifies a shuffle of elements that is suitable for input to 128/256-bit 3410/// SHUFPS and SHUFPD. If Commuted is true, then it checks for sources to be 3411/// reverse of what x86 shuffles want. 3412static bool isSHUFPMask(ArrayRef<int> Mask, EVT VT, bool HasAVX, 3413 bool Commuted = false) { 3414 if (!HasAVX && VT.getSizeInBits() == 256) 3415 return false; 3416 3417 unsigned NumElems = VT.getVectorNumElements(); 3418 unsigned NumLanes = VT.getSizeInBits()/128; 3419 unsigned NumLaneElems = NumElems/NumLanes; 3420 3421 if (NumLaneElems != 2 && NumLaneElems != 4) 3422 return false; 3423 3424 // VSHUFPSY divides the resulting vector into 4 chunks. 3425 // The sources are also splitted into 4 chunks, and each destination 3426 // chunk must come from a different source chunk. 
3427 // 3428 // SRC1 => X7 X6 X5 X4 X3 X2 X1 X0 3429 // SRC2 => Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y9 3430 // 3431 // DST => Y7..Y4, Y7..Y4, X7..X4, X7..X4, 3432 // Y3..Y0, Y3..Y0, X3..X0, X3..X0 3433 // 3434 // VSHUFPDY divides the resulting vector into 4 chunks. 3435 // The sources are also splitted into 4 chunks, and each destination 3436 // chunk must come from a different source chunk. 3437 // 3438 // SRC1 => X3 X2 X1 X0 3439 // SRC2 => Y3 Y2 Y1 Y0 3440 // 3441 // DST => Y3..Y2, X3..X2, Y1..Y0, X1..X0 3442 // 3443 unsigned HalfLaneElems = NumLaneElems/2; 3444 for (unsigned l = 0; l != NumElems; l += NumLaneElems) { 3445 for (unsigned i = 0; i != NumLaneElems; ++i) { 3446 int Idx = Mask[i+l]; 3447 unsigned RngStart = l + ((Commuted == (i<HalfLaneElems)) ? NumElems : 0); 3448 if (!isUndefOrInRange(Idx, RngStart, RngStart+NumLaneElems)) 3449 return false; 3450 // For VSHUFPSY, the mask of the second half must be the same as the 3451 // first but with the appropriate offsets. This works in the same way as 3452 // VPERMILPS works with masks. 3453 if (NumElems != 8 || l == 0 || Mask[i] < 0) 3454 continue; 3455 if (!isUndefOrEqual(Idx, Mask[i]+l)) 3456 return false; 3457 } 3458 } 3459 3460 return true; 3461} 3462 3463/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand 3464/// specifies a shuffle of elements that is suitable for input to MOVHLPS. 3465static bool isMOVHLPSMask(ArrayRef<int> Mask, EVT VT) { 3466 if (!VT.is128BitVector()) 3467 return false; 3468 3469 unsigned NumElems = VT.getVectorNumElements(); 3470 3471 if (NumElems != 4) 3472 return false; 3473 3474 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3 3475 return isUndefOrEqual(Mask[0], 6) && 3476 isUndefOrEqual(Mask[1], 7) && 3477 isUndefOrEqual(Mask[2], 2) && 3478 isUndefOrEqual(Mask[3], 3); 3479} 3480 3481/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form 3482/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef, 3483/// <2, 3, 2, 3> 3484static bool isMOVHLPS_v_undef_Mask(ArrayRef<int> Mask, EVT VT) { 3485 if (!VT.is128BitVector()) 3486 return false; 3487 3488 unsigned NumElems = VT.getVectorNumElements(); 3489 3490 if (NumElems != 4) 3491 return false; 3492 3493 return isUndefOrEqual(Mask[0], 2) && 3494 isUndefOrEqual(Mask[1], 3) && 3495 isUndefOrEqual(Mask[2], 2) && 3496 isUndefOrEqual(Mask[3], 3); 3497} 3498 3499/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand 3500/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}. 3501static bool isMOVLPMask(ArrayRef<int> Mask, EVT VT) { 3502 if (!VT.is128BitVector()) 3503 return false; 3504 3505 unsigned NumElems = VT.getVectorNumElements(); 3506 3507 if (NumElems != 2 && NumElems != 4) 3508 return false; 3509 3510 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 3511 if (!isUndefOrEqual(Mask[i], i + NumElems)) 3512 return false; 3513 3514 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i) 3515 if (!isUndefOrEqual(Mask[i], i)) 3516 return false; 3517 3518 return true; 3519} 3520 3521/// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand 3522/// specifies a shuffle of elements that is suitable for input to MOVLHPS. 
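/// e.g. for v4f32 the expected mask is <0, 1, 4, 5>: the low half of V1
/// followed by the low half of V2.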
3523static bool isMOVLHPSMask(ArrayRef<int> Mask, EVT VT) { 3524 if (!VT.is128BitVector()) 3525 return false; 3526 3527 unsigned NumElems = VT.getVectorNumElements(); 3528 3529 if (NumElems != 2 && NumElems != 4) 3530 return false; 3531 3532 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 3533 if (!isUndefOrEqual(Mask[i], i)) 3534 return false; 3535 3536 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 3537 if (!isUndefOrEqual(Mask[i + e], i + NumElems)) 3538 return false; 3539 3540 return true; 3541} 3542 3543// 3544// Some special combinations that can be optimized. 3545// 3546static 3547SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp, 3548 SelectionDAG &DAG) { 3549 EVT VT = SVOp->getValueType(0); 3550 DebugLoc dl = SVOp->getDebugLoc(); 3551 3552 if (VT != MVT::v8i32 && VT != MVT::v8f32) 3553 return SDValue(); 3554 3555 ArrayRef<int> Mask = SVOp->getMask(); 3556 3557 // These are the special masks that may be optimized. 3558 static const int MaskToOptimizeEven[] = {0, 8, 2, 10, 4, 12, 6, 14}; 3559 static const int MaskToOptimizeOdd[] = {1, 9, 3, 11, 5, 13, 7, 15}; 3560 bool MatchEvenMask = true; 3561 bool MatchOddMask = true; 3562 for (int i=0; i<8; ++i) { 3563 if (!isUndefOrEqual(Mask[i], MaskToOptimizeEven[i])) 3564 MatchEvenMask = false; 3565 if (!isUndefOrEqual(Mask[i], MaskToOptimizeOdd[i])) 3566 MatchOddMask = false; 3567 } 3568 3569 if (!MatchEvenMask && !MatchOddMask) 3570 return SDValue(); 3571 3572 SDValue UndefNode = DAG.getNode(ISD::UNDEF, dl, VT); 3573 3574 SDValue Op0 = SVOp->getOperand(0); 3575 SDValue Op1 = SVOp->getOperand(1); 3576 3577 if (MatchEvenMask) { 3578 // Shift the second operand right to 32 bits. 3579 static const int ShiftRightMask[] = {-1, 0, -1, 2, -1, 4, -1, 6 }; 3580 Op1 = DAG.getVectorShuffle(VT, dl, Op1, UndefNode, ShiftRightMask); 3581 } else { 3582 // Shift the first operand left to 32 bits. 3583 static const int ShiftLeftMask[] = {1, -1, 3, -1, 5, -1, 7, -1 }; 3584 Op0 = DAG.getVectorShuffle(VT, dl, Op0, UndefNode, ShiftLeftMask); 3585 } 3586 static const int BlendMask[] = {0, 9, 2, 11, 4, 13, 6, 15}; 3587 return DAG.getVectorShuffle(VT, dl, Op0, Op1, BlendMask); 3588} 3589 3590/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand 3591/// specifies a shuffle of elements that is suitable for input to UNPCKL. 3592static bool isUNPCKLMask(ArrayRef<int> Mask, EVT VT, 3593 bool HasAVX2, bool V2IsSplat = false) { 3594 unsigned NumElts = VT.getVectorNumElements(); 3595 3596 assert((VT.is128BitVector() || VT.is256BitVector()) && 3597 "Unsupported vector type for unpckh"); 3598 3599 if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8 && 3600 (!HasAVX2 || (NumElts != 16 && NumElts != 32))) 3601 return false; 3602 3603 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 3604 // independently on 128-bit lanes. 
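  // e.g. for v8i32 the accepted mask is <0,8,1,9, 4,12,5,13>: each 128-bit
  // lane interleaves its own low elements from the two sources.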
3605 unsigned NumLanes = VT.getSizeInBits()/128; 3606 unsigned NumLaneElts = NumElts/NumLanes; 3607 3608 for (unsigned l = 0; l != NumLanes; ++l) { 3609 for (unsigned i = l*NumLaneElts, j = l*NumLaneElts; 3610 i != (l+1)*NumLaneElts; 3611 i += 2, ++j) { 3612 int BitI = Mask[i]; 3613 int BitI1 = Mask[i+1]; 3614 if (!isUndefOrEqual(BitI, j)) 3615 return false; 3616 if (V2IsSplat) { 3617 if (!isUndefOrEqual(BitI1, NumElts)) 3618 return false; 3619 } else { 3620 if (!isUndefOrEqual(BitI1, j + NumElts)) 3621 return false; 3622 } 3623 } 3624 } 3625 3626 return true; 3627} 3628 3629/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand 3630/// specifies a shuffle of elements that is suitable for input to UNPCKH. 3631static bool isUNPCKHMask(ArrayRef<int> Mask, EVT VT, 3632 bool HasAVX2, bool V2IsSplat = false) { 3633 unsigned NumElts = VT.getVectorNumElements(); 3634 3635 assert((VT.is128BitVector() || VT.is256BitVector()) && 3636 "Unsupported vector type for unpckh"); 3637 3638 if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8 && 3639 (!HasAVX2 || (NumElts != 16 && NumElts != 32))) 3640 return false; 3641 3642 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 3643 // independently on 128-bit lanes. 3644 unsigned NumLanes = VT.getSizeInBits()/128; 3645 unsigned NumLaneElts = NumElts/NumLanes; 3646 3647 for (unsigned l = 0; l != NumLanes; ++l) { 3648 for (unsigned i = l*NumLaneElts, j = (l*NumLaneElts)+NumLaneElts/2; 3649 i != (l+1)*NumLaneElts; i += 2, ++j) { 3650 int BitI = Mask[i]; 3651 int BitI1 = Mask[i+1]; 3652 if (!isUndefOrEqual(BitI, j)) 3653 return false; 3654 if (V2IsSplat) { 3655 if (isUndefOrEqual(BitI1, NumElts)) 3656 return false; 3657 } else { 3658 if (!isUndefOrEqual(BitI1, j+NumElts)) 3659 return false; 3660 } 3661 } 3662 } 3663 return true; 3664} 3665 3666/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form 3667/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef, 3668/// <0, 0, 1, 1> 3669static bool isUNPCKL_v_undef_Mask(ArrayRef<int> Mask, EVT VT, 3670 bool HasAVX2) { 3671 unsigned NumElts = VT.getVectorNumElements(); 3672 3673 assert((VT.is128BitVector() || VT.is256BitVector()) && 3674 "Unsupported vector type for unpckh"); 3675 3676 if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8 && 3677 (!HasAVX2 || (NumElts != 16 && NumElts != 32))) 3678 return false; 3679 3680 // For 256-bit i64/f64, use MOVDDUPY instead, so reject the matching pattern 3681 // FIXME: Need a better way to get rid of this, there's no latency difference 3682 // between UNPCKLPD and MOVDDUP, the later should always be checked first and 3683 // the former later. We should also remove the "_undef" special mask. 3684 if (NumElts == 4 && VT.getSizeInBits() == 256) 3685 return false; 3686 3687 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 3688 // independently on 128-bit lanes. 3689 unsigned NumLanes = VT.getSizeInBits()/128; 3690 unsigned NumLaneElts = NumElts/NumLanes; 3691 3692 for (unsigned l = 0; l != NumLanes; ++l) { 3693 for (unsigned i = l*NumLaneElts, j = l*NumLaneElts; 3694 i != (l+1)*NumLaneElts; 3695 i += 2, ++j) { 3696 int BitI = Mask[i]; 3697 int BitI1 = Mask[i+1]; 3698 3699 if (!isUndefOrEqual(BitI, j)) 3700 return false; 3701 if (!isUndefOrEqual(BitI1, j)) 3702 return false; 3703 } 3704 } 3705 3706 return true; 3707} 3708 3709/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form 3710/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. 
vector_shuffle v, undef, 3711/// <2, 2, 3, 3> 3712static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, EVT VT, bool HasAVX2) { 3713 unsigned NumElts = VT.getVectorNumElements(); 3714 3715 assert((VT.is128BitVector() || VT.is256BitVector()) && 3716 "Unsupported vector type for unpckh"); 3717 3718 if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8 && 3719 (!HasAVX2 || (NumElts != 16 && NumElts != 32))) 3720 return false; 3721 3722 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 3723 // independently on 128-bit lanes. 3724 unsigned NumLanes = VT.getSizeInBits()/128; 3725 unsigned NumLaneElts = NumElts/NumLanes; 3726 3727 for (unsigned l = 0; l != NumLanes; ++l) { 3728 for (unsigned i = l*NumLaneElts, j = (l*NumLaneElts)+NumLaneElts/2; 3729 i != (l+1)*NumLaneElts; i += 2, ++j) { 3730 int BitI = Mask[i]; 3731 int BitI1 = Mask[i+1]; 3732 if (!isUndefOrEqual(BitI, j)) 3733 return false; 3734 if (!isUndefOrEqual(BitI1, j)) 3735 return false; 3736 } 3737 } 3738 return true; 3739} 3740 3741/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand 3742/// specifies a shuffle of elements that is suitable for input to MOVSS, 3743/// MOVSD, and MOVD, i.e. setting the lowest element. 3744static bool isMOVLMask(ArrayRef<int> Mask, EVT VT) { 3745 if (VT.getVectorElementType().getSizeInBits() < 32) 3746 return false; 3747 if (!VT.is128BitVector()) 3748 return false; 3749 3750 unsigned NumElts = VT.getVectorNumElements(); 3751 3752 if (!isUndefOrEqual(Mask[0], NumElts)) 3753 return false; 3754 3755 for (unsigned i = 1; i != NumElts; ++i) 3756 if (!isUndefOrEqual(Mask[i], i)) 3757 return false; 3758 3759 return true; 3760} 3761 3762/// isVPERM2X128Mask - Match 256-bit shuffles where the elements are considered 3763/// as permutations between 128-bit chunks or halves. As an example: this 3764/// shuffle bellow: 3765/// vector_shuffle <4, 5, 6, 7, 12, 13, 14, 15> 3766/// The first half comes from the second half of V1 and the second half from the 3767/// the second half of V2. 3768static bool isVPERM2X128Mask(ArrayRef<int> Mask, EVT VT, bool HasAVX) { 3769 if (!HasAVX || !VT.is256BitVector()) 3770 return false; 3771 3772 // The shuffle result is divided into half A and half B. In total the two 3773 // sources have 4 halves, namely: C, D, E, F. The final values of A and 3774 // B must come from C, D, E or F. 3775 unsigned HalfSize = VT.getVectorNumElements()/2; 3776 bool MatchA = false, MatchB = false; 3777 3778 // Check if A comes from one of C, D, E, F. 3779 for (unsigned Half = 0; Half != 4; ++Half) { 3780 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, Half*HalfSize)) { 3781 MatchA = true; 3782 break; 3783 } 3784 } 3785 3786 // Check if B comes from one of C, D, E, F. 3787 for (unsigned Half = 0; Half != 4; ++Half) { 3788 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, Half*HalfSize)) { 3789 MatchB = true; 3790 break; 3791 } 3792 } 3793 3794 return MatchA && MatchB; 3795} 3796 3797/// getShuffleVPERM2X128Immediate - Return the appropriate immediate to shuffle 3798/// the specified VECTOR_MASK mask with VPERM2F128/VPERM2I128 instructions. 
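/// e.g. for the v8i32 mask <4, 5, 6, 7, 12, 13, 14, 15> above, the low half
/// of the result is the high half of V1 (selector 1) and the high half is the
/// high half of V2 (selector 3), so the immediate is 0x31.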
3799static unsigned getShuffleVPERM2X128Immediate(ShuffleVectorSDNode *SVOp) {
3800 EVT VT = SVOp->getValueType(0);
3801
3802 unsigned HalfSize = VT.getVectorNumElements()/2;
3803
3804 unsigned FstHalf = 0, SndHalf = 0;
3805 for (unsigned i = 0; i < HalfSize; ++i) {
3806 if (SVOp->getMaskElt(i) > 0) {
3807 FstHalf = SVOp->getMaskElt(i)/HalfSize;
3808 break;
3809 }
3810 }
3811 for (unsigned i = HalfSize; i < HalfSize*2; ++i) {
3812 if (SVOp->getMaskElt(i) > 0) {
3813 SndHalf = SVOp->getMaskElt(i)/HalfSize;
3814 break;
3815 }
3816 }
3817
3818 return (FstHalf | (SndHalf << 4));
3819}
3820
3821/// isVPERMILPMask - Return true if the specified VECTOR_SHUFFLE operand
3822/// specifies a shuffle of elements that is suitable for input to VPERMILPD*.
3823/// Note that VPERMIL mask matching is different depending on whether the
3824/// underlying element type is 32 or 64 bits. For VPERMILPS the high half of
3825/// the mask should point to the same elements as the low half, but within the
3826/// higher half of the source. For VPERMILPD the two lanes can be shuffled
3827/// independently of each other, but lanes can't be crossed. Also handles PSHUFDY.
3828static bool isVPERMILPMask(ArrayRef<int> Mask, EVT VT, bool HasAVX) {
3829 if (!HasAVX)
3830 return false;
3831
3832 unsigned NumElts = VT.getVectorNumElements();
3833 // Only match 256-bit with 32/64-bit types
3834 if (VT.getSizeInBits() != 256 || (NumElts != 4 && NumElts != 8))
3835 return false;
3836
3837 unsigned NumLanes = VT.getSizeInBits()/128;
3838 unsigned LaneSize = NumElts/NumLanes;
3839 for (unsigned l = 0; l != NumElts; l += LaneSize) {
3840 for (unsigned i = 0; i != LaneSize; ++i) {
3841 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
3842 return false;
3843 if (NumElts != 8 || l == 0)
3844 continue;
3845 // VPERMILPS handling
3846 if (Mask[i] < 0)
3847 continue;
3848 if (!isUndefOrEqual(Mask[i+l], Mask[i]+l))
3849 return false;
3850 }
3851 }
3852
3853 return true;
3854}
3855
3856/// isCommutedMOVLMask - Returns true if the shuffle mask is the reverse
3857/// of what x86 movss wants. X86 movss requires the lowest element to be the
3858/// lowest element of vector 2 and the other elements to come from vector 1 in order.
3859static bool isCommutedMOVLMask(ArrayRef<int> Mask, EVT VT,
3860 bool V2IsSplat = false, bool V2IsUndef = false) {
3861 if (!VT.is128BitVector())
3862 return false;
3863
3864 unsigned NumOps = VT.getVectorNumElements();
3865 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
3866 return false;
3867
3868 if (!isUndefOrEqual(Mask[0], 0))
3869 return false;
3870
3871 for (unsigned i = 1; i != NumOps; ++i)
3872 if (!(isUndefOrEqual(Mask[i], i+NumOps) ||
3873 (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) ||
3874 (V2IsSplat && isUndefOrEqual(Mask[i], NumOps))))
3875 return false;
3876
3877 return true;
3878}
3879
3880/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
3881/// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
3882/// Masks to match: <1, 1, 3, 3> or <1, 1, 3, 3, 5, 5, 7, 7> 3883static bool isMOVSHDUPMask(ArrayRef<int> Mask, EVT VT, 3884 const X86Subtarget *Subtarget) { 3885 if (!Subtarget->hasSSE3()) 3886 return false; 3887 3888 unsigned NumElems = VT.getVectorNumElements(); 3889 3890 if ((VT.getSizeInBits() == 128 && NumElems != 4) || 3891 (VT.getSizeInBits() == 256 && NumElems != 8)) 3892 return false; 3893 3894 // "i+1" is the value the indexed mask element must have 3895 for (unsigned i = 0; i != NumElems; i += 2) 3896 if (!isUndefOrEqual(Mask[i], i+1) || 3897 !isUndefOrEqual(Mask[i+1], i+1)) 3898 return false; 3899 3900 return true; 3901} 3902 3903/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand 3904/// specifies a shuffle of elements that is suitable for input to MOVSLDUP. 3905/// Masks to match: <0, 0, 2, 2> or <0, 0, 2, 2, 4, 4, 6, 6> 3906static bool isMOVSLDUPMask(ArrayRef<int> Mask, EVT VT, 3907 const X86Subtarget *Subtarget) { 3908 if (!Subtarget->hasSSE3()) 3909 return false; 3910 3911 unsigned NumElems = VT.getVectorNumElements(); 3912 3913 if ((VT.getSizeInBits() == 128 && NumElems != 4) || 3914 (VT.getSizeInBits() == 256 && NumElems != 8)) 3915 return false; 3916 3917 // "i" is the value the indexed mask element must have 3918 for (unsigned i = 0; i != NumElems; i += 2) 3919 if (!isUndefOrEqual(Mask[i], i) || 3920 !isUndefOrEqual(Mask[i+1], i)) 3921 return false; 3922 3923 return true; 3924} 3925 3926/// isMOVDDUPYMask - Return true if the specified VECTOR_SHUFFLE operand 3927/// specifies a shuffle of elements that is suitable for input to 256-bit 3928/// version of MOVDDUP. 3929static bool isMOVDDUPYMask(ArrayRef<int> Mask, EVT VT, bool HasAVX) { 3930 if (!HasAVX || !VT.is256BitVector()) 3931 return false; 3932 3933 unsigned NumElts = VT.getVectorNumElements(); 3934 if (NumElts != 4) 3935 return false; 3936 3937 for (unsigned i = 0; i != NumElts/2; ++i) 3938 if (!isUndefOrEqual(Mask[i], 0)) 3939 return false; 3940 for (unsigned i = NumElts/2; i != NumElts; ++i) 3941 if (!isUndefOrEqual(Mask[i], NumElts/2)) 3942 return false; 3943 return true; 3944} 3945 3946/// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand 3947/// specifies a shuffle of elements that is suitable for input to 128-bit 3948/// version of MOVDDUP. 3949static bool isMOVDDUPMask(ArrayRef<int> Mask, EVT VT) { 3950 if (!VT.is128BitVector()) 3951 return false; 3952 3953 unsigned e = VT.getVectorNumElements() / 2; 3954 for (unsigned i = 0; i != e; ++i) 3955 if (!isUndefOrEqual(Mask[i], i)) 3956 return false; 3957 for (unsigned i = 0; i != e; ++i) 3958 if (!isUndefOrEqual(Mask[e+i], i)) 3959 return false; 3960 return true; 3961} 3962 3963/// isVEXTRACTF128Index - Return true if the specified 3964/// EXTRACT_SUBVECTOR operand specifies a vector extract that is 3965/// suitable for input to VEXTRACTF128. 3966bool X86::isVEXTRACTF128Index(SDNode *N) { 3967 if (!isa<ConstantSDNode>(N->getOperand(1).getNode())) 3968 return false; 3969 3970 // The index should be aligned on a 128-bit boundary. 
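  // e.g. extracting a <4 x i32> subvector of a <8 x i32> at index 4 is
  // allowed (bit offset 128), while index 2 (bit offset 64) is not.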
3971 uint64_t Index = 3972 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue(); 3973 3974 unsigned VL = N->getValueType(0).getVectorNumElements(); 3975 unsigned VBits = N->getValueType(0).getSizeInBits(); 3976 unsigned ElSize = VBits / VL; 3977 bool Result = (Index * ElSize) % 128 == 0; 3978 3979 return Result; 3980} 3981 3982/// isVINSERTF128Index - Return true if the specified INSERT_SUBVECTOR 3983/// operand specifies a subvector insert that is suitable for input to 3984/// VINSERTF128. 3985bool X86::isVINSERTF128Index(SDNode *N) { 3986 if (!isa<ConstantSDNode>(N->getOperand(2).getNode())) 3987 return false; 3988 3989 // The index should be aligned on a 128-bit boundary. 3990 uint64_t Index = 3991 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue(); 3992 3993 unsigned VL = N->getValueType(0).getVectorNumElements(); 3994 unsigned VBits = N->getValueType(0).getSizeInBits(); 3995 unsigned ElSize = VBits / VL; 3996 bool Result = (Index * ElSize) % 128 == 0; 3997 3998 return Result; 3999} 4000 4001/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle 4002/// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions. 4003/// Handles 128-bit and 256-bit. 4004static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) { 4005 EVT VT = N->getValueType(0); 4006 4007 assert((VT.is128BitVector() || VT.is256BitVector()) && 4008 "Unsupported vector type for PSHUF/SHUFP"); 4009 4010 // Handle 128 and 256-bit vector lengths. AVX defines PSHUF/SHUFP to operate 4011 // independently on 128-bit lanes. 4012 unsigned NumElts = VT.getVectorNumElements(); 4013 unsigned NumLanes = VT.getSizeInBits()/128; 4014 unsigned NumLaneElts = NumElts/NumLanes; 4015 4016 assert((NumLaneElts == 2 || NumLaneElts == 4) && 4017 "Only supports 2 or 4 elements per lane"); 4018 4019 unsigned Shift = (NumLaneElts == 4) ? 1 : 0; 4020 unsigned Mask = 0; 4021 for (unsigned i = 0; i != NumElts; ++i) { 4022 int Elt = N->getMaskElt(i); 4023 if (Elt < 0) continue; 4024 Elt &= NumLaneElts - 1; 4025 unsigned ShAmt = (i << Shift) % 8; 4026 Mask |= Elt << ShAmt; 4027 } 4028 4029 return Mask; 4030} 4031 4032/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle 4033/// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction. 4034static unsigned getShufflePSHUFHWImmediate(ShuffleVectorSDNode *N) { 4035 EVT VT = N->getValueType(0); 4036 4037 assert((VT == MVT::v8i16 || VT == MVT::v16i16) && 4038 "Unsupported vector type for PSHUFHW"); 4039 4040 unsigned NumElts = VT.getVectorNumElements(); 4041 4042 unsigned Mask = 0; 4043 for (unsigned l = 0; l != NumElts; l += 8) { 4044 // 8 nodes per lane, but we only care about the last 4. 4045 for (unsigned i = 0; i < 4; ++i) { 4046 int Elt = N->getMaskElt(l+i+4); 4047 if (Elt < 0) continue; 4048 Elt &= 0x3; // only 2-bits. 4049 Mask |= Elt << (i * 2); 4050 } 4051 } 4052 4053 return Mask; 4054} 4055 4056/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle 4057/// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction. 4058static unsigned getShufflePSHUFLWImmediate(ShuffleVectorSDNode *N) { 4059 EVT VT = N->getValueType(0); 4060 4061 assert((VT == MVT::v8i16 || VT == MVT::v16i16) && 4062 "Unsupported vector type for PSHUFHW"); 4063 4064 unsigned NumElts = VT.getVectorNumElements(); 4065 4066 unsigned Mask = 0; 4067 for (unsigned l = 0; l != NumElts; l += 8) { 4068 // 8 nodes per lane, but we only care about the first 4. 
4069 for (unsigned i = 0; i < 4; ++i) { 4070 int Elt = N->getMaskElt(l+i); 4071 if (Elt < 0) continue; 4072 Elt &= 0x3; // only 2-bits 4073 Mask |= Elt << (i * 2); 4074 } 4075 } 4076 4077 return Mask; 4078} 4079 4080/// getShufflePALIGNRImmediate - Return the appropriate immediate to shuffle 4081/// the specified VECTOR_SHUFFLE mask with the PALIGNR instruction. 4082static unsigned getShufflePALIGNRImmediate(ShuffleVectorSDNode *SVOp) { 4083 EVT VT = SVOp->getValueType(0); 4084 unsigned EltSize = VT.getVectorElementType().getSizeInBits() >> 3; 4085 4086 unsigned NumElts = VT.getVectorNumElements(); 4087 unsigned NumLanes = VT.getSizeInBits()/128; 4088 unsigned NumLaneElts = NumElts/NumLanes; 4089 4090 int Val = 0; 4091 unsigned i; 4092 for (i = 0; i != NumElts; ++i) { 4093 Val = SVOp->getMaskElt(i); 4094 if (Val >= 0) 4095 break; 4096 } 4097 if (Val >= (int)NumElts) 4098 Val -= NumElts - NumLaneElts; 4099 4100 assert(Val - i > 0 && "PALIGNR imm should be positive"); 4101 return (Val - i) * EltSize; 4102} 4103 4104/// getExtractVEXTRACTF128Immediate - Return the appropriate immediate 4105/// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128 4106/// instructions. 4107unsigned X86::getExtractVEXTRACTF128Immediate(SDNode *N) { 4108 if (!isa<ConstantSDNode>(N->getOperand(1).getNode())) 4109 llvm_unreachable("Illegal extract subvector for VEXTRACTF128"); 4110 4111 uint64_t Index = 4112 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue(); 4113 4114 EVT VecVT = N->getOperand(0).getValueType(); 4115 EVT ElVT = VecVT.getVectorElementType(); 4116 4117 unsigned NumElemsPerChunk = 128 / ElVT.getSizeInBits(); 4118 return Index / NumElemsPerChunk; 4119} 4120 4121/// getInsertVINSERTF128Immediate - Return the appropriate immediate 4122/// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128 4123/// instructions. 4124unsigned X86::getInsertVINSERTF128Immediate(SDNode *N) { 4125 if (!isa<ConstantSDNode>(N->getOperand(2).getNode())) 4126 llvm_unreachable("Illegal insert subvector for VINSERTF128"); 4127 4128 uint64_t Index = 4129 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue(); 4130 4131 EVT VecVT = N->getValueType(0); 4132 EVT ElVT = VecVT.getVectorElementType(); 4133 4134 unsigned NumElemsPerChunk = 128 / ElVT.getSizeInBits(); 4135 return Index / NumElemsPerChunk; 4136} 4137 4138/// getShuffleCLImmediate - Return the appropriate immediate to shuffle 4139/// the specified VECTOR_SHUFFLE mask with VPERMQ and VPERMPD instructions. 4140/// Handles 256-bit. 4141static unsigned getShuffleCLImmediate(ShuffleVectorSDNode *N) { 4142 EVT VT = N->getValueType(0); 4143 4144 unsigned NumElts = VT.getVectorNumElements(); 4145 4146 assert((VT.is256BitVector() && NumElts == 4) && 4147 "Unsupported vector type for VPERMQ/VPERMPD"); 4148 4149 unsigned Mask = 0; 4150 for (unsigned i = 0; i != NumElts; ++i) { 4151 int Elt = N->getMaskElt(i); 4152 if (Elt < 0) 4153 continue; 4154 Mask |= Elt << (i*2); 4155 } 4156 4157 return Mask; 4158} 4159/// isZeroNode - Returns true if Elt is a constant zero or a floating point 4160/// constant +0.0. 4161bool X86::isZeroNode(SDValue Elt) { 4162 return ((isa<ConstantSDNode>(Elt) && 4163 cast<ConstantSDNode>(Elt)->isNullValue()) || 4164 (isa<ConstantFPSDNode>(Elt) && 4165 cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero())); 4166} 4167 4168/// CommuteVectorShuffle - Swap vector_shuffle operands as well as values in 4169/// their permute mask. 
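/// e.g. shuffle(V1, V2, <0, 5, 2, 7>) becomes shuffle(V2, V1, <4, 1, 6, 3>).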
4170static SDValue CommuteVectorShuffle(ShuffleVectorSDNode *SVOp, 4171 SelectionDAG &DAG) { 4172 EVT VT = SVOp->getValueType(0); 4173 unsigned NumElems = VT.getVectorNumElements(); 4174 SmallVector<int, 8> MaskVec; 4175 4176 for (unsigned i = 0; i != NumElems; ++i) { 4177 int Idx = SVOp->getMaskElt(i); 4178 if (Idx >= 0) { 4179 if (Idx < (int)NumElems) 4180 Idx += NumElems; 4181 else 4182 Idx -= NumElems; 4183 } 4184 MaskVec.push_back(Idx); 4185 } 4186 return DAG.getVectorShuffle(VT, SVOp->getDebugLoc(), SVOp->getOperand(1), 4187 SVOp->getOperand(0), &MaskVec[0]); 4188} 4189 4190/// ShouldXformToMOVHLPS - Return true if the node should be transformed to 4191/// match movhlps. The lower half elements should come from upper half of 4192/// V1 (and in order), and the upper half elements should come from the upper 4193/// half of V2 (and in order). 4194static bool ShouldXformToMOVHLPS(ArrayRef<int> Mask, EVT VT) { 4195 if (!VT.is128BitVector()) 4196 return false; 4197 if (VT.getVectorNumElements() != 4) 4198 return false; 4199 for (unsigned i = 0, e = 2; i != e; ++i) 4200 if (!isUndefOrEqual(Mask[i], i+2)) 4201 return false; 4202 for (unsigned i = 2; i != 4; ++i) 4203 if (!isUndefOrEqual(Mask[i], i+4)) 4204 return false; 4205 return true; 4206} 4207 4208/// isScalarLoadToVector - Returns true if the node is a scalar load that 4209/// is promoted to a vector. It also returns the LoadSDNode by reference if 4210/// required. 4211static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = NULL) { 4212 if (N->getOpcode() != ISD::SCALAR_TO_VECTOR) 4213 return false; 4214 N = N->getOperand(0).getNode(); 4215 if (!ISD::isNON_EXTLoad(N)) 4216 return false; 4217 if (LD) 4218 *LD = cast<LoadSDNode>(N); 4219 return true; 4220} 4221 4222// Test whether the given value is a vector value which will be legalized 4223// into a load. 4224static bool WillBeConstantPoolLoad(SDNode *N) { 4225 if (N->getOpcode() != ISD::BUILD_VECTOR) 4226 return false; 4227 4228 // Check for any non-constant elements. 4229 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) 4230 switch (N->getOperand(i).getNode()->getOpcode()) { 4231 case ISD::UNDEF: 4232 case ISD::ConstantFP: 4233 case ISD::Constant: 4234 break; 4235 default: 4236 return false; 4237 } 4238 4239 // Vectors of all-zeros and all-ones are materialized with special 4240 // instructions rather than being loaded. 4241 return !ISD::isBuildVectorAllZeros(N) && 4242 !ISD::isBuildVectorAllOnes(N); 4243} 4244 4245/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to 4246/// match movlp{s|d}. The lower half elements should come from lower half of 4247/// V1 (and in order), and the upper half elements should come from the upper 4248/// half of V2 (and in order). And since V1 will become the source of the 4249/// MOVLP, it must be either a vector load or a scalar load to vector. 4250static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2, 4251 ArrayRef<int> Mask, EVT VT) { 4252 if (!VT.is128BitVector()) 4253 return false; 4254 4255 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1)) 4256 return false; 4257 // Is V2 is a vector load, don't do this transformation. We will try to use 4258 // load folding shufps op. 
4259 if (ISD::isNON_EXTLoad(V2) || WillBeConstantPoolLoad(V2)) 4260 return false; 4261 4262 unsigned NumElems = VT.getVectorNumElements(); 4263 4264 if (NumElems != 2 && NumElems != 4) 4265 return false; 4266 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 4267 if (!isUndefOrEqual(Mask[i], i)) 4268 return false; 4269 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i) 4270 if (!isUndefOrEqual(Mask[i], i+NumElems)) 4271 return false; 4272 return true; 4273} 4274 4275/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are 4276/// all the same. 4277static bool isSplatVector(SDNode *N) { 4278 if (N->getOpcode() != ISD::BUILD_VECTOR) 4279 return false; 4280 4281 SDValue SplatValue = N->getOperand(0); 4282 for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i) 4283 if (N->getOperand(i) != SplatValue) 4284 return false; 4285 return true; 4286} 4287 4288/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved 4289/// to an zero vector. 4290/// FIXME: move to dag combiner / method on ShuffleVectorSDNode 4291static bool isZeroShuffle(ShuffleVectorSDNode *N) { 4292 SDValue V1 = N->getOperand(0); 4293 SDValue V2 = N->getOperand(1); 4294 unsigned NumElems = N->getValueType(0).getVectorNumElements(); 4295 for (unsigned i = 0; i != NumElems; ++i) { 4296 int Idx = N->getMaskElt(i); 4297 if (Idx >= (int)NumElems) { 4298 unsigned Opc = V2.getOpcode(); 4299 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode())) 4300 continue; 4301 if (Opc != ISD::BUILD_VECTOR || 4302 !X86::isZeroNode(V2.getOperand(Idx-NumElems))) 4303 return false; 4304 } else if (Idx >= 0) { 4305 unsigned Opc = V1.getOpcode(); 4306 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode())) 4307 continue; 4308 if (Opc != ISD::BUILD_VECTOR || 4309 !X86::isZeroNode(V1.getOperand(Idx))) 4310 return false; 4311 } 4312 } 4313 return true; 4314} 4315 4316/// getZeroVector - Returns a vector of specified type with all zero elements. 4317/// 4318static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget, 4319 SelectionDAG &DAG, DebugLoc dl) { 4320 assert(VT.isVector() && "Expected a vector type"); 4321 unsigned Size = VT.getSizeInBits(); 4322 4323 // Always build SSE zero vectors as <4 x i32> bitcasted 4324 // to their dest type. This ensures they get CSE'd. 4325 SDValue Vec; 4326 if (Size == 128) { // SSE 4327 if (Subtarget->hasSSE2()) { // SSE2 4328 SDValue Cst = DAG.getTargetConstant(0, MVT::i32); 4329 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); 4330 } else { // SSE1 4331 SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32); 4332 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst); 4333 } 4334 } else if (Size == 256) { // AVX 4335 if (Subtarget->hasAVX2()) { // AVX2 4336 SDValue Cst = DAG.getTargetConstant(0, MVT::i32); 4337 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; 4338 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops, 8); 4339 } else { 4340 // 256-bit logic and arithmetic instructions in AVX are all 4341 // floating-point, no support for integer ops. Emit fp zeroed vectors. 4342 SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32); 4343 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; 4344 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops, 8); 4345 } 4346 } else 4347 llvm_unreachable("Unexpected vector type"); 4348 4349 return DAG.getNode(ISD::BITCAST, dl, VT, Vec); 4350} 4351 4352/// getOnesVector - Returns a vector of specified type with all bits set. 
4353/// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with 4354/// no AVX2 supprt, use two <4 x i32> inserted in a <8 x i32> appropriately. 4355/// Then bitcast to their original type, ensuring they get CSE'd. 4356static SDValue getOnesVector(EVT VT, bool HasAVX2, SelectionDAG &DAG, 4357 DebugLoc dl) { 4358 assert(VT.isVector() && "Expected a vector type"); 4359 unsigned Size = VT.getSizeInBits(); 4360 4361 SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32); 4362 SDValue Vec; 4363 if (Size == 256) { 4364 if (HasAVX2) { // AVX2 4365 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; 4366 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops, 8); 4367 } else { // AVX 4368 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); 4369 Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl); 4370 } 4371 } else if (Size == 128) { 4372 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); 4373 } else 4374 llvm_unreachable("Unexpected vector type"); 4375 4376 return DAG.getNode(ISD::BITCAST, dl, VT, Vec); 4377} 4378 4379/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements 4380/// that point to V2 points to its first element. 4381static void NormalizeMask(SmallVectorImpl<int> &Mask, unsigned NumElems) { 4382 for (unsigned i = 0; i != NumElems; ++i) { 4383 if (Mask[i] > (int)NumElems) { 4384 Mask[i] = NumElems; 4385 } 4386 } 4387} 4388 4389/// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd 4390/// operation of specified width. 4391static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, 4392 SDValue V2) { 4393 unsigned NumElems = VT.getVectorNumElements(); 4394 SmallVector<int, 8> Mask; 4395 Mask.push_back(NumElems); 4396 for (unsigned i = 1; i != NumElems; ++i) 4397 Mask.push_back(i); 4398 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]); 4399} 4400 4401/// getUnpackl - Returns a vector_shuffle node for an unpackl operation. 4402static SDValue getUnpackl(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, 4403 SDValue V2) { 4404 unsigned NumElems = VT.getVectorNumElements(); 4405 SmallVector<int, 8> Mask; 4406 for (unsigned i = 0, e = NumElems/2; i != e; ++i) { 4407 Mask.push_back(i); 4408 Mask.push_back(i + NumElems); 4409 } 4410 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]); 4411} 4412 4413/// getUnpackh - Returns a vector_shuffle node for an unpackh operation. 4414static SDValue getUnpackh(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, 4415 SDValue V2) { 4416 unsigned NumElems = VT.getVectorNumElements(); 4417 SmallVector<int, 8> Mask; 4418 for (unsigned i = 0, Half = NumElems/2; i != Half; ++i) { 4419 Mask.push_back(i + Half); 4420 Mask.push_back(i + NumElems + Half); 4421 } 4422 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]); 4423} 4424 4425// PromoteSplati8i16 - All i16 and i8 vector types can't be used directly by 4426// a generic shuffle instruction because the target has no such instructions. 4427// Generate shuffles which repeat i16 and i8 several times until they can be 4428// represented by v4f32 and then be manipulated by target suported shuffles. 
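// e.g. to splat element 5 of a v8i16, one unpckh produces <4,4,5,5,6,6,7,7>;
// the two copies of element 5 now form 32-bit element 1, which getLegalSplat
// can broadcast once the value is bitcast to v4f32.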
4429static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) { 4430 EVT VT = V.getValueType(); 4431 int NumElems = VT.getVectorNumElements(); 4432 DebugLoc dl = V.getDebugLoc(); 4433 4434 while (NumElems > 4) { 4435 if (EltNo < NumElems/2) { 4436 V = getUnpackl(DAG, dl, VT, V, V); 4437 } else { 4438 V = getUnpackh(DAG, dl, VT, V, V); 4439 EltNo -= NumElems/2; 4440 } 4441 NumElems >>= 1; 4442 } 4443 return V; 4444} 4445 4446/// getLegalSplat - Generate a legal splat with supported x86 shuffles 4447static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) { 4448 EVT VT = V.getValueType(); 4449 DebugLoc dl = V.getDebugLoc(); 4450 unsigned Size = VT.getSizeInBits(); 4451 4452 if (Size == 128) { 4453 V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V); 4454 int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo }; 4455 V = DAG.getVectorShuffle(MVT::v4f32, dl, V, DAG.getUNDEF(MVT::v4f32), 4456 &SplatMask[0]); 4457 } else if (Size == 256) { 4458 // To use VPERMILPS to splat scalars, the second half of indicies must 4459 // refer to the higher part, which is a duplication of the lower one, 4460 // because VPERMILPS can only handle in-lane permutations. 4461 int SplatMask[8] = { EltNo, EltNo, EltNo, EltNo, 4462 EltNo+4, EltNo+4, EltNo+4, EltNo+4 }; 4463 4464 V = DAG.getNode(ISD::BITCAST, dl, MVT::v8f32, V); 4465 V = DAG.getVectorShuffle(MVT::v8f32, dl, V, DAG.getUNDEF(MVT::v8f32), 4466 &SplatMask[0]); 4467 } else 4468 llvm_unreachable("Vector size not supported"); 4469 4470 return DAG.getNode(ISD::BITCAST, dl, VT, V); 4471} 4472 4473/// PromoteSplat - Splat is promoted to target supported vector shuffles. 4474static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) { 4475 EVT SrcVT = SV->getValueType(0); 4476 SDValue V1 = SV->getOperand(0); 4477 DebugLoc dl = SV->getDebugLoc(); 4478 4479 int EltNo = SV->getSplatIndex(); 4480 int NumElems = SrcVT.getVectorNumElements(); 4481 unsigned Size = SrcVT.getSizeInBits(); 4482 4483 assert(((Size == 128 && NumElems > 4) || Size == 256) && 4484 "Unknown how to promote splat for type"); 4485 4486 // Extract the 128-bit part containing the splat element and update 4487 // the splat element index when it refers to the higher register. 4488 if (Size == 256) { 4489 V1 = Extract128BitVector(V1, EltNo, DAG, dl); 4490 if (EltNo >= NumElems/2) 4491 EltNo -= NumElems/2; 4492 } 4493 4494 // All i16 and i8 vector types can't be used directly by a generic shuffle 4495 // instruction because the target has no such instruction. Generate shuffles 4496 // which repeat i16 and i8 several times until they fit in i32, and then can 4497 // be manipulated by target suported shuffles. 4498 EVT EltVT = SrcVT.getVectorElementType(); 4499 if (EltVT == MVT::i8 || EltVT == MVT::i16) 4500 V1 = PromoteSplati8i16(V1, DAG, EltNo); 4501 4502 // Recreate the 256-bit vector and place the same 128-bit vector 4503 // into the low and high part. This is necessary because we want 4504 // to use VPERM* to shuffle the vectors 4505 if (Size == 256) { 4506 V1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT, V1, V1); 4507 } 4508 4509 return getLegalSplat(DAG, V1, EltNo); 4510} 4511 4512/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified 4513/// vector of zero or undef vector. This produces a shuffle where the low 4514/// element of V2 is swizzled into the zero/undef vector, landing at element 4515/// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3). 
4516static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx, 4517 bool IsZero, 4518 const X86Subtarget *Subtarget, 4519 SelectionDAG &DAG) { 4520 EVT VT = V2.getValueType(); 4521 SDValue V1 = IsZero 4522 ? getZeroVector(VT, Subtarget, DAG, V2.getDebugLoc()) : DAG.getUNDEF(VT); 4523 unsigned NumElems = VT.getVectorNumElements(); 4524 SmallVector<int, 16> MaskVec; 4525 for (unsigned i = 0; i != NumElems; ++i) 4526 // If this is the insertion idx, put the low elt of V2 here. 4527 MaskVec.push_back(i == Idx ? NumElems : i); 4528 return DAG.getVectorShuffle(VT, V2.getDebugLoc(), V1, V2, &MaskVec[0]); 4529} 4530 4531/// getTargetShuffleMask - Calculates the shuffle mask corresponding to the 4532/// target specific opcode. Returns true if the Mask could be calculated. 4533/// Sets IsUnary to true if only uses one source. 4534static bool getTargetShuffleMask(SDNode *N, MVT VT, 4535 SmallVectorImpl<int> &Mask, bool &IsUnary) { 4536 unsigned NumElems = VT.getVectorNumElements(); 4537 SDValue ImmN; 4538 4539 IsUnary = false; 4540 switch(N->getOpcode()) { 4541 case X86ISD::SHUFP: 4542 ImmN = N->getOperand(N->getNumOperands()-1); 4543 DecodeSHUFPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); 4544 break; 4545 case X86ISD::UNPCKH: 4546 DecodeUNPCKHMask(VT, Mask); 4547 break; 4548 case X86ISD::UNPCKL: 4549 DecodeUNPCKLMask(VT, Mask); 4550 break; 4551 case X86ISD::MOVHLPS: 4552 DecodeMOVHLPSMask(NumElems, Mask); 4553 break; 4554 case X86ISD::MOVLHPS: 4555 DecodeMOVLHPSMask(NumElems, Mask); 4556 break; 4557 case X86ISD::PSHUFD: 4558 case X86ISD::VPERMILP: 4559 ImmN = N->getOperand(N->getNumOperands()-1); 4560 DecodePSHUFMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); 4561 IsUnary = true; 4562 break; 4563 case X86ISD::PSHUFHW: 4564 ImmN = N->getOperand(N->getNumOperands()-1); 4565 DecodePSHUFHWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); 4566 IsUnary = true; 4567 break; 4568 case X86ISD::PSHUFLW: 4569 ImmN = N->getOperand(N->getNumOperands()-1); 4570 DecodePSHUFLWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); 4571 IsUnary = true; 4572 break; 4573 case X86ISD::VPERMI: 4574 ImmN = N->getOperand(N->getNumOperands()-1); 4575 DecodeVPERMMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); 4576 IsUnary = true; 4577 break; 4578 case X86ISD::MOVSS: 4579 case X86ISD::MOVSD: { 4580 // The index 0 always comes from the first element of the second source, 4581 // this is why MOVSS and MOVSD are used in the first place. The other 4582 // elements come from the other positions of the first source vector 4583 Mask.push_back(NumElems); 4584 for (unsigned i = 1; i != NumElems; ++i) { 4585 Mask.push_back(i); 4586 } 4587 break; 4588 } 4589 case X86ISD::VPERM2X128: 4590 ImmN = N->getOperand(N->getNumOperands()-1); 4591 DecodeVPERM2X128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); 4592 if (Mask.empty()) return false; 4593 break; 4594 case X86ISD::MOVDDUP: 4595 case X86ISD::MOVLHPD: 4596 case X86ISD::MOVLPD: 4597 case X86ISD::MOVLPS: 4598 case X86ISD::MOVSHDUP: 4599 case X86ISD::MOVSLDUP: 4600 case X86ISD::PALIGN: 4601 // Not yet implemented 4602 return false; 4603 default: llvm_unreachable("unknown target shuffle node"); 4604 } 4605 4606 return true; 4607} 4608 4609/// getShuffleScalarElt - Returns the scalar element that will make up the ith 4610/// element of the result of the vector shuffle. 
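/// For example, asking for element 2 of
///   (vector_shuffle (build_vector a, b, c, d), undef, <3, 2, 1, 0>)
/// follows mask entry 1 into the first operand and returns the scalar 'b'.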
4611static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
4612                                    unsigned Depth) {
4613  if (Depth == 6)
4614    return SDValue();  // Limit search depth.
4615
4616  SDValue V = SDValue(N, 0);
4617  EVT VT = V.getValueType();
4618  unsigned Opcode = V.getOpcode();
4619
4620  // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
4621  if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
4622    int Elt = SV->getMaskElt(Index);
4623
4624    if (Elt < 0)
4625      return DAG.getUNDEF(VT.getVectorElementType());
4626
4627    unsigned NumElems = VT.getVectorNumElements();
4628    SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
4629                                         : SV->getOperand(1);
4630    return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
4631  }
4632
4633  // Recurse into target specific vector shuffles to find scalars.
4634  if (isTargetShuffle(Opcode)) {
4635    MVT ShufVT = V.getValueType().getSimpleVT();
4636    unsigned NumElems = ShufVT.getVectorNumElements();
4637    SmallVector<int, 16> ShuffleMask;
4638    bool IsUnary;
4639
4640    if (!getTargetShuffleMask(N, ShufVT, ShuffleMask, IsUnary))
4641      return SDValue();
4642
4643    int Elt = ShuffleMask[Index];
4644    if (Elt < 0)
4645      return DAG.getUNDEF(ShufVT.getVectorElementType());
4646
4647    SDValue NewV = (Elt < (int)NumElems) ? N->getOperand(0)
4648                                         : N->getOperand(1);
4649    return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
4650                               Depth+1);
4651  }
4652
4653  // Actual nodes that may contain scalar elements
4654  if (Opcode == ISD::BITCAST) {
4655    V = V.getOperand(0);
4656    EVT SrcVT = V.getValueType();
4657    unsigned NumElems = VT.getVectorNumElements();
4658
4659    if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
4660      return SDValue();
4661  }
4662
4663  if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
4664    return (Index == 0) ? V.getOperand(0)
4665                        : DAG.getUNDEF(VT.getVectorElementType());
4666
4667  if (V.getOpcode() == ISD::BUILD_VECTOR)
4668    return V.getOperand(Index);
4669
4670  return SDValue();
4671}
4672
4673/// getNumOfConsecutiveZeros - Return the number of consecutive elements of a
4674/// vector shuffle operation which are zero or undef. The search can start in
4675/// two different directions, from left or right.
4676static
4677unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp, unsigned NumElems,
4678                                  bool ZerosFromLeft, SelectionDAG &DAG) {
4679  unsigned i;
4680  for (i = 0; i != NumElems; ++i) {
4681    unsigned Index = ZerosFromLeft ? i : NumElems-i-1;
4682    SDValue Elt = getShuffleScalarElt(SVOp, Index, DAG, 0);
4683    if (!(Elt.getNode() &&
4684         (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt))))
4685      break;
4686  }
4687
4688  return i;
4689}
4690
4691/// isShuffleMaskConsecutive - Check if the shuffle mask indices [MaskI, MaskE)
4692/// correspond consecutively to elements from one of the vector operands,
4693/// starting from its index OpIdx. Also sets OpNum to the source vector operand.
4694static
4695bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp,
4696                              unsigned MaskI, unsigned MaskE, unsigned OpIdx,
4697                              unsigned NumElems, unsigned &OpNum) {
4698  bool SeenV1 = false;
4699  bool SeenV2 = false;
4700
4701  for (unsigned i = MaskI; i != MaskE; ++i, ++OpIdx) {
4702    int Idx = SVOp->getMaskElt(i);
4703    // Ignore undef indices
4704    if (Idx < 0)
4705      continue;
4706
4707    if (Idx < (int)NumElems)
4708      SeenV1 = true;
4709    else
4710      SeenV2 = true;
4711
4712    // Only accept consecutive elements from the same vector
4713    if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2))
4714      return false;
4715  }
4716
4717  OpNum = SeenV1 ? 0 : 1;
4718  return true;
4719}
4720
4721/// isVectorShiftRight - Returns true if the shuffle can be implemented as a
4722/// logical right shift of a vector.
4723static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
4724                               bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
4725  unsigned NumElems = SVOp->getValueType(0).getVectorNumElements();
4726  unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems,
4727              false /* check zeros from right */, DAG);
4728  unsigned OpSrc;
4729
4730  if (!NumZeros)
4731    return false;
4732
4733  // Considering the elements in the mask that are not consecutive zeros,
4734  // check if they consecutively come from only one of the source vectors.
4735  //
4736  //               V1 = {X, A, B, C}     0
4737  //                        \  \  \    /
4738  //   vector_shuffle V1, V2 <1, 2, 3, X>
4739  //
4740  if (!isShuffleMaskConsecutive(SVOp,
4741            0,                   // Mask Start Index
4742            NumElems-NumZeros,   // Mask End Index (exclusive)
4743            NumZeros,            // Where to start looking in the src vector
4744            NumElems,            // Number of elements in vector
4745            OpSrc))              // Which source operand?
4746    return false;
4747
4748  isLeft = false;
4749  ShAmt = NumZeros;
4750  ShVal = SVOp->getOperand(OpSrc);
4751  return true;
4752}
4753
4754/// isVectorShiftLeft - Returns true if the shuffle can be implemented as a
4755/// logical left shift of a vector.
4756static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
4757                              bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
4758  unsigned NumElems = SVOp->getValueType(0).getVectorNumElements();
4759  unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems,
4760              true /* check zeros from left */, DAG);
4761  unsigned OpSrc;
4762
4763  if (!NumZeros)
4764    return false;
4765
4766  // Considering the elements in the mask that are not consecutive zeros,
4767  // check if they consecutively come from only one of the source vectors.
4768  //
4769  //                           0    { A, B, X, X } = V2
4770  //                          /  \    /   /
4771  //  vector_shuffle V1, V2 <X, X, 4, 5>
4772  //
4773  if (!isShuffleMaskConsecutive(SVOp,
4774            NumZeros,     // Mask Start Index
4775            NumElems,     // Mask End Index (exclusive)
4776            0,            // Where to start looking in the src vector
4777            NumElems,     // Number of elements in vector
4778            OpSrc))       // Which source operand?
4779    return false;
4780
4781  isLeft = true;
4782  ShAmt = NumZeros;
4783  ShVal = SVOp->getOperand(OpSrc);
4784  return true;
4785}
4786
4787/// isVectorShift - Returns true if the shuffle can be implemented as a
4788/// logical left or right shift of a vector.
4789static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
4790                          bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
4791  // Although the logic below supports any bitwidth size, there are no
4792  // shift instructions which handle more than 128-bit vectors.
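  // For example, on v4i32 the mask <1, 2, 3, 4> with an all-zeros V2 is
  // recognized by isVectorShiftRight as a logical right shift of V1 by one
  // element (ShAmt == 1).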
4793 if (!SVOp->getValueType(0).is128BitVector()) 4794 return false; 4795 4796 if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) || 4797 isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt)) 4798 return true; 4799 4800 return false; 4801} 4802 4803/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8. 4804/// 4805static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros, 4806 unsigned NumNonZero, unsigned NumZero, 4807 SelectionDAG &DAG, 4808 const X86Subtarget* Subtarget, 4809 const TargetLowering &TLI) { 4810 if (NumNonZero > 8) 4811 return SDValue(); 4812 4813 DebugLoc dl = Op.getDebugLoc(); 4814 SDValue V(0, 0); 4815 bool First = true; 4816 for (unsigned i = 0; i < 16; ++i) { 4817 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0; 4818 if (ThisIsNonZero && First) { 4819 if (NumZero) 4820 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl); 4821 else 4822 V = DAG.getUNDEF(MVT::v8i16); 4823 First = false; 4824 } 4825 4826 if ((i & 1) != 0) { 4827 SDValue ThisElt(0, 0), LastElt(0, 0); 4828 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0; 4829 if (LastIsNonZero) { 4830 LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl, 4831 MVT::i16, Op.getOperand(i-1)); 4832 } 4833 if (ThisIsNonZero) { 4834 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i)); 4835 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16, 4836 ThisElt, DAG.getConstant(8, MVT::i8)); 4837 if (LastIsNonZero) 4838 ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt); 4839 } else 4840 ThisElt = LastElt; 4841 4842 if (ThisElt.getNode()) 4843 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt, 4844 DAG.getIntPtrConstant(i/2)); 4845 } 4846 } 4847 4848 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V); 4849} 4850 4851/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16. 4852/// 4853static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros, 4854 unsigned NumNonZero, unsigned NumZero, 4855 SelectionDAG &DAG, 4856 const X86Subtarget* Subtarget, 4857 const TargetLowering &TLI) { 4858 if (NumNonZero > 4) 4859 return SDValue(); 4860 4861 DebugLoc dl = Op.getDebugLoc(); 4862 SDValue V(0, 0); 4863 bool First = true; 4864 for (unsigned i = 0; i < 8; ++i) { 4865 bool isNonZero = (NonZeros & (1 << i)) != 0; 4866 if (isNonZero) { 4867 if (First) { 4868 if (NumZero) 4869 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl); 4870 else 4871 V = DAG.getUNDEF(MVT::v8i16); 4872 First = false; 4873 } 4874 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, 4875 MVT::v8i16, V, Op.getOperand(i), 4876 DAG.getIntPtrConstant(i)); 4877 } 4878 } 4879 4880 return V; 4881} 4882 4883/// getVShift - Return a vector logical shift node. 4884/// 4885static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, 4886 unsigned NumBits, SelectionDAG &DAG, 4887 const TargetLowering &TLI, DebugLoc dl) { 4888 assert(VT.is128BitVector() && "Unknown type for VShift"); 4889 EVT ShVT = MVT::v2i64; 4890 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ; 4891 SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp); 4892 return DAG.getNode(ISD::BITCAST, dl, VT, 4893 DAG.getNode(Opc, dl, ShVT, SrcOp, 4894 DAG.getConstant(NumBits, 4895 TLI.getShiftAmountTy(SrcOp.getValueType())))); 4896} 4897 4898SDValue 4899X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl, 4900 SelectionDAG &DAG) const { 4901 4902 // Check if the scalar load can be widened into a vector load. And if 4903 // the address is "base + cst" see if the cst can be "absorbed" into 4904 // the shuffle mask. 
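  // For example, a 4-byte load from <stack slot> + 8 splatted as v4f32 can,
  // assuming the slot can be realigned to 16 bytes, be widened to a 16-byte
  // load of the whole slot followed by a splat of element 2, absorbing the
  // +8 offset into the shuffle mask.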
4905 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) { 4906 SDValue Ptr = LD->getBasePtr(); 4907 if (!ISD::isNormalLoad(LD) || LD->isVolatile()) 4908 return SDValue(); 4909 EVT PVT = LD->getValueType(0); 4910 if (PVT != MVT::i32 && PVT != MVT::f32) 4911 return SDValue(); 4912 4913 int FI = -1; 4914 int64_t Offset = 0; 4915 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) { 4916 FI = FINode->getIndex(); 4917 Offset = 0; 4918 } else if (DAG.isBaseWithConstantOffset(Ptr) && 4919 isa<FrameIndexSDNode>(Ptr.getOperand(0))) { 4920 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 4921 Offset = Ptr.getConstantOperandVal(1); 4922 Ptr = Ptr.getOperand(0); 4923 } else { 4924 return SDValue(); 4925 } 4926 4927 // FIXME: 256-bit vector instructions don't require a strict alignment, 4928 // improve this code to support it better. 4929 unsigned RequiredAlign = VT.getSizeInBits()/8; 4930 SDValue Chain = LD->getChain(); 4931 // Make sure the stack object alignment is at least 16 or 32. 4932 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 4933 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) { 4934 if (MFI->isFixedObjectIndex(FI)) { 4935 // Can't change the alignment. FIXME: It's possible to compute 4936 // the exact stack offset and reference FI + adjust offset instead. 4937 // If someone *really* cares about this. That's the way to implement it. 4938 return SDValue(); 4939 } else { 4940 MFI->setObjectAlignment(FI, RequiredAlign); 4941 } 4942 } 4943 4944 // (Offset % 16 or 32) must be multiple of 4. Then address is then 4945 // Ptr + (Offset & ~15). 4946 if (Offset < 0) 4947 return SDValue(); 4948 if ((Offset % RequiredAlign) & 3) 4949 return SDValue(); 4950 int64_t StartOffset = Offset & ~(RequiredAlign-1); 4951 if (StartOffset) 4952 Ptr = DAG.getNode(ISD::ADD, Ptr.getDebugLoc(), Ptr.getValueType(), 4953 Ptr,DAG.getConstant(StartOffset, Ptr.getValueType())); 4954 4955 int EltNo = (Offset - StartOffset) >> 2; 4956 unsigned NumElems = VT.getVectorNumElements(); 4957 4958 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems); 4959 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr, 4960 LD->getPointerInfo().getWithOffset(StartOffset), 4961 false, false, false, 0); 4962 4963 SmallVector<int, 8> Mask; 4964 for (unsigned i = 0; i != NumElems; ++i) 4965 Mask.push_back(EltNo); 4966 4967 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]); 4968 } 4969 4970 return SDValue(); 4971} 4972 4973/// EltsFromConsecutiveLoads - Given the initializing elements 'Elts' of a 4974/// vector of type 'VT', see if the elements can be replaced by a single large 4975/// load which has the same value as a build_vector whose operands are 'elts'. 4976/// 4977/// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a 4978/// 4979/// FIXME: we'd also like to handle the case where the last elements are zero 4980/// rather than undef via VZEXT_LOAD, but we do not detect that case today. 4981/// There's even a handy isZeroNode for that purpose. 4982static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts, 4983 DebugLoc &DL, SelectionDAG &DAG) { 4984 EVT EltVT = VT.getVectorElementType(); 4985 unsigned NumElems = Elts.size(); 4986 4987 LoadSDNode *LDBase = NULL; 4988 unsigned LastLoadedElt = -1U; 4989 4990 // For each element in the initializer, see if we've found a load or an undef. 4991 // If we don't find an initial load element, or later load elements are 4992 // non-consecutive, bail out. 
4993 for (unsigned i = 0; i < NumElems; ++i) { 4994 SDValue Elt = Elts[i]; 4995 4996 if (!Elt.getNode() || 4997 (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode()))) 4998 return SDValue(); 4999 if (!LDBase) { 5000 if (Elt.getNode()->getOpcode() == ISD::UNDEF) 5001 return SDValue(); 5002 LDBase = cast<LoadSDNode>(Elt.getNode()); 5003 LastLoadedElt = i; 5004 continue; 5005 } 5006 if (Elt.getOpcode() == ISD::UNDEF) 5007 continue; 5008 5009 LoadSDNode *LD = cast<LoadSDNode>(Elt); 5010 if (!DAG.isConsecutiveLoad(LD, LDBase, EltVT.getSizeInBits()/8, i)) 5011 return SDValue(); 5012 LastLoadedElt = i; 5013 } 5014 5015 // If we have found an entire vector of loads and undefs, then return a large 5016 // load of the entire vector width starting at the base pointer. If we found 5017 // consecutive loads for the low half, generate a vzext_load node. 5018 if (LastLoadedElt == NumElems - 1) { 5019 if (DAG.InferPtrAlignment(LDBase->getBasePtr()) >= 16) 5020 return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(), 5021 LDBase->getPointerInfo(), 5022 LDBase->isVolatile(), LDBase->isNonTemporal(), 5023 LDBase->isInvariant(), 0); 5024 return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(), 5025 LDBase->getPointerInfo(), 5026 LDBase->isVolatile(), LDBase->isNonTemporal(), 5027 LDBase->isInvariant(), LDBase->getAlignment()); 5028 } 5029 if (NumElems == 4 && LastLoadedElt == 1 && 5030 DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) { 5031 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other); 5032 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() }; 5033 SDValue ResNode = 5034 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, 2, MVT::i64, 5035 LDBase->getPointerInfo(), 5036 LDBase->getAlignment(), 5037 false/*isVolatile*/, true/*ReadMem*/, 5038 false/*WriteMem*/); 5039 5040 // Make sure the newly-created LOAD is in the same position as LDBase in 5041 // terms of dependency. We create a TokenFactor for LDBase and ResNode, and 5042 // update uses of LDBase's output chain to use the TokenFactor. 5043 if (LDBase->hasAnyUseOfValue(1)) { 5044 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, 5045 SDValue(LDBase, 1), SDValue(ResNode.getNode(), 1)); 5046 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain); 5047 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1), 5048 SDValue(ResNode.getNode(), 1)); 5049 } 5050 5051 return DAG.getNode(ISD::BITCAST, DL, VT, ResNode); 5052 } 5053 return SDValue(); 5054} 5055 5056/// LowerVectorBroadcast - Attempt to use the vbroadcast instruction 5057/// to generate a splat value for the following cases: 5058/// 1. A splat BUILD_VECTOR which uses a single scalar load, or a constant. 5059/// 2. A splat shuffle which uses a scalar_to_vector node which comes from 5060/// a scalar load, or a constant. 5061/// The VBROADCAST node is returned when a pattern is found, 5062/// or SDValue() otherwise. 5063SDValue 5064X86TargetLowering::LowerVectorBroadcast(SDValue Op, SelectionDAG &DAG) const { 5065 if (!Subtarget->hasAVX()) 5066 return SDValue(); 5067 5068 EVT VT = Op.getValueType(); 5069 DebugLoc dl = Op.getDebugLoc(); 5070 5071 assert((VT.is128BitVector() || VT.is256BitVector()) && 5072 "Unsupported vector type for broadcast."); 5073 5074 SDValue Ld; 5075 bool ConstSplatVal; 5076 5077 switch (Op.getOpcode()) { 5078 default: 5079 // Unknown pattern found. 5080 return SDValue(); 5081 5082 case ISD::BUILD_VECTOR: { 5083 // The BUILD_VECTOR node must be a splat. 
5084 if (!isSplatVector(Op.getNode())) 5085 return SDValue(); 5086 5087 Ld = Op.getOperand(0); 5088 ConstSplatVal = (Ld.getOpcode() == ISD::Constant || 5089 Ld.getOpcode() == ISD::ConstantFP); 5090 5091 // The suspected load node has several users. Make sure that all 5092 // of its users are from the BUILD_VECTOR node. 5093 // Constants may have multiple users. 5094 if (!ConstSplatVal && !Ld->hasNUsesOfValue(VT.getVectorNumElements(), 0)) 5095 return SDValue(); 5096 break; 5097 } 5098 5099 case ISD::VECTOR_SHUFFLE: { 5100 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 5101 5102 // Shuffles must have a splat mask where the first element is 5103 // broadcasted. 5104 if ((!SVOp->isSplat()) || SVOp->getMaskElt(0) != 0) 5105 return SDValue(); 5106 5107 SDValue Sc = Op.getOperand(0); 5108 if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR && 5109 Sc.getOpcode() != ISD::BUILD_VECTOR) { 5110 5111 if (!Subtarget->hasAVX2()) 5112 return SDValue(); 5113 5114 // Use the register form of the broadcast instruction available on AVX2. 5115 if (VT.is256BitVector()) 5116 Sc = Extract128BitVector(Sc, 0, DAG, dl); 5117 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc); 5118 } 5119 5120 Ld = Sc.getOperand(0); 5121 ConstSplatVal = (Ld.getOpcode() == ISD::Constant || 5122 Ld.getOpcode() == ISD::ConstantFP); 5123 5124 // The scalar_to_vector node and the suspected 5125 // load node must have exactly one user. 5126 // Constants may have multiple users. 5127 if (!ConstSplatVal && (!Sc.hasOneUse() || !Ld.hasOneUse())) 5128 return SDValue(); 5129 break; 5130 } 5131 } 5132 5133 bool Is256 = VT.is256BitVector(); 5134 5135 // Handle the broadcasting a single constant scalar from the constant pool 5136 // into a vector. On Sandybridge it is still better to load a constant vector 5137 // from the constant pool and not to broadcast it from a scalar. 5138 if (ConstSplatVal && Subtarget->hasAVX2()) { 5139 EVT CVT = Ld.getValueType(); 5140 assert(!CVT.isVector() && "Must not broadcast a vector type"); 5141 unsigned ScalarSize = CVT.getSizeInBits(); 5142 5143 if (ScalarSize == 32 || (Is256 && ScalarSize == 64)) { 5144 const Constant *C = 0; 5145 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld)) 5146 C = CI->getConstantIntValue(); 5147 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld)) 5148 C = CF->getConstantFPValue(); 5149 5150 assert(C && "Invalid constant type"); 5151 5152 SDValue CP = DAG.getConstantPool(C, getPointerTy()); 5153 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment(); 5154 Ld = DAG.getLoad(CVT, dl, DAG.getEntryNode(), CP, 5155 MachinePointerInfo::getConstantPool(), 5156 false, false, false, Alignment); 5157 5158 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); 5159 } 5160 } 5161 5162 bool IsLoad = ISD::isNormalLoad(Ld.getNode()); 5163 unsigned ScalarSize = Ld.getValueType().getSizeInBits(); 5164 5165 // Handle AVX2 in-register broadcasts. 5166 if (!IsLoad && Subtarget->hasAVX2() && 5167 (ScalarSize == 32 || (Is256 && ScalarSize == 64))) 5168 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); 5169 5170 // The scalar source must be a normal load. 
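  // For example, a splat BUILD_VECTOR of a 32-bit value loaded from memory
  // becomes an X86ISD::VBROADCAST of that load (the AVX vbroadcastss form),
  // and a 64-bit load splatted across a 256-bit vector maps to the
  // vbroadcastsd form.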
5171 if (!IsLoad) 5172 return SDValue(); 5173 5174 if (ScalarSize == 32 || (Is256 && ScalarSize == 64)) 5175 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); 5176 5177 // The integer check is needed for the 64-bit into 128-bit so it doesn't match 5178 // double since there is no vbroadcastsd xmm 5179 if (Subtarget->hasAVX2() && Ld.getValueType().isInteger()) { 5180 if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64) 5181 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); 5182 } 5183 5184 // Unsupported broadcast. 5185 return SDValue(); 5186} 5187 5188SDValue 5189X86TargetLowering::buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) const { 5190 EVT VT = Op.getValueType(); 5191 5192 // Skip if insert_vec_elt is not supported. 5193 if (!isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT)) 5194 return SDValue(); 5195 5196 DebugLoc DL = Op.getDebugLoc(); 5197 unsigned NumElems = Op.getNumOperands(); 5198 5199 SDValue VecIn1; 5200 SDValue VecIn2; 5201 SmallVector<unsigned, 4> InsertIndices; 5202 SmallVector<int, 8> Mask(NumElems, -1); 5203 5204 for (unsigned i = 0; i != NumElems; ++i) { 5205 unsigned Opc = Op.getOperand(i).getOpcode(); 5206 5207 if (Opc == ISD::UNDEF) 5208 continue; 5209 5210 if (Opc != ISD::EXTRACT_VECTOR_ELT) { 5211 // Quit if more than 1 elements need inserting. 5212 if (InsertIndices.size() > 1) 5213 return SDValue(); 5214 5215 InsertIndices.push_back(i); 5216 continue; 5217 } 5218 5219 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0); 5220 SDValue ExtIdx = Op.getOperand(i).getOperand(1); 5221 5222 // Quit if extracted from vector of different type. 5223 if (ExtractedFromVec.getValueType() != VT) 5224 return SDValue(); 5225 5226 // Quit if non-constant index. 5227 if (!isa<ConstantSDNode>(ExtIdx)) 5228 return SDValue(); 5229 5230 if (VecIn1.getNode() == 0) 5231 VecIn1 = ExtractedFromVec; 5232 else if (VecIn1 != ExtractedFromVec) { 5233 if (VecIn2.getNode() == 0) 5234 VecIn2 = ExtractedFromVec; 5235 else if (VecIn2 != ExtractedFromVec) 5236 // Quit if more than 2 vectors to shuffle 5237 return SDValue(); 5238 } 5239 5240 unsigned Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue(); 5241 5242 if (ExtractedFromVec == VecIn1) 5243 Mask[i] = Idx; 5244 else if (ExtractedFromVec == VecIn2) 5245 Mask[i] = Idx + NumElems; 5246 } 5247 5248 if (VecIn1.getNode() == 0) 5249 return SDValue(); 5250 5251 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT); 5252 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, &Mask[0]); 5253 for (unsigned i = 0, e = InsertIndices.size(); i != e; ++i) { 5254 unsigned Idx = InsertIndices[i]; 5255 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx), 5256 DAG.getIntPtrConstant(Idx)); 5257 } 5258 5259 return NV; 5260} 5261 5262SDValue 5263X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { 5264 DebugLoc dl = Op.getDebugLoc(); 5265 5266 EVT VT = Op.getValueType(); 5267 EVT ExtVT = VT.getVectorElementType(); 5268 unsigned NumElems = Op.getNumOperands(); 5269 5270 // Vectors containing all zeros can be matched by pxor and xorps later 5271 if (ISD::isBuildVectorAllZeros(Op.getNode())) { 5272 // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd 5273 // and 2) ensure that i64 scalars are eliminated on x86-32 hosts. 
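    // For example, an all-zeros v2i64 build_vector is not returned as-is here;
    // getZeroVector is expected to materialize the zero as v4i32 (or v8i32)
    // and bitcast it back, so 32-bit targets never see a 64-bit zero scalar.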
5274 if (VT == MVT::v4i32 || VT == MVT::v8i32) 5275 return Op; 5276 5277 return getZeroVector(VT, Subtarget, DAG, dl); 5278 } 5279 5280 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width 5281 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use 5282 // vpcmpeqd on 256-bit vectors. 5283 if (ISD::isBuildVectorAllOnes(Op.getNode())) { 5284 if (VT == MVT::v4i32 || (VT == MVT::v8i32 && Subtarget->hasAVX2())) 5285 return Op; 5286 5287 return getOnesVector(VT, Subtarget->hasAVX2(), DAG, dl); 5288 } 5289 5290 SDValue Broadcast = LowerVectorBroadcast(Op, DAG); 5291 if (Broadcast.getNode()) 5292 return Broadcast; 5293 5294 unsigned EVTBits = ExtVT.getSizeInBits(); 5295 5296 unsigned NumZero = 0; 5297 unsigned NumNonZero = 0; 5298 unsigned NonZeros = 0; 5299 bool IsAllConstants = true; 5300 SmallSet<SDValue, 8> Values; 5301 for (unsigned i = 0; i < NumElems; ++i) { 5302 SDValue Elt = Op.getOperand(i); 5303 if (Elt.getOpcode() == ISD::UNDEF) 5304 continue; 5305 Values.insert(Elt); 5306 if (Elt.getOpcode() != ISD::Constant && 5307 Elt.getOpcode() != ISD::ConstantFP) 5308 IsAllConstants = false; 5309 if (X86::isZeroNode(Elt)) 5310 NumZero++; 5311 else { 5312 NonZeros |= (1 << i); 5313 NumNonZero++; 5314 } 5315 } 5316 5317 // All undef vector. Return an UNDEF. All zero vectors were handled above. 5318 if (NumNonZero == 0) 5319 return DAG.getUNDEF(VT); 5320 5321 // Special case for single non-zero, non-undef, element. 5322 if (NumNonZero == 1) { 5323 unsigned Idx = CountTrailingZeros_32(NonZeros); 5324 SDValue Item = Op.getOperand(Idx); 5325 5326 // If this is an insertion of an i64 value on x86-32, and if the top bits of 5327 // the value are obviously zero, truncate the value to i32 and do the 5328 // insertion that way. Only do this if the value is non-constant or if the 5329 // value is a constant being inserted into element 0. It is cheaper to do 5330 // a constant pool load than it is to do a movd + shuffle. 5331 if (ExtVT == MVT::i64 && !Subtarget->is64Bit() && 5332 (!IsAllConstants || Idx == 0)) { 5333 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) { 5334 // Handle SSE only. 5335 assert(VT == MVT::v2i64 && "Expected an SSE value type!"); 5336 EVT VecVT = MVT::v4i32; 5337 unsigned VecElts = 4; 5338 5339 // Truncate the value (which may itself be a constant) to i32, and 5340 // convert it to a vector with movd (S2V+shuffle to zero extend). 5341 Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item); 5342 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item); 5343 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG); 5344 5345 // Now we have our 32-bit value zero extended in the low element of 5346 // a vector. If Idx != 0, swizzle it into place. 5347 if (Idx != 0) { 5348 SmallVector<int, 4> Mask; 5349 Mask.push_back(Idx); 5350 for (unsigned i = 1; i != VecElts; ++i) 5351 Mask.push_back(i); 5352 Item = DAG.getVectorShuffle(VecVT, dl, Item, DAG.getUNDEF(VecVT), 5353 &Mask[0]); 5354 } 5355 return DAG.getNode(ISD::BITCAST, dl, VT, Item); 5356 } 5357 } 5358 5359 // If we have a constant or non-constant insertion into the low element of 5360 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into 5361 // the rest of the elements. This will be matched as movd/movq/movss/movsd 5362 // depending on what the source datatype is. 
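    // For example, (build_vector f32 %x, 0.0, 0.0, 0.0) takes this path: %x is
    // placed in lane 0 with SCALAR_TO_VECTOR and the remaining lanes are zeroed
    // by a shuffle against the zero vector, which is matched as movss.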
5363 if (Idx == 0) { 5364 if (NumZero == 0) 5365 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); 5366 5367 if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 || 5368 (ExtVT == MVT::i64 && Subtarget->is64Bit())) { 5369 if (VT.is256BitVector()) { 5370 SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl); 5371 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec, 5372 Item, DAG.getIntPtrConstant(0)); 5373 } 5374 assert(VT.is128BitVector() && "Expected an SSE value type!"); 5375 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); 5376 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector. 5377 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG); 5378 } 5379 5380 if (ExtVT == MVT::i16 || ExtVT == MVT::i8) { 5381 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item); 5382 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item); 5383 if (VT.is256BitVector()) { 5384 SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl); 5385 Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl); 5386 } else { 5387 assert(VT.is128BitVector() && "Expected an SSE value type!"); 5388 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG); 5389 } 5390 return DAG.getNode(ISD::BITCAST, dl, VT, Item); 5391 } 5392 } 5393 5394 // Is it a vector logical left shift? 5395 if (NumElems == 2 && Idx == 1 && 5396 X86::isZeroNode(Op.getOperand(0)) && 5397 !X86::isZeroNode(Op.getOperand(1))) { 5398 unsigned NumBits = VT.getSizeInBits(); 5399 return getVShift(true, VT, 5400 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 5401 VT, Op.getOperand(1)), 5402 NumBits/2, DAG, *this, dl); 5403 } 5404 5405 if (IsAllConstants) // Otherwise, it's better to do a constpool load. 5406 return SDValue(); 5407 5408 // Otherwise, if this is a vector with i32 or f32 elements, and the element 5409 // is a non-constant being inserted into an element other than the low one, 5410 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka 5411 // movd/movss) to move this into the low element, then shuffle it into 5412 // place. 5413 if (EVTBits == 32) { 5414 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); 5415 5416 // Turn it into a shuffle of zero and zero-extended scalar to vector. 5417 Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, Subtarget, DAG); 5418 SmallVector<int, 8> MaskVec; 5419 for (unsigned i = 0; i != NumElems; ++i) 5420 MaskVec.push_back(i == Idx ? 0 : 1); 5421 return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]); 5422 } 5423 } 5424 5425 // Splat is obviously ok. Let legalizer expand it to a shuffle. 5426 if (Values.size() == 1) { 5427 if (EVTBits == 32) { 5428 // Instead of a shuffle like this: 5429 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0> 5430 // Check if it's possible to issue this instead. 5431 // shuffle (vload ptr)), undef, <1, 1, 1, 1> 5432 unsigned Idx = CountTrailingZeros_32(NonZeros); 5433 SDValue Item = Op.getOperand(Idx); 5434 if (Op.getNode()->isOnlyUserOf(Item.getNode())) 5435 return LowerAsSplatVectorLoad(Item, VT, dl, DAG); 5436 } 5437 return SDValue(); 5438 } 5439 5440 // A vector full of immediates; various special cases are already 5441 // handled, so this is best done with a single constant-pool load. 5442 if (IsAllConstants) 5443 return SDValue(); 5444 5445 // For AVX-length vectors, build the individual 128-bit pieces and use 5446 // shuffles to put them in place. 
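  // For example, a v8i32 build_vector is split into two v4i32 BUILD_VECTORs for
  // the low and high halves, which Concat128BitVectors then recombines into a
  // single 256-bit value.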
5447 if (VT.is256BitVector()) { 5448 SmallVector<SDValue, 32> V; 5449 for (unsigned i = 0; i != NumElems; ++i) 5450 V.push_back(Op.getOperand(i)); 5451 5452 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2); 5453 5454 // Build both the lower and upper subvector. 5455 SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT, &V[0], NumElems/2); 5456 SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT, &V[NumElems / 2], 5457 NumElems/2); 5458 5459 // Recreate the wider vector with the lower and upper part. 5460 return Concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl); 5461 } 5462 5463 // Let legalizer expand 2-wide build_vectors. 5464 if (EVTBits == 64) { 5465 if (NumNonZero == 1) { 5466 // One half is zero or undef. 5467 unsigned Idx = CountTrailingZeros_32(NonZeros); 5468 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, 5469 Op.getOperand(Idx)); 5470 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG); 5471 } 5472 return SDValue(); 5473 } 5474 5475 // If element VT is < 32 bits, convert it to inserts into a zero vector. 5476 if (EVTBits == 8 && NumElems == 16) { 5477 SDValue V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG, 5478 Subtarget, *this); 5479 if (V.getNode()) return V; 5480 } 5481 5482 if (EVTBits == 16 && NumElems == 8) { 5483 SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG, 5484 Subtarget, *this); 5485 if (V.getNode()) return V; 5486 } 5487 5488 // If element VT is == 32 bits, turn it into a number of shuffles. 5489 SmallVector<SDValue, 8> V(NumElems); 5490 if (NumElems == 4 && NumZero > 0) { 5491 for (unsigned i = 0; i < 4; ++i) { 5492 bool isZero = !(NonZeros & (1 << i)); 5493 if (isZero) 5494 V[i] = getZeroVector(VT, Subtarget, DAG, dl); 5495 else 5496 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i)); 5497 } 5498 5499 for (unsigned i = 0; i < 2; ++i) { 5500 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) { 5501 default: break; 5502 case 0: 5503 V[i] = V[i*2]; // Must be a zero vector. 5504 break; 5505 case 1: 5506 V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]); 5507 break; 5508 case 2: 5509 V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]); 5510 break; 5511 case 3: 5512 V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]); 5513 break; 5514 } 5515 } 5516 5517 bool Reverse1 = (NonZeros & 0x3) == 2; 5518 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2; 5519 int MaskVec[] = { 5520 Reverse1 ? 1 : 0, 5521 Reverse1 ? 0 : 1, 5522 static_cast<int>(Reverse2 ? NumElems+1 : NumElems), 5523 static_cast<int>(Reverse2 ? NumElems : NumElems+1) 5524 }; 5525 return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]); 5526 } 5527 5528 if (Values.size() > 1 && VT.is128BitVector()) { 5529 // Check for a build vector of consecutive loads. 5530 for (unsigned i = 0; i < NumElems; ++i) 5531 V[i] = Op.getOperand(i); 5532 5533 // Check for elements which are consecutive loads. 5534 SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG); 5535 if (LD.getNode()) 5536 return LD; 5537 5538 // Check for a build vector from mostly shuffle plus few inserting. 5539 SDValue Sh = buildFromShuffleMostly(Op, DAG); 5540 if (Sh.getNode()) 5541 return Sh; 5542 5543 // For SSE 4.1, use insertps to put the high elements into the low element. 
5544 if (getSubtarget()->hasSSE41()) { 5545 SDValue Result; 5546 if (Op.getOperand(0).getOpcode() != ISD::UNDEF) 5547 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0)); 5548 else 5549 Result = DAG.getUNDEF(VT); 5550 5551 for (unsigned i = 1; i < NumElems; ++i) { 5552 if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue; 5553 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result, 5554 Op.getOperand(i), DAG.getIntPtrConstant(i)); 5555 } 5556 return Result; 5557 } 5558 5559 // Otherwise, expand into a number of unpckl*, start by extending each of 5560 // our (non-undef) elements to the full vector width with the element in the 5561 // bottom slot of the vector (which generates no code for SSE). 5562 for (unsigned i = 0; i < NumElems; ++i) { 5563 if (Op.getOperand(i).getOpcode() != ISD::UNDEF) 5564 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i)); 5565 else 5566 V[i] = DAG.getUNDEF(VT); 5567 } 5568 5569 // Next, we iteratively mix elements, e.g. for v4f32: 5570 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0> 5571 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1> 5572 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0> 5573 unsigned EltStride = NumElems >> 1; 5574 while (EltStride != 0) { 5575 for (unsigned i = 0; i < EltStride; ++i) { 5576 // If V[i+EltStride] is undef and this is the first round of mixing, 5577 // then it is safe to just drop this shuffle: V[i] is already in the 5578 // right place, the one element (since it's the first round) being 5579 // inserted as undef can be dropped. This isn't safe for successive 5580 // rounds because they will permute elements within both vectors. 5581 if (V[i+EltStride].getOpcode() == ISD::UNDEF && 5582 EltStride == NumElems/2) 5583 continue; 5584 5585 V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]); 5586 } 5587 EltStride >>= 1; 5588 } 5589 return V[0]; 5590 } 5591 return SDValue(); 5592} 5593 5594// LowerAVXCONCAT_VECTORS - 256-bit AVX can use the vinsertf128 instruction 5595// to create 256-bit vectors from two other 128-bit ones. 5596static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 5597 DebugLoc dl = Op.getDebugLoc(); 5598 EVT ResVT = Op.getValueType(); 5599 5600 assert(ResVT.is256BitVector() && "Value type must be 256-bit wide"); 5601 5602 SDValue V1 = Op.getOperand(0); 5603 SDValue V2 = Op.getOperand(1); 5604 unsigned NumElems = ResVT.getVectorNumElements(); 5605 5606 return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl); 5607} 5608 5609static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 5610 assert(Op.getNumOperands() == 2); 5611 5612 // 256-bit AVX can use the vinsertf128 instruction to create 256-bit vectors 5613 // from two other 128-bit ones. 5614 return LowerAVXCONCAT_VECTORS(Op, DAG); 5615} 5616 5617// Try to lower a shuffle node into a simple blend instruction. 
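// For example, a v4f32 shuffle with mask <0, 5, 2, 7> keeps lanes 0 and 2 from
// V1 and takes lanes 1 and 3 from V2; the loop below computes the blend
// immediate 0b0101 (a bit set for each lane kept from V1) and emits
// X86ISD::BLENDPS with that immediate.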
5618static SDValue 5619LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp, 5620 const X86Subtarget *Subtarget, SelectionDAG &DAG) { 5621 SDValue V1 = SVOp->getOperand(0); 5622 SDValue V2 = SVOp->getOperand(1); 5623 DebugLoc dl = SVOp->getDebugLoc(); 5624 MVT VT = SVOp->getValueType(0).getSimpleVT(); 5625 unsigned NumElems = VT.getVectorNumElements(); 5626 5627 if (!Subtarget->hasSSE41()) 5628 return SDValue(); 5629 5630 unsigned ISDNo = 0; 5631 MVT OpTy; 5632 5633 switch (VT.SimpleTy) { 5634 default: return SDValue(); 5635 case MVT::v8i16: 5636 ISDNo = X86ISD::BLENDPW; 5637 OpTy = MVT::v8i16; 5638 break; 5639 case MVT::v4i32: 5640 case MVT::v4f32: 5641 ISDNo = X86ISD::BLENDPS; 5642 OpTy = MVT::v4f32; 5643 break; 5644 case MVT::v2i64: 5645 case MVT::v2f64: 5646 ISDNo = X86ISD::BLENDPD; 5647 OpTy = MVT::v2f64; 5648 break; 5649 case MVT::v8i32: 5650 case MVT::v8f32: 5651 if (!Subtarget->hasAVX()) 5652 return SDValue(); 5653 ISDNo = X86ISD::BLENDPS; 5654 OpTy = MVT::v8f32; 5655 break; 5656 case MVT::v4i64: 5657 case MVT::v4f64: 5658 if (!Subtarget->hasAVX()) 5659 return SDValue(); 5660 ISDNo = X86ISD::BLENDPD; 5661 OpTy = MVT::v4f64; 5662 break; 5663 } 5664 assert(ISDNo && "Invalid Op Number"); 5665 5666 unsigned MaskVals = 0; 5667 5668 for (unsigned i = 0; i != NumElems; ++i) { 5669 int EltIdx = SVOp->getMaskElt(i); 5670 if (EltIdx == (int)i || EltIdx < 0) 5671 MaskVals |= (1<<i); 5672 else if (EltIdx == (int)(i + NumElems)) 5673 continue; // Bit is set to zero; 5674 else 5675 return SDValue(); 5676 } 5677 5678 V1 = DAG.getNode(ISD::BITCAST, dl, OpTy, V1); 5679 V2 = DAG.getNode(ISD::BITCAST, dl, OpTy, V2); 5680 SDValue Ret = DAG.getNode(ISDNo, dl, OpTy, V1, V2, 5681 DAG.getConstant(MaskVals, MVT::i32)); 5682 return DAG.getNode(ISD::BITCAST, dl, VT, Ret); 5683} 5684 5685// v8i16 shuffles - Prefer shuffles in the following order: 5686// 1. [all] pshuflw, pshufhw, optional move 5687// 2. [ssse3] 1 x pshufb 5688// 3. [ssse3] 2 x pshufb + 1 x por 5689// 4. [all] mov + pshuflw + pshufhw + N x (pextrw + pinsrw) 5690static SDValue 5691LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget, 5692 SelectionDAG &DAG) { 5693 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 5694 SDValue V1 = SVOp->getOperand(0); 5695 SDValue V2 = SVOp->getOperand(1); 5696 DebugLoc dl = SVOp->getDebugLoc(); 5697 SmallVector<int, 8> MaskVals; 5698 5699 // Determine if more than 1 of the words in each of the low and high quadwords 5700 // of the result come from the same quadword of one of the two inputs. Undef 5701 // mask values count as coming from any quadword, for better codegen. 5702 unsigned LoQuad[] = { 0, 0, 0, 0 }; 5703 unsigned HiQuad[] = { 0, 0, 0, 0 }; 5704 std::bitset<4> InputQuads; 5705 for (unsigned i = 0; i < 8; ++i) { 5706 unsigned *Quad = i < 4 ? 
LoQuad : HiQuad; 5707 int EltIdx = SVOp->getMaskElt(i); 5708 MaskVals.push_back(EltIdx); 5709 if (EltIdx < 0) { 5710 ++Quad[0]; 5711 ++Quad[1]; 5712 ++Quad[2]; 5713 ++Quad[3]; 5714 continue; 5715 } 5716 ++Quad[EltIdx / 4]; 5717 InputQuads.set(EltIdx / 4); 5718 } 5719 5720 int BestLoQuad = -1; 5721 unsigned MaxQuad = 1; 5722 for (unsigned i = 0; i < 4; ++i) { 5723 if (LoQuad[i] > MaxQuad) { 5724 BestLoQuad = i; 5725 MaxQuad = LoQuad[i]; 5726 } 5727 } 5728 5729 int BestHiQuad = -1; 5730 MaxQuad = 1; 5731 for (unsigned i = 0; i < 4; ++i) { 5732 if (HiQuad[i] > MaxQuad) { 5733 BestHiQuad = i; 5734 MaxQuad = HiQuad[i]; 5735 } 5736 } 5737 5738 // For SSSE3, If all 8 words of the result come from only 1 quadword of each 5739 // of the two input vectors, shuffle them into one input vector so only a 5740 // single pshufb instruction is necessary. If There are more than 2 input 5741 // quads, disable the next transformation since it does not help SSSE3. 5742 bool V1Used = InputQuads[0] || InputQuads[1]; 5743 bool V2Used = InputQuads[2] || InputQuads[3]; 5744 if (Subtarget->hasSSSE3()) { 5745 if (InputQuads.count() == 2 && V1Used && V2Used) { 5746 BestLoQuad = InputQuads[0] ? 0 : 1; 5747 BestHiQuad = InputQuads[2] ? 2 : 3; 5748 } 5749 if (InputQuads.count() > 2) { 5750 BestLoQuad = -1; 5751 BestHiQuad = -1; 5752 } 5753 } 5754 5755 // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update 5756 // the shuffle mask. If a quad is scored as -1, that means that it contains 5757 // words from all 4 input quadwords. 5758 SDValue NewV; 5759 if (BestLoQuad >= 0 || BestHiQuad >= 0) { 5760 int MaskV[] = { 5761 BestLoQuad < 0 ? 0 : BestLoQuad, 5762 BestHiQuad < 0 ? 1 : BestHiQuad 5763 }; 5764 NewV = DAG.getVectorShuffle(MVT::v2i64, dl, 5765 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1), 5766 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]); 5767 NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV); 5768 5769 // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the 5770 // source words for the shuffle, to aid later transformations. 5771 bool AllWordsInNewV = true; 5772 bool InOrder[2] = { true, true }; 5773 for (unsigned i = 0; i != 8; ++i) { 5774 int idx = MaskVals[i]; 5775 if (idx != (int)i) 5776 InOrder[i/4] = false; 5777 if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad) 5778 continue; 5779 AllWordsInNewV = false; 5780 break; 5781 } 5782 5783 bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV; 5784 if (AllWordsInNewV) { 5785 for (int i = 0; i != 8; ++i) { 5786 int idx = MaskVals[i]; 5787 if (idx < 0) 5788 continue; 5789 idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4; 5790 if ((idx != i) && idx < 4) 5791 pshufhw = false; 5792 if ((idx != i) && idx > 3) 5793 pshuflw = false; 5794 } 5795 V1 = NewV; 5796 V2Used = false; 5797 BestLoQuad = 0; 5798 BestHiQuad = 1; 5799 } 5800 5801 // If we've eliminated the use of V2, and the new mask is a pshuflw or 5802 // pshufhw, that's as cheap as it gets. Return the new shuffle. 5803 if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) { 5804 unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW; 5805 unsigned TargetMask = 0; 5806 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, 5807 DAG.getUNDEF(MVT::v8i16), &MaskVals[0]); 5808 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode()); 5809 TargetMask = pshufhw ? 
getShufflePSHUFHWImmediate(SVOp): 5810 getShufflePSHUFLWImmediate(SVOp); 5811 V1 = NewV.getOperand(0); 5812 return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG); 5813 } 5814 } 5815 5816 // If we have SSSE3, and all words of the result are from 1 input vector, 5817 // case 2 is generated, otherwise case 3 is generated. If no SSSE3 5818 // is present, fall back to case 4. 5819 if (Subtarget->hasSSSE3()) { 5820 SmallVector<SDValue,16> pshufbMask; 5821 5822 // If we have elements from both input vectors, set the high bit of the 5823 // shuffle mask element to zero out elements that come from V2 in the V1 5824 // mask, and elements that come from V1 in the V2 mask, so that the two 5825 // results can be OR'd together. 5826 bool TwoInputs = V1Used && V2Used; 5827 for (unsigned i = 0; i != 8; ++i) { 5828 int EltIdx = MaskVals[i] * 2; 5829 int Idx0 = (TwoInputs && (EltIdx >= 16)) ? 0x80 : EltIdx; 5830 int Idx1 = (TwoInputs && (EltIdx >= 16)) ? 0x80 : EltIdx+1; 5831 pshufbMask.push_back(DAG.getConstant(Idx0, MVT::i8)); 5832 pshufbMask.push_back(DAG.getConstant(Idx1, MVT::i8)); 5833 } 5834 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V1); 5835 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1, 5836 DAG.getNode(ISD::BUILD_VECTOR, dl, 5837 MVT::v16i8, &pshufbMask[0], 16)); 5838 if (!TwoInputs) 5839 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 5840 5841 // Calculate the shuffle mask for the second input, shuffle it, and 5842 // OR it with the first shuffled input. 5843 pshufbMask.clear(); 5844 for (unsigned i = 0; i != 8; ++i) { 5845 int EltIdx = MaskVals[i] * 2; 5846 int Idx0 = (EltIdx < 16) ? 0x80 : EltIdx - 16; 5847 int Idx1 = (EltIdx < 16) ? 0x80 : EltIdx - 15; 5848 pshufbMask.push_back(DAG.getConstant(Idx0, MVT::i8)); 5849 pshufbMask.push_back(DAG.getConstant(Idx1, MVT::i8)); 5850 } 5851 V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V2); 5852 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2, 5853 DAG.getNode(ISD::BUILD_VECTOR, dl, 5854 MVT::v16i8, &pshufbMask[0], 16)); 5855 V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2); 5856 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 5857 } 5858 5859 // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order, 5860 // and update MaskVals with new element order. 5861 std::bitset<8> InOrder; 5862 if (BestLoQuad >= 0) { 5863 int MaskV[] = { -1, -1, -1, -1, 4, 5, 6, 7 }; 5864 for (int i = 0; i != 4; ++i) { 5865 int idx = MaskVals[i]; 5866 if (idx < 0) { 5867 InOrder.set(i); 5868 } else if ((idx / 4) == BestLoQuad) { 5869 MaskV[i] = idx & 3; 5870 InOrder.set(i); 5871 } 5872 } 5873 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16), 5874 &MaskV[0]); 5875 5876 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3()) { 5877 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode()); 5878 NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16, 5879 NewV.getOperand(0), 5880 getShufflePSHUFLWImmediate(SVOp), DAG); 5881 } 5882 } 5883 5884 // If BestHi >= 0, generate a pshufhw to put the high elements in order, 5885 // and update MaskVals with the new element order. 
5886 if (BestHiQuad >= 0) { 5887 int MaskV[] = { 0, 1, 2, 3, -1, -1, -1, -1 }; 5888 for (unsigned i = 4; i != 8; ++i) { 5889 int idx = MaskVals[i]; 5890 if (idx < 0) { 5891 InOrder.set(i); 5892 } else if ((idx / 4) == BestHiQuad) { 5893 MaskV[i] = (idx & 3) + 4; 5894 InOrder.set(i); 5895 } 5896 } 5897 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16), 5898 &MaskV[0]); 5899 5900 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3()) { 5901 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode()); 5902 NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16, 5903 NewV.getOperand(0), 5904 getShufflePSHUFHWImmediate(SVOp), DAG); 5905 } 5906 } 5907 5908 // In case BestHi & BestLo were both -1, which means each quadword has a word 5909 // from each of the four input quadwords, calculate the InOrder bitvector now 5910 // before falling through to the insert/extract cleanup. 5911 if (BestLoQuad == -1 && BestHiQuad == -1) { 5912 NewV = V1; 5913 for (int i = 0; i != 8; ++i) 5914 if (MaskVals[i] < 0 || MaskVals[i] == i) 5915 InOrder.set(i); 5916 } 5917 5918 // The other elements are put in the right place using pextrw and pinsrw. 5919 for (unsigned i = 0; i != 8; ++i) { 5920 if (InOrder[i]) 5921 continue; 5922 int EltIdx = MaskVals[i]; 5923 if (EltIdx < 0) 5924 continue; 5925 SDValue ExtOp = (EltIdx < 8) ? 5926 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1, 5927 DAG.getIntPtrConstant(EltIdx)) : 5928 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2, 5929 DAG.getIntPtrConstant(EltIdx - 8)); 5930 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp, 5931 DAG.getIntPtrConstant(i)); 5932 } 5933 return NewV; 5934} 5935 5936// v16i8 shuffles - Prefer shuffles in the following order: 5937// 1. [ssse3] 1 x pshufb 5938// 2. [ssse3] 2 x pshufb + 1 x por 5939// 3. [all] v8i16 shuffle + N x pextrw + rotate + pinsrw 5940static 5941SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp, 5942 SelectionDAG &DAG, 5943 const X86TargetLowering &TLI) { 5944 SDValue V1 = SVOp->getOperand(0); 5945 SDValue V2 = SVOp->getOperand(1); 5946 DebugLoc dl = SVOp->getDebugLoc(); 5947 ArrayRef<int> MaskVals = SVOp->getMask(); 5948 5949 // If we have SSSE3, case 1 is generated when all result bytes come from 5950 // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is 5951 // present, fall back to case 3. 5952 5953 // If SSSE3, use 1 pshufb instruction per vector with elements in the result. 5954 if (TLI.getSubtarget()->hasSSSE3()) { 5955 SmallVector<SDValue,16> pshufbMask; 5956 5957 // If all result elements are from one input vector, then only translate 5958 // undef mask values to 0x80 (zero out result) in the pshufb mask. 5959 // 5960 // Otherwise, we have elements from both input vectors, and must zero out 5961 // elements that come from V2 in the first mask, and V1 in the second mask 5962 // so that we can OR them together. 5963 for (unsigned i = 0; i != 16; ++i) { 5964 int EltIdx = MaskVals[i]; 5965 if (EltIdx < 0 || EltIdx >= 16) 5966 EltIdx = 0x80; 5967 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8)); 5968 } 5969 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1, 5970 DAG.getNode(ISD::BUILD_VECTOR, dl, 5971 MVT::v16i8, &pshufbMask[0], 16)); 5972 5973 // As PSHUFB will zero elements with negative indices, it's safe to ignore 5974 // the 2nd operand if it's undefined or zero. 
5975 if (V2.getOpcode() == ISD::UNDEF || 5976 ISD::isBuildVectorAllZeros(V2.getNode())) 5977 return V1; 5978 5979 // Calculate the shuffle mask for the second input, shuffle it, and 5980 // OR it with the first shuffled input. 5981 pshufbMask.clear(); 5982 for (unsigned i = 0; i != 16; ++i) { 5983 int EltIdx = MaskVals[i]; 5984 EltIdx = (EltIdx < 16) ? 0x80 : EltIdx - 16; 5985 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8)); 5986 } 5987 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2, 5988 DAG.getNode(ISD::BUILD_VECTOR, dl, 5989 MVT::v16i8, &pshufbMask[0], 16)); 5990 return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2); 5991 } 5992 5993 // No SSSE3 - Calculate in place words and then fix all out of place words 5994 // With 0-16 extracts & inserts. Worst case is 16 bytes out of order from 5995 // the 16 different words that comprise the two doublequadword input vectors. 5996 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 5997 V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2); 5998 SDValue NewV = V1; 5999 for (int i = 0; i != 8; ++i) { 6000 int Elt0 = MaskVals[i*2]; 6001 int Elt1 = MaskVals[i*2+1]; 6002 6003 // This word of the result is all undef, skip it. 6004 if (Elt0 < 0 && Elt1 < 0) 6005 continue; 6006 6007 // This word of the result is already in the correct place, skip it. 6008 if ((Elt0 == i*2) && (Elt1 == i*2+1)) 6009 continue; 6010 6011 SDValue Elt0Src = Elt0 < 16 ? V1 : V2; 6012 SDValue Elt1Src = Elt1 < 16 ? V1 : V2; 6013 SDValue InsElt; 6014 6015 // If Elt0 and Elt1 are defined, are consecutive, and can be load 6016 // using a single extract together, load it and store it. 6017 if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) { 6018 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src, 6019 DAG.getIntPtrConstant(Elt1 / 2)); 6020 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt, 6021 DAG.getIntPtrConstant(i)); 6022 continue; 6023 } 6024 6025 // If Elt1 is defined, extract it from the appropriate source. If the 6026 // source byte is not also odd, shift the extracted word left 8 bits 6027 // otherwise clear the bottom 8 bits if we need to do an or. 6028 if (Elt1 >= 0) { 6029 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src, 6030 DAG.getIntPtrConstant(Elt1 / 2)); 6031 if ((Elt1 & 1) == 0) 6032 InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt, 6033 DAG.getConstant(8, 6034 TLI.getShiftAmountTy(InsElt.getValueType()))); 6035 else if (Elt0 >= 0) 6036 InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt, 6037 DAG.getConstant(0xFF00, MVT::i16)); 6038 } 6039 // If Elt0 is defined, extract it from the appropriate source. If the 6040 // source byte is not also even, shift the extracted word right 8 bits. If 6041 // Elt1 was also defined, OR the extracted values together before 6042 // inserting them in the result. 6043 if (Elt0 >= 0) { 6044 SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, 6045 Elt0Src, DAG.getIntPtrConstant(Elt0 / 2)); 6046 if ((Elt0 & 1) != 0) 6047 InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0, 6048 DAG.getConstant(8, 6049 TLI.getShiftAmountTy(InsElt0.getValueType()))); 6050 else if (Elt1 >= 0) 6051 InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0, 6052 DAG.getConstant(0x00FF, MVT::i16)); 6053 InsElt = Elt1 >= 0 ? 
DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0) 6054 : InsElt0; 6055 } 6056 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt, 6057 DAG.getIntPtrConstant(i)); 6058 } 6059 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV); 6060} 6061 6062// v32i8 shuffles - Translate to VPSHUFB if possible. 6063static 6064SDValue LowerVECTOR_SHUFFLEv32i8(ShuffleVectorSDNode *SVOp, 6065 const X86Subtarget *Subtarget, 6066 SelectionDAG &DAG) { 6067 EVT VT = SVOp->getValueType(0); 6068 SDValue V1 = SVOp->getOperand(0); 6069 SDValue V2 = SVOp->getOperand(1); 6070 DebugLoc dl = SVOp->getDebugLoc(); 6071 SmallVector<int, 32> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end()); 6072 6073 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; 6074 bool V1IsAllZero = ISD::isBuildVectorAllZeros(V1.getNode()); 6075 bool V2IsAllZero = ISD::isBuildVectorAllZeros(V2.getNode()); 6076 6077 // VPSHUFB may be generated if 6078 // (1) one of input vector is undefined or zeroinitializer. 6079 // The mask value 0x80 puts 0 in the corresponding slot of the vector. 6080 // And (2) the mask indexes don't cross the 128-bit lane. 6081 if (VT != MVT::v32i8 || !Subtarget->hasAVX2() || 6082 (!V2IsUndef && !V2IsAllZero && !V1IsAllZero)) 6083 return SDValue(); 6084 6085 if (V1IsAllZero && !V2IsAllZero) { 6086 CommuteVectorShuffleMask(MaskVals, 32); 6087 V1 = V2; 6088 } 6089 SmallVector<SDValue, 32> pshufbMask; 6090 for (unsigned i = 0; i != 32; i++) { 6091 int EltIdx = MaskVals[i]; 6092 if (EltIdx < 0 || EltIdx >= 32) 6093 EltIdx = 0x80; 6094 else { 6095 if ((EltIdx >= 16 && i < 16) || (EltIdx < 16 && i >= 16)) 6096 // Cross lane is not allowed. 6097 return SDValue(); 6098 EltIdx &= 0xf; 6099 } 6100 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8)); 6101 } 6102 return DAG.getNode(X86ISD::PSHUFB, dl, MVT::v32i8, V1, 6103 DAG.getNode(ISD::BUILD_VECTOR, dl, 6104 MVT::v32i8, &pshufbMask[0], 32)); 6105} 6106 6107/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide 6108/// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be 6109/// done when every pair / quad of shuffle mask elements point to elements in 6110/// the right sequence. e.g. 
6111/// vector_shuffle X, Y, <2, 3, | 10, 11, | 0, 1, | 14, 15> 6112static 6113SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp, 6114 SelectionDAG &DAG, DebugLoc dl) { 6115 MVT VT = SVOp->getValueType(0).getSimpleVT(); 6116 unsigned NumElems = VT.getVectorNumElements(); 6117 MVT NewVT; 6118 unsigned Scale; 6119 switch (VT.SimpleTy) { 6120 default: llvm_unreachable("Unexpected!"); 6121 case MVT::v4f32: NewVT = MVT::v2f64; Scale = 2; break; 6122 case MVT::v4i32: NewVT = MVT::v2i64; Scale = 2; break; 6123 case MVT::v8i16: NewVT = MVT::v4i32; Scale = 2; break; 6124 case MVT::v16i8: NewVT = MVT::v4i32; Scale = 4; break; 6125 case MVT::v16i16: NewVT = MVT::v8i32; Scale = 2; break; 6126 case MVT::v32i8: NewVT = MVT::v8i32; Scale = 4; break; 6127 } 6128 6129 SmallVector<int, 8> MaskVec; 6130 for (unsigned i = 0; i != NumElems; i += Scale) { 6131 int StartIdx = -1; 6132 for (unsigned j = 0; j != Scale; ++j) { 6133 int EltIdx = SVOp->getMaskElt(i+j); 6134 if (EltIdx < 0) 6135 continue; 6136 if (StartIdx < 0) 6137 StartIdx = (EltIdx / Scale); 6138 if (EltIdx != (int)(StartIdx*Scale + j)) 6139 return SDValue(); 6140 } 6141 MaskVec.push_back(StartIdx); 6142 } 6143 6144 SDValue V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(0)); 6145 SDValue V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(1)); 6146 return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]); 6147} 6148 6149/// getVZextMovL - Return a zero-extending vector move low node. 6150/// 6151static SDValue getVZextMovL(EVT VT, EVT OpVT, 6152 SDValue SrcOp, SelectionDAG &DAG, 6153 const X86Subtarget *Subtarget, DebugLoc dl) { 6154 if (VT == MVT::v2f64 || VT == MVT::v4f32) { 6155 LoadSDNode *LD = NULL; 6156 if (!isScalarLoadToVector(SrcOp.getNode(), &LD)) 6157 LD = dyn_cast<LoadSDNode>(SrcOp); 6158 if (!LD) { 6159 // movssrr and movsdrr do not clear top bits. Try to use movd, movq 6160 // instead. 6161 MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32; 6162 if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) && 6163 SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR && 6164 SrcOp.getOperand(0).getOpcode() == ISD::BITCAST && 6165 SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) { 6166 // PR2108 6167 OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32; 6168 return DAG.getNode(ISD::BITCAST, dl, VT, 6169 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT, 6170 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 6171 OpVT, 6172 SrcOp.getOperand(0) 6173 .getOperand(0)))); 6174 } 6175 } 6176 } 6177 6178 return DAG.getNode(ISD::BITCAST, dl, VT, 6179 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT, 6180 DAG.getNode(ISD::BITCAST, dl, 6181 OpVT, SrcOp))); 6182} 6183 6184/// LowerVECTOR_SHUFFLE_256 - Handle all 256-bit wide vectors shuffles 6185/// which could not be matched by any known target speficic shuffle 6186static SDValue 6187LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { 6188 6189 SDValue NewOp = Compact8x32ShuffleNode(SVOp, DAG); 6190 if (NewOp.getNode()) 6191 return NewOp; 6192 6193 EVT VT = SVOp->getValueType(0); 6194 6195 unsigned NumElems = VT.getVectorNumElements(); 6196 unsigned NumLaneElems = NumElems / 2; 6197 6198 DebugLoc dl = SVOp->getDebugLoc(); 6199 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 6200 EVT NVT = MVT::getVectorVT(EltVT, NumLaneElems); 6201 SDValue Output[2]; 6202 6203 SmallVector<int, 16> Mask; 6204 for (unsigned l = 0; l < 2; ++l) { 6205 // Build a shuffle mask for the output, discovering on the fly which 6206 // input vectors to use as shuffle operands (recorded in InputUsed). 
6207 // If building a suitable shuffle vector proves too hard, then bail 6208 // out with UseBuildVector set. 6209 bool UseBuildVector = false; 6210 int InputUsed[2] = { -1, -1 }; // Not yet discovered. 6211 unsigned LaneStart = l * NumLaneElems; 6212 for (unsigned i = 0; i != NumLaneElems; ++i) { 6213 // The mask element. This indexes into the input. 6214 int Idx = SVOp->getMaskElt(i+LaneStart); 6215 if (Idx < 0) { 6216 // the mask element does not index into any input vector. 6217 Mask.push_back(-1); 6218 continue; 6219 } 6220 6221 // The input vector this mask element indexes into. 6222 int Input = Idx / NumLaneElems; 6223 6224 // Turn the index into an offset from the start of the input vector. 6225 Idx -= Input * NumLaneElems; 6226 6227 // Find or create a shuffle vector operand to hold this input. 6228 unsigned OpNo; 6229 for (OpNo = 0; OpNo < array_lengthof(InputUsed); ++OpNo) { 6230 if (InputUsed[OpNo] == Input) 6231 // This input vector is already an operand. 6232 break; 6233 if (InputUsed[OpNo] < 0) { 6234 // Create a new operand for this input vector. 6235 InputUsed[OpNo] = Input; 6236 break; 6237 } 6238 } 6239 6240 if (OpNo >= array_lengthof(InputUsed)) { 6241 // More than two input vectors used! Give up on trying to create a 6242 // shuffle vector. Insert all elements into a BUILD_VECTOR instead. 6243 UseBuildVector = true; 6244 break; 6245 } 6246 6247 // Add the mask index for the new shuffle vector. 6248 Mask.push_back(Idx + OpNo * NumLaneElems); 6249 } 6250 6251 if (UseBuildVector) { 6252 SmallVector<SDValue, 16> SVOps; 6253 for (unsigned i = 0; i != NumLaneElems; ++i) { 6254 // The mask element. This indexes into the input. 6255 int Idx = SVOp->getMaskElt(i+LaneStart); 6256 if (Idx < 0) { 6257 SVOps.push_back(DAG.getUNDEF(EltVT)); 6258 continue; 6259 } 6260 6261 // The input vector this mask element indexes into. 6262 int Input = Idx / NumElems; 6263 6264 // Turn the index into an offset from the start of the input vector. 6265 Idx -= Input * NumElems; 6266 6267 // Extract the vector element by hand. 6268 SVOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 6269 SVOp->getOperand(Input), 6270 DAG.getIntPtrConstant(Idx))); 6271 } 6272 6273 // Construct the output using a BUILD_VECTOR. 6274 Output[l] = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, &SVOps[0], 6275 SVOps.size()); 6276 } else if (InputUsed[0] < 0) { 6277 // No input vectors were used! The result is undefined. 6278 Output[l] = DAG.getUNDEF(NVT); 6279 } else { 6280 SDValue Op0 = Extract128BitVector(SVOp->getOperand(InputUsed[0] / 2), 6281 (InputUsed[0] % 2) * NumLaneElems, 6282 DAG, dl); 6283 // If only one input was used, use an undefined vector for the other. 6284 SDValue Op1 = (InputUsed[1] < 0) ? DAG.getUNDEF(NVT) : 6285 Extract128BitVector(SVOp->getOperand(InputUsed[1] / 2), 6286 (InputUsed[1] % 2) * NumLaneElems, DAG, dl); 6287 // At least one input vector was used. Create a new shuffle vector. 6288 Output[l] = DAG.getVectorShuffle(NVT, dl, Op0, Op1, &Mask[0]); 6289 } 6290 6291 Mask.clear(); 6292 } 6293 6294 // Concatenate the result back 6295 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Output[0], Output[1]); 6296} 6297 6298/// LowerVECTOR_SHUFFLE_128v4 - Handle all 128-bit wide vectors with 6299/// 4 elements, and match them with several different shuffle types. 
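/// Depending on how many result elements come from each input, the shuffle is
/// rebuilt from at most three simpler four-element shuffles.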
6300static SDValue 6301LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { 6302 SDValue V1 = SVOp->getOperand(0); 6303 SDValue V2 = SVOp->getOperand(1); 6304 DebugLoc dl = SVOp->getDebugLoc(); 6305 EVT VT = SVOp->getValueType(0); 6306 6307 assert(VT.is128BitVector() && "Unsupported vector size"); 6308 6309 std::pair<int, int> Locs[4]; 6310 int Mask1[] = { -1, -1, -1, -1 }; 6311 SmallVector<int, 8> PermMask(SVOp->getMask().begin(), SVOp->getMask().end()); 6312 6313 unsigned NumHi = 0; 6314 unsigned NumLo = 0; 6315 for (unsigned i = 0; i != 4; ++i) { 6316 int Idx = PermMask[i]; 6317 if (Idx < 0) { 6318 Locs[i] = std::make_pair(-1, -1); 6319 } else { 6320 assert(Idx < 8 && "Invalid VECTOR_SHUFFLE index!"); 6321 if (Idx < 4) { 6322 Locs[i] = std::make_pair(0, NumLo); 6323 Mask1[NumLo] = Idx; 6324 NumLo++; 6325 } else { 6326 Locs[i] = std::make_pair(1, NumHi); 6327 if (2+NumHi < 4) 6328 Mask1[2+NumHi] = Idx; 6329 NumHi++; 6330 } 6331 } 6332 } 6333 6334 if (NumLo <= 2 && NumHi <= 2) { 6335 // If no more than two elements come from either vector. This can be 6336 // implemented with two shuffles. First shuffle gather the elements. 6337 // The second shuffle, which takes the first shuffle as both of its 6338 // vector operands, put the elements into the right order. 6339 V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]); 6340 6341 int Mask2[] = { -1, -1, -1, -1 }; 6342 6343 for (unsigned i = 0; i != 4; ++i) 6344 if (Locs[i].first != -1) { 6345 unsigned Idx = (i < 2) ? 0 : 4; 6346 Idx += Locs[i].first * 2 + Locs[i].second; 6347 Mask2[i] = Idx; 6348 } 6349 6350 return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]); 6351 } 6352 6353 if (NumLo == 3 || NumHi == 3) { 6354 // Otherwise, we must have three elements from one vector, call it X, and 6355 // one element from the other, call it Y. First, use a shufps to build an 6356 // intermediate vector with the one element from Y and the element from X 6357 // that will be in the same half in the final destination (the indexes don't 6358 // matter). Then, use a shufps to build the final vector, taking the half 6359 // containing the element from Y from the intermediate, and the other half 6360 // from X. 6361 if (NumHi == 3) { 6362 // Normalize it so the 3 elements come from V1. 6363 CommuteVectorShuffleMask(PermMask, 4); 6364 std::swap(V1, V2); 6365 } 6366 6367 // Find the element from V2. 6368 unsigned HiIndex; 6369 for (HiIndex = 0; HiIndex < 3; ++HiIndex) { 6370 int Val = PermMask[HiIndex]; 6371 if (Val < 0) 6372 continue; 6373 if (Val >= 4) 6374 break; 6375 } 6376 6377 Mask1[0] = PermMask[HiIndex]; 6378 Mask1[1] = -1; 6379 Mask1[2] = PermMask[HiIndex^1]; 6380 Mask1[3] = -1; 6381 V2 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]); 6382 6383 if (HiIndex >= 2) { 6384 Mask1[0] = PermMask[0]; 6385 Mask1[1] = PermMask[1]; 6386 Mask1[2] = HiIndex & 1 ? 6 : 4; 6387 Mask1[3] = HiIndex & 1 ? 4 : 6; 6388 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]); 6389 } 6390 6391 Mask1[0] = HiIndex & 1 ? 2 : 0; 6392 Mask1[1] = HiIndex & 1 ? 0 : 2; 6393 Mask1[2] = PermMask[2]; 6394 Mask1[3] = PermMask[3]; 6395 if (Mask1[2] >= 0) 6396 Mask1[2] += 4; 6397 if (Mask1[3] >= 0) 6398 Mask1[3] += 4; 6399 return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]); 6400 } 6401 6402 // Break it into (shuffle shuffle_hi, shuffle_lo). 
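  // Result elements 0-1 are gathered by LoMask and elements 2-3 by HiMask,
  // with sources from V1 going to the low slots of each partial shuffle and
  // sources from V2 to the high slots. A final shuffle of the two partial
  // results then puts each element in place; e.g. mask <0,4,1,5> becomes
  // partial shuffles <0,u,4,u> and <1,u,5,u> recombined with <0,2,4,6>.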
  int LoMask[] = { -1, -1, -1, -1 };
  int HiMask[] = { -1, -1, -1, -1 };

  int *MaskPtr = LoMask;
  unsigned MaskIdx = 0;
  unsigned LoIdx = 0;
  unsigned HiIdx = 2;
  for (unsigned i = 0; i != 4; ++i) {
    if (i == 2) {
      MaskPtr = HiMask;
      MaskIdx = 1;
      LoIdx = 0;
      HiIdx = 2;
    }
    int Idx = PermMask[i];
    if (Idx < 0) {
      Locs[i] = std::make_pair(-1, -1);
    } else if (Idx < 4) {
      Locs[i] = std::make_pair(MaskIdx, LoIdx);
      MaskPtr[LoIdx] = Idx;
      LoIdx++;
    } else {
      Locs[i] = std::make_pair(MaskIdx, HiIdx);
      MaskPtr[HiIdx] = Idx;
      HiIdx++;
    }
  }

  SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]);
  SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]);
  int MaskOps[] = { -1, -1, -1, -1 };
  for (unsigned i = 0; i != 4; ++i)
    if (Locs[i].first != -1)
      MaskOps[i] = Locs[i].first * 4 + Locs[i].second;
  return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]);
}

static bool MayFoldVectorLoad(SDValue V) {
  while (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
    V = V.getOperand(0);

  if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
    V = V.getOperand(0);
  if (V.hasOneUse() && V.getOpcode() == ISD::BUILD_VECTOR &&
      V.getNumOperands() == 2 && V.getOperand(1).getOpcode() == ISD::UNDEF)
    // BUILD_VECTOR (load), undef
    V = V.getOperand(0);

  return MayFoldLoad(V);
}

// FIXME: the version above should always be used. Since there's
// a bug where several vector shuffles can't be folded because the
// DAG is not updated during lowering and a node claims to have two
// uses while it only has one, use this version, and let isel match
// another instruction if the load really happens to have more than
// one use. Remove this version once that bug gets fixed.
// rdar://8434668, PR8156
static bool RelaxedMayFoldVectorLoad(SDValue V) {
  if (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
    V = V.getOperand(0);
  if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
    V = V.getOperand(0);
  if (ISD::isNormalLoad(V.getNode()))
    return true;
  return false;
}

static
SDValue getMOVDDup(SDValue &Op, DebugLoc &dl, SDValue V1, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();

  // Canonicalize to v2f64.
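  // MOVDDUP duplicates the low 64 bits of its input, so any 128-bit type can
  // be handled by bitcasting to v2f64 here and bitcasting the result back to
  // the original type.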
6476 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1); 6477 return DAG.getNode(ISD::BITCAST, dl, VT, 6478 getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64, 6479 V1, DAG)); 6480} 6481 6482static 6483SDValue getMOVLowToHigh(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, 6484 bool HasSSE2) { 6485 SDValue V1 = Op.getOperand(0); 6486 SDValue V2 = Op.getOperand(1); 6487 EVT VT = Op.getValueType(); 6488 6489 assert(VT != MVT::v2i64 && "unsupported shuffle type"); 6490 6491 if (HasSSE2 && VT == MVT::v2f64) 6492 return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG); 6493 6494 // v4f32 or v4i32: canonizalized to v4f32 (which is legal for SSE1) 6495 return DAG.getNode(ISD::BITCAST, dl, VT, 6496 getTargetShuffleNode(X86ISD::MOVLHPS, dl, MVT::v4f32, 6497 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V1), 6498 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V2), DAG)); 6499} 6500 6501static 6502SDValue getMOVHighToLow(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG) { 6503 SDValue V1 = Op.getOperand(0); 6504 SDValue V2 = Op.getOperand(1); 6505 EVT VT = Op.getValueType(); 6506 6507 assert((VT == MVT::v4i32 || VT == MVT::v4f32) && 6508 "unsupported shuffle type"); 6509 6510 if (V2.getOpcode() == ISD::UNDEF) 6511 V2 = V1; 6512 6513 // v4i32 or v4f32 6514 return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG); 6515} 6516 6517static 6518SDValue getMOVLP(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, bool HasSSE2) { 6519 SDValue V1 = Op.getOperand(0); 6520 SDValue V2 = Op.getOperand(1); 6521 EVT VT = Op.getValueType(); 6522 unsigned NumElems = VT.getVectorNumElements(); 6523 6524 // Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second 6525 // operand of these instructions is only memory, so check if there's a 6526 // potencial load folding here, otherwise use SHUFPS or MOVSD to match the 6527 // same masks. 6528 bool CanFoldLoad = false; 6529 6530 // Trivial case, when V2 comes from a load. 6531 if (MayFoldVectorLoad(V2)) 6532 CanFoldLoad = true; 6533 6534 // When V1 is a load, it can be folded later into a store in isel, example: 6535 // (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1) 6536 // turns into: 6537 // (MOVLPSmr addr:$src1, VR128:$src2) 6538 // So, recognize this potential and also use MOVLPS or MOVLPD 6539 else if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op)) 6540 CanFoldLoad = true; 6541 6542 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 6543 if (CanFoldLoad) { 6544 if (HasSSE2 && NumElems == 2) 6545 return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG); 6546 6547 if (NumElems == 4) 6548 // If we don't care about the second element, proceed to use movss. 6549 if (SVOp->getMaskElt(1) != -1) 6550 return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG); 6551 } 6552 6553 // movl and movlp will both match v2i64, but v2i64 is never matched by 6554 // movl earlier because we make it strict to avoid messing with the movlp load 6555 // folding logic (see the code above getMOVLP call). Match it here then, 6556 // this is horrible, but will stay like this until we move all shuffle 6557 // matching to x86 specific nodes. Note that for the 1st condition all 6558 // types are matched with movsd. 
  if (HasSSE2) {
    // FIXME: isMOVLMask should be checked and matched before getMOVLP,
    // so as to remove this logic from here as much as possible.
    if (NumElems == 2 || !isMOVLMask(SVOp->getMask(), VT))
      return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
    return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
  }

  assert(VT != MVT::v4i32 && "unsupported shuffle type");

  // Invert the operand order and use SHUFPS to match it.
  return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V2, V1,
                              getShuffleSHUFImmediate(SVOp), DAG);
}

// Reduce a vector shuffle to zext.
SDValue
X86TargetLowering::lowerVectorIntExtend(SDValue Op, SelectionDAG &DAG) const {
  // PMOVZX is only available from SSE41.
  if (!Subtarget->hasSSE41())
    return SDValue();

  EVT VT = Op.getValueType();

  // Only AVX2 supports 256-bit vector integer extension.
  if (!Subtarget->hasAVX2() && VT.is256BitVector())
    return SDValue();

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  DebugLoc DL = Op.getDebugLoc();
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  unsigned NumElems = VT.getVectorNumElements();

  // Extension is a unary operation, and the element type of the source vector
  // must be smaller than i64.
  if (V2.getOpcode() != ISD::UNDEF || !VT.isInteger() ||
      VT.getVectorElementType() == MVT::i64)
    return SDValue();

  // Find the expansion ratio, e.g. expanding from i8 to i32 has a ratio of 4.
  unsigned Shift = 1; // Start from 2, i.e. 1 << 1.
  while ((1U << Shift) < NumElems) {
    if (SVOp->getMaskElt(1U << Shift) == 1)
      break;
    Shift += 1;
    // The maximal ratio is 8, i.e. from i8 to i64.
    if (Shift > 3)
      return SDValue();
  }

  // Check the shuffle mask.
  unsigned Mask = (1U << Shift) - 1;
  for (unsigned i = 0; i != NumElems; ++i) {
    int EltIdx = SVOp->getMaskElt(i);
    if ((i & Mask) != 0 && EltIdx != -1)
      return SDValue();
    if ((i & Mask) == 0 && (unsigned)EltIdx != (i >> Shift))
      return SDValue();
  }

  unsigned NBits = VT.getVectorElementType().getSizeInBits() << Shift;
  EVT NeVT = EVT::getIntegerVT(*DAG.getContext(), NBits);
  EVT NVT = EVT::getVectorVT(*DAG.getContext(), NeVT, NumElems >> Shift);

  if (!isTypeLegal(NVT))
    return SDValue();

  // Simplify the operand as it's prepared to be fed into the shuffle.
  unsigned SignificantBits = NVT.getSizeInBits() >> Shift;
  if (V1.getOpcode() == ISD::BITCAST &&
      V1.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
      V1.getOperand(0).getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      V1.getOperand(0)
        .getOperand(0).getValueType().getSizeInBits() == SignificantBits) {
    // (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast x)
    SDValue V = V1.getOperand(0).getOperand(0).getOperand(0);
    ConstantSDNode *CIdx =
      dyn_cast<ConstantSDNode>(V1.getOperand(0).getOperand(0).getOperand(1));
    // If it's foldable, i.e. a normal load with a single use, we let
    // instruction selection fold it. Otherwise, we shorten the conversion
    // sequence.
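    // The bypass below is taken only for an extract of element 0 whose source
    // is not a foldable load (i.e. not a normal load with a single use).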
6640 if (CIdx && CIdx->getZExtValue() == 0 && 6641 (!ISD::isNormalLoad(V.getNode()) || !V.hasOneUse())) 6642 V1 = DAG.getNode(ISD::BITCAST, DL, V1.getValueType(), V); 6643 } 6644 6645 return DAG.getNode(ISD::BITCAST, DL, VT, 6646 DAG.getNode(X86ISD::VZEXT, DL, NVT, V1)); 6647} 6648 6649SDValue 6650X86TargetLowering::NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG) const { 6651 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 6652 EVT VT = Op.getValueType(); 6653 DebugLoc dl = Op.getDebugLoc(); 6654 SDValue V1 = Op.getOperand(0); 6655 SDValue V2 = Op.getOperand(1); 6656 6657 if (isZeroShuffle(SVOp)) 6658 return getZeroVector(VT, Subtarget, DAG, dl); 6659 6660 // Handle splat operations 6661 if (SVOp->isSplat()) { 6662 unsigned NumElem = VT.getVectorNumElements(); 6663 int Size = VT.getSizeInBits(); 6664 6665 // Use vbroadcast whenever the splat comes from a foldable load 6666 SDValue Broadcast = LowerVectorBroadcast(Op, DAG); 6667 if (Broadcast.getNode()) 6668 return Broadcast; 6669 6670 // Handle splats by matching through known shuffle masks 6671 if ((Size == 128 && NumElem <= 4) || 6672 (Size == 256 && NumElem < 8)) 6673 return SDValue(); 6674 6675 // All remaning splats are promoted to target supported vector shuffles. 6676 return PromoteSplat(SVOp, DAG); 6677 } 6678 6679 // Check integer expanding shuffles. 6680 SDValue NewOp = lowerVectorIntExtend(Op, DAG); 6681 if (NewOp.getNode()) 6682 return NewOp; 6683 6684 // If the shuffle can be profitably rewritten as a narrower shuffle, then 6685 // do it! 6686 if (VT == MVT::v8i16 || VT == MVT::v16i8 || 6687 VT == MVT::v16i16 || VT == MVT::v32i8) { 6688 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl); 6689 if (NewOp.getNode()) 6690 return DAG.getNode(ISD::BITCAST, dl, VT, NewOp); 6691 } else if ((VT == MVT::v4i32 || 6692 (VT == MVT::v4f32 && Subtarget->hasSSE2()))) { 6693 // FIXME: Figure out a cleaner way to do this. 6694 // Try to make use of movq to zero out the top part. 6695 if (ISD::isBuildVectorAllZeros(V2.getNode())) { 6696 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl); 6697 if (NewOp.getNode()) { 6698 EVT NewVT = NewOp.getValueType(); 6699 if (isCommutedMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), 6700 NewVT, true, false)) 6701 return getVZextMovL(VT, NewVT, NewOp.getOperand(0), 6702 DAG, Subtarget, dl); 6703 } 6704 } else if (ISD::isBuildVectorAllZeros(V1.getNode())) { 6705 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl); 6706 if (NewOp.getNode()) { 6707 EVT NewVT = NewOp.getValueType(); 6708 if (isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), NewVT)) 6709 return getVZextMovL(VT, NewVT, NewOp.getOperand(1), 6710 DAG, Subtarget, dl); 6711 } 6712 } 6713 } 6714 return SDValue(); 6715} 6716 6717SDValue 6718X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const { 6719 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 6720 SDValue V1 = Op.getOperand(0); 6721 SDValue V2 = Op.getOperand(1); 6722 EVT VT = Op.getValueType(); 6723 DebugLoc dl = Op.getDebugLoc(); 6724 unsigned NumElems = VT.getVectorNumElements(); 6725 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF; 6726 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; 6727 bool V1IsSplat = false; 6728 bool V2IsSplat = false; 6729 bool HasSSE2 = Subtarget->hasSSE2(); 6730 bool HasAVX = Subtarget->hasAVX(); 6731 bool HasAVX2 = Subtarget->hasAVX2(); 6732 MachineFunction &MF = DAG.getMachineFunction(); 6733 bool OptForSize = MF.getFunction()->getFnAttributes(). 
6734 hasAttribute(Attributes::OptimizeForSize); 6735 6736 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles"); 6737 6738 if (V1IsUndef && V2IsUndef) 6739 return DAG.getUNDEF(VT); 6740 6741 assert(!V1IsUndef && "Op 1 of shuffle should not be undef"); 6742 6743 // Vector shuffle lowering takes 3 steps: 6744 // 6745 // 1) Normalize the input vectors. Here splats, zeroed vectors, profitable 6746 // narrowing and commutation of operands should be handled. 6747 // 2) Matching of shuffles with known shuffle masks to x86 target specific 6748 // shuffle nodes. 6749 // 3) Rewriting of unmatched masks into new generic shuffle operations, 6750 // so the shuffle can be broken into other shuffles and the legalizer can 6751 // try the lowering again. 6752 // 6753 // The general idea is that no vector_shuffle operation should be left to 6754 // be matched during isel, all of them must be converted to a target specific 6755 // node here. 6756 6757 // Normalize the input vectors. Here splats, zeroed vectors, profitable 6758 // narrowing and commutation of operands should be handled. The actual code 6759 // doesn't include all of those, work in progress... 6760 SDValue NewOp = NormalizeVectorShuffle(Op, DAG); 6761 if (NewOp.getNode()) 6762 return NewOp; 6763 6764 SmallVector<int, 8> M(SVOp->getMask().begin(), SVOp->getMask().end()); 6765 6766 // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and 6767 // unpckh_undef). Only use pshufd if speed is more important than size. 6768 if (OptForSize && isUNPCKL_v_undef_Mask(M, VT, HasAVX2)) 6769 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG); 6770 if (OptForSize && isUNPCKH_v_undef_Mask(M, VT, HasAVX2)) 6771 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG); 6772 6773 if (isMOVDDUPMask(M, VT) && Subtarget->hasSSE3() && 6774 V2IsUndef && RelaxedMayFoldVectorLoad(V1)) 6775 return getMOVDDup(Op, dl, V1, DAG); 6776 6777 if (isMOVHLPS_v_undef_Mask(M, VT)) 6778 return getMOVHighToLow(Op, dl, DAG); 6779 6780 // Use to match splats 6781 if (HasSSE2 && isUNPCKHMask(M, VT, HasAVX2) && V2IsUndef && 6782 (VT == MVT::v2f64 || VT == MVT::v2i64)) 6783 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG); 6784 6785 if (isPSHUFDMask(M, VT)) { 6786 // The actual implementation will match the mask in the if above and then 6787 // during isel it can match several different instructions, not only pshufd 6788 // as its name says, sad but true, emulate the behavior for now... 6789 if (isMOVDDUPMask(M, VT) && ((VT == MVT::v4f32 || VT == MVT::v2i64))) 6790 return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG); 6791 6792 unsigned TargetMask = getShuffleSHUFImmediate(SVOp); 6793 6794 if (HasAVX && (VT == MVT::v4f32 || VT == MVT::v2f64)) 6795 return getTargetShuffleNode(X86ISD::VPERMILP, dl, VT, V1, TargetMask, DAG); 6796 6797 if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32)) 6798 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG); 6799 6800 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V1, 6801 TargetMask, DAG); 6802 } 6803 6804 // Check if this can be converted into a logical shift. 6805 bool isLeft = false; 6806 unsigned ShAmt = 0; 6807 SDValue ShVal; 6808 bool isShift = HasSSE2 && isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt); 6809 if (isShift && ShVal.hasOneUse()) { 6810 // If the shifted value has multiple uses, it may be cheaper to use 6811 // v_set0 + movlhps or movhlps, etc. 
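    // The shift amount from isVectorShift is in elements; scale it by the
    // element size so getVShift receives a bit count.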
6812 EVT EltVT = VT.getVectorElementType(); 6813 ShAmt *= EltVT.getSizeInBits(); 6814 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl); 6815 } 6816 6817 if (isMOVLMask(M, VT)) { 6818 if (ISD::isBuildVectorAllZeros(V1.getNode())) 6819 return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl); 6820 if (!isMOVLPMask(M, VT)) { 6821 if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64)) 6822 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG); 6823 6824 if (VT == MVT::v4i32 || VT == MVT::v4f32) 6825 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG); 6826 } 6827 } 6828 6829 // FIXME: fold these into legal mask. 6830 if (isMOVLHPSMask(M, VT) && !isUNPCKLMask(M, VT, HasAVX2)) 6831 return getMOVLowToHigh(Op, dl, DAG, HasSSE2); 6832 6833 if (isMOVHLPSMask(M, VT)) 6834 return getMOVHighToLow(Op, dl, DAG); 6835 6836 if (V2IsUndef && isMOVSHDUPMask(M, VT, Subtarget)) 6837 return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG); 6838 6839 if (V2IsUndef && isMOVSLDUPMask(M, VT, Subtarget)) 6840 return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG); 6841 6842 if (isMOVLPMask(M, VT)) 6843 return getMOVLP(Op, dl, DAG, HasSSE2); 6844 6845 if (ShouldXformToMOVHLPS(M, VT) || 6846 ShouldXformToMOVLP(V1.getNode(), V2.getNode(), M, VT)) 6847 return CommuteVectorShuffle(SVOp, DAG); 6848 6849 if (isShift) { 6850 // No better options. Use a vshldq / vsrldq. 6851 EVT EltVT = VT.getVectorElementType(); 6852 ShAmt *= EltVT.getSizeInBits(); 6853 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl); 6854 } 6855 6856 bool Commuted = false; 6857 // FIXME: This should also accept a bitcast of a splat? Be careful, not 6858 // 1,1,1,1 -> v8i16 though. 6859 V1IsSplat = isSplatVector(V1.getNode()); 6860 V2IsSplat = isSplatVector(V2.getNode()); 6861 6862 // Canonicalize the splat or undef, if present, to be on the RHS. 6863 if (!V2IsUndef && V1IsSplat && !V2IsSplat) { 6864 CommuteVectorShuffleMask(M, NumElems); 6865 std::swap(V1, V2); 6866 std::swap(V1IsSplat, V2IsSplat); 6867 Commuted = true; 6868 } 6869 6870 if (isCommutedMOVLMask(M, VT, V2IsSplat, V2IsUndef)) { 6871 // Shuffling low element of v1 into undef, just return v1. 6872 if (V2IsUndef) 6873 return V1; 6874 // If V2 is a splat, the mask may be malformed such as <4,3,3,3>, which 6875 // the instruction selector will not match, so get a canonical MOVL with 6876 // swapped operands to undo the commute. 6877 return getMOVL(DAG, dl, VT, V2, V1); 6878 } 6879 6880 if (isUNPCKLMask(M, VT, HasAVX2)) 6881 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG); 6882 6883 if (isUNPCKHMask(M, VT, HasAVX2)) 6884 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG); 6885 6886 if (V2IsSplat) { 6887 // Normalize mask so all entries that point to V2 points to its first 6888 // element then try to match unpck{h|l} again. If match, return a 6889 // new vector_shuffle with the corrected mask.p 6890 SmallVector<int, 8> NewMask(M.begin(), M.end()); 6891 NormalizeMask(NewMask, NumElems); 6892 if (isUNPCKLMask(NewMask, VT, HasAVX2, true)) 6893 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG); 6894 if (isUNPCKHMask(NewMask, VT, HasAVX2, true)) 6895 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG); 6896 } 6897 6898 if (Commuted) { 6899 // Commute is back and try unpck* again. 6900 // FIXME: this seems wrong. 
6901 CommuteVectorShuffleMask(M, NumElems); 6902 std::swap(V1, V2); 6903 std::swap(V1IsSplat, V2IsSplat); 6904 Commuted = false; 6905 6906 if (isUNPCKLMask(M, VT, HasAVX2)) 6907 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG); 6908 6909 if (isUNPCKHMask(M, VT, HasAVX2)) 6910 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG); 6911 } 6912 6913 // Normalize the node to match x86 shuffle ops if needed 6914 if (!V2IsUndef && (isSHUFPMask(M, VT, HasAVX, /* Commuted */ true))) 6915 return CommuteVectorShuffle(SVOp, DAG); 6916 6917 // The checks below are all present in isShuffleMaskLegal, but they are 6918 // inlined here right now to enable us to directly emit target specific 6919 // nodes, and remove one by one until they don't return Op anymore. 6920 6921 if (isPALIGNRMask(M, VT, Subtarget)) 6922 return getTargetShuffleNode(X86ISD::PALIGN, dl, VT, V1, V2, 6923 getShufflePALIGNRImmediate(SVOp), 6924 DAG); 6925 6926 if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) && 6927 SVOp->getSplatIndex() == 0 && V2IsUndef) { 6928 if (VT == MVT::v2f64 || VT == MVT::v2i64) 6929 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG); 6930 } 6931 6932 if (isPSHUFHWMask(M, VT, HasAVX2)) 6933 return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1, 6934 getShufflePSHUFHWImmediate(SVOp), 6935 DAG); 6936 6937 if (isPSHUFLWMask(M, VT, HasAVX2)) 6938 return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1, 6939 getShufflePSHUFLWImmediate(SVOp), 6940 DAG); 6941 6942 if (isSHUFPMask(M, VT, HasAVX)) 6943 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V2, 6944 getShuffleSHUFImmediate(SVOp), DAG); 6945 6946 if (isUNPCKL_v_undef_Mask(M, VT, HasAVX2)) 6947 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG); 6948 if (isUNPCKH_v_undef_Mask(M, VT, HasAVX2)) 6949 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG); 6950 6951 //===--------------------------------------------------------------------===// 6952 // Generate target specific nodes for 128 or 256-bit shuffles only 6953 // supported in the AVX instruction set. 6954 // 6955 6956 // Handle VMOVDDUPY permutations 6957 if (V2IsUndef && isMOVDDUPYMask(M, VT, HasAVX)) 6958 return getTargetShuffleNode(X86ISD::MOVDDUP, dl, VT, V1, DAG); 6959 6960 // Handle VPERMILPS/D* permutations 6961 if (isVPERMILPMask(M, VT, HasAVX)) { 6962 if (HasAVX2 && VT == MVT::v8i32) 6963 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, 6964 getShuffleSHUFImmediate(SVOp), DAG); 6965 return getTargetShuffleNode(X86ISD::VPERMILP, dl, VT, V1, 6966 getShuffleSHUFImmediate(SVOp), DAG); 6967 } 6968 6969 // Handle VPERM2F128/VPERM2I128 permutations 6970 if (isVPERM2X128Mask(M, VT, HasAVX)) 6971 return getTargetShuffleNode(X86ISD::VPERM2X128, dl, VT, V1, 6972 V2, getShuffleVPERM2X128Immediate(SVOp), DAG); 6973 6974 SDValue BlendOp = LowerVECTOR_SHUFFLEtoBlend(SVOp, Subtarget, DAG); 6975 if (BlendOp.getNode()) 6976 return BlendOp; 6977 6978 if (V2IsUndef && HasAVX2 && (VT == MVT::v8i32 || VT == MVT::v8f32)) { 6979 SmallVector<SDValue, 8> permclMask; 6980 for (unsigned i = 0; i != 8; ++i) { 6981 permclMask.push_back(DAG.getConstant((M[i]>=0) ? 
M[i] : 0, MVT::i32)); 6982 } 6983 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, 6984 &permclMask[0], 8); 6985 // Bitcast is for VPERMPS since mask is v8i32 but node takes v8f32 6986 return DAG.getNode(X86ISD::VPERMV, dl, VT, 6987 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V1); 6988 } 6989 6990 if (V2IsUndef && HasAVX2 && (VT == MVT::v4i64 || VT == MVT::v4f64)) 6991 return getTargetShuffleNode(X86ISD::VPERMI, dl, VT, V1, 6992 getShuffleCLImmediate(SVOp), DAG); 6993 6994 6995 //===--------------------------------------------------------------------===// 6996 // Since no target specific shuffle was selected for this generic one, 6997 // lower it into other known shuffles. FIXME: this isn't true yet, but 6998 // this is the plan. 6999 // 7000 7001 // Handle v8i16 specifically since SSE can do byte extraction and insertion. 7002 if (VT == MVT::v8i16) { 7003 SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, Subtarget, DAG); 7004 if (NewOp.getNode()) 7005 return NewOp; 7006 } 7007 7008 if (VT == MVT::v16i8) { 7009 SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, DAG, *this); 7010 if (NewOp.getNode()) 7011 return NewOp; 7012 } 7013 7014 if (VT == MVT::v32i8) { 7015 SDValue NewOp = LowerVECTOR_SHUFFLEv32i8(SVOp, Subtarget, DAG); 7016 if (NewOp.getNode()) 7017 return NewOp; 7018 } 7019 7020 // Handle all 128-bit wide vectors with 4 elements, and match them with 7021 // several different shuffle types. 7022 if (NumElems == 4 && VT.is128BitVector()) 7023 return LowerVECTOR_SHUFFLE_128v4(SVOp, DAG); 7024 7025 // Handle general 256-bit shuffles 7026 if (VT.is256BitVector()) 7027 return LowerVECTOR_SHUFFLE_256(SVOp, DAG); 7028 7029 return SDValue(); 7030} 7031 7032SDValue 7033X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, 7034 SelectionDAG &DAG) const { 7035 EVT VT = Op.getValueType(); 7036 DebugLoc dl = Op.getDebugLoc(); 7037 7038 if (!Op.getOperand(0).getValueType().is128BitVector()) 7039 return SDValue(); 7040 7041 if (VT.getSizeInBits() == 8) { 7042 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32, 7043 Op.getOperand(0), Op.getOperand(1)); 7044 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract, 7045 DAG.getValueType(VT)); 7046 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert); 7047 } 7048 7049 if (VT.getSizeInBits() == 16) { 7050 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 7051 // If Idx is 0, it's cheaper to do a move instead of a pextrw. 7052 if (Idx == 0) 7053 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, 7054 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, 7055 DAG.getNode(ISD::BITCAST, dl, 7056 MVT::v4i32, 7057 Op.getOperand(0)), 7058 Op.getOperand(1))); 7059 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32, 7060 Op.getOperand(0), Op.getOperand(1)); 7061 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract, 7062 DAG.getValueType(VT)); 7063 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert); 7064 } 7065 7066 if (VT == MVT::f32) { 7067 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy 7068 // the result back to FR32 register. It's only worth matching if the 7069 // result has a single use which is a store or a bitcast to i32. And in 7070 // the case of a store, it's not worth it if the index is a constant 0, 7071 // because a MOVSSmr can be used instead, which is smaller and faster. 
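    // Bail out unless the single use is a bitcast to i32, or a store whose
    // index is not a constant zero (that case is better served by MOVSSmr).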
7072 if (!Op.hasOneUse()) 7073 return SDValue(); 7074 SDNode *User = *Op.getNode()->use_begin(); 7075 if ((User->getOpcode() != ISD::STORE || 7076 (isa<ConstantSDNode>(Op.getOperand(1)) && 7077 cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) && 7078 (User->getOpcode() != ISD::BITCAST || 7079 User->getValueType(0) != MVT::i32)) 7080 return SDValue(); 7081 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, 7082 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, 7083 Op.getOperand(0)), 7084 Op.getOperand(1)); 7085 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract); 7086 } 7087 7088 if (VT == MVT::i32 || VT == MVT::i64) { 7089 // ExtractPS/pextrq works with constant index. 7090 if (isa<ConstantSDNode>(Op.getOperand(1))) 7091 return Op; 7092 } 7093 return SDValue(); 7094} 7095 7096 7097SDValue 7098X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, 7099 SelectionDAG &DAG) const { 7100 if (!isa<ConstantSDNode>(Op.getOperand(1))) 7101 return SDValue(); 7102 7103 SDValue Vec = Op.getOperand(0); 7104 EVT VecVT = Vec.getValueType(); 7105 7106 // If this is a 256-bit vector result, first extract the 128-bit vector and 7107 // then extract the element from the 128-bit vector. 7108 if (VecVT.is256BitVector()) { 7109 DebugLoc dl = Op.getNode()->getDebugLoc(); 7110 unsigned NumElems = VecVT.getVectorNumElements(); 7111 SDValue Idx = Op.getOperand(1); 7112 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue(); 7113 7114 // Get the 128-bit vector. 7115 Vec = Extract128BitVector(Vec, IdxVal, DAG, dl); 7116 7117 if (IdxVal >= NumElems/2) 7118 IdxVal -= NumElems/2; 7119 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec, 7120 DAG.getConstant(IdxVal, MVT::i32)); 7121 } 7122 7123 assert(VecVT.is128BitVector() && "Unexpected vector length"); 7124 7125 if (Subtarget->hasSSE41()) { 7126 SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG); 7127 if (Res.getNode()) 7128 return Res; 7129 } 7130 7131 EVT VT = Op.getValueType(); 7132 DebugLoc dl = Op.getDebugLoc(); 7133 // TODO: handle v16i8. 7134 if (VT.getSizeInBits() == 16) { 7135 SDValue Vec = Op.getOperand(0); 7136 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 7137 if (Idx == 0) 7138 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, 7139 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, 7140 DAG.getNode(ISD::BITCAST, dl, 7141 MVT::v4i32, Vec), 7142 Op.getOperand(1))); 7143 // Transform it so it match pextrw which produces a 32-bit result. 7144 EVT EltVT = MVT::i32; 7145 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT, 7146 Op.getOperand(0), Op.getOperand(1)); 7147 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract, 7148 DAG.getValueType(VT)); 7149 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert); 7150 } 7151 7152 if (VT.getSizeInBits() == 32) { 7153 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 7154 if (Idx == 0) 7155 return Op; 7156 7157 // SHUFPS the element to the lowest double word, then movss. 7158 int Mask[4] = { static_cast<int>(Idx), -1, -1, -1 }; 7159 EVT VVT = Op.getOperand(0).getValueType(); 7160 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0), 7161 DAG.getUNDEF(VVT), Mask); 7162 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec, 7163 DAG.getIntPtrConstant(0)); 7164 } 7165 7166 if (VT.getSizeInBits() == 64) { 7167 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b 7168 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught 7169 // to match extract_elt for f64. 
7170 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 7171 if (Idx == 0) 7172 return Op; 7173 7174 // UNPCKHPD the element to the lowest double word, then movsd. 7175 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored 7176 // to a f64mem, the whole operation is folded into a single MOVHPDmr. 7177 int Mask[2] = { 1, -1 }; 7178 EVT VVT = Op.getOperand(0).getValueType(); 7179 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0), 7180 DAG.getUNDEF(VVT), Mask); 7181 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec, 7182 DAG.getIntPtrConstant(0)); 7183 } 7184 7185 return SDValue(); 7186} 7187 7188SDValue 7189X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, 7190 SelectionDAG &DAG) const { 7191 EVT VT = Op.getValueType(); 7192 EVT EltVT = VT.getVectorElementType(); 7193 DebugLoc dl = Op.getDebugLoc(); 7194 7195 SDValue N0 = Op.getOperand(0); 7196 SDValue N1 = Op.getOperand(1); 7197 SDValue N2 = Op.getOperand(2); 7198 7199 if (!VT.is128BitVector()) 7200 return SDValue(); 7201 7202 if ((EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) && 7203 isa<ConstantSDNode>(N2)) { 7204 unsigned Opc; 7205 if (VT == MVT::v8i16) 7206 Opc = X86ISD::PINSRW; 7207 else if (VT == MVT::v16i8) 7208 Opc = X86ISD::PINSRB; 7209 else 7210 Opc = X86ISD::PINSRB; 7211 7212 // Transform it so it match pinsr{b,w} which expects a GR32 as its second 7213 // argument. 7214 if (N1.getValueType() != MVT::i32) 7215 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1); 7216 if (N2.getValueType() != MVT::i32) 7217 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue()); 7218 return DAG.getNode(Opc, dl, VT, N0, N1, N2); 7219 } 7220 7221 if (EltVT == MVT::f32 && isa<ConstantSDNode>(N2)) { 7222 // Bits [7:6] of the constant are the source select. This will always be 7223 // zero here. The DAG Combiner may combine an extract_elt index into these 7224 // bits. For example (insert (extract, 3), 2) could be matched by putting 7225 // the '3' into bits [7:6] of X86ISD::INSERTPS. 7226 // Bits [5:4] of the constant are the destination select. This is the 7227 // value of the incoming immediate. 7228 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may 7229 // combine either bitwise AND or insert of float 0.0 to set these bits. 7230 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue() << 4); 7231 // Create this as a scalar to vector.. 7232 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1); 7233 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2); 7234 } 7235 7236 if ((EltVT == MVT::i32 || EltVT == MVT::i64) && isa<ConstantSDNode>(N2)) { 7237 // PINSR* works with constant index. 7238 return Op; 7239 } 7240 return SDValue(); 7241} 7242 7243SDValue 7244X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const { 7245 EVT VT = Op.getValueType(); 7246 EVT EltVT = VT.getVectorElementType(); 7247 7248 DebugLoc dl = Op.getDebugLoc(); 7249 SDValue N0 = Op.getOperand(0); 7250 SDValue N1 = Op.getOperand(1); 7251 SDValue N2 = Op.getOperand(2); 7252 7253 // If this is a 256-bit vector result, first extract the 128-bit vector, 7254 // insert the element into the extracted half and then place it back. 7255 if (VT.is256BitVector()) { 7256 if (!isa<ConstantSDNode>(N2)) 7257 return SDValue(); 7258 7259 // Get the desired 128-bit vector half. 
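    // Extract128BitVector returns the 128-bit half that contains element
    // IdxVal of the original 256-bit vector.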
7260 unsigned NumElems = VT.getVectorNumElements(); 7261 unsigned IdxVal = cast<ConstantSDNode>(N2)->getZExtValue(); 7262 SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl); 7263 7264 // Insert the element into the desired half. 7265 bool Upper = IdxVal >= NumElems/2; 7266 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1, 7267 DAG.getConstant(Upper ? IdxVal-NumElems/2 : IdxVal, MVT::i32)); 7268 7269 // Insert the changed part back to the 256-bit vector 7270 return Insert128BitVector(N0, V, IdxVal, DAG, dl); 7271 } 7272 7273 if (Subtarget->hasSSE41()) 7274 return LowerINSERT_VECTOR_ELT_SSE4(Op, DAG); 7275 7276 if (EltVT == MVT::i8) 7277 return SDValue(); 7278 7279 if (EltVT.getSizeInBits() == 16 && isa<ConstantSDNode>(N2)) { 7280 // Transform it so it match pinsrw which expects a 16-bit value in a GR32 7281 // as its second argument. 7282 if (N1.getValueType() != MVT::i32) 7283 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1); 7284 if (N2.getValueType() != MVT::i32) 7285 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue()); 7286 return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2); 7287 } 7288 return SDValue(); 7289} 7290 7291static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) { 7292 LLVMContext *Context = DAG.getContext(); 7293 DebugLoc dl = Op.getDebugLoc(); 7294 EVT OpVT = Op.getValueType(); 7295 7296 // If this is a 256-bit vector result, first insert into a 128-bit 7297 // vector and then insert into the 256-bit vector. 7298 if (!OpVT.is128BitVector()) { 7299 // Insert into a 128-bit vector. 7300 EVT VT128 = EVT::getVectorVT(*Context, 7301 OpVT.getVectorElementType(), 7302 OpVT.getVectorNumElements() / 2); 7303 7304 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0)); 7305 7306 // Insert the 128-bit vector. 7307 return Insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl); 7308 } 7309 7310 if (OpVT == MVT::v1i64 && 7311 Op.getOperand(0).getValueType() == MVT::i64) 7312 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0)); 7313 7314 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0)); 7315 assert(OpVT.is128BitVector() && "Expected an SSE type!"); 7316 return DAG.getNode(ISD::BITCAST, dl, OpVT, 7317 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt)); 7318} 7319 7320// Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in 7321// a simple subregister reference or explicit instructions to grab 7322// upper bits of a vector. 7323static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget, 7324 SelectionDAG &DAG) { 7325 if (Subtarget->hasAVX()) { 7326 DebugLoc dl = Op.getNode()->getDebugLoc(); 7327 SDValue Vec = Op.getNode()->getOperand(0); 7328 SDValue Idx = Op.getNode()->getOperand(1); 7329 7330 if (Op.getNode()->getValueType(0).is128BitVector() && 7331 Vec.getNode()->getValueType(0).is256BitVector() && 7332 isa<ConstantSDNode>(Idx)) { 7333 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue(); 7334 return Extract128BitVector(Vec, IdxVal, DAG, dl); 7335 } 7336 } 7337 return SDValue(); 7338} 7339 7340// Lower a node with an INSERT_SUBVECTOR opcode. This may result in a 7341// simple superregister reference or explicit instructions to insert 7342// the upper bits of a vector. 
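// With AVX, inserting a 128-bit subvector at a constant index goes through
// Insert128BitVector and typically matches a single VINSERTF128.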
static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
                                     SelectionDAG &DAG) {
  if (Subtarget->hasAVX()) {
    DebugLoc dl = Op.getNode()->getDebugLoc();
    SDValue Vec = Op.getNode()->getOperand(0);
    SDValue SubVec = Op.getNode()->getOperand(1);
    SDValue Idx = Op.getNode()->getOperand(2);

    if (Op.getNode()->getValueType(0).is256BitVector() &&
        SubVec.getNode()->getValueType(0).is128BitVector() &&
        isa<ConstantSDNode>(Idx)) {
      unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
      return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl);
    }
  }
  return SDValue();
}

// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
// one of the above mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form addressing modes. These wrapped nodes will be selected
// into MOV32ri.
SDValue
X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);

  // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = 0;
  unsigned WrapperKind = X86ISD::Wrapper;
  CodeModel::Model M = getTargetMachine().getCodeModel();

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel))
    WrapperKind = X86ISD::WrapperRIP;
  else if (Subtarget->isPICStyleGOT())
    OpFlag = X86II::MO_GOTOFF;
  else if (Subtarget->isPICStyleStubPIC())
    OpFlag = X86II::MO_PIC_BASE_OFFSET;

  SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(),
                                             CP->getAlignment(),
                                             CP->getOffset(), OpFlag);
  DebugLoc DL = CP->getDebugLoc();
  Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
  // With PIC, the address is actually $g + Offset.
  if (OpFlag) {
    Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg,
                                     DebugLoc(), getPointerTy()),
                         Result);
  }

  return Result;
}

SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);

  // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = 0;
  unsigned WrapperKind = X86ISD::Wrapper;
  CodeModel::Model M = getTargetMachine().getCodeModel();

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel))
    WrapperKind = X86ISD::WrapperRIP;
  else if (Subtarget->isPICStyleGOT())
    OpFlag = X86II::MO_GOTOFF;
  else if (Subtarget->isPICStyleStubPIC())
    OpFlag = X86II::MO_PIC_BASE_OFFSET;

  SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(),
                                          OpFlag);
  DebugLoc DL = JT->getDebugLoc();
  Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);

  // With PIC, the address is actually $g + Offset.
7424 if (OpFlag) 7425 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(), 7426 DAG.getNode(X86ISD::GlobalBaseReg, 7427 DebugLoc(), getPointerTy()), 7428 Result); 7429 7430 return Result; 7431} 7432 7433SDValue 7434X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const { 7435 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol(); 7436 7437 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the 7438 // global base reg. 7439 unsigned char OpFlag = 0; 7440 unsigned WrapperKind = X86ISD::Wrapper; 7441 CodeModel::Model M = getTargetMachine().getCodeModel(); 7442 7443 if (Subtarget->isPICStyleRIPRel() && 7444 (M == CodeModel::Small || M == CodeModel::Kernel)) { 7445 if (Subtarget->isTargetDarwin() || Subtarget->isTargetELF()) 7446 OpFlag = X86II::MO_GOTPCREL; 7447 WrapperKind = X86ISD::WrapperRIP; 7448 } else if (Subtarget->isPICStyleGOT()) { 7449 OpFlag = X86II::MO_GOT; 7450 } else if (Subtarget->isPICStyleStubPIC()) { 7451 OpFlag = X86II::MO_DARWIN_NONLAZY_PIC_BASE; 7452 } else if (Subtarget->isPICStyleStubNoDynamic()) { 7453 OpFlag = X86II::MO_DARWIN_NONLAZY; 7454 } 7455 7456 SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag); 7457 7458 DebugLoc DL = Op.getDebugLoc(); 7459 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result); 7460 7461 7462 // With PIC, the address is actually $g + Offset. 7463 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 7464 !Subtarget->is64Bit()) { 7465 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(), 7466 DAG.getNode(X86ISD::GlobalBaseReg, 7467 DebugLoc(), getPointerTy()), 7468 Result); 7469 } 7470 7471 // For symbols that require a load from a stub to get the address, emit the 7472 // load. 7473 if (isGlobalStubReference(OpFlag)) 7474 Result = DAG.getLoad(getPointerTy(), DL, DAG.getEntryNode(), Result, 7475 MachinePointerInfo::getGOT(), false, false, false, 0); 7476 7477 return Result; 7478} 7479 7480SDValue 7481X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const { 7482 // Create the TargetBlockAddressAddress node. 7483 unsigned char OpFlags = 7484 Subtarget->ClassifyBlockAddressReference(); 7485 CodeModel::Model M = getTargetMachine().getCodeModel(); 7486 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 7487 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset(); 7488 DebugLoc dl = Op.getDebugLoc(); 7489 SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(), Offset, 7490 OpFlags); 7491 7492 if (Subtarget->isPICStyleRIPRel() && 7493 (M == CodeModel::Small || M == CodeModel::Kernel)) 7494 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result); 7495 else 7496 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result); 7497 7498 // With PIC, the address is actually $g + Offset. 7499 if (isGlobalRelativeToPICBase(OpFlags)) { 7500 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), 7501 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()), 7502 Result); 7503 } 7504 7505 return Result; 7506} 7507 7508SDValue 7509X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl, 7510 int64_t Offset, 7511 SelectionDAG &DAG) const { 7512 // Create the TargetGlobalAddress node, folding in the constant 7513 // offset if it is legal. 
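  // The offset can only be folded for a direct (no-flag) reference whose
  // offset is acceptable for the current code model; otherwise it is added
  // back explicitly at the end.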
7514 unsigned char OpFlags = 7515 Subtarget->ClassifyGlobalReference(GV, getTargetMachine()); 7516 CodeModel::Model M = getTargetMachine().getCodeModel(); 7517 SDValue Result; 7518 if (OpFlags == X86II::MO_NO_FLAG && 7519 X86::isOffsetSuitableForCodeModel(Offset, M)) { 7520 // A direct static reference to a global. 7521 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset); 7522 Offset = 0; 7523 } else { 7524 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags); 7525 } 7526 7527 if (Subtarget->isPICStyleRIPRel() && 7528 (M == CodeModel::Small || M == CodeModel::Kernel)) 7529 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result); 7530 else 7531 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result); 7532 7533 // With PIC, the address is actually $g + Offset. 7534 if (isGlobalRelativeToPICBase(OpFlags)) { 7535 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), 7536 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()), 7537 Result); 7538 } 7539 7540 // For globals that require a load from a stub to get the address, emit the 7541 // load. 7542 if (isGlobalStubReference(OpFlags)) 7543 Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result, 7544 MachinePointerInfo::getGOT(), false, false, false, 0); 7545 7546 // If there was a non-zero offset that we didn't fold, create an explicit 7547 // addition for it. 7548 if (Offset != 0) 7549 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result, 7550 DAG.getConstant(Offset, getPointerTy())); 7551 7552 return Result; 7553} 7554 7555SDValue 7556X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const { 7557 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 7558 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset(); 7559 return LowerGlobalAddress(GV, Op.getDebugLoc(), Offset, DAG); 7560} 7561 7562static SDValue 7563GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA, 7564 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg, 7565 unsigned char OperandFlags, bool LocalDynamic = false) { 7566 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 7567 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 7568 DebugLoc dl = GA->getDebugLoc(); 7569 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, 7570 GA->getValueType(0), 7571 GA->getOffset(), 7572 OperandFlags); 7573 7574 X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR 7575 : X86ISD::TLSADDR; 7576 7577 if (InFlag) { 7578 SDValue Ops[] = { Chain, TGA, *InFlag }; 7579 Chain = DAG.getNode(CallType, dl, NodeTys, Ops, 3); 7580 } else { 7581 SDValue Ops[] = { Chain, TGA }; 7582 Chain = DAG.getNode(CallType, dl, NodeTys, Ops, 2); 7583 } 7584 7585 // TLSADDR will be codegen'ed as call. Inform MFI that function has calls. 7586 MFI->setAdjustsStack(true); 7587 7588 SDValue Flag = Chain.getValue(1); 7589 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag); 7590} 7591 7592// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit 7593static SDValue 7594LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG, 7595 const EVT PtrVT) { 7596 SDValue InFlag; 7597 DebugLoc dl = GA->getDebugLoc(); // ? 
function entry point might be better 7598 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX, 7599 DAG.getNode(X86ISD::GlobalBaseReg, 7600 DebugLoc(), PtrVT), InFlag); 7601 InFlag = Chain.getValue(1); 7602 7603 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD); 7604} 7605 7606// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit 7607static SDValue 7608LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG, 7609 const EVT PtrVT) { 7610 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, NULL, PtrVT, 7611 X86::RAX, X86II::MO_TLSGD); 7612} 7613 7614static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA, 7615 SelectionDAG &DAG, 7616 const EVT PtrVT, 7617 bool is64Bit) { 7618 DebugLoc dl = GA->getDebugLoc(); 7619 7620 // Get the start address of the TLS block for this module. 7621 X86MachineFunctionInfo* MFI = DAG.getMachineFunction() 7622 .getInfo<X86MachineFunctionInfo>(); 7623 MFI->incNumLocalDynamicTLSAccesses(); 7624 7625 SDValue Base; 7626 if (is64Bit) { 7627 Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, NULL, PtrVT, X86::RAX, 7628 X86II::MO_TLSLD, /*LocalDynamic=*/true); 7629 } else { 7630 SDValue InFlag; 7631 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX, 7632 DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), PtrVT), InFlag); 7633 InFlag = Chain.getValue(1); 7634 Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, 7635 X86II::MO_TLSLDM, /*LocalDynamic=*/true); 7636 } 7637 7638 // Note: the CleanupLocalDynamicTLSPass will remove redundant computations 7639 // of Base. 7640 7641 // Build x@dtpoff. 7642 unsigned char OperandFlags = X86II::MO_DTPOFF; 7643 unsigned WrapperKind = X86ISD::Wrapper; 7644 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, 7645 GA->getValueType(0), 7646 GA->getOffset(), OperandFlags); 7647 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA); 7648 7649 // Add x@dtpoff with the base. 7650 return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base); 7651} 7652 7653// Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model. 7654static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, 7655 const EVT PtrVT, TLSModel::Model model, 7656 bool is64Bit, bool isPIC) { 7657 DebugLoc dl = GA->getDebugLoc(); 7658 7659 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit). 7660 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(), 7661 is64Bit ? 257 : 256)); 7662 7663 SDValue ThreadPointer = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 7664 DAG.getIntPtrConstant(0), 7665 MachinePointerInfo(Ptr), 7666 false, false, false, 0); 7667 7668 unsigned char OperandFlags = 0; 7669 // Most TLS accesses are not RIP relative, even on x86-64. One exception is 7670 // initialexec. 7671 unsigned WrapperKind = X86ISD::Wrapper; 7672 if (model == TLSModel::LocalExec) { 7673 OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF; 7674 } else if (model == TLSModel::InitialExec) { 7675 if (is64Bit) { 7676 OperandFlags = X86II::MO_GOTTPOFF; 7677 WrapperKind = X86ISD::WrapperRIP; 7678 } else { 7679 OperandFlags = isPIC ? 
X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF; 7680 } 7681 } else { 7682 llvm_unreachable("Unexpected model"); 7683 } 7684 7685 // emit "addl x@ntpoff,%eax" (local exec) 7686 // or "addl x@indntpoff,%eax" (initial exec) 7687 // or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic) 7688 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, 7689 GA->getValueType(0), 7690 GA->getOffset(), OperandFlags); 7691 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA); 7692 7693 if (model == TLSModel::InitialExec) { 7694 if (isPIC && !is64Bit) { 7695 Offset = DAG.getNode(ISD::ADD, dl, PtrVT, 7696 DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), PtrVT), 7697 Offset); 7698 } 7699 7700 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset, 7701 MachinePointerInfo::getGOT(), false, false, false, 7702 0); 7703 } 7704 7705 // The address of the thread local variable is the add of the thread 7706 // pointer with the offset of the variable. 7707 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); 7708} 7709 7710SDValue 7711X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { 7712 7713 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 7714 const GlobalValue *GV = GA->getGlobal(); 7715 7716 if (Subtarget->isTargetELF()) { 7717 TLSModel::Model model = getTargetMachine().getTLSModel(GV); 7718 7719 switch (model) { 7720 case TLSModel::GeneralDynamic: 7721 if (Subtarget->is64Bit()) 7722 return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy()); 7723 return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy()); 7724 case TLSModel::LocalDynamic: 7725 return LowerToTLSLocalDynamicModel(GA, DAG, getPointerTy(), 7726 Subtarget->is64Bit()); 7727 case TLSModel::InitialExec: 7728 case TLSModel::LocalExec: 7729 return LowerToTLSExecModel(GA, DAG, getPointerTy(), model, 7730 Subtarget->is64Bit(), 7731 getTargetMachine().getRelocationModel() == Reloc::PIC_); 7732 } 7733 llvm_unreachable("Unknown TLS model."); 7734 } 7735 7736 if (Subtarget->isTargetDarwin()) { 7737 // Darwin only has one model of TLS. Lower to that. 7738 unsigned char OpFlag = 0; 7739 unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ? 7740 X86ISD::WrapperRIP : X86ISD::Wrapper; 7741 7742 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the 7743 // global base reg. 7744 bool PIC32 = (getTargetMachine().getRelocationModel() == Reloc::PIC_) && 7745 !Subtarget->is64Bit(); 7746 if (PIC32) 7747 OpFlag = X86II::MO_TLVP_PIC_BASE; 7748 else 7749 OpFlag = X86II::MO_TLVP; 7750 DebugLoc DL = Op.getDebugLoc(); 7751 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL, 7752 GA->getValueType(0), 7753 GA->getOffset(), OpFlag); 7754 SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result); 7755 7756 // With PIC32, the address is actually $g + Offset. 7757 if (PIC32) 7758 Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(), 7759 DAG.getNode(X86ISD::GlobalBaseReg, 7760 DebugLoc(), getPointerTy()), 7761 Offset); 7762 7763 // Lowering the machine isd will make sure everything is in the right 7764 // location. 7765 SDValue Chain = DAG.getEntryNode(); 7766 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 7767 SDValue Args[] = { Chain, Offset }; 7768 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args, 2); 7769 7770 // TLSCALL will be codegen'ed as call. Inform MFI that function has calls. 
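    // For illustration only -- an approximate final instruction sequence for
    // the x86-64 Darwin case (the 32-bit form uses %eax/%ecx and the
    // picbase-relative TLVP reference instead):
    //   movq  _var@TLVP(%rip), %rdi   ; load the TLV descriptor's address
    //   callq *(%rdi)                 ; call the descriptor's getter
    //   ; %rax now holds the address of the thread-local variable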
7771 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 7772 MFI->setAdjustsStack(true); 7773 7774 // And our return value (tls address) is in the standard call return value 7775 // location. 7776 unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX; 7777 return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(), 7778 Chain.getValue(1)); 7779 } 7780 7781 if (Subtarget->isTargetWindows()) { 7782 // Just use the implicit TLS architecture 7783 // Need to generate someting similar to: 7784 // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage 7785 // ; from TEB 7786 // mov ecx, dword [rel _tls_index]: Load index (from C runtime) 7787 // mov rcx, qword [rdx+rcx*8] 7788 // mov eax, .tls$:tlsvar 7789 // [rax+rcx] contains the address 7790 // Windows 64bit: gs:0x58 7791 // Windows 32bit: fs:__tls_array 7792 7793 // If GV is an alias then use the aliasee for determining 7794 // thread-localness. 7795 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV)) 7796 GV = GA->resolveAliasedGlobal(false); 7797 DebugLoc dl = GA->getDebugLoc(); 7798 SDValue Chain = DAG.getEntryNode(); 7799 7800 // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or 7801 // %gs:0x58 (64-bit). 7802 Value *Ptr = Constant::getNullValue(Subtarget->is64Bit() 7803 ? Type::getInt8PtrTy(*DAG.getContext(), 7804 256) 7805 : Type::getInt32PtrTy(*DAG.getContext(), 7806 257)); 7807 7808 SDValue ThreadPointer = DAG.getLoad(getPointerTy(), dl, Chain, 7809 Subtarget->is64Bit() 7810 ? DAG.getIntPtrConstant(0x58) 7811 : DAG.getExternalSymbol("_tls_array", 7812 getPointerTy()), 7813 MachinePointerInfo(Ptr), 7814 false, false, false, 0); 7815 7816 // Load the _tls_index variable 7817 SDValue IDX = DAG.getExternalSymbol("_tls_index", getPointerTy()); 7818 if (Subtarget->is64Bit()) 7819 IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, getPointerTy(), Chain, 7820 IDX, MachinePointerInfo(), MVT::i32, 7821 false, false, 0); 7822 else 7823 IDX = DAG.getLoad(getPointerTy(), dl, Chain, IDX, MachinePointerInfo(), 7824 false, false, false, 0); 7825 7826 SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize(0)), 7827 getPointerTy()); 7828 IDX = DAG.getNode(ISD::SHL, dl, getPointerTy(), IDX, Scale); 7829 7830 SDValue res = DAG.getNode(ISD::ADD, dl, getPointerTy(), ThreadPointer, IDX); 7831 res = DAG.getLoad(getPointerTy(), dl, Chain, res, MachinePointerInfo(), 7832 false, false, false, 0); 7833 7834 // Get the offset of start of .tls section 7835 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, 7836 GA->getValueType(0), 7837 GA->getOffset(), X86II::MO_SECREL); 7838 SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), TGA); 7839 7840 // The address of the thread local variable is the add of the thread 7841 // pointer with the offset of the variable. 7842 return DAG.getNode(ISD::ADD, dl, getPointerTy(), res, Offset); 7843 } 7844 7845 llvm_unreachable("TLS not implemented for this target."); 7846} 7847 7848 7849/// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values 7850/// and take a 2 x i32 value to shift plus a shift amount. 
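/// A rough sketch of the SHL_PARTS case (an i64 held as a (Lo, Hi) pair of
/// i32 values, shifted left by ShAmt, which may be >= 32):
///   Tmp2 = shld(Hi, Lo, ShAmt);  Tmp3 = shl(Lo, ShAmt);
///   if (ShAmt & 32) { Hi = Tmp3; Lo = 0;    }
///   else            { Hi = Tmp2; Lo = Tmp3; }
/// SRL_PARTS/SRA_PARTS mirror this with shrd and srl/sra; the selection on
/// "ShAmt & 32" is done with the CMOV nodes built below.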
7851SDValue X86TargetLowering::LowerShiftParts(SDValue Op, SelectionDAG &DAG) const{ 7852 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 7853 EVT VT = Op.getValueType(); 7854 unsigned VTBits = VT.getSizeInBits(); 7855 DebugLoc dl = Op.getDebugLoc(); 7856 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS; 7857 SDValue ShOpLo = Op.getOperand(0); 7858 SDValue ShOpHi = Op.getOperand(1); 7859 SDValue ShAmt = Op.getOperand(2); 7860 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi, 7861 DAG.getConstant(VTBits - 1, MVT::i8)) 7862 : DAG.getConstant(0, VT); 7863 7864 SDValue Tmp2, Tmp3; 7865 if (Op.getOpcode() == ISD::SHL_PARTS) { 7866 Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt); 7867 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); 7868 } else { 7869 Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt); 7870 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, ShAmt); 7871 } 7872 7873 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt, 7874 DAG.getConstant(VTBits, MVT::i8)); 7875 SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32, 7876 AndNode, DAG.getConstant(0, MVT::i8)); 7877 7878 SDValue Hi, Lo; 7879 SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8); 7880 SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond }; 7881 SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond }; 7882 7883 if (Op.getOpcode() == ISD::SHL_PARTS) { 7884 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0, 4); 7885 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1, 4); 7886 } else { 7887 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0, 4); 7888 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1, 4); 7889 } 7890 7891 SDValue Ops[2] = { Lo, Hi }; 7892 return DAG.getMergeValues(Ops, 2, dl); 7893} 7894 7895SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op, 7896 SelectionDAG &DAG) const { 7897 EVT SrcVT = Op.getOperand(0).getValueType(); 7898 7899 if (SrcVT.isVector()) 7900 return SDValue(); 7901 7902 assert(SrcVT.getSimpleVT() <= MVT::i64 && SrcVT.getSimpleVT() >= MVT::i16 && 7903 "Unknown SINT_TO_FP to lower!"); 7904 7905 // These are really Legal; return the operand so the caller accepts it as 7906 // Legal. 
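  // (Sketch: when the result lives in an SSE register these conversions are
  //  selected directly to cvtsi2ss/cvtsi2sd, so no stack temporary or x87
  //  FILD is needed.)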
7907 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType())) 7908 return Op; 7909 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) && 7910 Subtarget->is64Bit()) { 7911 return Op; 7912 } 7913 7914 DebugLoc dl = Op.getDebugLoc(); 7915 unsigned Size = SrcVT.getSizeInBits()/8; 7916 MachineFunction &MF = DAG.getMachineFunction(); 7917 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false); 7918 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 7919 SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), 7920 StackSlot, 7921 MachinePointerInfo::getFixedStack(SSFI), 7922 false, false, 0); 7923 return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG); 7924} 7925 7926SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, 7927 SDValue StackSlot, 7928 SelectionDAG &DAG) const { 7929 // Build the FILD 7930 DebugLoc DL = Op.getDebugLoc(); 7931 SDVTList Tys; 7932 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType()); 7933 if (useSSE) 7934 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue); 7935 else 7936 Tys = DAG.getVTList(Op.getValueType(), MVT::Other); 7937 7938 unsigned ByteSize = SrcVT.getSizeInBits()/8; 7939 7940 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot); 7941 MachineMemOperand *MMO; 7942 if (FI) { 7943 int SSFI = FI->getIndex(); 7944 MMO = 7945 DAG.getMachineFunction() 7946 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 7947 MachineMemOperand::MOLoad, ByteSize, ByteSize); 7948 } else { 7949 MMO = cast<LoadSDNode>(StackSlot)->getMemOperand(); 7950 StackSlot = StackSlot.getOperand(1); 7951 } 7952 SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) }; 7953 SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG : 7954 X86ISD::FILD, DL, 7955 Tys, Ops, array_lengthof(Ops), 7956 SrcVT, MMO); 7957 7958 if (useSSE) { 7959 Chain = Result.getValue(1); 7960 SDValue InFlag = Result.getValue(2); 7961 7962 // FIXME: Currently the FST is flagged to the FILD_FLAG. This 7963 // shouldn't be necessary except that RFP cannot be live across 7964 // multiple blocks. When stackifier is fixed, they can be uncoupled. 7965 MachineFunction &MF = DAG.getMachineFunction(); 7966 unsigned SSFISize = Op.getValueType().getSizeInBits()/8; 7967 int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false); 7968 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 7969 Tys = DAG.getVTList(MVT::Other); 7970 SDValue Ops[] = { 7971 Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag 7972 }; 7973 MachineMemOperand *MMO = 7974 DAG.getMachineFunction() 7975 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 7976 MachineMemOperand::MOStore, SSFISize, SSFISize); 7977 7978 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys, 7979 Ops, array_lengthof(Ops), 7980 Op.getValueType(), MMO); 7981 Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot, 7982 MachinePointerInfo::getFixedStack(SSFI), 7983 false, false, false, 0); 7984 } 7985 7986 return Result; 7987} 7988 7989// LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion. 7990SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op, 7991 SelectionDAG &DAG) const { 7992 // This algorithm is not obvious. 
Here it is what we're trying to output: 7993 /* 7994 movq %rax, %xmm0 7995 punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U } 7996 subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 } 7997 #ifdef __SSE3__ 7998 haddpd %xmm0, %xmm0 7999 #else 8000 pshufd $0x4e, %xmm0, %xmm1 8001 addpd %xmm1, %xmm0 8002 #endif 8003 */ 8004 8005 DebugLoc dl = Op.getDebugLoc(); 8006 LLVMContext *Context = DAG.getContext(); 8007 8008 // Build some magic constants. 8009 const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 }; 8010 Constant *C0 = ConstantDataVector::get(*Context, CV0); 8011 SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16); 8012 8013 SmallVector<Constant*,2> CV1; 8014 CV1.push_back( 8015 ConstantFP::get(*Context, APFloat(APInt(64, 0x4330000000000000ULL)))); 8016 CV1.push_back( 8017 ConstantFP::get(*Context, APFloat(APInt(64, 0x4530000000000000ULL)))); 8018 Constant *C1 = ConstantVector::get(CV1); 8019 SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16); 8020 8021 // Load the 64-bit value into an XMM register. 8022 SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, 8023 Op.getOperand(0)); 8024 SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0, 8025 MachinePointerInfo::getConstantPool(), 8026 false, false, false, 16); 8027 SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32, 8028 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, XR1), 8029 CLod0); 8030 8031 SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1, 8032 MachinePointerInfo::getConstantPool(), 8033 false, false, false, 16); 8034 SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck1); 8035 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1); 8036 SDValue Result; 8037 8038 if (Subtarget->hasSSE3()) { 8039 // FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'. 8040 Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub); 8041 } else { 8042 SDValue S2F = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Sub); 8043 SDValue Shuffle = getTargetShuffleNode(X86ISD::PSHUFD, dl, MVT::v4i32, 8044 S2F, 0x4E, DAG); 8045 Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64, 8046 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Shuffle), 8047 Sub); 8048 } 8049 8050 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result, 8051 DAG.getIntPtrConstant(0)); 8052} 8053 8054// LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion. 8055SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op, 8056 SelectionDAG &DAG) const { 8057 DebugLoc dl = Op.getDebugLoc(); 8058 // FP constant to bias correct the final result. 8059 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), 8060 MVT::f64); 8061 8062 // Load the 32-bit value into an XMM register. 8063 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, 8064 Op.getOperand(0)); 8065 8066 // Zero out the upper parts of the register. 8067 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG); 8068 8069 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 8070 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load), 8071 DAG.getIntPtrConstant(0)); 8072 8073 // Or the load with the bias. 
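  // Why this works (illustrative arithmetic): for x in [0, 2^32), OR'ing the
  // 32 integer bits into the low mantissa bits of 2^52 (0x4330000000000000)
  // yields the exact double value 2^52 + x, since 2^52 + x < 2^53. The
  // subtraction of the 2^52 bias below therefore recovers x exactly.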
8074 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, 8075 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, 8076 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 8077 MVT::v2f64, Load)), 8078 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, 8079 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 8080 MVT::v2f64, Bias))); 8081 Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 8082 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or), 8083 DAG.getIntPtrConstant(0)); 8084 8085 // Subtract the bias. 8086 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias); 8087 8088 // Handle final rounding. 8089 EVT DestVT = Op.getValueType(); 8090 8091 if (DestVT.bitsLT(MVT::f64)) 8092 return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub, 8093 DAG.getIntPtrConstant(0)); 8094 if (DestVT.bitsGT(MVT::f64)) 8095 return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub); 8096 8097 // Handle final rounding. 8098 return Sub; 8099} 8100 8101SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op, 8102 SelectionDAG &DAG) const { 8103 SDValue N0 = Op.getOperand(0); 8104 EVT SVT = N0.getValueType(); 8105 DebugLoc dl = Op.getDebugLoc(); 8106 8107 assert((SVT == MVT::v4i8 || SVT == MVT::v4i16 || 8108 SVT == MVT::v8i8 || SVT == MVT::v8i16) && 8109 "Custom UINT_TO_FP is not supported!"); 8110 8111 EVT NVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, SVT.getVectorNumElements()); 8112 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), 8113 DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, N0)); 8114} 8115 8116SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op, 8117 SelectionDAG &DAG) const { 8118 SDValue N0 = Op.getOperand(0); 8119 DebugLoc dl = Op.getDebugLoc(); 8120 8121 if (Op.getValueType().isVector()) 8122 return lowerUINT_TO_FP_vec(Op, DAG); 8123 8124 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't 8125 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform 8126 // the optimization here. 8127 if (DAG.SignBitIsZero(N0)) 8128 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0); 8129 8130 EVT SrcVT = N0.getValueType(); 8131 EVT DstVT = Op.getValueType(); 8132 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64) 8133 return LowerUINT_TO_FP_i64(Op, DAG); 8134 if (SrcVT == MVT::i32 && X86ScalarSSEf64) 8135 return LowerUINT_TO_FP_i32(Op, DAG); 8136 if (Subtarget->is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32) 8137 return SDValue(); 8138 8139 // Make a 64-bit buffer, and use it to build an FILD. 8140 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64); 8141 if (SrcVT == MVT::i32) { 8142 SDValue WordOff = DAG.getConstant(4, getPointerTy()); 8143 SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl, 8144 getPointerTy(), StackSlot, WordOff); 8145 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), 8146 StackSlot, MachinePointerInfo(), 8147 false, false, 0); 8148 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32), 8149 OffsetSlot, MachinePointerInfo(), 8150 false, false, 0); 8151 SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG); 8152 return Fild; 8153 } 8154 8155 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP"); 8156 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), 8157 StackSlot, MachinePointerInfo(), 8158 false, false, 0); 8159 // For i64 source, we need to add the appropriate power of 2 if the input 8160 // was negative. 
This is the same as the optimization in 8161 // DAGTypeLegalizer::ExpandIntOp_UNIT_TO_FP, and for it to be safe here, 8162 // we must be careful to do the computation in x87 extended precision, not 8163 // in SSE. (The generic code can't know it's OK to do this, or how to.) 8164 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex(); 8165 MachineMemOperand *MMO = 8166 DAG.getMachineFunction() 8167 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 8168 MachineMemOperand::MOLoad, 8, 8); 8169 8170 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other); 8171 SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) }; 8172 SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops, 3, 8173 MVT::i64, MMO); 8174 8175 APInt FF(32, 0x5F800000ULL); 8176 8177 // Check whether the sign bit is set. 8178 SDValue SignSet = DAG.getSetCC(dl, getSetCCResultType(MVT::i64), 8179 Op.getOperand(0), DAG.getConstant(0, MVT::i64), 8180 ISD::SETLT); 8181 8182 // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits. 8183 SDValue FudgePtr = DAG.getConstantPool( 8184 ConstantInt::get(*DAG.getContext(), FF.zext(64)), 8185 getPointerTy()); 8186 8187 // Get a pointer to FF if the sign bit was set, or to 0 otherwise. 8188 SDValue Zero = DAG.getIntPtrConstant(0); 8189 SDValue Four = DAG.getIntPtrConstant(4); 8190 SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet, 8191 Zero, Four); 8192 FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset); 8193 8194 // Load the value out, extending it from f32 to f80. 8195 // FIXME: Avoid the extend by constructing the right constant pool? 8196 SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(), 8197 FudgePtr, MachinePointerInfo::getConstantPool(), 8198 MVT::f32, false, false, 4); 8199 // Extend everything to 80 bits to force it to be done on x87. 8200 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge); 8201 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0)); 8202} 8203 8204std::pair<SDValue,SDValue> X86TargetLowering:: 8205FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned, bool IsReplace) const { 8206 DebugLoc DL = Op.getDebugLoc(); 8207 8208 EVT DstTy = Op.getValueType(); 8209 8210 if (!IsSigned && !isIntegerTypeFTOL(DstTy)) { 8211 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT"); 8212 DstTy = MVT::i64; 8213 } 8214 8215 assert(DstTy.getSimpleVT() <= MVT::i64 && 8216 DstTy.getSimpleVT() >= MVT::i16 && 8217 "Unknown FP_TO_INT to lower!"); 8218 8219 // These are really Legal. 8220 if (DstTy == MVT::i32 && 8221 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) 8222 return std::make_pair(SDValue(), SDValue()); 8223 if (Subtarget->is64Bit() && 8224 DstTy == MVT::i64 && 8225 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) 8226 return std::make_pair(SDValue(), SDValue()); 8227 8228 // We lower FP->int64 either into FISTP64 followed by a load from a temporary 8229 // stack slot, or into the FTOL runtime function. 
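  // Rough shape of the FISTP path, for illustration (the FP_TO_INT*_IN_MEM
  // node built below is later expanded to code that temporarily switches the
  // x87 control word to round-toward-zero around the store):
  //   fld   <value>               ; unless it is already on the x87 stack
  //   fistp qword ptr [slot]
  //   <integer load from [slot]>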
8230 MachineFunction &MF = DAG.getMachineFunction(); 8231 unsigned MemSize = DstTy.getSizeInBits()/8; 8232 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false); 8233 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 8234 8235 unsigned Opc; 8236 if (!IsSigned && isIntegerTypeFTOL(DstTy)) 8237 Opc = X86ISD::WIN_FTOL; 8238 else 8239 switch (DstTy.getSimpleVT().SimpleTy) { 8240 default: llvm_unreachable("Invalid FP_TO_SINT to lower!"); 8241 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break; 8242 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break; 8243 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break; 8244 } 8245 8246 SDValue Chain = DAG.getEntryNode(); 8247 SDValue Value = Op.getOperand(0); 8248 EVT TheVT = Op.getOperand(0).getValueType(); 8249 // FIXME This causes a redundant load/store if the SSE-class value is already 8250 // in memory, such as if it is on the callstack. 8251 if (isScalarFPTypeInSSEReg(TheVT)) { 8252 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!"); 8253 Chain = DAG.getStore(Chain, DL, Value, StackSlot, 8254 MachinePointerInfo::getFixedStack(SSFI), 8255 false, false, 0); 8256 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other); 8257 SDValue Ops[] = { 8258 Chain, StackSlot, DAG.getValueType(TheVT) 8259 }; 8260 8261 MachineMemOperand *MMO = 8262 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 8263 MachineMemOperand::MOLoad, MemSize, MemSize); 8264 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, 3, 8265 DstTy, MMO); 8266 Chain = Value.getValue(1); 8267 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false); 8268 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 8269 } 8270 8271 MachineMemOperand *MMO = 8272 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 8273 MachineMemOperand::MOStore, MemSize, MemSize); 8274 8275 if (Opc != X86ISD::WIN_FTOL) { 8276 // Build the FP_TO_INT*_IN_MEM 8277 SDValue Ops[] = { Chain, Value, StackSlot }; 8278 SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other), 8279 Ops, 3, DstTy, MMO); 8280 return std::make_pair(FIST, StackSlot); 8281 } else { 8282 SDValue ftol = DAG.getNode(X86ISD::WIN_FTOL, DL, 8283 DAG.getVTList(MVT::Other, MVT::Glue), 8284 Chain, Value); 8285 SDValue eax = DAG.getCopyFromReg(ftol, DL, X86::EAX, 8286 MVT::i32, ftol.getValue(1)); 8287 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), DL, X86::EDX, 8288 MVT::i32, eax.getValue(2)); 8289 SDValue Ops[] = { eax, edx }; 8290 SDValue pair = IsReplace 8291 ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops, 2) 8292 : DAG.getMergeValues(Ops, 2, DL); 8293 return std::make_pair(pair, SDValue()); 8294 } 8295} 8296 8297SDValue X86TargetLowering::lowerZERO_EXTEND(SDValue Op, SelectionDAG &DAG) const { 8298 DebugLoc DL = Op.getDebugLoc(); 8299 EVT VT = Op.getValueType(); 8300 SDValue In = Op.getOperand(0); 8301 EVT SVT = In.getValueType(); 8302 8303 if (!VT.is256BitVector() || !SVT.is128BitVector() || 8304 VT.getVectorNumElements() != SVT.getVectorNumElements()) 8305 return SDValue(); 8306 8307 assert(Subtarget->hasAVX() && "256-bit vector is observed without AVX!"); 8308 8309 // AVX2 has better support of integer extending. 
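  // Sketch of the v8i16 -> v8i32 case handled here: with AVX2 a single
  // 256-bit vpmovzxwd suffices; otherwise zero-extend the two 64-bit halves
  // separately and glue them back together, roughly:
  //   lo = pmovzxwd(in[0..3]);  hi = pmovzxwd(in[4..7]);  result = concat(lo, hi)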
8310 if (Subtarget->hasAVX2()) 8311 return DAG.getNode(X86ISD::VZEXT, DL, VT, In); 8312 8313 SDValue Lo = DAG.getNode(X86ISD::VZEXT, DL, MVT::v4i32, In); 8314 static const int Mask[] = {4, 5, 6, 7, -1, -1, -1, -1}; 8315 SDValue Hi = DAG.getNode(X86ISD::VZEXT, DL, MVT::v4i32, 8316 DAG.getVectorShuffle(MVT::v8i16, DL, In, DAG.getUNDEF(MVT::v8i16), &Mask[0])); 8317 8318 return DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i32, Lo, Hi); 8319} 8320 8321SDValue X86TargetLowering::lowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { 8322 DebugLoc DL = Op.getDebugLoc(); 8323 EVT VT = Op.getValueType(); 8324 EVT SVT = Op.getOperand(0).getValueType(); 8325 8326 if (!VT.is128BitVector() || !SVT.is256BitVector() || 8327 VT.getVectorNumElements() != SVT.getVectorNumElements()) 8328 return SDValue(); 8329 8330 assert(Subtarget->hasAVX() && "256-bit vector is observed without AVX!"); 8331 8332 unsigned NumElems = VT.getVectorNumElements(); 8333 EVT NVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 8334 NumElems * 2); 8335 8336 SDValue In = Op.getOperand(0); 8337 SmallVector<int, 16> MaskVec(NumElems * 2, -1); 8338 // Prepare truncation shuffle mask 8339 for (unsigned i = 0; i != NumElems; ++i) 8340 MaskVec[i] = i * 2; 8341 SDValue V = DAG.getVectorShuffle(NVT, DL, 8342 DAG.getNode(ISD::BITCAST, DL, NVT, In), 8343 DAG.getUNDEF(NVT), &MaskVec[0]); 8344 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, 8345 DAG.getIntPtrConstant(0)); 8346} 8347 8348SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op, 8349 SelectionDAG &DAG) const { 8350 if (Op.getValueType().isVector()) { 8351 if (Op.getValueType() == MVT::v8i16) 8352 return DAG.getNode(ISD::TRUNCATE, Op.getDebugLoc(), Op.getValueType(), 8353 DAG.getNode(ISD::FP_TO_SINT, Op.getDebugLoc(), 8354 MVT::v8i32, Op.getOperand(0))); 8355 return SDValue(); 8356 } 8357 8358 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, 8359 /*IsSigned=*/ true, /*IsReplace=*/ false); 8360 SDValue FIST = Vals.first, StackSlot = Vals.second; 8361 // If FP_TO_INTHelper failed, the node is actually supposed to be Legal. 8362 if (FIST.getNode() == 0) return Op; 8363 8364 if (StackSlot.getNode()) 8365 // Load the result. 8366 return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(), 8367 FIST, StackSlot, MachinePointerInfo(), 8368 false, false, false, 0); 8369 8370 // The node is the result. 8371 return FIST; 8372} 8373 8374SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op, 8375 SelectionDAG &DAG) const { 8376 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, 8377 /*IsSigned=*/ false, /*IsReplace=*/ false); 8378 SDValue FIST = Vals.first, StackSlot = Vals.second; 8379 assert(FIST.getNode() && "Unexpected failure"); 8380 8381 if (StackSlot.getNode()) 8382 // Load the result. 8383 return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(), 8384 FIST, StackSlot, MachinePointerInfo(), 8385 false, false, false, 0); 8386 8387 // The node is the result. 
8388 return FIST; 8389} 8390 8391SDValue X86TargetLowering::lowerFP_EXTEND(SDValue Op, 8392 SelectionDAG &DAG) const { 8393 DebugLoc DL = Op.getDebugLoc(); 8394 EVT VT = Op.getValueType(); 8395 SDValue In = Op.getOperand(0); 8396 EVT SVT = In.getValueType(); 8397 8398 assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!"); 8399 8400 return DAG.getNode(X86ISD::VFPEXT, DL, VT, 8401 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32, 8402 In, DAG.getUNDEF(SVT))); 8403} 8404 8405SDValue X86TargetLowering::LowerFABS(SDValue Op, SelectionDAG &DAG) const { 8406 LLVMContext *Context = DAG.getContext(); 8407 DebugLoc dl = Op.getDebugLoc(); 8408 EVT VT = Op.getValueType(); 8409 EVT EltVT = VT; 8410 unsigned NumElts = VT == MVT::f64 ? 2 : 4; 8411 if (VT.isVector()) { 8412 EltVT = VT.getVectorElementType(); 8413 NumElts = VT.getVectorNumElements(); 8414 } 8415 Constant *C; 8416 if (EltVT == MVT::f64) 8417 C = ConstantFP::get(*Context, APFloat(APInt(64, ~(1ULL << 63)))); 8418 else 8419 C = ConstantFP::get(*Context, APFloat(APInt(32, ~(1U << 31)))); 8420 C = ConstantVector::getSplat(NumElts, C); 8421 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy()); 8422 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 8423 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 8424 MachinePointerInfo::getConstantPool(), 8425 false, false, false, Alignment); 8426 if (VT.isVector()) { 8427 MVT ANDVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64; 8428 return DAG.getNode(ISD::BITCAST, dl, VT, 8429 DAG.getNode(ISD::AND, dl, ANDVT, 8430 DAG.getNode(ISD::BITCAST, dl, ANDVT, 8431 Op.getOperand(0)), 8432 DAG.getNode(ISD::BITCAST, dl, ANDVT, Mask))); 8433 } 8434 return DAG.getNode(X86ISD::FAND, dl, VT, Op.getOperand(0), Mask); 8435} 8436 8437SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) const { 8438 LLVMContext *Context = DAG.getContext(); 8439 DebugLoc dl = Op.getDebugLoc(); 8440 EVT VT = Op.getValueType(); 8441 EVT EltVT = VT; 8442 unsigned NumElts = VT == MVT::f64 ? 2 : 4; 8443 if (VT.isVector()) { 8444 EltVT = VT.getVectorElementType(); 8445 NumElts = VT.getVectorNumElements(); 8446 } 8447 Constant *C; 8448 if (EltVT == MVT::f64) 8449 C = ConstantFP::get(*Context, APFloat(APInt(64, 1ULL << 63))); 8450 else 8451 C = ConstantFP::get(*Context, APFloat(APInt(32, 1U << 31))); 8452 C = ConstantVector::getSplat(NumElts, C); 8453 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy()); 8454 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 8455 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 8456 MachinePointerInfo::getConstantPool(), 8457 false, false, false, Alignment); 8458 if (VT.isVector()) { 8459 MVT XORVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64; 8460 return DAG.getNode(ISD::BITCAST, dl, VT, 8461 DAG.getNode(ISD::XOR, dl, XORVT, 8462 DAG.getNode(ISD::BITCAST, dl, XORVT, 8463 Op.getOperand(0)), 8464 DAG.getNode(ISD::BITCAST, dl, XORVT, Mask))); 8465 } 8466 8467 return DAG.getNode(X86ISD::FXOR, dl, VT, Op.getOperand(0), Mask); 8468} 8469 8470SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { 8471 LLVMContext *Context = DAG.getContext(); 8472 SDValue Op0 = Op.getOperand(0); 8473 SDValue Op1 = Op.getOperand(1); 8474 DebugLoc dl = Op.getDebugLoc(); 8475 EVT VT = Op.getValueType(); 8476 EVT SrcVT = Op1.getValueType(); 8477 8478 // If second operand is smaller, extend it first. 
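  // (Overall scheme, as a sketch: copysign(Op0, Op1) is computed on the FP
  //  bit patterns as (Op0 & ~SignMask) | (Op1 & SignMask), using FAND/FOR and
  //  the constant-pool masks built further down.)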
8479 if (SrcVT.bitsLT(VT)) { 8480 Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1); 8481 SrcVT = VT; 8482 } 8483 // And if it is bigger, shrink it first. 8484 if (SrcVT.bitsGT(VT)) { 8485 Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1)); 8486 SrcVT = VT; 8487 } 8488 8489 // At this point the operands and the result should have the same 8490 // type, and that won't be f80 since that is not custom lowered. 8491 8492 // First get the sign bit of second operand. 8493 SmallVector<Constant*,4> CV; 8494 if (SrcVT == MVT::f64) { 8495 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 1ULL << 63)))); 8496 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 0)))); 8497 } else { 8498 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 1U << 31)))); 8499 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 8500 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 8501 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 8502 } 8503 Constant *C = ConstantVector::get(CV); 8504 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 8505 SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx, 8506 MachinePointerInfo::getConstantPool(), 8507 false, false, false, 16); 8508 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1); 8509 8510 // Shift sign bit right or left if the two operands have different types. 8511 if (SrcVT.bitsGT(VT)) { 8512 // Op0 is MVT::f32, Op1 is MVT::f64. 8513 SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, SignBit); 8514 SignBit = DAG.getNode(X86ISD::FSRL, dl, MVT::v2f64, SignBit, 8515 DAG.getConstant(32, MVT::i32)); 8516 SignBit = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, SignBit); 8517 SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, SignBit, 8518 DAG.getIntPtrConstant(0)); 8519 } 8520 8521 // Clear first operand sign bit. 8522 CV.clear(); 8523 if (VT == MVT::f64) { 8524 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, ~(1ULL << 63))))); 8525 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 0)))); 8526 } else { 8527 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, ~(1U << 31))))); 8528 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 8529 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 8530 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 8531 } 8532 C = ConstantVector::get(CV); 8533 CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 8534 SDValue Mask2 = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 8535 MachinePointerInfo::getConstantPool(), 8536 false, false, false, 16); 8537 SDValue Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Mask2); 8538 8539 // Or the value with the sign bit. 8540 return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit); 8541} 8542 8543static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) { 8544 SDValue N0 = Op.getOperand(0); 8545 DebugLoc dl = Op.getDebugLoc(); 8546 EVT VT = Op.getValueType(); 8547 8548 // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1). 8549 SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0, 8550 DAG.getConstant(1, VT)); 8551 return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT)); 8552} 8553 8554// LowerVectorAllZeroTest - Check whether an OR'd tree is PTEST-able. 
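// For example (sketch), a compare such as
//   (or (extractelt v, 0), (or (extractelt v, 1), ...)) == 0
// where the extracts cover every element of v can be rewritten as a single
// (ptest v, v) feeding the flag check.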
8555// 8556SDValue X86TargetLowering::LowerVectorAllZeroTest(SDValue Op, SelectionDAG &DAG) const { 8557 assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree."); 8558 8559 if (!Subtarget->hasSSE41()) 8560 return SDValue(); 8561 8562 if (!Op->hasOneUse()) 8563 return SDValue(); 8564 8565 SDNode *N = Op.getNode(); 8566 DebugLoc DL = N->getDebugLoc(); 8567 8568 SmallVector<SDValue, 8> Opnds; 8569 DenseMap<SDValue, unsigned> VecInMap; 8570 EVT VT = MVT::Other; 8571 8572 // Recognize a special case where a vector is casted into wide integer to 8573 // test all 0s. 8574 Opnds.push_back(N->getOperand(0)); 8575 Opnds.push_back(N->getOperand(1)); 8576 8577 for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) { 8578 SmallVector<SDValue, 8>::const_iterator I = Opnds.begin() + Slot; 8579 // BFS traverse all OR'd operands. 8580 if (I->getOpcode() == ISD::OR) { 8581 Opnds.push_back(I->getOperand(0)); 8582 Opnds.push_back(I->getOperand(1)); 8583 // Re-evaluate the number of nodes to be traversed. 8584 e += 2; // 2 more nodes (LHS and RHS) are pushed. 8585 continue; 8586 } 8587 8588 // Quit if a non-EXTRACT_VECTOR_ELT 8589 if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 8590 return SDValue(); 8591 8592 // Quit if without a constant index. 8593 SDValue Idx = I->getOperand(1); 8594 if (!isa<ConstantSDNode>(Idx)) 8595 return SDValue(); 8596 8597 SDValue ExtractedFromVec = I->getOperand(0); 8598 DenseMap<SDValue, unsigned>::iterator M = VecInMap.find(ExtractedFromVec); 8599 if (M == VecInMap.end()) { 8600 VT = ExtractedFromVec.getValueType(); 8601 // Quit if not 128/256-bit vector. 8602 if (!VT.is128BitVector() && !VT.is256BitVector()) 8603 return SDValue(); 8604 // Quit if not the same type. 8605 if (VecInMap.begin() != VecInMap.end() && 8606 VT != VecInMap.begin()->first.getValueType()) 8607 return SDValue(); 8608 M = VecInMap.insert(std::make_pair(ExtractedFromVec, 0)).first; 8609 } 8610 M->second |= 1U << cast<ConstantSDNode>(Idx)->getZExtValue(); 8611 } 8612 8613 assert((VT.is128BitVector() || VT.is256BitVector()) && 8614 "Not extracted from 128-/256-bit vector."); 8615 8616 unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U; 8617 SmallVector<SDValue, 8> VecIns; 8618 8619 for (DenseMap<SDValue, unsigned>::const_iterator 8620 I = VecInMap.begin(), E = VecInMap.end(); I != E; ++I) { 8621 // Quit if not all elements are used. 8622 if (I->second != FullMask) 8623 return SDValue(); 8624 VecIns.push_back(I->first); 8625 } 8626 8627 EVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64; 8628 8629 // Cast all vectors into TestVT for PTEST. 8630 for (unsigned i = 0, e = VecIns.size(); i < e; ++i) 8631 VecIns[i] = DAG.getNode(ISD::BITCAST, DL, TestVT, VecIns[i]); 8632 8633 // If more than one full vectors are evaluated, OR them first before PTEST. 8634 for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) { 8635 // Each iteration will OR 2 nodes and append the result until there is only 8636 // 1 node left, i.e. the final OR'd value of all vectors. 8637 SDValue LHS = VecIns[Slot]; 8638 SDValue RHS = VecIns[Slot + 1]; 8639 VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS)); 8640 } 8641 8642 return DAG.getNode(X86ISD::PTEST, DL, MVT::i32, 8643 VecIns.back(), VecIns.back()); 8644} 8645 8646/// Emit nodes that will be selected as "test Op0,Op0", or something 8647/// equivalent. 
8648SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, 8649 SelectionDAG &DAG) const { 8650 DebugLoc dl = Op.getDebugLoc(); 8651 8652 // CF and OF aren't always set the way we want. Determine which 8653 // of these we need. 8654 bool NeedCF = false; 8655 bool NeedOF = false; 8656 switch (X86CC) { 8657 default: break; 8658 case X86::COND_A: case X86::COND_AE: 8659 case X86::COND_B: case X86::COND_BE: 8660 NeedCF = true; 8661 break; 8662 case X86::COND_G: case X86::COND_GE: 8663 case X86::COND_L: case X86::COND_LE: 8664 case X86::COND_O: case X86::COND_NO: 8665 NeedOF = true; 8666 break; 8667 } 8668 8669 // See if we can use the EFLAGS value from the operand instead of 8670 // doing a separate TEST. TEST always sets OF and CF to 0, so unless 8671 // we prove that the arithmetic won't overflow, we can't use OF or CF. 8672 if (Op.getResNo() != 0 || NeedOF || NeedCF) 8673 // Emit a CMP with 0, which is the TEST pattern. 8674 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op, 8675 DAG.getConstant(0, Op.getValueType())); 8676 8677 unsigned Opcode = 0; 8678 unsigned NumOperands = 0; 8679 8680 // Truncate operations may prevent the merge of the SETCC instruction 8681 // and the arithmetic intruction before it. Attempt to truncate the operands 8682 // of the arithmetic instruction and use a reduced bit-width instruction. 8683 bool NeedTruncation = false; 8684 SDValue ArithOp = Op; 8685 if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) { 8686 SDValue Arith = Op->getOperand(0); 8687 // Both the trunc and the arithmetic op need to have one user each. 8688 if (Arith->hasOneUse()) 8689 switch (Arith.getOpcode()) { 8690 default: break; 8691 case ISD::ADD: 8692 case ISD::SUB: 8693 case ISD::AND: 8694 case ISD::OR: 8695 case ISD::XOR: { 8696 NeedTruncation = true; 8697 ArithOp = Arith; 8698 } 8699 } 8700 } 8701 8702 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation 8703 // which may be the result of a CAST. We use the variable 'Op', which is the 8704 // non-casted variable when we check for possible users. 8705 switch (ArithOp.getOpcode()) { 8706 case ISD::ADD: 8707 // Due to an isel shortcoming, be conservative if this add is likely to be 8708 // selected as part of a load-modify-store instruction. When the root node 8709 // in a match is a store, isel doesn't know how to remap non-chain non-flag 8710 // uses of other nodes in the match, such as the ADD in this case. This 8711 // leads to the ADD being left around and reselected, with the result being 8712 // two adds in the output. Alas, even if none our users are stores, that 8713 // doesn't prove we're O.K. Ergo, if we have any parents that aren't 8714 // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require 8715 // climbing the DAG back to the root, and it doesn't seem to be worth the 8716 // effort. 8717 for (SDNode::use_iterator UI = Op.getNode()->use_begin(), 8718 UE = Op.getNode()->use_end(); UI != UE; ++UI) 8719 if (UI->getOpcode() != ISD::CopyToReg && 8720 UI->getOpcode() != ISD::SETCC && 8721 UI->getOpcode() != ISD::STORE) 8722 goto default_case; 8723 8724 if (ConstantSDNode *C = 8725 dyn_cast<ConstantSDNode>(ArithOp.getNode()->getOperand(1))) { 8726 // An add of one will be selected as an INC. 8727 if (C->getAPIntValue() == 1) { 8728 Opcode = X86ISD::INC; 8729 NumOperands = 1; 8730 break; 8731 } 8732 8733 // An add of negative one (subtract of one) will be selected as a DEC. 
8734 if (C->getAPIntValue().isAllOnesValue()) { 8735 Opcode = X86ISD::DEC; 8736 NumOperands = 1; 8737 break; 8738 } 8739 } 8740 8741 // Otherwise use a regular EFLAGS-setting add. 8742 Opcode = X86ISD::ADD; 8743 NumOperands = 2; 8744 break; 8745 case ISD::AND: { 8746 // If the primary and result isn't used, don't bother using X86ISD::AND, 8747 // because a TEST instruction will be better. 8748 bool NonFlagUse = false; 8749 for (SDNode::use_iterator UI = Op.getNode()->use_begin(), 8750 UE = Op.getNode()->use_end(); UI != UE; ++UI) { 8751 SDNode *User = *UI; 8752 unsigned UOpNo = UI.getOperandNo(); 8753 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) { 8754 // Look pass truncate. 8755 UOpNo = User->use_begin().getOperandNo(); 8756 User = *User->use_begin(); 8757 } 8758 8759 if (User->getOpcode() != ISD::BRCOND && 8760 User->getOpcode() != ISD::SETCC && 8761 !(User->getOpcode() == ISD::SELECT && UOpNo == 0)) { 8762 NonFlagUse = true; 8763 break; 8764 } 8765 } 8766 8767 if (!NonFlagUse) 8768 break; 8769 } 8770 // FALL THROUGH 8771 case ISD::SUB: 8772 case ISD::OR: 8773 case ISD::XOR: 8774 // Due to the ISEL shortcoming noted above, be conservative if this op is 8775 // likely to be selected as part of a load-modify-store instruction. 8776 for (SDNode::use_iterator UI = Op.getNode()->use_begin(), 8777 UE = Op.getNode()->use_end(); UI != UE; ++UI) 8778 if (UI->getOpcode() == ISD::STORE) 8779 goto default_case; 8780 8781 // Otherwise use a regular EFLAGS-setting instruction. 8782 switch (ArithOp.getOpcode()) { 8783 default: llvm_unreachable("unexpected operator!"); 8784 case ISD::SUB: Opcode = X86ISD::SUB; break; 8785 case ISD::XOR: Opcode = X86ISD::XOR; break; 8786 case ISD::AND: Opcode = X86ISD::AND; break; 8787 case ISD::OR: { 8788 if (!NeedTruncation && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) { 8789 SDValue EFLAGS = LowerVectorAllZeroTest(Op, DAG); 8790 if (EFLAGS.getNode()) 8791 return EFLAGS; 8792 } 8793 Opcode = X86ISD::OR; 8794 break; 8795 } 8796 } 8797 8798 NumOperands = 2; 8799 break; 8800 case X86ISD::ADD: 8801 case X86ISD::SUB: 8802 case X86ISD::INC: 8803 case X86ISD::DEC: 8804 case X86ISD::OR: 8805 case X86ISD::XOR: 8806 case X86ISD::AND: 8807 return SDValue(Op.getNode(), 1); 8808 default: 8809 default_case: 8810 break; 8811 } 8812 8813 // If we found that truncation is beneficial, perform the truncation and 8814 // update 'Op'. 8815 if (NeedTruncation) { 8816 EVT VT = Op.getValueType(); 8817 SDValue WideVal = Op->getOperand(0); 8818 EVT WideVT = WideVal.getValueType(); 8819 unsigned ConvertedOp = 0; 8820 // Use a target machine opcode to prevent further DAGCombine 8821 // optimizations that may separate the arithmetic operations 8822 // from the setcc node. 8823 switch (WideVal.getOpcode()) { 8824 default: break; 8825 case ISD::ADD: ConvertedOp = X86ISD::ADD; break; 8826 case ISD::SUB: ConvertedOp = X86ISD::SUB; break; 8827 case ISD::AND: ConvertedOp = X86ISD::AND; break; 8828 case ISD::OR: ConvertedOp = X86ISD::OR; break; 8829 case ISD::XOR: ConvertedOp = X86ISD::XOR; break; 8830 } 8831 8832 if (ConvertedOp) { 8833 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 8834 if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) { 8835 SDValue V0 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(0)); 8836 SDValue V1 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(1)); 8837 Op = DAG.getNode(ConvertedOp, dl, VT, V0, V1); 8838 } 8839 } 8840 } 8841 8842 if (Opcode == 0) 8843 // Emit a CMP with 0, which is the TEST pattern. 
8844 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op, 8845 DAG.getConstant(0, Op.getValueType())); 8846 8847 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); 8848 SmallVector<SDValue, 4> Ops; 8849 for (unsigned i = 0; i != NumOperands; ++i) 8850 Ops.push_back(Op.getOperand(i)); 8851 8852 SDValue New = DAG.getNode(Opcode, dl, VTs, &Ops[0], NumOperands); 8853 DAG.ReplaceAllUsesWith(Op, New); 8854 return SDValue(New.getNode(), 1); 8855} 8856 8857/// Emit nodes that will be selected as "cmp Op0,Op1", or something 8858/// equivalent. 8859SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC, 8860 SelectionDAG &DAG) const { 8861 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1)) 8862 if (C->getAPIntValue() == 0) 8863 return EmitTest(Op0, X86CC, DAG); 8864 8865 DebugLoc dl = Op0.getDebugLoc(); 8866 if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 || 8867 Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) { 8868 // Use SUB instead of CMP to enable CSE between SUB and CMP. 8869 SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32); 8870 SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs, 8871 Op0, Op1); 8872 return SDValue(Sub.getNode(), 1); 8873 } 8874 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1); 8875} 8876 8877/// Convert a comparison if required by the subtarget. 8878SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp, 8879 SelectionDAG &DAG) const { 8880 // If the subtarget does not support the FUCOMI instruction, floating-point 8881 // comparisons have to be converted. 8882 if (Subtarget->hasCMov() || 8883 Cmp.getOpcode() != X86ISD::CMP || 8884 !Cmp.getOperand(0).getValueType().isFloatingPoint() || 8885 !Cmp.getOperand(1).getValueType().isFloatingPoint()) 8886 return Cmp; 8887 8888 // The instruction selector will select an FUCOM instruction instead of 8889 // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence 8890 // build an SDNode sequence that transfers the result from FPSW into EFLAGS: 8891 // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8)))) 8892 DebugLoc dl = Cmp.getDebugLoc(); 8893 SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp); 8894 SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW); 8895 SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW, 8896 DAG.getConstant(8, MVT::i8)); 8897 SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl); 8898 return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl); 8899} 8900 8901/// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node 8902/// if it's possible. 8903SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC, 8904 DebugLoc dl, SelectionDAG &DAG) const { 8905 SDValue Op0 = And.getOperand(0); 8906 SDValue Op1 = And.getOperand(1); 8907 if (Op0.getOpcode() == ISD::TRUNCATE) 8908 Op0 = Op0.getOperand(0); 8909 if (Op1.getOpcode() == ISD::TRUNCATE) 8910 Op1 = Op1.getOperand(0); 8911 8912 SDValue LHS, RHS; 8913 if (Op1.getOpcode() == ISD::SHL) 8914 std::swap(Op0, Op1); 8915 if (Op0.getOpcode() == ISD::SHL) { 8916 if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0))) 8917 if (And00C->getZExtValue() == 1) { 8918 // If we looked past a truncate, check that it's only truncating away 8919 // known zeros. 
8920 unsigned BitWidth = Op0.getValueSizeInBits(); 8921 unsigned AndBitWidth = And.getValueSizeInBits(); 8922 if (BitWidth > AndBitWidth) { 8923 APInt Zeros, Ones; 8924 DAG.ComputeMaskedBits(Op0, Zeros, Ones); 8925 if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth) 8926 return SDValue(); 8927 } 8928 LHS = Op1; 8929 RHS = Op0.getOperand(1); 8930 } 8931 } else if (Op1.getOpcode() == ISD::Constant) { 8932 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1); 8933 uint64_t AndRHSVal = AndRHS->getZExtValue(); 8934 SDValue AndLHS = Op0; 8935 8936 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) { 8937 LHS = AndLHS.getOperand(0); 8938 RHS = AndLHS.getOperand(1); 8939 } 8940 8941 // Use BT if the immediate can't be encoded in a TEST instruction. 8942 if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) { 8943 LHS = AndLHS; 8944 RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), LHS.getValueType()); 8945 } 8946 } 8947 8948 if (LHS.getNode()) { 8949 // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT 8950 // instruction. Since the shift amount is in-range-or-undefined, we know 8951 // that doing a bittest on the i32 value is ok. We extend to i32 because 8952 // the encoding for the i16 version is larger than the i32 version. 8953 // Also promote i16 to i32 for performance / code size reason. 8954 if (LHS.getValueType() == MVT::i8 || 8955 LHS.getValueType() == MVT::i16) 8956 LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS); 8957 8958 // If the operand types disagree, extend the shift amount to match. Since 8959 // BT ignores high bits (like shifts) we can use anyextend. 8960 if (LHS.getValueType() != RHS.getValueType()) 8961 RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS); 8962 8963 SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS); 8964 unsigned Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B; 8965 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 8966 DAG.getConstant(Cond, MVT::i8), BT); 8967 } 8968 8969 return SDValue(); 8970} 8971 8972SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { 8973 8974 if (Op.getValueType().isVector()) return LowerVSETCC(Op, DAG); 8975 8976 assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer"); 8977 SDValue Op0 = Op.getOperand(0); 8978 SDValue Op1 = Op.getOperand(1); 8979 DebugLoc dl = Op.getDebugLoc(); 8980 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 8981 8982 // Optimize to BT if possible. 8983 // Lower (X & (1 << N)) == 0 to BT(X, N). 8984 // Lower ((X >>u N) & 1) != 0 to BT(X, N). 8985 // Lower ((X >>s N) & 1) != 0 to BT(X, N). 8986 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() && 8987 Op1.getOpcode() == ISD::Constant && 8988 cast<ConstantSDNode>(Op1)->isNullValue() && 8989 (CC == ISD::SETEQ || CC == ISD::SETNE)) { 8990 SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG); 8991 if (NewSetCC.getNode()) 8992 return NewSetCC; 8993 } 8994 8995 // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of 8996 // these. 8997 if (Op1.getOpcode() == ISD::Constant && 8998 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 || 8999 cast<ConstantSDNode>(Op1)->isNullValue()) && 9000 (CC == ISD::SETEQ || CC == ISD::SETNE)) { 9001 9002 // If the input is a setcc, then reuse the input setcc or use a new one with 9003 // the inverted condition. 
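    // E.g. (illustrative): (setne (x86setcc E, ...), 0) is returned as-is,
    // while (seteq (x86setcc E, ...), 0) becomes (x86setcc NE, ...) with the
    // opposite condition.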
9004 if (Op0.getOpcode() == X86ISD::SETCC) { 9005 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0); 9006 bool Invert = (CC == ISD::SETNE) ^ 9007 cast<ConstantSDNode>(Op1)->isNullValue(); 9008 if (!Invert) return Op0; 9009 9010 CCode = X86::GetOppositeBranchCondition(CCode); 9011 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 9012 DAG.getConstant(CCode, MVT::i8), Op0.getOperand(1)); 9013 } 9014 } 9015 9016 bool isFP = Op1.getValueType().isFloatingPoint(); 9017 unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG); 9018 if (X86CC == X86::COND_INVALID) 9019 return SDValue(); 9020 9021 SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, DAG); 9022 EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG); 9023 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 9024 DAG.getConstant(X86CC, MVT::i8), EFLAGS); 9025} 9026 9027// Lower256IntVSETCC - Break a VSETCC 256-bit integer VSETCC into two new 128 9028// ones, and then concatenate the result back. 9029static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) { 9030 EVT VT = Op.getValueType(); 9031 9032 assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC && 9033 "Unsupported value type for operation"); 9034 9035 unsigned NumElems = VT.getVectorNumElements(); 9036 DebugLoc dl = Op.getDebugLoc(); 9037 SDValue CC = Op.getOperand(2); 9038 9039 // Extract the LHS vectors 9040 SDValue LHS = Op.getOperand(0); 9041 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl); 9042 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl); 9043 9044 // Extract the RHS vectors 9045 SDValue RHS = Op.getOperand(1); 9046 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl); 9047 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl); 9048 9049 // Issue the operation on the smaller types and concatenate the result back 9050 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 9051 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); 9052 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, 9053 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC), 9054 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC)); 9055} 9056 9057 9058SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) const { 9059 SDValue Cond; 9060 SDValue Op0 = Op.getOperand(0); 9061 SDValue Op1 = Op.getOperand(1); 9062 SDValue CC = Op.getOperand(2); 9063 EVT VT = Op.getValueType(); 9064 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 9065 bool isFP = Op.getOperand(1).getValueType().isFloatingPoint(); 9066 DebugLoc dl = Op.getDebugLoc(); 9067 9068 if (isFP) { 9069#ifndef NDEBUG 9070 EVT EltVT = Op0.getValueType().getVectorElementType(); 9071 assert(EltVT == MVT::f32 || EltVT == MVT::f64); 9072#endif 9073 9074 unsigned SSECC; 9075 bool Swap = false; 9076 9077 // SSE Condition code mapping: 9078 // 0 - EQ 9079 // 1 - LT 9080 // 2 - LE 9081 // 3 - UNORD 9082 // 4 - NEQ 9083 // 5 - NLT 9084 // 6 - NLE 9085 // 7 - ORD 9086 switch (SetCCOpcode) { 9087 default: llvm_unreachable("Unexpected SETCC condition"); 9088 case ISD::SETOEQ: 9089 case ISD::SETEQ: SSECC = 0; break; 9090 case ISD::SETOGT: 9091 case ISD::SETGT: Swap = true; // Fallthrough 9092 case ISD::SETLT: 9093 case ISD::SETOLT: SSECC = 1; break; 9094 case ISD::SETOGE: 9095 case ISD::SETGE: Swap = true; // Fallthrough 9096 case ISD::SETLE: 9097 case ISD::SETOLE: SSECC = 2; break; 9098 case ISD::SETUO: SSECC = 3; break; 9099 case ISD::SETUNE: 9100 case ISD::SETNE: SSECC = 4; break; 9101 case ISD::SETULE: Swap = true; // Fallthrough 9102 case ISD::SETUGE: SSECC = 5; break; 9103 case ISD::SETULT: Swap = true; // 
Fallthrough 9104 case ISD::SETUGT: SSECC = 6; break; 9105 case ISD::SETO: SSECC = 7; break; 9106 case ISD::SETUEQ: 9107 case ISD::SETONE: SSECC = 8; break; 9108 } 9109 if (Swap) 9110 std::swap(Op0, Op1); 9111 9112 // In the two special cases we can't handle, emit two comparisons. 9113 if (SSECC == 8) { 9114 unsigned CC0, CC1; 9115 unsigned CombineOpc; 9116 if (SetCCOpcode == ISD::SETUEQ) { 9117 CC0 = 3; CC1 = 0; CombineOpc = ISD::OR; 9118 } else { 9119 assert(SetCCOpcode == ISD::SETONE); 9120 CC0 = 7; CC1 = 4; CombineOpc = ISD::AND; 9121 } 9122 9123 SDValue Cmp0 = DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1, 9124 DAG.getConstant(CC0, MVT::i8)); 9125 SDValue Cmp1 = DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1, 9126 DAG.getConstant(CC1, MVT::i8)); 9127 return DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1); 9128 } 9129 // Handle all other FP comparisons here. 9130 return DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1, 9131 DAG.getConstant(SSECC, MVT::i8)); 9132 } 9133 9134 // Break 256-bit integer vector compare into smaller ones. 9135 if (VT.is256BitVector() && !Subtarget->hasAVX2()) 9136 return Lower256IntVSETCC(Op, DAG); 9137 9138 // We are handling one of the integer comparisons here. Since SSE only has 9139 // GT and EQ comparisons for integer, swapping operands and multiple 9140 // operations may be required for some comparisons. 9141 unsigned Opc; 9142 bool Swap = false, Invert = false, FlipSigns = false; 9143 9144 switch (SetCCOpcode) { 9145 default: llvm_unreachable("Unexpected SETCC condition"); 9146 case ISD::SETNE: Invert = true; 9147 case ISD::SETEQ: Opc = X86ISD::PCMPEQ; break; 9148 case ISD::SETLT: Swap = true; 9149 case ISD::SETGT: Opc = X86ISD::PCMPGT; break; 9150 case ISD::SETGE: Swap = true; 9151 case ISD::SETLE: Opc = X86ISD::PCMPGT; Invert = true; break; 9152 case ISD::SETULT: Swap = true; 9153 case ISD::SETUGT: Opc = X86ISD::PCMPGT; FlipSigns = true; break; 9154 case ISD::SETUGE: Swap = true; 9155 case ISD::SETULE: Opc = X86ISD::PCMPGT; FlipSigns = true; Invert = true; break; 9156 } 9157 if (Swap) 9158 std::swap(Op0, Op1); 9159 9160 // Check that the operation in question is available (most are plain SSE2, 9161 // but PCMPGTQ and PCMPEQQ have different requirements). 9162 if (VT == MVT::v2i64) { 9163 if (Opc == X86ISD::PCMPGT && !Subtarget->hasSSE42()) 9164 return SDValue(); 9165 if (Opc == X86ISD::PCMPEQ && !Subtarget->hasSSE41()) 9166 return SDValue(); 9167 } 9168 9169 // Since SSE has no unsigned integer comparisons, we need to flip the sign 9170 // bits of the inputs before performing those operations. 9171 if (FlipSigns) { 9172 EVT EltVT = VT.getVectorElementType(); 9173 SDValue SignBit = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), 9174 EltVT); 9175 std::vector<SDValue> SignBits(VT.getVectorNumElements(), SignBit); 9176 SDValue SignVec = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &SignBits[0], 9177 SignBits.size()); 9178 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SignVec); 9179 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SignVec); 9180 } 9181 9182 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 9183 9184 // If the logical-not of the result is required, perform that now. 9185 if (Invert) 9186 Result = DAG.getNOT(dl, Result, VT); 9187 9188 return Result; 9189} 9190 9191// isX86LogicalCmp - Return true if opcode is a X86 logical comparison. 
9192static bool isX86LogicalCmp(SDValue Op) { 9193 unsigned Opc = Op.getNode()->getOpcode(); 9194 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI || 9195 Opc == X86ISD::SAHF) 9196 return true; 9197 if (Op.getResNo() == 1 && 9198 (Opc == X86ISD::ADD || 9199 Opc == X86ISD::SUB || 9200 Opc == X86ISD::ADC || 9201 Opc == X86ISD::SBB || 9202 Opc == X86ISD::SMUL || 9203 Opc == X86ISD::UMUL || 9204 Opc == X86ISD::INC || 9205 Opc == X86ISD::DEC || 9206 Opc == X86ISD::OR || 9207 Opc == X86ISD::XOR || 9208 Opc == X86ISD::AND)) 9209 return true; 9210 9211 if (Op.getResNo() == 2 && Opc == X86ISD::UMUL) 9212 return true; 9213 9214 return false; 9215} 9216 9217static bool isZero(SDValue V) { 9218 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V); 9219 return C && C->isNullValue(); 9220} 9221 9222static bool isAllOnes(SDValue V) { 9223 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V); 9224 return C && C->isAllOnesValue(); 9225} 9226 9227static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) { 9228 if (V.getOpcode() != ISD::TRUNCATE) 9229 return false; 9230 9231 SDValue VOp0 = V.getOperand(0); 9232 unsigned InBits = VOp0.getValueSizeInBits(); 9233 unsigned Bits = V.getValueSizeInBits(); 9234 return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits)); 9235} 9236 9237SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 9238 bool addTest = true; 9239 SDValue Cond = Op.getOperand(0); 9240 SDValue Op1 = Op.getOperand(1); 9241 SDValue Op2 = Op.getOperand(2); 9242 DebugLoc DL = Op.getDebugLoc(); 9243 SDValue CC; 9244 9245 if (Cond.getOpcode() == ISD::SETCC) { 9246 SDValue NewCond = LowerSETCC(Cond, DAG); 9247 if (NewCond.getNode()) 9248 Cond = NewCond; 9249 } 9250 9251 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y 9252 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y 9253 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y 9254 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y 9255 if (Cond.getOpcode() == X86ISD::SETCC && 9256 Cond.getOperand(1).getOpcode() == X86ISD::CMP && 9257 isZero(Cond.getOperand(1).getOperand(1))) { 9258 SDValue Cmp = Cond.getOperand(1); 9259 9260 unsigned CondCode =cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue(); 9261 9262 if ((isAllOnes(Op1) || isAllOnes(Op2)) && 9263 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) { 9264 SDValue Y = isAllOnes(Op2) ? Op1 : Op2; 9265 9266 SDValue CmpOp0 = Cmp.getOperand(0); 9267 // Apply further optimizations for special cases 9268 // (select (x != 0), -1, 0) -> neg & sbb 9269 // (select (x == 0), 0, -1) -> neg & sbb 9270 if (ConstantSDNode *YC = dyn_cast<ConstantSDNode>(Y)) 9271 if (YC->isNullValue() && 9272 (isAllOnes(Op1) == (CondCode == X86::COND_NE))) { 9273 SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32); 9274 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, VTs, 9275 DAG.getConstant(0, CmpOp0.getValueType()), 9276 CmpOp0); 9277 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(), 9278 DAG.getConstant(X86::COND_B, MVT::i8), 9279 SDValue(Neg.getNode(), 1)); 9280 return Res; 9281 } 9282 9283 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, 9284 CmpOp0, DAG.getConstant(1, CmpOp0.getValueType())); 9285 Cmp = ConvertCmpIfNecessary(Cmp, DAG); 9286 9287 SDValue Res = // Res = 0 or -1. 
9288 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(), 9289 DAG.getConstant(X86::COND_B, MVT::i8), Cmp); 9290 9291 if (isAllOnes(Op1) != (CondCode == X86::COND_E)) 9292 Res = DAG.getNOT(DL, Res, Res.getValueType()); 9293 9294 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2); 9295 if (N2C == 0 || !N2C->isNullValue()) 9296 Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y); 9297 return Res; 9298 } 9299 } 9300 9301 // Look past (and (setcc_carry (cmp ...)), 1). 9302 if (Cond.getOpcode() == ISD::AND && 9303 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) { 9304 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 9305 if (C && C->getAPIntValue() == 1) 9306 Cond = Cond.getOperand(0); 9307 } 9308 9309 // If condition flag is set by a X86ISD::CMP, then use it as the condition 9310 // setting operand in place of the X86ISD::SETCC. 9311 unsigned CondOpcode = Cond.getOpcode(); 9312 if (CondOpcode == X86ISD::SETCC || 9313 CondOpcode == X86ISD::SETCC_CARRY) { 9314 CC = Cond.getOperand(0); 9315 9316 SDValue Cmp = Cond.getOperand(1); 9317 unsigned Opc = Cmp.getOpcode(); 9318 EVT VT = Op.getValueType(); 9319 9320 bool IllegalFPCMov = false; 9321 if (VT.isFloatingPoint() && !VT.isVector() && 9322 !isScalarFPTypeInSSEReg(VT)) // FPStack? 9323 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue()); 9324 9325 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) || 9326 Opc == X86ISD::BT) { // FIXME 9327 Cond = Cmp; 9328 addTest = false; 9329 } 9330 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO || 9331 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO || 9332 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) && 9333 Cond.getOperand(0).getValueType() != MVT::i8)) { 9334 SDValue LHS = Cond.getOperand(0); 9335 SDValue RHS = Cond.getOperand(1); 9336 unsigned X86Opcode; 9337 unsigned X86Cond; 9338 SDVTList VTs; 9339 switch (CondOpcode) { 9340 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break; 9341 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break; 9342 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break; 9343 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break; 9344 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break; 9345 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break; 9346 default: llvm_unreachable("unexpected overflowing operator"); 9347 } 9348 if (CondOpcode == ISD::UMULO) 9349 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(), 9350 MVT::i32); 9351 else 9352 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32); 9353 9354 SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS); 9355 9356 if (CondOpcode == ISD::UMULO) 9357 Cond = X86Op.getValue(2); 9358 else 9359 Cond = X86Op.getValue(1); 9360 9361 CC = DAG.getConstant(X86Cond, MVT::i8); 9362 addTest = false; 9363 } 9364 9365 if (addTest) { 9366 // Look pass the truncate if the high bits are known zero. 9367 if (isTruncWithZeroHighBitsInput(Cond, DAG)) 9368 Cond = Cond.getOperand(0); 9369 9370 // We know the result of AND is compared against zero. Try to match 9371 // it to BT. 
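// For example (and X, (shl 1, N)) != 0 can be matched by LowerToBT as
// (X86ISD::BT X, N) with condition COND_B, since BT copies the selected bit
// into CF.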
9372 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) { 9373 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG); 9374 if (NewSetCC.getNode()) { 9375 CC = NewSetCC.getOperand(0); 9376 Cond = NewSetCC.getOperand(1); 9377 addTest = false; 9378 } 9379 } 9380 } 9381 9382 if (addTest) { 9383 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 9384 Cond = EmitTest(Cond, X86::COND_NE, DAG); 9385 } 9386 9387 // a < b ? -1 : 0 -> RES = ~setcc_carry 9388 // a < b ? 0 : -1 -> RES = setcc_carry 9389 // a >= b ? -1 : 0 -> RES = setcc_carry 9390 // a >= b ? 0 : -1 -> RES = ~setcc_carry 9391 if (Cond.getOpcode() == X86ISD::SUB) { 9392 Cond = ConvertCmpIfNecessary(Cond, DAG); 9393 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue(); 9394 9395 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) && 9396 (isAllOnes(Op1) || isAllOnes(Op2)) && (isZero(Op1) || isZero(Op2))) { 9397 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(), 9398 DAG.getConstant(X86::COND_B, MVT::i8), Cond); 9399 if (isAllOnes(Op1) != (CondCode == X86::COND_B)) 9400 return DAG.getNOT(DL, Res, Res.getValueType()); 9401 return Res; 9402 } 9403 } 9404 9405 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate 9406 // widen the cmov and push the truncate through. This avoids introducing a new 9407 // branch during isel and doesn't add any extensions. 9408 if (Op.getValueType() == MVT::i8 && 9409 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) { 9410 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0); 9411 if (T1.getValueType() == T2.getValueType() && 9412 // Blacklist CopyFromReg to avoid partial register stalls. 9413 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){ 9414 SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue); 9415 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VTs, T2, T1, CC, Cond); 9416 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov); 9417 } 9418 } 9419 9420 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if 9421 // condition is true. 9422 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue); 9423 SDValue Ops[] = { Op2, Op1, CC, Cond }; 9424 return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops, array_lengthof(Ops)); 9425} 9426 9427// isAndOrOfSingleUseSetCCs - Return true if node is an ISD::AND or 9428// ISD::OR of two X86ISD::SETCC nodes each of which has no other use apart 9429// from the AND / OR. 9430static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) { 9431 Opc = Op.getOpcode(); 9432 if (Opc != ISD::OR && Opc != ISD::AND) 9433 return false; 9434 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC && 9435 Op.getOperand(0).hasOneUse() && 9436 Op.getOperand(1).getOpcode() == X86ISD::SETCC && 9437 Op.getOperand(1).hasOneUse()); 9438} 9439 9440// isXor1OfSetCC - Return true if node is an ISD::XOR of a X86ISD::SETCC and 9441// 1 and that the SETCC node has a single use. 
9442static bool isXor1OfSetCC(SDValue Op) { 9443 if (Op.getOpcode() != ISD::XOR) 9444 return false; 9445 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 9446 if (N1C && N1C->getAPIntValue() == 1) { 9447 return Op.getOperand(0).getOpcode() == X86ISD::SETCC && 9448 Op.getOperand(0).hasOneUse(); 9449 } 9450 return false; 9451} 9452 9453SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { 9454 bool addTest = true; 9455 SDValue Chain = Op.getOperand(0); 9456 SDValue Cond = Op.getOperand(1); 9457 SDValue Dest = Op.getOperand(2); 9458 DebugLoc dl = Op.getDebugLoc(); 9459 SDValue CC; 9460 bool Inverted = false; 9461 9462 if (Cond.getOpcode() == ISD::SETCC) { 9463 // Check for setcc([su]{add,sub,mul}o == 0). 9464 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ && 9465 isa<ConstantSDNode>(Cond.getOperand(1)) && 9466 cast<ConstantSDNode>(Cond.getOperand(1))->isNullValue() && 9467 Cond.getOperand(0).getResNo() == 1 && 9468 (Cond.getOperand(0).getOpcode() == ISD::SADDO || 9469 Cond.getOperand(0).getOpcode() == ISD::UADDO || 9470 Cond.getOperand(0).getOpcode() == ISD::SSUBO || 9471 Cond.getOperand(0).getOpcode() == ISD::USUBO || 9472 Cond.getOperand(0).getOpcode() == ISD::SMULO || 9473 Cond.getOperand(0).getOpcode() == ISD::UMULO)) { 9474 Inverted = true; 9475 Cond = Cond.getOperand(0); 9476 } else { 9477 SDValue NewCond = LowerSETCC(Cond, DAG); 9478 if (NewCond.getNode()) 9479 Cond = NewCond; 9480 } 9481 } 9482#if 0 9483 // FIXME: LowerXALUO doesn't handle these!! 9484 else if (Cond.getOpcode() == X86ISD::ADD || 9485 Cond.getOpcode() == X86ISD::SUB || 9486 Cond.getOpcode() == X86ISD::SMUL || 9487 Cond.getOpcode() == X86ISD::UMUL) 9488 Cond = LowerXALUO(Cond, DAG); 9489#endif 9490 9491 // Look pass (and (setcc_carry (cmp ...)), 1). 9492 if (Cond.getOpcode() == ISD::AND && 9493 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) { 9494 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 9495 if (C && C->getAPIntValue() == 1) 9496 Cond = Cond.getOperand(0); 9497 } 9498 9499 // If condition flag is set by a X86ISD::CMP, then use it as the condition 9500 // setting operand in place of the X86ISD::SETCC. 9501 unsigned CondOpcode = Cond.getOpcode(); 9502 if (CondOpcode == X86ISD::SETCC || 9503 CondOpcode == X86ISD::SETCC_CARRY) { 9504 CC = Cond.getOperand(0); 9505 9506 SDValue Cmp = Cond.getOperand(1); 9507 unsigned Opc = Cmp.getOpcode(); 9508 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp?? 9509 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) { 9510 Cond = Cmp; 9511 addTest = false; 9512 } else { 9513 switch (cast<ConstantSDNode>(CC)->getZExtValue()) { 9514 default: break; 9515 case X86::COND_O: 9516 case X86::COND_B: 9517 // These can only come from an arithmetic instruction with overflow, 9518 // e.g. SADDO, UADDO. 
9519 Cond = Cond.getNode()->getOperand(1); 9520 addTest = false; 9521 break; 9522 } 9523 } 9524 } 9525 CondOpcode = Cond.getOpcode(); 9526 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO || 9527 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO || 9528 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) && 9529 Cond.getOperand(0).getValueType() != MVT::i8)) { 9530 SDValue LHS = Cond.getOperand(0); 9531 SDValue RHS = Cond.getOperand(1); 9532 unsigned X86Opcode; 9533 unsigned X86Cond; 9534 SDVTList VTs; 9535 switch (CondOpcode) { 9536 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break; 9537 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break; 9538 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break; 9539 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break; 9540 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break; 9541 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break; 9542 default: llvm_unreachable("unexpected overflowing operator"); 9543 } 9544 if (Inverted) 9545 X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond); 9546 if (CondOpcode == ISD::UMULO) 9547 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(), 9548 MVT::i32); 9549 else 9550 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32); 9551 9552 SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS); 9553 9554 if (CondOpcode == ISD::UMULO) 9555 Cond = X86Op.getValue(2); 9556 else 9557 Cond = X86Op.getValue(1); 9558 9559 CC = DAG.getConstant(X86Cond, MVT::i8); 9560 addTest = false; 9561 } else { 9562 unsigned CondOpc; 9563 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) { 9564 SDValue Cmp = Cond.getOperand(0).getOperand(1); 9565 if (CondOpc == ISD::OR) { 9566 // Also, recognize the pattern generated by an FCMP_UNE. We can emit 9567 // two branches instead of an explicit OR instruction with a 9568 // separate test. 9569 if (Cmp == Cond.getOperand(1).getOperand(1) && 9570 isX86LogicalCmp(Cmp)) { 9571 CC = Cond.getOperand(0).getOperand(0); 9572 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 9573 Chain, Dest, CC, Cmp); 9574 CC = Cond.getOperand(1).getOperand(0); 9575 Cond = Cmp; 9576 addTest = false; 9577 } 9578 } else { // ISD::AND 9579 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit 9580 // two branches instead of an explicit AND instruction with a 9581 // separate test. However, we only do this if this block doesn't 9582 // have a fall-through edge, because this requires an explicit 9583 // jmp when the condition is false. 9584 if (Cmp == Cond.getOperand(1).getOperand(1) && 9585 isX86LogicalCmp(Cmp) && 9586 Op.getNode()->hasOneUse()) { 9587 X86::CondCode CCode = 9588 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0); 9589 CCode = X86::GetOppositeBranchCondition(CCode); 9590 CC = DAG.getConstant(CCode, MVT::i8); 9591 SDNode *User = *Op.getNode()->use_begin(); 9592 // Look for an unconditional branch following this conditional branch. 9593 // We need this because we need to reverse the successors in order 9594 // to implement FCMP_OEQ. 
9595 if (User->getOpcode() == ISD::BR) { 9596 SDValue FalseBB = User->getOperand(1); 9597 SDNode *NewBR = 9598 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest); 9599 assert(NewBR == User); 9600 (void)NewBR; 9601 Dest = FalseBB; 9602 9603 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 9604 Chain, Dest, CC, Cmp); 9605 X86::CondCode CCode = 9606 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0); 9607 CCode = X86::GetOppositeBranchCondition(CCode); 9608 CC = DAG.getConstant(CCode, MVT::i8); 9609 Cond = Cmp; 9610 addTest = false; 9611 } 9612 } 9613 } 9614 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) { 9615 // Recognize for xorb (setcc), 1 patterns. The xor inverts the condition. 9616 // It should be transformed during dag combiner except when the condition 9617 // is set by a arithmetics with overflow node. 9618 X86::CondCode CCode = 9619 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0); 9620 CCode = X86::GetOppositeBranchCondition(CCode); 9621 CC = DAG.getConstant(CCode, MVT::i8); 9622 Cond = Cond.getOperand(0).getOperand(1); 9623 addTest = false; 9624 } else if (Cond.getOpcode() == ISD::SETCC && 9625 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) { 9626 // For FCMP_OEQ, we can emit 9627 // two branches instead of an explicit AND instruction with a 9628 // separate test. However, we only do this if this block doesn't 9629 // have a fall-through edge, because this requires an explicit 9630 // jmp when the condition is false. 9631 if (Op.getNode()->hasOneUse()) { 9632 SDNode *User = *Op.getNode()->use_begin(); 9633 // Look for an unconditional branch following this conditional branch. 9634 // We need this because we need to reverse the successors in order 9635 // to implement FCMP_OEQ. 9636 if (User->getOpcode() == ISD::BR) { 9637 SDValue FalseBB = User->getOperand(1); 9638 SDNode *NewBR = 9639 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest); 9640 assert(NewBR == User); 9641 (void)NewBR; 9642 Dest = FalseBB; 9643 9644 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32, 9645 Cond.getOperand(0), Cond.getOperand(1)); 9646 Cmp = ConvertCmpIfNecessary(Cmp, DAG); 9647 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 9648 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 9649 Chain, Dest, CC, Cmp); 9650 CC = DAG.getConstant(X86::COND_P, MVT::i8); 9651 Cond = Cmp; 9652 addTest = false; 9653 } 9654 } 9655 } else if (Cond.getOpcode() == ISD::SETCC && 9656 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) { 9657 // For FCMP_UNE, we can emit 9658 // two branches instead of an explicit AND instruction with a 9659 // separate test. However, we only do this if this block doesn't 9660 // have a fall-through edge, because this requires an explicit 9661 // jmp when the condition is false. 9662 if (Op.getNode()->hasOneUse()) { 9663 SDNode *User = *Op.getNode()->use_begin(); 9664 // Look for an unconditional branch following this conditional branch. 9665 // We need this because we need to reverse the successors in order 9666 // to implement FCMP_UNE. 
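// The emitted sequence is roughly:
//   jne  <true dest>    ; not equal           -> UNE is true
//   jnp  <false dest>   ; ordered and equal   -> UNE is false
//   jmp  <true dest>    ; unordered (PF set)  -> UNE is true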
9667 if (User->getOpcode() == ISD::BR) { 9668 SDValue FalseBB = User->getOperand(1); 9669 SDNode *NewBR = 9670 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest); 9671 assert(NewBR == User); 9672 (void)NewBR; 9673 9674 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32, 9675 Cond.getOperand(0), Cond.getOperand(1)); 9676 Cmp = ConvertCmpIfNecessary(Cmp, DAG); 9677 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 9678 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 9679 Chain, Dest, CC, Cmp); 9680 CC = DAG.getConstant(X86::COND_NP, MVT::i8); 9681 Cond = Cmp; 9682 addTest = false; 9683 Dest = FalseBB; 9684 } 9685 } 9686 } 9687 } 9688 9689 if (addTest) { 9690 // Look pass the truncate if the high bits are known zero. 9691 if (isTruncWithZeroHighBitsInput(Cond, DAG)) 9692 Cond = Cond.getOperand(0); 9693 9694 // We know the result of AND is compared against zero. Try to match 9695 // it to BT. 9696 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) { 9697 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG); 9698 if (NewSetCC.getNode()) { 9699 CC = NewSetCC.getOperand(0); 9700 Cond = NewSetCC.getOperand(1); 9701 addTest = false; 9702 } 9703 } 9704 } 9705 9706 if (addTest) { 9707 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 9708 Cond = EmitTest(Cond, X86::COND_NE, DAG); 9709 } 9710 Cond = ConvertCmpIfNecessary(Cond, DAG); 9711 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 9712 Chain, Dest, CC, Cond); 9713} 9714 9715 9716// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets. 9717// Calls to _alloca is needed to probe the stack when allocating more than 4k 9718// bytes in one go. Touching the stack at 4K increments is necessary to ensure 9719// that the guard pages used by the OS virtual memory manager are allocated in 9720// correct sequence. 9721SDValue 9722X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, 9723 SelectionDAG &DAG) const { 9724 assert((Subtarget->isTargetCygMing() || Subtarget->isTargetWindows() || 9725 getTargetMachine().Options.EnableSegmentedStacks) && 9726 "This should be used only on Windows targets or when segmented stacks " 9727 "are being used"); 9728 assert(!Subtarget->isTargetEnvMacho() && "Not implemented"); 9729 DebugLoc dl = Op.getDebugLoc(); 9730 9731 // Get the inputs. 9732 SDValue Chain = Op.getOperand(0); 9733 SDValue Size = Op.getOperand(1); 9734 // FIXME: Ensure alignment here 9735 9736 bool Is64Bit = Subtarget->is64Bit(); 9737 EVT SPTy = Is64Bit ? MVT::i64 : MVT::i32; 9738 9739 if (getTargetMachine().Options.EnableSegmentedStacks) { 9740 MachineFunction &MF = DAG.getMachineFunction(); 9741 MachineRegisterInfo &MRI = MF.getRegInfo(); 9742 9743 if (Is64Bit) { 9744 // The 64 bit implementation of segmented stacks needs to clobber both r10 9745 // r11. This makes it impossible to use it along with nested parameters. 9746 const Function *F = MF.getFunction(); 9747 9748 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end(); 9749 I != E; ++I) 9750 if (I->hasNestAttr()) 9751 report_fatal_error("Cannot use segmented stacks with functions that " 9752 "have nested arguments."); 9753 } 9754 9755 const TargetRegisterClass *AddrRegClass = 9756 getRegClassFor(Subtarget->is64Bit() ? 
MVT::i64:MVT::i32); 9757 unsigned Vreg = MRI.createVirtualRegister(AddrRegClass); 9758 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size); 9759 SDValue Value = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain, 9760 DAG.getRegister(Vreg, SPTy)); 9761 SDValue Ops1[2] = { Value, Chain }; 9762 return DAG.getMergeValues(Ops1, 2, dl); 9763 } else { 9764 SDValue Flag; 9765 unsigned Reg = (Subtarget->is64Bit() ? X86::RAX : X86::EAX); 9766 9767 Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag); 9768 Flag = Chain.getValue(1); 9769 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 9770 9771 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag); 9772 Flag = Chain.getValue(1); 9773 9774 Chain = DAG.getCopyFromReg(Chain, dl, X86StackPtr, SPTy).getValue(1); 9775 9776 SDValue Ops1[2] = { Chain.getValue(0), Chain }; 9777 return DAG.getMergeValues(Ops1, 2, dl); 9778 } 9779} 9780 9781SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { 9782 MachineFunction &MF = DAG.getMachineFunction(); 9783 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 9784 9785 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 9786 DebugLoc DL = Op.getDebugLoc(); 9787 9788 if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) { 9789 // vastart just stores the address of the VarArgsFrameIndex slot into the 9790 // memory location argument. 9791 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 9792 getPointerTy()); 9793 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1), 9794 MachinePointerInfo(SV), false, false, 0); 9795 } 9796 9797 // __va_list_tag: 9798 // gp_offset (0 - 6 * 8) 9799 // fp_offset (48 - 48 + 8 * 16) 9800 // overflow_arg_area (point to parameters coming in memory). 9801 // reg_save_area 9802 SmallVector<SDValue, 8> MemOps; 9803 SDValue FIN = Op.getOperand(1); 9804 // Store gp_offset 9805 SDValue Store = DAG.getStore(Op.getOperand(0), DL, 9806 DAG.getConstant(FuncInfo->getVarArgsGPOffset(), 9807 MVT::i32), 9808 FIN, MachinePointerInfo(SV), false, false, 0); 9809 MemOps.push_back(Store); 9810 9811 // Store fp_offset 9812 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), 9813 FIN, DAG.getIntPtrConstant(4)); 9814 Store = DAG.getStore(Op.getOperand(0), DL, 9815 DAG.getConstant(FuncInfo->getVarArgsFPOffset(), 9816 MVT::i32), 9817 FIN, MachinePointerInfo(SV, 4), false, false, 0); 9818 MemOps.push_back(Store); 9819 9820 // Store ptr to overflow_arg_area 9821 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), 9822 FIN, DAG.getIntPtrConstant(4)); 9823 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 9824 getPointerTy()); 9825 Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, 9826 MachinePointerInfo(SV, 8), 9827 false, false, 0); 9828 MemOps.push_back(Store); 9829 9830 // Store ptr to reg_save_area. 
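// reg_save_area lives at offset 16 of the __va_list_tag (gp_offset is at
// offset 0, fp_offset at 4, overflow_arg_area at 8).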
9831 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), 9832 FIN, DAG.getIntPtrConstant(8)); 9833 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), 9834 getPointerTy()); 9835 Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN, 9836 MachinePointerInfo(SV, 16), false, false, 0); 9837 MemOps.push_back(Store); 9838 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, 9839 &MemOps[0], MemOps.size()); 9840} 9841 9842SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { 9843 assert(Subtarget->is64Bit() && 9844 "LowerVAARG only handles 64-bit va_arg!"); 9845 assert((Subtarget->isTargetLinux() || 9846 Subtarget->isTargetDarwin()) && 9847 "Unhandled target in LowerVAARG"); 9848 assert(Op.getNode()->getNumOperands() == 4); 9849 SDValue Chain = Op.getOperand(0); 9850 SDValue SrcPtr = Op.getOperand(1); 9851 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 9852 unsigned Align = Op.getConstantOperandVal(3); 9853 DebugLoc dl = Op.getDebugLoc(); 9854 9855 EVT ArgVT = Op.getNode()->getValueType(0); 9856 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 9857 uint32_t ArgSize = getDataLayout()->getTypeAllocSize(ArgTy); 9858 uint8_t ArgMode; 9859 9860 // Decide which area this value should be read from. 9861 // TODO: Implement the AMD64 ABI in its entirety. This simple 9862 // selection mechanism works only for the basic types. 9863 if (ArgVT == MVT::f80) { 9864 llvm_unreachable("va_arg for f80 not yet implemented"); 9865 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) { 9866 ArgMode = 2; // Argument passed in XMM register. Use fp_offset. 9867 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) { 9868 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset. 9869 } else { 9870 llvm_unreachable("Unhandled argument type in LowerVAARG"); 9871 } 9872 9873 if (ArgMode == 2) { 9874 // Sanity Check: Make sure using fp_offset makes sense. 9875 assert(!getTargetMachine().Options.UseSoftFloat && 9876 !(DAG.getMachineFunction() 9877 .getFunction()->getFnAttributes() 9878 .hasAttribute(Attributes::NoImplicitFloat)) && 9879 Subtarget->hasSSE1()); 9880 } 9881 9882 // Insert VAARG_64 node into the DAG 9883 // VAARG_64 returns two values: Variable Argument Address, Chain 9884 SmallVector<SDValue, 11> InstOps; 9885 InstOps.push_back(Chain); 9886 InstOps.push_back(SrcPtr); 9887 InstOps.push_back(DAG.getConstant(ArgSize, MVT::i32)); 9888 InstOps.push_back(DAG.getConstant(ArgMode, MVT::i8)); 9889 InstOps.push_back(DAG.getConstant(Align, MVT::i32)); 9890 SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other); 9891 SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl, 9892 VTs, &InstOps[0], InstOps.size(), 9893 MVT::i64, 9894 MachinePointerInfo(SV), 9895 /*Align=*/0, 9896 /*Volatile=*/false, 9897 /*ReadMem=*/true, 9898 /*WriteMem=*/true); 9899 Chain = VAARG.getValue(1); 9900 9901 // Load the next argument and return it 9902 return DAG.getLoad(ArgVT, dl, 9903 Chain, 9904 VAARG, 9905 MachinePointerInfo(), 9906 false, false, false, 0); 9907} 9908 9909static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget, 9910 SelectionDAG &DAG) { 9911 // X86-64 va_list is a struct { i32, i32, i8*, i8* }. 
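// Its size is 24 bytes with 8-byte alignment, so va_copy reduces to a plain
// 24-byte memcpy of the struct.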
9912 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!"); 9913 SDValue Chain = Op.getOperand(0); 9914 SDValue DstPtr = Op.getOperand(1); 9915 SDValue SrcPtr = Op.getOperand(2); 9916 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); 9917 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 9918 DebugLoc DL = Op.getDebugLoc(); 9919 9920 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, 9921 DAG.getIntPtrConstant(24), 8, /*isVolatile*/false, 9922 false, 9923 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV)); 9924} 9925 9926// getTargetVShiftNOde - Handle vector element shifts where the shift amount 9927// may or may not be a constant. Takes immediate version of shift as input. 9928static SDValue getTargetVShiftNode(unsigned Opc, DebugLoc dl, EVT VT, 9929 SDValue SrcOp, SDValue ShAmt, 9930 SelectionDAG &DAG) { 9931 assert(ShAmt.getValueType() == MVT::i32 && "ShAmt is not i32"); 9932 9933 if (isa<ConstantSDNode>(ShAmt)) { 9934 // Constant may be a TargetConstant. Use a regular constant. 9935 uint32_t ShiftAmt = cast<ConstantSDNode>(ShAmt)->getZExtValue(); 9936 switch (Opc) { 9937 default: llvm_unreachable("Unknown target vector shift node"); 9938 case X86ISD::VSHLI: 9939 case X86ISD::VSRLI: 9940 case X86ISD::VSRAI: 9941 return DAG.getNode(Opc, dl, VT, SrcOp, 9942 DAG.getConstant(ShiftAmt, MVT::i32)); 9943 } 9944 } 9945 9946 // Change opcode to non-immediate version 9947 switch (Opc) { 9948 default: llvm_unreachable("Unknown target vector shift node"); 9949 case X86ISD::VSHLI: Opc = X86ISD::VSHL; break; 9950 case X86ISD::VSRLI: Opc = X86ISD::VSRL; break; 9951 case X86ISD::VSRAI: Opc = X86ISD::VSRA; break; 9952 } 9953 9954 // Need to build a vector containing shift amount 9955 // Shift amount is 32-bits, but SSE instructions read 64-bit, so fill with 0 9956 SDValue ShOps[4]; 9957 ShOps[0] = ShAmt; 9958 ShOps[1] = DAG.getConstant(0, MVT::i32); 9959 ShOps[2] = ShOps[3] = DAG.getUNDEF(MVT::i32); 9960 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, &ShOps[0], 4); 9961 9962 // The return type has to be a 128-bit type with the same element 9963 // type as the input type. 9964 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 9965 EVT ShVT = MVT::getVectorVT(EltVT, 128/EltVT.getSizeInBits()); 9966 9967 ShAmt = DAG.getNode(ISD::BITCAST, dl, ShVT, ShAmt); 9968 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt); 9969} 9970 9971static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) { 9972 DebugLoc dl = Op.getDebugLoc(); 9973 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 9974 switch (IntNo) { 9975 default: return SDValue(); // Don't custom lower most intrinsics. 9976 // Comparison intrinsics. 
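// These map to (U)COMIS[SD], which only set EFLAGS; the integer result the
// intrinsic promises is rebuilt with an X86ISD::SETCC on the translated
// condition and zero-extended to i32.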
9977 case Intrinsic::x86_sse_comieq_ss: 9978 case Intrinsic::x86_sse_comilt_ss: 9979 case Intrinsic::x86_sse_comile_ss: 9980 case Intrinsic::x86_sse_comigt_ss: 9981 case Intrinsic::x86_sse_comige_ss: 9982 case Intrinsic::x86_sse_comineq_ss: 9983 case Intrinsic::x86_sse_ucomieq_ss: 9984 case Intrinsic::x86_sse_ucomilt_ss: 9985 case Intrinsic::x86_sse_ucomile_ss: 9986 case Intrinsic::x86_sse_ucomigt_ss: 9987 case Intrinsic::x86_sse_ucomige_ss: 9988 case Intrinsic::x86_sse_ucomineq_ss: 9989 case Intrinsic::x86_sse2_comieq_sd: 9990 case Intrinsic::x86_sse2_comilt_sd: 9991 case Intrinsic::x86_sse2_comile_sd: 9992 case Intrinsic::x86_sse2_comigt_sd: 9993 case Intrinsic::x86_sse2_comige_sd: 9994 case Intrinsic::x86_sse2_comineq_sd: 9995 case Intrinsic::x86_sse2_ucomieq_sd: 9996 case Intrinsic::x86_sse2_ucomilt_sd: 9997 case Intrinsic::x86_sse2_ucomile_sd: 9998 case Intrinsic::x86_sse2_ucomigt_sd: 9999 case Intrinsic::x86_sse2_ucomige_sd: 10000 case Intrinsic::x86_sse2_ucomineq_sd: { 10001 unsigned Opc; 10002 ISD::CondCode CC; 10003 switch (IntNo) { 10004 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 10005 case Intrinsic::x86_sse_comieq_ss: 10006 case Intrinsic::x86_sse2_comieq_sd: 10007 Opc = X86ISD::COMI; 10008 CC = ISD::SETEQ; 10009 break; 10010 case Intrinsic::x86_sse_comilt_ss: 10011 case Intrinsic::x86_sse2_comilt_sd: 10012 Opc = X86ISD::COMI; 10013 CC = ISD::SETLT; 10014 break; 10015 case Intrinsic::x86_sse_comile_ss: 10016 case Intrinsic::x86_sse2_comile_sd: 10017 Opc = X86ISD::COMI; 10018 CC = ISD::SETLE; 10019 break; 10020 case Intrinsic::x86_sse_comigt_ss: 10021 case Intrinsic::x86_sse2_comigt_sd: 10022 Opc = X86ISD::COMI; 10023 CC = ISD::SETGT; 10024 break; 10025 case Intrinsic::x86_sse_comige_ss: 10026 case Intrinsic::x86_sse2_comige_sd: 10027 Opc = X86ISD::COMI; 10028 CC = ISD::SETGE; 10029 break; 10030 case Intrinsic::x86_sse_comineq_ss: 10031 case Intrinsic::x86_sse2_comineq_sd: 10032 Opc = X86ISD::COMI; 10033 CC = ISD::SETNE; 10034 break; 10035 case Intrinsic::x86_sse_ucomieq_ss: 10036 case Intrinsic::x86_sse2_ucomieq_sd: 10037 Opc = X86ISD::UCOMI; 10038 CC = ISD::SETEQ; 10039 break; 10040 case Intrinsic::x86_sse_ucomilt_ss: 10041 case Intrinsic::x86_sse2_ucomilt_sd: 10042 Opc = X86ISD::UCOMI; 10043 CC = ISD::SETLT; 10044 break; 10045 case Intrinsic::x86_sse_ucomile_ss: 10046 case Intrinsic::x86_sse2_ucomile_sd: 10047 Opc = X86ISD::UCOMI; 10048 CC = ISD::SETLE; 10049 break; 10050 case Intrinsic::x86_sse_ucomigt_ss: 10051 case Intrinsic::x86_sse2_ucomigt_sd: 10052 Opc = X86ISD::UCOMI; 10053 CC = ISD::SETGT; 10054 break; 10055 case Intrinsic::x86_sse_ucomige_ss: 10056 case Intrinsic::x86_sse2_ucomige_sd: 10057 Opc = X86ISD::UCOMI; 10058 CC = ISD::SETGE; 10059 break; 10060 case Intrinsic::x86_sse_ucomineq_ss: 10061 case Intrinsic::x86_sse2_ucomineq_sd: 10062 Opc = X86ISD::UCOMI; 10063 CC = ISD::SETNE; 10064 break; 10065 } 10066 10067 SDValue LHS = Op.getOperand(1); 10068 SDValue RHS = Op.getOperand(2); 10069 unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG); 10070 assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!"); 10071 SDValue Cond = DAG.getNode(Opc, dl, MVT::i32, LHS, RHS); 10072 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 10073 DAG.getConstant(X86CC, MVT::i8), Cond); 10074 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); 10075 } 10076 10077 // Arithmetic intrinsics. 
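// pmuludq multiplies the even-numbered 32-bit lanes into full 64-bit
// products (X86ISD::PMULUDQ).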
10078 case Intrinsic::x86_sse2_pmulu_dq: 10079 case Intrinsic::x86_avx2_pmulu_dq: 10080 return DAG.getNode(X86ISD::PMULUDQ, dl, Op.getValueType(), 10081 Op.getOperand(1), Op.getOperand(2)); 10082 10083 // SSE3/AVX horizontal add/sub intrinsics 10084 case Intrinsic::x86_sse3_hadd_ps: 10085 case Intrinsic::x86_sse3_hadd_pd: 10086 case Intrinsic::x86_avx_hadd_ps_256: 10087 case Intrinsic::x86_avx_hadd_pd_256: 10088 case Intrinsic::x86_sse3_hsub_ps: 10089 case Intrinsic::x86_sse3_hsub_pd: 10090 case Intrinsic::x86_avx_hsub_ps_256: 10091 case Intrinsic::x86_avx_hsub_pd_256: 10092 case Intrinsic::x86_ssse3_phadd_w_128: 10093 case Intrinsic::x86_ssse3_phadd_d_128: 10094 case Intrinsic::x86_avx2_phadd_w: 10095 case Intrinsic::x86_avx2_phadd_d: 10096 case Intrinsic::x86_ssse3_phsub_w_128: 10097 case Intrinsic::x86_ssse3_phsub_d_128: 10098 case Intrinsic::x86_avx2_phsub_w: 10099 case Intrinsic::x86_avx2_phsub_d: { 10100 unsigned Opcode; 10101 switch (IntNo) { 10102 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 10103 case Intrinsic::x86_sse3_hadd_ps: 10104 case Intrinsic::x86_sse3_hadd_pd: 10105 case Intrinsic::x86_avx_hadd_ps_256: 10106 case Intrinsic::x86_avx_hadd_pd_256: 10107 Opcode = X86ISD::FHADD; 10108 break; 10109 case Intrinsic::x86_sse3_hsub_ps: 10110 case Intrinsic::x86_sse3_hsub_pd: 10111 case Intrinsic::x86_avx_hsub_ps_256: 10112 case Intrinsic::x86_avx_hsub_pd_256: 10113 Opcode = X86ISD::FHSUB; 10114 break; 10115 case Intrinsic::x86_ssse3_phadd_w_128: 10116 case Intrinsic::x86_ssse3_phadd_d_128: 10117 case Intrinsic::x86_avx2_phadd_w: 10118 case Intrinsic::x86_avx2_phadd_d: 10119 Opcode = X86ISD::HADD; 10120 break; 10121 case Intrinsic::x86_ssse3_phsub_w_128: 10122 case Intrinsic::x86_ssse3_phsub_d_128: 10123 case Intrinsic::x86_avx2_phsub_w: 10124 case Intrinsic::x86_avx2_phsub_d: 10125 Opcode = X86ISD::HSUB; 10126 break; 10127 } 10128 return DAG.getNode(Opcode, dl, Op.getValueType(), 10129 Op.getOperand(1), Op.getOperand(2)); 10130 } 10131 10132 // AVX2 variable shift intrinsics 10133 case Intrinsic::x86_avx2_psllv_d: 10134 case Intrinsic::x86_avx2_psllv_q: 10135 case Intrinsic::x86_avx2_psllv_d_256: 10136 case Intrinsic::x86_avx2_psllv_q_256: 10137 case Intrinsic::x86_avx2_psrlv_d: 10138 case Intrinsic::x86_avx2_psrlv_q: 10139 case Intrinsic::x86_avx2_psrlv_d_256: 10140 case Intrinsic::x86_avx2_psrlv_q_256: 10141 case Intrinsic::x86_avx2_psrav_d: 10142 case Intrinsic::x86_avx2_psrav_d_256: { 10143 unsigned Opcode; 10144 switch (IntNo) { 10145 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
10146 case Intrinsic::x86_avx2_psllv_d: 10147 case Intrinsic::x86_avx2_psllv_q: 10148 case Intrinsic::x86_avx2_psllv_d_256: 10149 case Intrinsic::x86_avx2_psllv_q_256: 10150 Opcode = ISD::SHL; 10151 break; 10152 case Intrinsic::x86_avx2_psrlv_d: 10153 case Intrinsic::x86_avx2_psrlv_q: 10154 case Intrinsic::x86_avx2_psrlv_d_256: 10155 case Intrinsic::x86_avx2_psrlv_q_256: 10156 Opcode = ISD::SRL; 10157 break; 10158 case Intrinsic::x86_avx2_psrav_d: 10159 case Intrinsic::x86_avx2_psrav_d_256: 10160 Opcode = ISD::SRA; 10161 break; 10162 } 10163 return DAG.getNode(Opcode, dl, Op.getValueType(), 10164 Op.getOperand(1), Op.getOperand(2)); 10165 } 10166 10167 case Intrinsic::x86_ssse3_pshuf_b_128: 10168 case Intrinsic::x86_avx2_pshuf_b: 10169 return DAG.getNode(X86ISD::PSHUFB, dl, Op.getValueType(), 10170 Op.getOperand(1), Op.getOperand(2)); 10171 10172 case Intrinsic::x86_ssse3_psign_b_128: 10173 case Intrinsic::x86_ssse3_psign_w_128: 10174 case Intrinsic::x86_ssse3_psign_d_128: 10175 case Intrinsic::x86_avx2_psign_b: 10176 case Intrinsic::x86_avx2_psign_w: 10177 case Intrinsic::x86_avx2_psign_d: 10178 return DAG.getNode(X86ISD::PSIGN, dl, Op.getValueType(), 10179 Op.getOperand(1), Op.getOperand(2)); 10180 10181 case Intrinsic::x86_sse41_insertps: 10182 return DAG.getNode(X86ISD::INSERTPS, dl, Op.getValueType(), 10183 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 10184 10185 case Intrinsic::x86_avx_vperm2f128_ps_256: 10186 case Intrinsic::x86_avx_vperm2f128_pd_256: 10187 case Intrinsic::x86_avx_vperm2f128_si_256: 10188 case Intrinsic::x86_avx2_vperm2i128: 10189 return DAG.getNode(X86ISD::VPERM2X128, dl, Op.getValueType(), 10190 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 10191 10192 case Intrinsic::x86_avx2_permd: 10193 case Intrinsic::x86_avx2_permps: 10194 // Operands intentionally swapped. Mask is last operand to intrinsic, 10195 // but second operand for node/intruction. 10196 return DAG.getNode(X86ISD::VPERMV, dl, Op.getValueType(), 10197 Op.getOperand(2), Op.getOperand(1)); 10198 10199 // ptest and testp intrinsics. The intrinsic these come from are designed to 10200 // return an integer value, not just an instruction so lower it to the ptest 10201 // or testp pattern and a setcc for the result. 
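// ptestz tests ZF (COND_E), ptestc tests CF (COND_B), and ptestnzc tests
// "ZF == 0 and CF == 0" (COND_A); the vtest* forms use X86ISD::TESTP
// rather than X86ISD::PTEST.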
10202 case Intrinsic::x86_sse41_ptestz: 10203 case Intrinsic::x86_sse41_ptestc: 10204 case Intrinsic::x86_sse41_ptestnzc: 10205 case Intrinsic::x86_avx_ptestz_256: 10206 case Intrinsic::x86_avx_ptestc_256: 10207 case Intrinsic::x86_avx_ptestnzc_256: 10208 case Intrinsic::x86_avx_vtestz_ps: 10209 case Intrinsic::x86_avx_vtestc_ps: 10210 case Intrinsic::x86_avx_vtestnzc_ps: 10211 case Intrinsic::x86_avx_vtestz_pd: 10212 case Intrinsic::x86_avx_vtestc_pd: 10213 case Intrinsic::x86_avx_vtestnzc_pd: 10214 case Intrinsic::x86_avx_vtestz_ps_256: 10215 case Intrinsic::x86_avx_vtestc_ps_256: 10216 case Intrinsic::x86_avx_vtestnzc_ps_256: 10217 case Intrinsic::x86_avx_vtestz_pd_256: 10218 case Intrinsic::x86_avx_vtestc_pd_256: 10219 case Intrinsic::x86_avx_vtestnzc_pd_256: { 10220 bool IsTestPacked = false; 10221 unsigned X86CC; 10222 switch (IntNo) { 10223 default: llvm_unreachable("Bad fallthrough in Intrinsic lowering."); 10224 case Intrinsic::x86_avx_vtestz_ps: 10225 case Intrinsic::x86_avx_vtestz_pd: 10226 case Intrinsic::x86_avx_vtestz_ps_256: 10227 case Intrinsic::x86_avx_vtestz_pd_256: 10228 IsTestPacked = true; // Fallthrough 10229 case Intrinsic::x86_sse41_ptestz: 10230 case Intrinsic::x86_avx_ptestz_256: 10231 // ZF = 1 10232 X86CC = X86::COND_E; 10233 break; 10234 case Intrinsic::x86_avx_vtestc_ps: 10235 case Intrinsic::x86_avx_vtestc_pd: 10236 case Intrinsic::x86_avx_vtestc_ps_256: 10237 case Intrinsic::x86_avx_vtestc_pd_256: 10238 IsTestPacked = true; // Fallthrough 10239 case Intrinsic::x86_sse41_ptestc: 10240 case Intrinsic::x86_avx_ptestc_256: 10241 // CF = 1 10242 X86CC = X86::COND_B; 10243 break; 10244 case Intrinsic::x86_avx_vtestnzc_ps: 10245 case Intrinsic::x86_avx_vtestnzc_pd: 10246 case Intrinsic::x86_avx_vtestnzc_ps_256: 10247 case Intrinsic::x86_avx_vtestnzc_pd_256: 10248 IsTestPacked = true; // Fallthrough 10249 case Intrinsic::x86_sse41_ptestnzc: 10250 case Intrinsic::x86_avx_ptestnzc_256: 10251 // ZF and CF = 0 10252 X86CC = X86::COND_A; 10253 break; 10254 } 10255 10256 SDValue LHS = Op.getOperand(1); 10257 SDValue RHS = Op.getOperand(2); 10258 unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST; 10259 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS); 10260 SDValue CC = DAG.getConstant(X86CC, MVT::i8); 10261 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test); 10262 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); 10263 } 10264 10265 // SSE/AVX shift intrinsics 10266 case Intrinsic::x86_sse2_psll_w: 10267 case Intrinsic::x86_sse2_psll_d: 10268 case Intrinsic::x86_sse2_psll_q: 10269 case Intrinsic::x86_avx2_psll_w: 10270 case Intrinsic::x86_avx2_psll_d: 10271 case Intrinsic::x86_avx2_psll_q: 10272 case Intrinsic::x86_sse2_psrl_w: 10273 case Intrinsic::x86_sse2_psrl_d: 10274 case Intrinsic::x86_sse2_psrl_q: 10275 case Intrinsic::x86_avx2_psrl_w: 10276 case Intrinsic::x86_avx2_psrl_d: 10277 case Intrinsic::x86_avx2_psrl_q: 10278 case Intrinsic::x86_sse2_psra_w: 10279 case Intrinsic::x86_sse2_psra_d: 10280 case Intrinsic::x86_avx2_psra_w: 10281 case Intrinsic::x86_avx2_psra_d: { 10282 unsigned Opcode; 10283 switch (IntNo) { 10284 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
10285 case Intrinsic::x86_sse2_psll_w: 10286 case Intrinsic::x86_sse2_psll_d: 10287 case Intrinsic::x86_sse2_psll_q: 10288 case Intrinsic::x86_avx2_psll_w: 10289 case Intrinsic::x86_avx2_psll_d: 10290 case Intrinsic::x86_avx2_psll_q: 10291 Opcode = X86ISD::VSHL; 10292 break; 10293 case Intrinsic::x86_sse2_psrl_w: 10294 case Intrinsic::x86_sse2_psrl_d: 10295 case Intrinsic::x86_sse2_psrl_q: 10296 case Intrinsic::x86_avx2_psrl_w: 10297 case Intrinsic::x86_avx2_psrl_d: 10298 case Intrinsic::x86_avx2_psrl_q: 10299 Opcode = X86ISD::VSRL; 10300 break; 10301 case Intrinsic::x86_sse2_psra_w: 10302 case Intrinsic::x86_sse2_psra_d: 10303 case Intrinsic::x86_avx2_psra_w: 10304 case Intrinsic::x86_avx2_psra_d: 10305 Opcode = X86ISD::VSRA; 10306 break; 10307 } 10308 return DAG.getNode(Opcode, dl, Op.getValueType(), 10309 Op.getOperand(1), Op.getOperand(2)); 10310 } 10311 10312 // SSE/AVX immediate shift intrinsics 10313 case Intrinsic::x86_sse2_pslli_w: 10314 case Intrinsic::x86_sse2_pslli_d: 10315 case Intrinsic::x86_sse2_pslli_q: 10316 case Intrinsic::x86_avx2_pslli_w: 10317 case Intrinsic::x86_avx2_pslli_d: 10318 case Intrinsic::x86_avx2_pslli_q: 10319 case Intrinsic::x86_sse2_psrli_w: 10320 case Intrinsic::x86_sse2_psrli_d: 10321 case Intrinsic::x86_sse2_psrli_q: 10322 case Intrinsic::x86_avx2_psrli_w: 10323 case Intrinsic::x86_avx2_psrli_d: 10324 case Intrinsic::x86_avx2_psrli_q: 10325 case Intrinsic::x86_sse2_psrai_w: 10326 case Intrinsic::x86_sse2_psrai_d: 10327 case Intrinsic::x86_avx2_psrai_w: 10328 case Intrinsic::x86_avx2_psrai_d: { 10329 unsigned Opcode; 10330 switch (IntNo) { 10331 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 10332 case Intrinsic::x86_sse2_pslli_w: 10333 case Intrinsic::x86_sse2_pslli_d: 10334 case Intrinsic::x86_sse2_pslli_q: 10335 case Intrinsic::x86_avx2_pslli_w: 10336 case Intrinsic::x86_avx2_pslli_d: 10337 case Intrinsic::x86_avx2_pslli_q: 10338 Opcode = X86ISD::VSHLI; 10339 break; 10340 case Intrinsic::x86_sse2_psrli_w: 10341 case Intrinsic::x86_sse2_psrli_d: 10342 case Intrinsic::x86_sse2_psrli_q: 10343 case Intrinsic::x86_avx2_psrli_w: 10344 case Intrinsic::x86_avx2_psrli_d: 10345 case Intrinsic::x86_avx2_psrli_q: 10346 Opcode = X86ISD::VSRLI; 10347 break; 10348 case Intrinsic::x86_sse2_psrai_w: 10349 case Intrinsic::x86_sse2_psrai_d: 10350 case Intrinsic::x86_avx2_psrai_w: 10351 case Intrinsic::x86_avx2_psrai_d: 10352 Opcode = X86ISD::VSRAI; 10353 break; 10354 } 10355 return getTargetVShiftNode(Opcode, dl, Op.getValueType(), 10356 Op.getOperand(1), Op.getOperand(2), DAG); 10357 } 10358 10359 case Intrinsic::x86_sse42_pcmpistria128: 10360 case Intrinsic::x86_sse42_pcmpestria128: 10361 case Intrinsic::x86_sse42_pcmpistric128: 10362 case Intrinsic::x86_sse42_pcmpestric128: 10363 case Intrinsic::x86_sse42_pcmpistrio128: 10364 case Intrinsic::x86_sse42_pcmpestrio128: 10365 case Intrinsic::x86_sse42_pcmpistris128: 10366 case Intrinsic::x86_sse42_pcmpestris128: 10367 case Intrinsic::x86_sse42_pcmpistriz128: 10368 case Intrinsic::x86_sse42_pcmpestriz128: { 10369 unsigned Opcode; 10370 unsigned X86CC; 10371 switch (IntNo) { 10372 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
10373 case Intrinsic::x86_sse42_pcmpistria128: 10374 Opcode = X86ISD::PCMPISTRI; 10375 X86CC = X86::COND_A; 10376 break; 10377 case Intrinsic::x86_sse42_pcmpestria128: 10378 Opcode = X86ISD::PCMPESTRI; 10379 X86CC = X86::COND_A; 10380 break; 10381 case Intrinsic::x86_sse42_pcmpistric128: 10382 Opcode = X86ISD::PCMPISTRI; 10383 X86CC = X86::COND_B; 10384 break; 10385 case Intrinsic::x86_sse42_pcmpestric128: 10386 Opcode = X86ISD::PCMPESTRI; 10387 X86CC = X86::COND_B; 10388 break; 10389 case Intrinsic::x86_sse42_pcmpistrio128: 10390 Opcode = X86ISD::PCMPISTRI; 10391 X86CC = X86::COND_O; 10392 break; 10393 case Intrinsic::x86_sse42_pcmpestrio128: 10394 Opcode = X86ISD::PCMPESTRI; 10395 X86CC = X86::COND_O; 10396 break; 10397 case Intrinsic::x86_sse42_pcmpistris128: 10398 Opcode = X86ISD::PCMPISTRI; 10399 X86CC = X86::COND_S; 10400 break; 10401 case Intrinsic::x86_sse42_pcmpestris128: 10402 Opcode = X86ISD::PCMPESTRI; 10403 X86CC = X86::COND_S; 10404 break; 10405 case Intrinsic::x86_sse42_pcmpistriz128: 10406 Opcode = X86ISD::PCMPISTRI; 10407 X86CC = X86::COND_E; 10408 break; 10409 case Intrinsic::x86_sse42_pcmpestriz128: 10410 Opcode = X86ISD::PCMPESTRI; 10411 X86CC = X86::COND_E; 10412 break; 10413 } 10414 SmallVector<SDValue, 5> NewOps; 10415 NewOps.append(Op->op_begin()+1, Op->op_end()); 10416 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); 10417 SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps.data(), NewOps.size()); 10418 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 10419 DAG.getConstant(X86CC, MVT::i8), 10420 SDValue(PCMP.getNode(), 1)); 10421 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); 10422 } 10423 10424 case Intrinsic::x86_sse42_pcmpistri128: 10425 case Intrinsic::x86_sse42_pcmpestri128: { 10426 unsigned Opcode; 10427 if (IntNo == Intrinsic::x86_sse42_pcmpistri128) 10428 Opcode = X86ISD::PCMPISTRI; 10429 else 10430 Opcode = X86ISD::PCMPESTRI; 10431 10432 SmallVector<SDValue, 5> NewOps; 10433 NewOps.append(Op->op_begin()+1, Op->op_end()); 10434 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); 10435 return DAG.getNode(Opcode, dl, VTs, NewOps.data(), NewOps.size()); 10436 } 10437 case Intrinsic::x86_fma_vfmadd_ps: 10438 case Intrinsic::x86_fma_vfmadd_pd: 10439 case Intrinsic::x86_fma_vfmsub_ps: 10440 case Intrinsic::x86_fma_vfmsub_pd: 10441 case Intrinsic::x86_fma_vfnmadd_ps: 10442 case Intrinsic::x86_fma_vfnmadd_pd: 10443 case Intrinsic::x86_fma_vfnmsub_ps: 10444 case Intrinsic::x86_fma_vfnmsub_pd: 10445 case Intrinsic::x86_fma_vfmaddsub_ps: 10446 case Intrinsic::x86_fma_vfmaddsub_pd: 10447 case Intrinsic::x86_fma_vfmsubadd_ps: 10448 case Intrinsic::x86_fma_vfmsubadd_pd: 10449 case Intrinsic::x86_fma_vfmadd_ps_256: 10450 case Intrinsic::x86_fma_vfmadd_pd_256: 10451 case Intrinsic::x86_fma_vfmsub_ps_256: 10452 case Intrinsic::x86_fma_vfmsub_pd_256: 10453 case Intrinsic::x86_fma_vfnmadd_ps_256: 10454 case Intrinsic::x86_fma_vfnmadd_pd_256: 10455 case Intrinsic::x86_fma_vfnmsub_ps_256: 10456 case Intrinsic::x86_fma_vfnmsub_pd_256: 10457 case Intrinsic::x86_fma_vfmaddsub_ps_256: 10458 case Intrinsic::x86_fma_vfmaddsub_pd_256: 10459 case Intrinsic::x86_fma_vfmsubadd_ps_256: 10460 case Intrinsic::x86_fma_vfmsubadd_pd_256: { 10461 unsigned Opc; 10462 switch (IntNo) { 10463 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
10464 case Intrinsic::x86_fma_vfmadd_ps: 10465 case Intrinsic::x86_fma_vfmadd_pd: 10466 case Intrinsic::x86_fma_vfmadd_ps_256: 10467 case Intrinsic::x86_fma_vfmadd_pd_256: 10468 Opc = X86ISD::FMADD; 10469 break; 10470 case Intrinsic::x86_fma_vfmsub_ps: 10471 case Intrinsic::x86_fma_vfmsub_pd: 10472 case Intrinsic::x86_fma_vfmsub_ps_256: 10473 case Intrinsic::x86_fma_vfmsub_pd_256: 10474 Opc = X86ISD::FMSUB; 10475 break; 10476 case Intrinsic::x86_fma_vfnmadd_ps: 10477 case Intrinsic::x86_fma_vfnmadd_pd: 10478 case Intrinsic::x86_fma_vfnmadd_ps_256: 10479 case Intrinsic::x86_fma_vfnmadd_pd_256: 10480 Opc = X86ISD::FNMADD; 10481 break; 10482 case Intrinsic::x86_fma_vfnmsub_ps: 10483 case Intrinsic::x86_fma_vfnmsub_pd: 10484 case Intrinsic::x86_fma_vfnmsub_ps_256: 10485 case Intrinsic::x86_fma_vfnmsub_pd_256: 10486 Opc = X86ISD::FNMSUB; 10487 break; 10488 case Intrinsic::x86_fma_vfmaddsub_ps: 10489 case Intrinsic::x86_fma_vfmaddsub_pd: 10490 case Intrinsic::x86_fma_vfmaddsub_ps_256: 10491 case Intrinsic::x86_fma_vfmaddsub_pd_256: 10492 Opc = X86ISD::FMADDSUB; 10493 break; 10494 case Intrinsic::x86_fma_vfmsubadd_ps: 10495 case Intrinsic::x86_fma_vfmsubadd_pd: 10496 case Intrinsic::x86_fma_vfmsubadd_ps_256: 10497 case Intrinsic::x86_fma_vfmsubadd_pd_256: 10498 Opc = X86ISD::FMSUBADD; 10499 break; 10500 } 10501 10502 return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1), 10503 Op.getOperand(2), Op.getOperand(3)); 10504 } 10505 } 10506} 10507 10508static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) { 10509 DebugLoc dl = Op.getDebugLoc(); 10510 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 10511 switch (IntNo) { 10512 default: return SDValue(); // Don't custom lower most intrinsics. 10513 10514 // RDRAND intrinsics. 10515 case Intrinsic::x86_rdrand_16: 10516 case Intrinsic::x86_rdrand_32: 10517 case Intrinsic::x86_rdrand_64: { 10518 // Emit the node with the right value type. 10519 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Glue, MVT::Other); 10520 SDValue Result = DAG.getNode(X86ISD::RDRAND, dl, VTs, Op.getOperand(0)); 10521 10522 // If the value returned by RDRAND was valid (CF=1), return 1. Otherwise 10523 // return the value from Rand, which is always 0, casted to i32. 10524 SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)), 10525 DAG.getConstant(1, Op->getValueType(1)), 10526 DAG.getConstant(X86::COND_B, MVT::i32), 10527 SDValue(Result.getNode(), 1) }; 10528 SDValue isValid = DAG.getNode(X86ISD::CMOV, dl, 10529 DAG.getVTList(Op->getValueType(1), MVT::Glue), 10530 Ops, 4); 10531 10532 // Return { result, isValid, chain }. 10533 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid, 10534 SDValue(Result.getNode(), 2)); 10535 } 10536 } 10537} 10538 10539SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op, 10540 SelectionDAG &DAG) const { 10541 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 10542 MFI->setReturnAddressIsTaken(true); 10543 10544 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 10545 DebugLoc dl = Op.getDebugLoc(); 10546 EVT PtrVT = getPointerTy(); 10547 10548 if (Depth > 0) { 10549 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 10550 SDValue Offset = 10551 DAG.getConstant(RegInfo->getSlotSize(), PtrVT); 10552 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 10553 DAG.getNode(ISD::ADD, dl, PtrVT, 10554 FrameAddr, Offset), 10555 MachinePointerInfo(), false, false, false, 0); 10556 } 10557 10558 // Just load the return address. 
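// Depth == 0: the slot created by getReturnAddressFrameIndex holds the
// return address of the current function.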
10559 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG); 10560 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 10561 RetAddrFI, MachinePointerInfo(), false, false, false, 0); 10562} 10563 10564SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { 10565 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 10566 MFI->setFrameAddressIsTaken(true); 10567 10568 EVT VT = Op.getValueType(); 10569 DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful 10570 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 10571 unsigned FrameReg = Subtarget->is64Bit() ? X86::RBP : X86::EBP; 10572 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 10573 while (Depth--) 10574 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 10575 MachinePointerInfo(), 10576 false, false, false, 0); 10577 return FrameAddr; 10578} 10579 10580SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op, 10581 SelectionDAG &DAG) const { 10582 return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize()); 10583} 10584 10585SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { 10586 SDValue Chain = Op.getOperand(0); 10587 SDValue Offset = Op.getOperand(1); 10588 SDValue Handler = Op.getOperand(2); 10589 DebugLoc dl = Op.getDebugLoc(); 10590 10591 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, 10592 Subtarget->is64Bit() ? X86::RBP : X86::EBP, 10593 getPointerTy()); 10594 unsigned StoreAddrReg = (Subtarget->is64Bit() ? X86::RCX : X86::ECX); 10595 10596 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Frame, 10597 DAG.getIntPtrConstant(RegInfo->getSlotSize())); 10598 StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StoreAddr, Offset); 10599 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(), 10600 false, false, 0); 10601 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr); 10602 10603 return DAG.getNode(X86ISD::EH_RETURN, dl, 10604 MVT::Other, 10605 Chain, DAG.getRegister(StoreAddrReg, getPointerTy())); 10606} 10607 10608SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op, 10609 SelectionDAG &DAG) const { 10610 DebugLoc DL = Op.getDebugLoc(); 10611 return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL, 10612 DAG.getVTList(MVT::i32, MVT::Other), 10613 Op.getOperand(0), Op.getOperand(1)); 10614} 10615 10616SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op, 10617 SelectionDAG &DAG) const { 10618 DebugLoc DL = Op.getDebugLoc(); 10619 return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other, 10620 Op.getOperand(0), Op.getOperand(1)); 10621} 10622 10623static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) { 10624 return Op.getOperand(0); 10625} 10626 10627SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, 10628 SelectionDAG &DAG) const { 10629 SDValue Root = Op.getOperand(0); 10630 SDValue Trmp = Op.getOperand(1); // trampoline 10631 SDValue FPtr = Op.getOperand(2); // nested function 10632 SDValue Nest = Op.getOperand(3); // 'nest' parameter value 10633 DebugLoc dl = Op.getDebugLoc(); 10634 10635 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 10636 const TargetRegisterInfo* TRI = getTargetMachine().getRegisterInfo(); 10637 10638 if (Subtarget->is64Bit()) { 10639 SDValue OutChains[6]; 10640 10641 // Large code-model. 10642 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode. 10643 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode. 
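// Laid out in memory (little-endian), the 23-byte trampoline written below
// looks roughly like:
//    0: 49 BB <FPtr>     movabsq $<nested function>, %r11
//   10: 49 BA <Nest>     movabsq $<nest value>,      %r10
//   20: 49 FF E3         jmpq    *%r11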
10644 10645 const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7; 10646 const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7; 10647 10648 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix 10649 10650 // Load the pointer to the nested function into R11. 10651 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11 10652 SDValue Addr = Trmp; 10653 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), 10654 Addr, MachinePointerInfo(TrmpAddr), 10655 false, false, 0); 10656 10657 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 10658 DAG.getConstant(2, MVT::i64)); 10659 OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr, 10660 MachinePointerInfo(TrmpAddr, 2), 10661 false, false, 2); 10662 10663 // Load the 'nest' parameter value into R10. 10664 // R10 is specified in X86CallingConv.td 10665 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10 10666 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 10667 DAG.getConstant(10, MVT::i64)); 10668 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), 10669 Addr, MachinePointerInfo(TrmpAddr, 10), 10670 false, false, 0); 10671 10672 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 10673 DAG.getConstant(12, MVT::i64)); 10674 OutChains[3] = DAG.getStore(Root, dl, Nest, Addr, 10675 MachinePointerInfo(TrmpAddr, 12), 10676 false, false, 2); 10677 10678 // Jump to the nested function. 10679 OpCode = (JMP64r << 8) | REX_WB; // jmpq *... 10680 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 10681 DAG.getConstant(20, MVT::i64)); 10682 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), 10683 Addr, MachinePointerInfo(TrmpAddr, 20), 10684 false, false, 0); 10685 10686 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11 10687 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 10688 DAG.getConstant(22, MVT::i64)); 10689 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr, 10690 MachinePointerInfo(TrmpAddr, 22), 10691 false, false, 0); 10692 10693 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 6); 10694 } else { 10695 const Function *Func = 10696 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue()); 10697 CallingConv::ID CC = Func->getCallingConv(); 10698 unsigned NestReg; 10699 10700 switch (CC) { 10701 default: 10702 llvm_unreachable("Unsupported calling convention"); 10703 case CallingConv::C: 10704 case CallingConv::X86_StdCall: { 10705 // Pass 'nest' parameter in ECX. 10706 // Must be kept in sync with X86CallingConv.td 10707 NestReg = X86::ECX; 10708 10709 // Check that ECX wasn't needed by an 'inreg' parameter. 10710 FunctionType *FTy = Func->getFunctionType(); 10711 const AttrListPtr &Attrs = Func->getAttributes(); 10712 10713 if (!Attrs.isEmpty() && !Func->isVarArg()) { 10714 unsigned InRegCount = 0; 10715 unsigned Idx = 1; 10716 10717 for (FunctionType::param_iterator I = FTy->param_begin(), 10718 E = FTy->param_end(); I != E; ++I, ++Idx) 10719 if (Attrs.getParamAttributes(Idx).hasAttribute(Attributes::InReg)) 10720 // FIXME: should only count parameters that are lowered to integers. 10721 InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32; 10722 10723 if (InRegCount > 2) { 10724 report_fatal_error("Nest register in use - reduce number of inreg" 10725 " parameters!"); 10726 } 10727 } 10728 break; 10729 } 10730 case CallingConv::X86_FastCall: 10731 case CallingConv::X86_ThisCall: 10732 case CallingConv::Fast: 10733 // Pass 'nest' parameter in EAX. 
10734 // Must be kept in sync with X86CallingConv.td 10735 NestReg = X86::EAX; 10736 break; 10737 } 10738 10739 SDValue OutChains[4]; 10740 SDValue Addr, Disp; 10741 10742 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 10743 DAG.getConstant(10, MVT::i32)); 10744 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr); 10745 10746 // This is storing the opcode for MOV32ri. 10747 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte. 10748 const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7; 10749 OutChains[0] = DAG.getStore(Root, dl, 10750 DAG.getConstant(MOV32ri|N86Reg, MVT::i8), 10751 Trmp, MachinePointerInfo(TrmpAddr), 10752 false, false, 0); 10753 10754 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 10755 DAG.getConstant(1, MVT::i32)); 10756 OutChains[1] = DAG.getStore(Root, dl, Nest, Addr, 10757 MachinePointerInfo(TrmpAddr, 1), 10758 false, false, 1); 10759 10760 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode. 10761 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 10762 DAG.getConstant(5, MVT::i32)); 10763 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr, 10764 MachinePointerInfo(TrmpAddr, 5), 10765 false, false, 1); 10766 10767 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 10768 DAG.getConstant(6, MVT::i32)); 10769 OutChains[3] = DAG.getStore(Root, dl, Disp, Addr, 10770 MachinePointerInfo(TrmpAddr, 6), 10771 false, false, 1); 10772 10773 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 4); 10774 } 10775} 10776 10777SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op, 10778 SelectionDAG &DAG) const { 10779 /* 10780 The rounding mode is in bits 11:10 of FPSR, and has the following 10781 settings: 10782 00 Round to nearest 10783 01 Round to -inf 10784 10 Round to +inf 10785 11 Round to 0 10786 10787 FLT_ROUNDS, on the other hand, expects the following: 10788 -1 Undefined 10789 0 Round to 0 10790 1 Round to nearest 10791 2 Round to +inf 10792 3 Round to -inf 10793 10794 To perform the conversion, we do: 10795 (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3) 10796 */ 10797 10798 MachineFunction &MF = DAG.getMachineFunction(); 10799 const TargetMachine &TM = MF.getTarget(); 10800 const TargetFrameLowering &TFI = *TM.getFrameLowering(); 10801 unsigned StackAlignment = TFI.getStackAlignment(); 10802 EVT VT = Op.getValueType(); 10803 DebugLoc DL = Op.getDebugLoc(); 10804 10805 // Save FP Control Word to stack slot 10806 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false); 10807 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 10808 10809 10810 MachineMemOperand *MMO = 10811 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 10812 MachineMemOperand::MOStore, 2, 2); 10813 10814 SDValue Ops[] = { DAG.getEntryNode(), StackSlot }; 10815 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL, 10816 DAG.getVTList(MVT::Other), 10817 Ops, 2, MVT::i16, MMO); 10818 10819 // Load FP Control Word from stack slot 10820 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot, 10821 MachinePointerInfo(), false, false, false, 0); 10822 10823 // Transform as necessary 10824 SDValue CWD1 = 10825 DAG.getNode(ISD::SRL, DL, MVT::i16, 10826 DAG.getNode(ISD::AND, DL, MVT::i16, 10827 CWD, DAG.getConstant(0x800, MVT::i16)), 10828 DAG.getConstant(11, MVT::i8)); 10829 SDValue CWD2 = 10830 DAG.getNode(ISD::SRL, DL, MVT::i16, 10831 DAG.getNode(ISD::AND, DL, MVT::i16, 10832 CWD, DAG.getConstant(0x400, MVT::i16)), 10833 DAG.getConstant(9, MVT::i8)); 10834 10835 SDValue 
RetVal = 10836 DAG.getNode(ISD::AND, DL, MVT::i16, 10837 DAG.getNode(ISD::ADD, DL, MVT::i16, 10838 DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2), 10839 DAG.getConstant(1, MVT::i16)), 10840 DAG.getConstant(3, MVT::i16)); 10841 10842 10843 return DAG.getNode((VT.getSizeInBits() < 16 ? 10844 ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal); 10845} 10846 10847static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) { 10848 EVT VT = Op.getValueType(); 10849 EVT OpVT = VT; 10850 unsigned NumBits = VT.getSizeInBits(); 10851 DebugLoc dl = Op.getDebugLoc(); 10852 10853 Op = Op.getOperand(0); 10854 if (VT == MVT::i8) { 10855 // Zero extend to i32 since there is not an i8 bsr. 10856 OpVT = MVT::i32; 10857 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op); 10858 } 10859 10860 // Issue a bsr (scan bits in reverse) which also sets EFLAGS. 10861 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 10862 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op); 10863 10864 // If src is zero (i.e. bsr sets ZF), returns NumBits. 10865 SDValue Ops[] = { 10866 Op, 10867 DAG.getConstant(NumBits+NumBits-1, OpVT), 10868 DAG.getConstant(X86::COND_E, MVT::i8), 10869 Op.getValue(1) 10870 }; 10871 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops, array_lengthof(Ops)); 10872 10873 // Finally xor with NumBits-1. 10874 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT)); 10875 10876 if (VT == MVT::i8) 10877 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op); 10878 return Op; 10879} 10880 10881static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) { 10882 EVT VT = Op.getValueType(); 10883 EVT OpVT = VT; 10884 unsigned NumBits = VT.getSizeInBits(); 10885 DebugLoc dl = Op.getDebugLoc(); 10886 10887 Op = Op.getOperand(0); 10888 if (VT == MVT::i8) { 10889 // Zero extend to i32 since there is not an i8 bsr. 10890 OpVT = MVT::i32; 10891 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op); 10892 } 10893 10894 // Issue a bsr (scan bits in reverse). 10895 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 10896 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op); 10897 10898 // And xor with NumBits-1. 10899 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT)); 10900 10901 if (VT == MVT::i8) 10902 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op); 10903 return Op; 10904} 10905 10906static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) { 10907 EVT VT = Op.getValueType(); 10908 unsigned NumBits = VT.getSizeInBits(); 10909 DebugLoc dl = Op.getDebugLoc(); 10910 Op = Op.getOperand(0); 10911 10912 // Issue a bsf (scan bits forward) which also sets EFLAGS. 10913 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 10914 Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op); 10915 10916 // If src is zero (i.e. bsf sets ZF), returns NumBits. 10917 SDValue Ops[] = { 10918 Op, 10919 DAG.getConstant(NumBits, VT), 10920 DAG.getConstant(X86::COND_E, MVT::i8), 10921 Op.getValue(1) 10922 }; 10923 return DAG.getNode(X86ISD::CMOV, dl, VT, Ops, array_lengthof(Ops)); 10924} 10925 10926// Lower256IntArith - Break a 256-bit integer operation into two new 128-bit 10927// ones, and then concatenate the result back. 
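// For example, a v8i32 ISD::ADD becomes two v4i32 adds, one on each 128-bit
// half of the operands, and the two results are rejoined with CONCAT_VECTORS.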
10928static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) { 10929 EVT VT = Op.getValueType(); 10930 10931 assert(VT.is256BitVector() && VT.isInteger() && 10932 "Unsupported value type for operation"); 10933 10934 unsigned NumElems = VT.getVectorNumElements(); 10935 DebugLoc dl = Op.getDebugLoc(); 10936 10937 // Extract the LHS vectors 10938 SDValue LHS = Op.getOperand(0); 10939 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl); 10940 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl); 10941 10942 // Extract the RHS vectors 10943 SDValue RHS = Op.getOperand(1); 10944 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl); 10945 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl); 10946 10947 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 10948 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); 10949 10950 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, 10951 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1), 10952 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2)); 10953} 10954 10955static SDValue LowerADD(SDValue Op, SelectionDAG &DAG) { 10956 assert(Op.getValueType().is256BitVector() && 10957 Op.getValueType().isInteger() && 10958 "Only handle AVX 256-bit vector integer operation"); 10959 return Lower256IntArith(Op, DAG); 10960} 10961 10962static SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) { 10963 assert(Op.getValueType().is256BitVector() && 10964 Op.getValueType().isInteger() && 10965 "Only handle AVX 256-bit vector integer operation"); 10966 return Lower256IntArith(Op, DAG); 10967} 10968 10969static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget, 10970 SelectionDAG &DAG) { 10971 EVT VT = Op.getValueType(); 10972 10973 // Decompose 256-bit ops into smaller 128-bit ops. 10974 if (VT.is256BitVector() && !Subtarget->hasAVX2()) 10975 return Lower256IntArith(Op, DAG); 10976 10977 assert((VT == MVT::v2i64 || VT == MVT::v4i64) && 10978 "Only know how to lower V2I64/V4I64 multiply"); 10979 10980 DebugLoc dl = Op.getDebugLoc(); 10981 10982 // Ahi = psrlqi(a, 32); 10983 // Bhi = psrlqi(b, 32); 10984 // 10985 // AloBlo = pmuludq(a, b); 10986 // AloBhi = pmuludq(a, Bhi); 10987 // AhiBlo = pmuludq(Ahi, b); 10988 10989 // AloBhi = psllqi(AloBhi, 32); 10990 // AhiBlo = psllqi(AhiBlo, 32); 10991 // return AloBlo + AloBhi + AhiBlo; 10992 10993 SDValue A = Op.getOperand(0); 10994 SDValue B = Op.getOperand(1); 10995 10996 SDValue ShAmt = DAG.getConstant(32, MVT::i32); 10997 10998 SDValue Ahi = DAG.getNode(X86ISD::VSRLI, dl, VT, A, ShAmt); 10999 SDValue Bhi = DAG.getNode(X86ISD::VSRLI, dl, VT, B, ShAmt); 11000 11001 // Bit cast to 32-bit vectors for MULUDQ 11002 EVT MulVT = (VT == MVT::v2i64) ? 
MVT::v4i32 : MVT::v8i32; 11003 A = DAG.getNode(ISD::BITCAST, dl, MulVT, A); 11004 B = DAG.getNode(ISD::BITCAST, dl, MulVT, B); 11005 Ahi = DAG.getNode(ISD::BITCAST, dl, MulVT, Ahi); 11006 Bhi = DAG.getNode(ISD::BITCAST, dl, MulVT, Bhi); 11007 11008 SDValue AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B); 11009 SDValue AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi); 11010 SDValue AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B); 11011 11012 AloBhi = DAG.getNode(X86ISD::VSHLI, dl, VT, AloBhi, ShAmt); 11013 AhiBlo = DAG.getNode(X86ISD::VSHLI, dl, VT, AhiBlo, ShAmt); 11014 11015 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi); 11016 return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo); 11017} 11018 11019SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const { 11020 11021 EVT VT = Op.getValueType(); 11022 DebugLoc dl = Op.getDebugLoc(); 11023 SDValue R = Op.getOperand(0); 11024 SDValue Amt = Op.getOperand(1); 11025 LLVMContext *Context = DAG.getContext(); 11026 11027 if (!Subtarget->hasSSE2()) 11028 return SDValue(); 11029 11030 // Optimize shl/srl/sra with constant shift amount. 11031 if (isSplatVector(Amt.getNode())) { 11032 SDValue SclrAmt = Amt->getOperand(0); 11033 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(SclrAmt)) { 11034 uint64_t ShiftAmt = C->getZExtValue(); 11035 11036 if (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 || 11037 (Subtarget->hasAVX2() && 11038 (VT == MVT::v4i64 || VT == MVT::v8i32 || VT == MVT::v16i16))) { 11039 if (Op.getOpcode() == ISD::SHL) 11040 return DAG.getNode(X86ISD::VSHLI, dl, VT, R, 11041 DAG.getConstant(ShiftAmt, MVT::i32)); 11042 if (Op.getOpcode() == ISD::SRL) 11043 return DAG.getNode(X86ISD::VSRLI, dl, VT, R, 11044 DAG.getConstant(ShiftAmt, MVT::i32)); 11045 if (Op.getOpcode() == ISD::SRA && VT != MVT::v2i64 && VT != MVT::v4i64) 11046 return DAG.getNode(X86ISD::VSRAI, dl, VT, R, 11047 DAG.getConstant(ShiftAmt, MVT::i32)); 11048 } 11049 11050 if (VT == MVT::v16i8) { 11051 if (Op.getOpcode() == ISD::SHL) { 11052 // Make a large shift. 11053 SDValue SHL = DAG.getNode(X86ISD::VSHLI, dl, MVT::v8i16, R, 11054 DAG.getConstant(ShiftAmt, MVT::i32)); 11055 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL); 11056 // Zero out the rightmost bits. 11057 SmallVector<SDValue, 16> V(16, 11058 DAG.getConstant(uint8_t(-1U << ShiftAmt), 11059 MVT::i8)); 11060 return DAG.getNode(ISD::AND, dl, VT, SHL, 11061 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 16)); 11062 } 11063 if (Op.getOpcode() == ISD::SRL) { 11064 // Make a large shift. 11065 SDValue SRL = DAG.getNode(X86ISD::VSRLI, dl, MVT::v8i16, R, 11066 DAG.getConstant(ShiftAmt, MVT::i32)); 11067 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL); 11068 // Zero out the leftmost bits. 
11069 SmallVector<SDValue, 16> V(16, 11070 DAG.getConstant(uint8_t(-1U) >> ShiftAmt, 11071 MVT::i8)); 11072 return DAG.getNode(ISD::AND, dl, VT, SRL, 11073 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 16)); 11074 } 11075 if (Op.getOpcode() == ISD::SRA) { 11076 if (ShiftAmt == 7) { 11077 // R s>> 7 === R s< 0 11078 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl); 11079 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R); 11080 } 11081 11082 // R s>> a === ((R u>> a) ^ m) - m 11083 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt); 11084 SmallVector<SDValue, 16> V(16, DAG.getConstant(128 >> ShiftAmt, 11085 MVT::i8)); 11086 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 16); 11087 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask); 11088 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask); 11089 return Res; 11090 } 11091 llvm_unreachable("Unknown shift opcode."); 11092 } 11093 11094 if (Subtarget->hasAVX2() && VT == MVT::v32i8) { 11095 if (Op.getOpcode() == ISD::SHL) { 11096 // Make a large shift. 11097 SDValue SHL = DAG.getNode(X86ISD::VSHLI, dl, MVT::v16i16, R, 11098 DAG.getConstant(ShiftAmt, MVT::i32)); 11099 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL); 11100 // Zero out the rightmost bits. 11101 SmallVector<SDValue, 32> V(32, 11102 DAG.getConstant(uint8_t(-1U << ShiftAmt), 11103 MVT::i8)); 11104 return DAG.getNode(ISD::AND, dl, VT, SHL, 11105 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 32)); 11106 } 11107 if (Op.getOpcode() == ISD::SRL) { 11108 // Make a large shift. 11109 SDValue SRL = DAG.getNode(X86ISD::VSRLI, dl, MVT::v16i16, R, 11110 DAG.getConstant(ShiftAmt, MVT::i32)); 11111 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL); 11112 // Zero out the leftmost bits. 11113 SmallVector<SDValue, 32> V(32, 11114 DAG.getConstant(uint8_t(-1U) >> ShiftAmt, 11115 MVT::i8)); 11116 return DAG.getNode(ISD::AND, dl, VT, SRL, 11117 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 32)); 11118 } 11119 if (Op.getOpcode() == ISD::SRA) { 11120 if (ShiftAmt == 7) { 11121 // R s>> 7 === R s< 0 11122 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl); 11123 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R); 11124 } 11125 11126 // R s>> a === ((R u>> a) ^ m) - m 11127 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt); 11128 SmallVector<SDValue, 32> V(32, DAG.getConstant(128 >> ShiftAmt, 11129 MVT::i8)); 11130 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 32); 11131 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask); 11132 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask); 11133 return Res; 11134 } 11135 llvm_unreachable("Unknown shift opcode."); 11136 } 11137 } 11138 } 11139 11140 // Lower SHL with variable shift amount. 
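  // For v4i32 the code below materializes the per-lane value (1 << amt)
  // without a variable vector shift: each amount is moved into the IEEE-754
  // exponent field (<< 23), the bias 0x3f800000 (1.0f) is added, and the
  // result is reinterpreted as float and converted back to an integer,
  // which yields exactly 2^amt.  E.g. amt = 5:
  //   (5 << 23) + 0x3f800000 = 0x42000000 = 32.0f  ->  32 == 1 << 5
  // The shift is then finished with an ordinary vector multiply.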
11141 if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) { 11142 Op = DAG.getNode(X86ISD::VSHLI, dl, VT, Op.getOperand(1), 11143 DAG.getConstant(23, MVT::i32)); 11144 11145 const uint32_t CV[] = { 0x3f800000U, 0x3f800000U, 0x3f800000U, 0x3f800000U}; 11146 Constant *C = ConstantDataVector::get(*Context, CV); 11147 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 11148 SDValue Addend = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 11149 MachinePointerInfo::getConstantPool(), 11150 false, false, false, 16); 11151 11152 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Addend); 11153 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op); 11154 Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op); 11155 return DAG.getNode(ISD::MUL, dl, VT, Op, R); 11156 } 11157 if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) { 11158 assert(Subtarget->hasSSE2() && "Need SSE2 for pslli/pcmpeq."); 11159 11160 // a = a << 5; 11161 Op = DAG.getNode(X86ISD::VSHLI, dl, MVT::v8i16, Op.getOperand(1), 11162 DAG.getConstant(5, MVT::i32)); 11163 Op = DAG.getNode(ISD::BITCAST, dl, VT, Op); 11164 11165 // Turn 'a' into a mask suitable for VSELECT 11166 SDValue VSelM = DAG.getConstant(0x80, VT); 11167 SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op); 11168 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM); 11169 11170 SDValue CM1 = DAG.getConstant(0x0f, VT); 11171 SDValue CM2 = DAG.getConstant(0x3f, VT); 11172 11173 // r = VSELECT(r, psllw(r & (char16)15, 4), a); 11174 SDValue M = DAG.getNode(ISD::AND, dl, VT, R, CM1); 11175 M = getTargetVShiftNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 11176 DAG.getConstant(4, MVT::i32), DAG); 11177 M = DAG.getNode(ISD::BITCAST, dl, VT, M); 11178 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R); 11179 11180 // a += a 11181 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op); 11182 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op); 11183 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM); 11184 11185 // r = VSELECT(r, psllw(r & (char16)63, 2), a); 11186 M = DAG.getNode(ISD::AND, dl, VT, R, CM2); 11187 M = getTargetVShiftNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 11188 DAG.getConstant(2, MVT::i32), DAG); 11189 M = DAG.getNode(ISD::BITCAST, dl, VT, M); 11190 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R); 11191 11192 // a += a 11193 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op); 11194 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op); 11195 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM); 11196 11197 // return VSELECT(r, r+r, a); 11198 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, 11199 DAG.getNode(ISD::ADD, dl, VT, R, R), R); 11200 return R; 11201 } 11202 11203 // Decompose 256-bit shifts into smaller 128-bit shifts. 
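  // Any 256-bit shift that was not matched above is split in half: the value
  // and the shift-amount vector are each cut into two 128-bit pieces, the two
  // halves are shifted independently, and the results are rejoined with
  // CONCAT_VECTORS.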
11204 if (VT.is256BitVector()) { 11205 unsigned NumElems = VT.getVectorNumElements(); 11206 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 11207 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); 11208 11209 // Extract the two vectors 11210 SDValue V1 = Extract128BitVector(R, 0, DAG, dl); 11211 SDValue V2 = Extract128BitVector(R, NumElems/2, DAG, dl); 11212 11213 // Recreate the shift amount vectors 11214 SDValue Amt1, Amt2; 11215 if (Amt.getOpcode() == ISD::BUILD_VECTOR) { 11216 // Constant shift amount 11217 SmallVector<SDValue, 4> Amt1Csts; 11218 SmallVector<SDValue, 4> Amt2Csts; 11219 for (unsigned i = 0; i != NumElems/2; ++i) 11220 Amt1Csts.push_back(Amt->getOperand(i)); 11221 for (unsigned i = NumElems/2; i != NumElems; ++i) 11222 Amt2Csts.push_back(Amt->getOperand(i)); 11223 11224 Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, 11225 &Amt1Csts[0], NumElems/2); 11226 Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, 11227 &Amt2Csts[0], NumElems/2); 11228 } else { 11229 // Variable shift amount 11230 Amt1 = Extract128BitVector(Amt, 0, DAG, dl); 11231 Amt2 = Extract128BitVector(Amt, NumElems/2, DAG, dl); 11232 } 11233 11234 // Issue new vector shifts for the smaller types 11235 V1 = DAG.getNode(Op.getOpcode(), dl, NewVT, V1, Amt1); 11236 V2 = DAG.getNode(Op.getOpcode(), dl, NewVT, V2, Amt2); 11237 11238 // Concatenate the result back 11239 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2); 11240 } 11241 11242 return SDValue(); 11243} 11244 11245static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) { 11246 // Lower the "add/sub/mul with overflow" instruction into a regular ins plus 11247 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering 11248 // looks for this combo and may remove the "setcc" instruction if the "setcc" 11249 // has only one use. 11250 SDNode *N = Op.getNode(); 11251 SDValue LHS = N->getOperand(0); 11252 SDValue RHS = N->getOperand(1); 11253 unsigned BaseOp = 0; 11254 unsigned Cond = 0; 11255 DebugLoc DL = Op.getDebugLoc(); 11256 switch (Op.getOpcode()) { 11257 default: llvm_unreachable("Unknown ovf instruction!"); 11258 case ISD::SADDO: 11259 // A subtract of one will be selected as a INC. Note that INC doesn't 11260 // set CF, so we can't do this for UADDO. 11261 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) 11262 if (C->isOne()) { 11263 BaseOp = X86ISD::INC; 11264 Cond = X86::COND_O; 11265 break; 11266 } 11267 BaseOp = X86ISD::ADD; 11268 Cond = X86::COND_O; 11269 break; 11270 case ISD::UADDO: 11271 BaseOp = X86ISD::ADD; 11272 Cond = X86::COND_B; 11273 break; 11274 case ISD::SSUBO: 11275 // A subtract of one will be selected as a DEC. Note that DEC doesn't 11276 // set CF, so we can't do this for USUBO. 
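    // (DEC still sets OF, so checking X86::COND_O below remains correct for
    // detecting signed overflow.)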
11277 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) 11278 if (C->isOne()) { 11279 BaseOp = X86ISD::DEC; 11280 Cond = X86::COND_O; 11281 break; 11282 } 11283 BaseOp = X86ISD::SUB; 11284 Cond = X86::COND_O; 11285 break; 11286 case ISD::USUBO: 11287 BaseOp = X86ISD::SUB; 11288 Cond = X86::COND_B; 11289 break; 11290 case ISD::SMULO: 11291 BaseOp = X86ISD::SMUL; 11292 Cond = X86::COND_O; 11293 break; 11294 case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs 11295 SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0), 11296 MVT::i32); 11297 SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS); 11298 11299 SDValue SetCC = 11300 DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 11301 DAG.getConstant(X86::COND_O, MVT::i32), 11302 SDValue(Sum.getNode(), 2)); 11303 11304 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC); 11305 } 11306 } 11307 11308 // Also sets EFLAGS. 11309 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32); 11310 SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS); 11311 11312 SDValue SetCC = 11313 DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1), 11314 DAG.getConstant(Cond, MVT::i32), 11315 SDValue(Sum.getNode(), 1)); 11316 11317 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC); 11318} 11319 11320SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, 11321 SelectionDAG &DAG) const { 11322 DebugLoc dl = Op.getDebugLoc(); 11323 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 11324 EVT VT = Op.getValueType(); 11325 11326 if (!Subtarget->hasSSE2() || !VT.isVector()) 11327 return SDValue(); 11328 11329 unsigned BitsDiff = VT.getScalarType().getSizeInBits() - 11330 ExtraVT.getScalarType().getSizeInBits(); 11331 SDValue ShAmt = DAG.getConstant(BitsDiff, MVT::i32); 11332 11333 switch (VT.getSimpleVT().SimpleTy) { 11334 default: return SDValue(); 11335 case MVT::v8i32: 11336 case MVT::v16i16: 11337 if (!Subtarget->hasAVX()) 11338 return SDValue(); 11339 if (!Subtarget->hasAVX2()) { 11340 // needs to be split 11341 unsigned NumElems = VT.getVectorNumElements(); 11342 11343 // Extract the LHS vectors 11344 SDValue LHS = Op.getOperand(0); 11345 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl); 11346 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl); 11347 11348 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 11349 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); 11350 11351 EVT ExtraEltVT = ExtraVT.getVectorElementType(); 11352 unsigned ExtraNumElems = ExtraVT.getVectorNumElements(); 11353 ExtraVT = EVT::getVectorVT(*DAG.getContext(), ExtraEltVT, 11354 ExtraNumElems/2); 11355 SDValue Extra = DAG.getValueType(ExtraVT); 11356 11357 LHS1 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, Extra); 11358 LHS2 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, Extra); 11359 11360 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, LHS1, LHS2); 11361 } 11362 // fall through 11363 case MVT::v4i32: 11364 case MVT::v8i16: { 11365 SDValue Tmp1 = getTargetVShiftNode(X86ISD::VSHLI, dl, VT, 11366 Op.getOperand(0), ShAmt, DAG); 11367 return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, Tmp1, ShAmt, DAG); 11368 } 11369 } 11370} 11371 11372 11373static SDValue LowerMEMBARRIER(SDValue Op, const X86Subtarget *Subtarget, 11374 SelectionDAG &DAG) { 11375 DebugLoc dl = Op.getDebugLoc(); 11376 11377 // Go ahead and emit the fence on x86-64 even if we asked for no-sse2. 11378 // There isn't any reason to disable it if the target processor supports it. 
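  // Without SSE2 and outside 64-bit mode there is no MFENCE, so a locked
  // read-modify-write of a dummy stack slot (effectively "lock orl $0, (%esp)")
  // is emitted instead; any LOCK-prefixed instruction acts as a full barrier.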
11379 if (!Subtarget->hasSSE2() && !Subtarget->is64Bit()) { 11380 SDValue Chain = Op.getOperand(0); 11381 SDValue Zero = DAG.getConstant(0, MVT::i32); 11382 SDValue Ops[] = { 11383 DAG.getRegister(X86::ESP, MVT::i32), // Base 11384 DAG.getTargetConstant(1, MVT::i8), // Scale 11385 DAG.getRegister(0, MVT::i32), // Index 11386 DAG.getTargetConstant(0, MVT::i32), // Disp 11387 DAG.getRegister(0, MVT::i32), // Segment. 11388 Zero, 11389 Chain 11390 }; 11391 SDNode *Res = 11392 DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops, 11393 array_lengthof(Ops)); 11394 return SDValue(Res, 0); 11395 } 11396 11397 unsigned isDev = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue(); 11398 if (!isDev) 11399 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0)); 11400 11401 unsigned Op1 = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 11402 unsigned Op2 = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); 11403 unsigned Op3 = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue(); 11404 unsigned Op4 = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); 11405 11406 // def : Pat<(membarrier (i8 0), (i8 0), (i8 0), (i8 1), (i8 1)), (SFENCE)>; 11407 if (!Op1 && !Op2 && !Op3 && Op4) 11408 return DAG.getNode(X86ISD::SFENCE, dl, MVT::Other, Op.getOperand(0)); 11409 11410 // def : Pat<(membarrier (i8 1), (i8 0), (i8 0), (i8 0), (i8 1)), (LFENCE)>; 11411 if (Op1 && !Op2 && !Op3 && !Op4) 11412 return DAG.getNode(X86ISD::LFENCE, dl, MVT::Other, Op.getOperand(0)); 11413 11414 // def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm), (i8 1)), 11415 // (MFENCE)>; 11416 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0)); 11417} 11418 11419static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget, 11420 SelectionDAG &DAG) { 11421 DebugLoc dl = Op.getDebugLoc(); 11422 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>( 11423 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()); 11424 SynchronizationScope FenceScope = static_cast<SynchronizationScope>( 11425 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue()); 11426 11427 // The only fence that needs an instruction is a sequentially-consistent 11428 // cross-thread fence. 11429 if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) { 11430 // Use mfence if we have SSE2 or we're on x86-64 (even if we asked for 11431 // no-sse2). There isn't any reason to disable it if the target processor 11432 // supports it. 11433 if (Subtarget->hasSSE2() || Subtarget->is64Bit()) 11434 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0)); 11435 11436 SDValue Chain = Op.getOperand(0); 11437 SDValue Zero = DAG.getConstant(0, MVT::i32); 11438 SDValue Ops[] = { 11439 DAG.getRegister(X86::ESP, MVT::i32), // Base 11440 DAG.getTargetConstant(1, MVT::i8), // Scale 11441 DAG.getRegister(0, MVT::i32), // Index 11442 DAG.getTargetConstant(0, MVT::i32), // Disp 11443 DAG.getRegister(0, MVT::i32), // Segment. 11444 Zero, 11445 Chain 11446 }; 11447 SDNode *Res = 11448 DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops, 11449 array_lengthof(Ops)); 11450 return SDValue(Res, 0); 11451 } 11452 11453 // MEMBARRIER is a compiler barrier; it codegens to a no-op. 
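  // Weaker (acquire/release) or single-thread fences need no instruction on
  // x86: the hardware memory model already provides the required ordering,
  // so only the compiler/scheduler barrier is kept.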
11454 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0)); 11455} 11456 11457 11458static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget, 11459 SelectionDAG &DAG) { 11460 EVT T = Op.getValueType(); 11461 DebugLoc DL = Op.getDebugLoc(); 11462 unsigned Reg = 0; 11463 unsigned size = 0; 11464 switch(T.getSimpleVT().SimpleTy) { 11465 default: llvm_unreachable("Invalid value type!"); 11466 case MVT::i8: Reg = X86::AL; size = 1; break; 11467 case MVT::i16: Reg = X86::AX; size = 2; break; 11468 case MVT::i32: Reg = X86::EAX; size = 4; break; 11469 case MVT::i64: 11470 assert(Subtarget->is64Bit() && "Node not type legal!"); 11471 Reg = X86::RAX; size = 8; 11472 break; 11473 } 11474 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg, 11475 Op.getOperand(2), SDValue()); 11476 SDValue Ops[] = { cpIn.getValue(0), 11477 Op.getOperand(1), 11478 Op.getOperand(3), 11479 DAG.getTargetConstant(size, MVT::i8), 11480 cpIn.getValue(1) }; 11481 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 11482 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand(); 11483 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys, 11484 Ops, 5, T, MMO); 11485 SDValue cpOut = 11486 DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1)); 11487 return cpOut; 11488} 11489 11490static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget, 11491 SelectionDAG &DAG) { 11492 assert(Subtarget->is64Bit() && "Result not type legalized?"); 11493 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 11494 SDValue TheChain = Op.getOperand(0); 11495 DebugLoc dl = Op.getDebugLoc(); 11496 SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1); 11497 SDValue rax = DAG.getCopyFromReg(rd, dl, X86::RAX, MVT::i64, rd.getValue(1)); 11498 SDValue rdx = DAG.getCopyFromReg(rax.getValue(1), dl, X86::RDX, MVT::i64, 11499 rax.getValue(2)); 11500 SDValue Tmp = DAG.getNode(ISD::SHL, dl, MVT::i64, rdx, 11501 DAG.getConstant(32, MVT::i8)); 11502 SDValue Ops[] = { 11503 DAG.getNode(ISD::OR, dl, MVT::i64, rax, Tmp), 11504 rdx.getValue(1) 11505 }; 11506 return DAG.getMergeValues(Ops, 2, dl); 11507} 11508 11509SDValue X86TargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const { 11510 EVT SrcVT = Op.getOperand(0).getValueType(); 11511 EVT DstVT = Op.getValueType(); 11512 assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() && 11513 Subtarget->hasMMX() && "Unexpected custom BITCAST"); 11514 assert((DstVT == MVT::i64 || 11515 (DstVT.isVector() && DstVT.getSizeInBits()==64)) && 11516 "Unexpected custom BITCAST"); 11517 // i64 <=> MMX conversions are Legal. 11518 if (SrcVT==MVT::i64 && DstVT.isVector()) 11519 return Op; 11520 if (DstVT==MVT::i64 && SrcVT.isVector()) 11521 return Op; 11522 // MMX <=> MMX conversions are Legal. 11523 if (SrcVT.isVector() && DstVT.isVector()) 11524 return Op; 11525 // All other conversions need to be expanded. 
11526 return SDValue(); 11527} 11528 11529static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) { 11530 SDNode *Node = Op.getNode(); 11531 DebugLoc dl = Node->getDebugLoc(); 11532 EVT T = Node->getValueType(0); 11533 SDValue negOp = DAG.getNode(ISD::SUB, dl, T, 11534 DAG.getConstant(0, T), Node->getOperand(2)); 11535 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl, 11536 cast<AtomicSDNode>(Node)->getMemoryVT(), 11537 Node->getOperand(0), 11538 Node->getOperand(1), negOp, 11539 cast<AtomicSDNode>(Node)->getSrcValue(), 11540 cast<AtomicSDNode>(Node)->getAlignment(), 11541 cast<AtomicSDNode>(Node)->getOrdering(), 11542 cast<AtomicSDNode>(Node)->getSynchScope()); 11543} 11544 11545static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) { 11546 SDNode *Node = Op.getNode(); 11547 DebugLoc dl = Node->getDebugLoc(); 11548 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT(); 11549 11550 // Convert seq_cst store -> xchg 11551 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b) 11552 // FIXME: On 32-bit, store -> fist or movq would be more efficient 11553 // (The only way to get a 16-byte store is cmpxchg16b) 11554 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment. 11555 if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent || 11556 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 11557 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl, 11558 cast<AtomicSDNode>(Node)->getMemoryVT(), 11559 Node->getOperand(0), 11560 Node->getOperand(1), Node->getOperand(2), 11561 cast<AtomicSDNode>(Node)->getMemOperand(), 11562 cast<AtomicSDNode>(Node)->getOrdering(), 11563 cast<AtomicSDNode>(Node)->getSynchScope()); 11564 return Swap.getValue(1); 11565 } 11566 // Other atomic stores have a simple pattern. 11567 return Op; 11568} 11569 11570static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) { 11571 EVT VT = Op.getNode()->getValueType(0); 11572 11573 // Let legalize expand this if it isn't a legal type yet. 11574 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 11575 return SDValue(); 11576 11577 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 11578 11579 unsigned Opc; 11580 bool ExtraOp = false; 11581 switch (Op.getOpcode()) { 11582 default: llvm_unreachable("Invalid code"); 11583 case ISD::ADDC: Opc = X86ISD::ADD; break; 11584 case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break; 11585 case ISD::SUBC: Opc = X86ISD::SUB; break; 11586 case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break; 11587 } 11588 11589 if (!ExtraOp) 11590 return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0), 11591 Op.getOperand(1)); 11592 return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0), 11593 Op.getOperand(1), Op.getOperand(2)); 11594} 11595 11596/// LowerOperation - Provide custom lowering hooks for some operations. 
11597/// 11598SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 11599 switch (Op.getOpcode()) { 11600 default: llvm_unreachable("Should not custom lower this!"); 11601 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op,DAG); 11602 case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, Subtarget, DAG); 11603 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG); 11604 case ISD::ATOMIC_CMP_SWAP: return LowerCMP_SWAP(Op, Subtarget, DAG); 11605 case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG); 11606 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op,DAG); 11607 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 11608 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 11609 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 11610 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 11611 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 11612 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG); 11613 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG); 11614 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 11615 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 11616 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 11617 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 11618 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG); 11619 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 11620 case ISD::SHL_PARTS: 11621 case ISD::SRA_PARTS: 11622 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG); 11623 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 11624 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG); 11625 case ISD::TRUNCATE: return lowerTRUNCATE(Op, DAG); 11626 case ISD::ZERO_EXTEND: return lowerZERO_EXTEND(Op, DAG); 11627 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 11628 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG); 11629 case ISD::FP_EXTEND: return lowerFP_EXTEND(Op, DAG); 11630 case ISD::FABS: return LowerFABS(Op, DAG); 11631 case ISD::FNEG: return LowerFNEG(Op, DAG); 11632 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 11633 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG); 11634 case ISD::SETCC: return LowerSETCC(Op, DAG); 11635 case ISD::SELECT: return LowerSELECT(Op, DAG); 11636 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 11637 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 11638 case ISD::VASTART: return LowerVASTART(Op, DAG); 11639 case ISD::VAARG: return LowerVAARG(Op, DAG); 11640 case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG); 11641 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 11642 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG); 11643 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 11644 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 11645 case ISD::FRAME_TO_ARGS_OFFSET: 11646 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG); 11647 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); 11648 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG); 11649 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG); 11650 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG); 11651 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG); 11652 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG); 11653 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 11654 case ISD::CTLZ: 
return LowerCTLZ(Op, DAG); 11655 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, DAG); 11656 case ISD::CTTZ: return LowerCTTZ(Op, DAG); 11657 case ISD::MUL: return LowerMUL(Op, Subtarget, DAG); 11658 case ISD::SRA: 11659 case ISD::SRL: 11660 case ISD::SHL: return LowerShift(Op, DAG); 11661 case ISD::SADDO: 11662 case ISD::UADDO: 11663 case ISD::SSUBO: 11664 case ISD::USUBO: 11665 case ISD::SMULO: 11666 case ISD::UMULO: return LowerXALUO(Op, DAG); 11667 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG); 11668 case ISD::BITCAST: return LowerBITCAST(Op, DAG); 11669 case ISD::ADDC: 11670 case ISD::ADDE: 11671 case ISD::SUBC: 11672 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); 11673 case ISD::ADD: return LowerADD(Op, DAG); 11674 case ISD::SUB: return LowerSUB(Op, DAG); 11675 } 11676} 11677 11678static void ReplaceATOMIC_LOAD(SDNode *Node, 11679 SmallVectorImpl<SDValue> &Results, 11680 SelectionDAG &DAG) { 11681 DebugLoc dl = Node->getDebugLoc(); 11682 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT(); 11683 11684 // Convert wide load -> cmpxchg8b/cmpxchg16b 11685 // FIXME: On 32-bit, load -> fild or movq would be more efficient 11686 // (The only way to get a 16-byte load is cmpxchg16b) 11687 // FIXME: 16-byte ATOMIC_CMP_SWAP isn't actually hooked up at the moment. 11688 SDValue Zero = DAG.getConstant(0, VT); 11689 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, dl, VT, 11690 Node->getOperand(0), 11691 Node->getOperand(1), Zero, Zero, 11692 cast<AtomicSDNode>(Node)->getMemOperand(), 11693 cast<AtomicSDNode>(Node)->getOrdering(), 11694 cast<AtomicSDNode>(Node)->getSynchScope()); 11695 Results.push_back(Swap.getValue(0)); 11696 Results.push_back(Swap.getValue(1)); 11697} 11698 11699static void 11700ReplaceATOMIC_BINARY_64(SDNode *Node, SmallVectorImpl<SDValue>&Results, 11701 SelectionDAG &DAG, unsigned NewOp) { 11702 DebugLoc dl = Node->getDebugLoc(); 11703 assert (Node->getValueType(0) == MVT::i64 && 11704 "Only know how to expand i64 atomics"); 11705 11706 SDValue Chain = Node->getOperand(0); 11707 SDValue In1 = Node->getOperand(1); 11708 SDValue In2L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 11709 Node->getOperand(2), DAG.getIntPtrConstant(0)); 11710 SDValue In2H = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 11711 Node->getOperand(2), DAG.getIntPtrConstant(1)); 11712 SDValue Ops[] = { Chain, In1, In2L, In2H }; 11713 SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 11714 SDValue Result = 11715 DAG.getMemIntrinsicNode(NewOp, dl, Tys, Ops, 4, MVT::i64, 11716 cast<MemSDNode>(Node)->getMemOperand()); 11717 SDValue OpsF[] = { Result.getValue(0), Result.getValue(1)}; 11718 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2)); 11719 Results.push_back(Result.getValue(2)); 11720} 11721 11722/// ReplaceNodeResults - Replace a node with an illegal result type 11723/// with a new node built out of custom code. 11724void X86TargetLowering::ReplaceNodeResults(SDNode *N, 11725 SmallVectorImpl<SDValue>&Results, 11726 SelectionDAG &DAG) const { 11727 DebugLoc dl = N->getDebugLoc(); 11728 switch (N->getOpcode()) { 11729 default: 11730 llvm_unreachable("Do not know how to custom type legalize this operation!"); 11731 case ISD::SIGN_EXTEND_INREG: 11732 case ISD::ADDC: 11733 case ISD::ADDE: 11734 case ISD::SUBC: 11735 case ISD::SUBE: 11736 // We don't want to expand or promote these. 
11737 return; 11738 case ISD::FP_TO_SINT: 11739 case ISD::FP_TO_UINT: { 11740 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT; 11741 11742 if (!IsSigned && !isIntegerTypeFTOL(SDValue(N, 0).getValueType())) 11743 return; 11744 11745 std::pair<SDValue,SDValue> Vals = 11746 FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true); 11747 SDValue FIST = Vals.first, StackSlot = Vals.second; 11748 if (FIST.getNode() != 0) { 11749 EVT VT = N->getValueType(0); 11750 // Return a load from the stack slot. 11751 if (StackSlot.getNode() != 0) 11752 Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot, 11753 MachinePointerInfo(), 11754 false, false, false, 0)); 11755 else 11756 Results.push_back(FIST); 11757 } 11758 return; 11759 } 11760 case ISD::UINT_TO_FP: { 11761 if (N->getOperand(0).getValueType() != MVT::v2i32 && 11762 N->getValueType(0) != MVT::v2f32) 11763 return; 11764 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64, 11765 N->getOperand(0)); 11766 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), 11767 MVT::f64); 11768 SDValue VBias = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2f64, Bias, Bias); 11769 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn, 11770 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, VBias)); 11771 Or = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or); 11772 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias); 11773 Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub)); 11774 return; 11775 } 11776 case ISD::FP_ROUND: { 11777 SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0)); 11778 Results.push_back(V); 11779 return; 11780 } 11781 case ISD::READCYCLECOUNTER: { 11782 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 11783 SDValue TheChain = N->getOperand(0); 11784 SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1); 11785 SDValue eax = DAG.getCopyFromReg(rd, dl, X86::EAX, MVT::i32, 11786 rd.getValue(1)); 11787 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), dl, X86::EDX, MVT::i32, 11788 eax.getValue(2)); 11789 // Use a buildpair to merge the two 32-bit values into a 64-bit one. 11790 SDValue Ops[] = { eax, edx }; 11791 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops, 2)); 11792 Results.push_back(edx.getValue(1)); 11793 return; 11794 } 11795 case ISD::ATOMIC_CMP_SWAP: { 11796 EVT T = N->getValueType(0); 11797 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair"); 11798 bool Regs64bit = T == MVT::i128; 11799 EVT HalfT = Regs64bit ? MVT::i64 : MVT::i32; 11800 SDValue cpInL, cpInH; 11801 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2), 11802 DAG.getConstant(0, HalfT)); 11803 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2), 11804 DAG.getConstant(1, HalfT)); 11805 cpInL = DAG.getCopyToReg(N->getOperand(0), dl, 11806 Regs64bit ? X86::RAX : X86::EAX, 11807 cpInL, SDValue()); 11808 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl, 11809 Regs64bit ? X86::RDX : X86::EDX, 11810 cpInH, cpInL.getValue(1)); 11811 SDValue swapInL, swapInH; 11812 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3), 11813 DAG.getConstant(0, HalfT)); 11814 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3), 11815 DAG.getConstant(1, HalfT)); 11816 swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl, 11817 Regs64bit ? X86::RBX : X86::EBX, 11818 swapInL, cpInH.getValue(1)); 11819 swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl, 11820 Regs64bit ? 
X86::RCX : X86::ECX, 11821 swapInH, swapInL.getValue(1)); 11822 SDValue Ops[] = { swapInH.getValue(0), 11823 N->getOperand(1), 11824 swapInH.getValue(1) }; 11825 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 11826 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); 11827 unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_DAG : 11828 X86ISD::LCMPXCHG8_DAG; 11829 SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, 11830 Ops, 3, T, MMO); 11831 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl, 11832 Regs64bit ? X86::RAX : X86::EAX, 11833 HalfT, Result.getValue(1)); 11834 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl, 11835 Regs64bit ? X86::RDX : X86::EDX, 11836 HalfT, cpOutL.getValue(2)); 11837 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)}; 11838 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF, 2)); 11839 Results.push_back(cpOutH.getValue(1)); 11840 return; 11841 } 11842 case ISD::ATOMIC_LOAD_ADD: 11843 case ISD::ATOMIC_LOAD_AND: 11844 case ISD::ATOMIC_LOAD_NAND: 11845 case ISD::ATOMIC_LOAD_OR: 11846 case ISD::ATOMIC_LOAD_SUB: 11847 case ISD::ATOMIC_LOAD_XOR: 11848 case ISD::ATOMIC_LOAD_MAX: 11849 case ISD::ATOMIC_LOAD_MIN: 11850 case ISD::ATOMIC_LOAD_UMAX: 11851 case ISD::ATOMIC_LOAD_UMIN: 11852 case ISD::ATOMIC_SWAP: { 11853 unsigned Opc; 11854 switch (N->getOpcode()) { 11855 default: llvm_unreachable("Unexpected opcode"); 11856 case ISD::ATOMIC_LOAD_ADD: 11857 Opc = X86ISD::ATOMADD64_DAG; 11858 break; 11859 case ISD::ATOMIC_LOAD_AND: 11860 Opc = X86ISD::ATOMAND64_DAG; 11861 break; 11862 case ISD::ATOMIC_LOAD_NAND: 11863 Opc = X86ISD::ATOMNAND64_DAG; 11864 break; 11865 case ISD::ATOMIC_LOAD_OR: 11866 Opc = X86ISD::ATOMOR64_DAG; 11867 break; 11868 case ISD::ATOMIC_LOAD_SUB: 11869 Opc = X86ISD::ATOMSUB64_DAG; 11870 break; 11871 case ISD::ATOMIC_LOAD_XOR: 11872 Opc = X86ISD::ATOMXOR64_DAG; 11873 break; 11874 case ISD::ATOMIC_LOAD_MAX: 11875 Opc = X86ISD::ATOMMAX64_DAG; 11876 break; 11877 case ISD::ATOMIC_LOAD_MIN: 11878 Opc = X86ISD::ATOMMIN64_DAG; 11879 break; 11880 case ISD::ATOMIC_LOAD_UMAX: 11881 Opc = X86ISD::ATOMUMAX64_DAG; 11882 break; 11883 case ISD::ATOMIC_LOAD_UMIN: 11884 Opc = X86ISD::ATOMUMIN64_DAG; 11885 break; 11886 case ISD::ATOMIC_SWAP: 11887 Opc = X86ISD::ATOMSWAP64_DAG; 11888 break; 11889 } 11890 ReplaceATOMIC_BINARY_64(N, Results, DAG, Opc); 11891 return; 11892 } 11893 case ISD::ATOMIC_LOAD: 11894 ReplaceATOMIC_LOAD(N, Results, DAG); 11895 } 11896} 11897 11898const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { 11899 switch (Opcode) { 11900 default: return NULL; 11901 case X86ISD::BSF: return "X86ISD::BSF"; 11902 case X86ISD::BSR: return "X86ISD::BSR"; 11903 case X86ISD::SHLD: return "X86ISD::SHLD"; 11904 case X86ISD::SHRD: return "X86ISD::SHRD"; 11905 case X86ISD::FAND: return "X86ISD::FAND"; 11906 case X86ISD::FOR: return "X86ISD::FOR"; 11907 case X86ISD::FXOR: return "X86ISD::FXOR"; 11908 case X86ISD::FSRL: return "X86ISD::FSRL"; 11909 case X86ISD::FILD: return "X86ISD::FILD"; 11910 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG"; 11911 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM"; 11912 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM"; 11913 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM"; 11914 case X86ISD::FLD: return "X86ISD::FLD"; 11915 case X86ISD::FST: return "X86ISD::FST"; 11916 case X86ISD::CALL: return "X86ISD::CALL"; 11917 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG"; 11918 case X86ISD::BT: return 
"X86ISD::BT"; 11919 case X86ISD::CMP: return "X86ISD::CMP"; 11920 case X86ISD::COMI: return "X86ISD::COMI"; 11921 case X86ISD::UCOMI: return "X86ISD::UCOMI"; 11922 case X86ISD::SETCC: return "X86ISD::SETCC"; 11923 case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY"; 11924 case X86ISD::FSETCCsd: return "X86ISD::FSETCCsd"; 11925 case X86ISD::FSETCCss: return "X86ISD::FSETCCss"; 11926 case X86ISD::CMOV: return "X86ISD::CMOV"; 11927 case X86ISD::BRCOND: return "X86ISD::BRCOND"; 11928 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG"; 11929 case X86ISD::REP_STOS: return "X86ISD::REP_STOS"; 11930 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS"; 11931 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg"; 11932 case X86ISD::Wrapper: return "X86ISD::Wrapper"; 11933 case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP"; 11934 case X86ISD::PEXTRB: return "X86ISD::PEXTRB"; 11935 case X86ISD::PEXTRW: return "X86ISD::PEXTRW"; 11936 case X86ISD::INSERTPS: return "X86ISD::INSERTPS"; 11937 case X86ISD::PINSRB: return "X86ISD::PINSRB"; 11938 case X86ISD::PINSRW: return "X86ISD::PINSRW"; 11939 case X86ISD::PSHUFB: return "X86ISD::PSHUFB"; 11940 case X86ISD::ANDNP: return "X86ISD::ANDNP"; 11941 case X86ISD::PSIGN: return "X86ISD::PSIGN"; 11942 case X86ISD::BLENDV: return "X86ISD::BLENDV"; 11943 case X86ISD::BLENDPW: return "X86ISD::BLENDPW"; 11944 case X86ISD::BLENDPS: return "X86ISD::BLENDPS"; 11945 case X86ISD::BLENDPD: return "X86ISD::BLENDPD"; 11946 case X86ISD::HADD: return "X86ISD::HADD"; 11947 case X86ISD::HSUB: return "X86ISD::HSUB"; 11948 case X86ISD::FHADD: return "X86ISD::FHADD"; 11949 case X86ISD::FHSUB: return "X86ISD::FHSUB"; 11950 case X86ISD::FMAX: return "X86ISD::FMAX"; 11951 case X86ISD::FMIN: return "X86ISD::FMIN"; 11952 case X86ISD::FMAXC: return "X86ISD::FMAXC"; 11953 case X86ISD::FMINC: return "X86ISD::FMINC"; 11954 case X86ISD::FRSQRT: return "X86ISD::FRSQRT"; 11955 case X86ISD::FRCP: return "X86ISD::FRCP"; 11956 case X86ISD::TLSADDR: return "X86ISD::TLSADDR"; 11957 case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR"; 11958 case X86ISD::TLSCALL: return "X86ISD::TLSCALL"; 11959 case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP"; 11960 case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP"; 11961 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN"; 11962 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN"; 11963 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m"; 11964 case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r"; 11965 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG"; 11966 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG"; 11967 case X86ISD::ATOMADD64_DAG: return "X86ISD::ATOMADD64_DAG"; 11968 case X86ISD::ATOMSUB64_DAG: return "X86ISD::ATOMSUB64_DAG"; 11969 case X86ISD::ATOMOR64_DAG: return "X86ISD::ATOMOR64_DAG"; 11970 case X86ISD::ATOMXOR64_DAG: return "X86ISD::ATOMXOR64_DAG"; 11971 case X86ISD::ATOMAND64_DAG: return "X86ISD::ATOMAND64_DAG"; 11972 case X86ISD::ATOMNAND64_DAG: return "X86ISD::ATOMNAND64_DAG"; 11973 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL"; 11974 case X86ISD::VSEXT_MOVL: return "X86ISD::VSEXT_MOVL"; 11975 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD"; 11976 case X86ISD::VZEXT: return "X86ISD::VZEXT"; 11977 case X86ISD::VSEXT: return "X86ISD::VSEXT"; 11978 case X86ISD::VFPEXT: return "X86ISD::VFPEXT"; 11979 case X86ISD::VFPROUND: return "X86ISD::VFPROUND"; 11980 case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ"; 11981 case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ"; 11982 case X86ISD::VSHL: return 
"X86ISD::VSHL"; 11983 case X86ISD::VSRL: return "X86ISD::VSRL"; 11984 case X86ISD::VSRA: return "X86ISD::VSRA"; 11985 case X86ISD::VSHLI: return "X86ISD::VSHLI"; 11986 case X86ISD::VSRLI: return "X86ISD::VSRLI"; 11987 case X86ISD::VSRAI: return "X86ISD::VSRAI"; 11988 case X86ISD::CMPP: return "X86ISD::CMPP"; 11989 case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ"; 11990 case X86ISD::PCMPGT: return "X86ISD::PCMPGT"; 11991 case X86ISD::ADD: return "X86ISD::ADD"; 11992 case X86ISD::SUB: return "X86ISD::SUB"; 11993 case X86ISD::ADC: return "X86ISD::ADC"; 11994 case X86ISD::SBB: return "X86ISD::SBB"; 11995 case X86ISD::SMUL: return "X86ISD::SMUL"; 11996 case X86ISD::UMUL: return "X86ISD::UMUL"; 11997 case X86ISD::INC: return "X86ISD::INC"; 11998 case X86ISD::DEC: return "X86ISD::DEC"; 11999 case X86ISD::OR: return "X86ISD::OR"; 12000 case X86ISD::XOR: return "X86ISD::XOR"; 12001 case X86ISD::AND: return "X86ISD::AND"; 12002 case X86ISD::ANDN: return "X86ISD::ANDN"; 12003 case X86ISD::BLSI: return "X86ISD::BLSI"; 12004 case X86ISD::BLSMSK: return "X86ISD::BLSMSK"; 12005 case X86ISD::BLSR: return "X86ISD::BLSR"; 12006 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM"; 12007 case X86ISD::PTEST: return "X86ISD::PTEST"; 12008 case X86ISD::TESTP: return "X86ISD::TESTP"; 12009 case X86ISD::PALIGN: return "X86ISD::PALIGN"; 12010 case X86ISD::PSHUFD: return "X86ISD::PSHUFD"; 12011 case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW"; 12012 case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW"; 12013 case X86ISD::SHUFP: return "X86ISD::SHUFP"; 12014 case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS"; 12015 case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD"; 12016 case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS"; 12017 case X86ISD::MOVLPS: return "X86ISD::MOVLPS"; 12018 case X86ISD::MOVLPD: return "X86ISD::MOVLPD"; 12019 case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP"; 12020 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP"; 12021 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP"; 12022 case X86ISD::MOVSD: return "X86ISD::MOVSD"; 12023 case X86ISD::MOVSS: return "X86ISD::MOVSS"; 12024 case X86ISD::UNPCKL: return "X86ISD::UNPCKL"; 12025 case X86ISD::UNPCKH: return "X86ISD::UNPCKH"; 12026 case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST"; 12027 case X86ISD::VPERMILP: return "X86ISD::VPERMILP"; 12028 case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128"; 12029 case X86ISD::VPERMV: return "X86ISD::VPERMV"; 12030 case X86ISD::VPERMI: return "X86ISD::VPERMI"; 12031 case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ"; 12032 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS"; 12033 case X86ISD::VAARG_64: return "X86ISD::VAARG_64"; 12034 case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA"; 12035 case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER"; 12036 case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA"; 12037 case X86ISD::WIN_FTOL: return "X86ISD::WIN_FTOL"; 12038 case X86ISD::SAHF: return "X86ISD::SAHF"; 12039 case X86ISD::RDRAND: return "X86ISD::RDRAND"; 12040 case X86ISD::FMADD: return "X86ISD::FMADD"; 12041 case X86ISD::FMSUB: return "X86ISD::FMSUB"; 12042 case X86ISD::FNMADD: return "X86ISD::FNMADD"; 12043 case X86ISD::FNMSUB: return "X86ISD::FNMSUB"; 12044 case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB"; 12045 case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD"; 12046 } 12047} 12048 12049// isLegalAddressingMode - Return true if the addressing mode represented 12050// by AM is legal for this target, for a load/store of the specified type. 
12051bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM, 12052 Type *Ty) const { 12053 // X86 supports extremely general addressing modes. 12054 CodeModel::Model M = getTargetMachine().getCodeModel(); 12055 Reloc::Model R = getTargetMachine().getRelocationModel(); 12056 12057 // X86 allows a sign-extended 32-bit immediate field as a displacement. 12058 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != NULL)) 12059 return false; 12060 12061 if (AM.BaseGV) { 12062 unsigned GVFlags = 12063 Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine()); 12064 12065 // If a reference to this global requires an extra load, we can't fold it. 12066 if (isGlobalStubReference(GVFlags)) 12067 return false; 12068 12069 // If BaseGV requires a register for the PIC base, we cannot also have a 12070 // BaseReg specified. 12071 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags)) 12072 return false; 12073 12074 // If lower 4G is not available, then we must use rip-relative addressing. 12075 if ((M != CodeModel::Small || R != Reloc::Static) && 12076 Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1)) 12077 return false; 12078 } 12079 12080 switch (AM.Scale) { 12081 case 0: 12082 case 1: 12083 case 2: 12084 case 4: 12085 case 8: 12086 // These scales always work. 12087 break; 12088 case 3: 12089 case 5: 12090 case 9: 12091 // These scales are formed with basereg+scalereg. Only accept if there is 12092 // no basereg yet. 12093 if (AM.HasBaseReg) 12094 return false; 12095 break; 12096 default: // Other stuff never works. 12097 return false; 12098 } 12099 12100 return true; 12101} 12102 12103 12104bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const { 12105 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) 12106 return false; 12107 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); 12108 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); 12109 if (NumBits1 <= NumBits2) 12110 return false; 12111 return true; 12112} 12113 12114bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const { 12115 return Imm == (int32_t)Imm; 12116} 12117 12118bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const { 12119 // Can also use sub to handle negated immediates. 12120 return Imm == (int32_t)Imm; 12121} 12122 12123bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { 12124 if (!VT1.isInteger() || !VT2.isInteger()) 12125 return false; 12126 unsigned NumBits1 = VT1.getSizeInBits(); 12127 unsigned NumBits2 = VT2.getSizeInBits(); 12128 if (NumBits1 <= NumBits2) 12129 return false; 12130 return true; 12131} 12132 12133bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const { 12134 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers. 12135 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit(); 12136} 12137 12138bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const { 12139 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers. 12140 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit(); 12141} 12142 12143bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const { 12144 // i16 instructions are longer (0x66 prefix) and potentially slower. 12145 return !(VT1 == MVT::i32 && VT2 == MVT::i16); 12146} 12147 12148/// isShuffleMaskLegal - Targets can use this to indicate that they only 12149/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 12150/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 12151/// are assumed to be legal. 
12152bool 12153X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, 12154 EVT VT) const { 12155 // Very little shuffling can be done for 64-bit vectors right now. 12156 if (VT.getSizeInBits() == 64) 12157 return false; 12158 12159 // FIXME: pshufb, blends, shifts. 12160 return (VT.getVectorNumElements() == 2 || 12161 ShuffleVectorSDNode::isSplatMask(&M[0], VT) || 12162 isMOVLMask(M, VT) || 12163 isSHUFPMask(M, VT, Subtarget->hasAVX()) || 12164 isPSHUFDMask(M, VT) || 12165 isPSHUFHWMask(M, VT, Subtarget->hasAVX2()) || 12166 isPSHUFLWMask(M, VT, Subtarget->hasAVX2()) || 12167 isPALIGNRMask(M, VT, Subtarget) || 12168 isUNPCKLMask(M, VT, Subtarget->hasAVX2()) || 12169 isUNPCKHMask(M, VT, Subtarget->hasAVX2()) || 12170 isUNPCKL_v_undef_Mask(M, VT, Subtarget->hasAVX2()) || 12171 isUNPCKH_v_undef_Mask(M, VT, Subtarget->hasAVX2())); 12172} 12173 12174bool 12175X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask, 12176 EVT VT) const { 12177 unsigned NumElts = VT.getVectorNumElements(); 12178 // FIXME: This collection of masks seems suspect. 12179 if (NumElts == 2) 12180 return true; 12181 if (NumElts == 4 && VT.is128BitVector()) { 12182 return (isMOVLMask(Mask, VT) || 12183 isCommutedMOVLMask(Mask, VT, true) || 12184 isSHUFPMask(Mask, VT, Subtarget->hasAVX()) || 12185 isSHUFPMask(Mask, VT, Subtarget->hasAVX(), /* Commuted */ true)); 12186 } 12187 return false; 12188} 12189 12190//===----------------------------------------------------------------------===// 12191// X86 Scheduler Hooks 12192//===----------------------------------------------------------------------===// 12193 12194// private utility function 12195 12196// Get CMPXCHG opcode for the specified data type. 12197static unsigned getCmpXChgOpcode(EVT VT) { 12198 switch (VT.getSimpleVT().SimpleTy) { 12199 case MVT::i8: return X86::LCMPXCHG8; 12200 case MVT::i16: return X86::LCMPXCHG16; 12201 case MVT::i32: return X86::LCMPXCHG32; 12202 case MVT::i64: return X86::LCMPXCHG64; 12203 default: 12204 break; 12205 } 12206 llvm_unreachable("Invalid operand size!"); 12207} 12208 12209// Get LOAD opcode for the specified data type. 12210static unsigned getLoadOpcode(EVT VT) { 12211 switch (VT.getSimpleVT().SimpleTy) { 12212 case MVT::i8: return X86::MOV8rm; 12213 case MVT::i16: return X86::MOV16rm; 12214 case MVT::i32: return X86::MOV32rm; 12215 case MVT::i64: return X86::MOV64rm; 12216 default: 12217 break; 12218 } 12219 llvm_unreachable("Invalid operand size!"); 12220} 12221 12222// Get opcode of the non-atomic one from the specified atomic instruction. 12223static unsigned getNonAtomicOpcode(unsigned Opc) { 12224 switch (Opc) { 12225 case X86::ATOMAND8: return X86::AND8rr; 12226 case X86::ATOMAND16: return X86::AND16rr; 12227 case X86::ATOMAND32: return X86::AND32rr; 12228 case X86::ATOMAND64: return X86::AND64rr; 12229 case X86::ATOMOR8: return X86::OR8rr; 12230 case X86::ATOMOR16: return X86::OR16rr; 12231 case X86::ATOMOR32: return X86::OR32rr; 12232 case X86::ATOMOR64: return X86::OR64rr; 12233 case X86::ATOMXOR8: return X86::XOR8rr; 12234 case X86::ATOMXOR16: return X86::XOR16rr; 12235 case X86::ATOMXOR32: return X86::XOR32rr; 12236 case X86::ATOMXOR64: return X86::XOR64rr; 12237 } 12238 llvm_unreachable("Unhandled atomic-load-op opcode!"); 12239} 12240 12241// Get opcode of the non-atomic one from the specified atomic instruction with 12242// extra opcode. 
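// For example, ATOMNAND32 maps to (AND32rr, NOT32r), which the expansion in
// EmitAtomicLoadArith uses as t2 = AND32rr src, acc; t1 = NOT32r t2, while
// ATOMMAX32 maps to (CMP32rr, CMOVL32rr), i.e. compare then conditional move.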
12243static unsigned getNonAtomicOpcodeWithExtraOpc(unsigned Opc, 12244 unsigned &ExtraOpc) { 12245 switch (Opc) { 12246 case X86::ATOMNAND8: ExtraOpc = X86::NOT8r; return X86::AND8rr; 12247 case X86::ATOMNAND16: ExtraOpc = X86::NOT16r; return X86::AND16rr; 12248 case X86::ATOMNAND32: ExtraOpc = X86::NOT32r; return X86::AND32rr; 12249 case X86::ATOMNAND64: ExtraOpc = X86::NOT64r; return X86::AND64rr; 12250 case X86::ATOMMAX8: ExtraOpc = X86::CMP8rr; return X86::CMOVL32rr; 12251 case X86::ATOMMAX16: ExtraOpc = X86::CMP16rr; return X86::CMOVL16rr; 12252 case X86::ATOMMAX32: ExtraOpc = X86::CMP32rr; return X86::CMOVL32rr; 12253 case X86::ATOMMAX64: ExtraOpc = X86::CMP64rr; return X86::CMOVL64rr; 12254 case X86::ATOMMIN8: ExtraOpc = X86::CMP8rr; return X86::CMOVG32rr; 12255 case X86::ATOMMIN16: ExtraOpc = X86::CMP16rr; return X86::CMOVG16rr; 12256 case X86::ATOMMIN32: ExtraOpc = X86::CMP32rr; return X86::CMOVG32rr; 12257 case X86::ATOMMIN64: ExtraOpc = X86::CMP64rr; return X86::CMOVG64rr; 12258 case X86::ATOMUMAX8: ExtraOpc = X86::CMP8rr; return X86::CMOVB32rr; 12259 case X86::ATOMUMAX16: ExtraOpc = X86::CMP16rr; return X86::CMOVB16rr; 12260 case X86::ATOMUMAX32: ExtraOpc = X86::CMP32rr; return X86::CMOVB32rr; 12261 case X86::ATOMUMAX64: ExtraOpc = X86::CMP64rr; return X86::CMOVB64rr; 12262 case X86::ATOMUMIN8: ExtraOpc = X86::CMP8rr; return X86::CMOVA32rr; 12263 case X86::ATOMUMIN16: ExtraOpc = X86::CMP16rr; return X86::CMOVA16rr; 12264 case X86::ATOMUMIN32: ExtraOpc = X86::CMP32rr; return X86::CMOVA32rr; 12265 case X86::ATOMUMIN64: ExtraOpc = X86::CMP64rr; return X86::CMOVA64rr; 12266 } 12267 llvm_unreachable("Unhandled atomic-load-op opcode!"); 12268} 12269 12270// Get opcode of the non-atomic one from the specified atomic instruction for 12271// 64-bit data type on 32-bit target. 12272static unsigned getNonAtomic6432Opcode(unsigned Opc, unsigned &HiOpc) { 12273 switch (Opc) { 12274 case X86::ATOMAND6432: HiOpc = X86::AND32rr; return X86::AND32rr; 12275 case X86::ATOMOR6432: HiOpc = X86::OR32rr; return X86::OR32rr; 12276 case X86::ATOMXOR6432: HiOpc = X86::XOR32rr; return X86::XOR32rr; 12277 case X86::ATOMADD6432: HiOpc = X86::ADC32rr; return X86::ADD32rr; 12278 case X86::ATOMSUB6432: HiOpc = X86::SBB32rr; return X86::SUB32rr; 12279 case X86::ATOMSWAP6432: HiOpc = X86::MOV32rr; return X86::MOV32rr; 12280 case X86::ATOMMAX6432: HiOpc = X86::SETLr; return X86::SETLr; 12281 case X86::ATOMMIN6432: HiOpc = X86::SETGr; return X86::SETGr; 12282 case X86::ATOMUMAX6432: HiOpc = X86::SETBr; return X86::SETBr; 12283 case X86::ATOMUMIN6432: HiOpc = X86::SETAr; return X86::SETAr; 12284 } 12285 llvm_unreachable("Unhandled atomic-load-op opcode!"); 12286} 12287 12288// Get opcode of the non-atomic one from the specified atomic instruction for 12289// 64-bit data type on 32-bit target with extra opcode. 12290static unsigned getNonAtomic6432OpcodeWithExtraOpc(unsigned Opc, 12291 unsigned &HiOpc, 12292 unsigned &ExtraOpc) { 12293 switch (Opc) { 12294 case X86::ATOMNAND6432: 12295 ExtraOpc = X86::NOT32r; 12296 HiOpc = X86::AND32rr; 12297 return X86::AND32rr; 12298 } 12299 llvm_unreachable("Unhandled atomic-load-op opcode!"); 12300} 12301 12302// Get pseudo CMOV opcode from the specified data type. 
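// (Only i8/i16/i32 are needed here: every x86-64 subtarget has CMOV, so the
// CMOV-less fallback below is only reachable on 32-bit targets, and 64-bit
// atomics on those targets go through the separate ...6432 expansion.)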
12303static unsigned getPseudoCMOVOpc(EVT VT) { 12304 switch (VT.getSimpleVT().SimpleTy) { 12305 case MVT::i8: return X86::CMOV_GR8; 12306 case MVT::i16: return X86::CMOV_GR16; 12307 case MVT::i32: return X86::CMOV_GR32; 12308 default: 12309 break; 12310 } 12311 llvm_unreachable("Unknown CMOV opcode!"); 12312} 12313 12314// EmitAtomicLoadArith - emit the code sequence for pseudo atomic instructions. 12315// They will be translated into a spin-loop or compare-exchange loop from 12316// 12317// ... 12318// dst = atomic-fetch-op MI.addr, MI.val 12319// ... 12320// 12321// to 12322// 12323// ... 12324// EAX = LOAD MI.addr 12325// loop: 12326// t1 = OP MI.val, EAX 12327// LCMPXCHG [MI.addr], t1, [EAX is implicitly used & defined] 12328// JNE loop 12329// sink: 12330// dst = EAX 12331// ... 12332MachineBasicBlock * 12333X86TargetLowering::EmitAtomicLoadArith(MachineInstr *MI, 12334 MachineBasicBlock *MBB) const { 12335 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 12336 DebugLoc DL = MI->getDebugLoc(); 12337 12338 MachineFunction *MF = MBB->getParent(); 12339 MachineRegisterInfo &MRI = MF->getRegInfo(); 12340 12341 const BasicBlock *BB = MBB->getBasicBlock(); 12342 MachineFunction::iterator I = MBB; 12343 ++I; 12344 12345 assert(MI->getNumOperands() <= X86::AddrNumOperands + 2 && 12346 "Unexpected number of operands"); 12347 12348 assert(MI->hasOneMemOperand() && 12349 "Expected atomic-load-op to have one memoperand"); 12350 12351 // Memory Reference 12352 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 12353 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 12354 12355 unsigned DstReg, SrcReg; 12356 unsigned MemOpndSlot; 12357 12358 unsigned CurOp = 0; 12359 12360 DstReg = MI->getOperand(CurOp++).getReg(); 12361 MemOpndSlot = CurOp; 12362 CurOp += X86::AddrNumOperands; 12363 SrcReg = MI->getOperand(CurOp++).getReg(); 12364 12365 const TargetRegisterClass *RC = MRI.getRegClass(DstReg); 12366 MVT::SimpleValueType VT = *RC->vt_begin(); 12367 unsigned AccPhyReg = getX86SubSuperRegister(X86::EAX, VT); 12368 12369 unsigned LCMPXCHGOpc = getCmpXChgOpcode(VT); 12370 unsigned LOADOpc = getLoadOpcode(VT); 12371 12372 // For the atomic load-arith operator, we generate 12373 // 12374 // thisMBB: 12375 // EAX = LOAD [MI.addr] 12376 // mainMBB: 12377 // t1 = OP MI.val, EAX 12378 // LCMPXCHG [MI.addr], t1, [EAX is implicitly used & defined] 12379 // JNE mainMBB 12380 // sinkMBB: 12381 12382 MachineBasicBlock *thisMBB = MBB; 12383 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 12384 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 12385 MF->insert(I, mainMBB); 12386 MF->insert(I, sinkMBB); 12387 12388 MachineInstrBuilder MIB; 12389 12390 // Transfer the remainder of BB and its successor edges to sinkMBB. 12391 sinkMBB->splice(sinkMBB->begin(), MBB, 12392 llvm::next(MachineBasicBlock::iterator(MI)), MBB->end()); 12393 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 12394 12395 // thisMBB: 12396 MIB = BuildMI(thisMBB, DL, TII->get(LOADOpc), AccPhyReg); 12397 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) 12398 MIB.addOperand(MI->getOperand(MemOpndSlot + i)); 12399 MIB.setMemRefs(MMOBegin, MMOEnd); 12400 12401 thisMBB->addSuccessor(mainMBB); 12402 12403 // mainMBB: 12404 MachineBasicBlock *origMainMBB = mainMBB; 12405 mainMBB->addLiveIn(AccPhyReg); 12406 12407 // Copy AccPhyReg as it is used more than once. 
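  // (AccPhyReg is EAX or the matching sub-register (AX/AL) picked by
  // getX86SubSuperRegister above; LCMPXCHG implicitly uses and redefines it,
  // so the arithmetic below works on a virtual copy and the result is copied
  // back into AccPhyReg right before the LCMPXCHG.)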
12408 unsigned AccReg = MRI.createVirtualRegister(RC); 12409 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), AccReg) 12410 .addReg(AccPhyReg); 12411 12412 unsigned t1 = MRI.createVirtualRegister(RC); 12413 unsigned Opc = MI->getOpcode(); 12414 switch (Opc) { 12415 default: 12416 llvm_unreachable("Unhandled atomic-load-op opcode!"); 12417 case X86::ATOMAND8: 12418 case X86::ATOMAND16: 12419 case X86::ATOMAND32: 12420 case X86::ATOMAND64: 12421 case X86::ATOMOR8: 12422 case X86::ATOMOR16: 12423 case X86::ATOMOR32: 12424 case X86::ATOMOR64: 12425 case X86::ATOMXOR8: 12426 case X86::ATOMXOR16: 12427 case X86::ATOMXOR32: 12428 case X86::ATOMXOR64: { 12429 unsigned ARITHOpc = getNonAtomicOpcode(Opc); 12430 BuildMI(mainMBB, DL, TII->get(ARITHOpc), t1).addReg(SrcReg) 12431 .addReg(AccReg); 12432 break; 12433 } 12434 case X86::ATOMNAND8: 12435 case X86::ATOMNAND16: 12436 case X86::ATOMNAND32: 12437 case X86::ATOMNAND64: { 12438 unsigned t2 = MRI.createVirtualRegister(RC); 12439 unsigned NOTOpc; 12440 unsigned ANDOpc = getNonAtomicOpcodeWithExtraOpc(Opc, NOTOpc); 12441 BuildMI(mainMBB, DL, TII->get(ANDOpc), t2).addReg(SrcReg) 12442 .addReg(AccReg); 12443 BuildMI(mainMBB, DL, TII->get(NOTOpc), t1).addReg(t2); 12444 break; 12445 } 12446 case X86::ATOMMAX8: 12447 case X86::ATOMMAX16: 12448 case X86::ATOMMAX32: 12449 case X86::ATOMMAX64: 12450 case X86::ATOMMIN8: 12451 case X86::ATOMMIN16: 12452 case X86::ATOMMIN32: 12453 case X86::ATOMMIN64: 12454 case X86::ATOMUMAX8: 12455 case X86::ATOMUMAX16: 12456 case X86::ATOMUMAX32: 12457 case X86::ATOMUMAX64: 12458 case X86::ATOMUMIN8: 12459 case X86::ATOMUMIN16: 12460 case X86::ATOMUMIN32: 12461 case X86::ATOMUMIN64: { 12462 unsigned CMPOpc; 12463 unsigned CMOVOpc = getNonAtomicOpcodeWithExtraOpc(Opc, CMPOpc); 12464 12465 BuildMI(mainMBB, DL, TII->get(CMPOpc)) 12466 .addReg(SrcReg) 12467 .addReg(AccReg); 12468 12469 if (Subtarget->hasCMov()) { 12470 if (VT != MVT::i8) { 12471 // Native support 12472 BuildMI(mainMBB, DL, TII->get(CMOVOpc), t1) 12473 .addReg(SrcReg) 12474 .addReg(AccReg); 12475 } else { 12476 // Promote i8 to i32 to use CMOV32 12477 const TargetRegisterClass *RC32 = getRegClassFor(MVT::i32); 12478 unsigned SrcReg32 = MRI.createVirtualRegister(RC32); 12479 unsigned AccReg32 = MRI.createVirtualRegister(RC32); 12480 unsigned t2 = MRI.createVirtualRegister(RC32); 12481 12482 unsigned Undef = MRI.createVirtualRegister(RC32); 12483 BuildMI(mainMBB, DL, TII->get(TargetOpcode::IMPLICIT_DEF), Undef); 12484 12485 BuildMI(mainMBB, DL, TII->get(TargetOpcode::INSERT_SUBREG), SrcReg32) 12486 .addReg(Undef) 12487 .addReg(SrcReg) 12488 .addImm(X86::sub_8bit); 12489 BuildMI(mainMBB, DL, TII->get(TargetOpcode::INSERT_SUBREG), AccReg32) 12490 .addReg(Undef) 12491 .addReg(AccReg) 12492 .addImm(X86::sub_8bit); 12493 12494 BuildMI(mainMBB, DL, TII->get(CMOVOpc), t2) 12495 .addReg(SrcReg32) 12496 .addReg(AccReg32); 12497 12498 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), t1) 12499 .addReg(t2, 0, X86::sub_8bit); 12500 } 12501 } else { 12502 // Use pseudo select and lower them. 
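      // (Pre-P6 32-bit subtargets without CMOV take this branch; CMOV_GR* is
      // a pseudo that EmitLoweredSelect expands into a compare-and-branch
      // diamond, which is why mainMBB is reassigned to the tail block below.)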
12503 assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) && 12504 "Invalid atomic-load-op transformation!"); 12505 unsigned SelOpc = getPseudoCMOVOpc(VT); 12506 X86::CondCode CC = X86::getCondFromCMovOpc(CMOVOpc); 12507 assert(CC != X86::COND_INVALID && "Invalid atomic-load-op transformation!"); 12508 MIB = BuildMI(mainMBB, DL, TII->get(SelOpc), t1) 12509 .addReg(SrcReg).addReg(AccReg) 12510 .addImm(CC); 12511 mainMBB = EmitLoweredSelect(MIB, mainMBB); 12512 } 12513 break; 12514 } 12515 } 12516 12517 // Copy AccPhyReg back from virtual register. 12518 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), AccPhyReg) 12519 .addReg(AccReg); 12520 12521 MIB = BuildMI(mainMBB, DL, TII->get(LCMPXCHGOpc)); 12522 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) 12523 MIB.addOperand(MI->getOperand(MemOpndSlot + i)); 12524 MIB.addReg(t1); 12525 MIB.setMemRefs(MMOBegin, MMOEnd); 12526 12527 BuildMI(mainMBB, DL, TII->get(X86::JNE_4)).addMBB(origMainMBB); 12528 12529 mainMBB->addSuccessor(origMainMBB); 12530 mainMBB->addSuccessor(sinkMBB); 12531 12532 // sinkMBB: 12533 sinkMBB->addLiveIn(AccPhyReg); 12534 12535 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 12536 TII->get(TargetOpcode::COPY), DstReg) 12537 .addReg(AccPhyReg); 12538 12539 MI->eraseFromParent(); 12540 return sinkMBB; 12541} 12542 12543// EmitAtomicLoadArith6432 - emit the code sequence for pseudo atomic 12544// instructions. They will be translated into a spin-loop or compare-exchange 12545// loop from 12546// 12547// ... 12548// dst = atomic-fetch-op MI.addr, MI.val 12549// ... 12550// 12551// to 12552// 12553// ... 12554// EAX = LOAD [MI.addr + 0] 12555// EDX = LOAD [MI.addr + 4] 12556// loop: 12557// EBX = OP MI.val.lo, EAX 12558// ECX = OP MI.val.hi, EDX 12559// LCMPXCHG8B [MI.addr], [ECX:EBX & EDX:EAX are implicitly used and EDX:EAX is implicitly defined] 12560// JNE loop 12561// sink: 12562// dst = EDX:EAX 12563// ... 
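// Illustrative trigger (assumed, not taken from this file): on a 32-bit
// target, IR such as
//   %old = atomicrmw add i64* %p, i64 %v seq_cst
// has no single-instruction lowering, so it is selected to an ATOM*6432
// pseudo and expanded here into the CMPXCHG8B loop sketched above.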
12564MachineBasicBlock * 12565X86TargetLowering::EmitAtomicLoadArith6432(MachineInstr *MI, 12566 MachineBasicBlock *MBB) const { 12567 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 12568 DebugLoc DL = MI->getDebugLoc(); 12569 12570 MachineFunction *MF = MBB->getParent(); 12571 MachineRegisterInfo &MRI = MF->getRegInfo(); 12572 12573 const BasicBlock *BB = MBB->getBasicBlock(); 12574 MachineFunction::iterator I = MBB; 12575 ++I; 12576 12577 assert(MI->getNumOperands() <= X86::AddrNumOperands + 4 && 12578 "Unexpected number of operands"); 12579 12580 assert(MI->hasOneMemOperand() && 12581 "Expected atomic-load-op32 to have one memoperand"); 12582 12583 // Memory Reference 12584 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 12585 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 12586 12587 unsigned DstLoReg, DstHiReg; 12588 unsigned SrcLoReg, SrcHiReg; 12589 unsigned MemOpndSlot; 12590 12591 unsigned CurOp = 0; 12592 12593 DstLoReg = MI->getOperand(CurOp++).getReg(); 12594 DstHiReg = MI->getOperand(CurOp++).getReg(); 12595 MemOpndSlot = CurOp; 12596 CurOp += X86::AddrNumOperands; 12597 SrcLoReg = MI->getOperand(CurOp++).getReg(); 12598 SrcHiReg = MI->getOperand(CurOp++).getReg(); 12599 12600 const TargetRegisterClass *RC = &X86::GR32RegClass; 12601 const TargetRegisterClass *RC8 = &X86::GR8RegClass; 12602 12603 unsigned LCMPXCHGOpc = X86::LCMPXCHG8B; 12604 unsigned LOADOpc = X86::MOV32rm; 12605 12606 // For the atomic load-arith operator, we generate 12607 // 12608 // thisMBB: 12609 // EAX = LOAD [MI.addr + 0] 12610 // EDX = LOAD [MI.addr + 4] 12611 // mainMBB: 12612 // EBX = OP MI.vallo, EAX 12613 // ECX = OP MI.valhi, EDX 12614 // LCMPXCHG8B [MI.addr], [ECX:EBX & EDX:EAX are implicitly used and EDX:EAX is implicitly defined] 12615 // JNE mainMBB 12616 // sinkMBB: 12617 12618 MachineBasicBlock *thisMBB = MBB; 12619 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 12620 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 12621 MF->insert(I, mainMBB); 12622 MF->insert(I, sinkMBB); 12623 12624 MachineInstrBuilder MIB; 12625 12626 // Transfer the remainder of BB and its successor edges to sinkMBB. 12627 sinkMBB->splice(sinkMBB->begin(), MBB, 12628 llvm::next(MachineBasicBlock::iterator(MI)), MBB->end()); 12629 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 12630 12631 // thisMBB: 12632 // Lo 12633 MIB = BuildMI(thisMBB, DL, TII->get(LOADOpc), X86::EAX); 12634 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) 12635 MIB.addOperand(MI->getOperand(MemOpndSlot + i)); 12636 MIB.setMemRefs(MMOBegin, MMOEnd); 12637 // Hi 12638 MIB = BuildMI(thisMBB, DL, TII->get(LOADOpc), X86::EDX); 12639 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { 12640 if (i == X86::AddrDisp) 12641 MIB.addDisp(MI->getOperand(MemOpndSlot + i), 4); // 4 == sizeof(i32) 12642 else 12643 MIB.addOperand(MI->getOperand(MemOpndSlot + i)); 12644 } 12645 MIB.setMemRefs(MMOBegin, MMOEnd); 12646 12647 thisMBB->addSuccessor(mainMBB); 12648 12649 // mainMBB: 12650 MachineBasicBlock *origMainMBB = mainMBB; 12651 mainMBB->addLiveIn(X86::EAX); 12652 mainMBB->addLiveIn(X86::EDX); 12653 12654 // Copy EDX:EAX as they are used more than once. 
12655 unsigned LoReg = MRI.createVirtualRegister(RC); 12656 unsigned HiReg = MRI.createVirtualRegister(RC); 12657 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), LoReg).addReg(X86::EAX); 12658 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), HiReg).addReg(X86::EDX); 12659 12660 unsigned t1L = MRI.createVirtualRegister(RC); 12661 unsigned t1H = MRI.createVirtualRegister(RC); 12662 12663 unsigned Opc = MI->getOpcode(); 12664 switch (Opc) { 12665 default: 12666 llvm_unreachable("Unhandled atomic-load-op6432 opcode!"); 12667 case X86::ATOMAND6432: 12668 case X86::ATOMOR6432: 12669 case X86::ATOMXOR6432: 12670 case X86::ATOMADD6432: 12671 case X86::ATOMSUB6432: { 12672 unsigned HiOpc; 12673 unsigned LoOpc = getNonAtomic6432Opcode(Opc, HiOpc); 12674 BuildMI(mainMBB, DL, TII->get(LoOpc), t1L).addReg(SrcLoReg).addReg(LoReg); 12675 BuildMI(mainMBB, DL, TII->get(HiOpc), t1H).addReg(SrcHiReg).addReg(HiReg); 12676 break; 12677 } 12678 case X86::ATOMNAND6432: { 12679 unsigned HiOpc, NOTOpc; 12680 unsigned LoOpc = getNonAtomic6432OpcodeWithExtraOpc(Opc, HiOpc, NOTOpc); 12681 unsigned t2L = MRI.createVirtualRegister(RC); 12682 unsigned t2H = MRI.createVirtualRegister(RC); 12683 BuildMI(mainMBB, DL, TII->get(LoOpc), t2L).addReg(SrcLoReg).addReg(LoReg); 12684 BuildMI(mainMBB, DL, TII->get(HiOpc), t2H).addReg(SrcHiReg).addReg(HiReg); 12685 BuildMI(mainMBB, DL, TII->get(NOTOpc), t1L).addReg(t2L); 12686 BuildMI(mainMBB, DL, TII->get(NOTOpc), t1H).addReg(t2H); 12687 break; 12688 } 12689 case X86::ATOMMAX6432: 12690 case X86::ATOMMIN6432: 12691 case X86::ATOMUMAX6432: 12692 case X86::ATOMUMIN6432: { 12693 unsigned HiOpc; 12694 unsigned LoOpc = getNonAtomic6432Opcode(Opc, HiOpc); 12695 unsigned cL = MRI.createVirtualRegister(RC8); 12696 unsigned cH = MRI.createVirtualRegister(RC8); 12697 unsigned cL32 = MRI.createVirtualRegister(RC); 12698 unsigned cH32 = MRI.createVirtualRegister(RC); 12699 unsigned cc = MRI.createVirtualRegister(RC); 12700 // cl := cmp src_lo, lo 12701 BuildMI(mainMBB, DL, TII->get(X86::CMP32rr)) 12702 .addReg(SrcLoReg).addReg(LoReg); 12703 BuildMI(mainMBB, DL, TII->get(LoOpc), cL); 12704 BuildMI(mainMBB, DL, TII->get(X86::MOVZX32rr8), cL32).addReg(cL); 12705 // ch := cmp src_hi, hi 12706 BuildMI(mainMBB, DL, TII->get(X86::CMP32rr)) 12707 .addReg(SrcHiReg).addReg(HiReg); 12708 BuildMI(mainMBB, DL, TII->get(HiOpc), cH); 12709 BuildMI(mainMBB, DL, TII->get(X86::MOVZX32rr8), cH32).addReg(cH); 12710 // cc := if (src_hi == hi) ? 
cl : ch; 12711 if (Subtarget->hasCMov()) { 12712 BuildMI(mainMBB, DL, TII->get(X86::CMOVE32rr), cc) 12713 .addReg(cH32).addReg(cL32); 12714 } else { 12715 MIB = BuildMI(mainMBB, DL, TII->get(X86::CMOV_GR32), cc) 12716 .addReg(cH32).addReg(cL32) 12717 .addImm(X86::COND_E); 12718 mainMBB = EmitLoweredSelect(MIB, mainMBB); 12719 } 12720 BuildMI(mainMBB, DL, TII->get(X86::TEST32rr)).addReg(cc).addReg(cc); 12721 if (Subtarget->hasCMov()) { 12722 BuildMI(mainMBB, DL, TII->get(X86::CMOVNE32rr), t1L) 12723 .addReg(SrcLoReg).addReg(LoReg); 12724 BuildMI(mainMBB, DL, TII->get(X86::CMOVNE32rr), t1H) 12725 .addReg(SrcHiReg).addReg(HiReg); 12726 } else { 12727 MIB = BuildMI(mainMBB, DL, TII->get(X86::CMOV_GR32), t1L) 12728 .addReg(SrcLoReg).addReg(LoReg) 12729 .addImm(X86::COND_NE); 12730 mainMBB = EmitLoweredSelect(MIB, mainMBB); 12731 MIB = BuildMI(mainMBB, DL, TII->get(X86::CMOV_GR32), t1H) 12732 .addReg(SrcHiReg).addReg(HiReg) 12733 .addImm(X86::COND_NE); 12734 mainMBB = EmitLoweredSelect(MIB, mainMBB); 12735 } 12736 break; 12737 } 12738 case X86::ATOMSWAP6432: { 12739 unsigned HiOpc; 12740 unsigned LoOpc = getNonAtomic6432Opcode(Opc, HiOpc); 12741 BuildMI(mainMBB, DL, TII->get(LoOpc), t1L).addReg(SrcLoReg); 12742 BuildMI(mainMBB, DL, TII->get(HiOpc), t1H).addReg(SrcHiReg); 12743 break; 12744 } 12745 } 12746 12747 // Copy EDX:EAX back from HiReg:LoReg 12748 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::EAX).addReg(LoReg); 12749 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::EDX).addReg(HiReg); 12750 // Copy ECX:EBX from t1H:t1L 12751 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::EBX).addReg(t1L); 12752 BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::ECX).addReg(t1H); 12753 12754 MIB = BuildMI(mainMBB, DL, TII->get(LCMPXCHGOpc)); 12755 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) 12756 MIB.addOperand(MI->getOperand(MemOpndSlot + i)); 12757 MIB.setMemRefs(MMOBegin, MMOEnd); 12758 12759 BuildMI(mainMBB, DL, TII->get(X86::JNE_4)).addMBB(origMainMBB); 12760 12761 mainMBB->addSuccessor(origMainMBB); 12762 mainMBB->addSuccessor(sinkMBB); 12763 12764 // sinkMBB: 12765 sinkMBB->addLiveIn(X86::EAX); 12766 sinkMBB->addLiveIn(X86::EDX); 12767 12768 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 12769 TII->get(TargetOpcode::COPY), DstLoReg) 12770 .addReg(X86::EAX); 12771 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 12772 TII->get(TargetOpcode::COPY), DstHiReg) 12773 .addReg(X86::EDX); 12774 12775 MI->eraseFromParent(); 12776 return sinkMBB; 12777} 12778 12779// FIXME: When we get size specific XMM0 registers, i.e. XMM0_V16I8 12780// or XMM0_V32I8 in AVX all of this code can be replaced with that 12781// in the .td file. 12782MachineBasicBlock * 12783X86TargetLowering::EmitPCMP(MachineInstr *MI, MachineBasicBlock *BB, 12784 unsigned numArgs, bool memArg) const { 12785 assert(Subtarget->hasSSE42() && 12786 "Target must have SSE4.2 or AVX features enabled"); 12787 12788 DebugLoc dl = MI->getDebugLoc(); 12789 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 12790 unsigned Opc; 12791 if (!Subtarget->hasAVX()) { 12792 if (memArg) 12793 Opc = numArgs == 3 ? X86::PCMPISTRM128rm : X86::PCMPESTRM128rm; 12794 else 12795 Opc = numArgs == 3 ? X86::PCMPISTRM128rr : X86::PCMPESTRM128rr; 12796 } else { 12797 if (memArg) 12798 Opc = numArgs == 3 ? X86::VPCMPISTRM128rm : X86::VPCMPESTRM128rm; 12799 else 12800 Opc = numArgs == 3 ? 
X86::VPCMPISTRM128rr : X86::VPCMPESTRM128rr; 12801 } 12802 12803 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc)); 12804 for (unsigned i = 0; i < numArgs; ++i) { 12805 MachineOperand &Op = MI->getOperand(i+1); 12806 if (!(Op.isReg() && Op.isImplicit())) 12807 MIB.addOperand(Op); 12808 } 12809 BuildMI(*BB, MI, dl, 12810 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg()) 12811 .addReg(X86::XMM0); 12812 12813 MI->eraseFromParent(); 12814 return BB; 12815} 12816 12817MachineBasicBlock * 12818X86TargetLowering::EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB) const { 12819 DebugLoc dl = MI->getDebugLoc(); 12820 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 12821 12822 // Address into RAX/EAX, other two args into ECX, EDX. 12823 unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r; 12824 unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX; 12825 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg); 12826 for (int i = 0; i < X86::AddrNumOperands; ++i) 12827 MIB.addOperand(MI->getOperand(i)); 12828 12829 unsigned ValOps = X86::AddrNumOperands; 12830 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX) 12831 .addReg(MI->getOperand(ValOps).getReg()); 12832 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX) 12833 .addReg(MI->getOperand(ValOps+1).getReg()); 12834 12835 // The instruction doesn't actually take any operands though. 12836 BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr)); 12837 12838 MI->eraseFromParent(); // The pseudo is gone now. 12839 return BB; 12840} 12841 12842MachineBasicBlock * 12843X86TargetLowering::EmitVAARG64WithCustomInserter( 12844 MachineInstr *MI, 12845 MachineBasicBlock *MBB) const { 12846 // Emit va_arg instruction on X86-64. 12847 12848 // Operands to this pseudo-instruction: 12849 // 0 ) Output : destination address (reg) 12850 // 1-5) Input : va_list address (addr, i64mem) 12851 // 6 ) ArgSize : Size (in bytes) of vararg type 12852 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset 12853 // 8 ) Align : Alignment of type 12854 // 9 ) EFLAGS (implicit-def) 12855 12856 assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!"); 12857 assert(X86::AddrNumOperands == 5 && "VAARG_64 assumes 5 address operands"); 12858 12859 unsigned DestReg = MI->getOperand(0).getReg(); 12860 MachineOperand &Base = MI->getOperand(1); 12861 MachineOperand &Scale = MI->getOperand(2); 12862 MachineOperand &Index = MI->getOperand(3); 12863 MachineOperand &Disp = MI->getOperand(4); 12864 MachineOperand &Segment = MI->getOperand(5); 12865 unsigned ArgSize = MI->getOperand(6).getImm(); 12866 unsigned ArgMode = MI->getOperand(7).getImm(); 12867 unsigned Align = MI->getOperand(8).getImm(); 12868 12869 // Memory Reference 12870 assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand"); 12871 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 12872 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 12873 12874 // Machine Information 12875 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 12876 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 12877 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64); 12878 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32); 12879 DebugLoc DL = MI->getDebugLoc(); 12880 12881 // struct va_list { 12882 // i32 gp_offset 12883 // i32 fp_offset 12884 // i64 overflow_area (address) 12885 // i64 reg_save_area (address) 12886 // } 12887 // 
sizeof(va_list) = 24 12888 // alignment(va_list) = 8 12889 12890 unsigned TotalNumIntRegs = 6; 12891 unsigned TotalNumXMMRegs = 8; 12892 bool UseGPOffset = (ArgMode == 1); 12893 bool UseFPOffset = (ArgMode == 2); 12894 unsigned MaxOffset = TotalNumIntRegs * 8 + 12895 (UseFPOffset ? TotalNumXMMRegs * 16 : 0); 12896 12897 /* Align ArgSize to a multiple of 8 */ 12898 unsigned ArgSizeA8 = (ArgSize + 7) & ~7; 12899 bool NeedsAlign = (Align > 8); 12900 12901 MachineBasicBlock *thisMBB = MBB; 12902 MachineBasicBlock *overflowMBB; 12903 MachineBasicBlock *offsetMBB; 12904 MachineBasicBlock *endMBB; 12905 12906 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB 12907 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB 12908 unsigned OffsetReg = 0; 12909 12910 if (!UseGPOffset && !UseFPOffset) { 12911 // If we only pull from the overflow region, we don't create a branch. 12912 // We don't need to alter control flow. 12913 OffsetDestReg = 0; // unused 12914 OverflowDestReg = DestReg; 12915 12916 offsetMBB = NULL; 12917 overflowMBB = thisMBB; 12918 endMBB = thisMBB; 12919 } else { 12920 // First emit code to check if gp_offset (or fp_offset) is below the bound. 12921 // If so, pull the argument from reg_save_area. (branch to offsetMBB) 12922 // If not, pull from overflow_area. (branch to overflowMBB) 12923 // 12924 // thisMBB 12925 // | . 12926 // | . 12927 // offsetMBB overflowMBB 12928 // | . 12929 // | . 12930 // endMBB 12931 12932 // Registers for the PHI in endMBB 12933 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass); 12934 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass); 12935 12936 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 12937 MachineFunction *MF = MBB->getParent(); 12938 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB); 12939 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB); 12940 endMBB = MF->CreateMachineBasicBlock(LLVM_BB); 12941 12942 MachineFunction::iterator MBBIter = MBB; 12943 ++MBBIter; 12944 12945 // Insert the new basic blocks 12946 MF->insert(MBBIter, offsetMBB); 12947 MF->insert(MBBIter, overflowMBB); 12948 MF->insert(MBBIter, endMBB); 12949 12950 // Transfer the remainder of MBB and its successor edges to endMBB. 12951 endMBB->splice(endMBB->begin(), thisMBB, 12952 llvm::next(MachineBasicBlock::iterator(MI)), 12953 thisMBB->end()); 12954 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB); 12955 12956 // Make offsetMBB and overflowMBB successors of thisMBB 12957 thisMBB->addSuccessor(offsetMBB); 12958 thisMBB->addSuccessor(overflowMBB); 12959 12960 // endMBB is a successor of both offsetMBB and overflowMBB 12961 offsetMBB->addSuccessor(endMBB); 12962 overflowMBB->addSuccessor(endMBB); 12963 12964 // Load the offset value into a register 12965 OffsetReg = MRI.createVirtualRegister(OffsetRegClass); 12966 BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg) 12967 .addOperand(Base) 12968 .addOperand(Scale) 12969 .addOperand(Index) 12970 .addDisp(Disp, UseFPOffset ? 4 : 0) 12971 .addOperand(Segment) 12972 .setMemRefs(MMOBegin, MMOEnd); 12973 12974 // Check if there is enough room left to pull this argument. 12975 BuildMI(thisMBB, DL, TII->get(X86::CMP32ri)) 12976 .addReg(OffsetReg) 12977 .addImm(MaxOffset + 8 - ArgSizeA8); 12978 12979 // Branch to "overflowMBB" if offset >= max 12980 // Fall through to "offsetMBB" otherwise 12981 BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE))) 12982 .addMBB(overflowMBB); 12983 } 12984 12985 // In offsetMBB, emit code to use the reg_save_area. 
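  // Roughly (illustrative pseudo-C, using the va_list layout above):
  //   addr    = *(i64 *)(va_list + 16) + zext(offset); // reg_save_area + off
  //   offset += UseFPOffset ? 16 : 8;                  // next gp/fp slot
  //   *(i32 *)(va_list + (UseFPOffset ? 4 : 0)) = offset;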
12986 if (offsetMBB) { 12987 assert(OffsetReg != 0); 12988 12989 // Read the reg_save_area address. 12990 unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass); 12991 BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg) 12992 .addOperand(Base) 12993 .addOperand(Scale) 12994 .addOperand(Index) 12995 .addDisp(Disp, 16) 12996 .addOperand(Segment) 12997 .setMemRefs(MMOBegin, MMOEnd); 12998 12999 // Zero-extend the offset 13000 unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass); 13001 BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64) 13002 .addImm(0) 13003 .addReg(OffsetReg) 13004 .addImm(X86::sub_32bit); 13005 13006 // Add the offset to the reg_save_area to get the final address. 13007 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg) 13008 .addReg(OffsetReg64) 13009 .addReg(RegSaveReg); 13010 13011 // Compute the offset for the next argument 13012 unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass); 13013 BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg) 13014 .addReg(OffsetReg) 13015 .addImm(UseFPOffset ? 16 : 8); 13016 13017 // Store it back into the va_list. 13018 BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr)) 13019 .addOperand(Base) 13020 .addOperand(Scale) 13021 .addOperand(Index) 13022 .addDisp(Disp, UseFPOffset ? 4 : 0) 13023 .addOperand(Segment) 13024 .addReg(NextOffsetReg) 13025 .setMemRefs(MMOBegin, MMOEnd); 13026 13027 // Jump to endMBB 13028 BuildMI(offsetMBB, DL, TII->get(X86::JMP_4)) 13029 .addMBB(endMBB); 13030 } 13031 13032 // 13033 // Emit code to use overflow area 13034 // 13035 13036 // Load the overflow_area address into a register. 13037 unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass); 13038 BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg) 13039 .addOperand(Base) 13040 .addOperand(Scale) 13041 .addOperand(Index) 13042 .addDisp(Disp, 8) 13043 .addOperand(Segment) 13044 .setMemRefs(MMOBegin, MMOEnd); 13045 13046 // If we need to align it, do so. Otherwise, just copy the address 13047 // to OverflowDestReg. 13048 if (NeedsAlign) { 13049 // Align the overflow address 13050 assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2"); 13051 unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass); 13052 13053 // aligned_addr = (addr + (align-1)) & ~(align-1) 13054 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg) 13055 .addReg(OverflowAddrReg) 13056 .addImm(Align-1); 13057 13058 BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg) 13059 .addReg(TmpReg) 13060 .addImm(~(uint64_t)(Align-1)); 13061 } else { 13062 BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg) 13063 .addReg(OverflowAddrReg); 13064 } 13065 13066 // Compute the next overflow address after this argument. 13067 // (the overflow address should be kept 8-byte aligned) 13068 unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass); 13069 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg) 13070 .addReg(OverflowDestReg) 13071 .addImm(ArgSizeA8); 13072 13073 // Store the new overflow address. 13074 BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr)) 13075 .addOperand(Base) 13076 .addOperand(Scale) 13077 .addOperand(Index) 13078 .addDisp(Disp, 8) 13079 .addOperand(Segment) 13080 .addReg(NextAddrReg) 13081 .setMemRefs(MMOBegin, MMOEnd); 13082 13083 // If we branched, emit the PHI to the front of endMBB. 
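  // (Machine-level PHIs must be grouped at the very top of a block, hence the
  // insertion at endMBB->begin(); DestReg then selects whichever address was
  // actually computed.)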
13084 if (offsetMBB) { 13085 BuildMI(*endMBB, endMBB->begin(), DL, 13086 TII->get(X86::PHI), DestReg) 13087 .addReg(OffsetDestReg).addMBB(offsetMBB) 13088 .addReg(OverflowDestReg).addMBB(overflowMBB); 13089 } 13090 13091 // Erase the pseudo instruction 13092 MI->eraseFromParent(); 13093 13094 return endMBB; 13095} 13096 13097MachineBasicBlock * 13098X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter( 13099 MachineInstr *MI, 13100 MachineBasicBlock *MBB) const { 13101 // Emit code to save XMM registers to the stack. The ABI says that the 13102 // number of registers to save is given in %al, so it's theoretically 13103 // possible to do an indirect jump trick to avoid saving all of them, 13104 // however this code takes a simpler approach and just executes all 13105 // of the stores if %al is non-zero. It's less code, and it's probably 13106 // easier on the hardware branch predictor, and stores aren't all that 13107 // expensive anyway. 13108 13109 // Create the new basic blocks. One block contains all the XMM stores, 13110 // and one block is the final destination regardless of whether any 13111 // stores were performed. 13112 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 13113 MachineFunction *F = MBB->getParent(); 13114 MachineFunction::iterator MBBIter = MBB; 13115 ++MBBIter; 13116 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB); 13117 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB); 13118 F->insert(MBBIter, XMMSaveMBB); 13119 F->insert(MBBIter, EndMBB); 13120 13121 // Transfer the remainder of MBB and its successor edges to EndMBB. 13122 EndMBB->splice(EndMBB->begin(), MBB, 13123 llvm::next(MachineBasicBlock::iterator(MI)), 13124 MBB->end()); 13125 EndMBB->transferSuccessorsAndUpdatePHIs(MBB); 13126 13127 // The original block will now fall through to the XMM save block. 13128 MBB->addSuccessor(XMMSaveMBB); 13129 // The XMMSaveMBB will fall through to the end block. 13130 XMMSaveMBB->addSuccessor(EndMBB); 13131 13132 // Now add the instructions. 13133 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 13134 DebugLoc DL = MI->getDebugLoc(); 13135 13136 unsigned CountReg = MI->getOperand(0).getReg(); 13137 int64_t RegSaveFrameIndex = MI->getOperand(1).getImm(); 13138 int64_t VarArgsFPOffset = MI->getOperand(2).getImm(); 13139 13140 if (!Subtarget->isTargetWin64()) { 13141 // If %al is 0, branch around the XMM save block. 13142 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg); 13143 BuildMI(MBB, DL, TII->get(X86::JE_4)).addMBB(EndMBB); 13144 MBB->addSuccessor(EndMBB); 13145 } 13146 13147 unsigned MOVOpc = Subtarget->hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr; 13148 // In the XMM save block, save all the XMM argument registers. 13149 for (int i = 3, e = MI->getNumOperands(); i != e; ++i) { 13150 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset; 13151 MachineMemOperand *MMO = 13152 F->getMachineMemOperand( 13153 MachinePointerInfo::getFixedStack(RegSaveFrameIndex, Offset), 13154 MachineMemOperand::MOStore, 13155 /*Size=*/16, /*Align=*/16); 13156 BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc)) 13157 .addFrameIndex(RegSaveFrameIndex) 13158 .addImm(/*Scale=*/1) 13159 .addReg(/*IndexReg=*/0) 13160 .addImm(/*Disp=*/Offset) 13161 .addReg(/*Segment=*/0) 13162 .addReg(MI->getOperand(i).getReg()) 13163 .addMemOperand(MMO); 13164 } 13165 13166 MI->eraseFromParent(); // The pseudo instruction is gone now. 
13167 13168 return EndMBB; 13169} 13170 13171// The EFLAGS operand of SelectItr might be missing a kill marker 13172// because there were multiple uses of EFLAGS, and ISel didn't know 13173// which to mark. Figure out whether SelectItr should have had a 13174// kill marker, and set it if it should. Returns the correct kill 13175// marker value. 13176static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr, 13177 MachineBasicBlock* BB, 13178 const TargetRegisterInfo* TRI) { 13179 // Scan forward through BB for a use/def of EFLAGS. 13180 MachineBasicBlock::iterator miI(llvm::next(SelectItr)); 13181 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) { 13182 const MachineInstr& mi = *miI; 13183 if (mi.readsRegister(X86::EFLAGS)) 13184 return false; 13185 if (mi.definesRegister(X86::EFLAGS)) 13186 break; // Should have kill-flag - update below. 13187 } 13188 13189 // If we hit the end of the block, check whether EFLAGS is live into a 13190 // successor. 13191 if (miI == BB->end()) { 13192 for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(), 13193 sEnd = BB->succ_end(); 13194 sItr != sEnd; ++sItr) { 13195 MachineBasicBlock* succ = *sItr; 13196 if (succ->isLiveIn(X86::EFLAGS)) 13197 return false; 13198 } 13199 } 13200 13201 // We found a def, or hit the end of the basic block and EFLAGS wasn't live 13202 // out. SelectMI should have a kill flag on EFLAGS. 13203 SelectItr->addRegisterKilled(X86::EFLAGS, TRI); 13204 return true; 13205} 13206 13207MachineBasicBlock * 13208X86TargetLowering::EmitLoweredSelect(MachineInstr *MI, 13209 MachineBasicBlock *BB) const { 13210 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 13211 DebugLoc DL = MI->getDebugLoc(); 13212 13213 // To "insert" a SELECT_CC instruction, we actually have to insert the 13214 // diamond control-flow pattern. The incoming instruction knows the 13215 // destination vreg to set, the condition code register to branch on, the 13216 // true/false values to select between, and a branch opcode to use. 13217 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 13218 MachineFunction::iterator It = BB; 13219 ++It; 13220 13221 // thisMBB: 13222 // ... 13223 // TrueVal = ... 13224 // cmpTY ccX, r1, r2 13225 // bCC copy1MBB 13226 // fallthrough --> copy0MBB 13227 MachineBasicBlock *thisMBB = BB; 13228 MachineFunction *F = BB->getParent(); 13229 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 13230 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 13231 F->insert(It, copy0MBB); 13232 F->insert(It, sinkMBB); 13233 13234 // If the EFLAGS register isn't dead in the terminator, then claim that it's 13235 // live into the sink and copy blocks. 13236 const TargetRegisterInfo* TRI = getTargetMachine().getRegisterInfo(); 13237 if (!MI->killsRegister(X86::EFLAGS) && 13238 !checkAndUpdateEFLAGSKill(MI, BB, TRI)) { 13239 copy0MBB->addLiveIn(X86::EFLAGS); 13240 sinkMBB->addLiveIn(X86::EFLAGS); 13241 } 13242 13243 // Transfer the remainder of BB and its successor edges to sinkMBB. 13244 sinkMBB->splice(sinkMBB->begin(), BB, 13245 llvm::next(MachineBasicBlock::iterator(MI)), 13246 BB->end()); 13247 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 13248 13249 // Add the true and fallthrough blocks as its successors. 13250 BB->addSuccessor(copy0MBB); 13251 BB->addSuccessor(sinkMBB); 13252 13253 // Create the conditional branch instruction. 
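  // (Operand 3 of the CMOV_* pseudo holds the X86 condition code as an
  // immediate; it is translated to the matching Jcc opcode here, while
  // operands 1 and 2 feed the PHI in sinkMBB as the false and true values.)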
13254 unsigned Opc = 13255 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm()); 13256 BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB); 13257 13258 // copy0MBB: 13259 // %FalseValue = ... 13260 // # fallthrough to sinkMBB 13261 copy0MBB->addSuccessor(sinkMBB); 13262 13263 // sinkMBB: 13264 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 13265 // ... 13266 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 13267 TII->get(X86::PHI), MI->getOperand(0).getReg()) 13268 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 13269 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 13270 13271 MI->eraseFromParent(); // The pseudo instruction is gone now. 13272 return sinkMBB; 13273} 13274 13275MachineBasicBlock * 13276X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI, MachineBasicBlock *BB, 13277 bool Is64Bit) const { 13278 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 13279 DebugLoc DL = MI->getDebugLoc(); 13280 MachineFunction *MF = BB->getParent(); 13281 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 13282 13283 assert(getTargetMachine().Options.EnableSegmentedStacks); 13284 13285 unsigned TlsReg = Is64Bit ? X86::FS : X86::GS; 13286 unsigned TlsOffset = Is64Bit ? 0x70 : 0x30; 13287 13288 // BB: 13289 // ... [Till the alloca] 13290 // If stacklet is not large enough, jump to mallocMBB 13291 // 13292 // bumpMBB: 13293 // Allocate by subtracting from RSP 13294 // Jump to continueMBB 13295 // 13296 // mallocMBB: 13297 // Allocate by call to runtime 13298 // 13299 // continueMBB: 13300 // ... 13301 // [rest of original BB] 13302 // 13303 13304 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB); 13305 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB); 13306 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB); 13307 13308 MachineRegisterInfo &MRI = MF->getRegInfo(); 13309 const TargetRegisterClass *AddrRegClass = 13310 getRegClassFor(Is64Bit ? MVT::i64:MVT::i32); 13311 13312 unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass), 13313 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass), 13314 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass), 13315 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass), 13316 sizeVReg = MI->getOperand(1).getReg(), 13317 physSPReg = Is64Bit ? X86::RSP : X86::ESP; 13318 13319 MachineFunction::iterator MBBIter = BB; 13320 ++MBBIter; 13321 13322 MF->insert(MBBIter, bumpMBB); 13323 MF->insert(MBBIter, mallocMBB); 13324 MF->insert(MBBIter, continueMBB); 13325 13326 continueMBB->splice(continueMBB->begin(), BB, llvm::next 13327 (MachineBasicBlock::iterator(MI)), BB->end()); 13328 continueMBB->transferSuccessorsAndUpdatePHIs(BB); 13329 13330 // Add code to the main basic block to check if the stack limit has been hit, 13331 // and if so, jump to mallocMBB otherwise to bumpMBB. 13332 BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg); 13333 BuildMI(BB, DL, TII->get(Is64Bit ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg) 13334 .addReg(tmpSPVReg).addReg(sizeVReg); 13335 BuildMI(BB, DL, TII->get(Is64Bit ? X86::CMP64mr:X86::CMP32mr)) 13336 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg) 13337 .addReg(SPLimitVReg); 13338 BuildMI(BB, DL, TII->get(X86::JG_4)).addMBB(mallocMBB); 13339 13340 // bumpMBB simply decreases the stack pointer, since we know the current 13341 // stacklet has enough space. 
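  // (SPLimitVReg already holds SP - size from the SUB above, so the "bump" is
  // just copying that value into the physical stack pointer and into the
  // result register.)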
13342 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg) 13343 .addReg(SPLimitVReg); 13344 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg) 13345 .addReg(SPLimitVReg); 13346 BuildMI(bumpMBB, DL, TII->get(X86::JMP_4)).addMBB(continueMBB); 13347 13348 // Calls into a routine in libgcc to allocate more space from the heap. 13349 const uint32_t *RegMask = 13350 getTargetMachine().getRegisterInfo()->getCallPreservedMask(CallingConv::C); 13351 if (Is64Bit) { 13352 BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI) 13353 .addReg(sizeVReg); 13354 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32)) 13355 .addExternalSymbol("__morestack_allocate_stack_space") 13356 .addRegMask(RegMask) 13357 .addReg(X86::RDI, RegState::Implicit) 13358 .addReg(X86::RAX, RegState::ImplicitDefine); 13359 } else { 13360 BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg) 13361 .addImm(12); 13362 BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg); 13363 BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32)) 13364 .addExternalSymbol("__morestack_allocate_stack_space") 13365 .addRegMask(RegMask) 13366 .addReg(X86::EAX, RegState::ImplicitDefine); 13367 } 13368 13369 if (!Is64Bit) 13370 BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg) 13371 .addImm(16); 13372 13373 BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg) 13374 .addReg(Is64Bit ? X86::RAX : X86::EAX); 13375 BuildMI(mallocMBB, DL, TII->get(X86::JMP_4)).addMBB(continueMBB); 13376 13377 // Set up the CFG correctly. 13378 BB->addSuccessor(bumpMBB); 13379 BB->addSuccessor(mallocMBB); 13380 mallocMBB->addSuccessor(continueMBB); 13381 bumpMBB->addSuccessor(continueMBB); 13382 13383 // Take care of the PHI nodes. 13384 BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI), 13385 MI->getOperand(0).getReg()) 13386 .addReg(mallocPtrVReg).addMBB(mallocMBB) 13387 .addReg(bumpSPPtrVReg).addMBB(bumpMBB); 13388 13389 // Delete the original pseudo instruction. 13390 MI->eraseFromParent(); 13391 13392 // And we're done. 13393 return continueMBB; 13394} 13395 13396MachineBasicBlock * 13397X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI, 13398 MachineBasicBlock *BB) const { 13399 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 13400 DebugLoc DL = MI->getDebugLoc(); 13401 13402 assert(!Subtarget->isTargetEnvMacho()); 13403 13404 // The lowering is pretty easy: we're just emitting the call to _alloca. The 13405 // non-trivial part is impdef of ESP. 13406 13407 if (Subtarget->isTargetWin64()) { 13408 if (Subtarget->isTargetCygMing()) { 13409 // ___chkstk(Mingw64): 13410 // Clobbers R10, R11, RAX and EFLAGS. 13411 // Updates RSP. 13412 BuildMI(*BB, MI, DL, TII->get(X86::W64ALLOCA)) 13413 .addExternalSymbol("___chkstk") 13414 .addReg(X86::RAX, RegState::Implicit) 13415 .addReg(X86::RSP, RegState::Implicit) 13416 .addReg(X86::RAX, RegState::Define | RegState::Implicit) 13417 .addReg(X86::RSP, RegState::Define | RegState::Implicit) 13418 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit); 13419 } else { 13420 // __chkstk(MSVCRT): does not update stack pointer. 13421 // Clobbers R10, R11 and EFLAGS. 13422 // FIXME: RAX(allocated size) might be reused and not killed. 13423 BuildMI(*BB, MI, DL, TII->get(X86::W64ALLOCA)) 13424 .addExternalSymbol("__chkstk") 13425 .addReg(X86::RAX, RegState::Implicit) 13426 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit); 13427 // RAX has the offset to subtracted from RSP. 
13428 BuildMI(*BB, MI, DL, TII->get(X86::SUB64rr), X86::RSP) 13429 .addReg(X86::RSP) 13430 .addReg(X86::RAX); 13431 } 13432 } else { 13433 const char *StackProbeSymbol = 13434 Subtarget->isTargetWindows() ? "_chkstk" : "_alloca"; 13435 13436 BuildMI(*BB, MI, DL, TII->get(X86::CALLpcrel32)) 13437 .addExternalSymbol(StackProbeSymbol) 13438 .addReg(X86::EAX, RegState::Implicit) 13439 .addReg(X86::ESP, RegState::Implicit) 13440 .addReg(X86::EAX, RegState::Define | RegState::Implicit) 13441 .addReg(X86::ESP, RegState::Define | RegState::Implicit) 13442 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit); 13443 } 13444 13445 MI->eraseFromParent(); // The pseudo instruction is gone now. 13446 return BB; 13447} 13448 13449MachineBasicBlock * 13450X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI, 13451 MachineBasicBlock *BB) const { 13452 // This is pretty easy. We're taking the value that we received from 13453 // our load from the relocation, sticking it in either RDI (x86-64) 13454 // or EAX and doing an indirect call. The return value will then 13455 // be in the normal return register. 13456 const X86InstrInfo *TII 13457 = static_cast<const X86InstrInfo*>(getTargetMachine().getInstrInfo()); 13458 DebugLoc DL = MI->getDebugLoc(); 13459 MachineFunction *F = BB->getParent(); 13460 13461 assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?"); 13462 assert(MI->getOperand(3).isGlobal() && "This should be a global"); 13463 13464 // Get a register mask for the lowered call. 13465 // FIXME: The 32-bit calls have non-standard calling conventions. Use a 13466 // proper register mask. 13467 const uint32_t *RegMask = 13468 getTargetMachine().getRegisterInfo()->getCallPreservedMask(CallingConv::C); 13469 if (Subtarget->is64Bit()) { 13470 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, 13471 TII->get(X86::MOV64rm), X86::RDI) 13472 .addReg(X86::RIP) 13473 .addImm(0).addReg(0) 13474 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, 13475 MI->getOperand(3).getTargetFlags()) 13476 .addReg(0); 13477 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m)); 13478 addDirectMem(MIB, X86::RDI); 13479 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask); 13480 } else if (getTargetMachine().getRelocationModel() != Reloc::PIC_) { 13481 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, 13482 TII->get(X86::MOV32rm), X86::EAX) 13483 .addReg(0) 13484 .addImm(0).addReg(0) 13485 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, 13486 MI->getOperand(3).getTargetFlags()) 13487 .addReg(0); 13488 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m)); 13489 addDirectMem(MIB, X86::EAX); 13490 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask); 13491 } else { 13492 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, 13493 TII->get(X86::MOV32rm), X86::EAX) 13494 .addReg(TII->getGlobalBaseReg(F)) 13495 .addImm(0).addReg(0) 13496 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, 13497 MI->getOperand(3).getTargetFlags()) 13498 .addReg(0); 13499 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m)); 13500 addDirectMem(MIB, X86::EAX); 13501 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask); 13502 } 13503 13504 MI->eraseFromParent(); // The pseudo instruction is gone now. 
13505 return BB; 13506} 13507 13508MachineBasicBlock * 13509X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI, 13510 MachineBasicBlock *MBB) const { 13511 DebugLoc DL = MI->getDebugLoc(); 13512 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 13513 13514 MachineFunction *MF = MBB->getParent(); 13515 MachineRegisterInfo &MRI = MF->getRegInfo(); 13516 13517 const BasicBlock *BB = MBB->getBasicBlock(); 13518 MachineFunction::iterator I = MBB; 13519 ++I; 13520 13521 // Memory Reference 13522 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 13523 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 13524 13525 unsigned DstReg; 13526 unsigned MemOpndSlot = 0; 13527 13528 unsigned CurOp = 0; 13529 13530 DstReg = MI->getOperand(CurOp++).getReg(); 13531 const TargetRegisterClass *RC = MRI.getRegClass(DstReg); 13532 assert(RC->hasType(MVT::i32) && "Invalid destination!"); 13533 unsigned mainDstReg = MRI.createVirtualRegister(RC); 13534 unsigned restoreDstReg = MRI.createVirtualRegister(RC); 13535 13536 MemOpndSlot = CurOp; 13537 13538 MVT PVT = getPointerTy(); 13539 assert((PVT == MVT::i64 || PVT == MVT::i32) && 13540 "Invalid Pointer Size!"); 13541 13542 // For v = setjmp(buf), we generate 13543 // 13544 // thisMBB: 13545 // buf[LabelOffset] = restoreMBB 13546 // SjLjSetup restoreMBB 13547 // 13548 // mainMBB: 13549 // v_main = 0 13550 // 13551 // sinkMBB: 13552 // v = phi(main, restore) 13553 // 13554 // restoreMBB: 13555 // v_restore = 1 13556 13557 MachineBasicBlock *thisMBB = MBB; 13558 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 13559 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 13560 MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB); 13561 MF->insert(I, mainMBB); 13562 MF->insert(I, sinkMBB); 13563 MF->push_back(restoreMBB); 13564 13565 MachineInstrBuilder MIB; 13566 13567 // Transfer the remainder of BB and its successor edges to sinkMBB. 13568 sinkMBB->splice(sinkMBB->begin(), MBB, 13569 llvm::next(MachineBasicBlock::iterator(MI)), MBB->end()); 13570 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 13571 13572 // thisMBB: 13573 unsigned PtrStoreOpc = 0; 13574 unsigned LabelReg = 0; 13575 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 13576 Reloc::Model RM = getTargetMachine().getRelocationModel(); 13577 bool UseImmLabel = (getTargetMachine().getCodeModel() == CodeModel::Small) && 13578 (RM == Reloc::Static || RM == Reloc::DynamicNoPIC); 13579 13580 // Prepare IP either in reg or imm. 13581 if (!UseImmLabel) { 13582 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr; 13583 const TargetRegisterClass *PtrRC = getRegClassFor(PVT); 13584 LabelReg = MRI.createVirtualRegister(PtrRC); 13585 if (Subtarget->is64Bit()) { 13586 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg) 13587 .addReg(X86::RIP) 13588 .addImm(0) 13589 .addReg(0) 13590 .addMBB(restoreMBB) 13591 .addReg(0); 13592 } else { 13593 const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII); 13594 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg) 13595 .addReg(XII->getGlobalBaseReg(MF)) 13596 .addImm(0) 13597 .addReg(0) 13598 .addMBB(restoreMBB, Subtarget->ClassifyBlockAddressReference()) 13599 .addReg(0); 13600 } 13601 } else 13602 PtrStoreOpc = (PVT == MVT::i64) ? 
X86::MOV64mi32 : X86::MOV32mi; 13603 // Store IP 13604 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc)); 13605 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { 13606 if (i == X86::AddrDisp) 13607 MIB.addDisp(MI->getOperand(MemOpndSlot + i), LabelOffset); 13608 else 13609 MIB.addOperand(MI->getOperand(MemOpndSlot + i)); 13610 } 13611 if (!UseImmLabel) 13612 MIB.addReg(LabelReg); 13613 else 13614 MIB.addMBB(restoreMBB); 13615 MIB.setMemRefs(MMOBegin, MMOEnd); 13616 // Setup 13617 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup)) 13618 .addMBB(restoreMBB); 13619 MIB.addRegMask(RegInfo->getNoPreservedMask()); 13620 thisMBB->addSuccessor(mainMBB); 13621 thisMBB->addSuccessor(restoreMBB); 13622 13623 // mainMBB: 13624 // EAX = 0 13625 BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg); 13626 mainMBB->addSuccessor(sinkMBB); 13627 13628 // sinkMBB: 13629 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 13630 TII->get(X86::PHI), DstReg) 13631 .addReg(mainDstReg).addMBB(mainMBB) 13632 .addReg(restoreDstReg).addMBB(restoreMBB); 13633 13634 // restoreMBB: 13635 BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1); 13636 BuildMI(restoreMBB, DL, TII->get(X86::JMP_4)).addMBB(sinkMBB); 13637 restoreMBB->addSuccessor(sinkMBB); 13638 13639 MI->eraseFromParent(); 13640 return sinkMBB; 13641} 13642 13643MachineBasicBlock * 13644X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI, 13645 MachineBasicBlock *MBB) const { 13646 DebugLoc DL = MI->getDebugLoc(); 13647 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 13648 13649 MachineFunction *MF = MBB->getParent(); 13650 MachineRegisterInfo &MRI = MF->getRegInfo(); 13651 13652 // Memory Reference 13653 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 13654 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 13655 13656 MVT PVT = getPointerTy(); 13657 assert((PVT == MVT::i64 || PVT == MVT::i32) && 13658 "Invalid Pointer Size!"); 13659 13660 const TargetRegisterClass *RC = 13661 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass; 13662 unsigned Tmp = MRI.createVirtualRegister(RC); 13663 // Since FP is only updated here but NOT referenced, it's treated as GPR. 13664 unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP; 13665 unsigned SP = RegInfo->getStackRegister(); 13666 13667 MachineInstrBuilder MIB; 13668 13669 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 13670 const int64_t SPOffset = 2 * PVT.getStoreSize(); 13671 13672 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm; 13673 unsigned IJmpOpc = (PVT == MVT::i64) ? 
X86::JMP64r : X86::JMP32r; 13674 13675 // Reload FP 13676 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP); 13677 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) 13678 MIB.addOperand(MI->getOperand(i)); 13679 MIB.setMemRefs(MMOBegin, MMOEnd); 13680 // Reload IP 13681 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp); 13682 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { 13683 if (i == X86::AddrDisp) 13684 MIB.addDisp(MI->getOperand(i), LabelOffset); 13685 else 13686 MIB.addOperand(MI->getOperand(i)); 13687 } 13688 MIB.setMemRefs(MMOBegin, MMOEnd); 13689 // Reload SP 13690 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), SP); 13691 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { 13692 if (i == X86::AddrDisp) 13693 MIB.addDisp(MI->getOperand(i), SPOffset); 13694 else 13695 MIB.addOperand(MI->getOperand(i)); 13696 } 13697 MIB.setMemRefs(MMOBegin, MMOEnd); 13698 // Jump 13699 BuildMI(*MBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp); 13700 13701 MI->eraseFromParent(); 13702 return MBB; 13703} 13704 13705MachineBasicBlock * 13706X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 13707 MachineBasicBlock *BB) const { 13708 switch (MI->getOpcode()) { 13709 default: llvm_unreachable("Unexpected instr type to insert"); 13710 case X86::TAILJMPd64: 13711 case X86::TAILJMPr64: 13712 case X86::TAILJMPm64: 13713 llvm_unreachable("TAILJMP64 would not be touched here."); 13714 case X86::TCRETURNdi64: 13715 case X86::TCRETURNri64: 13716 case X86::TCRETURNmi64: 13717 return BB; 13718 case X86::WIN_ALLOCA: 13719 return EmitLoweredWinAlloca(MI, BB); 13720 case X86::SEG_ALLOCA_32: 13721 return EmitLoweredSegAlloca(MI, BB, false); 13722 case X86::SEG_ALLOCA_64: 13723 return EmitLoweredSegAlloca(MI, BB, true); 13724 case X86::TLSCall_32: 13725 case X86::TLSCall_64: 13726 return EmitLoweredTLSCall(MI, BB); 13727 case X86::CMOV_GR8: 13728 case X86::CMOV_FR32: 13729 case X86::CMOV_FR64: 13730 case X86::CMOV_V4F32: 13731 case X86::CMOV_V2F64: 13732 case X86::CMOV_V2I64: 13733 case X86::CMOV_V8F32: 13734 case X86::CMOV_V4F64: 13735 case X86::CMOV_V4I64: 13736 case X86::CMOV_GR16: 13737 case X86::CMOV_GR32: 13738 case X86::CMOV_RFP32: 13739 case X86::CMOV_RFP64: 13740 case X86::CMOV_RFP80: 13741 return EmitLoweredSelect(MI, BB); 13742 13743 case X86::FP32_TO_INT16_IN_MEM: 13744 case X86::FP32_TO_INT32_IN_MEM: 13745 case X86::FP32_TO_INT64_IN_MEM: 13746 case X86::FP64_TO_INT16_IN_MEM: 13747 case X86::FP64_TO_INT32_IN_MEM: 13748 case X86::FP64_TO_INT64_IN_MEM: 13749 case X86::FP80_TO_INT16_IN_MEM: 13750 case X86::FP80_TO_INT32_IN_MEM: 13751 case X86::FP80_TO_INT64_IN_MEM: { 13752 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 13753 DebugLoc DL = MI->getDebugLoc(); 13754 13755 // Change the floating point control register to use "round towards zero" 13756 // mode when truncating to an integer value. 13757 MachineFunction *F = BB->getParent(); 13758 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false); 13759 addFrameReference(BuildMI(*BB, MI, DL, 13760 TII->get(X86::FNSTCW16m)), CWFrameIdx); 13761 13762 // Load the old value of the high byte of the control word... 13763 unsigned OldCW = 13764 F->getRegInfo().createVirtualRegister(&X86::GR16RegClass); 13765 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW), 13766 CWFrameIdx); 13767 13768 // Set the high part to be round to zero... 13769 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx) 13770 .addImm(0xC7F); 13771 13772 // Reload the modified control word now... 
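    // (0xC7F sets the x87 rounding-control bits to 11b, i.e. round toward
    // zero, matching C's truncating FP-to-integer conversion; the original
    // control word saved above is reloaded again after the integer store.)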
13773 addFrameReference(BuildMI(*BB, MI, DL, 13774 TII->get(X86::FLDCW16m)), CWFrameIdx); 13775 13776 // Restore the memory image of control word to original value 13777 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx) 13778 .addReg(OldCW); 13779 13780 // Get the X86 opcode to use. 13781 unsigned Opc; 13782 switch (MI->getOpcode()) { 13783 default: llvm_unreachable("illegal opcode!"); 13784 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break; 13785 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break; 13786 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break; 13787 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break; 13788 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break; 13789 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break; 13790 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break; 13791 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break; 13792 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break; 13793 } 13794 13795 X86AddressMode AM; 13796 MachineOperand &Op = MI->getOperand(0); 13797 if (Op.isReg()) { 13798 AM.BaseType = X86AddressMode::RegBase; 13799 AM.Base.Reg = Op.getReg(); 13800 } else { 13801 AM.BaseType = X86AddressMode::FrameIndexBase; 13802 AM.Base.FrameIndex = Op.getIndex(); 13803 } 13804 Op = MI->getOperand(1); 13805 if (Op.isImm()) 13806 AM.Scale = Op.getImm(); 13807 Op = MI->getOperand(2); 13808 if (Op.isImm()) 13809 AM.IndexReg = Op.getImm(); 13810 Op = MI->getOperand(3); 13811 if (Op.isGlobal()) { 13812 AM.GV = Op.getGlobal(); 13813 } else { 13814 AM.Disp = Op.getImm(); 13815 } 13816 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM) 13817 .addReg(MI->getOperand(X86::AddrNumOperands).getReg()); 13818 13819 // Reload the original control word now. 13820 addFrameReference(BuildMI(*BB, MI, DL, 13821 TII->get(X86::FLDCW16m)), CWFrameIdx); 13822 13823 MI->eraseFromParent(); // The pseudo instruction is gone now. 13824 return BB; 13825 } 13826 // String/text processing lowering. 13827 case X86::PCMPISTRM128REG: 13828 case X86::VPCMPISTRM128REG: 13829 case X86::PCMPISTRM128MEM: 13830 case X86::VPCMPISTRM128MEM: 13831 case X86::PCMPESTRM128REG: 13832 case X86::VPCMPESTRM128REG: 13833 case X86::PCMPESTRM128MEM: 13834 case X86::VPCMPESTRM128MEM: { 13835 unsigned NumArgs; 13836 bool MemArg; 13837 switch (MI->getOpcode()) { 13838 default: llvm_unreachable("illegal opcode!"); 13839 case X86::PCMPISTRM128REG: 13840 case X86::VPCMPISTRM128REG: 13841 NumArgs = 3; MemArg = false; break; 13842 case X86::PCMPISTRM128MEM: 13843 case X86::VPCMPISTRM128MEM: 13844 NumArgs = 3; MemArg = true; break; 13845 case X86::PCMPESTRM128REG: 13846 case X86::VPCMPESTRM128REG: 13847 NumArgs = 5; MemArg = false; break; 13848 case X86::PCMPESTRM128MEM: 13849 case X86::VPCMPESTRM128MEM: 13850 NumArgs = 5; MemArg = true; break; 13851 } 13852 return EmitPCMP(MI, BB, NumArgs, MemArg); 13853 } 13854 13855 // Thread synchronization. 13856 case X86::MONITOR: 13857 return EmitMonitor(MI, BB); 13858 13859 // Atomic Lowering. 
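  // These pseudos return the previous memory value, so EmitAtomicLoadArith
  // expands each of them into a compare-and-swap loop rather than a single
  // LOCK-prefixed instruction.  As an illustrative sketch only (virtual
  // register names are made up), ATOMAND32 becomes roughly:
  //
  //   mainMBB:
  //     %old = PHI [%initial, thisMBB], [EAX, mainMBB]
  //     %new = AND32rr %old, %src
  //     EAX  = COPY %old
  //     LCMPXCHG32 [addr], %new   ; EAX receives the current memory value
  //     JNE mainMBB
  //   sinkMBB:
  //     %result = COPY EAX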
13860 case X86::ATOMAND8: 13861 case X86::ATOMAND16: 13862 case X86::ATOMAND32: 13863 case X86::ATOMAND64: 13864 // Fall through 13865 case X86::ATOMOR8: 13866 case X86::ATOMOR16: 13867 case X86::ATOMOR32: 13868 case X86::ATOMOR64: 13869 // Fall through 13870 case X86::ATOMXOR16: 13871 case X86::ATOMXOR8: 13872 case X86::ATOMXOR32: 13873 case X86::ATOMXOR64: 13874 // Fall through 13875 case X86::ATOMNAND8: 13876 case X86::ATOMNAND16: 13877 case X86::ATOMNAND32: 13878 case X86::ATOMNAND64: 13879 // Fall through 13880 case X86::ATOMMAX8: 13881 case X86::ATOMMAX16: 13882 case X86::ATOMMAX32: 13883 case X86::ATOMMAX64: 13884 // Fall through 13885 case X86::ATOMMIN8: 13886 case X86::ATOMMIN16: 13887 case X86::ATOMMIN32: 13888 case X86::ATOMMIN64: 13889 // Fall through 13890 case X86::ATOMUMAX8: 13891 case X86::ATOMUMAX16: 13892 case X86::ATOMUMAX32: 13893 case X86::ATOMUMAX64: 13894 // Fall through 13895 case X86::ATOMUMIN8: 13896 case X86::ATOMUMIN16: 13897 case X86::ATOMUMIN32: 13898 case X86::ATOMUMIN64: 13899 return EmitAtomicLoadArith(MI, BB); 13900 13901 // This group does 64-bit operations on a 32-bit host. 13902 case X86::ATOMAND6432: 13903 case X86::ATOMOR6432: 13904 case X86::ATOMXOR6432: 13905 case X86::ATOMNAND6432: 13906 case X86::ATOMADD6432: 13907 case X86::ATOMSUB6432: 13908 case X86::ATOMMAX6432: 13909 case X86::ATOMMIN6432: 13910 case X86::ATOMUMAX6432: 13911 case X86::ATOMUMIN6432: 13912 case X86::ATOMSWAP6432: 13913 return EmitAtomicLoadArith6432(MI, BB); 13914 13915 case X86::VASTART_SAVE_XMM_REGS: 13916 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB); 13917 13918 case X86::VAARG_64: 13919 return EmitVAARG64WithCustomInserter(MI, BB); 13920 13921 case X86::EH_SjLj_SetJmp32: 13922 case X86::EH_SjLj_SetJmp64: 13923 return emitEHSjLjSetJmp(MI, BB); 13924 13925 case X86::EH_SjLj_LongJmp32: 13926 case X86::EH_SjLj_LongJmp64: 13927 return emitEHSjLjLongJmp(MI, BB); 13928 } 13929} 13930 13931//===----------------------------------------------------------------------===// 13932// X86 Optimization Hooks 13933//===----------------------------------------------------------------------===// 13934 13935void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 13936 APInt &KnownZero, 13937 APInt &KnownOne, 13938 const SelectionDAG &DAG, 13939 unsigned Depth) const { 13940 unsigned BitWidth = KnownZero.getBitWidth(); 13941 unsigned Opc = Op.getOpcode(); 13942 assert((Opc >= ISD::BUILTIN_OP_END || 13943 Opc == ISD::INTRINSIC_WO_CHAIN || 13944 Opc == ISD::INTRINSIC_W_CHAIN || 13945 Opc == ISD::INTRINSIC_VOID) && 13946 "Should use MaskedValueIsZero if you don't know whether Op" 13947 " is a target node!"); 13948 13949 KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything. 13950 switch (Opc) { 13951 default: break; 13952 case X86ISD::ADD: 13953 case X86ISD::SUB: 13954 case X86ISD::ADC: 13955 case X86ISD::SBB: 13956 case X86ISD::SMUL: 13957 case X86ISD::UMUL: 13958 case X86ISD::INC: 13959 case X86ISD::DEC: 13960 case X86ISD::OR: 13961 case X86ISD::XOR: 13962 case X86ISD::AND: 13963 // These nodes' second result is a boolean. 
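    // Such a boolean is always 0 or 1, so when the flag result is the value
    // being queried every bit above bit 0 is known zero; the SETCC case
    // below reports exactly that.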
13964 if (Op.getResNo() == 0) 13965 break; 13966 // Fallthrough 13967 case X86ISD::SETCC: 13968 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1); 13969 break; 13970 case ISD::INTRINSIC_WO_CHAIN: { 13971 unsigned IntId = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 13972 unsigned NumLoBits = 0; 13973 switch (IntId) { 13974 default: break; 13975 case Intrinsic::x86_sse_movmsk_ps: 13976 case Intrinsic::x86_avx_movmsk_ps_256: 13977 case Intrinsic::x86_sse2_movmsk_pd: 13978 case Intrinsic::x86_avx_movmsk_pd_256: 13979 case Intrinsic::x86_mmx_pmovmskb: 13980 case Intrinsic::x86_sse2_pmovmskb_128: 13981 case Intrinsic::x86_avx2_pmovmskb: { 13982 // High bits of movmskp{s|d}, pmovmskb are known zero. 13983 switch (IntId) { 13984 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 13985 case Intrinsic::x86_sse_movmsk_ps: NumLoBits = 4; break; 13986 case Intrinsic::x86_avx_movmsk_ps_256: NumLoBits = 8; break; 13987 case Intrinsic::x86_sse2_movmsk_pd: NumLoBits = 2; break; 13988 case Intrinsic::x86_avx_movmsk_pd_256: NumLoBits = 4; break; 13989 case Intrinsic::x86_mmx_pmovmskb: NumLoBits = 8; break; 13990 case Intrinsic::x86_sse2_pmovmskb_128: NumLoBits = 16; break; 13991 case Intrinsic::x86_avx2_pmovmskb: NumLoBits = 32; break; 13992 } 13993 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - NumLoBits); 13994 break; 13995 } 13996 } 13997 break; 13998 } 13999 } 14000} 14001 14002unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op, 14003 unsigned Depth) const { 14004 // SETCC_CARRY sets the dest to ~0 for true or 0 for false. 14005 if (Op.getOpcode() == X86ISD::SETCC_CARRY) 14006 return Op.getValueType().getScalarType().getSizeInBits(); 14007 14008 // Fallback case. 14009 return 1; 14010} 14011 14012/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the 14013/// node is a GlobalAddress + offset. 
14014bool X86TargetLowering::isGAPlusOffset(SDNode *N, 14015 const GlobalValue* &GA, 14016 int64_t &Offset) const { 14017 if (N->getOpcode() == X86ISD::Wrapper) { 14018 if (isa<GlobalAddressSDNode>(N->getOperand(0))) { 14019 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal(); 14020 Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset(); 14021 return true; 14022 } 14023 } 14024 return TargetLowering::isGAPlusOffset(N, GA, Offset); 14025} 14026 14027/// isShuffleHigh128VectorInsertLow - Checks whether the shuffle node is the 14028/// same as extracting the high 128-bit part of 256-bit vector and then 14029/// inserting the result into the low part of a new 256-bit vector 14030static bool isShuffleHigh128VectorInsertLow(ShuffleVectorSDNode *SVOp) { 14031 EVT VT = SVOp->getValueType(0); 14032 unsigned NumElems = VT.getVectorNumElements(); 14033 14034 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u> 14035 for (unsigned i = 0, j = NumElems/2; i != NumElems/2; ++i, ++j) 14036 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) || 14037 SVOp->getMaskElt(j) >= 0) 14038 return false; 14039 14040 return true; 14041} 14042 14043/// isShuffleLow128VectorInsertHigh - Checks whether the shuffle node is the 14044/// same as extracting the low 128-bit part of 256-bit vector and then 14045/// inserting the result into the high part of a new 256-bit vector 14046static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) { 14047 EVT VT = SVOp->getValueType(0); 14048 unsigned NumElems = VT.getVectorNumElements(); 14049 14050 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1> 14051 for (unsigned i = NumElems/2, j = 0; i != NumElems; ++i, ++j) 14052 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) || 14053 SVOp->getMaskElt(j) >= 0) 14054 return false; 14055 14056 return true; 14057} 14058 14059/// PerformShuffleCombine256 - Performs shuffle combines for 256-bit vectors. 14060static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG, 14061 TargetLowering::DAGCombinerInfo &DCI, 14062 const X86Subtarget* Subtarget) { 14063 DebugLoc dl = N->getDebugLoc(); 14064 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 14065 SDValue V1 = SVOp->getOperand(0); 14066 SDValue V2 = SVOp->getOperand(1); 14067 EVT VT = SVOp->getValueType(0); 14068 unsigned NumElems = VT.getVectorNumElements(); 14069 14070 if (V1.getOpcode() == ISD::CONCAT_VECTORS && 14071 V2.getOpcode() == ISD::CONCAT_VECTORS) { 14072 // 14073 // 0,0,0,... 14074 // | 14075 // V UNDEF BUILD_VECTOR UNDEF 14076 // \ / \ / 14077 // CONCAT_VECTOR CONCAT_VECTOR 14078 // \ / 14079 // \ / 14080 // RESULT: V + zero extended 14081 // 14082 if (V2.getOperand(0).getOpcode() != ISD::BUILD_VECTOR || 14083 V2.getOperand(1).getOpcode() != ISD::UNDEF || 14084 V1.getOperand(1).getOpcode() != ISD::UNDEF) 14085 return SDValue(); 14086 14087 if (!ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode())) 14088 return SDValue(); 14089 14090 // To match the shuffle mask, the first half of the mask should 14091 // be exactly the first vector, and all the rest a splat with the 14092 // first element of the second one. 14093 for (unsigned i = 0; i != NumElems/2; ++i) 14094 if (!isUndefOrEqual(SVOp->getMaskElt(i), i) || 14095 !isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems)) 14096 return SDValue(); 14097 14098 // If V1 is coming from a vector load then just fold to a VZEXT_LOAD. 
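    // For example, with v4i64 and mask <0,1,4,4>:
    //   shuffle (concat (load p), undef), (concat zeroes, undef)
    // keeps the two loaded elements and zeroes the upper half, which is
    // exactly what a 128-bit VZEXT_LOAD from p (bitcast back to v4i64)
    // produces.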
14099 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(V1.getOperand(0))) { 14100 if (Ld->hasNUsesOfValue(1, 0)) { 14101 SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other); 14102 SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() }; 14103 SDValue ResNode = 14104 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, 2, 14105 Ld->getMemoryVT(), 14106 Ld->getPointerInfo(), 14107 Ld->getAlignment(), 14108 false/*isVolatile*/, true/*ReadMem*/, 14109 false/*WriteMem*/); 14110 return DAG.getNode(ISD::BITCAST, dl, VT, ResNode); 14111 } 14112 } 14113 14114 // Emit a zeroed vector and insert the desired subvector on its 14115 // first half. 14116 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl); 14117 SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), 0, DAG, dl); 14118 return DCI.CombineTo(N, InsV); 14119 } 14120 14121 //===--------------------------------------------------------------------===// 14122 // Combine some shuffles into subvector extracts and inserts: 14123 // 14124 14125 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u> 14126 if (isShuffleHigh128VectorInsertLow(SVOp)) { 14127 SDValue V = Extract128BitVector(V1, NumElems/2, DAG, dl); 14128 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, 0, DAG, dl); 14129 return DCI.CombineTo(N, InsV); 14130 } 14131 14132 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1> 14133 if (isShuffleLow128VectorInsertHigh(SVOp)) { 14134 SDValue V = Extract128BitVector(V1, 0, DAG, dl); 14135 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, NumElems/2, DAG, dl); 14136 return DCI.CombineTo(N, InsV); 14137 } 14138 14139 return SDValue(); 14140} 14141 14142/// PerformShuffleCombine - Performs several different shuffle combines. 14143static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG, 14144 TargetLowering::DAGCombinerInfo &DCI, 14145 const X86Subtarget *Subtarget) { 14146 DebugLoc dl = N->getDebugLoc(); 14147 EVT VT = N->getValueType(0); 14148 14149 // Don't create instructions with illegal types after legalize types has run. 14150 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 14151 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType())) 14152 return SDValue(); 14153 14154 // Combine 256-bit vector shuffles. This is only profitable when in AVX mode 14155 if (Subtarget->hasAVX() && VT.is256BitVector() && 14156 N->getOpcode() == ISD::VECTOR_SHUFFLE) 14157 return PerformShuffleCombine256(N, DAG, DCI, Subtarget); 14158 14159 // Only handle 128 wide vector from here on. 14160 if (!VT.is128BitVector()) 14161 return SDValue(); 14162 14163 // Combine a vector_shuffle that is equal to build_vector load1, load2, load3, 14164 // load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are 14165 // consecutive, non-overlapping, and in the right order. 14166 SmallVector<SDValue, 16> Elts; 14167 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) 14168 Elts.push_back(getShuffleScalarElt(N, i, DAG, 0)); 14169 14170 return EltsFromConsecutiveLoads(VT, Elts, dl, DAG); 14171} 14172 14173 14174/// PerformTruncateCombine - Converts truncate operation to 14175/// a sequence of vector shuffle operations. 
14176/// It is possible when we truncate 256-bit vector to 128-bit vector 14177static SDValue PerformTruncateCombine(SDNode *N, SelectionDAG &DAG, 14178 TargetLowering::DAGCombinerInfo &DCI, 14179 const X86Subtarget *Subtarget) { 14180 if (!DCI.isBeforeLegalizeOps()) 14181 return SDValue(); 14182 14183 if (!Subtarget->hasAVX()) 14184 return SDValue(); 14185 14186 EVT VT = N->getValueType(0); 14187 SDValue Op = N->getOperand(0); 14188 EVT OpVT = Op.getValueType(); 14189 DebugLoc dl = N->getDebugLoc(); 14190 14191 if ((VT == MVT::v4i32) && (OpVT == MVT::v4i64)) { 14192 14193 if (Subtarget->hasAVX2()) { 14194 // AVX2: v4i64 -> v4i32 14195 14196 // VPERMD 14197 static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1}; 14198 14199 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v8i32, Op); 14200 Op = DAG.getVectorShuffle(MVT::v8i32, dl, Op, DAG.getUNDEF(MVT::v8i32), 14201 ShufMask); 14202 14203 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Op, 14204 DAG.getIntPtrConstant(0)); 14205 } 14206 14207 // AVX: v4i64 -> v4i32 14208 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Op, 14209 DAG.getIntPtrConstant(0)); 14210 14211 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Op, 14212 DAG.getIntPtrConstant(2)); 14213 14214 OpLo = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, OpLo); 14215 OpHi = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, OpHi); 14216 14217 // PSHUFD 14218 static const int ShufMask1[] = {0, 2, 0, 0}; 14219 14220 SDValue Undef = DAG.getUNDEF(VT); 14221 OpLo = DAG.getVectorShuffle(VT, dl, OpLo, Undef, ShufMask1); 14222 OpHi = DAG.getVectorShuffle(VT, dl, OpHi, Undef, ShufMask1); 14223 14224 // MOVLHPS 14225 static const int ShufMask2[] = {0, 1, 4, 5}; 14226 14227 return DAG.getVectorShuffle(VT, dl, OpLo, OpHi, ShufMask2); 14228 } 14229 14230 if ((VT == MVT::v8i16) && (OpVT == MVT::v8i32)) { 14231 14232 if (Subtarget->hasAVX2()) { 14233 // AVX2: v8i32 -> v8i16 14234 14235 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v32i8, Op); 14236 14237 // PSHUFB 14238 SmallVector<SDValue,32> pshufbMask; 14239 for (unsigned i = 0; i < 2; ++i) { 14240 pshufbMask.push_back(DAG.getConstant(0x0, MVT::i8)); 14241 pshufbMask.push_back(DAG.getConstant(0x1, MVT::i8)); 14242 pshufbMask.push_back(DAG.getConstant(0x4, MVT::i8)); 14243 pshufbMask.push_back(DAG.getConstant(0x5, MVT::i8)); 14244 pshufbMask.push_back(DAG.getConstant(0x8, MVT::i8)); 14245 pshufbMask.push_back(DAG.getConstant(0x9, MVT::i8)); 14246 pshufbMask.push_back(DAG.getConstant(0xc, MVT::i8)); 14247 pshufbMask.push_back(DAG.getConstant(0xd, MVT::i8)); 14248 for (unsigned j = 0; j < 8; ++j) 14249 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); 14250 } 14251 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v32i8, 14252 &pshufbMask[0], 32); 14253 Op = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v32i8, Op, BV); 14254 14255 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4i64, Op); 14256 14257 static const int ShufMask[] = {0, 2, -1, -1}; 14258 Op = DAG.getVectorShuffle(MVT::v4i64, dl, Op, DAG.getUNDEF(MVT::v4i64), 14259 &ShufMask[0]); 14260 14261 Op = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Op, 14262 DAG.getIntPtrConstant(0)); 14263 14264 return DAG.getNode(ISD::BITCAST, dl, VT, Op); 14265 } 14266 14267 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i32, Op, 14268 DAG.getIntPtrConstant(0)); 14269 14270 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i32, Op, 14271 DAG.getIntPtrConstant(4)); 14272 14273 OpLo = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLo); 14274 OpHi = 
DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpHi); 14275 14276 // PSHUFB 14277 static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13, 14278 -1, -1, -1, -1, -1, -1, -1, -1}; 14279 14280 SDValue Undef = DAG.getUNDEF(MVT::v16i8); 14281 OpLo = DAG.getVectorShuffle(MVT::v16i8, dl, OpLo, Undef, ShufMask1); 14282 OpHi = DAG.getVectorShuffle(MVT::v16i8, dl, OpHi, Undef, ShufMask1); 14283 14284 OpLo = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, OpLo); 14285 OpHi = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, OpHi); 14286 14287 // MOVLHPS 14288 static const int ShufMask2[] = {0, 1, 4, 5}; 14289 14290 SDValue res = DAG.getVectorShuffle(MVT::v4i32, dl, OpLo, OpHi, ShufMask2); 14291 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, res); 14292 } 14293 14294 return SDValue(); 14295} 14296 14297/// XFormVExtractWithShuffleIntoLoad - Check if a vector extract from a target 14298/// specific shuffle of a load can be folded into a single element load. 14299/// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but 14300/// shuffles have been customed lowered so we need to handle those here. 14301static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG, 14302 TargetLowering::DAGCombinerInfo &DCI) { 14303 if (DCI.isBeforeLegalizeOps()) 14304 return SDValue(); 14305 14306 SDValue InVec = N->getOperand(0); 14307 SDValue EltNo = N->getOperand(1); 14308 14309 if (!isa<ConstantSDNode>(EltNo)) 14310 return SDValue(); 14311 14312 EVT VT = InVec.getValueType(); 14313 14314 bool HasShuffleIntoBitcast = false; 14315 if (InVec.getOpcode() == ISD::BITCAST) { 14316 // Don't duplicate a load with other uses. 14317 if (!InVec.hasOneUse()) 14318 return SDValue(); 14319 EVT BCVT = InVec.getOperand(0).getValueType(); 14320 if (BCVT.getVectorNumElements() != VT.getVectorNumElements()) 14321 return SDValue(); 14322 InVec = InVec.getOperand(0); 14323 HasShuffleIntoBitcast = true; 14324 } 14325 14326 if (!isTargetShuffle(InVec.getOpcode())) 14327 return SDValue(); 14328 14329 // Don't duplicate a load with other uses. 14330 if (!InVec.hasOneUse()) 14331 return SDValue(); 14332 14333 SmallVector<int, 16> ShuffleMask; 14334 bool UnaryShuffle; 14335 if (!getTargetShuffleMask(InVec.getNode(), VT.getSimpleVT(), ShuffleMask, 14336 UnaryShuffle)) 14337 return SDValue(); 14338 14339 // Select the input vector, guarding against out of range extract vector. 14340 unsigned NumElems = VT.getVectorNumElements(); 14341 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue(); 14342 int Idx = (Elt > (int)NumElems) ? -1 : ShuffleMask[Elt]; 14343 SDValue LdNode = (Idx < (int)NumElems) ? InVec.getOperand(0) 14344 : InVec.getOperand(1); 14345 14346 // If inputs to shuffle are the same for both ops, then allow 2 uses 14347 unsigned AllowedUses = InVec.getOperand(0) == InVec.getOperand(1) ? 2 : 1; 14348 14349 if (LdNode.getOpcode() == ISD::BITCAST) { 14350 // Don't duplicate a load with other uses. 14351 if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0)) 14352 return SDValue(); 14353 14354 AllowedUses = 1; // only allow 1 load use if we have a bitcast 14355 LdNode = LdNode.getOperand(0); 14356 } 14357 14358 if (!ISD::isNormalLoad(LdNode.getNode())) 14359 return SDValue(); 14360 14361 LoadSDNode *LN0 = cast<LoadSDNode>(LdNode); 14362 14363 if (!LN0 ||!LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile()) 14364 return SDValue(); 14365 14366 if (HasShuffleIntoBitcast) { 14367 // If there's a bitcast before the shuffle, check if the load type and 14368 // alignment is valid. 
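    // That is, loading VT directly must not require stricter alignment than
    // the original load guarantees, and a plain load of VT must be legal or
    // custom on this target.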
14369 unsigned Align = LN0->getAlignment(); 14370 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 14371 unsigned NewAlign = TLI.getDataLayout()-> 14372 getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext())); 14373 14374 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VT)) 14375 return SDValue(); 14376 } 14377 14378 // All checks match so transform back to vector_shuffle so that DAG combiner 14379 // can finish the job 14380 DebugLoc dl = N->getDebugLoc(); 14381 14382 // Create shuffle node taking into account the case that its a unary shuffle 14383 SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(VT) : InVec.getOperand(1); 14384 Shuffle = DAG.getVectorShuffle(InVec.getValueType(), dl, 14385 InVec.getOperand(0), Shuffle, 14386 &ShuffleMask[0]); 14387 Shuffle = DAG.getNode(ISD::BITCAST, dl, VT, Shuffle); 14388 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle, 14389 EltNo); 14390} 14391 14392/// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index 14393/// generation and convert it from being a bunch of shuffles and extracts 14394/// to a simple store and scalar loads to extract the elements. 14395static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG, 14396 TargetLowering::DAGCombinerInfo &DCI) { 14397 SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI); 14398 if (NewOp.getNode()) 14399 return NewOp; 14400 14401 SDValue InputVector = N->getOperand(0); 14402 14403 // Only operate on vectors of 4 elements, where the alternative shuffling 14404 // gets to be more expensive. 14405 if (InputVector.getValueType() != MVT::v4i32) 14406 return SDValue(); 14407 14408 // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a 14409 // single use which is a sign-extend or zero-extend, and all elements are 14410 // used. 14411 SmallVector<SDNode *, 4> Uses; 14412 unsigned ExtractedElements = 0; 14413 for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(), 14414 UE = InputVector.getNode()->use_end(); UI != UE; ++UI) { 14415 if (UI.getUse().getResNo() != InputVector.getResNo()) 14416 return SDValue(); 14417 14418 SDNode *Extract = *UI; 14419 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 14420 return SDValue(); 14421 14422 if (Extract->getValueType(0) != MVT::i32) 14423 return SDValue(); 14424 if (!Extract->hasOneUse()) 14425 return SDValue(); 14426 if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND && 14427 Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND) 14428 return SDValue(); 14429 if (!isa<ConstantSDNode>(Extract->getOperand(1))) 14430 return SDValue(); 14431 14432 // Record which element was extracted. 14433 ExtractedElements |= 14434 1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue(); 14435 14436 Uses.push_back(Extract); 14437 } 14438 14439 // If not all the elements were used, this may not be worthwhile. 14440 if (ExtractedElements != 15) 14441 return SDValue(); 14442 14443 // Ok, we've now decided to do the transformation. 14444 DebugLoc dl = InputVector.getDebugLoc(); 14445 14446 // Store the value to a temporary stack slot. 14447 SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType()); 14448 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr, 14449 MachinePointerInfo(), false, false, 0); 14450 14451 // Replace each use (extract) with a load of the appropriate element. 
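  // For a v4i32 input spilled to "slot", for instance, the four extracts
  // become i32 loads from slot+0, slot+4, slot+8 and slot+12, each feeding
  // its original sign- or zero-extend user directly.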
14452 for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(), 14453 UE = Uses.end(); UI != UE; ++UI) { 14454 SDNode *Extract = *UI; 14455 14456 // cOMpute the element's address. 14457 SDValue Idx = Extract->getOperand(1); 14458 unsigned EltSize = 14459 InputVector.getValueType().getVectorElementType().getSizeInBits()/8; 14460 uint64_t Offset = EltSize * cast<ConstantSDNode>(Idx)->getZExtValue(); 14461 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 14462 SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy()); 14463 14464 SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), 14465 StackPtr, OffsetVal); 14466 14467 // Load the scalar. 14468 SDValue LoadScalar = DAG.getLoad(Extract->getValueType(0), dl, Ch, 14469 ScalarAddr, MachinePointerInfo(), 14470 false, false, false, 0); 14471 14472 // Replace the exact with the load. 14473 DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), LoadScalar); 14474 } 14475 14476 // The replacement was made in place; don't return anything. 14477 return SDValue(); 14478} 14479 14480/// PerformSELECTCombine - Do target-specific dag combines on SELECT and VSELECT 14481/// nodes. 14482static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, 14483 TargetLowering::DAGCombinerInfo &DCI, 14484 const X86Subtarget *Subtarget) { 14485 DebugLoc DL = N->getDebugLoc(); 14486 SDValue Cond = N->getOperand(0); 14487 // Get the LHS/RHS of the select. 14488 SDValue LHS = N->getOperand(1); 14489 SDValue RHS = N->getOperand(2); 14490 EVT VT = LHS.getValueType(); 14491 14492 // If we have SSE[12] support, try to form min/max nodes. SSE min/max 14493 // instructions match the semantics of the common C idiom x<y?x:y but not 14494 // x<=y?x:y, because of how they handle negative zero (which can be 14495 // ignored in unsafe-math mode). 14496 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() && 14497 VT != MVT::f80 && DAG.getTargetLoweringInfo().isTypeLegal(VT) && 14498 (Subtarget->hasSSE2() || 14499 (Subtarget->hasSSE1() && VT.getScalarType() == MVT::f32))) { 14500 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); 14501 14502 unsigned Opcode = 0; 14503 // Check for x CC y ? x : y. 14504 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) && 14505 DAG.isEqualTo(RHS, Cond.getOperand(1))) { 14506 switch (CC) { 14507 default: break; 14508 case ISD::SETULT: 14509 // Converting this to a min would handle NaNs incorrectly, and swapping 14510 // the operands would cause it to handle comparisons between positive 14511 // and negative zero incorrectly. 14512 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) { 14513 if (!DAG.getTarget().Options.UnsafeFPMath && 14514 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 14515 break; 14516 std::swap(LHS, RHS); 14517 } 14518 Opcode = X86ISD::FMIN; 14519 break; 14520 case ISD::SETOLE: 14521 // Converting this to a min would handle comparisons between positive 14522 // and negative zero incorrectly. 14523 if (!DAG.getTarget().Options.UnsafeFPMath && 14524 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) 14525 break; 14526 Opcode = X86ISD::FMIN; 14527 break; 14528 case ISD::SETULE: 14529 // Converting this to a min would handle both negative zeros and NaNs 14530 // incorrectly, but we can swap the operands to fix both. 
14531 std::swap(LHS, RHS); 14532 case ISD::SETOLT: 14533 case ISD::SETLT: 14534 case ISD::SETLE: 14535 Opcode = X86ISD::FMIN; 14536 break; 14537 14538 case ISD::SETOGE: 14539 // Converting this to a max would handle comparisons between positive 14540 // and negative zero incorrectly. 14541 if (!DAG.getTarget().Options.UnsafeFPMath && 14542 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) 14543 break; 14544 Opcode = X86ISD::FMAX; 14545 break; 14546 case ISD::SETUGT: 14547 // Converting this to a max would handle NaNs incorrectly, and swapping 14548 // the operands would cause it to handle comparisons between positive 14549 // and negative zero incorrectly. 14550 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) { 14551 if (!DAG.getTarget().Options.UnsafeFPMath && 14552 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 14553 break; 14554 std::swap(LHS, RHS); 14555 } 14556 Opcode = X86ISD::FMAX; 14557 break; 14558 case ISD::SETUGE: 14559 // Converting this to a max would handle both negative zeros and NaNs 14560 // incorrectly, but we can swap the operands to fix both. 14561 std::swap(LHS, RHS); 14562 case ISD::SETOGT: 14563 case ISD::SETGT: 14564 case ISD::SETGE: 14565 Opcode = X86ISD::FMAX; 14566 break; 14567 } 14568 // Check for x CC y ? y : x -- a min/max with reversed arms. 14569 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) && 14570 DAG.isEqualTo(RHS, Cond.getOperand(0))) { 14571 switch (CC) { 14572 default: break; 14573 case ISD::SETOGE: 14574 // Converting this to a min would handle comparisons between positive 14575 // and negative zero incorrectly, and swapping the operands would 14576 // cause it to handle NaNs incorrectly. 14577 if (!DAG.getTarget().Options.UnsafeFPMath && 14578 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) { 14579 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 14580 break; 14581 std::swap(LHS, RHS); 14582 } 14583 Opcode = X86ISD::FMIN; 14584 break; 14585 case ISD::SETUGT: 14586 // Converting this to a min would handle NaNs incorrectly. 14587 if (!DAG.getTarget().Options.UnsafeFPMath && 14588 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))) 14589 break; 14590 Opcode = X86ISD::FMIN; 14591 break; 14592 case ISD::SETUGE: 14593 // Converting this to a min would handle both negative zeros and NaNs 14594 // incorrectly, but we can swap the operands to fix both. 14595 std::swap(LHS, RHS); 14596 case ISD::SETOGT: 14597 case ISD::SETGT: 14598 case ISD::SETGE: 14599 Opcode = X86ISD::FMIN; 14600 break; 14601 14602 case ISD::SETULT: 14603 // Converting this to a max would handle NaNs incorrectly. 14604 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 14605 break; 14606 Opcode = X86ISD::FMAX; 14607 break; 14608 case ISD::SETOLE: 14609 // Converting this to a max would handle comparisons between positive 14610 // and negative zero incorrectly, and swapping the operands would 14611 // cause it to handle NaNs incorrectly. 14612 if (!DAG.getTarget().Options.UnsafeFPMath && 14613 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) { 14614 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 14615 break; 14616 std::swap(LHS, RHS); 14617 } 14618 Opcode = X86ISD::FMAX; 14619 break; 14620 case ISD::SETULE: 14621 // Converting this to a max would handle both negative zeros and NaNs 14622 // incorrectly, but we can swap the operands to fix both. 
14623 std::swap(LHS, RHS); 14624 case ISD::SETOLT: 14625 case ISD::SETLT: 14626 case ISD::SETLE: 14627 Opcode = X86ISD::FMAX; 14628 break; 14629 } 14630 } 14631 14632 if (Opcode) 14633 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS); 14634 } 14635 14636 // If this is a select between two integer constants, try to do some 14637 // optimizations. 14638 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) { 14639 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS)) 14640 // Don't do this for crazy integer types. 14641 if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) { 14642 // If this is efficiently invertible, canonicalize the LHSC/RHSC values 14643 // so that TrueC (the true value) is larger than FalseC. 14644 bool NeedsCondInvert = false; 14645 14646 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) && 14647 // Efficiently invertible. 14648 (Cond.getOpcode() == ISD::SETCC || // setcc -> invertible. 14649 (Cond.getOpcode() == ISD::XOR && // xor(X, C) -> invertible. 14650 isa<ConstantSDNode>(Cond.getOperand(1))))) { 14651 NeedsCondInvert = true; 14652 std::swap(TrueC, FalseC); 14653 } 14654 14655 // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0. 14656 if (FalseC->getAPIntValue() == 0 && 14657 TrueC->getAPIntValue().isPowerOf2()) { 14658 if (NeedsCondInvert) // Invert the condition if needed. 14659 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond, 14660 DAG.getConstant(1, Cond.getValueType())); 14661 14662 // Zero extend the condition if needed. 14663 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond); 14664 14665 unsigned ShAmt = TrueC->getAPIntValue().logBase2(); 14666 return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond, 14667 DAG.getConstant(ShAmt, MVT::i8)); 14668 } 14669 14670 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)+cst. 14671 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) { 14672 if (NeedsCondInvert) // Invert the condition if needed. 14673 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond, 14674 DAG.getConstant(1, Cond.getValueType())); 14675 14676 // Zero extend the condition if needed. 14677 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, 14678 FalseC->getValueType(0), Cond); 14679 return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 14680 SDValue(FalseC, 0)); 14681 } 14682 14683 // Optimize cases that will turn into an LEA instruction. This requires 14684 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9). 14685 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) { 14686 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue(); 14687 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff; 14688 14689 bool isFastMultiplier = false; 14690 if (Diff < 10) { 14691 switch ((unsigned char)Diff) { 14692 default: break; 14693 case 1: // result = add base, cond 14694 case 2: // result = lea base( , cond*2) 14695 case 3: // result = lea base(cond, cond*2) 14696 case 4: // result = lea base( , cond*4) 14697 case 5: // result = lea base(cond, cond*4) 14698 case 8: // result = lea base( , cond*8) 14699 case 9: // result = lea base(cond, cond*8) 14700 isFastMultiplier = true; 14701 break; 14702 } 14703 } 14704 14705 if (isFastMultiplier) { 14706 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue(); 14707 if (NeedsCondInvert) // Invert the condition if needed. 14708 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond, 14709 DAG.getConstant(1, Cond.getValueType())); 14710 14711 // Zero extend the condition if needed. 
14712 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0), 14713 Cond); 14714 // Scale the condition by the difference. 14715 if (Diff != 1) 14716 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond, 14717 DAG.getConstant(Diff, Cond.getValueType())); 14718 14719 // Add the base if non-zero. 14720 if (FalseC->getAPIntValue() != 0) 14721 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 14722 SDValue(FalseC, 0)); 14723 return Cond; 14724 } 14725 } 14726 } 14727 } 14728 14729 // Canonicalize max and min: 14730 // (x > y) ? x : y -> (x >= y) ? x : y 14731 // (x < y) ? x : y -> (x <= y) ? x : y 14732 // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates 14733 // the need for an extra compare 14734 // against zero. e.g. 14735 // (x - y) > 0 : (x - y) ? 0 -> (x - y) >= 0 : (x - y) ? 0 14736 // subl %esi, %edi 14737 // testl %edi, %edi 14738 // movl $0, %eax 14739 // cmovgl %edi, %eax 14740 // => 14741 // xorl %eax, %eax 14742 // subl %esi, $edi 14743 // cmovsl %eax, %edi 14744 if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC && 14745 DAG.isEqualTo(LHS, Cond.getOperand(0)) && 14746 DAG.isEqualTo(RHS, Cond.getOperand(1))) { 14747 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); 14748 switch (CC) { 14749 default: break; 14750 case ISD::SETLT: 14751 case ISD::SETGT: { 14752 ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE; 14753 Cond = DAG.getSetCC(Cond.getDebugLoc(), Cond.getValueType(), 14754 Cond.getOperand(0), Cond.getOperand(1), NewCC); 14755 return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS); 14756 } 14757 } 14758 } 14759 14760 // If we know that this node is legal then we know that it is going to be 14761 // matched by one of the SSE/AVX BLEND instructions. These instructions only 14762 // depend on the highest bit in each word. Try to use SimplifyDemandedBits 14763 // to simplify previous instructions. 14764 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 14765 if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() && 14766 !DCI.isBeforeLegalize() && TLI.isOperationLegal(ISD::VSELECT, VT)) { 14767 unsigned BitWidth = Cond.getValueType().getScalarType().getSizeInBits(); 14768 14769 // Don't optimize vector selects that map to mask-registers. 14770 if (BitWidth == 1) 14771 return SDValue(); 14772 14773 assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size"); 14774 APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 1); 14775 14776 APInt KnownZero, KnownOne; 14777 TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(), 14778 DCI.isBeforeLegalizeOps()); 14779 if (TLO.ShrinkDemandedConstant(Cond, DemandedMask) || 14780 TLI.SimplifyDemandedBits(Cond, DemandedMask, KnownZero, KnownOne, TLO)) 14781 DCI.CommitTargetLoweringOpt(TLO); 14782 } 14783 14784 return SDValue(); 14785} 14786 14787// Check whether a boolean test is testing a boolean value generated by 14788// X86ISD::SETCC. If so, return the operand of that SETCC and proper condition 14789// code. 14790// 14791// Simplify the following patterns: 14792// (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or 14793// (Op (CMP (SETCC Cond EFLAGS) 0) NEQ) 14794// to (Op EFLAGS Cond) 14795// 14796// (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or 14797// (Op (CMP (SETCC Cond EFLAGS) 1) NEQ) 14798// to (Op EFLAGS !Cond) 14799// 14800// where Op could be BRCOND or CMOV. 14801// 14802static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) { 14803 // Quit if not CMP and SUB with its value result used. 
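  // In other words, only handle X86ISD::CMP, or an X86ISD::SUB whose integer
  // result is unused; such a SUB exists only for its flags and therefore
  // behaves like a compare here.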
14804 if (Cmp.getOpcode() != X86ISD::CMP && 14805 (Cmp.getOpcode() != X86ISD::SUB || Cmp.getNode()->hasAnyUseOfValue(0))) 14806 return SDValue(); 14807 14808 // Quit if not used as a boolean value. 14809 if (CC != X86::COND_E && CC != X86::COND_NE) 14810 return SDValue(); 14811 14812 // Check CMP operands. One of them should be 0 or 1 and the other should be 14813 // an SetCC or extended from it. 14814 SDValue Op1 = Cmp.getOperand(0); 14815 SDValue Op2 = Cmp.getOperand(1); 14816 14817 SDValue SetCC; 14818 const ConstantSDNode* C = 0; 14819 bool needOppositeCond = (CC == X86::COND_E); 14820 14821 if ((C = dyn_cast<ConstantSDNode>(Op1))) 14822 SetCC = Op2; 14823 else if ((C = dyn_cast<ConstantSDNode>(Op2))) 14824 SetCC = Op1; 14825 else // Quit if all operands are not constants. 14826 return SDValue(); 14827 14828 if (C->getZExtValue() == 1) 14829 needOppositeCond = !needOppositeCond; 14830 else if (C->getZExtValue() != 0) 14831 // Quit if the constant is neither 0 or 1. 14832 return SDValue(); 14833 14834 // Skip 'zext' node. 14835 if (SetCC.getOpcode() == ISD::ZERO_EXTEND) 14836 SetCC = SetCC.getOperand(0); 14837 14838 switch (SetCC.getOpcode()) { 14839 case X86ISD::SETCC: 14840 // Set the condition code or opposite one if necessary. 14841 CC = X86::CondCode(SetCC.getConstantOperandVal(0)); 14842 if (needOppositeCond) 14843 CC = X86::GetOppositeBranchCondition(CC); 14844 return SetCC.getOperand(1); 14845 case X86ISD::CMOV: { 14846 // Check whether false/true value has canonical one, i.e. 0 or 1. 14847 ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0)); 14848 ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1)); 14849 // Quit if true value is not a constant. 14850 if (!TVal) 14851 return SDValue(); 14852 // Quit if false value is not a constant. 14853 if (!FVal) { 14854 // A special case for rdrand, where 0 is set if false cond is found. 14855 SDValue Op = SetCC.getOperand(0); 14856 if (Op.getOpcode() != X86ISD::RDRAND) 14857 return SDValue(); 14858 } 14859 // Quit if false value is not the constant 0 or 1. 14860 bool FValIsFalse = true; 14861 if (FVal && FVal->getZExtValue() != 0) { 14862 if (FVal->getZExtValue() != 1) 14863 return SDValue(); 14864 // If FVal is 1, opposite cond is needed. 14865 needOppositeCond = !needOppositeCond; 14866 FValIsFalse = false; 14867 } 14868 // Quit if TVal is not the constant opposite of FVal. 14869 if (FValIsFalse && TVal->getZExtValue() != 1) 14870 return SDValue(); 14871 if (!FValIsFalse && TVal->getZExtValue() != 0) 14872 return SDValue(); 14873 CC = X86::CondCode(SetCC.getConstantOperandVal(2)); 14874 if (needOppositeCond) 14875 CC = X86::GetOppositeBranchCondition(CC); 14876 return SetCC.getOperand(3); 14877 } 14878 } 14879 14880 return SDValue(); 14881} 14882 14883/// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL] 14884static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG, 14885 TargetLowering::DAGCombinerInfo &DCI, 14886 const X86Subtarget *Subtarget) { 14887 DebugLoc DL = N->getDebugLoc(); 14888 14889 // If the flag operand isn't dead, don't touch this CMOV. 
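  // The rewrites below may change or drop the EFLAGS value this node
  // produces, so they are only safe when nothing else reads that result.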
14890 if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty()) 14891 return SDValue(); 14892 14893 SDValue FalseOp = N->getOperand(0); 14894 SDValue TrueOp = N->getOperand(1); 14895 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2); 14896 SDValue Cond = N->getOperand(3); 14897 14898 if (CC == X86::COND_E || CC == X86::COND_NE) { 14899 switch (Cond.getOpcode()) { 14900 default: break; 14901 case X86ISD::BSR: 14902 case X86ISD::BSF: 14903 // If operand of BSR / BSF are proven never zero, then ZF cannot be set. 14904 if (DAG.isKnownNeverZero(Cond.getOperand(0))) 14905 return (CC == X86::COND_E) ? FalseOp : TrueOp; 14906 } 14907 } 14908 14909 SDValue Flags; 14910 14911 Flags = checkBoolTestSetCCCombine(Cond, CC); 14912 if (Flags.getNode() && 14913 // Extra check as FCMOV only supports a subset of X86 cond. 14914 (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC))) { 14915 SDValue Ops[] = { FalseOp, TrueOp, 14916 DAG.getConstant(CC, MVT::i8), Flags }; 14917 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), 14918 Ops, array_lengthof(Ops)); 14919 } 14920 14921 // If this is a select between two integer constants, try to do some 14922 // optimizations. Note that the operands are ordered the opposite of SELECT 14923 // operands. 14924 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) { 14925 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) { 14926 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is 14927 // larger than FalseC (the false value). 14928 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) { 14929 CC = X86::GetOppositeBranchCondition(CC); 14930 std::swap(TrueC, FalseC); 14931 std::swap(TrueOp, FalseOp); 14932 } 14933 14934 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0. 14935 // This is efficient for any integer data type (including i8/i16) and 14936 // shift amount. 14937 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) { 14938 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 14939 DAG.getConstant(CC, MVT::i8), Cond); 14940 14941 // Zero extend the condition if needed. 14942 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond); 14943 14944 unsigned ShAmt = TrueC->getAPIntValue().logBase2(); 14945 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond, 14946 DAG.getConstant(ShAmt, MVT::i8)); 14947 if (N->getNumValues() == 2) // Dead flag value? 14948 return DCI.CombineTo(N, Cond, SDValue()); 14949 return Cond; 14950 } 14951 14952 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)+cst. This is efficient 14953 // for any integer data type, including i8/i16. 14954 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) { 14955 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 14956 DAG.getConstant(CC, MVT::i8), Cond); 14957 14958 // Zero extend the condition if needed. 14959 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, 14960 FalseC->getValueType(0), Cond); 14961 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 14962 SDValue(FalseC, 0)); 14963 14964 if (N->getNumValues() == 2) // Dead flag value? 14965 return DCI.CombineTo(N, Cond, SDValue()); 14966 return Cond; 14967 } 14968 14969 // Optimize cases that will turn into an LEA instruction. This requires 14970 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9). 
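      // For example, (CC ? 5 : 2) has Diff == 3 and becomes
      // zext(setcc(CC)) * 3 + 2, which typically folds into a SETcc plus a
      // single LEA of the form "lea 2(%reg,%reg,2)" (register names are
      // chosen later by the allocator).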
14971 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) { 14972 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue(); 14973 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff; 14974 14975 bool isFastMultiplier = false; 14976 if (Diff < 10) { 14977 switch ((unsigned char)Diff) { 14978 default: break; 14979 case 1: // result = add base, cond 14980 case 2: // result = lea base( , cond*2) 14981 case 3: // result = lea base(cond, cond*2) 14982 case 4: // result = lea base( , cond*4) 14983 case 5: // result = lea base(cond, cond*4) 14984 case 8: // result = lea base( , cond*8) 14985 case 9: // result = lea base(cond, cond*8) 14986 isFastMultiplier = true; 14987 break; 14988 } 14989 } 14990 14991 if (isFastMultiplier) { 14992 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue(); 14993 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 14994 DAG.getConstant(CC, MVT::i8), Cond); 14995 // Zero extend the condition if needed. 14996 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0), 14997 Cond); 14998 // Scale the condition by the difference. 14999 if (Diff != 1) 15000 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond, 15001 DAG.getConstant(Diff, Cond.getValueType())); 15002 15003 // Add the base if non-zero. 15004 if (FalseC->getAPIntValue() != 0) 15005 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 15006 SDValue(FalseC, 0)); 15007 if (N->getNumValues() == 2) // Dead flag value? 15008 return DCI.CombineTo(N, Cond, SDValue()); 15009 return Cond; 15010 } 15011 } 15012 } 15013 } 15014 15015 // Handle these cases: 15016 // (select (x != c), e, c) -> select (x != c), e, x), 15017 // (select (x == c), c, e) -> select (x == c), x, e) 15018 // where the c is an integer constant, and the "select" is the combination 15019 // of CMOV and CMP. 15020 // 15021 // The rationale for this change is that the conditional-move from a constant 15022 // needs two instructions, however, conditional-move from a register needs 15023 // only one instruction. 15024 // 15025 // CAVEAT: By replacing a constant with a symbolic value, it may obscure 15026 // some instruction-combining opportunities. This opt needs to be 15027 // postponed as late as possible. 15028 // 15029 if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) { 15030 // the DCI.xxxx conditions are provided to postpone the optimization as 15031 // late as possible. 15032 15033 ConstantSDNode *CmpAgainst = 0; 15034 if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) && 15035 (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) && 15036 dyn_cast<ConstantSDNode>(Cond.getOperand(0)) == 0) { 15037 15038 if (CC == X86::COND_NE && 15039 CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) { 15040 CC = X86::GetOppositeBranchCondition(CC); 15041 std::swap(TrueOp, FalseOp); 15042 } 15043 15044 if (CC == X86::COND_E && 15045 CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) { 15046 SDValue Ops[] = { FalseOp, Cond.getOperand(0), 15047 DAG.getConstant(CC, MVT::i8), Cond }; 15048 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList (), Ops, 15049 array_lengthof(Ops)); 15050 } 15051 } 15052 } 15053 15054 return SDValue(); 15055} 15056 15057 15058/// PerformMulCombine - Optimize a single multiply with constant into two 15059/// in order to implement it with two cheaper instructions, e.g. 15060/// LEA + SHL, LEA + LEA. 
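/// For example, a multiply by 45 (9 * 5) can be emitted as two LEAs, and a
/// multiply by 40 (5 * 8) as an LEA plus a left shift by 3.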
15061static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG, 15062 TargetLowering::DAGCombinerInfo &DCI) { 15063 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 15064 return SDValue(); 15065 15066 EVT VT = N->getValueType(0); 15067 if (VT != MVT::i64) 15068 return SDValue(); 15069 15070 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 15071 if (!C) 15072 return SDValue(); 15073 uint64_t MulAmt = C->getZExtValue(); 15074 if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9) 15075 return SDValue(); 15076 15077 uint64_t MulAmt1 = 0; 15078 uint64_t MulAmt2 = 0; 15079 if ((MulAmt % 9) == 0) { 15080 MulAmt1 = 9; 15081 MulAmt2 = MulAmt / 9; 15082 } else if ((MulAmt % 5) == 0) { 15083 MulAmt1 = 5; 15084 MulAmt2 = MulAmt / 5; 15085 } else if ((MulAmt % 3) == 0) { 15086 MulAmt1 = 3; 15087 MulAmt2 = MulAmt / 3; 15088 } 15089 if (MulAmt2 && 15090 (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){ 15091 DebugLoc DL = N->getDebugLoc(); 15092 15093 if (isPowerOf2_64(MulAmt2) && 15094 !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD)) 15095 // If second multiplifer is pow2, issue it first. We want the multiply by 15096 // 3, 5, or 9 to be folded into the addressing mode unless the lone use 15097 // is an add. 15098 std::swap(MulAmt1, MulAmt2); 15099 15100 SDValue NewMul; 15101 if (isPowerOf2_64(MulAmt1)) 15102 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0), 15103 DAG.getConstant(Log2_64(MulAmt1), MVT::i8)); 15104 else 15105 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0), 15106 DAG.getConstant(MulAmt1, VT)); 15107 15108 if (isPowerOf2_64(MulAmt2)) 15109 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul, 15110 DAG.getConstant(Log2_64(MulAmt2), MVT::i8)); 15111 else 15112 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul, 15113 DAG.getConstant(MulAmt2, VT)); 15114 15115 // Do not add new nodes to DAG combiner worklist. 15116 DCI.CombineTo(N, NewMul, false); 15117 } 15118 return SDValue(); 15119} 15120 15121static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) { 15122 SDValue N0 = N->getOperand(0); 15123 SDValue N1 = N->getOperand(1); 15124 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 15125 EVT VT = N0.getValueType(); 15126 15127 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2)) 15128 // since the result of setcc_c is all zero's or all ones. 15129 if (VT.isInteger() && !VT.isVector() && 15130 N1C && N0.getOpcode() == ISD::AND && 15131 N0.getOperand(1).getOpcode() == ISD::Constant) { 15132 SDValue N00 = N0.getOperand(0); 15133 if (N00.getOpcode() == X86ISD::SETCC_CARRY || 15134 ((N00.getOpcode() == ISD::ANY_EXTEND || 15135 N00.getOpcode() == ISD::ZERO_EXTEND) && 15136 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY)) { 15137 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 15138 APInt ShAmt = N1C->getAPIntValue(); 15139 Mask = Mask.shl(ShAmt); 15140 if (Mask != 0) 15141 return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, 15142 N00, DAG.getConstant(Mask, VT)); 15143 } 15144 } 15145 15146 15147 // Hardware support for vector shifts is sparse which makes us scalarize the 15148 // vector operations in many cases. Also, on sandybridge ADD is faster than 15149 // shl. 15150 // (shl V, 1) -> add V,V 15151 if (isSplatVector(N1.getNode())) { 15152 assert(N0.getValueType().isVector() && "Invalid vector shift type"); 15153 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(0)); 15154 // We shift all of the values by one. 
In many cases we do not have 15155 // hardware support for this operation. This is better expressed as an ADD 15156 // of two values. 15157 if (N1C && (1 == N1C->getZExtValue())) { 15158 return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N0, N0); 15159 } 15160 } 15161 15162 return SDValue(); 15163} 15164 15165/// PerformShiftCombine - Transforms vector shift nodes to use vector shifts 15166/// when possible. 15167static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG, 15168 TargetLowering::DAGCombinerInfo &DCI, 15169 const X86Subtarget *Subtarget) { 15170 EVT VT = N->getValueType(0); 15171 if (N->getOpcode() == ISD::SHL) { 15172 SDValue V = PerformSHLCombine(N, DAG); 15173 if (V.getNode()) return V; 15174 } 15175 15176 // On X86 with SSE2 support, we can transform this to a vector shift if 15177 // all elements are shifted by the same amount. We can't do this in legalize 15178 // because the a constant vector is typically transformed to a constant pool 15179 // so we have no knowledge of the shift amount. 15180 if (!Subtarget->hasSSE2()) 15181 return SDValue(); 15182 15183 if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 && 15184 (!Subtarget->hasAVX2() || 15185 (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16))) 15186 return SDValue(); 15187 15188 SDValue ShAmtOp = N->getOperand(1); 15189 EVT EltVT = VT.getVectorElementType(); 15190 DebugLoc DL = N->getDebugLoc(); 15191 SDValue BaseShAmt = SDValue(); 15192 if (ShAmtOp.getOpcode() == ISD::BUILD_VECTOR) { 15193 unsigned NumElts = VT.getVectorNumElements(); 15194 unsigned i = 0; 15195 for (; i != NumElts; ++i) { 15196 SDValue Arg = ShAmtOp.getOperand(i); 15197 if (Arg.getOpcode() == ISD::UNDEF) continue; 15198 BaseShAmt = Arg; 15199 break; 15200 } 15201 // Handle the case where the build_vector is all undef 15202 // FIXME: Should DAG allow this? 15203 if (i == NumElts) 15204 return SDValue(); 15205 15206 for (; i != NumElts; ++i) { 15207 SDValue Arg = ShAmtOp.getOperand(i); 15208 if (Arg.getOpcode() == ISD::UNDEF) continue; 15209 if (Arg != BaseShAmt) { 15210 return SDValue(); 15211 } 15212 } 15213 } else if (ShAmtOp.getOpcode() == ISD::VECTOR_SHUFFLE && 15214 cast<ShuffleVectorSDNode>(ShAmtOp)->isSplat()) { 15215 SDValue InVec = ShAmtOp.getOperand(0); 15216 if (InVec.getOpcode() == ISD::BUILD_VECTOR) { 15217 unsigned NumElts = InVec.getValueType().getVectorNumElements(); 15218 unsigned i = 0; 15219 for (; i != NumElts; ++i) { 15220 SDValue Arg = InVec.getOperand(i); 15221 if (Arg.getOpcode() == ISD::UNDEF) continue; 15222 BaseShAmt = Arg; 15223 break; 15224 } 15225 } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) { 15226 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(InVec.getOperand(2))) { 15227 unsigned SplatIdx= cast<ShuffleVectorSDNode>(ShAmtOp)->getSplatIndex(); 15228 if (C->getZExtValue() == SplatIdx) 15229 BaseShAmt = InVec.getOperand(1); 15230 } 15231 } 15232 if (BaseShAmt.getNode() == 0) { 15233 // Don't create instructions with illegal types after legalize 15234 // types has run. 15235 if (!DAG.getTargetLoweringInfo().isTypeLegal(EltVT) && 15236 !DCI.isBeforeLegalize()) 15237 return SDValue(); 15238 15239 BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, ShAmtOp, 15240 DAG.getIntPtrConstant(0)); 15241 } 15242 } else 15243 return SDValue(); 15244 15245 // The shift amount is an i32. 
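  // (The target shift nodes built below expect the scalar amount as an i32,
  // so truncate or zero-extend the splatted element to that width first.)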
15246 if (EltVT.bitsGT(MVT::i32)) 15247 BaseShAmt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, BaseShAmt); 15248 else if (EltVT.bitsLT(MVT::i32)) 15249 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, BaseShAmt); 15250 15251 // The shift amount is identical so we can do a vector shift. 15252 SDValue ValOp = N->getOperand(0); 15253 switch (N->getOpcode()) { 15254 default: 15255 llvm_unreachable("Unknown shift opcode!"); 15256 case ISD::SHL: 15257 switch (VT.getSimpleVT().SimpleTy) { 15258 default: return SDValue(); 15259 case MVT::v2i64: 15260 case MVT::v4i32: 15261 case MVT::v8i16: 15262 case MVT::v4i64: 15263 case MVT::v8i32: 15264 case MVT::v16i16: 15265 return getTargetVShiftNode(X86ISD::VSHLI, DL, VT, ValOp, BaseShAmt, DAG); 15266 } 15267 case ISD::SRA: 15268 switch (VT.getSimpleVT().SimpleTy) { 15269 default: return SDValue(); 15270 case MVT::v4i32: 15271 case MVT::v8i16: 15272 case MVT::v8i32: 15273 case MVT::v16i16: 15274 return getTargetVShiftNode(X86ISD::VSRAI, DL, VT, ValOp, BaseShAmt, DAG); 15275 } 15276 case ISD::SRL: 15277 switch (VT.getSimpleVT().SimpleTy) { 15278 default: return SDValue(); 15279 case MVT::v2i64: 15280 case MVT::v4i32: 15281 case MVT::v8i16: 15282 case MVT::v4i64: 15283 case MVT::v8i32: 15284 case MVT::v16i16: 15285 return getTargetVShiftNode(X86ISD::VSRLI, DL, VT, ValOp, BaseShAmt, DAG); 15286 } 15287 } 15288} 15289 15290 15291// CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..)) 15292// where both setccs reference the same FP CMP, and rewrite for CMPEQSS 15293// and friends. Likewise for OR -> CMPNEQSS. 15294static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG, 15295 TargetLowering::DAGCombinerInfo &DCI, 15296 const X86Subtarget *Subtarget) { 15297 unsigned opcode; 15298 15299 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but 15300 // we're requiring SSE2 for both. 15301 if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) { 15302 SDValue N0 = N->getOperand(0); 15303 SDValue N1 = N->getOperand(1); 15304 SDValue CMP0 = N0->getOperand(1); 15305 SDValue CMP1 = N1->getOperand(1); 15306 DebugLoc DL = N->getDebugLoc(); 15307 15308 // The SETCCs should both refer to the same CMP. 15309 if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1) 15310 return SDValue(); 15311 15312 SDValue CMP00 = CMP0->getOperand(0); 15313 SDValue CMP01 = CMP0->getOperand(1); 15314 EVT VT = CMP00.getValueType(); 15315 15316 if (VT == MVT::f32 || VT == MVT::f64) { 15317 bool ExpectingFlags = false; 15318 // Check for any users that want flags: 15319 for (SDNode::use_iterator UI = N->use_begin(), 15320 UE = N->use_end(); 15321 !ExpectingFlags && UI != UE; ++UI) 15322 switch (UI->getOpcode()) { 15323 default: 15324 case ISD::BR_CC: 15325 case ISD::BRCOND: 15326 case ISD::SELECT: 15327 ExpectingFlags = true; 15328 break; 15329 case ISD::CopyToReg: 15330 case ISD::SIGN_EXTEND: 15331 case ISD::ZERO_EXTEND: 15332 case ISD::ANY_EXTEND: 15333 break; 15334 } 15335 15336 if (!ExpectingFlags) { 15337 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0); 15338 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0); 15339 15340 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) { 15341 X86::CondCode tmp = cc0; 15342 cc0 = cc1; 15343 cc1 = tmp; 15344 } 15345 15346 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) || 15347 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) { 15348 bool is64BitFP = (CMP00.getValueType() == MVT::f64); 15349 X86ISD::NodeType NTOperator = is64BitFP ? 
15350 X86ISD::FSETCCsd : X86ISD::FSETCCss; 15351 // FIXME: need symbolic constants for these magic numbers. 15352 // See X86ATTInstPrinter.cpp:printSSECC(). 15353 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4; 15354 SDValue OnesOrZeroesF = DAG.getNode(NTOperator, DL, MVT::f32, CMP00, CMP01, 15355 DAG.getConstant(x86cc, MVT::i8)); 15356 SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, MVT::i32, 15357 OnesOrZeroesF); 15358 SDValue ANDed = DAG.getNode(ISD::AND, DL, MVT::i32, OnesOrZeroesI, 15359 DAG.getConstant(1, MVT::i32)); 15360 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ANDed); 15361 return OneBitOfTruth; 15362 } 15363 } 15364 } 15365 } 15366 return SDValue(); 15367} 15368 15369/// CanFoldXORWithAllOnes - Test whether the XOR operand is a AllOnes vector 15370/// so it can be folded inside ANDNP. 15371static bool CanFoldXORWithAllOnes(const SDNode *N) { 15372 EVT VT = N->getValueType(0); 15373 15374 // Match direct AllOnes for 128 and 256-bit vectors 15375 if (ISD::isBuildVectorAllOnes(N)) 15376 return true; 15377 15378 // Look through a bit convert. 15379 if (N->getOpcode() == ISD::BITCAST) 15380 N = N->getOperand(0).getNode(); 15381 15382 // Sometimes the operand may come from a insert_subvector building a 256-bit 15383 // allones vector 15384 if (VT.is256BitVector() && 15385 N->getOpcode() == ISD::INSERT_SUBVECTOR) { 15386 SDValue V1 = N->getOperand(0); 15387 SDValue V2 = N->getOperand(1); 15388 15389 if (V1.getOpcode() == ISD::INSERT_SUBVECTOR && 15390 V1.getOperand(0).getOpcode() == ISD::UNDEF && 15391 ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) && 15392 ISD::isBuildVectorAllOnes(V2.getNode())) 15393 return true; 15394 } 15395 15396 return false; 15397} 15398 15399static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG, 15400 TargetLowering::DAGCombinerInfo &DCI, 15401 const X86Subtarget *Subtarget) { 15402 if (DCI.isBeforeLegalizeOps()) 15403 return SDValue(); 15404 15405 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget); 15406 if (R.getNode()) 15407 return R; 15408 15409 EVT VT = N->getValueType(0); 15410 15411 // Create ANDN, BLSI, and BLSR instructions 15412 // BLSI is X & (-X) 15413 // BLSR is X & (X-1) 15414 if (Subtarget->hasBMI() && (VT == MVT::i32 || VT == MVT::i64)) { 15415 SDValue N0 = N->getOperand(0); 15416 SDValue N1 = N->getOperand(1); 15417 DebugLoc DL = N->getDebugLoc(); 15418 15419 // Check LHS for not 15420 if (N0.getOpcode() == ISD::XOR && isAllOnes(N0.getOperand(1))) 15421 return DAG.getNode(X86ISD::ANDN, DL, VT, N0.getOperand(0), N1); 15422 // Check RHS for not 15423 if (N1.getOpcode() == ISD::XOR && isAllOnes(N1.getOperand(1))) 15424 return DAG.getNode(X86ISD::ANDN, DL, VT, N1.getOperand(0), N0); 15425 15426 // Check LHS for neg 15427 if (N0.getOpcode() == ISD::SUB && N0.getOperand(1) == N1 && 15428 isZero(N0.getOperand(0))) 15429 return DAG.getNode(X86ISD::BLSI, DL, VT, N1); 15430 15431 // Check RHS for neg 15432 if (N1.getOpcode() == ISD::SUB && N1.getOperand(1) == N0 && 15433 isZero(N1.getOperand(0))) 15434 return DAG.getNode(X86ISD::BLSI, DL, VT, N0); 15435 15436 // Check LHS for X-1 15437 if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1 && 15438 isAllOnes(N0.getOperand(1))) 15439 return DAG.getNode(X86ISD::BLSR, DL, VT, N1); 15440 15441 // Check RHS for X-1 15442 if (N1.getOpcode() == ISD::ADD && N1.getOperand(0) == N0 && 15443 isAllOnes(N1.getOperand(1))) 15444 return DAG.getNode(X86ISD::BLSR, DL, VT, N0); 15445 15446 return SDValue(); 15447 } 15448 15449 // Want to form ANDNP nodes: 15450 // 1) In the hopes 
of then easily combining them with OR and AND nodes 15451 // to form PBLEND/PSIGN. 15452 // 2) To match ANDN packed intrinsics 15453 if (VT != MVT::v2i64 && VT != MVT::v4i64) 15454 return SDValue(); 15455 15456 SDValue N0 = N->getOperand(0); 15457 SDValue N1 = N->getOperand(1); 15458 DebugLoc DL = N->getDebugLoc(); 15459 15460 // Check LHS for vnot 15461 if (N0.getOpcode() == ISD::XOR && 15462 //ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode())) 15463 CanFoldXORWithAllOnes(N0.getOperand(1).getNode())) 15464 return DAG.getNode(X86ISD::ANDNP, DL, VT, N0.getOperand(0), N1); 15465 15466 // Check RHS for vnot 15467 if (N1.getOpcode() == ISD::XOR && 15468 //ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode())) 15469 CanFoldXORWithAllOnes(N1.getOperand(1).getNode())) 15470 return DAG.getNode(X86ISD::ANDNP, DL, VT, N1.getOperand(0), N0); 15471 15472 return SDValue(); 15473} 15474 15475static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG, 15476 TargetLowering::DAGCombinerInfo &DCI, 15477 const X86Subtarget *Subtarget) { 15478 if (DCI.isBeforeLegalizeOps()) 15479 return SDValue(); 15480 15481 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget); 15482 if (R.getNode()) 15483 return R; 15484 15485 EVT VT = N->getValueType(0); 15486 15487 SDValue N0 = N->getOperand(0); 15488 SDValue N1 = N->getOperand(1); 15489 15490 // look for psign/blend 15491 if (VT == MVT::v2i64 || VT == MVT::v4i64) { 15492 if (!Subtarget->hasSSSE3() || 15493 (VT == MVT::v4i64 && !Subtarget->hasAVX2())) 15494 return SDValue(); 15495 15496 // Canonicalize pandn to RHS 15497 if (N0.getOpcode() == X86ISD::ANDNP) 15498 std::swap(N0, N1); 15499 // or (and (m, y), (pandn m, x)) 15500 if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) { 15501 SDValue Mask = N1.getOperand(0); 15502 SDValue X = N1.getOperand(1); 15503 SDValue Y; 15504 if (N0.getOperand(0) == Mask) 15505 Y = N0.getOperand(1); 15506 if (N0.getOperand(1) == Mask) 15507 Y = N0.getOperand(0); 15508 15509 // Check to see if the mask appeared in both the AND and ANDNP and 15510 if (!Y.getNode()) 15511 return SDValue(); 15512 15513 // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them. 15514 // Look through mask bitcast. 15515 if (Mask.getOpcode() == ISD::BITCAST) 15516 Mask = Mask.getOperand(0); 15517 if (X.getOpcode() == ISD::BITCAST) 15518 X = X.getOperand(0); 15519 if (Y.getOpcode() == ISD::BITCAST) 15520 Y = Y.getOperand(0); 15521 15522 EVT MaskVT = Mask.getValueType(); 15523 15524 // Validate that the Mask operand is a vector sra node. 15525 // FIXME: what to do for bytes, since there is a psignb/pblendvb, but 15526 // there is no psrai.b 15527 if (Mask.getOpcode() != X86ISD::VSRAI) 15528 return SDValue(); 15529 15530 // Check that the SRA is all signbits. 15531 SDValue SraC = Mask.getOperand(1); 15532 unsigned SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue(); 15533 unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits(); 15534 if ((SraAmt + 1) != EltBits) 15535 return SDValue(); 15536 15537 DebugLoc DL = N->getDebugLoc(); 15538 15539 // Now we know we at least have a plendvb with the mask val. See if 15540 // we can form a psignb/w/d. 
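      // With Mask = (sra V, EltBits-1), the OR computes (Mask & Y) | (~Mask & X),
      // i.e. it selects Y in lanes where V is negative and X elsewhere. When Y
      // happens to be (0 - X) this is the psign pattern handled first below;
      // otherwise it is lowered as a byte blend (pblendvb) further down.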
15541 // psign = x.type == y.type == mask.type && y = sub(0, x); 15542 if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X && 15543 ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) && 15544 X.getValueType() == MaskVT && Y.getValueType() == MaskVT) { 15545 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) && 15546 "Unsupported VT for PSIGN"); 15547 Mask = DAG.getNode(X86ISD::PSIGN, DL, MaskVT, X, Mask.getOperand(0)); 15548 return DAG.getNode(ISD::BITCAST, DL, VT, Mask); 15549 } 15550 // PBLENDVB only available on SSE 4.1 15551 if (!Subtarget->hasSSE41()) 15552 return SDValue(); 15553 15554 EVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8; 15555 15556 X = DAG.getNode(ISD::BITCAST, DL, BlendVT, X); 15557 Y = DAG.getNode(ISD::BITCAST, DL, BlendVT, Y); 15558 Mask = DAG.getNode(ISD::BITCAST, DL, BlendVT, Mask); 15559 Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X); 15560 return DAG.getNode(ISD::BITCAST, DL, VT, Mask); 15561 } 15562 } 15563 15564 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64) 15565 return SDValue(); 15566 15567 // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c) 15568 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL) 15569 std::swap(N0, N1); 15570 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL) 15571 return SDValue(); 15572 if (!N0.hasOneUse() || !N1.hasOneUse()) 15573 return SDValue(); 15574 15575 SDValue ShAmt0 = N0.getOperand(1); 15576 if (ShAmt0.getValueType() != MVT::i8) 15577 return SDValue(); 15578 SDValue ShAmt1 = N1.getOperand(1); 15579 if (ShAmt1.getValueType() != MVT::i8) 15580 return SDValue(); 15581 if (ShAmt0.getOpcode() == ISD::TRUNCATE) 15582 ShAmt0 = ShAmt0.getOperand(0); 15583 if (ShAmt1.getOpcode() == ISD::TRUNCATE) 15584 ShAmt1 = ShAmt1.getOperand(0); 15585 15586 DebugLoc DL = N->getDebugLoc(); 15587 unsigned Opc = X86ISD::SHLD; 15588 SDValue Op0 = N0.getOperand(0); 15589 SDValue Op1 = N1.getOperand(0); 15590 if (ShAmt0.getOpcode() == ISD::SUB) { 15591 Opc = X86ISD::SHRD; 15592 std::swap(Op0, Op1); 15593 std::swap(ShAmt0, ShAmt1); 15594 } 15595 15596 unsigned Bits = VT.getSizeInBits(); 15597 if (ShAmt1.getOpcode() == ISD::SUB) { 15598 SDValue Sum = ShAmt1.getOperand(0); 15599 if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) { 15600 SDValue ShAmt1Op1 = ShAmt1.getOperand(1); 15601 if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE) 15602 ShAmt1Op1 = ShAmt1Op1.getOperand(0); 15603 if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0) 15604 return DAG.getNode(Opc, DL, VT, 15605 Op0, Op1, 15606 DAG.getNode(ISD::TRUNCATE, DL, 15607 MVT::i8, ShAmt0)); 15608 } 15609 } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) { 15610 ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0); 15611 if (ShAmt0C && 15612 ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits) 15613 return DAG.getNode(Opc, DL, VT, 15614 N0.getOperand(0), N1.getOperand(0), 15615 DAG.getNode(ISD::TRUNCATE, DL, 15616 MVT::i8, ShAmt0)); 15617 } 15618 15619 return SDValue(); 15620} 15621 15622// Generate NEG and CMOV for integer abs. 15623static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) { 15624 EVT VT = N->getValueType(0); 15625 15626 // Since X86 does not have CMOV for 8-bit integer, we don't convert 15627 // 8-bit integer abs to NEG and CMOV. 
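  // Illustrative i32 pattern: abs(x) is commonly emitted as
  //   %s = ashr i32 %x, 31
  //   %t = add i32 %x, %s
  //   %r = xor i32 %t, %s
  // The match below recognizes that final XOR and rewrites it as a SUB from
  // zero plus a CMOV on the SUB's flags, avoiding the shift/add/xor sequence.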
15628 if (VT.isInteger() && VT.getSizeInBits() == 8) 15629 return SDValue(); 15630 15631 SDValue N0 = N->getOperand(0); 15632 SDValue N1 = N->getOperand(1); 15633 DebugLoc DL = N->getDebugLoc(); 15634 15635 // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1) 15636 // and change it to SUB and CMOV. 15637 if (VT.isInteger() && N->getOpcode() == ISD::XOR && 15638 N0.getOpcode() == ISD::ADD && 15639 N0.getOperand(1) == N1 && 15640 N1.getOpcode() == ISD::SRA && 15641 N1.getOperand(0) == N0.getOperand(0)) 15642 if (ConstantSDNode *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1))) 15643 if (Y1C->getAPIntValue() == VT.getSizeInBits()-1) { 15644 // Generate SUB & CMOV. 15645 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32), 15646 DAG.getConstant(0, VT), N0.getOperand(0)); 15647 15648 SDValue Ops[] = { N0.getOperand(0), Neg, 15649 DAG.getConstant(X86::COND_GE, MVT::i8), 15650 SDValue(Neg.getNode(), 1) }; 15651 return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue), 15652 Ops, array_lengthof(Ops)); 15653 } 15654 return SDValue(); 15655} 15656 15657// PerformXorCombine - Attempts to turn XOR nodes into BLSMSK nodes 15658static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG, 15659 TargetLowering::DAGCombinerInfo &DCI, 15660 const X86Subtarget *Subtarget) { 15661 if (DCI.isBeforeLegalizeOps()) 15662 return SDValue(); 15663 15664 if (Subtarget->hasCMov()) { 15665 SDValue RV = performIntegerAbsCombine(N, DAG); 15666 if (RV.getNode()) 15667 return RV; 15668 } 15669 15670 // Try forming BMI if it is available. 15671 if (!Subtarget->hasBMI()) 15672 return SDValue(); 15673 15674 EVT VT = N->getValueType(0); 15675 15676 if (VT != MVT::i32 && VT != MVT::i64) 15677 return SDValue(); 15678 15679 assert(Subtarget->hasBMI() && "Creating BLSMSK requires BMI instructions"); 15680 15681 // Create BLSMSK instructions by finding X ^ (X-1) 15682 SDValue N0 = N->getOperand(0); 15683 SDValue N1 = N->getOperand(1); 15684 DebugLoc DL = N->getDebugLoc(); 15685 15686 if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1 && 15687 isAllOnes(N0.getOperand(1))) 15688 return DAG.getNode(X86ISD::BLSMSK, DL, VT, N1); 15689 15690 if (N1.getOpcode() == ISD::ADD && N1.getOperand(0) == N0 && 15691 isAllOnes(N1.getOperand(1))) 15692 return DAG.getNode(X86ISD::BLSMSK, DL, VT, N0); 15693 15694 return SDValue(); 15695} 15696 15697/// PerformLOADCombine - Do target-specific dag combines on LOAD nodes. 15698static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG, 15699 TargetLowering::DAGCombinerInfo &DCI, 15700 const X86Subtarget *Subtarget) { 15701 LoadSDNode *Ld = cast<LoadSDNode>(N); 15702 EVT RegVT = Ld->getValueType(0); 15703 EVT MemVT = Ld->getMemoryVT(); 15704 DebugLoc dl = Ld->getDebugLoc(); 15705 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 15706 15707 ISD::LoadExtType Ext = Ld->getExtensionType(); 15708 15709 // If this is a vector EXT Load then attempt to optimize it using a 15710 // shuffle. We need SSSE3 shuffles. 15711 // TODO: It is possible to support ZExt by zeroing the undef values 15712 // during the shuffle phase or after the shuffle. 
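  // For example, an any-extending load of v8i8 into v8i16 (MemVT = v8i8,
  // RegVT = v8i16): the 64 memory bits are typically fetched with a single
  // i64 (or f64) scalar load, reinterpreted as v16i8, and the eight defined
  // bytes are shuffled into the even byte positions; the remaining bytes stay
  // undef, which is acceptable for EXTLOAD.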
15713 if (RegVT.isVector() && RegVT.isInteger() && 15714 Ext == ISD::EXTLOAD && Subtarget->hasSSSE3()) { 15715 assert(MemVT != RegVT && "Cannot extend to the same type"); 15716 assert(MemVT.isVector() && "Must load a vector from memory"); 15717 15718 unsigned NumElems = RegVT.getVectorNumElements(); 15719 unsigned RegSz = RegVT.getSizeInBits(); 15720 unsigned MemSz = MemVT.getSizeInBits(); 15721 assert(RegSz > MemSz && "Register size must be greater than the mem size"); 15722 15723 // All sizes must be a power of two. 15724 if (!isPowerOf2_32(RegSz * MemSz * NumElems)) 15725 return SDValue(); 15726 15727 // Attempt to load the original value using scalar loads. 15728 // Find the largest scalar type that divides the total loaded size. 15729 MVT SclrLoadTy = MVT::i8; 15730 for (unsigned tp = MVT::FIRST_INTEGER_VALUETYPE; 15731 tp < MVT::LAST_INTEGER_VALUETYPE; ++tp) { 15732 MVT Tp = (MVT::SimpleValueType)tp; 15733 if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) { 15734 SclrLoadTy = Tp; 15735 } 15736 } 15737 15738 // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64. 15739 if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 && 15740 (64 <= MemSz)) 15741 SclrLoadTy = MVT::f64; 15742 15743 // Calculate the number of scalar loads that we need to perform 15744 // in order to load our vector from memory. 15745 unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits(); 15746 15747 // Represent our vector as a sequence of elements which are the 15748 // largest scalar that we can load. 15749 EVT LoadUnitVecVT = EVT::getVectorVT(*DAG.getContext(), SclrLoadTy, 15750 RegSz/SclrLoadTy.getSizeInBits()); 15751 15752 // Represent the data using the same element type that is stored in 15753 // memory. In practice, we ''widen'' MemVT. 15754 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(), 15755 RegSz/MemVT.getScalarType().getSizeInBits()); 15756 15757 assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() && 15758 "Invalid vector type"); 15759 15760 // We can't shuffle using an illegal type. 15761 if (!TLI.isTypeLegal(WideVecVT)) 15762 return SDValue(); 15763 15764 SmallVector<SDValue, 8> Chains; 15765 SDValue Ptr = Ld->getBasePtr(); 15766 SDValue Increment = DAG.getConstant(SclrLoadTy.getSizeInBits()/8, 15767 TLI.getPointerTy()); 15768 SDValue Res = DAG.getUNDEF(LoadUnitVecVT); 15769 15770 for (unsigned i = 0; i < NumLoads; ++i) { 15771 // Perform a single load. 15772 SDValue ScalarLoad = DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), 15773 Ptr, Ld->getPointerInfo(), 15774 Ld->isVolatile(), Ld->isNonTemporal(), 15775 Ld->isInvariant(), Ld->getAlignment()); 15776 Chains.push_back(ScalarLoad.getValue(1)); 15777 // Create the first element type using SCALAR_TO_VECTOR in order to avoid 15778 // another round of DAGCombining. 15779 if (i == 0) 15780 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad); 15781 else 15782 Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res, 15783 ScalarLoad, DAG.getIntPtrConstant(i)); 15784 15785 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 15786 } 15787 15788 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], 15789 Chains.size()); 15790 15791 // Bitcast the loaded value to a vector of the original element type, in 15792 // the size of the target vector type. 
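  // The shuffle below spreads the NumElems loaded elements out by SizeRatio,
  // e.g. with NumElems = 8 and SizeRatio = 2 the mask is
  // <0,-1,1,-1,2,-1,3,-1,4,-1,5,-1,6,-1,7,-1>, leaving the gaps undef.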
15793 SDValue SlicedVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Res); 15794 unsigned SizeRatio = RegSz/MemSz; 15795 15796 // Redistribute the loaded elements into the different locations. 15797 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); 15798 for (unsigned i = 0; i != NumElems; ++i) 15799 ShuffleVec[i*SizeRatio] = i; 15800 15801 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec, 15802 DAG.getUNDEF(WideVecVT), 15803 &ShuffleVec[0]); 15804 15805 // Bitcast to the requested type. 15806 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff); 15807 // Replace the original load with the new sequence 15808 // and return the new chain. 15809 return DCI.CombineTo(N, Shuff, TF, true); 15810 } 15811 15812 return SDValue(); 15813} 15814 15815/// PerformSTORECombine - Do target-specific dag combines on STORE nodes. 15816static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG, 15817 const X86Subtarget *Subtarget) { 15818 StoreSDNode *St = cast<StoreSDNode>(N); 15819 EVT VT = St->getValue().getValueType(); 15820 EVT StVT = St->getMemoryVT(); 15821 DebugLoc dl = St->getDebugLoc(); 15822 SDValue StoredVal = St->getOperand(1); 15823 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 15824 15825 // If we are saving a concatenation of two XMM registers, perform two stores. 15826 // On Sandy Bridge, 256-bit memory operations are executed by two 15827 // 128-bit ports. However, on Haswell it is better to issue a single 256-bit 15828 // memory operation. 15829 if (VT.is256BitVector() && !Subtarget->hasAVX2() && 15830 StoredVal.getNode()->getOpcode() == ISD::CONCAT_VECTORS && 15831 StoredVal.getNumOperands() == 2) { 15832 SDValue Value0 = StoredVal.getOperand(0); 15833 SDValue Value1 = StoredVal.getOperand(1); 15834 15835 SDValue Stride = DAG.getConstant(16, TLI.getPointerTy()); 15836 SDValue Ptr0 = St->getBasePtr(); 15837 SDValue Ptr1 = DAG.getNode(ISD::ADD, dl, Ptr0.getValueType(), Ptr0, Stride); 15838 15839 SDValue Ch0 = DAG.getStore(St->getChain(), dl, Value0, Ptr0, 15840 St->getPointerInfo(), St->isVolatile(), 15841 St->isNonTemporal(), St->getAlignment()); 15842 SDValue Ch1 = DAG.getStore(St->getChain(), dl, Value1, Ptr1, 15843 St->getPointerInfo(), St->isVolatile(), 15844 St->isNonTemporal(), St->getAlignment()); 15845 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1); 15846 } 15847 15848 // Optimize trunc store (of multiple scalars) to shuffle and store. 15849 // First, pack all of the elements in one place. Next, store to memory 15850 // in fewer chunks. 15851 if (St->isTruncatingStore() && VT.isVector()) { 15852 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 15853 unsigned NumElems = VT.getVectorNumElements(); 15854 assert(StVT != VT && "Cannot truncate to the same type"); 15855 unsigned FromSz = VT.getVectorElementType().getSizeInBits(); 15856 unsigned ToSz = StVT.getVectorElementType().getSizeInBits(); 15857 15858 // From, To sizes and ElemCount must be pow of two 15859 if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue(); 15860 // We are going to use the original vector elt for storing. 15861 // Accumulated smaller vector elements must be a multiple of the store size. 
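  // For example, a truncating store of v8i16 to v8i8: the value is bitcast to
  // v16i8, the low byte of each element is shuffled to the front
  // (mask <0,2,4,...,14>), and the packed low 64 bits are written with a
  // single i64 (or f64) scalar store.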
15862 if (0 != (NumElems * FromSz) % ToSz) return SDValue(); 15863 15864 unsigned SizeRatio = FromSz / ToSz; 15865 15866 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits()); 15867 15868 // Create a type on which we perform the shuffle 15869 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), 15870 StVT.getScalarType(), NumElems*SizeRatio); 15871 15872 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits()); 15873 15874 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, St->getValue()); 15875 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); 15876 for (unsigned i = 0; i != NumElems; ++i) 15877 ShuffleVec[i] = i * SizeRatio; 15878 15879 // Can't shuffle using an illegal type. 15880 if (!TLI.isTypeLegal(WideVecVT)) 15881 return SDValue(); 15882 15883 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec, 15884 DAG.getUNDEF(WideVecVT), 15885 &ShuffleVec[0]); 15886 // At this point all of the data is stored at the bottom of the 15887 // register. We now need to save it to mem. 15888 15889 // Find the largest store unit 15890 MVT StoreType = MVT::i8; 15891 for (unsigned tp = MVT::FIRST_INTEGER_VALUETYPE; 15892 tp < MVT::LAST_INTEGER_VALUETYPE; ++tp) { 15893 MVT Tp = (MVT::SimpleValueType)tp; 15894 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz) 15895 StoreType = Tp; 15896 } 15897 15898 // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64. 15899 if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 && 15900 (64 <= NumElems * ToSz)) 15901 StoreType = MVT::f64; 15902 15903 // Bitcast the original vector into a vector of store-size units 15904 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(), 15905 StoreType, VT.getSizeInBits()/StoreType.getSizeInBits()); 15906 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits()); 15907 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, dl, StoreVecVT, Shuff); 15908 SmallVector<SDValue, 8> Chains; 15909 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8, 15910 TLI.getPointerTy()); 15911 SDValue Ptr = St->getBasePtr(); 15912 15913 // Perform one or more big stores into memory. 15914 for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) { 15915 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, 15916 StoreType, ShuffWide, 15917 DAG.getIntPtrConstant(i)); 15918 SDValue Ch = DAG.getStore(St->getChain(), dl, SubVec, Ptr, 15919 St->getPointerInfo(), St->isVolatile(), 15920 St->isNonTemporal(), St->getAlignment()); 15921 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 15922 Chains.push_back(Ch); 15923 } 15924 15925 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], 15926 Chains.size()); 15927 } 15928 15929 15930 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering 15931 // the FP state in cases where an emms may be missing. 15932 // A preferable solution to the general problem is to figure out the right 15933 // places to insert EMMS. This qualifies as a quick hack. 15934 15935 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode. 15936 if (VT.getSizeInBits() != 64) 15937 return SDValue(); 15938 15939 const Function *F = DAG.getMachineFunction().getFunction(); 15940 bool NoImplicitFloatOps = F->getFnAttributes(). 
15941 hasAttribute(Attributes::NoImplicitFloat); 15942 bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps 15943 && Subtarget->hasSSE2(); 15944 if ((VT.isVector() || 15945 (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) && 15946 isa<LoadSDNode>(St->getValue()) && 15947 !cast<LoadSDNode>(St->getValue())->isVolatile() && 15948 St->getChain().hasOneUse() && !St->isVolatile()) { 15949 SDNode* LdVal = St->getValue().getNode(); 15950 LoadSDNode *Ld = 0; 15951 int TokenFactorIndex = -1; 15952 SmallVector<SDValue, 8> Ops; 15953 SDNode* ChainVal = St->getChain().getNode(); 15954 // Must be a store of a load. We currently handle two cases: the load 15955 // is a direct child, and it's under an intervening TokenFactor. It is 15956 // possible to dig deeper under nested TokenFactors. 15957 if (ChainVal == LdVal) 15958 Ld = cast<LoadSDNode>(St->getChain()); 15959 else if (St->getValue().hasOneUse() && 15960 ChainVal->getOpcode() == ISD::TokenFactor) { 15961 for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) { 15962 if (ChainVal->getOperand(i).getNode() == LdVal) { 15963 TokenFactorIndex = i; 15964 Ld = cast<LoadSDNode>(St->getValue()); 15965 } else 15966 Ops.push_back(ChainVal->getOperand(i)); 15967 } 15968 } 15969 15970 if (!Ld || !ISD::isNormalLoad(Ld)) 15971 return SDValue(); 15972 15973 // If this is not the MMX case, i.e. we are just turning i64 load/store 15974 // into f64 load/store, avoid the transformation if there are multiple 15975 // uses of the loaded value. 15976 if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0)) 15977 return SDValue(); 15978 15979 DebugLoc LdDL = Ld->getDebugLoc(); 15980 DebugLoc StDL = N->getDebugLoc(); 15981 // If we are a 64-bit capable x86, lower to a single movq load/store pair. 15982 // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store 15983 // pair instead. 15984 if (Subtarget->is64Bit() || F64IsLegal) { 15985 EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64; 15986 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(), 15987 Ld->getPointerInfo(), Ld->isVolatile(), 15988 Ld->isNonTemporal(), Ld->isInvariant(), 15989 Ld->getAlignment()); 15990 SDValue NewChain = NewLd.getValue(1); 15991 if (TokenFactorIndex != -1) { 15992 Ops.push_back(NewChain); 15993 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0], 15994 Ops.size()); 15995 } 15996 return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(), 15997 St->getPointerInfo(), 15998 St->isVolatile(), St->isNonTemporal(), 15999 St->getAlignment()); 16000 } 16001 16002 // Otherwise, lower to two pairs of 32-bit loads / stores. 
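    // e.g. an i64 load/store pair on a 32-bit target where an f64 load/store
    // cannot be used becomes two i32 loads from [p] and [p+4] feeding two i32
    // stores, with the +4 halves using MinAlign(origAlign, 4).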
16003 SDValue LoAddr = Ld->getBasePtr(); 16004 SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr, 16005 DAG.getConstant(4, MVT::i32)); 16006 16007 SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr, 16008 Ld->getPointerInfo(), 16009 Ld->isVolatile(), Ld->isNonTemporal(), 16010 Ld->isInvariant(), Ld->getAlignment()); 16011 SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr, 16012 Ld->getPointerInfo().getWithOffset(4), 16013 Ld->isVolatile(), Ld->isNonTemporal(), 16014 Ld->isInvariant(), 16015 MinAlign(Ld->getAlignment(), 4)); 16016 16017 SDValue NewChain = LoLd.getValue(1); 16018 if (TokenFactorIndex != -1) { 16019 Ops.push_back(LoLd); 16020 Ops.push_back(HiLd); 16021 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0], 16022 Ops.size()); 16023 } 16024 16025 LoAddr = St->getBasePtr(); 16026 HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr, 16027 DAG.getConstant(4, MVT::i32)); 16028 16029 SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr, 16030 St->getPointerInfo(), 16031 St->isVolatile(), St->isNonTemporal(), 16032 St->getAlignment()); 16033 SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr, 16034 St->getPointerInfo().getWithOffset(4), 16035 St->isVolatile(), 16036 St->isNonTemporal(), 16037 MinAlign(St->getAlignment(), 4)); 16038 return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt); 16039 } 16040 return SDValue(); 16041} 16042 16043/// isHorizontalBinOp - Return 'true' if this vector operation is "horizontal" 16044/// and return the operands for the horizontal operation in LHS and RHS. A 16045/// horizontal operation performs the binary operation on successive elements 16046/// of its first operand, then on successive elements of its second operand, 16047/// returning the resulting values in a vector. For example, if 16048/// A = < float a0, float a1, float a2, float a3 > 16049/// and 16050/// B = < float b0, float b1, float b2, float b3 > 16051/// then the result of doing a horizontal operation on A and B is 16052/// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >. 16053/// In short, LHS and RHS are inspected to see if LHS op RHS is of the form 16054/// A horizontal-op B, for some already available A and B, and if so then LHS is 16055/// set to A, RHS to B, and the routine returns 'true'. 16056/// Note that the binary operation should have the property that if one of the 16057/// operands is UNDEF then the result is UNDEF. 16058static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) { 16059 // Look for the following pattern: if 16060 // A = < float a0, float a1, float a2, float a3 > 16061 // B = < float b0, float b1, float b2, float b3 > 16062 // and 16063 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6> 16064 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7> 16065 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 > 16066 // which is A horizontal-op B. 16067 16068 // At least one of the operands should be a vector shuffle. 16069 if (LHS.getOpcode() != ISD::VECTOR_SHUFFLE && 16070 RHS.getOpcode() != ISD::VECTOR_SHUFFLE) 16071 return false; 16072 16073 EVT VT = LHS.getValueType(); 16074 16075 assert((VT.is128BitVector() || VT.is256BitVector()) && 16076 "Unsupported vector type for horizontal add/sub"); 16077 16078 // Handle 128 and 256-bit vector lengths. AVX defines horizontal add/sub to 16079 // operate independently on 128-bit lanes. 
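  // For example, for v8f32 the result is computed per 128-bit lane:
  //   A hadd B = < a0+a1, a2+a3, b0+b1, b2+b3, a4+a5, a6+a7, b4+b5, b6+b7 >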
16080 unsigned NumElts = VT.getVectorNumElements(); 16081 unsigned NumLanes = VT.getSizeInBits()/128; 16082 unsigned NumLaneElts = NumElts / NumLanes; 16083 assert((NumLaneElts % 2 == 0) && 16084 "Vector type should have an even number of elements in each lane"); 16085 unsigned HalfLaneElts = NumLaneElts/2; 16086 16087 // View LHS in the form 16088 // LHS = VECTOR_SHUFFLE A, B, LMask 16089 // If LHS is not a shuffle then pretend it is the shuffle 16090 // LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1> 16091 // NOTE: in what follows a default initialized SDValue represents an UNDEF of 16092 // type VT. 16093 SDValue A, B; 16094 SmallVector<int, 16> LMask(NumElts); 16095 if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) { 16096 if (LHS.getOperand(0).getOpcode() != ISD::UNDEF) 16097 A = LHS.getOperand(0); 16098 if (LHS.getOperand(1).getOpcode() != ISD::UNDEF) 16099 B = LHS.getOperand(1); 16100 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask(); 16101 std::copy(Mask.begin(), Mask.end(), LMask.begin()); 16102 } else { 16103 if (LHS.getOpcode() != ISD::UNDEF) 16104 A = LHS; 16105 for (unsigned i = 0; i != NumElts; ++i) 16106 LMask[i] = i; 16107 } 16108 16109 // Likewise, view RHS in the form 16110 // RHS = VECTOR_SHUFFLE C, D, RMask 16111 SDValue C, D; 16112 SmallVector<int, 16> RMask(NumElts); 16113 if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) { 16114 if (RHS.getOperand(0).getOpcode() != ISD::UNDEF) 16115 C = RHS.getOperand(0); 16116 if (RHS.getOperand(1).getOpcode() != ISD::UNDEF) 16117 D = RHS.getOperand(1); 16118 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask(); 16119 std::copy(Mask.begin(), Mask.end(), RMask.begin()); 16120 } else { 16121 if (RHS.getOpcode() != ISD::UNDEF) 16122 C = RHS; 16123 for (unsigned i = 0; i != NumElts; ++i) 16124 RMask[i] = i; 16125 } 16126 16127 // Check that the shuffles are both shuffling the same vectors. 16128 if (!(A == C && B == D) && !(A == D && B == C)) 16129 return false; 16130 16131 // If everything is UNDEF then bail out: it would be better to fold to UNDEF. 16132 if (!A.getNode() && !B.getNode()) 16133 return false; 16134 16135 // If A and B occur in reverse order in RHS, then "swap" them (which means 16136 // rewriting the mask). 16137 if (A != C) 16138 CommuteVectorShuffleMask(RMask, NumElts); 16139 16140 // At this point LHS and RHS are equivalent to 16141 // LHS = VECTOR_SHUFFLE A, B, LMask 16142 // RHS = VECTOR_SHUFFLE A, B, RMask 16143 // Check that the masks correspond to performing a horizontal operation. 16144 for (unsigned i = 0; i != NumElts; ++i) { 16145 int LIdx = LMask[i], RIdx = RMask[i]; 16146 16147 // Ignore any UNDEF components. 16148 if (LIdx < 0 || RIdx < 0 || 16149 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) || 16150 (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts))) 16151 continue; 16152 16153 // Check that successive elements are being operated on. If not, this is 16154 // not a horizontal operation. 16155 unsigned Src = (i/HalfLaneElts) % 2; // each lane is split between srcs 16156 unsigned LaneStart = (i/NumLaneElts) * NumLaneElts; 16157 int Index = 2*(i%HalfLaneElts) + NumElts*Src + LaneStart; 16158 if (!(LIdx == Index && RIdx == Index + 1) && 16159 !(IsCommutative && LIdx == Index + 1 && RIdx == Index)) 16160 return false; 16161 } 16162 16163 LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it. 16164 RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it. 
16165 return true; 16166} 16167 16168/// PerformFADDCombine - Do target-specific dag combines on floating point adds. 16169static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG, 16170 const X86Subtarget *Subtarget) { 16171 EVT VT = N->getValueType(0); 16172 SDValue LHS = N->getOperand(0); 16173 SDValue RHS = N->getOperand(1); 16174 16175 // Try to synthesize horizontal adds from adds of shuffles. 16176 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) || 16177 (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) && 16178 isHorizontalBinOp(LHS, RHS, true)) 16179 return DAG.getNode(X86ISD::FHADD, N->getDebugLoc(), VT, LHS, RHS); 16180 return SDValue(); 16181} 16182 16183/// PerformFSUBCombine - Do target-specific dag combines on floating point subs. 16184static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG, 16185 const X86Subtarget *Subtarget) { 16186 EVT VT = N->getValueType(0); 16187 SDValue LHS = N->getOperand(0); 16188 SDValue RHS = N->getOperand(1); 16189 16190 // Try to synthesize horizontal subs from subs of shuffles. 16191 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) || 16192 (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) && 16193 isHorizontalBinOp(LHS, RHS, false)) 16194 return DAG.getNode(X86ISD::FHSUB, N->getDebugLoc(), VT, LHS, RHS); 16195 return SDValue(); 16196} 16197 16198/// PerformFORCombine - Do target-specific dag combines on X86ISD::FOR and 16199/// X86ISD::FXOR nodes. 16200static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) { 16201 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR); 16202 // F[X]OR(0.0, x) -> x 16203 // F[X]OR(x, 0.0) -> x 16204 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) 16205 if (C->getValueAPF().isPosZero()) 16206 return N->getOperand(1); 16207 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1))) 16208 if (C->getValueAPF().isPosZero()) 16209 return N->getOperand(0); 16210 return SDValue(); 16211} 16212 16213/// PerformFMinFMaxCombine - Do target-specific dag combines on X86ISD::FMIN and 16214/// X86ISD::FMAX nodes. 16215static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) { 16216 assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX); 16217 16218 // Only perform optimizations if UnsafeMath is used. 16219 if (!DAG.getTarget().Options.UnsafeFPMath) 16220 return SDValue(); 16221 16222 // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes 16223 // into FMINC and FMAXC, which are Commutative operations. 16224 unsigned NewOp = 0; 16225 switch (N->getOpcode()) { 16226 default: llvm_unreachable("unknown opcode"); 16227 case X86ISD::FMIN: NewOp = X86ISD::FMINC; break; 16228 case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break; 16229 } 16230 16231 return DAG.getNode(NewOp, N->getDebugLoc(), N->getValueType(0), 16232 N->getOperand(0), N->getOperand(1)); 16233} 16234 16235 16236/// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes. 
16237static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) { 16238 // FAND(0.0, x) -> 0.0 16239 // FAND(x, 0.0) -> 0.0 16240 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) 16241 if (C->getValueAPF().isPosZero()) 16242 return N->getOperand(0); 16243 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1))) 16244 if (C->getValueAPF().isPosZero()) 16245 return N->getOperand(1); 16246 return SDValue(); 16247} 16248 16249static SDValue PerformBTCombine(SDNode *N, 16250 SelectionDAG &DAG, 16251 TargetLowering::DAGCombinerInfo &DCI) { 16252 // BT ignores high bits in the bit index operand. 16253 SDValue Op1 = N->getOperand(1); 16254 if (Op1.hasOneUse()) { 16255 unsigned BitWidth = Op1.getValueSizeInBits(); 16256 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth)); 16257 APInt KnownZero, KnownOne; 16258 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), 16259 !DCI.isBeforeLegalizeOps()); 16260 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 16261 if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) || 16262 TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO)) 16263 DCI.CommitTargetLoweringOpt(TLO); 16264 } 16265 return SDValue(); 16266} 16267 16268static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) { 16269 SDValue Op = N->getOperand(0); 16270 if (Op.getOpcode() == ISD::BITCAST) 16271 Op = Op.getOperand(0); 16272 EVT VT = N->getValueType(0), OpVT = Op.getValueType(); 16273 if (Op.getOpcode() == X86ISD::VZEXT_LOAD && 16274 VT.getVectorElementType().getSizeInBits() == 16275 OpVT.getVectorElementType().getSizeInBits()) { 16276 return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op); 16277 } 16278 return SDValue(); 16279} 16280 16281static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG, 16282 TargetLowering::DAGCombinerInfo &DCI, 16283 const X86Subtarget *Subtarget) { 16284 if (!DCI.isBeforeLegalizeOps()) 16285 return SDValue(); 16286 16287 if (!Subtarget->hasAVX()) 16288 return SDValue(); 16289 16290 EVT VT = N->getValueType(0); 16291 SDValue Op = N->getOperand(0); 16292 EVT OpVT = Op.getValueType(); 16293 DebugLoc dl = N->getDebugLoc(); 16294 16295 if ((VT == MVT::v4i64 && OpVT == MVT::v4i32) || 16296 (VT == MVT::v8i32 && OpVT == MVT::v8i16)) { 16297 16298 if (Subtarget->hasAVX2()) 16299 return DAG.getNode(X86ISD::VSEXT_MOVL, dl, VT, Op); 16300 16301 // Optimize vectors in AVX mode 16302 // Sign extend v8i16 to v8i32 and 16303 // v4i32 to v4i64 16304 // 16305 // Divide input vector into two parts 16306 // for v4i32 the shuffle mask will be { 0, 1, -1, -1} {2, 3, -1, -1} 16307 // use vpmovsx instruction to extend v4i32 -> v2i64; v8i16 -> v4i32 16308 // concat the vectors to original VT 16309 16310 unsigned NumElems = OpVT.getVectorNumElements(); 16311 SDValue Undef = DAG.getUNDEF(OpVT); 16312 16313 SmallVector<int,8> ShufMask1(NumElems, -1); 16314 for (unsigned i = 0; i != NumElems/2; ++i) 16315 ShufMask1[i] = i; 16316 16317 SDValue OpLo = DAG.getVectorShuffle(OpVT, dl, Op, Undef, &ShufMask1[0]); 16318 16319 SmallVector<int,8> ShufMask2(NumElems, -1); 16320 for (unsigned i = 0; i != NumElems/2; ++i) 16321 ShufMask2[i] = i + NumElems/2; 16322 16323 SDValue OpHi = DAG.getVectorShuffle(OpVT, dl, Op, Undef, &ShufMask2[0]); 16324 16325 EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(), 16326 VT.getVectorNumElements()/2); 16327 16328 OpLo = DAG.getNode(X86ISD::VSEXT_MOVL, dl, HalfVT, OpLo); 16329 OpHi = DAG.getNode(X86ISD::VSEXT_MOVL, dl, HalfVT, OpHi); 
16330 16331 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi); 16332 } 16333 return SDValue(); 16334} 16335 16336static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG, 16337 const X86Subtarget* Subtarget) { 16338 DebugLoc dl = N->getDebugLoc(); 16339 EVT VT = N->getValueType(0); 16340 16341 // Let legalize expand this if it isn't a legal type yet. 16342 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 16343 return SDValue(); 16344 16345 EVT ScalarVT = VT.getScalarType(); 16346 if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || 16347 (!Subtarget->hasFMA() && !Subtarget->hasFMA4())) 16348 return SDValue(); 16349 16350 SDValue A = N->getOperand(0); 16351 SDValue B = N->getOperand(1); 16352 SDValue C = N->getOperand(2); 16353 16354 bool NegA = (A.getOpcode() == ISD::FNEG); 16355 bool NegB = (B.getOpcode() == ISD::FNEG); 16356 bool NegC = (C.getOpcode() == ISD::FNEG); 16357 16358 // Negative multiplication when NegA xor NegB 16359 bool NegMul = (NegA != NegB); 16360 if (NegA) 16361 A = A.getOperand(0); 16362 if (NegB) 16363 B = B.getOperand(0); 16364 if (NegC) 16365 C = C.getOperand(0); 16366 16367 unsigned Opcode; 16368 if (!NegMul) 16369 Opcode = (!NegC) ? X86ISD::FMADD : X86ISD::FMSUB; 16370 else 16371 Opcode = (!NegC) ? X86ISD::FNMADD : X86ISD::FNMSUB; 16372 16373 return DAG.getNode(Opcode, dl, VT, A, B, C); 16374} 16375 16376static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG, 16377 TargetLowering::DAGCombinerInfo &DCI, 16378 const X86Subtarget *Subtarget) { 16379 // (i32 zext (and (i8 x86isd::setcc_carry), 1)) -> 16380 // (and (i32 x86isd::setcc_carry), 1) 16381 // This eliminates the zext. This transformation is necessary because 16382 // ISD::SETCC is always legalized to i8. 16383 DebugLoc dl = N->getDebugLoc(); 16384 SDValue N0 = N->getOperand(0); 16385 EVT VT = N->getValueType(0); 16386 EVT OpVT = N0.getValueType(); 16387 16388 if (N0.getOpcode() == ISD::AND && 16389 N0.hasOneUse() && 16390 N0.getOperand(0).hasOneUse()) { 16391 SDValue N00 = N0.getOperand(0); 16392 if (N00.getOpcode() != X86ISD::SETCC_CARRY) 16393 return SDValue(); 16394 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 16395 if (!C || C->getZExtValue() != 1) 16396 return SDValue(); 16397 return DAG.getNode(ISD::AND, dl, VT, 16398 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT, 16399 N00.getOperand(0), N00.getOperand(1)), 16400 DAG.getConstant(1, VT)); 16401 } 16402 16403 // Optimize vectors in AVX mode: 16404 // 16405 // v8i16 -> v8i32 16406 // Use vpunpcklwd for 4 lower elements v8i16 -> v4i32. 16407 // Use vpunpckhwd for 4 upper elements v8i16 -> v4i32. 16408 // Concat upper and lower parts. 16409 // 16410 // v4i32 -> v4i64 16411 // Use vpunpckldq for 4 lower elements v4i32 -> v2i64. 16412 // Use vpunpckhdq for 4 upper elements v4i32 -> v2i64. 16413 // Concat upper and lower parts. 
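  // e.g. a v8i16 value <a,b,c,d,e,f,g,h> unpacked against zero yields
  // <a,0,b,0,c,0,d,0> and <e,0,f,0,g,0,h,0>, which after the bitcasts and
  // concat form the zero-extended v8i32 result.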
16414 // 16415 if (!DCI.isBeforeLegalizeOps()) 16416 return SDValue(); 16417 16418 if (!Subtarget->hasAVX()) 16419 return SDValue(); 16420 16421 if (((VT == MVT::v8i32) && (OpVT == MVT::v8i16)) || 16422 ((VT == MVT::v4i64) && (OpVT == MVT::v4i32))) { 16423 16424 if (Subtarget->hasAVX2()) 16425 return DAG.getNode(X86ISD::VZEXT_MOVL, dl, VT, N0); 16426 16427 SDValue ZeroVec = getZeroVector(OpVT, Subtarget, DAG, dl); 16428 SDValue OpLo = getUnpackl(DAG, dl, OpVT, N0, ZeroVec); 16429 SDValue OpHi = getUnpackh(DAG, dl, OpVT, N0, ZeroVec); 16430 16431 EVT HVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 16432 VT.getVectorNumElements()/2); 16433 16434 OpLo = DAG.getNode(ISD::BITCAST, dl, HVT, OpLo); 16435 OpHi = DAG.getNode(ISD::BITCAST, dl, HVT, OpHi); 16436 16437 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi); 16438 } 16439 16440 return SDValue(); 16441} 16442 16443// Optimize x == -y --> x+y == 0 16444// x != -y --> x+y != 0 16445static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG) { 16446 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); 16447 SDValue LHS = N->getOperand(0); 16448 SDValue RHS = N->getOperand(1); 16449 16450 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB) 16451 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(LHS.getOperand(0))) 16452 if (C->getAPIntValue() == 0 && LHS.hasOneUse()) { 16453 SDValue addV = DAG.getNode(ISD::ADD, N->getDebugLoc(), 16454 LHS.getValueType(), RHS, LHS.getOperand(1)); 16455 return DAG.getSetCC(N->getDebugLoc(), N->getValueType(0), 16456 addV, DAG.getConstant(0, addV.getValueType()), CC); 16457 } 16458 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB) 16459 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS.getOperand(0))) 16460 if (C->getAPIntValue() == 0 && RHS.hasOneUse()) { 16461 SDValue addV = DAG.getNode(ISD::ADD, N->getDebugLoc(), 16462 RHS.getValueType(), LHS, RHS.getOperand(1)); 16463 return DAG.getSetCC(N->getDebugLoc(), N->getValueType(0), 16464 addV, DAG.getConstant(0, addV.getValueType()), CC); 16465 } 16466 return SDValue(); 16467} 16468 16469// Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT 16470static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG, 16471 TargetLowering::DAGCombinerInfo &DCI, 16472 const X86Subtarget *Subtarget) { 16473 DebugLoc DL = N->getDebugLoc(); 16474 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0)); 16475 SDValue EFLAGS = N->getOperand(1); 16476 16477 // Materialize "setb reg" as "sbb reg,reg", since it can be extended without 16478 // a zext and produces an all-ones bit which is more useful than 0/1 in some 16479 // cases. 16480 if (CC == X86::COND_B) 16481 return DAG.getNode(ISD::AND, DL, MVT::i8, 16482 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8, 16483 DAG.getConstant(CC, MVT::i8), EFLAGS), 16484 DAG.getConstant(1, MVT::i8)); 16485 16486 SDValue Flags; 16487 16488 Flags = checkBoolTestSetCCCombine(EFLAGS, CC); 16489 if (Flags.getNode()) { 16490 SDValue Cond = DAG.getConstant(CC, MVT::i8); 16491 return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags); 16492 } 16493 16494 return SDValue(); 16495} 16496 16497// Optimize branch condition evaluation. 
16498// 16499static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG, 16500 TargetLowering::DAGCombinerInfo &DCI, 16501 const X86Subtarget *Subtarget) { 16502 DebugLoc DL = N->getDebugLoc(); 16503 SDValue Chain = N->getOperand(0); 16504 SDValue Dest = N->getOperand(1); 16505 SDValue EFLAGS = N->getOperand(3); 16506 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2)); 16507 16508 SDValue Flags; 16509 16510 Flags = checkBoolTestSetCCCombine(EFLAGS, CC); 16511 if (Flags.getNode()) { 16512 SDValue Cond = DAG.getConstant(CC, MVT::i8); 16513 return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), Chain, Dest, Cond, 16514 Flags); 16515 } 16516 16517 return SDValue(); 16518} 16519 16520static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG, 16521 const X86TargetLowering *XTLI) { 16522 SDValue Op0 = N->getOperand(0); 16523 EVT InVT = Op0->getValueType(0); 16524 16525 // SINT_TO_FP(v4i8) -> SINT_TO_FP(SEXT(v4i8 to v4i32)) 16526 if (InVT == MVT::v8i8 || InVT == MVT::v4i8) { 16527 DebugLoc dl = N->getDebugLoc(); 16528 MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32; 16529 SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0); 16530 return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P); 16531 } 16532 16533 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have 16534 // a 32-bit target where SSE doesn't support i64->FP operations. 16535 if (Op0.getOpcode() == ISD::LOAD) { 16536 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode()); 16537 EVT VT = Ld->getValueType(0); 16538 if (!Ld->isVolatile() && !N->getValueType(0).isVector() && 16539 ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() && 16540 !XTLI->getSubtarget()->is64Bit() && 16541 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 16542 SDValue FILDChain = XTLI->BuildFILD(SDValue(N, 0), Ld->getValueType(0), 16543 Ld->getChain(), Op0, DAG); 16544 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1)); 16545 return FILDChain; 16546 } 16547 } 16548 return SDValue(); 16549} 16550 16551// Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS 16552static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG, 16553 X86TargetLowering::DAGCombinerInfo &DCI) { 16554 // If the LHS and RHS of the ADC node are zero, then it can't overflow and 16555 // the result is either zero or one (depending on the input carry bit). 16556 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1. 16557 if (X86::isZeroNode(N->getOperand(0)) && 16558 X86::isZeroNode(N->getOperand(1)) && 16559 // We don't have a good way to replace an EFLAGS use, so only do this when 16560 // dead right now. 16561 SDValue(N, 1).use_empty()) { 16562 DebugLoc DL = N->getDebugLoc(); 16563 EVT VT = N->getValueType(0); 16564 SDValue CarryOut = DAG.getConstant(0, N->getValueType(1)); 16565 SDValue Res1 = DAG.getNode(ISD::AND, DL, VT, 16566 DAG.getNode(X86ISD::SETCC_CARRY, DL, VT, 16567 DAG.getConstant(X86::COND_B,MVT::i8), 16568 N->getOperand(2)), 16569 DAG.getConstant(1, VT)); 16570 return DCI.CombineTo(N, Res1, CarryOut); 16571 } 16572 16573 return SDValue(); 16574} 16575 16576// fold (add Y, (sete X, 0)) -> adc 0, Y 16577// (add Y, (setne X, 0)) -> sbb -1, Y 16578// (sub (sete X, 0), Y) -> sbb 0, Y 16579// (sub (setne X, 0), Y) -> adc -1, Y 16580static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) { 16581 DebugLoc DL = N->getDebugLoc(); 16582 16583 // Look through ZExts. 16584 SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 
1 : 0); 16585 if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse()) 16586 return SDValue(); 16587 16588 SDValue SetCC = Ext.getOperand(0); 16589 if (SetCC.getOpcode() != X86ISD::SETCC || !SetCC.hasOneUse()) 16590 return SDValue(); 16591 16592 X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0); 16593 if (CC != X86::COND_E && CC != X86::COND_NE) 16594 return SDValue(); 16595 16596 SDValue Cmp = SetCC.getOperand(1); 16597 if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() || 16598 !X86::isZeroNode(Cmp.getOperand(1)) || 16599 !Cmp.getOperand(0).getValueType().isInteger()) 16600 return SDValue(); 16601 16602 SDValue CmpOp0 = Cmp.getOperand(0); 16603 SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0, 16604 DAG.getConstant(1, CmpOp0.getValueType())); 16605 16606 SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1); 16607 if (CC == X86::COND_NE) 16608 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB, 16609 DL, OtherVal.getValueType(), OtherVal, 16610 DAG.getConstant(-1ULL, OtherVal.getValueType()), NewCmp); 16611 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC, 16612 DL, OtherVal.getValueType(), OtherVal, 16613 DAG.getConstant(0, OtherVal.getValueType()), NewCmp); 16614} 16615 16616/// PerformADDCombine - Do target-specific dag combines on integer adds. 16617static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG, 16618 const X86Subtarget *Subtarget) { 16619 EVT VT = N->getValueType(0); 16620 SDValue Op0 = N->getOperand(0); 16621 SDValue Op1 = N->getOperand(1); 16622 16623 // Try to synthesize horizontal adds from adds of shuffles. 16624 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) || 16625 (Subtarget->hasAVX2() && (VT == MVT::v16i16 || VT == MVT::v8i32))) && 16626 isHorizontalBinOp(Op0, Op1, true)) 16627 return DAG.getNode(X86ISD::HADD, N->getDebugLoc(), VT, Op0, Op1); 16628 16629 return OptimizeConditionalInDecrement(N, DAG); 16630} 16631 16632static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG, 16633 const X86Subtarget *Subtarget) { 16634 SDValue Op0 = N->getOperand(0); 16635 SDValue Op1 = N->getOperand(1); 16636 16637 // X86 can't encode an immediate LHS of a sub. See if we can push the 16638 // negation into a preceding instruction. 16639 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) { 16640 // If the RHS of the sub is a XOR with one use and a constant, invert the 16641 // immediate. Then add one to the LHS of the sub so we can turn 16642 // X-Y -> X+~Y+1, saving one register. 16643 if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR && 16644 isa<ConstantSDNode>(Op1.getOperand(1))) { 16645 APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue(); 16646 EVT VT = Op0.getValueType(); 16647 SDValue NewXor = DAG.getNode(ISD::XOR, Op1.getDebugLoc(), VT, 16648 Op1.getOperand(0), 16649 DAG.getConstant(~XorC, VT)); 16650 return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, NewXor, 16651 DAG.getConstant(C->getAPIntValue()+1, VT)); 16652 } 16653 } 16654 16655 // Try to synthesize horizontal adds from adds of shuffles. 
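  // e.g. for v4i32, (sub (shuffle A,B,<0,2,4,6>), (shuffle A,B,<1,3,5,7>))
  // is A hsub B and can be selected as a single phsubd.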
16656 EVT VT = N->getValueType(0); 16657 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) || 16658 (Subtarget->hasAVX2() && (VT == MVT::v16i16 || VT == MVT::v8i32))) && 16659 isHorizontalBinOp(Op0, Op1, true)) 16660 return DAG.getNode(X86ISD::HSUB, N->getDebugLoc(), VT, Op0, Op1); 16661 16662 return OptimizeConditionalInDecrement(N, DAG); 16663} 16664 16665/// performVZEXTCombine - Performs build vector combines 16666static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG, 16667 TargetLowering::DAGCombinerInfo &DCI, 16668 const X86Subtarget *Subtarget) { 16669 // (vzext (bitcast (vzext (x)) -> (vzext x) 16670 SDValue In = N->getOperand(0); 16671 while (In.getOpcode() == ISD::BITCAST) 16672 In = In.getOperand(0); 16673 16674 if (In.getOpcode() != X86ISD::VZEXT) 16675 return SDValue(); 16676 16677 return DAG.getNode(X86ISD::VZEXT, N->getDebugLoc(), N->getValueType(0), In.getOperand(0)); 16678} 16679 16680SDValue X86TargetLowering::PerformDAGCombine(SDNode *N, 16681 DAGCombinerInfo &DCI) const { 16682 SelectionDAG &DAG = DCI.DAG; 16683 switch (N->getOpcode()) { 16684 default: break; 16685 case ISD::EXTRACT_VECTOR_ELT: 16686 return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, DCI); 16687 case ISD::VSELECT: 16688 case ISD::SELECT: return PerformSELECTCombine(N, DAG, DCI, Subtarget); 16689 case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI, Subtarget); 16690 case ISD::ADD: return PerformAddCombine(N, DAG, Subtarget); 16691 case ISD::SUB: return PerformSubCombine(N, DAG, Subtarget); 16692 case X86ISD::ADC: return PerformADCCombine(N, DAG, DCI); 16693 case ISD::MUL: return PerformMulCombine(N, DAG, DCI); 16694 case ISD::SHL: 16695 case ISD::SRA: 16696 case ISD::SRL: return PerformShiftCombine(N, DAG, DCI, Subtarget); 16697 case ISD::AND: return PerformAndCombine(N, DAG, DCI, Subtarget); 16698 case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget); 16699 case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget); 16700 case ISD::LOAD: return PerformLOADCombine(N, DAG, DCI, Subtarget); 16701 case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget); 16702 case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, this); 16703 case ISD::FADD: return PerformFADDCombine(N, DAG, Subtarget); 16704 case ISD::FSUB: return PerformFSUBCombine(N, DAG, Subtarget); 16705 case X86ISD::FXOR: 16706 case X86ISD::FOR: return PerformFORCombine(N, DAG); 16707 case X86ISD::FMIN: 16708 case X86ISD::FMAX: return PerformFMinFMaxCombine(N, DAG); 16709 case X86ISD::FAND: return PerformFANDCombine(N, DAG); 16710 case X86ISD::BT: return PerformBTCombine(N, DAG, DCI); 16711 case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG); 16712 case ISD::ANY_EXTEND: 16713 case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG, DCI, Subtarget); 16714 case ISD::SIGN_EXTEND: return PerformSExtCombine(N, DAG, DCI, Subtarget); 16715 case ISD::TRUNCATE: return PerformTruncateCombine(N, DAG,DCI,Subtarget); 16716 case ISD::SETCC: return PerformISDSETCCCombine(N, DAG); 16717 case X86ISD::SETCC: return PerformSETCCCombine(N, DAG, DCI, Subtarget); 16718 case X86ISD::BRCOND: return PerformBrCondCombine(N, DAG, DCI, Subtarget); 16719 case X86ISD::VZEXT: return performVZEXTCombine(N, DAG, DCI, Subtarget); 16720 case X86ISD::SHUFP: // Handle all target specific shuffles 16721 case X86ISD::PALIGN: 16722 case X86ISD::UNPCKH: 16723 case X86ISD::UNPCKL: 16724 case X86ISD::MOVHLPS: 16725 case X86ISD::MOVLHPS: 16726 case X86ISD::PSHUFD: 16727 case X86ISD::PSHUFHW: 16728 case X86ISD::PSHUFLW: 
16729 case X86ISD::MOVSS: 16730 case X86ISD::MOVSD: 16731 case X86ISD::VPERMILP: 16732 case X86ISD::VPERM2X128: 16733 case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI, Subtarget); 16734 case ISD::FMA: return PerformFMACombine(N, DAG, Subtarget); 16735 } 16736 16737 return SDValue(); 16738} 16739 16740/// isTypeDesirableForOp - Return true if the target has native support for 16741/// the specified value type and it is 'desirable' to use the type for the 16742/// given node type. e.g., on x86 i16 is legal, but undesirable since i16 16743/// instruction encodings are longer and some i16 instructions are slow. 16744bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const { 16745 if (!isTypeLegal(VT)) 16746 return false; 16747 if (VT != MVT::i16) 16748 return true; 16749 16750 switch (Opc) { 16751 default: 16752 return true; 16753 case ISD::LOAD: 16754 case ISD::SIGN_EXTEND: 16755 case ISD::ZERO_EXTEND: 16756 case ISD::ANY_EXTEND: 16757 case ISD::SHL: 16758 case ISD::SRL: 16759 case ISD::SUB: 16760 case ISD::ADD: 16761 case ISD::MUL: 16762 case ISD::AND: 16763 case ISD::OR: 16764 case ISD::XOR: 16765 return false; 16766 } 16767} 16768 16769/// IsDesirableToPromoteOp - This method queries the target whether it is 16770/// beneficial for the dag combiner to promote the specified node. If true, it 16771/// should return the desired promotion type by reference. 16772bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const { 16773 EVT VT = Op.getValueType(); 16774 if (VT != MVT::i16) 16775 return false; 16776 16777 bool Promote = false; 16778 bool Commute = false; 16779 switch (Op.getOpcode()) { 16780 default: break; 16781 case ISD::LOAD: { 16782 LoadSDNode *LD = cast<LoadSDNode>(Op); 16783 // If the non-extending load has a single use and it's not live out, then it 16784 // might be folded. 16785 if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&& 16786 Op.hasOneUse()*/) { 16787 for (SDNode::use_iterator UI = Op.getNode()->use_begin(), 16788 UE = Op.getNode()->use_end(); UI != UE; ++UI) { 16789 // The only case where we'd want to promote LOAD (rather than it being 16790 // promoted as an operand) is when its only use is a liveout. 16791 if (UI->getOpcode() != ISD::CopyToReg) 16792 return false; 16793 } 16794 } 16795 Promote = true; 16796 break; 16797 } 16798 case ISD::SIGN_EXTEND: 16799 case ISD::ZERO_EXTEND: 16800 case ISD::ANY_EXTEND: 16801 Promote = true; 16802 break; 16803 case ISD::SHL: 16804 case ISD::SRL: { 16805 SDValue N0 = Op.getOperand(0); 16806 // Look out for (store (shl (load), x)). 16807 if (MayFoldLoad(N0) && MayFoldIntoStore(Op)) 16808 return false; 16809 Promote = true; 16810 break; 16811 } 16812 case ISD::ADD: 16813 case ISD::MUL: 16814 case ISD::AND: 16815 case ISD::OR: 16816 case ISD::XOR: 16817 Commute = true; 16818 // fallthrough 16819 case ISD::SUB: { 16820 SDValue N0 = Op.getOperand(0); 16821 SDValue N1 = Op.getOperand(1); 16822 if (!Commute && MayFoldLoad(N1)) 16823 return false; 16824 // Avoid disabling potential load folding opportunities.
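    // A sketch of the concern (illustrative x86 pattern, not from this file):
    //   (store (add (load [mem]), Cst), [mem])
    // can usually be selected as a single read-modify-write `addw $Cst, [mem]`.
    // Promoting the i16 add to i32 first would force a separate load, 32-bit
    // add, truncate and store, so the checks below decline to promote when
    // that kind of folding looks possible.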
16825 if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op))) 16826 return false; 16827 if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op))) 16828 return false; 16829 Promote = true; 16830 } 16831 } 16832 16833 PVT = MVT::i32; 16834 return Promote; 16835} 16836 16837//===----------------------------------------------------------------------===// 16838// X86 Inline Assembly Support 16839//===----------------------------------------------------------------------===// 16840 16841namespace { 16842 // Helper to match a string separated by whitespace. 16843 bool matchAsmImpl(StringRef s, ArrayRef<const StringRef *> args) { 16844 s = s.substr(s.find_first_not_of(" \t")); // Skip leading whitespace. 16845 16846 for (unsigned i = 0, e = args.size(); i != e; ++i) { 16847 StringRef piece(*args[i]); 16848 if (!s.startswith(piece)) // Check if the piece matches. 16849 return false; 16850 16851 s = s.substr(piece.size()); 16852 StringRef::size_type pos = s.find_first_not_of(" \t"); 16853 if (pos == 0) // We matched a prefix. 16854 return false; 16855 16856 s = s.substr(pos); 16857 } 16858 16859 return s.empty(); 16860 } 16861 const VariadicFunction1<bool, StringRef, StringRef, matchAsmImpl> matchAsm={}; 16862} 16863 16864bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const { 16865 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue()); 16866 16867 std::string AsmStr = IA->getAsmString(); 16868 16869 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType()); 16870 if (!Ty || Ty->getBitWidth() % 16 != 0) 16871 return false; 16872 16873 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a" 16874 SmallVector<StringRef, 4> AsmPieces; 16875 SplitString(AsmStr, AsmPieces, ";\n"); 16876 16877 switch (AsmPieces.size()) { 16878 default: return false; 16879 case 1: 16880 // FIXME: this should verify that we are targeting a 486 or better. If not, 16881 // we will turn this bswap into something that will be lowered to logical 16882 // ops instead of emitting the bswap asm. For now, we don't support 486 or 16883 // lower so don't worry about this. 16884 // bswap $0 16885 if (matchAsm(AsmPieces[0], "bswap", "$0") || 16886 matchAsm(AsmPieces[0], "bswapl", "$0") || 16887 matchAsm(AsmPieces[0], "bswapq", "$0") || 16888 matchAsm(AsmPieces[0], "bswap", "${0:q}") || 16889 matchAsm(AsmPieces[0], "bswapl", "${0:q}") || 16890 matchAsm(AsmPieces[0], "bswapq", "${0:q}")) { 16891 // No need to check constraints, nothing other than the equivalent of 16892 // "=r,0" would be valid here. 
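      // For example (an illustrative call, not taken from this file):
      //   %r = call i32 asm "bswap $0", "=r,0"(i32 %x)
      // is expanded here into a call to the llvm.bswap.i32 intrinsic.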
16893 return IntrinsicLowering::LowerToByteSwap(CI); 16894 } 16895 16896 // rorw $$8, ${0:w} --> llvm.bswap.i16 16897 if (CI->getType()->isIntegerTy(16) && 16898 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 && 16899 (matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") || 16900 matchAsm(AsmPieces[0], "rolw", "$$8,", "${0:w}"))) { 16901 AsmPieces.clear(); 16902 const std::string &ConstraintsStr = IA->getConstraintString(); 16903 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ","); 16904 std::sort(AsmPieces.begin(), AsmPieces.end()); 16905 if (AsmPieces.size() == 4 && 16906 AsmPieces[0] == "~{cc}" && 16907 AsmPieces[1] == "~{dirflag}" && 16908 AsmPieces[2] == "~{flags}" && 16909 AsmPieces[3] == "~{fpsr}") 16910 return IntrinsicLowering::LowerToByteSwap(CI); 16911 } 16912 break; 16913 case 3: 16914 if (CI->getType()->isIntegerTy(32) && 16915 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 && 16916 matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") && 16917 matchAsm(AsmPieces[1], "rorl", "$$16,", "$0") && 16918 matchAsm(AsmPieces[2], "rorw", "$$8,", "${0:w}")) { 16919 AsmPieces.clear(); 16920 const std::string &ConstraintsStr = IA->getConstraintString(); 16921 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ","); 16922 std::sort(AsmPieces.begin(), AsmPieces.end()); 16923 if (AsmPieces.size() == 4 && 16924 AsmPieces[0] == "~{cc}" && 16925 AsmPieces[1] == "~{dirflag}" && 16926 AsmPieces[2] == "~{flags}" && 16927 AsmPieces[3] == "~{fpsr}") 16928 return IntrinsicLowering::LowerToByteSwap(CI); 16929 } 16930 16931 if (CI->getType()->isIntegerTy(64)) { 16932 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints(); 16933 if (Constraints.size() >= 2 && 16934 Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" && 16935 Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") { 16936 // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64 16937 if (matchAsm(AsmPieces[0], "bswap", "%eax") && 16938 matchAsm(AsmPieces[1], "bswap", "%edx") && 16939 matchAsm(AsmPieces[2], "xchgl", "%eax,", "%edx")) 16940 return IntrinsicLowering::LowerToByteSwap(CI); 16941 } 16942 } 16943 break; 16944 } 16945 return false; 16946} 16947 16948 16949 16950/// getConstraintType - Given a constraint letter, return the type of 16951/// constraint it is for this target. 16952X86TargetLowering::ConstraintType 16953X86TargetLowering::getConstraintType(const std::string &Constraint) const { 16954 if (Constraint.size() == 1) { 16955 switch (Constraint[0]) { 16956 case 'R': 16957 case 'q': 16958 case 'Q': 16959 case 'f': 16960 case 't': 16961 case 'u': 16962 case 'y': 16963 case 'x': 16964 case 'Y': 16965 case 'l': 16966 return C_RegisterClass; 16967 case 'a': 16968 case 'b': 16969 case 'c': 16970 case 'd': 16971 case 'S': 16972 case 'D': 16973 case 'A': 16974 return C_Register; 16975 case 'I': 16976 case 'J': 16977 case 'K': 16978 case 'L': 16979 case 'M': 16980 case 'N': 16981 case 'G': 16982 case 'C': 16983 case 'e': 16984 case 'Z': 16985 return C_Other; 16986 default: 16987 break; 16988 } 16989 } 16990 return TargetLowering::getConstraintType(Constraint); 16991} 16992 16993/// Examine constraint type and operand type and determine a weight value. 16994/// This object must already have been set up with the operand type 16995/// and the current alternative constraint selected. 
16996TargetLowering::ConstraintWeight 16997 X86TargetLowering::getSingleConstraintMatchWeight( 16998 AsmOperandInfo &info, const char *constraint) const { 16999 ConstraintWeight weight = CW_Invalid; 17000 Value *CallOperandVal = info.CallOperandVal; 17001 // If we don't have a value, we can't do a match, 17002 // but allow it at the lowest weight. 17003 if (CallOperandVal == NULL) 17004 return CW_Default; 17005 Type *type = CallOperandVal->getType(); 17006 // Look at the constraint type. 17007 switch (*constraint) { 17008 default: 17009 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 17010 case 'R': 17011 case 'q': 17012 case 'Q': 17013 case 'a': 17014 case 'b': 17015 case 'c': 17016 case 'd': 17017 case 'S': 17018 case 'D': 17019 case 'A': 17020 if (CallOperandVal->getType()->isIntegerTy()) 17021 weight = CW_SpecificReg; 17022 break; 17023 case 'f': 17024 case 't': 17025 case 'u': 17026 if (type->isFloatingPointTy()) 17027 weight = CW_SpecificReg; 17028 break; 17029 case 'y': 17030 if (type->isX86_MMXTy() && Subtarget->hasMMX()) 17031 weight = CW_SpecificReg; 17032 break; 17033 case 'x': 17034 case 'Y': 17035 if (((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasSSE1()) || 17036 ((type->getPrimitiveSizeInBits() == 256) && Subtarget->hasAVX())) 17037 weight = CW_Register; 17038 break; 17039 case 'I': 17040 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) { 17041 if (C->getZExtValue() <= 31) 17042 weight = CW_Constant; 17043 } 17044 break; 17045 case 'J': 17046 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 17047 if (C->getZExtValue() <= 63) 17048 weight = CW_Constant; 17049 } 17050 break; 17051 case 'K': 17052 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 17053 if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f)) 17054 weight = CW_Constant; 17055 } 17056 break; 17057 case 'L': 17058 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 17059 if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff)) 17060 weight = CW_Constant; 17061 } 17062 break; 17063 case 'M': 17064 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 17065 if (C->getZExtValue() <= 3) 17066 weight = CW_Constant; 17067 } 17068 break; 17069 case 'N': 17070 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 17071 if (C->getZExtValue() <= 0xff) 17072 weight = CW_Constant; 17073 } 17074 break; 17075 case 'G': 17076 case 'C': 17077 if (dyn_cast<ConstantFP>(CallOperandVal)) { 17078 weight = CW_Constant; 17079 } 17080 break; 17081 case 'e': 17082 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 17083 if ((C->getSExtValue() >= -0x80000000LL) && 17084 (C->getSExtValue() <= 0x7fffffffLL)) 17085 weight = CW_Constant; 17086 } 17087 break; 17088 case 'Z': 17089 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 17090 if (C->getZExtValue() <= 0xffffffff) 17091 weight = CW_Constant; 17092 } 17093 break; 17094 } 17095 return weight; 17096} 17097 17098/// LowerXConstraint - try to replace an X constraint, which matches anything, 17099/// with another that has more specific requirements based on the type of the 17100/// corresponding operand. 17101const char *X86TargetLowering:: 17102LowerXConstraint(EVT ConstraintVT) const { 17103 // FP X constraints get lowered to SSE1/2 registers if available, otherwise 17104 // 'f' like normal targets. 
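  // Illustration (assumed source, not from this file): for asm("...", "=X"(d))
  // with a double operand, an SSE2-capable subtarget returns "Y" so the value
  // lives in an XMM register rather than on the x87 stack.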
17105 if (ConstraintVT.isFloatingPoint()) { 17106 if (Subtarget->hasSSE2()) 17107 return "Y"; 17108 if (Subtarget->hasSSE1()) 17109 return "x"; 17110 } 17111 17112 return TargetLowering::LowerXConstraint(ConstraintVT); 17113} 17114 17115/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 17116/// vector. If it is invalid, don't add anything to Ops. 17117void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op, 17118 std::string &Constraint, 17119 std::vector<SDValue>&Ops, 17120 SelectionDAG &DAG) const { 17121 SDValue Result(0, 0); 17122 17123 // Only support length 1 constraints for now. 17124 if (Constraint.length() > 1) return; 17125 17126 char ConstraintLetter = Constraint[0]; 17127 switch (ConstraintLetter) { 17128 default: break; 17129 case 'I': 17130 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 17131 if (C->getZExtValue() <= 31) { 17132 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType()); 17133 break; 17134 } 17135 } 17136 return; 17137 case 'J': 17138 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 17139 if (C->getZExtValue() <= 63) { 17140 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType()); 17141 break; 17142 } 17143 } 17144 return; 17145 case 'K': 17146 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 17147 if ((int8_t)C->getSExtValue() == C->getSExtValue()) { 17148 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType()); 17149 break; 17150 } 17151 } 17152 return; 17153 case 'N': 17154 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 17155 if (C->getZExtValue() <= 255) { 17156 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType()); 17157 break; 17158 } 17159 } 17160 return; 17161 case 'e': { 17162 // 32-bit signed value 17163 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 17164 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()), 17165 C->getSExtValue())) { 17166 // Widen to 64 bits here to get it sign extended. 17167 Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64); 17168 break; 17169 } 17170 // FIXME gcc accepts some relocatable values here too, but only in certain 17171 // memory models; it's complicated. 17172 } 17173 return; 17174 } 17175 case 'Z': { 17176 // 32-bit unsigned value 17177 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 17178 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()), 17179 C->getZExtValue())) { 17180 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType()); 17181 break; 17182 } 17183 } 17184 // FIXME gcc accepts some relocatable values here too, but only in certain 17185 // memory models; it's complicated. 17186 return; 17187 } 17188 case 'i': { 17189 // Literal immediates are always ok. 17190 if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) { 17191 // Widen to 64 bits here to get it sign extended. 17192 Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64); 17193 break; 17194 } 17195 17196 // In any sort of PIC mode addresses need to be computed at runtime by 17197 // adding in a register or some sort of table lookup. These can't 17198 // be used as immediates. 17199 if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC()) 17200 return; 17201 17202 // If we are in non-pic codegen mode, we allow the address of a global (with 17203 // an optional displacement) to be used with 'i'. 17204 GlobalAddressSDNode *GA = 0; 17205 int64_t Offset = 0; 17206 17207 // Match either (GA), (GA+C), (GA+C1+C2), etc. 
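    // For example (a sketch of the DAG shapes the loop below walks):
    //   (add (add GlobalAddress:@g, 8), 4)  collapses to @g with Offset = 12
    //   (sub GlobalAddress:@g, 16)          collapses to @g with Offset = -16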
17208 while (1) { 17209 if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) { 17210 Offset += GA->getOffset(); 17211 break; 17212 } else if (Op.getOpcode() == ISD::ADD) { 17213 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 17214 Offset += C->getZExtValue(); 17215 Op = Op.getOperand(0); 17216 continue; 17217 } 17218 } else if (Op.getOpcode() == ISD::SUB) { 17219 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 17220 Offset += -C->getZExtValue(); 17221 Op = Op.getOperand(0); 17222 continue; 17223 } 17224 } 17225 17226 // Otherwise, this isn't something we can handle, reject it. 17227 return; 17228 } 17229 17230 const GlobalValue *GV = GA->getGlobal(); 17231 // If we require an extra load to get this address, as in PIC mode, we 17232 // can't accept it. 17233 if (isGlobalStubReference(Subtarget->ClassifyGlobalReference(GV, 17234 getTargetMachine()))) 17235 return; 17236 17237 Result = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(), 17238 GA->getValueType(0), Offset); 17239 break; 17240 } 17241 } 17242 17243 if (Result.getNode()) { 17244 Ops.push_back(Result); 17245 return; 17246 } 17247 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 17248} 17249 17250std::pair<unsigned, const TargetRegisterClass*> 17251X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, 17252 EVT VT) const { 17253 // First, see if this is a constraint that directly corresponds to an LLVM 17254 // register class. 17255 if (Constraint.size() == 1) { 17256 // GCC Constraint Letters 17257 switch (Constraint[0]) { 17258 default: break; 17259 // TODO: Slight differences here in allocation order and leaving 17260 // RIP in the class. Do they matter any more here than they do 17261 // in the normal allocation? 17262 case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode. 
17263 if (Subtarget->is64Bit()) { 17264 if (VT == MVT::i32 || VT == MVT::f32) 17265 return std::make_pair(0U, &X86::GR32RegClass); 17266 if (VT == MVT::i16) 17267 return std::make_pair(0U, &X86::GR16RegClass); 17268 if (VT == MVT::i8 || VT == MVT::i1) 17269 return std::make_pair(0U, &X86::GR8RegClass); 17270 if (VT == MVT::i64 || VT == MVT::f64) 17271 return std::make_pair(0U, &X86::GR64RegClass); 17272 break; 17273 } 17274 // 32-bit fallthrough 17275 case 'Q': // Q_REGS 17276 if (VT == MVT::i32 || VT == MVT::f32) 17277 return std::make_pair(0U, &X86::GR32_ABCDRegClass); 17278 if (VT == MVT::i16) 17279 return std::make_pair(0U, &X86::GR16_ABCDRegClass); 17280 if (VT == MVT::i8 || VT == MVT::i1) 17281 return std::make_pair(0U, &X86::GR8_ABCD_LRegClass); 17282 if (VT == MVT::i64) 17283 return std::make_pair(0U, &X86::GR64_ABCDRegClass); 17284 break; 17285 case 'r': // GENERAL_REGS 17286 case 'l': // INDEX_REGS 17287 if (VT == MVT::i8 || VT == MVT::i1) 17288 return std::make_pair(0U, &X86::GR8RegClass); 17289 if (VT == MVT::i16) 17290 return std::make_pair(0U, &X86::GR16RegClass); 17291 if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit()) 17292 return std::make_pair(0U, &X86::GR32RegClass); 17293 return std::make_pair(0U, &X86::GR64RegClass); 17294 case 'R': // LEGACY_REGS 17295 if (VT == MVT::i8 || VT == MVT::i1) 17296 return std::make_pair(0U, &X86::GR8_NOREXRegClass); 17297 if (VT == MVT::i16) 17298 return std::make_pair(0U, &X86::GR16_NOREXRegClass); 17299 if (VT == MVT::i32 || !Subtarget->is64Bit()) 17300 return std::make_pair(0U, &X86::GR32_NOREXRegClass); 17301 return std::make_pair(0U, &X86::GR64_NOREXRegClass); 17302 case 'f': // FP Stack registers. 17303 // If SSE is enabled for this VT, use f80 to ensure the isel moves the 17304 // value to the correct fpstack register class. 17305 if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT)) 17306 return std::make_pair(0U, &X86::RFP32RegClass); 17307 if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT)) 17308 return std::make_pair(0U, &X86::RFP64RegClass); 17309 return std::make_pair(0U, &X86::RFP80RegClass); 17310 case 'y': // MMX_REGS if MMX allowed. 17311 if (!Subtarget->hasMMX()) break; 17312 return std::make_pair(0U, &X86::VR64RegClass); 17313 case 'Y': // SSE_REGS if SSE2 allowed 17314 if (!Subtarget->hasSSE2()) break; 17315 // FALL THROUGH. 17316 case 'x': // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed 17317 if (!Subtarget->hasSSE1()) break; 17318 17319 switch (VT.getSimpleVT().SimpleTy) { 17320 default: break; 17321 // Scalar SSE types. 17322 case MVT::f32: 17323 case MVT::i32: 17324 return std::make_pair(0U, &X86::FR32RegClass); 17325 case MVT::f64: 17326 case MVT::i64: 17327 return std::make_pair(0U, &X86::FR64RegClass); 17328 // Vector types. 17329 case MVT::v16i8: 17330 case MVT::v8i16: 17331 case MVT::v4i32: 17332 case MVT::v2i64: 17333 case MVT::v4f32: 17334 case MVT::v2f64: 17335 return std::make_pair(0U, &X86::VR128RegClass); 17336 // AVX types. 17337 case MVT::v32i8: 17338 case MVT::v16i16: 17339 case MVT::v8i32: 17340 case MVT::v4i64: 17341 case MVT::v8f32: 17342 case MVT::v4f64: 17343 return std::make_pair(0U, &X86::VR256RegClass); 17344 } 17345 break; 17346 } 17347 } 17348 17349 // Use the default implementation in TargetLowering to convert the register 17350 // constraint into a member of a register class. 17351 std::pair<unsigned, const TargetRegisterClass*> Res; 17352 Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 17353 17354 // Not found as a standard register? 
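  // For instance (illustrative constraint strings): "{st(3)}" and "{flags}"
  // are not names the generic lookup knows, so they are mapped by hand below
  // to X86::ST3 in RFP80RegClass and X86::EFLAGS in CCRRegClass.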
17355 if (Res.second == 0) { 17356 // Map st(0) -> st(7) -> ST0 17357 if (Constraint.size() == 7 && Constraint[0] == '{' && 17358 tolower(Constraint[1]) == 's' && 17359 tolower(Constraint[2]) == 't' && 17360 Constraint[3] == '(' && 17361 (Constraint[4] >= '0' && Constraint[4] <= '7') && 17362 Constraint[5] == ')' && 17363 Constraint[6] == '}') { 17364 17365 Res.first = X86::ST0+Constraint[4]-'0'; 17366 Res.second = &X86::RFP80RegClass; 17367 return Res; 17368 } 17369 17370 // GCC allows "st(0)" to be called just plain "st". 17371 if (StringRef("{st}").equals_lower(Constraint)) { 17372 Res.first = X86::ST0; 17373 Res.second = &X86::RFP80RegClass; 17374 return Res; 17375 } 17376 17377 // flags -> EFLAGS 17378 if (StringRef("{flags}").equals_lower(Constraint)) { 17379 Res.first = X86::EFLAGS; 17380 Res.second = &X86::CCRRegClass; 17381 return Res; 17382 } 17383 17384 // 'A' means EAX + EDX. 17385 if (Constraint == "A") { 17386 Res.first = X86::EAX; 17387 Res.second = &X86::GR32_ADRegClass; 17388 return Res; 17389 } 17390 return Res; 17391 } 17392 17393 // Otherwise, check to see if this is a register class of the wrong value 17394 // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to 17395 // turn into {ax},{dx}. 17396 if (Res.second->hasType(VT)) 17397 return Res; // Correct type already, nothing to do. 17398 17399 // All of the single-register GCC register classes map their values onto 17400 // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we 17401 // really want an 8-bit or 32-bit register, map to the appropriate register 17402 // class and return the appropriate register. 17403 if (Res.second == &X86::GR16RegClass) { 17404 if (VT == MVT::i8) { 17405 unsigned DestReg = 0; 17406 switch (Res.first) { 17407 default: break; 17408 case X86::AX: DestReg = X86::AL; break; 17409 case X86::DX: DestReg = X86::DL; break; 17410 case X86::CX: DestReg = X86::CL; break; 17411 case X86::BX: DestReg = X86::BL; break; 17412 } 17413 if (DestReg) { 17414 Res.first = DestReg; 17415 Res.second = &X86::GR8RegClass; 17416 } 17417 } else if (VT == MVT::i32) { 17418 unsigned DestReg = 0; 17419 switch (Res.first) { 17420 default: break; 17421 case X86::AX: DestReg = X86::EAX; break; 17422 case X86::DX: DestReg = X86::EDX; break; 17423 case X86::CX: DestReg = X86::ECX; break; 17424 case X86::BX: DestReg = X86::EBX; break; 17425 case X86::SI: DestReg = X86::ESI; break; 17426 case X86::DI: DestReg = X86::EDI; break; 17427 case X86::BP: DestReg = X86::EBP; break; 17428 case X86::SP: DestReg = X86::ESP; break; 17429 } 17430 if (DestReg) { 17431 Res.first = DestReg; 17432 Res.second = &X86::GR32RegClass; 17433 } 17434 } else if (VT == MVT::i64) { 17435 unsigned DestReg = 0; 17436 switch (Res.first) { 17437 default: break; 17438 case X86::AX: DestReg = X86::RAX; break; 17439 case X86::DX: DestReg = X86::RDX; break; 17440 case X86::CX: DestReg = X86::RCX; break; 17441 case X86::BX: DestReg = X86::RBX; break; 17442 case X86::SI: DestReg = X86::RSI; break; 17443 case X86::DI: DestReg = X86::RDI; break; 17444 case X86::BP: DestReg = X86::RBP; break; 17445 case X86::SP: DestReg = X86::RSP; break; 17446 } 17447 if (DestReg) { 17448 Res.first = DestReg; 17449 Res.second = &X86::GR64RegClass; 17450 } 17451 } 17452 } else if (Res.second == &X86::FR32RegClass || 17453 Res.second == &X86::FR64RegClass || 17454 Res.second == &X86::VR128RegClass) { 17455 // Handle references to XMM physical registers that got mapped into the 17456 // wrong class. 
This can happen with constraints like {xmm0} where the 17457 // target independent register mapper will just pick the first match it can 17458 // find, ignoring the required type. 17459 17460 if (VT == MVT::f32 || VT == MVT::i32) 17461 Res.second = &X86::FR32RegClass; 17462 else if (VT == MVT::f64 || VT == MVT::i64) 17463 Res.second = &X86::FR64RegClass; 17464 else if (X86::VR128RegClass.hasType(VT)) 17465 Res.second = &X86::VR128RegClass; 17466 else if (X86::VR256RegClass.hasType(VT)) 17467 Res.second = &X86::VR256RegClass; 17468 } 17469 17470 return Res; 17471} 17472
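// Note on the remapping above (illustrative operands, not from this file): an
// "{ax}" operand carrying an i32 value first resolves to (X86::AX, GR16) and
// is rewritten to (X86::EAX, GR32RegClass); a "{xmm0}" operand carrying a
// 256-bit vector is moved from the 128-bit class to VR256RegClass.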