X86ISelLowering.cpp revision 31b5f00c4ebd870fc2745f1bed86a7b67f802210
//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "x86-isel"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "Utils/X86ShuffleDecode.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalAlias.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
using namespace dwarf;

STATISTIC(NumTailCalls, "Number of tail calls");

// Forward declarations.
static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
                       SDValue V2);

static SDValue Insert128BitVector(SDValue Result,
                                  SDValue Vec,
                                  SDValue Idx,
                                  SelectionDAG &DAG,
                                  DebugLoc dl);

static SDValue Extract128BitVector(SDValue Vec,
                                   SDValue Idx,
                                   SelectionDAG &DAG,
                                   DebugLoc dl);

static SDValue ConcatVectors(SDValue Lower, SDValue Upper, SelectionDAG &DAG);


/// Generate a DAG to grab 128-bits from a vector > 128 bits.  This
/// sets things up to match to an AVX VEXTRACTF128 instruction or a
/// simple subregister reference.  Idx is an index in the 128 bits we
/// want.  It need not be aligned to a 128-bit boundary.  That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
static SDValue Extract128BitVector(SDValue Vec,
                                   SDValue Idx,
                                   SelectionDAG &DAG,
                                   DebugLoc dl) {
  EVT VT = Vec.getValueType();
  assert(VT.getSizeInBits() == 256 && "Unexpected vector size!");

  EVT ElVT = VT.getVectorElementType();

  int Factor = VT.getSizeInBits() / 128;

  EVT ResultVT = EVT::getVectorVT(*DAG.getContext(),
                                  ElVT,
                                  VT.getVectorNumElements() / Factor);

  // Extract from UNDEF is UNDEF.
  if (Vec.getOpcode() == ISD::UNDEF)
    return DAG.getNode(ISD::UNDEF, dl, ResultVT);

  if (isa<ConstantSDNode>(Idx)) {
    unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();

    // Extract the relevant 128 bits.  Generate an EXTRACT_SUBVECTOR
    // we can match to VEXTRACTF128.
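    // Worked example (illustrative, not part of the original source): when
    // extracting element 5 of a 256-bit v8i32, ElemsPerChunk is 128/32 = 4
    // and NormalizedIdxVal is ((5*32)/128)*4 = 4, so the EXTRACT_SUBVECTOR
    // starts at element 4, i.e. the upper 128-bit half.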
    unsigned ElemsPerChunk = 128 / ElVT.getSizeInBits();

    // This is the index of the first element of the 128-bit chunk
    // we want.
    unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / 128)
                                 * ElemsPerChunk);

    SDValue VecIdx = DAG.getConstant(NormalizedIdxVal, MVT::i32);

    SDValue Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec,
                                 VecIdx);

    return Result;
  }

  return SDValue();
}

/// Generate a DAG to put 128-bits into a vector > 128 bits.  This
/// sets things up to match to an AVX VINSERTF128 instruction or a
/// simple superregister reference.  Idx is an index in the 128 bits
/// we want.  It need not be aligned to a 128-bit boundary.  That makes
/// lowering INSERT_VECTOR_ELT operations easier.
static SDValue Insert128BitVector(SDValue Result,
                                  SDValue Vec,
                                  SDValue Idx,
                                  SelectionDAG &DAG,
                                  DebugLoc dl) {
  if (isa<ConstantSDNode>(Idx)) {
    EVT VT = Vec.getValueType();
    assert(VT.getSizeInBits() == 128 && "Unexpected vector size!");

    EVT ElVT = VT.getVectorElementType();

    unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();

    EVT ResultVT = Result.getValueType();

    // Insert the relevant 128 bits.
    unsigned ElemsPerChunk = 128 / ElVT.getSizeInBits();

    // This is the index of the first element of the 128-bit chunk
    // we want.
    unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / 128)
                                 * ElemsPerChunk);

    SDValue VecIdx = DAG.getConstant(NormalizedIdxVal, MVT::i32);

    Result = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec,
                         VecIdx);
    return Result;
  }

  return SDValue();
}

/// Given two vectors, concat them.
static SDValue ConcatVectors(SDValue Lower, SDValue Upper, SelectionDAG &DAG) {
  DebugLoc dl = Lower.getDebugLoc();

  assert(Lower.getValueType() == Upper.getValueType() && "Mismatched vectors!");

  EVT VT = EVT::getVectorVT(*DAG.getContext(),
                            Lower.getValueType().getVectorElementType(),
                            Lower.getValueType().getVectorNumElements() * 2);

  // TODO: Generalize to arbitrary vector length (this assumes 256-bit vectors).
  assert(VT.getSizeInBits() == 256 && "Unsupported vector concat!");

  // Insert the upper subvector.
  SDValue Vec = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, VT), Upper,
                                   DAG.getConstant(
                                     // This is half the length of the result
                                     // vector.  Start inserting the upper 128
                                     // bits here.
                                     Lower.getValueType().getVectorNumElements(),
                                     MVT::i32),
                                   DAG, dl);

  // Insert the lower subvector.
  Vec = Insert128BitVector(Vec, Lower, DAG.getConstant(0, MVT::i32), DAG, dl);
  return Vec;
}

static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  bool is64Bit = Subtarget->is64Bit();

  if (Subtarget->isTargetEnvMacho()) {
    if (is64Bit)
      return new X8664_MachoTargetObjectFile();
    return new TargetLoweringObjectFileMachO();
  }

  if (Subtarget->isTargetELF()) {
    if (is64Bit)
      return new X8664_ELFTargetObjectFile(TM);
    return new X8632_ELFTargetObjectFile(TM);
  }
  if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho())
    return new TargetLoweringObjectFileCOFF();
  llvm_unreachable("unknown subtarget type");
}

X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
  : TargetLowering(TM, createTLOF(TM)) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSEf64 = Subtarget->hasXMMInt();
  X86ScalarSSEf32 = Subtarget->hasXMM();
  X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;

  RegInfo = TM.getRegisterInfo();
  TD = getTargetData();

  // Set up the TargetLowering object.
  static MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };

  // X86 is weird, it always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);

  // For 64-bit since we have so many registers use the ILP scheduler, for
  // 32-bit code use the register pressure specific scheduling.
  if (Subtarget->is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
  setStackPointerRegisterToSaveRestore(X86StackPtr);

  if (Subtarget->isTargetWindows() && !Subtarget->isTargetCygMing()) {
    // Setup Windows compiler runtime calls.
    setLibcallName(RTLIB::SDIV_I64, "_alldiv");
    setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
    setLibcallName(RTLIB::FPTOUINT_F64_I64, "_ftol2");
    setLibcallName(RTLIB::FPTOUINT_F32_I64, "_ftol2");
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::FPTOUINT_F64_I64, CallingConv::C);
    setLibcallCallingConv(RTLIB::FPTOUINT_F32_I64, CallingConv::C);
  }

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetMingw()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, X86::GR8RegisterClass);
  addRegisterClass(MVT::i16, X86::GR16RegisterClass);
  addRegisterClass(MVT::i32, X86::GR32RegisterClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, X86::GR64RegisterClass);

  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8, Expand);
  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  // SETOEQ and SETUNE require checking two conditions.
  setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
  } else if (!UseSoftFloat) {
    // We have an algorithm for SSE2->double, and we turn this into a
    // 64-bit FILD followed by conditional FADD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);

  if (!UseSoftFloat) {
    // SSE has no i16 to fp conversion, only i32
    if (X86ScalarSSEf32) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
      // f32 and f64 cases are Legal, f80 case is not
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Promote);
  }

  // In 32-bit mode these are custom lowered.  In 64-bit mode F32 and F64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    // f32 and f64 cases are Legal, f80 case is not
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else if (!UseSoftFloat) {
    if (X86ScalarSSEf32 && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64; without
      // SSE, we're stuck with a fistpll.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::BITCAST, MVT::f64, Expand);
      // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    }
  }

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
  for (unsigned i = 0, e = 4; i != e; ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // Add/Sub overflow ops with MVT::Glues are lowered to EFLAGS dependences.
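    // (Illustrative note, not from the original source: this is how a
    // wider-than-register add is built, e.g. a 64-bit add on 32-bit x86
    // becomes an ADDC of the low halves whose carry feeds an ADDE of the
    // high halves through EFLAGS.)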
    setOperationAction(ISD::ADDC, VT, Custom);
    setOperationAction(ISD::ADDE, VT, Custom);
    setOperationAction(ISD::SUBC, VT, Custom);
    setOperationAction(ISD::SUBE, VT, Custom);
  }

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::FP_ROUND_INREG, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);
  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  setOperationAction(ISD::CTTZ, MVT::i8, Custom);
  setOperationAction(ISD::CTLZ, MVT::i8, Custom);
  setOperationAction(ISD::CTTZ, MVT::i16, Custom);
  setOperationAction(ISD::CTLZ, MVT::i16, Custom);
  setOperationAction(ISD::CTTZ, MVT::i32, Custom);
  setOperationAction(ISD::CTLZ, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::CTTZ, MVT::i64, Custom);
    setOperationAction(ISD::CTLZ, MVT::i64, Custom);
  }

  if (Subtarget->hasPOPCNT()) {
    setOperationAction(ISD::CTPOP, MVT::i8, Promote);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i8, Expand);
    setOperationAction(ISD::CTPOP, MVT::i16, Expand);
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
  setOperationAction(ISD::BSWAP, MVT::i16, Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT, MVT::i8, Custom);
  setOperationAction(ISD::SELECT, MVT::i16, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::f80, Custom);
  setOperationAction(ISD::SETCC, MVT::i8, Custom);
  setOperationAction(ISD::SETCC, MVT::i16, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::SETCC, MVT::f80, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT, MVT::i64, Custom);
    setOperationAction(ISD::SETCC, MVT::i64, Custom);
  }
  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);

  // Darwin ABI issue.
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::ExternalSymbol, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
    setOperationAction(ISD::JumpTable, MVT::i64, Custom);
    setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64, Custom);
    setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  }

  if (Subtarget->hasXMM())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  // We may not have a libcall for MEMBARRIER so we should lower this.
  setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);

  // On X86 and X86-64, atomic operations are lowered to locked instructions.
  // Locked instructions, in turn, have implicit fence semantics (all memory
  // operations are flushed before issuing the locked instruction, and they
  // are not buffered), so we can fold away the common pattern of
  // fence-atomic-fence.
  setShouldFoldAtomicFences(true);

  // Expand certain atomics
  for (unsigned i = 0, e = 4; i != e; ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
  }

  if (!Subtarget->is64Bit()) {
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Custom);
  }

  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() &&
      !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing()) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }

  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
  if (Subtarget->is64Bit()) {
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  } else {
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  }

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC,
                     (Subtarget->is64Bit() ? MVT::i64 : MVT::i32),
                     (Subtarget->isTargetCOFF()
                      && !Subtarget->isTargetEnvMacho()
                      ? Custom : Expand));

  if (!UseSoftFloat && X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::FR64RegisterClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // Lower this to FGETSIGNx86 plus an AND.
    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    addLegalFPImmediate(APFloat(+0.0));  // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
  } else if (!UseSoftFloat && X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
    }
  } else if (!UseSoftFloat) {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
    addRegisterClass(MVT::f32, X86::RFP32RegisterClass);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);
    setOperationAction(ISD::UNDEF, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
    }
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // Long double always uses X87.
  if (!UseSoftFloat) {
    addRegisterClass(MVT::f80, X86::RFP80RegisterClass);
    setOperationAction(ISD::UNDEF, MVT::f80, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    {
      APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended);
      addLegalFPImmediate(TmpFlt);  // FLD0
      TmpFlt.changeSign();
      addLegalFPImmediate(TmpFlt);  // FLD0/FCHS

      bool ignored;
      APFloat TmpFlt2(+1.0);
      TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
                      &ignored);
      addLegalFPImmediate(TmpFlt2);  // FLD1
      TmpFlt2.changeSign();
      addLegalFPImmediate(TmpFlt2);  // FLD1/FCHS
    }

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f80, Expand);
      setOperationAction(ISD::FCOS, MVT::f80, Expand);
    }
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);

  setOperationAction(ISD::FLOG, MVT::f80, Expand);
  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
  setOperationAction(ISD::FEXP, MVT::f80, Expand);
  setOperationAction(ISD::FEXP2, MVT::f80, Expand);

  // First set operation action for all vector types to either promote
  // (for widening) or expand (for scalarization). Then we will selectively
  // turn on ones that can be effectively codegen'd.
  for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
    setOperationAction(ISD::ADD, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SUB, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FADD, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FNEG, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FSUB, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::MUL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FMUL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SDIV, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UDIV, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FDIV, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::LOAD, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::INSERT_SUBVECTOR, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FABS, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FSIN, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FCOS, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FPOWI, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FSQRT, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SDIVREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UDIVREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FPOW, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::CTPOP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::CTTZ, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::CTLZ, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SHL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SRA, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SRL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::ROTL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::ROTR, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::BSWAP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::VSETCC, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FLOG, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FLOG2, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FLOG10, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FEXP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FEXP2, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::TRUNCATE, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::ZERO_EXTEND, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::ANY_EXTEND, (MVT::SimpleValueType)VT, Expand);
    for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
      setTruncStoreAction((MVT::SimpleValueType)VT,
                          (MVT::SimpleValueType)InnerVT, Expand);
    setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT, Expand);
    setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT, Expand);
  }

  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
  // with -msoft-float, disable use of MMX as well.
  if (!UseSoftFloat && Subtarget->hasMMX()) {
    addRegisterClass(MVT::x86mmx, X86::VR64RegisterClass);
    // No operations on x86mmx supported, everything uses intrinsics.
  }

  // MMX-sized vectors (other than x86mmx) are expected to be expanded
  // into smaller operations.
  setOperationAction(ISD::MULHS, MVT::v8i8, Expand);
  setOperationAction(ISD::MULHS, MVT::v4i16, Expand);
  setOperationAction(ISD::MULHS, MVT::v2i32, Expand);
  setOperationAction(ISD::MULHS, MVT::v1i64, Expand);
  setOperationAction(ISD::AND, MVT::v8i8, Expand);
  setOperationAction(ISD::AND, MVT::v4i16, Expand);
  setOperationAction(ISD::AND, MVT::v2i32, Expand);
  setOperationAction(ISD::AND, MVT::v1i64, Expand);
  setOperationAction(ISD::OR, MVT::v8i8, Expand);
  setOperationAction(ISD::OR, MVT::v4i16, Expand);
  setOperationAction(ISD::OR, MVT::v2i32, Expand);
  setOperationAction(ISD::OR, MVT::v1i64, Expand);
  setOperationAction(ISD::XOR, MVT::v8i8, Expand);
  setOperationAction(ISD::XOR, MVT::v4i16, Expand);
  setOperationAction(ISD::XOR, MVT::v2i32, Expand);
  setOperationAction(ISD::XOR, MVT::v1i64, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Expand);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand);
  setOperationAction(ISD::SELECT, MVT::v8i8, Expand);
  setOperationAction(ISD::SELECT, MVT::v4i16, Expand);
  setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
  setOperationAction(ISD::SELECT, MVT::v1i64, Expand);
  setOperationAction(ISD::BITCAST, MVT::v8i8, Expand);
  setOperationAction(ISD::BITCAST, MVT::v4i16, Expand);
  setOperationAction(ISD::BITCAST, MVT::v2i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::v1i64, Expand);

  if (!UseSoftFloat && Subtarget->hasXMM()) {
    addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::VSETCC, MVT::v4f32, Custom);
  }

  if (!UseSoftFloat && Subtarget->hasXMMInt()) {
    addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);

    // FIXME: Unfortunately -soft-float and -no-implicit-float means XMM
    // registers cannot be used even for integer operations.
    addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
    addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
    addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
    addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);

    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::ADD, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);

    setOperationAction(ISD::VSETCC, MVT::v2f64, Custom);
    setOperationAction(ISD::VSETCC, MVT::v16i8, Custom);
    setOperationAction(ISD::VSETCC, MVT::v8i16, Custom);
    setOperationAction(ISD::VSETCC, MVT::v4i32, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::CONCAT_VECTORS, MVT::v2f64, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i64, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i8, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i16, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v2i64; ++i) {
      EVT VT = (MVT::SimpleValueType)i;
      // Do not attempt to custom lower non-power-of-2 vectors
      if (!isPowerOf2_32(VT.getVectorNumElements()))
        continue;
      // Do not attempt to custom lower non-128-bit vectors
      if (!VT.is128BitVector())
        continue;
      setOperationAction(ISD::BUILD_VECTOR,
                         VT.getSimpleVT().SimpleTy, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE,
                         VT.getSimpleVT().SimpleTy, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT,
                         VT.getSimpleVT().SimpleTy, Custom);
    }

    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
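    // (Illustrative note, not from the original source: bitwise operations
    // and whole-register loads are element-size agnostic, so promoting every
    // 128-bit integer vector type to v2i64 lets a single set of v2i64
    // patterns cover them all.)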
    for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v2i64; i++) {
      MVT::SimpleValueType SVT = (MVT::SimpleValueType)i;
      EVT VT = SVT;

      // Do not attempt to promote non-128-bit vectors
      if (!VT.is128BitVector())
        continue;

      setOperationAction(ISD::AND, SVT, Promote);
      AddPromotedToType (ISD::AND, SVT, MVT::v2i64);
      setOperationAction(ISD::OR, SVT, Promote);
      AddPromotedToType (ISD::OR, SVT, MVT::v2i64);
      setOperationAction(ISD::XOR, SVT, Promote);
      AddPromotedToType (ISD::XOR, SVT, MVT::v2i64);
      setOperationAction(ISD::LOAD, SVT, Promote);
      AddPromotedToType (ISD::LOAD, SVT, MVT::v2i64);
      setOperationAction(ISD::SELECT, SVT, Promote);
      AddPromotedToType (ISD::SELECT, SVT, MVT::v2i64);
    }

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
  }

  if (Subtarget->hasSSE41()) {
    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FRINT, MVT::f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);

    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    // Can turn SHL into an integer multiply.
    setOperationAction(ISD::SHL, MVT::v4i32, Custom);
    setOperationAction(ISD::SHL, MVT::v16i8, Custom);

    // i8 and i16 vectors are custom, because the source register and source
    // memory operand types are not the same width.  f32 vectors are
    // custom since the immediate controlling the insert encodes additional
    // information.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
    }
  }

  if (Subtarget->hasSSE2()) {
    setOperationAction(ISD::SRL, MVT::v2i64, Custom);
    setOperationAction(ISD::SRL, MVT::v4i32, Custom);
    setOperationAction(ISD::SRL, MVT::v16i8, Custom);

    setOperationAction(ISD::SHL, MVT::v2i64, Custom);
    setOperationAction(ISD::SHL, MVT::v4i32, Custom);
    setOperationAction(ISD::SHL, MVT::v8i16, Custom);

    setOperationAction(ISD::SRA, MVT::v4i32, Custom);
    setOperationAction(ISD::SRA, MVT::v8i16, Custom);
  }

  if (Subtarget->hasSSE42())
    setOperationAction(ISD::VSETCC, MVT::v2i64, Custom);

  if (!UseSoftFloat && Subtarget->hasAVX()) {
    addRegisterClass(MVT::v8f32, X86::VR256RegisterClass);
    addRegisterClass(MVT::v4f64, X86::VR256RegisterClass);
    addRegisterClass(MVT::v8i32, X86::VR256RegisterClass);
    addRegisterClass(MVT::v4i64, X86::VR256RegisterClass);
    addRegisterClass(MVT::v32i8, X86::VR256RegisterClass);

    setOperationAction(ISD::LOAD, MVT::v8f32, Legal);
    setOperationAction(ISD::LOAD, MVT::v8i32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i64, Legal);

    setOperationAction(ISD::FADD, MVT::v8f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v8f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v8f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v8f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v8f32, Custom);

    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f64, Custom);

    // Custom lower build_vector, vector_shuffle, scalar_to_vector,
    // insert_vector_elt, extract_subvector and extract_vector_elt for
    // 256-bit types.
    for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE;
         ++i) {
      MVT::SimpleValueType VT = (MVT::SimpleValueType)i;
      // Do not attempt to custom lower non-256-bit vectors
      if (!isPowerOf2_32(MVT(VT).getVectorNumElements())
          || (MVT(VT).getSizeInBits() < 256))
        continue;
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
    }
    // Custom-lower insert_subvector and extract_subvector based on
    // the result type.
    for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE;
         ++i) {
      MVT::SimpleValueType VT = (MVT::SimpleValueType)i;
      // Do not attempt to custom lower non-256-bit vectors
      if (!isPowerOf2_32(MVT(VT).getVectorNumElements()))
        continue;

      if (MVT(VT).getSizeInBits() == 128) {
        setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
      }
      else if (MVT(VT).getSizeInBits() == 256) {
        setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
      }
    }

    // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
    // Don't promote loads because we need them for VPERM vector index versions.

    for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         VT != (unsigned)MVT::LAST_VECTOR_VALUETYPE;
         VT++) {
      if (!isPowerOf2_32(MVT((MVT::SimpleValueType)VT).getVectorNumElements())
          || (MVT((MVT::SimpleValueType)VT).getSizeInBits() < 256))
        continue;
      setOperationAction(ISD::AND, (MVT::SimpleValueType)VT, Promote);
      AddPromotedToType (ISD::AND, (MVT::SimpleValueType)VT, MVT::v4i64);
      setOperationAction(ISD::OR, (MVT::SimpleValueType)VT, Promote);
      AddPromotedToType (ISD::OR, (MVT::SimpleValueType)VT, MVT::v4i64);
      setOperationAction(ISD::XOR, (MVT::SimpleValueType)VT, Promote);
      AddPromotedToType (ISD::XOR, (MVT::SimpleValueType)VT, MVT::v4i64);
      //setOperationAction(ISD::LOAD, (MVT::SimpleValueType)VT, Promote);
      //AddPromotedToType (ISD::LOAD, (MVT::SimpleValueType)VT, MVT::v4i64);
      setOperationAction(ISD::SELECT, (MVT::SimpleValueType)VT, Promote);
      AddPromotedToType (ISD::SELECT, (MVT::SimpleValueType)VT, MVT::v4i64);
    }
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);


  // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
  // handle type legalization for these operations here.
  //
  // FIXME: We really should do custom legalization for addition and
  // subtraction on x86-32 once PR3203 is fixed.  We really can't do much better
  // than generic legalization for 64-bit multiplication-with-overflow, though.
  for (unsigned i = 0, e = 3+Subtarget->is64Bit(); i != e; ++i) {
    // Add/Sub/Mul with overflow operations are custom lowered.
    MVT VT = IntVTs[i];
    setOperationAction(ISD::SADDO, VT, Custom);
    setOperationAction(ISD::UADDO, VT, Custom);
    setOperationAction(ISD::SSUBO, VT, Custom);
    setOperationAction(ISD::USUBO, VT, Custom);
    setOperationAction(ISD::SMULO, VT, Custom);
    setOperationAction(ISD::UMULO, VT, Custom);
  }

  // There are no 8-bit 3-address imul/mul instructions
  setOperationAction(ISD::SMULO, MVT::i8, Expand);
  setOperationAction(ISD::UMULO, MVT::i8, Expand);

  if (!Subtarget->is64Bit()) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, 0);
    setLibcallName(RTLIB::SRL_I128, 0);
    setLibcallName(RTLIB::SRA_I128, 0);
  }

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::BUILD_VECTOR);
  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  if (Subtarget->is64Bit())
    setTargetDAGCombine(ISD::MUL);

  computeRegisterProperties();

  // On Darwin, -Os means optimize for size without hurting performance,
  // do not reduce the limit.
  maxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
  maxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 16 : 8;
  maxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
  maxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
  maxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
  maxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
  setPrefLoopAlignment(16);
  benefitFromCodePlacementOpt = true;

  setPrefFunctionAlignment(4);
}


MVT::SimpleValueType X86TargetLowering::getSetCCResultType(EVT VT) const {
  return MVT::i8;
}


/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(const Type *Ty, unsigned &MaxAlign) {
  if (MaxAlign == 16)
    return;
  if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (VTy->getBitWidth() == 128)
      MaxAlign = 16;
  } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned EltAlign = 0;
      getMaxByValAlign(STy->getElementType(i), EltAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == 16)
        break;
    }
  }
  return;
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. For X86, aggregates
/// that contain SSE vectors are placed at 16-byte boundaries while the rest
/// are at 4-byte boundaries.
unsigned X86TargetLowering::getByValTypeAlignment(const Type *Ty) const {
  if (Subtarget->is64Bit()) {
    // Max of 8 and alignment of type.
    unsigned TyAlign = TD->getABITypeAlignment(Ty);
    if (TyAlign > 8)
      return TyAlign;
    return 8;
  }

  unsigned Align = 4;
  if (Subtarget->hasXMM())
    getMaxByValAlign(Ty, Align);
  return Align;
}

/// getOptimalMemOpType - Returns the target specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
/// lowering. If DstAlign is zero that means the destination alignment can
/// satisfy any constraint. Similarly, if SrcAlign is zero it
/// means there isn't a need to check it against the alignment requirement,
/// probably because the source does not need to be loaded. If
/// 'NonScalarIntSafe' is true, that means it's safe to return a
/// non-scalar-integer type, e.g. empty string source, constant, or loaded
/// from memory. 'MemcpyStrSrc' indicates whether the memcpy source is
/// constant so it does not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
EVT
X86TargetLowering::getOptimalMemOpType(uint64_t Size,
                                       unsigned DstAlign, unsigned SrcAlign,
                                       bool NonScalarIntSafe,
                                       bool MemcpyStrSrc,
                                       MachineFunction &MF) const {
  // FIXME: This turns off use of xmm stores for memset/memcpy on targets like
  // linux.  This is because the stack realignment code can't handle certain
  // cases like PR2962.  This should be removed when PR2962 is fixed.
  const Function *F = MF.getFunction();
  if (NonScalarIntSafe &&
      !F->hasFnAttr(Attribute::NoImplicitFloat)) {
    if (Size >= 16 &&
        (Subtarget->isUnalignedMemAccessFast() ||
         ((DstAlign == 0 || DstAlign >= 16) &&
          (SrcAlign == 0 || SrcAlign >= 16))) &&
        Subtarget->getStackAlignment() >= 16) {
      if (Subtarget->hasSSE2())
        return MVT::v4i32;
      if (Subtarget->hasSSE1())
        return MVT::v4f32;
    } else if (!MemcpyStrSrc && Size >= 8 &&
               !Subtarget->is64Bit() &&
               Subtarget->getStackAlignment() >= 8 &&
               Subtarget->hasXMMInt()) {
      // Do not use f64 to lower memcpy if source is string constant. It's
      // better to use i32 to avoid the loads.
      return MVT::f64;
    }
  }
  if (Subtarget->is64Bit() && Size >= 8)
    return MVT::i64;
  return MVT::i32;
}

/// getJumpTableEncoding - Return the entry encoding for a jump table in the
/// current function.  The returned value is a member of the
/// MachineJumpTableInfo::JTEntryKind enum.
unsigned X86TargetLowering::getJumpTableEncoding() const {
  // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
  // symbol.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT())
    return MachineJumpTableInfo::EK_Custom32;

  // Otherwise, use the normal jump table encoding heuristics.
  return TargetLowering::getJumpTableEncoding();
}

const MCExpr *
X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                                             const MachineBasicBlock *MBB,
                                             unsigned uid, MCContext &Ctx) const {
  assert(getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
         Subtarget->isPICStyleGOT());
  // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
  // entries.
  return MCSymbolRefExpr::Create(MBB->getSymbol(),
                                 MCSymbolRefExpr::VK_GOTOFF, Ctx);
}

/// getPICJumpTableRelocBase - Returns the relocation base for the given PIC
/// jumptable.
SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                    SelectionDAG &DAG) const {
  if (!Subtarget->is64Bit())
    // This doesn't have DebugLoc associated with it, but is not really the
    // same as a Register.
    return DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), getPointerTy());
  return Table;
}

/// getPICJumpTableRelocBaseExpr - This returns the relocation base for the
/// given PIC jumptable, the same as getPICJumpTableRelocBase, but as an
/// MCExpr.
const MCExpr *X86TargetLowering::
getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
                             MCContext &Ctx) const {
  // X86-64 uses RIP relative addressing based on the jump table label.
  if (Subtarget->isPICStyleRIPRel())
    return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);

  // Otherwise, the reference is relative to the PIC base.
  return MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx);
}

// FIXME: Why is this routine here? Move it to RegInfo!
std::pair<const TargetRegisterClass*, uint8_t>
X86TargetLowering::findRepresentativeClass(EVT VT) const {
  const TargetRegisterClass *RRC = 0;
  uint8_t Cost = 1;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(VT);
  case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
    RRC = (Subtarget->is64Bit()
           ? X86::GR64RegisterClass : X86::GR32RegisterClass);
    break;
  case MVT::x86mmx:
    RRC = X86::VR64RegisterClass;
    break;
  case MVT::f32: case MVT::f64:
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
  case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32:
  case MVT::v4f64:
    RRC = X86::VR128RegisterClass;
    break;
  }
  return std::make_pair(RRC, Cost);
}

bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
                                               unsigned &Offset) const {
  if (!Subtarget->isTargetLinux())
    return false;

  if (Subtarget->is64Bit()) {
    // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs:
    Offset = 0x28;
    if (getTargetMachine().getCodeModel() == CodeModel::Kernel)
      AddressSpace = 256;
    else
      AddressSpace = 257;
  } else {
    // %gs:0x14 on i386
    Offset = 0x14;
    AddressSpace = 256;
  }
  return true;
}


//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "X86GenCallingConv.inc"

bool
X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv,
                                  MachineFunction &MF, bool isVarArg,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(),
                 RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_X86);
}

SDValue
X86TargetLowering::LowerReturn(SDValue Chain,
                               CallingConv::ID CallConv, bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               DebugLoc dl, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();

  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(),
                 RVLocs, *DAG.getContext());
  CCInfo.AnalyzeReturn(Outs, RetCC_X86);

  // Add the regs to the liveout set for the function.
1348 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); 1349 for (unsigned i = 0; i != RVLocs.size(); ++i) 1350 if (RVLocs[i].isRegLoc() && !MRI.isLiveOut(RVLocs[i].getLocReg())) 1351 MRI.addLiveOut(RVLocs[i].getLocReg()); 1352 1353 SDValue Flag; 1354 1355 SmallVector<SDValue, 6> RetOps; 1356 RetOps.push_back(Chain); // Operand #0 = Chain (updated below) 1357 // Operand #1 = Bytes To Pop 1358 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), 1359 MVT::i16)); 1360 1361 // Copy the result values into the output registers. 1362 for (unsigned i = 0; i != RVLocs.size(); ++i) { 1363 CCValAssign &VA = RVLocs[i]; 1364 assert(VA.isRegLoc() && "Can only return in registers!"); 1365 SDValue ValToCopy = OutVals[i]; 1366 EVT ValVT = ValToCopy.getValueType(); 1367 1368 // If this is x86-64, and we disabled SSE, we can't return FP values, 1369 // or SSE or MMX vectors. 1370 if ((ValVT == MVT::f32 || ValVT == MVT::f64 || 1371 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) && 1372 (Subtarget->is64Bit() && !Subtarget->hasXMM())) { 1373 report_fatal_error("SSE register return with SSE disabled"); 1374 } 1375 // Likewise we can't return F64 values with SSE1 only. gcc does so, but 1376 // llvm-gcc has never done it right and no one has noticed, so this 1377 // should be OK for now. 1378 if (ValVT == MVT::f64 && 1379 (Subtarget->is64Bit() && !Subtarget->hasXMMInt())) 1380 report_fatal_error("SSE2 register return with SSE2 disabled"); 1381 1382 // Returns in ST0/ST1 are handled specially: these are pushed as operands to 1383 // the RET instruction and handled by the FP Stackifier. 1384 if (VA.getLocReg() == X86::ST0 || 1385 VA.getLocReg() == X86::ST1) { 1386 // If this is a copy from an xmm register to ST(0), use an FPExtend to 1387 // change the value to the FP stack register class. 1388 if (isScalarFPTypeInSSEReg(VA.getValVT())) 1389 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy); 1390 RetOps.push_back(ValToCopy); 1391 // Don't emit a copytoreg. 1392 continue; 1393 } 1394 1395 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64 1396 // which is returned in RAX / RDX. 1397 if (Subtarget->is64Bit()) { 1398 if (ValVT == MVT::x86mmx) { 1399 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) { 1400 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy); 1401 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, 1402 ValToCopy); 1403 // If we don't have SSE2 available, convert to v4f32 so the generated 1404 // register is legal. 1405 if (!Subtarget->hasSSE2()) 1406 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,ValToCopy); 1407 } 1408 } 1409 } 1410 1411 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag); 1412 Flag = Chain.getValue(1); 1413 } 1414 1415 // The x86-64 ABI for returning structs by value requires that we copy 1416 // the sret argument into %rax for the return. We saved the argument into 1417 // a virtual register in the entry block, so now we copy the value out 1418 // and into %rax. 
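  // A sketch (not taken from an actual test) of the kind of function this
  // applies to:
  //   define void @f(%struct.S* sret %agg.result) nounwind { ... ret void }
  // On x86-64 the incoming sret pointer must also come back in %rax.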
1419 if (Subtarget->is64Bit() && 1420 DAG.getMachineFunction().getFunction()->hasStructRetAttr()) { 1421 MachineFunction &MF = DAG.getMachineFunction(); 1422 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1423 unsigned Reg = FuncInfo->getSRetReturnReg(); 1424 assert(Reg && 1425 "SRetReturnReg should have been set in LowerFormalArguments()."); 1426 SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy()); 1427 1428 Chain = DAG.getCopyToReg(Chain, dl, X86::RAX, Val, Flag); 1429 Flag = Chain.getValue(1); 1430 1431 // RAX now acts like a return value. 1432 MRI.addLiveOut(X86::RAX); 1433 } 1434 1435 RetOps[0] = Chain; // Update chain. 1436 1437 // Add the flag if we have it. 1438 if (Flag.getNode()) 1439 RetOps.push_back(Flag); 1440 1441 return DAG.getNode(X86ISD::RET_FLAG, dl, 1442 MVT::Other, &RetOps[0], RetOps.size()); 1443} 1444 1445bool X86TargetLowering::isUsedByReturnOnly(SDNode *N) const { 1446 if (N->getNumValues() != 1) 1447 return false; 1448 if (!N->hasNUsesOfValue(1, 0)) 1449 return false; 1450 1451 SDNode *Copy = *N->use_begin(); 1452 if (Copy->getOpcode() != ISD::CopyToReg && 1453 Copy->getOpcode() != ISD::FP_EXTEND) 1454 return false; 1455 1456 bool HasRet = false; 1457 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); 1458 UI != UE; ++UI) { 1459 if (UI->getOpcode() != X86ISD::RET_FLAG) 1460 return false; 1461 HasRet = true; 1462 } 1463 1464 return HasRet; 1465} 1466 1467EVT 1468X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT, 1469 ISD::NodeType ExtendKind) const { 1470 MVT ReturnMVT; 1471 // TODO: Is this also valid on 32-bit? 1472 if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND) 1473 ReturnMVT = MVT::i8; 1474 else 1475 ReturnMVT = MVT::i32; 1476 1477 EVT MinVT = getRegisterType(Context, ReturnMVT); 1478 return VT.bitsLT(MinVT) ? MinVT : VT; 1479} 1480 1481/// LowerCallResult - Lower the result values of a call into the 1482/// appropriate copies out of appropriate physical registers. 1483/// 1484SDValue 1485X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 1486 CallingConv::ID CallConv, bool isVarArg, 1487 const SmallVectorImpl<ISD::InputArg> &Ins, 1488 DebugLoc dl, SelectionDAG &DAG, 1489 SmallVectorImpl<SDValue> &InVals) const { 1490 1491 // Assign locations to each value returned by this call. 1492 SmallVector<CCValAssign, 16> RVLocs; 1493 bool Is64Bit = Subtarget->is64Bit(); 1494 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 1495 getTargetMachine(), RVLocs, *DAG.getContext()); 1496 CCInfo.AnalyzeCallResult(Ins, RetCC_X86); 1497 1498 // Copy all of the result registers out of their specified physreg. 1499 for (unsigned i = 0; i != RVLocs.size(); ++i) { 1500 CCValAssign &VA = RVLocs[i]; 1501 EVT CopyVT = VA.getValVT(); 1502 1503 // If this is x86-64, and we disabled SSE, we can't return FP values 1504 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) && 1505 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasXMM())) { 1506 report_fatal_error("SSE register return with SSE disabled"); 1507 } 1508 1509 SDValue Val; 1510 1511 // If this is a call to a function that returns an fp value on the floating 1512 // point stack, we must guarantee the the value is popped from the stack, so 1513 // a CopyFromReg is not good enough - the copy instruction may be eliminated 1514 // if the return value is not used. We use the FpPOP_RETVAL instruction 1515 // instead. 
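    // Illustrative case (a sketch, not from a test): for "long double g(void);"
    // called as "(void)g();", st(0) is still pushed by the callee even though
    // the result is unused, and FpPOP_RETVAL is what keeps the x87 stack
    // balanced in that situation.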
1516 if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) { 1517 // If we prefer to use the value in xmm registers, copy it out as f80 and 1518 // use a truncate to move it from fp stack reg to xmm reg. 1519 if (isScalarFPTypeInSSEReg(VA.getValVT())) CopyVT = MVT::f80; 1520 SDValue Ops[] = { Chain, InFlag }; 1521 Chain = SDValue(DAG.getMachineNode(X86::FpPOP_RETVAL, dl, CopyVT, 1522 MVT::Other, MVT::Glue, Ops, 2), 1); 1523 Val = Chain.getValue(0); 1524 1525 // Round the f80 to the right size, which also moves it to the appropriate 1526 // xmm register. 1527 if (CopyVT != VA.getValVT()) 1528 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val, 1529 // This truncation won't change the value. 1530 DAG.getIntPtrConstant(1)); 1531 } else { 1532 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), 1533 CopyVT, InFlag).getValue(1); 1534 Val = Chain.getValue(0); 1535 } 1536 InFlag = Chain.getValue(2); 1537 InVals.push_back(Val); 1538 } 1539 1540 return Chain; 1541} 1542 1543 1544//===----------------------------------------------------------------------===// 1545// C & StdCall & Fast Calling Convention implementation 1546//===----------------------------------------------------------------------===// 1547// StdCall calling convention seems to be standard for many Windows' API 1548// routines and around. It differs from C calling convention just a little: 1549// callee should clean up the stack, not caller. Symbols should be also 1550// decorated in some fancy way :) It doesn't support any vector arguments. 1551// For info on fast calling convention see Fast Calling Convention (tail call) 1552// implementation LowerX86_32FastCCCallTo. 1553 1554/// CallIsStructReturn - Determines whether a call uses struct return 1555/// semantics. 1556static bool CallIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) { 1557 if (Outs.empty()) 1558 return false; 1559 1560 return Outs[0].Flags.isSRet(); 1561} 1562 1563/// ArgsAreStructReturn - Determines whether a function uses struct 1564/// return semantics. 1565static bool 1566ArgsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) { 1567 if (Ins.empty()) 1568 return false; 1569 1570 return Ins[0].Flags.isSRet(); 1571} 1572 1573/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 1574/// by "Src" to address "Dst" with size and alignment information specified by 1575/// the specific parameter attribute. The copy will be passed as a byval 1576/// function parameter. 1577static SDValue 1578CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, 1579 ISD::ArgFlagsTy Flags, SelectionDAG &DAG, 1580 DebugLoc dl) { 1581 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32); 1582 1583 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 1584 /*isVolatile*/false, /*AlwaysInline=*/true, 1585 MachinePointerInfo(), MachinePointerInfo()); 1586} 1587 1588/// IsTailCallConvention - Return true if the calling convention is one that 1589/// supports tail call optimization. 
1590static bool IsTailCallConvention(CallingConv::ID CC) { 1591 return (CC == CallingConv::Fast || CC == CallingConv::GHC); 1592} 1593 1594bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { 1595 if (!CI->isTailCall()) 1596 return false; 1597 1598 CallSite CS(CI); 1599 CallingConv::ID CalleeCC = CS.getCallingConv(); 1600 if (!IsTailCallConvention(CalleeCC) && CalleeCC != CallingConv::C) 1601 return false; 1602 1603 return true; 1604} 1605 1606/// FuncIsMadeTailCallSafe - Return true if the function is being made into 1607/// a tailcall target by changing its ABI. 1608static bool FuncIsMadeTailCallSafe(CallingConv::ID CC) { 1609 return GuaranteedTailCallOpt && IsTailCallConvention(CC); 1610} 1611 1612SDValue 1613X86TargetLowering::LowerMemArgument(SDValue Chain, 1614 CallingConv::ID CallConv, 1615 const SmallVectorImpl<ISD::InputArg> &Ins, 1616 DebugLoc dl, SelectionDAG &DAG, 1617 const CCValAssign &VA, 1618 MachineFrameInfo *MFI, 1619 unsigned i) const { 1620 // Create the nodes corresponding to a load from this parameter slot. 1621 ISD::ArgFlagsTy Flags = Ins[i].Flags; 1622 bool AlwaysUseMutable = FuncIsMadeTailCallSafe(CallConv); 1623 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal(); 1624 EVT ValVT; 1625 1626 // If value is passed by pointer we have address passed instead of the value 1627 // itself. 1628 if (VA.getLocInfo() == CCValAssign::Indirect) 1629 ValVT = VA.getLocVT(); 1630 else 1631 ValVT = VA.getValVT(); 1632 1633 // FIXME: For now, all byval parameter objects are marked mutable. This can be 1634 // changed with more analysis. 1635 // In case of tail call optimization mark all arguments mutable. Since they 1636 // could be overwritten by lowering of arguments in case of a tail call. 1637 if (Flags.isByVal()) { 1638 unsigned Bytes = Flags.getByValSize(); 1639 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects. 1640 int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable); 1641 return DAG.getFrameIndex(FI, getPointerTy()); 1642 } else { 1643 int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8, 1644 VA.getLocMemOffset(), isImmutable); 1645 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 1646 return DAG.getLoad(ValVT, dl, Chain, FIN, 1647 MachinePointerInfo::getFixedStack(FI), 1648 false, false, 0); 1649 } 1650} 1651 1652SDValue 1653X86TargetLowering::LowerFormalArguments(SDValue Chain, 1654 CallingConv::ID CallConv, 1655 bool isVarArg, 1656 const SmallVectorImpl<ISD::InputArg> &Ins, 1657 DebugLoc dl, 1658 SelectionDAG &DAG, 1659 SmallVectorImpl<SDValue> &InVals) 1660 const { 1661 MachineFunction &MF = DAG.getMachineFunction(); 1662 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1663 1664 const Function* Fn = MF.getFunction(); 1665 if (Fn->hasExternalLinkage() && 1666 Subtarget->isTargetCygMing() && 1667 Fn->getName() == "main") 1668 FuncInfo->setForceFramePointer(true); 1669 1670 MachineFrameInfo *MFI = MF.getFrameInfo(); 1671 bool Is64Bit = Subtarget->is64Bit(); 1672 bool IsWin64 = Subtarget->isTargetWin64(); 1673 1674 assert(!(isVarArg && IsTailCallConvention(CallConv)) && 1675 "Var args not supported with calling convention fastcc or ghc"); 1676 1677 // Assign locations to all of the incoming arguments. 
1678 SmallVector<CCValAssign, 16> ArgLocs; 1679 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 1680 ArgLocs, *DAG.getContext()); 1681 1682 // Allocate shadow area for Win64 1683 if (IsWin64) { 1684 CCInfo.AllocateStack(32, 8); 1685 } 1686 1687 CCInfo.AnalyzeFormalArguments(Ins, CC_X86); 1688 1689 unsigned LastVal = ~0U; 1690 SDValue ArgValue; 1691 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1692 CCValAssign &VA = ArgLocs[i]; 1693 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later 1694 // places. 1695 assert(VA.getValNo() != LastVal && 1696 "Don't support value assigned to multiple locs yet"); 1697 LastVal = VA.getValNo(); 1698 1699 if (VA.isRegLoc()) { 1700 EVT RegVT = VA.getLocVT(); 1701 TargetRegisterClass *RC = NULL; 1702 if (RegVT == MVT::i32) 1703 RC = X86::GR32RegisterClass; 1704 else if (Is64Bit && RegVT == MVT::i64) 1705 RC = X86::GR64RegisterClass; 1706 else if (RegVT == MVT::f32) 1707 RC = X86::FR32RegisterClass; 1708 else if (RegVT == MVT::f64) 1709 RC = X86::FR64RegisterClass; 1710 else if (RegVT.isVector() && RegVT.getSizeInBits() == 256) 1711 RC = X86::VR256RegisterClass; 1712 else if (RegVT.isVector() && RegVT.getSizeInBits() == 128) 1713 RC = X86::VR128RegisterClass; 1714 else if (RegVT == MVT::x86mmx) 1715 RC = X86::VR64RegisterClass; 1716 else 1717 llvm_unreachable("Unknown argument type!"); 1718 1719 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 1720 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 1721 1722 // If this is an 8 or 16-bit value, it is really passed promoted to 32 1723 // bits. Insert an assert[sz]ext to capture this, then truncate to the 1724 // right size. 1725 if (VA.getLocInfo() == CCValAssign::SExt) 1726 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, 1727 DAG.getValueType(VA.getValVT())); 1728 else if (VA.getLocInfo() == CCValAssign::ZExt) 1729 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, 1730 DAG.getValueType(VA.getValVT())); 1731 else if (VA.getLocInfo() == CCValAssign::BCvt) 1732 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue); 1733 1734 if (VA.isExtInLoc()) { 1735 // Handle MMX values passed in XMM regs. 1736 if (RegVT.isVector()) { 1737 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), 1738 ArgValue); 1739 } else 1740 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 1741 } 1742 } else { 1743 assert(VA.isMemLoc()); 1744 ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i); 1745 } 1746 1747 // If value is passed via pointer - do a load. 1748 if (VA.getLocInfo() == CCValAssign::Indirect) 1749 ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, 1750 MachinePointerInfo(), false, false, 0); 1751 1752 InVals.push_back(ArgValue); 1753 } 1754 1755 // The x86-64 ABI for returning structs by value requires that we copy 1756 // the sret argument into %rax for the return. Save the argument into 1757 // a virtual register so that we can access it from the return points. 
1758 if (Is64Bit && MF.getFunction()->hasStructRetAttr()) { 1759 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1760 unsigned Reg = FuncInfo->getSRetReturnReg(); 1761 if (!Reg) { 1762 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64)); 1763 FuncInfo->setSRetReturnReg(Reg); 1764 } 1765 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]); 1766 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain); 1767 } 1768 1769 unsigned StackSize = CCInfo.getNextStackOffset(); 1770 // Align stack specially for tail calls. 1771 if (FuncIsMadeTailCallSafe(CallConv)) 1772 StackSize = GetAlignedArgumentStackSize(StackSize, DAG); 1773 1774 // If the function takes variable number of arguments, make a frame index for 1775 // the start of the first vararg value... for expansion of llvm.va_start. 1776 if (isVarArg) { 1777 if (Is64Bit || (CallConv != CallingConv::X86_FastCall && 1778 CallConv != CallingConv::X86_ThisCall)) { 1779 FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize,true)); 1780 } 1781 if (Is64Bit) { 1782 unsigned TotalNumIntRegs = 0, TotalNumXMMRegs = 0; 1783 1784 // FIXME: We should really autogenerate these arrays 1785 static const unsigned GPR64ArgRegsWin64[] = { 1786 X86::RCX, X86::RDX, X86::R8, X86::R9 1787 }; 1788 static const unsigned GPR64ArgRegs64Bit[] = { 1789 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9 1790 }; 1791 static const unsigned XMMArgRegs64Bit[] = { 1792 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 1793 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 1794 }; 1795 const unsigned *GPR64ArgRegs; 1796 unsigned NumXMMRegs = 0; 1797 1798 if (IsWin64) { 1799 // The XMM registers which might contain var arg parameters are shadowed 1800 // in their paired GPR. So we only need to save the GPR to their home 1801 // slots. 1802 TotalNumIntRegs = 4; 1803 GPR64ArgRegs = GPR64ArgRegsWin64; 1804 } else { 1805 TotalNumIntRegs = 6; TotalNumXMMRegs = 8; 1806 GPR64ArgRegs = GPR64ArgRegs64Bit; 1807 1808 NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs64Bit, TotalNumXMMRegs); 1809 } 1810 unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs, 1811 TotalNumIntRegs); 1812 1813 bool NoImplicitFloatOps = Fn->hasFnAttr(Attribute::NoImplicitFloat); 1814 assert(!(NumXMMRegs && !Subtarget->hasXMM()) && 1815 "SSE register cannot be used when SSE is disabled!"); 1816 assert(!(NumXMMRegs && UseSoftFloat && NoImplicitFloatOps) && 1817 "SSE register cannot be used when SSE is disabled!"); 1818 if (UseSoftFloat || NoImplicitFloatOps || !Subtarget->hasXMM()) 1819 // Kernel mode asks for SSE to be disabled, so don't push them 1820 // on the stack. 1821 TotalNumXMMRegs = 0; 1822 1823 if (IsWin64) { 1824 const TargetFrameLowering &TFI = *getTargetMachine().getFrameLowering(); 1825 // Get to the caller-allocated home save location. Add 8 to account 1826 // for the return address. 1827 int HomeOffset = TFI.getOffsetOfLocalArea() + 8; 1828 FuncInfo->setRegSaveFrameIndex( 1829 MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false)); 1830 // Fixup to set vararg frame on shadow area (4 x i64). 1831 if (NumIntRegs < 4) 1832 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex()); 1833 } else { 1834 // For X86-64, if there are vararg parameters that are passed via 1835 // registers, then we must store them to their spots on the stack so they 1836 // may be loaded by deferencing the result of va_next. 
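        // Rough layout of the register save area created below (a sketch of
        // what the AMD64 ABI prescribes and what these offsets index into):
        //   bytes   0..47    %rdi, %rsi, %rdx, %rcx, %r8, %r9   (6 x 8 bytes)
        //   bytes  48..175   %xmm0 .. %xmm7                     (8 x 16 bytes)
        // The va_list's gp_offset/fp_offset fields are offsets into this block.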
1837 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8); 1838 FuncInfo->setVarArgsFPOffset(TotalNumIntRegs * 8 + NumXMMRegs * 16); 1839 FuncInfo->setRegSaveFrameIndex( 1840 MFI->CreateStackObject(TotalNumIntRegs * 8 + TotalNumXMMRegs * 16, 16, 1841 false)); 1842 } 1843 1844 // Store the integer parameter registers. 1845 SmallVector<SDValue, 8> MemOps; 1846 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), 1847 getPointerTy()); 1848 unsigned Offset = FuncInfo->getVarArgsGPOffset(); 1849 for (; NumIntRegs != TotalNumIntRegs; ++NumIntRegs) { 1850 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN, 1851 DAG.getIntPtrConstant(Offset)); 1852 unsigned VReg = MF.addLiveIn(GPR64ArgRegs[NumIntRegs], 1853 X86::GR64RegisterClass); 1854 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 1855 SDValue Store = 1856 DAG.getStore(Val.getValue(1), dl, Val, FIN, 1857 MachinePointerInfo::getFixedStack( 1858 FuncInfo->getRegSaveFrameIndex(), Offset), 1859 false, false, 0); 1860 MemOps.push_back(Store); 1861 Offset += 8; 1862 } 1863 1864 if (TotalNumXMMRegs != 0 && NumXMMRegs != TotalNumXMMRegs) { 1865 // Now store the XMM (fp + vector) parameter registers. 1866 SmallVector<SDValue, 11> SaveXMMOps; 1867 SaveXMMOps.push_back(Chain); 1868 1869 unsigned AL = MF.addLiveIn(X86::AL, X86::GR8RegisterClass); 1870 SDValue ALVal = DAG.getCopyFromReg(DAG.getEntryNode(), dl, AL, MVT::i8); 1871 SaveXMMOps.push_back(ALVal); 1872 1873 SaveXMMOps.push_back(DAG.getIntPtrConstant( 1874 FuncInfo->getRegSaveFrameIndex())); 1875 SaveXMMOps.push_back(DAG.getIntPtrConstant( 1876 FuncInfo->getVarArgsFPOffset())); 1877 1878 for (; NumXMMRegs != TotalNumXMMRegs; ++NumXMMRegs) { 1879 unsigned VReg = MF.addLiveIn(XMMArgRegs64Bit[NumXMMRegs], 1880 X86::VR128RegisterClass); 1881 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::v4f32); 1882 SaveXMMOps.push_back(Val); 1883 } 1884 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl, 1885 MVT::Other, 1886 &SaveXMMOps[0], SaveXMMOps.size())); 1887 } 1888 1889 if (!MemOps.empty()) 1890 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 1891 &MemOps[0], MemOps.size()); 1892 } 1893 } 1894 1895 // Some CCs need callee pop. 1896 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg, GuaranteedTailCallOpt)) { 1897 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything. 1898 } else { 1899 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing. 1900 // If this is an sret function, the return should pop the hidden pointer. 1901 if (!Is64Bit && !IsTailCallConvention(CallConv) && ArgsAreStructReturn(Ins)) 1902 FuncInfo->setBytesToPopOnReturn(4); 1903 } 1904 1905 if (!Is64Bit) { 1906 // RegSaveFrameIndex is X86-64 only. 1907 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA); 1908 if (CallConv == CallingConv::X86_FastCall || 1909 CallConv == CallingConv::X86_ThisCall) 1910 // fastcc functions can't have varargs. 
1911 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA); 1912 } 1913 1914 return Chain; 1915} 1916 1917SDValue 1918X86TargetLowering::LowerMemOpCallTo(SDValue Chain, 1919 SDValue StackPtr, SDValue Arg, 1920 DebugLoc dl, SelectionDAG &DAG, 1921 const CCValAssign &VA, 1922 ISD::ArgFlagsTy Flags) const { 1923 unsigned LocMemOffset = VA.getLocMemOffset(); 1924 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 1925 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 1926 if (Flags.isByVal()) 1927 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl); 1928 1929 return DAG.getStore(Chain, dl, Arg, PtrOff, 1930 MachinePointerInfo::getStack(LocMemOffset), 1931 false, false, 0); 1932} 1933 1934/// EmitTailCallLoadRetAddr - Emit a load of return address if tail call 1935/// optimization is performed and it is required. 1936SDValue 1937X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG, 1938 SDValue &OutRetAddr, SDValue Chain, 1939 bool IsTailCall, bool Is64Bit, 1940 int FPDiff, DebugLoc dl) const { 1941 // Adjust the Return address stack slot. 1942 EVT VT = getPointerTy(); 1943 OutRetAddr = getReturnAddressFrameIndex(DAG); 1944 1945 // Load the "old" Return address. 1946 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(), 1947 false, false, 0); 1948 return SDValue(OutRetAddr.getNode(), 1); 1949} 1950 1951/// EmitTailCallStoreRetAddr - Emit a store of the return address if tail call 1952/// optimization is performed and it is required (FPDiff!=0). 1953static SDValue 1954EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF, 1955 SDValue Chain, SDValue RetAddrFrIdx, 1956 bool Is64Bit, int FPDiff, DebugLoc dl) { 1957 // Store the return address to the appropriate stack slot. 1958 if (!FPDiff) return Chain; 1959 // Calculate the new stack slot for the return address. 1960 int SlotSize = Is64Bit ? 8 : 4; 1961 int NewReturnAddrFI = 1962 MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize, false); 1963 EVT VT = Is64Bit ? MVT::i64 : MVT::i32; 1964 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT); 1965 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx, 1966 MachinePointerInfo::getFixedStack(NewReturnAddrFI), 1967 false, false, 0); 1968 return Chain; 1969} 1970 1971SDValue 1972X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee, 1973 CallingConv::ID CallConv, bool isVarArg, 1974 bool &isTailCall, 1975 const SmallVectorImpl<ISD::OutputArg> &Outs, 1976 const SmallVectorImpl<SDValue> &OutVals, 1977 const SmallVectorImpl<ISD::InputArg> &Ins, 1978 DebugLoc dl, SelectionDAG &DAG, 1979 SmallVectorImpl<SDValue> &InVals) const { 1980 MachineFunction &MF = DAG.getMachineFunction(); 1981 bool Is64Bit = Subtarget->is64Bit(); 1982 bool IsWin64 = Subtarget->isTargetWin64(); 1983 bool IsStructRet = CallIsStructReturn(Outs); 1984 bool IsSibcall = false; 1985 1986 if (isTailCall) { 1987 // Check if it's really possible to do a tail call. 1988 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, 1989 isVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(), 1990 Outs, OutVals, Ins, DAG); 1991 1992 // Sibcalls are automatically detected tailcalls which do not require 1993 // ABI changes. 
1994 if (!GuaranteedTailCallOpt && isTailCall) 1995 IsSibcall = true; 1996 1997 if (isTailCall) 1998 ++NumTailCalls; 1999 } 2000 2001 assert(!(isVarArg && IsTailCallConvention(CallConv)) && 2002 "Var args not supported with calling convention fastcc or ghc"); 2003 2004 // Analyze operands of the call, assigning locations to each operand. 2005 SmallVector<CCValAssign, 16> ArgLocs; 2006 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 2007 ArgLocs, *DAG.getContext()); 2008 2009 // Allocate shadow area for Win64 2010 if (IsWin64) { 2011 CCInfo.AllocateStack(32, 8); 2012 } 2013 2014 CCInfo.AnalyzeCallOperands(Outs, CC_X86); 2015 2016 // Get a count of how many bytes are to be pushed on the stack. 2017 unsigned NumBytes = CCInfo.getNextStackOffset(); 2018 if (IsSibcall) 2019 // This is a sibcall. The memory operands are available in caller's 2020 // own caller's stack. 2021 NumBytes = 0; 2022 else if (GuaranteedTailCallOpt && IsTailCallConvention(CallConv)) 2023 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG); 2024 2025 int FPDiff = 0; 2026 if (isTailCall && !IsSibcall) { 2027 // Lower arguments at fp - stackoffset + fpdiff. 2028 unsigned NumBytesCallerPushed = 2029 MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn(); 2030 FPDiff = NumBytesCallerPushed - NumBytes; 2031 2032 // Set the delta of movement of the returnaddr stackslot. 2033 // But only set if delta is greater than previous delta. 2034 if (FPDiff < (MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta())) 2035 MF.getInfo<X86MachineFunctionInfo>()->setTCReturnAddrDelta(FPDiff); 2036 } 2037 2038 if (!IsSibcall) 2039 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true)); 2040 2041 SDValue RetAddrFrIdx; 2042 // Load return address for tail calls. 2043 if (isTailCall && FPDiff) 2044 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall, 2045 Is64Bit, FPDiff, dl); 2046 2047 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 2048 SmallVector<SDValue, 8> MemOpChains; 2049 SDValue StackPtr; 2050 2051 // Walk the register/memloc assignments, inserting copies/loads. In the case 2052 // of tail call optimization arguments are handle later. 2053 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2054 CCValAssign &VA = ArgLocs[i]; 2055 EVT RegVT = VA.getLocVT(); 2056 SDValue Arg = OutVals[i]; 2057 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2058 bool isByVal = Flags.isByVal(); 2059 2060 // Promote the value if needed. 2061 switch (VA.getLocInfo()) { 2062 default: llvm_unreachable("Unknown loc info!"); 2063 case CCValAssign::Full: break; 2064 case CCValAssign::SExt: 2065 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg); 2066 break; 2067 case CCValAssign::ZExt: 2068 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg); 2069 break; 2070 case CCValAssign::AExt: 2071 if (RegVT.isVector() && RegVT.getSizeInBits() == 128) { 2072 // Special case: passing MMX values in XMM registers. 2073 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg); 2074 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg); 2075 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg); 2076 } else 2077 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg); 2078 break; 2079 case CCValAssign::BCvt: 2080 Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg); 2081 break; 2082 case CCValAssign::Indirect: { 2083 // Store the argument. 
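      // (The value is spilled to a stack temporary here and its address is
      //  what actually gets passed; the matching load happens on the callee
      //  side in LowerFormalArguments' Indirect handling.)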
2084 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT()); 2085 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex(); 2086 Chain = DAG.getStore(Chain, dl, Arg, SpillSlot, 2087 MachinePointerInfo::getFixedStack(FI), 2088 false, false, 0); 2089 Arg = SpillSlot; 2090 break; 2091 } 2092 } 2093 2094 if (VA.isRegLoc()) { 2095 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 2096 if (isVarArg && IsWin64) { 2097 // Win64 ABI requires argument XMM reg to be copied to the corresponding 2098 // shadow reg if callee is a varargs function. 2099 unsigned ShadowReg = 0; 2100 switch (VA.getLocReg()) { 2101 case X86::XMM0: ShadowReg = X86::RCX; break; 2102 case X86::XMM1: ShadowReg = X86::RDX; break; 2103 case X86::XMM2: ShadowReg = X86::R8; break; 2104 case X86::XMM3: ShadowReg = X86::R9; break; 2105 } 2106 if (ShadowReg) 2107 RegsToPass.push_back(std::make_pair(ShadowReg, Arg)); 2108 } 2109 } else if (!IsSibcall && (!isTailCall || isByVal)) { 2110 assert(VA.isMemLoc()); 2111 if (StackPtr.getNode() == 0) 2112 StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr, getPointerTy()); 2113 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg, 2114 dl, DAG, VA, Flags)); 2115 } 2116 } 2117 2118 if (!MemOpChains.empty()) 2119 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2120 &MemOpChains[0], MemOpChains.size()); 2121 2122 // Build a sequence of copy-to-reg nodes chained together with token chain 2123 // and flag operands which copy the outgoing args into registers. 2124 SDValue InFlag; 2125 // Tail call byval lowering might overwrite argument registers so in case of 2126 // tail call optimization the copies to registers are lowered later. 2127 if (!isTailCall) 2128 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 2129 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 2130 RegsToPass[i].second, InFlag); 2131 InFlag = Chain.getValue(1); 2132 } 2133 2134 if (Subtarget->isPICStyleGOT()) { 2135 // ELF / PIC requires GOT in the EBX register before function calls via PLT 2136 // GOT pointer. 2137 if (!isTailCall) { 2138 Chain = DAG.getCopyToReg(Chain, dl, X86::EBX, 2139 DAG.getNode(X86ISD::GlobalBaseReg, 2140 DebugLoc(), getPointerTy()), 2141 InFlag); 2142 InFlag = Chain.getValue(1); 2143 } else { 2144 // If we are tail calling and generating PIC/GOT style code load the 2145 // address of the callee into ECX. The value in ecx is used as target of 2146 // the tail jump. This is done to circumvent the ebx/callee-saved problem 2147 // for tail calls on PIC/GOT architectures. Normally we would just put the 2148 // address of GOT into ebx and then call target@PLT. But for tail calls 2149 // ebx would be restored (since ebx is callee saved) before jumping to the 2150 // target@PLT. 2151 2152 // Note: The actual moving to ECX is done further down. 2153 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee); 2154 if (G && !G->getGlobal()->hasHiddenVisibility() && 2155 !G->getGlobal()->hasProtectedVisibility()) 2156 Callee = LowerGlobalAddress(Callee, DAG); 2157 else if (isa<ExternalSymbolSDNode>(Callee)) 2158 Callee = LowerExternalSymbol(Callee, DAG); 2159 } 2160 } 2161 2162 if (Is64Bit && isVarArg && !IsWin64) { 2163 // From AMD64 ABI document: 2164 // For calls that may call functions that use varargs or stdargs 2165 // (prototype-less calls or calls to functions containing ellipsis (...) in 2166 // the declaration) %al is used as hidden argument to specify the number 2167 // of SSE registers used. 
The contents of %al do not need to match exactly 2168 // the number of registers, but must be an ubound on the number of SSE 2169 // registers used and is in the range 0 - 8 inclusive. 2170 2171 // Count the number of XMM registers allocated. 2172 static const unsigned XMMArgRegs[] = { 2173 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 2174 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 2175 }; 2176 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8); 2177 assert((Subtarget->hasXMM() || !NumXMMRegs) 2178 && "SSE registers cannot be used when SSE is disabled"); 2179 2180 Chain = DAG.getCopyToReg(Chain, dl, X86::AL, 2181 DAG.getConstant(NumXMMRegs, MVT::i8), InFlag); 2182 InFlag = Chain.getValue(1); 2183 } 2184 2185 2186 // For tail calls lower the arguments to the 'real' stack slot. 2187 if (isTailCall) { 2188 // Force all the incoming stack arguments to be loaded from the stack 2189 // before any new outgoing arguments are stored to the stack, because the 2190 // outgoing stack slots may alias the incoming argument stack slots, and 2191 // the alias isn't otherwise explicit. This is slightly more conservative 2192 // than necessary, because it means that each store effectively depends 2193 // on every argument instead of just those arguments it would clobber. 2194 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain); 2195 2196 SmallVector<SDValue, 8> MemOpChains2; 2197 SDValue FIN; 2198 int FI = 0; 2199 // Do not flag preceding copytoreg stuff together with the following stuff. 2200 InFlag = SDValue(); 2201 if (GuaranteedTailCallOpt) { 2202 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2203 CCValAssign &VA = ArgLocs[i]; 2204 if (VA.isRegLoc()) 2205 continue; 2206 assert(VA.isMemLoc()); 2207 SDValue Arg = OutVals[i]; 2208 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2209 // Create frame index. 2210 int32_t Offset = VA.getLocMemOffset()+FPDiff; 2211 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8; 2212 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true); 2213 FIN = DAG.getFrameIndex(FI, getPointerTy()); 2214 2215 if (Flags.isByVal()) { 2216 // Copy relative to framepointer. 2217 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset()); 2218 if (StackPtr.getNode() == 0) 2219 StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr, 2220 getPointerTy()); 2221 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source); 2222 2223 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN, 2224 ArgChain, 2225 Flags, DAG, dl)); 2226 } else { 2227 // Store relative to framepointer. 2228 MemOpChains2.push_back( 2229 DAG.getStore(ArgChain, dl, Arg, FIN, 2230 MachinePointerInfo::getFixedStack(FI), 2231 false, false, 0)); 2232 } 2233 } 2234 } 2235 2236 if (!MemOpChains2.empty()) 2237 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2238 &MemOpChains2[0], MemOpChains2.size()); 2239 2240 // Copy arguments to their registers. 2241 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 2242 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 2243 RegsToPass[i].second, InFlag); 2244 InFlag = Chain.getValue(1); 2245 } 2246 InFlag =SDValue(); 2247 2248 // Store the return address to the appropriate stack slot. 
    Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx, Is64Bit,
                                     FPDiff, dl);
  }

  if (getTargetMachine().getCodeModel() == CodeModel::Large) {
    assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
    // In the 64-bit large code model, we have to make all calls
    // through a register, since the call instruction's 32-bit
    // pc-relative offset may not be large enough to hold the whole
    // address.
  } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    // If the callee is a GlobalAddress node (quite common, every direct call
    // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack
    // it.

    // We should use extra load for direct calls to dllimported functions in
    // non-JIT mode.
    const GlobalValue *GV = G->getGlobal();
    if (!GV->hasDLLImportLinkage()) {
      unsigned char OpFlags = 0;
      bool ExtraLoad = false;
      unsigned WrapperKind = ISD::DELETED_NODE;

      // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
      // external symbols must go through the PLT in PIC mode.  If the symbol
      // has hidden or protected visibility, or if it is static or local, then
      // we don't need to use the PLT - we can directly call it.
      if (Subtarget->isTargetELF() &&
          getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
          GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
        OpFlags = X86II::MO_PLT;
      } else if (Subtarget->isPICStyleStubAny() &&
                 (GV->isDeclaration() || GV->isWeakForLinker()) &&
                 (!Subtarget->getTargetTriple().isMacOSX() ||
                  Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
        // PC-relative references to external symbols should go through $stub,
        // unless we're building with the Leopard linker or later, which
        // automatically synthesizes these stubs.
        OpFlags = X86II::MO_DARWIN_STUB;
      } else if (Subtarget->isPICStyleRIPRel() &&
                 isa<Function>(GV) &&
                 cast<Function>(GV)->hasFnAttr(Attribute::NonLazyBind)) {
        // If the function is marked as non-lazy, generate an indirect call
        // which loads from the GOT directly. This avoids runtime overhead
        // at the cost of eager binding (and one extra byte of encoding).
        OpFlags = X86II::MO_GOTPCREL;
        WrapperKind = X86ISD::WrapperRIP;
        ExtraLoad = true;
      }

      Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(),
                                          G->getOffset(), OpFlags);

      // Add a wrapper if needed.
      if (WrapperKind != ISD::DELETED_NODE)
        Callee = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Callee);
      // Add extra indirection if needed.
      if (ExtraLoad)
        Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee,
                             MachinePointerInfo::getGOT(),
                             false, false, 0);
    }
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    unsigned char OpFlags = 0;

    // On ELF targets, in either X86-64 or X86-32 mode, direct calls to
    // external symbols should go through the PLT.
2316 if (Subtarget->isTargetELF() && 2317 getTargetMachine().getRelocationModel() == Reloc::PIC_) { 2318 OpFlags = X86II::MO_PLT; 2319 } else if (Subtarget->isPICStyleStubAny() && 2320 (!Subtarget->getTargetTriple().isMacOSX() || 2321 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) { 2322 // PC-relative references to external symbols should go through $stub, 2323 // unless we're building with the leopard linker or later, which 2324 // automatically synthesizes these stubs. 2325 OpFlags = X86II::MO_DARWIN_STUB; 2326 } 2327 2328 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(), 2329 OpFlags); 2330 } 2331 2332 // Returns a chain & a flag for retval copy to use. 2333 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 2334 SmallVector<SDValue, 8> Ops; 2335 2336 if (!IsSibcall && isTailCall) { 2337 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 2338 DAG.getIntPtrConstant(0, true), InFlag); 2339 InFlag = Chain.getValue(1); 2340 } 2341 2342 Ops.push_back(Chain); 2343 Ops.push_back(Callee); 2344 2345 if (isTailCall) 2346 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32)); 2347 2348 // Add argument registers to the end of the list so that they are known live 2349 // into the call. 2350 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 2351 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 2352 RegsToPass[i].second.getValueType())); 2353 2354 // Add an implicit use GOT pointer in EBX. 2355 if (!isTailCall && Subtarget->isPICStyleGOT()) 2356 Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy())); 2357 2358 // Add an implicit use of AL for non-Windows x86 64-bit vararg functions. 2359 if (Is64Bit && isVarArg && !IsWin64) 2360 Ops.push_back(DAG.getRegister(X86::AL, MVT::i8)); 2361 2362 if (InFlag.getNode()) 2363 Ops.push_back(InFlag); 2364 2365 if (isTailCall) { 2366 // We used to do: 2367 //// If this is the first return lowered for this function, add the regs 2368 //// to the liveout set for the function. 2369 // This isn't right, although it's probably harmless on x86; liveouts 2370 // should be computed from returns not tail calls. Consider a void 2371 // function making a tail call to a function returning int. 2372 return DAG.getNode(X86ISD::TC_RETURN, dl, 2373 NodeTys, &Ops[0], Ops.size()); 2374 } 2375 2376 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, &Ops[0], Ops.size()); 2377 InFlag = Chain.getValue(1); 2378 2379 // Create the CALLSEQ_END node. 2380 unsigned NumBytesForCalleeToPush; 2381 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg, GuaranteedTailCallOpt)) 2382 NumBytesForCalleeToPush = NumBytes; // Callee pops everything 2383 else if (!Is64Bit && !IsTailCallConvention(CallConv) && IsStructRet) 2384 // If this is a call to a struct-return function, the callee 2385 // pops the hidden struct pointer, so we have to push it back. 2386 // This is common for Darwin/X86, Linux & Mingw32 targets. 2387 NumBytesForCalleeToPush = 4; 2388 else 2389 NumBytesForCalleeToPush = 0; // Callee pops nothing. 2390 2391 // Returns a flag for retval copy to use. 2392 if (!IsSibcall) { 2393 Chain = DAG.getCALLSEQ_END(Chain, 2394 DAG.getIntPtrConstant(NumBytes, true), 2395 DAG.getIntPtrConstant(NumBytesForCalleeToPush, 2396 true), 2397 InFlag); 2398 InFlag = Chain.getValue(1); 2399 } 2400 2401 // Handle result values, copying them out of physregs into vregs that we 2402 // return. 
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
                         Ins, dl, DAG, InVals);
}


//===----------------------------------------------------------------------===//
//                Fast Calling Convention (tail call) implementation
//===----------------------------------------------------------------------===//

//  Like the StdCall convention, the callee cleans up the arguments, except
//  that ECX is reserved for storing the address of the tail-called function.
//  Only 2 registers are free for argument passing (inreg). Tail call
//  optimization is performed provided:
//                * tailcallopt is enabled
//                * caller/callee are fastcc
//  On X86_64 architecture with GOT-style position independent code only local
//  (within module) calls are supported at the moment.
//  To keep the stack aligned according to the platform ABI the function
//  GetAlignedArgumentStackSize ensures that the argument delta is always a
//  multiple of the stack alignment. (Dynamic linkers need this - darwin's
//  dyld for example.)
//  If a tail-called callee has more arguments than the caller, the caller
//  needs to make sure that there is room to move the RETADDR to. This is
//  achieved by reserving an area the size of the argument delta right after
//  the original RETADDR, but before the saved framepointer or the spilled
//  registers, e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4):
//  stack layout:
//    arg1
//    arg2
//    RETADDR
//    [ new RETADDR
//      move area ]
//    (possible EBP)
//    ESI
//    EDI
//    local1 ..

/// GetAlignedArgumentStackSize - Make the stack argument size a value of the
/// form e.g. 16n + 12 for a 16 byte alignment requirement, so that the area
/// is aligned once the return address slot is included.
unsigned
X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
                                               SelectionDAG& DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  const TargetMachine &TM = MF.getTarget();
  const TargetFrameLowering &TFI = *TM.getFrameLowering();
  unsigned StackAlignment = TFI.getStackAlignment();
  uint64_t AlignMask = StackAlignment - 1;
  int64_t Offset = StackSize;
  uint64_t SlotSize = TD->getPointerSize();
  if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
    // The low bits are no more than (StackAlignment - SlotSize), so just add
    // the difference.
    Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
  } else {
    // Mask out the low bits, then add the stack alignment once plus
    // (StackAlignment - SlotSize) bytes (12 in the 16-byte/4-byte-slot case).
    Offset = ((~AlignMask) & Offset) + StackAlignment +
      (StackAlignment-SlotSize);
  }
  return Offset;
}

/// MatchingStackOffset - Return true if the given stack call argument is
/// already available in the same position (relatively) of the caller's
/// incoming argument stack.
2465static 2466bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, 2467 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI, 2468 const X86InstrInfo *TII) { 2469 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8; 2470 int FI = INT_MAX; 2471 if (Arg.getOpcode() == ISD::CopyFromReg) { 2472 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg(); 2473 if (!TargetRegisterInfo::isVirtualRegister(VR)) 2474 return false; 2475 MachineInstr *Def = MRI->getVRegDef(VR); 2476 if (!Def) 2477 return false; 2478 if (!Flags.isByVal()) { 2479 if (!TII->isLoadFromStackSlot(Def, FI)) 2480 return false; 2481 } else { 2482 unsigned Opcode = Def->getOpcode(); 2483 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r) && 2484 Def->getOperand(1).isFI()) { 2485 FI = Def->getOperand(1).getIndex(); 2486 Bytes = Flags.getByValSize(); 2487 } else 2488 return false; 2489 } 2490 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) { 2491 if (Flags.isByVal()) 2492 // ByVal argument is passed in as a pointer but it's now being 2493 // dereferenced. e.g. 2494 // define @foo(%struct.X* %A) { 2495 // tail call @bar(%struct.X* byval %A) 2496 // } 2497 return false; 2498 SDValue Ptr = Ld->getBasePtr(); 2499 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr); 2500 if (!FINode) 2501 return false; 2502 FI = FINode->getIndex(); 2503 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) { 2504 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg); 2505 FI = FINode->getIndex(); 2506 Bytes = Flags.getByValSize(); 2507 } else 2508 return false; 2509 2510 assert(FI != INT_MAX); 2511 if (!MFI->isFixedObjectIndex(FI)) 2512 return false; 2513 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI); 2514} 2515 2516/// IsEligibleForTailCallOptimization - Check whether the call is eligible 2517/// for tail call optimization. Targets which want to do tail call 2518/// optimization should implement this function. 2519bool 2520X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 2521 CallingConv::ID CalleeCC, 2522 bool isVarArg, 2523 bool isCalleeStructRet, 2524 bool isCallerStructRet, 2525 const SmallVectorImpl<ISD::OutputArg> &Outs, 2526 const SmallVectorImpl<SDValue> &OutVals, 2527 const SmallVectorImpl<ISD::InputArg> &Ins, 2528 SelectionDAG& DAG) const { 2529 if (!IsTailCallConvention(CalleeCC) && 2530 CalleeCC != CallingConv::C) 2531 return false; 2532 2533 // If -tailcallopt is specified, make fastcc functions tail-callable. 2534 const MachineFunction &MF = DAG.getMachineFunction(); 2535 const Function *CallerF = DAG.getMachineFunction().getFunction(); 2536 CallingConv::ID CallerCC = CallerF->getCallingConv(); 2537 bool CCMatch = CallerCC == CalleeCC; 2538 2539 if (GuaranteedTailCallOpt) { 2540 if (IsTailCallConvention(CalleeCC) && CCMatch) 2541 return true; 2542 return false; 2543 } 2544 2545 // Look for obvious safe cases to perform tail call optimization that do not 2546 // require ABI changes. This is what gcc calls sibcall. 2547 2548 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to 2549 // emit a special epilogue. 2550 if (RegInfo->needsStackRealignment(MF)) 2551 return false; 2552 2553 // Also avoid sibcall optimization if either caller or callee uses struct 2554 // return semantics. 2555 if (isCalleeStructRet || isCallerStructRet) 2556 return false; 2557 2558 // An stdcall caller is expected to clean up its arguments; the callee 2559 // isn't going to do that. 
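  // (e.g. if an x86_stdcallcc function sibcalled a ccc callee, the callee's
  //  plain "ret" would return to the original caller without the "ret $N"
  //  argument cleanup that the stdcall function owes it.)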
2560 if (!CCMatch && CallerCC==CallingConv::X86_StdCall) 2561 return false; 2562 2563 // Do not sibcall optimize vararg calls unless all arguments are passed via 2564 // registers. 2565 if (isVarArg && !Outs.empty()) { 2566 2567 // Optimizing for varargs on Win64 is unlikely to be safe without 2568 // additional testing. 2569 if (Subtarget->isTargetWin64()) 2570 return false; 2571 2572 SmallVector<CCValAssign, 16> ArgLocs; 2573 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), 2574 getTargetMachine(), ArgLocs, *DAG.getContext()); 2575 2576 CCInfo.AnalyzeCallOperands(Outs, CC_X86); 2577 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) 2578 if (!ArgLocs[i].isRegLoc()) 2579 return false; 2580 } 2581 2582 // If the call result is in ST0 / ST1, it needs to be popped off the x87 stack. 2583 // Therefore if it's not used by the call it is not safe to optimize this into 2584 // a sibcall. 2585 bool Unused = false; 2586 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { 2587 if (!Ins[i].Used) { 2588 Unused = true; 2589 break; 2590 } 2591 } 2592 if (Unused) { 2593 SmallVector<CCValAssign, 16> RVLocs; 2594 CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), 2595 getTargetMachine(), RVLocs, *DAG.getContext()); 2596 CCInfo.AnalyzeCallResult(Ins, RetCC_X86); 2597 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 2598 CCValAssign &VA = RVLocs[i]; 2599 if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) 2600 return false; 2601 } 2602 } 2603 2604 // If the calling conventions do not match, then we'd better make sure the 2605 // results are returned in the same way as what the caller expects. 2606 if (!CCMatch) { 2607 SmallVector<CCValAssign, 16> RVLocs1; 2608 CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), 2609 getTargetMachine(), RVLocs1, *DAG.getContext()); 2610 CCInfo1.AnalyzeCallResult(Ins, RetCC_X86); 2611 2612 SmallVector<CCValAssign, 16> RVLocs2; 2613 CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), 2614 getTargetMachine(), RVLocs2, *DAG.getContext()); 2615 CCInfo2.AnalyzeCallResult(Ins, RetCC_X86); 2616 2617 if (RVLocs1.size() != RVLocs2.size()) 2618 return false; 2619 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) { 2620 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc()) 2621 return false; 2622 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo()) 2623 return false; 2624 if (RVLocs1[i].isRegLoc()) { 2625 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg()) 2626 return false; 2627 } else { 2628 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset()) 2629 return false; 2630 } 2631 } 2632 } 2633 2634 // If the callee takes no arguments then go on to check the results of the 2635 // call. 2636 if (!Outs.empty()) { 2637 // Check if stack adjustment is needed. For now, do not do this if any 2638 // argument is passed on the stack. 2639 SmallVector<CCValAssign, 16> ArgLocs; 2640 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), 2641 getTargetMachine(), ArgLocs, *DAG.getContext()); 2642 2643 // Allocate shadow area for Win64 2644 if (Subtarget->isTargetWin64()) { 2645 CCInfo.AllocateStack(32, 8); 2646 } 2647 2648 CCInfo.AnalyzeCallOperands(Outs, CC_X86); 2649 if (CCInfo.getNextStackOffset()) { 2650 MachineFunction &MF = DAG.getMachineFunction(); 2651 if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn()) 2652 return false; 2653 2654 // Check if the arguments are already laid out in the right way as 2655 // the caller's fixed stack objects. 
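      // (e.g. if the caller received an i32 at [esp+4] and passes that same
      //  value through at the same offset, MatchingStackOffset will see that
      //  no store is needed; otherwise the sibcall is rejected.)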
2656 MachineFrameInfo *MFI = MF.getFrameInfo(); 2657 const MachineRegisterInfo *MRI = &MF.getRegInfo(); 2658 const X86InstrInfo *TII = 2659 ((X86TargetMachine&)getTargetMachine()).getInstrInfo(); 2660 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2661 CCValAssign &VA = ArgLocs[i]; 2662 SDValue Arg = OutVals[i]; 2663 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2664 if (VA.getLocInfo() == CCValAssign::Indirect) 2665 return false; 2666 if (!VA.isRegLoc()) { 2667 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, 2668 MFI, MRI, TII)) 2669 return false; 2670 } 2671 } 2672 } 2673 2674 // If the tailcall address may be in a register, then make sure it's 2675 // possible to register allocate for it. In 32-bit, the call address can 2676 // only target EAX, EDX, or ECX since the tail call must be scheduled after 2677 // callee-saved registers are restored. These happen to be the same 2678 // registers used to pass 'inreg' arguments so watch out for those. 2679 if (!Subtarget->is64Bit() && 2680 !isa<GlobalAddressSDNode>(Callee) && 2681 !isa<ExternalSymbolSDNode>(Callee)) { 2682 unsigned NumInRegs = 0; 2683 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2684 CCValAssign &VA = ArgLocs[i]; 2685 if (!VA.isRegLoc()) 2686 continue; 2687 unsigned Reg = VA.getLocReg(); 2688 switch (Reg) { 2689 default: break; 2690 case X86::EAX: case X86::EDX: case X86::ECX: 2691 if (++NumInRegs == 3) 2692 return false; 2693 break; 2694 } 2695 } 2696 } 2697 } 2698 2699 return true; 2700} 2701 2702FastISel * 2703X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo) const { 2704 return X86::createFastISel(funcInfo); 2705} 2706 2707 2708//===----------------------------------------------------------------------===// 2709// Other Lowering Hooks 2710//===----------------------------------------------------------------------===// 2711 2712static bool MayFoldLoad(SDValue Op) { 2713 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode()); 2714} 2715 2716static bool MayFoldIntoStore(SDValue Op) { 2717 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin()); 2718} 2719 2720static bool isTargetShuffle(unsigned Opcode) { 2721 switch(Opcode) { 2722 default: return false; 2723 case X86ISD::PSHUFD: 2724 case X86ISD::PSHUFHW: 2725 case X86ISD::PSHUFLW: 2726 case X86ISD::SHUFPD: 2727 case X86ISD::PALIGN: 2728 case X86ISD::SHUFPS: 2729 case X86ISD::MOVLHPS: 2730 case X86ISD::MOVLHPD: 2731 case X86ISD::MOVHLPS: 2732 case X86ISD::MOVLPS: 2733 case X86ISD::MOVLPD: 2734 case X86ISD::MOVSHDUP: 2735 case X86ISD::MOVSLDUP: 2736 case X86ISD::MOVDDUP: 2737 case X86ISD::MOVSS: 2738 case X86ISD::MOVSD: 2739 case X86ISD::UNPCKLPS: 2740 case X86ISD::UNPCKLPD: 2741 case X86ISD::VUNPCKLPS: 2742 case X86ISD::VUNPCKLPD: 2743 case X86ISD::VUNPCKLPSY: 2744 case X86ISD::VUNPCKLPDY: 2745 case X86ISD::PUNPCKLWD: 2746 case X86ISD::PUNPCKLBW: 2747 case X86ISD::PUNPCKLDQ: 2748 case X86ISD::PUNPCKLQDQ: 2749 case X86ISD::UNPCKHPS: 2750 case X86ISD::UNPCKHPD: 2751 case X86ISD::PUNPCKHWD: 2752 case X86ISD::PUNPCKHBW: 2753 case X86ISD::PUNPCKHDQ: 2754 case X86ISD::PUNPCKHQDQ: 2755 return true; 2756 } 2757 return false; 2758} 2759 2760static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, 2761 SDValue V1, SelectionDAG &DAG) { 2762 switch(Opc) { 2763 default: llvm_unreachable("Unknown x86 shuffle node"); 2764 case X86ISD::MOVSHDUP: 2765 case X86ISD::MOVSLDUP: 2766 case X86ISD::MOVDDUP: 2767 return DAG.getNode(Opc, dl, VT, V1); 2768 } 2769 2770 return SDValue(); 2771} 2772 2773static SDValue 
getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, 2774 SDValue V1, unsigned TargetMask, SelectionDAG &DAG) { 2775 switch(Opc) { 2776 default: llvm_unreachable("Unknown x86 shuffle node"); 2777 case X86ISD::PSHUFD: 2778 case X86ISD::PSHUFHW: 2779 case X86ISD::PSHUFLW: 2780 return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8)); 2781 } 2782 2783 return SDValue(); 2784} 2785 2786static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, 2787 SDValue V1, SDValue V2, unsigned TargetMask, SelectionDAG &DAG) { 2788 switch(Opc) { 2789 default: llvm_unreachable("Unknown x86 shuffle node"); 2790 case X86ISD::PALIGN: 2791 case X86ISD::SHUFPD: 2792 case X86ISD::SHUFPS: 2793 return DAG.getNode(Opc, dl, VT, V1, V2, 2794 DAG.getConstant(TargetMask, MVT::i8)); 2795 } 2796 return SDValue(); 2797} 2798 2799static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, 2800 SDValue V1, SDValue V2, SelectionDAG &DAG) { 2801 switch(Opc) { 2802 default: llvm_unreachable("Unknown x86 shuffle node"); 2803 case X86ISD::MOVLHPS: 2804 case X86ISD::MOVLHPD: 2805 case X86ISD::MOVHLPS: 2806 case X86ISD::MOVLPS: 2807 case X86ISD::MOVLPD: 2808 case X86ISD::MOVSS: 2809 case X86ISD::MOVSD: 2810 case X86ISD::UNPCKLPS: 2811 case X86ISD::UNPCKLPD: 2812 case X86ISD::VUNPCKLPS: 2813 case X86ISD::VUNPCKLPD: 2814 case X86ISD::VUNPCKLPSY: 2815 case X86ISD::VUNPCKLPDY: 2816 case X86ISD::PUNPCKLWD: 2817 case X86ISD::PUNPCKLBW: 2818 case X86ISD::PUNPCKLDQ: 2819 case X86ISD::PUNPCKLQDQ: 2820 case X86ISD::UNPCKHPS: 2821 case X86ISD::UNPCKHPD: 2822 case X86ISD::PUNPCKHWD: 2823 case X86ISD::PUNPCKHBW: 2824 case X86ISD::PUNPCKHDQ: 2825 case X86ISD::PUNPCKHQDQ: 2826 return DAG.getNode(Opc, dl, VT, V1, V2); 2827 } 2828 return SDValue(); 2829} 2830 2831SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const { 2832 MachineFunction &MF = DAG.getMachineFunction(); 2833 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 2834 int ReturnAddrIndex = FuncInfo->getRAIndex(); 2835 2836 if (ReturnAddrIndex == 0) { 2837 // Set up a frame object for the return address. 2838 uint64_t SlotSize = TD->getPointerSize(); 2839 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize, -SlotSize, 2840 false); 2841 FuncInfo->setRAIndex(ReturnAddrIndex); 2842 } 2843 2844 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy()); 2845} 2846 2847 2848bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M, 2849 bool hasSymbolicDisplacement) { 2850 // Offset should fit into 32 bit immediate field. 2851 if (!isInt<32>(Offset)) 2852 return false; 2853 2854 // If we don't have a symbolic displacement - we don't have any extra 2855 // restrictions. 2856 if (!hasSymbolicDisplacement) 2857 return true; 2858 2859 // FIXME: Some tweaks might be needed for medium code model. 2860 if (M != CodeModel::Small && M != CodeModel::Kernel) 2861 return false; 2862 2863 // For small code model we assume that latest object is 16MB before end of 31 2864 // bits boundary. We may also accept pretty large negative constants knowing 2865 // that all objects are in the positive half of address space. 2866 if (M == CodeModel::Small && Offset < 16*1024*1024) 2867 return true; 2868 2869 // For kernel code model we know that all object resist in the negative half 2870 // of 32bits address space. We may not accept negative offsets, since they may 2871 // be just off and we may accept pretty large positive ones. 
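  // For instance (a sketch, not a documented guarantee): with a kernel-model
  // symbol at 0xffffffff80001000, sym+0x1000 still sign-extends from 32 bits,
  // whereas sym-0x2000 would fall below 0xffffffff80000000 and no longer fit.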
2872 if (M == CodeModel::Kernel && Offset > 0) 2873 return true; 2874 2875 return false; 2876} 2877 2878/// isCalleePop - Determines whether the callee is required to pop its 2879/// own arguments. Callee pop is necessary to support tail calls. 2880bool X86::isCalleePop(CallingConv::ID CallingConv, 2881 bool is64Bit, bool IsVarArg, bool TailCallOpt) { 2882 if (IsVarArg) 2883 return false; 2884 2885 switch (CallingConv) { 2886 default: 2887 return false; 2888 case CallingConv::X86_StdCall: 2889 return !is64Bit; 2890 case CallingConv::X86_FastCall: 2891 return !is64Bit; 2892 case CallingConv::X86_ThisCall: 2893 return !is64Bit; 2894 case CallingConv::Fast: 2895 return TailCallOpt; 2896 case CallingConv::GHC: 2897 return TailCallOpt; 2898 } 2899} 2900 2901/// TranslateX86CC - do a one to one translation of a ISD::CondCode to the X86 2902/// specific condition code, returning the condition code and the LHS/RHS of the 2903/// comparison to make. 2904static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP, 2905 SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) { 2906 if (!isFP) { 2907 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) { 2908 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) { 2909 // X > -1 -> X == 0, jump !sign. 2910 RHS = DAG.getConstant(0, RHS.getValueType()); 2911 return X86::COND_NS; 2912 } else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) { 2913 // X < 0 -> X == 0, jump on sign. 2914 return X86::COND_S; 2915 } else if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) { 2916 // X < 1 -> X <= 0 2917 RHS = DAG.getConstant(0, RHS.getValueType()); 2918 return X86::COND_LE; 2919 } 2920 } 2921 2922 switch (SetCCOpcode) { 2923 default: llvm_unreachable("Invalid integer condition!"); 2924 case ISD::SETEQ: return X86::COND_E; 2925 case ISD::SETGT: return X86::COND_G; 2926 case ISD::SETGE: return X86::COND_GE; 2927 case ISD::SETLT: return X86::COND_L; 2928 case ISD::SETLE: return X86::COND_LE; 2929 case ISD::SETNE: return X86::COND_NE; 2930 case ISD::SETULT: return X86::COND_B; 2931 case ISD::SETUGT: return X86::COND_A; 2932 case ISD::SETULE: return X86::COND_BE; 2933 case ISD::SETUGE: return X86::COND_AE; 2934 } 2935 } 2936 2937 // First determine if it is required or is profitable to flip the operands. 2938 2939 // If LHS is a foldable load, but RHS is not, flip the condition. 
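  // For illustration (hypothetical operands): setolt (load %p), %x becomes
  // setogt %x, (load %p) after the swap below, so the load ends up on the
  // RHS where the x86 compare can fold it as a memory operand.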
2940 if (ISD::isNON_EXTLoad(LHS.getNode()) && 2941 !ISD::isNON_EXTLoad(RHS.getNode())) { 2942 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode); 2943 std::swap(LHS, RHS); 2944 } 2945 2946 switch (SetCCOpcode) { 2947 default: break; 2948 case ISD::SETOLT: 2949 case ISD::SETOLE: 2950 case ISD::SETUGT: 2951 case ISD::SETUGE: 2952 std::swap(LHS, RHS); 2953 break; 2954 } 2955 2956 // On a floating point condition, the flags are set as follows: 2957 // ZF PF CF op 2958 // 0 | 0 | 0 | X > Y 2959 // 0 | 0 | 1 | X < Y 2960 // 1 | 0 | 0 | X == Y 2961 // 1 | 1 | 1 | unordered 2962 switch (SetCCOpcode) { 2963 default: llvm_unreachable("Condcode should be pre-legalized away"); 2964 case ISD::SETUEQ: 2965 case ISD::SETEQ: return X86::COND_E; 2966 case ISD::SETOLT: // flipped 2967 case ISD::SETOGT: 2968 case ISD::SETGT: return X86::COND_A; 2969 case ISD::SETOLE: // flipped 2970 case ISD::SETOGE: 2971 case ISD::SETGE: return X86::COND_AE; 2972 case ISD::SETUGT: // flipped 2973 case ISD::SETULT: 2974 case ISD::SETLT: return X86::COND_B; 2975 case ISD::SETUGE: // flipped 2976 case ISD::SETULE: 2977 case ISD::SETLE: return X86::COND_BE; 2978 case ISD::SETONE: 2979 case ISD::SETNE: return X86::COND_NE; 2980 case ISD::SETUO: return X86::COND_P; 2981 case ISD::SETO: return X86::COND_NP; 2982 case ISD::SETOEQ: 2983 case ISD::SETUNE: return X86::COND_INVALID; 2984 } 2985} 2986 2987/// hasFPCMov - is there a floating point cmov for the specific X86 condition 2988/// code. Current x86 isa includes the following FP cmov instructions: 2989/// fcmovb, fcomvbe, fcomve, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu. 2990static bool hasFPCMov(unsigned X86CC) { 2991 switch (X86CC) { 2992 default: 2993 return false; 2994 case X86::COND_B: 2995 case X86::COND_BE: 2996 case X86::COND_E: 2997 case X86::COND_P: 2998 case X86::COND_A: 2999 case X86::COND_AE: 3000 case X86::COND_NE: 3001 case X86::COND_NP: 3002 return true; 3003 } 3004} 3005 3006/// isFPImmLegal - Returns true if the target can instruction select the 3007/// specified FP immediate natively. If false, the legalizer will 3008/// materialize the FP immediate as a load from a constant pool. 3009bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { 3010 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) { 3011 if (Imm.bitwiseIsEqual(LegalFPImmediates[i])) 3012 return true; 3013 } 3014 return false; 3015} 3016 3017/// isUndefOrInRange - Return true if Val is undef or if its value falls within 3018/// the specified range (L, H]. 3019static bool isUndefOrInRange(int Val, int Low, int Hi) { 3020 return (Val < 0) || (Val >= Low && Val < Hi); 3021} 3022 3023/// isUndefOrEqual - Val is either less than zero (undef) or equal to the 3024/// specified value. 3025static bool isUndefOrEqual(int Val, int CmpVal) { 3026 if (Val < 0 || Val == CmpVal) 3027 return true; 3028 return false; 3029} 3030 3031/// isPSHUFDMask - Return true if the node specifies a shuffle of elements that 3032/// is suitable for input to PSHUFD or PSHUFW. That is, it doesn't reference 3033/// the second operand. 
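/// Illustrative v4i32 examples: <2, 3, 0, 1> is a valid PSHUFD mask, while
/// <0, 1, 4, 5> is not, since elements 4 and 5 reference the second operand.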
3034static bool isPSHUFDMask(const SmallVectorImpl<int> &Mask, EVT VT) { 3035 if (VT == MVT::v4f32 || VT == MVT::v4i32 ) 3036 return (Mask[0] < 4 && Mask[1] < 4 && Mask[2] < 4 && Mask[3] < 4); 3037 if (VT == MVT::v2f64 || VT == MVT::v2i64) 3038 return (Mask[0] < 2 && Mask[1] < 2); 3039 return false; 3040} 3041 3042bool X86::isPSHUFDMask(ShuffleVectorSDNode *N) { 3043 SmallVector<int, 8> M; 3044 N->getMask(M); 3045 return ::isPSHUFDMask(M, N->getValueType(0)); 3046} 3047 3048/// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that 3049/// is suitable for input to PSHUFHW. 3050static bool isPSHUFHWMask(const SmallVectorImpl<int> &Mask, EVT VT) { 3051 if (VT != MVT::v8i16) 3052 return false; 3053 3054 // Lower quadword copied in order or undef. 3055 for (int i = 0; i != 4; ++i) 3056 if (Mask[i] >= 0 && Mask[i] != i) 3057 return false; 3058 3059 // Upper quadword shuffled. 3060 for (int i = 4; i != 8; ++i) 3061 if (Mask[i] >= 0 && (Mask[i] < 4 || Mask[i] > 7)) 3062 return false; 3063 3064 return true; 3065} 3066 3067bool X86::isPSHUFHWMask(ShuffleVectorSDNode *N) { 3068 SmallVector<int, 8> M; 3069 N->getMask(M); 3070 return ::isPSHUFHWMask(M, N->getValueType(0)); 3071} 3072 3073/// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that 3074/// is suitable for input to PSHUFLW. 3075static bool isPSHUFLWMask(const SmallVectorImpl<int> &Mask, EVT VT) { 3076 if (VT != MVT::v8i16) 3077 return false; 3078 3079 // Upper quadword copied in order. 3080 for (int i = 4; i != 8; ++i) 3081 if (Mask[i] >= 0 && Mask[i] != i) 3082 return false; 3083 3084 // Lower quadword shuffled. 3085 for (int i = 0; i != 4; ++i) 3086 if (Mask[i] >= 4) 3087 return false; 3088 3089 return true; 3090} 3091 3092bool X86::isPSHUFLWMask(ShuffleVectorSDNode *N) { 3093 SmallVector<int, 8> M; 3094 N->getMask(M); 3095 return ::isPSHUFLWMask(M, N->getValueType(0)); 3096} 3097 3098/// isPALIGNRMask - Return true if the node specifies a shuffle of elements that 3099/// is suitable for input to PALIGNR. 3100static bool isPALIGNRMask(const SmallVectorImpl<int> &Mask, EVT VT, 3101 bool hasSSSE3) { 3102 int i, e = VT.getVectorNumElements(); 3103 3104 // Do not handle v2i64 / v2f64 shuffles with palignr. 3105 if (e < 4 || !hasSSSE3) 3106 return false; 3107 3108 for (i = 0; i != e; ++i) 3109 if (Mask[i] >= 0) 3110 break; 3111 3112 // All undef, not a palignr. 3113 if (i == e) 3114 return false; 3115 3116 // Determine if it's ok to perform a palignr with only the LHS, since we 3117 // don't have access to the actual shuffle elements to see if RHS is undef. 3118 bool Unary = Mask[i] < (int)e; 3119 bool NeedsUnary = false; 3120 3121 int s = Mask[i] - i; 3122 3123 // Check the rest of the elements to see if they are consecutive. 3124 for (++i; i != e; ++i) { 3125 int m = Mask[i]; 3126 if (m < 0) 3127 continue; 3128 3129 Unary = Unary && (m < (int)e); 3130 NeedsUnary = NeedsUnary || (m < s); 3131 3132 if (NeedsUnary && !Unary) 3133 return false; 3134 if (Unary && m != ((s+i) & (e-1))) 3135 return false; 3136 if (!Unary && m != (s+i)) 3137 return false; 3138 } 3139 return true; 3140} 3141 3142bool X86::isPALIGNRMask(ShuffleVectorSDNode *N) { 3143 SmallVector<int, 8> M; 3144 N->getMask(M); 3145 return ::isPALIGNRMask(M, N->getValueType(0), true); 3146} 3147 3148/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand 3149/// specifies a shuffle of elements that is suitable for input to SHUFP*. 
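/// For example (v4f32): <0, 3, 4, 7> is a valid SHUFPS mask (low half drawn
/// from V1, high half from V2), while the swapped form <4, 5, 0, 1> is
/// handled separately by isCommutedSHUFPMask below.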
3150static bool isSHUFPMask(const SmallVectorImpl<int> &Mask, EVT VT) { 3151 int NumElems = VT.getVectorNumElements(); 3152 if (NumElems != 2 && NumElems != 4) 3153 return false; 3154 3155 int Half = NumElems / 2; 3156 for (int i = 0; i < Half; ++i) 3157 if (!isUndefOrInRange(Mask[i], 0, NumElems)) 3158 return false; 3159 for (int i = Half; i < NumElems; ++i) 3160 if (!isUndefOrInRange(Mask[i], NumElems, NumElems*2)) 3161 return false; 3162 3163 return true; 3164} 3165 3166bool X86::isSHUFPMask(ShuffleVectorSDNode *N) { 3167 SmallVector<int, 8> M; 3168 N->getMask(M); 3169 return ::isSHUFPMask(M, N->getValueType(0)); 3170} 3171 3172/// isCommutedSHUFP - Returns true if the shuffle mask is exactly 3173/// the reverse of what x86 shuffles want. x86 shuffles requires the lower 3174/// half elements to come from vector 1 (which would equal the dest.) and 3175/// the upper half to come from vector 2. 3176static bool isCommutedSHUFPMask(const SmallVectorImpl<int> &Mask, EVT VT) { 3177 int NumElems = VT.getVectorNumElements(); 3178 3179 if (NumElems != 2 && NumElems != 4) 3180 return false; 3181 3182 int Half = NumElems / 2; 3183 for (int i = 0; i < Half; ++i) 3184 if (!isUndefOrInRange(Mask[i], NumElems, NumElems*2)) 3185 return false; 3186 for (int i = Half; i < NumElems; ++i) 3187 if (!isUndefOrInRange(Mask[i], 0, NumElems)) 3188 return false; 3189 return true; 3190} 3191 3192static bool isCommutedSHUFP(ShuffleVectorSDNode *N) { 3193 SmallVector<int, 8> M; 3194 N->getMask(M); 3195 return isCommutedSHUFPMask(M, N->getValueType(0)); 3196} 3197 3198/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand 3199/// specifies a shuffle of elements that is suitable for input to MOVHLPS. 3200bool X86::isMOVHLPSMask(ShuffleVectorSDNode *N) { 3201 if (N->getValueType(0).getVectorNumElements() != 4) 3202 return false; 3203 3204 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3 3205 return isUndefOrEqual(N->getMaskElt(0), 6) && 3206 isUndefOrEqual(N->getMaskElt(1), 7) && 3207 isUndefOrEqual(N->getMaskElt(2), 2) && 3208 isUndefOrEqual(N->getMaskElt(3), 3); 3209} 3210 3211/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form 3212/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef, 3213/// <2, 3, 2, 3> 3214bool X86::isMOVHLPS_v_undef_Mask(ShuffleVectorSDNode *N) { 3215 unsigned NumElems = N->getValueType(0).getVectorNumElements(); 3216 3217 if (NumElems != 4) 3218 return false; 3219 3220 return isUndefOrEqual(N->getMaskElt(0), 2) && 3221 isUndefOrEqual(N->getMaskElt(1), 3) && 3222 isUndefOrEqual(N->getMaskElt(2), 2) && 3223 isUndefOrEqual(N->getMaskElt(3), 3); 3224} 3225 3226/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand 3227/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}. 3228bool X86::isMOVLPMask(ShuffleVectorSDNode *N) { 3229 unsigned NumElems = N->getValueType(0).getVectorNumElements(); 3230 3231 if (NumElems != 2 && NumElems != 4) 3232 return false; 3233 3234 for (unsigned i = 0; i < NumElems/2; ++i) 3235 if (!isUndefOrEqual(N->getMaskElt(i), i + NumElems)) 3236 return false; 3237 3238 for (unsigned i = NumElems/2; i < NumElems; ++i) 3239 if (!isUndefOrEqual(N->getMaskElt(i), i)) 3240 return false; 3241 3242 return true; 3243} 3244 3245/// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand 3246/// specifies a shuffle of elements that is suitable for input to MOVLHPS. 
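/// Concretely, for v4f32 this is the mask <0, 1, 4, 5> (low half of V1
/// followed by the low half of V2), with undef allowed in any position.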
3247bool X86::isMOVLHPSMask(ShuffleVectorSDNode *N) { 3248 unsigned NumElems = N->getValueType(0).getVectorNumElements(); 3249 3250 if ((NumElems != 2 && NumElems != 4) 3251 || N->getValueType(0).getSizeInBits() > 128) 3252 return false; 3253 3254 for (unsigned i = 0; i < NumElems/2; ++i) 3255 if (!isUndefOrEqual(N->getMaskElt(i), i)) 3256 return false; 3257 3258 for (unsigned i = 0; i < NumElems/2; ++i) 3259 if (!isUndefOrEqual(N->getMaskElt(i + NumElems/2), i + NumElems)) 3260 return false; 3261 3262 return true; 3263} 3264 3265/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand 3266/// specifies a shuffle of elements that is suitable for input to UNPCKL. 3267static bool isUNPCKLMask(const SmallVectorImpl<int> &Mask, EVT VT, 3268 bool V2IsSplat = false) { 3269 int NumElts = VT.getVectorNumElements(); 3270 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16) 3271 return false; 3272 3273 // Handle vector lengths > 128 bits. Define a "section" as a set of 3274 // 128 bits. AVX defines UNPCK* to operate independently on 128-bit 3275 // sections. 3276 unsigned NumSections = VT.getSizeInBits() / 128; 3277 if (NumSections == 0 ) NumSections = 1; // Handle MMX 3278 unsigned NumSectionElts = NumElts / NumSections; 3279 3280 unsigned Start = 0; 3281 unsigned End = NumSectionElts; 3282 for (unsigned s = 0; s < NumSections; ++s) { 3283 for (unsigned i = Start, j = s * NumSectionElts; 3284 i != End; 3285 i += 2, ++j) { 3286 int BitI = Mask[i]; 3287 int BitI1 = Mask[i+1]; 3288 if (!isUndefOrEqual(BitI, j)) 3289 return false; 3290 if (V2IsSplat) { 3291 if (!isUndefOrEqual(BitI1, NumElts)) 3292 return false; 3293 } else { 3294 if (!isUndefOrEqual(BitI1, j + NumElts)) 3295 return false; 3296 } 3297 } 3298 // Process the next 128 bits. 3299 Start += NumSectionElts; 3300 End += NumSectionElts; 3301 } 3302 3303 return true; 3304} 3305 3306bool X86::isUNPCKLMask(ShuffleVectorSDNode *N, bool V2IsSplat) { 3307 SmallVector<int, 8> M; 3308 N->getMask(M); 3309 return ::isUNPCKLMask(M, N->getValueType(0), V2IsSplat); 3310} 3311 3312/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand 3313/// specifies a shuffle of elements that is suitable for input to UNPCKH. 3314static bool isUNPCKHMask(const SmallVectorImpl<int> &Mask, EVT VT, 3315 bool V2IsSplat = false) { 3316 int NumElts = VT.getVectorNumElements(); 3317 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16) 3318 return false; 3319 3320 for (int i = 0, j = 0; i != NumElts; i += 2, ++j) { 3321 int BitI = Mask[i]; 3322 int BitI1 = Mask[i+1]; 3323 if (!isUndefOrEqual(BitI, j + NumElts/2)) 3324 return false; 3325 if (V2IsSplat) { 3326 if (isUndefOrEqual(BitI1, NumElts)) 3327 return false; 3328 } else { 3329 if (!isUndefOrEqual(BitI1, j + NumElts/2 + NumElts)) 3330 return false; 3331 } 3332 } 3333 return true; 3334} 3335 3336bool X86::isUNPCKHMask(ShuffleVectorSDNode *N, bool V2IsSplat) { 3337 SmallVector<int, 8> M; 3338 N->getMask(M); 3339 return ::isUNPCKHMask(M, N->getValueType(0), V2IsSplat); 3340} 3341 3342/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form 3343/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef, 3344/// <0, 0, 1, 1> 3345static bool isUNPCKL_v_undef_Mask(const SmallVectorImpl<int> &Mask, EVT VT) { 3346 int NumElems = VT.getVectorNumElements(); 3347 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16) 3348 return false; 3349 3350 // Handle vector lengths > 128 bits. Define a "section" as a set of 3351 // 128 bits. 
AVX defines UNPCK* to operate independently on 128-bit 3352 // sections. 3353 unsigned NumSections = VT.getSizeInBits() / 128; 3354 if (NumSections == 0 ) NumSections = 1; // Handle MMX 3355 unsigned NumSectionElts = NumElems / NumSections; 3356 3357 for (unsigned s = 0; s < NumSections; ++s) { 3358 for (unsigned i = s * NumSectionElts, j = s * NumSectionElts; 3359 i != NumSectionElts * (s + 1); 3360 i += 2, ++j) { 3361 int BitI = Mask[i]; 3362 int BitI1 = Mask[i+1]; 3363 3364 if (!isUndefOrEqual(BitI, j)) 3365 return false; 3366 if (!isUndefOrEqual(BitI1, j)) 3367 return false; 3368 } 3369 } 3370 3371 return true; 3372} 3373 3374bool X86::isUNPCKL_v_undef_Mask(ShuffleVectorSDNode *N) { 3375 SmallVector<int, 8> M; 3376 N->getMask(M); 3377 return ::isUNPCKL_v_undef_Mask(M, N->getValueType(0)); 3378} 3379 3380/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form 3381/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef, 3382/// <2, 2, 3, 3> 3383static bool isUNPCKH_v_undef_Mask(const SmallVectorImpl<int> &Mask, EVT VT) { 3384 int NumElems = VT.getVectorNumElements(); 3385 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16) 3386 return false; 3387 3388 for (int i = 0, j = NumElems / 2; i != NumElems; i += 2, ++j) { 3389 int BitI = Mask[i]; 3390 int BitI1 = Mask[i+1]; 3391 if (!isUndefOrEqual(BitI, j)) 3392 return false; 3393 if (!isUndefOrEqual(BitI1, j)) 3394 return false; 3395 } 3396 return true; 3397} 3398 3399bool X86::isUNPCKH_v_undef_Mask(ShuffleVectorSDNode *N) { 3400 SmallVector<int, 8> M; 3401 N->getMask(M); 3402 return ::isUNPCKH_v_undef_Mask(M, N->getValueType(0)); 3403} 3404 3405/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand 3406/// specifies a shuffle of elements that is suitable for input to MOVSS, 3407/// MOVSD, and MOVD, i.e. setting the lowest element. 3408static bool isMOVLMask(const SmallVectorImpl<int> &Mask, EVT VT) { 3409 if (VT.getVectorElementType().getSizeInBits() < 32) 3410 return false; 3411 3412 int NumElts = VT.getVectorNumElements(); 3413 3414 if (!isUndefOrEqual(Mask[0], NumElts)) 3415 return false; 3416 3417 for (int i = 1; i < NumElts; ++i) 3418 if (!isUndefOrEqual(Mask[i], i)) 3419 return false; 3420 3421 return true; 3422} 3423 3424bool X86::isMOVLMask(ShuffleVectorSDNode *N) { 3425 SmallVector<int, 8> M; 3426 N->getMask(M); 3427 return ::isMOVLMask(M, N->getValueType(0)); 3428} 3429 3430/// isCommutedMOVL - Returns true if the shuffle mask is except the reverse 3431/// of what x86 movss want. X86 movs requires the lowest element to be lowest 3432/// element of vector 2 and the other elements to come from vector 1 in order. 
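/// Illustrative v4i32 masks: <4, 1, 2, 3> is the MOVL form, while
/// <0, 5, 6, 7> is the commuted form accepted here.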
3433static bool isCommutedMOVLMask(const SmallVectorImpl<int> &Mask, EVT VT, 3434 bool V2IsSplat = false, bool V2IsUndef = false) { 3435 int NumOps = VT.getVectorNumElements(); 3436 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16) 3437 return false; 3438 3439 if (!isUndefOrEqual(Mask[0], 0)) 3440 return false; 3441 3442 for (int i = 1; i < NumOps; ++i) 3443 if (!(isUndefOrEqual(Mask[i], i+NumOps) || 3444 (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) || 3445 (V2IsSplat && isUndefOrEqual(Mask[i], NumOps)))) 3446 return false; 3447 3448 return true; 3449} 3450 3451static bool isCommutedMOVL(ShuffleVectorSDNode *N, bool V2IsSplat = false, 3452 bool V2IsUndef = false) { 3453 SmallVector<int, 8> M; 3454 N->getMask(M); 3455 return isCommutedMOVLMask(M, N->getValueType(0), V2IsSplat, V2IsUndef); 3456} 3457 3458/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand 3459/// specifies a shuffle of elements that is suitable for input to MOVSHDUP. 3460bool X86::isMOVSHDUPMask(ShuffleVectorSDNode *N) { 3461 if (N->getValueType(0).getVectorNumElements() != 4) 3462 return false; 3463 3464 // Expect 1, 1, 3, 3 3465 for (unsigned i = 0; i < 2; ++i) { 3466 int Elt = N->getMaskElt(i); 3467 if (Elt >= 0 && Elt != 1) 3468 return false; 3469 } 3470 3471 bool HasHi = false; 3472 for (unsigned i = 2; i < 4; ++i) { 3473 int Elt = N->getMaskElt(i); 3474 if (Elt >= 0 && Elt != 3) 3475 return false; 3476 if (Elt == 3) 3477 HasHi = true; 3478 } 3479 // Don't use movshdup if it can be done with a shufps. 3480 // FIXME: verify that matching u, u, 3, 3 is what we want. 3481 return HasHi; 3482} 3483 3484/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand 3485/// specifies a shuffle of elements that is suitable for input to MOVSLDUP. 3486bool X86::isMOVSLDUPMask(ShuffleVectorSDNode *N) { 3487 if (N->getValueType(0).getVectorNumElements() != 4) 3488 return false; 3489 3490 // Expect 0, 0, 2, 2 3491 for (unsigned i = 0; i < 2; ++i) 3492 if (N->getMaskElt(i) > 0) 3493 return false; 3494 3495 bool HasHi = false; 3496 for (unsigned i = 2; i < 4; ++i) { 3497 int Elt = N->getMaskElt(i); 3498 if (Elt >= 0 && Elt != 2) 3499 return false; 3500 if (Elt == 2) 3501 HasHi = true; 3502 } 3503 // Don't use movsldup if it can be done with a shufps. 3504 return HasHi; 3505} 3506 3507/// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand 3508/// specifies a shuffle of elements that is suitable for input to MOVDDUP. 3509bool X86::isMOVDDUPMask(ShuffleVectorSDNode *N) { 3510 int e = N->getValueType(0).getVectorNumElements() / 2; 3511 3512 for (int i = 0; i < e; ++i) 3513 if (!isUndefOrEqual(N->getMaskElt(i), i)) 3514 return false; 3515 for (int i = 0; i < e; ++i) 3516 if (!isUndefOrEqual(N->getMaskElt(e+i), i)) 3517 return false; 3518 return true; 3519} 3520 3521/// isVEXTRACTF128Index - Return true if the specified 3522/// EXTRACT_SUBVECTOR operand specifies a vector extract that is 3523/// suitable for input to VEXTRACTF128. 3524bool X86::isVEXTRACTF128Index(SDNode *N) { 3525 if (!isa<ConstantSDNode>(N->getOperand(1).getNode())) 3526 return false; 3527 3528 // The index should be aligned on a 128-bit boundary. 
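  // e.g. with 32-bit elements only indices that are multiples of 4
  // (0, 4, ...) are aligned; an index of 2 (bit offset 64) is rejected.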
3529 uint64_t Index = 3530 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue(); 3531 3532 unsigned VL = N->getValueType(0).getVectorNumElements(); 3533 unsigned VBits = N->getValueType(0).getSizeInBits(); 3534 unsigned ElSize = VBits / VL; 3535 bool Result = (Index * ElSize) % 128 == 0; 3536 3537 return Result; 3538} 3539 3540/// isVINSERTF128Index - Return true if the specified INSERT_SUBVECTOR 3541/// operand specifies a subvector insert that is suitable for input to 3542/// VINSERTF128. 3543bool X86::isVINSERTF128Index(SDNode *N) { 3544 if (!isa<ConstantSDNode>(N->getOperand(2).getNode())) 3545 return false; 3546 3547 // The index should be aligned on a 128-bit boundary. 3548 uint64_t Index = 3549 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue(); 3550 3551 unsigned VL = N->getValueType(0).getVectorNumElements(); 3552 unsigned VBits = N->getValueType(0).getSizeInBits(); 3553 unsigned ElSize = VBits / VL; 3554 bool Result = (Index * ElSize) % 128 == 0; 3555 3556 return Result; 3557} 3558 3559/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle 3560/// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions. 3561unsigned X86::getShuffleSHUFImmediate(SDNode *N) { 3562 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 3563 int NumOperands = SVOp->getValueType(0).getVectorNumElements(); 3564 3565 unsigned Shift = (NumOperands == 4) ? 2 : 1; 3566 unsigned Mask = 0; 3567 for (int i = 0; i < NumOperands; ++i) { 3568 int Val = SVOp->getMaskElt(NumOperands-i-1); 3569 if (Val < 0) Val = 0; 3570 if (Val >= NumOperands) Val -= NumOperands; 3571 Mask |= Val; 3572 if (i != NumOperands - 1) 3573 Mask <<= Shift; 3574 } 3575 return Mask; 3576} 3577 3578/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle 3579/// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction. 3580unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) { 3581 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 3582 unsigned Mask = 0; 3583 // 8 nodes, but we only care about the last 4. 3584 for (unsigned i = 7; i >= 4; --i) { 3585 int Val = SVOp->getMaskElt(i); 3586 if (Val >= 0) 3587 Mask |= (Val - 4); 3588 if (i != 4) 3589 Mask <<= 2; 3590 } 3591 return Mask; 3592} 3593 3594/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle 3595/// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction. 3596unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) { 3597 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 3598 unsigned Mask = 0; 3599 // 8 nodes, but we only care about the first 4. 3600 for (int i = 3; i >= 0; --i) { 3601 int Val = SVOp->getMaskElt(i); 3602 if (Val >= 0) 3603 Mask |= Val; 3604 if (i != 0) 3605 Mask <<= 2; 3606 } 3607 return Mask; 3608} 3609 3610/// getShufflePALIGNRImmediate - Return the appropriate immediate to shuffle 3611/// the specified VECTOR_SHUFFLE mask with the PALIGNR instruction. 
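/// The immediate is a byte rotation amount; e.g. a v16i8 mask whose first
/// defined element is 5 at position 0 yields the immediate 5.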
3612unsigned X86::getShufflePALIGNRImmediate(SDNode *N) { 3613 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 3614 EVT VVT = N->getValueType(0); 3615 unsigned EltSize = VVT.getVectorElementType().getSizeInBits() >> 3; 3616 int Val = 0; 3617 3618 unsigned i, e; 3619 for (i = 0, e = VVT.getVectorNumElements(); i != e; ++i) { 3620 Val = SVOp->getMaskElt(i); 3621 if (Val >= 0) 3622 break; 3623 } 3624 return (Val - i) * EltSize; 3625} 3626 3627/// getExtractVEXTRACTF128Immediate - Return the appropriate immediate 3628/// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128 3629/// instructions. 3630unsigned X86::getExtractVEXTRACTF128Immediate(SDNode *N) { 3631 if (!isa<ConstantSDNode>(N->getOperand(1).getNode())) 3632 llvm_unreachable("Illegal extract subvector for VEXTRACTF128"); 3633 3634 uint64_t Index = 3635 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue(); 3636 3637 EVT VecVT = N->getOperand(0).getValueType(); 3638 EVT ElVT = VecVT.getVectorElementType(); 3639 3640 unsigned NumElemsPerChunk = 128 / ElVT.getSizeInBits(); 3641 3642 return Index / NumElemsPerChunk; 3643} 3644 3645/// getInsertVINSERTF128Immediate - Return the appropriate immediate 3646/// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128 3647/// instructions. 3648unsigned X86::getInsertVINSERTF128Immediate(SDNode *N) { 3649 if (!isa<ConstantSDNode>(N->getOperand(2).getNode())) 3650 llvm_unreachable("Illegal insert subvector for VINSERTF128"); 3651 3652 uint64_t Index = 3653 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue(); 3654 3655 EVT VecVT = N->getValueType(0); 3656 EVT ElVT = VecVT.getVectorElementType(); 3657 3658 unsigned NumElemsPerChunk = 128 / ElVT.getSizeInBits(); 3659 3660 return Index / NumElemsPerChunk; 3661} 3662 3663/// isZeroNode - Returns true if Elt is a constant zero or a floating point 3664/// constant +0.0. 3665bool X86::isZeroNode(SDValue Elt) { 3666 return ((isa<ConstantSDNode>(Elt) && 3667 cast<ConstantSDNode>(Elt)->isNullValue()) || 3668 (isa<ConstantFPSDNode>(Elt) && 3669 cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero())); 3670} 3671 3672/// CommuteVectorShuffle - Swap vector_shuffle operands as well as values in 3673/// their permute mask. 3674static SDValue CommuteVectorShuffle(ShuffleVectorSDNode *SVOp, 3675 SelectionDAG &DAG) { 3676 EVT VT = SVOp->getValueType(0); 3677 unsigned NumElems = VT.getVectorNumElements(); 3678 SmallVector<int, 8> MaskVec; 3679 3680 for (unsigned i = 0; i != NumElems; ++i) { 3681 int idx = SVOp->getMaskElt(i); 3682 if (idx < 0) 3683 MaskVec.push_back(idx); 3684 else if (idx < (int)NumElems) 3685 MaskVec.push_back(idx + NumElems); 3686 else 3687 MaskVec.push_back(idx - NumElems); 3688 } 3689 return DAG.getVectorShuffle(VT, SVOp->getDebugLoc(), SVOp->getOperand(1), 3690 SVOp->getOperand(0), &MaskVec[0]); 3691} 3692 3693/// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming 3694/// the two vector operands have swapped position. 3695static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask, EVT VT) { 3696 unsigned NumElems = VT.getVectorNumElements(); 3697 for (unsigned i = 0; i != NumElems; ++i) { 3698 int idx = Mask[i]; 3699 if (idx < 0) 3700 continue; 3701 else if (idx < (int)NumElems) 3702 Mask[i] = idx + NumElems; 3703 else 3704 Mask[i] = idx - NumElems; 3705 } 3706} 3707 3708/// ShouldXformToMOVHLPS - Return true if the node should be transformed to 3709/// match movhlps. 
The lower half elements should come from upper half of 3710/// V1 (and in order), and the upper half elements should come from the upper 3711/// half of V2 (and in order). 3712static bool ShouldXformToMOVHLPS(ShuffleVectorSDNode *Op) { 3713 if (Op->getValueType(0).getVectorNumElements() != 4) 3714 return false; 3715 for (unsigned i = 0, e = 2; i != e; ++i) 3716 if (!isUndefOrEqual(Op->getMaskElt(i), i+2)) 3717 return false; 3718 for (unsigned i = 2; i != 4; ++i) 3719 if (!isUndefOrEqual(Op->getMaskElt(i), i+4)) 3720 return false; 3721 return true; 3722} 3723 3724/// isScalarLoadToVector - Returns true if the node is a scalar load that 3725/// is promoted to a vector. It also returns the LoadSDNode by reference if 3726/// required. 3727static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = NULL) { 3728 if (N->getOpcode() != ISD::SCALAR_TO_VECTOR) 3729 return false; 3730 N = N->getOperand(0).getNode(); 3731 if (!ISD::isNON_EXTLoad(N)) 3732 return false; 3733 if (LD) 3734 *LD = cast<LoadSDNode>(N); 3735 return true; 3736} 3737 3738/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to 3739/// match movlp{s|d}. The lower half elements should come from lower half of 3740/// V1 (and in order), and the upper half elements should come from the upper 3741/// half of V2 (and in order). And since V1 will become the source of the 3742/// MOVLP, it must be either a vector load or a scalar load to vector. 3743static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2, 3744 ShuffleVectorSDNode *Op) { 3745 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1)) 3746 return false; 3747 // Is V2 is a vector load, don't do this transformation. We will try to use 3748 // load folding shufps op. 3749 if (ISD::isNON_EXTLoad(V2)) 3750 return false; 3751 3752 unsigned NumElems = Op->getValueType(0).getVectorNumElements(); 3753 3754 if (NumElems != 2 && NumElems != 4) 3755 return false; 3756 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 3757 if (!isUndefOrEqual(Op->getMaskElt(i), i)) 3758 return false; 3759 for (unsigned i = NumElems/2; i != NumElems; ++i) 3760 if (!isUndefOrEqual(Op->getMaskElt(i), i+NumElems)) 3761 return false; 3762 return true; 3763} 3764 3765/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are 3766/// all the same. 3767static bool isSplatVector(SDNode *N) { 3768 if (N->getOpcode() != ISD::BUILD_VECTOR) 3769 return false; 3770 3771 SDValue SplatValue = N->getOperand(0); 3772 for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i) 3773 if (N->getOperand(i) != SplatValue) 3774 return false; 3775 return true; 3776} 3777 3778/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved 3779/// to an zero vector. 
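/// For example, a shuffle whose mask only selects lanes that are undef or
/// constant zero in the corresponding BUILD_VECTOR operand resolves to zero.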
3780/// FIXME: move to dag combiner / method on ShuffleVectorSDNode 3781static bool isZeroShuffle(ShuffleVectorSDNode *N) { 3782 SDValue V1 = N->getOperand(0); 3783 SDValue V2 = N->getOperand(1); 3784 unsigned NumElems = N->getValueType(0).getVectorNumElements(); 3785 for (unsigned i = 0; i != NumElems; ++i) { 3786 int Idx = N->getMaskElt(i); 3787 if (Idx >= (int)NumElems) { 3788 unsigned Opc = V2.getOpcode(); 3789 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode())) 3790 continue; 3791 if (Opc != ISD::BUILD_VECTOR || 3792 !X86::isZeroNode(V2.getOperand(Idx-NumElems))) 3793 return false; 3794 } else if (Idx >= 0) { 3795 unsigned Opc = V1.getOpcode(); 3796 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode())) 3797 continue; 3798 if (Opc != ISD::BUILD_VECTOR || 3799 !X86::isZeroNode(V1.getOperand(Idx))) 3800 return false; 3801 } 3802 } 3803 return true; 3804} 3805 3806/// getZeroVector - Returns a vector of specified type with all zero elements. 3807/// 3808static SDValue getZeroVector(EVT VT, bool HasSSE2, SelectionDAG &DAG, 3809 DebugLoc dl) { 3810 assert(VT.isVector() && "Expected a vector type"); 3811 3812 // Always build SSE zero vectors as <4 x i32> bitcasted 3813 // to their dest type. This ensures they get CSE'd. 3814 SDValue Vec; 3815 if (VT.getSizeInBits() == 128) { // SSE 3816 if (HasSSE2) { // SSE2 3817 SDValue Cst = DAG.getTargetConstant(0, MVT::i32); 3818 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); 3819 } else { // SSE1 3820 SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32); 3821 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst); 3822 } 3823 } else if (VT.getSizeInBits() == 256) { // AVX 3824 // 256-bit logic and arithmetic instructions in AVX are 3825 // all floating-point, no support for integer ops. Default 3826 // to emitting fp zeroed vectors then. 3827 SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32); 3828 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; 3829 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops, 8); 3830 } 3831 return DAG.getNode(ISD::BITCAST, dl, VT, Vec); 3832} 3833 3834/// getOnesVector - Returns a vector of specified type with all bits set. 3835/// 3836static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) { 3837 assert(VT.isVector() && "Expected a vector type"); 3838 3839 // Always build ones vectors as <4 x i32> or <2 x i32> bitcasted to their dest 3840 // type. This ensures they get CSE'd. 3841 SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32); 3842 SDValue Vec; 3843 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); 3844 return DAG.getNode(ISD::BITCAST, dl, VT, Vec); 3845} 3846 3847 3848/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements 3849/// that point to V2 points to its first element. 
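/// For example (v4i32, V2 a splat): the mask <0, 5, 7, 6> is rewritten to
/// <0, 4, 4, 4>, which is equivalent because every element of V2 is the same.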
3850static SDValue NormalizeMask(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { 3851 EVT VT = SVOp->getValueType(0); 3852 unsigned NumElems = VT.getVectorNumElements(); 3853 3854 bool Changed = false; 3855 SmallVector<int, 8> MaskVec; 3856 SVOp->getMask(MaskVec); 3857 3858 for (unsigned i = 0; i != NumElems; ++i) { 3859 if (MaskVec[i] > (int)NumElems) { 3860 MaskVec[i] = NumElems; 3861 Changed = true; 3862 } 3863 } 3864 if (Changed) 3865 return DAG.getVectorShuffle(VT, SVOp->getDebugLoc(), SVOp->getOperand(0), 3866 SVOp->getOperand(1), &MaskVec[0]); 3867 return SDValue(SVOp, 0); 3868} 3869 3870/// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd 3871/// operation of specified width. 3872static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, 3873 SDValue V2) { 3874 unsigned NumElems = VT.getVectorNumElements(); 3875 SmallVector<int, 8> Mask; 3876 Mask.push_back(NumElems); 3877 for (unsigned i = 1; i != NumElems; ++i) 3878 Mask.push_back(i); 3879 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]); 3880} 3881 3882/// getUnpackl - Returns a vector_shuffle node for an unpackl operation. 3883static SDValue getUnpackl(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, 3884 SDValue V2) { 3885 unsigned NumElems = VT.getVectorNumElements(); 3886 SmallVector<int, 8> Mask; 3887 for (unsigned i = 0, e = NumElems/2; i != e; ++i) { 3888 Mask.push_back(i); 3889 Mask.push_back(i + NumElems); 3890 } 3891 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]); 3892} 3893 3894/// getUnpackhMask - Returns a vector_shuffle node for an unpackh operation. 3895static SDValue getUnpackh(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, 3896 SDValue V2) { 3897 unsigned NumElems = VT.getVectorNumElements(); 3898 unsigned Half = NumElems/2; 3899 SmallVector<int, 8> Mask; 3900 for (unsigned i = 0; i != Half; ++i) { 3901 Mask.push_back(i + Half); 3902 Mask.push_back(i + NumElems + Half); 3903 } 3904 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]); 3905} 3906 3907/// PromoteSplat - Promote a splat of v4i32, v8i16 or v16i8 to v4f32. 3908static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) { 3909 EVT PVT = MVT::v4f32; 3910 EVT VT = SV->getValueType(0); 3911 DebugLoc dl = SV->getDebugLoc(); 3912 SDValue V1 = SV->getOperand(0); 3913 int NumElems = VT.getVectorNumElements(); 3914 int EltNo = SV->getSplatIndex(); 3915 3916 // unpack elements to the correct location 3917 while (NumElems > 4) { 3918 if (EltNo < NumElems/2) { 3919 V1 = getUnpackl(DAG, dl, VT, V1, V1); 3920 } else { 3921 V1 = getUnpackh(DAG, dl, VT, V1, V1); 3922 EltNo -= NumElems/2; 3923 } 3924 NumElems >>= 1; 3925 } 3926 3927 // Perform the splat. 3928 int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo }; 3929 V1 = DAG.getNode(ISD::BITCAST, dl, PVT, V1); 3930 V1 = DAG.getVectorShuffle(PVT, dl, V1, DAG.getUNDEF(PVT), &SplatMask[0]); 3931 return DAG.getNode(ISD::BITCAST, dl, VT, V1); 3932} 3933 3934/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified 3935/// vector of zero or undef vector. This produces a shuffle where the low 3936/// element of V2 is swizzled into the zero/undef vector, landing at element 3937/// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3). 3938static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx, 3939 bool isZero, bool HasSSE2, 3940 SelectionDAG &DAG) { 3941 EVT VT = V2.getValueType(); 3942 SDValue V1 = isZero 3943 ? 
getZeroVector(VT, HasSSE2, DAG, V2.getDebugLoc()) : DAG.getUNDEF(VT); 3944 unsigned NumElems = VT.getVectorNumElements(); 3945 SmallVector<int, 16> MaskVec; 3946 for (unsigned i = 0; i != NumElems; ++i) 3947 // If this is the insertion idx, put the low elt of V2 here. 3948 MaskVec.push_back(i == Idx ? NumElems : i); 3949 return DAG.getVectorShuffle(VT, V2.getDebugLoc(), V1, V2, &MaskVec[0]); 3950} 3951 3952/// getShuffleScalarElt - Returns the scalar element that will make up the ith 3953/// element of the result of the vector shuffle. 3954static SDValue getShuffleScalarElt(SDNode *N, int Index, SelectionDAG &DAG, 3955 unsigned Depth) { 3956 if (Depth == 6) 3957 return SDValue(); // Limit search depth. 3958 3959 SDValue V = SDValue(N, 0); 3960 EVT VT = V.getValueType(); 3961 unsigned Opcode = V.getOpcode(); 3962 3963 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars. 3964 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) { 3965 Index = SV->getMaskElt(Index); 3966 3967 if (Index < 0) 3968 return DAG.getUNDEF(VT.getVectorElementType()); 3969 3970 int NumElems = VT.getVectorNumElements(); 3971 SDValue NewV = (Index < NumElems) ? SV->getOperand(0) : SV->getOperand(1); 3972 return getShuffleScalarElt(NewV.getNode(), Index % NumElems, DAG, Depth+1); 3973 } 3974 3975 // Recurse into target specific vector shuffles to find scalars. 3976 if (isTargetShuffle(Opcode)) { 3977 int NumElems = VT.getVectorNumElements(); 3978 SmallVector<unsigned, 16> ShuffleMask; 3979 SDValue ImmN; 3980 3981 switch(Opcode) { 3982 case X86ISD::SHUFPS: 3983 case X86ISD::SHUFPD: 3984 ImmN = N->getOperand(N->getNumOperands()-1); 3985 DecodeSHUFPSMask(NumElems, 3986 cast<ConstantSDNode>(ImmN)->getZExtValue(), 3987 ShuffleMask); 3988 break; 3989 case X86ISD::PUNPCKHBW: 3990 case X86ISD::PUNPCKHWD: 3991 case X86ISD::PUNPCKHDQ: 3992 case X86ISD::PUNPCKHQDQ: 3993 DecodePUNPCKHMask(NumElems, ShuffleMask); 3994 break; 3995 case X86ISD::UNPCKHPS: 3996 case X86ISD::UNPCKHPD: 3997 DecodeUNPCKHPMask(NumElems, ShuffleMask); 3998 break; 3999 case X86ISD::PUNPCKLBW: 4000 case X86ISD::PUNPCKLWD: 4001 case X86ISD::PUNPCKLDQ: 4002 case X86ISD::PUNPCKLQDQ: 4003 DecodePUNPCKLMask(VT, ShuffleMask); 4004 break; 4005 case X86ISD::UNPCKLPS: 4006 case X86ISD::UNPCKLPD: 4007 case X86ISD::VUNPCKLPS: 4008 case X86ISD::VUNPCKLPD: 4009 case X86ISD::VUNPCKLPSY: 4010 case X86ISD::VUNPCKLPDY: 4011 DecodeUNPCKLPMask(VT, ShuffleMask); 4012 break; 4013 case X86ISD::MOVHLPS: 4014 DecodeMOVHLPSMask(NumElems, ShuffleMask); 4015 break; 4016 case X86ISD::MOVLHPS: 4017 DecodeMOVLHPSMask(NumElems, ShuffleMask); 4018 break; 4019 case X86ISD::PSHUFD: 4020 ImmN = N->getOperand(N->getNumOperands()-1); 4021 DecodePSHUFMask(NumElems, 4022 cast<ConstantSDNode>(ImmN)->getZExtValue(), 4023 ShuffleMask); 4024 break; 4025 case X86ISD::PSHUFHW: 4026 ImmN = N->getOperand(N->getNumOperands()-1); 4027 DecodePSHUFHWMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), 4028 ShuffleMask); 4029 break; 4030 case X86ISD::PSHUFLW: 4031 ImmN = N->getOperand(N->getNumOperands()-1); 4032 DecodePSHUFLWMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), 4033 ShuffleMask); 4034 break; 4035 case X86ISD::MOVSS: 4036 case X86ISD::MOVSD: { 4037 // The index 0 always comes from the first element of the second source, 4038 // this is why MOVSS and MOVSD are used in the first place. The other 4039 // elements come from the other positions of the first source vector. 4040 unsigned OpNum = (Index == 0) ? 
1 : 0; 4041 return getShuffleScalarElt(V.getOperand(OpNum).getNode(), Index, DAG, 4042 Depth+1); 4043 } 4044 default: 4045 assert(0 && "not implemented for target shuffle node"); 4046 return SDValue(); 4047 } 4048 4049 Index = ShuffleMask[Index]; 4050 if (Index < 0) 4051 return DAG.getUNDEF(VT.getVectorElementType()); 4052 4053 SDValue NewV = (Index < NumElems) ? N->getOperand(0) : N->getOperand(1); 4054 return getShuffleScalarElt(NewV.getNode(), Index % NumElems, DAG, 4055 Depth+1); 4056 } 4057 4058 // Actual nodes that may contain scalar elements 4059 if (Opcode == ISD::BITCAST) { 4060 V = V.getOperand(0); 4061 EVT SrcVT = V.getValueType(); 4062 unsigned NumElems = VT.getVectorNumElements(); 4063 4064 if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems) 4065 return SDValue(); 4066 } 4067 4068 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) 4069 return (Index == 0) ? V.getOperand(0) 4070 : DAG.getUNDEF(VT.getVectorElementType()); 4071 4072 if (V.getOpcode() == ISD::BUILD_VECTOR) 4073 return V.getOperand(Index); 4074 4075 return SDValue(); 4076} 4077 4078/// getNumOfConsecutiveZeros - Return the number of consecutive elements of a 4079/// vector shuffle operation that are known to be zero (or undef). The 4080/// search can start in two different directions, from left or right. 4081static 4082unsigned getNumOfConsecutiveZeros(SDNode *N, int NumElems, 4083 bool ZerosFromLeft, SelectionDAG &DAG) { 4084 int i = 0; 4085 4086 while (i < NumElems) { 4087 unsigned Index = ZerosFromLeft ? i : NumElems-i-1; 4088 SDValue Elt = getShuffleScalarElt(N, Index, DAG, 0); 4089 if (!(Elt.getNode() && 4090 (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt)))) 4091 break; 4092 ++i; 4093 } 4094 4095 return i; 4096} 4097 4098/// isShuffleMaskConsecutive - Check if the shuffle mask indices from MaskI to 4099/// MaskE correspond consecutively to elements from one of the vector operands, 4100/// starting from its index OpIdx. Also tell OpNum which source vector operand. 4101static 4102bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp, int MaskI, int MaskE, 4103 int OpIdx, int NumElems, unsigned &OpNum) { 4104 bool SeenV1 = false; 4105 bool SeenV2 = false; 4106 4107 for (int i = MaskI; i <= MaskE; ++i, ++OpIdx) { 4108 int Idx = SVOp->getMaskElt(i); 4109 // Ignore undef indices 4110 if (Idx < 0) 4111 continue; 4112 4113 if (Idx < NumElems) 4114 SeenV1 = true; 4115 else 4116 SeenV2 = true; 4117 4118 // Only accept consecutive elements from the same vector 4119 if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2)) 4120 return false; 4121 } 4122 4123 OpNum = SeenV1 ? 0 : 1; 4124 return true; 4125} 4126 4127/// isVectorShiftRight - Returns true if the shuffle can be implemented as a 4128/// logical right shift of a vector. 4129static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG, 4130 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) { 4131 unsigned NumElems = SVOp->getValueType(0).getVectorNumElements(); 4132 unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems, 4133 false /* check zeros from right */, DAG); 4134 unsigned OpSrc; 4135 4136 if (!NumZeros) 4137 return false; 4138 4139 // Considering the elements in the mask that are not consecutive zeros, 4140 // check if they consecutively come from only one of the source vectors.
4141 // 4142 // V1 = {X, A, B, C} 0 4143 // \ \ \ / 4144 // vector_shuffle V1, V2 <1, 2, 3, X> 4145 // 4146 if (!isShuffleMaskConsecutive(SVOp, 4147 0, // Mask Start Index 4148 NumElems-NumZeros-1, // Mask End Index 4149 NumZeros, // Where to start looking in the src vector 4150 NumElems, // Number of elements in vector 4151 OpSrc)) // Which source operand ? 4152 return false; 4153 4154 isLeft = false; 4155 ShAmt = NumZeros; 4156 ShVal = SVOp->getOperand(OpSrc); 4157 return true; 4158} 4159 4160/// isVectorShiftLeft - Returns true if the shuffle can be implemented as a 4161/// logical left shift of a vector. 4162static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG, 4163 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) { 4164 unsigned NumElems = SVOp->getValueType(0).getVectorNumElements(); 4165 unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems, 4166 true /* check zeros from left */, DAG); 4167 unsigned OpSrc; 4168 4169 if (!NumZeros) 4170 return false; 4171 4172 // Considering the elements in the mask that are not consecutive zeros, 4173 // check if they consecutively come from only one of the source vectors. 4174 // 4175 // 0 { A, B, X, X } = V2 4176 // / \ / / 4177 // vector_shuffle V1, V2 <X, X, 4, 5> 4178 // 4179 if (!isShuffleMaskConsecutive(SVOp, 4180 NumZeros, // Mask Start Index 4181 NumElems-1, // Mask End Index 4182 0, // Where to start looking in the src vector 4183 NumElems, // Number of elements in vector 4184 OpSrc)) // Which source operand ? 4185 return false; 4186 4187 isLeft = true; 4188 ShAmt = NumZeros; 4189 ShVal = SVOp->getOperand(OpSrc); 4190 return true; 4191} 4192 4193/// isVectorShift - Returns true if the shuffle can be implemented as a 4194/// logical left or right shift of a vector. 4195static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG, 4196 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) { 4197 if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) || 4198 isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt)) 4199 return true; 4200 4201 return false; 4202} 4203 4204/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8. 
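/// In outline: adjacent non-zero bytes are zero-extended to i16, the odd
/// byte is shifted into the high half and OR'd with the even byte, and each
/// pair is inserted into a v8i16 that is finally bitcast back to v16i8.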
4205/// 4206static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros, 4207 unsigned NumNonZero, unsigned NumZero, 4208 SelectionDAG &DAG, 4209 const TargetLowering &TLI) { 4210 if (NumNonZero > 8) 4211 return SDValue(); 4212 4213 DebugLoc dl = Op.getDebugLoc(); 4214 SDValue V(0, 0); 4215 bool First = true; 4216 for (unsigned i = 0; i < 16; ++i) { 4217 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0; 4218 if (ThisIsNonZero && First) { 4219 if (NumZero) 4220 V = getZeroVector(MVT::v8i16, true, DAG, dl); 4221 else 4222 V = DAG.getUNDEF(MVT::v8i16); 4223 First = false; 4224 } 4225 4226 if ((i & 1) != 0) { 4227 SDValue ThisElt(0, 0), LastElt(0, 0); 4228 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0; 4229 if (LastIsNonZero) { 4230 LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl, 4231 MVT::i16, Op.getOperand(i-1)); 4232 } 4233 if (ThisIsNonZero) { 4234 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i)); 4235 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16, 4236 ThisElt, DAG.getConstant(8, MVT::i8)); 4237 if (LastIsNonZero) 4238 ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt); 4239 } else 4240 ThisElt = LastElt; 4241 4242 if (ThisElt.getNode()) 4243 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt, 4244 DAG.getIntPtrConstant(i/2)); 4245 } 4246 } 4247 4248 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V); 4249} 4250 4251/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16. 4252/// 4253static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros, 4254 unsigned NumNonZero, unsigned NumZero, 4255 SelectionDAG &DAG, 4256 const TargetLowering &TLI) { 4257 if (NumNonZero > 4) 4258 return SDValue(); 4259 4260 DebugLoc dl = Op.getDebugLoc(); 4261 SDValue V(0, 0); 4262 bool First = true; 4263 for (unsigned i = 0; i < 8; ++i) { 4264 bool isNonZero = (NonZeros & (1 << i)) != 0; 4265 if (isNonZero) { 4266 if (First) { 4267 if (NumZero) 4268 V = getZeroVector(MVT::v8i16, true, DAG, dl); 4269 else 4270 V = DAG.getUNDEF(MVT::v8i16); 4271 First = false; 4272 } 4273 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, 4274 MVT::v8i16, V, Op.getOperand(i), 4275 DAG.getIntPtrConstant(i)); 4276 } 4277 } 4278 4279 return V; 4280} 4281 4282/// getVShift - Return a vector logical shift node. 4283/// 4284static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, 4285 unsigned NumBits, SelectionDAG &DAG, 4286 const TargetLowering &TLI, DebugLoc dl) { 4287 EVT ShVT = MVT::v2i64; 4288 unsigned Opc = isLeft ? X86ISD::VSHL : X86ISD::VSRL; 4289 SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp); 4290 return DAG.getNode(ISD::BITCAST, dl, VT, 4291 DAG.getNode(Opc, dl, ShVT, SrcOp, 4292 DAG.getConstant(NumBits, 4293 TLI.getShiftAmountTy(SrcOp.getValueType())))); 4294} 4295 4296SDValue 4297X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl, 4298 SelectionDAG &DAG) const { 4299 4300 // Check if the scalar load can be widened into a vector load. And if 4301 // the address is "base + cst" see if the cst can be "absorbed" into 4302 // the shuffle mask. 
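  // Sketch with hypothetical values: a 4-byte load from base+8, where base
  // is 16-byte aligned, can be widened to a 16-byte load from base and the
  // splat performed with the shuffle mask <2, 2, 2, 2>.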
4303 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) { 4304 SDValue Ptr = LD->getBasePtr(); 4305 if (!ISD::isNormalLoad(LD) || LD->isVolatile()) 4306 return SDValue(); 4307 EVT PVT = LD->getValueType(0); 4308 if (PVT != MVT::i32 && PVT != MVT::f32) 4309 return SDValue(); 4310 4311 int FI = -1; 4312 int64_t Offset = 0; 4313 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) { 4314 FI = FINode->getIndex(); 4315 Offset = 0; 4316 } else if (DAG.isBaseWithConstantOffset(Ptr) && 4317 isa<FrameIndexSDNode>(Ptr.getOperand(0))) { 4318 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 4319 Offset = Ptr.getConstantOperandVal(1); 4320 Ptr = Ptr.getOperand(0); 4321 } else { 4322 return SDValue(); 4323 } 4324 4325 SDValue Chain = LD->getChain(); 4326 // Make sure the stack object alignment is at least 16. 4327 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 4328 if (DAG.InferPtrAlignment(Ptr) < 16) { 4329 if (MFI->isFixedObjectIndex(FI)) { 4330 // Can't change the alignment. FIXME: It's possible to compute 4331 // the exact stack offset and reference FI + adjust offset instead. 4332 // If someone *really* cares about this. That's the way to implement it. 4333 return SDValue(); 4334 } else { 4335 MFI->setObjectAlignment(FI, 16); 4336 } 4337 } 4338 4339 // (Offset % 16) must be multiple of 4. Then address is then 4340 // Ptr + (Offset & ~15). 4341 if (Offset < 0) 4342 return SDValue(); 4343 if ((Offset % 16) & 3) 4344 return SDValue(); 4345 int64_t StartOffset = Offset & ~15; 4346 if (StartOffset) 4347 Ptr = DAG.getNode(ISD::ADD, Ptr.getDebugLoc(), Ptr.getValueType(), 4348 Ptr,DAG.getConstant(StartOffset, Ptr.getValueType())); 4349 4350 int EltNo = (Offset - StartOffset) >> 2; 4351 int Mask[4] = { EltNo, EltNo, EltNo, EltNo }; 4352 EVT VT = (PVT == MVT::i32) ? MVT::v4i32 : MVT::v4f32; 4353 SDValue V1 = DAG.getLoad(VT, dl, Chain, Ptr, 4354 LD->getPointerInfo().getWithOffset(StartOffset), 4355 false, false, 0); 4356 // Canonicalize it to a v4i32 shuffle. 4357 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 4358 return DAG.getNode(ISD::BITCAST, dl, VT, 4359 DAG.getVectorShuffle(MVT::v4i32, dl, V1, 4360 DAG.getUNDEF(MVT::v4i32),&Mask[0])); 4361 } 4362 4363 return SDValue(); 4364} 4365 4366/// EltsFromConsecutiveLoads - Given the initializing elements 'Elts' of a 4367/// vector of type 'VT', see if the elements can be replaced by a single large 4368/// load which has the same value as a build_vector whose operands are 'elts'. 4369/// 4370/// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a 4371/// 4372/// FIXME: we'd also like to handle the case where the last elements are zero 4373/// rather than undef via VZEXT_LOAD, but we do not detect that case today. 4374/// There's even a handy isZeroNode for that purpose. 4375static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts, 4376 DebugLoc &DL, SelectionDAG &DAG) { 4377 EVT EltVT = VT.getVectorElementType(); 4378 unsigned NumElems = Elts.size(); 4379 4380 LoadSDNode *LDBase = NULL; 4381 unsigned LastLoadedElt = -1U; 4382 4383 // For each element in the initializer, see if we've found a load or an undef. 4384 // If we don't find an initial load element, or later load elements are 4385 // non-consecutive, bail out. 
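  // e.g. <load [p], load [p+4], load [p+8], load [p+12]> becomes one wide
  // load, while <load [p], load [p+4], undef, undef> becomes a VZEXT_LOAD
  // (p is a hypothetical base pointer).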
4386 for (unsigned i = 0; i < NumElems; ++i) { 4387 SDValue Elt = Elts[i]; 4388 4389 if (!Elt.getNode() || 4390 (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode()))) 4391 return SDValue(); 4392 if (!LDBase) { 4393 if (Elt.getNode()->getOpcode() == ISD::UNDEF) 4394 return SDValue(); 4395 LDBase = cast<LoadSDNode>(Elt.getNode()); 4396 LastLoadedElt = i; 4397 continue; 4398 } 4399 if (Elt.getOpcode() == ISD::UNDEF) 4400 continue; 4401 4402 LoadSDNode *LD = cast<LoadSDNode>(Elt); 4403 if (!DAG.isConsecutiveLoad(LD, LDBase, EltVT.getSizeInBits()/8, i)) 4404 return SDValue(); 4405 LastLoadedElt = i; 4406 } 4407 4408 // If we have found an entire vector of loads and undefs, then return a large 4409 // load of the entire vector width starting at the base pointer. If we found 4410 // consecutive loads for the low half, generate a vzext_load node. 4411 if (LastLoadedElt == NumElems - 1) { 4412 if (DAG.InferPtrAlignment(LDBase->getBasePtr()) >= 16) 4413 return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(), 4414 LDBase->getPointerInfo(), 4415 LDBase->isVolatile(), LDBase->isNonTemporal(), 0); 4416 return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(), 4417 LDBase->getPointerInfo(), 4418 LDBase->isVolatile(), LDBase->isNonTemporal(), 4419 LDBase->getAlignment()); 4420 } else if (NumElems == 4 && LastLoadedElt == 1) { 4421 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other); 4422 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() }; 4423 SDValue ResNode = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, 4424 Ops, 2, MVT::i32, 4425 LDBase->getMemOperand()); 4426 return DAG.getNode(ISD::BITCAST, DL, VT, ResNode); 4427 } 4428 return SDValue(); 4429} 4430 4431SDValue 4432X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { 4433 DebugLoc dl = Op.getDebugLoc(); 4434 4435 EVT VT = Op.getValueType(); 4436 EVT ExtVT = VT.getVectorElementType(); 4437 4438 unsigned NumElems = Op.getNumOperands(); 4439 4440 // For AVX-length vectors, build the individual 128-bit pieces and 4441 // use shuffles to put them in place. 4442 if (VT.getSizeInBits() > 256 && 4443 Subtarget->hasAVX() && 4444 !ISD::isBuildVectorAllZeros(Op.getNode())) { 4445 SmallVector<SDValue, 8> V; 4446 V.resize(NumElems); 4447 for (unsigned i = 0; i < NumElems; ++i) { 4448 V[i] = Op.getOperand(i); 4449 } 4450 4451 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2); 4452 4453 // Build the lower subvector. 4454 SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT, &V[0], NumElems/2); 4455 // Build the upper subvector. 4456 SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT, &V[NumElems / 2], 4457 NumElems/2); 4458 4459 return ConcatVectors(Lower, Upper, DAG); 4460 } 4461 4462 // All zero's are handled with pxor in SSE2 and above, xorps in SSE1. 4463 // All one's are handled with pcmpeqd. In AVX, zero's are handled with 4464 // vpxor in 128-bit and xor{pd,ps} in 256-bit, but no 256 version of pcmpeqd 4465 // is present, so AllOnes is ignored. 4466 if (ISD::isBuildVectorAllZeros(Op.getNode()) || 4467 (Op.getValueType().getSizeInBits() != 256 && 4468 ISD::isBuildVectorAllOnes(Op.getNode()))) { 4469 // Canonicalize this to <4 x i32> (SSE) to 4470 // 1) ensure the zero vectors are CSE'd, and 2) ensure that i64 scalars are 4471 // eliminated on x86-32 hosts. 
4472 if (Op.getValueType() == MVT::v4i32) 4473 return Op; 4474 4475 if (ISD::isBuildVectorAllOnes(Op.getNode())) 4476 return getOnesVector(Op.getValueType(), DAG, dl); 4477 return getZeroVector(Op.getValueType(), Subtarget->hasSSE2(), DAG, dl); 4478 } 4479 4480 unsigned EVTBits = ExtVT.getSizeInBits(); 4481 4482 unsigned NumZero = 0; 4483 unsigned NumNonZero = 0; 4484 unsigned NonZeros = 0; 4485 bool IsAllConstants = true; 4486 SmallSet<SDValue, 8> Values; 4487 for (unsigned i = 0; i < NumElems; ++i) { 4488 SDValue Elt = Op.getOperand(i); 4489 if (Elt.getOpcode() == ISD::UNDEF) 4490 continue; 4491 Values.insert(Elt); 4492 if (Elt.getOpcode() != ISD::Constant && 4493 Elt.getOpcode() != ISD::ConstantFP) 4494 IsAllConstants = false; 4495 if (X86::isZeroNode(Elt)) 4496 NumZero++; 4497 else { 4498 NonZeros |= (1 << i); 4499 NumNonZero++; 4500 } 4501 } 4502 4503 // All undef vector. Return an UNDEF. All zero vectors were handled above. 4504 if (NumNonZero == 0) 4505 return DAG.getUNDEF(VT); 4506 4507 // Special case for single non-zero, non-undef, element. 4508 if (NumNonZero == 1) { 4509 unsigned Idx = CountTrailingZeros_32(NonZeros); 4510 SDValue Item = Op.getOperand(Idx); 4511 4512 // If this is an insertion of an i64 value on x86-32, and if the top bits of 4513 // the value are obviously zero, truncate the value to i32 and do the 4514 // insertion that way. Only do this if the value is non-constant or if the 4515 // value is a constant being inserted into element 0. It is cheaper to do 4516 // a constant pool load than it is to do a movd + shuffle. 4517 if (ExtVT == MVT::i64 && !Subtarget->is64Bit() && 4518 (!IsAllConstants || Idx == 0)) { 4519 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) { 4520 // Handle SSE only. 4521 assert(VT == MVT::v2i64 && "Expected an SSE value type!"); 4522 EVT VecVT = MVT::v4i32; 4523 unsigned VecElts = 4; 4524 4525 // Truncate the value (which may itself be a constant) to i32, and 4526 // convert it to a vector with movd (S2V+shuffle to zero extend). 4527 Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item); 4528 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item); 4529 Item = getShuffleVectorZeroOrUndef(Item, 0, true, 4530 Subtarget->hasSSE2(), DAG); 4531 4532 // Now we have our 32-bit value zero extended in the low element of 4533 // a vector. If Idx != 0, swizzle it into place. 4534 if (Idx != 0) { 4535 SmallVector<int, 4> Mask; 4536 Mask.push_back(Idx); 4537 for (unsigned i = 1; i != VecElts; ++i) 4538 Mask.push_back(i); 4539 Item = DAG.getVectorShuffle(VecVT, dl, Item, 4540 DAG.getUNDEF(Item.getValueType()), 4541 &Mask[0]); 4542 } 4543 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Item); 4544 } 4545 } 4546 4547 // If we have a constant or non-constant insertion into the low element of 4548 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into 4549 // the rest of the elements. This will be matched as movd/movq/movss/movsd 4550 // depending on what the source datatype is. 4551 if (Idx == 0) { 4552 if (NumZero == 0) { 4553 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); 4554 } else if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 || 4555 (ExtVT == MVT::i64 && Subtarget->is64Bit())) { 4556 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); 4557 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector. 
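        // e.g. build_vector (f32 %x, 0.0, 0.0, 0.0) becomes a movss of %x
        // into a zeroed register via the zero/undef shuffle mask <4, 1, 2, 3>.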
4558 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget->hasSSE2(), 4559 DAG); 4560 } else if (ExtVT == MVT::i16 || ExtVT == MVT::i8) { 4561 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item); 4562 assert(VT.getSizeInBits() == 128 && "Expected an SSE value type!"); 4563 EVT MiddleVT = MVT::v4i32; 4564 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MiddleVT, Item); 4565 Item = getShuffleVectorZeroOrUndef(Item, 0, true, 4566 Subtarget->hasSSE2(), DAG); 4567 return DAG.getNode(ISD::BITCAST, dl, VT, Item); 4568 } 4569 } 4570 4571 // Is it a vector logical left shift? 4572 if (NumElems == 2 && Idx == 1 && 4573 X86::isZeroNode(Op.getOperand(0)) && 4574 !X86::isZeroNode(Op.getOperand(1))) { 4575 unsigned NumBits = VT.getSizeInBits(); 4576 return getVShift(true, VT, 4577 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 4578 VT, Op.getOperand(1)), 4579 NumBits/2, DAG, *this, dl); 4580 } 4581 4582 if (IsAllConstants) // Otherwise, it's better to do a constpool load. 4583 return SDValue(); 4584 4585 // Otherwise, if this is a vector with i32 or f32 elements, and the element 4586 // is a non-constant being inserted into an element other than the low one, 4587 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka 4588 // movd/movss) to move this into the low element, then shuffle it into 4589 // place. 4590 if (EVTBits == 32) { 4591 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); 4592 4593 // Turn it into a shuffle of zero and zero-extended scalar to vector. 4594 Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, 4595 Subtarget->hasSSE2(), DAG); 4596 SmallVector<int, 8> MaskVec; 4597 for (unsigned i = 0; i < NumElems; i++) 4598 MaskVec.push_back(i == Idx ? 0 : 1); 4599 return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]); 4600 } 4601 } 4602 4603 // Splat is obviously ok. Let legalizer expand it to a shuffle. 4604 if (Values.size() == 1) { 4605 if (EVTBits == 32) { 4606 // Instead of a shuffle like this: 4607 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0> 4608 // Check if it's possible to issue this instead. 4609 // shuffle (vload ptr)), undef, <1, 1, 1, 1> 4610 unsigned Idx = CountTrailingZeros_32(NonZeros); 4611 SDValue Item = Op.getOperand(Idx); 4612 if (Op.getNode()->isOnlyUserOf(Item.getNode())) 4613 return LowerAsSplatVectorLoad(Item, VT, dl, DAG); 4614 } 4615 return SDValue(); 4616 } 4617 4618 // A vector full of immediates; various special cases are already 4619 // handled, so this is best done with a single constant-pool load. 4620 if (IsAllConstants) 4621 return SDValue(); 4622 4623 // Let legalizer expand 2-wide build_vectors. 4624 if (EVTBits == 64) { 4625 if (NumNonZero == 1) { 4626 // One half is zero or undef. 4627 unsigned Idx = CountTrailingZeros_32(NonZeros); 4628 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, 4629 Op.getOperand(Idx)); 4630 return getShuffleVectorZeroOrUndef(V2, Idx, true, 4631 Subtarget->hasSSE2(), DAG); 4632 } 4633 return SDValue(); 4634 } 4635 4636 // If element VT is < 32 bits, convert it to inserts into a zero vector. 4637 if (EVTBits == 8 && NumElems == 16) { 4638 SDValue V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG, 4639 *this); 4640 if (V.getNode()) return V; 4641 } 4642 4643 if (EVTBits == 16 && NumElems == 8) { 4644 SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG, 4645 *this); 4646 if (V.getNode()) return V; 4647 } 4648 4649 // If element VT is == 32 bits, turn it into a number of shuffles. 
4650 SmallVector<SDValue, 8> V; 4651 V.resize(NumElems); 4652 if (NumElems == 4 && NumZero > 0) { 4653 for (unsigned i = 0; i < 4; ++i) { 4654 bool isZero = !(NonZeros & (1 << i)); 4655 if (isZero) 4656 V[i] = getZeroVector(VT, Subtarget->hasSSE2(), DAG, dl); 4657 else 4658 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i)); 4659 } 4660 4661 for (unsigned i = 0; i < 2; ++i) { 4662 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) { 4663 default: break; 4664 case 0: 4665 V[i] = V[i*2]; // Must be a zero vector. 4666 break; 4667 case 1: 4668 V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]); 4669 break; 4670 case 2: 4671 V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]); 4672 break; 4673 case 3: 4674 V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]); 4675 break; 4676 } 4677 } 4678 4679 SmallVector<int, 8> MaskVec; 4680 bool Reverse = (NonZeros & 0x3) == 2; 4681 for (unsigned i = 0; i < 2; ++i) 4682 MaskVec.push_back(Reverse ? 1-i : i); 4683 Reverse = ((NonZeros & (0x3 << 2)) >> 2) == 2; 4684 for (unsigned i = 0; i < 2; ++i) 4685 MaskVec.push_back(Reverse ? 1-i+NumElems : i+NumElems); 4686 return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]); 4687 } 4688 4689 if (Values.size() > 1 && VT.getSizeInBits() == 128) { 4690 // Check for a build vector of consecutive loads. 4691 for (unsigned i = 0; i < NumElems; ++i) 4692 V[i] = Op.getOperand(i); 4693 4694 // Check for elements which are consecutive loads. 4695 SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG); 4696 if (LD.getNode()) 4697 return LD; 4698 4699 // For SSE 4.1, use insertps to put the high elements into the low element. 4700 if (getSubtarget()->hasSSE41()) { 4701 SDValue Result; 4702 if (Op.getOperand(0).getOpcode() != ISD::UNDEF) 4703 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0)); 4704 else 4705 Result = DAG.getUNDEF(VT); 4706 4707 for (unsigned i = 1; i < NumElems; ++i) { 4708 if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue; 4709 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result, 4710 Op.getOperand(i), DAG.getIntPtrConstant(i)); 4711 } 4712 return Result; 4713 } 4714 4715 // Otherwise, expand into a number of unpckl*, start by extending each of 4716 // our (non-undef) elements to the full vector width with the element in the 4717 // bottom slot of the vector (which generates no code for SSE). 4718 for (unsigned i = 0; i < NumElems; ++i) { 4719 if (Op.getOperand(i).getOpcode() != ISD::UNDEF) 4720 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i)); 4721 else 4722 V[i] = DAG.getUNDEF(VT); 4723 } 4724 4725 // Next, we iteratively mix elements, e.g. for v4f32: 4726 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0> 4727 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1> 4728 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0> 4729 unsigned EltStride = NumElems >> 1; 4730 while (EltStride != 0) { 4731 for (unsigned i = 0; i < EltStride; ++i) { 4732 // If V[i+EltStride] is undef and this is the first round of mixing, 4733 // then it is safe to just drop this shuffle: V[i] is already in the 4734 // right place, the one element (since it's the first round) being 4735 // inserted as undef can be dropped. This isn't safe for successive 4736 // rounds because they will permute elements within both vectors. 
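      // For example, when building <a, b, undef, undef>: in the first round
      // (EltStride == 2) both V[2] and V[3] are undef, so V[0] and V[1] are
      // left untouched and only the final unpcklps of V[0] and V[1] is
      // emitted.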
4737 if (V[i+EltStride].getOpcode() == ISD::UNDEF && 4738 EltStride == NumElems/2) 4739 continue; 4740 4741 V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]); 4742 } 4743 EltStride >>= 1; 4744 } 4745 return V[0]; 4746 } 4747 return SDValue(); 4748} 4749 4750SDValue 4751X86TargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const { 4752 // We support concatenate two MMX registers and place them in a MMX 4753 // register. This is better than doing a stack convert. 4754 DebugLoc dl = Op.getDebugLoc(); 4755 EVT ResVT = Op.getValueType(); 4756 assert(Op.getNumOperands() == 2); 4757 assert(ResVT == MVT::v2i64 || ResVT == MVT::v4i32 || 4758 ResVT == MVT::v8i16 || ResVT == MVT::v16i8); 4759 int Mask[2]; 4760 SDValue InVec = DAG.getNode(ISD::BITCAST,dl, MVT::v1i64, Op.getOperand(0)); 4761 SDValue VecOp = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64, InVec); 4762 InVec = Op.getOperand(1); 4763 if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR) { 4764 unsigned NumElts = ResVT.getVectorNumElements(); 4765 VecOp = DAG.getNode(ISD::BITCAST, dl, ResVT, VecOp); 4766 VecOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ResVT, VecOp, 4767 InVec.getOperand(0), DAG.getIntPtrConstant(NumElts/2+1)); 4768 } else { 4769 InVec = DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, InVec); 4770 SDValue VecOp2 = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64, InVec); 4771 Mask[0] = 0; Mask[1] = 2; 4772 VecOp = DAG.getVectorShuffle(MVT::v2i64, dl, VecOp, VecOp2, Mask); 4773 } 4774 return DAG.getNode(ISD::BITCAST, dl, ResVT, VecOp); 4775} 4776 4777// v8i16 shuffles - Prefer shuffles in the following order: 4778// 1. [all] pshuflw, pshufhw, optional move 4779// 2. [ssse3] 1 x pshufb 4780// 3. [ssse3] 2 x pshufb + 1 x por 4781// 4. [all] mov + pshuflw + pshufhw + N x (pextrw + pinsrw) 4782SDValue 4783X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op, 4784 SelectionDAG &DAG) const { 4785 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 4786 SDValue V1 = SVOp->getOperand(0); 4787 SDValue V2 = SVOp->getOperand(1); 4788 DebugLoc dl = SVOp->getDebugLoc(); 4789 SmallVector<int, 8> MaskVals; 4790 4791 // Determine if more than 1 of the words in each of the low and high quadwords 4792 // of the result come from the same quadword of one of the two inputs. Undef 4793 // mask values count as coming from any quadword, for better codegen. 4794 SmallVector<unsigned, 4> LoQuad(4); 4795 SmallVector<unsigned, 4> HiQuad(4); 4796 BitVector InputQuads(4); 4797 for (unsigned i = 0; i < 8; ++i) { 4798 SmallVectorImpl<unsigned> &Quad = i < 4 ? LoQuad : HiQuad; 4799 int EltIdx = SVOp->getMaskElt(i); 4800 MaskVals.push_back(EltIdx); 4801 if (EltIdx < 0) { 4802 ++Quad[0]; 4803 ++Quad[1]; 4804 ++Quad[2]; 4805 ++Quad[3]; 4806 continue; 4807 } 4808 ++Quad[EltIdx / 4]; 4809 InputQuads.set(EltIdx / 4); 4810 } 4811 4812 int BestLoQuad = -1; 4813 unsigned MaxQuad = 1; 4814 for (unsigned i = 0; i < 4; ++i) { 4815 if (LoQuad[i] > MaxQuad) { 4816 BestLoQuad = i; 4817 MaxQuad = LoQuad[i]; 4818 } 4819 } 4820 4821 int BestHiQuad = -1; 4822 MaxQuad = 1; 4823 for (unsigned i = 0; i < 4; ++i) { 4824 if (HiQuad[i] > MaxQuad) { 4825 BestHiQuad = i; 4826 MaxQuad = HiQuad[i]; 4827 } 4828 } 4829 4830 // For SSSE3, If all 8 words of the result come from only 1 quadword of each 4831 // of the two input vectors, shuffle them into one input vector so only a 4832 // single pshufb instruction is necessary. If There are more than 2 input 4833 // quads, disable the next transformation since it does not help SSSE3. 
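  // For example, the v8i16 mask <3,2,1,0,15,14,13,12> uses only quadword 0
  // of V1 and quadword 3 of V2. The v2i64 shuffle built below packs those
  // two quadwords into one register and rewrites the mask so that a single
  // pshufb can produce the final result.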
4834 bool V1Used = InputQuads[0] || InputQuads[1]; 4835 bool V2Used = InputQuads[2] || InputQuads[3]; 4836 if (Subtarget->hasSSSE3()) { 4837 if (InputQuads.count() == 2 && V1Used && V2Used) { 4838 BestLoQuad = InputQuads.find_first(); 4839 BestHiQuad = InputQuads.find_next(BestLoQuad); 4840 } 4841 if (InputQuads.count() > 2) { 4842 BestLoQuad = -1; 4843 BestHiQuad = -1; 4844 } 4845 } 4846 4847 // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update 4848 // the shuffle mask. If a quad is scored as -1, that means that it contains 4849 // words from all 4 input quadwords. 4850 SDValue NewV; 4851 if (BestLoQuad >= 0 || BestHiQuad >= 0) { 4852 SmallVector<int, 8> MaskV; 4853 MaskV.push_back(BestLoQuad < 0 ? 0 : BestLoQuad); 4854 MaskV.push_back(BestHiQuad < 0 ? 1 : BestHiQuad); 4855 NewV = DAG.getVectorShuffle(MVT::v2i64, dl, 4856 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1), 4857 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]); 4858 NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV); 4859 4860 // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the 4861 // source words for the shuffle, to aid later transformations. 4862 bool AllWordsInNewV = true; 4863 bool InOrder[2] = { true, true }; 4864 for (unsigned i = 0; i != 8; ++i) { 4865 int idx = MaskVals[i]; 4866 if (idx != (int)i) 4867 InOrder[i/4] = false; 4868 if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad) 4869 continue; 4870 AllWordsInNewV = false; 4871 break; 4872 } 4873 4874 bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV; 4875 if (AllWordsInNewV) { 4876 for (int i = 0; i != 8; ++i) { 4877 int idx = MaskVals[i]; 4878 if (idx < 0) 4879 continue; 4880 idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4; 4881 if ((idx != i) && idx < 4) 4882 pshufhw = false; 4883 if ((idx != i) && idx > 3) 4884 pshuflw = false; 4885 } 4886 V1 = NewV; 4887 V2Used = false; 4888 BestLoQuad = 0; 4889 BestHiQuad = 1; 4890 } 4891 4892 // If we've eliminated the use of V2, and the new mask is a pshuflw or 4893 // pshufhw, that's as cheap as it gets. Return the new shuffle. 4894 if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) { 4895 unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW; 4896 unsigned TargetMask = 0; 4897 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, 4898 DAG.getUNDEF(MVT::v8i16), &MaskVals[0]); 4899 TargetMask = pshufhw ? X86::getShufflePSHUFHWImmediate(NewV.getNode()): 4900 X86::getShufflePSHUFLWImmediate(NewV.getNode()); 4901 V1 = NewV.getOperand(0); 4902 return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG); 4903 } 4904 } 4905 4906 // If we have SSSE3, and all words of the result are from 1 input vector, 4907 // case 2 is generated, otherwise case 3 is generated. If no SSSE3 4908 // is present, fall back to case 4. 4909 if (Subtarget->hasSSSE3()) { 4910 SmallVector<SDValue,16> pshufbMask; 4911 4912 // If we have elements from both input vectors, set the high bit of the 4913 // shuffle mask element to zero out elements that come from V2 in the V1 4914 // mask, and elements that come from V1 in the V2 mask, so that the two 4915 // results can be OR'd together. 
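    // For example, with the word mask <0, 9, 2, 11, 4, 13, 6, 15> the byte
    // mask built for V1 is
    //   <0,1, 0x80,0x80, 4,5, 0x80,0x80, 8,9, 0x80,0x80, 12,13, 0x80,0x80>
    // and the mask for V2 sets 0x80 in the complementary byte positions, so
    // OR'ing the two pshufb results yields the interleaved vector.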
4916 bool TwoInputs = V1Used && V2Used; 4917 for (unsigned i = 0; i != 8; ++i) { 4918 int EltIdx = MaskVals[i] * 2; 4919 if (TwoInputs && (EltIdx >= 16)) { 4920 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); 4921 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); 4922 continue; 4923 } 4924 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8)); 4925 pshufbMask.push_back(DAG.getConstant(EltIdx+1, MVT::i8)); 4926 } 4927 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V1); 4928 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1, 4929 DAG.getNode(ISD::BUILD_VECTOR, dl, 4930 MVT::v16i8, &pshufbMask[0], 16)); 4931 if (!TwoInputs) 4932 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 4933 4934 // Calculate the shuffle mask for the second input, shuffle it, and 4935 // OR it with the first shuffled input. 4936 pshufbMask.clear(); 4937 for (unsigned i = 0; i != 8; ++i) { 4938 int EltIdx = MaskVals[i] * 2; 4939 if (EltIdx < 16) { 4940 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); 4941 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); 4942 continue; 4943 } 4944 pshufbMask.push_back(DAG.getConstant(EltIdx - 16, MVT::i8)); 4945 pshufbMask.push_back(DAG.getConstant(EltIdx - 15, MVT::i8)); 4946 } 4947 V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V2); 4948 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2, 4949 DAG.getNode(ISD::BUILD_VECTOR, dl, 4950 MVT::v16i8, &pshufbMask[0], 16)); 4951 V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2); 4952 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 4953 } 4954 4955 // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order, 4956 // and update MaskVals with new element order. 4957 BitVector InOrder(8); 4958 if (BestLoQuad >= 0) { 4959 SmallVector<int, 8> MaskV; 4960 for (int i = 0; i != 4; ++i) { 4961 int idx = MaskVals[i]; 4962 if (idx < 0) { 4963 MaskV.push_back(-1); 4964 InOrder.set(i); 4965 } else if ((idx / 4) == BestLoQuad) { 4966 MaskV.push_back(idx & 3); 4967 InOrder.set(i); 4968 } else { 4969 MaskV.push_back(-1); 4970 } 4971 } 4972 for (unsigned i = 4; i != 8; ++i) 4973 MaskV.push_back(i); 4974 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16), 4975 &MaskV[0]); 4976 4977 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3()) 4978 NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16, 4979 NewV.getOperand(0), 4980 X86::getShufflePSHUFLWImmediate(NewV.getNode()), 4981 DAG); 4982 } 4983 4984 // If BestHi >= 0, generate a pshufhw to put the high elements in order, 4985 // and update MaskVals with the new element order. 
4986 if (BestHiQuad >= 0) { 4987 SmallVector<int, 8> MaskV; 4988 for (unsigned i = 0; i != 4; ++i) 4989 MaskV.push_back(i); 4990 for (unsigned i = 4; i != 8; ++i) { 4991 int idx = MaskVals[i]; 4992 if (idx < 0) { 4993 MaskV.push_back(-1); 4994 InOrder.set(i); 4995 } else if ((idx / 4) == BestHiQuad) { 4996 MaskV.push_back((idx & 3) + 4); 4997 InOrder.set(i); 4998 } else { 4999 MaskV.push_back(-1); 5000 } 5001 } 5002 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16), 5003 &MaskV[0]); 5004 5005 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3()) 5006 NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16, 5007 NewV.getOperand(0), 5008 X86::getShufflePSHUFHWImmediate(NewV.getNode()), 5009 DAG); 5010 } 5011 5012 // In case BestHi & BestLo were both -1, which means each quadword has a word 5013 // from each of the four input quadwords, calculate the InOrder bitvector now 5014 // before falling through to the insert/extract cleanup. 5015 if (BestLoQuad == -1 && BestHiQuad == -1) { 5016 NewV = V1; 5017 for (int i = 0; i != 8; ++i) 5018 if (MaskVals[i] < 0 || MaskVals[i] == i) 5019 InOrder.set(i); 5020 } 5021 5022 // The other elements are put in the right place using pextrw and pinsrw. 5023 for (unsigned i = 0; i != 8; ++i) { 5024 if (InOrder[i]) 5025 continue; 5026 int EltIdx = MaskVals[i]; 5027 if (EltIdx < 0) 5028 continue; 5029 SDValue ExtOp = (EltIdx < 8) 5030 ? DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1, 5031 DAG.getIntPtrConstant(EltIdx)) 5032 : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2, 5033 DAG.getIntPtrConstant(EltIdx - 8)); 5034 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp, 5035 DAG.getIntPtrConstant(i)); 5036 } 5037 return NewV; 5038} 5039 5040// v16i8 shuffles - Prefer shuffles in the following order: 5041// 1. [ssse3] 1 x pshufb 5042// 2. [ssse3] 2 x pshufb + 1 x por 5043// 3. [all] v8i16 shuffle + N x pextrw + rotate + pinsrw 5044static 5045SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp, 5046 SelectionDAG &DAG, 5047 const X86TargetLowering &TLI) { 5048 SDValue V1 = SVOp->getOperand(0); 5049 SDValue V2 = SVOp->getOperand(1); 5050 DebugLoc dl = SVOp->getDebugLoc(); 5051 SmallVector<int, 16> MaskVals; 5052 SVOp->getMask(MaskVals); 5053 5054 // If we have SSSE3, case 1 is generated when all result bytes come from 5055 // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is 5056 // present, fall back to case 3. 5057 // FIXME: kill V2Only once shuffles are canonizalized by getNode. 5058 bool V1Only = true; 5059 bool V2Only = true; 5060 for (unsigned i = 0; i < 16; ++i) { 5061 int EltIdx = MaskVals[i]; 5062 if (EltIdx < 0) 5063 continue; 5064 if (EltIdx < 16) 5065 V2Only = false; 5066 else 5067 V1Only = false; 5068 } 5069 5070 // If SSSE3, use 1 pshufb instruction per vector with elements in the result. 5071 if (TLI.getSubtarget()->hasSSSE3()) { 5072 SmallVector<SDValue,16> pshufbMask; 5073 5074 // If all result elements are from one input vector, then only translate 5075 // undef mask values to 0x80 (zero out result) in the pshufb mask. 5076 // 5077 // Otherwise, we have elements from both input vectors, and must zero out 5078 // elements that come from V2 in the first mask, and V1 in the second mask 5079 // so that we can OR them together. 
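    // Any byte whose pshufb mask value has the high bit set (0x80) is
    // written as zero in the result, which is what allows the two partially
    // shuffled inputs to be combined with a plain OR below.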
5080 bool TwoInputs = !(V1Only || V2Only); 5081 for (unsigned i = 0; i != 16; ++i) { 5082 int EltIdx = MaskVals[i]; 5083 if (EltIdx < 0 || (TwoInputs && EltIdx >= 16)) { 5084 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); 5085 continue; 5086 } 5087 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8)); 5088 } 5089 // If all the elements are from V2, assign it to V1 and return after 5090 // building the first pshufb. 5091 if (V2Only) 5092 V1 = V2; 5093 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1, 5094 DAG.getNode(ISD::BUILD_VECTOR, dl, 5095 MVT::v16i8, &pshufbMask[0], 16)); 5096 if (!TwoInputs) 5097 return V1; 5098 5099 // Calculate the shuffle mask for the second input, shuffle it, and 5100 // OR it with the first shuffled input. 5101 pshufbMask.clear(); 5102 for (unsigned i = 0; i != 16; ++i) { 5103 int EltIdx = MaskVals[i]; 5104 if (EltIdx < 16) { 5105 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); 5106 continue; 5107 } 5108 pshufbMask.push_back(DAG.getConstant(EltIdx - 16, MVT::i8)); 5109 } 5110 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2, 5111 DAG.getNode(ISD::BUILD_VECTOR, dl, 5112 MVT::v16i8, &pshufbMask[0], 16)); 5113 return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2); 5114 } 5115 5116 // No SSSE3 - Calculate in place words and then fix all out of place words 5117 // With 0-16 extracts & inserts. Worst case is 16 bytes out of order from 5118 // the 16 different words that comprise the two doublequadword input vectors. 5119 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 5120 V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2); 5121 SDValue NewV = V2Only ? V2 : V1; 5122 for (int i = 0; i != 8; ++i) { 5123 int Elt0 = MaskVals[i*2]; 5124 int Elt1 = MaskVals[i*2+1]; 5125 5126 // This word of the result is all undef, skip it. 5127 if (Elt0 < 0 && Elt1 < 0) 5128 continue; 5129 5130 // This word of the result is already in the correct place, skip it. 5131 if (V1Only && (Elt0 == i*2) && (Elt1 == i*2+1)) 5132 continue; 5133 if (V2Only && (Elt0 == i*2+16) && (Elt1 == i*2+17)) 5134 continue; 5135 5136 SDValue Elt0Src = Elt0 < 16 ? V1 : V2; 5137 SDValue Elt1Src = Elt1 < 16 ? V1 : V2; 5138 SDValue InsElt; 5139 5140 // If Elt0 and Elt1 are defined, are consecutive, and can be load 5141 // using a single extract together, load it and store it. 5142 if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) { 5143 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src, 5144 DAG.getIntPtrConstant(Elt1 / 2)); 5145 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt, 5146 DAG.getIntPtrConstant(i)); 5147 continue; 5148 } 5149 5150 // If Elt1 is defined, extract it from the appropriate source. If the 5151 // source byte is not also odd, shift the extracted word left 8 bits 5152 // otherwise clear the bottom 8 bits if we need to do an or. 5153 if (Elt1 >= 0) { 5154 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src, 5155 DAG.getIntPtrConstant(Elt1 / 2)); 5156 if ((Elt1 & 1) == 0) 5157 InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt, 5158 DAG.getConstant(8, 5159 TLI.getShiftAmountTy(InsElt.getValueType()))); 5160 else if (Elt0 >= 0) 5161 InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt, 5162 DAG.getConstant(0xFF00, MVT::i16)); 5163 } 5164 // If Elt0 is defined, extract it from the appropriate source. If the 5165 // source byte is not also even, shift the extracted word right 8 bits. If 5166 // Elt1 was also defined, OR the extracted values together before 5167 // inserting them in the result. 
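    // For example, if Elt0 is byte 5 of its source, word 2 (bytes 4-5) is
    // extracted and shifted right by 8 so that byte 5 lands in the low half
    // of the word; if Elt1 was also defined, the two halves are OR'd before
    // the insert.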
5168 if (Elt0 >= 0) { 5169 SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, 5170 Elt0Src, DAG.getIntPtrConstant(Elt0 / 2)); 5171 if ((Elt0 & 1) != 0) 5172 InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0, 5173 DAG.getConstant(8, 5174 TLI.getShiftAmountTy(InsElt0.getValueType()))); 5175 else if (Elt1 >= 0) 5176 InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0, 5177 DAG.getConstant(0x00FF, MVT::i16)); 5178 InsElt = Elt1 >= 0 ? DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0) 5179 : InsElt0; 5180 } 5181 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt, 5182 DAG.getIntPtrConstant(i)); 5183 } 5184 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV); 5185} 5186 5187/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide 5188/// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be 5189/// done when every pair / quad of shuffle mask elements point to elements in 5190/// the right sequence. e.g. 5191/// vector_shuffle X, Y, <2, 3, | 10, 11, | 0, 1, | 14, 15> 5192static 5193SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp, 5194 SelectionDAG &DAG, DebugLoc dl) { 5195 EVT VT = SVOp->getValueType(0); 5196 SDValue V1 = SVOp->getOperand(0); 5197 SDValue V2 = SVOp->getOperand(1); 5198 unsigned NumElems = VT.getVectorNumElements(); 5199 unsigned NewWidth = (NumElems == 4) ? 2 : 4; 5200 EVT NewVT; 5201 switch (VT.getSimpleVT().SimpleTy) { 5202 default: assert(false && "Unexpected!"); 5203 case MVT::v4f32: NewVT = MVT::v2f64; break; 5204 case MVT::v4i32: NewVT = MVT::v2i64; break; 5205 case MVT::v8i16: NewVT = MVT::v4i32; break; 5206 case MVT::v16i8: NewVT = MVT::v4i32; break; 5207 } 5208 5209 int Scale = NumElems / NewWidth; 5210 SmallVector<int, 8> MaskVec; 5211 for (unsigned i = 0; i < NumElems; i += Scale) { 5212 int StartIdx = -1; 5213 for (int j = 0; j < Scale; ++j) { 5214 int EltIdx = SVOp->getMaskElt(i+j); 5215 if (EltIdx < 0) 5216 continue; 5217 if (StartIdx == -1) 5218 StartIdx = EltIdx - (EltIdx % Scale); 5219 if (EltIdx != StartIdx + j) 5220 return SDValue(); 5221 } 5222 if (StartIdx == -1) 5223 MaskVec.push_back(-1); 5224 else 5225 MaskVec.push_back(StartIdx / Scale); 5226 } 5227 5228 V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, V1); 5229 V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, V2); 5230 return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]); 5231} 5232 5233/// getVZextMovL - Return a zero-extending vector move low node. 5234/// 5235static SDValue getVZextMovL(EVT VT, EVT OpVT, 5236 SDValue SrcOp, SelectionDAG &DAG, 5237 const X86Subtarget *Subtarget, DebugLoc dl) { 5238 if (VT == MVT::v2f64 || VT == MVT::v4f32) { 5239 LoadSDNode *LD = NULL; 5240 if (!isScalarLoadToVector(SrcOp.getNode(), &LD)) 5241 LD = dyn_cast<LoadSDNode>(SrcOp); 5242 if (!LD) { 5243 // movssrr and movsdrr do not clear top bits. Try to use movd, movq 5244 // instead. 5245 MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32; 5246 if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) && 5247 SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR && 5248 SrcOp.getOperand(0).getOpcode() == ISD::BITCAST && 5249 SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) { 5250 // PR2108 5251 OpVT = (OpVT == MVT::v2f64) ? 
MVT::v2i64 : MVT::v4i32; 5252 return DAG.getNode(ISD::BITCAST, dl, VT, 5253 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT, 5254 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 5255 OpVT, 5256 SrcOp.getOperand(0) 5257 .getOperand(0)))); 5258 } 5259 } 5260 } 5261 5262 return DAG.getNode(ISD::BITCAST, dl, VT, 5263 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT, 5264 DAG.getNode(ISD::BITCAST, dl, 5265 OpVT, SrcOp))); 5266} 5267 5268/// LowerVECTOR_SHUFFLE_4wide - Handle all 4 wide cases with a number of 5269/// shuffles. 5270static SDValue 5271LowerVECTOR_SHUFFLE_4wide(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { 5272 SDValue V1 = SVOp->getOperand(0); 5273 SDValue V2 = SVOp->getOperand(1); 5274 DebugLoc dl = SVOp->getDebugLoc(); 5275 EVT VT = SVOp->getValueType(0); 5276 5277 SmallVector<std::pair<int, int>, 8> Locs; 5278 Locs.resize(4); 5279 SmallVector<int, 8> Mask1(4U, -1); 5280 SmallVector<int, 8> PermMask; 5281 SVOp->getMask(PermMask); 5282 5283 unsigned NumHi = 0; 5284 unsigned NumLo = 0; 5285 for (unsigned i = 0; i != 4; ++i) { 5286 int Idx = PermMask[i]; 5287 if (Idx < 0) { 5288 Locs[i] = std::make_pair(-1, -1); 5289 } else { 5290 assert(Idx < 8 && "Invalid VECTOR_SHUFFLE index!"); 5291 if (Idx < 4) { 5292 Locs[i] = std::make_pair(0, NumLo); 5293 Mask1[NumLo] = Idx; 5294 NumLo++; 5295 } else { 5296 Locs[i] = std::make_pair(1, NumHi); 5297 if (2+NumHi < 4) 5298 Mask1[2+NumHi] = Idx; 5299 NumHi++; 5300 } 5301 } 5302 } 5303 5304 if (NumLo <= 2 && NumHi <= 2) { 5305 // If no more than two elements come from either vector. This can be 5306 // implemented with two shuffles. First shuffle gather the elements. 5307 // The second shuffle, which takes the first shuffle as both of its 5308 // vector operands, put the elements into the right order. 5309 V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]); 5310 5311 SmallVector<int, 8> Mask2(4U, -1); 5312 5313 for (unsigned i = 0; i != 4; ++i) { 5314 if (Locs[i].first == -1) 5315 continue; 5316 else { 5317 unsigned Idx = (i < 2) ? 0 : 4; 5318 Idx += Locs[i].first * 2 + Locs[i].second; 5319 Mask2[i] = Idx; 5320 } 5321 } 5322 5323 return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]); 5324 } else if (NumLo == 3 || NumHi == 3) { 5325 // Otherwise, we must have three elements from one vector, call it X, and 5326 // one element from the other, call it Y. First, use a shufps to build an 5327 // intermediate vector with the one element from Y and the element from X 5328 // that will be in the same half in the final destination (the indexes don't 5329 // matter). Then, use a shufps to build the final vector, taking the half 5330 // containing the element from Y from the intermediate, and the other half 5331 // from X. 5332 if (NumHi == 3) { 5333 // Normalize it so the 3 elements come from V1. 5334 CommuteVectorShuffleMask(PermMask, VT); 5335 std::swap(V1, V2); 5336 } 5337 5338 // Find the element from V2. 5339 unsigned HiIndex; 5340 for (HiIndex = 0; HiIndex < 3; ++HiIndex) { 5341 int Val = PermMask[HiIndex]; 5342 if (Val < 0) 5343 continue; 5344 if (Val >= 4) 5345 break; 5346 } 5347 5348 Mask1[0] = PermMask[HiIndex]; 5349 Mask1[1] = -1; 5350 Mask1[2] = PermMask[HiIndex^1]; 5351 Mask1[3] = -1; 5352 V2 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]); 5353 5354 if (HiIndex >= 2) { 5355 Mask1[0] = PermMask[0]; 5356 Mask1[1] = PermMask[1]; 5357 Mask1[2] = HiIndex & 1 ? 6 : 4; 5358 Mask1[3] = HiIndex & 1 ? 4 : 6; 5359 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]); 5360 } else { 5361 Mask1[0] = HiIndex & 1 ? 2 : 0; 5362 Mask1[1] = HiIndex & 1 ? 
0 : 2; 5363 Mask1[2] = PermMask[2]; 5364 Mask1[3] = PermMask[3]; 5365 if (Mask1[2] >= 0) 5366 Mask1[2] += 4; 5367 if (Mask1[3] >= 0) 5368 Mask1[3] += 4; 5369 return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]); 5370 } 5371 } 5372 5373 // Break it into (shuffle shuffle_hi, shuffle_lo). 5374 Locs.clear(); 5375 Locs.resize(4); 5376 SmallVector<int,8> LoMask(4U, -1); 5377 SmallVector<int,8> HiMask(4U, -1); 5378 5379 SmallVector<int,8> *MaskPtr = &LoMask; 5380 unsigned MaskIdx = 0; 5381 unsigned LoIdx = 0; 5382 unsigned HiIdx = 2; 5383 for (unsigned i = 0; i != 4; ++i) { 5384 if (i == 2) { 5385 MaskPtr = &HiMask; 5386 MaskIdx = 1; 5387 LoIdx = 0; 5388 HiIdx = 2; 5389 } 5390 int Idx = PermMask[i]; 5391 if (Idx < 0) { 5392 Locs[i] = std::make_pair(-1, -1); 5393 } else if (Idx < 4) { 5394 Locs[i] = std::make_pair(MaskIdx, LoIdx); 5395 (*MaskPtr)[LoIdx] = Idx; 5396 LoIdx++; 5397 } else { 5398 Locs[i] = std::make_pair(MaskIdx, HiIdx); 5399 (*MaskPtr)[HiIdx] = Idx; 5400 HiIdx++; 5401 } 5402 } 5403 5404 SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]); 5405 SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]); 5406 SmallVector<int, 8> MaskOps; 5407 for (unsigned i = 0; i != 4; ++i) { 5408 if (Locs[i].first == -1) { 5409 MaskOps.push_back(-1); 5410 } else { 5411 unsigned Idx = Locs[i].first * 4 + Locs[i].second; 5412 MaskOps.push_back(Idx); 5413 } 5414 } 5415 return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]); 5416} 5417 5418static bool MayFoldVectorLoad(SDValue V) { 5419 if (V.hasOneUse() && V.getOpcode() == ISD::BITCAST) 5420 V = V.getOperand(0); 5421 if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR) 5422 V = V.getOperand(0); 5423 if (MayFoldLoad(V)) 5424 return true; 5425 return false; 5426} 5427 5428// FIXME: the version above should always be used. Since there's 5429// a bug where several vector shuffles can't be folded because the 5430// DAG is not updated during lowering and a node claims to have two 5431// uses while it only has one, use this version, and let isel match 5432// another instruction if the load really happens to have more than 5433// one use. Remove this version after this bug get fixed. 5434// rdar://8434668, PR8156 5435static bool RelaxedMayFoldVectorLoad(SDValue V) { 5436 if (V.hasOneUse() && V.getOpcode() == ISD::BITCAST) 5437 V = V.getOperand(0); 5438 if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR) 5439 V = V.getOperand(0); 5440 if (ISD::isNormalLoad(V.getNode())) 5441 return true; 5442 return false; 5443} 5444 5445/// CanFoldShuffleIntoVExtract - Check if the current shuffle is used by 5446/// a vector extract, and if both can be later optimized into a single load. 5447/// This is done in visitEXTRACT_VECTOR_ELT and the conditions are checked 5448/// here because otherwise a target specific shuffle node is going to be 5449/// emitted for this shuffle, and the optimization not done. 5450/// FIXME: This is probably not the best approach, but fix the problem 5451/// until the right path is decided. 
5452static
5453bool CanXFormVExtractWithShuffleIntoLoad(SDValue V, SelectionDAG &DAG,
5454                                          const TargetLowering &TLI) {
5455  EVT VT = V.getValueType();
5456  ShuffleVectorSDNode *SVOp = dyn_cast<ShuffleVectorSDNode>(V);
5457
5458  // Be sure that the vector shuffle is present in a pattern like this:
5459  // (vextract (v4f32 shuffle (load $addr), <1,u,u,u>), c) -> (f32 load $addr)
5460  if (!V.hasOneUse())
5461    return false;
5462
5463  SDNode *N = *V.getNode()->use_begin();
5464  if (N->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
5465    return false;
5466
5467  SDValue EltNo = N->getOperand(1);
5468  if (!isa<ConstantSDNode>(EltNo))
5469    return false;
5470
5471  // If the bit convert changed the number of elements, it is unsafe
5472  // to examine the mask.
5473  bool HasShuffleIntoBitcast = false;
5474  if (V.getOpcode() == ISD::BITCAST) {
5475    EVT SrcVT = V.getOperand(0).getValueType();
5476    if (SrcVT.getVectorNumElements() != VT.getVectorNumElements())
5477      return false;
5478    V = V.getOperand(0);
5479    HasShuffleIntoBitcast = true;
5480  }
5481
5482  // Select the input vector, guarding against an out of range extract index.
5483  unsigned NumElems = VT.getVectorNumElements();
5484  unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
5485  int Idx = (Elt > NumElems) ? -1 : SVOp->getMaskElt(Elt);
5486  V = (Idx < (int)NumElems) ? V.getOperand(0) : V.getOperand(1);
5487
5488  // Skip one more bit_convert if necessary.
5489  if (V.getOpcode() == ISD::BITCAST)
5490    V = V.getOperand(0);
5491
5492  if (ISD::isNormalLoad(V.getNode())) {
5493    // Is the original load suitable?
5494    LoadSDNode *LN0 = cast<LoadSDNode>(V);
5495
5496    // FIXME: avoid the multi-use bug that is preventing lots of
5497    // foldings from being detected; this is still wrong of course, but
5498    // it gives the temporary desired behavior, and if the load really
5499    // has more uses, it will not be folded during isel and will
5500    // generate poor code.
5501    if (!LN0 || LN0->isVolatile()) // || !LN0->hasOneUse()
5502      return false;
5503
5504    if (!HasShuffleIntoBitcast)
5505      return true;
5506
5507    // If there's a bitcast before the shuffle, check if the load type and
5508    // alignment are valid.
5509    unsigned Align = LN0->getAlignment();
5510    unsigned NewAlign =
5511      TLI.getTargetData()->getABITypeAlignment(
5512                                  VT.getTypeForEVT(*DAG.getContext()));
5513
5514    if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VT))
5515      return false;
5516  }
5517
5518  return true;
5519}
5520
5521static
5522SDValue getMOVDDup(SDValue &Op, DebugLoc &dl, SDValue V1, SelectionDAG &DAG) {
5523  EVT VT = Op.getValueType();
5524
5525  // Canonicalize to v2f64.
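  // For example, a v4f32 shuffle with mask <0,1,0,1> is viewed here as a
  // v2f64 splat of element 0, matched as a single MOVDDUP, and the result
  // is bitcast back to the original type.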
5526 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1); 5527 return DAG.getNode(ISD::BITCAST, dl, VT, 5528 getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64, 5529 V1, DAG)); 5530} 5531 5532static 5533SDValue getMOVLowToHigh(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, 5534 bool HasSSE2) { 5535 SDValue V1 = Op.getOperand(0); 5536 SDValue V2 = Op.getOperand(1); 5537 EVT VT = Op.getValueType(); 5538 5539 assert(VT != MVT::v2i64 && "unsupported shuffle type"); 5540 5541 if (HasSSE2 && VT == MVT::v2f64) 5542 return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG); 5543 5544 // v4f32 or v4i32 5545 return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V2, DAG); 5546} 5547 5548static 5549SDValue getMOVHighToLow(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG) { 5550 SDValue V1 = Op.getOperand(0); 5551 SDValue V2 = Op.getOperand(1); 5552 EVT VT = Op.getValueType(); 5553 5554 assert((VT == MVT::v4i32 || VT == MVT::v4f32) && 5555 "unsupported shuffle type"); 5556 5557 if (V2.getOpcode() == ISD::UNDEF) 5558 V2 = V1; 5559 5560 // v4i32 or v4f32 5561 return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG); 5562} 5563 5564static 5565SDValue getMOVLP(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, bool HasSSE2) { 5566 SDValue V1 = Op.getOperand(0); 5567 SDValue V2 = Op.getOperand(1); 5568 EVT VT = Op.getValueType(); 5569 unsigned NumElems = VT.getVectorNumElements(); 5570 5571 // Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second 5572 // operand of these instructions is only memory, so check if there's a 5573 // potencial load folding here, otherwise use SHUFPS or MOVSD to match the 5574 // same masks. 5575 bool CanFoldLoad = false; 5576 5577 // Trivial case, when V2 comes from a load. 5578 if (MayFoldVectorLoad(V2)) 5579 CanFoldLoad = true; 5580 5581 // When V1 is a load, it can be folded later into a store in isel, example: 5582 // (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1) 5583 // turns into: 5584 // (MOVLPSmr addr:$src1, VR128:$src2) 5585 // So, recognize this potential and also use MOVLPS or MOVLPD 5586 if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op)) 5587 CanFoldLoad = true; 5588 5589 // Both of them can't be memory operations though. 5590 if (MayFoldVectorLoad(V1) && MayFoldVectorLoad(V2)) 5591 CanFoldLoad = false; 5592 5593 if (CanFoldLoad) { 5594 if (HasSSE2 && NumElems == 2) 5595 return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG); 5596 5597 if (NumElems == 4) 5598 return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG); 5599 } 5600 5601 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 5602 // movl and movlp will both match v2i64, but v2i64 is never matched by 5603 // movl earlier because we make it strict to avoid messing with the movlp load 5604 // folding logic (see the code above getMOVLP call). Match it here then, 5605 // this is horrible, but will stay like this until we move all shuffle 5606 // matching to x86 specific nodes. Note that for the 1st condition all 5607 // types are matched with movsd. 5608 if ((HasSSE2 && NumElems == 2) || !X86::isMOVLMask(SVOp)) 5609 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG); 5610 else if (HasSSE2) 5611 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG); 5612 5613 5614 assert(VT != MVT::v4i32 && "unsupported shuffle type"); 5615 5616 // Invert the operand order and use SHUFPS to match it. 
5617 return getTargetShuffleNode(X86ISD::SHUFPS, dl, VT, V2, V1, 5618 X86::getShuffleSHUFImmediate(SVOp), DAG); 5619} 5620 5621static inline unsigned getUNPCKLOpcode(EVT VT, const X86Subtarget *Subtarget) { 5622 switch(VT.getSimpleVT().SimpleTy) { 5623 case MVT::v4i32: return X86ISD::PUNPCKLDQ; 5624 case MVT::v2i64: return X86ISD::PUNPCKLQDQ; 5625 case MVT::v4f32: 5626 return Subtarget->hasAVX() ? X86ISD::VUNPCKLPS : X86ISD::UNPCKLPS; 5627 case MVT::v2f64: 5628 return Subtarget->hasAVX() ? X86ISD::VUNPCKLPD : X86ISD::UNPCKLPD; 5629 case MVT::v8f32: return X86ISD::VUNPCKLPSY; 5630 case MVT::v4f64: return X86ISD::VUNPCKLPDY; 5631 case MVT::v16i8: return X86ISD::PUNPCKLBW; 5632 case MVT::v8i16: return X86ISD::PUNPCKLWD; 5633 default: 5634 llvm_unreachable("Unknown type for unpckl"); 5635 } 5636 return 0; 5637} 5638 5639static inline unsigned getUNPCKHOpcode(EVT VT) { 5640 switch(VT.getSimpleVT().SimpleTy) { 5641 case MVT::v4i32: return X86ISD::PUNPCKHDQ; 5642 case MVT::v2i64: return X86ISD::PUNPCKHQDQ; 5643 case MVT::v4f32: return X86ISD::UNPCKHPS; 5644 case MVT::v2f64: return X86ISD::UNPCKHPD; 5645 case MVT::v16i8: return X86ISD::PUNPCKHBW; 5646 case MVT::v8i16: return X86ISD::PUNPCKHWD; 5647 default: 5648 llvm_unreachable("Unknown type for unpckh"); 5649 } 5650 return 0; 5651} 5652 5653static 5654SDValue NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG, 5655 const TargetLowering &TLI, 5656 const X86Subtarget *Subtarget) { 5657 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 5658 EVT VT = Op.getValueType(); 5659 DebugLoc dl = Op.getDebugLoc(); 5660 SDValue V1 = Op.getOperand(0); 5661 SDValue V2 = Op.getOperand(1); 5662 5663 if (isZeroShuffle(SVOp)) 5664 return getZeroVector(VT, Subtarget->hasSSE2(), DAG, dl); 5665 5666 // Handle splat operations 5667 if (SVOp->isSplat()) { 5668 // Special case, this is the only place now where it's 5669 // allowed to return a vector_shuffle operation without 5670 // using a target specific node, because *hopefully* it 5671 // will be optimized away by the dag combiner. 5672 if (VT.getVectorNumElements() <= 4 && 5673 CanXFormVExtractWithShuffleIntoLoad(Op, DAG, TLI)) 5674 return Op; 5675 5676 // Handle splats by matching through known masks 5677 if (VT.getVectorNumElements() <= 4) 5678 return SDValue(); 5679 5680 // Canonicalize all of the remaining to v4f32. 5681 return PromoteSplat(SVOp, DAG); 5682 } 5683 5684 // If the shuffle can be profitably rewritten as a narrower shuffle, then 5685 // do it! 5686 if (VT == MVT::v8i16 || VT == MVT::v16i8) { 5687 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl); 5688 if (NewOp.getNode()) 5689 return DAG.getNode(ISD::BITCAST, dl, VT, NewOp); 5690 } else if ((VT == MVT::v4i32 || (VT == MVT::v4f32 && Subtarget->hasSSE2()))) { 5691 // FIXME: Figure out a cleaner way to do this. 5692 // Try to make use of movq to zero out the top part. 
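    // For example, a v4i32 shuffle that keeps elements <0,1> of one input in
    // the low half and fills the rest with zeros is, once narrowed to v2i64,
    // exactly a movq (VZEXT_MOVL): it moves the low quadword and zeroes the
    // upper one.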
5693    if (ISD::isBuildVectorAllZeros(V2.getNode())) {
5694      SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl);
5695      if (NewOp.getNode()) {
5696        if (isCommutedMOVL(cast<ShuffleVectorSDNode>(NewOp), true, false))
5697          return getVZextMovL(VT, NewOp.getValueType(), NewOp.getOperand(0),
5698                              DAG, Subtarget, dl);
5699      }
5700    } else if (ISD::isBuildVectorAllZeros(V1.getNode())) {
5701      SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl);
5702      if (NewOp.getNode() && X86::isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)))
5703        return getVZextMovL(VT, NewOp.getValueType(), NewOp.getOperand(1),
5704                            DAG, Subtarget, dl);
5705    }
5706  }
5707  return SDValue();
5708}
5709
5710SDValue
5711X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
5712  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
5713  SDValue V1 = Op.getOperand(0);
5714  SDValue V2 = Op.getOperand(1);
5715  EVT VT = Op.getValueType();
5716  DebugLoc dl = Op.getDebugLoc();
5717  unsigned NumElems = VT.getVectorNumElements();
5718  bool isMMX = VT.getSizeInBits() == 64;
5719  bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
5720  bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
5721  bool V1IsSplat = false;
5722  bool V2IsSplat = false;
5723  bool HasSSE2 = Subtarget->hasSSE2() || Subtarget->hasAVX();
5724  bool HasSSE3 = Subtarget->hasSSE3() || Subtarget->hasAVX();
5725  bool HasSSSE3 = Subtarget->hasSSSE3() || Subtarget->hasAVX();
5726  MachineFunction &MF = DAG.getMachineFunction();
5727  bool OptForSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize);
5728
5729  // Shuffle operations on MMX are not supported.
5730  if (isMMX)
5731    return Op;
5732
5733  // Vector shuffle lowering takes 3 steps:
5734  //
5735  // 1) Normalize the input vectors. Here splats, zeroed vectors, profitable
5736  //    narrowing and commutation of operands should be handled.
5737  // 2) Matching of shuffles with known shuffle masks to x86 target specific
5738  //    shuffle nodes.
5739  // 3) Rewriting of unmatched masks into new generic shuffle operations,
5740  //    so the shuffle can be broken into other shuffles and the legalizer can
5741  //    try the lowering again.
5742  //
5743  // The general idea is that no vector_shuffle operation should be left to
5744  // be matched during isel; all of them must be converted to a target specific
5745  // node here.
5746
5747  // Normalize the input vectors. Here splats, zeroed vectors, profitable
5748  // narrowing and commutation of operands should be handled. The actual code
5749  // doesn't include all of those yet; work in progress...
5750  SDValue NewOp = NormalizeVectorShuffle(Op, DAG, *this, Subtarget);
5751  if (NewOp.getNode())
5752    return NewOp;
5753
5754  // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and
5755  // unpckh_undef). Only use pshufd if speed is more important than size.
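  // For example, the v4i32 mask <0,0,1,1> can be matched either as
  // pshufd $0x50 or as punpckldq with the same register for both inputs;
  // the unpck form is preferred when optimizing for size since it does not
  // need the immediate byte.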
5756 if (OptForSize && X86::isUNPCKL_v_undef_Mask(SVOp)) 5757 if (VT != MVT::v2i64 && VT != MVT::v2f64) 5758 return getTargetShuffleNode(getUNPCKLOpcode(VT, getSubtarget()), dl, VT, V1, V1, DAG); 5759 if (OptForSize && X86::isUNPCKH_v_undef_Mask(SVOp)) 5760 if (VT != MVT::v2i64 && VT != MVT::v2f64) 5761 return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V1, DAG); 5762 5763 if (X86::isMOVDDUPMask(SVOp) && HasSSE3 && V2IsUndef && 5764 RelaxedMayFoldVectorLoad(V1)) 5765 return getMOVDDup(Op, dl, V1, DAG); 5766 5767 if (X86::isMOVHLPS_v_undef_Mask(SVOp)) 5768 return getMOVHighToLow(Op, dl, DAG); 5769 5770 // Use to match splats 5771 if (HasSSE2 && X86::isUNPCKHMask(SVOp) && V2IsUndef && 5772 (VT == MVT::v2f64 || VT == MVT::v2i64)) 5773 return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V1, DAG); 5774 5775 if (X86::isPSHUFDMask(SVOp)) { 5776 // The actual implementation will match the mask in the if above and then 5777 // during isel it can match several different instructions, not only pshufd 5778 // as its name says, sad but true, emulate the behavior for now... 5779 if (X86::isMOVDDUPMask(SVOp) && ((VT == MVT::v4f32 || VT == MVT::v2i64))) 5780 return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG); 5781 5782 unsigned TargetMask = X86::getShuffleSHUFImmediate(SVOp); 5783 5784 if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32)) 5785 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG); 5786 5787 if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64)) 5788 return getTargetShuffleNode(X86ISD::SHUFPD, dl, VT, V1, V1, 5789 TargetMask, DAG); 5790 5791 if (VT == MVT::v4f32) 5792 return getTargetShuffleNode(X86ISD::SHUFPS, dl, VT, V1, V1, 5793 TargetMask, DAG); 5794 } 5795 5796 // Check if this can be converted into a logical shift. 5797 bool isLeft = false; 5798 unsigned ShAmt = 0; 5799 SDValue ShVal; 5800 bool isShift = getSubtarget()->hasSSE2() && 5801 isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt); 5802 if (isShift && ShVal.hasOneUse()) { 5803 // If the shifted value has multiple uses, it may be cheaper to use 5804 // v_set0 + movlhps or movhlps, etc. 5805 EVT EltVT = VT.getVectorElementType(); 5806 ShAmt *= EltVT.getSizeInBits(); 5807 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl); 5808 } 5809 5810 if (X86::isMOVLMask(SVOp)) { 5811 if (V1IsUndef) 5812 return V2; 5813 if (ISD::isBuildVectorAllZeros(V1.getNode())) 5814 return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl); 5815 if (!X86::isMOVLPMask(SVOp)) { 5816 if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64)) 5817 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG); 5818 5819 if (VT == MVT::v4i32 || VT == MVT::v4f32) 5820 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG); 5821 } 5822 } 5823 5824 // FIXME: fold these into legal mask. 
5825 if (X86::isMOVLHPSMask(SVOp) && !X86::isUNPCKLMask(SVOp)) 5826 return getMOVLowToHigh(Op, dl, DAG, HasSSE2); 5827 5828 if (X86::isMOVHLPSMask(SVOp)) 5829 return getMOVHighToLow(Op, dl, DAG); 5830 5831 if (X86::isMOVSHDUPMask(SVOp) && HasSSE3 && V2IsUndef && NumElems == 4) 5832 return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG); 5833 5834 if (X86::isMOVSLDUPMask(SVOp) && HasSSE3 && V2IsUndef && NumElems == 4) 5835 return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG); 5836 5837 if (X86::isMOVLPMask(SVOp)) 5838 return getMOVLP(Op, dl, DAG, HasSSE2); 5839 5840 if (ShouldXformToMOVHLPS(SVOp) || 5841 ShouldXformToMOVLP(V1.getNode(), V2.getNode(), SVOp)) 5842 return CommuteVectorShuffle(SVOp, DAG); 5843 5844 if (isShift) { 5845 // No better options. Use a vshl / vsrl. 5846 EVT EltVT = VT.getVectorElementType(); 5847 ShAmt *= EltVT.getSizeInBits(); 5848 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl); 5849 } 5850 5851 bool Commuted = false; 5852 // FIXME: This should also accept a bitcast of a splat? Be careful, not 5853 // 1,1,1,1 -> v8i16 though. 5854 V1IsSplat = isSplatVector(V1.getNode()); 5855 V2IsSplat = isSplatVector(V2.getNode()); 5856 5857 // Canonicalize the splat or undef, if present, to be on the RHS. 5858 if ((V1IsSplat || V1IsUndef) && !(V2IsSplat || V2IsUndef)) { 5859 Op = CommuteVectorShuffle(SVOp, DAG); 5860 SVOp = cast<ShuffleVectorSDNode>(Op); 5861 V1 = SVOp->getOperand(0); 5862 V2 = SVOp->getOperand(1); 5863 std::swap(V1IsSplat, V2IsSplat); 5864 std::swap(V1IsUndef, V2IsUndef); 5865 Commuted = true; 5866 } 5867 5868 if (isCommutedMOVL(SVOp, V2IsSplat, V2IsUndef)) { 5869 // Shuffling low element of v1 into undef, just return v1. 5870 if (V2IsUndef) 5871 return V1; 5872 // If V2 is a splat, the mask may be malformed such as <4,3,3,3>, which 5873 // the instruction selector will not match, so get a canonical MOVL with 5874 // swapped operands to undo the commute. 5875 return getMOVL(DAG, dl, VT, V2, V1); 5876 } 5877 5878 if (X86::isUNPCKLMask(SVOp)) 5879 return getTargetShuffleNode(getUNPCKLOpcode(VT, getSubtarget()), 5880 dl, VT, V1, V2, DAG); 5881 5882 if (X86::isUNPCKHMask(SVOp)) 5883 return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V2, DAG); 5884 5885 if (V2IsSplat) { 5886 // Normalize mask so all entries that point to V2 points to its first 5887 // element then try to match unpck{h|l} again. If match, return a 5888 // new vector_shuffle with the corrected mask. 5889 SDValue NewMask = NormalizeMask(SVOp, DAG); 5890 ShuffleVectorSDNode *NSVOp = cast<ShuffleVectorSDNode>(NewMask); 5891 if (NSVOp != SVOp) { 5892 if (X86::isUNPCKLMask(NSVOp, true)) { 5893 return NewMask; 5894 } else if (X86::isUNPCKHMask(NSVOp, true)) { 5895 return NewMask; 5896 } 5897 } 5898 } 5899 5900 if (Commuted) { 5901 // Commute is back and try unpck* again. 5902 // FIXME: this seems wrong. 
5903 SDValue NewOp = CommuteVectorShuffle(SVOp, DAG); 5904 ShuffleVectorSDNode *NewSVOp = cast<ShuffleVectorSDNode>(NewOp); 5905 5906 if (X86::isUNPCKLMask(NewSVOp)) 5907 return getTargetShuffleNode(getUNPCKLOpcode(VT, getSubtarget()), 5908 dl, VT, V2, V1, DAG); 5909 5910 if (X86::isUNPCKHMask(NewSVOp)) 5911 return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V2, V1, DAG); 5912 } 5913 5914 // Normalize the node to match x86 shuffle ops if needed 5915 if (V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(SVOp)) 5916 return CommuteVectorShuffle(SVOp, DAG); 5917 5918 // The checks below are all present in isShuffleMaskLegal, but they are 5919 // inlined here right now to enable us to directly emit target specific 5920 // nodes, and remove one by one until they don't return Op anymore. 5921 SmallVector<int, 16> M; 5922 SVOp->getMask(M); 5923 5924 if (isPALIGNRMask(M, VT, HasSSSE3)) 5925 return getTargetShuffleNode(X86ISD::PALIGN, dl, VT, V1, V2, 5926 X86::getShufflePALIGNRImmediate(SVOp), 5927 DAG); 5928 5929 if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) && 5930 SVOp->getSplatIndex() == 0 && V2IsUndef) { 5931 if (VT == MVT::v2f64) { 5932 X86ISD::NodeType Opcode = 5933 getSubtarget()->hasAVX() ? X86ISD::VUNPCKLPD : X86ISD::UNPCKLPD; 5934 return getTargetShuffleNode(Opcode, dl, VT, V1, V1, DAG); 5935 } 5936 if (VT == MVT::v2i64) 5937 return getTargetShuffleNode(X86ISD::PUNPCKLQDQ, dl, VT, V1, V1, DAG); 5938 } 5939 5940 if (isPSHUFHWMask(M, VT)) 5941 return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1, 5942 X86::getShufflePSHUFHWImmediate(SVOp), 5943 DAG); 5944 5945 if (isPSHUFLWMask(M, VT)) 5946 return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1, 5947 X86::getShufflePSHUFLWImmediate(SVOp), 5948 DAG); 5949 5950 if (isSHUFPMask(M, VT)) { 5951 unsigned TargetMask = X86::getShuffleSHUFImmediate(SVOp); 5952 if (VT == MVT::v4f32 || VT == MVT::v4i32) 5953 return getTargetShuffleNode(X86ISD::SHUFPS, dl, VT, V1, V2, 5954 TargetMask, DAG); 5955 if (VT == MVT::v2f64 || VT == MVT::v2i64) 5956 return getTargetShuffleNode(X86ISD::SHUFPD, dl, VT, V1, V2, 5957 TargetMask, DAG); 5958 } 5959 5960 if (X86::isUNPCKL_v_undef_Mask(SVOp)) 5961 if (VT != MVT::v2i64 && VT != MVT::v2f64) 5962 return getTargetShuffleNode(getUNPCKLOpcode(VT, getSubtarget()), 5963 dl, VT, V1, V1, DAG); 5964 if (X86::isUNPCKH_v_undef_Mask(SVOp)) 5965 if (VT != MVT::v2i64 && VT != MVT::v2f64) 5966 return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V1, DAG); 5967 5968 // Handle v8i16 specifically since SSE can do byte extraction and insertion. 5969 if (VT == MVT::v8i16) { 5970 SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, DAG); 5971 if (NewOp.getNode()) 5972 return NewOp; 5973 } 5974 5975 if (VT == MVT::v16i8) { 5976 SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, DAG, *this); 5977 if (NewOp.getNode()) 5978 return NewOp; 5979 } 5980 5981 // Handle all 4 wide cases with a number of shuffles. 
5982 if (NumElems == 4) 5983 return LowerVECTOR_SHUFFLE_4wide(SVOp, DAG); 5984 5985 return SDValue(); 5986} 5987 5988SDValue 5989X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, 5990 SelectionDAG &DAG) const { 5991 EVT VT = Op.getValueType(); 5992 DebugLoc dl = Op.getDebugLoc(); 5993 if (VT.getSizeInBits() == 8) { 5994 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32, 5995 Op.getOperand(0), Op.getOperand(1)); 5996 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract, 5997 DAG.getValueType(VT)); 5998 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert); 5999 } else if (VT.getSizeInBits() == 16) { 6000 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 6001 // If Idx is 0, it's cheaper to do a move instead of a pextrw. 6002 if (Idx == 0) 6003 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, 6004 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, 6005 DAG.getNode(ISD::BITCAST, dl, 6006 MVT::v4i32, 6007 Op.getOperand(0)), 6008 Op.getOperand(1))); 6009 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32, 6010 Op.getOperand(0), Op.getOperand(1)); 6011 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract, 6012 DAG.getValueType(VT)); 6013 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert); 6014 } else if (VT == MVT::f32) { 6015 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy 6016 // the result back to FR32 register. It's only worth matching if the 6017 // result has a single use which is a store or a bitcast to i32. And in 6018 // the case of a store, it's not worth it if the index is a constant 0, 6019 // because a MOVSSmr can be used instead, which is smaller and faster. 6020 if (!Op.hasOneUse()) 6021 return SDValue(); 6022 SDNode *User = *Op.getNode()->use_begin(); 6023 if ((User->getOpcode() != ISD::STORE || 6024 (isa<ConstantSDNode>(Op.getOperand(1)) && 6025 cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) && 6026 (User->getOpcode() != ISD::BITCAST || 6027 User->getValueType(0) != MVT::i32)) 6028 return SDValue(); 6029 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, 6030 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, 6031 Op.getOperand(0)), 6032 Op.getOperand(1)); 6033 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract); 6034 } else if (VT == MVT::i32) { 6035 // ExtractPS works with constant index. 6036 if (isa<ConstantSDNode>(Op.getOperand(1))) 6037 return Op; 6038 } 6039 return SDValue(); 6040} 6041 6042 6043SDValue 6044X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, 6045 SelectionDAG &DAG) const { 6046 if (!isa<ConstantSDNode>(Op.getOperand(1))) 6047 return SDValue(); 6048 6049 SDValue Vec = Op.getOperand(0); 6050 EVT VecVT = Vec.getValueType(); 6051 6052 // If this is a 256-bit vector result, first extract the 128-bit 6053 // vector and then extract from the 128-bit vector. 6054 if (VecVT.getSizeInBits() > 128) { 6055 DebugLoc dl = Op.getNode()->getDebugLoc(); 6056 unsigned NumElems = VecVT.getVectorNumElements(); 6057 SDValue Idx = Op.getOperand(1); 6058 6059 if (!isa<ConstantSDNode>(Idx)) 6060 return SDValue(); 6061 6062 unsigned ExtractNumElems = NumElems / (VecVT.getSizeInBits() / 128); 6063 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue(); 6064 6065 // Get the 128-bit vector. 6066 bool Upper = IdxVal >= ExtractNumElems; 6067 Vec = Extract128BitVector(Vec, Idx, DAG, dl); 6068 6069 // Extract from it. 
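    // For example, extracting element 6 of a v8f32: the upper 128-bit half
    // (a v4f32) was selected above, and the index is rebased below to
    // 6 - 4 == 2 before the narrower extract is emitted.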
6070 SDValue ScaledIdx = Idx; 6071 if (Upper) 6072 ScaledIdx = DAG.getNode(ISD::SUB, dl, Idx.getValueType(), Idx, 6073 DAG.getConstant(ExtractNumElems, 6074 Idx.getValueType())); 6075 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec, 6076 ScaledIdx); 6077 } 6078 6079 assert(Vec.getValueSizeInBits() <= 128 && "Unexpected vector length"); 6080 6081 if (Subtarget->hasSSE41()) { 6082 SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG); 6083 if (Res.getNode()) 6084 return Res; 6085 } 6086 6087 EVT VT = Op.getValueType(); 6088 DebugLoc dl = Op.getDebugLoc(); 6089 // TODO: handle v16i8. 6090 if (VT.getSizeInBits() == 16) { 6091 SDValue Vec = Op.getOperand(0); 6092 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 6093 if (Idx == 0) 6094 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, 6095 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, 6096 DAG.getNode(ISD::BITCAST, dl, 6097 MVT::v4i32, Vec), 6098 Op.getOperand(1))); 6099 // Transform it so it match pextrw which produces a 32-bit result. 6100 EVT EltVT = MVT::i32; 6101 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT, 6102 Op.getOperand(0), Op.getOperand(1)); 6103 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract, 6104 DAG.getValueType(VT)); 6105 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert); 6106 } else if (VT.getSizeInBits() == 32) { 6107 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 6108 if (Idx == 0) 6109 return Op; 6110 6111 // SHUFPS the element to the lowest double word, then movss. 6112 int Mask[4] = { Idx, -1, -1, -1 }; 6113 EVT VVT = Op.getOperand(0).getValueType(); 6114 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0), 6115 DAG.getUNDEF(VVT), Mask); 6116 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec, 6117 DAG.getIntPtrConstant(0)); 6118 } else if (VT.getSizeInBits() == 64) { 6119 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b 6120 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught 6121 // to match extract_elt for f64. 6122 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 6123 if (Idx == 0) 6124 return Op; 6125 6126 // UNPCKHPD the element to the lowest double word, then movsd. 6127 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored 6128 // to a f64mem, the whole operation is folded into a single MOVHPDmr. 6129 int Mask[2] = { 1, -1 }; 6130 EVT VVT = Op.getOperand(0).getValueType(); 6131 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0), 6132 DAG.getUNDEF(VVT), Mask); 6133 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec, 6134 DAG.getIntPtrConstant(0)); 6135 } 6136 6137 return SDValue(); 6138} 6139 6140SDValue 6141X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, 6142 SelectionDAG &DAG) const { 6143 EVT VT = Op.getValueType(); 6144 EVT EltVT = VT.getVectorElementType(); 6145 DebugLoc dl = Op.getDebugLoc(); 6146 6147 SDValue N0 = Op.getOperand(0); 6148 SDValue N1 = Op.getOperand(1); 6149 SDValue N2 = Op.getOperand(2); 6150 6151 if ((EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) && 6152 isa<ConstantSDNode>(N2)) { 6153 unsigned Opc; 6154 if (VT == MVT::v8i16) 6155 Opc = X86ISD::PINSRW; 6156 else if (VT == MVT::v16i8) 6157 Opc = X86ISD::PINSRB; 6158 else 6159 Opc = X86ISD::PINSRB; 6160 6161 // Transform it so it match pinsr{b,w} which expects a GR32 as its second 6162 // argument. 
6163 if (N1.getValueType() != MVT::i32) 6164 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1); 6165 if (N2.getValueType() != MVT::i32) 6166 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue()); 6167 return DAG.getNode(Opc, dl, VT, N0, N1, N2); 6168 } else if (EltVT == MVT::f32 && isa<ConstantSDNode>(N2)) { 6169 // Bits [7:6] of the constant are the source select. This will always be 6170 // zero here. The DAG Combiner may combine an extract_elt index into these 6171 // bits. For example (insert (extract, 3), 2) could be matched by putting 6172 // the '3' into bits [7:6] of X86ISD::INSERTPS. 6173 // Bits [5:4] of the constant are the destination select. This is the 6174 // value of the incoming immediate. 6175 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may 6176 // combine either bitwise AND or insert of float 0.0 to set these bits. 6177 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue() << 4); 6178 // Create this as a scalar to vector.. 6179 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1); 6180 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2); 6181 } else if (EltVT == MVT::i32 && isa<ConstantSDNode>(N2)) { 6182 // PINSR* works with constant index. 6183 return Op; 6184 } 6185 return SDValue(); 6186} 6187 6188SDValue 6189X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const { 6190 EVT VT = Op.getValueType(); 6191 EVT EltVT = VT.getVectorElementType(); 6192 6193 DebugLoc dl = Op.getDebugLoc(); 6194 SDValue N0 = Op.getOperand(0); 6195 SDValue N1 = Op.getOperand(1); 6196 SDValue N2 = Op.getOperand(2); 6197 6198 // If this is a 256-bit vector result, first insert into a 128-bit 6199 // vector and then insert into the 256-bit vector. 6200 if (VT.getSizeInBits() > 128) { 6201 if (!isa<ConstantSDNode>(N2)) 6202 return SDValue(); 6203 6204 // Get the 128-bit vector. 6205 unsigned NumElems = VT.getVectorNumElements(); 6206 unsigned IdxVal = cast<ConstantSDNode>(N2)->getZExtValue(); 6207 bool Upper = IdxVal >= NumElems / 2; 6208 6209 SDValue SubN0 = Extract128BitVector(N0, N2, DAG, dl); 6210 6211 // Insert into it. 6212 SDValue ScaledN2 = N2; 6213 if (Upper) 6214 ScaledN2 = DAG.getNode(ISD::SUB, dl, N2.getValueType(), N2, 6215 DAG.getConstant(NumElems / 6216 (VT.getSizeInBits() / 128), 6217 N2.getValueType())); 6218 Op = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, SubN0.getValueType(), SubN0, 6219 N1, ScaledN2); 6220 6221 // Insert the 128-bit vector 6222 // FIXME: Why UNDEF? 6223 return Insert128BitVector(N0, Op, N2, DAG, dl); 6224 } 6225 6226 if (Subtarget->hasSSE41()) 6227 return LowerINSERT_VECTOR_ELT_SSE4(Op, DAG); 6228 6229 if (EltVT == MVT::i8) 6230 return SDValue(); 6231 6232 if (EltVT.getSizeInBits() == 16 && isa<ConstantSDNode>(N2)) { 6233 // Transform it so it match pinsrw which expects a 16-bit value in a GR32 6234 // as its second argument. 6235 if (N1.getValueType() != MVT::i32) 6236 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1); 6237 if (N2.getValueType() != MVT::i32) 6238 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue()); 6239 return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2); 6240 } 6241 return SDValue(); 6242} 6243 6244SDValue 6245X86TargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const { 6246 LLVMContext *Context = DAG.getContext(); 6247 DebugLoc dl = Op.getDebugLoc(); 6248 EVT OpVT = Op.getValueType(); 6249 6250 // If this is a 256-bit vector result, first insert into a 128-bit 6251 // vector and then insert into the 256-bit vector. 
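  // Sketch of the expansion: place the scalar into the low element of a
  // 128-bit SCALAR_TO_VECTOR, then insert that half at index 0 of an undef
  // 256-bit vector, leaving the upper half undefined.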
6252 if (OpVT.getSizeInBits() > 128) { 6253 // Insert into a 128-bit vector. 6254 EVT VT128 = EVT::getVectorVT(*Context, 6255 OpVT.getVectorElementType(), 6256 OpVT.getVectorNumElements() / 2); 6257 6258 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0)); 6259 6260 // Insert the 128-bit vector. 6261 return Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, OpVT), Op, 6262 DAG.getConstant(0, MVT::i32), 6263 DAG, dl); 6264 } 6265 6266 if (Op.getValueType() == MVT::v1i64 && 6267 Op.getOperand(0).getValueType() == MVT::i64) 6268 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0)); 6269 6270 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0)); 6271 assert(Op.getValueType().getSimpleVT().getSizeInBits() == 128 && 6272 "Expected an SSE type!"); 6273 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), 6274 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt)); 6275} 6276 6277// Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in 6278// a simple subregister reference or explicit instructions to grab 6279// upper bits of a vector. 6280SDValue 6281X86TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const { 6282 if (Subtarget->hasAVX()) { 6283 DebugLoc dl = Op.getNode()->getDebugLoc(); 6284 SDValue Vec = Op.getNode()->getOperand(0); 6285 SDValue Idx = Op.getNode()->getOperand(1); 6286 6287 if (Op.getNode()->getValueType(0).getSizeInBits() == 128 6288 && Vec.getNode()->getValueType(0).getSizeInBits() == 256) { 6289 return Extract128BitVector(Vec, Idx, DAG, dl); 6290 } 6291 } 6292 return SDValue(); 6293} 6294 6295// Lower a node with an INSERT_SUBVECTOR opcode. This may result in a 6296// simple superregister reference or explicit instructions to insert 6297// the upper bits of a vector. 6298SDValue 6299X86TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const { 6300 if (Subtarget->hasAVX()) { 6301 DebugLoc dl = Op.getNode()->getDebugLoc(); 6302 SDValue Vec = Op.getNode()->getOperand(0); 6303 SDValue SubVec = Op.getNode()->getOperand(1); 6304 SDValue Idx = Op.getNode()->getOperand(2); 6305 6306 if (Op.getNode()->getValueType(0).getSizeInBits() == 256 6307 && SubVec.getNode()->getValueType(0).getSizeInBits() == 128) { 6308 return Insert128BitVector(Vec, SubVec, Idx, DAG, dl); 6309 } 6310 } 6311 return SDValue(); 6312} 6313 6314// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 6315// their target countpart wrapped in the X86ISD::Wrapper node. Suppose N is 6316// one of the above mentioned nodes. It has to be wrapped because otherwise 6317// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 6318// be used to form addressing mode. These wrapped nodes will be selected 6319// into MOV32ri. 6320SDValue 6321X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const { 6322 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 6323 6324 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the 6325 // global base reg. 
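  // For example (illustrative), with GOT-style PIC the final address is
  // computed as (add GlobalBaseReg, Wrapper(CP@GOTOFF)), i.e. the pool entry
  // is addressed relative to the PIC base register.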
6326 unsigned char OpFlag = 0; 6327 unsigned WrapperKind = X86ISD::Wrapper; 6328 CodeModel::Model M = getTargetMachine().getCodeModel(); 6329 6330 if (Subtarget->isPICStyleRIPRel() && 6331 (M == CodeModel::Small || M == CodeModel::Kernel)) 6332 WrapperKind = X86ISD::WrapperRIP; 6333 else if (Subtarget->isPICStyleGOT()) 6334 OpFlag = X86II::MO_GOTOFF; 6335 else if (Subtarget->isPICStyleStubPIC()) 6336 OpFlag = X86II::MO_PIC_BASE_OFFSET; 6337 6338 SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(), 6339 CP->getAlignment(), 6340 CP->getOffset(), OpFlag); 6341 DebugLoc DL = CP->getDebugLoc(); 6342 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result); 6343 // With PIC, the address is actually $g + Offset. 6344 if (OpFlag) { 6345 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(), 6346 DAG.getNode(X86ISD::GlobalBaseReg, 6347 DebugLoc(), getPointerTy()), 6348 Result); 6349 } 6350 6351 return Result; 6352} 6353 6354SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { 6355 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 6356 6357 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the 6358 // global base reg. 6359 unsigned char OpFlag = 0; 6360 unsigned WrapperKind = X86ISD::Wrapper; 6361 CodeModel::Model M = getTargetMachine().getCodeModel(); 6362 6363 if (Subtarget->isPICStyleRIPRel() && 6364 (M == CodeModel::Small || M == CodeModel::Kernel)) 6365 WrapperKind = X86ISD::WrapperRIP; 6366 else if (Subtarget->isPICStyleGOT()) 6367 OpFlag = X86II::MO_GOTOFF; 6368 else if (Subtarget->isPICStyleStubPIC()) 6369 OpFlag = X86II::MO_PIC_BASE_OFFSET; 6370 6371 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(), 6372 OpFlag); 6373 DebugLoc DL = JT->getDebugLoc(); 6374 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result); 6375 6376 // With PIC, the address is actually $g + Offset. 6377 if (OpFlag) 6378 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(), 6379 DAG.getNode(X86ISD::GlobalBaseReg, 6380 DebugLoc(), getPointerTy()), 6381 Result); 6382 6383 return Result; 6384} 6385 6386SDValue 6387X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const { 6388 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol(); 6389 6390 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the 6391 // global base reg. 6392 unsigned char OpFlag = 0; 6393 unsigned WrapperKind = X86ISD::Wrapper; 6394 CodeModel::Model M = getTargetMachine().getCodeModel(); 6395 6396 if (Subtarget->isPICStyleRIPRel() && 6397 (M == CodeModel::Small || M == CodeModel::Kernel)) 6398 WrapperKind = X86ISD::WrapperRIP; 6399 else if (Subtarget->isPICStyleGOT()) 6400 OpFlag = X86II::MO_GOTOFF; 6401 else if (Subtarget->isPICStyleStubPIC()) 6402 OpFlag = X86II::MO_PIC_BASE_OFFSET; 6403 6404 SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag); 6405 6406 DebugLoc DL = Op.getDebugLoc(); 6407 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result); 6408 6409 6410 // With PIC, the address is actually $g + Offset. 6411 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 6412 !Subtarget->is64Bit()) { 6413 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(), 6414 DAG.getNode(X86ISD::GlobalBaseReg, 6415 DebugLoc(), getPointerTy()), 6416 Result); 6417 } 6418 6419 return Result; 6420} 6421 6422SDValue 6423X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const { 6424 // Create the TargetBlockAddressAddress node. 
6425 unsigned char OpFlags = 6426 Subtarget->ClassifyBlockAddressReference(); 6427 CodeModel::Model M = getTargetMachine().getCodeModel(); 6428 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 6429 DebugLoc dl = Op.getDebugLoc(); 6430 SDValue Result = DAG.getBlockAddress(BA, getPointerTy(), 6431 /*isTarget=*/true, OpFlags); 6432 6433 if (Subtarget->isPICStyleRIPRel() && 6434 (M == CodeModel::Small || M == CodeModel::Kernel)) 6435 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result); 6436 else 6437 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result); 6438 6439 // With PIC, the address is actually $g + Offset. 6440 if (isGlobalRelativeToPICBase(OpFlags)) { 6441 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), 6442 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()), 6443 Result); 6444 } 6445 6446 return Result; 6447} 6448 6449SDValue 6450X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl, 6451 int64_t Offset, 6452 SelectionDAG &DAG) const { 6453 // Create the TargetGlobalAddress node, folding in the constant 6454 // offset if it is legal. 6455 unsigned char OpFlags = 6456 Subtarget->ClassifyGlobalReference(GV, getTargetMachine()); 6457 CodeModel::Model M = getTargetMachine().getCodeModel(); 6458 SDValue Result; 6459 if (OpFlags == X86II::MO_NO_FLAG && 6460 X86::isOffsetSuitableForCodeModel(Offset, M)) { 6461 // A direct static reference to a global. 6462 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset); 6463 Offset = 0; 6464 } else { 6465 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags); 6466 } 6467 6468 if (Subtarget->isPICStyleRIPRel() && 6469 (M == CodeModel::Small || M == CodeModel::Kernel)) 6470 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result); 6471 else 6472 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result); 6473 6474 // With PIC, the address is actually $g + Offset. 6475 if (isGlobalRelativeToPICBase(OpFlags)) { 6476 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), 6477 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()), 6478 Result); 6479 } 6480 6481 // For globals that require a load from a stub to get the address, emit the 6482 // load. 6483 if (isGlobalStubReference(OpFlags)) 6484 Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result, 6485 MachinePointerInfo::getGOT(), false, false, 0); 6486 6487 // If there was a non-zero offset that we didn't fold, create an explicit 6488 // addition for it. 
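  // For example (illustrative): a reference to "gv + 8" that must go through
  // a GOT stub becomes (add (load Wrapper(gv@GOT)), 8), since the 8 cannot be
  // folded into the stub load itself.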
6489 if (Offset != 0) 6490 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result, 6491 DAG.getConstant(Offset, getPointerTy())); 6492 6493 return Result; 6494} 6495 6496SDValue 6497X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const { 6498 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 6499 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset(); 6500 return LowerGlobalAddress(GV, Op.getDebugLoc(), Offset, DAG); 6501} 6502 6503static SDValue 6504GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA, 6505 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg, 6506 unsigned char OperandFlags) { 6507 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 6508 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 6509 DebugLoc dl = GA->getDebugLoc(); 6510 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, 6511 GA->getValueType(0), 6512 GA->getOffset(), 6513 OperandFlags); 6514 if (InFlag) { 6515 SDValue Ops[] = { Chain, TGA, *InFlag }; 6516 Chain = DAG.getNode(X86ISD::TLSADDR, dl, NodeTys, Ops, 3); 6517 } else { 6518 SDValue Ops[] = { Chain, TGA }; 6519 Chain = DAG.getNode(X86ISD::TLSADDR, dl, NodeTys, Ops, 2); 6520 } 6521 6522 // TLSADDR will be codegen'ed as call. Inform MFI that function has calls. 6523 MFI->setAdjustsStack(true); 6524 6525 SDValue Flag = Chain.getValue(1); 6526 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag); 6527} 6528 6529// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit 6530static SDValue 6531LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG, 6532 const EVT PtrVT) { 6533 SDValue InFlag; 6534 DebugLoc dl = GA->getDebugLoc(); // ? function entry point might be better 6535 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX, 6536 DAG.getNode(X86ISD::GlobalBaseReg, 6537 DebugLoc(), PtrVT), InFlag); 6538 InFlag = Chain.getValue(1); 6539 6540 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD); 6541} 6542 6543// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit 6544static SDValue 6545LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG, 6546 const EVT PtrVT) { 6547 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, NULL, PtrVT, 6548 X86::RAX, X86II::MO_TLSGD); 6549} 6550 6551// Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or 6552// "local exec" model. 6553static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, 6554 const EVT PtrVT, TLSModel::Model model, 6555 bool is64Bit) { 6556 DebugLoc dl = GA->getDebugLoc(); 6557 6558 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit). 6559 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(), 6560 is64Bit ? 257 : 256)); 6561 6562 SDValue ThreadPointer = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 6563 DAG.getIntPtrConstant(0), 6564 MachinePointerInfo(Ptr), false, false, 0); 6565 6566 unsigned char OperandFlags = 0; 6567 // Most TLS accesses are not RIP relative, even on x86-64. One exception is 6568 // initialexec. 6569 unsigned WrapperKind = X86ISD::Wrapper; 6570 if (model == TLSModel::LocalExec) { 6571 OperandFlags = is64Bit ? 
X86II::MO_TPOFF : X86II::MO_NTPOFF; 6572 } else if (is64Bit) { 6573 assert(model == TLSModel::InitialExec); 6574 OperandFlags = X86II::MO_GOTTPOFF; 6575 WrapperKind = X86ISD::WrapperRIP; 6576 } else { 6577 assert(model == TLSModel::InitialExec); 6578 OperandFlags = X86II::MO_INDNTPOFF; 6579 } 6580 6581 // emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial 6582 // exec) 6583 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, 6584 GA->getValueType(0), 6585 GA->getOffset(), OperandFlags); 6586 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA); 6587 6588 if (model == TLSModel::InitialExec) 6589 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset, 6590 MachinePointerInfo::getGOT(), false, false, 0); 6591 6592 // The address of the thread local variable is the add of the thread 6593 // pointer with the offset of the variable. 6594 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); 6595} 6596 6597SDValue 6598X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { 6599 6600 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 6601 const GlobalValue *GV = GA->getGlobal(); 6602 6603 if (Subtarget->isTargetELF()) { 6604 // TODO: implement the "local dynamic" model 6605 // TODO: implement the "initial exec"model for pic executables 6606 6607 // If GV is an alias then use the aliasee for determining 6608 // thread-localness. 6609 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV)) 6610 GV = GA->resolveAliasedGlobal(false); 6611 6612 TLSModel::Model model 6613 = getTLSModel(GV, getTargetMachine().getRelocationModel()); 6614 6615 switch (model) { 6616 case TLSModel::GeneralDynamic: 6617 case TLSModel::LocalDynamic: // not implemented 6618 if (Subtarget->is64Bit()) 6619 return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy()); 6620 return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy()); 6621 6622 case TLSModel::InitialExec: 6623 case TLSModel::LocalExec: 6624 return LowerToTLSExecModel(GA, DAG, getPointerTy(), model, 6625 Subtarget->is64Bit()); 6626 } 6627 } else if (Subtarget->isTargetDarwin()) { 6628 // Darwin only has one model of TLS. Lower to that. 6629 unsigned char OpFlag = 0; 6630 unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ? 6631 X86ISD::WrapperRIP : X86ISD::Wrapper; 6632 6633 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the 6634 // global base reg. 6635 bool PIC32 = (getTargetMachine().getRelocationModel() == Reloc::PIC_) && 6636 !Subtarget->is64Bit(); 6637 if (PIC32) 6638 OpFlag = X86II::MO_TLVP_PIC_BASE; 6639 else 6640 OpFlag = X86II::MO_TLVP; 6641 DebugLoc DL = Op.getDebugLoc(); 6642 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL, 6643 GA->getValueType(0), 6644 GA->getOffset(), OpFlag); 6645 SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result); 6646 6647 // With PIC32, the address is actually $g + Offset. 6648 if (PIC32) 6649 Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(), 6650 DAG.getNode(X86ISD::GlobalBaseReg, 6651 DebugLoc(), getPointerTy()), 6652 Offset); 6653 6654 // Lowering the machine isd will make sure everything is in the right 6655 // location. 6656 SDValue Chain = DAG.getEntryNode(); 6657 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 6658 SDValue Args[] = { Chain, Offset }; 6659 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args, 2); 6660 6661 // TLSCALL will be codegen'ed as call. Inform MFI that function has calls. 
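  // (On Darwin this typically becomes an indirect call through the thunk
  //  pointer stored in the variable's TLV descriptor.)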
6662 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 6663 MFI->setAdjustsStack(true); 6664 6665 // And our return value (tls address) is in the standard call return value 6666 // location. 6667 unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX; 6668 return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy()); 6669 } 6670 6671 assert(false && 6672 "TLS not implemented for this target."); 6673 6674 llvm_unreachable("Unreachable"); 6675 return SDValue(); 6676} 6677 6678 6679/// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values and 6680/// take a 2 x i32 value to shift plus a shift amount. 6681SDValue X86TargetLowering::LowerShiftParts(SDValue Op, SelectionDAG &DAG) const { 6682 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 6683 EVT VT = Op.getValueType(); 6684 unsigned VTBits = VT.getSizeInBits(); 6685 DebugLoc dl = Op.getDebugLoc(); 6686 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS; 6687 SDValue ShOpLo = Op.getOperand(0); 6688 SDValue ShOpHi = Op.getOperand(1); 6689 SDValue ShAmt = Op.getOperand(2); 6690 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi, 6691 DAG.getConstant(VTBits - 1, MVT::i8)) 6692 : DAG.getConstant(0, VT); 6693 6694 SDValue Tmp2, Tmp3; 6695 if (Op.getOpcode() == ISD::SHL_PARTS) { 6696 Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt); 6697 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); 6698 } else { 6699 Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt); 6700 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, ShAmt); 6701 } 6702 6703 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt, 6704 DAG.getConstant(VTBits, MVT::i8)); 6705 SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32, 6706 AndNode, DAG.getConstant(0, MVT::i8)); 6707 6708 SDValue Hi, Lo; 6709 SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8); 6710 SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond }; 6711 SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond }; 6712 6713 if (Op.getOpcode() == ISD::SHL_PARTS) { 6714 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0, 4); 6715 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1, 4); 6716 } else { 6717 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0, 4); 6718 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1, 4); 6719 } 6720 6721 SDValue Ops[2] = { Lo, Hi }; 6722 return DAG.getMergeValues(Ops, 2, dl); 6723} 6724 6725SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op, 6726 SelectionDAG &DAG) const { 6727 EVT SrcVT = Op.getOperand(0).getValueType(); 6728 6729 if (SrcVT.isVector()) 6730 return SDValue(); 6731 6732 assert(SrcVT.getSimpleVT() <= MVT::i64 && SrcVT.getSimpleVT() >= MVT::i16 && 6733 "Unknown SINT_TO_FP to lower!"); 6734 6735 // These are really Legal; return the operand so the caller accepts it as 6736 // Legal. 
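  // (They are legal because the SSE cvtsi2ss/cvtsi2sd instructions convert
  //  i32 directly, and i64 as well when targeting 64-bit mode.)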
6737 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType())) 6738 return Op; 6739 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) && 6740 Subtarget->is64Bit()) { 6741 return Op; 6742 } 6743 6744 DebugLoc dl = Op.getDebugLoc(); 6745 unsigned Size = SrcVT.getSizeInBits()/8; 6746 MachineFunction &MF = DAG.getMachineFunction(); 6747 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false); 6748 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 6749 SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), 6750 StackSlot, 6751 MachinePointerInfo::getFixedStack(SSFI), 6752 false, false, 0); 6753 return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG); 6754} 6755 6756SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, 6757 SDValue StackSlot, 6758 SelectionDAG &DAG) const { 6759 // Build the FILD 6760 DebugLoc DL = Op.getDebugLoc(); 6761 SDVTList Tys; 6762 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType()); 6763 if (useSSE) 6764 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue); 6765 else 6766 Tys = DAG.getVTList(Op.getValueType(), MVT::Other); 6767 6768 unsigned ByteSize = SrcVT.getSizeInBits()/8; 6769 6770 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot); 6771 MachineMemOperand *MMO; 6772 if (FI) { 6773 int SSFI = FI->getIndex(); 6774 MMO = 6775 DAG.getMachineFunction() 6776 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 6777 MachineMemOperand::MOLoad, ByteSize, ByteSize); 6778 } else { 6779 MMO = cast<LoadSDNode>(StackSlot)->getMemOperand(); 6780 StackSlot = StackSlot.getOperand(1); 6781 } 6782 SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) }; 6783 SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG : 6784 X86ISD::FILD, DL, 6785 Tys, Ops, array_lengthof(Ops), 6786 SrcVT, MMO); 6787 6788 if (useSSE) { 6789 Chain = Result.getValue(1); 6790 SDValue InFlag = Result.getValue(2); 6791 6792 // FIXME: Currently the FST is flagged to the FILD_FLAG. This 6793 // shouldn't be necessary except that RFP cannot be live across 6794 // multiple blocks. When stackifier is fixed, they can be uncoupled. 6795 MachineFunction &MF = DAG.getMachineFunction(); 6796 unsigned SSFISize = Op.getValueType().getSizeInBits()/8; 6797 int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false); 6798 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 6799 Tys = DAG.getVTList(MVT::Other); 6800 SDValue Ops[] = { 6801 Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag 6802 }; 6803 MachineMemOperand *MMO = 6804 DAG.getMachineFunction() 6805 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 6806 MachineMemOperand::MOStore, SSFISize, SSFISize); 6807 6808 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys, 6809 Ops, array_lengthof(Ops), 6810 Op.getValueType(), MMO); 6811 Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot, 6812 MachinePointerInfo::getFixedStack(SSFI), 6813 false, false, 0); 6814 } 6815 6816 return Result; 6817} 6818 6819// LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion. 6820SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op, 6821 SelectionDAG &DAG) const { 6822 // This algorithm is not obvious. Here it is in C code, more or less: 6823 /* 6824 double uint64_to_double( uint32_t hi, uint32_t lo ) { 6825 static const __m128i exp = { 0x4330000045300000ULL, 0 }; 6826 static const __m128d bias = { 0x1.0p84, 0x1.0p52 }; 6827 6828 // Copy ints to xmm registers. 
6829 __m128i xh = _mm_cvtsi32_si128( hi ); 6830 __m128i xl = _mm_cvtsi32_si128( lo ); 6831 6832 // Combine into low half of a single xmm register. 6833 __m128i x = _mm_unpacklo_epi32( xh, xl ); 6834 __m128d d; 6835 double sd; 6836 6837 // Merge in appropriate exponents to give the integer bits the right 6838 // magnitude. 6839 x = _mm_unpacklo_epi32( x, exp ); 6840 6841 // Subtract away the biases to deal with the IEEE-754 double precision 6842 // implicit 1. 6843 d = _mm_sub_pd( (__m128d) x, bias ); 6844 6845 // All conversions up to here are exact. The correctly rounded result is 6846 // calculated using the current rounding mode using the following 6847 // horizontal add. 6848 d = _mm_add_sd( d, _mm_unpackhi_pd( d, d ) ); 6849 _mm_store_sd( &sd, d ); // Because we are returning doubles in XMM, this 6850 // store doesn't really need to be here (except 6851 // maybe to zero the other double) 6852 return sd; 6853 } 6854 */ 6855 6856 DebugLoc dl = Op.getDebugLoc(); 6857 LLVMContext *Context = DAG.getContext(); 6858 6859 // Build some magic constants. 6860 std::vector<Constant*> CV0; 6861 CV0.push_back(ConstantInt::get(*Context, APInt(32, 0x45300000))); 6862 CV0.push_back(ConstantInt::get(*Context, APInt(32, 0x43300000))); 6863 CV0.push_back(ConstantInt::get(*Context, APInt(32, 0))); 6864 CV0.push_back(ConstantInt::get(*Context, APInt(32, 0))); 6865 Constant *C0 = ConstantVector::get(CV0); 6866 SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16); 6867 6868 std::vector<Constant*> CV1; 6869 CV1.push_back( 6870 ConstantFP::get(*Context, APFloat(APInt(64, 0x4530000000000000ULL)))); 6871 CV1.push_back( 6872 ConstantFP::get(*Context, APFloat(APInt(64, 0x4330000000000000ULL)))); 6873 Constant *C1 = ConstantVector::get(CV1); 6874 SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16); 6875 6876 SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, 6877 DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 6878 Op.getOperand(0), 6879 DAG.getIntPtrConstant(1))); 6880 SDValue XR2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, 6881 DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 6882 Op.getOperand(0), 6883 DAG.getIntPtrConstant(0))); 6884 SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32, XR1, XR2); 6885 SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0, 6886 MachinePointerInfo::getConstantPool(), 6887 false, false, 16); 6888 SDValue Unpck2 = getUnpackl(DAG, dl, MVT::v4i32, Unpck1, CLod0); 6889 SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck2); 6890 SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1, 6891 MachinePointerInfo::getConstantPool(), 6892 false, false, 16); 6893 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1); 6894 6895 // Add the halves; easiest way is to swap them into another reg first. 6896 int ShufMask[2] = { 1, -1 }; 6897 SDValue Shuf = DAG.getVectorShuffle(MVT::v2f64, dl, Sub, 6898 DAG.getUNDEF(MVT::v2f64), ShufMask); 6899 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::v2f64, Shuf, Sub); 6900 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Add, 6901 DAG.getIntPtrConstant(0)); 6902} 6903 6904// LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion. 6905SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op, 6906 SelectionDAG &DAG) const { 6907 DebugLoc dl = Op.getDebugLoc(); 6908 // FP constant to bias correct the final result. 
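  // 0x4330000000000000 is 2^52 as an IEEE-754 double: OR'ing the 32-bit input
  // into the low mantissa bits of 2^52 produces exactly 2^52 + x, so the
  // subtraction below recovers x converted to double.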
6909 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), 6910 MVT::f64); 6911 6912 // Load the 32-bit value into an XMM register. 6913 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, 6914 DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 6915 Op.getOperand(0), 6916 DAG.getIntPtrConstant(0))); 6917 6918 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 6919 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load), 6920 DAG.getIntPtrConstant(0)); 6921 6922 // Or the load with the bias. 6923 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, 6924 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, 6925 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 6926 MVT::v2f64, Load)), 6927 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, 6928 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 6929 MVT::v2f64, Bias))); 6930 Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 6931 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or), 6932 DAG.getIntPtrConstant(0)); 6933 6934 // Subtract the bias. 6935 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias); 6936 6937 // Handle final rounding. 6938 EVT DestVT = Op.getValueType(); 6939 6940 if (DestVT.bitsLT(MVT::f64)) { 6941 return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub, 6942 DAG.getIntPtrConstant(0)); 6943 } else if (DestVT.bitsGT(MVT::f64)) { 6944 return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub); 6945 } 6946 6947 // Handle final rounding. 6948 return Sub; 6949} 6950 6951SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op, 6952 SelectionDAG &DAG) const { 6953 SDValue N0 = Op.getOperand(0); 6954 DebugLoc dl = Op.getDebugLoc(); 6955 6956 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't 6957 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform 6958 // the optimization here. 6959 if (DAG.SignBitIsZero(N0)) 6960 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0); 6961 6962 EVT SrcVT = N0.getValueType(); 6963 EVT DstVT = Op.getValueType(); 6964 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64) 6965 return LowerUINT_TO_FP_i64(Op, DAG); 6966 else if (SrcVT == MVT::i32 && X86ScalarSSEf64) 6967 return LowerUINT_TO_FP_i32(Op, DAG); 6968 6969 // Make a 64-bit buffer, and use it to build an FILD. 6970 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64); 6971 if (SrcVT == MVT::i32) { 6972 SDValue WordOff = DAG.getConstant(4, getPointerTy()); 6973 SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl, 6974 getPointerTy(), StackSlot, WordOff); 6975 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), 6976 StackSlot, MachinePointerInfo(), 6977 false, false, 0); 6978 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32), 6979 OffsetSlot, MachinePointerInfo(), 6980 false, false, 0); 6981 SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG); 6982 return Fild; 6983 } 6984 6985 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP"); 6986 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), 6987 StackSlot, MachinePointerInfo(), 6988 false, false, 0); 6989 // For i64 source, we need to add the appropriate power of 2 if the input 6990 // was negative. This is the same as the optimization in 6991 // DAGTypeLegalizer::ExpandIntOp_UNIT_TO_FP, and for it to be safe here, 6992 // we must be careful to do the computation in x87 extended precision, not 6993 // in SSE. (The generic code can't know it's OK to do this, or how to.) 
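  // The fudge constant used below, 0x5F800000, is 2^64 as an IEEE-754 float:
  // if the i64 had its sign bit set, FILD produced value - 2^64, so adding
  // 2^64 (extended to f80) corrects the result.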
6994 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex(); 6995 MachineMemOperand *MMO = 6996 DAG.getMachineFunction() 6997 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 6998 MachineMemOperand::MOLoad, 8, 8); 6999 7000 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other); 7001 SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) }; 7002 SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops, 3, 7003 MVT::i64, MMO); 7004 7005 APInt FF(32, 0x5F800000ULL); 7006 7007 // Check whether the sign bit is set. 7008 SDValue SignSet = DAG.getSetCC(dl, getSetCCResultType(MVT::i64), 7009 Op.getOperand(0), DAG.getConstant(0, MVT::i64), 7010 ISD::SETLT); 7011 7012 // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits. 7013 SDValue FudgePtr = DAG.getConstantPool( 7014 ConstantInt::get(*DAG.getContext(), FF.zext(64)), 7015 getPointerTy()); 7016 7017 // Get a pointer to FF if the sign bit was set, or to 0 otherwise. 7018 SDValue Zero = DAG.getIntPtrConstant(0); 7019 SDValue Four = DAG.getIntPtrConstant(4); 7020 SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet, 7021 Zero, Four); 7022 FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset); 7023 7024 // Load the value out, extending it from f32 to f80. 7025 // FIXME: Avoid the extend by constructing the right constant pool? 7026 SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(), 7027 FudgePtr, MachinePointerInfo::getConstantPool(), 7028 MVT::f32, false, false, 4); 7029 // Extend everything to 80 bits to force it to be done on x87. 7030 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge); 7031 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0)); 7032} 7033 7034std::pair<SDValue,SDValue> X86TargetLowering:: 7035FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned) const { 7036 DebugLoc DL = Op.getDebugLoc(); 7037 7038 EVT DstTy = Op.getValueType(); 7039 7040 if (!IsSigned) { 7041 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT"); 7042 DstTy = MVT::i64; 7043 } 7044 7045 assert(DstTy.getSimpleVT() <= MVT::i64 && 7046 DstTy.getSimpleVT() >= MVT::i16 && 7047 "Unknown FP_TO_SINT to lower!"); 7048 7049 // These are really Legal. 7050 if (DstTy == MVT::i32 && 7051 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) 7052 return std::make_pair(SDValue(), SDValue()); 7053 if (Subtarget->is64Bit() && 7054 DstTy == MVT::i64 && 7055 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) 7056 return std::make_pair(SDValue(), SDValue()); 7057 7058 // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary 7059 // stack slot. 
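  // (The FP_TO_INT*_IN_MEM pseudo emitted below is later expanded to code
  //  that switches the x87 control word to round-toward-zero, performs the
  //  fistp, and then restores the control word.)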
7060 MachineFunction &MF = DAG.getMachineFunction(); 7061 unsigned MemSize = DstTy.getSizeInBits()/8; 7062 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false); 7063 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 7064 7065 7066 7067 unsigned Opc; 7068 switch (DstTy.getSimpleVT().SimpleTy) { 7069 default: llvm_unreachable("Invalid FP_TO_SINT to lower!"); 7070 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break; 7071 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break; 7072 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break; 7073 } 7074 7075 SDValue Chain = DAG.getEntryNode(); 7076 SDValue Value = Op.getOperand(0); 7077 EVT TheVT = Op.getOperand(0).getValueType(); 7078 if (isScalarFPTypeInSSEReg(TheVT)) { 7079 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!"); 7080 Chain = DAG.getStore(Chain, DL, Value, StackSlot, 7081 MachinePointerInfo::getFixedStack(SSFI), 7082 false, false, 0); 7083 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other); 7084 SDValue Ops[] = { 7085 Chain, StackSlot, DAG.getValueType(TheVT) 7086 }; 7087 7088 MachineMemOperand *MMO = 7089 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 7090 MachineMemOperand::MOLoad, MemSize, MemSize); 7091 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, 3, 7092 DstTy, MMO); 7093 Chain = Value.getValue(1); 7094 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false); 7095 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 7096 } 7097 7098 MachineMemOperand *MMO = 7099 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 7100 MachineMemOperand::MOStore, MemSize, MemSize); 7101 7102 // Build the FP_TO_INT*_IN_MEM 7103 SDValue Ops[] = { Chain, Value, StackSlot }; 7104 SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other), 7105 Ops, 3, DstTy, MMO); 7106 7107 return std::make_pair(FIST, StackSlot); 7108} 7109 7110SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op, 7111 SelectionDAG &DAG) const { 7112 if (Op.getValueType().isVector()) 7113 return SDValue(); 7114 7115 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, true); 7116 SDValue FIST = Vals.first, StackSlot = Vals.second; 7117 // If FP_TO_INTHelper failed, the node is actually supposed to be Legal. 7118 if (FIST.getNode() == 0) return Op; 7119 7120 // Load the result. 7121 return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(), 7122 FIST, StackSlot, MachinePointerInfo(), false, false, 0); 7123} 7124 7125SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op, 7126 SelectionDAG &DAG) const { 7127 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, false); 7128 SDValue FIST = Vals.first, StackSlot = Vals.second; 7129 assert(FIST.getNode() && "Unexpected failure"); 7130 7131 // Load the result. 
7132 return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(), 7133 FIST, StackSlot, MachinePointerInfo(), false, false, 0); 7134} 7135 7136SDValue X86TargetLowering::LowerFABS(SDValue Op, 7137 SelectionDAG &DAG) const { 7138 LLVMContext *Context = DAG.getContext(); 7139 DebugLoc dl = Op.getDebugLoc(); 7140 EVT VT = Op.getValueType(); 7141 EVT EltVT = VT; 7142 if (VT.isVector()) 7143 EltVT = VT.getVectorElementType(); 7144 std::vector<Constant*> CV; 7145 if (EltVT == MVT::f64) { 7146 Constant *C = ConstantFP::get(*Context, APFloat(APInt(64, ~(1ULL << 63)))); 7147 CV.push_back(C); 7148 CV.push_back(C); 7149 } else { 7150 Constant *C = ConstantFP::get(*Context, APFloat(APInt(32, ~(1U << 31)))); 7151 CV.push_back(C); 7152 CV.push_back(C); 7153 CV.push_back(C); 7154 CV.push_back(C); 7155 } 7156 Constant *C = ConstantVector::get(CV); 7157 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 7158 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 7159 MachinePointerInfo::getConstantPool(), 7160 false, false, 16); 7161 return DAG.getNode(X86ISD::FAND, dl, VT, Op.getOperand(0), Mask); 7162} 7163 7164SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) const { 7165 LLVMContext *Context = DAG.getContext(); 7166 DebugLoc dl = Op.getDebugLoc(); 7167 EVT VT = Op.getValueType(); 7168 EVT EltVT = VT; 7169 if (VT.isVector()) 7170 EltVT = VT.getVectorElementType(); 7171 std::vector<Constant*> CV; 7172 if (EltVT == MVT::f64) { 7173 Constant *C = ConstantFP::get(*Context, APFloat(APInt(64, 1ULL << 63))); 7174 CV.push_back(C); 7175 CV.push_back(C); 7176 } else { 7177 Constant *C = ConstantFP::get(*Context, APFloat(APInt(32, 1U << 31))); 7178 CV.push_back(C); 7179 CV.push_back(C); 7180 CV.push_back(C); 7181 CV.push_back(C); 7182 } 7183 Constant *C = ConstantVector::get(CV); 7184 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 7185 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 7186 MachinePointerInfo::getConstantPool(), 7187 false, false, 16); 7188 if (VT.isVector()) { 7189 return DAG.getNode(ISD::BITCAST, dl, VT, 7190 DAG.getNode(ISD::XOR, dl, MVT::v2i64, 7191 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, 7192 Op.getOperand(0)), 7193 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Mask))); 7194 } else { 7195 return DAG.getNode(X86ISD::FXOR, dl, VT, Op.getOperand(0), Mask); 7196 } 7197} 7198 7199SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { 7200 LLVMContext *Context = DAG.getContext(); 7201 SDValue Op0 = Op.getOperand(0); 7202 SDValue Op1 = Op.getOperand(1); 7203 DebugLoc dl = Op.getDebugLoc(); 7204 EVT VT = Op.getValueType(); 7205 EVT SrcVT = Op1.getValueType(); 7206 7207 // If second operand is smaller, extend it first. 7208 if (SrcVT.bitsLT(VT)) { 7209 Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1); 7210 SrcVT = VT; 7211 } 7212 // And if it is bigger, shrink it first. 7213 if (SrcVT.bitsGT(VT)) { 7214 Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1)); 7215 SrcVT = VT; 7216 } 7217 7218 // At this point the operands and the result should have the same 7219 // type, and that won't be f80 since that is not custom lowered. 7220 7221 // First get the sign bit of second operand. 
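  // Roughly: copysign(x, y) = (x & ~sign-mask) | (y & sign-mask), with both
  // masks materialized from the constant pool below.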
7222 std::vector<Constant*> CV; 7223 if (SrcVT == MVT::f64) { 7224 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 1ULL << 63)))); 7225 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 0)))); 7226 } else { 7227 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 1U << 31)))); 7228 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 7229 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 7230 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 7231 } 7232 Constant *C = ConstantVector::get(CV); 7233 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 7234 SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx, 7235 MachinePointerInfo::getConstantPool(), 7236 false, false, 16); 7237 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1); 7238 7239 // Shift sign bit right or left if the two operands have different types. 7240 if (SrcVT.bitsGT(VT)) { 7241 // Op0 is MVT::f32, Op1 is MVT::f64. 7242 SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, SignBit); 7243 SignBit = DAG.getNode(X86ISD::FSRL, dl, MVT::v2f64, SignBit, 7244 DAG.getConstant(32, MVT::i32)); 7245 SignBit = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, SignBit); 7246 SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, SignBit, 7247 DAG.getIntPtrConstant(0)); 7248 } 7249 7250 // Clear first operand sign bit. 7251 CV.clear(); 7252 if (VT == MVT::f64) { 7253 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, ~(1ULL << 63))))); 7254 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 0)))); 7255 } else { 7256 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, ~(1U << 31))))); 7257 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 7258 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 7259 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 7260 } 7261 C = ConstantVector::get(CV); 7262 CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 7263 SDValue Mask2 = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 7264 MachinePointerInfo::getConstantPool(), 7265 false, false, 16); 7266 SDValue Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Mask2); 7267 7268 // Or the value with the sign bit. 7269 return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit); 7270} 7271 7272SDValue X86TargetLowering::LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) const { 7273 SDValue N0 = Op.getOperand(0); 7274 DebugLoc dl = Op.getDebugLoc(); 7275 EVT VT = Op.getValueType(); 7276 7277 // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1). 7278 SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0, 7279 DAG.getConstant(1, VT)); 7280 return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT)); 7281} 7282 7283/// Emit nodes that will be selected as "test Op0,Op0", or something 7284/// equivalent. 7285SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, 7286 SelectionDAG &DAG) const { 7287 DebugLoc dl = Op.getDebugLoc(); 7288 7289 // CF and OF aren't always set the way we want. Determine which 7290 // of these we need. 
7291 bool NeedCF = false;
7292 bool NeedOF = false;
7293 switch (X86CC) {
7294 default: break;
7295 case X86::COND_A: case X86::COND_AE:
7296 case X86::COND_B: case X86::COND_BE:
7297 NeedCF = true;
7298 break;
7299 case X86::COND_G: case X86::COND_GE:
7300 case X86::COND_L: case X86::COND_LE:
7301 case X86::COND_O: case X86::COND_NO:
7302 NeedOF = true;
7303 break;
7304 }
7305
7306 // See if we can use the EFLAGS value from the operand instead of
7307 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
7308 // we prove that the arithmetic won't overflow, we can't use OF or CF.
7309 if (Op.getResNo() != 0 || NeedOF || NeedCF)
7310 // Emit a CMP with 0, which is the TEST pattern.
7311 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
7312 DAG.getConstant(0, Op.getValueType()));
7313
7314 unsigned Opcode = 0;
7315 unsigned NumOperands = 0;
7316 switch (Op.getNode()->getOpcode()) {
7317 case ISD::ADD:
7318 // Due to an isel shortcoming, be conservative if this add is likely to be
7319 // selected as part of a load-modify-store instruction. When the root node
7320 // in a match is a store, isel doesn't know how to remap non-chain non-flag
7321 // uses of other nodes in the match, such as the ADD in this case. This
7322 // leads to the ADD being left around and reselected, with the result being
7323 // two adds in the output. Alas, even if none of our users are stores, that
7324 // doesn't prove we're O.K. Ergo, if we have any parents that aren't
7325 // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require
7326 // climbing the DAG back to the root, and it doesn't seem to be worth the
7327 // effort.
7328 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
7329 UE = Op.getNode()->use_end(); UI != UE; ++UI)
7330 if (UI->getOpcode() != ISD::CopyToReg && UI->getOpcode() != ISD::SETCC)
7331 goto default_case;
7332
7333 if (ConstantSDNode *C =
7334 dyn_cast<ConstantSDNode>(Op.getNode()->getOperand(1))) {
7335 // An add of one will be selected as an INC.
7336 if (C->getAPIntValue() == 1) {
7337 Opcode = X86ISD::INC;
7338 NumOperands = 1;
7339 break;
7340 }
7341
7342 // An add of negative one (subtract of one) will be selected as a DEC.
7343 if (C->getAPIntValue().isAllOnesValue()) {
7344 Opcode = X86ISD::DEC;
7345 NumOperands = 1;
7346 break;
7347 }
7348 }
7349
7350 // Otherwise use a regular EFLAGS-setting add.
7351 Opcode = X86ISD::ADD;
7352 NumOperands = 2;
7353 break;
7354 case ISD::AND: {
7355 // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
7356 // because a TEST instruction will be better.
7357 bool NonFlagUse = false;
7358 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
7359 UE = Op.getNode()->use_end(); UI != UE; ++UI) {
7360 SDNode *User = *UI;
7361 unsigned UOpNo = UI.getOperandNo();
7362 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
7363 // Look past the truncate.
7364 UOpNo = User->use_begin().getOperandNo();
7365 User = *User->use_begin();
7366 }
7367
7368 if (User->getOpcode() != ISD::BRCOND &&
7369 User->getOpcode() != ISD::SETCC &&
7370 (User->getOpcode() != ISD::SELECT || UOpNo != 0)) {
7371 NonFlagUse = true;
7372 break;
7373 }
7374 }
7375
7376 if (!NonFlagUse)
7377 break;
7378 }
7379 // FALL THROUGH
7380 case ISD::SUB:
7381 case ISD::OR:
7382 case ISD::XOR:
7383 // Due to the ISEL shortcoming noted above, be conservative if this op is
7384 // likely to be selected as part of a load-modify-store instruction.
7385 for (SDNode::use_iterator UI = Op.getNode()->use_begin(), 7386 UE = Op.getNode()->use_end(); UI != UE; ++UI) 7387 if (UI->getOpcode() == ISD::STORE) 7388 goto default_case; 7389 7390 // Otherwise use a regular EFLAGS-setting instruction. 7391 switch (Op.getNode()->getOpcode()) { 7392 default: llvm_unreachable("unexpected operator!"); 7393 case ISD::SUB: Opcode = X86ISD::SUB; break; 7394 case ISD::OR: Opcode = X86ISD::OR; break; 7395 case ISD::XOR: Opcode = X86ISD::XOR; break; 7396 case ISD::AND: Opcode = X86ISD::AND; break; 7397 } 7398 7399 NumOperands = 2; 7400 break; 7401 case X86ISD::ADD: 7402 case X86ISD::SUB: 7403 case X86ISD::INC: 7404 case X86ISD::DEC: 7405 case X86ISD::OR: 7406 case X86ISD::XOR: 7407 case X86ISD::AND: 7408 return SDValue(Op.getNode(), 1); 7409 default: 7410 default_case: 7411 break; 7412 } 7413 7414 if (Opcode == 0) 7415 // Emit a CMP with 0, which is the TEST pattern. 7416 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op, 7417 DAG.getConstant(0, Op.getValueType())); 7418 7419 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); 7420 SmallVector<SDValue, 4> Ops; 7421 for (unsigned i = 0; i != NumOperands; ++i) 7422 Ops.push_back(Op.getOperand(i)); 7423 7424 SDValue New = DAG.getNode(Opcode, dl, VTs, &Ops[0], NumOperands); 7425 DAG.ReplaceAllUsesWith(Op, New); 7426 return SDValue(New.getNode(), 1); 7427} 7428 7429/// Emit nodes that will be selected as "cmp Op0,Op1", or something 7430/// equivalent. 7431SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC, 7432 SelectionDAG &DAG) const { 7433 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1)) 7434 if (C->getAPIntValue() == 0) 7435 return EmitTest(Op0, X86CC, DAG); 7436 7437 DebugLoc dl = Op0.getDebugLoc(); 7438 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1); 7439} 7440 7441/// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node 7442/// if it's possible. 7443SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC, 7444 DebugLoc dl, SelectionDAG &DAG) const { 7445 SDValue Op0 = And.getOperand(0); 7446 SDValue Op1 = And.getOperand(1); 7447 if (Op0.getOpcode() == ISD::TRUNCATE) 7448 Op0 = Op0.getOperand(0); 7449 if (Op1.getOpcode() == ISD::TRUNCATE) 7450 Op1 = Op1.getOperand(0); 7451 7452 SDValue LHS, RHS; 7453 if (Op1.getOpcode() == ISD::SHL) 7454 std::swap(Op0, Op1); 7455 if (Op0.getOpcode() == ISD::SHL) { 7456 if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0))) 7457 if (And00C->getZExtValue() == 1) { 7458 // If we looked past a truncate, check that it's only truncating away 7459 // known zeros. 7460 unsigned BitWidth = Op0.getValueSizeInBits(); 7461 unsigned AndBitWidth = And.getValueSizeInBits(); 7462 if (BitWidth > AndBitWidth) { 7463 APInt Mask = APInt::getAllOnesValue(BitWidth), Zeros, Ones; 7464 DAG.ComputeMaskedBits(Op0, Mask, Zeros, Ones); 7465 if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth) 7466 return SDValue(); 7467 } 7468 LHS = Op1; 7469 RHS = Op0.getOperand(1); 7470 } 7471 } else if (Op1.getOpcode() == ISD::Constant) { 7472 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1); 7473 SDValue AndLHS = Op0; 7474 if (AndRHS->getZExtValue() == 1 && AndLHS.getOpcode() == ISD::SRL) { 7475 LHS = AndLHS.getOperand(0); 7476 RHS = AndLHS.getOperand(1); 7477 } 7478 } 7479 7480 if (LHS.getNode()) { 7481 // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT 7482 // instruction. Since the shift amount is in-range-or-undefined, we know 7483 // that doing a bittest on the i32 value is ok. 
We extend to i32 because 7484 // the encoding for the i16 version is larger than the i32 version. 7485 // Also promote i16 to i32 for performance / code size reason. 7486 if (LHS.getValueType() == MVT::i8 || 7487 LHS.getValueType() == MVT::i16) 7488 LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS); 7489 7490 // If the operand types disagree, extend the shift amount to match. Since 7491 // BT ignores high bits (like shifts) we can use anyextend. 7492 if (LHS.getValueType() != RHS.getValueType()) 7493 RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS); 7494 7495 SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS); 7496 unsigned Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B; 7497 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 7498 DAG.getConstant(Cond, MVT::i8), BT); 7499 } 7500 7501 return SDValue(); 7502} 7503 7504SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { 7505 assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer"); 7506 SDValue Op0 = Op.getOperand(0); 7507 SDValue Op1 = Op.getOperand(1); 7508 DebugLoc dl = Op.getDebugLoc(); 7509 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 7510 7511 // Optimize to BT if possible. 7512 // Lower (X & (1 << N)) == 0 to BT(X, N). 7513 // Lower ((X >>u N) & 1) != 0 to BT(X, N). 7514 // Lower ((X >>s N) & 1) != 0 to BT(X, N). 7515 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() && 7516 Op1.getOpcode() == ISD::Constant && 7517 cast<ConstantSDNode>(Op1)->isNullValue() && 7518 (CC == ISD::SETEQ || CC == ISD::SETNE)) { 7519 SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG); 7520 if (NewSetCC.getNode()) 7521 return NewSetCC; 7522 } 7523 7524 // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of 7525 // these. 7526 if (Op1.getOpcode() == ISD::Constant && 7527 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 || 7528 cast<ConstantSDNode>(Op1)->isNullValue()) && 7529 (CC == ISD::SETEQ || CC == ISD::SETNE)) { 7530 7531 // If the input is a setcc, then reuse the input setcc or use a new one with 7532 // the inverted condition. 7533 if (Op0.getOpcode() == X86ISD::SETCC) { 7534 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0); 7535 bool Invert = (CC == ISD::SETNE) ^ 7536 cast<ConstantSDNode>(Op1)->isNullValue(); 7537 if (!Invert) return Op0; 7538 7539 CCode = X86::GetOppositeBranchCondition(CCode); 7540 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 7541 DAG.getConstant(CCode, MVT::i8), Op0.getOperand(1)); 7542 } 7543 } 7544 7545 bool isFP = Op1.getValueType().isFloatingPoint(); 7546 unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG); 7547 if (X86CC == X86::COND_INVALID) 7548 return SDValue(); 7549 7550 SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, DAG); 7551 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 7552 DAG.getConstant(X86CC, MVT::i8), EFLAGS); 7553} 7554 7555SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) const { 7556 SDValue Cond; 7557 SDValue Op0 = Op.getOperand(0); 7558 SDValue Op1 = Op.getOperand(1); 7559 SDValue CC = Op.getOperand(2); 7560 EVT VT = Op.getValueType(); 7561 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 7562 bool isFP = Op.getOperand(1).getValueType().isFloatingPoint(); 7563 DebugLoc dl = Op.getDebugLoc(); 7564 7565 if (isFP) { 7566 unsigned SSECC = 8; 7567 EVT VT0 = Op0.getValueType(); 7568 assert(VT0 == MVT::v4f32 || VT0 == MVT::v2f64); 7569 unsigned Opc = VT0 == MVT::v4f32 ? 
X86ISD::CMPPS : X86ISD::CMPPD; 7570 bool Swap = false; 7571 7572 switch (SetCCOpcode) { 7573 default: break; 7574 case ISD::SETOEQ: 7575 case ISD::SETEQ: SSECC = 0; break; 7576 case ISD::SETOGT: 7577 case ISD::SETGT: Swap = true; // Fallthrough 7578 case ISD::SETLT: 7579 case ISD::SETOLT: SSECC = 1; break; 7580 case ISD::SETOGE: 7581 case ISD::SETGE: Swap = true; // Fallthrough 7582 case ISD::SETLE: 7583 case ISD::SETOLE: SSECC = 2; break; 7584 case ISD::SETUO: SSECC = 3; break; 7585 case ISD::SETUNE: 7586 case ISD::SETNE: SSECC = 4; break; 7587 case ISD::SETULE: Swap = true; 7588 case ISD::SETUGE: SSECC = 5; break; 7589 case ISD::SETULT: Swap = true; 7590 case ISD::SETUGT: SSECC = 6; break; 7591 case ISD::SETO: SSECC = 7; break; 7592 } 7593 if (Swap) 7594 std::swap(Op0, Op1); 7595 7596 // In the two special cases we can't handle, emit two comparisons. 7597 if (SSECC == 8) { 7598 if (SetCCOpcode == ISD::SETUEQ) { 7599 SDValue UNORD, EQ; 7600 UNORD = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(3, MVT::i8)); 7601 EQ = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(0, MVT::i8)); 7602 return DAG.getNode(ISD::OR, dl, VT, UNORD, EQ); 7603 } 7604 else if (SetCCOpcode == ISD::SETONE) { 7605 SDValue ORD, NEQ; 7606 ORD = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(7, MVT::i8)); 7607 NEQ = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(4, MVT::i8)); 7608 return DAG.getNode(ISD::AND, dl, VT, ORD, NEQ); 7609 } 7610 llvm_unreachable("Illegal FP comparison"); 7611 } 7612 // Handle all other FP comparisons here. 7613 return DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(SSECC, MVT::i8)); 7614 } 7615 7616 // We are handling one of the integer comparisons here. Since SSE only has 7617 // GT and EQ comparisons for integer, swapping operands and multiple 7618 // operations may be required for some comparisons. 7619 unsigned Opc = 0, EQOpc = 0, GTOpc = 0; 7620 bool Swap = false, Invert = false, FlipSigns = false; 7621 7622 switch (VT.getSimpleVT().SimpleTy) { 7623 default: break; 7624 case MVT::v16i8: EQOpc = X86ISD::PCMPEQB; GTOpc = X86ISD::PCMPGTB; break; 7625 case MVT::v8i16: EQOpc = X86ISD::PCMPEQW; GTOpc = X86ISD::PCMPGTW; break; 7626 case MVT::v4i32: EQOpc = X86ISD::PCMPEQD; GTOpc = X86ISD::PCMPGTD; break; 7627 case MVT::v2i64: EQOpc = X86ISD::PCMPEQQ; GTOpc = X86ISD::PCMPGTQ; break; 7628 } 7629 7630 switch (SetCCOpcode) { 7631 default: break; 7632 case ISD::SETNE: Invert = true; 7633 case ISD::SETEQ: Opc = EQOpc; break; 7634 case ISD::SETLT: Swap = true; 7635 case ISD::SETGT: Opc = GTOpc; break; 7636 case ISD::SETGE: Swap = true; 7637 case ISD::SETLE: Opc = GTOpc; Invert = true; break; 7638 case ISD::SETULT: Swap = true; 7639 case ISD::SETUGT: Opc = GTOpc; FlipSigns = true; break; 7640 case ISD::SETUGE: Swap = true; 7641 case ISD::SETULE: Opc = GTOpc; FlipSigns = true; Invert = true; break; 7642 } 7643 if (Swap) 7644 std::swap(Op0, Op1); 7645 7646 // Since SSE has no unsigned integer comparisons, we need to flip the sign 7647 // bits of the inputs before performing those operations. 
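  // For example (illustrative): a >u b on v4i32 is evaluated as
  // (a ^ 0x80000000) >s (b ^ 0x80000000), which PCMPGTD can handle.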
7648 if (FlipSigns) { 7649 EVT EltVT = VT.getVectorElementType(); 7650 SDValue SignBit = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), 7651 EltVT); 7652 std::vector<SDValue> SignBits(VT.getVectorNumElements(), SignBit); 7653 SDValue SignVec = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &SignBits[0], 7654 SignBits.size()); 7655 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SignVec); 7656 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SignVec); 7657 } 7658 7659 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 7660 7661 // If the logical-not of the result is required, perform that now. 7662 if (Invert) 7663 Result = DAG.getNOT(dl, Result, VT); 7664 7665 return Result; 7666} 7667 7668// isX86LogicalCmp - Return true if opcode is a X86 logical comparison. 7669static bool isX86LogicalCmp(SDValue Op) { 7670 unsigned Opc = Op.getNode()->getOpcode(); 7671 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI) 7672 return true; 7673 if (Op.getResNo() == 1 && 7674 (Opc == X86ISD::ADD || 7675 Opc == X86ISD::SUB || 7676 Opc == X86ISD::ADC || 7677 Opc == X86ISD::SBB || 7678 Opc == X86ISD::SMUL || 7679 Opc == X86ISD::UMUL || 7680 Opc == X86ISD::INC || 7681 Opc == X86ISD::DEC || 7682 Opc == X86ISD::OR || 7683 Opc == X86ISD::XOR || 7684 Opc == X86ISD::AND)) 7685 return true; 7686 7687 if (Op.getResNo() == 2 && Opc == X86ISD::UMUL) 7688 return true; 7689 7690 return false; 7691} 7692 7693static bool isZero(SDValue V) { 7694 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V); 7695 return C && C->isNullValue(); 7696} 7697 7698static bool isAllOnes(SDValue V) { 7699 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V); 7700 return C && C->isAllOnesValue(); 7701} 7702 7703SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 7704 bool addTest = true; 7705 SDValue Cond = Op.getOperand(0); 7706 SDValue Op1 = Op.getOperand(1); 7707 SDValue Op2 = Op.getOperand(2); 7708 DebugLoc DL = Op.getDebugLoc(); 7709 SDValue CC; 7710 7711 if (Cond.getOpcode() == ISD::SETCC) { 7712 SDValue NewCond = LowerSETCC(Cond, DAG); 7713 if (NewCond.getNode()) 7714 Cond = NewCond; 7715 } 7716 7717 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y 7718 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y 7719 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y 7720 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y 7721 if (Cond.getOpcode() == X86ISD::SETCC && 7722 Cond.getOperand(1).getOpcode() == X86ISD::CMP && 7723 isZero(Cond.getOperand(1).getOperand(1))) { 7724 SDValue Cmp = Cond.getOperand(1); 7725 7726 unsigned CondCode =cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue(); 7727 7728 if ((isAllOnes(Op1) || isAllOnes(Op2)) && 7729 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) { 7730 SDValue Y = isAllOnes(Op2) ? Op1 : Op2; 7731 7732 SDValue CmpOp0 = Cmp.getOperand(0); 7733 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, 7734 CmpOp0, DAG.getConstant(1, CmpOp0.getValueType())); 7735 7736 SDValue Res = // Res = 0 or -1. 7737 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(), 7738 DAG.getConstant(X86::COND_B, MVT::i8), Cmp); 7739 7740 if (isAllOnes(Op1) != (CondCode == X86::COND_E)) 7741 Res = DAG.getNOT(DL, Res, Res.getValueType()); 7742 7743 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2); 7744 if (N2C == 0 || !N2C->isNullValue()) 7745 Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y); 7746 return Res; 7747 } 7748 } 7749 7750 // Look past (and (setcc_carry (cmp ...)), 1). 
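// SETCC_CARRY materializes the carry flag as all-zeros or all-ones, so
// masking it with 1 does not change which flag is being tested and the AND
// can safely be skipped here.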
7751 if (Cond.getOpcode() == ISD::AND && 7752 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) { 7753 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 7754 if (C && C->getAPIntValue() == 1) 7755 Cond = Cond.getOperand(0); 7756 } 7757 7758 // If condition flag is set by a X86ISD::CMP, then use it as the condition 7759 // setting operand in place of the X86ISD::SETCC. 7760 if (Cond.getOpcode() == X86ISD::SETCC || 7761 Cond.getOpcode() == X86ISD::SETCC_CARRY) { 7762 CC = Cond.getOperand(0); 7763 7764 SDValue Cmp = Cond.getOperand(1); 7765 unsigned Opc = Cmp.getOpcode(); 7766 EVT VT = Op.getValueType(); 7767 7768 bool IllegalFPCMov = false; 7769 if (VT.isFloatingPoint() && !VT.isVector() && 7770 !isScalarFPTypeInSSEReg(VT)) // FPStack? 7771 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue()); 7772 7773 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) || 7774 Opc == X86ISD::BT) { // FIXME 7775 Cond = Cmp; 7776 addTest = false; 7777 } 7778 } 7779 7780 if (addTest) { 7781 // Look pass the truncate. 7782 if (Cond.getOpcode() == ISD::TRUNCATE) 7783 Cond = Cond.getOperand(0); 7784 7785 // We know the result of AND is compared against zero. Try to match 7786 // it to BT. 7787 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) { 7788 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG); 7789 if (NewSetCC.getNode()) { 7790 CC = NewSetCC.getOperand(0); 7791 Cond = NewSetCC.getOperand(1); 7792 addTest = false; 7793 } 7794 } 7795 } 7796 7797 if (addTest) { 7798 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 7799 Cond = EmitTest(Cond, X86::COND_NE, DAG); 7800 } 7801 7802 // a < b ? -1 : 0 -> RES = ~setcc_carry 7803 // a < b ? 0 : -1 -> RES = setcc_carry 7804 // a >= b ? -1 : 0 -> RES = setcc_carry 7805 // a >= b ? 0 : -1 -> RES = ~setcc_carry 7806 if (Cond.getOpcode() == X86ISD::CMP) { 7807 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue(); 7808 7809 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) && 7810 (isAllOnes(Op1) || isAllOnes(Op2)) && (isZero(Op1) || isZero(Op2))) { 7811 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(), 7812 DAG.getConstant(X86::COND_B, MVT::i8), Cond); 7813 if (isAllOnes(Op1) != (CondCode == X86::COND_B)) 7814 return DAG.getNOT(DL, Res, Res.getValueType()); 7815 return Res; 7816 } 7817 } 7818 7819 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if 7820 // condition is true. 7821 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue); 7822 SDValue Ops[] = { Op2, Op1, CC, Cond }; 7823 return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops, array_lengthof(Ops)); 7824} 7825 7826// isAndOrOfSingleUseSetCCs - Return true if node is an ISD::AND or 7827// ISD::OR of two X86ISD::SETCC nodes each of which has no other use apart 7828// from the AND / OR. 7829static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) { 7830 Opc = Op.getOpcode(); 7831 if (Opc != ISD::OR && Opc != ISD::AND) 7832 return false; 7833 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC && 7834 Op.getOperand(0).hasOneUse() && 7835 Op.getOperand(1).getOpcode() == X86ISD::SETCC && 7836 Op.getOperand(1).hasOneUse()); 7837} 7838 7839// isXor1OfSetCC - Return true if node is an ISD::XOR of a X86ISD::SETCC and 7840// 1 and that the SETCC node has a single use. 
7841static bool isXor1OfSetCC(SDValue Op) { 7842 if (Op.getOpcode() != ISD::XOR) 7843 return false; 7844 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 7845 if (N1C && N1C->getAPIntValue() == 1) { 7846 return Op.getOperand(0).getOpcode() == X86ISD::SETCC && 7847 Op.getOperand(0).hasOneUse(); 7848 } 7849 return false; 7850} 7851 7852SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { 7853 bool addTest = true; 7854 SDValue Chain = Op.getOperand(0); 7855 SDValue Cond = Op.getOperand(1); 7856 SDValue Dest = Op.getOperand(2); 7857 DebugLoc dl = Op.getDebugLoc(); 7858 SDValue CC; 7859 7860 if (Cond.getOpcode() == ISD::SETCC) { 7861 SDValue NewCond = LowerSETCC(Cond, DAG); 7862 if (NewCond.getNode()) 7863 Cond = NewCond; 7864 } 7865#if 0 7866 // FIXME: LowerXALUO doesn't handle these!! 7867 else if (Cond.getOpcode() == X86ISD::ADD || 7868 Cond.getOpcode() == X86ISD::SUB || 7869 Cond.getOpcode() == X86ISD::SMUL || 7870 Cond.getOpcode() == X86ISD::UMUL) 7871 Cond = LowerXALUO(Cond, DAG); 7872#endif 7873 7874 // Look pass (and (setcc_carry (cmp ...)), 1). 7875 if (Cond.getOpcode() == ISD::AND && 7876 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) { 7877 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 7878 if (C && C->getAPIntValue() == 1) 7879 Cond = Cond.getOperand(0); 7880 } 7881 7882 // If condition flag is set by a X86ISD::CMP, then use it as the condition 7883 // setting operand in place of the X86ISD::SETCC. 7884 if (Cond.getOpcode() == X86ISD::SETCC || 7885 Cond.getOpcode() == X86ISD::SETCC_CARRY) { 7886 CC = Cond.getOperand(0); 7887 7888 SDValue Cmp = Cond.getOperand(1); 7889 unsigned Opc = Cmp.getOpcode(); 7890 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp?? 7891 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) { 7892 Cond = Cmp; 7893 addTest = false; 7894 } else { 7895 switch (cast<ConstantSDNode>(CC)->getZExtValue()) { 7896 default: break; 7897 case X86::COND_O: 7898 case X86::COND_B: 7899 // These can only come from an arithmetic instruction with overflow, 7900 // e.g. SADDO, UADDO. 7901 Cond = Cond.getNode()->getOperand(1); 7902 addTest = false; 7903 break; 7904 } 7905 } 7906 } else { 7907 unsigned CondOpc; 7908 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) { 7909 SDValue Cmp = Cond.getOperand(0).getOperand(1); 7910 if (CondOpc == ISD::OR) { 7911 // Also, recognize the pattern generated by an FCMP_UNE. We can emit 7912 // two branches instead of an explicit OR instruction with a 7913 // separate test. 7914 if (Cmp == Cond.getOperand(1).getOperand(1) && 7915 isX86LogicalCmp(Cmp)) { 7916 CC = Cond.getOperand(0).getOperand(0); 7917 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 7918 Chain, Dest, CC, Cmp); 7919 CC = Cond.getOperand(1).getOperand(0); 7920 Cond = Cmp; 7921 addTest = false; 7922 } 7923 } else { // ISD::AND 7924 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit 7925 // two branches instead of an explicit AND instruction with a 7926 // separate test. However, we only do this if this block doesn't 7927 // have a fall-through edge, because this requires an explicit 7928 // jmp when the condition is false. 
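// In effect, br (and (setcc cc0), (setcc cc1)), %true, %false becomes a
// branch to %false on !cc0, a branch to %false on !cc1, and a fall-through
// (or jmp) to %true, which is why the successors get rewritten below.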
7929 if (Cmp == Cond.getOperand(1).getOperand(1) && 7930 isX86LogicalCmp(Cmp) && 7931 Op.getNode()->hasOneUse()) { 7932 X86::CondCode CCode = 7933 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0); 7934 CCode = X86::GetOppositeBranchCondition(CCode); 7935 CC = DAG.getConstant(CCode, MVT::i8); 7936 SDNode *User = *Op.getNode()->use_begin(); 7937 // Look for an unconditional branch following this conditional branch. 7938 // We need this because we need to reverse the successors in order 7939 // to implement FCMP_OEQ. 7940 if (User->getOpcode() == ISD::BR) { 7941 SDValue FalseBB = User->getOperand(1); 7942 SDNode *NewBR = 7943 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest); 7944 assert(NewBR == User); 7945 (void)NewBR; 7946 Dest = FalseBB; 7947 7948 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 7949 Chain, Dest, CC, Cmp); 7950 X86::CondCode CCode = 7951 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0); 7952 CCode = X86::GetOppositeBranchCondition(CCode); 7953 CC = DAG.getConstant(CCode, MVT::i8); 7954 Cond = Cmp; 7955 addTest = false; 7956 } 7957 } 7958 } 7959 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) { 7960 // Recognize xorb (setcc), 1 patterns. The xor inverts the condition. 7961 // It should be transformed by the dag combiner except when the condition 7962 // is set by an arithmetic-with-overflow node. 7963 X86::CondCode CCode = 7964 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0); 7965 CCode = X86::GetOppositeBranchCondition(CCode); 7966 CC = DAG.getConstant(CCode, MVT::i8); 7967 Cond = Cond.getOperand(0).getOperand(1); 7968 addTest = false; 7969 } 7970 } 7971 7972 if (addTest) { 7973 // Look past the truncate. 7974 if (Cond.getOpcode() == ISD::TRUNCATE) 7975 Cond = Cond.getOperand(0); 7976 7977 // We know the result of AND is compared against zero. Try to match 7978 // it to BT. 7979 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) { 7980 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG); 7981 if (NewSetCC.getNode()) { 7982 CC = NewSetCC.getOperand(0); 7983 Cond = NewSetCC.getOperand(1); 7984 addTest = false; 7985 } 7986 } 7987 } 7988 7989 if (addTest) { 7990 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 7991 Cond = EmitTest(Cond, X86::COND_NE, DAG); 7992 } 7993 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 7994 Chain, Dest, CC, Cond); 7995} 7996 7997 7998// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets. 7999// Calls to _alloca are needed to probe the stack when allocating more than 4k 8000// bytes in one go. Touching the stack at 4K increments is necessary to ensure 8001// that the guard pages used by the OS virtual memory manager are allocated in 8002// the correct sequence. 8003SDValue 8004X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, 8005 SelectionDAG &DAG) const { 8006 assert((Subtarget->isTargetCygMing() || Subtarget->isTargetWindows()) && 8007 "This should be used only on Windows targets"); 8008 assert(!Subtarget->isTargetEnvMacho()); 8009 DebugLoc dl = Op.getDebugLoc(); 8010 8011 // Get the inputs. 8012 SDValue Chain = Op.getOperand(0); 8013 SDValue Size = Op.getOperand(1); 8014 // FIXME: Ensure alignment here 8015 8016 SDValue Flag; 8017 8018 EVT SPTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32; 8019 unsigned Reg = (Subtarget->is64Bit() ?
X86::RAX : X86::EAX); 8020 8021 Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag); 8022 Flag = Chain.getValue(1); 8023 8024 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 8025 8026 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag); 8027 Flag = Chain.getValue(1); 8028 8029 Chain = DAG.getCopyFromReg(Chain, dl, X86StackPtr, SPTy).getValue(1); 8030 8031 SDValue Ops1[2] = { Chain.getValue(0), Chain }; 8032 return DAG.getMergeValues(Ops1, 2, dl); 8033} 8034 8035SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { 8036 MachineFunction &MF = DAG.getMachineFunction(); 8037 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 8038 8039 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 8040 DebugLoc DL = Op.getDebugLoc(); 8041 8042 if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) { 8043 // vastart just stores the address of the VarArgsFrameIndex slot into the 8044 // memory location argument. 8045 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 8046 getPointerTy()); 8047 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1), 8048 MachinePointerInfo(SV), false, false, 0); 8049 } 8050 8051 // __va_list_tag: 8052 // gp_offset (0 - 6 * 8) 8053 // fp_offset (48 - 48 + 8 * 16) 8054 // overflow_arg_area (point to parameters coming in memory). 8055 // reg_save_area 8056 SmallVector<SDValue, 8> MemOps; 8057 SDValue FIN = Op.getOperand(1); 8058 // Store gp_offset 8059 SDValue Store = DAG.getStore(Op.getOperand(0), DL, 8060 DAG.getConstant(FuncInfo->getVarArgsGPOffset(), 8061 MVT::i32), 8062 FIN, MachinePointerInfo(SV), false, false, 0); 8063 MemOps.push_back(Store); 8064 8065 // Store fp_offset 8066 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), 8067 FIN, DAG.getIntPtrConstant(4)); 8068 Store = DAG.getStore(Op.getOperand(0), DL, 8069 DAG.getConstant(FuncInfo->getVarArgsFPOffset(), 8070 MVT::i32), 8071 FIN, MachinePointerInfo(SV, 4), false, false, 0); 8072 MemOps.push_back(Store); 8073 8074 // Store ptr to overflow_arg_area 8075 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), 8076 FIN, DAG.getIntPtrConstant(4)); 8077 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 8078 getPointerTy()); 8079 Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, 8080 MachinePointerInfo(SV, 8), 8081 false, false, 0); 8082 MemOps.push_back(Store); 8083 8084 // Store ptr to reg_save_area. 
8085 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), 8086 FIN, DAG.getIntPtrConstant(8)); 8087 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), 8088 getPointerTy()); 8089 Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN, 8090 MachinePointerInfo(SV, 16), false, false, 0); 8091 MemOps.push_back(Store); 8092 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, 8093 &MemOps[0], MemOps.size()); 8094} 8095 8096SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { 8097 assert(Subtarget->is64Bit() && 8098 "LowerVAARG only handles 64-bit va_arg!"); 8099 assert((Subtarget->isTargetLinux() || 8100 Subtarget->isTargetDarwin()) && 8101 "Unhandled target in LowerVAARG"); 8102 assert(Op.getNode()->getNumOperands() == 4); 8103 SDValue Chain = Op.getOperand(0); 8104 SDValue SrcPtr = Op.getOperand(1); 8105 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 8106 unsigned Align = Op.getConstantOperandVal(3); 8107 DebugLoc dl = Op.getDebugLoc(); 8108 8109 EVT ArgVT = Op.getNode()->getValueType(0); 8110 const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 8111 uint32_t ArgSize = getTargetData()->getTypeAllocSize(ArgTy); 8112 uint8_t ArgMode; 8113 8114 // Decide which area this value should be read from. 8115 // TODO: Implement the AMD64 ABI in its entirety. This simple 8116 // selection mechanism works only for the basic types. 8117 if (ArgVT == MVT::f80) { 8118 llvm_unreachable("va_arg for f80 not yet implemented"); 8119 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) { 8120 ArgMode = 2; // Argument passed in XMM register. Use fp_offset. 8121 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) { 8122 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset. 8123 } else { 8124 llvm_unreachable("Unhandled argument type in LowerVAARG"); 8125 } 8126 8127 if (ArgMode == 2) { 8128 // Sanity Check: Make sure using fp_offset makes sense. 8129 assert(!UseSoftFloat && 8130 !(DAG.getMachineFunction() 8131 .getFunction()->hasFnAttr(Attribute::NoImplicitFloat)) && 8132 Subtarget->hasXMM()); 8133 } 8134 8135 // Insert VAARG_64 node into the DAG 8136 // VAARG_64 returns two values: Variable Argument Address, Chain 8137 SmallVector<SDValue, 11> InstOps; 8138 InstOps.push_back(Chain); 8139 InstOps.push_back(SrcPtr); 8140 InstOps.push_back(DAG.getConstant(ArgSize, MVT::i32)); 8141 InstOps.push_back(DAG.getConstant(ArgMode, MVT::i8)); 8142 InstOps.push_back(DAG.getConstant(Align, MVT::i32)); 8143 SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other); 8144 SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl, 8145 VTs, &InstOps[0], InstOps.size(), 8146 MVT::i64, 8147 MachinePointerInfo(SV), 8148 /*Align=*/0, 8149 /*Volatile=*/false, 8150 /*ReadMem=*/true, 8151 /*WriteMem=*/true); 8152 Chain = VAARG.getValue(1); 8153 8154 // Load the next argument and return it 8155 return DAG.getLoad(ArgVT, dl, 8156 Chain, 8157 VAARG, 8158 MachinePointerInfo(), 8159 false, false, 0); 8160} 8161 8162SDValue X86TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const { 8163 // X86-64 va_list is a struct { i32, i32, i8*, i8* }. 
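// That layout occupies 4 + 4 + 8 + 8 = 24 bytes with 8-byte alignment,
// which is where the size and alignment of the memcpy emitted below come
// from.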
8164 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!"); 8165 SDValue Chain = Op.getOperand(0); 8166 SDValue DstPtr = Op.getOperand(1); 8167 SDValue SrcPtr = Op.getOperand(2); 8168 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); 8169 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 8170 DebugLoc DL = Op.getDebugLoc(); 8171 8172 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, 8173 DAG.getIntPtrConstant(24), 8, /*isVolatile*/false, 8174 false, 8175 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV)); 8176} 8177 8178SDValue 8179X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const { 8180 DebugLoc dl = Op.getDebugLoc(); 8181 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 8182 switch (IntNo) { 8183 default: return SDValue(); // Don't custom lower most intrinsics. 8184 // Comparison intrinsics. 8185 case Intrinsic::x86_sse_comieq_ss: 8186 case Intrinsic::x86_sse_comilt_ss: 8187 case Intrinsic::x86_sse_comile_ss: 8188 case Intrinsic::x86_sse_comigt_ss: 8189 case Intrinsic::x86_sse_comige_ss: 8190 case Intrinsic::x86_sse_comineq_ss: 8191 case Intrinsic::x86_sse_ucomieq_ss: 8192 case Intrinsic::x86_sse_ucomilt_ss: 8193 case Intrinsic::x86_sse_ucomile_ss: 8194 case Intrinsic::x86_sse_ucomigt_ss: 8195 case Intrinsic::x86_sse_ucomige_ss: 8196 case Intrinsic::x86_sse_ucomineq_ss: 8197 case Intrinsic::x86_sse2_comieq_sd: 8198 case Intrinsic::x86_sse2_comilt_sd: 8199 case Intrinsic::x86_sse2_comile_sd: 8200 case Intrinsic::x86_sse2_comigt_sd: 8201 case Intrinsic::x86_sse2_comige_sd: 8202 case Intrinsic::x86_sse2_comineq_sd: 8203 case Intrinsic::x86_sse2_ucomieq_sd: 8204 case Intrinsic::x86_sse2_ucomilt_sd: 8205 case Intrinsic::x86_sse2_ucomile_sd: 8206 case Intrinsic::x86_sse2_ucomigt_sd: 8207 case Intrinsic::x86_sse2_ucomige_sd: 8208 case Intrinsic::x86_sse2_ucomineq_sd: { 8209 unsigned Opc = 0; 8210 ISD::CondCode CC = ISD::SETCC_INVALID; 8211 switch (IntNo) { 8212 default: break; 8213 case Intrinsic::x86_sse_comieq_ss: 8214 case Intrinsic::x86_sse2_comieq_sd: 8215 Opc = X86ISD::COMI; 8216 CC = ISD::SETEQ; 8217 break; 8218 case Intrinsic::x86_sse_comilt_ss: 8219 case Intrinsic::x86_sse2_comilt_sd: 8220 Opc = X86ISD::COMI; 8221 CC = ISD::SETLT; 8222 break; 8223 case Intrinsic::x86_sse_comile_ss: 8224 case Intrinsic::x86_sse2_comile_sd: 8225 Opc = X86ISD::COMI; 8226 CC = ISD::SETLE; 8227 break; 8228 case Intrinsic::x86_sse_comigt_ss: 8229 case Intrinsic::x86_sse2_comigt_sd: 8230 Opc = X86ISD::COMI; 8231 CC = ISD::SETGT; 8232 break; 8233 case Intrinsic::x86_sse_comige_ss: 8234 case Intrinsic::x86_sse2_comige_sd: 8235 Opc = X86ISD::COMI; 8236 CC = ISD::SETGE; 8237 break; 8238 case Intrinsic::x86_sse_comineq_ss: 8239 case Intrinsic::x86_sse2_comineq_sd: 8240 Opc = X86ISD::COMI; 8241 CC = ISD::SETNE; 8242 break; 8243 case Intrinsic::x86_sse_ucomieq_ss: 8244 case Intrinsic::x86_sse2_ucomieq_sd: 8245 Opc = X86ISD::UCOMI; 8246 CC = ISD::SETEQ; 8247 break; 8248 case Intrinsic::x86_sse_ucomilt_ss: 8249 case Intrinsic::x86_sse2_ucomilt_sd: 8250 Opc = X86ISD::UCOMI; 8251 CC = ISD::SETLT; 8252 break; 8253 case Intrinsic::x86_sse_ucomile_ss: 8254 case Intrinsic::x86_sse2_ucomile_sd: 8255 Opc = X86ISD::UCOMI; 8256 CC = ISD::SETLE; 8257 break; 8258 case Intrinsic::x86_sse_ucomigt_ss: 8259 case Intrinsic::x86_sse2_ucomigt_sd: 8260 Opc = X86ISD::UCOMI; 8261 CC = ISD::SETGT; 8262 break; 8263 case Intrinsic::x86_sse_ucomige_ss: 8264 case Intrinsic::x86_sse2_ucomige_sd: 8265 Opc = X86ISD::UCOMI; 8266 
CC = ISD::SETGE; 8267 break; 8268 case Intrinsic::x86_sse_ucomineq_ss: 8269 case Intrinsic::x86_sse2_ucomineq_sd: 8270 Opc = X86ISD::UCOMI; 8271 CC = ISD::SETNE; 8272 break; 8273 } 8274 8275 SDValue LHS = Op.getOperand(1); 8276 SDValue RHS = Op.getOperand(2); 8277 unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG); 8278 assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!"); 8279 SDValue Cond = DAG.getNode(Opc, dl, MVT::i32, LHS, RHS); 8280 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 8281 DAG.getConstant(X86CC, MVT::i8), Cond); 8282 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); 8283 } 8284 // ptest and testp intrinsics. The intrinsic these come from are designed to 8285 // return an integer value, not just an instruction so lower it to the ptest 8286 // or testp pattern and a setcc for the result. 8287 case Intrinsic::x86_sse41_ptestz: 8288 case Intrinsic::x86_sse41_ptestc: 8289 case Intrinsic::x86_sse41_ptestnzc: 8290 case Intrinsic::x86_avx_ptestz_256: 8291 case Intrinsic::x86_avx_ptestc_256: 8292 case Intrinsic::x86_avx_ptestnzc_256: 8293 case Intrinsic::x86_avx_vtestz_ps: 8294 case Intrinsic::x86_avx_vtestc_ps: 8295 case Intrinsic::x86_avx_vtestnzc_ps: 8296 case Intrinsic::x86_avx_vtestz_pd: 8297 case Intrinsic::x86_avx_vtestc_pd: 8298 case Intrinsic::x86_avx_vtestnzc_pd: 8299 case Intrinsic::x86_avx_vtestz_ps_256: 8300 case Intrinsic::x86_avx_vtestc_ps_256: 8301 case Intrinsic::x86_avx_vtestnzc_ps_256: 8302 case Intrinsic::x86_avx_vtestz_pd_256: 8303 case Intrinsic::x86_avx_vtestc_pd_256: 8304 case Intrinsic::x86_avx_vtestnzc_pd_256: { 8305 bool IsTestPacked = false; 8306 unsigned X86CC = 0; 8307 switch (IntNo) { 8308 default: llvm_unreachable("Bad fallthrough in Intrinsic lowering."); 8309 case Intrinsic::x86_avx_vtestz_ps: 8310 case Intrinsic::x86_avx_vtestz_pd: 8311 case Intrinsic::x86_avx_vtestz_ps_256: 8312 case Intrinsic::x86_avx_vtestz_pd_256: 8313 IsTestPacked = true; // Fallthrough 8314 case Intrinsic::x86_sse41_ptestz: 8315 case Intrinsic::x86_avx_ptestz_256: 8316 // ZF = 1 8317 X86CC = X86::COND_E; 8318 break; 8319 case Intrinsic::x86_avx_vtestc_ps: 8320 case Intrinsic::x86_avx_vtestc_pd: 8321 case Intrinsic::x86_avx_vtestc_ps_256: 8322 case Intrinsic::x86_avx_vtestc_pd_256: 8323 IsTestPacked = true; // Fallthrough 8324 case Intrinsic::x86_sse41_ptestc: 8325 case Intrinsic::x86_avx_ptestc_256: 8326 // CF = 1 8327 X86CC = X86::COND_B; 8328 break; 8329 case Intrinsic::x86_avx_vtestnzc_ps: 8330 case Intrinsic::x86_avx_vtestnzc_pd: 8331 case Intrinsic::x86_avx_vtestnzc_ps_256: 8332 case Intrinsic::x86_avx_vtestnzc_pd_256: 8333 IsTestPacked = true; // Fallthrough 8334 case Intrinsic::x86_sse41_ptestnzc: 8335 case Intrinsic::x86_avx_ptestnzc_256: 8336 // ZF and CF = 0 8337 X86CC = X86::COND_A; 8338 break; 8339 } 8340 8341 SDValue LHS = Op.getOperand(1); 8342 SDValue RHS = Op.getOperand(2); 8343 unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST; 8344 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS); 8345 SDValue CC = DAG.getConstant(X86CC, MVT::i8); 8346 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test); 8347 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); 8348 } 8349 8350 // Fix vector shift instructions where the last operand is a non-immediate 8351 // i32 value. 
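// Each immediate-form intrinsic is rewritten below to its register form
// (e.g. pslli_w -> psll_w), with the scalar amount placed in the low 32
// bits of the shift-count vector and the following 32 bits cleared.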
8352 case Intrinsic::x86_sse2_pslli_w: 8353 case Intrinsic::x86_sse2_pslli_d: 8354 case Intrinsic::x86_sse2_pslli_q: 8355 case Intrinsic::x86_sse2_psrli_w: 8356 case Intrinsic::x86_sse2_psrli_d: 8357 case Intrinsic::x86_sse2_psrli_q: 8358 case Intrinsic::x86_sse2_psrai_w: 8359 case Intrinsic::x86_sse2_psrai_d: 8360 case Intrinsic::x86_mmx_pslli_w: 8361 case Intrinsic::x86_mmx_pslli_d: 8362 case Intrinsic::x86_mmx_pslli_q: 8363 case Intrinsic::x86_mmx_psrli_w: 8364 case Intrinsic::x86_mmx_psrli_d: 8365 case Intrinsic::x86_mmx_psrli_q: 8366 case Intrinsic::x86_mmx_psrai_w: 8367 case Intrinsic::x86_mmx_psrai_d: { 8368 SDValue ShAmt = Op.getOperand(2); 8369 if (isa<ConstantSDNode>(ShAmt)) 8370 return SDValue(); 8371 8372 unsigned NewIntNo = 0; 8373 EVT ShAmtVT = MVT::v4i32; 8374 switch (IntNo) { 8375 case Intrinsic::x86_sse2_pslli_w: 8376 NewIntNo = Intrinsic::x86_sse2_psll_w; 8377 break; 8378 case Intrinsic::x86_sse2_pslli_d: 8379 NewIntNo = Intrinsic::x86_sse2_psll_d; 8380 break; 8381 case Intrinsic::x86_sse2_pslli_q: 8382 NewIntNo = Intrinsic::x86_sse2_psll_q; 8383 break; 8384 case Intrinsic::x86_sse2_psrli_w: 8385 NewIntNo = Intrinsic::x86_sse2_psrl_w; 8386 break; 8387 case Intrinsic::x86_sse2_psrli_d: 8388 NewIntNo = Intrinsic::x86_sse2_psrl_d; 8389 break; 8390 case Intrinsic::x86_sse2_psrli_q: 8391 NewIntNo = Intrinsic::x86_sse2_psrl_q; 8392 break; 8393 case Intrinsic::x86_sse2_psrai_w: 8394 NewIntNo = Intrinsic::x86_sse2_psra_w; 8395 break; 8396 case Intrinsic::x86_sse2_psrai_d: 8397 NewIntNo = Intrinsic::x86_sse2_psra_d; 8398 break; 8399 default: { 8400 ShAmtVT = MVT::v2i32; 8401 switch (IntNo) { 8402 case Intrinsic::x86_mmx_pslli_w: 8403 NewIntNo = Intrinsic::x86_mmx_psll_w; 8404 break; 8405 case Intrinsic::x86_mmx_pslli_d: 8406 NewIntNo = Intrinsic::x86_mmx_psll_d; 8407 break; 8408 case Intrinsic::x86_mmx_pslli_q: 8409 NewIntNo = Intrinsic::x86_mmx_psll_q; 8410 break; 8411 case Intrinsic::x86_mmx_psrli_w: 8412 NewIntNo = Intrinsic::x86_mmx_psrl_w; 8413 break; 8414 case Intrinsic::x86_mmx_psrli_d: 8415 NewIntNo = Intrinsic::x86_mmx_psrl_d; 8416 break; 8417 case Intrinsic::x86_mmx_psrli_q: 8418 NewIntNo = Intrinsic::x86_mmx_psrl_q; 8419 break; 8420 case Intrinsic::x86_mmx_psrai_w: 8421 NewIntNo = Intrinsic::x86_mmx_psra_w; 8422 break; 8423 case Intrinsic::x86_mmx_psrai_d: 8424 NewIntNo = Intrinsic::x86_mmx_psra_d; 8425 break; 8426 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 8427 } 8428 break; 8429 } 8430 } 8431 8432 // The vector shift intrinsics with scalars uses 32b shift amounts but 8433 // the sse2/mmx shift instructions reads 64 bits. Set the upper 32 bits 8434 // to be zero. 8435 SDValue ShOps[4]; 8436 ShOps[0] = ShAmt; 8437 ShOps[1] = DAG.getConstant(0, MVT::i32); 8438 if (ShAmtVT == MVT::v4i32) { 8439 ShOps[2] = DAG.getUNDEF(MVT::i32); 8440 ShOps[3] = DAG.getUNDEF(MVT::i32); 8441 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, ShAmtVT, &ShOps[0], 4); 8442 } else { 8443 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, ShAmtVT, &ShOps[0], 2); 8444// FIXME this must be lowered to get rid of the invalid type. 
8445 } 8446 8447 EVT VT = Op.getValueType(); 8448 ShAmt = DAG.getNode(ISD::BITCAST, dl, VT, ShAmt); 8449 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8450 DAG.getConstant(NewIntNo, MVT::i32), 8451 Op.getOperand(1), ShAmt); 8452 } 8453 } 8454} 8455 8456SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op, 8457 SelectionDAG &DAG) const { 8458 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 8459 MFI->setReturnAddressIsTaken(true); 8460 8461 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 8462 DebugLoc dl = Op.getDebugLoc(); 8463 8464 if (Depth > 0) { 8465 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 8466 SDValue Offset = 8467 DAG.getConstant(TD->getPointerSize(), 8468 Subtarget->is64Bit() ? MVT::i64 : MVT::i32); 8469 return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), 8470 DAG.getNode(ISD::ADD, dl, getPointerTy(), 8471 FrameAddr, Offset), 8472 MachinePointerInfo(), false, false, 0); 8473 } 8474 8475 // Just load the return address. 8476 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG); 8477 return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), 8478 RetAddrFI, MachinePointerInfo(), false, false, 0); 8479} 8480 8481SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { 8482 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 8483 MFI->setFrameAddressIsTaken(true); 8484 8485 EVT VT = Op.getValueType(); 8486 DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful 8487 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 8488 unsigned FrameReg = Subtarget->is64Bit() ? X86::RBP : X86::EBP; 8489 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 8490 while (Depth--) 8491 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 8492 MachinePointerInfo(), 8493 false, false, 0); 8494 return FrameAddr; 8495} 8496 8497SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op, 8498 SelectionDAG &DAG) const { 8499 return DAG.getIntPtrConstant(2*TD->getPointerSize()); 8500} 8501 8502SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { 8503 MachineFunction &MF = DAG.getMachineFunction(); 8504 SDValue Chain = Op.getOperand(0); 8505 SDValue Offset = Op.getOperand(1); 8506 SDValue Handler = Op.getOperand(2); 8507 DebugLoc dl = Op.getDebugLoc(); 8508 8509 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, 8510 Subtarget->is64Bit() ? X86::RBP : X86::EBP, 8511 getPointerTy()); 8512 unsigned StoreAddrReg = (Subtarget->is64Bit() ? 
X86::RCX : X86::ECX); 8513 8514 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Frame, 8515 DAG.getIntPtrConstant(TD->getPointerSize())); 8516 StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StoreAddr, Offset); 8517 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(), 8518 false, false, 0); 8519 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr); 8520 MF.getRegInfo().addLiveOut(StoreAddrReg); 8521 8522 return DAG.getNode(X86ISD::EH_RETURN, dl, 8523 MVT::Other, 8524 Chain, DAG.getRegister(StoreAddrReg, getPointerTy())); 8525} 8526 8527SDValue X86TargetLowering::LowerTRAMPOLINE(SDValue Op, 8528 SelectionDAG &DAG) const { 8529 SDValue Root = Op.getOperand(0); 8530 SDValue Trmp = Op.getOperand(1); // trampoline 8531 SDValue FPtr = Op.getOperand(2); // nested function 8532 SDValue Nest = Op.getOperand(3); // 'nest' parameter value 8533 DebugLoc dl = Op.getDebugLoc(); 8534 8535 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 8536 8537 if (Subtarget->is64Bit()) { 8538 SDValue OutChains[6]; 8539 8540 // Large code-model. 8541 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode. 8542 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode. 8543 8544 const unsigned char N86R10 = RegInfo->getX86RegNum(X86::R10); 8545 const unsigned char N86R11 = RegInfo->getX86RegNum(X86::R11); 8546 8547 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix 8548 8549 // Load the pointer to the nested function into R11. 8550 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11 8551 SDValue Addr = Trmp; 8552 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), 8553 Addr, MachinePointerInfo(TrmpAddr), 8554 false, false, 0); 8555 8556 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 8557 DAG.getConstant(2, MVT::i64)); 8558 OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr, 8559 MachinePointerInfo(TrmpAddr, 2), 8560 false, false, 2); 8561 8562 // Load the 'nest' parameter value into R10. 8563 // R10 is specified in X86CallingConv.td 8564 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10 8565 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 8566 DAG.getConstant(10, MVT::i64)); 8567 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), 8568 Addr, MachinePointerInfo(TrmpAddr, 10), 8569 false, false, 0); 8570 8571 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 8572 DAG.getConstant(12, MVT::i64)); 8573 OutChains[3] = DAG.getStore(Root, dl, Nest, Addr, 8574 MachinePointerInfo(TrmpAddr, 12), 8575 false, false, 2); 8576 8577 // Jump to the nested function. 8578 OpCode = (JMP64r << 8) | REX_WB; // jmpq *... 
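// At this point the 23-byte trampoline is laid out as: movabsq $fptr, %r11
// in bytes 0-9, movabsq $nest, %r10 in bytes 10-19, and (stored next) the
// jmpq *%r11 encoding in bytes 20-22.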
8579 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 8580 DAG.getConstant(20, MVT::i64)); 8581 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), 8582 Addr, MachinePointerInfo(TrmpAddr, 20), 8583 false, false, 0); 8584 8585 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11 8586 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 8587 DAG.getConstant(22, MVT::i64)); 8588 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr, 8589 MachinePointerInfo(TrmpAddr, 22), 8590 false, false, 0); 8591 8592 SDValue Ops[] = 8593 { Trmp, DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 6) }; 8594 return DAG.getMergeValues(Ops, 2, dl); 8595 } else { 8596 const Function *Func = 8597 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue()); 8598 CallingConv::ID CC = Func->getCallingConv(); 8599 unsigned NestReg; 8600 8601 switch (CC) { 8602 default: 8603 llvm_unreachable("Unsupported calling convention"); 8604 case CallingConv::C: 8605 case CallingConv::X86_StdCall: { 8606 // Pass 'nest' parameter in ECX. 8607 // Must be kept in sync with X86CallingConv.td 8608 NestReg = X86::ECX; 8609 8610 // Check that ECX wasn't needed by an 'inreg' parameter. 8611 const FunctionType *FTy = Func->getFunctionType(); 8612 const AttrListPtr &Attrs = Func->getAttributes(); 8613 8614 if (!Attrs.isEmpty() && !Func->isVarArg()) { 8615 unsigned InRegCount = 0; 8616 unsigned Idx = 1; 8617 8618 for (FunctionType::param_iterator I = FTy->param_begin(), 8619 E = FTy->param_end(); I != E; ++I, ++Idx) 8620 if (Attrs.paramHasAttr(Idx, Attribute::InReg)) 8621 // FIXME: should only count parameters that are lowered to integers. 8622 InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32; 8623 8624 if (InRegCount > 2) { 8625 report_fatal_error("Nest register in use - reduce number of inreg" 8626 " parameters!"); 8627 } 8628 } 8629 break; 8630 } 8631 case CallingConv::X86_FastCall: 8632 case CallingConv::X86_ThisCall: 8633 case CallingConv::Fast: 8634 // Pass 'nest' parameter in EAX. 8635 // Must be kept in sync with X86CallingConv.td 8636 NestReg = X86::EAX; 8637 break; 8638 } 8639 8640 SDValue OutChains[4]; 8641 SDValue Addr, Disp; 8642 8643 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 8644 DAG.getConstant(10, MVT::i32)); 8645 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr); 8646 8647 // This is storing the opcode for MOV32ri. 8648 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte. 8649 const unsigned char N86Reg = RegInfo->getX86RegNum(NestReg); 8650 OutChains[0] = DAG.getStore(Root, dl, 8651 DAG.getConstant(MOV32ri|N86Reg, MVT::i8), 8652 Trmp, MachinePointerInfo(TrmpAddr), 8653 false, false, 0); 8654 8655 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 8656 DAG.getConstant(1, MVT::i32)); 8657 OutChains[1] = DAG.getStore(Root, dl, Nest, Addr, 8658 MachinePointerInfo(TrmpAddr, 1), 8659 false, false, 1); 8660 8661 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode. 
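// The 32-bit trampoline ends up as 10 bytes in total: movl $nest, %reg
// (0xB8+reg at byte 0 with its 32-bit immediate at bytes 1-4) followed by
// jmp rel32 (0xE9 at byte 5 with the displacement computed above at
// bytes 6-9).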
8662 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 8663 DAG.getConstant(5, MVT::i32)); 8664 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr, 8665 MachinePointerInfo(TrmpAddr, 5), 8666 false, false, 1); 8667 8668 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 8669 DAG.getConstant(6, MVT::i32)); 8670 OutChains[3] = DAG.getStore(Root, dl, Disp, Addr, 8671 MachinePointerInfo(TrmpAddr, 6), 8672 false, false, 1); 8673 8674 SDValue Ops[] = 8675 { Trmp, DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 4) }; 8676 return DAG.getMergeValues(Ops, 2, dl); 8677 } 8678} 8679 8680SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op, 8681 SelectionDAG &DAG) const { 8682 /* 8683 The rounding mode is in bits 11:10 of FPSR, and has the following 8684 settings: 8685 00 Round to nearest 8686 01 Round to -inf 8687 10 Round to +inf 8688 11 Round to 0 8689 8690 FLT_ROUNDS, on the other hand, expects the following: 8691 -1 Undefined 8692 0 Round to 0 8693 1 Round to nearest 8694 2 Round to +inf 8695 3 Round to -inf 8696 8697 To perform the conversion, we do: 8698 (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3) 8699 */ 8700 8701 MachineFunction &MF = DAG.getMachineFunction(); 8702 const TargetMachine &TM = MF.getTarget(); 8703 const TargetFrameLowering &TFI = *TM.getFrameLowering(); 8704 unsigned StackAlignment = TFI.getStackAlignment(); 8705 EVT VT = Op.getValueType(); 8706 DebugLoc DL = Op.getDebugLoc(); 8707 8708 // Save FP Control Word to stack slot 8709 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false); 8710 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 8711 8712 8713 MachineMemOperand *MMO = 8714 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 8715 MachineMemOperand::MOStore, 2, 2); 8716 8717 SDValue Ops[] = { DAG.getEntryNode(), StackSlot }; 8718 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL, 8719 DAG.getVTList(MVT::Other), 8720 Ops, 2, MVT::i16, MMO); 8721 8722 // Load FP Control Word from stack slot 8723 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot, 8724 MachinePointerInfo(), false, false, 0); 8725 8726 // Transform as necessary 8727 SDValue CWD1 = 8728 DAG.getNode(ISD::SRL, DL, MVT::i16, 8729 DAG.getNode(ISD::AND, DL, MVT::i16, 8730 CWD, DAG.getConstant(0x800, MVT::i16)), 8731 DAG.getConstant(11, MVT::i8)); 8732 SDValue CWD2 = 8733 DAG.getNode(ISD::SRL, DL, MVT::i16, 8734 DAG.getNode(ISD::AND, DL, MVT::i16, 8735 CWD, DAG.getConstant(0x400, MVT::i16)), 8736 DAG.getConstant(9, MVT::i8)); 8737 8738 SDValue RetVal = 8739 DAG.getNode(ISD::AND, DL, MVT::i16, 8740 DAG.getNode(ISD::ADD, DL, MVT::i16, 8741 DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2), 8742 DAG.getConstant(1, MVT::i16)), 8743 DAG.getConstant(3, MVT::i16)); 8744 8745 8746 return DAG.getNode((VT.getSizeInBits() < 16 ? 8747 ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal); 8748} 8749 8750SDValue X86TargetLowering::LowerCTLZ(SDValue Op, SelectionDAG &DAG) const { 8751 EVT VT = Op.getValueType(); 8752 EVT OpVT = VT; 8753 unsigned NumBits = VT.getSizeInBits(); 8754 DebugLoc dl = Op.getDebugLoc(); 8755 8756 Op = Op.getOperand(0); 8757 if (VT == MVT::i8) { 8758 // Zero extend to i32 since there is not an i8 bsr. 8759 OpVT = MVT::i32; 8760 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op); 8761 } 8762 8763 // Issue a bsr (scan bits in reverse) which also sets EFLAGS. 8764 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 8765 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op); 8766 8767 // If src is zero (i.e. 
bsr sets ZF), returns NumBits. 8768 SDValue Ops[] = { 8769 Op, 8770 DAG.getConstant(NumBits+NumBits-1, OpVT), 8771 DAG.getConstant(X86::COND_E, MVT::i8), 8772 Op.getValue(1) 8773 }; 8774 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops, array_lengthof(Ops)); 8775 8776 // Finally xor with NumBits-1. 8777 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT)); 8778 8779 if (VT == MVT::i8) 8780 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op); 8781 return Op; 8782} 8783 8784SDValue X86TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) const { 8785 EVT VT = Op.getValueType(); 8786 EVT OpVT = VT; 8787 unsigned NumBits = VT.getSizeInBits(); 8788 DebugLoc dl = Op.getDebugLoc(); 8789 8790 Op = Op.getOperand(0); 8791 if (VT == MVT::i8) { 8792 OpVT = MVT::i32; 8793 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op); 8794 } 8795 8796 // Issue a bsf (scan bits forward) which also sets EFLAGS. 8797 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 8798 Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op); 8799 8800 // If src is zero (i.e. bsf sets ZF), returns NumBits. 8801 SDValue Ops[] = { 8802 Op, 8803 DAG.getConstant(NumBits, OpVT), 8804 DAG.getConstant(X86::COND_E, MVT::i8), 8805 Op.getValue(1) 8806 }; 8807 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops, array_lengthof(Ops)); 8808 8809 if (VT == MVT::i8) 8810 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op); 8811 return Op; 8812} 8813 8814SDValue X86TargetLowering::LowerMUL_V2I64(SDValue Op, SelectionDAG &DAG) const { 8815 EVT VT = Op.getValueType(); 8816 assert(VT == MVT::v2i64 && "Only know how to lower V2I64 multiply"); 8817 DebugLoc dl = Op.getDebugLoc(); 8818 8819 // ulong2 Ahi = __builtin_ia32_psrlqi128( a, 32); 8820 // ulong2 Bhi = __builtin_ia32_psrlqi128( b, 32); 8821 // ulong2 AloBlo = __builtin_ia32_pmuludq128( a, b ); 8822 // ulong2 AloBhi = __builtin_ia32_pmuludq128( a, Bhi ); 8823 // ulong2 AhiBlo = __builtin_ia32_pmuludq128( Ahi, b ); 8824 // 8825 // AloBhi = __builtin_ia32_psllqi128( AloBhi, 32 ); 8826 // AhiBlo = __builtin_ia32_psllqi128( AhiBlo, 32 ); 8827 // return AloBlo + AloBhi + AhiBlo; 8828 8829 SDValue A = Op.getOperand(0); 8830 SDValue B = Op.getOperand(1); 8831 8832 SDValue Ahi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8833 DAG.getConstant(Intrinsic::x86_sse2_psrli_q, MVT::i32), 8834 A, DAG.getConstant(32, MVT::i32)); 8835 SDValue Bhi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8836 DAG.getConstant(Intrinsic::x86_sse2_psrli_q, MVT::i32), 8837 B, DAG.getConstant(32, MVT::i32)); 8838 SDValue AloBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8839 DAG.getConstant(Intrinsic::x86_sse2_pmulu_dq, MVT::i32), 8840 A, B); 8841 SDValue AloBhi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8842 DAG.getConstant(Intrinsic::x86_sse2_pmulu_dq, MVT::i32), 8843 A, Bhi); 8844 SDValue AhiBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8845 DAG.getConstant(Intrinsic::x86_sse2_pmulu_dq, MVT::i32), 8846 Ahi, B); 8847 AloBhi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8848 DAG.getConstant(Intrinsic::x86_sse2_pslli_q, MVT::i32), 8849 AloBhi, DAG.getConstant(32, MVT::i32)); 8850 AhiBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8851 DAG.getConstant(Intrinsic::x86_sse2_pslli_q, MVT::i32), 8852 AhiBlo, DAG.getConstant(32, MVT::i32)); 8853 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi); 8854 Res = DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo); 8855 return Res; 8856} 8857 8858SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const { 8859 8860 EVT VT = Op.getValueType(); 8861 DebugLoc dl = 
Op.getDebugLoc(); 8862 SDValue R = Op.getOperand(0); 8863 SDValue Amt = Op.getOperand(1); 8864 8865 LLVMContext *Context = DAG.getContext(); 8866 8867 // Must have SSE2. 8868 if (!Subtarget->hasSSE2()) return SDValue(); 8869 8870 // Optimize shl/srl/sra with constant shift amount. 8871 if (isSplatVector(Amt.getNode())) { 8872 SDValue SclrAmt = Amt->getOperand(0); 8873 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(SclrAmt)) { 8874 uint64_t ShiftAmt = C->getZExtValue(); 8875 8876 if (VT == MVT::v2i64 && Op.getOpcode() == ISD::SHL) 8877 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8878 DAG.getConstant(Intrinsic::x86_sse2_pslli_q, MVT::i32), 8879 R, DAG.getConstant(ShiftAmt, MVT::i32)); 8880 8881 if (VT == MVT::v4i32 && Op.getOpcode() == ISD::SHL) 8882 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8883 DAG.getConstant(Intrinsic::x86_sse2_pslli_d, MVT::i32), 8884 R, DAG.getConstant(ShiftAmt, MVT::i32)); 8885 8886 if (VT == MVT::v8i16 && Op.getOpcode() == ISD::SHL) 8887 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8888 DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32), 8889 R, DAG.getConstant(ShiftAmt, MVT::i32)); 8890 8891 if (VT == MVT::v2i64 && Op.getOpcode() == ISD::SRL) 8892 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8893 DAG.getConstant(Intrinsic::x86_sse2_psrli_q, MVT::i32), 8894 R, DAG.getConstant(ShiftAmt, MVT::i32)); 8895 8896 if (VT == MVT::v4i32 && Op.getOpcode() == ISD::SRL) 8897 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8898 DAG.getConstant(Intrinsic::x86_sse2_psrli_d, MVT::i32), 8899 R, DAG.getConstant(ShiftAmt, MVT::i32)); 8900 8901 if (VT == MVT::v8i16 && Op.getOpcode() == ISD::SRL) 8902 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8903 DAG.getConstant(Intrinsic::x86_sse2_psrli_w, MVT::i32), 8904 R, DAG.getConstant(ShiftAmt, MVT::i32)); 8905 8906 if (VT == MVT::v4i32 && Op.getOpcode() == ISD::SRA) 8907 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8908 DAG.getConstant(Intrinsic::x86_sse2_psrai_d, MVT::i32), 8909 R, DAG.getConstant(ShiftAmt, MVT::i32)); 8910 8911 if (VT == MVT::v8i16 && Op.getOpcode() == ISD::SRA) 8912 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8913 DAG.getConstant(Intrinsic::x86_sse2_psrai_w, MVT::i32), 8914 R, DAG.getConstant(ShiftAmt, MVT::i32)); 8915 } 8916 } 8917 8918 // Lower SHL with variable shift amount. 8919 // Cannot lower SHL without SSE4.1 or later. 
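// The v4i32 path below builds (1 << amt) per lane without a variable
// vector shift: the amounts are shifted into the f32 exponent field,
// 0x3f800000 (1.0f) is added so each lane holds the bit pattern of 2^amt,
// an fp-to-int conversion recovers the integer power of two, and an
// SSE4.1 vector multiply (PMULLD) finishes the shift. The v16i8 path
// instead uses three rounds of PBLENDVB, conditionally shifting by 4, 2
// and 1 based on the bits of the (pre-scaled) shift amount.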
8920 if (!Subtarget->hasSSE41()) return SDValue(); 8921 8922 if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) { 8923 Op = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8924 DAG.getConstant(Intrinsic::x86_sse2_pslli_d, MVT::i32), 8925 Op.getOperand(1), DAG.getConstant(23, MVT::i32)); 8926 8927 ConstantInt *CI = ConstantInt::get(*Context, APInt(32, 0x3f800000U)); 8928 8929 std::vector<Constant*> CV(4, CI); 8930 Constant *C = ConstantVector::get(CV); 8931 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 8932 SDValue Addend = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 8933 MachinePointerInfo::getConstantPool(), 8934 false, false, 16); 8935 8936 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Addend); 8937 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op); 8938 Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op); 8939 return DAG.getNode(ISD::MUL, dl, VT, Op, R); 8940 } 8941 if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) { 8942 // a = a << 5; 8943 Op = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8944 DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32), 8945 Op.getOperand(1), DAG.getConstant(5, MVT::i32)); 8946 8947 ConstantInt *CM1 = ConstantInt::get(*Context, APInt(8, 15)); 8948 ConstantInt *CM2 = ConstantInt::get(*Context, APInt(8, 63)); 8949 8950 std::vector<Constant*> CVM1(16, CM1); 8951 std::vector<Constant*> CVM2(16, CM2); 8952 Constant *C = ConstantVector::get(CVM1); 8953 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 8954 SDValue M = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 8955 MachinePointerInfo::getConstantPool(), 8956 false, false, 16); 8957 8958 // r = pblendv(r, psllw(r & (char16)15, 4), a); 8959 M = DAG.getNode(ISD::AND, dl, VT, R, M); 8960 M = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8961 DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32), M, 8962 DAG.getConstant(4, MVT::i32)); 8963 R = DAG.getNode(X86ISD::PBLENDVB, dl, VT, R, M, Op); 8964 // a += a 8965 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op); 8966 8967 C = ConstantVector::get(CVM2); 8968 CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 8969 M = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 8970 MachinePointerInfo::getConstantPool(), 8971 false, false, 16); 8972 8973 // r = pblendv(r, psllw(r & (char16)63, 2), a); 8974 M = DAG.getNode(ISD::AND, dl, VT, R, M); 8975 M = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 8976 DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32), M, 8977 DAG.getConstant(2, MVT::i32)); 8978 R = DAG.getNode(X86ISD::PBLENDVB, dl, VT, R, M, Op); 8979 // a += a 8980 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op); 8981 8982 // return pblendv(r, r+r, a); 8983 R = DAG.getNode(X86ISD::PBLENDVB, dl, VT, 8984 R, DAG.getNode(ISD::ADD, dl, VT, R, R), Op); 8985 return R; 8986 } 8987 return SDValue(); 8988} 8989 8990SDValue X86TargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const { 8991 // Lower the "add/sub/mul with overflow" instruction into a regular ins plus 8992 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering 8993 // looks for this combo and may remove the "setcc" instruction if the "setcc" 8994 // has only one use. 8995 SDNode *N = Op.getNode(); 8996 SDValue LHS = N->getOperand(0); 8997 SDValue RHS = N->getOperand(1); 8998 unsigned BaseOp = 0; 8999 unsigned Cond = 0; 9000 DebugLoc DL = Op.getDebugLoc(); 9001 switch (Op.getOpcode()) { 9002 default: llvm_unreachable("Unknown ovf instruction!"); 9003 case ISD::SADDO: 9004 // A subtract of one will be selected as a INC. 
Note that INC doesn't 9005 // set CF, so we can't do this for UADDO. 9006 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) 9007 if (C->isOne()) { 9008 BaseOp = X86ISD::INC; 9009 Cond = X86::COND_O; 9010 break; 9011 } 9012 BaseOp = X86ISD::ADD; 9013 Cond = X86::COND_O; 9014 break; 9015 case ISD::UADDO: 9016 BaseOp = X86ISD::ADD; 9017 Cond = X86::COND_B; 9018 break; 9019 case ISD::SSUBO: 9020 // A subtract of one will be selected as a DEC. Note that DEC doesn't 9021 // set CF, so we can't do this for USUBO. 9022 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) 9023 if (C->isOne()) { 9024 BaseOp = X86ISD::DEC; 9025 Cond = X86::COND_O; 9026 break; 9027 } 9028 BaseOp = X86ISD::SUB; 9029 Cond = X86::COND_O; 9030 break; 9031 case ISD::USUBO: 9032 BaseOp = X86ISD::SUB; 9033 Cond = X86::COND_B; 9034 break; 9035 case ISD::SMULO: 9036 BaseOp = X86ISD::SMUL; 9037 Cond = X86::COND_O; 9038 break; 9039 case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs 9040 SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0), 9041 MVT::i32); 9042 SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS); 9043 9044 SDValue SetCC = 9045 DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 9046 DAG.getConstant(X86::COND_O, MVT::i32), 9047 SDValue(Sum.getNode(), 2)); 9048 9049 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), SetCC); 9050 return Sum; 9051 } 9052 } 9053 9054 // Also sets EFLAGS. 9055 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32); 9056 SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS); 9057 9058 SDValue SetCC = 9059 DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1), 9060 DAG.getConstant(Cond, MVT::i32), 9061 SDValue(Sum.getNode(), 1)); 9062 9063 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), SetCC); 9064 return Sum; 9065} 9066 9067SDValue X86TargetLowering::LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const{ 9068 DebugLoc dl = Op.getDebugLoc(); 9069 9070 if (!Subtarget->hasSSE2()) { 9071 SDValue Chain = Op.getOperand(0); 9072 SDValue Zero = DAG.getConstant(0, 9073 Subtarget->is64Bit() ? MVT::i64 : MVT::i32); 9074 SDValue Ops[] = { 9075 DAG.getRegister(X86::ESP, MVT::i32), // Base 9076 DAG.getTargetConstant(1, MVT::i8), // Scale 9077 DAG.getRegister(0, MVT::i32), // Index 9078 DAG.getTargetConstant(0, MVT::i32), // Disp 9079 DAG.getRegister(0, MVT::i32), // Segment. 
9080 Zero, 9081 Chain 9082 }; 9083 SDNode *Res = 9084 DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops, 9085 array_lengthof(Ops)); 9086 return SDValue(Res, 0); 9087 } 9088 9089 unsigned isDev = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue(); 9090 if (!isDev) 9091 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0)); 9092 9093 unsigned Op1 = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 9094 unsigned Op2 = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); 9095 unsigned Op3 = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue(); 9096 unsigned Op4 = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); 9097 9098 // def : Pat<(membarrier (i8 0), (i8 0), (i8 0), (i8 1), (i8 1)), (SFENCE)>; 9099 if (!Op1 && !Op2 && !Op3 && Op4) 9100 return DAG.getNode(X86ISD::SFENCE, dl, MVT::Other, Op.getOperand(0)); 9101 9102 // def : Pat<(membarrier (i8 1), (i8 0), (i8 0), (i8 0), (i8 1)), (LFENCE)>; 9103 if (Op1 && !Op2 && !Op3 && !Op4) 9104 return DAG.getNode(X86ISD::LFENCE, dl, MVT::Other, Op.getOperand(0)); 9105 9106 // def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm), (i8 1)), 9107 // (MFENCE)>; 9108 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0)); 9109} 9110 9111SDValue X86TargetLowering::LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) const { 9112 EVT T = Op.getValueType(); 9113 DebugLoc DL = Op.getDebugLoc(); 9114 unsigned Reg = 0; 9115 unsigned size = 0; 9116 switch(T.getSimpleVT().SimpleTy) { 9117 default: 9118 assert(false && "Invalid value type!"); 9119 case MVT::i8: Reg = X86::AL; size = 1; break; 9120 case MVT::i16: Reg = X86::AX; size = 2; break; 9121 case MVT::i32: Reg = X86::EAX; size = 4; break; 9122 case MVT::i64: 9123 assert(Subtarget->is64Bit() && "Node not type legal!"); 9124 Reg = X86::RAX; size = 8; 9125 break; 9126 } 9127 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg, 9128 Op.getOperand(2), SDValue()); 9129 SDValue Ops[] = { cpIn.getValue(0), 9130 Op.getOperand(1), 9131 Op.getOperand(3), 9132 DAG.getTargetConstant(size, MVT::i8), 9133 cpIn.getValue(1) }; 9134 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 9135 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand(); 9136 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys, 9137 Ops, 5, T, MMO); 9138 SDValue cpOut = 9139 DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1)); 9140 return cpOut; 9141} 9142 9143SDValue X86TargetLowering::LowerREADCYCLECOUNTER(SDValue Op, 9144 SelectionDAG &DAG) const { 9145 assert(Subtarget->is64Bit() && "Result not type legalized?"); 9146 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 9147 SDValue TheChain = Op.getOperand(0); 9148 DebugLoc dl = Op.getDebugLoc(); 9149 SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1); 9150 SDValue rax = DAG.getCopyFromReg(rd, dl, X86::RAX, MVT::i64, rd.getValue(1)); 9151 SDValue rdx = DAG.getCopyFromReg(rax.getValue(1), dl, X86::RDX, MVT::i64, 9152 rax.getValue(2)); 9153 SDValue Tmp = DAG.getNode(ISD::SHL, dl, MVT::i64, rdx, 9154 DAG.getConstant(32, MVT::i8)); 9155 SDValue Ops[] = { 9156 DAG.getNode(ISD::OR, dl, MVT::i64, rax, Tmp), 9157 rdx.getValue(1) 9158 }; 9159 return DAG.getMergeValues(Ops, 2, dl); 9160} 9161 9162SDValue X86TargetLowering::LowerBITCAST(SDValue Op, 9163 SelectionDAG &DAG) const { 9164 EVT SrcVT = Op.getOperand(0).getValueType(); 9165 EVT DstVT = Op.getValueType(); 9166 assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() && 9167 Subtarget->hasMMX() && "Unexpected custom BITCAST"); 
9168 assert((DstVT == MVT::i64 || 9169 (DstVT.isVector() && DstVT.getSizeInBits()==64)) && 9170 "Unexpected custom BITCAST"); 9171 // i64 <=> MMX conversions are Legal. 9172 if (SrcVT==MVT::i64 && DstVT.isVector()) 9173 return Op; 9174 if (DstVT==MVT::i64 && SrcVT.isVector()) 9175 return Op; 9176 // MMX <=> MMX conversions are Legal. 9177 if (SrcVT.isVector() && DstVT.isVector()) 9178 return Op; 9179 // All other conversions need to be expanded. 9180 return SDValue(); 9181} 9182 9183SDValue X86TargetLowering::LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) const { 9184 SDNode *Node = Op.getNode(); 9185 DebugLoc dl = Node->getDebugLoc(); 9186 EVT T = Node->getValueType(0); 9187 SDValue negOp = DAG.getNode(ISD::SUB, dl, T, 9188 DAG.getConstant(0, T), Node->getOperand(2)); 9189 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl, 9190 cast<AtomicSDNode>(Node)->getMemoryVT(), 9191 Node->getOperand(0), 9192 Node->getOperand(1), negOp, 9193 cast<AtomicSDNode>(Node)->getSrcValue(), 9194 cast<AtomicSDNode>(Node)->getAlignment()); 9195} 9196 9197static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) { 9198 EVT VT = Op.getNode()->getValueType(0); 9199 9200 // Let legalize expand this if it isn't a legal type yet. 9201 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 9202 return SDValue(); 9203 9204 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 9205 9206 unsigned Opc; 9207 bool ExtraOp = false; 9208 switch (Op.getOpcode()) { 9209 default: assert(0 && "Invalid code"); 9210 case ISD::ADDC: Opc = X86ISD::ADD; break; 9211 case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break; 9212 case ISD::SUBC: Opc = X86ISD::SUB; break; 9213 case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break; 9214 } 9215 9216 if (!ExtraOp) 9217 return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0), 9218 Op.getOperand(1)); 9219 return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0), 9220 Op.getOperand(1), Op.getOperand(2)); 9221} 9222 9223/// LowerOperation - Provide custom lowering hooks for some operations. 
9224/// 9225SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 9226 switch (Op.getOpcode()) { 9227 default: llvm_unreachable("Should not custom lower this!"); 9228 case ISD::MEMBARRIER: return LowerMEMBARRIER(Op,DAG); 9229 case ISD::ATOMIC_CMP_SWAP: return LowerCMP_SWAP(Op,DAG); 9230 case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG); 9231 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 9232 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 9233 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 9234 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 9235 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 9236 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG); 9237 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, DAG); 9238 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 9239 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 9240 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 9241 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 9242 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG); 9243 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 9244 case ISD::SHL_PARTS: 9245 case ISD::SRA_PARTS: 9246 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG); 9247 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 9248 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG); 9249 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 9250 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG); 9251 case ISD::FABS: return LowerFABS(Op, DAG); 9252 case ISD::FNEG: return LowerFNEG(Op, DAG); 9253 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 9254 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG); 9255 case ISD::SETCC: return LowerSETCC(Op, DAG); 9256 case ISD::VSETCC: return LowerVSETCC(Op, DAG); 9257 case ISD::SELECT: return LowerSELECT(Op, DAG); 9258 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 9259 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 9260 case ISD::VASTART: return LowerVASTART(Op, DAG); 9261 case ISD::VAARG: return LowerVAARG(Op, DAG); 9262 case ISD::VACOPY: return LowerVACOPY(Op, DAG); 9263 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 9264 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 9265 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 9266 case ISD::FRAME_TO_ARGS_OFFSET: 9267 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG); 9268 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); 9269 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG); 9270 case ISD::TRAMPOLINE: return LowerTRAMPOLINE(Op, DAG); 9271 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 9272 case ISD::CTLZ: return LowerCTLZ(Op, DAG); 9273 case ISD::CTTZ: return LowerCTTZ(Op, DAG); 9274 case ISD::MUL: return LowerMUL_V2I64(Op, DAG); 9275 case ISD::SRA: 9276 case ISD::SRL: 9277 case ISD::SHL: return LowerShift(Op, DAG); 9278 case ISD::SADDO: 9279 case ISD::UADDO: 9280 case ISD::SSUBO: 9281 case ISD::USUBO: 9282 case ISD::SMULO: 9283 case ISD::UMULO: return LowerXALUO(Op, DAG); 9284 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, DAG); 9285 case ISD::BITCAST: return LowerBITCAST(Op, DAG); 9286 case ISD::ADDC: 9287 case ISD::ADDE: 9288 case ISD::SUBC: 9289 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); 9290 } 9291} 9292 9293void X86TargetLowering:: 9294ReplaceATOMIC_BINARY_64(SDNode *Node, 
SmallVectorImpl<SDValue>&Results, 9295 SelectionDAG &DAG, unsigned NewOp) const { 9296 EVT T = Node->getValueType(0); 9297 DebugLoc dl = Node->getDebugLoc(); 9298 assert (T == MVT::i64 && "Only know how to expand i64 atomics"); 9299 9300 SDValue Chain = Node->getOperand(0); 9301 SDValue In1 = Node->getOperand(1); 9302 SDValue In2L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 9303 Node->getOperand(2), DAG.getIntPtrConstant(0)); 9304 SDValue In2H = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 9305 Node->getOperand(2), DAG.getIntPtrConstant(1)); 9306 SDValue Ops[] = { Chain, In1, In2L, In2H }; 9307 SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 9308 SDValue Result = 9309 DAG.getMemIntrinsicNode(NewOp, dl, Tys, Ops, 4, MVT::i64, 9310 cast<MemSDNode>(Node)->getMemOperand()); 9311 SDValue OpsF[] = { Result.getValue(0), Result.getValue(1)}; 9312 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2)); 9313 Results.push_back(Result.getValue(2)); 9314} 9315 9316/// ReplaceNodeResults - Replace a node with an illegal result type 9317/// with a new node built out of custom code. 9318void X86TargetLowering::ReplaceNodeResults(SDNode *N, 9319 SmallVectorImpl<SDValue>&Results, 9320 SelectionDAG &DAG) const { 9321 DebugLoc dl = N->getDebugLoc(); 9322 switch (N->getOpcode()) { 9323 default: 9324 assert(false && "Do not know how to custom type legalize this operation!"); 9325 return; 9326 case ISD::ADDC: 9327 case ISD::ADDE: 9328 case ISD::SUBC: 9329 case ISD::SUBE: 9330 // We don't want to expand or promote these. 9331 return; 9332 case ISD::FP_TO_SINT: { 9333 std::pair<SDValue,SDValue> Vals = 9334 FP_TO_INTHelper(SDValue(N, 0), DAG, true); 9335 SDValue FIST = Vals.first, StackSlot = Vals.second; 9336 if (FIST.getNode() != 0) { 9337 EVT VT = N->getValueType(0); 9338 // Return a load from the stack slot. 9339 Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot, 9340 MachinePointerInfo(), false, false, 0)); 9341 } 9342 return; 9343 } 9344 case ISD::READCYCLECOUNTER: { 9345 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 9346 SDValue TheChain = N->getOperand(0); 9347 SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1); 9348 SDValue eax = DAG.getCopyFromReg(rd, dl, X86::EAX, MVT::i32, 9349 rd.getValue(1)); 9350 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), dl, X86::EDX, MVT::i32, 9351 eax.getValue(2)); 9352 // Use a buildpair to merge the two 32-bit values into a 64-bit one. 
9353 SDValue Ops[] = { eax, edx }; 9354 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops, 2)); 9355 Results.push_back(edx.getValue(1)); 9356 return; 9357 } 9358 case ISD::ATOMIC_CMP_SWAP: { 9359 EVT T = N->getValueType(0); 9360 assert (T == MVT::i64 && "Only know how to expand i64 Cmp and Swap"); 9361 SDValue cpInL, cpInH; 9362 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(2), 9363 DAG.getConstant(0, MVT::i32)); 9364 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(2), 9365 DAG.getConstant(1, MVT::i32)); 9366 cpInL = DAG.getCopyToReg(N->getOperand(0), dl, X86::EAX, cpInL, SDValue()); 9367 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl, X86::EDX, cpInH, 9368 cpInL.getValue(1)); 9369 SDValue swapInL, swapInH; 9370 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(3), 9371 DAG.getConstant(0, MVT::i32)); 9372 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(3), 9373 DAG.getConstant(1, MVT::i32)); 9374 swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl, X86::EBX, swapInL, 9375 cpInH.getValue(1)); 9376 swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl, X86::ECX, swapInH, 9377 swapInL.getValue(1)); 9378 SDValue Ops[] = { swapInH.getValue(0), 9379 N->getOperand(1), 9380 swapInH.getValue(1) }; 9381 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 9382 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); 9383 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG8_DAG, dl, Tys, 9384 Ops, 3, T, MMO); 9385 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl, X86::EAX, 9386 MVT::i32, Result.getValue(1)); 9387 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl, X86::EDX, 9388 MVT::i32, cpOutL.getValue(2)); 9389 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)}; 9390 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2)); 9391 Results.push_back(cpOutH.getValue(1)); 9392 return; 9393 } 9394 case ISD::ATOMIC_LOAD_ADD: 9395 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMADD64_DAG); 9396 return; 9397 case ISD::ATOMIC_LOAD_AND: 9398 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMAND64_DAG); 9399 return; 9400 case ISD::ATOMIC_LOAD_NAND: 9401 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMNAND64_DAG); 9402 return; 9403 case ISD::ATOMIC_LOAD_OR: 9404 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMOR64_DAG); 9405 return; 9406 case ISD::ATOMIC_LOAD_SUB: 9407 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMSUB64_DAG); 9408 return; 9409 case ISD::ATOMIC_LOAD_XOR: 9410 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMXOR64_DAG); 9411 return; 9412 case ISD::ATOMIC_SWAP: 9413 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMSWAP64_DAG); 9414 return; 9415 } 9416} 9417 9418const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { 9419 switch (Opcode) { 9420 default: return NULL; 9421 case X86ISD::BSF: return "X86ISD::BSF"; 9422 case X86ISD::BSR: return "X86ISD::BSR"; 9423 case X86ISD::SHLD: return "X86ISD::SHLD"; 9424 case X86ISD::SHRD: return "X86ISD::SHRD"; 9425 case X86ISD::FAND: return "X86ISD::FAND"; 9426 case X86ISD::FOR: return "X86ISD::FOR"; 9427 case X86ISD::FXOR: return "X86ISD::FXOR"; 9428 case X86ISD::FSRL: return "X86ISD::FSRL"; 9429 case X86ISD::FILD: return "X86ISD::FILD"; 9430 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG"; 9431 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM"; 9432 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM"; 9433 case 
X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM"; 9434 case X86ISD::FLD: return "X86ISD::FLD"; 9435 case X86ISD::FST: return "X86ISD::FST"; 9436 case X86ISD::CALL: return "X86ISD::CALL"; 9437 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG"; 9438 case X86ISD::BT: return "X86ISD::BT"; 9439 case X86ISD::CMP: return "X86ISD::CMP"; 9440 case X86ISD::COMI: return "X86ISD::COMI"; 9441 case X86ISD::UCOMI: return "X86ISD::UCOMI"; 9442 case X86ISD::SETCC: return "X86ISD::SETCC"; 9443 case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY"; 9444 case X86ISD::FSETCCsd: return "X86ISD::FSETCCsd"; 9445 case X86ISD::FSETCCss: return "X86ISD::FSETCCss"; 9446 case X86ISD::CMOV: return "X86ISD::CMOV"; 9447 case X86ISD::BRCOND: return "X86ISD::BRCOND"; 9448 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG"; 9449 case X86ISD::REP_STOS: return "X86ISD::REP_STOS"; 9450 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS"; 9451 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg"; 9452 case X86ISD::Wrapper: return "X86ISD::Wrapper"; 9453 case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP"; 9454 case X86ISD::PEXTRB: return "X86ISD::PEXTRB"; 9455 case X86ISD::PEXTRW: return "X86ISD::PEXTRW"; 9456 case X86ISD::INSERTPS: return "X86ISD::INSERTPS"; 9457 case X86ISD::PINSRB: return "X86ISD::PINSRB"; 9458 case X86ISD::PINSRW: return "X86ISD::PINSRW"; 9459 case X86ISD::PSHUFB: return "X86ISD::PSHUFB"; 9460 case X86ISD::PANDN: return "X86ISD::PANDN"; 9461 case X86ISD::PSIGNB: return "X86ISD::PSIGNB"; 9462 case X86ISD::PSIGNW: return "X86ISD::PSIGNW"; 9463 case X86ISD::PSIGND: return "X86ISD::PSIGND"; 9464 case X86ISD::PBLENDVB: return "X86ISD::PBLENDVB"; 9465 case X86ISD::FMAX: return "X86ISD::FMAX"; 9466 case X86ISD::FMIN: return "X86ISD::FMIN"; 9467 case X86ISD::FRSQRT: return "X86ISD::FRSQRT"; 9468 case X86ISD::FRCP: return "X86ISD::FRCP"; 9469 case X86ISD::TLSADDR: return "X86ISD::TLSADDR"; 9470 case X86ISD::TLSCALL: return "X86ISD::TLSCALL"; 9471 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN"; 9472 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN"; 9473 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m"; 9474 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG"; 9475 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG"; 9476 case X86ISD::ATOMADD64_DAG: return "X86ISD::ATOMADD64_DAG"; 9477 case X86ISD::ATOMSUB64_DAG: return "X86ISD::ATOMSUB64_DAG"; 9478 case X86ISD::ATOMOR64_DAG: return "X86ISD::ATOMOR64_DAG"; 9479 case X86ISD::ATOMXOR64_DAG: return "X86ISD::ATOMXOR64_DAG"; 9480 case X86ISD::ATOMAND64_DAG: return "X86ISD::ATOMAND64_DAG"; 9481 case X86ISD::ATOMNAND64_DAG: return "X86ISD::ATOMNAND64_DAG"; 9482 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL"; 9483 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD"; 9484 case X86ISD::VSHL: return "X86ISD::VSHL"; 9485 case X86ISD::VSRL: return "X86ISD::VSRL"; 9486 case X86ISD::CMPPD: return "X86ISD::CMPPD"; 9487 case X86ISD::CMPPS: return "X86ISD::CMPPS"; 9488 case X86ISD::PCMPEQB: return "X86ISD::PCMPEQB"; 9489 case X86ISD::PCMPEQW: return "X86ISD::PCMPEQW"; 9490 case X86ISD::PCMPEQD: return "X86ISD::PCMPEQD"; 9491 case X86ISD::PCMPEQQ: return "X86ISD::PCMPEQQ"; 9492 case X86ISD::PCMPGTB: return "X86ISD::PCMPGTB"; 9493 case X86ISD::PCMPGTW: return "X86ISD::PCMPGTW"; 9494 case X86ISD::PCMPGTD: return "X86ISD::PCMPGTD"; 9495 case X86ISD::PCMPGTQ: return "X86ISD::PCMPGTQ"; 9496 case X86ISD::ADD: return "X86ISD::ADD"; 9497 case X86ISD::SUB: return "X86ISD::SUB"; 9498 case X86ISD::ADC: return "X86ISD::ADC"; 9499 case X86ISD::SBB: 
return "X86ISD::SBB"; 9500 case X86ISD::SMUL: return "X86ISD::SMUL"; 9501 case X86ISD::UMUL: return "X86ISD::UMUL"; 9502 case X86ISD::INC: return "X86ISD::INC"; 9503 case X86ISD::DEC: return "X86ISD::DEC"; 9504 case X86ISD::OR: return "X86ISD::OR"; 9505 case X86ISD::XOR: return "X86ISD::XOR"; 9506 case X86ISD::AND: return "X86ISD::AND"; 9507 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM"; 9508 case X86ISD::PTEST: return "X86ISD::PTEST"; 9509 case X86ISD::TESTP: return "X86ISD::TESTP"; 9510 case X86ISD::PALIGN: return "X86ISD::PALIGN"; 9511 case X86ISD::PSHUFD: return "X86ISD::PSHUFD"; 9512 case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW"; 9513 case X86ISD::PSHUFHW_LD: return "X86ISD::PSHUFHW_LD"; 9514 case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW"; 9515 case X86ISD::PSHUFLW_LD: return "X86ISD::PSHUFLW_LD"; 9516 case X86ISD::SHUFPS: return "X86ISD::SHUFPS"; 9517 case X86ISD::SHUFPD: return "X86ISD::SHUFPD"; 9518 case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS"; 9519 case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD"; 9520 case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS"; 9521 case X86ISD::MOVHLPD: return "X86ISD::MOVHLPD"; 9522 case X86ISD::MOVLPS: return "X86ISD::MOVLPS"; 9523 case X86ISD::MOVLPD: return "X86ISD::MOVLPD"; 9524 case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP"; 9525 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP"; 9526 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP"; 9527 case X86ISD::MOVSHDUP_LD: return "X86ISD::MOVSHDUP_LD"; 9528 case X86ISD::MOVSLDUP_LD: return "X86ISD::MOVSLDUP_LD"; 9529 case X86ISD::MOVSD: return "X86ISD::MOVSD"; 9530 case X86ISD::MOVSS: return "X86ISD::MOVSS"; 9531 case X86ISD::UNPCKLPS: return "X86ISD::UNPCKLPS"; 9532 case X86ISD::UNPCKLPD: return "X86ISD::UNPCKLPD"; 9533 case X86ISD::VUNPCKLPS: return "X86ISD::VUNPCKLPS"; 9534 case X86ISD::VUNPCKLPD: return "X86ISD::VUNPCKLPD"; 9535 case X86ISD::VUNPCKLPSY: return "X86ISD::VUNPCKLPSY"; 9536 case X86ISD::VUNPCKLPDY: return "X86ISD::VUNPCKLPDY"; 9537 case X86ISD::UNPCKHPS: return "X86ISD::UNPCKHPS"; 9538 case X86ISD::UNPCKHPD: return "X86ISD::UNPCKHPD"; 9539 case X86ISD::PUNPCKLBW: return "X86ISD::PUNPCKLBW"; 9540 case X86ISD::PUNPCKLWD: return "X86ISD::PUNPCKLWD"; 9541 case X86ISD::PUNPCKLDQ: return "X86ISD::PUNPCKLDQ"; 9542 case X86ISD::PUNPCKLQDQ: return "X86ISD::PUNPCKLQDQ"; 9543 case X86ISD::PUNPCKHBW: return "X86ISD::PUNPCKHBW"; 9544 case X86ISD::PUNPCKHWD: return "X86ISD::PUNPCKHWD"; 9545 case X86ISD::PUNPCKHDQ: return "X86ISD::PUNPCKHDQ"; 9546 case X86ISD::PUNPCKHQDQ: return "X86ISD::PUNPCKHQDQ"; 9547 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS"; 9548 case X86ISD::VAARG_64: return "X86ISD::VAARG_64"; 9549 case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA"; 9550 } 9551} 9552 9553// isLegalAddressingMode - Return true if the addressing mode represented 9554// by AM is legal for this target, for a load/store of the specified type. 9555bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM, 9556 const Type *Ty) const { 9557 // X86 supports extremely general addressing modes. 9558 CodeModel::Model M = getTargetMachine().getCodeModel(); 9559 Reloc::Model R = getTargetMachine().getRelocationModel(); 9560 9561 // X86 allows a sign-extended 32-bit immediate field as a displacement. 
9562 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != NULL)) 9563 return false; 9564 9565 if (AM.BaseGV) { 9566 unsigned GVFlags = 9567 Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine()); 9568 9569 // If a reference to this global requires an extra load, we can't fold it. 9570 if (isGlobalStubReference(GVFlags)) 9571 return false; 9572 9573 // If BaseGV requires a register for the PIC base, we cannot also have a 9574 // BaseReg specified. 9575 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags)) 9576 return false; 9577 9578 // If lower 4G is not available, then we must use rip-relative addressing. 9579 if ((M != CodeModel::Small || R != Reloc::Static) && 9580 Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1)) 9581 return false; 9582 } 9583 9584 switch (AM.Scale) { 9585 case 0: 9586 case 1: 9587 case 2: 9588 case 4: 9589 case 8: 9590 // These scales always work. 9591 break; 9592 case 3: 9593 case 5: 9594 case 9: 9595 // These scales are formed with basereg+scalereg. Only accept if there is 9596 // no basereg yet. 9597 if (AM.HasBaseReg) 9598 return false; 9599 break; 9600 default: // Other stuff never works. 9601 return false; 9602 } 9603 9604 return true; 9605} 9606 9607 9608bool X86TargetLowering::isTruncateFree(const Type *Ty1, const Type *Ty2) const { 9609 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) 9610 return false; 9611 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); 9612 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); 9613 if (NumBits1 <= NumBits2) 9614 return false; 9615 return true; 9616} 9617 9618bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { 9619 if (!VT1.isInteger() || !VT2.isInteger()) 9620 return false; 9621 unsigned NumBits1 = VT1.getSizeInBits(); 9622 unsigned NumBits2 = VT2.getSizeInBits(); 9623 if (NumBits1 <= NumBits2) 9624 return false; 9625 return true; 9626} 9627 9628bool X86TargetLowering::isZExtFree(const Type *Ty1, const Type *Ty2) const { 9629 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers. 9630 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit(); 9631} 9632 9633bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const { 9634 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers. 9635 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit(); 9636} 9637 9638bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const { 9639 // i16 instructions are longer (0x66 prefix) and potentially slower. 9640 return !(VT1 == MVT::i32 && VT2 == MVT::i16); 9641} 9642 9643/// isShuffleMaskLegal - Targets can use this to indicate that they only 9644/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 9645/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 9646/// are assumed to be legal. 9647bool 9648X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, 9649 EVT VT) const { 9650 // Very little shuffling can be done for 64-bit vectors right now. 9651 if (VT.getSizeInBits() == 64) 9652 return isPALIGNRMask(M, VT, Subtarget->hasSSSE3()); 9653 9654 // FIXME: pshufb, blends, shifts. 
9655 return (VT.getVectorNumElements() == 2 || 9656 ShuffleVectorSDNode::isSplatMask(&M[0], VT) || 9657 isMOVLMask(M, VT) || 9658 isSHUFPMask(M, VT) || 9659 isPSHUFDMask(M, VT) || 9660 isPSHUFHWMask(M, VT) || 9661 isPSHUFLWMask(M, VT) || 9662 isPALIGNRMask(M, VT, Subtarget->hasSSSE3()) || 9663 isUNPCKLMask(M, VT) || 9664 isUNPCKHMask(M, VT) || 9665 isUNPCKL_v_undef_Mask(M, VT) || 9666 isUNPCKH_v_undef_Mask(M, VT)); 9667} 9668 9669bool 9670X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask, 9671 EVT VT) const { 9672 unsigned NumElts = VT.getVectorNumElements(); 9673 // FIXME: This collection of masks seems suspect. 9674 if (NumElts == 2) 9675 return true; 9676 if (NumElts == 4 && VT.getSizeInBits() == 128) { 9677 return (isMOVLMask(Mask, VT) || 9678 isCommutedMOVLMask(Mask, VT, true) || 9679 isSHUFPMask(Mask, VT) || 9680 isCommutedSHUFPMask(Mask, VT)); 9681 } 9682 return false; 9683} 9684 9685//===----------------------------------------------------------------------===// 9686// X86 Scheduler Hooks 9687//===----------------------------------------------------------------------===// 9688 9689// private utility function 9690MachineBasicBlock * 9691X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr, 9692 MachineBasicBlock *MBB, 9693 unsigned regOpc, 9694 unsigned immOpc, 9695 unsigned LoadOpc, 9696 unsigned CXchgOpc, 9697 unsigned notOpc, 9698 unsigned EAXreg, 9699 TargetRegisterClass *RC, 9700 bool invSrc) const { 9701 // For the atomic bitwise operator, we generate 9702 // thisMBB: 9703 // newMBB: 9704 // ld t1 = [bitinstr.addr] 9705 // op t2 = t1, [bitinstr.val] 9706 // mov EAX = t1 9707 // lcs dest = [bitinstr.addr], t2 [EAX is implicit] 9708 // bz newMBB 9709 // fallthrough -->nextMBB 9710 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 9711 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 9712 MachineFunction::iterator MBBIter = MBB; 9713 ++MBBIter; 9714 9715 /// First build the CFG 9716 MachineFunction *F = MBB->getParent(); 9717 MachineBasicBlock *thisMBB = MBB; 9718 MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB); 9719 MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB); 9720 F->insert(MBBIter, newMBB); 9721 F->insert(MBBIter, nextMBB); 9722 9723 // Transfer the remainder of thisMBB and its successor edges to nextMBB. 
9724 nextMBB->splice(nextMBB->begin(), thisMBB, 9725 llvm::next(MachineBasicBlock::iterator(bInstr)), 9726 thisMBB->end()); 9727 nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB); 9728 9729 // Update thisMBB to fall through to newMBB 9730 thisMBB->addSuccessor(newMBB); 9731 9732 // newMBB jumps to itself and fall through to nextMBB 9733 newMBB->addSuccessor(nextMBB); 9734 newMBB->addSuccessor(newMBB); 9735 9736 // Insert instructions into newMBB based on incoming instruction 9737 assert(bInstr->getNumOperands() < X86::AddrNumOperands + 4 && 9738 "unexpected number of operands"); 9739 DebugLoc dl = bInstr->getDebugLoc(); 9740 MachineOperand& destOper = bInstr->getOperand(0); 9741 MachineOperand* argOpers[2 + X86::AddrNumOperands]; 9742 int numArgs = bInstr->getNumOperands() - 1; 9743 for (int i=0; i < numArgs; ++i) 9744 argOpers[i] = &bInstr->getOperand(i+1); 9745 9746 // x86 address has 4 operands: base, index, scale, and displacement 9747 int lastAddrIndx = X86::AddrNumOperands - 1; // [0,3] 9748 int valArgIndx = lastAddrIndx + 1; 9749 9750 unsigned t1 = F->getRegInfo().createVirtualRegister(RC); 9751 MachineInstrBuilder MIB = BuildMI(newMBB, dl, TII->get(LoadOpc), t1); 9752 for (int i=0; i <= lastAddrIndx; ++i) 9753 (*MIB).addOperand(*argOpers[i]); 9754 9755 unsigned tt = F->getRegInfo().createVirtualRegister(RC); 9756 if (invSrc) { 9757 MIB = BuildMI(newMBB, dl, TII->get(notOpc), tt).addReg(t1); 9758 } 9759 else 9760 tt = t1; 9761 9762 unsigned t2 = F->getRegInfo().createVirtualRegister(RC); 9763 assert((argOpers[valArgIndx]->isReg() || 9764 argOpers[valArgIndx]->isImm()) && 9765 "invalid operand"); 9766 if (argOpers[valArgIndx]->isReg()) 9767 MIB = BuildMI(newMBB, dl, TII->get(regOpc), t2); 9768 else 9769 MIB = BuildMI(newMBB, dl, TII->get(immOpc), t2); 9770 MIB.addReg(tt); 9771 (*MIB).addOperand(*argOpers[valArgIndx]); 9772 9773 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), EAXreg); 9774 MIB.addReg(t1); 9775 9776 MIB = BuildMI(newMBB, dl, TII->get(CXchgOpc)); 9777 for (int i=0; i <= lastAddrIndx; ++i) 9778 (*MIB).addOperand(*argOpers[i]); 9779 MIB.addReg(t2); 9780 assert(bInstr->hasOneMemOperand() && "Unexpected number of memoperand"); 9781 (*MIB).setMemRefs(bInstr->memoperands_begin(), 9782 bInstr->memoperands_end()); 9783 9784 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), destOper.getReg()); 9785 MIB.addReg(EAXreg); 9786 9787 // insert branch 9788 BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB); 9789 9790 bInstr->eraseFromParent(); // The pseudo instruction is gone now. 9791 return nextMBB; 9792} 9793 9794// private utility function: 64 bit atomics on 32 bit host. 
9795MachineBasicBlock * 9796X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr, 9797 MachineBasicBlock *MBB, 9798 unsigned regOpcL, 9799 unsigned regOpcH, 9800 unsigned immOpcL, 9801 unsigned immOpcH, 9802 bool invSrc) const { 9803 // For the atomic bitwise operator, we generate 9804 // thisMBB (instructions are in pairs, except cmpxchg8b) 9805 // ld t1,t2 = [bitinstr.addr] 9806 // newMBB: 9807 // out1, out2 = phi (thisMBB, t1/t2) (newMBB, t3/t4) 9808 // op t5, t6 <- out1, out2, [bitinstr.val] 9809 // (for SWAP, substitute: mov t5, t6 <- [bitinstr.val]) 9810 // mov ECX, EBX <- t5, t6 9811 // mov EAX, EDX <- t1, t2 9812 // cmpxchg8b [bitinstr.addr] [EAX, EDX, EBX, ECX implicit] 9813 // mov t3, t4 <- EAX, EDX 9814 // bz newMBB 9815 // result in out1, out2 9816 // fallthrough -->nextMBB 9817 9818 const TargetRegisterClass *RC = X86::GR32RegisterClass; 9819 const unsigned LoadOpc = X86::MOV32rm; 9820 const unsigned NotOpc = X86::NOT32r; 9821 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 9822 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 9823 MachineFunction::iterator MBBIter = MBB; 9824 ++MBBIter; 9825 9826 /// First build the CFG 9827 MachineFunction *F = MBB->getParent(); 9828 MachineBasicBlock *thisMBB = MBB; 9829 MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB); 9830 MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB); 9831 F->insert(MBBIter, newMBB); 9832 F->insert(MBBIter, nextMBB); 9833 9834 // Transfer the remainder of thisMBB and its successor edges to nextMBB. 9835 nextMBB->splice(nextMBB->begin(), thisMBB, 9836 llvm::next(MachineBasicBlock::iterator(bInstr)), 9837 thisMBB->end()); 9838 nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB); 9839 9840 // Update thisMBB to fall through to newMBB 9841 thisMBB->addSuccessor(newMBB); 9842 9843 // newMBB jumps to itself and fall through to nextMBB 9844 newMBB->addSuccessor(nextMBB); 9845 newMBB->addSuccessor(newMBB); 9846 9847 DebugLoc dl = bInstr->getDebugLoc(); 9848 // Insert instructions into newMBB based on incoming instruction 9849 // There are 8 "real" operands plus 9 implicit def/uses, ignored here. 9850 assert(bInstr->getNumOperands() < X86::AddrNumOperands + 14 && 9851 "unexpected number of operands"); 9852 MachineOperand& dest1Oper = bInstr->getOperand(0); 9853 MachineOperand& dest2Oper = bInstr->getOperand(1); 9854 MachineOperand* argOpers[2 + X86::AddrNumOperands]; 9855 for (int i=0; i < 2 + X86::AddrNumOperands; ++i) { 9856 argOpers[i] = &bInstr->getOperand(i+2); 9857 9858 // We use some of the operands multiple times, so conservatively just 9859 // clear any kill flags that might be present. 9860 if (argOpers[i]->isReg() && argOpers[i]->isUse()) 9861 argOpers[i]->setIsKill(false); 9862 } 9863 9864 // x86 address has 5 operands: base, index, scale, displacement, and segment. 9865 int lastAddrIndx = X86::AddrNumOperands - 1; // [0,3] 9866 9867 unsigned t1 = F->getRegInfo().createVirtualRegister(RC); 9868 MachineInstrBuilder MIB = BuildMI(thisMBB, dl, TII->get(LoadOpc), t1); 9869 for (int i=0; i <= lastAddrIndx; ++i) 9870 (*MIB).addOperand(*argOpers[i]); 9871 unsigned t2 = F->getRegInfo().createVirtualRegister(RC); 9872 MIB = BuildMI(thisMBB, dl, TII->get(LoadOpc), t2); 9873 // add 4 to displacement. 
9874 for (int i=0; i <= lastAddrIndx-2; ++i) 9875 (*MIB).addOperand(*argOpers[i]); 9876 MachineOperand newOp3 = *(argOpers[3]); 9877 if (newOp3.isImm()) 9878 newOp3.setImm(newOp3.getImm()+4); 9879 else 9880 newOp3.setOffset(newOp3.getOffset()+4); 9881 (*MIB).addOperand(newOp3); 9882 (*MIB).addOperand(*argOpers[lastAddrIndx]); 9883 9884 // t3/4 are defined later, at the bottom of the loop 9885 unsigned t3 = F->getRegInfo().createVirtualRegister(RC); 9886 unsigned t4 = F->getRegInfo().createVirtualRegister(RC); 9887 BuildMI(newMBB, dl, TII->get(X86::PHI), dest1Oper.getReg()) 9888 .addReg(t1).addMBB(thisMBB).addReg(t3).addMBB(newMBB); 9889 BuildMI(newMBB, dl, TII->get(X86::PHI), dest2Oper.getReg()) 9890 .addReg(t2).addMBB(thisMBB).addReg(t4).addMBB(newMBB); 9891 9892 // The subsequent operations should be using the destination registers of 9893 //the PHI instructions. 9894 if (invSrc) { 9895 t1 = F->getRegInfo().createVirtualRegister(RC); 9896 t2 = F->getRegInfo().createVirtualRegister(RC); 9897 MIB = BuildMI(newMBB, dl, TII->get(NotOpc), t1).addReg(dest1Oper.getReg()); 9898 MIB = BuildMI(newMBB, dl, TII->get(NotOpc), t2).addReg(dest2Oper.getReg()); 9899 } else { 9900 t1 = dest1Oper.getReg(); 9901 t2 = dest2Oper.getReg(); 9902 } 9903 9904 int valArgIndx = lastAddrIndx + 1; 9905 assert((argOpers[valArgIndx]->isReg() || 9906 argOpers[valArgIndx]->isImm()) && 9907 "invalid operand"); 9908 unsigned t5 = F->getRegInfo().createVirtualRegister(RC); 9909 unsigned t6 = F->getRegInfo().createVirtualRegister(RC); 9910 if (argOpers[valArgIndx]->isReg()) 9911 MIB = BuildMI(newMBB, dl, TII->get(regOpcL), t5); 9912 else 9913 MIB = BuildMI(newMBB, dl, TII->get(immOpcL), t5); 9914 if (regOpcL != X86::MOV32rr) 9915 MIB.addReg(t1); 9916 (*MIB).addOperand(*argOpers[valArgIndx]); 9917 assert(argOpers[valArgIndx + 1]->isReg() == 9918 argOpers[valArgIndx]->isReg()); 9919 assert(argOpers[valArgIndx + 1]->isImm() == 9920 argOpers[valArgIndx]->isImm()); 9921 if (argOpers[valArgIndx + 1]->isReg()) 9922 MIB = BuildMI(newMBB, dl, TII->get(regOpcH), t6); 9923 else 9924 MIB = BuildMI(newMBB, dl, TII->get(immOpcH), t6); 9925 if (regOpcH != X86::MOV32rr) 9926 MIB.addReg(t2); 9927 (*MIB).addOperand(*argOpers[valArgIndx + 1]); 9928 9929 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EAX); 9930 MIB.addReg(t1); 9931 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EDX); 9932 MIB.addReg(t2); 9933 9934 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EBX); 9935 MIB.addReg(t5); 9936 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::ECX); 9937 MIB.addReg(t6); 9938 9939 MIB = BuildMI(newMBB, dl, TII->get(X86::LCMPXCHG8B)); 9940 for (int i=0; i <= lastAddrIndx; ++i) 9941 (*MIB).addOperand(*argOpers[i]); 9942 9943 assert(bInstr->hasOneMemOperand() && "Unexpected number of memoperand"); 9944 (*MIB).setMemRefs(bInstr->memoperands_begin(), 9945 bInstr->memoperands_end()); 9946 9947 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t3); 9948 MIB.addReg(X86::EAX); 9949 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t4); 9950 MIB.addReg(X86::EDX); 9951 9952 // insert branch 9953 BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB); 9954 9955 bInstr->eraseFromParent(); // The pseudo instruction is gone now. 
9956 return nextMBB; 9957} 9958 9959// private utility function 9960MachineBasicBlock * 9961X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr, 9962 MachineBasicBlock *MBB, 9963 unsigned cmovOpc) const { 9964 // For the atomic min/max operator, we generate 9965 // thisMBB: 9966 // newMBB: 9967 // ld t1 = [min/max.addr] 9968 // mov t2 = [min/max.val] 9969 // cmp t1, t2 9970 // cmov[cond] t2 = t1 9971 // mov EAX = t1 9972 // lcs dest = [bitinstr.addr], t2 [EAX is implicit] 9973 // bz newMBB 9974 // fallthrough -->nextMBB 9975 // 9976 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 9977 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 9978 MachineFunction::iterator MBBIter = MBB; 9979 ++MBBIter; 9980 9981 /// First build the CFG 9982 MachineFunction *F = MBB->getParent(); 9983 MachineBasicBlock *thisMBB = MBB; 9984 MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB); 9985 MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB); 9986 F->insert(MBBIter, newMBB); 9987 F->insert(MBBIter, nextMBB); 9988 9989 // Transfer the remainder of thisMBB and its successor edges to nextMBB. 9990 nextMBB->splice(nextMBB->begin(), thisMBB, 9991 llvm::next(MachineBasicBlock::iterator(mInstr)), 9992 thisMBB->end()); 9993 nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB); 9994 9995 // Update thisMBB to fall through to newMBB 9996 thisMBB->addSuccessor(newMBB); 9997 9998 // newMBB jumps to newMBB and fall through to nextMBB 9999 newMBB->addSuccessor(nextMBB); 10000 newMBB->addSuccessor(newMBB); 10001 10002 DebugLoc dl = mInstr->getDebugLoc(); 10003 // Insert instructions into newMBB based on incoming instruction 10004 assert(mInstr->getNumOperands() < X86::AddrNumOperands + 4 && 10005 "unexpected number of operands"); 10006 MachineOperand& destOper = mInstr->getOperand(0); 10007 MachineOperand* argOpers[2 + X86::AddrNumOperands]; 10008 int numArgs = mInstr->getNumOperands() - 1; 10009 for (int i=0; i < numArgs; ++i) 10010 argOpers[i] = &mInstr->getOperand(i+1); 10011 10012 // x86 address has 4 operands: base, index, scale, and displacement 10013 int lastAddrIndx = X86::AddrNumOperands - 1; // [0,3] 10014 int valArgIndx = lastAddrIndx + 1; 10015 10016 unsigned t1 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass); 10017 MachineInstrBuilder MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rm), t1); 10018 for (int i=0; i <= lastAddrIndx; ++i) 10019 (*MIB).addOperand(*argOpers[i]); 10020 10021 // We only support register and immediate values 10022 assert((argOpers[valArgIndx]->isReg() || 10023 argOpers[valArgIndx]->isImm()) && 10024 "invalid operand"); 10025 10026 unsigned t2 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass); 10027 if (argOpers[valArgIndx]->isReg()) 10028 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t2); 10029 else 10030 MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rr), t2); 10031 (*MIB).addOperand(*argOpers[valArgIndx]); 10032 10033 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EAX); 10034 MIB.addReg(t1); 10035 10036 MIB = BuildMI(newMBB, dl, TII->get(X86::CMP32rr)); 10037 MIB.addReg(t1); 10038 MIB.addReg(t2); 10039 10040 // Generate movc 10041 unsigned t3 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass); 10042 MIB = BuildMI(newMBB, dl, TII->get(cmovOpc),t3); 10043 MIB.addReg(t2); 10044 MIB.addReg(t1); 10045 10046 // Cmp and exchange if none has modified the memory location 10047 MIB = BuildMI(newMBB, dl, TII->get(X86::LCMPXCHG32)); 10048 for (int i=0; i <= lastAddrIndx; ++i) 
10049 (*MIB).addOperand(*argOpers[i]); 10050 MIB.addReg(t3); 10051 assert(mInstr->hasOneMemOperand() && "Unexpected number of memoperand"); 10052 (*MIB).setMemRefs(mInstr->memoperands_begin(), 10053 mInstr->memoperands_end()); 10054 10055 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), destOper.getReg()); 10056 MIB.addReg(X86::EAX); 10057 10058 // insert branch 10059 BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB); 10060 10061 mInstr->eraseFromParent(); // The pseudo instruction is gone now. 10062 return nextMBB; 10063} 10064 10065// FIXME: When we get size specific XMM0 registers, i.e. XMM0_V16I8 10066// or XMM0_V32I8 in AVX all of this code can be replaced with that 10067// in the .td file. 10068MachineBasicBlock * 10069X86TargetLowering::EmitPCMP(MachineInstr *MI, MachineBasicBlock *BB, 10070 unsigned numArgs, bool memArg) const { 10071 assert((Subtarget->hasSSE42() || Subtarget->hasAVX()) && 10072 "Target must have SSE4.2 or AVX features enabled"); 10073 10074 DebugLoc dl = MI->getDebugLoc(); 10075 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 10076 unsigned Opc; 10077 if (!Subtarget->hasAVX()) { 10078 if (memArg) 10079 Opc = numArgs == 3 ? X86::PCMPISTRM128rm : X86::PCMPESTRM128rm; 10080 else 10081 Opc = numArgs == 3 ? X86::PCMPISTRM128rr : X86::PCMPESTRM128rr; 10082 } else { 10083 if (memArg) 10084 Opc = numArgs == 3 ? X86::VPCMPISTRM128rm : X86::VPCMPESTRM128rm; 10085 else 10086 Opc = numArgs == 3 ? X86::VPCMPISTRM128rr : X86::VPCMPESTRM128rr; 10087 } 10088 10089 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc)); 10090 for (unsigned i = 0; i < numArgs; ++i) { 10091 MachineOperand &Op = MI->getOperand(i+1); 10092 if (!(Op.isReg() && Op.isImplicit())) 10093 MIB.addOperand(Op); 10094 } 10095 BuildMI(*BB, MI, dl, TII->get(X86::MOVAPSrr), MI->getOperand(0).getReg()) 10096 .addReg(X86::XMM0); 10097 10098 MI->eraseFromParent(); 10099 return BB; 10100} 10101 10102MachineBasicBlock * 10103X86TargetLowering::EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB) const { 10104 DebugLoc dl = MI->getDebugLoc(); 10105 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 10106 10107 // Address into RAX/EAX, other two args into ECX, EDX. 10108 unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r; 10109 unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX; 10110 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg); 10111 for (int i = 0; i < X86::AddrNumOperands; ++i) 10112 MIB.addOperand(MI->getOperand(i)); 10113 10114 unsigned ValOps = X86::AddrNumOperands; 10115 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX) 10116 .addReg(MI->getOperand(ValOps).getReg()); 10117 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX) 10118 .addReg(MI->getOperand(ValOps+1).getReg()); 10119 10120 // The instruction doesn't actually take any operands though. 10121 BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr)); 10122 10123 MI->eraseFromParent(); // The pseudo is gone now. 10124 return BB; 10125} 10126 10127MachineBasicBlock * 10128X86TargetLowering::EmitMwait(MachineInstr *MI, MachineBasicBlock *BB) const { 10129 DebugLoc dl = MI->getDebugLoc(); 10130 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 10131 10132 // First arg in ECX, the second in EAX. 
10133 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX) 10134 .addReg(MI->getOperand(0).getReg()); 10135 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EAX) 10136 .addReg(MI->getOperand(1).getReg()); 10137 10138 // The instruction doesn't actually take any operands though. 10139 BuildMI(*BB, MI, dl, TII->get(X86::MWAITrr)); 10140 10141 MI->eraseFromParent(); // The pseudo is gone now. 10142 return BB; 10143} 10144 10145MachineBasicBlock * 10146X86TargetLowering::EmitVAARG64WithCustomInserter( 10147 MachineInstr *MI, 10148 MachineBasicBlock *MBB) const { 10149 // Emit va_arg instruction on X86-64. 10150 10151 // Operands to this pseudo-instruction: 10152 // 0 ) Output : destination address (reg) 10153 // 1-5) Input : va_list address (addr, i64mem) 10154 // 6 ) ArgSize : Size (in bytes) of vararg type 10155 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset 10156 // 8 ) Align : Alignment of type 10157 // 9 ) EFLAGS (implicit-def) 10158 10159 assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!"); 10160 assert(X86::AddrNumOperands == 5 && "VAARG_64 assumes 5 address operands"); 10161 10162 unsigned DestReg = MI->getOperand(0).getReg(); 10163 MachineOperand &Base = MI->getOperand(1); 10164 MachineOperand &Scale = MI->getOperand(2); 10165 MachineOperand &Index = MI->getOperand(3); 10166 MachineOperand &Disp = MI->getOperand(4); 10167 MachineOperand &Segment = MI->getOperand(5); 10168 unsigned ArgSize = MI->getOperand(6).getImm(); 10169 unsigned ArgMode = MI->getOperand(7).getImm(); 10170 unsigned Align = MI->getOperand(8).getImm(); 10171 10172 // Memory Reference 10173 assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand"); 10174 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 10175 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 10176 10177 // Machine Information 10178 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 10179 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 10180 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64); 10181 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32); 10182 DebugLoc DL = MI->getDebugLoc(); 10183 10184 // struct va_list { 10185 // i32 gp_offset 10186 // i32 fp_offset 10187 // i64 overflow_area (address) 10188 // i64 reg_save_area (address) 10189 // } 10190 // sizeof(va_list) = 24 10191 // alignment(va_list) = 8 10192 10193 unsigned TotalNumIntRegs = 6; 10194 unsigned TotalNumXMMRegs = 8; 10195 bool UseGPOffset = (ArgMode == 1); 10196 bool UseFPOffset = (ArgMode == 2); 10197 unsigned MaxOffset = TotalNumIntRegs * 8 + 10198 (UseFPOffset ? TotalNumXMMRegs * 16 : 0); 10199 10200 /* Align ArgSize to a multiple of 8 */ 10201 unsigned ArgSizeA8 = (ArgSize + 7) & ~7; 10202 bool NeedsAlign = (Align > 8); 10203 10204 MachineBasicBlock *thisMBB = MBB; 10205 MachineBasicBlock *overflowMBB; 10206 MachineBasicBlock *offsetMBB; 10207 MachineBasicBlock *endMBB; 10208 10209 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB 10210 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB 10211 unsigned OffsetReg = 0; 10212 10213 if (!UseGPOffset && !UseFPOffset) { 10214 // If we only pull from the overflow region, we don't create a branch. 10215 // We don't need to alter control flow. 
10216 OffsetDestReg = 0; // unused 10217 OverflowDestReg = DestReg; 10218 10219 offsetMBB = NULL; 10220 overflowMBB = thisMBB; 10221 endMBB = thisMBB; 10222 } else { 10223 // First emit code to check if gp_offset (or fp_offset) is below the bound. 10224 // If so, pull the argument from reg_save_area. (branch to offsetMBB) 10225 // If not, pull from overflow_area. (branch to overflowMBB) 10226 // 10227 // thisMBB 10228 // | . 10229 // | . 10230 // offsetMBB overflowMBB 10231 // | . 10232 // | . 10233 // endMBB 10234 10235 // Registers for the PHI in endMBB 10236 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass); 10237 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass); 10238 10239 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 10240 MachineFunction *MF = MBB->getParent(); 10241 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB); 10242 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB); 10243 endMBB = MF->CreateMachineBasicBlock(LLVM_BB); 10244 10245 MachineFunction::iterator MBBIter = MBB; 10246 ++MBBIter; 10247 10248 // Insert the new basic blocks 10249 MF->insert(MBBIter, offsetMBB); 10250 MF->insert(MBBIter, overflowMBB); 10251 MF->insert(MBBIter, endMBB); 10252 10253 // Transfer the remainder of MBB and its successor edges to endMBB. 10254 endMBB->splice(endMBB->begin(), thisMBB, 10255 llvm::next(MachineBasicBlock::iterator(MI)), 10256 thisMBB->end()); 10257 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB); 10258 10259 // Make offsetMBB and overflowMBB successors of thisMBB 10260 thisMBB->addSuccessor(offsetMBB); 10261 thisMBB->addSuccessor(overflowMBB); 10262 10263 // endMBB is a successor of both offsetMBB and overflowMBB 10264 offsetMBB->addSuccessor(endMBB); 10265 overflowMBB->addSuccessor(endMBB); 10266 10267 // Load the offset value into a register 10268 OffsetReg = MRI.createVirtualRegister(OffsetRegClass); 10269 BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg) 10270 .addOperand(Base) 10271 .addOperand(Scale) 10272 .addOperand(Index) 10273 .addDisp(Disp, UseFPOffset ? 4 : 0) 10274 .addOperand(Segment) 10275 .setMemRefs(MMOBegin, MMOEnd); 10276 10277 // Check if there is enough room left to pull this argument. 10278 BuildMI(thisMBB, DL, TII->get(X86::CMP32ri)) 10279 .addReg(OffsetReg) 10280 .addImm(MaxOffset + 8 - ArgSizeA8); 10281 10282 // Branch to "overflowMBB" if offset >= max 10283 // Fall through to "offsetMBB" otherwise 10284 BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE))) 10285 .addMBB(overflowMBB); 10286 } 10287 10288 // In offsetMBB, emit code to use the reg_save_area. 10289 if (offsetMBB) { 10290 assert(OffsetReg != 0); 10291 10292 // Read the reg_save_area address. 10293 unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass); 10294 BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg) 10295 .addOperand(Base) 10296 .addOperand(Scale) 10297 .addOperand(Index) 10298 .addDisp(Disp, 16) 10299 .addOperand(Segment) 10300 .setMemRefs(MMOBegin, MMOEnd); 10301 10302 // Zero-extend the offset 10303 unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass); 10304 BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64) 10305 .addImm(0) 10306 .addReg(OffsetReg) 10307 .addImm(X86::sub_32bit); 10308 10309 // Add the offset to the reg_save_area to get the final address. 
10310 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg) 10311 .addReg(OffsetReg64) 10312 .addReg(RegSaveReg); 10313 10314 // Compute the offset for the next argument 10315 unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass); 10316 BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg) 10317 .addReg(OffsetReg) 10318 .addImm(UseFPOffset ? 16 : 8); 10319 10320 // Store it back into the va_list. 10321 BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr)) 10322 .addOperand(Base) 10323 .addOperand(Scale) 10324 .addOperand(Index) 10325 .addDisp(Disp, UseFPOffset ? 4 : 0) 10326 .addOperand(Segment) 10327 .addReg(NextOffsetReg) 10328 .setMemRefs(MMOBegin, MMOEnd); 10329 10330 // Jump to endMBB 10331 BuildMI(offsetMBB, DL, TII->get(X86::JMP_4)) 10332 .addMBB(endMBB); 10333 } 10334 10335 // 10336 // Emit code to use overflow area 10337 // 10338 10339 // Load the overflow_area address into a register. 10340 unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass); 10341 BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg) 10342 .addOperand(Base) 10343 .addOperand(Scale) 10344 .addOperand(Index) 10345 .addDisp(Disp, 8) 10346 .addOperand(Segment) 10347 .setMemRefs(MMOBegin, MMOEnd); 10348 10349 // If we need to align it, do so. Otherwise, just copy the address 10350 // to OverflowDestReg. 10351 if (NeedsAlign) { 10352 // Align the overflow address 10353 assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2"); 10354 unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass); 10355 10356 // aligned_addr = (addr + (align-1)) & ~(align-1) 10357 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg) 10358 .addReg(OverflowAddrReg) 10359 .addImm(Align-1); 10360 10361 BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg) 10362 .addReg(TmpReg) 10363 .addImm(~(uint64_t)(Align-1)); 10364 } else { 10365 BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg) 10366 .addReg(OverflowAddrReg); 10367 } 10368 10369 // Compute the next overflow address after this argument. 10370 // (the overflow address should be kept 8-byte aligned) 10371 unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass); 10372 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg) 10373 .addReg(OverflowDestReg) 10374 .addImm(ArgSizeA8); 10375 10376 // Store the new overflow address. 10377 BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr)) 10378 .addOperand(Base) 10379 .addOperand(Scale) 10380 .addOperand(Index) 10381 .addDisp(Disp, 8) 10382 .addOperand(Segment) 10383 .addReg(NextAddrReg) 10384 .setMemRefs(MMOBegin, MMOEnd); 10385 10386 // If we branched, emit the PHI to the front of endMBB. 10387 if (offsetMBB) { 10388 BuildMI(*endMBB, endMBB->begin(), DL, 10389 TII->get(X86::PHI), DestReg) 10390 .addReg(OffsetDestReg).addMBB(offsetMBB) 10391 .addReg(OverflowDestReg).addMBB(overflowMBB); 10392 } 10393 10394 // Erase the pseudo instruction 10395 MI->eraseFromParent(); 10396 10397 return endMBB; 10398} 10399 10400MachineBasicBlock * 10401X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter( 10402 MachineInstr *MI, 10403 MachineBasicBlock *MBB) const { 10404 // Emit code to save XMM registers to the stack. The ABI says that the 10405 // number of registers to save is given in %al, so it's theoretically 10406 // possible to do an indirect jump trick to avoid saving all of them, 10407 // however this code takes a simpler approach and just executes all 10408 // of the stores if %al is non-zero. 
It's less code, and it's probably 10409 // easier on the hardware branch predictor, and stores aren't all that 10410 // expensive anyway. 10411 10412 // Create the new basic blocks. One block contains all the XMM stores, 10413 // and one block is the final destination regardless of whether any 10414 // stores were performed. 10415 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 10416 MachineFunction *F = MBB->getParent(); 10417 MachineFunction::iterator MBBIter = MBB; 10418 ++MBBIter; 10419 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB); 10420 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB); 10421 F->insert(MBBIter, XMMSaveMBB); 10422 F->insert(MBBIter, EndMBB); 10423 10424 // Transfer the remainder of MBB and its successor edges to EndMBB. 10425 EndMBB->splice(EndMBB->begin(), MBB, 10426 llvm::next(MachineBasicBlock::iterator(MI)), 10427 MBB->end()); 10428 EndMBB->transferSuccessorsAndUpdatePHIs(MBB); 10429 10430 // The original block will now fall through to the XMM save block. 10431 MBB->addSuccessor(XMMSaveMBB); 10432 // The XMMSaveMBB will fall through to the end block. 10433 XMMSaveMBB->addSuccessor(EndMBB); 10434 10435 // Now add the instructions. 10436 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 10437 DebugLoc DL = MI->getDebugLoc(); 10438 10439 unsigned CountReg = MI->getOperand(0).getReg(); 10440 int64_t RegSaveFrameIndex = MI->getOperand(1).getImm(); 10441 int64_t VarArgsFPOffset = MI->getOperand(2).getImm(); 10442 10443 if (!Subtarget->isTargetWin64()) { 10444 // If %al is 0, branch around the XMM save block. 10445 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg); 10446 BuildMI(MBB, DL, TII->get(X86::JE_4)).addMBB(EndMBB); 10447 MBB->addSuccessor(EndMBB); 10448 } 10449 10450 // In the XMM save block, save all the XMM argument registers. 10451 for (int i = 3, e = MI->getNumOperands(); i != e; ++i) { 10452 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset; 10453 MachineMemOperand *MMO = 10454 F->getMachineMemOperand( 10455 MachinePointerInfo::getFixedStack(RegSaveFrameIndex, Offset), 10456 MachineMemOperand::MOStore, 10457 /*Size=*/16, /*Align=*/16); 10458 BuildMI(XMMSaveMBB, DL, TII->get(X86::MOVAPSmr)) 10459 .addFrameIndex(RegSaveFrameIndex) 10460 .addImm(/*Scale=*/1) 10461 .addReg(/*IndexReg=*/0) 10462 .addImm(/*Disp=*/Offset) 10463 .addReg(/*Segment=*/0) 10464 .addReg(MI->getOperand(i).getReg()) 10465 .addMemOperand(MMO); 10466 } 10467 10468 MI->eraseFromParent(); // The pseudo instruction is gone now. 10469 10470 return EndMBB; 10471} 10472 10473MachineBasicBlock * 10474X86TargetLowering::EmitLoweredSelect(MachineInstr *MI, 10475 MachineBasicBlock *BB) const { 10476 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 10477 DebugLoc DL = MI->getDebugLoc(); 10478 10479 // To "insert" a SELECT_CC instruction, we actually have to insert the 10480 // diamond control-flow pattern. The incoming instruction knows the 10481 // destination vreg to set, the condition code register to branch on, the 10482 // true/false values to select between, and a branch opcode to use. 10483 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 10484 MachineFunction::iterator It = BB; 10485 ++It; 10486 10487 // thisMBB: 10488 // ... 10489 // TrueVal = ... 
10490 // cmpTY ccX, r1, r2 10491 // bCC copy1MBB 10492 // fallthrough --> copy0MBB 10493 MachineBasicBlock *thisMBB = BB; 10494 MachineFunction *F = BB->getParent(); 10495 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 10496 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 10497 F->insert(It, copy0MBB); 10498 F->insert(It, sinkMBB); 10499 10500 // If the EFLAGS register isn't dead in the terminator, then claim that it's 10501 // live into the sink and copy blocks. 10502 const MachineFunction *MF = BB->getParent(); 10503 const TargetRegisterInfo *TRI = MF->getTarget().getRegisterInfo(); 10504 BitVector ReservedRegs = TRI->getReservedRegs(*MF); 10505 10506 for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) { 10507 const MachineOperand &MO = MI->getOperand(I); 10508 if (!MO.isReg() || !MO.isUse() || MO.isKill()) continue; 10509 unsigned Reg = MO.getReg(); 10510 if (Reg != X86::EFLAGS) continue; 10511 copy0MBB->addLiveIn(Reg); 10512 sinkMBB->addLiveIn(Reg); 10513 } 10514 10515 // Transfer the remainder of BB and its successor edges to sinkMBB. 10516 sinkMBB->splice(sinkMBB->begin(), BB, 10517 llvm::next(MachineBasicBlock::iterator(MI)), 10518 BB->end()); 10519 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 10520 10521 // Add the true and fallthrough blocks as its successors. 10522 BB->addSuccessor(copy0MBB); 10523 BB->addSuccessor(sinkMBB); 10524 10525 // Create the conditional branch instruction. 10526 unsigned Opc = 10527 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm()); 10528 BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB); 10529 10530 // copy0MBB: 10531 // %FalseValue = ... 10532 // # fallthrough to sinkMBB 10533 copy0MBB->addSuccessor(sinkMBB); 10534 10535 // sinkMBB: 10536 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 10537 // ... 10538 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 10539 TII->get(X86::PHI), MI->getOperand(0).getReg()) 10540 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 10541 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 10542 10543 MI->eraseFromParent(); // The pseudo instruction is gone now. 10544 return sinkMBB; 10545} 10546 10547MachineBasicBlock * 10548X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI, 10549 MachineBasicBlock *BB) const { 10550 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 10551 DebugLoc DL = MI->getDebugLoc(); 10552 10553 assert(!Subtarget->isTargetEnvMacho()); 10554 10555 // The lowering is pretty easy: we're just emitting the call to _alloca. The 10556 // non-trivial part is impdef of ESP. 10557 10558 if (Subtarget->isTargetWin64()) { 10559 if (Subtarget->isTargetCygMing()) { 10560 // ___chkstk(Mingw64): 10561 // Clobbers R10, R11, RAX and EFLAGS. 10562 // Updates RSP. 10563 BuildMI(*BB, MI, DL, TII->get(X86::W64ALLOCA)) 10564 .addExternalSymbol("___chkstk") 10565 .addReg(X86::RAX, RegState::Implicit) 10566 .addReg(X86::RSP, RegState::Implicit) 10567 .addReg(X86::RAX, RegState::Define | RegState::Implicit) 10568 .addReg(X86::RSP, RegState::Define | RegState::Implicit) 10569 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit); 10570 } else { 10571 // __chkstk(MSVCRT): does not update stack pointer. 10572 // Clobbers R10, R11 and EFLAGS. 10573 // FIXME: RAX(allocated size) might be reused and not killed. 
10574 BuildMI(*BB, MI, DL, TII->get(X86::W64ALLOCA)) 10575 .addExternalSymbol("__chkstk") 10576 .addReg(X86::RAX, RegState::Implicit) 10577 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit); 10578 // RAX has the offset to subtracted from RSP. 10579 BuildMI(*BB, MI, DL, TII->get(X86::SUB64rr), X86::RSP) 10580 .addReg(X86::RSP) 10581 .addReg(X86::RAX); 10582 } 10583 } else { 10584 const char *StackProbeSymbol = 10585 Subtarget->isTargetWindows() ? "_chkstk" : "_alloca"; 10586 10587 BuildMI(*BB, MI, DL, TII->get(X86::CALLpcrel32)) 10588 .addExternalSymbol(StackProbeSymbol) 10589 .addReg(X86::EAX, RegState::Implicit) 10590 .addReg(X86::ESP, RegState::Implicit) 10591 .addReg(X86::EAX, RegState::Define | RegState::Implicit) 10592 .addReg(X86::ESP, RegState::Define | RegState::Implicit) 10593 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit); 10594 } 10595 10596 MI->eraseFromParent(); // The pseudo instruction is gone now. 10597 return BB; 10598} 10599 10600MachineBasicBlock * 10601X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI, 10602 MachineBasicBlock *BB) const { 10603 // This is pretty easy. We're taking the value that we received from 10604 // our load from the relocation, sticking it in either RDI (x86-64) 10605 // or EAX and doing an indirect call. The return value will then 10606 // be in the normal return register. 10607 const X86InstrInfo *TII 10608 = static_cast<const X86InstrInfo*>(getTargetMachine().getInstrInfo()); 10609 DebugLoc DL = MI->getDebugLoc(); 10610 MachineFunction *F = BB->getParent(); 10611 10612 assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?"); 10613 assert(MI->getOperand(3).isGlobal() && "This should be a global"); 10614 10615 if (Subtarget->is64Bit()) { 10616 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, 10617 TII->get(X86::MOV64rm), X86::RDI) 10618 .addReg(X86::RIP) 10619 .addImm(0).addReg(0) 10620 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, 10621 MI->getOperand(3).getTargetFlags()) 10622 .addReg(0); 10623 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m)); 10624 addDirectMem(MIB, X86::RDI); 10625 } else if (getTargetMachine().getRelocationModel() != Reloc::PIC_) { 10626 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, 10627 TII->get(X86::MOV32rm), X86::EAX) 10628 .addReg(0) 10629 .addImm(0).addReg(0) 10630 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, 10631 MI->getOperand(3).getTargetFlags()) 10632 .addReg(0); 10633 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m)); 10634 addDirectMem(MIB, X86::EAX); 10635 } else { 10636 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, 10637 TII->get(X86::MOV32rm), X86::EAX) 10638 .addReg(TII->getGlobalBaseReg(F)) 10639 .addImm(0).addReg(0) 10640 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, 10641 MI->getOperand(3).getTargetFlags()) 10642 .addReg(0); 10643 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m)); 10644 addDirectMem(MIB, X86::EAX); 10645 } 10646 10647 MI->eraseFromParent(); // The pseudo instruction is gone now. 
10648 return BB; 10649} 10650 10651MachineBasicBlock * 10652X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 10653 MachineBasicBlock *BB) const { 10654 switch (MI->getOpcode()) { 10655 default: assert(false && "Unexpected instr type to insert"); 10656 case X86::TAILJMPd64: 10657 case X86::TAILJMPr64: 10658 case X86::TAILJMPm64: 10659 assert(!"TAILJMP64 would not be touched here."); 10660 case X86::TCRETURNdi64: 10661 case X86::TCRETURNri64: 10662 case X86::TCRETURNmi64: 10663 // Defs of TCRETURNxx64 has Win64's callee-saved registers, as subset. 10664 // On AMD64, additional defs should be added before register allocation. 10665 if (!Subtarget->isTargetWin64()) { 10666 MI->addRegisterDefined(X86::RSI); 10667 MI->addRegisterDefined(X86::RDI); 10668 MI->addRegisterDefined(X86::XMM6); 10669 MI->addRegisterDefined(X86::XMM7); 10670 MI->addRegisterDefined(X86::XMM8); 10671 MI->addRegisterDefined(X86::XMM9); 10672 MI->addRegisterDefined(X86::XMM10); 10673 MI->addRegisterDefined(X86::XMM11); 10674 MI->addRegisterDefined(X86::XMM12); 10675 MI->addRegisterDefined(X86::XMM13); 10676 MI->addRegisterDefined(X86::XMM14); 10677 MI->addRegisterDefined(X86::XMM15); 10678 } 10679 return BB; 10680 case X86::WIN_ALLOCA: 10681 return EmitLoweredWinAlloca(MI, BB); 10682 case X86::TLSCall_32: 10683 case X86::TLSCall_64: 10684 return EmitLoweredTLSCall(MI, BB); 10685 case X86::CMOV_GR8: 10686 case X86::CMOV_FR32: 10687 case X86::CMOV_FR64: 10688 case X86::CMOV_V4F32: 10689 case X86::CMOV_V2F64: 10690 case X86::CMOV_V2I64: 10691 case X86::CMOV_GR16: 10692 case X86::CMOV_GR32: 10693 case X86::CMOV_RFP32: 10694 case X86::CMOV_RFP64: 10695 case X86::CMOV_RFP80: 10696 return EmitLoweredSelect(MI, BB); 10697 10698 case X86::FP32_TO_INT16_IN_MEM: 10699 case X86::FP32_TO_INT32_IN_MEM: 10700 case X86::FP32_TO_INT64_IN_MEM: 10701 case X86::FP64_TO_INT16_IN_MEM: 10702 case X86::FP64_TO_INT32_IN_MEM: 10703 case X86::FP64_TO_INT64_IN_MEM: 10704 case X86::FP80_TO_INT16_IN_MEM: 10705 case X86::FP80_TO_INT32_IN_MEM: 10706 case X86::FP80_TO_INT64_IN_MEM: { 10707 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 10708 DebugLoc DL = MI->getDebugLoc(); 10709 10710 // Change the floating point control register to use "round towards zero" 10711 // mode when truncating to an integer value. 10712 MachineFunction *F = BB->getParent(); 10713 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false); 10714 addFrameReference(BuildMI(*BB, MI, DL, 10715 TII->get(X86::FNSTCW16m)), CWFrameIdx); 10716 10717 // Load the old value of the high byte of the control word... 10718 unsigned OldCW = 10719 F->getRegInfo().createVirtualRegister(X86::GR16RegisterClass); 10720 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW), 10721 CWFrameIdx); 10722 10723 // Set the high part to be round to zero... 10724 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx) 10725 .addImm(0xC7F); 10726 10727 // Reload the modified control word now... 10728 addFrameReference(BuildMI(*BB, MI, DL, 10729 TII->get(X86::FLDCW16m)), CWFrameIdx); 10730 10731 // Restore the memory image of control word to original value 10732 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx) 10733 .addReg(OldCW); 10734 10735 // Get the X86 opcode to use. 
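    // The IST_Fp<N>m<M> opcodes chosen below store an x87 value of the
    // corresponding floating-point width (32, 64 or 80 bits) to an N-bit
    // integer in memory, using the round-towards-zero control word loaded
    // above (e.g. FP32_TO_INT16_IN_MEM maps to IST_Fp16m32).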
10736 unsigned Opc; 10737 switch (MI->getOpcode()) { 10738 default: llvm_unreachable("illegal opcode!"); 10739 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break; 10740 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break; 10741 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break; 10742 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break; 10743 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break; 10744 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break; 10745 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break; 10746 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break; 10747 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break; 10748 } 10749 10750 X86AddressMode AM; 10751 MachineOperand &Op = MI->getOperand(0); 10752 if (Op.isReg()) { 10753 AM.BaseType = X86AddressMode::RegBase; 10754 AM.Base.Reg = Op.getReg(); 10755 } else { 10756 AM.BaseType = X86AddressMode::FrameIndexBase; 10757 AM.Base.FrameIndex = Op.getIndex(); 10758 } 10759 Op = MI->getOperand(1); 10760 if (Op.isImm()) 10761 AM.Scale = Op.getImm(); 10762 Op = MI->getOperand(2); 10763 if (Op.isImm()) 10764 AM.IndexReg = Op.getImm(); 10765 Op = MI->getOperand(3); 10766 if (Op.isGlobal()) { 10767 AM.GV = Op.getGlobal(); 10768 } else { 10769 AM.Disp = Op.getImm(); 10770 } 10771 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM) 10772 .addReg(MI->getOperand(X86::AddrNumOperands).getReg()); 10773 10774 // Reload the original control word now. 10775 addFrameReference(BuildMI(*BB, MI, DL, 10776 TII->get(X86::FLDCW16m)), CWFrameIdx); 10777 10778 MI->eraseFromParent(); // The pseudo instruction is gone now. 10779 return BB; 10780 } 10781 // String/text processing lowering. 10782 case X86::PCMPISTRM128REG: 10783 case X86::VPCMPISTRM128REG: 10784 return EmitPCMP(MI, BB, 3, false /* in-mem */); 10785 case X86::PCMPISTRM128MEM: 10786 case X86::VPCMPISTRM128MEM: 10787 return EmitPCMP(MI, BB, 3, true /* in-mem */); 10788 case X86::PCMPESTRM128REG: 10789 case X86::VPCMPESTRM128REG: 10790 return EmitPCMP(MI, BB, 5, false /* in mem */); 10791 case X86::PCMPESTRM128MEM: 10792 case X86::VPCMPESTRM128MEM: 10793 return EmitPCMP(MI, BB, 5, true /* in mem */); 10794 10795 // Thread synchronization. 10796 case X86::MONITOR: 10797 return EmitMonitor(MI, BB); 10798 case X86::MWAIT: 10799 return EmitMwait(MI, BB); 10800 10801 // Atomic Lowering. 
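  // The bitwise ATOMxxx pseudos below expand into a load / operate / LCMPXCHG
  // retry loop.  The arguments are, in order: the register and immediate
  // forms of the operation, the plain load, the LOCK-prefixed cmpxchg, the
  // NOT opcode (used by the NAND forms), the accumulator register for the
  // cmpxchg, the register class, and whether the source is inverted (the
  // NAND variants pass true).  The MIN/MAX forms use the same loop but pick
  // the new value with the given CMOV opcode.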
10802 case X86::ATOMAND32: 10803 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr, 10804 X86::AND32ri, X86::MOV32rm, 10805 X86::LCMPXCHG32, 10806 X86::NOT32r, X86::EAX, 10807 X86::GR32RegisterClass); 10808 case X86::ATOMOR32: 10809 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR32rr, 10810 X86::OR32ri, X86::MOV32rm, 10811 X86::LCMPXCHG32, 10812 X86::NOT32r, X86::EAX, 10813 X86::GR32RegisterClass); 10814 case X86::ATOMXOR32: 10815 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR32rr, 10816 X86::XOR32ri, X86::MOV32rm, 10817 X86::LCMPXCHG32, 10818 X86::NOT32r, X86::EAX, 10819 X86::GR32RegisterClass); 10820 case X86::ATOMNAND32: 10821 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr, 10822 X86::AND32ri, X86::MOV32rm, 10823 X86::LCMPXCHG32, 10824 X86::NOT32r, X86::EAX, 10825 X86::GR32RegisterClass, true); 10826 case X86::ATOMMIN32: 10827 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL32rr); 10828 case X86::ATOMMAX32: 10829 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG32rr); 10830 case X86::ATOMUMIN32: 10831 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB32rr); 10832 case X86::ATOMUMAX32: 10833 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA32rr); 10834 10835 case X86::ATOMAND16: 10836 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND16rr, 10837 X86::AND16ri, X86::MOV16rm, 10838 X86::LCMPXCHG16, 10839 X86::NOT16r, X86::AX, 10840 X86::GR16RegisterClass); 10841 case X86::ATOMOR16: 10842 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR16rr, 10843 X86::OR16ri, X86::MOV16rm, 10844 X86::LCMPXCHG16, 10845 X86::NOT16r, X86::AX, 10846 X86::GR16RegisterClass); 10847 case X86::ATOMXOR16: 10848 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR16rr, 10849 X86::XOR16ri, X86::MOV16rm, 10850 X86::LCMPXCHG16, 10851 X86::NOT16r, X86::AX, 10852 X86::GR16RegisterClass); 10853 case X86::ATOMNAND16: 10854 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND16rr, 10855 X86::AND16ri, X86::MOV16rm, 10856 X86::LCMPXCHG16, 10857 X86::NOT16r, X86::AX, 10858 X86::GR16RegisterClass, true); 10859 case X86::ATOMMIN16: 10860 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL16rr); 10861 case X86::ATOMMAX16: 10862 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG16rr); 10863 case X86::ATOMUMIN16: 10864 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB16rr); 10865 case X86::ATOMUMAX16: 10866 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA16rr); 10867 10868 case X86::ATOMAND8: 10869 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND8rr, 10870 X86::AND8ri, X86::MOV8rm, 10871 X86::LCMPXCHG8, 10872 X86::NOT8r, X86::AL, 10873 X86::GR8RegisterClass); 10874 case X86::ATOMOR8: 10875 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR8rr, 10876 X86::OR8ri, X86::MOV8rm, 10877 X86::LCMPXCHG8, 10878 X86::NOT8r, X86::AL, 10879 X86::GR8RegisterClass); 10880 case X86::ATOMXOR8: 10881 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR8rr, 10882 X86::XOR8ri, X86::MOV8rm, 10883 X86::LCMPXCHG8, 10884 X86::NOT8r, X86::AL, 10885 X86::GR8RegisterClass); 10886 case X86::ATOMNAND8: 10887 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND8rr, 10888 X86::AND8ri, X86::MOV8rm, 10889 X86::LCMPXCHG8, 10890 X86::NOT8r, X86::AL, 10891 X86::GR8RegisterClass, true); 10892 // FIXME: There are no CMOV8 instructions; MIN/MAX need some other way. 10893 // This group is for 64-bit host. 
10894 case X86::ATOMAND64: 10895 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr, 10896 X86::AND64ri32, X86::MOV64rm, 10897 X86::LCMPXCHG64, 10898 X86::NOT64r, X86::RAX, 10899 X86::GR64RegisterClass); 10900 case X86::ATOMOR64: 10901 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR64rr, 10902 X86::OR64ri32, X86::MOV64rm, 10903 X86::LCMPXCHG64, 10904 X86::NOT64r, X86::RAX, 10905 X86::GR64RegisterClass); 10906 case X86::ATOMXOR64: 10907 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR64rr, 10908 X86::XOR64ri32, X86::MOV64rm, 10909 X86::LCMPXCHG64, 10910 X86::NOT64r, X86::RAX, 10911 X86::GR64RegisterClass); 10912 case X86::ATOMNAND64: 10913 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr, 10914 X86::AND64ri32, X86::MOV64rm, 10915 X86::LCMPXCHG64, 10916 X86::NOT64r, X86::RAX, 10917 X86::GR64RegisterClass, true); 10918 case X86::ATOMMIN64: 10919 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL64rr); 10920 case X86::ATOMMAX64: 10921 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG64rr); 10922 case X86::ATOMUMIN64: 10923 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB64rr); 10924 case X86::ATOMUMAX64: 10925 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA64rr); 10926 10927 // This group does 64-bit operations on a 32-bit host. 10928 case X86::ATOMAND6432: 10929 return EmitAtomicBit6432WithCustomInserter(MI, BB, 10930 X86::AND32rr, X86::AND32rr, 10931 X86::AND32ri, X86::AND32ri, 10932 false); 10933 case X86::ATOMOR6432: 10934 return EmitAtomicBit6432WithCustomInserter(MI, BB, 10935 X86::OR32rr, X86::OR32rr, 10936 X86::OR32ri, X86::OR32ri, 10937 false); 10938 case X86::ATOMXOR6432: 10939 return EmitAtomicBit6432WithCustomInserter(MI, BB, 10940 X86::XOR32rr, X86::XOR32rr, 10941 X86::XOR32ri, X86::XOR32ri, 10942 false); 10943 case X86::ATOMNAND6432: 10944 return EmitAtomicBit6432WithCustomInserter(MI, BB, 10945 X86::AND32rr, X86::AND32rr, 10946 X86::AND32ri, X86::AND32ri, 10947 true); 10948 case X86::ATOMADD6432: 10949 return EmitAtomicBit6432WithCustomInserter(MI, BB, 10950 X86::ADD32rr, X86::ADC32rr, 10951 X86::ADD32ri, X86::ADC32ri, 10952 false); 10953 case X86::ATOMSUB6432: 10954 return EmitAtomicBit6432WithCustomInserter(MI, BB, 10955 X86::SUB32rr, X86::SBB32rr, 10956 X86::SUB32ri, X86::SBB32ri, 10957 false); 10958 case X86::ATOMSWAP6432: 10959 return EmitAtomicBit6432WithCustomInserter(MI, BB, 10960 X86::MOV32rr, X86::MOV32rr, 10961 X86::MOV32ri, X86::MOV32ri, 10962 false); 10963 case X86::VASTART_SAVE_XMM_REGS: 10964 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB); 10965 10966 case X86::VAARG_64: 10967 return EmitVAARG64WithCustomInserter(MI, BB); 10968 } 10969} 10970 10971//===----------------------------------------------------------------------===// 10972// X86 Optimization Hooks 10973//===----------------------------------------------------------------------===// 10974 10975void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 10976 const APInt &Mask, 10977 APInt &KnownZero, 10978 APInt &KnownOne, 10979 const SelectionDAG &DAG, 10980 unsigned Depth) const { 10981 unsigned Opc = Op.getOpcode(); 10982 assert((Opc >= ISD::BUILTIN_OP_END || 10983 Opc == ISD::INTRINSIC_WO_CHAIN || 10984 Opc == ISD::INTRINSIC_W_CHAIN || 10985 Opc == ISD::INTRINSIC_VOID) && 10986 "Should use MaskedValueIsZero if you don't know whether Op" 10987 " is a target node!"); 10988 10989 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); // Don't know anything. 
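  // The only cases handled below are nodes that produce a 0/1 boolean
  // (X86ISD::SETCC and the second result of the flag-producing arithmetic
  // nodes): for those, every bit above bit 0 is known to be zero, e.g. an
  // i8 X86ISD::SETCC has bits 7..1 known zero.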
10990 switch (Opc) { 10991 default: break; 10992 case X86ISD::ADD: 10993 case X86ISD::SUB: 10994 case X86ISD::ADC: 10995 case X86ISD::SBB: 10996 case X86ISD::SMUL: 10997 case X86ISD::UMUL: 10998 case X86ISD::INC: 10999 case X86ISD::DEC: 11000 case X86ISD::OR: 11001 case X86ISD::XOR: 11002 case X86ISD::AND: 11003 // These nodes' second result is a boolean. 11004 if (Op.getResNo() == 0) 11005 break; 11006 // Fallthrough 11007 case X86ISD::SETCC: 11008 KnownZero |= APInt::getHighBitsSet(Mask.getBitWidth(), 11009 Mask.getBitWidth() - 1); 11010 break; 11011 } 11012} 11013 11014unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op, 11015 unsigned Depth) const { 11016 // SETCC_CARRY sets the dest to ~0 for true or 0 for false. 11017 if (Op.getOpcode() == X86ISD::SETCC_CARRY) 11018 return Op.getValueType().getScalarType().getSizeInBits(); 11019 11020 // Fallback case. 11021 return 1; 11022} 11023 11024/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the 11025/// node is a GlobalAddress + offset. 11026bool X86TargetLowering::isGAPlusOffset(SDNode *N, 11027 const GlobalValue* &GA, 11028 int64_t &Offset) const { 11029 if (N->getOpcode() == X86ISD::Wrapper) { 11030 if (isa<GlobalAddressSDNode>(N->getOperand(0))) { 11031 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal(); 11032 Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset(); 11033 return true; 11034 } 11035 } 11036 return TargetLowering::isGAPlusOffset(N, GA, Offset); 11037} 11038 11039/// PerformShuffleCombine - Combine a vector_shuffle that is equal to 11040/// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load 11041/// if the load addresses are consecutive, non-overlapping, and in the right 11042/// order. 11043static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG, 11044 TargetLowering::DAGCombinerInfo &DCI) { 11045 DebugLoc dl = N->getDebugLoc(); 11046 EVT VT = N->getValueType(0); 11047 11048 if (VT.getSizeInBits() != 128) 11049 return SDValue(); 11050 11051 // Don't create instructions with illegal types after legalize types has run. 11052 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 11053 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType())) 11054 return SDValue(); 11055 11056 SmallVector<SDValue, 16> Elts; 11057 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) 11058 Elts.push_back(getShuffleScalarElt(N, i, DAG, 0)); 11059 11060 return EltsFromConsecutiveLoads(VT, Elts, dl, DAG); 11061} 11062 11063/// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index 11064/// generation and convert it from being a bunch of shuffles and extracts 11065/// to a simple store and scalar loads to extract the elements. 11066static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG, 11067 const TargetLowering &TLI) { 11068 SDValue InputVector = N->getOperand(0); 11069 11070 // Only operate on vectors of 4 elements, where the alternative shuffling 11071 // gets to be more expensive. 11072 if (InputVector.getValueType() != MVT::v4i32) 11073 return SDValue(); 11074 11075 // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a 11076 // single use which is a sign-extend or zero-extend, and all elements are 11077 // used. 
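  // ExtractedElements records one bit per extracted lane, so for v4i32 a
  // fully used vector yields the value 15 (0b1111) that is checked for below.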
11078  SmallVector<SDNode *, 4> Uses;
11079  unsigned ExtractedElements = 0;
11080  for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(),
11081    UE = InputVector.getNode()->use_end(); UI != UE; ++UI) {
11082    if (UI.getUse().getResNo() != InputVector.getResNo())
11083      return SDValue();
11084
11085    SDNode *Extract = *UI;
11086    if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
11087      return SDValue();
11088
11089    if (Extract->getValueType(0) != MVT::i32)
11090      return SDValue();
11091    if (!Extract->hasOneUse())
11092      return SDValue();
11093    if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND &&
11094        Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND)
11095      return SDValue();
11096    if (!isa<ConstantSDNode>(Extract->getOperand(1)))
11097      return SDValue();
11098
11099    // Record which element was extracted.
11100    ExtractedElements |=
11101      1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue();
11102
11103    Uses.push_back(Extract);
11104  }
11105
11106  // If not all the elements were used, this may not be worthwhile.
11107  if (ExtractedElements != 15)
11108    return SDValue();
11109
11110  // Ok, we've now decided to do the transformation.
11111  DebugLoc dl = InputVector.getDebugLoc();
11112
11113  // Store the value to a temporary stack slot.
11114  SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
11115  SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr,
11116                            MachinePointerInfo(), false, false, 0);
11117
11118  // Replace each use (extract) with a load of the appropriate element.
11119  for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
11120       UE = Uses.end(); UI != UE; ++UI) {
11121    SDNode *Extract = *UI;
11122
11123    // Compute the element's address.
11124    SDValue Idx = Extract->getOperand(1);
11125    unsigned EltSize =
11126        InputVector.getValueType().getVectorElementType().getSizeInBits()/8;
11127    uint64_t Offset = EltSize * cast<ConstantSDNode>(Idx)->getZExtValue();
11128    SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());
11129
11130    SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
11131                                     StackPtr, OffsetVal);
11132
11133    // Load the scalar.
11134    SDValue LoadScalar = DAG.getLoad(Extract->getValueType(0), dl, Ch,
11135                                     ScalarAddr, MachinePointerInfo(),
11136                                     false, false, 0);
11137
11138    // Replace the extract with the load.
11139    DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), LoadScalar);
11140  }
11141
11142  // The replacement was made in place; don't return anything.
11143  return SDValue();
11144}
11145
11146/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes.
11147static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
11148                                    const X86Subtarget *Subtarget) {
11149  DebugLoc DL = N->getDebugLoc();
11150  SDValue Cond = N->getOperand(0);
11151  // Get the LHS/RHS of the select.
11152  SDValue LHS = N->getOperand(1);
11153  SDValue RHS = N->getOperand(2);
11154
11155  // If we have SSE[12] support, try to form min/max nodes. SSE min/max
11156  // instructions match the semantics of the common C idiom x<y?x:y but not
11157  // x<=y?x:y, because of how they handle negative zero (which can be
11158  // ignored in unsafe-math mode).
11159  if (Subtarget->hasSSE2() &&
11160      (LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64) &&
11161      Cond.getOpcode() == ISD::SETCC) {
11162    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
11163
11164    unsigned Opcode = 0;
11165    // Check for x CC y ? x : y.
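    // For instance an ordered x < y ? x : y maps straight to FMIN, while the
    // unordered variants must first account for NaNs and signed zeros, as the
    // cases below do.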
11166    if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
11167        DAG.isEqualTo(RHS, Cond.getOperand(1))) {
11168      switch (CC) {
11169      default: break;
11170      case ISD::SETULT:
11171        // Converting this to a min would handle NaNs incorrectly, and swapping
11172        // the operands would cause it to handle comparisons between positive
11173        // and negative zero incorrectly.
11174        if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
11175          if (!UnsafeFPMath &&
11176              !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
11177            break;
11178          std::swap(LHS, RHS);
11179        }
11180        Opcode = X86ISD::FMIN;
11181        break;
11182      case ISD::SETOLE:
11183        // Converting this to a min would handle comparisons between positive
11184        // and negative zero incorrectly.
11185        if (!UnsafeFPMath &&
11186            !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
11187          break;
11188        Opcode = X86ISD::FMIN;
11189        break;
11190      case ISD::SETULE:
11191        // Converting this to a min would handle both negative zeros and NaNs
11192        // incorrectly, but we can swap the operands to fix both.
11193        std::swap(LHS, RHS);
11194      case ISD::SETOLT:
11195      case ISD::SETLT:
11196      case ISD::SETLE:
11197        Opcode = X86ISD::FMIN;
11198        break;
11199
11200      case ISD::SETOGE:
11201        // Converting this to a max would handle comparisons between positive
11202        // and negative zero incorrectly.
11203        if (!UnsafeFPMath &&
11204            !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
11205          break;
11206        Opcode = X86ISD::FMAX;
11207        break;
11208      case ISD::SETUGT:
11209        // Converting this to a max would handle NaNs incorrectly, and swapping
11210        // the operands would cause it to handle comparisons between positive
11211        // and negative zero incorrectly.
11212        if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
11213          if (!UnsafeFPMath &&
11214              !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
11215            break;
11216          std::swap(LHS, RHS);
11217        }
11218        Opcode = X86ISD::FMAX;
11219        break;
11220      case ISD::SETUGE:
11221        // Converting this to a max would handle both negative zeros and NaNs
11222        // incorrectly, but we can swap the operands to fix both.
11223        std::swap(LHS, RHS);
11224      case ISD::SETOGT:
11225      case ISD::SETGT:
11226      case ISD::SETGE:
11227        Opcode = X86ISD::FMAX;
11228        break;
11229      }
11230    // Check for x CC y ? y : x -- a min/max with reversed arms.
11231    } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
11232               DAG.isEqualTo(RHS, Cond.getOperand(0))) {
11233      switch (CC) {
11234      default: break;
11235      case ISD::SETOGE:
11236        // Converting this to a min would handle comparisons between positive
11237        // and negative zero incorrectly, and swapping the operands would
11238        // cause it to handle NaNs incorrectly.
11239        if (!UnsafeFPMath &&
11240            !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) {
11241          if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
11242            break;
11243          std::swap(LHS, RHS);
11244        }
11245        Opcode = X86ISD::FMIN;
11246        break;
11247      case ISD::SETUGT:
11248        // Converting this to a min would handle NaNs incorrectly.
11249        if (!UnsafeFPMath &&
11250            (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
11251          break;
11252        Opcode = X86ISD::FMIN;
11253        break;
11254      case ISD::SETUGE:
11255        // Converting this to a min would handle both negative zeros and NaNs
11256        // incorrectly, but we can swap the operands to fix both.
11257 std::swap(LHS, RHS); 11258 case ISD::SETOGT: 11259 case ISD::SETGT: 11260 case ISD::SETGE: 11261 Opcode = X86ISD::FMIN; 11262 break; 11263 11264 case ISD::SETULT: 11265 // Converting this to a max would handle NaNs incorrectly. 11266 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 11267 break; 11268 Opcode = X86ISD::FMAX; 11269 break; 11270 case ISD::SETOLE: 11271 // Converting this to a max would handle comparisons between positive 11272 // and negative zero incorrectly, and swapping the operands would 11273 // cause it to handle NaNs incorrectly. 11274 if (!UnsafeFPMath && 11275 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) { 11276 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 11277 break; 11278 std::swap(LHS, RHS); 11279 } 11280 Opcode = X86ISD::FMAX; 11281 break; 11282 case ISD::SETULE: 11283 // Converting this to a max would handle both negative zeros and NaNs 11284 // incorrectly, but we can swap the operands to fix both. 11285 std::swap(LHS, RHS); 11286 case ISD::SETOLT: 11287 case ISD::SETLT: 11288 case ISD::SETLE: 11289 Opcode = X86ISD::FMAX; 11290 break; 11291 } 11292 } 11293 11294 if (Opcode) 11295 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS); 11296 } 11297 11298 // If this is a select between two integer constants, try to do some 11299 // optimizations. 11300 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) { 11301 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS)) 11302 // Don't do this for crazy integer types. 11303 if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) { 11304 // If this is efficiently invertible, canonicalize the LHSC/RHSC values 11305 // so that TrueC (the true value) is larger than FalseC. 11306 bool NeedsCondInvert = false; 11307 11308 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) && 11309 // Efficiently invertible. 11310 (Cond.getOpcode() == ISD::SETCC || // setcc -> invertible. 11311 (Cond.getOpcode() == ISD::XOR && // xor(X, C) -> invertible. 11312 isa<ConstantSDNode>(Cond.getOperand(1))))) { 11313 NeedsCondInvert = true; 11314 std::swap(TrueC, FalseC); 11315 } 11316 11317 // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0. 11318 if (FalseC->getAPIntValue() == 0 && 11319 TrueC->getAPIntValue().isPowerOf2()) { 11320 if (NeedsCondInvert) // Invert the condition if needed. 11321 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond, 11322 DAG.getConstant(1, Cond.getValueType())); 11323 11324 // Zero extend the condition if needed. 11325 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond); 11326 11327 unsigned ShAmt = TrueC->getAPIntValue().logBase2(); 11328 return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond, 11329 DAG.getConstant(ShAmt, MVT::i8)); 11330 } 11331 11332 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)+cst. 11333 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) { 11334 if (NeedsCondInvert) // Invert the condition if needed. 11335 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond, 11336 DAG.getConstant(1, Cond.getValueType())); 11337 11338 // Zero extend the condition if needed. 11339 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, 11340 FalseC->getValueType(0), Cond); 11341 return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 11342 SDValue(FalseC, 0)); 11343 } 11344 11345 // Optimize cases that will turn into an LEA instruction. This requires 11346 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9). 
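        // For example (Cond ? 13 : 5) has Diff == 8, so the select becomes
        // zext(Cond)*8 + 5, matching the "lea base( , cond*8)" pattern listed
        // in the switch below.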
11347 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) { 11348 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue(); 11349 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff; 11350 11351 bool isFastMultiplier = false; 11352 if (Diff < 10) { 11353 switch ((unsigned char)Diff) { 11354 default: break; 11355 case 1: // result = add base, cond 11356 case 2: // result = lea base( , cond*2) 11357 case 3: // result = lea base(cond, cond*2) 11358 case 4: // result = lea base( , cond*4) 11359 case 5: // result = lea base(cond, cond*4) 11360 case 8: // result = lea base( , cond*8) 11361 case 9: // result = lea base(cond, cond*8) 11362 isFastMultiplier = true; 11363 break; 11364 } 11365 } 11366 11367 if (isFastMultiplier) { 11368 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue(); 11369 if (NeedsCondInvert) // Invert the condition if needed. 11370 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond, 11371 DAG.getConstant(1, Cond.getValueType())); 11372 11373 // Zero extend the condition if needed. 11374 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0), 11375 Cond); 11376 // Scale the condition by the difference. 11377 if (Diff != 1) 11378 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond, 11379 DAG.getConstant(Diff, Cond.getValueType())); 11380 11381 // Add the base if non-zero. 11382 if (FalseC->getAPIntValue() != 0) 11383 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 11384 SDValue(FalseC, 0)); 11385 return Cond; 11386 } 11387 } 11388 } 11389 } 11390 11391 return SDValue(); 11392} 11393 11394/// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL] 11395static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG, 11396 TargetLowering::DAGCombinerInfo &DCI) { 11397 DebugLoc DL = N->getDebugLoc(); 11398 11399 // If the flag operand isn't dead, don't touch this CMOV. 11400 if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty()) 11401 return SDValue(); 11402 11403 SDValue FalseOp = N->getOperand(0); 11404 SDValue TrueOp = N->getOperand(1); 11405 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2); 11406 SDValue Cond = N->getOperand(3); 11407 if (CC == X86::COND_E || CC == X86::COND_NE) { 11408 switch (Cond.getOpcode()) { 11409 default: break; 11410 case X86ISD::BSR: 11411 case X86ISD::BSF: 11412 // If operand of BSR / BSF are proven never zero, then ZF cannot be set. 11413 if (DAG.isKnownNeverZero(Cond.getOperand(0))) 11414 return (CC == X86::COND_E) ? FalseOp : TrueOp; 11415 } 11416 } 11417 11418 // If this is a select between two integer constants, try to do some 11419 // optimizations. Note that the operands are ordered the opposite of SELECT 11420 // operands. 11421 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) { 11422 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) { 11423 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is 11424 // larger than FalseC (the false value). 11425 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) { 11426 CC = X86::GetOppositeBranchCondition(CC); 11427 std::swap(TrueC, FalseC); 11428 } 11429 11430 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0. 11431 // This is efficient for any integer data type (including i8/i16) and 11432 // shift amount. 11433 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) { 11434 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 11435 DAG.getConstant(CC, MVT::i8), Cond); 11436 11437 // Zero extend the condition if needed. 
11438 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond); 11439 11440 unsigned ShAmt = TrueC->getAPIntValue().logBase2(); 11441 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond, 11442 DAG.getConstant(ShAmt, MVT::i8)); 11443 if (N->getNumValues() == 2) // Dead flag value? 11444 return DCI.CombineTo(N, Cond, SDValue()); 11445 return Cond; 11446 } 11447 11448 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)+cst. This is efficient 11449 // for any integer data type, including i8/i16. 11450 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) { 11451 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 11452 DAG.getConstant(CC, MVT::i8), Cond); 11453 11454 // Zero extend the condition if needed. 11455 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, 11456 FalseC->getValueType(0), Cond); 11457 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 11458 SDValue(FalseC, 0)); 11459 11460 if (N->getNumValues() == 2) // Dead flag value? 11461 return DCI.CombineTo(N, Cond, SDValue()); 11462 return Cond; 11463 } 11464 11465 // Optimize cases that will turn into an LEA instruction. This requires 11466 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9). 11467 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) { 11468 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue(); 11469 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff; 11470 11471 bool isFastMultiplier = false; 11472 if (Diff < 10) { 11473 switch ((unsigned char)Diff) { 11474 default: break; 11475 case 1: // result = add base, cond 11476 case 2: // result = lea base( , cond*2) 11477 case 3: // result = lea base(cond, cond*2) 11478 case 4: // result = lea base( , cond*4) 11479 case 5: // result = lea base(cond, cond*4) 11480 case 8: // result = lea base( , cond*8) 11481 case 9: // result = lea base(cond, cond*8) 11482 isFastMultiplier = true; 11483 break; 11484 } 11485 } 11486 11487 if (isFastMultiplier) { 11488 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue(); 11489 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 11490 DAG.getConstant(CC, MVT::i8), Cond); 11491 // Zero extend the condition if needed. 11492 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0), 11493 Cond); 11494 // Scale the condition by the difference. 11495 if (Diff != 1) 11496 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond, 11497 DAG.getConstant(Diff, Cond.getValueType())); 11498 11499 // Add the base if non-zero. 11500 if (FalseC->getAPIntValue() != 0) 11501 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 11502 SDValue(FalseC, 0)); 11503 if (N->getNumValues() == 2) // Dead flag value? 11504 return DCI.CombineTo(N, Cond, SDValue()); 11505 return Cond; 11506 } 11507 } 11508 } 11509 } 11510 return SDValue(); 11511} 11512 11513 11514/// PerformMulCombine - Optimize a single multiply with constant into two 11515/// in order to implement it with two cheaper instructions, e.g. 11516/// LEA + SHL, LEA + LEA. 
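/// For example, a multiply by 24 is rewritten as (x*3)*8, i.e. one LEA
/// (computing x + x*2) plus a left shift by 3.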
11517static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG, 11518 TargetLowering::DAGCombinerInfo &DCI) { 11519 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 11520 return SDValue(); 11521 11522 EVT VT = N->getValueType(0); 11523 if (VT != MVT::i64) 11524 return SDValue(); 11525 11526 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 11527 if (!C) 11528 return SDValue(); 11529 uint64_t MulAmt = C->getZExtValue(); 11530 if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9) 11531 return SDValue(); 11532 11533 uint64_t MulAmt1 = 0; 11534 uint64_t MulAmt2 = 0; 11535 if ((MulAmt % 9) == 0) { 11536 MulAmt1 = 9; 11537 MulAmt2 = MulAmt / 9; 11538 } else if ((MulAmt % 5) == 0) { 11539 MulAmt1 = 5; 11540 MulAmt2 = MulAmt / 5; 11541 } else if ((MulAmt % 3) == 0) { 11542 MulAmt1 = 3; 11543 MulAmt2 = MulAmt / 3; 11544 } 11545 if (MulAmt2 && 11546 (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){ 11547 DebugLoc DL = N->getDebugLoc(); 11548 11549 if (isPowerOf2_64(MulAmt2) && 11550 !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD)) 11551 // If second multiplifer is pow2, issue it first. We want the multiply by 11552 // 3, 5, or 9 to be folded into the addressing mode unless the lone use 11553 // is an add. 11554 std::swap(MulAmt1, MulAmt2); 11555 11556 SDValue NewMul; 11557 if (isPowerOf2_64(MulAmt1)) 11558 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0), 11559 DAG.getConstant(Log2_64(MulAmt1), MVT::i8)); 11560 else 11561 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0), 11562 DAG.getConstant(MulAmt1, VT)); 11563 11564 if (isPowerOf2_64(MulAmt2)) 11565 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul, 11566 DAG.getConstant(Log2_64(MulAmt2), MVT::i8)); 11567 else 11568 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul, 11569 DAG.getConstant(MulAmt2, VT)); 11570 11571 // Do not add new nodes to DAG combiner worklist. 11572 DCI.CombineTo(N, NewMul, false); 11573 } 11574 return SDValue(); 11575} 11576 11577static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) { 11578 SDValue N0 = N->getOperand(0); 11579 SDValue N1 = N->getOperand(1); 11580 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 11581 EVT VT = N0.getValueType(); 11582 11583 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2)) 11584 // since the result of setcc_c is all zero's or all ones. 11585 if (N1C && N0.getOpcode() == ISD::AND && 11586 N0.getOperand(1).getOpcode() == ISD::Constant) { 11587 SDValue N00 = N0.getOperand(0); 11588 if (N00.getOpcode() == X86ISD::SETCC_CARRY || 11589 ((N00.getOpcode() == ISD::ANY_EXTEND || 11590 N00.getOpcode() == ISD::ZERO_EXTEND) && 11591 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY)) { 11592 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 11593 APInt ShAmt = N1C->getAPIntValue(); 11594 Mask = Mask.shl(ShAmt); 11595 if (Mask != 0) 11596 return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, 11597 N00, DAG.getConstant(Mask, VT)); 11598 } 11599 } 11600 11601 return SDValue(); 11602} 11603 11604/// PerformShiftCombine - Transforms vector shift nodes to use vector shifts 11605/// when possible. 
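/// For example (shl v4i32 X, <5,5,5,5>), where every lane is shifted by the
/// same amount, becomes a single x86_sse2_pslli_d intrinsic call with a
/// scalar shift amount of 5.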
11606static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG, 11607 const X86Subtarget *Subtarget) { 11608 EVT VT = N->getValueType(0); 11609 if (!VT.isVector() && VT.isInteger() && 11610 N->getOpcode() == ISD::SHL) 11611 return PerformSHLCombine(N, DAG); 11612 11613 // On X86 with SSE2 support, we can transform this to a vector shift if 11614 // all elements are shifted by the same amount. We can't do this in legalize 11615 // because the a constant vector is typically transformed to a constant pool 11616 // so we have no knowledge of the shift amount. 11617 if (!Subtarget->hasSSE2()) 11618 return SDValue(); 11619 11620 if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16) 11621 return SDValue(); 11622 11623 SDValue ShAmtOp = N->getOperand(1); 11624 EVT EltVT = VT.getVectorElementType(); 11625 DebugLoc DL = N->getDebugLoc(); 11626 SDValue BaseShAmt = SDValue(); 11627 if (ShAmtOp.getOpcode() == ISD::BUILD_VECTOR) { 11628 unsigned NumElts = VT.getVectorNumElements(); 11629 unsigned i = 0; 11630 for (; i != NumElts; ++i) { 11631 SDValue Arg = ShAmtOp.getOperand(i); 11632 if (Arg.getOpcode() == ISD::UNDEF) continue; 11633 BaseShAmt = Arg; 11634 break; 11635 } 11636 for (; i != NumElts; ++i) { 11637 SDValue Arg = ShAmtOp.getOperand(i); 11638 if (Arg.getOpcode() == ISD::UNDEF) continue; 11639 if (Arg != BaseShAmt) { 11640 return SDValue(); 11641 } 11642 } 11643 } else if (ShAmtOp.getOpcode() == ISD::VECTOR_SHUFFLE && 11644 cast<ShuffleVectorSDNode>(ShAmtOp)->isSplat()) { 11645 SDValue InVec = ShAmtOp.getOperand(0); 11646 if (InVec.getOpcode() == ISD::BUILD_VECTOR) { 11647 unsigned NumElts = InVec.getValueType().getVectorNumElements(); 11648 unsigned i = 0; 11649 for (; i != NumElts; ++i) { 11650 SDValue Arg = InVec.getOperand(i); 11651 if (Arg.getOpcode() == ISD::UNDEF) continue; 11652 BaseShAmt = Arg; 11653 break; 11654 } 11655 } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) { 11656 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(InVec.getOperand(2))) { 11657 unsigned SplatIdx= cast<ShuffleVectorSDNode>(ShAmtOp)->getSplatIndex(); 11658 if (C->getZExtValue() == SplatIdx) 11659 BaseShAmt = InVec.getOperand(1); 11660 } 11661 } 11662 if (BaseShAmt.getNode() == 0) 11663 BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, ShAmtOp, 11664 DAG.getIntPtrConstant(0)); 11665 } else 11666 return SDValue(); 11667 11668 // The shift amount is an i32. 11669 if (EltVT.bitsGT(MVT::i32)) 11670 BaseShAmt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, BaseShAmt); 11671 else if (EltVT.bitsLT(MVT::i32)) 11672 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, BaseShAmt); 11673 11674 // The shift amount is identical so we can do a vector shift. 
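  // Map the generic shift to the matching SSE2 per-element shift intrinsic.
  // Note there is no arithmetic right shift for v2i64, so ISD::SRA of that
  // type is intentionally not handled here.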
11675 SDValue ValOp = N->getOperand(0); 11676 switch (N->getOpcode()) { 11677 default: 11678 llvm_unreachable("Unknown shift opcode!"); 11679 break; 11680 case ISD::SHL: 11681 if (VT == MVT::v2i64) 11682 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 11683 DAG.getConstant(Intrinsic::x86_sse2_pslli_q, MVT::i32), 11684 ValOp, BaseShAmt); 11685 if (VT == MVT::v4i32) 11686 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 11687 DAG.getConstant(Intrinsic::x86_sse2_pslli_d, MVT::i32), 11688 ValOp, BaseShAmt); 11689 if (VT == MVT::v8i16) 11690 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 11691 DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32), 11692 ValOp, BaseShAmt); 11693 break; 11694 case ISD::SRA: 11695 if (VT == MVT::v4i32) 11696 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 11697 DAG.getConstant(Intrinsic::x86_sse2_psrai_d, MVT::i32), 11698 ValOp, BaseShAmt); 11699 if (VT == MVT::v8i16) 11700 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 11701 DAG.getConstant(Intrinsic::x86_sse2_psrai_w, MVT::i32), 11702 ValOp, BaseShAmt); 11703 break; 11704 case ISD::SRL: 11705 if (VT == MVT::v2i64) 11706 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 11707 DAG.getConstant(Intrinsic::x86_sse2_psrli_q, MVT::i32), 11708 ValOp, BaseShAmt); 11709 if (VT == MVT::v4i32) 11710 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 11711 DAG.getConstant(Intrinsic::x86_sse2_psrli_d, MVT::i32), 11712 ValOp, BaseShAmt); 11713 if (VT == MVT::v8i16) 11714 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 11715 DAG.getConstant(Intrinsic::x86_sse2_psrli_w, MVT::i32), 11716 ValOp, BaseShAmt); 11717 break; 11718 } 11719 return SDValue(); 11720} 11721 11722 11723// CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..)) 11724// where both setccs reference the same FP CMP, and rewrite for CMPEQSS 11725// and friends. Likewise for OR -> CMPNEQSS. 11726static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG, 11727 TargetLowering::DAGCombinerInfo &DCI, 11728 const X86Subtarget *Subtarget) { 11729 unsigned opcode; 11730 11731 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but 11732 // we're requiring SSE2 for both. 11733 if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) { 11734 SDValue N0 = N->getOperand(0); 11735 SDValue N1 = N->getOperand(1); 11736 SDValue CMP0 = N0->getOperand(1); 11737 SDValue CMP1 = N1->getOperand(1); 11738 DebugLoc DL = N->getDebugLoc(); 11739 11740 // The SETCCs should both refer to the same CMP. 
11741 if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1) 11742 return SDValue(); 11743 11744 SDValue CMP00 = CMP0->getOperand(0); 11745 SDValue CMP01 = CMP0->getOperand(1); 11746 EVT VT = CMP00.getValueType(); 11747 11748 if (VT == MVT::f32 || VT == MVT::f64) { 11749 bool ExpectingFlags = false; 11750 // Check for any users that want flags: 11751 for (SDNode::use_iterator UI = N->use_begin(), 11752 UE = N->use_end(); 11753 !ExpectingFlags && UI != UE; ++UI) 11754 switch (UI->getOpcode()) { 11755 default: 11756 case ISD::BR_CC: 11757 case ISD::BRCOND: 11758 case ISD::SELECT: 11759 ExpectingFlags = true; 11760 break; 11761 case ISD::CopyToReg: 11762 case ISD::SIGN_EXTEND: 11763 case ISD::ZERO_EXTEND: 11764 case ISD::ANY_EXTEND: 11765 break; 11766 } 11767 11768 if (!ExpectingFlags) { 11769 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0); 11770 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0); 11771 11772 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) { 11773 X86::CondCode tmp = cc0; 11774 cc0 = cc1; 11775 cc1 = tmp; 11776 } 11777 11778 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) || 11779 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) { 11780 bool is64BitFP = (CMP00.getValueType() == MVT::f64); 11781 X86ISD::NodeType NTOperator = is64BitFP ? 11782 X86ISD::FSETCCsd : X86ISD::FSETCCss; 11783 // FIXME: need symbolic constants for these magic numbers. 11784 // See X86ATTInstPrinter.cpp:printSSECC(). 11785 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4; 11786 SDValue OnesOrZeroesF = DAG.getNode(NTOperator, DL, MVT::f32, CMP00, CMP01, 11787 DAG.getConstant(x86cc, MVT::i8)); 11788 SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, MVT::i32, 11789 OnesOrZeroesF); 11790 SDValue ANDed = DAG.getNode(ISD::AND, DL, MVT::i32, OnesOrZeroesI, 11791 DAG.getConstant(1, MVT::i32)); 11792 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ANDed); 11793 return OneBitOfTruth; 11794 } 11795 } 11796 } 11797 } 11798 return SDValue(); 11799} 11800 11801static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG, 11802 TargetLowering::DAGCombinerInfo &DCI, 11803 const X86Subtarget *Subtarget) { 11804 if (DCI.isBeforeLegalizeOps()) 11805 return SDValue(); 11806 11807 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget); 11808 if (R.getNode()) 11809 return R; 11810 11811 // Want to form PANDN nodes, in the hopes of then easily combining them with 11812 // OR and AND nodes to form PBLEND/PSIGN. 
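  // PANDN computes (~a) & b, so an AND whose operand is an XOR with all-ones
  // (a vector NOT) maps onto it directly; both operand orders are checked
  // below.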
11813 EVT VT = N->getValueType(0); 11814 if (VT != MVT::v2i64) 11815 return SDValue(); 11816 11817 SDValue N0 = N->getOperand(0); 11818 SDValue N1 = N->getOperand(1); 11819 DebugLoc DL = N->getDebugLoc(); 11820 11821 // Check LHS for vnot 11822 if (N0.getOpcode() == ISD::XOR && 11823 ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode())) 11824 return DAG.getNode(X86ISD::PANDN, DL, VT, N0.getOperand(0), N1); 11825 11826 // Check RHS for vnot 11827 if (N1.getOpcode() == ISD::XOR && 11828 ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode())) 11829 return DAG.getNode(X86ISD::PANDN, DL, VT, N1.getOperand(0), N0); 11830 11831 return SDValue(); 11832} 11833 11834static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG, 11835 TargetLowering::DAGCombinerInfo &DCI, 11836 const X86Subtarget *Subtarget) { 11837 if (DCI.isBeforeLegalizeOps()) 11838 return SDValue(); 11839 11840 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget); 11841 if (R.getNode()) 11842 return R; 11843 11844 EVT VT = N->getValueType(0); 11845 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64 && VT != MVT::v2i64) 11846 return SDValue(); 11847 11848 SDValue N0 = N->getOperand(0); 11849 SDValue N1 = N->getOperand(1); 11850 11851 // look for psign/blend 11852 if (Subtarget->hasSSSE3()) { 11853 if (VT == MVT::v2i64) { 11854 // Canonicalize pandn to RHS 11855 if (N0.getOpcode() == X86ISD::PANDN) 11856 std::swap(N0, N1); 11857 // or (and (m, x), (pandn m, y)) 11858 if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::PANDN) { 11859 SDValue Mask = N1.getOperand(0); 11860 SDValue X = N1.getOperand(1); 11861 SDValue Y; 11862 if (N0.getOperand(0) == Mask) 11863 Y = N0.getOperand(1); 11864 if (N0.getOperand(1) == Mask) 11865 Y = N0.getOperand(0); 11866 11867 // Check to see if the mask appeared in both the AND and PANDN and 11868 if (!Y.getNode()) 11869 return SDValue(); 11870 11871 // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them. 11872 if (Mask.getOpcode() != ISD::BITCAST || 11873 X.getOpcode() != ISD::BITCAST || 11874 Y.getOpcode() != ISD::BITCAST) 11875 return SDValue(); 11876 11877 // Look through mask bitcast. 11878 Mask = Mask.getOperand(0); 11879 EVT MaskVT = Mask.getValueType(); 11880 11881 // Validate that the Mask operand is a vector sra node. The sra node 11882 // will be an intrinsic. 11883 if (Mask.getOpcode() != ISD::INTRINSIC_WO_CHAIN) 11884 return SDValue(); 11885 11886 // FIXME: what to do for bytes, since there is a psignb/pblendvb, but 11887 // there is no psrai.b 11888 switch (cast<ConstantSDNode>(Mask.getOperand(0))->getZExtValue()) { 11889 case Intrinsic::x86_sse2_psrai_w: 11890 case Intrinsic::x86_sse2_psrai_d: 11891 break; 11892 default: return SDValue(); 11893 } 11894 11895 // Check that the SRA is all signbits. 11896 SDValue SraC = Mask.getOperand(2); 11897 unsigned SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue(); 11898 unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits(); 11899 if ((SraAmt + 1) != EltBits) 11900 return SDValue(); 11901 11902 DebugLoc DL = N->getDebugLoc(); 11903 11904 // Now we know we at least have a plendvb with the mask val. See if 11905 // we can form a psignb/w/d. 
11906 // psign = x.type == y.type == mask.type && y = sub(0, x); 11907 X = X.getOperand(0); 11908 Y = Y.getOperand(0); 11909 if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X && 11910 ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) && 11911 X.getValueType() == MaskVT && X.getValueType() == Y.getValueType()){ 11912 unsigned Opc = 0; 11913 switch (EltBits) { 11914 case 8: Opc = X86ISD::PSIGNB; break; 11915 case 16: Opc = X86ISD::PSIGNW; break; 11916 case 32: Opc = X86ISD::PSIGND; break; 11917 default: break; 11918 } 11919 if (Opc) { 11920 SDValue Sign = DAG.getNode(Opc, DL, MaskVT, X, Mask.getOperand(1)); 11921 return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Sign); 11922 } 11923 } 11924 // PBLENDVB only available on SSE 4.1 11925 if (!Subtarget->hasSSE41()) 11926 return SDValue(); 11927 11928 X = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, X); 11929 Y = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Y); 11930 Mask = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Mask); 11931 Mask = DAG.getNode(X86ISD::PBLENDVB, DL, MVT::v16i8, X, Y, Mask); 11932 return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Mask); 11933 } 11934 } 11935 } 11936 11937 // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c) 11938 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL) 11939 std::swap(N0, N1); 11940 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL) 11941 return SDValue(); 11942 if (!N0.hasOneUse() || !N1.hasOneUse()) 11943 return SDValue(); 11944 11945 SDValue ShAmt0 = N0.getOperand(1); 11946 if (ShAmt0.getValueType() != MVT::i8) 11947 return SDValue(); 11948 SDValue ShAmt1 = N1.getOperand(1); 11949 if (ShAmt1.getValueType() != MVT::i8) 11950 return SDValue(); 11951 if (ShAmt0.getOpcode() == ISD::TRUNCATE) 11952 ShAmt0 = ShAmt0.getOperand(0); 11953 if (ShAmt1.getOpcode() == ISD::TRUNCATE) 11954 ShAmt1 = ShAmt1.getOperand(0); 11955 11956 DebugLoc DL = N->getDebugLoc(); 11957 unsigned Opc = X86ISD::SHLD; 11958 SDValue Op0 = N0.getOperand(0); 11959 SDValue Op1 = N1.getOperand(0); 11960 if (ShAmt0.getOpcode() == ISD::SUB) { 11961 Opc = X86ISD::SHRD; 11962 std::swap(Op0, Op1); 11963 std::swap(ShAmt0, ShAmt1); 11964 } 11965 11966 unsigned Bits = VT.getSizeInBits(); 11967 if (ShAmt1.getOpcode() == ISD::SUB) { 11968 SDValue Sum = ShAmt1.getOperand(0); 11969 if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) { 11970 SDValue ShAmt1Op1 = ShAmt1.getOperand(1); 11971 if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE) 11972 ShAmt1Op1 = ShAmt1Op1.getOperand(0); 11973 if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0) 11974 return DAG.getNode(Opc, DL, VT, 11975 Op0, Op1, 11976 DAG.getNode(ISD::TRUNCATE, DL, 11977 MVT::i8, ShAmt0)); 11978 } 11979 } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) { 11980 ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0); 11981 if (ShAmt0C && 11982 ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits) 11983 return DAG.getNode(Opc, DL, VT, 11984 N0.getOperand(0), N1.getOperand(0), 11985 DAG.getNode(ISD::TRUNCATE, DL, 11986 MVT::i8, ShAmt0)); 11987 } 11988 11989 return SDValue(); 11990} 11991 11992/// PerformSTORECombine - Do target-specific dag combines on STORE nodes. 11993static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG, 11994 const X86Subtarget *Subtarget) { 11995 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering 11996 // the FP state in cases where an emms may be missing. 
11997 // A preferable solution to the general problem is to figure out the right 11998 // places to insert EMMS. This qualifies as a quick hack. 11999 12000 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode. 12001 StoreSDNode *St = cast<StoreSDNode>(N); 12002 EVT VT = St->getValue().getValueType(); 12003 if (VT.getSizeInBits() != 64) 12004 return SDValue(); 12005 12006 const Function *F = DAG.getMachineFunction().getFunction(); 12007 bool NoImplicitFloatOps = F->hasFnAttr(Attribute::NoImplicitFloat); 12008 bool F64IsLegal = !UseSoftFloat && !NoImplicitFloatOps 12009 && Subtarget->hasSSE2(); 12010 if ((VT.isVector() || 12011 (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) && 12012 isa<LoadSDNode>(St->getValue()) && 12013 !cast<LoadSDNode>(St->getValue())->isVolatile() && 12014 St->getChain().hasOneUse() && !St->isVolatile()) { 12015 SDNode* LdVal = St->getValue().getNode(); 12016 LoadSDNode *Ld = 0; 12017 int TokenFactorIndex = -1; 12018 SmallVector<SDValue, 8> Ops; 12019 SDNode* ChainVal = St->getChain().getNode(); 12020 // Must be a store of a load. We currently handle two cases: the load 12021 // is a direct child, and it's under an intervening TokenFactor. It is 12022 // possible to dig deeper under nested TokenFactors. 12023 if (ChainVal == LdVal) 12024 Ld = cast<LoadSDNode>(St->getChain()); 12025 else if (St->getValue().hasOneUse() && 12026 ChainVal->getOpcode() == ISD::TokenFactor) { 12027 for (unsigned i=0, e = ChainVal->getNumOperands(); i != e; ++i) { 12028 if (ChainVal->getOperand(i).getNode() == LdVal) { 12029 TokenFactorIndex = i; 12030 Ld = cast<LoadSDNode>(St->getValue()); 12031 } else 12032 Ops.push_back(ChainVal->getOperand(i)); 12033 } 12034 } 12035 12036 if (!Ld || !ISD::isNormalLoad(Ld)) 12037 return SDValue(); 12038 12039 // If this is not the MMX case, i.e. we are just turning i64 load/store 12040 // into f64 load/store, avoid the transformation if there are multiple 12041 // uses of the loaded value. 12042 if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0)) 12043 return SDValue(); 12044 12045 DebugLoc LdDL = Ld->getDebugLoc(); 12046 DebugLoc StDL = N->getDebugLoc(); 12047 // If we are a 64-bit capable x86, lower to a single movq load/store pair. 12048 // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store 12049 // pair instead. 12050 if (Subtarget->is64Bit() || F64IsLegal) { 12051 EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64; 12052 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(), 12053 Ld->getPointerInfo(), Ld->isVolatile(), 12054 Ld->isNonTemporal(), Ld->getAlignment()); 12055 SDValue NewChain = NewLd.getValue(1); 12056 if (TokenFactorIndex != -1) { 12057 Ops.push_back(NewChain); 12058 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0], 12059 Ops.size()); 12060 } 12061 return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(), 12062 St->getPointerInfo(), 12063 St->isVolatile(), St->isNonTemporal(), 12064 St->getAlignment()); 12065 } 12066 12067 // Otherwise, lower to two pairs of 32-bit loads / stores. 
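    // That is, move the i64 value as two i32 halves: load [Ptr] and [Ptr+4],
    // store them to the matching halves of the destination, and join all the
    // chains with a TokenFactor.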
12068 SDValue LoAddr = Ld->getBasePtr(); 12069 SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr, 12070 DAG.getConstant(4, MVT::i32)); 12071 12072 SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr, 12073 Ld->getPointerInfo(), 12074 Ld->isVolatile(), Ld->isNonTemporal(), 12075 Ld->getAlignment()); 12076 SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr, 12077 Ld->getPointerInfo().getWithOffset(4), 12078 Ld->isVolatile(), Ld->isNonTemporal(), 12079 MinAlign(Ld->getAlignment(), 4)); 12080 12081 SDValue NewChain = LoLd.getValue(1); 12082 if (TokenFactorIndex != -1) { 12083 Ops.push_back(LoLd); 12084 Ops.push_back(HiLd); 12085 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0], 12086 Ops.size()); 12087 } 12088 12089 LoAddr = St->getBasePtr(); 12090 HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr, 12091 DAG.getConstant(4, MVT::i32)); 12092 12093 SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr, 12094 St->getPointerInfo(), 12095 St->isVolatile(), St->isNonTemporal(), 12096 St->getAlignment()); 12097 SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr, 12098 St->getPointerInfo().getWithOffset(4), 12099 St->isVolatile(), 12100 St->isNonTemporal(), 12101 MinAlign(St->getAlignment(), 4)); 12102 return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt); 12103 } 12104 return SDValue(); 12105} 12106 12107/// PerformFORCombine - Do target-specific dag combines on X86ISD::FOR and 12108/// X86ISD::FXOR nodes. 12109static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) { 12110 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR); 12111 // F[X]OR(0.0, x) -> x 12112 // F[X]OR(x, 0.0) -> x 12113 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) 12114 if (C->getValueAPF().isPosZero()) 12115 return N->getOperand(1); 12116 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1))) 12117 if (C->getValueAPF().isPosZero()) 12118 return N->getOperand(0); 12119 return SDValue(); 12120} 12121 12122/// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes. 12123static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) { 12124 // FAND(0.0, x) -> 0.0 12125 // FAND(x, 0.0) -> 0.0 12126 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) 12127 if (C->getValueAPF().isPosZero()) 12128 return N->getOperand(0); 12129 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1))) 12130 if (C->getValueAPF().isPosZero()) 12131 return N->getOperand(1); 12132 return SDValue(); 12133} 12134 12135static SDValue PerformBTCombine(SDNode *N, 12136 SelectionDAG &DAG, 12137 TargetLowering::DAGCombinerInfo &DCI) { 12138 // BT ignores high bits in the bit index operand. 
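  // Only the low log2(width) bits of the index are demanded, e.g. just bits
  // 4..0 for a 32-bit BT, so a wider constant index can be shrunk and any
  // other known bits simplified away.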
12139 SDValue Op1 = N->getOperand(1); 12140 if (Op1.hasOneUse()) { 12141 unsigned BitWidth = Op1.getValueSizeInBits(); 12142 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth)); 12143 APInt KnownZero, KnownOne; 12144 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), 12145 !DCI.isBeforeLegalizeOps()); 12146 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 12147 if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) || 12148 TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO)) 12149 DCI.CommitTargetLoweringOpt(TLO); 12150 } 12151 return SDValue(); 12152} 12153 12154static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) { 12155 SDValue Op = N->getOperand(0); 12156 if (Op.getOpcode() == ISD::BITCAST) 12157 Op = Op.getOperand(0); 12158 EVT VT = N->getValueType(0), OpVT = Op.getValueType(); 12159 if (Op.getOpcode() == X86ISD::VZEXT_LOAD && 12160 VT.getVectorElementType().getSizeInBits() == 12161 OpVT.getVectorElementType().getSizeInBits()) { 12162 return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op); 12163 } 12164 return SDValue(); 12165} 12166 12167static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG) { 12168 // (i32 zext (and (i8 x86isd::setcc_carry), 1)) -> 12169 // (and (i32 x86isd::setcc_carry), 1) 12170 // This eliminates the zext. This transformation is necessary because 12171 // ISD::SETCC is always legalized to i8. 12172 DebugLoc dl = N->getDebugLoc(); 12173 SDValue N0 = N->getOperand(0); 12174 EVT VT = N->getValueType(0); 12175 if (N0.getOpcode() == ISD::AND && 12176 N0.hasOneUse() && 12177 N0.getOperand(0).hasOneUse()) { 12178 SDValue N00 = N0.getOperand(0); 12179 if (N00.getOpcode() != X86ISD::SETCC_CARRY) 12180 return SDValue(); 12181 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 12182 if (!C || C->getZExtValue() != 1) 12183 return SDValue(); 12184 return DAG.getNode(ISD::AND, dl, VT, 12185 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT, 12186 N00.getOperand(0), N00.getOperand(1)), 12187 DAG.getConstant(1, VT)); 12188 } 12189 12190 return SDValue(); 12191} 12192 12193// Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT 12194static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG) { 12195 unsigned X86CC = N->getConstantOperandVal(0); 12196 SDValue EFLAG = N->getOperand(1); 12197 DebugLoc DL = N->getDebugLoc(); 12198 12199 // Materialize "setb reg" as "sbb reg,reg", since it can be extended without 12200 // a zext and produces an all-ones bit which is more useful than 0/1 in some 12201 // cases. 12202 if (X86CC == X86::COND_B) 12203 return DAG.getNode(ISD::AND, DL, MVT::i8, 12204 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8, 12205 DAG.getConstant(X86CC, MVT::i8), EFLAG), 12206 DAG.getConstant(1, MVT::i8)); 12207 12208 return SDValue(); 12209} 12210 12211static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG, 12212 const X86TargetLowering *XTLI) { 12213 SDValue Op0 = N->getOperand(0); 12214 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have 12215 // a 32-bit target where SSE doesn't support i64->FP operations. 
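  // The i64 load feeding the conversion is folded directly into an x87 FILD
  // via BuildFILD, so the value never has to be legalized through integer
  // registers.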
12216 if (Op0.getOpcode() == ISD::LOAD) { 12217 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode()); 12218 EVT VT = Ld->getValueType(0); 12219 if (!Ld->isVolatile() && !N->getValueType(0).isVector() && 12220 ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() && 12221 !XTLI->getSubtarget()->is64Bit() && 12222 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 12223 SDValue FILDChain = XTLI->BuildFILD(SDValue(N, 0), Ld->getValueType(0), 12224 Ld->getChain(), Op0, DAG); 12225 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1)); 12226 return FILDChain; 12227 } 12228 } 12229 return SDValue(); 12230} 12231 12232// Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS 12233static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG, 12234 X86TargetLowering::DAGCombinerInfo &DCI) { 12235 // If the LHS and RHS of the ADC node are zero, then it can't overflow and 12236 // the result is either zero or one (depending on the input carry bit). 12237 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1. 12238 if (X86::isZeroNode(N->getOperand(0)) && 12239 X86::isZeroNode(N->getOperand(1)) && 12240 // We don't have a good way to replace an EFLAGS use, so only do this when 12241 // dead right now. 12242 SDValue(N, 1).use_empty()) { 12243 DebugLoc DL = N->getDebugLoc(); 12244 EVT VT = N->getValueType(0); 12245 SDValue CarryOut = DAG.getConstant(0, N->getValueType(1)); 12246 SDValue Res1 = DAG.getNode(ISD::AND, DL, VT, 12247 DAG.getNode(X86ISD::SETCC_CARRY, DL, VT, 12248 DAG.getConstant(X86::COND_B,MVT::i8), 12249 N->getOperand(2)), 12250 DAG.getConstant(1, VT)); 12251 return DCI.CombineTo(N, Res1, CarryOut); 12252 } 12253 12254 return SDValue(); 12255} 12256 12257// fold (add Y, (sete X, 0)) -> adc 0, Y 12258// (add Y, (setne X, 0)) -> sbb -1, Y 12259// (sub (sete X, 0), Y) -> sbb 0, Y 12260// (sub (setne X, 0), Y) -> adc -1, Y 12261static SDValue OptimizeConditonalInDecrement(SDNode *N, SelectionDAG &DAG) { 12262 DebugLoc DL = N->getDebugLoc(); 12263 12264 // Look through ZExts. 12265 SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0); 12266 if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse()) 12267 return SDValue(); 12268 12269 SDValue SetCC = Ext.getOperand(0); 12270 if (SetCC.getOpcode() != X86ISD::SETCC || !SetCC.hasOneUse()) 12271 return SDValue(); 12272 12273 X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0); 12274 if (CC != X86::COND_E && CC != X86::COND_NE) 12275 return SDValue(); 12276 12277 SDValue Cmp = SetCC.getOperand(1); 12278 if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() || 12279 !X86::isZeroNode(Cmp.getOperand(1)) || 12280 !Cmp.getOperand(0).getValueType().isInteger()) 12281 return SDValue(); 12282 12283 SDValue CmpOp0 = Cmp.getOperand(0); 12284 SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0, 12285 DAG.getConstant(1, CmpOp0.getValueType())); 12286 12287 SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1); 12288 if (CC == X86::COND_NE) 12289 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB, 12290 DL, OtherVal.getValueType(), OtherVal, 12291 DAG.getConstant(-1ULL, OtherVal.getValueType()), NewCmp); 12292 return DAG.getNode(N->getOpcode() == ISD::SUB ? 
X86ISD::SBB : X86ISD::ADC,
                     DL, OtherVal.getValueType(), OtherVal,
                     DAG.getConstant(0, OtherVal.getValueType()), NewCmp);
}

SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default: break;
  case ISD::EXTRACT_VECTOR_ELT:
    return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, *this);
  case ISD::SELECT:         return PerformSELECTCombine(N, DAG, Subtarget);
  case X86ISD::CMOV:        return PerformCMOVCombine(N, DAG, DCI);
  case ISD::ADD:
  case ISD::SUB:            return OptimizeConditonalInDecrement(N, DAG);
  case X86ISD::ADC:         return PerformADCCombine(N, DAG, DCI);
  case ISD::MUL:            return PerformMulCombine(N, DAG, DCI);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:            return PerformShiftCombine(N, DAG, Subtarget);
  case ISD::AND:            return PerformAndCombine(N, DAG, DCI, Subtarget);
  case ISD::OR:             return PerformOrCombine(N, DAG, DCI, Subtarget);
  case ISD::STORE:          return PerformSTORECombine(N, DAG, Subtarget);
  case ISD::SINT_TO_FP:     return PerformSINT_TO_FPCombine(N, DAG, this);
  case X86ISD::FXOR:
  case X86ISD::FOR:         return PerformFORCombine(N, DAG);
  case X86ISD::FAND:        return PerformFANDCombine(N, DAG);
  case X86ISD::BT:          return PerformBTCombine(N, DAG, DCI);
  case X86ISD::VZEXT_MOVL:  return PerformVZEXT_MOVLCombine(N, DAG);
  case ISD::ZERO_EXTEND:    return PerformZExtCombine(N, DAG);
  case X86ISD::SETCC:       return PerformSETCCCombine(N, DAG);
  case X86ISD::SHUFPS:      // Handle all target specific shuffles
  case X86ISD::SHUFPD:
  case X86ISD::PALIGN:
  case X86ISD::PUNPCKHBW:
  case X86ISD::PUNPCKHWD:
  case X86ISD::PUNPCKHDQ:
  case X86ISD::PUNPCKHQDQ:
  case X86ISD::UNPCKHPS:
  case X86ISD::UNPCKHPD:
  case X86ISD::PUNPCKLBW:
  case X86ISD::PUNPCKLWD:
  case X86ISD::PUNPCKLDQ:
  case X86ISD::PUNPCKLQDQ:
  case X86ISD::UNPCKLPS:
  case X86ISD::UNPCKLPD:
  case X86ISD::VUNPCKLPS:
  case X86ISD::VUNPCKLPD:
  case X86ISD::VUNPCKLPSY:
  case X86ISD::VUNPCKLPDY:
  case X86ISD::MOVHLPS:
  case X86ISD::MOVLHPS:
  case X86ISD::PSHUFD:
  case X86ISD::PSHUFHW:
  case X86ISD::PSHUFLW:
  case X86ISD::MOVSS:
  case X86ISD::MOVSD:
  case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI);
  }

  return SDValue();
}

/// isTypeDesirableForOp - Return true if the target has native support for
/// the specified value type and it is 'desirable' to use the type for the
/// given node type. e.g. On x86 i16 is legal, but undesirable since i16
/// instruction encodings are longer and some i16 instructions are slow.
bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
  if (!isTypeLegal(VT))
    return false;
  if (VT != MVT::i16)
    return true;

  switch (Opc) {
  default:
    return true;
  case ISD::LOAD:
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SUB:
  case ISD::ADD:
  case ISD::MUL:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    return false;
  }
}

/// IsDesirableToPromoteOp - This method queries the target whether it is
/// beneficial for the DAG combiner to promote the specified node. If true, it
/// should return the desired promotion type by reference.
bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
  EVT VT = Op.getValueType();
  if (VT != MVT::i16)
    return false;

  bool Promote = false;
  bool Commute = false;
  switch (Op.getOpcode()) {
  default: break;
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    // If the non-extending load has a single use and it's not live out, then
    // it might be folded.
    if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&&
                                                     Op.hasOneUse()*/) {
      for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
             UE = Op.getNode()->use_end(); UI != UE; ++UI) {
        // The only case where we'd want to promote LOAD (rather than it being
        // promoted as an operand) is when its only use is live out.
        if (UI->getOpcode() != ISD::CopyToReg)
          return false;
      }
    }
    Promote = true;
    break;
  }
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
    Promote = true;
    break;
  case ISD::SHL:
  case ISD::SRL: {
    SDValue N0 = Op.getOperand(0);
    // Look out for (store (shl (load), x)).
    if (MayFoldLoad(N0) && MayFoldIntoStore(Op))
      return false;
    Promote = true;
    break;
  }
  case ISD::ADD:
  case ISD::MUL:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    Commute = true;
    // fallthrough
  case ISD::SUB: {
    SDValue N0 = Op.getOperand(0);
    SDValue N1 = Op.getOperand(1);
    if (!Commute && MayFoldLoad(N1))
      return false;
    // Avoid disabling potential load folding opportunities.
    if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op)))
      return false;
    if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op)))
      return false;
    Promote = true;
  }
  }

  PVT = MVT::i32;
  return Promote;
}

//===----------------------------------------------------------------------===//
//                         X86 Inline Assembly Support
//===----------------------------------------------------------------------===//

bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
  InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());

  std::string AsmStr = IA->getAsmString();

  // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
  SmallVector<StringRef, 4> AsmPieces;
  SplitString(AsmStr, AsmPieces, ";\n");

  switch (AsmPieces.size()) {
  default: return false;
  case 1:
    AsmStr = AsmPieces[0];
    AsmPieces.clear();
    SplitString(AsmStr, AsmPieces, " \t");  // Split with whitespace.

    // FIXME: this should verify that we are targeting a 486 or better. If not,
    // we will turn this bswap into something that will be lowered to logical
    // ops instead of emitting the bswap asm. For now, we don't support 486 or
    // lower so don't worry about this.
    // bswap $0
    if (AsmPieces.size() == 2 &&
        (AsmPieces[0] == "bswap" ||
         AsmPieces[0] == "bswapq" ||
         AsmPieces[0] == "bswapl") &&
        (AsmPieces[1] == "$0" ||
         AsmPieces[1] == "${0:q}")) {
      // No need to check constraints, nothing other than the equivalent of
      // "=r,0" would be valid here.
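      // Illustrative example (assumed, not from the original source): an
      // inline-asm call such as
      //   %r = call i32 asm "bswap $0", "=r,0"(i32 %x)
      // is rewritten below into a call to llvm.bswap.i32, so it can be
      // selected and optimized like any other bswap rather than being kept
      // as opaque inline asm.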
12486 const IntegerType *Ty = dyn_cast<IntegerType>(CI->getType()); 12487 if (!Ty || Ty->getBitWidth() % 16 != 0) 12488 return false; 12489 return IntrinsicLowering::LowerToByteSwap(CI); 12490 } 12491 // rorw $$8, ${0:w} --> llvm.bswap.i16 12492 if (CI->getType()->isIntegerTy(16) && 12493 AsmPieces.size() == 3 && 12494 (AsmPieces[0] == "rorw" || AsmPieces[0] == "rolw") && 12495 AsmPieces[1] == "$$8," && 12496 AsmPieces[2] == "${0:w}" && 12497 IA->getConstraintString().compare(0, 5, "=r,0,") == 0) { 12498 AsmPieces.clear(); 12499 const std::string &ConstraintsStr = IA->getConstraintString(); 12500 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ","); 12501 std::sort(AsmPieces.begin(), AsmPieces.end()); 12502 if (AsmPieces.size() == 4 && 12503 AsmPieces[0] == "~{cc}" && 12504 AsmPieces[1] == "~{dirflag}" && 12505 AsmPieces[2] == "~{flags}" && 12506 AsmPieces[3] == "~{fpsr}") { 12507 const IntegerType *Ty = dyn_cast<IntegerType>(CI->getType()); 12508 if (!Ty || Ty->getBitWidth() % 16 != 0) 12509 return false; 12510 return IntrinsicLowering::LowerToByteSwap(CI); 12511 } 12512 } 12513 break; 12514 case 3: 12515 if (CI->getType()->isIntegerTy(32) && 12516 IA->getConstraintString().compare(0, 5, "=r,0,") == 0) { 12517 SmallVector<StringRef, 4> Words; 12518 SplitString(AsmPieces[0], Words, " \t,"); 12519 if (Words.size() == 3 && Words[0] == "rorw" && Words[1] == "$$8" && 12520 Words[2] == "${0:w}") { 12521 Words.clear(); 12522 SplitString(AsmPieces[1], Words, " \t,"); 12523 if (Words.size() == 3 && Words[0] == "rorl" && Words[1] == "$$16" && 12524 Words[2] == "$0") { 12525 Words.clear(); 12526 SplitString(AsmPieces[2], Words, " \t,"); 12527 if (Words.size() == 3 && Words[0] == "rorw" && Words[1] == "$$8" && 12528 Words[2] == "${0:w}") { 12529 AsmPieces.clear(); 12530 const std::string &ConstraintsStr = IA->getConstraintString(); 12531 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ","); 12532 std::sort(AsmPieces.begin(), AsmPieces.end()); 12533 if (AsmPieces.size() == 4 && 12534 AsmPieces[0] == "~{cc}" && 12535 AsmPieces[1] == "~{dirflag}" && 12536 AsmPieces[2] == "~{flags}" && 12537 AsmPieces[3] == "~{fpsr}") { 12538 const IntegerType *Ty = dyn_cast<IntegerType>(CI->getType()); 12539 if (!Ty || Ty->getBitWidth() % 16 != 0) 12540 return false; 12541 return IntrinsicLowering::LowerToByteSwap(CI); 12542 } 12543 } 12544 } 12545 } 12546 } 12547 12548 if (CI->getType()->isIntegerTy(64)) { 12549 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints(); 12550 if (Constraints.size() >= 2 && 12551 Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" && 12552 Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") { 12553 // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64 12554 SmallVector<StringRef, 4> Words; 12555 SplitString(AsmPieces[0], Words, " \t"); 12556 if (Words.size() == 2 && Words[0] == "bswap" && Words[1] == "%eax") { 12557 Words.clear(); 12558 SplitString(AsmPieces[1], Words, " \t"); 12559 if (Words.size() == 2 && Words[0] == "bswap" && Words[1] == "%edx") { 12560 Words.clear(); 12561 SplitString(AsmPieces[2], Words, " \t,"); 12562 if (Words.size() == 3 && Words[0] == "xchgl" && Words[1] == "%eax" && 12563 Words[2] == "%edx") { 12564 const IntegerType *Ty = dyn_cast<IntegerType>(CI->getType()); 12565 if (!Ty || Ty->getBitWidth() % 16 != 0) 12566 return false; 12567 return IntrinsicLowering::LowerToByteSwap(CI); 12568 } 12569 } 12570 } 12571 } 12572 } 12573 break; 12574 } 12575 return false; 12576} 12577 12578 12579 
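// Illustrative example for the multi-instruction case handled above (assumed,
// not from the original source): a 32-bit i64 byte swap is often written as
//   %r = call i64 asm "bswap %eax\0Abswap %edx\0Axchgl %eax, %edx", "=A,0"(i64 %x)
// The "A" constraint (the EDX:EAX pair), the matching "0" constraint, and the
// three recognized instructions let ExpandInlineAsm turn it into
// llvm.bswap.i64 as well.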
12580/// getConstraintType - Given a constraint letter, return the type of 12581/// constraint it is for this target. 12582X86TargetLowering::ConstraintType 12583X86TargetLowering::getConstraintType(const std::string &Constraint) const { 12584 if (Constraint.size() == 1) { 12585 switch (Constraint[0]) { 12586 case 'R': 12587 case 'q': 12588 case 'Q': 12589 case 'f': 12590 case 't': 12591 case 'u': 12592 case 'y': 12593 case 'x': 12594 case 'Y': 12595 case 'l': 12596 return C_RegisterClass; 12597 case 'a': 12598 case 'b': 12599 case 'c': 12600 case 'd': 12601 case 'S': 12602 case 'D': 12603 case 'A': 12604 return C_Register; 12605 case 'I': 12606 case 'J': 12607 case 'K': 12608 case 'L': 12609 case 'M': 12610 case 'N': 12611 case 'G': 12612 case 'C': 12613 case 'e': 12614 case 'Z': 12615 return C_Other; 12616 default: 12617 break; 12618 } 12619 } 12620 return TargetLowering::getConstraintType(Constraint); 12621} 12622 12623/// Examine constraint type and operand type and determine a weight value. 12624/// This object must already have been set up with the operand type 12625/// and the current alternative constraint selected. 12626TargetLowering::ConstraintWeight 12627 X86TargetLowering::getSingleConstraintMatchWeight( 12628 AsmOperandInfo &info, const char *constraint) const { 12629 ConstraintWeight weight = CW_Invalid; 12630 Value *CallOperandVal = info.CallOperandVal; 12631 // If we don't have a value, we can't do a match, 12632 // but allow it at the lowest weight. 12633 if (CallOperandVal == NULL) 12634 return CW_Default; 12635 const Type *type = CallOperandVal->getType(); 12636 // Look at the constraint type. 12637 switch (*constraint) { 12638 default: 12639 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 12640 case 'R': 12641 case 'q': 12642 case 'Q': 12643 case 'a': 12644 case 'b': 12645 case 'c': 12646 case 'd': 12647 case 'S': 12648 case 'D': 12649 case 'A': 12650 if (CallOperandVal->getType()->isIntegerTy()) 12651 weight = CW_SpecificReg; 12652 break; 12653 case 'f': 12654 case 't': 12655 case 'u': 12656 if (type->isFloatingPointTy()) 12657 weight = CW_SpecificReg; 12658 break; 12659 case 'y': 12660 if (type->isX86_MMXTy() && Subtarget->hasMMX()) 12661 weight = CW_SpecificReg; 12662 break; 12663 case 'x': 12664 case 'Y': 12665 if ((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasXMM()) 12666 weight = CW_Register; 12667 break; 12668 case 'I': 12669 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) { 12670 if (C->getZExtValue() <= 31) 12671 weight = CW_Constant; 12672 } 12673 break; 12674 case 'J': 12675 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 12676 if (C->getZExtValue() <= 63) 12677 weight = CW_Constant; 12678 } 12679 break; 12680 case 'K': 12681 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 12682 if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f)) 12683 weight = CW_Constant; 12684 } 12685 break; 12686 case 'L': 12687 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 12688 if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff)) 12689 weight = CW_Constant; 12690 } 12691 break; 12692 case 'M': 12693 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 12694 if (C->getZExtValue() <= 3) 12695 weight = CW_Constant; 12696 } 12697 break; 12698 case 'N': 12699 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 12700 if (C->getZExtValue() <= 0xff) 12701 weight = CW_Constant; 12702 } 12703 break; 12704 case 'G': 12705 case 'C': 12706 if 
(dyn_cast<ConstantFP>(CallOperandVal)) { 12707 weight = CW_Constant; 12708 } 12709 break; 12710 case 'e': 12711 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 12712 if ((C->getSExtValue() >= -0x80000000LL) && 12713 (C->getSExtValue() <= 0x7fffffffLL)) 12714 weight = CW_Constant; 12715 } 12716 break; 12717 case 'Z': 12718 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 12719 if (C->getZExtValue() <= 0xffffffff) 12720 weight = CW_Constant; 12721 } 12722 break; 12723 } 12724 return weight; 12725} 12726 12727/// LowerXConstraint - try to replace an X constraint, which matches anything, 12728/// with another that has more specific requirements based on the type of the 12729/// corresponding operand. 12730const char *X86TargetLowering:: 12731LowerXConstraint(EVT ConstraintVT) const { 12732 // FP X constraints get lowered to SSE1/2 registers if available, otherwise 12733 // 'f' like normal targets. 12734 if (ConstraintVT.isFloatingPoint()) { 12735 if (Subtarget->hasXMMInt()) 12736 return "Y"; 12737 if (Subtarget->hasXMM()) 12738 return "x"; 12739 } 12740 12741 return TargetLowering::LowerXConstraint(ConstraintVT); 12742} 12743 12744/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 12745/// vector. If it is invalid, don't add anything to Ops. 12746void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op, 12747 std::string &Constraint, 12748 std::vector<SDValue>&Ops, 12749 SelectionDAG &DAG) const { 12750 SDValue Result(0, 0); 12751 12752 // Only support length 1 constraints for now. 12753 if (Constraint.length() > 1) return; 12754 12755 char ConstraintLetter = Constraint[0]; 12756 switch (ConstraintLetter) { 12757 default: break; 12758 case 'I': 12759 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 12760 if (C->getZExtValue() <= 31) { 12761 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType()); 12762 break; 12763 } 12764 } 12765 return; 12766 case 'J': 12767 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 12768 if (C->getZExtValue() <= 63) { 12769 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType()); 12770 break; 12771 } 12772 } 12773 return; 12774 case 'K': 12775 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 12776 if ((int8_t)C->getSExtValue() == C->getSExtValue()) { 12777 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType()); 12778 break; 12779 } 12780 } 12781 return; 12782 case 'N': 12783 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 12784 if (C->getZExtValue() <= 255) { 12785 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType()); 12786 break; 12787 } 12788 } 12789 return; 12790 case 'e': { 12791 // 32-bit signed value 12792 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 12793 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()), 12794 C->getSExtValue())) { 12795 // Widen to 64 bits here to get it sign extended. 12796 Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64); 12797 break; 12798 } 12799 // FIXME gcc accepts some relocatable values here too, but only in certain 12800 // memory models; it's complicated. 
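      // Illustrative example (assumed): with
      //   asm("addq %1, %0" : "+r"(v) : "e"(C))
      // C == 12345 is accepted because it fits in a sign-extended 32-bit
      // immediate, while a constant such as 0x100000000 fails the check above
      // and the operand is rejected.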
12801 } 12802 return; 12803 } 12804 case 'Z': { 12805 // 32-bit unsigned value 12806 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { 12807 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()), 12808 C->getZExtValue())) { 12809 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType()); 12810 break; 12811 } 12812 } 12813 // FIXME gcc accepts some relocatable values here too, but only in certain 12814 // memory models; it's complicated. 12815 return; 12816 } 12817 case 'i': { 12818 // Literal immediates are always ok. 12819 if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) { 12820 // Widen to 64 bits here to get it sign extended. 12821 Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64); 12822 break; 12823 } 12824 12825 // In any sort of PIC mode addresses need to be computed at runtime by 12826 // adding in a register or some sort of table lookup. These can't 12827 // be used as immediates. 12828 if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC()) 12829 return; 12830 12831 // If we are in non-pic codegen mode, we allow the address of a global (with 12832 // an optional displacement) to be used with 'i'. 12833 GlobalAddressSDNode *GA = 0; 12834 int64_t Offset = 0; 12835 12836 // Match either (GA), (GA+C), (GA+C1+C2), etc. 12837 while (1) { 12838 if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) { 12839 Offset += GA->getOffset(); 12840 break; 12841 } else if (Op.getOpcode() == ISD::ADD) { 12842 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 12843 Offset += C->getZExtValue(); 12844 Op = Op.getOperand(0); 12845 continue; 12846 } 12847 } else if (Op.getOpcode() == ISD::SUB) { 12848 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 12849 Offset += -C->getZExtValue(); 12850 Op = Op.getOperand(0); 12851 continue; 12852 } 12853 } 12854 12855 // Otherwise, this isn't something we can handle, reject it. 12856 return; 12857 } 12858 12859 const GlobalValue *GV = GA->getGlobal(); 12860 // If we require an extra load to get this address, as in PIC mode, we 12861 // can't accept it. 12862 if (isGlobalStubReference(Subtarget->ClassifyGlobalReference(GV, 12863 getTargetMachine()))) 12864 return; 12865 12866 Result = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(), 12867 GA->getValueType(0), Offset); 12868 break; 12869 } 12870 } 12871 12872 if (Result.getNode()) { 12873 Ops.push_back(Result); 12874 return; 12875 } 12876 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 12877} 12878 12879std::pair<unsigned, const TargetRegisterClass*> 12880X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, 12881 EVT VT) const { 12882 // First, see if this is a constraint that directly corresponds to an LLVM 12883 // register class. 12884 if (Constraint.size() == 1) { 12885 // GCC Constraint Letters 12886 switch (Constraint[0]) { 12887 default: break; 12888 // TODO: Slight differences here in allocation order and leaving 12889 // RIP in the class. Do they matter any more here than they do 12890 // in the normal allocation? 12891 case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode. 
12892 if (Subtarget->is64Bit()) { 12893 if (VT == MVT::i32) 12894 return std::make_pair(0U, X86::GR32RegisterClass); 12895 else if (VT == MVT::i16) 12896 return std::make_pair(0U, X86::GR16RegisterClass); 12897 else if (VT == MVT::i8) 12898 return std::make_pair(0U, X86::GR8RegisterClass); 12899 else if (VT == MVT::i64) 12900 return std::make_pair(0U, X86::GR64RegisterClass); 12901 break; 12902 } 12903 // 32-bit fallthrough 12904 case 'Q': // Q_REGS 12905 if (VT == MVT::i32) 12906 return std::make_pair(0U, X86::GR32_ABCDRegisterClass); 12907 else if (VT == MVT::i16) 12908 return std::make_pair(0U, X86::GR16_ABCDRegisterClass); 12909 else if (VT == MVT::i8) 12910 return std::make_pair(0U, X86::GR8_ABCD_LRegisterClass); 12911 else if (VT == MVT::i64) 12912 return std::make_pair(0U, X86::GR64_ABCDRegisterClass); 12913 break; 12914 case 'r': // GENERAL_REGS 12915 case 'l': // INDEX_REGS 12916 if (VT == MVT::i8) 12917 return std::make_pair(0U, X86::GR8RegisterClass); 12918 if (VT == MVT::i16) 12919 return std::make_pair(0U, X86::GR16RegisterClass); 12920 if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit()) 12921 return std::make_pair(0U, X86::GR32RegisterClass); 12922 return std::make_pair(0U, X86::GR64RegisterClass); 12923 case 'R': // LEGACY_REGS 12924 if (VT == MVT::i8) 12925 return std::make_pair(0U, X86::GR8_NOREXRegisterClass); 12926 if (VT == MVT::i16) 12927 return std::make_pair(0U, X86::GR16_NOREXRegisterClass); 12928 if (VT == MVT::i32 || !Subtarget->is64Bit()) 12929 return std::make_pair(0U, X86::GR32_NOREXRegisterClass); 12930 return std::make_pair(0U, X86::GR64_NOREXRegisterClass); 12931 case 'f': // FP Stack registers. 12932 // If SSE is enabled for this VT, use f80 to ensure the isel moves the 12933 // value to the correct fpstack register class. 12934 if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT)) 12935 return std::make_pair(0U, X86::RFP32RegisterClass); 12936 if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT)) 12937 return std::make_pair(0U, X86::RFP64RegisterClass); 12938 return std::make_pair(0U, X86::RFP80RegisterClass); 12939 case 'y': // MMX_REGS if MMX allowed. 12940 if (!Subtarget->hasMMX()) break; 12941 return std::make_pair(0U, X86::VR64RegisterClass); 12942 case 'Y': // SSE_REGS if SSE2 allowed 12943 if (!Subtarget->hasXMMInt()) break; 12944 // FALL THROUGH. 12945 case 'x': // SSE_REGS if SSE1 allowed 12946 if (!Subtarget->hasXMM()) break; 12947 12948 switch (VT.getSimpleVT().SimpleTy) { 12949 default: break; 12950 // Scalar SSE types. 12951 case MVT::f32: 12952 case MVT::i32: 12953 return std::make_pair(0U, X86::FR32RegisterClass); 12954 case MVT::f64: 12955 case MVT::i64: 12956 return std::make_pair(0U, X86::FR64RegisterClass); 12957 // Vector types. 12958 case MVT::v16i8: 12959 case MVT::v8i16: 12960 case MVT::v4i32: 12961 case MVT::v2i64: 12962 case MVT::v4f32: 12963 case MVT::v2f64: 12964 return std::make_pair(0U, X86::VR128RegisterClass); 12965 } 12966 break; 12967 } 12968 } 12969 12970 // Use the default implementation in TargetLowering to convert the register 12971 // constraint into a member of a register class. 12972 std::pair<unsigned, const TargetRegisterClass*> Res; 12973 Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 12974 12975 // Not found as a standard register? 
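  // Illustrative examples for the special cases below (assumed): "{st(1)}"
  // maps to X86::ST1 in RFP80RegisterClass, "{flags}" maps to X86::EFLAGS in
  // CCRRegisterClass, and the "A" constraint is modeled as X86::EAX in
  // GR32_ADRegisterClass (the EAX/EDX pair).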
12976 if (Res.second == 0) { 12977 // Map st(0) -> st(7) -> ST0 12978 if (Constraint.size() == 7 && Constraint[0] == '{' && 12979 tolower(Constraint[1]) == 's' && 12980 tolower(Constraint[2]) == 't' && 12981 Constraint[3] == '(' && 12982 (Constraint[4] >= '0' && Constraint[4] <= '7') && 12983 Constraint[5] == ')' && 12984 Constraint[6] == '}') { 12985 12986 Res.first = X86::ST0+Constraint[4]-'0'; 12987 Res.second = X86::RFP80RegisterClass; 12988 return Res; 12989 } 12990 12991 // GCC allows "st(0)" to be called just plain "st". 12992 if (StringRef("{st}").equals_lower(Constraint)) { 12993 Res.first = X86::ST0; 12994 Res.second = X86::RFP80RegisterClass; 12995 return Res; 12996 } 12997 12998 // flags -> EFLAGS 12999 if (StringRef("{flags}").equals_lower(Constraint)) { 13000 Res.first = X86::EFLAGS; 13001 Res.second = X86::CCRRegisterClass; 13002 return Res; 13003 } 13004 13005 // 'A' means EAX + EDX. 13006 if (Constraint == "A") { 13007 Res.first = X86::EAX; 13008 Res.second = X86::GR32_ADRegisterClass; 13009 return Res; 13010 } 13011 return Res; 13012 } 13013 13014 // Otherwise, check to see if this is a register class of the wrong value 13015 // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to 13016 // turn into {ax},{dx}. 13017 if (Res.second->hasType(VT)) 13018 return Res; // Correct type already, nothing to do. 13019 13020 // All of the single-register GCC register classes map their values onto 13021 // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we 13022 // really want an 8-bit or 32-bit register, map to the appropriate register 13023 // class and return the appropriate register. 13024 if (Res.second == X86::GR16RegisterClass) { 13025 if (VT == MVT::i8) { 13026 unsigned DestReg = 0; 13027 switch (Res.first) { 13028 default: break; 13029 case X86::AX: DestReg = X86::AL; break; 13030 case X86::DX: DestReg = X86::DL; break; 13031 case X86::CX: DestReg = X86::CL; break; 13032 case X86::BX: DestReg = X86::BL; break; 13033 } 13034 if (DestReg) { 13035 Res.first = DestReg; 13036 Res.second = X86::GR8RegisterClass; 13037 } 13038 } else if (VT == MVT::i32) { 13039 unsigned DestReg = 0; 13040 switch (Res.first) { 13041 default: break; 13042 case X86::AX: DestReg = X86::EAX; break; 13043 case X86::DX: DestReg = X86::EDX; break; 13044 case X86::CX: DestReg = X86::ECX; break; 13045 case X86::BX: DestReg = X86::EBX; break; 13046 case X86::SI: DestReg = X86::ESI; break; 13047 case X86::DI: DestReg = X86::EDI; break; 13048 case X86::BP: DestReg = X86::EBP; break; 13049 case X86::SP: DestReg = X86::ESP; break; 13050 } 13051 if (DestReg) { 13052 Res.first = DestReg; 13053 Res.second = X86::GR32RegisterClass; 13054 } 13055 } else if (VT == MVT::i64) { 13056 unsigned DestReg = 0; 13057 switch (Res.first) { 13058 default: break; 13059 case X86::AX: DestReg = X86::RAX; break; 13060 case X86::DX: DestReg = X86::RDX; break; 13061 case X86::CX: DestReg = X86::RCX; break; 13062 case X86::BX: DestReg = X86::RBX; break; 13063 case X86::SI: DestReg = X86::RSI; break; 13064 case X86::DI: DestReg = X86::RDI; break; 13065 case X86::BP: DestReg = X86::RBP; break; 13066 case X86::SP: DestReg = X86::RSP; break; 13067 } 13068 if (DestReg) { 13069 Res.first = DestReg; 13070 Res.second = X86::GR64RegisterClass; 13071 } 13072 } 13073 } else if (Res.second == X86::FR32RegisterClass || 13074 Res.second == X86::FR64RegisterClass || 13075 Res.second == X86::VR128RegisterClass) { 13076 // Handle references to XMM physical registers that got mapped into the 13077 // wrong class. 
This can happen with constraints like {xmm0} where the 13078 // target independent register mapper will just pick the first match it can 13079 // find, ignoring the required type. 13080 if (VT == MVT::f32) 13081 Res.second = X86::FR32RegisterClass; 13082 else if (VT == MVT::f64) 13083 Res.second = X86::FR64RegisterClass; 13084 else if (X86::VR128RegisterClass->hasType(VT)) 13085 Res.second = X86::VR128RegisterClass; 13086 } 13087 13088 return Res; 13089} 13090
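// Illustrative example for the XMM remapping above (assumed, not from the
// original source): an "{xmm0}" operand carrying an f32 value may initially
// resolve to (X86::XMM0, VR128RegisterClass); the code above narrows the
// register class to FR32RegisterClass so the operand is handled as a scalar
// SSE value of the requested width.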