X86ISelLowering.cpp revision f6c0747ae326a5ee83b846d40f6c657cf43a1e21
//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "x86-isel"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "Utils/X86ShuffleDecode.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalAlias.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/VariadicFunction.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;
using namespace dwarf;

STATISTIC(NumTailCalls, "Number of tail calls");

// Forward declarations.
static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
                       SDValue V2);

static SDValue Insert128BitVector(SDValue Result,
                                  SDValue Vec,
                                  SDValue Idx,
                                  SelectionDAG &DAG,
                                  DebugLoc dl);

static SDValue Extract128BitVector(SDValue Vec,
                                   SDValue Idx,
                                   SelectionDAG &DAG,
                                   DebugLoc dl);

/// Generate a DAG to grab 128 bits from a vector > 128 bits.  This
/// sets things up to match to an AVX VEXTRACTF128 instruction or a
/// simple subregister reference.  Idx is an index in the 128 bits we
/// want.  It need not be aligned to a 128-bit boundary.  That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
static SDValue Extract128BitVector(SDValue Vec,
                                   SDValue Idx,
                                   SelectionDAG &DAG,
                                   DebugLoc dl) {
  EVT VT = Vec.getValueType();
  assert(VT.getSizeInBits() == 256 && "Unexpected vector size!");
  EVT ElVT = VT.getVectorElementType();
  int Factor = VT.getSizeInBits()/128;
  EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
                                  VT.getVectorNumElements()/Factor);

  // Extract from UNDEF is UNDEF.
  if (Vec.getOpcode() == ISD::UNDEF)
    return DAG.getNode(ISD::UNDEF, dl, ResultVT);

  if (isa<ConstantSDNode>(Idx)) {
    unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();

    // Extract the relevant 128 bits.  Generate an EXTRACT_SUBVECTOR
    // we can match to VEXTRACTF128.
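    // Worked example (illustrative): for a v8i32 source, ElVT is i32, so
    // ElemsPerChunk below is 128/32 = 4.  Asking for element index 5 gives
    // NormalizedIdxVal = ((5*32)/128)*4 = 4, i.e. the subvector extract starts
    // at element 4 -- the upper 128-bit half, which VEXTRACTF128 can produce.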
    unsigned ElemsPerChunk = 128 / ElVT.getSizeInBits();

    // This is the index of the first element of the 128-bit chunk
    // we want.
    unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / 128)
                                 * ElemsPerChunk);

    SDValue VecIdx = DAG.getConstant(NormalizedIdxVal, MVT::i32);
    SDValue Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec,
                                 VecIdx);

    return Result;
  }

  return SDValue();
}

/// Generate a DAG to put 128 bits into a vector > 128 bits.  This
/// sets things up to match to an AVX VINSERTF128 instruction or a
/// simple superregister reference.  Idx is an index in the 128 bits
/// we want.  It need not be aligned to a 128-bit boundary.  That makes
/// lowering INSERT_VECTOR_ELT operations easier.
static SDValue Insert128BitVector(SDValue Result,
                                  SDValue Vec,
                                  SDValue Idx,
                                  SelectionDAG &DAG,
                                  DebugLoc dl) {
  if (isa<ConstantSDNode>(Idx)) {
    EVT VT = Vec.getValueType();
    assert(VT.getSizeInBits() == 128 && "Unexpected vector size!");

    EVT ElVT = VT.getVectorElementType();
    unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
    EVT ResultVT = Result.getValueType();

    // Insert the relevant 128 bits.
    unsigned ElemsPerChunk = 128/ElVT.getSizeInBits();

    // This is the index of the first element of the 128-bit chunk
    // we want.
    unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/128)
                                 * ElemsPerChunk);

    SDValue VecIdx = DAG.getConstant(NormalizedIdxVal, MVT::i32);
    Result = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec,
                         VecIdx);
    return Result;
  }

  return SDValue();
}

static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  bool is64Bit = Subtarget->is64Bit();

  if (Subtarget->isTargetEnvMacho()) {
    if (is64Bit)
      return new X8664_MachoTargetObjectFile();
    return new TargetLoweringObjectFileMachO();
  }

  if (Subtarget->isTargetELF())
    return new TargetLoweringObjectFileELF();
  if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho())
    return new TargetLoweringObjectFileCOFF();
  llvm_unreachable("unknown subtarget type");
}

X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
  : TargetLowering(TM, createTLOF(TM)) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;

  RegInfo = TM.getRegisterInfo();
  TD = getTargetData();

  // Set up the TargetLowering object.
  static MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };

  // X86 is weird: it always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);
  // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // For 64-bit, since we have so many registers, use the ILP scheduler; for
  // 32-bit code, use the register-pressure-specific scheduling.
  if (Subtarget->is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
  setStackPointerRegisterToSaveRestore(X86StackPtr);

  if (Subtarget->isTargetWindows() && !Subtarget->isTargetCygMing()) {
    // Set up Windows compiler runtime calls.
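    // (_alldiv and friends are the MSVC runtime's helpers for 64-bit integer
    // division, remainder and multiplication on x86-32; _ftol2 performs the
    // FP-to-64-bit-integer conversion.)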
    setLibcallName(RTLIB::SDIV_I64, "_alldiv");
    setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
    setLibcallName(RTLIB::SREM_I64, "_allrem");
    setLibcallName(RTLIB::UREM_I64, "_aullrem");
    setLibcallName(RTLIB::MUL_I64, "_allmul");
    setLibcallName(RTLIB::FPTOUINT_F64_I64, "_ftol2");
    setLibcallName(RTLIB::FPTOUINT_F32_I64, "_ftol2");
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::FPTOUINT_F64_I64, CallingConv::C);
    setLibcallCallingConv(RTLIB::FPTOUINT_F32_I64, CallingConv::C);
  }

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetMingw()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, X86::GR8RegisterClass);
  addRegisterClass(MVT::i16, X86::GR16RegisterClass);
  addRegisterClass(MVT::i32, X86::GR32RegisterClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, X86::GR64RegisterClass);

  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  // SETOEQ and SETUNE require checking two conditions.
  setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);
  setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote);
  setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
    setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
  } else if (!TM.Options.UseSoftFloat) {
    // We have an algorithm for SSE2->double, and we turn this into a
    // 64-bit FILD followed by conditional FADD for other targets.
    setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD for other targets.
    setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Custom);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
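  // Here "Promote" means the i1/i8 operand is first sign-extended to a wider
  // integer (i32) and the conversion is done from that type, since x86 has no
  // direct i8-to-FP convert instruction.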
270 setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote); 271 setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote); 272 273 if (!TM.Options.UseSoftFloat) { 274 // SSE has no i16 to fp conversion, only i32 275 if (X86ScalarSSEf32) { 276 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote); 277 // f32 and f64 cases are Legal, f80 case is not 278 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom); 279 } else { 280 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Custom); 281 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom); 282 } 283 } else { 284 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote); 285 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Promote); 286 } 287 288 // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64 289 // are Legal, f80 is custom lowered. 290 setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom); 291 setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom); 292 293 // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTS's, as X86 doesn't have 294 // this operation. 295 setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote); 296 setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote); 297 298 if (X86ScalarSSEf32) { 299 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote); 300 // f32 and f64 cases are Legal, f80 case is not 301 setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom); 302 } else { 303 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Custom); 304 setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom); 305 } 306 307 // Handle FP_TO_UINT by promoting the destination to a larger signed 308 // conversion. 309 setOperationAction(ISD::FP_TO_UINT , MVT::i1 , Promote); 310 setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote); 311 setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote); 312 313 if (Subtarget->is64Bit()) { 314 setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Expand); 315 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote); 316 } else if (!TM.Options.UseSoftFloat) { 317 // Since AVX is a superset of SSE3, only check for SSE here. 318 if (Subtarget->hasSSE1() && !Subtarget->hasSSE3()) 319 // Expand FP_TO_UINT into a select. 320 // FIXME: We would like to use a Custom expander here eventually to do 321 // the optimal thing for SSE vs. the default expansion in the legalizer. 322 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Expand); 323 else 324 // With SSE3 we can use fisttpll to convert to a signed i64; without 325 // SSE, we're stuck with a fistpll. 326 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Custom); 327 } 328 329 // TODO: when we have SSE, these could be more efficient, by using movd/movq. 330 if (!X86ScalarSSEf64) { 331 setOperationAction(ISD::BITCAST , MVT::f32 , Expand); 332 setOperationAction(ISD::BITCAST , MVT::i32 , Expand); 333 if (Subtarget->is64Bit()) { 334 setOperationAction(ISD::BITCAST , MVT::f64 , Expand); 335 // Without SSE, i64->f64 goes through memory. 336 setOperationAction(ISD::BITCAST , MVT::i64 , Expand); 337 } 338 } 339 340 // Scalar integer divide and remainder are lowered to use operations that 341 // produce two results, to match the available instructions. This exposes 342 // the two-result form to trivial CSE, which is able to combine x/y and x%y 343 // into a single instruction. 344 // 345 // Scalar integer multiply-high is also lowered to use two-result 346 // operations, to match the available instructions. However, plain multiply 347 // (low) operations are left as Legal, as there are single-result 348 // instructions for this in x86. 
Using the two-result multiply instructions 349 // when both high and low results are needed must be arranged by dagcombine. 350 for (unsigned i = 0, e = 4; i != e; ++i) { 351 MVT VT = IntVTs[i]; 352 setOperationAction(ISD::MULHS, VT, Expand); 353 setOperationAction(ISD::MULHU, VT, Expand); 354 setOperationAction(ISD::SDIV, VT, Expand); 355 setOperationAction(ISD::UDIV, VT, Expand); 356 setOperationAction(ISD::SREM, VT, Expand); 357 setOperationAction(ISD::UREM, VT, Expand); 358 359 // Add/Sub overflow ops with MVT::Glues are lowered to EFLAGS dependences. 360 setOperationAction(ISD::ADDC, VT, Custom); 361 setOperationAction(ISD::ADDE, VT, Custom); 362 setOperationAction(ISD::SUBC, VT, Custom); 363 setOperationAction(ISD::SUBE, VT, Custom); 364 } 365 366 setOperationAction(ISD::BR_JT , MVT::Other, Expand); 367 setOperationAction(ISD::BRCOND , MVT::Other, Custom); 368 setOperationAction(ISD::BR_CC , MVT::Other, Expand); 369 setOperationAction(ISD::SELECT_CC , MVT::Other, Expand); 370 if (Subtarget->is64Bit()) 371 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal); 372 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Legal); 373 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal); 374 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand); 375 setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand); 376 setOperationAction(ISD::FREM , MVT::f32 , Expand); 377 setOperationAction(ISD::FREM , MVT::f64 , Expand); 378 setOperationAction(ISD::FREM , MVT::f80 , Expand); 379 setOperationAction(ISD::FLT_ROUNDS_ , MVT::i32 , Custom); 380 381 // Promote the i8 variants and force them on up to i32 which has a shorter 382 // encoding. 383 setOperationAction(ISD::CTTZ , MVT::i8 , Promote); 384 AddPromotedToType (ISD::CTTZ , MVT::i8 , MVT::i32); 385 setOperationAction(ISD::CTTZ_ZERO_UNDEF , MVT::i8 , Promote); 386 AddPromotedToType (ISD::CTTZ_ZERO_UNDEF , MVT::i8 , MVT::i32); 387 if (Subtarget->hasBMI()) { 388 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16 , Expand); 389 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32 , Expand); 390 if (Subtarget->is64Bit()) 391 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand); 392 } else { 393 setOperationAction(ISD::CTTZ , MVT::i16 , Custom); 394 setOperationAction(ISD::CTTZ , MVT::i32 , Custom); 395 if (Subtarget->is64Bit()) 396 setOperationAction(ISD::CTTZ , MVT::i64 , Custom); 397 } 398 399 if (Subtarget->hasLZCNT()) { 400 // When promoting the i8 variants, force them to i32 for a shorter 401 // encoding. 
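    // LZCNT produces a well-defined result even for a zero input, so the
    // CTLZ_ZERO_UNDEF variants can simply be expanded to plain CTLZ here,
    // while the non-LZCNT path below needs custom BSR-based lowering.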
402 setOperationAction(ISD::CTLZ , MVT::i8 , Promote); 403 AddPromotedToType (ISD::CTLZ , MVT::i8 , MVT::i32); 404 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Promote); 405 AddPromotedToType (ISD::CTLZ_ZERO_UNDEF, MVT::i8 , MVT::i32); 406 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Expand); 407 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Expand); 408 if (Subtarget->is64Bit()) 409 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand); 410 } else { 411 setOperationAction(ISD::CTLZ , MVT::i8 , Custom); 412 setOperationAction(ISD::CTLZ , MVT::i16 , Custom); 413 setOperationAction(ISD::CTLZ , MVT::i32 , Custom); 414 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Custom); 415 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Custom); 416 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Custom); 417 if (Subtarget->is64Bit()) { 418 setOperationAction(ISD::CTLZ , MVT::i64 , Custom); 419 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom); 420 } 421 } 422 423 if (Subtarget->hasPOPCNT()) { 424 setOperationAction(ISD::CTPOP , MVT::i8 , Promote); 425 } else { 426 setOperationAction(ISD::CTPOP , MVT::i8 , Expand); 427 setOperationAction(ISD::CTPOP , MVT::i16 , Expand); 428 setOperationAction(ISD::CTPOP , MVT::i32 , Expand); 429 if (Subtarget->is64Bit()) 430 setOperationAction(ISD::CTPOP , MVT::i64 , Expand); 431 } 432 433 setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom); 434 setOperationAction(ISD::BSWAP , MVT::i16 , Expand); 435 436 // These should be promoted to a larger select which is supported. 437 setOperationAction(ISD::SELECT , MVT::i1 , Promote); 438 // X86 wants to expand cmov itself. 439 setOperationAction(ISD::SELECT , MVT::i8 , Custom); 440 setOperationAction(ISD::SELECT , MVT::i16 , Custom); 441 setOperationAction(ISD::SELECT , MVT::i32 , Custom); 442 setOperationAction(ISD::SELECT , MVT::f32 , Custom); 443 setOperationAction(ISD::SELECT , MVT::f64 , Custom); 444 setOperationAction(ISD::SELECT , MVT::f80 , Custom); 445 setOperationAction(ISD::SETCC , MVT::i8 , Custom); 446 setOperationAction(ISD::SETCC , MVT::i16 , Custom); 447 setOperationAction(ISD::SETCC , MVT::i32 , Custom); 448 setOperationAction(ISD::SETCC , MVT::f32 , Custom); 449 setOperationAction(ISD::SETCC , MVT::f64 , Custom); 450 setOperationAction(ISD::SETCC , MVT::f80 , Custom); 451 if (Subtarget->is64Bit()) { 452 setOperationAction(ISD::SELECT , MVT::i64 , Custom); 453 setOperationAction(ISD::SETCC , MVT::i64 , Custom); 454 } 455 setOperationAction(ISD::EH_RETURN , MVT::Other, Custom); 456 457 // Darwin ABI issue. 
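  // These address nodes are custom lowered so the proper wrapper node (and,
  // for 32-bit PIC, the PIC base register) can be attached per target flavor.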
  setOperationAction(ISD::ConstantPool , MVT::i32 , Custom);
  setOperationAction(ISD::JumpTable , MVT::i32 , Custom);
  setOperationAction(ISD::GlobalAddress , MVT::i32 , Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32 , Custom);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::ExternalSymbol , MVT::i32 , Custom);
  setOperationAction(ISD::BlockAddress , MVT::i32 , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool , MVT::i64 , Custom);
    setOperationAction(ISD::JumpTable , MVT::i64 , Custom);
    setOperationAction(ISD::GlobalAddress , MVT::i64 , Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64 , Custom);
    setOperationAction(ISD::BlockAddress , MVT::i64 , Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS , MVT::i32 , Custom);
  setOperationAction(ISD::SRA_PARTS , MVT::i32 , Custom);
  setOperationAction(ISD::SRL_PARTS , MVT::i32 , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS , MVT::i64 , Custom);
    setOperationAction(ISD::SRA_PARTS , MVT::i64 , Custom);
    setOperationAction(ISD::SRL_PARTS , MVT::i64 , Custom);
  }

  if (Subtarget->hasSSE1())
    setOperationAction(ISD::PREFETCH , MVT::Other, Legal);

  setOperationAction(ISD::MEMBARRIER , MVT::Other, Custom);
  setOperationAction(ISD::ATOMIC_FENCE , MVT::Other, Custom);

  // On X86 and X86-64, atomic operations are lowered to locked instructions.
  // Locked instructions, in turn, have implicit fence semantics (all memory
  // operations are flushed before issuing the locked instruction, and they
  // are not buffered), so we can fold away the common pattern of
  // fence-atomic-fence.
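  // For example, "fence; atomicrmw add; fence" can be emitted as a single
  // LOCK-prefixed add, because the LOCK prefix already provides the ordering
  // the fences ask for.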
494 setShouldFoldAtomicFences(true); 495 496 // Expand certain atomics 497 for (unsigned i = 0, e = 4; i != e; ++i) { 498 MVT VT = IntVTs[i]; 499 setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Custom); 500 setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom); 501 setOperationAction(ISD::ATOMIC_STORE, VT, Custom); 502 } 503 504 if (!Subtarget->is64Bit()) { 505 setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom); 506 setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom); 507 setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom); 508 setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom); 509 setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Custom); 510 setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Custom); 511 setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Custom); 512 setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Custom); 513 } 514 515 if (Subtarget->hasCmpxchg16b()) { 516 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i128, Custom); 517 } 518 519 // FIXME - use subtarget debug flags 520 if (!Subtarget->isTargetDarwin() && 521 !Subtarget->isTargetELF() && 522 !Subtarget->isTargetCygMing()) { 523 setOperationAction(ISD::EH_LABEL, MVT::Other, Expand); 524 } 525 526 setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand); 527 setOperationAction(ISD::EHSELECTION, MVT::i64, Expand); 528 setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand); 529 setOperationAction(ISD::EHSELECTION, MVT::i32, Expand); 530 if (Subtarget->is64Bit()) { 531 setExceptionPointerRegister(X86::RAX); 532 setExceptionSelectorRegister(X86::RDX); 533 } else { 534 setExceptionPointerRegister(X86::EAX); 535 setExceptionSelectorRegister(X86::EDX); 536 } 537 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom); 538 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom); 539 540 setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom); 541 setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom); 542 543 setOperationAction(ISD::TRAP, MVT::Other, Legal); 544 545 // VASTART needs to be custom lowered to use the VarArgsFrameIndex 546 setOperationAction(ISD::VASTART , MVT::Other, Custom); 547 setOperationAction(ISD::VAEND , MVT::Other, Expand); 548 if (Subtarget->is64Bit()) { 549 setOperationAction(ISD::VAARG , MVT::Other, Custom); 550 setOperationAction(ISD::VACOPY , MVT::Other, Custom); 551 } else { 552 setOperationAction(ISD::VAARG , MVT::Other, Expand); 553 setOperationAction(ISD::VACOPY , MVT::Other, Expand); 554 } 555 556 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); 557 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); 558 559 if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho()) 560 setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ? 561 MVT::i64 : MVT::i32, Custom); 562 else if (TM.Options.EnableSegmentedStacks) 563 setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ? 564 MVT::i64 : MVT::i32, Custom); 565 else 566 setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ? 567 MVT::i64 : MVT::i32, Expand); 568 569 if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) { 570 // f32 and f64 use SSE. 571 // Set up the FP register classes. 572 addRegisterClass(MVT::f32, X86::FR32RegisterClass); 573 addRegisterClass(MVT::f64, X86::FR64RegisterClass); 574 575 // Use ANDPD to simulate FABS. 576 setOperationAction(ISD::FABS , MVT::f64, Custom); 577 setOperationAction(ISD::FABS , MVT::f32, Custom); 578 579 // Use XORP to simulate FNEG. 
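    // (i.e. flip the sign bit by XORing with a constant mask such as
    // 0x8000000000000000 for f64; no dedicated FP negate instruction is used.)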
580 setOperationAction(ISD::FNEG , MVT::f64, Custom); 581 setOperationAction(ISD::FNEG , MVT::f32, Custom); 582 583 // Use ANDPD and ORPD to simulate FCOPYSIGN. 584 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom); 585 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); 586 587 // Lower this to FGETSIGNx86 plus an AND. 588 setOperationAction(ISD::FGETSIGN, MVT::i64, Custom); 589 setOperationAction(ISD::FGETSIGN, MVT::i32, Custom); 590 591 // We don't support sin/cos/fmod 592 setOperationAction(ISD::FSIN , MVT::f64, Expand); 593 setOperationAction(ISD::FCOS , MVT::f64, Expand); 594 setOperationAction(ISD::FSIN , MVT::f32, Expand); 595 setOperationAction(ISD::FCOS , MVT::f32, Expand); 596 597 // Expand FP immediates into loads from the stack, except for the special 598 // cases we handle. 599 addLegalFPImmediate(APFloat(+0.0)); // xorpd 600 addLegalFPImmediate(APFloat(+0.0f)); // xorps 601 } else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) { 602 // Use SSE for f32, x87 for f64. 603 // Set up the FP register classes. 604 addRegisterClass(MVT::f32, X86::FR32RegisterClass); 605 addRegisterClass(MVT::f64, X86::RFP64RegisterClass); 606 607 // Use ANDPS to simulate FABS. 608 setOperationAction(ISD::FABS , MVT::f32, Custom); 609 610 // Use XORP to simulate FNEG. 611 setOperationAction(ISD::FNEG , MVT::f32, Custom); 612 613 setOperationAction(ISD::UNDEF, MVT::f64, Expand); 614 615 // Use ANDPS and ORPS to simulate FCOPYSIGN. 616 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 617 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); 618 619 // We don't support sin/cos/fmod 620 setOperationAction(ISD::FSIN , MVT::f32, Expand); 621 setOperationAction(ISD::FCOS , MVT::f32, Expand); 622 623 // Special cases we handle for FP constants. 624 addLegalFPImmediate(APFloat(+0.0f)); // xorps 625 addLegalFPImmediate(APFloat(+0.0)); // FLD0 626 addLegalFPImmediate(APFloat(+1.0)); // FLD1 627 addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS 628 addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS 629 630 if (!TM.Options.UnsafeFPMath) { 631 setOperationAction(ISD::FSIN , MVT::f64 , Expand); 632 setOperationAction(ISD::FCOS , MVT::f64 , Expand); 633 } 634 } else if (!TM.Options.UseSoftFloat) { 635 // f32 and f64 in x87. 636 // Set up the FP register classes. 637 addRegisterClass(MVT::f64, X86::RFP64RegisterClass); 638 addRegisterClass(MVT::f32, X86::RFP32RegisterClass); 639 640 setOperationAction(ISD::UNDEF, MVT::f64, Expand); 641 setOperationAction(ISD::UNDEF, MVT::f32, Expand); 642 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 643 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); 644 645 if (!TM.Options.UnsafeFPMath) { 646 setOperationAction(ISD::FSIN , MVT::f64 , Expand); 647 setOperationAction(ISD::FCOS , MVT::f64 , Expand); 648 } 649 addLegalFPImmediate(APFloat(+0.0)); // FLD0 650 addLegalFPImmediate(APFloat(+1.0)); // FLD1 651 addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS 652 addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS 653 addLegalFPImmediate(APFloat(+0.0f)); // FLD0 654 addLegalFPImmediate(APFloat(+1.0f)); // FLD1 655 addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS 656 addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS 657 } 658 659 // We don't support FMA. 660 setOperationAction(ISD::FMA, MVT::f64, Expand); 661 setOperationAction(ISD::FMA, MVT::f32, Expand); 662 663 // Long double always uses X87. 
664 if (!TM.Options.UseSoftFloat) { 665 addRegisterClass(MVT::f80, X86::RFP80RegisterClass); 666 setOperationAction(ISD::UNDEF, MVT::f80, Expand); 667 setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand); 668 { 669 APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended); 670 addLegalFPImmediate(TmpFlt); // FLD0 671 TmpFlt.changeSign(); 672 addLegalFPImmediate(TmpFlt); // FLD0/FCHS 673 674 bool ignored; 675 APFloat TmpFlt2(+1.0); 676 TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven, 677 &ignored); 678 addLegalFPImmediate(TmpFlt2); // FLD1 679 TmpFlt2.changeSign(); 680 addLegalFPImmediate(TmpFlt2); // FLD1/FCHS 681 } 682 683 if (!TM.Options.UnsafeFPMath) { 684 setOperationAction(ISD::FSIN , MVT::f80 , Expand); 685 setOperationAction(ISD::FCOS , MVT::f80 , Expand); 686 } 687 688 setOperationAction(ISD::FFLOOR, MVT::f80, Expand); 689 setOperationAction(ISD::FCEIL, MVT::f80, Expand); 690 setOperationAction(ISD::FTRUNC, MVT::f80, Expand); 691 setOperationAction(ISD::FRINT, MVT::f80, Expand); 692 setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand); 693 setOperationAction(ISD::FMA, MVT::f80, Expand); 694 } 695 696 // Always use a library call for pow. 697 setOperationAction(ISD::FPOW , MVT::f32 , Expand); 698 setOperationAction(ISD::FPOW , MVT::f64 , Expand); 699 setOperationAction(ISD::FPOW , MVT::f80 , Expand); 700 701 setOperationAction(ISD::FLOG, MVT::f80, Expand); 702 setOperationAction(ISD::FLOG2, MVT::f80, Expand); 703 setOperationAction(ISD::FLOG10, MVT::f80, Expand); 704 setOperationAction(ISD::FEXP, MVT::f80, Expand); 705 setOperationAction(ISD::FEXP2, MVT::f80, Expand); 706 707 // First set operation action for all vector types to either promote 708 // (for widening) or expand (for scalarization). Then we will selectively 709 // turn on ones that can be effectively codegen'd. 
710 for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; 711 VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) { 712 setOperationAction(ISD::ADD , (MVT::SimpleValueType)VT, Expand); 713 setOperationAction(ISD::SUB , (MVT::SimpleValueType)VT, Expand); 714 setOperationAction(ISD::FADD, (MVT::SimpleValueType)VT, Expand); 715 setOperationAction(ISD::FNEG, (MVT::SimpleValueType)VT, Expand); 716 setOperationAction(ISD::FSUB, (MVT::SimpleValueType)VT, Expand); 717 setOperationAction(ISD::MUL , (MVT::SimpleValueType)VT, Expand); 718 setOperationAction(ISD::FMUL, (MVT::SimpleValueType)VT, Expand); 719 setOperationAction(ISD::SDIV, (MVT::SimpleValueType)VT, Expand); 720 setOperationAction(ISD::UDIV, (MVT::SimpleValueType)VT, Expand); 721 setOperationAction(ISD::FDIV, (MVT::SimpleValueType)VT, Expand); 722 setOperationAction(ISD::SREM, (MVT::SimpleValueType)VT, Expand); 723 setOperationAction(ISD::UREM, (MVT::SimpleValueType)VT, Expand); 724 setOperationAction(ISD::LOAD, (MVT::SimpleValueType)VT, Expand); 725 setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::SimpleValueType)VT, Expand); 726 setOperationAction(ISD::EXTRACT_VECTOR_ELT,(MVT::SimpleValueType)VT,Expand); 727 setOperationAction(ISD::INSERT_VECTOR_ELT,(MVT::SimpleValueType)VT, Expand); 728 setOperationAction(ISD::EXTRACT_SUBVECTOR,(MVT::SimpleValueType)VT,Expand); 729 setOperationAction(ISD::INSERT_SUBVECTOR,(MVT::SimpleValueType)VT,Expand); 730 setOperationAction(ISD::FABS, (MVT::SimpleValueType)VT, Expand); 731 setOperationAction(ISD::FSIN, (MVT::SimpleValueType)VT, Expand); 732 setOperationAction(ISD::FCOS, (MVT::SimpleValueType)VT, Expand); 733 setOperationAction(ISD::FREM, (MVT::SimpleValueType)VT, Expand); 734 setOperationAction(ISD::FPOWI, (MVT::SimpleValueType)VT, Expand); 735 setOperationAction(ISD::FSQRT, (MVT::SimpleValueType)VT, Expand); 736 setOperationAction(ISD::FCOPYSIGN, (MVT::SimpleValueType)VT, Expand); 737 setOperationAction(ISD::SMUL_LOHI, (MVT::SimpleValueType)VT, Expand); 738 setOperationAction(ISD::UMUL_LOHI, (MVT::SimpleValueType)VT, Expand); 739 setOperationAction(ISD::SDIVREM, (MVT::SimpleValueType)VT, Expand); 740 setOperationAction(ISD::UDIVREM, (MVT::SimpleValueType)VT, Expand); 741 setOperationAction(ISD::FPOW, (MVT::SimpleValueType)VT, Expand); 742 setOperationAction(ISD::CTPOP, (MVT::SimpleValueType)VT, Expand); 743 setOperationAction(ISD::CTTZ, (MVT::SimpleValueType)VT, Expand); 744 setOperationAction(ISD::CTTZ_ZERO_UNDEF, (MVT::SimpleValueType)VT, Expand); 745 setOperationAction(ISD::CTLZ, (MVT::SimpleValueType)VT, Expand); 746 setOperationAction(ISD::CTLZ_ZERO_UNDEF, (MVT::SimpleValueType)VT, Expand); 747 setOperationAction(ISD::SHL, (MVT::SimpleValueType)VT, Expand); 748 setOperationAction(ISD::SRA, (MVT::SimpleValueType)VT, Expand); 749 setOperationAction(ISD::SRL, (MVT::SimpleValueType)VT, Expand); 750 setOperationAction(ISD::ROTL, (MVT::SimpleValueType)VT, Expand); 751 setOperationAction(ISD::ROTR, (MVT::SimpleValueType)VT, Expand); 752 setOperationAction(ISD::BSWAP, (MVT::SimpleValueType)VT, Expand); 753 setOperationAction(ISD::SETCC, (MVT::SimpleValueType)VT, Expand); 754 setOperationAction(ISD::FLOG, (MVT::SimpleValueType)VT, Expand); 755 setOperationAction(ISD::FLOG2, (MVT::SimpleValueType)VT, Expand); 756 setOperationAction(ISD::FLOG10, (MVT::SimpleValueType)VT, Expand); 757 setOperationAction(ISD::FEXP, (MVT::SimpleValueType)VT, Expand); 758 setOperationAction(ISD::FEXP2, (MVT::SimpleValueType)VT, Expand); 759 setOperationAction(ISD::FP_TO_UINT, (MVT::SimpleValueType)VT, Expand); 760 
setOperationAction(ISD::FP_TO_SINT, (MVT::SimpleValueType)VT, Expand); 761 setOperationAction(ISD::UINT_TO_FP, (MVT::SimpleValueType)VT, Expand); 762 setOperationAction(ISD::SINT_TO_FP, (MVT::SimpleValueType)VT, Expand); 763 setOperationAction(ISD::SIGN_EXTEND_INREG, (MVT::SimpleValueType)VT,Expand); 764 setOperationAction(ISD::TRUNCATE, (MVT::SimpleValueType)VT, Expand); 765 setOperationAction(ISD::SIGN_EXTEND, (MVT::SimpleValueType)VT, Expand); 766 setOperationAction(ISD::ZERO_EXTEND, (MVT::SimpleValueType)VT, Expand); 767 setOperationAction(ISD::ANY_EXTEND, (MVT::SimpleValueType)VT, Expand); 768 setOperationAction(ISD::VSELECT, (MVT::SimpleValueType)VT, Expand); 769 for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; 770 InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT) 771 setTruncStoreAction((MVT::SimpleValueType)VT, 772 (MVT::SimpleValueType)InnerVT, Expand); 773 setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT, Expand); 774 setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT, Expand); 775 setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT, Expand); 776 } 777 778 // FIXME: In order to prevent SSE instructions being expanded to MMX ones 779 // with -msoft-float, disable use of MMX as well. 780 if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) { 781 addRegisterClass(MVT::x86mmx, X86::VR64RegisterClass); 782 // No operations on x86mmx supported, everything uses intrinsics. 783 } 784 785 // MMX-sized vectors (other than x86mmx) are expected to be expanded 786 // into smaller operations. 787 setOperationAction(ISD::MULHS, MVT::v8i8, Expand); 788 setOperationAction(ISD::MULHS, MVT::v4i16, Expand); 789 setOperationAction(ISD::MULHS, MVT::v2i32, Expand); 790 setOperationAction(ISD::MULHS, MVT::v1i64, Expand); 791 setOperationAction(ISD::AND, MVT::v8i8, Expand); 792 setOperationAction(ISD::AND, MVT::v4i16, Expand); 793 setOperationAction(ISD::AND, MVT::v2i32, Expand); 794 setOperationAction(ISD::AND, MVT::v1i64, Expand); 795 setOperationAction(ISD::OR, MVT::v8i8, Expand); 796 setOperationAction(ISD::OR, MVT::v4i16, Expand); 797 setOperationAction(ISD::OR, MVT::v2i32, Expand); 798 setOperationAction(ISD::OR, MVT::v1i64, Expand); 799 setOperationAction(ISD::XOR, MVT::v8i8, Expand); 800 setOperationAction(ISD::XOR, MVT::v4i16, Expand); 801 setOperationAction(ISD::XOR, MVT::v2i32, Expand); 802 setOperationAction(ISD::XOR, MVT::v1i64, Expand); 803 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Expand); 804 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Expand); 805 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Expand); 806 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Expand); 807 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand); 808 setOperationAction(ISD::SELECT, MVT::v8i8, Expand); 809 setOperationAction(ISD::SELECT, MVT::v4i16, Expand); 810 setOperationAction(ISD::SELECT, MVT::v2i32, Expand); 811 setOperationAction(ISD::SELECT, MVT::v1i64, Expand); 812 setOperationAction(ISD::BITCAST, MVT::v8i8, Expand); 813 setOperationAction(ISD::BITCAST, MVT::v4i16, Expand); 814 setOperationAction(ISD::BITCAST, MVT::v2i32, Expand); 815 setOperationAction(ISD::BITCAST, MVT::v1i64, Expand); 816 817 if (!TM.Options.UseSoftFloat && Subtarget->hasSSE1()) { 818 addRegisterClass(MVT::v4f32, X86::VR128RegisterClass); 819 820 setOperationAction(ISD::FADD, MVT::v4f32, Legal); 821 setOperationAction(ISD::FSUB, MVT::v4f32, Legal); 822 setOperationAction(ISD::FMUL, MVT::v4f32, Legal); 823 setOperationAction(ISD::FDIV, MVT::v4f32, 
Legal); 824 setOperationAction(ISD::FSQRT, MVT::v4f32, Legal); 825 setOperationAction(ISD::FNEG, MVT::v4f32, Custom); 826 setOperationAction(ISD::LOAD, MVT::v4f32, Legal); 827 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom); 828 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom); 829 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom); 830 setOperationAction(ISD::SELECT, MVT::v4f32, Custom); 831 setOperationAction(ISD::SETCC, MVT::v4f32, Custom); 832 } 833 834 if (!TM.Options.UseSoftFloat && Subtarget->hasSSE2()) { 835 addRegisterClass(MVT::v2f64, X86::VR128RegisterClass); 836 837 // FIXME: Unfortunately -soft-float and -no-implicit-float means XMM 838 // registers cannot be used even for integer operations. 839 addRegisterClass(MVT::v16i8, X86::VR128RegisterClass); 840 addRegisterClass(MVT::v8i16, X86::VR128RegisterClass); 841 addRegisterClass(MVT::v4i32, X86::VR128RegisterClass); 842 addRegisterClass(MVT::v2i64, X86::VR128RegisterClass); 843 844 setOperationAction(ISD::ADD, MVT::v16i8, Legal); 845 setOperationAction(ISD::ADD, MVT::v8i16, Legal); 846 setOperationAction(ISD::ADD, MVT::v4i32, Legal); 847 setOperationAction(ISD::ADD, MVT::v2i64, Legal); 848 setOperationAction(ISD::MUL, MVT::v2i64, Custom); 849 setOperationAction(ISD::SUB, MVT::v16i8, Legal); 850 setOperationAction(ISD::SUB, MVT::v8i16, Legal); 851 setOperationAction(ISD::SUB, MVT::v4i32, Legal); 852 setOperationAction(ISD::SUB, MVT::v2i64, Legal); 853 setOperationAction(ISD::MUL, MVT::v8i16, Legal); 854 setOperationAction(ISD::FADD, MVT::v2f64, Legal); 855 setOperationAction(ISD::FSUB, MVT::v2f64, Legal); 856 setOperationAction(ISD::FMUL, MVT::v2f64, Legal); 857 setOperationAction(ISD::FDIV, MVT::v2f64, Legal); 858 setOperationAction(ISD::FSQRT, MVT::v2f64, Legal); 859 setOperationAction(ISD::FNEG, MVT::v2f64, Custom); 860 861 setOperationAction(ISD::SETCC, MVT::v2i64, Custom); 862 setOperationAction(ISD::SETCC, MVT::v16i8, Custom); 863 setOperationAction(ISD::SETCC, MVT::v8i16, Custom); 864 setOperationAction(ISD::SETCC, MVT::v4i32, Custom); 865 866 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom); 867 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom); 868 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom); 869 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom); 870 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom); 871 872 setOperationAction(ISD::CONCAT_VECTORS, MVT::v2f64, Custom); 873 setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i64, Custom); 874 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i8, Custom); 875 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i16, Custom); 876 setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom); 877 878 // Custom lower build_vector, vector_shuffle, and extract_vector_elt. 
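    // The loop below covers the 128-bit integer vector types from v16i8 up to
    // (but not including) v2i64; v2i64 and v2f64 get explicit handling right
    // after it.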
879 for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v2i64; ++i) { 880 EVT VT = (MVT::SimpleValueType)i; 881 // Do not attempt to custom lower non-power-of-2 vectors 882 if (!isPowerOf2_32(VT.getVectorNumElements())) 883 continue; 884 // Do not attempt to custom lower non-128-bit vectors 885 if (!VT.is128BitVector()) 886 continue; 887 setOperationAction(ISD::BUILD_VECTOR, 888 VT.getSimpleVT().SimpleTy, Custom); 889 setOperationAction(ISD::VECTOR_SHUFFLE, 890 VT.getSimpleVT().SimpleTy, Custom); 891 setOperationAction(ISD::EXTRACT_VECTOR_ELT, 892 VT.getSimpleVT().SimpleTy, Custom); 893 } 894 895 setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom); 896 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom); 897 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom); 898 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom); 899 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom); 900 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom); 901 902 if (Subtarget->is64Bit()) { 903 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom); 904 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom); 905 } 906 907 // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64. 908 for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v2i64; i++) { 909 MVT::SimpleValueType SVT = (MVT::SimpleValueType)i; 910 EVT VT = SVT; 911 912 // Do not attempt to promote non-128-bit vectors 913 if (!VT.is128BitVector()) 914 continue; 915 916 setOperationAction(ISD::AND, SVT, Promote); 917 AddPromotedToType (ISD::AND, SVT, MVT::v2i64); 918 setOperationAction(ISD::OR, SVT, Promote); 919 AddPromotedToType (ISD::OR, SVT, MVT::v2i64); 920 setOperationAction(ISD::XOR, SVT, Promote); 921 AddPromotedToType (ISD::XOR, SVT, MVT::v2i64); 922 setOperationAction(ISD::LOAD, SVT, Promote); 923 AddPromotedToType (ISD::LOAD, SVT, MVT::v2i64); 924 setOperationAction(ISD::SELECT, SVT, Promote); 925 AddPromotedToType (ISD::SELECT, SVT, MVT::v2i64); 926 } 927 928 setTruncStoreAction(MVT::f64, MVT::f32, Expand); 929 930 // Custom lower v2i64 and v2f64 selects. 931 setOperationAction(ISD::LOAD, MVT::v2f64, Legal); 932 setOperationAction(ISD::LOAD, MVT::v2i64, Legal); 933 setOperationAction(ISD::SELECT, MVT::v2f64, Custom); 934 setOperationAction(ISD::SELECT, MVT::v2i64, Custom); 935 936 setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal); 937 setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal); 938 } 939 940 if (Subtarget->hasSSE41()) { 941 setOperationAction(ISD::FFLOOR, MVT::f32, Legal); 942 setOperationAction(ISD::FCEIL, MVT::f32, Legal); 943 setOperationAction(ISD::FTRUNC, MVT::f32, Legal); 944 setOperationAction(ISD::FRINT, MVT::f32, Legal); 945 setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal); 946 setOperationAction(ISD::FFLOOR, MVT::f64, Legal); 947 setOperationAction(ISD::FCEIL, MVT::f64, Legal); 948 setOperationAction(ISD::FTRUNC, MVT::f64, Legal); 949 setOperationAction(ISD::FRINT, MVT::f64, Legal); 950 setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal); 951 952 // FIXME: Do we need to handle scalar-to-vector here? 
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);
    setOperationAction(ISD::VSELECT, MVT::v2i64, Legal);
    setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
    setOperationAction(ISD::VSELECT, MVT::v4i32, Legal);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);

    // i8 and i16 vectors are custom, because the source register and source
    // memory operand types are not the same width.  f32 vectors are
    // custom since the immediate controlling the insert encodes additional
    // information.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);

    // FIXME: these should be Legal, but that's only for the case where
    // the index is constant.  For now custom expand to deal with that.
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
  }

  if (Subtarget->hasSSE2()) {
    setOperationAction(ISD::SRL, MVT::v8i16, Custom);
    setOperationAction(ISD::SRL, MVT::v16i8, Custom);

    setOperationAction(ISD::SHL, MVT::v8i16, Custom);
    setOperationAction(ISD::SHL, MVT::v16i8, Custom);

    setOperationAction(ISD::SRA, MVT::v8i16, Custom);
    setOperationAction(ISD::SRA, MVT::v16i8, Custom);

    if (Subtarget->hasAVX2()) {
      setOperationAction(ISD::SRL, MVT::v2i64, Legal);
      setOperationAction(ISD::SRL, MVT::v4i32, Legal);

      setOperationAction(ISD::SHL, MVT::v2i64, Legal);
      setOperationAction(ISD::SHL, MVT::v4i32, Legal);

      setOperationAction(ISD::SRA, MVT::v4i32, Legal);
    } else {
      setOperationAction(ISD::SRL, MVT::v2i64, Custom);
      setOperationAction(ISD::SRL, MVT::v4i32, Custom);

      setOperationAction(ISD::SHL, MVT::v2i64, Custom);
      setOperationAction(ISD::SHL, MVT::v4i32, Custom);

      setOperationAction(ISD::SRA, MVT::v4i32, Custom);
    }
  }

  if (Subtarget->hasSSE42())
    setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

  if (!TM.Options.UseSoftFloat && Subtarget->hasAVX()) {
    addRegisterClass(MVT::v32i8, X86::VR256RegisterClass);
    addRegisterClass(MVT::v16i16, X86::VR256RegisterClass);
    addRegisterClass(MVT::v8i32, X86::VR256RegisterClass);
    addRegisterClass(MVT::v8f32, X86::VR256RegisterClass);
    addRegisterClass(MVT::v4i64, X86::VR256RegisterClass);
    addRegisterClass(MVT::v4f64, X86::VR256RegisterClass);

    setOperationAction(ISD::LOAD, MVT::v8f32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i64, Legal);

    setOperationAction(ISD::FADD, MVT::v8f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v8f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v8f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v8f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v8f32, Custom);

    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
1035 setOperationAction(ISD::FSUB, MVT::v4f64, Legal); 1036 setOperationAction(ISD::FMUL, MVT::v4f64, Legal); 1037 setOperationAction(ISD::FDIV, MVT::v4f64, Legal); 1038 setOperationAction(ISD::FSQRT, MVT::v4f64, Legal); 1039 setOperationAction(ISD::FNEG, MVT::v4f64, Custom); 1040 1041 setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal); 1042 setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal); 1043 setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal); 1044 1045 setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f64, Custom); 1046 setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i64, Custom); 1047 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom); 1048 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom); 1049 setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i8, Custom); 1050 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i16, Custom); 1051 1052 setOperationAction(ISD::SRL, MVT::v16i16, Custom); 1053 setOperationAction(ISD::SRL, MVT::v32i8, Custom); 1054 1055 setOperationAction(ISD::SHL, MVT::v16i16, Custom); 1056 setOperationAction(ISD::SHL, MVT::v32i8, Custom); 1057 1058 setOperationAction(ISD::SRA, MVT::v16i16, Custom); 1059 setOperationAction(ISD::SRA, MVT::v32i8, Custom); 1060 1061 setOperationAction(ISD::SETCC, MVT::v32i8, Custom); 1062 setOperationAction(ISD::SETCC, MVT::v16i16, Custom); 1063 setOperationAction(ISD::SETCC, MVT::v8i32, Custom); 1064 setOperationAction(ISD::SETCC, MVT::v4i64, Custom); 1065 1066 setOperationAction(ISD::SELECT, MVT::v4f64, Custom); 1067 setOperationAction(ISD::SELECT, MVT::v4i64, Custom); 1068 setOperationAction(ISD::SELECT, MVT::v8f32, Custom); 1069 1070 setOperationAction(ISD::VSELECT, MVT::v4f64, Legal); 1071 setOperationAction(ISD::VSELECT, MVT::v4i64, Legal); 1072 setOperationAction(ISD::VSELECT, MVT::v8i32, Legal); 1073 setOperationAction(ISD::VSELECT, MVT::v8f32, Legal); 1074 1075 if (Subtarget->hasAVX2()) { 1076 setOperationAction(ISD::ADD, MVT::v4i64, Legal); 1077 setOperationAction(ISD::ADD, MVT::v8i32, Legal); 1078 setOperationAction(ISD::ADD, MVT::v16i16, Legal); 1079 setOperationAction(ISD::ADD, MVT::v32i8, Legal); 1080 1081 setOperationAction(ISD::SUB, MVT::v4i64, Legal); 1082 setOperationAction(ISD::SUB, MVT::v8i32, Legal); 1083 setOperationAction(ISD::SUB, MVT::v16i16, Legal); 1084 setOperationAction(ISD::SUB, MVT::v32i8, Legal); 1085 1086 setOperationAction(ISD::MUL, MVT::v4i64, Custom); 1087 setOperationAction(ISD::MUL, MVT::v8i32, Legal); 1088 setOperationAction(ISD::MUL, MVT::v16i16, Legal); 1089 // Don't lower v32i8 because there is no 128-bit byte mul 1090 1091 setOperationAction(ISD::VSELECT, MVT::v32i8, Legal); 1092 1093 setOperationAction(ISD::SRL, MVT::v4i64, Legal); 1094 setOperationAction(ISD::SRL, MVT::v8i32, Legal); 1095 1096 setOperationAction(ISD::SHL, MVT::v4i64, Legal); 1097 setOperationAction(ISD::SHL, MVT::v8i32, Legal); 1098 1099 setOperationAction(ISD::SRA, MVT::v8i32, Legal); 1100 } else { 1101 setOperationAction(ISD::ADD, MVT::v4i64, Custom); 1102 setOperationAction(ISD::ADD, MVT::v8i32, Custom); 1103 setOperationAction(ISD::ADD, MVT::v16i16, Custom); 1104 setOperationAction(ISD::ADD, MVT::v32i8, Custom); 1105 1106 setOperationAction(ISD::SUB, MVT::v4i64, Custom); 1107 setOperationAction(ISD::SUB, MVT::v8i32, Custom); 1108 setOperationAction(ISD::SUB, MVT::v16i16, Custom); 1109 setOperationAction(ISD::SUB, MVT::v32i8, Custom); 1110 1111 setOperationAction(ISD::MUL, MVT::v4i64, Custom); 1112 setOperationAction(ISD::MUL, MVT::v8i32, Custom); 1113 setOperationAction(ISD::MUL, MVT::v16i16, Custom); 
1114 // Don't lower v32i8 because there is no 128-bit byte mul 1115 1116 setOperationAction(ISD::SRL, MVT::v4i64, Custom); 1117 setOperationAction(ISD::SRL, MVT::v8i32, Custom); 1118 1119 setOperationAction(ISD::SHL, MVT::v4i64, Custom); 1120 setOperationAction(ISD::SHL, MVT::v8i32, Custom); 1121 1122 setOperationAction(ISD::SRA, MVT::v8i32, Custom); 1123 } 1124 1125 // Custom lower several nodes for 256-bit types. 1126 for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; 1127 i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) { 1128 MVT::SimpleValueType SVT = (MVT::SimpleValueType)i; 1129 EVT VT = SVT; 1130 1131 // Extract subvector is special because the value type 1132 // (result) is 128-bit but the source is 256-bit wide. 1133 if (VT.is128BitVector()) 1134 setOperationAction(ISD::EXTRACT_SUBVECTOR, SVT, Custom); 1135 1136 // Do not attempt to custom lower other non-256-bit vectors 1137 if (!VT.is256BitVector()) 1138 continue; 1139 1140 setOperationAction(ISD::BUILD_VECTOR, SVT, Custom); 1141 setOperationAction(ISD::VECTOR_SHUFFLE, SVT, Custom); 1142 setOperationAction(ISD::INSERT_VECTOR_ELT, SVT, Custom); 1143 setOperationAction(ISD::EXTRACT_VECTOR_ELT, SVT, Custom); 1144 setOperationAction(ISD::SCALAR_TO_VECTOR, SVT, Custom); 1145 setOperationAction(ISD::INSERT_SUBVECTOR, SVT, Custom); 1146 } 1147 1148 // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64. 1149 for (unsigned i = (unsigned)MVT::v32i8; i != (unsigned)MVT::v4i64; ++i) { 1150 MVT::SimpleValueType SVT = (MVT::SimpleValueType)i; 1151 EVT VT = SVT; 1152 1153 // Do not attempt to promote non-256-bit vectors 1154 if (!VT.is256BitVector()) 1155 continue; 1156 1157 setOperationAction(ISD::AND, SVT, Promote); 1158 AddPromotedToType (ISD::AND, SVT, MVT::v4i64); 1159 setOperationAction(ISD::OR, SVT, Promote); 1160 AddPromotedToType (ISD::OR, SVT, MVT::v4i64); 1161 setOperationAction(ISD::XOR, SVT, Promote); 1162 AddPromotedToType (ISD::XOR, SVT, MVT::v4i64); 1163 setOperationAction(ISD::LOAD, SVT, Promote); 1164 AddPromotedToType (ISD::LOAD, SVT, MVT::v4i64); 1165 setOperationAction(ISD::SELECT, SVT, Promote); 1166 AddPromotedToType (ISD::SELECT, SVT, MVT::v4i64); 1167 } 1168 } 1169 1170 // SIGN_EXTEND_INREGs are evaluated by the extend type. Handle the expansion 1171 // of this type with custom code. 1172 for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; 1173 VT != (unsigned)MVT::LAST_VECTOR_VALUETYPE; VT++) { 1174 setOperationAction(ISD::SIGN_EXTEND_INREG, (MVT::SimpleValueType)VT, 1175 Custom); 1176 } 1177 1178 // We want to custom lower some of our intrinsics. 1179 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); 1180 1181 1182 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't 1183 // handle type legalization for these operations here. 1184 // 1185 // FIXME: We really should do custom legalization for addition and 1186 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better 1187 // than generic legalization for 64-bit multiplication-with-overflow, though. 1188 for (unsigned i = 0, e = 3+Subtarget->is64Bit(); i != e; ++i) { 1189 // Add/Sub/Mul with overflow operations are custom lowered. 
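    // Custom lowering maps these to an X86 arithmetic node that also produces
    // EFLAGS, with the overflow/carry bit read back via a SETCC, instead of
    // the generic expansion.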
    MVT VT = IntVTs[i];
    setOperationAction(ISD::SADDO, VT, Custom);
    setOperationAction(ISD::UADDO, VT, Custom);
    setOperationAction(ISD::SSUBO, VT, Custom);
    setOperationAction(ISD::USUBO, VT, Custom);
    setOperationAction(ISD::SMULO, VT, Custom);
    setOperationAction(ISD::UMULO, VT, Custom);
  }

  // There are no 8-bit 3-address imul/mul instructions
  setOperationAction(ISD::SMULO, MVT::i8, Expand);
  setOperationAction(ISD::UMULO, MVT::i8, Expand);

  if (!Subtarget->is64Bit()) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, 0);
    setLibcallName(RTLIB::SRL_I128, 0);
    setLibcallName(RTLIB::SRA_I128, 0);
  }

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::VSELECT);
  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  if (Subtarget->is64Bit())
    setTargetDAGCombine(ISD::MUL);
  if (Subtarget->hasBMI())
    setTargetDAGCombine(ISD::XOR);

  computeRegisterProperties();

  // On Darwin, -Os means optimize for size without hurting performance, so do
  // not reduce the limit.
  maxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
  maxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 16 : 8;
  maxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
  maxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
  maxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
  maxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
  setPrefLoopAlignment(4); // 2^4 bytes.
  benefitFromCodePlacementOpt = true;

  setPrefFunctionAlignment(4); // 2^4 bytes.
}


EVT X86TargetLowering::getSetCCResultType(EVT VT) const {
  if (!VT.isVector()) return MVT::i8;
  return VT.changeVectorElementTypeToInteger();
}


/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
  if (MaxAlign == 16)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (VTy->getBitWidth() == 128)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned EltAlign = 0;
      getMaxByValAlign(STy->getElementType(i), EltAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == 16)
        break;
    }
  }
  return;
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
/// For X86, aggregates that contain SSE vectors are placed at 16-byte
/// boundaries, while the rest are at 4-byte boundaries.
unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const {
  if (Subtarget->is64Bit()) {
    // Max of 8 and alignment of type.
    unsigned TyAlign = TD->getABITypeAlignment(Ty);
    if (TyAlign > 8)
      return TyAlign;
    return 8;
  }

  unsigned Align = 4;
  if (Subtarget->hasSSE1())
    getMaxByValAlign(Ty, Align);
  return Align;
}

/// getOptimalMemOpType - Returns the target-specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
/// lowering.  If DstAlign is zero, the destination alignment can satisfy any
/// constraint.  Similarly, if SrcAlign is zero, there is no need to check it
/// against the alignment requirement, probably because the source does not
/// need to be loaded.  If 'IsZeroVal' is true, it's safe to return a
/// non-scalar-integer type, e.g. empty string source, constant, or loaded
/// from memory.  'MemcpyStrSrc' indicates whether the memcpy source is
/// constant so it does not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
EVT
X86TargetLowering::getOptimalMemOpType(uint64_t Size,
                                       unsigned DstAlign, unsigned SrcAlign,
                                       bool IsZeroVal,
                                       bool MemcpyStrSrc,
                                       MachineFunction &MF) const {
  // FIXME: This turns off use of xmm stores for memset/memcpy on targets like
  // linux.  This is because the stack realignment code can't handle certain
  // cases like PR2962.  This should be removed when PR2962 is fixed.
  const Function *F = MF.getFunction();
  if (IsZeroVal &&
      !F->hasFnAttr(Attribute::NoImplicitFloat)) {
    if (Size >= 16 &&
        (Subtarget->isUnalignedMemAccessFast() ||
         ((DstAlign == 0 || DstAlign >= 16) &&
          (SrcAlign == 0 || SrcAlign >= 16))) &&
        Subtarget->getStackAlignment() >= 16) {
      if (Subtarget->hasAVX() &&
          Subtarget->getStackAlignment() >= 32)
        return MVT::v8f32;
      if (Subtarget->hasSSE2())
        return MVT::v4i32;
      if (Subtarget->hasSSE1())
        return MVT::v4f32;
    } else if (!MemcpyStrSrc && Size >= 8 &&
               !Subtarget->is64Bit() &&
               Subtarget->getStackAlignment() >= 8 &&
               Subtarget->hasSSE2()) {
      // Do not use f64 to lower memcpy if source is string constant. It's
      // better to use i32 to avoid the loads.
      return MVT::f64;
    }
  }
  if (Subtarget->is64Bit() && Size >= 8)
    return MVT::i64;
  return MVT::i32;
}

/// getJumpTableEncoding - Return the entry encoding for a jump table in the
/// current function.  The returned value is a member of the
/// MachineJumpTableInfo::JTEntryKind enum.
unsigned X86TargetLowering::getJumpTableEncoding() const {
  // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
  // symbol.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT())
    return MachineJumpTableInfo::EK_Custom32;

  // Otherwise, use the normal jump table encoding heuristics.
1362 return TargetLowering::getJumpTableEncoding(); 1363} 1364 1365const MCExpr * 1366X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI, 1367 const MachineBasicBlock *MBB, 1368 unsigned uid,MCContext &Ctx) const{ 1369 assert(getTargetMachine().getRelocationModel() == Reloc::PIC_ && 1370 Subtarget->isPICStyleGOT()); 1371 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF 1372 // entries. 1373 return MCSymbolRefExpr::Create(MBB->getSymbol(), 1374 MCSymbolRefExpr::VK_GOTOFF, Ctx); 1375} 1376 1377/// getPICJumpTableRelocaBase - Returns relocation base for the given PIC 1378/// jumptable. 1379SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table, 1380 SelectionDAG &DAG) const { 1381 if (!Subtarget->is64Bit()) 1382 // This doesn't have DebugLoc associated with it, but is not really the 1383 // same as a Register. 1384 return DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), getPointerTy()); 1385 return Table; 1386} 1387 1388/// getPICJumpTableRelocBaseExpr - This returns the relocation base for the 1389/// given PIC jumptable, the same as getPICJumpTableRelocBase, but as an 1390/// MCExpr. 1391const MCExpr *X86TargetLowering:: 1392getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, 1393 MCContext &Ctx) const { 1394 // X86-64 uses RIP relative addressing based on the jump table label. 1395 if (Subtarget->isPICStyleRIPRel()) 1396 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); 1397 1398 // Otherwise, the reference is relative to the PIC base. 1399 return MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx); 1400} 1401 1402// FIXME: Why this routine is here? Move to RegInfo! 1403std::pair<const TargetRegisterClass*, uint8_t> 1404X86TargetLowering::findRepresentativeClass(EVT VT) const{ 1405 const TargetRegisterClass *RRC = 0; 1406 uint8_t Cost = 1; 1407 switch (VT.getSimpleVT().SimpleTy) { 1408 default: 1409 return TargetLowering::findRepresentativeClass(VT); 1410 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64: 1411 RRC = (Subtarget->is64Bit() 1412 ? 
X86::GR64RegisterClass : X86::GR32RegisterClass); 1413 break; 1414 case MVT::x86mmx: 1415 RRC = X86::VR64RegisterClass; 1416 break; 1417 case MVT::f32: case MVT::f64: 1418 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64: 1419 case MVT::v4f32: case MVT::v2f64: 1420 case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32: 1421 case MVT::v4f64: 1422 RRC = X86::VR128RegisterClass; 1423 break; 1424 } 1425 return std::make_pair(RRC, Cost); 1426} 1427 1428bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace, 1429 unsigned &Offset) const { 1430 if (!Subtarget->isTargetLinux()) 1431 return false; 1432 1433 if (Subtarget->is64Bit()) { 1434 // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs: 1435 Offset = 0x28; 1436 if (getTargetMachine().getCodeModel() == CodeModel::Kernel) 1437 AddressSpace = 256; 1438 else 1439 AddressSpace = 257; 1440 } else { 1441 // %gs:0x14 on i386 1442 Offset = 0x14; 1443 AddressSpace = 256; 1444 } 1445 return true; 1446} 1447 1448 1449//===----------------------------------------------------------------------===// 1450// Return Value Calling Convention Implementation 1451//===----------------------------------------------------------------------===// 1452 1453#include "X86GenCallingConv.inc" 1454 1455bool 1456X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv, 1457 MachineFunction &MF, bool isVarArg, 1458 const SmallVectorImpl<ISD::OutputArg> &Outs, 1459 LLVMContext &Context) const { 1460 SmallVector<CCValAssign, 16> RVLocs; 1461 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 1462 RVLocs, Context); 1463 return CCInfo.CheckReturn(Outs, RetCC_X86); 1464} 1465 1466SDValue 1467X86TargetLowering::LowerReturn(SDValue Chain, 1468 CallingConv::ID CallConv, bool isVarArg, 1469 const SmallVectorImpl<ISD::OutputArg> &Outs, 1470 const SmallVectorImpl<SDValue> &OutVals, 1471 DebugLoc dl, SelectionDAG &DAG) const { 1472 MachineFunction &MF = DAG.getMachineFunction(); 1473 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1474 1475 SmallVector<CCValAssign, 16> RVLocs; 1476 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 1477 RVLocs, *DAG.getContext()); 1478 CCInfo.AnalyzeReturn(Outs, RetCC_X86); 1479 1480 // Add the regs to the liveout set for the function. 1481 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); 1482 for (unsigned i = 0; i != RVLocs.size(); ++i) 1483 if (RVLocs[i].isRegLoc() && !MRI.isLiveOut(RVLocs[i].getLocReg())) 1484 MRI.addLiveOut(RVLocs[i].getLocReg()); 1485 1486 SDValue Flag; 1487 1488 SmallVector<SDValue, 6> RetOps; 1489 RetOps.push_back(Chain); // Operand #0 = Chain (updated below) 1490 // Operand #1 = Bytes To Pop 1491 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), 1492 MVT::i16)); 1493 1494 // Copy the result values into the output registers. 1495 for (unsigned i = 0; i != RVLocs.size(); ++i) { 1496 CCValAssign &VA = RVLocs[i]; 1497 assert(VA.isRegLoc() && "Can only return in registers!"); 1498 SDValue ValToCopy = OutVals[i]; 1499 EVT ValVT = ValToCopy.getValueType(); 1500 1501 // If this is x86-64, and we disabled SSE, we can't return FP values, 1502 // or SSE or MMX vectors. 1503 if ((ValVT == MVT::f32 || ValVT == MVT::f64 || 1504 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) && 1505 (Subtarget->is64Bit() && !Subtarget->hasSSE1())) { 1506 report_fatal_error("SSE register return with SSE disabled"); 1507 } 1508 // Likewise we can't return F64 values with SSE1 only. 
gcc does so, but 1509 // llvm-gcc has never done it right and no one has noticed, so this 1510 // should be OK for now. 1511 if (ValVT == MVT::f64 && 1512 (Subtarget->is64Bit() && !Subtarget->hasSSE2())) 1513 report_fatal_error("SSE2 register return with SSE2 disabled"); 1514 1515 // Returns in ST0/ST1 are handled specially: these are pushed as operands to 1516 // the RET instruction and handled by the FP Stackifier. 1517 if (VA.getLocReg() == X86::ST0 || 1518 VA.getLocReg() == X86::ST1) { 1519 // If this is a copy from an xmm register to ST(0), use an FPExtend to 1520 // change the value to the FP stack register class. 1521 if (isScalarFPTypeInSSEReg(VA.getValVT())) 1522 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy); 1523 RetOps.push_back(ValToCopy); 1524 // Don't emit a copytoreg. 1525 continue; 1526 } 1527 1528 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64 1529 // which is returned in RAX / RDX. 1530 if (Subtarget->is64Bit()) { 1531 if (ValVT == MVT::x86mmx) { 1532 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) { 1533 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy); 1534 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, 1535 ValToCopy); 1536 // If we don't have SSE2 available, convert to v4f32 so the generated 1537 // register is legal. 1538 if (!Subtarget->hasSSE2()) 1539 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,ValToCopy); 1540 } 1541 } 1542 } 1543 1544 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag); 1545 Flag = Chain.getValue(1); 1546 } 1547 1548 // The x86-64 ABI for returning structs by value requires that we copy 1549 // the sret argument into %rax for the return. We saved the argument into 1550 // a virtual register in the entry block, so now we copy the value out 1551 // and into %rax. 1552 if (Subtarget->is64Bit() && 1553 DAG.getMachineFunction().getFunction()->hasStructRetAttr()) { 1554 MachineFunction &MF = DAG.getMachineFunction(); 1555 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1556 unsigned Reg = FuncInfo->getSRetReturnReg(); 1557 assert(Reg && 1558 "SRetReturnReg should have been set in LowerFormalArguments()."); 1559 SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy()); 1560 1561 Chain = DAG.getCopyToReg(Chain, dl, X86::RAX, Val, Flag); 1562 Flag = Chain.getValue(1); 1563 1564 // RAX now acts like a return value. 1565 MRI.addLiveOut(X86::RAX); 1566 } 1567 1568 RetOps[0] = Chain; // Update chain. 1569 1570 // Add the flag if we have it. 1571 if (Flag.getNode()) 1572 RetOps.push_back(Flag); 1573 1574 return DAG.getNode(X86ISD::RET_FLAG, dl, 1575 MVT::Other, &RetOps[0], RetOps.size()); 1576} 1577 1578bool X86TargetLowering::isUsedByReturnOnly(SDNode *N) const { 1579 if (N->getNumValues() != 1) 1580 return false; 1581 if (!N->hasNUsesOfValue(1, 0)) 1582 return false; 1583 1584 SDNode *Copy = *N->use_begin(); 1585 if (Copy->getOpcode() != ISD::CopyToReg && 1586 Copy->getOpcode() != ISD::FP_EXTEND) 1587 return false; 1588 1589 bool HasRet = false; 1590 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); 1591 UI != UE; ++UI) { 1592 if (UI->getOpcode() != X86ISD::RET_FLAG) 1593 return false; 1594 HasRet = true; 1595 } 1596 1597 return HasRet; 1598} 1599 1600EVT 1601X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT, 1602 ISD::NodeType ExtendKind) const { 1603 MVT ReturnMVT; 1604 // TODO: Is this also valid on 32-bit? 
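  // A rough illustration of the promotion below (the IR is hypothetical, not
  // taken from a test): for
  //   define zeroext i1 @flag()
  // on x86-64 the zero-extended return is only widened to i8, so the value
  // comes back in %al rather than being stretched to a full i32 in %eax.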
1605 if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND) 1606 ReturnMVT = MVT::i8; 1607 else 1608 ReturnMVT = MVT::i32; 1609 1610 EVT MinVT = getRegisterType(Context, ReturnMVT); 1611 return VT.bitsLT(MinVT) ? MinVT : VT; 1612} 1613 1614/// LowerCallResult - Lower the result values of a call into the 1615/// appropriate copies out of appropriate physical registers. 1616/// 1617SDValue 1618X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 1619 CallingConv::ID CallConv, bool isVarArg, 1620 const SmallVectorImpl<ISD::InputArg> &Ins, 1621 DebugLoc dl, SelectionDAG &DAG, 1622 SmallVectorImpl<SDValue> &InVals) const { 1623 1624 // Assign locations to each value returned by this call. 1625 SmallVector<CCValAssign, 16> RVLocs; 1626 bool Is64Bit = Subtarget->is64Bit(); 1627 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 1628 getTargetMachine(), RVLocs, *DAG.getContext()); 1629 CCInfo.AnalyzeCallResult(Ins, RetCC_X86); 1630 1631 // Copy all of the result registers out of their specified physreg. 1632 for (unsigned i = 0; i != RVLocs.size(); ++i) { 1633 CCValAssign &VA = RVLocs[i]; 1634 EVT CopyVT = VA.getValVT(); 1635 1636 // If this is x86-64, and we disabled SSE, we can't return FP values 1637 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) && 1638 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) { 1639 report_fatal_error("SSE register return with SSE disabled"); 1640 } 1641 1642 SDValue Val; 1643 1644 // If this is a call to a function that returns an fp value on the floating 1645 // point stack, we must guarantee the the value is popped from the stack, so 1646 // a CopyFromReg is not good enough - the copy instruction may be eliminated 1647 // if the return value is not used. We use the FpPOP_RETVAL instruction 1648 // instead. 1649 if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) { 1650 // If we prefer to use the value in xmm registers, copy it out as f80 and 1651 // use a truncate to move it from fp stack reg to xmm reg. 1652 if (isScalarFPTypeInSSEReg(VA.getValVT())) CopyVT = MVT::f80; 1653 SDValue Ops[] = { Chain, InFlag }; 1654 Chain = SDValue(DAG.getMachineNode(X86::FpPOP_RETVAL, dl, CopyVT, 1655 MVT::Other, MVT::Glue, Ops, 2), 1); 1656 Val = Chain.getValue(0); 1657 1658 // Round the f80 to the right size, which also moves it to the appropriate 1659 // xmm register. 1660 if (CopyVT != VA.getValVT()) 1661 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val, 1662 // This truncation won't change the value. 1663 DAG.getIntPtrConstant(1)); 1664 } else { 1665 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), 1666 CopyVT, InFlag).getValue(1); 1667 Val = Chain.getValue(0); 1668 } 1669 InFlag = Chain.getValue(2); 1670 InVals.push_back(Val); 1671 } 1672 1673 return Chain; 1674} 1675 1676 1677//===----------------------------------------------------------------------===// 1678// C & StdCall & Fast Calling Convention implementation 1679//===----------------------------------------------------------------------===// 1680// StdCall calling convention seems to be standard for many Windows' API 1681// routines and around. It differs from C calling convention just a little: 1682// callee should clean up the stack, not caller. Symbols should be also 1683// decorated in some fancy way :) It doesn't support any vector arguments. 1684// For info on fast calling convention see Fast Calling Convention (tail call) 1685// implementation LowerX86_32FastCCCallTo. 
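// As a rough sketch of the callee-cleanup difference (the function below is
// hypothetical, shown only for illustration): a call to
//   declare x86_stdcallcc void @f(i32, i32)
// is lowered so that the callee's return pops its 8 bytes of arguments
// (i.e. "ret $8", with the symbol typically decorated as _f@8 on Windows),
// whereas a plain ccc callee returns with "ret" and the caller adjusts %esp
// itself.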
1686 1687/// CallIsStructReturn - Determines whether a call uses struct return 1688/// semantics. 1689static bool CallIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) { 1690 if (Outs.empty()) 1691 return false; 1692 1693 return Outs[0].Flags.isSRet(); 1694} 1695 1696/// ArgsAreStructReturn - Determines whether a function uses struct 1697/// return semantics. 1698static bool 1699ArgsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) { 1700 if (Ins.empty()) 1701 return false; 1702 1703 return Ins[0].Flags.isSRet(); 1704} 1705 1706/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 1707/// by "Src" to address "Dst" with size and alignment information specified by 1708/// the specific parameter attribute. The copy will be passed as a byval 1709/// function parameter. 1710static SDValue 1711CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, 1712 ISD::ArgFlagsTy Flags, SelectionDAG &DAG, 1713 DebugLoc dl) { 1714 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32); 1715 1716 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 1717 /*isVolatile*/false, /*AlwaysInline=*/true, 1718 MachinePointerInfo(), MachinePointerInfo()); 1719} 1720 1721/// IsTailCallConvention - Return true if the calling convention is one that 1722/// supports tail call optimization. 1723static bool IsTailCallConvention(CallingConv::ID CC) { 1724 return (CC == CallingConv::Fast || CC == CallingConv::GHC); 1725} 1726 1727bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { 1728 if (!CI->isTailCall()) 1729 return false; 1730 1731 CallSite CS(CI); 1732 CallingConv::ID CalleeCC = CS.getCallingConv(); 1733 if (!IsTailCallConvention(CalleeCC) && CalleeCC != CallingConv::C) 1734 return false; 1735 1736 return true; 1737} 1738 1739/// FuncIsMadeTailCallSafe - Return true if the function is being made into 1740/// a tailcall target by changing its ABI. 1741static bool FuncIsMadeTailCallSafe(CallingConv::ID CC, 1742 bool GuaranteedTailCallOpt) { 1743 return GuaranteedTailCallOpt && IsTailCallConvention(CC); 1744} 1745 1746SDValue 1747X86TargetLowering::LowerMemArgument(SDValue Chain, 1748 CallingConv::ID CallConv, 1749 const SmallVectorImpl<ISD::InputArg> &Ins, 1750 DebugLoc dl, SelectionDAG &DAG, 1751 const CCValAssign &VA, 1752 MachineFrameInfo *MFI, 1753 unsigned i) const { 1754 // Create the nodes corresponding to a load from this parameter slot. 1755 ISD::ArgFlagsTy Flags = Ins[i].Flags; 1756 bool AlwaysUseMutable = FuncIsMadeTailCallSafe(CallConv, 1757 getTargetMachine().Options.GuaranteedTailCallOpt); 1758 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal(); 1759 EVT ValVT; 1760 1761 // If value is passed by pointer we have address passed instead of the value 1762 // itself. 1763 if (VA.getLocInfo() == CCValAssign::Indirect) 1764 ValVT = VA.getLocVT(); 1765 else 1766 ValVT = VA.getValVT(); 1767 1768 // FIXME: For now, all byval parameter objects are marked mutable. This can be 1769 // changed with more analysis. 1770 // In case of tail call optimization mark all arguments mutable. Since they 1771 // could be overwritten by lowering of arguments in case of a tail call. 1772 if (Flags.isByVal()) { 1773 unsigned Bytes = Flags.getByValSize(); 1774 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects. 
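    // A short illustration (hypothetical IR): for an argument such as
    //   %struct.S* byval %s
    // the caller has already materialized the aggregate in the argument
    // area, so the formal lowers to the frame index created below; only
    // non-byval memory arguments are actually loaded (the else branch).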
1775 int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable); 1776 return DAG.getFrameIndex(FI, getPointerTy()); 1777 } else { 1778 int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8, 1779 VA.getLocMemOffset(), isImmutable); 1780 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 1781 return DAG.getLoad(ValVT, dl, Chain, FIN, 1782 MachinePointerInfo::getFixedStack(FI), 1783 false, false, false, 0); 1784 } 1785} 1786 1787SDValue 1788X86TargetLowering::LowerFormalArguments(SDValue Chain, 1789 CallingConv::ID CallConv, 1790 bool isVarArg, 1791 const SmallVectorImpl<ISD::InputArg> &Ins, 1792 DebugLoc dl, 1793 SelectionDAG &DAG, 1794 SmallVectorImpl<SDValue> &InVals) 1795 const { 1796 MachineFunction &MF = DAG.getMachineFunction(); 1797 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1798 1799 const Function* Fn = MF.getFunction(); 1800 if (Fn->hasExternalLinkage() && 1801 Subtarget->isTargetCygMing() && 1802 Fn->getName() == "main") 1803 FuncInfo->setForceFramePointer(true); 1804 1805 MachineFrameInfo *MFI = MF.getFrameInfo(); 1806 bool Is64Bit = Subtarget->is64Bit(); 1807 bool IsWin64 = Subtarget->isTargetWin64(); 1808 1809 assert(!(isVarArg && IsTailCallConvention(CallConv)) && 1810 "Var args not supported with calling convention fastcc or ghc"); 1811 1812 // Assign locations to all of the incoming arguments. 1813 SmallVector<CCValAssign, 16> ArgLocs; 1814 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 1815 ArgLocs, *DAG.getContext()); 1816 1817 // Allocate shadow area for Win64 1818 if (IsWin64) { 1819 CCInfo.AllocateStack(32, 8); 1820 } 1821 1822 CCInfo.AnalyzeFormalArguments(Ins, CC_X86); 1823 1824 unsigned LastVal = ~0U; 1825 SDValue ArgValue; 1826 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1827 CCValAssign &VA = ArgLocs[i]; 1828 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later 1829 // places. 1830 assert(VA.getValNo() != LastVal && 1831 "Don't support value assigned to multiple locs yet"); 1832 (void)LastVal; 1833 LastVal = VA.getValNo(); 1834 1835 if (VA.isRegLoc()) { 1836 EVT RegVT = VA.getLocVT(); 1837 TargetRegisterClass *RC = NULL; 1838 if (RegVT == MVT::i32) 1839 RC = X86::GR32RegisterClass; 1840 else if (Is64Bit && RegVT == MVT::i64) 1841 RC = X86::GR64RegisterClass; 1842 else if (RegVT == MVT::f32) 1843 RC = X86::FR32RegisterClass; 1844 else if (RegVT == MVT::f64) 1845 RC = X86::FR64RegisterClass; 1846 else if (RegVT.isVector() && RegVT.getSizeInBits() == 256) 1847 RC = X86::VR256RegisterClass; 1848 else if (RegVT.isVector() && RegVT.getSizeInBits() == 128) 1849 RC = X86::VR128RegisterClass; 1850 else if (RegVT == MVT::x86mmx) 1851 RC = X86::VR64RegisterClass; 1852 else 1853 llvm_unreachable("Unknown argument type!"); 1854 1855 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 1856 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 1857 1858 // If this is an 8 or 16-bit value, it is really passed promoted to 32 1859 // bits. Insert an assert[sz]ext to capture this, then truncate to the 1860 // right size. 
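      // For example (sketching the DAG shape, not quoting a test): an i8
      // signext parameter arriving in %edi becomes roughly
      //   CopyFromReg:i32 -> AssertSext:i32 (i8) -> truncate to i8
      // so later combines know the upper bits already hold the sign bits.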
1861 if (VA.getLocInfo() == CCValAssign::SExt) 1862 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, 1863 DAG.getValueType(VA.getValVT())); 1864 else if (VA.getLocInfo() == CCValAssign::ZExt) 1865 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, 1866 DAG.getValueType(VA.getValVT())); 1867 else if (VA.getLocInfo() == CCValAssign::BCvt) 1868 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue); 1869 1870 if (VA.isExtInLoc()) { 1871 // Handle MMX values passed in XMM regs. 1872 if (RegVT.isVector()) { 1873 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), 1874 ArgValue); 1875 } else 1876 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 1877 } 1878 } else { 1879 assert(VA.isMemLoc()); 1880 ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i); 1881 } 1882 1883 // If value is passed via pointer - do a load. 1884 if (VA.getLocInfo() == CCValAssign::Indirect) 1885 ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, 1886 MachinePointerInfo(), false, false, false, 0); 1887 1888 InVals.push_back(ArgValue); 1889 } 1890 1891 // The x86-64 ABI for returning structs by value requires that we copy 1892 // the sret argument into %rax for the return. Save the argument into 1893 // a virtual register so that we can access it from the return points. 1894 if (Is64Bit && MF.getFunction()->hasStructRetAttr()) { 1895 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1896 unsigned Reg = FuncInfo->getSRetReturnReg(); 1897 if (!Reg) { 1898 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64)); 1899 FuncInfo->setSRetReturnReg(Reg); 1900 } 1901 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]); 1902 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain); 1903 } 1904 1905 unsigned StackSize = CCInfo.getNextStackOffset(); 1906 // Align stack specially for tail calls. 1907 if (FuncIsMadeTailCallSafe(CallConv, 1908 MF.getTarget().Options.GuaranteedTailCallOpt)) 1909 StackSize = GetAlignedArgumentStackSize(StackSize, DAG); 1910 1911 // If the function takes variable number of arguments, make a frame index for 1912 // the start of the first vararg value... for expansion of llvm.va_start. 1913 if (isVarArg) { 1914 if (Is64Bit || (CallConv != CallingConv::X86_FastCall && 1915 CallConv != CallingConv::X86_ThisCall)) { 1916 FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize,true)); 1917 } 1918 if (Is64Bit) { 1919 unsigned TotalNumIntRegs = 0, TotalNumXMMRegs = 0; 1920 1921 // FIXME: We should really autogenerate these arrays 1922 static const unsigned GPR64ArgRegsWin64[] = { 1923 X86::RCX, X86::RDX, X86::R8, X86::R9 1924 }; 1925 static const unsigned GPR64ArgRegs64Bit[] = { 1926 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9 1927 }; 1928 static const unsigned XMMArgRegs64Bit[] = { 1929 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 1930 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 1931 }; 1932 const unsigned *GPR64ArgRegs; 1933 unsigned NumXMMRegs = 0; 1934 1935 if (IsWin64) { 1936 // The XMM registers which might contain var arg parameters are shadowed 1937 // in their paired GPR. So we only need to save the GPR to their home 1938 // slots. 
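        // For instance (per the Win64 varargs convention, stated here only
        // as an illustration): a double passed as the second variadic
        // argument is duplicated into %rdx, so va_arg can reload it from
        // RDX's home slot without XMM1 ever being spilled here.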
1939 TotalNumIntRegs = 4; 1940 GPR64ArgRegs = GPR64ArgRegsWin64; 1941 } else { 1942 TotalNumIntRegs = 6; TotalNumXMMRegs = 8; 1943 GPR64ArgRegs = GPR64ArgRegs64Bit; 1944 1945 NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs64Bit, 1946 TotalNumXMMRegs); 1947 } 1948 unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs, 1949 TotalNumIntRegs); 1950 1951 bool NoImplicitFloatOps = Fn->hasFnAttr(Attribute::NoImplicitFloat); 1952 assert(!(NumXMMRegs && !Subtarget->hasSSE1()) && 1953 "SSE register cannot be used when SSE is disabled!"); 1954 assert(!(NumXMMRegs && MF.getTarget().Options.UseSoftFloat && 1955 NoImplicitFloatOps) && 1956 "SSE register cannot be used when SSE is disabled!"); 1957 if (MF.getTarget().Options.UseSoftFloat || NoImplicitFloatOps || 1958 !Subtarget->hasSSE1()) 1959 // Kernel mode asks for SSE to be disabled, so don't push them 1960 // on the stack. 1961 TotalNumXMMRegs = 0; 1962 1963 if (IsWin64) { 1964 const TargetFrameLowering &TFI = *getTargetMachine().getFrameLowering(); 1965 // Get to the caller-allocated home save location. Add 8 to account 1966 // for the return address. 1967 int HomeOffset = TFI.getOffsetOfLocalArea() + 8; 1968 FuncInfo->setRegSaveFrameIndex( 1969 MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false)); 1970 // Fixup to set vararg frame on shadow area (4 x i64). 1971 if (NumIntRegs < 4) 1972 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex()); 1973 } else { 1974 // For X86-64, if there are vararg parameters that are passed via 1975 // registers, then we must store them to their spots on the stack so 1976 // they may be loaded by deferencing the result of va_next. 1977 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8); 1978 FuncInfo->setVarArgsFPOffset(TotalNumIntRegs * 8 + NumXMMRegs * 16); 1979 FuncInfo->setRegSaveFrameIndex( 1980 MFI->CreateStackObject(TotalNumIntRegs * 8 + TotalNumXMMRegs * 16, 16, 1981 false)); 1982 } 1983 1984 // Store the integer parameter registers. 1985 SmallVector<SDValue, 8> MemOps; 1986 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), 1987 getPointerTy()); 1988 unsigned Offset = FuncInfo->getVarArgsGPOffset(); 1989 for (; NumIntRegs != TotalNumIntRegs; ++NumIntRegs) { 1990 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN, 1991 DAG.getIntPtrConstant(Offset)); 1992 unsigned VReg = MF.addLiveIn(GPR64ArgRegs[NumIntRegs], 1993 X86::GR64RegisterClass); 1994 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 1995 SDValue Store = 1996 DAG.getStore(Val.getValue(1), dl, Val, FIN, 1997 MachinePointerInfo::getFixedStack( 1998 FuncInfo->getRegSaveFrameIndex(), Offset), 1999 false, false, 0); 2000 MemOps.push_back(Store); 2001 Offset += 8; 2002 } 2003 2004 if (TotalNumXMMRegs != 0 && NumXMMRegs != TotalNumXMMRegs) { 2005 // Now store the XMM (fp + vector) parameter registers. 
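        // Sketch of what gets built here: a single VASTART_SAVE_XMM_REGS
        // node carrying { chain, %al, reg-save frame index, FP offset,
        // XMM values }, which is later expanded so the XMM stores are
        // guarded by a test of %al, since %al may say no SSE registers
        // were used.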
2006 SmallVector<SDValue, 11> SaveXMMOps; 2007 SaveXMMOps.push_back(Chain); 2008 2009 unsigned AL = MF.addLiveIn(X86::AL, X86::GR8RegisterClass); 2010 SDValue ALVal = DAG.getCopyFromReg(DAG.getEntryNode(), dl, AL, MVT::i8); 2011 SaveXMMOps.push_back(ALVal); 2012 2013 SaveXMMOps.push_back(DAG.getIntPtrConstant( 2014 FuncInfo->getRegSaveFrameIndex())); 2015 SaveXMMOps.push_back(DAG.getIntPtrConstant( 2016 FuncInfo->getVarArgsFPOffset())); 2017 2018 for (; NumXMMRegs != TotalNumXMMRegs; ++NumXMMRegs) { 2019 unsigned VReg = MF.addLiveIn(XMMArgRegs64Bit[NumXMMRegs], 2020 X86::VR128RegisterClass); 2021 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::v4f32); 2022 SaveXMMOps.push_back(Val); 2023 } 2024 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl, 2025 MVT::Other, 2026 &SaveXMMOps[0], SaveXMMOps.size())); 2027 } 2028 2029 if (!MemOps.empty()) 2030 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2031 &MemOps[0], MemOps.size()); 2032 } 2033 } 2034 2035 // Some CCs need callee pop. 2036 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg, 2037 MF.getTarget().Options.GuaranteedTailCallOpt)) { 2038 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything. 2039 } else { 2040 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing. 2041 // If this is an sret function, the return should pop the hidden pointer. 2042 if (!Is64Bit && !IsTailCallConvention(CallConv) && ArgsAreStructReturn(Ins)) 2043 FuncInfo->setBytesToPopOnReturn(4); 2044 } 2045 2046 if (!Is64Bit) { 2047 // RegSaveFrameIndex is X86-64 only. 2048 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA); 2049 if (CallConv == CallingConv::X86_FastCall || 2050 CallConv == CallingConv::X86_ThisCall) 2051 // fastcc functions can't have varargs. 2052 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA); 2053 } 2054 2055 FuncInfo->setArgumentStackSize(StackSize); 2056 2057 return Chain; 2058} 2059 2060SDValue 2061X86TargetLowering::LowerMemOpCallTo(SDValue Chain, 2062 SDValue StackPtr, SDValue Arg, 2063 DebugLoc dl, SelectionDAG &DAG, 2064 const CCValAssign &VA, 2065 ISD::ArgFlagsTy Flags) const { 2066 unsigned LocMemOffset = VA.getLocMemOffset(); 2067 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 2068 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 2069 if (Flags.isByVal()) 2070 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl); 2071 2072 return DAG.getStore(Chain, dl, Arg, PtrOff, 2073 MachinePointerInfo::getStack(LocMemOffset), 2074 false, false, 0); 2075} 2076 2077/// EmitTailCallLoadRetAddr - Emit a load of return address if tail call 2078/// optimization is performed and it is required. 2079SDValue 2080X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG, 2081 SDValue &OutRetAddr, SDValue Chain, 2082 bool IsTailCall, bool Is64Bit, 2083 int FPDiff, DebugLoc dl) const { 2084 // Adjust the Return address stack slot. 2085 EVT VT = getPointerTy(); 2086 OutRetAddr = getReturnAddressFrameIndex(DAG); 2087 2088 // Load the "old" Return address. 2089 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(), 2090 false, false, false, 0); 2091 return SDValue(OutRetAddr.getNode(), 1); 2092} 2093 2094/// EmitTailCallStoreRetAddr - Emit a store of the return address if tail call 2095/// optimization is performed and it is required (FPDiff!=0). 
2096static SDValue 2097EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF, 2098 SDValue Chain, SDValue RetAddrFrIdx, 2099 bool Is64Bit, int FPDiff, DebugLoc dl) { 2100 // Store the return address to the appropriate stack slot. 2101 if (!FPDiff) return Chain; 2102 // Calculate the new stack slot for the return address. 2103 int SlotSize = Is64Bit ? 8 : 4; 2104 int NewReturnAddrFI = 2105 MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize, false); 2106 EVT VT = Is64Bit ? MVT::i64 : MVT::i32; 2107 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT); 2108 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx, 2109 MachinePointerInfo::getFixedStack(NewReturnAddrFI), 2110 false, false, 0); 2111 return Chain; 2112} 2113 2114SDValue 2115X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee, 2116 CallingConv::ID CallConv, bool isVarArg, 2117 bool &isTailCall, 2118 const SmallVectorImpl<ISD::OutputArg> &Outs, 2119 const SmallVectorImpl<SDValue> &OutVals, 2120 const SmallVectorImpl<ISD::InputArg> &Ins, 2121 DebugLoc dl, SelectionDAG &DAG, 2122 SmallVectorImpl<SDValue> &InVals) const { 2123 MachineFunction &MF = DAG.getMachineFunction(); 2124 bool Is64Bit = Subtarget->is64Bit(); 2125 bool IsWin64 = Subtarget->isTargetWin64(); 2126 bool IsStructRet = CallIsStructReturn(Outs); 2127 bool IsSibcall = false; 2128 2129 if (isTailCall) { 2130 // Check if it's really possible to do a tail call. 2131 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, 2132 isVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(), 2133 Outs, OutVals, Ins, DAG); 2134 2135 // Sibcalls are automatically detected tailcalls which do not require 2136 // ABI changes. 2137 if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall) 2138 IsSibcall = true; 2139 2140 if (isTailCall) 2141 ++NumTailCalls; 2142 } 2143 2144 assert(!(isVarArg && IsTailCallConvention(CallConv)) && 2145 "Var args not supported with calling convention fastcc or ghc"); 2146 2147 // Analyze operands of the call, assigning locations to each operand. 2148 SmallVector<CCValAssign, 16> ArgLocs; 2149 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 2150 ArgLocs, *DAG.getContext()); 2151 2152 // Allocate shadow area for Win64 2153 if (IsWin64) { 2154 CCInfo.AllocateStack(32, 8); 2155 } 2156 2157 CCInfo.AnalyzeCallOperands(Outs, CC_X86); 2158 2159 // Get a count of how many bytes are to be pushed on the stack. 2160 unsigned NumBytes = CCInfo.getNextStackOffset(); 2161 if (IsSibcall) 2162 // This is a sibcall. The memory operands are available in caller's 2163 // own caller's stack. 2164 NumBytes = 0; 2165 else if (getTargetMachine().Options.GuaranteedTailCallOpt && 2166 IsTailCallConvention(CallConv)) 2167 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG); 2168 2169 int FPDiff = 0; 2170 if (isTailCall && !IsSibcall) { 2171 // Lower arguments at fp - stackoffset + fpdiff. 2172 unsigned NumBytesCallerPushed = 2173 MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn(); 2174 FPDiff = NumBytesCallerPushed - NumBytes; 2175 2176 // Set the delta of movement of the returnaddr stackslot. 2177 // But only set if delta is greater than previous delta. 2178 if (FPDiff < (MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta())) 2179 MF.getInfo<X86MachineFunctionInfo>()->setTCReturnAddrDelta(FPDiff); 2180 } 2181 2182 if (!IsSibcall) 2183 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true)); 2184 2185 SDValue RetAddrFrIdx; 2186 // Load return address for tail calls. 
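  // Rough numeric example of FPDiff (values invented for illustration): a
  // fastcc caller that pops 8 bytes of its own arguments, tail-calling a
  // function that needs 24 bytes of outgoing arguments, gets
  // FPDiff = 8 - 24 = -16, so the old return address is reloaded here and
  // re-stored 16 bytes lower once the argument area has been grown.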
2187 if (isTailCall && FPDiff) 2188 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall, 2189 Is64Bit, FPDiff, dl); 2190 2191 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 2192 SmallVector<SDValue, 8> MemOpChains; 2193 SDValue StackPtr; 2194 2195 // Walk the register/memloc assignments, inserting copies/loads. In the case 2196 // of tail call optimization arguments are handle later. 2197 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2198 CCValAssign &VA = ArgLocs[i]; 2199 EVT RegVT = VA.getLocVT(); 2200 SDValue Arg = OutVals[i]; 2201 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2202 bool isByVal = Flags.isByVal(); 2203 2204 // Promote the value if needed. 2205 switch (VA.getLocInfo()) { 2206 default: llvm_unreachable("Unknown loc info!"); 2207 case CCValAssign::Full: break; 2208 case CCValAssign::SExt: 2209 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg); 2210 break; 2211 case CCValAssign::ZExt: 2212 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg); 2213 break; 2214 case CCValAssign::AExt: 2215 if (RegVT.isVector() && RegVT.getSizeInBits() == 128) { 2216 // Special case: passing MMX values in XMM registers. 2217 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg); 2218 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg); 2219 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg); 2220 } else 2221 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg); 2222 break; 2223 case CCValAssign::BCvt: 2224 Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg); 2225 break; 2226 case CCValAssign::Indirect: { 2227 // Store the argument. 2228 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT()); 2229 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex(); 2230 Chain = DAG.getStore(Chain, dl, Arg, SpillSlot, 2231 MachinePointerInfo::getFixedStack(FI), 2232 false, false, 0); 2233 Arg = SpillSlot; 2234 break; 2235 } 2236 } 2237 2238 if (VA.isRegLoc()) { 2239 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 2240 if (isVarArg && IsWin64) { 2241 // Win64 ABI requires argument XMM reg to be copied to the corresponding 2242 // shadow reg if callee is a varargs function. 2243 unsigned ShadowReg = 0; 2244 switch (VA.getLocReg()) { 2245 case X86::XMM0: ShadowReg = X86::RCX; break; 2246 case X86::XMM1: ShadowReg = X86::RDX; break; 2247 case X86::XMM2: ShadowReg = X86::R8; break; 2248 case X86::XMM3: ShadowReg = X86::R9; break; 2249 } 2250 if (ShadowReg) 2251 RegsToPass.push_back(std::make_pair(ShadowReg, Arg)); 2252 } 2253 } else if (!IsSibcall && (!isTailCall || isByVal)) { 2254 assert(VA.isMemLoc()); 2255 if (StackPtr.getNode() == 0) 2256 StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr, getPointerTy()); 2257 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg, 2258 dl, DAG, VA, Flags)); 2259 } 2260 } 2261 2262 if (!MemOpChains.empty()) 2263 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2264 &MemOpChains[0], MemOpChains.size()); 2265 2266 // Build a sequence of copy-to-reg nodes chained together with token chain 2267 // and flag operands which copy the outgoing args into registers. 2268 SDValue InFlag; 2269 // Tail call byval lowering might overwrite argument registers so in case of 2270 // tail call optimization the copies to registers are lowered later. 
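  // (For example, expanding a byval memcpy into the outgoing slots may
  // clobber registers such as %ecx or %esi that also carry outgoing
  // arguments, so for tail calls the CopyToReg nodes are emitted only after
  // the stack stores, further down.)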
2271 if (!isTailCall) 2272 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 2273 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 2274 RegsToPass[i].second, InFlag); 2275 InFlag = Chain.getValue(1); 2276 } 2277 2278 if (Subtarget->isPICStyleGOT()) { 2279 // ELF / PIC requires GOT in the EBX register before function calls via PLT 2280 // GOT pointer. 2281 if (!isTailCall) { 2282 Chain = DAG.getCopyToReg(Chain, dl, X86::EBX, 2283 DAG.getNode(X86ISD::GlobalBaseReg, 2284 DebugLoc(), getPointerTy()), 2285 InFlag); 2286 InFlag = Chain.getValue(1); 2287 } else { 2288 // If we are tail calling and generating PIC/GOT style code load the 2289 // address of the callee into ECX. The value in ecx is used as target of 2290 // the tail jump. This is done to circumvent the ebx/callee-saved problem 2291 // for tail calls on PIC/GOT architectures. Normally we would just put the 2292 // address of GOT into ebx and then call target@PLT. But for tail calls 2293 // ebx would be restored (since ebx is callee saved) before jumping to the 2294 // target@PLT. 2295 2296 // Note: The actual moving to ECX is done further down. 2297 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee); 2298 if (G && !G->getGlobal()->hasHiddenVisibility() && 2299 !G->getGlobal()->hasProtectedVisibility()) 2300 Callee = LowerGlobalAddress(Callee, DAG); 2301 else if (isa<ExternalSymbolSDNode>(Callee)) 2302 Callee = LowerExternalSymbol(Callee, DAG); 2303 } 2304 } 2305 2306 if (Is64Bit && isVarArg && !IsWin64) { 2307 // From AMD64 ABI document: 2308 // For calls that may call functions that use varargs or stdargs 2309 // (prototype-less calls or calls to functions containing ellipsis (...) in 2310 // the declaration) %al is used as hidden argument to specify the number 2311 // of SSE registers used. The contents of %al do not need to match exactly 2312 // the number of registers, but must be an ubound on the number of SSE 2313 // registers used and is in the range 0 - 8 inclusive. 2314 2315 // Count the number of XMM registers allocated. 2316 static const unsigned XMMArgRegs[] = { 2317 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 2318 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 2319 }; 2320 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8); 2321 assert((Subtarget->hasSSE1() || !NumXMMRegs) 2322 && "SSE registers cannot be used when SSE is disabled"); 2323 2324 Chain = DAG.getCopyToReg(Chain, dl, X86::AL, 2325 DAG.getConstant(NumXMMRegs, MVT::i8), InFlag); 2326 InFlag = Chain.getValue(1); 2327 } 2328 2329 2330 // For tail calls lower the arguments to the 'real' stack slot. 2331 if (isTailCall) { 2332 // Force all the incoming stack arguments to be loaded from the stack 2333 // before any new outgoing arguments are stored to the stack, because the 2334 // outgoing stack slots may alias the incoming argument stack slots, and 2335 // the alias isn't otherwise explicit. This is slightly more conservative 2336 // than necessary, because it means that each store effectively depends 2337 // on every argument instead of just those arguments it would clobber. 2338 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain); 2339 2340 SmallVector<SDValue, 8> MemOpChains2; 2341 SDValue FIN; 2342 int FI = 0; 2343 // Do not flag preceding copytoreg stuff together with the following stuff. 
2344 InFlag = SDValue(); 2345 if (getTargetMachine().Options.GuaranteedTailCallOpt) { 2346 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2347 CCValAssign &VA = ArgLocs[i]; 2348 if (VA.isRegLoc()) 2349 continue; 2350 assert(VA.isMemLoc()); 2351 SDValue Arg = OutVals[i]; 2352 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2353 // Create frame index. 2354 int32_t Offset = VA.getLocMemOffset()+FPDiff; 2355 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8; 2356 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true); 2357 FIN = DAG.getFrameIndex(FI, getPointerTy()); 2358 2359 if (Flags.isByVal()) { 2360 // Copy relative to framepointer. 2361 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset()); 2362 if (StackPtr.getNode() == 0) 2363 StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr, 2364 getPointerTy()); 2365 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source); 2366 2367 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN, 2368 ArgChain, 2369 Flags, DAG, dl)); 2370 } else { 2371 // Store relative to framepointer. 2372 MemOpChains2.push_back( 2373 DAG.getStore(ArgChain, dl, Arg, FIN, 2374 MachinePointerInfo::getFixedStack(FI), 2375 false, false, 0)); 2376 } 2377 } 2378 } 2379 2380 if (!MemOpChains2.empty()) 2381 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2382 &MemOpChains2[0], MemOpChains2.size()); 2383 2384 // Copy arguments to their registers. 2385 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 2386 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 2387 RegsToPass[i].second, InFlag); 2388 InFlag = Chain.getValue(1); 2389 } 2390 InFlag =SDValue(); 2391 2392 // Store the return address to the appropriate stack slot. 2393 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx, Is64Bit, 2394 FPDiff, dl); 2395 } 2396 2397 if (getTargetMachine().getCodeModel() == CodeModel::Large) { 2398 assert(Is64Bit && "Large code model is only legal in 64-bit mode."); 2399 // In the 64-bit large code model, we have to make all calls 2400 // through a register, since the call instruction's 32-bit 2401 // pc-relative offset may not be large enough to hold the whole 2402 // address. 2403 } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 2404 // If the callee is a GlobalAddress node (quite common, every direct call 2405 // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack 2406 // it. 2407 2408 // We should use extra load for direct calls to dllimported functions in 2409 // non-JIT mode. 2410 const GlobalValue *GV = G->getGlobal(); 2411 if (!GV->hasDLLImportLinkage()) { 2412 unsigned char OpFlags = 0; 2413 bool ExtraLoad = false; 2414 unsigned WrapperKind = ISD::DELETED_NODE; 2415 2416 // On ELF targets, in both X86-64 and X86-32 mode, direct calls to 2417 // external symbols most go through the PLT in PIC mode. If the symbol 2418 // has hidden or protected visibility, or if it is static or local, then 2419 // we don't need to use the PLT - we can directly call it. 
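      // Roughly, the three cases below correspond to emitting
      //   call foo@PLT              (ELF PIC),
      //   call L_foo$stub           (older Darwin linkers), or
      //   call *foo@GOTPCREL(%rip)  (nonlazybind on x86-64),
      // the last trading lazy binding for an indirect call through the GOT.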
2420 if (Subtarget->isTargetELF() && 2421 getTargetMachine().getRelocationModel() == Reloc::PIC_ && 2422 GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) { 2423 OpFlags = X86II::MO_PLT; 2424 } else if (Subtarget->isPICStyleStubAny() && 2425 (GV->isDeclaration() || GV->isWeakForLinker()) && 2426 (!Subtarget->getTargetTriple().isMacOSX() || 2427 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) { 2428 // PC-relative references to external symbols should go through $stub, 2429 // unless we're building with the leopard linker or later, which 2430 // automatically synthesizes these stubs. 2431 OpFlags = X86II::MO_DARWIN_STUB; 2432 } else if (Subtarget->isPICStyleRIPRel() && 2433 isa<Function>(GV) && 2434 cast<Function>(GV)->hasFnAttr(Attribute::NonLazyBind)) { 2435 // If the function is marked as non-lazy, generate an indirect call 2436 // which loads from the GOT directly. This avoids runtime overhead 2437 // at the cost of eager binding (and one extra byte of encoding). 2438 OpFlags = X86II::MO_GOTPCREL; 2439 WrapperKind = X86ISD::WrapperRIP; 2440 ExtraLoad = true; 2441 } 2442 2443 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 2444 G->getOffset(), OpFlags); 2445 2446 // Add a wrapper if needed. 2447 if (WrapperKind != ISD::DELETED_NODE) 2448 Callee = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Callee); 2449 // Add extra indirection if needed. 2450 if (ExtraLoad) 2451 Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee, 2452 MachinePointerInfo::getGOT(), 2453 false, false, false, 0); 2454 } 2455 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 2456 unsigned char OpFlags = 0; 2457 2458 // On ELF targets, in either X86-64 or X86-32 mode, direct calls to 2459 // external symbols should go through the PLT. 2460 if (Subtarget->isTargetELF() && 2461 getTargetMachine().getRelocationModel() == Reloc::PIC_) { 2462 OpFlags = X86II::MO_PLT; 2463 } else if (Subtarget->isPICStyleStubAny() && 2464 (!Subtarget->getTargetTriple().isMacOSX() || 2465 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) { 2466 // PC-relative references to external symbols should go through $stub, 2467 // unless we're building with the leopard linker or later, which 2468 // automatically synthesizes these stubs. 2469 OpFlags = X86II::MO_DARWIN_STUB; 2470 } 2471 2472 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(), 2473 OpFlags); 2474 } 2475 2476 // Returns a chain & a flag for retval copy to use. 2477 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 2478 SmallVector<SDValue, 8> Ops; 2479 2480 if (!IsSibcall && isTailCall) { 2481 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 2482 DAG.getIntPtrConstant(0, true), InFlag); 2483 InFlag = Chain.getValue(1); 2484 } 2485 2486 Ops.push_back(Chain); 2487 Ops.push_back(Callee); 2488 2489 if (isTailCall) 2490 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32)); 2491 2492 // Add argument registers to the end of the list so that they are known live 2493 // into the call. 2494 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 2495 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 2496 RegsToPass[i].second.getValueType())); 2497 2498 // Add an implicit use GOT pointer in EBX. 2499 if (!isTailCall && Subtarget->isPICStyleGOT()) 2500 Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy())); 2501 2502 // Add an implicit use of AL for non-Windows x86 64-bit vararg functions. 
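  // (As a concrete illustration: a call like printf("%f\n", x) is preceded
  // by "movb $1, %al" because one SSE register carries arguments; the value
  // only has to be an upper bound, per the copy to %al emitted earlier.)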
2503 if (Is64Bit && isVarArg && !IsWin64) 2504 Ops.push_back(DAG.getRegister(X86::AL, MVT::i8)); 2505 2506 if (InFlag.getNode()) 2507 Ops.push_back(InFlag); 2508 2509 if (isTailCall) { 2510 // We used to do: 2511 //// If this is the first return lowered for this function, add the regs 2512 //// to the liveout set for the function. 2513 // This isn't right, although it's probably harmless on x86; liveouts 2514 // should be computed from returns not tail calls. Consider a void 2515 // function making a tail call to a function returning int. 2516 return DAG.getNode(X86ISD::TC_RETURN, dl, 2517 NodeTys, &Ops[0], Ops.size()); 2518 } 2519 2520 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, &Ops[0], Ops.size()); 2521 InFlag = Chain.getValue(1); 2522 2523 // Create the CALLSEQ_END node. 2524 unsigned NumBytesForCalleeToPush; 2525 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg, 2526 getTargetMachine().Options.GuaranteedTailCallOpt)) 2527 NumBytesForCalleeToPush = NumBytes; // Callee pops everything 2528 else if (!Is64Bit && !IsTailCallConvention(CallConv) && IsStructRet) 2529 // If this is a call to a struct-return function, the callee 2530 // pops the hidden struct pointer, so we have to push it back. 2531 // This is common for Darwin/X86, Linux & Mingw32 targets. 2532 NumBytesForCalleeToPush = 4; 2533 else 2534 NumBytesForCalleeToPush = 0; // Callee pops nothing. 2535 2536 // Returns a flag for retval copy to use. 2537 if (!IsSibcall) { 2538 Chain = DAG.getCALLSEQ_END(Chain, 2539 DAG.getIntPtrConstant(NumBytes, true), 2540 DAG.getIntPtrConstant(NumBytesForCalleeToPush, 2541 true), 2542 InFlag); 2543 InFlag = Chain.getValue(1); 2544 } 2545 2546 // Handle result values, copying them out of physregs into vregs that we 2547 // return. 2548 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, 2549 Ins, dl, DAG, InVals); 2550} 2551 2552 2553//===----------------------------------------------------------------------===// 2554// Fast Calling Convention (tail call) implementation 2555//===----------------------------------------------------------------------===// 2556 2557// Like std call, callee cleans arguments, convention except that ECX is 2558// reserved for storing the tail called function address. Only 2 registers are 2559// free for argument passing (inreg). Tail call optimization is performed 2560// provided: 2561// * tailcallopt is enabled 2562// * caller/callee are fastcc 2563// On X86_64 architecture with GOT-style position independent code only local 2564// (within module) calls are supported at the moment. 2565// To keep the stack aligned according to platform abi the function 2566// GetAlignedArgumentStackSize ensures that argument delta is always multiples 2567// of stack alignment. (Dynamic linkers need this - darwin's dyld for example) 2568// If a tail called function callee has more arguments than the caller the 2569// caller needs to make sure that there is room to move the RETADDR to. This is 2570// achieved by reserving an area the size of the argument delta right after the 2571// original REtADDR, but before the saved framepointer or the spilled registers 2572// e.g. caller(arg1, arg2) calls callee(arg1, arg2,arg3,arg4) 2573// stack layout: 2574// arg1 2575// arg2 2576// RETADDR 2577// [ new RETADDR 2578// move area ] 2579// (possible EBP) 2580// ESI 2581// EDI 2582// local1 .. 2583 2584/// GetAlignedArgumentStackSize - Make the stack size align e.g 16n + 12 aligned 2585/// for a 16 byte align requirement. 
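/// For instance, with a 16-byte stack alignment and a 4-byte return-address
/// slot (32-bit), an argument size of 20 is padded to 28 and a size of 30
/// becomes 44, both of the form 16n + 12, so pushing the return address
/// afterwards leaves the stack 16-byte aligned.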
2586unsigned 2587X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize, 2588 SelectionDAG& DAG) const { 2589 MachineFunction &MF = DAG.getMachineFunction(); 2590 const TargetMachine &TM = MF.getTarget(); 2591 const TargetFrameLowering &TFI = *TM.getFrameLowering(); 2592 unsigned StackAlignment = TFI.getStackAlignment(); 2593 uint64_t AlignMask = StackAlignment - 1; 2594 int64_t Offset = StackSize; 2595 uint64_t SlotSize = TD->getPointerSize(); 2596 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) { 2597 // Number smaller than 12 so just add the difference. 2598 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask)); 2599 } else { 2600 // Mask out lower bits, add stackalignment once plus the 12 bytes. 2601 Offset = ((~AlignMask) & Offset) + StackAlignment + 2602 (StackAlignment-SlotSize); 2603 } 2604 return Offset; 2605} 2606 2607/// MatchingStackOffset - Return true if the given stack call argument is 2608/// already available in the same position (relatively) of the caller's 2609/// incoming argument stack. 2610static 2611bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, 2612 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI, 2613 const X86InstrInfo *TII) { 2614 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8; 2615 int FI = INT_MAX; 2616 if (Arg.getOpcode() == ISD::CopyFromReg) { 2617 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg(); 2618 if (!TargetRegisterInfo::isVirtualRegister(VR)) 2619 return false; 2620 MachineInstr *Def = MRI->getVRegDef(VR); 2621 if (!Def) 2622 return false; 2623 if (!Flags.isByVal()) { 2624 if (!TII->isLoadFromStackSlot(Def, FI)) 2625 return false; 2626 } else { 2627 unsigned Opcode = Def->getOpcode(); 2628 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r) && 2629 Def->getOperand(1).isFI()) { 2630 FI = Def->getOperand(1).getIndex(); 2631 Bytes = Flags.getByValSize(); 2632 } else 2633 return false; 2634 } 2635 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) { 2636 if (Flags.isByVal()) 2637 // ByVal argument is passed in as a pointer but it's now being 2638 // dereferenced. e.g. 2639 // define @foo(%struct.X* %A) { 2640 // tail call @bar(%struct.X* byval %A) 2641 // } 2642 return false; 2643 SDValue Ptr = Ld->getBasePtr(); 2644 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr); 2645 if (!FINode) 2646 return false; 2647 FI = FINode->getIndex(); 2648 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) { 2649 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg); 2650 FI = FINode->getIndex(); 2651 Bytes = Flags.getByValSize(); 2652 } else 2653 return false; 2654 2655 assert(FI != INT_MAX); 2656 if (!MFI->isFixedObjectIndex(FI)) 2657 return false; 2658 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI); 2659} 2660 2661/// IsEligibleForTailCallOptimization - Check whether the call is eligible 2662/// for tail call optimization. Targets which want to do tail call 2663/// optimization should implement this function. 
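/// A typical case that passes these checks (hypothetical source, shown only
/// as an illustration):
///   int f(int x) { return g(x); }
/// with both functions using the C calling convention, no struct return, no
/// dynamic stack realignment, and the argument already in the right stack
/// slot, so the call can be emitted as a sibcall ("jmp g") even without
/// -tailcallopt.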
2664bool 2665X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 2666 CallingConv::ID CalleeCC, 2667 bool isVarArg, 2668 bool isCalleeStructRet, 2669 bool isCallerStructRet, 2670 const SmallVectorImpl<ISD::OutputArg> &Outs, 2671 const SmallVectorImpl<SDValue> &OutVals, 2672 const SmallVectorImpl<ISD::InputArg> &Ins, 2673 SelectionDAG& DAG) const { 2674 if (!IsTailCallConvention(CalleeCC) && 2675 CalleeCC != CallingConv::C) 2676 return false; 2677 2678 // If -tailcallopt is specified, make fastcc functions tail-callable. 2679 const MachineFunction &MF = DAG.getMachineFunction(); 2680 const Function *CallerF = DAG.getMachineFunction().getFunction(); 2681 CallingConv::ID CallerCC = CallerF->getCallingConv(); 2682 bool CCMatch = CallerCC == CalleeCC; 2683 2684 if (getTargetMachine().Options.GuaranteedTailCallOpt) { 2685 if (IsTailCallConvention(CalleeCC) && CCMatch) 2686 return true; 2687 return false; 2688 } 2689 2690 // Look for obvious safe cases to perform tail call optimization that do not 2691 // require ABI changes. This is what gcc calls sibcall. 2692 2693 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to 2694 // emit a special epilogue. 2695 if (RegInfo->needsStackRealignment(MF)) 2696 return false; 2697 2698 // Also avoid sibcall optimization if either caller or callee uses struct 2699 // return semantics. 2700 if (isCalleeStructRet || isCallerStructRet) 2701 return false; 2702 2703 // An stdcall caller is expected to clean up its arguments; the callee 2704 // isn't going to do that. 2705 if (!CCMatch && CallerCC==CallingConv::X86_StdCall) 2706 return false; 2707 2708 // Do not sibcall optimize vararg calls unless all arguments are passed via 2709 // registers. 2710 if (isVarArg && !Outs.empty()) { 2711 2712 // Optimizing for varargs on Win64 is unlikely to be safe without 2713 // additional testing. 2714 if (Subtarget->isTargetWin64()) 2715 return false; 2716 2717 SmallVector<CCValAssign, 16> ArgLocs; 2718 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), 2719 getTargetMachine(), ArgLocs, *DAG.getContext()); 2720 2721 CCInfo.AnalyzeCallOperands(Outs, CC_X86); 2722 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) 2723 if (!ArgLocs[i].isRegLoc()) 2724 return false; 2725 } 2726 2727 // If the call result is in ST0 / ST1, it needs to be popped off the x87 2728 // stack. Therefore, if it's not used by the call it is not safe to optimize 2729 // this into a sibcall. 2730 bool Unused = false; 2731 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { 2732 if (!Ins[i].Used) { 2733 Unused = true; 2734 break; 2735 } 2736 } 2737 if (Unused) { 2738 SmallVector<CCValAssign, 16> RVLocs; 2739 CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), 2740 getTargetMachine(), RVLocs, *DAG.getContext()); 2741 CCInfo.AnalyzeCallResult(Ins, RetCC_X86); 2742 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 2743 CCValAssign &VA = RVLocs[i]; 2744 if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) 2745 return false; 2746 } 2747 } 2748 2749 // If the calling conventions do not match, then we'd better make sure the 2750 // results are returned in the same way as what the caller expects. 
2751 if (!CCMatch) { 2752 SmallVector<CCValAssign, 16> RVLocs1; 2753 CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), 2754 getTargetMachine(), RVLocs1, *DAG.getContext()); 2755 CCInfo1.AnalyzeCallResult(Ins, RetCC_X86); 2756 2757 SmallVector<CCValAssign, 16> RVLocs2; 2758 CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), 2759 getTargetMachine(), RVLocs2, *DAG.getContext()); 2760 CCInfo2.AnalyzeCallResult(Ins, RetCC_X86); 2761 2762 if (RVLocs1.size() != RVLocs2.size()) 2763 return false; 2764 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) { 2765 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc()) 2766 return false; 2767 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo()) 2768 return false; 2769 if (RVLocs1[i].isRegLoc()) { 2770 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg()) 2771 return false; 2772 } else { 2773 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset()) 2774 return false; 2775 } 2776 } 2777 } 2778 2779 // If the callee takes no arguments then go on to check the results of the 2780 // call. 2781 if (!Outs.empty()) { 2782 // Check if stack adjustment is needed. For now, do not do this if any 2783 // argument is passed on the stack. 2784 SmallVector<CCValAssign, 16> ArgLocs; 2785 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), 2786 getTargetMachine(), ArgLocs, *DAG.getContext()); 2787 2788 // Allocate shadow area for Win64 2789 if (Subtarget->isTargetWin64()) { 2790 CCInfo.AllocateStack(32, 8); 2791 } 2792 2793 CCInfo.AnalyzeCallOperands(Outs, CC_X86); 2794 if (CCInfo.getNextStackOffset()) { 2795 MachineFunction &MF = DAG.getMachineFunction(); 2796 if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn()) 2797 return false; 2798 2799 // Check if the arguments are already laid out in the right way as 2800 // the caller's fixed stack objects. 2801 MachineFrameInfo *MFI = MF.getFrameInfo(); 2802 const MachineRegisterInfo *MRI = &MF.getRegInfo(); 2803 const X86InstrInfo *TII = 2804 ((X86TargetMachine&)getTargetMachine()).getInstrInfo(); 2805 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2806 CCValAssign &VA = ArgLocs[i]; 2807 SDValue Arg = OutVals[i]; 2808 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2809 if (VA.getLocInfo() == CCValAssign::Indirect) 2810 return false; 2811 if (!VA.isRegLoc()) { 2812 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, 2813 MFI, MRI, TII)) 2814 return false; 2815 } 2816 } 2817 } 2818 2819 // If the tailcall address may be in a register, then make sure it's 2820 // possible to register allocate for it. In 32-bit, the call address can 2821 // only target EAX, EDX, or ECX since the tail call must be scheduled after 2822 // callee-saved registers are restored. These happen to be the same 2823 // registers used to pass 'inreg' arguments so watch out for those. 
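    // (For example, a 32-bit indirect tail call whose three arguments are
    // all marked 'inreg' would occupy EAX, ECX and EDX, leaving no legal
    // register for the call target, so the loop below rejects the sibcall
    // once the third such register is seen.)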
2824 if (!Subtarget->is64Bit() && 2825 !isa<GlobalAddressSDNode>(Callee) && 2826 !isa<ExternalSymbolSDNode>(Callee)) { 2827 unsigned NumInRegs = 0; 2828 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2829 CCValAssign &VA = ArgLocs[i]; 2830 if (!VA.isRegLoc()) 2831 continue; 2832 unsigned Reg = VA.getLocReg(); 2833 switch (Reg) { 2834 default: break; 2835 case X86::EAX: case X86::EDX: case X86::ECX: 2836 if (++NumInRegs == 3) 2837 return false; 2838 break; 2839 } 2840 } 2841 } 2842 } 2843 2844 return true; 2845} 2846 2847FastISel * 2848X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo) const { 2849 return X86::createFastISel(funcInfo); 2850} 2851 2852 2853//===----------------------------------------------------------------------===// 2854// Other Lowering Hooks 2855//===----------------------------------------------------------------------===// 2856 2857static bool MayFoldLoad(SDValue Op) { 2858 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode()); 2859} 2860 2861static bool MayFoldIntoStore(SDValue Op) { 2862 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin()); 2863} 2864 2865static bool isTargetShuffle(unsigned Opcode) { 2866 switch(Opcode) { 2867 default: return false; 2868 case X86ISD::PSHUFD: 2869 case X86ISD::PSHUFHW: 2870 case X86ISD::PSHUFLW: 2871 case X86ISD::SHUFP: 2872 case X86ISD::PALIGN: 2873 case X86ISD::MOVLHPS: 2874 case X86ISD::MOVLHPD: 2875 case X86ISD::MOVHLPS: 2876 case X86ISD::MOVLPS: 2877 case X86ISD::MOVLPD: 2878 case X86ISD::MOVSHDUP: 2879 case X86ISD::MOVSLDUP: 2880 case X86ISD::MOVDDUP: 2881 case X86ISD::MOVSS: 2882 case X86ISD::MOVSD: 2883 case X86ISD::UNPCKL: 2884 case X86ISD::UNPCKH: 2885 case X86ISD::VPERMILP: 2886 case X86ISD::VPERM2X128: 2887 return true; 2888 } 2889 return false; 2890} 2891 2892static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, 2893 SDValue V1, SelectionDAG &DAG) { 2894 switch(Opc) { 2895 default: llvm_unreachable("Unknown x86 shuffle node"); 2896 case X86ISD::MOVSHDUP: 2897 case X86ISD::MOVSLDUP: 2898 case X86ISD::MOVDDUP: 2899 return DAG.getNode(Opc, dl, VT, V1); 2900 } 2901 2902 return SDValue(); 2903} 2904 2905static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, 2906 SDValue V1, unsigned TargetMask, SelectionDAG &DAG) { 2907 switch(Opc) { 2908 default: llvm_unreachable("Unknown x86 shuffle node"); 2909 case X86ISD::PSHUFD: 2910 case X86ISD::PSHUFHW: 2911 case X86ISD::PSHUFLW: 2912 case X86ISD::VPERMILP: 2913 return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8)); 2914 } 2915 2916 return SDValue(); 2917} 2918 2919static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, 2920 SDValue V1, SDValue V2, unsigned TargetMask, SelectionDAG &DAG) { 2921 switch(Opc) { 2922 default: llvm_unreachable("Unknown x86 shuffle node"); 2923 case X86ISD::PALIGN: 2924 case X86ISD::SHUFP: 2925 case X86ISD::VPERM2X128: 2926 return DAG.getNode(Opc, dl, VT, V1, V2, 2927 DAG.getConstant(TargetMask, MVT::i8)); 2928 } 2929 return SDValue(); 2930} 2931 2932static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, 2933 SDValue V1, SDValue V2, SelectionDAG &DAG) { 2934 switch(Opc) { 2935 default: llvm_unreachable("Unknown x86 shuffle node"); 2936 case X86ISD::MOVLHPS: 2937 case X86ISD::MOVLHPD: 2938 case X86ISD::MOVHLPS: 2939 case X86ISD::MOVLPS: 2940 case X86ISD::MOVLPD: 2941 case X86ISD::MOVSS: 2942 case X86ISD::MOVSD: 2943 case X86ISD::UNPCKL: 2944 case X86ISD::UNPCKH: 2945 return DAG.getNode(Opc, dl, VT, V1, V2); 2946 } 2947 return 
SDValue(); 2948} 2949 2950SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const { 2951 MachineFunction &MF = DAG.getMachineFunction(); 2952 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 2953 int ReturnAddrIndex = FuncInfo->getRAIndex(); 2954 2955 if (ReturnAddrIndex == 0) { 2956 // Set up a frame object for the return address. 2957 uint64_t SlotSize = TD->getPointerSize(); 2958 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize, -SlotSize, 2959 false); 2960 FuncInfo->setRAIndex(ReturnAddrIndex); 2961 } 2962 2963 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy()); 2964} 2965 2966 2967bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M, 2968 bool hasSymbolicDisplacement) { 2969 // Offset should fit into 32 bit immediate field. 2970 if (!isInt<32>(Offset)) 2971 return false; 2972 2973 // If we don't have a symbolic displacement - we don't have any extra 2974 // restrictions. 2975 if (!hasSymbolicDisplacement) 2976 return true; 2977 2978 // FIXME: Some tweaks might be needed for medium code model. 2979 if (M != CodeModel::Small && M != CodeModel::Kernel) 2980 return false; 2981 2982 // For small code model we assume that latest object is 16MB before end of 31 2983 // bits boundary. We may also accept pretty large negative constants knowing 2984 // that all objects are in the positive half of address space. 2985 if (M == CodeModel::Small && Offset < 16*1024*1024) 2986 return true; 2987 2988 // For kernel code model we know that all object resist in the negative half 2989 // of 32bits address space. We may not accept negative offsets, since they may 2990 // be just off and we may accept pretty large positive ones. 2991 if (M == CodeModel::Kernel && Offset > 0) 2992 return true; 2993 2994 return false; 2995} 2996 2997/// isCalleePop - Determines whether the callee is required to pop its 2998/// own arguments. Callee pop is necessary to support tail calls. 2999bool X86::isCalleePop(CallingConv::ID CallingConv, 3000 bool is64Bit, bool IsVarArg, bool TailCallOpt) { 3001 if (IsVarArg) 3002 return false; 3003 3004 switch (CallingConv) { 3005 default: 3006 return false; 3007 case CallingConv::X86_StdCall: 3008 return !is64Bit; 3009 case CallingConv::X86_FastCall: 3010 return !is64Bit; 3011 case CallingConv::X86_ThisCall: 3012 return !is64Bit; 3013 case CallingConv::Fast: 3014 return TailCallOpt; 3015 case CallingConv::GHC: 3016 return TailCallOpt; 3017 } 3018} 3019 3020/// TranslateX86CC - do a one to one translation of a ISD::CondCode to the X86 3021/// specific condition code, returning the condition code and the LHS/RHS of the 3022/// comparison to make. 3023static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP, 3024 SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) { 3025 if (!isFP) { 3026 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) { 3027 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) { 3028 // X > -1 -> X == 0, jump !sign. 3029 RHS = DAG.getConstant(0, RHS.getValueType()); 3030 return X86::COND_NS; 3031 } else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) { 3032 // X < 0 -> X == 0, jump on sign. 
3033 return X86::COND_S; 3034 } else if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) { 3035 // X < 1 -> X <= 0 3036 RHS = DAG.getConstant(0, RHS.getValueType()); 3037 return X86::COND_LE; 3038 } 3039 } 3040 3041 switch (SetCCOpcode) { 3042 default: llvm_unreachable("Invalid integer condition!"); 3043 case ISD::SETEQ: return X86::COND_E; 3044 case ISD::SETGT: return X86::COND_G; 3045 case ISD::SETGE: return X86::COND_GE; 3046 case ISD::SETLT: return X86::COND_L; 3047 case ISD::SETLE: return X86::COND_LE; 3048 case ISD::SETNE: return X86::COND_NE; 3049 case ISD::SETULT: return X86::COND_B; 3050 case ISD::SETUGT: return X86::COND_A; 3051 case ISD::SETULE: return X86::COND_BE; 3052 case ISD::SETUGE: return X86::COND_AE; 3053 } 3054 } 3055 3056 // First determine if it is required or is profitable to flip the operands. 3057 3058 // If LHS is a foldable load, but RHS is not, flip the condition. 3059 if (ISD::isNON_EXTLoad(LHS.getNode()) && 3060 !ISD::isNON_EXTLoad(RHS.getNode())) { 3061 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode); 3062 std::swap(LHS, RHS); 3063 } 3064 3065 switch (SetCCOpcode) { 3066 default: break; 3067 case ISD::SETOLT: 3068 case ISD::SETOLE: 3069 case ISD::SETUGT: 3070 case ISD::SETUGE: 3071 std::swap(LHS, RHS); 3072 break; 3073 } 3074 3075 // On a floating point condition, the flags are set as follows: 3076 // ZF PF CF op 3077 // 0 | 0 | 0 | X > Y 3078 // 0 | 0 | 1 | X < Y 3079 // 1 | 0 | 0 | X == Y 3080 // 1 | 1 | 1 | unordered 3081 switch (SetCCOpcode) { 3082 default: llvm_unreachable("Condcode should be pre-legalized away"); 3083 case ISD::SETUEQ: 3084 case ISD::SETEQ: return X86::COND_E; 3085 case ISD::SETOLT: // flipped 3086 case ISD::SETOGT: 3087 case ISD::SETGT: return X86::COND_A; 3088 case ISD::SETOLE: // flipped 3089 case ISD::SETOGE: 3090 case ISD::SETGE: return X86::COND_AE; 3091 case ISD::SETUGT: // flipped 3092 case ISD::SETULT: 3093 case ISD::SETLT: return X86::COND_B; 3094 case ISD::SETUGE: // flipped 3095 case ISD::SETULE: 3096 case ISD::SETLE: return X86::COND_BE; 3097 case ISD::SETONE: 3098 case ISD::SETNE: return X86::COND_NE; 3099 case ISD::SETUO: return X86::COND_P; 3100 case ISD::SETO: return X86::COND_NP; 3101 case ISD::SETOEQ: 3102 case ISD::SETUNE: return X86::COND_INVALID; 3103 } 3104} 3105 3106/// hasFPCMov - is there a floating point cmov for the specific X86 condition 3107/// code. Current x86 isa includes the following FP cmov instructions: 3108/// fcmovb, fcomvbe, fcomve, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu. 3109static bool hasFPCMov(unsigned X86CC) { 3110 switch (X86CC) { 3111 default: 3112 return false; 3113 case X86::COND_B: 3114 case X86::COND_BE: 3115 case X86::COND_E: 3116 case X86::COND_P: 3117 case X86::COND_A: 3118 case X86::COND_AE: 3119 case X86::COND_NE: 3120 case X86::COND_NP: 3121 return true; 3122 } 3123} 3124 3125/// isFPImmLegal - Returns true if the target can instruction select the 3126/// specified FP immediate natively. If false, the legalizer will 3127/// materialize the FP immediate as a load from a constant pool. 3128bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { 3129 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) { 3130 if (Imm.bitwiseIsEqual(LegalFPImmediates[i])) 3131 return true; 3132 } 3133 return false; 3134} 3135 3136/// isUndefOrInRange - Return true if Val is undef or if its value falls within 3137/// the specified range (L, H]. 
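/// For example, with Low = 0 and Hi = 4, the values -1 (undef), 0 and 3 are
/// accepted while 4 is rejected, i.e. the check is against the half-open
/// range [Low, Hi).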
static bool isUndefOrInRange(int Val, int Low, int Hi) {
  return (Val < 0) || (Val >= Low && Val < Hi);
}

/// isUndefOrInRange - Return true if every element in Mask, beginning
/// at position Pos and ending at Pos+Size, falls within the specified
/// range [Low, Hi) or is undef.
static bool isUndefOrInRange(const SmallVectorImpl<int> &Mask,
                             int Pos, int Size, int Low, int Hi) {
  for (int i = Pos, e = Pos+Size; i != e; ++i)
    if (!isUndefOrInRange(Mask[i], Low, Hi))
      return false;
  return true;
}

/// isUndefOrEqual - Val is either less than zero (undef) or equal to the
/// specified value.
static bool isUndefOrEqual(int Val, int CmpVal) {
  if (Val < 0 || Val == CmpVal)
    return true;
  return false;
}

/// isSequentialOrUndefInRange - Return true if every element in Mask, beginning
/// at position Pos and ending at Pos+Size, matches the sequential values
/// Low, Low+1, Low+2, ... in order, or is undef.
static bool isSequentialOrUndefInRange(const SmallVectorImpl<int> &Mask,
                                       int Pos, int Size, int Low) {
  for (int i = Pos, e = Pos+Size; i != e; ++i, ++Low)
    if (!isUndefOrEqual(Mask[i], Low))
      return false;
  return true;
}

/// isPSHUFDMask - Return true if the node specifies a shuffle of elements that
/// is suitable for input to PSHUFD or PSHUFW.  That is, it doesn't reference
/// the second operand.
static bool isPSHUFDMask(const SmallVectorImpl<int> &Mask, EVT VT) {
  if (VT == MVT::v4f32 || VT == MVT::v4i32)
    return (Mask[0] < 4 && Mask[1] < 4 && Mask[2] < 4 && Mask[3] < 4);
  if (VT == MVT::v2f64 || VT == MVT::v2i64)
    return (Mask[0] < 2 && Mask[1] < 2);
  return false;
}

bool X86::isPSHUFDMask(ShuffleVectorSDNode *N) {
  SmallVector<int, 8> M;
  N->getMask(M);
  return ::isPSHUFDMask(M, N->getValueType(0));
}

/// isPSHUFHWMask - Return true if the node specifies a shuffle of elements
/// that is suitable for input to PSHUFHW.
static bool isPSHUFHWMask(const SmallVectorImpl<int> &Mask, EVT VT) {
  if (VT != MVT::v8i16)
    return false;

  // Lower quadword copied in order or undef.
  if (!isSequentialOrUndefInRange(Mask, 0, 4, 0))
    return false;

  // Upper quadword shuffled.
  for (unsigned i = 4; i != 8; ++i)
    if (Mask[i] >= 0 && (Mask[i] < 4 || Mask[i] > 7))
      return false;

  return true;
}

bool X86::isPSHUFHWMask(ShuffleVectorSDNode *N) {
  SmallVector<int, 8> M;
  N->getMask(M);
  return ::isPSHUFHWMask(M, N->getValueType(0));
}

/// isPSHUFLWMask - Return true if the node specifies a shuffle of elements
/// that is suitable for input to PSHUFLW.
static bool isPSHUFLWMask(const SmallVectorImpl<int> &Mask, EVT VT) {
  if (VT != MVT::v8i16)
    return false;

  // Upper quadword copied in order.
  if (!isSequentialOrUndefInRange(Mask, 4, 4, 4))
    return false;

  // Lower quadword shuffled.
  for (unsigned i = 0; i != 4; ++i)
    if (Mask[i] >= 4)
      return false;

  return true;
}

bool X86::isPSHUFLWMask(ShuffleVectorSDNode *N) {
  SmallVector<int, 8> M;
  N->getMask(M);
  return ::isPSHUFLWMask(M, N->getValueType(0));
}

/// isPALIGNRMask - Return true if the node specifies a shuffle of elements
/// that is suitable for input to PALIGNR.
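/// As an illustrative example, on v8i16 the mask <1, 2, 3, 4, 5, 6, 7, 8>
/// (the concatenated sources shifted down by one element) is accepted when
/// SSSE3 is available; per getShufflePALIGNRImmediate below it corresponds
/// to a PALIGNR immediate of 2 bytes.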
static bool isPALIGNRMask(const SmallVectorImpl<int> &Mask, EVT VT,
                          bool hasSSSE3) {
  int i, e = VT.getVectorNumElements();
  if (VT.getSizeInBits() != 128)
    return false;

  // Do not handle v2i64 / v2f64 shuffles with palignr.
  if (e < 4 || !hasSSSE3)
    return false;

  for (i = 0; i != e; ++i)
    if (Mask[i] >= 0)
      break;

  // All undef, not a palignr.
  if (i == e)
    return false;

  // Make sure we're shifting in the right direction.
  if (Mask[i] <= i)
    return false;

  int s = Mask[i] - i;

  // Check the rest of the elements to see if they are consecutive.
  for (++i; i != e; ++i) {
    int m = Mask[i];
    if (m >= 0 && m != s+i)
      return false;
  }
  return true;
}

/// isVSHUFPYMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to 256-bit
/// VSHUFPSY.
static bool isVSHUFPYMask(const SmallVectorImpl<int> &Mask, EVT VT,
                          bool HasAVX, bool Commuted = false) {
  int NumElems = VT.getVectorNumElements();

  if (!HasAVX || VT.getSizeInBits() != 256)
    return false;

  if (NumElems != 4 && NumElems != 8)
    return false;

  // VSHUFPSY divides the resulting vector into 4 chunks.
  // The sources are also split into 4 chunks, and each destination
  // chunk must come from a different source chunk.
  //
  //  SRC1 =>   X7    X6    X5    X4    X3    X2    X1    X0
  //  SRC2 =>   Y7    Y6    Y5    Y4    Y3    Y2    Y1    Y0
  //
  //  DST  =>  Y7..Y4,   Y7..Y4,   X7..X4,   X7..X4,
  //           Y3..Y0,   Y3..Y0,   X3..X0,   X3..X0
  //
  // VSHUFPDY divides the resulting vector into 4 chunks.
  // The sources are also split into 4 chunks, and each destination
  // chunk must come from a different source chunk.
  //
  //  SRC1 =>      X3       X2       X1       X0
  //  SRC2 =>      Y3       Y2       Y1       Y0
  //
  //  DST  =>  Y3..Y2,  X3..X2,  Y1..Y0,  X1..X0
  //
  unsigned QuarterSize = NumElems/4;
  unsigned HalfSize = QuarterSize*2;
  for (unsigned l = 0; l != 2; ++l) {
    unsigned LaneStart = l*HalfSize;
    for (unsigned s = 0; s != 2; ++s) {
      unsigned QuarterStart = s*QuarterSize;
      unsigned Src = (Commuted) ? (1-s) : s;
      unsigned SrcStart = Src*NumElems + LaneStart;
      for (unsigned i = 0; i != QuarterSize; ++i) {
        int Idx = Mask[i+QuarterStart+LaneStart];
        if (!isUndefOrInRange(Idx, SrcStart, SrcStart+HalfSize))
          return false;
        // For VSHUFPSY, the mask of the second half must be the same as the
        // first but with the appropriate offsets. This works in the same way
        // as VPERMILPS works with masks.
        if (NumElems == 4 || l == 0 || Mask[i+QuarterStart] < 0)
          continue;
        if (!isUndefOrEqual(Idx, Mask[i+QuarterStart]+LaneStart))
          return false;
      }
    }
  }

  return true;
}

/// getShuffleVSHUFPYImmediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_MASK mask with VSHUFPSY/VSHUFPDY instructions.
static unsigned getShuffleVSHUFPYImmediate(ShuffleVectorSDNode *SVOp) {
  EVT VT = SVOp->getValueType(0);
  unsigned NumElems = VT.getVectorNumElements();

  assert(VT.getSizeInBits() == 256 && "Only supports 256-bit types");
  assert((NumElems == 4 || NumElems == 8) && "Only supports v4 and v8 types");

  unsigned HalfSize = NumElems/2;
  unsigned Mul = (NumElems == 8) ?
2 : 1; 3341 unsigned Mask = 0; 3342 for (unsigned i = 0; i != NumElems; ++i) { 3343 int Elt = SVOp->getMaskElt(i); 3344 if (Elt < 0) 3345 continue; 3346 Elt %= HalfSize; 3347 unsigned Shamt = i; 3348 // For VSHUFPSY, the mask of the first half must be equal to the second one. 3349 if (NumElems == 8) Shamt %= HalfSize; 3350 Mask |= Elt << (Shamt*Mul); 3351 } 3352 3353 return Mask; 3354} 3355 3356/// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming 3357/// the two vector operands have swapped position. 3358static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask, 3359 unsigned NumElems) { 3360 for (unsigned i = 0; i != NumElems; ++i) { 3361 int idx = Mask[i]; 3362 if (idx < 0) 3363 continue; 3364 else if (idx < (int)NumElems) 3365 Mask[i] = idx + NumElems; 3366 else 3367 Mask[i] = idx - NumElems; 3368 } 3369} 3370 3371/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand 3372/// specifies a shuffle of elements that is suitable for input to 128-bit 3373/// SHUFPS and SHUFPD. If Commuted is true, then it checks for sources to be 3374/// reverse of what x86 shuffles want. 3375static bool isSHUFPMask(const SmallVectorImpl<int> &Mask, EVT VT, 3376 bool Commuted = false) { 3377 unsigned NumElems = VT.getVectorNumElements(); 3378 3379 if (VT.getSizeInBits() != 128) 3380 return false; 3381 3382 if (NumElems != 2 && NumElems != 4) 3383 return false; 3384 3385 unsigned Half = NumElems / 2; 3386 unsigned SrcStart = Commuted ? NumElems : 0; 3387 for (unsigned i = 0; i != Half; ++i) 3388 if (!isUndefOrInRange(Mask[i], SrcStart, SrcStart+NumElems)) 3389 return false; 3390 SrcStart = Commuted ? 0 : NumElems; 3391 for (unsigned i = Half; i != NumElems; ++i) 3392 if (!isUndefOrInRange(Mask[i], SrcStart, SrcStart+NumElems)) 3393 return false; 3394 3395 return true; 3396} 3397 3398bool X86::isSHUFPMask(ShuffleVectorSDNode *N) { 3399 SmallVector<int, 8> M; 3400 N->getMask(M); 3401 return ::isSHUFPMask(M, N->getValueType(0)); 3402} 3403 3404/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand 3405/// specifies a shuffle of elements that is suitable for input to MOVHLPS. 3406bool X86::isMOVHLPSMask(ShuffleVectorSDNode *N) { 3407 EVT VT = N->getValueType(0); 3408 unsigned NumElems = VT.getVectorNumElements(); 3409 3410 if (VT.getSizeInBits() != 128) 3411 return false; 3412 3413 if (NumElems != 4) 3414 return false; 3415 3416 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3 3417 return isUndefOrEqual(N->getMaskElt(0), 6) && 3418 isUndefOrEqual(N->getMaskElt(1), 7) && 3419 isUndefOrEqual(N->getMaskElt(2), 2) && 3420 isUndefOrEqual(N->getMaskElt(3), 3); 3421} 3422 3423/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form 3424/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef, 3425/// <2, 3, 2, 3> 3426bool X86::isMOVHLPS_v_undef_Mask(ShuffleVectorSDNode *N) { 3427 EVT VT = N->getValueType(0); 3428 unsigned NumElems = VT.getVectorNumElements(); 3429 3430 if (VT.getSizeInBits() != 128) 3431 return false; 3432 3433 if (NumElems != 4) 3434 return false; 3435 3436 return isUndefOrEqual(N->getMaskElt(0), 2) && 3437 isUndefOrEqual(N->getMaskElt(1), 3) && 3438 isUndefOrEqual(N->getMaskElt(2), 2) && 3439 isUndefOrEqual(N->getMaskElt(3), 3); 3440} 3441 3442/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand 3443/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}. 
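/// For v4f32, a matching mask is e.g. <4, 5, 2, 3>: the low half comes from
/// the low half of V2 and the high half is kept from V1.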
3444bool X86::isMOVLPMask(ShuffleVectorSDNode *N) { 3445 EVT VT = N->getValueType(0); 3446 3447 if (VT.getSizeInBits() != 128) 3448 return false; 3449 3450 unsigned NumElems = N->getValueType(0).getVectorNumElements(); 3451 3452 if (NumElems != 2 && NumElems != 4) 3453 return false; 3454 3455 for (unsigned i = 0; i < NumElems/2; ++i) 3456 if (!isUndefOrEqual(N->getMaskElt(i), i + NumElems)) 3457 return false; 3458 3459 for (unsigned i = NumElems/2; i < NumElems; ++i) 3460 if (!isUndefOrEqual(N->getMaskElt(i), i)) 3461 return false; 3462 3463 return true; 3464} 3465 3466/// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand 3467/// specifies a shuffle of elements that is suitable for input to MOVLHPS. 3468bool X86::isMOVLHPSMask(ShuffleVectorSDNode *N) { 3469 unsigned NumElems = N->getValueType(0).getVectorNumElements(); 3470 3471 if ((NumElems != 2 && NumElems != 4) 3472 || N->getValueType(0).getSizeInBits() > 128) 3473 return false; 3474 3475 for (unsigned i = 0; i < NumElems/2; ++i) 3476 if (!isUndefOrEqual(N->getMaskElt(i), i)) 3477 return false; 3478 3479 for (unsigned i = 0; i < NumElems/2; ++i) 3480 if (!isUndefOrEqual(N->getMaskElt(i + NumElems/2), i + NumElems)) 3481 return false; 3482 3483 return true; 3484} 3485 3486/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand 3487/// specifies a shuffle of elements that is suitable for input to UNPCKL. 3488static bool isUNPCKLMask(const SmallVectorImpl<int> &Mask, EVT VT, 3489 bool HasAVX2, bool V2IsSplat = false) { 3490 unsigned NumElts = VT.getVectorNumElements(); 3491 3492 assert((VT.is128BitVector() || VT.is256BitVector()) && 3493 "Unsupported vector type for unpckh"); 3494 3495 if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8 && 3496 (!HasAVX2 || (NumElts != 16 && NumElts != 32))) 3497 return false; 3498 3499 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 3500 // independently on 128-bit lanes. 3501 unsigned NumLanes = VT.getSizeInBits()/128; 3502 unsigned NumLaneElts = NumElts/NumLanes; 3503 3504 for (unsigned l = 0; l != NumLanes; ++l) { 3505 for (unsigned i = l*NumLaneElts, j = l*NumLaneElts; 3506 i != (l+1)*NumLaneElts; 3507 i += 2, ++j) { 3508 int BitI = Mask[i]; 3509 int BitI1 = Mask[i+1]; 3510 if (!isUndefOrEqual(BitI, j)) 3511 return false; 3512 if (V2IsSplat) { 3513 if (!isUndefOrEqual(BitI1, NumElts)) 3514 return false; 3515 } else { 3516 if (!isUndefOrEqual(BitI1, j + NumElts)) 3517 return false; 3518 } 3519 } 3520 } 3521 3522 return true; 3523} 3524 3525bool X86::isUNPCKLMask(ShuffleVectorSDNode *N, bool HasAVX2, bool V2IsSplat) { 3526 SmallVector<int, 8> M; 3527 N->getMask(M); 3528 return ::isUNPCKLMask(M, N->getValueType(0), HasAVX2, V2IsSplat); 3529} 3530 3531/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand 3532/// specifies a shuffle of elements that is suitable for input to UNPCKH. 3533static bool isUNPCKHMask(const SmallVectorImpl<int> &Mask, EVT VT, 3534 bool HasAVX2, bool V2IsSplat = false) { 3535 unsigned NumElts = VT.getVectorNumElements(); 3536 3537 assert((VT.is128BitVector() || VT.is256BitVector()) && 3538 "Unsupported vector type for unpckh"); 3539 3540 if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8 && 3541 (!HasAVX2 || (NumElts != 16 && NumElts != 32))) 3542 return false; 3543 3544 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 3545 // independently on 128-bit lanes. 
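  // For example, for a 256-bit v8i32 unpckh the expected mask is
  // <2, 10, 3, 11, 6, 14, 7, 15> (undef entries are also accepted), i.e.
  // each 128-bit lane interleaves the high halves of the corresponding
  // lanes of the two sources.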
3546 unsigned NumLanes = VT.getSizeInBits()/128; 3547 unsigned NumLaneElts = NumElts/NumLanes; 3548 3549 for (unsigned l = 0; l != NumLanes; ++l) { 3550 for (unsigned i = l*NumLaneElts, j = (l*NumLaneElts)+NumLaneElts/2; 3551 i != (l+1)*NumLaneElts; i += 2, ++j) { 3552 int BitI = Mask[i]; 3553 int BitI1 = Mask[i+1]; 3554 if (!isUndefOrEqual(BitI, j)) 3555 return false; 3556 if (V2IsSplat) { 3557 if (isUndefOrEqual(BitI1, NumElts)) 3558 return false; 3559 } else { 3560 if (!isUndefOrEqual(BitI1, j+NumElts)) 3561 return false; 3562 } 3563 } 3564 } 3565 return true; 3566} 3567 3568bool X86::isUNPCKHMask(ShuffleVectorSDNode *N, bool HasAVX2, bool V2IsSplat) { 3569 SmallVector<int, 8> M; 3570 N->getMask(M); 3571 return ::isUNPCKHMask(M, N->getValueType(0), HasAVX2, V2IsSplat); 3572} 3573 3574/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form 3575/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef, 3576/// <0, 0, 1, 1> 3577static bool isUNPCKL_v_undef_Mask(const SmallVectorImpl<int> &Mask, EVT VT, 3578 bool HasAVX2) { 3579 unsigned NumElts = VT.getVectorNumElements(); 3580 3581 assert((VT.is128BitVector() || VT.is256BitVector()) && 3582 "Unsupported vector type for unpckh"); 3583 3584 if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8 && 3585 (!HasAVX2 || (NumElts != 16 && NumElts != 32))) 3586 return false; 3587 3588 // For 256-bit i64/f64, use MOVDDUPY instead, so reject the matching pattern 3589 // FIXME: Need a better way to get rid of this, there's no latency difference 3590 // between UNPCKLPD and MOVDDUP, the later should always be checked first and 3591 // the former later. We should also remove the "_undef" special mask. 3592 if (NumElts == 4 && VT.getSizeInBits() == 256) 3593 return false; 3594 3595 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 3596 // independently on 128-bit lanes. 3597 unsigned NumLanes = VT.getSizeInBits()/128; 3598 unsigned NumLaneElts = NumElts/NumLanes; 3599 3600 for (unsigned l = 0; l != NumLanes; ++l) { 3601 for (unsigned i = l*NumLaneElts, j = l*NumLaneElts; 3602 i != (l+1)*NumLaneElts; 3603 i += 2, ++j) { 3604 int BitI = Mask[i]; 3605 int BitI1 = Mask[i+1]; 3606 3607 if (!isUndefOrEqual(BitI, j)) 3608 return false; 3609 if (!isUndefOrEqual(BitI1, j)) 3610 return false; 3611 } 3612 } 3613 3614 return true; 3615} 3616 3617bool X86::isUNPCKL_v_undef_Mask(ShuffleVectorSDNode *N, bool HasAVX2) { 3618 SmallVector<int, 8> M; 3619 N->getMask(M); 3620 return ::isUNPCKL_v_undef_Mask(M, N->getValueType(0), HasAVX2); 3621} 3622 3623/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form 3624/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef, 3625/// <2, 2, 3, 3> 3626static bool isUNPCKH_v_undef_Mask(const SmallVectorImpl<int> &Mask, EVT VT, 3627 bool HasAVX2) { 3628 unsigned NumElts = VT.getVectorNumElements(); 3629 3630 assert((VT.is128BitVector() || VT.is256BitVector()) && 3631 "Unsupported vector type for unpckh"); 3632 3633 if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8 && 3634 (!HasAVX2 || (NumElts != 16 && NumElts != 32))) 3635 return false; 3636 3637 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 3638 // independently on 128-bit lanes. 
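  // For example, for v8i32 this accepts <2, 2, 3, 3, 6, 6, 7, 7>: the
  // single-source unpack-high pattern applied independently in each
  // 128-bit lane.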
3639 unsigned NumLanes = VT.getSizeInBits()/128; 3640 unsigned NumLaneElts = NumElts/NumLanes; 3641 3642 for (unsigned l = 0; l != NumLanes; ++l) { 3643 for (unsigned i = l*NumLaneElts, j = (l*NumLaneElts)+NumLaneElts/2; 3644 i != (l+1)*NumLaneElts; i += 2, ++j) { 3645 int BitI = Mask[i]; 3646 int BitI1 = Mask[i+1]; 3647 if (!isUndefOrEqual(BitI, j)) 3648 return false; 3649 if (!isUndefOrEqual(BitI1, j)) 3650 return false; 3651 } 3652 } 3653 return true; 3654} 3655 3656bool X86::isUNPCKH_v_undef_Mask(ShuffleVectorSDNode *N, bool HasAVX2) { 3657 SmallVector<int, 8> M; 3658 N->getMask(M); 3659 return ::isUNPCKH_v_undef_Mask(M, N->getValueType(0), HasAVX2); 3660} 3661 3662/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand 3663/// specifies a shuffle of elements that is suitable for input to MOVSS, 3664/// MOVSD, and MOVD, i.e. setting the lowest element. 3665static bool isMOVLMask(const SmallVectorImpl<int> &Mask, EVT VT) { 3666 if (VT.getVectorElementType().getSizeInBits() < 32) 3667 return false; 3668 if (VT.getSizeInBits() == 256) 3669 return false; 3670 3671 unsigned NumElts = VT.getVectorNumElements(); 3672 3673 if (!isUndefOrEqual(Mask[0], NumElts)) 3674 return false; 3675 3676 for (unsigned i = 1; i != NumElts; ++i) 3677 if (!isUndefOrEqual(Mask[i], i)) 3678 return false; 3679 3680 return true; 3681} 3682 3683bool X86::isMOVLMask(ShuffleVectorSDNode *N) { 3684 SmallVector<int, 8> M; 3685 N->getMask(M); 3686 return ::isMOVLMask(M, N->getValueType(0)); 3687} 3688 3689/// isVPERM2X128Mask - Match 256-bit shuffles where the elements are considered 3690/// as permutations between 128-bit chunks or halves. As an example: this 3691/// shuffle bellow: 3692/// vector_shuffle <4, 5, 6, 7, 12, 13, 14, 15> 3693/// The first half comes from the second half of V1 and the second half from the 3694/// the second half of V2. 3695static bool isVPERM2X128Mask(const SmallVectorImpl<int> &Mask, EVT VT, 3696 bool HasAVX) { 3697 if (!HasAVX || VT.getSizeInBits() != 256) 3698 return false; 3699 3700 // The shuffle result is divided into half A and half B. In total the two 3701 // sources have 4 halves, namely: C, D, E, F. The final values of A and 3702 // B must come from C, D, E or F. 3703 unsigned HalfSize = VT.getVectorNumElements()/2; 3704 bool MatchA = false, MatchB = false; 3705 3706 // Check if A comes from one of C, D, E, F. 3707 for (unsigned Half = 0; Half != 4; ++Half) { 3708 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, Half*HalfSize)) { 3709 MatchA = true; 3710 break; 3711 } 3712 } 3713 3714 // Check if B comes from one of C, D, E, F. 3715 for (unsigned Half = 0; Half != 4; ++Half) { 3716 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, Half*HalfSize)) { 3717 MatchB = true; 3718 break; 3719 } 3720 } 3721 3722 return MatchA && MatchB; 3723} 3724 3725/// getShuffleVPERM2X128Immediate - Return the appropriate immediate to shuffle 3726/// the specified VECTOR_MASK mask with VPERM2F128/VPERM2I128 instructions. 
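/// For the 8-element shuffle <4, 5, 6, 7, 12, 13, 14, 15> used as an example
/// above, both halves of the result select the high half of their source,
/// so this returns 1 | (3 << 4) == 0x31.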
static unsigned getShuffleVPERM2X128Immediate(ShuffleVectorSDNode *SVOp) {
  EVT VT = SVOp->getValueType(0);

  unsigned HalfSize = VT.getVectorNumElements()/2;

  unsigned FstHalf = 0, SndHalf = 0;
  for (unsigned i = 0; i < HalfSize; ++i) {
    if (SVOp->getMaskElt(i) > 0) {
      FstHalf = SVOp->getMaskElt(i)/HalfSize;
      break;
    }
  }
  for (unsigned i = HalfSize; i < HalfSize*2; ++i) {
    if (SVOp->getMaskElt(i) > 0) {
      SndHalf = SVOp->getMaskElt(i)/HalfSize;
      break;
    }
  }

  return (FstHalf | (SndHalf << 4));
}

/// isVPERMILPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to VPERMILP*.
/// Note that VPERMIL mask matching differs depending on whether the
/// underlying type is 32 or 64 bits wide. For VPERMILPS the high half of the
/// mask must select the same elements as the low half, but from the higher
/// half of the source. For VPERMILPD the two lanes may be shuffled
/// independently of each other, with the same restriction that lanes can't
/// be crossed.
static bool isVPERMILPMask(const SmallVectorImpl<int> &Mask, EVT VT,
                           bool HasAVX) {
  if (!HasAVX)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  // Only match 256-bit with 32/64-bit types
  if (VT.getSizeInBits() != 256 || (NumElts != 4 && NumElts != 8))
    return false;

  unsigned NumLanes = VT.getSizeInBits()/128;
  unsigned LaneSize = NumElts/NumLanes;
  for (unsigned l = 0; l != NumLanes; ++l) {
    unsigned LaneStart = l*LaneSize;
    for (unsigned i = 0; i != LaneSize; ++i) {
      if (!isUndefOrInRange(Mask[i+LaneStart], LaneStart, LaneStart+LaneSize))
        return false;
      if (NumElts == 4 || l == 0)
        continue;
      // VPERMILPS handling
      if (Mask[i] < 0)
        continue;
      if (!isUndefOrEqual(Mask[i+LaneStart], Mask[i]+LaneStart))
        return false;
    }
  }

  return true;
}

/// getShuffleVPERMILPImmediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_MASK mask with VPERMILPS/D* instructions.
static unsigned getShuffleVPERMILPImmediate(ShuffleVectorSDNode *SVOp) {
  EVT VT = SVOp->getValueType(0);

  unsigned NumElts = VT.getVectorNumElements();
  unsigned NumLanes = VT.getSizeInBits()/128;
  unsigned LaneSize = NumElts/NumLanes;

  // Although the mask is equal for both lanes, do it twice to get the cases
  // where a mask will match because the same mask element is undef on the
  // first half but valid on the second. This would get pathological cases
  // such as: shuffle <u, 0, 1, 2, 4, 4, 5, 6>, which is completely valid.
  unsigned Shift = (LaneSize == 4) ? 2 : 1;
  unsigned Mask = 0;
  for (unsigned i = 0; i != NumElts; ++i) {
    int MaskElt = SVOp->getMaskElt(i);
    if (MaskElt < 0)
      continue;
    MaskElt %= LaneSize;
    unsigned Shamt = i;
    // For VPERMILPSY, the mask of the first half must be equal to the second.
    if (NumElts == 8) Shamt %= LaneSize;
    Mask |= MaskElt << (Shamt*Shift);
  }

  return Mask;
}

/// isCommutedMOVL - Returns true if the shuffle mask is the reverse of what
/// x86 movss wants: the lowest element must come from the lowest element of
/// vector 2, and the other elements must come from vector 1 in order.
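/// For example, on v4i32 the MOVL pattern itself is <4, 1, 2, 3>, while the
/// commuted form accepted here is <0, 5, 6, 7>.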
3818static bool isCommutedMOVLMask(const SmallVectorImpl<int> &Mask, EVT VT, 3819 bool V2IsSplat = false, bool V2IsUndef = false) { 3820 unsigned NumOps = VT.getVectorNumElements(); 3821 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16) 3822 return false; 3823 3824 if (!isUndefOrEqual(Mask[0], 0)) 3825 return false; 3826 3827 for (unsigned i = 1; i != NumOps; ++i) 3828 if (!(isUndefOrEqual(Mask[i], i+NumOps) || 3829 (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) || 3830 (V2IsSplat && isUndefOrEqual(Mask[i], NumOps)))) 3831 return false; 3832 3833 return true; 3834} 3835 3836static bool isCommutedMOVL(ShuffleVectorSDNode *N, bool V2IsSplat = false, 3837 bool V2IsUndef = false) { 3838 SmallVector<int, 8> M; 3839 N->getMask(M); 3840 return isCommutedMOVLMask(M, N->getValueType(0), V2IsSplat, V2IsUndef); 3841} 3842 3843/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand 3844/// specifies a shuffle of elements that is suitable for input to MOVSHDUP. 3845/// Masks to match: <1, 1, 3, 3> or <1, 1, 3, 3, 5, 5, 7, 7> 3846bool X86::isMOVSHDUPMask(ShuffleVectorSDNode *N, 3847 const X86Subtarget *Subtarget) { 3848 if (!Subtarget->hasSSE3()) 3849 return false; 3850 3851 // The second vector must be undef 3852 if (N->getOperand(1).getOpcode() != ISD::UNDEF) 3853 return false; 3854 3855 EVT VT = N->getValueType(0); 3856 unsigned NumElems = VT.getVectorNumElements(); 3857 3858 if ((VT.getSizeInBits() == 128 && NumElems != 4) || 3859 (VT.getSizeInBits() == 256 && NumElems != 8)) 3860 return false; 3861 3862 // "i+1" is the value the indexed mask element must have 3863 for (unsigned i = 0; i < NumElems; i += 2) 3864 if (!isUndefOrEqual(N->getMaskElt(i), i+1) || 3865 !isUndefOrEqual(N->getMaskElt(i+1), i+1)) 3866 return false; 3867 3868 return true; 3869} 3870 3871/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand 3872/// specifies a shuffle of elements that is suitable for input to MOVSLDUP. 3873/// Masks to match: <0, 0, 2, 2> or <0, 0, 2, 2, 4, 4, 6, 6> 3874bool X86::isMOVSLDUPMask(ShuffleVectorSDNode *N, 3875 const X86Subtarget *Subtarget) { 3876 if (!Subtarget->hasSSE3()) 3877 return false; 3878 3879 // The second vector must be undef 3880 if (N->getOperand(1).getOpcode() != ISD::UNDEF) 3881 return false; 3882 3883 EVT VT = N->getValueType(0); 3884 unsigned NumElems = VT.getVectorNumElements(); 3885 3886 if ((VT.getSizeInBits() == 128 && NumElems != 4) || 3887 (VT.getSizeInBits() == 256 && NumElems != 8)) 3888 return false; 3889 3890 // "i" is the value the indexed mask element must have 3891 for (unsigned i = 0; i != NumElems; i += 2) 3892 if (!isUndefOrEqual(N->getMaskElt(i), i) || 3893 !isUndefOrEqual(N->getMaskElt(i+1), i)) 3894 return false; 3895 3896 return true; 3897} 3898 3899/// isMOVDDUPYMask - Return true if the specified VECTOR_SHUFFLE operand 3900/// specifies a shuffle of elements that is suitable for input to 256-bit 3901/// version of MOVDDUP. 
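/// The only mask shape accepted for v4f64 is therefore <0, 0, 2, 2> (modulo
/// undef elements): each 128-bit half broadcasts its own low element.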
3902static bool isMOVDDUPYMask(const SmallVectorImpl<int> &Mask, EVT VT, 3903 bool HasAVX) { 3904 unsigned NumElts = VT.getVectorNumElements(); 3905 3906 if (!HasAVX || VT.getSizeInBits() != 256 || NumElts != 4) 3907 return false; 3908 3909 for (unsigned i = 0; i != NumElts/2; ++i) 3910 if (!isUndefOrEqual(Mask[i], 0)) 3911 return false; 3912 for (unsigned i = NumElts/2; i != NumElts; ++i) 3913 if (!isUndefOrEqual(Mask[i], NumElts/2)) 3914 return false; 3915 return true; 3916} 3917 3918/// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand 3919/// specifies a shuffle of elements that is suitable for input to 128-bit 3920/// version of MOVDDUP. 3921bool X86::isMOVDDUPMask(ShuffleVectorSDNode *N) { 3922 EVT VT = N->getValueType(0); 3923 3924 if (VT.getSizeInBits() != 128) 3925 return false; 3926 3927 unsigned e = VT.getVectorNumElements() / 2; 3928 for (unsigned i = 0; i != e; ++i) 3929 if (!isUndefOrEqual(N->getMaskElt(i), i)) 3930 return false; 3931 for (unsigned i = 0; i != e; ++i) 3932 if (!isUndefOrEqual(N->getMaskElt(e+i), i)) 3933 return false; 3934 return true; 3935} 3936 3937/// isVEXTRACTF128Index - Return true if the specified 3938/// EXTRACT_SUBVECTOR operand specifies a vector extract that is 3939/// suitable for input to VEXTRACTF128. 3940bool X86::isVEXTRACTF128Index(SDNode *N) { 3941 if (!isa<ConstantSDNode>(N->getOperand(1).getNode())) 3942 return false; 3943 3944 // The index should be aligned on a 128-bit boundary. 3945 uint64_t Index = 3946 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue(); 3947 3948 unsigned VL = N->getValueType(0).getVectorNumElements(); 3949 unsigned VBits = N->getValueType(0).getSizeInBits(); 3950 unsigned ElSize = VBits / VL; 3951 bool Result = (Index * ElSize) % 128 == 0; 3952 3953 return Result; 3954} 3955 3956/// isVINSERTF128Index - Return true if the specified INSERT_SUBVECTOR 3957/// operand specifies a subvector insert that is suitable for input to 3958/// VINSERTF128. 3959bool X86::isVINSERTF128Index(SDNode *N) { 3960 if (!isa<ConstantSDNode>(N->getOperand(2).getNode())) 3961 return false; 3962 3963 // The index should be aligned on a 128-bit boundary. 3964 uint64_t Index = 3965 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue(); 3966 3967 unsigned VL = N->getValueType(0).getVectorNumElements(); 3968 unsigned VBits = N->getValueType(0).getSizeInBits(); 3969 unsigned ElSize = VBits / VL; 3970 bool Result = (Index * ElSize) % 128 == 0; 3971 3972 return Result; 3973} 3974 3975/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle 3976/// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions. 3977unsigned X86::getShuffleSHUFImmediate(SDNode *N) { 3978 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 3979 unsigned NumOperands = SVOp->getValueType(0).getVectorNumElements(); 3980 3981 unsigned Shift = (NumOperands == 4) ? 2 : 1; 3982 unsigned Mask = 0; 3983 for (unsigned i = 0; i != NumOperands; ++i) { 3984 int Val = SVOp->getMaskElt(NumOperands-i-1); 3985 if (Val < 0) Val = 0; 3986 if (Val >= (int)NumOperands) Val -= NumOperands; 3987 Mask |= Val; 3988 if (i != NumOperands - 1) 3989 Mask <<= Shift; 3990 } 3991 return Mask; 3992} 3993 3994/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle 3995/// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction. 
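/// For a mask whose upper quadword is the identity <4, 5, 6, 7>, this
/// produces 0xE4 (0b11'10'01'00), i.e. two bits per element as PSHUFHW
/// expects.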
3996unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) { 3997 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 3998 unsigned Mask = 0; 3999 // 8 nodes, but we only care about the last 4. 4000 for (unsigned i = 7; i >= 4; --i) { 4001 int Val = SVOp->getMaskElt(i); 4002 if (Val >= 0) 4003 Mask |= (Val - 4); 4004 if (i != 4) 4005 Mask <<= 2; 4006 } 4007 return Mask; 4008} 4009 4010/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle 4011/// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction. 4012unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) { 4013 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 4014 unsigned Mask = 0; 4015 // 8 nodes, but we only care about the first 4. 4016 for (int i = 3; i >= 0; --i) { 4017 int Val = SVOp->getMaskElt(i); 4018 if (Val >= 0) 4019 Mask |= Val; 4020 if (i != 0) 4021 Mask <<= 2; 4022 } 4023 return Mask; 4024} 4025 4026/// getShufflePALIGNRImmediate - Return the appropriate immediate to shuffle 4027/// the specified VECTOR_SHUFFLE mask with the PALIGNR instruction. 4028static unsigned getShufflePALIGNRImmediate(ShuffleVectorSDNode *SVOp) { 4029 EVT VT = SVOp->getValueType(0); 4030 unsigned EltSize = VT.getVectorElementType().getSizeInBits() >> 3; 4031 int Val = 0; 4032 4033 unsigned i, e; 4034 for (i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 4035 Val = SVOp->getMaskElt(i); 4036 if (Val >= 0) 4037 break; 4038 } 4039 assert(Val - i > 0 && "PALIGNR imm should be positive"); 4040 return (Val - i) * EltSize; 4041} 4042 4043/// getExtractVEXTRACTF128Immediate - Return the appropriate immediate 4044/// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128 4045/// instructions. 4046unsigned X86::getExtractVEXTRACTF128Immediate(SDNode *N) { 4047 if (!isa<ConstantSDNode>(N->getOperand(1).getNode())) 4048 llvm_unreachable("Illegal extract subvector for VEXTRACTF128"); 4049 4050 uint64_t Index = 4051 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue(); 4052 4053 EVT VecVT = N->getOperand(0).getValueType(); 4054 EVT ElVT = VecVT.getVectorElementType(); 4055 4056 unsigned NumElemsPerChunk = 128 / ElVT.getSizeInBits(); 4057 return Index / NumElemsPerChunk; 4058} 4059 4060/// getInsertVINSERTF128Immediate - Return the appropriate immediate 4061/// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128 4062/// instructions. 4063unsigned X86::getInsertVINSERTF128Immediate(SDNode *N) { 4064 if (!isa<ConstantSDNode>(N->getOperand(2).getNode())) 4065 llvm_unreachable("Illegal insert subvector for VINSERTF128"); 4066 4067 uint64_t Index = 4068 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue(); 4069 4070 EVT VecVT = N->getValueType(0); 4071 EVT ElVT = VecVT.getVectorElementType(); 4072 4073 unsigned NumElemsPerChunk = 128 / ElVT.getSizeInBits(); 4074 return Index / NumElemsPerChunk; 4075} 4076 4077/// isZeroNode - Returns true if Elt is a constant zero or a floating point 4078/// constant +0.0. 4079bool X86::isZeroNode(SDValue Elt) { 4080 return ((isa<ConstantSDNode>(Elt) && 4081 cast<ConstantSDNode>(Elt)->isNullValue()) || 4082 (isa<ConstantFPSDNode>(Elt) && 4083 cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero())); 4084} 4085 4086/// CommuteVectorShuffle - Swap vector_shuffle operands as well as values in 4087/// their permute mask. 
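/// For example, commuting a v4f32 shuffle with mask <0, 1, 4, 5> yields the
/// equivalent shuffle of the swapped operands with mask <4, 5, 0, 1>.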
4088static SDValue CommuteVectorShuffle(ShuffleVectorSDNode *SVOp, 4089 SelectionDAG &DAG) { 4090 EVT VT = SVOp->getValueType(0); 4091 unsigned NumElems = VT.getVectorNumElements(); 4092 SmallVector<int, 8> MaskVec; 4093 4094 for (unsigned i = 0; i != NumElems; ++i) { 4095 int idx = SVOp->getMaskElt(i); 4096 if (idx < 0) 4097 MaskVec.push_back(idx); 4098 else if (idx < (int)NumElems) 4099 MaskVec.push_back(idx + NumElems); 4100 else 4101 MaskVec.push_back(idx - NumElems); 4102 } 4103 return DAG.getVectorShuffle(VT, SVOp->getDebugLoc(), SVOp->getOperand(1), 4104 SVOp->getOperand(0), &MaskVec[0]); 4105} 4106 4107/// ShouldXformToMOVHLPS - Return true if the node should be transformed to 4108/// match movhlps. The lower half elements should come from upper half of 4109/// V1 (and in order), and the upper half elements should come from the upper 4110/// half of V2 (and in order). 4111static bool ShouldXformToMOVHLPS(ShuffleVectorSDNode *Op) { 4112 EVT VT = Op->getValueType(0); 4113 if (VT.getSizeInBits() != 128) 4114 return false; 4115 if (VT.getVectorNumElements() != 4) 4116 return false; 4117 for (unsigned i = 0, e = 2; i != e; ++i) 4118 if (!isUndefOrEqual(Op->getMaskElt(i), i+2)) 4119 return false; 4120 for (unsigned i = 2; i != 4; ++i) 4121 if (!isUndefOrEqual(Op->getMaskElt(i), i+4)) 4122 return false; 4123 return true; 4124} 4125 4126/// isScalarLoadToVector - Returns true if the node is a scalar load that 4127/// is promoted to a vector. It also returns the LoadSDNode by reference if 4128/// required. 4129static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = NULL) { 4130 if (N->getOpcode() != ISD::SCALAR_TO_VECTOR) 4131 return false; 4132 N = N->getOperand(0).getNode(); 4133 if (!ISD::isNON_EXTLoad(N)) 4134 return false; 4135 if (LD) 4136 *LD = cast<LoadSDNode>(N); 4137 return true; 4138} 4139 4140// Test whether the given value is a vector value which will be legalized 4141// into a load. 4142static bool WillBeConstantPoolLoad(SDNode *N) { 4143 if (N->getOpcode() != ISD::BUILD_VECTOR) 4144 return false; 4145 4146 // Check for any non-constant elements. 4147 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) 4148 switch (N->getOperand(i).getNode()->getOpcode()) { 4149 case ISD::UNDEF: 4150 case ISD::ConstantFP: 4151 case ISD::Constant: 4152 break; 4153 default: 4154 return false; 4155 } 4156 4157 // Vectors of all-zeros and all-ones are materialized with special 4158 // instructions rather than being loaded. 4159 return !ISD::isBuildVectorAllZeros(N) && 4160 !ISD::isBuildVectorAllOnes(N); 4161} 4162 4163/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to 4164/// match movlp{s|d}. The lower half elements should come from lower half of 4165/// V1 (and in order), and the upper half elements should come from the upper 4166/// half of V2 (and in order). And since V1 will become the source of the 4167/// MOVLP, it must be either a vector load or a scalar load to vector. 4168static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2, 4169 ShuffleVectorSDNode *Op) { 4170 EVT VT = Op->getValueType(0); 4171 if (VT.getSizeInBits() != 128) 4172 return false; 4173 4174 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1)) 4175 return false; 4176 // Is V2 is a vector load, don't do this transformation. We will try to use 4177 // load folding shufps op. 
4178 if (ISD::isNON_EXTLoad(V2) || WillBeConstantPoolLoad(V2)) 4179 return false; 4180 4181 unsigned NumElems = VT.getVectorNumElements(); 4182 4183 if (NumElems != 2 && NumElems != 4) 4184 return false; 4185 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 4186 if (!isUndefOrEqual(Op->getMaskElt(i), i)) 4187 return false; 4188 for (unsigned i = NumElems/2; i != NumElems; ++i) 4189 if (!isUndefOrEqual(Op->getMaskElt(i), i+NumElems)) 4190 return false; 4191 return true; 4192} 4193 4194/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are 4195/// all the same. 4196static bool isSplatVector(SDNode *N) { 4197 if (N->getOpcode() != ISD::BUILD_VECTOR) 4198 return false; 4199 4200 SDValue SplatValue = N->getOperand(0); 4201 for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i) 4202 if (N->getOperand(i) != SplatValue) 4203 return false; 4204 return true; 4205} 4206 4207/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved 4208/// to an zero vector. 4209/// FIXME: move to dag combiner / method on ShuffleVectorSDNode 4210static bool isZeroShuffle(ShuffleVectorSDNode *N) { 4211 SDValue V1 = N->getOperand(0); 4212 SDValue V2 = N->getOperand(1); 4213 unsigned NumElems = N->getValueType(0).getVectorNumElements(); 4214 for (unsigned i = 0; i != NumElems; ++i) { 4215 int Idx = N->getMaskElt(i); 4216 if (Idx >= (int)NumElems) { 4217 unsigned Opc = V2.getOpcode(); 4218 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode())) 4219 continue; 4220 if (Opc != ISD::BUILD_VECTOR || 4221 !X86::isZeroNode(V2.getOperand(Idx-NumElems))) 4222 return false; 4223 } else if (Idx >= 0) { 4224 unsigned Opc = V1.getOpcode(); 4225 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode())) 4226 continue; 4227 if (Opc != ISD::BUILD_VECTOR || 4228 !X86::isZeroNode(V1.getOperand(Idx))) 4229 return false; 4230 } 4231 } 4232 return true; 4233} 4234 4235/// getZeroVector - Returns a vector of specified type with all zero elements. 4236/// 4237static SDValue getZeroVector(EVT VT, bool HasSSE2, SelectionDAG &DAG, 4238 DebugLoc dl) { 4239 assert(VT.isVector() && "Expected a vector type"); 4240 4241 // Always build SSE zero vectors as <4 x i32> bitcasted 4242 // to their dest type. This ensures they get CSE'd. 4243 SDValue Vec; 4244 if (VT.getSizeInBits() == 128) { // SSE 4245 if (HasSSE2) { // SSE2 4246 SDValue Cst = DAG.getTargetConstant(0, MVT::i32); 4247 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); 4248 } else { // SSE1 4249 SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32); 4250 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst); 4251 } 4252 } else if (VT.getSizeInBits() == 256) { // AVX 4253 // 256-bit logic and arithmetic instructions in AVX are 4254 // all floating-point, no support for integer ops. Default 4255 // to emitting fp zeroed vectors then. 4256 SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32); 4257 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; 4258 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops, 8); 4259 } 4260 return DAG.getNode(ISD::BITCAST, dl, VT, Vec); 4261} 4262 4263/// getOnesVector - Returns a vector of specified type with all bits set. 4264/// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with 4265/// no AVX2 supprt, use two <4 x i32> inserted in a <8 x i32> appropriately. 4266/// Then bitcast to their original type, ensuring they get CSE'd. 
4267static SDValue getOnesVector(EVT VT, bool HasAVX2, SelectionDAG &DAG, 4268 DebugLoc dl) { 4269 assert(VT.isVector() && "Expected a vector type"); 4270 assert((VT.is128BitVector() || VT.is256BitVector()) 4271 && "Expected a 128-bit or 256-bit vector type"); 4272 4273 SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32); 4274 SDValue Vec; 4275 if (VT.getSizeInBits() == 256) { 4276 if (HasAVX2) { // AVX2 4277 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; 4278 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops, 8); 4279 } else { // AVX 4280 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); 4281 SDValue InsV = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, MVT::v8i32), 4282 Vec, DAG.getConstant(0, MVT::i32), DAG, dl); 4283 Vec = Insert128BitVector(InsV, Vec, 4284 DAG.getConstant(4 /* NumElems/2 */, MVT::i32), DAG, dl); 4285 } 4286 } else { 4287 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); 4288 } 4289 4290 return DAG.getNode(ISD::BITCAST, dl, VT, Vec); 4291} 4292 4293/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements 4294/// that point to V2 points to its first element. 4295static SDValue NormalizeMask(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { 4296 EVT VT = SVOp->getValueType(0); 4297 unsigned NumElems = VT.getVectorNumElements(); 4298 4299 bool Changed = false; 4300 SmallVector<int, 8> MaskVec; 4301 SVOp->getMask(MaskVec); 4302 4303 for (unsigned i = 0; i != NumElems; ++i) { 4304 if (MaskVec[i] > (int)NumElems) { 4305 MaskVec[i] = NumElems; 4306 Changed = true; 4307 } 4308 } 4309 if (Changed) 4310 return DAG.getVectorShuffle(VT, SVOp->getDebugLoc(), SVOp->getOperand(0), 4311 SVOp->getOperand(1), &MaskVec[0]); 4312 return SDValue(SVOp, 0); 4313} 4314 4315/// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd 4316/// operation of specified width. 4317static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, 4318 SDValue V2) { 4319 unsigned NumElems = VT.getVectorNumElements(); 4320 SmallVector<int, 8> Mask; 4321 Mask.push_back(NumElems); 4322 for (unsigned i = 1; i != NumElems; ++i) 4323 Mask.push_back(i); 4324 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]); 4325} 4326 4327/// getUnpackl - Returns a vector_shuffle node for an unpackl operation. 4328static SDValue getUnpackl(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, 4329 SDValue V2) { 4330 unsigned NumElems = VT.getVectorNumElements(); 4331 SmallVector<int, 8> Mask; 4332 for (unsigned i = 0, e = NumElems/2; i != e; ++i) { 4333 Mask.push_back(i); 4334 Mask.push_back(i + NumElems); 4335 } 4336 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]); 4337} 4338 4339/// getUnpackh - Returns a vector_shuffle node for an unpackh operation. 4340static SDValue getUnpackh(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, 4341 SDValue V2) { 4342 unsigned NumElems = VT.getVectorNumElements(); 4343 unsigned Half = NumElems/2; 4344 SmallVector<int, 8> Mask; 4345 for (unsigned i = 0; i != Half; ++i) { 4346 Mask.push_back(i + Half); 4347 Mask.push_back(i + NumElems + Half); 4348 } 4349 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]); 4350} 4351 4352// PromoteSplati8i16 - All i16 and i8 vector types can't be used directly by 4353// a generic shuffle instruction because the target has no such instructions. 4354// Generate shuffles which repeat i16 and i8 several times until they can be 4355// represented by v4f32 and then be manipulated by target suported shuffles. 
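// For instance, to splat element 5 of a v8i16: a single unpackh pass leaves
// each pair of adjacent i16 elements equal, the splat element ends up in
// 32-bit chunk 1 (EltNo becomes 1), and the result can then be splatted as a
// v4f32 by getLegalSplat.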
4356static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) { 4357 EVT VT = V.getValueType(); 4358 int NumElems = VT.getVectorNumElements(); 4359 DebugLoc dl = V.getDebugLoc(); 4360 4361 while (NumElems > 4) { 4362 if (EltNo < NumElems/2) { 4363 V = getUnpackl(DAG, dl, VT, V, V); 4364 } else { 4365 V = getUnpackh(DAG, dl, VT, V, V); 4366 EltNo -= NumElems/2; 4367 } 4368 NumElems >>= 1; 4369 } 4370 return V; 4371} 4372 4373/// getLegalSplat - Generate a legal splat with supported x86 shuffles 4374static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) { 4375 EVT VT = V.getValueType(); 4376 DebugLoc dl = V.getDebugLoc(); 4377 assert((VT.getSizeInBits() == 128 || VT.getSizeInBits() == 256) 4378 && "Vector size not supported"); 4379 4380 if (VT.getSizeInBits() == 128) { 4381 V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V); 4382 int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo }; 4383 V = DAG.getVectorShuffle(MVT::v4f32, dl, V, DAG.getUNDEF(MVT::v4f32), 4384 &SplatMask[0]); 4385 } else { 4386 // To use VPERMILPS to splat scalars, the second half of indicies must 4387 // refer to the higher part, which is a duplication of the lower one, 4388 // because VPERMILPS can only handle in-lane permutations. 4389 int SplatMask[8] = { EltNo, EltNo, EltNo, EltNo, 4390 EltNo+4, EltNo+4, EltNo+4, EltNo+4 }; 4391 4392 V = DAG.getNode(ISD::BITCAST, dl, MVT::v8f32, V); 4393 V = DAG.getVectorShuffle(MVT::v8f32, dl, V, DAG.getUNDEF(MVT::v8f32), 4394 &SplatMask[0]); 4395 } 4396 4397 return DAG.getNode(ISD::BITCAST, dl, VT, V); 4398} 4399 4400/// PromoteSplat - Splat is promoted to target supported vector shuffles. 4401static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) { 4402 EVT SrcVT = SV->getValueType(0); 4403 SDValue V1 = SV->getOperand(0); 4404 DebugLoc dl = SV->getDebugLoc(); 4405 4406 int EltNo = SV->getSplatIndex(); 4407 int NumElems = SrcVT.getVectorNumElements(); 4408 unsigned Size = SrcVT.getSizeInBits(); 4409 4410 assert(((Size == 128 && NumElems > 4) || Size == 256) && 4411 "Unknown how to promote splat for type"); 4412 4413 // Extract the 128-bit part containing the splat element and update 4414 // the splat element index when it refers to the higher register. 4415 if (Size == 256) { 4416 unsigned Idx = (EltNo > NumElems/2) ? NumElems/2 : 0; 4417 V1 = Extract128BitVector(V1, DAG.getConstant(Idx, MVT::i32), DAG, dl); 4418 if (Idx > 0) 4419 EltNo -= NumElems/2; 4420 } 4421 4422 // All i16 and i8 vector types can't be used directly by a generic shuffle 4423 // instruction because the target has no such instruction. Generate shuffles 4424 // which repeat i16 and i8 several times until they fit in i32, and then can 4425 // be manipulated by target suported shuffles. 4426 EVT EltVT = SrcVT.getVectorElementType(); 4427 if (EltVT == MVT::i8 || EltVT == MVT::i16) 4428 V1 = PromoteSplati8i16(V1, DAG, EltNo); 4429 4430 // Recreate the 256-bit vector and place the same 128-bit vector 4431 // into the low and high part. This is necessary because we want 4432 // to use VPERM* to shuffle the vectors 4433 if (Size == 256) { 4434 SDValue InsV = Insert128BitVector(DAG.getUNDEF(SrcVT), V1, 4435 DAG.getConstant(0, MVT::i32), DAG, dl); 4436 V1 = Insert128BitVector(InsV, V1, 4437 DAG.getConstant(NumElems/2, MVT::i32), DAG, dl); 4438 } 4439 4440 return getLegalSplat(DAG, V1, EltNo); 4441} 4442 4443/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified 4444/// vector of zero or undef vector. 
This produces a shuffle where the low 4445/// element of V2 is swizzled into the zero/undef vector, landing at element 4446/// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3). 4447static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx, 4448 bool isZero, bool HasSSE2, 4449 SelectionDAG &DAG) { 4450 EVT VT = V2.getValueType(); 4451 SDValue V1 = isZero 4452 ? getZeroVector(VT, HasSSE2, DAG, V2.getDebugLoc()) : DAG.getUNDEF(VT); 4453 unsigned NumElems = VT.getVectorNumElements(); 4454 SmallVector<int, 16> MaskVec; 4455 for (unsigned i = 0; i != NumElems; ++i) 4456 // If this is the insertion idx, put the low elt of V2 here. 4457 MaskVec.push_back(i == Idx ? NumElems : i); 4458 return DAG.getVectorShuffle(VT, V2.getDebugLoc(), V1, V2, &MaskVec[0]); 4459} 4460 4461/// getShuffleScalarElt - Returns the scalar element that will make up the ith 4462/// element of the result of the vector shuffle. 4463static SDValue getShuffleScalarElt(SDNode *N, int Index, SelectionDAG &DAG, 4464 unsigned Depth) { 4465 if (Depth == 6) 4466 return SDValue(); // Limit search depth. 4467 4468 SDValue V = SDValue(N, 0); 4469 EVT VT = V.getValueType(); 4470 unsigned Opcode = V.getOpcode(); 4471 4472 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars. 4473 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) { 4474 Index = SV->getMaskElt(Index); 4475 4476 if (Index < 0) 4477 return DAG.getUNDEF(VT.getVectorElementType()); 4478 4479 int NumElems = VT.getVectorNumElements(); 4480 SDValue NewV = (Index < NumElems) ? SV->getOperand(0) : SV->getOperand(1); 4481 return getShuffleScalarElt(NewV.getNode(), Index % NumElems, DAG, Depth+1); 4482 } 4483 4484 // Recurse into target specific vector shuffles to find scalars. 4485 if (isTargetShuffle(Opcode)) { 4486 int NumElems = VT.getVectorNumElements(); 4487 SmallVector<unsigned, 16> ShuffleMask; 4488 SDValue ImmN; 4489 4490 switch(Opcode) { 4491 case X86ISD::SHUFP: 4492 ImmN = N->getOperand(N->getNumOperands()-1); 4493 DecodeSHUFPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), 4494 ShuffleMask); 4495 break; 4496 case X86ISD::UNPCKH: 4497 DecodeUNPCKHMask(VT, ShuffleMask); 4498 break; 4499 case X86ISD::UNPCKL: 4500 DecodeUNPCKLMask(VT, ShuffleMask); 4501 break; 4502 case X86ISD::MOVHLPS: 4503 DecodeMOVHLPSMask(NumElems, ShuffleMask); 4504 break; 4505 case X86ISD::MOVLHPS: 4506 DecodeMOVLHPSMask(NumElems, ShuffleMask); 4507 break; 4508 case X86ISD::PSHUFD: 4509 ImmN = N->getOperand(N->getNumOperands()-1); 4510 DecodePSHUFMask(NumElems, 4511 cast<ConstantSDNode>(ImmN)->getZExtValue(), 4512 ShuffleMask); 4513 break; 4514 case X86ISD::PSHUFHW: 4515 ImmN = N->getOperand(N->getNumOperands()-1); 4516 DecodePSHUFHWMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), 4517 ShuffleMask); 4518 break; 4519 case X86ISD::PSHUFLW: 4520 ImmN = N->getOperand(N->getNumOperands()-1); 4521 DecodePSHUFLWMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), 4522 ShuffleMask); 4523 break; 4524 case X86ISD::MOVSS: 4525 case X86ISD::MOVSD: { 4526 // The index 0 always comes from the first element of the second source, 4527 // this is why MOVSS and MOVSD are used in the first place. The other 4528 // elements come from the other positions of the first source vector. 4529 unsigned OpNum = (Index == 0) ? 
1 : 0; 4530 return getShuffleScalarElt(V.getOperand(OpNum).getNode(), Index, DAG, 4531 Depth+1); 4532 } 4533 case X86ISD::VPERMILP: 4534 ImmN = N->getOperand(N->getNumOperands()-1); 4535 DecodeVPERMILPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), 4536 ShuffleMask); 4537 break; 4538 case X86ISD::VPERM2X128: 4539 ImmN = N->getOperand(N->getNumOperands()-1); 4540 DecodeVPERM2F128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), 4541 ShuffleMask); 4542 break; 4543 case X86ISD::MOVDDUP: 4544 case X86ISD::MOVLHPD: 4545 case X86ISD::MOVLPD: 4546 case X86ISD::MOVLPS: 4547 case X86ISD::MOVSHDUP: 4548 case X86ISD::MOVSLDUP: 4549 case X86ISD::PALIGN: 4550 return SDValue(); // Not yet implemented. 4551 default: 4552 assert(0 && "unknown target shuffle node"); 4553 return SDValue(); 4554 } 4555 4556 Index = ShuffleMask[Index]; 4557 if (Index < 0) 4558 return DAG.getUNDEF(VT.getVectorElementType()); 4559 4560 SDValue NewV = (Index < NumElems) ? N->getOperand(0) : N->getOperand(1); 4561 return getShuffleScalarElt(NewV.getNode(), Index % NumElems, DAG, 4562 Depth+1); 4563 } 4564 4565 // Actual nodes that may contain scalar elements 4566 if (Opcode == ISD::BITCAST) { 4567 V = V.getOperand(0); 4568 EVT SrcVT = V.getValueType(); 4569 unsigned NumElems = VT.getVectorNumElements(); 4570 4571 if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems) 4572 return SDValue(); 4573 } 4574 4575 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) 4576 return (Index == 0) ? V.getOperand(0) 4577 : DAG.getUNDEF(VT.getVectorElementType()); 4578 4579 if (V.getOpcode() == ISD::BUILD_VECTOR) 4580 return V.getOperand(Index); 4581 4582 return SDValue(); 4583} 4584 4585/// getNumOfConsecutiveZeros - Return the number of elements of a vector 4586/// shuffle operation which are consecutively zero (or undef). The 4587/// search can start in two different directions, from left or right. 4588static 4589unsigned getNumOfConsecutiveZeros(SDNode *N, int NumElems, 4590 bool ZerosFromLeft, SelectionDAG &DAG) { 4591 int i = 0; 4592 4593 while (i < NumElems) { 4594 unsigned Index = ZerosFromLeft ? i : NumElems-i-1; 4595 SDValue Elt = getShuffleScalarElt(N, Index, DAG, 0); 4596 if (!(Elt.getNode() && 4597 (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt)))) 4598 break; 4599 ++i; 4600 } 4601 4602 return i; 4603} 4604 4605/// isShuffleMaskConsecutive - Check if the shuffle mask indices from MaskI to 4606/// MaskE correspond consecutively to elements from one of the vector operands, 4607/// starting from its index OpIdx. Also report in OpNum which source vector operand they come from. 4608static 4609bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp, int MaskI, int MaskE, 4610 int OpIdx, int NumElems, unsigned &OpNum) { 4611 bool SeenV1 = false; 4612 bool SeenV2 = false; 4613 4614 for (int i = MaskI; i <= MaskE; ++i, ++OpIdx) { 4615 int Idx = SVOp->getMaskElt(i); 4616 // Ignore undef indices 4617 if (Idx < 0) 4618 continue; 4619 4620 if (Idx < NumElems) 4621 SeenV1 = true; 4622 else 4623 SeenV2 = true; 4624 4625 // Only accept consecutive elements from the same vector 4626 if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2)) 4627 return false; 4628 } 4629 4630 OpNum = SeenV1 ? 0 : 1; 4631 return true; 4632} 4633 4634/// isVectorShiftRight - Returns true if the shuffle can be implemented as a 4635/// logical right shift of a vector.
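// For example, with V1 = <a,b,c,d> and V2 all zeros, the v4i32 mask
// <1,2,3,4> produces <b,c,d,0>, i.e. V1 logically shifted right by one
// element (ShAmt = 1), which getVShift can then emit as a whole-vector
// logical shift.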
4636static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG, 4637 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) { 4638 unsigned NumElems = SVOp->getValueType(0).getVectorNumElements(); 4639 unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems, 4640 false /* check zeros from right */, DAG); 4641 unsigned OpSrc; 4642 4643 if (!NumZeros) 4644 return false; 4645 4646 // Considering the elements in the mask that are not consecutive zeros, 4647 // check if they consecutively come from only one of the source vectors. 4648 // 4649 // V1 = {X, A, B, C} 0 4650 // \ \ \ / 4651 // vector_shuffle V1, V2 <1, 2, 3, X> 4652 // 4653 if (!isShuffleMaskConsecutive(SVOp, 4654 0, // Mask Start Index 4655 NumElems-NumZeros-1, // Mask End Index 4656 NumZeros, // Where to start looking in the src vector 4657 NumElems, // Number of elements in vector 4658 OpSrc)) // Which source operand ? 4659 return false; 4660 4661 isLeft = false; 4662 ShAmt = NumZeros; 4663 ShVal = SVOp->getOperand(OpSrc); 4664 return true; 4665} 4666 4667/// isVectorShiftLeft - Returns true if the shuffle can be implemented as a 4668/// logical left shift of a vector. 4669static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG, 4670 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) { 4671 unsigned NumElems = SVOp->getValueType(0).getVectorNumElements(); 4672 unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems, 4673 true /* check zeros from left */, DAG); 4674 unsigned OpSrc; 4675 4676 if (!NumZeros) 4677 return false; 4678 4679 // Considering the elements in the mask that are not consecutive zeros, 4680 // check if they consecutively come from only one of the source vectors. 4681 // 4682 // 0 { A, B, X, X } = V2 4683 // / \ / / 4684 // vector_shuffle V1, V2 <X, X, 4, 5> 4685 // 4686 if (!isShuffleMaskConsecutive(SVOp, 4687 NumZeros, // Mask Start Index 4688 NumElems-1, // Mask End Index 4689 0, // Where to start looking in the src vector 4690 NumElems, // Number of elements in vector 4691 OpSrc)) // Which source operand ? 4692 return false; 4693 4694 isLeft = true; 4695 ShAmt = NumZeros; 4696 ShVal = SVOp->getOperand(OpSrc); 4697 return true; 4698} 4699 4700/// isVectorShift - Returns true if the shuffle can be implemented as a 4701/// logical left or right shift of a vector. 4702static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG, 4703 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) { 4704 // Although the logic below support any bitwidth size, there are no 4705 // shift instructions which handle more than 128-bit vectors. 4706 if (SVOp->getValueType(0).getSizeInBits() > 128) 4707 return false; 4708 4709 if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) || 4710 isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt)) 4711 return true; 4712 4713 return false; 4714} 4715 4716/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8. 
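/// Adjacent bytes are merged into i16 words: the odd-indexed byte is
/// zero-extended and shifted left by 8, OR'd with the zero-extended
/// even-indexed byte when both are non-zero, and the word is inserted into
/// a v8i16 which is finally bitcast back to v16i8. E.g. operands 2 and 3
/// end up as ((b3 << 8) | b2) in word 1.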
4717/// 4718static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros, 4719 unsigned NumNonZero, unsigned NumZero, 4720 SelectionDAG &DAG, 4721 const TargetLowering &TLI) { 4722 if (NumNonZero > 8) 4723 return SDValue(); 4724 4725 DebugLoc dl = Op.getDebugLoc(); 4726 SDValue V(0, 0); 4727 bool First = true; 4728 for (unsigned i = 0; i < 16; ++i) { 4729 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0; 4730 if (ThisIsNonZero && First) { 4731 if (NumZero) 4732 V = getZeroVector(MVT::v8i16, true, DAG, dl); 4733 else 4734 V = DAG.getUNDEF(MVT::v8i16); 4735 First = false; 4736 } 4737 4738 if ((i & 1) != 0) { 4739 SDValue ThisElt(0, 0), LastElt(0, 0); 4740 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0; 4741 if (LastIsNonZero) { 4742 LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl, 4743 MVT::i16, Op.getOperand(i-1)); 4744 } 4745 if (ThisIsNonZero) { 4746 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i)); 4747 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16, 4748 ThisElt, DAG.getConstant(8, MVT::i8)); 4749 if (LastIsNonZero) 4750 ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt); 4751 } else 4752 ThisElt = LastElt; 4753 4754 if (ThisElt.getNode()) 4755 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt, 4756 DAG.getIntPtrConstant(i/2)); 4757 } 4758 } 4759 4760 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V); 4761} 4762 4763/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16. 4764/// 4765static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros, 4766 unsigned NumNonZero, unsigned NumZero, 4767 SelectionDAG &DAG, 4768 const TargetLowering &TLI) { 4769 if (NumNonZero > 4) 4770 return SDValue(); 4771 4772 DebugLoc dl = Op.getDebugLoc(); 4773 SDValue V(0, 0); 4774 bool First = true; 4775 for (unsigned i = 0; i < 8; ++i) { 4776 bool isNonZero = (NonZeros & (1 << i)) != 0; 4777 if (isNonZero) { 4778 if (First) { 4779 if (NumZero) 4780 V = getZeroVector(MVT::v8i16, true, DAG, dl); 4781 else 4782 V = DAG.getUNDEF(MVT::v8i16); 4783 First = false; 4784 } 4785 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, 4786 MVT::v8i16, V, Op.getOperand(i), 4787 DAG.getIntPtrConstant(i)); 4788 } 4789 } 4790 4791 return V; 4792} 4793 4794/// getVShift - Return a vector logical shift node. 4795/// 4796static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, 4797 unsigned NumBits, SelectionDAG &DAG, 4798 const TargetLowering &TLI, DebugLoc dl) { 4799 assert(VT.getSizeInBits() == 128 && "Unknown type for VShift"); 4800 EVT ShVT = MVT::v2i64; 4801 unsigned Opc = isLeft ? X86ISD::VSHL : X86ISD::VSRL; 4802 SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp); 4803 return DAG.getNode(ISD::BITCAST, dl, VT, 4804 DAG.getNode(Opc, dl, ShVT, SrcOp, 4805 DAG.getConstant(NumBits, 4806 TLI.getShiftAmountTy(SrcOp.getValueType())))); 4807} 4808 4809SDValue 4810X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl, 4811 SelectionDAG &DAG) const { 4812 4813 // Check if the scalar load can be widened into a vector load. And if 4814 // the address is "base + cst" see if the cst can be "absorbed" into 4815 // the shuffle mask. 
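  // For example, an f32 load from <FrameIndex + 8> feeding a v4f32 splat:
  // the stack slot is (re)aligned to 16 bytes, the load is widened to a
  // v4f32 load from the frame index itself, and the +8 displacement is
  // absorbed as splat element (8 >> 2) = 2, i.e. a <2,2,2,2> shuffle mask.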
4816 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) { 4817 SDValue Ptr = LD->getBasePtr(); 4818 if (!ISD::isNormalLoad(LD) || LD->isVolatile()) 4819 return SDValue(); 4820 EVT PVT = LD->getValueType(0); 4821 if (PVT != MVT::i32 && PVT != MVT::f32) 4822 return SDValue(); 4823 4824 int FI = -1; 4825 int64_t Offset = 0; 4826 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) { 4827 FI = FINode->getIndex(); 4828 Offset = 0; 4829 } else if (DAG.isBaseWithConstantOffset(Ptr) && 4830 isa<FrameIndexSDNode>(Ptr.getOperand(0))) { 4831 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 4832 Offset = Ptr.getConstantOperandVal(1); 4833 Ptr = Ptr.getOperand(0); 4834 } else { 4835 return SDValue(); 4836 } 4837 4838 // FIXME: 256-bit vector instructions don't require a strict alignment, 4839 // improve this code to support it better. 4840 unsigned RequiredAlign = VT.getSizeInBits()/8; 4841 SDValue Chain = LD->getChain(); 4842 // Make sure the stack object alignment is at least 16 or 32. 4843 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 4844 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) { 4845 if (MFI->isFixedObjectIndex(FI)) { 4846 // Can't change the alignment. FIXME: It's possible to compute 4847 // the exact stack offset and reference FI + adjust offset instead. 4848 // If someone *really* cares about this. That's the way to implement it. 4849 return SDValue(); 4850 } else { 4851 MFI->setObjectAlignment(FI, RequiredAlign); 4852 } 4853 } 4854 4855 // (Offset % 16 or 32) must be multiple of 4. Then address is then 4856 // Ptr + (Offset & ~15). 4857 if (Offset < 0) 4858 return SDValue(); 4859 if ((Offset % RequiredAlign) & 3) 4860 return SDValue(); 4861 int64_t StartOffset = Offset & ~(RequiredAlign-1); 4862 if (StartOffset) 4863 Ptr = DAG.getNode(ISD::ADD, Ptr.getDebugLoc(), Ptr.getValueType(), 4864 Ptr,DAG.getConstant(StartOffset, Ptr.getValueType())); 4865 4866 int EltNo = (Offset - StartOffset) >> 2; 4867 int NumElems = VT.getVectorNumElements(); 4868 4869 EVT CanonVT = VT.getSizeInBits() == 128 ? MVT::v4i32 : MVT::v8i32; 4870 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems); 4871 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr, 4872 LD->getPointerInfo().getWithOffset(StartOffset), 4873 false, false, false, 0); 4874 4875 // Canonicalize it to a v4i32 or v8i32 shuffle. 4876 SmallVector<int, 8> Mask; 4877 for (int i = 0; i < NumElems; ++i) 4878 Mask.push_back(EltNo); 4879 4880 V1 = DAG.getNode(ISD::BITCAST, dl, CanonVT, V1); 4881 return DAG.getNode(ISD::BITCAST, dl, NVT, 4882 DAG.getVectorShuffle(CanonVT, dl, V1, 4883 DAG.getUNDEF(CanonVT),&Mask[0])); 4884 } 4885 4886 return SDValue(); 4887} 4888 4889/// EltsFromConsecutiveLoads - Given the initializing elements 'Elts' of a 4890/// vector of type 'VT', see if the elements can be replaced by a single large 4891/// load which has the same value as a build_vector whose operands are 'elts'. 4892/// 4893/// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a 4894/// 4895/// FIXME: we'd also like to handle the case where the last elements are zero 4896/// rather than undef via VZEXT_LOAD, but we do not detect that case today. 4897/// There's even a handy isZeroNode for that purpose. 
4898static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts, 4899 DebugLoc &DL, SelectionDAG &DAG) { 4900 EVT EltVT = VT.getVectorElementType(); 4901 unsigned NumElems = Elts.size(); 4902 4903 LoadSDNode *LDBase = NULL; 4904 unsigned LastLoadedElt = -1U; 4905 4906 // For each element in the initializer, see if we've found a load or an undef. 4907 // If we don't find an initial load element, or later load elements are 4908 // non-consecutive, bail out. 4909 for (unsigned i = 0; i < NumElems; ++i) { 4910 SDValue Elt = Elts[i]; 4911 4912 if (!Elt.getNode() || 4913 (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode()))) 4914 return SDValue(); 4915 if (!LDBase) { 4916 if (Elt.getNode()->getOpcode() == ISD::UNDEF) 4917 return SDValue(); 4918 LDBase = cast<LoadSDNode>(Elt.getNode()); 4919 LastLoadedElt = i; 4920 continue; 4921 } 4922 if (Elt.getOpcode() == ISD::UNDEF) 4923 continue; 4924 4925 LoadSDNode *LD = cast<LoadSDNode>(Elt); 4926 if (!DAG.isConsecutiveLoad(LD, LDBase, EltVT.getSizeInBits()/8, i)) 4927 return SDValue(); 4928 LastLoadedElt = i; 4929 } 4930 4931 // If we have found an entire vector of loads and undefs, then return a large 4932 // load of the entire vector width starting at the base pointer. If we found 4933 // consecutive loads for the low half, generate a vzext_load node. 4934 if (LastLoadedElt == NumElems - 1) { 4935 if (DAG.InferPtrAlignment(LDBase->getBasePtr()) >= 16) 4936 return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(), 4937 LDBase->getPointerInfo(), 4938 LDBase->isVolatile(), LDBase->isNonTemporal(), 4939 LDBase->isInvariant(), 0); 4940 return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(), 4941 LDBase->getPointerInfo(), 4942 LDBase->isVolatile(), LDBase->isNonTemporal(), 4943 LDBase->isInvariant(), LDBase->getAlignment()); 4944 } else if (NumElems == 4 && LastLoadedElt == 1 && 4945 DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) { 4946 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other); 4947 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() }; 4948 SDValue ResNode = 4949 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, 2, MVT::i64, 4950 LDBase->getPointerInfo(), 4951 LDBase->getAlignment(), 4952 false/*isVolatile*/, true/*ReadMem*/, 4953 false/*WriteMem*/); 4954 return DAG.getNode(ISD::BITCAST, DL, VT, ResNode); 4955 } 4956 return SDValue(); 4957} 4958 4959/// isVectorBroadcast - Check if the node chain is suitable to be xformed to 4960/// a vbroadcast node. We support two patterns: 4961/// 1. A splat BUILD_VECTOR which uses a single scalar load. 4962/// 2. A splat shuffle which uses a scalar_to_vector node which comes from 4963/// a scalar load. 4964/// The scalar load node is returned when a pattern is found, 4965/// or SDValue() otherwise. 4966static SDValue isVectorBroadcast(SDValue &Op, const X86Subtarget *Subtarget) { 4967 if (!Subtarget->hasAVX()) 4968 return SDValue(); 4969 4970 EVT VT = Op.getValueType(); 4971 SDValue V = Op; 4972 4973 if (V.hasOneUse() && V.getOpcode() == ISD::BITCAST) 4974 V = V.getOperand(0); 4975 4976 //A suspected load to be broadcasted. 4977 SDValue Ld; 4978 4979 switch (V.getOpcode()) { 4980 default: 4981 // Unknown pattern found. 4982 return SDValue(); 4983 4984 case ISD::BUILD_VECTOR: { 4985 // The BUILD_VECTOR node must be a splat. 4986 if (!isSplatVector(V.getNode())) 4987 return SDValue(); 4988 4989 Ld = V.getOperand(0); 4990 4991 // The suspected load node has several users. 
Make sure that all 4992 // of its users are from the BUILD_VECTOR node. 4993 if (!Ld->hasNUsesOfValue(VT.getVectorNumElements(), 0)) 4994 return SDValue(); 4995 break; 4996 } 4997 4998 case ISD::VECTOR_SHUFFLE: { 4999 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 5000 5001 // Shuffles must have a splat mask where the first element is 5002 // broadcasted. 5003 if ((!SVOp->isSplat()) || SVOp->getMaskElt(0) != 0) 5004 return SDValue(); 5005 5006 SDValue Sc = Op.getOperand(0); 5007 if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR) 5008 return SDValue(); 5009 5010 Ld = Sc.getOperand(0); 5011 5012 // The scalar_to_vector node and the suspected 5013 // load node must have exactly one user. 5014 if (!Sc.hasOneUse() || !Ld.hasOneUse()) 5015 return SDValue(); 5016 break; 5017 } 5018 } 5019 5020 // The scalar source must be a normal load. 5021 if (!ISD::isNormalLoad(Ld.getNode())) 5022 return SDValue(); 5023 5024 bool Is256 = VT.getSizeInBits() == 256; 5025 bool Is128 = VT.getSizeInBits() == 128; 5026 unsigned ScalarSize = Ld.getValueType().getSizeInBits(); 5027 5028 // VBroadcast to YMM 5029 if (Is256 && (ScalarSize == 32 || ScalarSize == 64)) 5030 return Ld; 5031 5032 // VBroadcast to XMM 5033 if (Is128 && (ScalarSize == 32)) 5034 return Ld; 5035 5036 // The integer check is needed for the 64-bit into 128-bit so it doesn't match 5037 // double since there is vbroadcastsd xmm 5038 if (Subtarget->hasAVX2() && Ld.getValueType().isInteger()) { 5039 // VBroadcast to YMM 5040 if (Is256 && (ScalarSize == 8 || ScalarSize == 16)) 5041 return Ld; 5042 5043 // VBroadcast to XMM 5044 if (Is128 && (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)) 5045 return Ld; 5046 } 5047 5048 // Unsupported broadcast. 5049 return SDValue(); 5050} 5051 5052SDValue 5053X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { 5054 DebugLoc dl = Op.getDebugLoc(); 5055 5056 EVT VT = Op.getValueType(); 5057 EVT ExtVT = VT.getVectorElementType(); 5058 unsigned NumElems = Op.getNumOperands(); 5059 5060 // Vectors containing all zeros can be matched by pxor and xorps later 5061 if (ISD::isBuildVectorAllZeros(Op.getNode())) { 5062 // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd 5063 // and 2) ensure that i64 scalars are eliminated on x86-32 hosts. 5064 if (Op.getValueType() == MVT::v4i32 || 5065 Op.getValueType() == MVT::v8i32) 5066 return Op; 5067 5068 return getZeroVector(Op.getValueType(), Subtarget->hasSSE2(), DAG, dl); 5069 } 5070 5071 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width 5072 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use 5073 // vpcmpeqd on 256-bit vectors. 
5074 if (ISD::isBuildVectorAllOnes(Op.getNode())) { 5075 if (Op.getValueType() == MVT::v4i32 || 5076 (Op.getValueType() == MVT::v8i32 && Subtarget->hasAVX2())) 5077 return Op; 5078 5079 return getOnesVector(Op.getValueType(), Subtarget->hasAVX2(), DAG, dl); 5080 } 5081 5082 SDValue LD = isVectorBroadcast(Op, Subtarget); 5083 if (LD.getNode()) 5084 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, LD); 5085 5086 unsigned EVTBits = ExtVT.getSizeInBits(); 5087 5088 unsigned NumZero = 0; 5089 unsigned NumNonZero = 0; 5090 unsigned NonZeros = 0; 5091 bool IsAllConstants = true; 5092 SmallSet<SDValue, 8> Values; 5093 for (unsigned i = 0; i < NumElems; ++i) { 5094 SDValue Elt = Op.getOperand(i); 5095 if (Elt.getOpcode() == ISD::UNDEF) 5096 continue; 5097 Values.insert(Elt); 5098 if (Elt.getOpcode() != ISD::Constant && 5099 Elt.getOpcode() != ISD::ConstantFP) 5100 IsAllConstants = false; 5101 if (X86::isZeroNode(Elt)) 5102 NumZero++; 5103 else { 5104 NonZeros |= (1 << i); 5105 NumNonZero++; 5106 } 5107 } 5108 5109 // All undef vector. Return an UNDEF. All zero vectors were handled above. 5110 if (NumNonZero == 0) 5111 return DAG.getUNDEF(VT); 5112 5113 // Special case for single non-zero, non-undef, element. 5114 if (NumNonZero == 1) { 5115 unsigned Idx = CountTrailingZeros_32(NonZeros); 5116 SDValue Item = Op.getOperand(Idx); 5117 5118 // If this is an insertion of an i64 value on x86-32, and if the top bits of 5119 // the value are obviously zero, truncate the value to i32 and do the 5120 // insertion that way. Only do this if the value is non-constant or if the 5121 // value is a constant being inserted into element 0. It is cheaper to do 5122 // a constant pool load than it is to do a movd + shuffle. 5123 if (ExtVT == MVT::i64 && !Subtarget->is64Bit() && 5124 (!IsAllConstants || Idx == 0)) { 5125 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) { 5126 // Handle SSE only. 5127 assert(VT == MVT::v2i64 && "Expected an SSE value type!"); 5128 EVT VecVT = MVT::v4i32; 5129 unsigned VecElts = 4; 5130 5131 // Truncate the value (which may itself be a constant) to i32, and 5132 // convert it to a vector with movd (S2V+shuffle to zero extend). 5133 Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item); 5134 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item); 5135 Item = getShuffleVectorZeroOrUndef(Item, 0, true, 5136 Subtarget->hasSSE2(), DAG); 5137 5138 // Now we have our 32-bit value zero extended in the low element of 5139 // a vector. If Idx != 0, swizzle it into place. 5140 if (Idx != 0) { 5141 SmallVector<int, 4> Mask; 5142 Mask.push_back(Idx); 5143 for (unsigned i = 1; i != VecElts; ++i) 5144 Mask.push_back(i); 5145 Item = DAG.getVectorShuffle(VecVT, dl, Item, 5146 DAG.getUNDEF(Item.getValueType()), 5147 &Mask[0]); 5148 } 5149 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Item); 5150 } 5151 } 5152 5153 // If we have a constant or non-constant insertion into the low element of 5154 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into 5155 // the rest of the elements. This will be matched as movd/movq/movss/movsd 5156 // depending on what the source datatype is. 
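    // For example, a v4f32 build_vector <x,0,0,0> becomes
    // vector_shuffle <4,1,2,3> of a zero vector and (scalar_to_vector x):
    // index 4 selects the low element of the second operand, so this
    // matches MOVSS.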
5157 if (Idx == 0) { 5158 if (NumZero == 0) 5159 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); 5160 5161 if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 || 5162 (ExtVT == MVT::i64 && Subtarget->is64Bit())) { 5163 if (VT.getSizeInBits() == 256) { 5164 EVT VT128 = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems / 2); 5165 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Item); 5166 SDValue ZeroVec = getZeroVector(VT, true, DAG, dl); 5167 return Insert128BitVector(ZeroVec, Item, DAG.getConstant(0, MVT::i32), 5168 DAG, dl); 5169 } 5170 assert(VT.getSizeInBits() == 128 && "Expected an SSE value type!"); 5171 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); 5172 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector. 5173 return getShuffleVectorZeroOrUndef(Item, 0, true, 5174 Subtarget->hasSSE2(), DAG); 5175 } 5176 5177 if (ExtVT == MVT::i16 || ExtVT == MVT::i8) { 5178 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item); 5179 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item); 5180 if (VT.getSizeInBits() == 256) { 5181 SDValue ZeroVec = getZeroVector(MVT::v8i32, true, DAG, dl); 5182 Item = Insert128BitVector(ZeroVec, Item, DAG.getConstant(0, MVT::i32), 5183 DAG, dl); 5184 } else { 5185 assert(VT.getSizeInBits() == 128 && "Expected an SSE value type!"); 5186 Item = getShuffleVectorZeroOrUndef(Item, 0, true, 5187 Subtarget->hasSSE2(), DAG); 5188 } 5189 return DAG.getNode(ISD::BITCAST, dl, VT, Item); 5190 } 5191 } 5192 5193 // Is it a vector logical left shift? 5194 if (NumElems == 2 && Idx == 1 && 5195 X86::isZeroNode(Op.getOperand(0)) && 5196 !X86::isZeroNode(Op.getOperand(1))) { 5197 unsigned NumBits = VT.getSizeInBits(); 5198 return getVShift(true, VT, 5199 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 5200 VT, Op.getOperand(1)), 5201 NumBits/2, DAG, *this, dl); 5202 } 5203 5204 if (IsAllConstants) // Otherwise, it's better to do a constpool load. 5205 return SDValue(); 5206 5207 // Otherwise, if this is a vector with i32 or f32 elements, and the element 5208 // is a non-constant being inserted into an element other than the low one, 5209 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka 5210 // movd/movss) to move this into the low element, then shuffle it into 5211 // place. 5212 if (EVTBits == 32) { 5213 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); 5214 5215 // Turn it into a shuffle of zero and zero-extended scalar to vector. 5216 Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, 5217 Subtarget->hasSSE2(), DAG); 5218 SmallVector<int, 8> MaskVec; 5219 for (unsigned i = 0; i < NumElems; i++) 5220 MaskVec.push_back(i == Idx ? 0 : 1); 5221 return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]); 5222 } 5223 } 5224 5225 // Splat is obviously ok. Let legalizer expand it to a shuffle. 5226 if (Values.size() == 1) { 5227 if (EVTBits == 32) { 5228 // Instead of a shuffle like this: 5229 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0> 5230 // Check if it's possible to issue this instead. 5231 // shuffle (vload ptr)), undef, <1, 1, 1, 1> 5232 unsigned Idx = CountTrailingZeros_32(NonZeros); 5233 SDValue Item = Op.getOperand(Idx); 5234 if (Op.getNode()->isOnlyUserOf(Item.getNode())) 5235 return LowerAsSplatVectorLoad(Item, VT, dl, DAG); 5236 } 5237 return SDValue(); 5238 } 5239 5240 // A vector full of immediates; various special cases are already 5241 // handled, so this is best done with a single constant-pool load. 
5242 if (IsAllConstants) 5243 return SDValue(); 5244 5245 // For AVX-length vectors, build the individual 128-bit pieces and use 5246 // shuffles to put them in place. 5247 if (VT.getSizeInBits() == 256 && !ISD::isBuildVectorAllZeros(Op.getNode())) { 5248 SmallVector<SDValue, 32> V; 5249 for (unsigned i = 0; i < NumElems; ++i) 5250 V.push_back(Op.getOperand(i)); 5251 5252 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2); 5253 5254 // Build both the lower and upper subvector. 5255 SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT, &V[0], NumElems/2); 5256 SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT, &V[NumElems / 2], 5257 NumElems/2); 5258 5259 // Recreate the wider vector with the lower and upper part. 5260 SDValue Vec = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, VT), Lower, 5261 DAG.getConstant(0, MVT::i32), DAG, dl); 5262 return Insert128BitVector(Vec, Upper, DAG.getConstant(NumElems/2, MVT::i32), 5263 DAG, dl); 5264 } 5265 5266 // Let legalizer expand 2-wide build_vectors. 5267 if (EVTBits == 64) { 5268 if (NumNonZero == 1) { 5269 // One half is zero or undef. 5270 unsigned Idx = CountTrailingZeros_32(NonZeros); 5271 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, 5272 Op.getOperand(Idx)); 5273 return getShuffleVectorZeroOrUndef(V2, Idx, true, 5274 Subtarget->hasSSE2(), DAG); 5275 } 5276 return SDValue(); 5277 } 5278 5279 // If element VT is < 32 bits, convert it to inserts into a zero vector. 5280 if (EVTBits == 8 && NumElems == 16) { 5281 SDValue V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG, 5282 *this); 5283 if (V.getNode()) return V; 5284 } 5285 5286 if (EVTBits == 16 && NumElems == 8) { 5287 SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG, 5288 *this); 5289 if (V.getNode()) return V; 5290 } 5291 5292 // If element VT is == 32 bits, turn it into a number of shuffles. 5293 SmallVector<SDValue, 8> V; 5294 V.resize(NumElems); 5295 if (NumElems == 4 && NumZero > 0) { 5296 for (unsigned i = 0; i < 4; ++i) { 5297 bool isZero = !(NonZeros & (1 << i)); 5298 if (isZero) 5299 V[i] = getZeroVector(VT, Subtarget->hasSSE2(), DAG, dl); 5300 else 5301 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i)); 5302 } 5303 5304 for (unsigned i = 0; i < 2; ++i) { 5305 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) { 5306 default: break; 5307 case 0: 5308 V[i] = V[i*2]; // Must be a zero vector. 5309 break; 5310 case 1: 5311 V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]); 5312 break; 5313 case 2: 5314 V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]); 5315 break; 5316 case 3: 5317 V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]); 5318 break; 5319 } 5320 } 5321 5322 SmallVector<int, 8> MaskVec; 5323 bool Reverse = (NonZeros & 0x3) == 2; 5324 for (unsigned i = 0; i < 2; ++i) 5325 MaskVec.push_back(Reverse ? 1-i : i); 5326 Reverse = ((NonZeros & (0x3 << 2)) >> 2) == 2; 5327 for (unsigned i = 0; i < 2; ++i) 5328 MaskVec.push_back(Reverse ? 1-i+NumElems : i+NumElems); 5329 return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]); 5330 } 5331 5332 if (Values.size() > 1 && VT.getSizeInBits() == 128) { 5333 // Check for a build vector of consecutive loads. 5334 for (unsigned i = 0; i < NumElems; ++i) 5335 V[i] = Op.getOperand(i); 5336 5337 // Check for elements which are consecutive loads. 5338 SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG); 5339 if (LD.getNode()) 5340 return LD; 5341 5342 // For SSE 4.1, use insertps to put the high elements into the low element. 
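    // E.g. a v4f32 build_vector <a,b,c,d> becomes (scalar_to_vector a)
    // followed by INSERT_VECTOR_ELT of b, c and d at indices 1, 2 and 3,
    // each of which can be selected as an insertps.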
5343 if (getSubtarget()->hasSSE41()) { 5344 SDValue Result; 5345 if (Op.getOperand(0).getOpcode() != ISD::UNDEF) 5346 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0)); 5347 else 5348 Result = DAG.getUNDEF(VT); 5349 5350 for (unsigned i = 1; i < NumElems; ++i) { 5351 if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue; 5352 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result, 5353 Op.getOperand(i), DAG.getIntPtrConstant(i)); 5354 } 5355 return Result; 5356 } 5357 5358 // Otherwise, expand into a number of unpckl*, start by extending each of 5359 // our (non-undef) elements to the full vector width with the element in the 5360 // bottom slot of the vector (which generates no code for SSE). 5361 for (unsigned i = 0; i < NumElems; ++i) { 5362 if (Op.getOperand(i).getOpcode() != ISD::UNDEF) 5363 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i)); 5364 else 5365 V[i] = DAG.getUNDEF(VT); 5366 } 5367 5368 // Next, we iteratively mix elements, e.g. for v4f32: 5369 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0> 5370 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1> 5371 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0> 5372 unsigned EltStride = NumElems >> 1; 5373 while (EltStride != 0) { 5374 for (unsigned i = 0; i < EltStride; ++i) { 5375 // If V[i+EltStride] is undef and this is the first round of mixing, 5376 // then it is safe to just drop this shuffle: V[i] is already in the 5377 // right place, the one element (since it's the first round) being 5378 // inserted as undef can be dropped. This isn't safe for successive 5379 // rounds because they will permute elements within both vectors. 5380 if (V[i+EltStride].getOpcode() == ISD::UNDEF && 5381 EltStride == NumElems/2) 5382 continue; 5383 5384 V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]); 5385 } 5386 EltStride >>= 1; 5387 } 5388 return V[0]; 5389 } 5390 return SDValue(); 5391} 5392 5393// LowerMMXCONCAT_VECTORS - We support concatenate two MMX registers and place 5394// them in a MMX register. This is better than doing a stack convert. 5395static SDValue LowerMMXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 5396 DebugLoc dl = Op.getDebugLoc(); 5397 EVT ResVT = Op.getValueType(); 5398 5399 assert(ResVT == MVT::v2i64 || ResVT == MVT::v4i32 || 5400 ResVT == MVT::v8i16 || ResVT == MVT::v16i8); 5401 int Mask[2]; 5402 SDValue InVec = DAG.getNode(ISD::BITCAST,dl, MVT::v1i64, Op.getOperand(0)); 5403 SDValue VecOp = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64, InVec); 5404 InVec = Op.getOperand(1); 5405 if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR) { 5406 unsigned NumElts = ResVT.getVectorNumElements(); 5407 VecOp = DAG.getNode(ISD::BITCAST, dl, ResVT, VecOp); 5408 VecOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ResVT, VecOp, 5409 InVec.getOperand(0), DAG.getIntPtrConstant(NumElts/2+1)); 5410 } else { 5411 InVec = DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, InVec); 5412 SDValue VecOp2 = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64, InVec); 5413 Mask[0] = 0; Mask[1] = 2; 5414 VecOp = DAG.getVectorShuffle(MVT::v2i64, dl, VecOp, VecOp2, Mask); 5415 } 5416 return DAG.getNode(ISD::BITCAST, dl, ResVT, VecOp); 5417} 5418 5419// LowerAVXCONCAT_VECTORS - 256-bit AVX can use the vinsertf128 instruction 5420// to create 256-bit vectors from two other 128-bit ones. 
5421static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 5422 DebugLoc dl = Op.getDebugLoc(); 5423 EVT ResVT = Op.getValueType(); 5424 5425 assert(ResVT.getSizeInBits() == 256 && "Value type must be 256-bit wide"); 5426 5427 SDValue V1 = Op.getOperand(0); 5428 SDValue V2 = Op.getOperand(1); 5429 unsigned NumElems = ResVT.getVectorNumElements(); 5430 5431 SDValue V = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, ResVT), V1, 5432 DAG.getConstant(0, MVT::i32), DAG, dl); 5433 return Insert128BitVector(V, V2, DAG.getConstant(NumElems/2, MVT::i32), 5434 DAG, dl); 5435} 5436 5437SDValue 5438X86TargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const { 5439 EVT ResVT = Op.getValueType(); 5440 5441 assert(Op.getNumOperands() == 2); 5442 assert((ResVT.getSizeInBits() == 128 || ResVT.getSizeInBits() == 256) && 5443 "Unsupported CONCAT_VECTORS for value type"); 5444 5445 // We support concatenate two MMX registers and place them in a MMX register. 5446 // This is better than doing a stack convert. 5447 if (ResVT.is128BitVector()) 5448 return LowerMMXCONCAT_VECTORS(Op, DAG); 5449 5450 // 256-bit AVX can use the vinsertf128 instruction to create 256-bit vectors 5451 // from two other 128-bit ones. 5452 return LowerAVXCONCAT_VECTORS(Op, DAG); 5453} 5454 5455// v8i16 shuffles - Prefer shuffles in the following order: 5456// 1. [all] pshuflw, pshufhw, optional move 5457// 2. [ssse3] 1 x pshufb 5458// 3. [ssse3] 2 x pshufb + 1 x por 5459// 4. [all] mov + pshuflw + pshufhw + N x (pextrw + pinsrw) 5460SDValue 5461X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op, 5462 SelectionDAG &DAG) const { 5463 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 5464 SDValue V1 = SVOp->getOperand(0); 5465 SDValue V2 = SVOp->getOperand(1); 5466 DebugLoc dl = SVOp->getDebugLoc(); 5467 SmallVector<int, 8> MaskVals; 5468 5469 // Determine if more than 1 of the words in each of the low and high quadwords 5470 // of the result come from the same quadword of one of the two inputs. Undef 5471 // mask values count as coming from any quadword, for better codegen. 5472 unsigned LoQuad[] = { 0, 0, 0, 0 }; 5473 unsigned HiQuad[] = { 0, 0, 0, 0 }; 5474 BitVector InputQuads(4); 5475 for (unsigned i = 0; i < 8; ++i) { 5476 unsigned *Quad = i < 4 ? LoQuad : HiQuad; 5477 int EltIdx = SVOp->getMaskElt(i); 5478 MaskVals.push_back(EltIdx); 5479 if (EltIdx < 0) { 5480 ++Quad[0]; 5481 ++Quad[1]; 5482 ++Quad[2]; 5483 ++Quad[3]; 5484 continue; 5485 } 5486 ++Quad[EltIdx / 4]; 5487 InputQuads.set(EltIdx / 4); 5488 } 5489 5490 int BestLoQuad = -1; 5491 unsigned MaxQuad = 1; 5492 for (unsigned i = 0; i < 4; ++i) { 5493 if (LoQuad[i] > MaxQuad) { 5494 BestLoQuad = i; 5495 MaxQuad = LoQuad[i]; 5496 } 5497 } 5498 5499 int BestHiQuad = -1; 5500 MaxQuad = 1; 5501 for (unsigned i = 0; i < 4; ++i) { 5502 if (HiQuad[i] > MaxQuad) { 5503 BestHiQuad = i; 5504 MaxQuad = HiQuad[i]; 5505 } 5506 } 5507 5508 // For SSSE3, If all 8 words of the result come from only 1 quadword of each 5509 // of the two input vectors, shuffle them into one input vector so only a 5510 // single pshufb instruction is necessary. If There are more than 2 input 5511 // quads, disable the next transformation since it does not help SSSE3. 
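  // For example, the mask <1,0,3,2,13,12,15,14> uses only quad 0 of V1 and
  // quad 3 of V2 (InputQuads.count() == 2 with both inputs used), so the
  // v2i64 shuffle below packs those two quadwords into one register and a
  // single pshufb can then produce the result.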
5512 bool V1Used = InputQuads[0] || InputQuads[1]; 5513 bool V2Used = InputQuads[2] || InputQuads[3]; 5514 if (Subtarget->hasSSSE3()) { 5515 if (InputQuads.count() == 2 && V1Used && V2Used) { 5516 BestLoQuad = InputQuads.find_first(); 5517 BestHiQuad = InputQuads.find_next(BestLoQuad); 5518 } 5519 if (InputQuads.count() > 2) { 5520 BestLoQuad = -1; 5521 BestHiQuad = -1; 5522 } 5523 } 5524 5525 // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update 5526 // the shuffle mask. If a quad is scored as -1, that means that it contains 5527 // words from all 4 input quadwords. 5528 SDValue NewV; 5529 if (BestLoQuad >= 0 || BestHiQuad >= 0) { 5530 SmallVector<int, 8> MaskV; 5531 MaskV.push_back(BestLoQuad < 0 ? 0 : BestLoQuad); 5532 MaskV.push_back(BestHiQuad < 0 ? 1 : BestHiQuad); 5533 NewV = DAG.getVectorShuffle(MVT::v2i64, dl, 5534 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1), 5535 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]); 5536 NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV); 5537 5538 // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the 5539 // source words for the shuffle, to aid later transformations. 5540 bool AllWordsInNewV = true; 5541 bool InOrder[2] = { true, true }; 5542 for (unsigned i = 0; i != 8; ++i) { 5543 int idx = MaskVals[i]; 5544 if (idx != (int)i) 5545 InOrder[i/4] = false; 5546 if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad) 5547 continue; 5548 AllWordsInNewV = false; 5549 break; 5550 } 5551 5552 bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV; 5553 if (AllWordsInNewV) { 5554 for (int i = 0; i != 8; ++i) { 5555 int idx = MaskVals[i]; 5556 if (idx < 0) 5557 continue; 5558 idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4; 5559 if ((idx != i) && idx < 4) 5560 pshufhw = false; 5561 if ((idx != i) && idx > 3) 5562 pshuflw = false; 5563 } 5564 V1 = NewV; 5565 V2Used = false; 5566 BestLoQuad = 0; 5567 BestHiQuad = 1; 5568 } 5569 5570 // If we've eliminated the use of V2, and the new mask is a pshuflw or 5571 // pshufhw, that's as cheap as it gets. Return the new shuffle. 5572 if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) { 5573 unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW; 5574 unsigned TargetMask = 0; 5575 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, 5576 DAG.getUNDEF(MVT::v8i16), &MaskVals[0]); 5577 TargetMask = pshufhw ? X86::getShufflePSHUFHWImmediate(NewV.getNode()): 5578 X86::getShufflePSHUFLWImmediate(NewV.getNode()); 5579 V1 = NewV.getOperand(0); 5580 return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG); 5581 } 5582 } 5583 5584 // If we have SSSE3, and all words of the result are from 1 input vector, 5585 // case 2 is generated, otherwise case 3 is generated. If no SSSE3 5586 // is present, fall back to case 4. 5587 if (Subtarget->hasSSSE3()) { 5588 SmallVector<SDValue,16> pshufbMask; 5589 5590 // If we have elements from both input vectors, set the high bit of the 5591 // shuffle mask element to zero out elements that come from V2 in the V1 5592 // mask, and elements that come from V1 in the V2 mask, so that the two 5593 // results can be OR'd together. 
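    // E.g. if MaskVals[i] is 9 (word 1 of V2), bytes 2*i and 2*i+1 of the
    // V1 pshufb mask become 0x80 (forced to zero) while the V2 pshufb mask
    // gets byte indices 2 and 3; OR'ing the two shuffled results yields
    // the word.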
5594 bool TwoInputs = V1Used && V2Used; 5595 for (unsigned i = 0; i != 8; ++i) { 5596 int EltIdx = MaskVals[i] * 2; 5597 if (TwoInputs && (EltIdx >= 16)) { 5598 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); 5599 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); 5600 continue; 5601 } 5602 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8)); 5603 pshufbMask.push_back(DAG.getConstant(EltIdx+1, MVT::i8)); 5604 } 5605 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V1); 5606 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1, 5607 DAG.getNode(ISD::BUILD_VECTOR, dl, 5608 MVT::v16i8, &pshufbMask[0], 16)); 5609 if (!TwoInputs) 5610 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 5611 5612 // Calculate the shuffle mask for the second input, shuffle it, and 5613 // OR it with the first shuffled input. 5614 pshufbMask.clear(); 5615 for (unsigned i = 0; i != 8; ++i) { 5616 int EltIdx = MaskVals[i] * 2; 5617 if (EltIdx < 16) { 5618 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); 5619 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); 5620 continue; 5621 } 5622 pshufbMask.push_back(DAG.getConstant(EltIdx - 16, MVT::i8)); 5623 pshufbMask.push_back(DAG.getConstant(EltIdx - 15, MVT::i8)); 5624 } 5625 V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V2); 5626 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2, 5627 DAG.getNode(ISD::BUILD_VECTOR, dl, 5628 MVT::v16i8, &pshufbMask[0], 16)); 5629 V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2); 5630 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 5631 } 5632 5633 // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order, 5634 // and update MaskVals with new element order. 5635 BitVector InOrder(8); 5636 if (BestLoQuad >= 0) { 5637 SmallVector<int, 8> MaskV; 5638 for (int i = 0; i != 4; ++i) { 5639 int idx = MaskVals[i]; 5640 if (idx < 0) { 5641 MaskV.push_back(-1); 5642 InOrder.set(i); 5643 } else if ((idx / 4) == BestLoQuad) { 5644 MaskV.push_back(idx & 3); 5645 InOrder.set(i); 5646 } else { 5647 MaskV.push_back(-1); 5648 } 5649 } 5650 for (unsigned i = 4; i != 8; ++i) 5651 MaskV.push_back(i); 5652 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16), 5653 &MaskV[0]); 5654 5655 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3()) 5656 NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16, 5657 NewV.getOperand(0), 5658 X86::getShufflePSHUFLWImmediate(NewV.getNode()), 5659 DAG); 5660 } 5661 5662 // If BestHi >= 0, generate a pshufhw to put the high elements in order, 5663 // and update MaskVals with the new element order. 
5664 if (BestHiQuad >= 0) { 5665 SmallVector<int, 8> MaskV; 5666 for (unsigned i = 0; i != 4; ++i) 5667 MaskV.push_back(i); 5668 for (unsigned i = 4; i != 8; ++i) { 5669 int idx = MaskVals[i]; 5670 if (idx < 0) { 5671 MaskV.push_back(-1); 5672 InOrder.set(i); 5673 } else if ((idx / 4) == BestHiQuad) { 5674 MaskV.push_back((idx & 3) + 4); 5675 InOrder.set(i); 5676 } else { 5677 MaskV.push_back(-1); 5678 } 5679 } 5680 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16), 5681 &MaskV[0]); 5682 5683 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3()) 5684 NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16, 5685 NewV.getOperand(0), 5686 X86::getShufflePSHUFHWImmediate(NewV.getNode()), 5687 DAG); 5688 } 5689 5690 // In case BestHi & BestLo were both -1, which means each quadword has a word 5691 // from each of the four input quadwords, calculate the InOrder bitvector now 5692 // before falling through to the insert/extract cleanup. 5693 if (BestLoQuad == -1 && BestHiQuad == -1) { 5694 NewV = V1; 5695 for (int i = 0; i != 8; ++i) 5696 if (MaskVals[i] < 0 || MaskVals[i] == i) 5697 InOrder.set(i); 5698 } 5699 5700 // The other elements are put in the right place using pextrw and pinsrw. 5701 for (unsigned i = 0; i != 8; ++i) { 5702 if (InOrder[i]) 5703 continue; 5704 int EltIdx = MaskVals[i]; 5705 if (EltIdx < 0) 5706 continue; 5707 SDValue ExtOp = (EltIdx < 8) 5708 ? DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1, 5709 DAG.getIntPtrConstant(EltIdx)) 5710 : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2, 5711 DAG.getIntPtrConstant(EltIdx - 8)); 5712 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp, 5713 DAG.getIntPtrConstant(i)); 5714 } 5715 return NewV; 5716} 5717 5718// v16i8 shuffles - Prefer shuffles in the following order: 5719// 1. [ssse3] 1 x pshufb 5720// 2. [ssse3] 2 x pshufb + 1 x por 5721// 3. [all] v8i16 shuffle + N x pextrw + rotate + pinsrw 5722static 5723SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp, 5724 SelectionDAG &DAG, 5725 const X86TargetLowering &TLI) { 5726 SDValue V1 = SVOp->getOperand(0); 5727 SDValue V2 = SVOp->getOperand(1); 5728 DebugLoc dl = SVOp->getDebugLoc(); 5729 SmallVector<int, 16> MaskVals; 5730 SVOp->getMask(MaskVals); 5731 5732 // If we have SSSE3, case 1 is generated when all result bytes come from 5733 // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is 5734 // present, fall back to case 3. 5735 // FIXME: kill V2Only once shuffles are canonizalized by getNode. 5736 bool V1Only = true; 5737 bool V2Only = true; 5738 for (unsigned i = 0; i < 16; ++i) { 5739 int EltIdx = MaskVals[i]; 5740 if (EltIdx < 0) 5741 continue; 5742 if (EltIdx < 16) 5743 V2Only = false; 5744 else 5745 V1Only = false; 5746 } 5747 5748 // If SSSE3, use 1 pshufb instruction per vector with elements in the result. 5749 if (TLI.getSubtarget()->hasSSSE3()) { 5750 SmallVector<SDValue,16> pshufbMask; 5751 5752 // If all result elements are from one input vector, then only translate 5753 // undef mask values to 0x80 (zero out result) in the pshufb mask. 5754 // 5755 // Otherwise, we have elements from both input vectors, and must zero out 5756 // elements that come from V2 in the first mask, and V1 in the second mask 5757 // so that we can OR them together. 
5758 bool TwoInputs = !(V1Only || V2Only); 5759 for (unsigned i = 0; i != 16; ++i) { 5760 int EltIdx = MaskVals[i]; 5761 if (EltIdx < 0 || (TwoInputs && EltIdx >= 16)) { 5762 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); 5763 continue; 5764 } 5765 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8)); 5766 } 5767 // If all the elements are from V2, assign it to V1 and return after 5768 // building the first pshufb. 5769 if (V2Only) 5770 V1 = V2; 5771 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1, 5772 DAG.getNode(ISD::BUILD_VECTOR, dl, 5773 MVT::v16i8, &pshufbMask[0], 16)); 5774 if (!TwoInputs) 5775 return V1; 5776 5777 // Calculate the shuffle mask for the second input, shuffle it, and 5778 // OR it with the first shuffled input. 5779 pshufbMask.clear(); 5780 for (unsigned i = 0; i != 16; ++i) { 5781 int EltIdx = MaskVals[i]; 5782 if (EltIdx < 16) { 5783 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8)); 5784 continue; 5785 } 5786 pshufbMask.push_back(DAG.getConstant(EltIdx - 16, MVT::i8)); 5787 } 5788 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2, 5789 DAG.getNode(ISD::BUILD_VECTOR, dl, 5790 MVT::v16i8, &pshufbMask[0], 16)); 5791 return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2); 5792 } 5793 5794 // No SSSE3 - Calculate in place words and then fix all out of place words 5795 // With 0-16 extracts & inserts. Worst case is 16 bytes out of order from 5796 // the 16 different words that comprise the two doublequadword input vectors. 5797 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 5798 V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2); 5799 SDValue NewV = V2Only ? V2 : V1; 5800 for (int i = 0; i != 8; ++i) { 5801 int Elt0 = MaskVals[i*2]; 5802 int Elt1 = MaskVals[i*2+1]; 5803 5804 // This word of the result is all undef, skip it. 5805 if (Elt0 < 0 && Elt1 < 0) 5806 continue; 5807 5808 // This word of the result is already in the correct place, skip it. 5809 if (V1Only && (Elt0 == i*2) && (Elt1 == i*2+1)) 5810 continue; 5811 if (V2Only && (Elt0 == i*2+16) && (Elt1 == i*2+17)) 5812 continue; 5813 5814 SDValue Elt0Src = Elt0 < 16 ? V1 : V2; 5815 SDValue Elt1Src = Elt1 < 16 ? V1 : V2; 5816 SDValue InsElt; 5817 5818 // If Elt0 and Elt1 are defined, are consecutive, and can be load 5819 // using a single extract together, load it and store it. 5820 if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) { 5821 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src, 5822 DAG.getIntPtrConstant(Elt1 / 2)); 5823 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt, 5824 DAG.getIntPtrConstant(i)); 5825 continue; 5826 } 5827 5828 // If Elt1 is defined, extract it from the appropriate source. If the 5829 // source byte is not also odd, shift the extracted word left 8 bits 5830 // otherwise clear the bottom 8 bits if we need to do an or. 5831 if (Elt1 >= 0) { 5832 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src, 5833 DAG.getIntPtrConstant(Elt1 / 2)); 5834 if ((Elt1 & 1) == 0) 5835 InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt, 5836 DAG.getConstant(8, 5837 TLI.getShiftAmountTy(InsElt.getValueType()))); 5838 else if (Elt0 >= 0) 5839 InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt, 5840 DAG.getConstant(0xFF00, MVT::i16)); 5841 } 5842 // If Elt0 is defined, extract it from the appropriate source. If the 5843 // source byte is not also even, shift the extracted word right 8 bits. If 5844 // Elt1 was also defined, OR the extracted values together before 5845 // inserting them in the result. 
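    // E.g. for Elt0 = 5 and Elt1 = 2: word 1 (bytes 2,3) of Elt1's source
    // is extracted and shifted left by 8 so byte 2 lands in the high half;
    // word 2 (bytes 4,5) of Elt0's source is shifted right by 8 so byte 5
    // lands in the low half; the two halves are OR'd and inserted as
    // result word i.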
5846 if (Elt0 >= 0) { 5847 SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, 5848 Elt0Src, DAG.getIntPtrConstant(Elt0 / 2)); 5849 if ((Elt0 & 1) != 0) 5850 InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0, 5851 DAG.getConstant(8, 5852 TLI.getShiftAmountTy(InsElt0.getValueType()))); 5853 else if (Elt1 >= 0) 5854 InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0, 5855 DAG.getConstant(0x00FF, MVT::i16)); 5856 InsElt = Elt1 >= 0 ? DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0) 5857 : InsElt0; 5858 } 5859 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt, 5860 DAG.getIntPtrConstant(i)); 5861 } 5862 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV); 5863} 5864 5865/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide 5866/// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be 5867/// done when every pair / quad of shuffle mask elements point to elements in 5868/// the right sequence. e.g. 5869/// vector_shuffle X, Y, <2, 3, | 10, 11, | 0, 1, | 14, 15> 5870static 5871SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp, 5872 SelectionDAG &DAG, DebugLoc dl) { 5873 EVT VT = SVOp->getValueType(0); 5874 SDValue V1 = SVOp->getOperand(0); 5875 SDValue V2 = SVOp->getOperand(1); 5876 unsigned NumElems = VT.getVectorNumElements(); 5877 unsigned NewWidth = (NumElems == 4) ? 2 : 4; 5878 EVT NewVT; 5879 switch (VT.getSimpleVT().SimpleTy) { 5880 default: assert(false && "Unexpected!"); 5881 case MVT::v4f32: NewVT = MVT::v2f64; break; 5882 case MVT::v4i32: NewVT = MVT::v2i64; break; 5883 case MVT::v8i16: NewVT = MVT::v4i32; break; 5884 case MVT::v16i8: NewVT = MVT::v4i32; break; 5885 } 5886 5887 int Scale = NumElems / NewWidth; 5888 SmallVector<int, 8> MaskVec; 5889 for (unsigned i = 0; i < NumElems; i += Scale) { 5890 int StartIdx = -1; 5891 for (int j = 0; j < Scale; ++j) { 5892 int EltIdx = SVOp->getMaskElt(i+j); 5893 if (EltIdx < 0) 5894 continue; 5895 if (StartIdx == -1) 5896 StartIdx = EltIdx - (EltIdx % Scale); 5897 if (EltIdx != StartIdx + j) 5898 return SDValue(); 5899 } 5900 if (StartIdx == -1) 5901 MaskVec.push_back(-1); 5902 else 5903 MaskVec.push_back(StartIdx / Scale); 5904 } 5905 5906 V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, V1); 5907 V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, V2); 5908 return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]); 5909} 5910 5911/// getVZextMovL - Return a zero-extending vector move low node. 5912/// 5913static SDValue getVZextMovL(EVT VT, EVT OpVT, 5914 SDValue SrcOp, SelectionDAG &DAG, 5915 const X86Subtarget *Subtarget, DebugLoc dl) { 5916 if (VT == MVT::v2f64 || VT == MVT::v4f32) { 5917 LoadSDNode *LD = NULL; 5918 if (!isScalarLoadToVector(SrcOp.getNode(), &LD)) 5919 LD = dyn_cast<LoadSDNode>(SrcOp); 5920 if (!LD) { 5921 // movssrr and movsdrr do not clear top bits. Try to use movd, movq 5922 // instead. 5923 MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32; 5924 if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) && 5925 SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR && 5926 SrcOp.getOperand(0).getOpcode() == ISD::BITCAST && 5927 SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) { 5928 // PR2108 5929 OpVT = (OpVT == MVT::v2f64) ? 
MVT::v2i64 : MVT::v4i32; 5930 return DAG.getNode(ISD::BITCAST, dl, VT, 5931 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT, 5932 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 5933 OpVT, 5934 SrcOp.getOperand(0) 5935 .getOperand(0)))); 5936 } 5937 } 5938 } 5939 5940 return DAG.getNode(ISD::BITCAST, dl, VT, 5941 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT, 5942 DAG.getNode(ISD::BITCAST, dl, 5943 OpVT, SrcOp))); 5944} 5945 5946/// areShuffleHalvesWithinDisjointLanes - Check whether each half of a vector 5947/// shuffle node referes to only one lane in the sources. 5948static bool areShuffleHalvesWithinDisjointLanes(ShuffleVectorSDNode *SVOp) { 5949 EVT VT = SVOp->getValueType(0); 5950 int NumElems = VT.getVectorNumElements(); 5951 int HalfSize = NumElems/2; 5952 SmallVector<int, 16> M; 5953 SVOp->getMask(M); 5954 bool MatchA = false, MatchB = false; 5955 5956 for (int l = 0; l < NumElems*2; l += HalfSize) { 5957 if (isUndefOrInRange(M, 0, HalfSize, l, l+HalfSize)) { 5958 MatchA = true; 5959 break; 5960 } 5961 } 5962 5963 for (int l = 0; l < NumElems*2; l += HalfSize) { 5964 if (isUndefOrInRange(M, HalfSize, HalfSize, l, l+HalfSize)) { 5965 MatchB = true; 5966 break; 5967 } 5968 } 5969 5970 return MatchA && MatchB; 5971} 5972 5973/// LowerVECTOR_SHUFFLE_256 - Handle all 256-bit wide vectors shuffles 5974/// which could not be matched by any known target speficic shuffle 5975static SDValue 5976LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { 5977 if (areShuffleHalvesWithinDisjointLanes(SVOp)) { 5978 // If each half of a vector shuffle node referes to only one lane in the 5979 // source vectors, extract each used 128-bit lane and shuffle them using 5980 // 128-bit shuffles. Then, concatenate the results. Otherwise leave 5981 // the work to the legalizer. 5982 DebugLoc dl = SVOp->getDebugLoc(); 5983 EVT VT = SVOp->getValueType(0); 5984 int NumElems = VT.getVectorNumElements(); 5985 int HalfSize = NumElems/2; 5986 5987 // Extract the reference for each half 5988 int FstVecExtractIdx = 0, SndVecExtractIdx = 0; 5989 int FstVecOpNum = 0, SndVecOpNum = 0; 5990 for (int i = 0; i < HalfSize; ++i) { 5991 int Elt = SVOp->getMaskElt(i); 5992 if (SVOp->getMaskElt(i) < 0) 5993 continue; 5994 FstVecOpNum = Elt/NumElems; 5995 FstVecExtractIdx = Elt % NumElems < HalfSize ? 0 : HalfSize; 5996 break; 5997 } 5998 for (int i = HalfSize; i < NumElems; ++i) { 5999 int Elt = SVOp->getMaskElt(i); 6000 if (SVOp->getMaskElt(i) < 0) 6001 continue; 6002 SndVecOpNum = Elt/NumElems; 6003 SndVecExtractIdx = Elt % NumElems < HalfSize ? 0 : HalfSize; 6004 break; 6005 } 6006 6007 // Extract the subvectors 6008 SDValue V1 = Extract128BitVector(SVOp->getOperand(FstVecOpNum), 6009 DAG.getConstant(FstVecExtractIdx, MVT::i32), DAG, dl); 6010 SDValue V2 = Extract128BitVector(SVOp->getOperand(SndVecOpNum), 6011 DAG.getConstant(SndVecExtractIdx, MVT::i32), DAG, dl); 6012 6013 // Generate 128-bit shuffles 6014 SmallVector<int, 16> MaskV1, MaskV2; 6015 for (int i = 0; i < HalfSize; ++i) { 6016 int Elt = SVOp->getMaskElt(i); 6017 MaskV1.push_back(Elt < 0 ? Elt : Elt % HalfSize); 6018 } 6019 for (int i = HalfSize; i < NumElems; ++i) { 6020 int Elt = SVOp->getMaskElt(i); 6021 MaskV2.push_back(Elt < 0 ? 
Elt : Elt % HalfSize); 6022 } 6023 6024 EVT NVT = V1.getValueType(); 6025 V1 = DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &MaskV1[0]); 6026 V2 = DAG.getVectorShuffle(NVT, dl, V2, DAG.getUNDEF(NVT), &MaskV2[0]); 6027 6028 // Concatenate the result back 6029 SDValue V = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, VT), V1, 6030 DAG.getConstant(0, MVT::i32), DAG, dl); 6031 return Insert128BitVector(V, V2, DAG.getConstant(NumElems/2, MVT::i32), 6032 DAG, dl); 6033 } 6034 6035 return SDValue(); 6036} 6037 6038/// LowerVECTOR_SHUFFLE_128v4 - Handle all 128-bit wide vectors with 6039/// 4 elements, and match them with several different shuffle types. 6040static SDValue 6041LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { 6042 SDValue V1 = SVOp->getOperand(0); 6043 SDValue V2 = SVOp->getOperand(1); 6044 DebugLoc dl = SVOp->getDebugLoc(); 6045 EVT VT = SVOp->getValueType(0); 6046 6047 assert(VT.getSizeInBits() == 128 && "Unsupported vector size"); 6048 6049 SmallVector<std::pair<int, int>, 8> Locs; 6050 Locs.resize(4); 6051 SmallVector<int, 8> Mask1(4U, -1); 6052 SmallVector<int, 8> PermMask; 6053 SVOp->getMask(PermMask); 6054 6055 unsigned NumHi = 0; 6056 unsigned NumLo = 0; 6057 for (unsigned i = 0; i != 4; ++i) { 6058 int Idx = PermMask[i]; 6059 if (Idx < 0) { 6060 Locs[i] = std::make_pair(-1, -1); 6061 } else { 6062 assert(Idx < 8 && "Invalid VECTOR_SHUFFLE index!"); 6063 if (Idx < 4) { 6064 Locs[i] = std::make_pair(0, NumLo); 6065 Mask1[NumLo] = Idx; 6066 NumLo++; 6067 } else { 6068 Locs[i] = std::make_pair(1, NumHi); 6069 if (2+NumHi < 4) 6070 Mask1[2+NumHi] = Idx; 6071 NumHi++; 6072 } 6073 } 6074 } 6075 6076 if (NumLo <= 2 && NumHi <= 2) { 6077 // If no more than two elements come from either vector. This can be 6078 // implemented with two shuffles. First shuffle gather the elements. 6079 // The second shuffle, which takes the first shuffle as both of its 6080 // vector operands, put the elements into the right order. 6081 V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]); 6082 6083 SmallVector<int, 8> Mask2(4U, -1); 6084 6085 for (unsigned i = 0; i != 4; ++i) { 6086 if (Locs[i].first == -1) 6087 continue; 6088 else { 6089 unsigned Idx = (i < 2) ? 0 : 4; 6090 Idx += Locs[i].first * 2 + Locs[i].second; 6091 Mask2[i] = Idx; 6092 } 6093 } 6094 6095 return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]); 6096 } else if (NumLo == 3 || NumHi == 3) { 6097 // Otherwise, we must have three elements from one vector, call it X, and 6098 // one element from the other, call it Y. First, use a shufps to build an 6099 // intermediate vector with the one element from Y and the element from X 6100 // that will be in the same half in the final destination (the indexes don't 6101 // matter). Then, use a shufps to build the final vector, taking the half 6102 // containing the element from Y from the intermediate, and the other half 6103 // from X. 6104 if (NumHi == 3) { 6105 // Normalize it so the 3 elements come from V1. 6106 CommuteVectorShuffleMask(PermMask, 4); 6107 std::swap(V1, V2); 6108 } 6109 6110 // Find the element from V2. 
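  // Scan the mask for the entry that refers to V2 (value >= 4); its position
  // (HiIndex) determines how the two SHUFPS masks below are built.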
6111 unsigned HiIndex; 6112 for (HiIndex = 0; HiIndex < 3; ++HiIndex) { 6113 int Val = PermMask[HiIndex]; 6114 if (Val < 0) 6115 continue; 6116 if (Val >= 4) 6117 break; 6118 } 6119 6120 Mask1[0] = PermMask[HiIndex]; 6121 Mask1[1] = -1; 6122 Mask1[2] = PermMask[HiIndex^1]; 6123 Mask1[3] = -1; 6124 V2 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]); 6125 6126 if (HiIndex >= 2) { 6127 Mask1[0] = PermMask[0]; 6128 Mask1[1] = PermMask[1]; 6129 Mask1[2] = HiIndex & 1 ? 6 : 4; 6130 Mask1[3] = HiIndex & 1 ? 4 : 6; 6131 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]); 6132 } else { 6133 Mask1[0] = HiIndex & 1 ? 2 : 0; 6134 Mask1[1] = HiIndex & 1 ? 0 : 2; 6135 Mask1[2] = PermMask[2]; 6136 Mask1[3] = PermMask[3]; 6137 if (Mask1[2] >= 0) 6138 Mask1[2] += 4; 6139 if (Mask1[3] >= 0) 6140 Mask1[3] += 4; 6141 return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]); 6142 } 6143 } 6144 6145 // Break it into (shuffle shuffle_hi, shuffle_lo). 6146 Locs.clear(); 6147 Locs.resize(4); 6148 SmallVector<int,8> LoMask(4U, -1); 6149 SmallVector<int,8> HiMask(4U, -1); 6150 6151 SmallVector<int,8> *MaskPtr = &LoMask; 6152 unsigned MaskIdx = 0; 6153 unsigned LoIdx = 0; 6154 unsigned HiIdx = 2; 6155 for (unsigned i = 0; i != 4; ++i) { 6156 if (i == 2) { 6157 MaskPtr = &HiMask; 6158 MaskIdx = 1; 6159 LoIdx = 0; 6160 HiIdx = 2; 6161 } 6162 int Idx = PermMask[i]; 6163 if (Idx < 0) { 6164 Locs[i] = std::make_pair(-1, -1); 6165 } else if (Idx < 4) { 6166 Locs[i] = std::make_pair(MaskIdx, LoIdx); 6167 (*MaskPtr)[LoIdx] = Idx; 6168 LoIdx++; 6169 } else { 6170 Locs[i] = std::make_pair(MaskIdx, HiIdx); 6171 (*MaskPtr)[HiIdx] = Idx; 6172 HiIdx++; 6173 } 6174 } 6175 6176 SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]); 6177 SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]); 6178 SmallVector<int, 8> MaskOps; 6179 for (unsigned i = 0; i != 4; ++i) { 6180 if (Locs[i].first == -1) { 6181 MaskOps.push_back(-1); 6182 } else { 6183 unsigned Idx = Locs[i].first * 4 + Locs[i].second; 6184 MaskOps.push_back(Idx); 6185 } 6186 } 6187 return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]); 6188} 6189 6190static bool MayFoldVectorLoad(SDValue V) { 6191 if (V.hasOneUse() && V.getOpcode() == ISD::BITCAST) 6192 V = V.getOperand(0); 6193 if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR) 6194 V = V.getOperand(0); 6195 if (V.hasOneUse() && V.getOpcode() == ISD::BUILD_VECTOR && 6196 V.getNumOperands() == 2 && V.getOperand(1).getOpcode() == ISD::UNDEF) 6197 // BUILD_VECTOR (load), undef 6198 V = V.getOperand(0); 6199 if (MayFoldLoad(V)) 6200 return true; 6201 return false; 6202} 6203 6204// FIXME: the version above should always be used. Since there's 6205// a bug where several vector shuffles can't be folded because the 6206// DAG is not updated during lowering and a node claims to have two 6207// uses while it only has one, use this version, and let isel match 6208// another instruction if the load really happens to have more than 6209// one use. Remove this version after this bug get fixed. 
6210// rdar://8434668, PR8156 6211static bool RelaxedMayFoldVectorLoad(SDValue V) { 6212 if (V.hasOneUse() && V.getOpcode() == ISD::BITCAST) 6213 V = V.getOperand(0); 6214 if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR) 6215 V = V.getOperand(0); 6216 if (ISD::isNormalLoad(V.getNode())) 6217 return true; 6218 return false; 6219} 6220 6221/// CanFoldShuffleIntoVExtract - Check if the current shuffle is used by 6222/// a vector extract, and if both can be later optimized into a single load. 6223/// This is done in visitEXTRACT_VECTOR_ELT and the conditions are checked 6224/// here because otherwise a target specific shuffle node is going to be 6225/// emitted for this shuffle, and the optimization not done. 6226/// FIXME: This is probably not the best approach, but fix the problem 6227/// until the right path is decided. 6228static 6229bool CanXFormVExtractWithShuffleIntoLoad(SDValue V, SelectionDAG &DAG, 6230 const TargetLowering &TLI) { 6231 EVT VT = V.getValueType(); 6232 ShuffleVectorSDNode *SVOp = dyn_cast<ShuffleVectorSDNode>(V); 6233 6234 // Be sure that the vector shuffle is present in a pattern like this: 6235 // (vextract (v4f32 shuffle (load $addr), <1,u,u,u>), c) -> (f32 load $addr) 6236 if (!V.hasOneUse()) 6237 return false; 6238 6239 SDNode *N = *V.getNode()->use_begin(); 6240 if (N->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 6241 return false; 6242 6243 SDValue EltNo = N->getOperand(1); 6244 if (!isa<ConstantSDNode>(EltNo)) 6245 return false; 6246 6247 // If the bit convert changed the number of elements, it is unsafe 6248 // to examine the mask. 6249 bool HasShuffleIntoBitcast = false; 6250 if (V.getOpcode() == ISD::BITCAST) { 6251 EVT SrcVT = V.getOperand(0).getValueType(); 6252 if (SrcVT.getVectorNumElements() != VT.getVectorNumElements()) 6253 return false; 6254 V = V.getOperand(0); 6255 HasShuffleIntoBitcast = true; 6256 } 6257 6258 // Select the input vector, guarding against out of range extract vector. 6259 unsigned NumElems = VT.getVectorNumElements(); 6260 unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue(); 6261 int Idx = (Elt > NumElems) ? -1 : SVOp->getMaskElt(Elt); 6262 V = (Idx < (int)NumElems) ? V.getOperand(0) : V.getOperand(1); 6263 6264 // Skip one more bit_convert if necessary 6265 if (V.getOpcode() == ISD::BITCAST) 6266 V = V.getOperand(0); 6267 6268 if (!ISD::isNormalLoad(V.getNode())) 6269 return false; 6270 6271 // Is the original load suitable? 6272 LoadSDNode *LN0 = cast<LoadSDNode>(V); 6273 6274 if (!LN0 || !LN0->hasNUsesOfValue(1,0) || LN0->isVolatile()) 6275 return false; 6276 6277 if (!HasShuffleIntoBitcast) 6278 return true; 6279 6280 // If there's a bitcast before the shuffle, check if the load type and 6281 // alignment is valid. 6282 unsigned Align = LN0->getAlignment(); 6283 unsigned NewAlign = 6284 TLI.getTargetData()->getABITypeAlignment( 6285 VT.getTypeForEVT(*DAG.getContext())); 6286 6287 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VT)) 6288 return false; 6289 6290 return true; 6291} 6292 6293static 6294SDValue getMOVDDup(SDValue &Op, DebugLoc &dl, SDValue V1, SelectionDAG &DAG) { 6295 EVT VT = Op.getValueType(); 6296 6297 // Canonizalize to v2f64. 
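  // MOVDDUP duplicates the low 64-bit element, so any 128-bit vector type can
  // be handled by bitcasting to v2f64, emitting the node, and bitcasting back.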
6298 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1); 6299 return DAG.getNode(ISD::BITCAST, dl, VT, 6300 getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64, 6301 V1, DAG)); 6302} 6303 6304static 6305SDValue getMOVLowToHigh(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, 6306 bool HasSSE2) { 6307 SDValue V1 = Op.getOperand(0); 6308 SDValue V2 = Op.getOperand(1); 6309 EVT VT = Op.getValueType(); 6310 6311 assert(VT != MVT::v2i64 && "unsupported shuffle type"); 6312 6313 if (HasSSE2 && VT == MVT::v2f64) 6314 return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG); 6315 6316 // v4f32 or v4i32: canonizalized to v4f32 (which is legal for SSE1) 6317 return DAG.getNode(ISD::BITCAST, dl, VT, 6318 getTargetShuffleNode(X86ISD::MOVLHPS, dl, MVT::v4f32, 6319 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V1), 6320 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V2), DAG)); 6321} 6322 6323static 6324SDValue getMOVHighToLow(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG) { 6325 SDValue V1 = Op.getOperand(0); 6326 SDValue V2 = Op.getOperand(1); 6327 EVT VT = Op.getValueType(); 6328 6329 assert((VT == MVT::v4i32 || VT == MVT::v4f32) && 6330 "unsupported shuffle type"); 6331 6332 if (V2.getOpcode() == ISD::UNDEF) 6333 V2 = V1; 6334 6335 // v4i32 or v4f32 6336 return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG); 6337} 6338 6339static 6340SDValue getMOVLP(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, bool HasSSE2) { 6341 SDValue V1 = Op.getOperand(0); 6342 SDValue V2 = Op.getOperand(1); 6343 EVT VT = Op.getValueType(); 6344 unsigned NumElems = VT.getVectorNumElements(); 6345 6346 // Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second 6347 // operand of these instructions is only memory, so check if there's a 6348 // potencial load folding here, otherwise use SHUFPS or MOVSD to match the 6349 // same masks. 6350 bool CanFoldLoad = false; 6351 6352 // Trivial case, when V2 comes from a load. 6353 if (MayFoldVectorLoad(V2)) 6354 CanFoldLoad = true; 6355 6356 // When V1 is a load, it can be folded later into a store in isel, example: 6357 // (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1) 6358 // turns into: 6359 // (MOVLPSmr addr:$src1, VR128:$src2) 6360 // So, recognize this potential and also use MOVLPS or MOVLPD 6361 else if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op)) 6362 CanFoldLoad = true; 6363 6364 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 6365 if (CanFoldLoad) { 6366 if (HasSSE2 && NumElems == 2) 6367 return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG); 6368 6369 if (NumElems == 4) 6370 // If we don't care about the second element, procede to use movss. 6371 if (SVOp->getMaskElt(1) != -1) 6372 return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG); 6373 } 6374 6375 // movl and movlp will both match v2i64, but v2i64 is never matched by 6376 // movl earlier because we make it strict to avoid messing with the movlp load 6377 // folding logic (see the code above getMOVLP call). Match it here then, 6378 // this is horrible, but will stay like this until we move all shuffle 6379 // matching to x86 specific nodes. Note that for the 1st condition all 6380 // types are matched with movsd. 
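  // With SSE2 a single MOVSD (or MOVSS for the 4-element types) covers the
  // remaining cases; without SSE2 the operands are swapped and SHUFPS is used
  // instead.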
6381 if (HasSSE2) { 6382 // FIXME: isMOVLMask should be checked and matched before getMOVLP, 6383 // as to remove this logic from here, as much as possible 6384 if (NumElems == 2 || !X86::isMOVLMask(SVOp)) 6385 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG); 6386 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG); 6387 } 6388 6389 assert(VT != MVT::v4i32 && "unsupported shuffle type"); 6390 6391 // Invert the operand order and use SHUFPS to match it. 6392 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V2, V1, 6393 X86::getShuffleSHUFImmediate(SVOp), DAG); 6394} 6395 6396static 6397SDValue NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG, 6398 const TargetLowering &TLI, 6399 const X86Subtarget *Subtarget) { 6400 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 6401 EVT VT = Op.getValueType(); 6402 DebugLoc dl = Op.getDebugLoc(); 6403 SDValue V1 = Op.getOperand(0); 6404 SDValue V2 = Op.getOperand(1); 6405 6406 if (isZeroShuffle(SVOp)) 6407 return getZeroVector(VT, Subtarget->hasSSE2(), DAG, dl); 6408 6409 // Handle splat operations 6410 if (SVOp->isSplat()) { 6411 unsigned NumElem = VT.getVectorNumElements(); 6412 int Size = VT.getSizeInBits(); 6413 // Special case, this is the only place now where it's allowed to return 6414 // a vector_shuffle operation without using a target specific node, because 6415 // *hopefully* it will be optimized away by the dag combiner. FIXME: should 6416 // this be moved to DAGCombine instead? 6417 if (NumElem <= 4 && CanXFormVExtractWithShuffleIntoLoad(Op, DAG, TLI)) 6418 return Op; 6419 6420 // Use vbroadcast whenever the splat comes from a foldable load 6421 SDValue LD = isVectorBroadcast(Op, Subtarget); 6422 if (LD.getNode()) 6423 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, LD); 6424 6425 // Handle splats by matching through known shuffle masks 6426 if ((Size == 128 && NumElem <= 4) || 6427 (Size == 256 && NumElem < 8)) 6428 return SDValue(); 6429 6430 // All remaning splats are promoted to target supported vector shuffles. 6431 return PromoteSplat(SVOp, DAG); 6432 } 6433 6434 // If the shuffle can be profitably rewritten as a narrower shuffle, then 6435 // do it! 6436 if (VT == MVT::v8i16 || VT == MVT::v16i8) { 6437 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl); 6438 if (NewOp.getNode()) 6439 return DAG.getNode(ISD::BITCAST, dl, VT, NewOp); 6440 } else if ((VT == MVT::v4i32 || 6441 (VT == MVT::v4f32 && Subtarget->hasSSE2()))) { 6442 // FIXME: Figure out a cleaner way to do this. 6443 // Try to make use of movq to zero out the top part. 
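  // If one operand is the all-zeros vector and the narrowed shuffle is a MOVL
  // (or a commuted MOVL), it can be emitted as VZEXT_MOVL: a movq that moves
  // the low element and zeroes the rest of the destination.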
6444 if (ISD::isBuildVectorAllZeros(V2.getNode())) { 6445 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl); 6446 if (NewOp.getNode()) { 6447 if (isCommutedMOVL(cast<ShuffleVectorSDNode>(NewOp), true, false)) 6448 return getVZextMovL(VT, NewOp.getValueType(), NewOp.getOperand(0), 6449 DAG, Subtarget, dl); 6450 } 6451 } else if (ISD::isBuildVectorAllZeros(V1.getNode())) { 6452 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl); 6453 if (NewOp.getNode() && X86::isMOVLMask(cast<ShuffleVectorSDNode>(NewOp))) 6454 return getVZextMovL(VT, NewOp.getValueType(), NewOp.getOperand(1), 6455 DAG, Subtarget, dl); 6456 } 6457 } 6458 return SDValue(); 6459} 6460 6461SDValue 6462X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const { 6463 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 6464 SDValue V1 = Op.getOperand(0); 6465 SDValue V2 = Op.getOperand(1); 6466 EVT VT = Op.getValueType(); 6467 DebugLoc dl = Op.getDebugLoc(); 6468 unsigned NumElems = VT.getVectorNumElements(); 6469 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; 6470 bool V1IsSplat = false; 6471 bool V2IsSplat = false; 6472 bool HasSSE2 = Subtarget->hasSSE2(); 6473 bool HasAVX = Subtarget->hasAVX(); 6474 bool HasAVX2 = Subtarget->hasAVX2(); 6475 MachineFunction &MF = DAG.getMachineFunction(); 6476 bool OptForSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize); 6477 6478 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles"); 6479 6480 assert(V1.getOpcode() != ISD::UNDEF && "Op 1 of shuffle should not be undef"); 6481 6482 // Vector shuffle lowering takes 3 steps: 6483 // 6484 // 1) Normalize the input vectors. Here splats, zeroed vectors, profitable 6485 // narrowing and commutation of operands should be handled. 6486 // 2) Matching of shuffles with known shuffle masks to x86 target specific 6487 // shuffle nodes. 6488 // 3) Rewriting of unmatched masks into new generic shuffle operations, 6489 // so the shuffle can be broken into other shuffles and the legalizer can 6490 // try the lowering again. 6491 // 6492 // The general idea is that no vector_shuffle operation should be left to 6493 // be matched during isel, all of them must be converted to a target specific 6494 // node here. 6495 6496 // Normalize the input vectors. Here splats, zeroed vectors, profitable 6497 // narrowing and commutation of operands should be handled. The actual code 6498 // doesn't include all of those, work in progress... 6499 SDValue NewOp = NormalizeVectorShuffle(Op, DAG, *this, Subtarget); 6500 if (NewOp.getNode()) 6501 return NewOp; 6502 6503 // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and 6504 // unpckh_undef). Only use pshufd if speed is more important than size. 
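  // UNPCKL/UNPCKH of a register with itself implements these masks without the
  // immediate byte PSHUFD requires, so prefer them when optimizing for size.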
6505 if (OptForSize && X86::isUNPCKL_v_undef_Mask(SVOp, HasAVX2)) 6506 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG); 6507 if (OptForSize && X86::isUNPCKH_v_undef_Mask(SVOp, HasAVX2)) 6508 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG); 6509 6510 if (X86::isMOVDDUPMask(SVOp) && Subtarget->hasSSE3() && 6511 V2IsUndef && RelaxedMayFoldVectorLoad(V1)) 6512 return getMOVDDup(Op, dl, V1, DAG); 6513 6514 if (X86::isMOVHLPS_v_undef_Mask(SVOp)) 6515 return getMOVHighToLow(Op, dl, DAG); 6516 6517 // Use to match splats 6518 if (HasSSE2 && X86::isUNPCKHMask(SVOp, HasAVX2) && V2IsUndef && 6519 (VT == MVT::v2f64 || VT == MVT::v2i64)) 6520 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG); 6521 6522 if (X86::isPSHUFDMask(SVOp)) { 6523 // The actual implementation will match the mask in the if above and then 6524 // during isel it can match several different instructions, not only pshufd 6525 // as its name says, sad but true, emulate the behavior for now... 6526 if (X86::isMOVDDUPMask(SVOp) && ((VT == MVT::v4f32 || VT == MVT::v2i64))) 6527 return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG); 6528 6529 unsigned TargetMask = X86::getShuffleSHUFImmediate(SVOp); 6530 6531 if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32)) 6532 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG); 6533 6534 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V1, 6535 TargetMask, DAG); 6536 } 6537 6538 // Check if this can be converted into a logical shift. 6539 bool isLeft = false; 6540 unsigned ShAmt = 0; 6541 SDValue ShVal; 6542 bool isShift = HasSSE2 && isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt); 6543 if (isShift && ShVal.hasOneUse()) { 6544 // If the shifted value has multiple uses, it may be cheaper to use 6545 // v_set0 + movlhps or movhlps, etc. 6546 EVT EltVT = VT.getVectorElementType(); 6547 ShAmt *= EltVT.getSizeInBits(); 6548 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl); 6549 } 6550 6551 if (X86::isMOVLMask(SVOp)) { 6552 if (ISD::isBuildVectorAllZeros(V1.getNode())) 6553 return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl); 6554 if (!X86::isMOVLPMask(SVOp)) { 6555 if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64)) 6556 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG); 6557 6558 if (VT == MVT::v4i32 || VT == MVT::v4f32) 6559 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG); 6560 } 6561 } 6562 6563 // FIXME: fold these into legal mask. 6564 if (X86::isMOVLHPSMask(SVOp) && !X86::isUNPCKLMask(SVOp, HasAVX2)) 6565 return getMOVLowToHigh(Op, dl, DAG, HasSSE2); 6566 6567 if (X86::isMOVHLPSMask(SVOp)) 6568 return getMOVHighToLow(Op, dl, DAG); 6569 6570 if (X86::isMOVSHDUPMask(SVOp, Subtarget)) 6571 return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG); 6572 6573 if (X86::isMOVSLDUPMask(SVOp, Subtarget)) 6574 return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG); 6575 6576 if (X86::isMOVLPMask(SVOp)) 6577 return getMOVLP(Op, dl, DAG, HasSSE2); 6578 6579 if (ShouldXformToMOVHLPS(SVOp) || 6580 ShouldXformToMOVLP(V1.getNode(), V2.getNode(), SVOp)) 6581 return CommuteVectorShuffle(SVOp, DAG); 6582 6583 if (isShift) { 6584 // No better options. Use a vshl / vsrl. 6585 EVT EltVT = VT.getVectorElementType(); 6586 ShAmt *= EltVT.getSizeInBits(); 6587 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl); 6588 } 6589 6590 bool Commuted = false; 6591 // FIXME: This should also accept a bitcast of a splat? Be careful, not 6592 // 1,1,1,1 -> v8i16 though. 
6593 V1IsSplat = isSplatVector(V1.getNode()); 6594 V2IsSplat = isSplatVector(V2.getNode()); 6595 6596 // Canonicalize the splat or undef, if present, to be on the RHS. 6597 if (V1IsSplat && !V2IsSplat) { 6598 Op = CommuteVectorShuffle(SVOp, DAG); 6599 SVOp = cast<ShuffleVectorSDNode>(Op); 6600 V1 = SVOp->getOperand(0); 6601 V2 = SVOp->getOperand(1); 6602 std::swap(V1IsSplat, V2IsSplat); 6603 Commuted = true; 6604 } 6605 6606 SmallVector<int, 32> M; 6607 SVOp->getMask(M); 6608 6609 if (isCommutedMOVLMask(M, VT, V2IsSplat, V2IsUndef)) { 6610 // Shuffling low element of v1 into undef, just return v1. 6611 if (V2IsUndef) 6612 return V1; 6613 // If V2 is a splat, the mask may be malformed such as <4,3,3,3>, which 6614 // the instruction selector will not match, so get a canonical MOVL with 6615 // swapped operands to undo the commute. 6616 return getMOVL(DAG, dl, VT, V2, V1); 6617 } 6618 6619 if (isUNPCKLMask(M, VT, HasAVX2)) 6620 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG); 6621 6622 if (isUNPCKHMask(M, VT, HasAVX2)) 6623 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG); 6624 6625 if (V2IsSplat) { 6626 // Normalize mask so all entries that point to V2 points to its first 6627 // element then try to match unpck{h|l} again. If match, return a 6628 // new vector_shuffle with the corrected mask. 6629 SDValue NewMask = NormalizeMask(SVOp, DAG); 6630 ShuffleVectorSDNode *NSVOp = cast<ShuffleVectorSDNode>(NewMask); 6631 if (NSVOp != SVOp) { 6632 if (X86::isUNPCKLMask(NSVOp, HasAVX2, true)) { 6633 return NewMask; 6634 } else if (X86::isUNPCKHMask(NSVOp, HasAVX2, true)) { 6635 return NewMask; 6636 } 6637 } 6638 } 6639 6640 if (Commuted) { 6641 // Commute is back and try unpck* again. 6642 // FIXME: this seems wrong. 6643 SDValue NewOp = CommuteVectorShuffle(SVOp, DAG); 6644 ShuffleVectorSDNode *NewSVOp = cast<ShuffleVectorSDNode>(NewOp); 6645 6646 if (X86::isUNPCKLMask(NewSVOp, HasAVX2)) 6647 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V2, V1, DAG); 6648 6649 if (X86::isUNPCKHMask(NewSVOp, HasAVX2)) 6650 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V2, V1, DAG); 6651 } 6652 6653 // Normalize the node to match x86 shuffle ops if needed 6654 if (!V2IsUndef && (isSHUFPMask(M, VT, /* Commuted */ true) || 6655 isVSHUFPYMask(M, VT, HasAVX, /* Commuted */ true))) 6656 return CommuteVectorShuffle(SVOp, DAG); 6657 6658 // The checks below are all present in isShuffleMaskLegal, but they are 6659 // inlined here right now to enable us to directly emit target specific 6660 // nodes, and remove one by one until they don't return Op anymore. 
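  // Each check below pairs a mask predicate with the target node it maps to:
  // PALIGNR, the 64-bit low-element splat (lowered as UNPCKL), PSHUFHW/PSHUFLW,
  // SHUFP, and the self-unpack UNPCKL/UNPCKH forms.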
6661 6662 if (isPALIGNRMask(M, VT, Subtarget->hasSSSE3())) 6663 return getTargetShuffleNode(X86ISD::PALIGN, dl, VT, V1, V2, 6664 getShufflePALIGNRImmediate(SVOp), 6665 DAG); 6666 6667 if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) && 6668 SVOp->getSplatIndex() == 0 && V2IsUndef) { 6669 if (VT == MVT::v2f64 || VT == MVT::v2i64) 6670 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG); 6671 } 6672 6673 if (isPSHUFHWMask(M, VT)) 6674 return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1, 6675 X86::getShufflePSHUFHWImmediate(SVOp), 6676 DAG); 6677 6678 if (isPSHUFLWMask(M, VT)) 6679 return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1, 6680 X86::getShufflePSHUFLWImmediate(SVOp), 6681 DAG); 6682 6683 if (isSHUFPMask(M, VT)) 6684 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V2, 6685 X86::getShuffleSHUFImmediate(SVOp), DAG); 6686 6687 if (isUNPCKL_v_undef_Mask(M, VT, HasAVX2)) 6688 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG); 6689 if (isUNPCKH_v_undef_Mask(M, VT, HasAVX2)) 6690 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG); 6691 6692 //===--------------------------------------------------------------------===// 6693 // Generate target specific nodes for 128 or 256-bit shuffles only 6694 // supported in the AVX instruction set. 6695 // 6696 6697 // Handle VMOVDDUPY permutations 6698 if (V2IsUndef && isMOVDDUPYMask(M, VT, HasAVX)) 6699 return getTargetShuffleNode(X86ISD::MOVDDUP, dl, VT, V1, DAG); 6700 6701 // Handle VPERMILPS/D* permutations 6702 if (isVPERMILPMask(M, VT, HasAVX)) 6703 return getTargetShuffleNode(X86ISD::VPERMILP, dl, VT, V1, 6704 getShuffleVPERMILPImmediate(SVOp), DAG); 6705 6706 // Handle VPERM2F128/VPERM2I128 permutations 6707 if (isVPERM2X128Mask(M, VT, HasAVX)) 6708 return getTargetShuffleNode(X86ISD::VPERM2X128, dl, VT, V1, 6709 V2, getShuffleVPERM2X128Immediate(SVOp), DAG); 6710 6711 // Handle VSHUFPS/DY permutations 6712 if (isVSHUFPYMask(M, VT, HasAVX)) 6713 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V2, 6714 getShuffleVSHUFPYImmediate(SVOp), DAG); 6715 6716 //===--------------------------------------------------------------------===// 6717 // Since no target specific shuffle was selected for this generic one, 6718 // lower it into other known shuffles. FIXME: this isn't true yet, but 6719 // this is the plan. 6720 // 6721 6722 // Handle v8i16 specifically since SSE can do byte extraction and insertion. 6723 if (VT == MVT::v8i16) { 6724 SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, DAG); 6725 if (NewOp.getNode()) 6726 return NewOp; 6727 } 6728 6729 if (VT == MVT::v16i8) { 6730 SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, DAG, *this); 6731 if (NewOp.getNode()) 6732 return NewOp; 6733 } 6734 6735 // Handle all 128-bit wide vectors with 4 elements, and match them with 6736 // several different shuffle types. 
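  // LowerVECTOR_SHUFFLE_128v4 breaks the remaining 4-element cases into two or
  // three simpler shuffles; 256-bit shuffles that reach this point are handled
  // lane by lane when possible (LowerVECTOR_SHUFFLE_256) or left to the
  // legalizer.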
6737 if (NumElems == 4 && VT.getSizeInBits() == 128) 6738 return LowerVECTOR_SHUFFLE_128v4(SVOp, DAG); 6739 6740 // Handle general 256-bit shuffles 6741 if (VT.is256BitVector()) 6742 return LowerVECTOR_SHUFFLE_256(SVOp, DAG); 6743 6744 return SDValue(); 6745} 6746 6747SDValue 6748X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, 6749 SelectionDAG &DAG) const { 6750 EVT VT = Op.getValueType(); 6751 DebugLoc dl = Op.getDebugLoc(); 6752 6753 if (Op.getOperand(0).getValueType().getSizeInBits() != 128) 6754 return SDValue(); 6755 6756 if (VT.getSizeInBits() == 8) { 6757 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32, 6758 Op.getOperand(0), Op.getOperand(1)); 6759 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract, 6760 DAG.getValueType(VT)); 6761 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert); 6762 } else if (VT.getSizeInBits() == 16) { 6763 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 6764 // If Idx is 0, it's cheaper to do a move instead of a pextrw. 6765 if (Idx == 0) 6766 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, 6767 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, 6768 DAG.getNode(ISD::BITCAST, dl, 6769 MVT::v4i32, 6770 Op.getOperand(0)), 6771 Op.getOperand(1))); 6772 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32, 6773 Op.getOperand(0), Op.getOperand(1)); 6774 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract, 6775 DAG.getValueType(VT)); 6776 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert); 6777 } else if (VT == MVT::f32) { 6778 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy 6779 // the result back to FR32 register. It's only worth matching if the 6780 // result has a single use which is a store or a bitcast to i32. And in 6781 // the case of a store, it's not worth it if the index is a constant 0, 6782 // because a MOVSSmr can be used instead, which is smaller and faster. 6783 if (!Op.hasOneUse()) 6784 return SDValue(); 6785 SDNode *User = *Op.getNode()->use_begin(); 6786 if ((User->getOpcode() != ISD::STORE || 6787 (isa<ConstantSDNode>(Op.getOperand(1)) && 6788 cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) && 6789 (User->getOpcode() != ISD::BITCAST || 6790 User->getValueType(0) != MVT::i32)) 6791 return SDValue(); 6792 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, 6793 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, 6794 Op.getOperand(0)), 6795 Op.getOperand(1)); 6796 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract); 6797 } else if (VT == MVT::i32 || VT == MVT::i64) { 6798 // ExtractPS/pextrq works with constant index. 6799 if (isa<ConstantSDNode>(Op.getOperand(1))) 6800 return Op; 6801 } 6802 return SDValue(); 6803} 6804 6805 6806SDValue 6807X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, 6808 SelectionDAG &DAG) const { 6809 if (!isa<ConstantSDNode>(Op.getOperand(1))) 6810 return SDValue(); 6811 6812 SDValue Vec = Op.getOperand(0); 6813 EVT VecVT = Vec.getValueType(); 6814 6815 // If this is a 256-bit vector result, first extract the 128-bit vector and 6816 // then extract the element from the 128-bit vector. 6817 if (VecVT.getSizeInBits() == 256) { 6818 DebugLoc dl = Op.getNode()->getDebugLoc(); 6819 unsigned NumElems = VecVT.getVectorNumElements(); 6820 SDValue Idx = Op.getOperand(1); 6821 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue(); 6822 6823 // Get the 128-bit vector. 6824 bool Upper = IdxVal >= NumElems/2; 6825 Vec = Extract128BitVector(Vec, 6826 DAG.getConstant(Upper ? 
NumElems/2 : 0, MVT::i32), DAG, dl); 6827 6828 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec, 6829 Upper ? DAG.getConstant(IdxVal-NumElems/2, MVT::i32) : Idx); 6830 } 6831 6832 assert(Vec.getValueSizeInBits() <= 128 && "Unexpected vector length"); 6833 6834 if (Subtarget->hasSSE41()) { 6835 SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG); 6836 if (Res.getNode()) 6837 return Res; 6838 } 6839 6840 EVT VT = Op.getValueType(); 6841 DebugLoc dl = Op.getDebugLoc(); 6842 // TODO: handle v16i8. 6843 if (VT.getSizeInBits() == 16) { 6844 SDValue Vec = Op.getOperand(0); 6845 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 6846 if (Idx == 0) 6847 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, 6848 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, 6849 DAG.getNode(ISD::BITCAST, dl, 6850 MVT::v4i32, Vec), 6851 Op.getOperand(1))); 6852 // Transform it so it match pextrw which produces a 32-bit result. 6853 EVT EltVT = MVT::i32; 6854 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT, 6855 Op.getOperand(0), Op.getOperand(1)); 6856 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract, 6857 DAG.getValueType(VT)); 6858 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert); 6859 } else if (VT.getSizeInBits() == 32) { 6860 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 6861 if (Idx == 0) 6862 return Op; 6863 6864 // SHUFPS the element to the lowest double word, then movss. 6865 int Mask[4] = { static_cast<int>(Idx), -1, -1, -1 }; 6866 EVT VVT = Op.getOperand(0).getValueType(); 6867 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0), 6868 DAG.getUNDEF(VVT), Mask); 6869 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec, 6870 DAG.getIntPtrConstant(0)); 6871 } else if (VT.getSizeInBits() == 64) { 6872 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b 6873 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught 6874 // to match extract_elt for f64. 6875 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 6876 if (Idx == 0) 6877 return Op; 6878 6879 // UNPCKHPD the element to the lowest double word, then movsd. 6880 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored 6881 // to a f64mem, the whole operation is folded into a single MOVHPDmr. 6882 int Mask[2] = { 1, -1 }; 6883 EVT VVT = Op.getOperand(0).getValueType(); 6884 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0), 6885 DAG.getUNDEF(VVT), Mask); 6886 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec, 6887 DAG.getIntPtrConstant(0)); 6888 } 6889 6890 return SDValue(); 6891} 6892 6893SDValue 6894X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, 6895 SelectionDAG &DAG) const { 6896 EVT VT = Op.getValueType(); 6897 EVT EltVT = VT.getVectorElementType(); 6898 DebugLoc dl = Op.getDebugLoc(); 6899 6900 SDValue N0 = Op.getOperand(0); 6901 SDValue N1 = Op.getOperand(1); 6902 SDValue N2 = Op.getOperand(2); 6903 6904 if (VT.getSizeInBits() == 256) 6905 return SDValue(); 6906 6907 if ((EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) && 6908 isa<ConstantSDNode>(N2)) { 6909 unsigned Opc; 6910 if (VT == MVT::v8i16) 6911 Opc = X86ISD::PINSRW; 6912 else if (VT == MVT::v16i8) 6913 Opc = X86ISD::PINSRB; 6914 else 6915 Opc = X86ISD::PINSRB; 6916 6917 // Transform it so it match pinsr{b,w} which expects a GR32 as its second 6918 // argument. 
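    // The element index is also rebuilt as a pointer-sized target constant,
    // since PINSRB/PINSRW take the lane number as an immediate.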
6919 if (N1.getValueType() != MVT::i32) 6920 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1); 6921 if (N2.getValueType() != MVT::i32) 6922 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue()); 6923 return DAG.getNode(Opc, dl, VT, N0, N1, N2); 6924 } else if (EltVT == MVT::f32 && isa<ConstantSDNode>(N2)) { 6925 // Bits [7:6] of the constant are the source select. This will always be 6926 // zero here. The DAG Combiner may combine an extract_elt index into these 6927 // bits. For example (insert (extract, 3), 2) could be matched by putting 6928 // the '3' into bits [7:6] of X86ISD::INSERTPS. 6929 // Bits [5:4] of the constant are the destination select. This is the 6930 // value of the incoming immediate. 6931 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may 6932 // combine either bitwise AND or insert of float 0.0 to set these bits. 6933 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue() << 4); 6934 // Create this as a scalar to vector.. 6935 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1); 6936 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2); 6937 } else if ((EltVT == MVT::i32 || EltVT == MVT::i64) && 6938 isa<ConstantSDNode>(N2)) { 6939 // PINSR* works with constant index. 6940 return Op; 6941 } 6942 return SDValue(); 6943} 6944 6945SDValue 6946X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const { 6947 EVT VT = Op.getValueType(); 6948 EVT EltVT = VT.getVectorElementType(); 6949 6950 DebugLoc dl = Op.getDebugLoc(); 6951 SDValue N0 = Op.getOperand(0); 6952 SDValue N1 = Op.getOperand(1); 6953 SDValue N2 = Op.getOperand(2); 6954 6955 // If this is a 256-bit vector result, first extract the 128-bit vector, 6956 // insert the element into the extracted half and then place it back. 6957 if (VT.getSizeInBits() == 256) { 6958 if (!isa<ConstantSDNode>(N2)) 6959 return SDValue(); 6960 6961 // Get the desired 128-bit vector half. 6962 unsigned NumElems = VT.getVectorNumElements(); 6963 unsigned IdxVal = cast<ConstantSDNode>(N2)->getZExtValue(); 6964 bool Upper = IdxVal >= NumElems/2; 6965 SDValue Ins128Idx = DAG.getConstant(Upper ? NumElems/2 : 0, MVT::i32); 6966 SDValue V = Extract128BitVector(N0, Ins128Idx, DAG, dl); 6967 6968 // Insert the element into the desired half. 6969 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, 6970 N1, Upper ? DAG.getConstant(IdxVal-NumElems/2, MVT::i32) : N2); 6971 6972 // Insert the changed part back to the 256-bit vector 6973 return Insert128BitVector(N0, V, Ins128Idx, DAG, dl); 6974 } 6975 6976 if (Subtarget->hasSSE41()) 6977 return LowerINSERT_VECTOR_ELT_SSE4(Op, DAG); 6978 6979 if (EltVT == MVT::i8) 6980 return SDValue(); 6981 6982 if (EltVT.getSizeInBits() == 16 && isa<ConstantSDNode>(N2)) { 6983 // Transform it so it match pinsrw which expects a 16-bit value in a GR32 6984 // as its second argument. 
6985 if (N1.getValueType() != MVT::i32) 6986 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1); 6987 if (N2.getValueType() != MVT::i32) 6988 N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue()); 6989 return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2); 6990 } 6991 return SDValue(); 6992} 6993 6994SDValue 6995X86TargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const { 6996 LLVMContext *Context = DAG.getContext(); 6997 DebugLoc dl = Op.getDebugLoc(); 6998 EVT OpVT = Op.getValueType(); 6999 7000 // If this is a 256-bit vector result, first insert into a 128-bit 7001 // vector and then insert into the 256-bit vector. 7002 if (OpVT.getSizeInBits() > 128) { 7003 // Insert into a 128-bit vector. 7004 EVT VT128 = EVT::getVectorVT(*Context, 7005 OpVT.getVectorElementType(), 7006 OpVT.getVectorNumElements() / 2); 7007 7008 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0)); 7009 7010 // Insert the 128-bit vector. 7011 return Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, OpVT), Op, 7012 DAG.getConstant(0, MVT::i32), 7013 DAG, dl); 7014 } 7015 7016 if (Op.getValueType() == MVT::v1i64 && 7017 Op.getOperand(0).getValueType() == MVT::i64) 7018 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0)); 7019 7020 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0)); 7021 assert(Op.getValueType().getSimpleVT().getSizeInBits() == 128 && 7022 "Expected an SSE type!"); 7023 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), 7024 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt)); 7025} 7026 7027// Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in 7028// a simple subregister reference or explicit instructions to grab 7029// upper bits of a vector. 7030SDValue 7031X86TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const { 7032 if (Subtarget->hasAVX()) { 7033 DebugLoc dl = Op.getNode()->getDebugLoc(); 7034 SDValue Vec = Op.getNode()->getOperand(0); 7035 SDValue Idx = Op.getNode()->getOperand(1); 7036 7037 if (Op.getNode()->getValueType(0).getSizeInBits() == 128 7038 && Vec.getNode()->getValueType(0).getSizeInBits() == 256) { 7039 return Extract128BitVector(Vec, Idx, DAG, dl); 7040 } 7041 } 7042 return SDValue(); 7043} 7044 7045// Lower a node with an INSERT_SUBVECTOR opcode. This may result in a 7046// simple superregister reference or explicit instructions to insert 7047// the upper bits of a vector. 7048SDValue 7049X86TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const { 7050 if (Subtarget->hasAVX()) { 7051 DebugLoc dl = Op.getNode()->getDebugLoc(); 7052 SDValue Vec = Op.getNode()->getOperand(0); 7053 SDValue SubVec = Op.getNode()->getOperand(1); 7054 SDValue Idx = Op.getNode()->getOperand(2); 7055 7056 if (Op.getNode()->getValueType(0).getSizeInBits() == 256 7057 && SubVec.getNode()->getValueType(0).getSizeInBits() == 128) { 7058 return Insert128BitVector(Vec, SubVec, Idx, DAG, dl); 7059 } 7060 } 7061 return SDValue(); 7062} 7063 7064// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 7065// their target countpart wrapped in the X86ISD::Wrapper node. Suppose N is 7066// one of the above mentioned nodes. It has to be wrapped because otherwise 7067// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 7068// be used to form addressing mode. These wrapped nodes will be selected 7069// into MOV32ri. 
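// The lowering routines below (constant pool, jump table, external symbol,
// block address and global address) all follow the same pattern: pick the PIC
// operand flag and wrapper kind for the current code model, wrap the target
// node, then add the global base register and, when required, a load from the
// GOT or a stub on top of it.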
7070SDValue 7071X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const { 7072 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 7073 7074 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the 7075 // global base reg. 7076 unsigned char OpFlag = 0; 7077 unsigned WrapperKind = X86ISD::Wrapper; 7078 CodeModel::Model M = getTargetMachine().getCodeModel(); 7079 7080 if (Subtarget->isPICStyleRIPRel() && 7081 (M == CodeModel::Small || M == CodeModel::Kernel)) 7082 WrapperKind = X86ISD::WrapperRIP; 7083 else if (Subtarget->isPICStyleGOT()) 7084 OpFlag = X86II::MO_GOTOFF; 7085 else if (Subtarget->isPICStyleStubPIC()) 7086 OpFlag = X86II::MO_PIC_BASE_OFFSET; 7087 7088 SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(), 7089 CP->getAlignment(), 7090 CP->getOffset(), OpFlag); 7091 DebugLoc DL = CP->getDebugLoc(); 7092 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result); 7093 // With PIC, the address is actually $g + Offset. 7094 if (OpFlag) { 7095 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(), 7096 DAG.getNode(X86ISD::GlobalBaseReg, 7097 DebugLoc(), getPointerTy()), 7098 Result); 7099 } 7100 7101 return Result; 7102} 7103 7104SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { 7105 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 7106 7107 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the 7108 // global base reg. 7109 unsigned char OpFlag = 0; 7110 unsigned WrapperKind = X86ISD::Wrapper; 7111 CodeModel::Model M = getTargetMachine().getCodeModel(); 7112 7113 if (Subtarget->isPICStyleRIPRel() && 7114 (M == CodeModel::Small || M == CodeModel::Kernel)) 7115 WrapperKind = X86ISD::WrapperRIP; 7116 else if (Subtarget->isPICStyleGOT()) 7117 OpFlag = X86II::MO_GOTOFF; 7118 else if (Subtarget->isPICStyleStubPIC()) 7119 OpFlag = X86II::MO_PIC_BASE_OFFSET; 7120 7121 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(), 7122 OpFlag); 7123 DebugLoc DL = JT->getDebugLoc(); 7124 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result); 7125 7126 // With PIC, the address is actually $g + Offset. 7127 if (OpFlag) 7128 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(), 7129 DAG.getNode(X86ISD::GlobalBaseReg, 7130 DebugLoc(), getPointerTy()), 7131 Result); 7132 7133 return Result; 7134} 7135 7136SDValue 7137X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const { 7138 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol(); 7139 7140 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the 7141 // global base reg. 7142 unsigned char OpFlag = 0; 7143 unsigned WrapperKind = X86ISD::Wrapper; 7144 CodeModel::Model M = getTargetMachine().getCodeModel(); 7145 7146 if (Subtarget->isPICStyleRIPRel() && 7147 (M == CodeModel::Small || M == CodeModel::Kernel)) { 7148 if (Subtarget->isTargetDarwin() || Subtarget->isTargetELF()) 7149 OpFlag = X86II::MO_GOTPCREL; 7150 WrapperKind = X86ISD::WrapperRIP; 7151 } else if (Subtarget->isPICStyleGOT()) { 7152 OpFlag = X86II::MO_GOT; 7153 } else if (Subtarget->isPICStyleStubPIC()) { 7154 OpFlag = X86II::MO_DARWIN_NONLAZY_PIC_BASE; 7155 } else if (Subtarget->isPICStyleStubNoDynamic()) { 7156 OpFlag = X86II::MO_DARWIN_NONLAZY; 7157 } 7158 7159 SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag); 7160 7161 DebugLoc DL = Op.getDebugLoc(); 7162 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result); 7163 7164 7165 // With PIC, the address is actually $g + Offset. 
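  // (This only applies to 32-bit PIC; RIP-relative code models address the
  // symbol directly.)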
7166 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 7167 !Subtarget->is64Bit()) { 7168 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(), 7169 DAG.getNode(X86ISD::GlobalBaseReg, 7170 DebugLoc(), getPointerTy()), 7171 Result); 7172 } 7173 7174 // For symbols that require a load from a stub to get the address, emit the 7175 // load. 7176 if (isGlobalStubReference(OpFlag)) 7177 Result = DAG.getLoad(getPointerTy(), DL, DAG.getEntryNode(), Result, 7178 MachinePointerInfo::getGOT(), false, false, false, 0); 7179 7180 return Result; 7181} 7182 7183SDValue 7184X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const { 7185 // Create the TargetBlockAddressAddress node. 7186 unsigned char OpFlags = 7187 Subtarget->ClassifyBlockAddressReference(); 7188 CodeModel::Model M = getTargetMachine().getCodeModel(); 7189 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 7190 DebugLoc dl = Op.getDebugLoc(); 7191 SDValue Result = DAG.getBlockAddress(BA, getPointerTy(), 7192 /*isTarget=*/true, OpFlags); 7193 7194 if (Subtarget->isPICStyleRIPRel() && 7195 (M == CodeModel::Small || M == CodeModel::Kernel)) 7196 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result); 7197 else 7198 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result); 7199 7200 // With PIC, the address is actually $g + Offset. 7201 if (isGlobalRelativeToPICBase(OpFlags)) { 7202 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), 7203 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()), 7204 Result); 7205 } 7206 7207 return Result; 7208} 7209 7210SDValue 7211X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl, 7212 int64_t Offset, 7213 SelectionDAG &DAG) const { 7214 // Create the TargetGlobalAddress node, folding in the constant 7215 // offset if it is legal. 7216 unsigned char OpFlags = 7217 Subtarget->ClassifyGlobalReference(GV, getTargetMachine()); 7218 CodeModel::Model M = getTargetMachine().getCodeModel(); 7219 SDValue Result; 7220 if (OpFlags == X86II::MO_NO_FLAG && 7221 X86::isOffsetSuitableForCodeModel(Offset, M)) { 7222 // A direct static reference to a global. 7223 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset); 7224 Offset = 0; 7225 } else { 7226 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags); 7227 } 7228 7229 if (Subtarget->isPICStyleRIPRel() && 7230 (M == CodeModel::Small || M == CodeModel::Kernel)) 7231 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result); 7232 else 7233 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result); 7234 7235 // With PIC, the address is actually $g + Offset. 7236 if (isGlobalRelativeToPICBase(OpFlags)) { 7237 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), 7238 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()), 7239 Result); 7240 } 7241 7242 // For globals that require a load from a stub to get the address, emit the 7243 // load. 7244 if (isGlobalStubReference(OpFlags)) 7245 Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result, 7246 MachinePointerInfo::getGOT(), false, false, false, 0); 7247 7248 // If there was a non-zero offset that we didn't fold, create an explicit 7249 // addition for it. 
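  // (Offset was cleared above whenever it could be folded into the
  // TargetGlobalAddress node, so a non-zero value here still needs to be
  // materialized as an explicit ADD.)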
7250 if (Offset != 0) 7251 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result, 7252 DAG.getConstant(Offset, getPointerTy())); 7253 7254 return Result; 7255} 7256 7257SDValue 7258X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const { 7259 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 7260 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset(); 7261 return LowerGlobalAddress(GV, Op.getDebugLoc(), Offset, DAG); 7262} 7263 7264static SDValue 7265GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA, 7266 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg, 7267 unsigned char OperandFlags) { 7268 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 7269 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 7270 DebugLoc dl = GA->getDebugLoc(); 7271 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, 7272 GA->getValueType(0), 7273 GA->getOffset(), 7274 OperandFlags); 7275 if (InFlag) { 7276 SDValue Ops[] = { Chain, TGA, *InFlag }; 7277 Chain = DAG.getNode(X86ISD::TLSADDR, dl, NodeTys, Ops, 3); 7278 } else { 7279 SDValue Ops[] = { Chain, TGA }; 7280 Chain = DAG.getNode(X86ISD::TLSADDR, dl, NodeTys, Ops, 2); 7281 } 7282 7283 // TLSADDR will be codegen'ed as call. Inform MFI that function has calls. 7284 MFI->setAdjustsStack(true); 7285 7286 SDValue Flag = Chain.getValue(1); 7287 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag); 7288} 7289 7290// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit 7291static SDValue 7292LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG, 7293 const EVT PtrVT) { 7294 SDValue InFlag; 7295 DebugLoc dl = GA->getDebugLoc(); // ? function entry point might be better 7296 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX, 7297 DAG.getNode(X86ISD::GlobalBaseReg, 7298 DebugLoc(), PtrVT), InFlag); 7299 InFlag = Chain.getValue(1); 7300 7301 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD); 7302} 7303 7304// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit 7305static SDValue 7306LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG, 7307 const EVT PtrVT) { 7308 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, NULL, PtrVT, 7309 X86::RAX, X86II::MO_TLSGD); 7310} 7311 7312// Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or 7313// "local exec" model. 7314static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, 7315 const EVT PtrVT, TLSModel::Model model, 7316 bool is64Bit) { 7317 DebugLoc dl = GA->getDebugLoc(); 7318 7319 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit). 7320 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(), 7321 is64Bit ? 257 : 256)); 7322 7323 SDValue ThreadPointer = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 7324 DAG.getIntPtrConstant(0), 7325 MachinePointerInfo(Ptr), 7326 false, false, false, 0); 7327 7328 unsigned char OperandFlags = 0; 7329 // Most TLS accesses are not RIP relative, even on x86-64. One exception is 7330 // initialexec. 7331 unsigned WrapperKind = X86ISD::Wrapper; 7332 if (model == TLSModel::LocalExec) { 7333 OperandFlags = is64Bit ? 
X86II::MO_TPOFF : X86II::MO_NTPOFF; 7334 } else if (is64Bit) { 7335 assert(model == TLSModel::InitialExec); 7336 OperandFlags = X86II::MO_GOTTPOFF; 7337 WrapperKind = X86ISD::WrapperRIP; 7338 } else { 7339 assert(model == TLSModel::InitialExec); 7340 OperandFlags = X86II::MO_INDNTPOFF; 7341 } 7342 7343 // emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial 7344 // exec) 7345 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, 7346 GA->getValueType(0), 7347 GA->getOffset(), OperandFlags); 7348 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA); 7349 7350 if (model == TLSModel::InitialExec) 7351 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset, 7352 MachinePointerInfo::getGOT(), false, false, false, 0); 7353 7354 // The address of the thread local variable is the add of the thread 7355 // pointer with the offset of the variable. 7356 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); 7357} 7358 7359SDValue 7360X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { 7361 7362 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 7363 const GlobalValue *GV = GA->getGlobal(); 7364 7365 if (Subtarget->isTargetELF()) { 7366 // TODO: implement the "local dynamic" model 7367 // TODO: implement the "initial exec"model for pic executables 7368 7369 // If GV is an alias then use the aliasee for determining 7370 // thread-localness. 7371 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV)) 7372 GV = GA->resolveAliasedGlobal(false); 7373 7374 TLSModel::Model model 7375 = getTLSModel(GV, getTargetMachine().getRelocationModel()); 7376 7377 switch (model) { 7378 case TLSModel::GeneralDynamic: 7379 case TLSModel::LocalDynamic: // not implemented 7380 if (Subtarget->is64Bit()) 7381 return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy()); 7382 return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy()); 7383 7384 case TLSModel::InitialExec: 7385 case TLSModel::LocalExec: 7386 return LowerToTLSExecModel(GA, DAG, getPointerTy(), model, 7387 Subtarget->is64Bit()); 7388 } 7389 } else if (Subtarget->isTargetDarwin()) { 7390 // Darwin only has one model of TLS. Lower to that. 7391 unsigned char OpFlag = 0; 7392 unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ? 7393 X86ISD::WrapperRIP : X86ISD::Wrapper; 7394 7395 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the 7396 // global base reg. 7397 bool PIC32 = (getTargetMachine().getRelocationModel() == Reloc::PIC_) && 7398 !Subtarget->is64Bit(); 7399 if (PIC32) 7400 OpFlag = X86II::MO_TLVP_PIC_BASE; 7401 else 7402 OpFlag = X86II::MO_TLVP; 7403 DebugLoc DL = Op.getDebugLoc(); 7404 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL, 7405 GA->getValueType(0), 7406 GA->getOffset(), OpFlag); 7407 SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result); 7408 7409 // With PIC32, the address is actually $g + Offset. 7410 if (PIC32) 7411 Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(), 7412 DAG.getNode(X86ISD::GlobalBaseReg, 7413 DebugLoc(), getPointerTy()), 7414 Offset); 7415 7416 // Lowering the machine isd will make sure everything is in the right 7417 // location. 7418 SDValue Chain = DAG.getEntryNode(); 7419 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 7420 SDValue Args[] = { Chain, Offset }; 7421 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args, 2); 7422 7423 // TLSCALL will be codegen'ed as call. Inform MFI that function has calls. 
7424 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 7425 MFI->setAdjustsStack(true); 7426 7427 // And our return value (tls address) is in the standard call return value 7428 // location. 7429 unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX; 7430 return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(), 7431 Chain.getValue(1)); 7432 } 7433 7434 assert(false && 7435 "TLS not implemented for this target."); 7436 7437 llvm_unreachable("Unreachable"); 7438 return SDValue(); 7439} 7440 7441 7442/// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values 7443/// and take a 2 x i32 value to shift plus a shift amount. 7444SDValue X86TargetLowering::LowerShiftParts(SDValue Op, SelectionDAG &DAG) const{ 7445 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 7446 EVT VT = Op.getValueType(); 7447 unsigned VTBits = VT.getSizeInBits(); 7448 DebugLoc dl = Op.getDebugLoc(); 7449 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS; 7450 SDValue ShOpLo = Op.getOperand(0); 7451 SDValue ShOpHi = Op.getOperand(1); 7452 SDValue ShAmt = Op.getOperand(2); 7453 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi, 7454 DAG.getConstant(VTBits - 1, MVT::i8)) 7455 : DAG.getConstant(0, VT); 7456 7457 SDValue Tmp2, Tmp3; 7458 if (Op.getOpcode() == ISD::SHL_PARTS) { 7459 Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt); 7460 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); 7461 } else { 7462 Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt); 7463 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, ShAmt); 7464 } 7465 7466 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt, 7467 DAG.getConstant(VTBits, MVT::i8)); 7468 SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32, 7469 AndNode, DAG.getConstant(0, MVT::i8)); 7470 7471 SDValue Hi, Lo; 7472 SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8); 7473 SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond }; 7474 SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond }; 7475 7476 if (Op.getOpcode() == ISD::SHL_PARTS) { 7477 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0, 4); 7478 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1, 4); 7479 } else { 7480 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0, 4); 7481 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1, 4); 7482 } 7483 7484 SDValue Ops[2] = { Lo, Hi }; 7485 return DAG.getMergeValues(Ops, 2, dl); 7486} 7487 7488SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op, 7489 SelectionDAG &DAG) const { 7490 EVT SrcVT = Op.getOperand(0).getValueType(); 7491 7492 if (SrcVT.isVector()) 7493 return SDValue(); 7494 7495 assert(SrcVT.getSimpleVT() <= MVT::i64 && SrcVT.getSimpleVT() >= MVT::i16 && 7496 "Unknown SINT_TO_FP to lower!"); 7497 7498 // These are really Legal; return the operand so the caller accepts it as 7499 // Legal. 
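  // i32 sources (and i64 sources on x86-64) whose result lives in an SSE
  // register are selected directly to cvtsi2ss/cvtsi2sd, so no expansion is
  // needed here.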
7500 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType())) 7501 return Op; 7502 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) && 7503 Subtarget->is64Bit()) { 7504 return Op; 7505 } 7506 7507 DebugLoc dl = Op.getDebugLoc(); 7508 unsigned Size = SrcVT.getSizeInBits()/8; 7509 MachineFunction &MF = DAG.getMachineFunction(); 7510 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false); 7511 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 7512 SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), 7513 StackSlot, 7514 MachinePointerInfo::getFixedStack(SSFI), 7515 false, false, 0); 7516 return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG); 7517} 7518 7519SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, 7520 SDValue StackSlot, 7521 SelectionDAG &DAG) const { 7522 // Build the FILD 7523 DebugLoc DL = Op.getDebugLoc(); 7524 SDVTList Tys; 7525 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType()); 7526 if (useSSE) 7527 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue); 7528 else 7529 Tys = DAG.getVTList(Op.getValueType(), MVT::Other); 7530 7531 unsigned ByteSize = SrcVT.getSizeInBits()/8; 7532 7533 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot); 7534 MachineMemOperand *MMO; 7535 if (FI) { 7536 int SSFI = FI->getIndex(); 7537 MMO = 7538 DAG.getMachineFunction() 7539 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 7540 MachineMemOperand::MOLoad, ByteSize, ByteSize); 7541 } else { 7542 MMO = cast<LoadSDNode>(StackSlot)->getMemOperand(); 7543 StackSlot = StackSlot.getOperand(1); 7544 } 7545 SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) }; 7546 SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG : 7547 X86ISD::FILD, DL, 7548 Tys, Ops, array_lengthof(Ops), 7549 SrcVT, MMO); 7550 7551 if (useSSE) { 7552 Chain = Result.getValue(1); 7553 SDValue InFlag = Result.getValue(2); 7554 7555 // FIXME: Currently the FST is flagged to the FILD_FLAG. This 7556 // shouldn't be necessary except that RFP cannot be live across 7557 // multiple blocks. When stackifier is fixed, they can be uncoupled. 7558 MachineFunction &MF = DAG.getMachineFunction(); 7559 unsigned SSFISize = Op.getValueType().getSizeInBits()/8; 7560 int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false); 7561 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 7562 Tys = DAG.getVTList(MVT::Other); 7563 SDValue Ops[] = { 7564 Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag 7565 }; 7566 MachineMemOperand *MMO = 7567 DAG.getMachineFunction() 7568 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 7569 MachineMemOperand::MOStore, SSFISize, SSFISize); 7570 7571 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys, 7572 Ops, array_lengthof(Ops), 7573 Op.getValueType(), MMO); 7574 Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot, 7575 MachinePointerInfo::getFixedStack(SSFI), 7576 false, false, false, 0); 7577 } 7578 7579 return Result; 7580} 7581 7582// LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion. 7583SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op, 7584 SelectionDAG &DAG) const { 7585 // This algorithm is not obvious. 
Here it is what we're trying to output: 7586 /* 7587 movq %rax, %xmm0 7588 punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U } 7589 subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 } 7590 #ifdef __SSE3__ 7591 haddpd %xmm0, %xmm0 7592 #else 7593 pshufd $0x4e, %xmm0, %xmm1 7594 addpd %xmm1, %xmm0 7595 #endif 7596 */ 7597 7598 DebugLoc dl = Op.getDebugLoc(); 7599 LLVMContext *Context = DAG.getContext(); 7600 7601 // Build some magic constants. 7602 SmallVector<Constant*,4> CV0; 7603 CV0.push_back(ConstantInt::get(*Context, APInt(32, 0x43300000))); 7604 CV0.push_back(ConstantInt::get(*Context, APInt(32, 0x45300000))); 7605 CV0.push_back(ConstantInt::get(*Context, APInt(32, 0))); 7606 CV0.push_back(ConstantInt::get(*Context, APInt(32, 0))); 7607 Constant *C0 = ConstantVector::get(CV0); 7608 SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16); 7609 7610 SmallVector<Constant*,2> CV1; 7611 CV1.push_back( 7612 ConstantFP::get(*Context, APFloat(APInt(64, 0x4330000000000000ULL)))); 7613 CV1.push_back( 7614 ConstantFP::get(*Context, APFloat(APInt(64, 0x4530000000000000ULL)))); 7615 Constant *C1 = ConstantVector::get(CV1); 7616 SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16); 7617 7618 // Load the 64-bit value into an XMM register. 7619 SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, 7620 Op.getOperand(0)); 7621 SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0, 7622 MachinePointerInfo::getConstantPool(), 7623 false, false, false, 16); 7624 SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32, 7625 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, XR1), 7626 CLod0); 7627 7628 SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1, 7629 MachinePointerInfo::getConstantPool(), 7630 false, false, false, 16); 7631 SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck1); 7632 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1); 7633 SDValue Result; 7634 7635 if (Subtarget->hasSSE3()) { 7636 // FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'. 7637 Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub); 7638 } else { 7639 SDValue S2F = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Sub); 7640 SDValue Shuffle = getTargetShuffleNode(X86ISD::PSHUFD, dl, MVT::v4i32, 7641 S2F, 0x4E, DAG); 7642 Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64, 7643 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Shuffle), 7644 Sub); 7645 } 7646 7647 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result, 7648 DAG.getIntPtrConstant(0)); 7649} 7650 7651// LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion. 7652SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op, 7653 SelectionDAG &DAG) const { 7654 DebugLoc dl = Op.getDebugLoc(); 7655 // FP constant to bias correct the final result. 7656 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), 7657 MVT::f64); 7658 7659 // Load the 32-bit value into an XMM register. 7660 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, 7661 Op.getOperand(0)); 7662 7663 // Zero out the upper parts of the register. 7664 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget->hasSSE2(), 7665 DAG); 7666 7667 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 7668 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load), 7669 DAG.getIntPtrConstant(0)); 7670 7671 // Or the load with the bias. 
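  // In effect this builds a double with bit pattern 0x4330000000000000 | x,
  // i.e. exactly 2^52 + x, since the 32-bit value fits entirely in the low
  // bits of the 52-bit significand. Subtracting the 2^52 bias below therefore
  // recovers x as a double with no rounding.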
7672 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, 7673 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, 7674 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 7675 MVT::v2f64, Load)), 7676 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, 7677 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 7678 MVT::v2f64, Bias))); 7679 Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 7680 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or), 7681 DAG.getIntPtrConstant(0)); 7682 7683 // Subtract the bias. 7684 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias); 7685 7686 // Handle final rounding. 7687 EVT DestVT = Op.getValueType(); 7688 7689 if (DestVT.bitsLT(MVT::f64)) { 7690 return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub, 7691 DAG.getIntPtrConstant(0)); 7692 } else if (DestVT.bitsGT(MVT::f64)) { 7693 return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub); 7694 } 7695 7696 // Handle final rounding. 7697 return Sub; 7698} 7699 7700SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op, 7701 SelectionDAG &DAG) const { 7702 SDValue N0 = Op.getOperand(0); 7703 DebugLoc dl = Op.getDebugLoc(); 7704 7705 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't 7706 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform 7707 // the optimization here. 7708 if (DAG.SignBitIsZero(N0)) 7709 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0); 7710 7711 EVT SrcVT = N0.getValueType(); 7712 EVT DstVT = Op.getValueType(); 7713 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64) 7714 return LowerUINT_TO_FP_i64(Op, DAG); 7715 else if (SrcVT == MVT::i32 && X86ScalarSSEf64) 7716 return LowerUINT_TO_FP_i32(Op, DAG); 7717 else if (Subtarget->is64Bit() && 7718 SrcVT == MVT::i64 && DstVT == MVT::f32) 7719 return SDValue(); 7720 7721 // Make a 64-bit buffer, and use it to build an FILD. 7722 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64); 7723 if (SrcVT == MVT::i32) { 7724 SDValue WordOff = DAG.getConstant(4, getPointerTy()); 7725 SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl, 7726 getPointerTy(), StackSlot, WordOff); 7727 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), 7728 StackSlot, MachinePointerInfo(), 7729 false, false, 0); 7730 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32), 7731 OffsetSlot, MachinePointerInfo(), 7732 false, false, 0); 7733 SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG); 7734 return Fild; 7735 } 7736 7737 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP"); 7738 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), 7739 StackSlot, MachinePointerInfo(), 7740 false, false, 0); 7741 // For i64 source, we need to add the appropriate power of 2 if the input 7742 // was negative. This is the same as the optimization in 7743 // DAGTypeLegalizer::ExpandIntOp_UNIT_TO_FP, and for it to be safe here, 7744 // we must be careful to do the computation in x87 extended precision, not 7745 // in SSE. (The generic code can't know it's OK to do this, or how to.) 
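  // FILD reads the stored bits as a *signed* i64, so a value with the sign
  // bit set comes back as X - 2^64. The fudge constant built below
  // (0x5F800000 as a float, i.e. 2^64) is added back in exactly that case;
  // doing the add in f80, whose 64-bit significand represents any i64
  // exactly, keeps the correction exact until the final FP_ROUND.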
7746 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex(); 7747 MachineMemOperand *MMO = 7748 DAG.getMachineFunction() 7749 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 7750 MachineMemOperand::MOLoad, 8, 8); 7751 7752 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other); 7753 SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) }; 7754 SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops, 3, 7755 MVT::i64, MMO); 7756 7757 APInt FF(32, 0x5F800000ULL); 7758 7759 // Check whether the sign bit is set. 7760 SDValue SignSet = DAG.getSetCC(dl, getSetCCResultType(MVT::i64), 7761 Op.getOperand(0), DAG.getConstant(0, MVT::i64), 7762 ISD::SETLT); 7763 7764 // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits. 7765 SDValue FudgePtr = DAG.getConstantPool( 7766 ConstantInt::get(*DAG.getContext(), FF.zext(64)), 7767 getPointerTy()); 7768 7769 // Get a pointer to FF if the sign bit was set, or to 0 otherwise. 7770 SDValue Zero = DAG.getIntPtrConstant(0); 7771 SDValue Four = DAG.getIntPtrConstant(4); 7772 SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet, 7773 Zero, Four); 7774 FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset); 7775 7776 // Load the value out, extending it from f32 to f80. 7777 // FIXME: Avoid the extend by constructing the right constant pool? 7778 SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(), 7779 FudgePtr, MachinePointerInfo::getConstantPool(), 7780 MVT::f32, false, false, 4); 7781 // Extend everything to 80 bits to force it to be done on x87. 7782 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge); 7783 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0)); 7784} 7785 7786std::pair<SDValue,SDValue> X86TargetLowering:: 7787FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned) const { 7788 DebugLoc DL = Op.getDebugLoc(); 7789 7790 EVT DstTy = Op.getValueType(); 7791 7792 if (!IsSigned) { 7793 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT"); 7794 DstTy = MVT::i64; 7795 } 7796 7797 assert(DstTy.getSimpleVT() <= MVT::i64 && 7798 DstTy.getSimpleVT() >= MVT::i16 && 7799 "Unknown FP_TO_SINT to lower!"); 7800 7801 // These are really Legal. 7802 if (DstTy == MVT::i32 && 7803 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) 7804 return std::make_pair(SDValue(), SDValue()); 7805 if (Subtarget->is64Bit() && 7806 DstTy == MVT::i64 && 7807 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) 7808 return std::make_pair(SDValue(), SDValue()); 7809 7810 // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary 7811 // stack slot. 
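  // Roughly: a value living in an SSE register is first spilled and reloaded
  // with FLD so it sits on the x87 stack; the FP_TO_INT*_IN_MEM node then
  // becomes a FISTP (with the control word temporarily set to truncate) into
  // a stack slot, and the caller finishes with an ordinary integer load.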
7812 MachineFunction &MF = DAG.getMachineFunction(); 7813 unsigned MemSize = DstTy.getSizeInBits()/8; 7814 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false); 7815 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 7816 7817 7818 7819 unsigned Opc; 7820 switch (DstTy.getSimpleVT().SimpleTy) { 7821 default: llvm_unreachable("Invalid FP_TO_SINT to lower!"); 7822 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break; 7823 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break; 7824 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break; 7825 } 7826 7827 SDValue Chain = DAG.getEntryNode(); 7828 SDValue Value = Op.getOperand(0); 7829 EVT TheVT = Op.getOperand(0).getValueType(); 7830 if (isScalarFPTypeInSSEReg(TheVT)) { 7831 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!"); 7832 Chain = DAG.getStore(Chain, DL, Value, StackSlot, 7833 MachinePointerInfo::getFixedStack(SSFI), 7834 false, false, 0); 7835 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other); 7836 SDValue Ops[] = { 7837 Chain, StackSlot, DAG.getValueType(TheVT) 7838 }; 7839 7840 MachineMemOperand *MMO = 7841 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 7842 MachineMemOperand::MOLoad, MemSize, MemSize); 7843 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, 3, 7844 DstTy, MMO); 7845 Chain = Value.getValue(1); 7846 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false); 7847 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 7848 } 7849 7850 MachineMemOperand *MMO = 7851 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 7852 MachineMemOperand::MOStore, MemSize, MemSize); 7853 7854 // Build the FP_TO_INT*_IN_MEM 7855 SDValue Ops[] = { Chain, Value, StackSlot }; 7856 SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other), 7857 Ops, 3, DstTy, MMO); 7858 7859 return std::make_pair(FIST, StackSlot); 7860} 7861 7862SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op, 7863 SelectionDAG &DAG) const { 7864 if (Op.getValueType().isVector()) 7865 return SDValue(); 7866 7867 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, true); 7868 SDValue FIST = Vals.first, StackSlot = Vals.second; 7869 // If FP_TO_INTHelper failed, the node is actually supposed to be Legal. 7870 if (FIST.getNode() == 0) return Op; 7871 7872 // Load the result. 7873 return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(), 7874 FIST, StackSlot, MachinePointerInfo(), 7875 false, false, false, 0); 7876} 7877 7878SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op, 7879 SelectionDAG &DAG) const { 7880 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, false); 7881 SDValue FIST = Vals.first, StackSlot = Vals.second; 7882 assert(FIST.getNode() && "Unexpected failure"); 7883 7884 // Load the result. 
7885 return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(), 7886 FIST, StackSlot, MachinePointerInfo(), 7887 false, false, false, 0); 7888} 7889 7890SDValue X86TargetLowering::LowerFABS(SDValue Op, 7891 SelectionDAG &DAG) const { 7892 LLVMContext *Context = DAG.getContext(); 7893 DebugLoc dl = Op.getDebugLoc(); 7894 EVT VT = Op.getValueType(); 7895 EVT EltVT = VT; 7896 if (VT.isVector()) 7897 EltVT = VT.getVectorElementType(); 7898 SmallVector<Constant*,4> CV; 7899 if (EltVT == MVT::f64) { 7900 Constant *C = ConstantFP::get(*Context, APFloat(APInt(64, ~(1ULL << 63)))); 7901 CV.assign(2, C); 7902 } else { 7903 Constant *C = ConstantFP::get(*Context, APFloat(APInt(32, ~(1U << 31)))); 7904 CV.assign(4, C); 7905 } 7906 Constant *C = ConstantVector::get(CV); 7907 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 7908 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 7909 MachinePointerInfo::getConstantPool(), 7910 false, false, false, 16); 7911 return DAG.getNode(X86ISD::FAND, dl, VT, Op.getOperand(0), Mask); 7912} 7913 7914SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) const { 7915 LLVMContext *Context = DAG.getContext(); 7916 DebugLoc dl = Op.getDebugLoc(); 7917 EVT VT = Op.getValueType(); 7918 EVT EltVT = VT; 7919 unsigned NumElts = VT == MVT::f64 ? 2 : 4; 7920 if (VT.isVector()) { 7921 EltVT = VT.getVectorElementType(); 7922 NumElts = VT.getVectorNumElements(); 7923 } 7924 SmallVector<Constant*,8> CV; 7925 if (EltVT == MVT::f64) { 7926 Constant *C = ConstantFP::get(*Context, APFloat(APInt(64, 1ULL << 63))); 7927 CV.assign(NumElts, C); 7928 } else { 7929 Constant *C = ConstantFP::get(*Context, APFloat(APInt(32, 1U << 31))); 7930 CV.assign(NumElts, C); 7931 } 7932 Constant *C = ConstantVector::get(CV); 7933 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 7934 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 7935 MachinePointerInfo::getConstantPool(), 7936 false, false, false, 16); 7937 if (VT.isVector()) { 7938 MVT XORVT = VT.getSizeInBits() == 128 ? MVT::v2i64 : MVT::v4i64; 7939 return DAG.getNode(ISD::BITCAST, dl, VT, 7940 DAG.getNode(ISD::XOR, dl, XORVT, 7941 DAG.getNode(ISD::BITCAST, dl, XORVT, 7942 Op.getOperand(0)), 7943 DAG.getNode(ISD::BITCAST, dl, XORVT, Mask))); 7944 } else { 7945 return DAG.getNode(X86ISD::FXOR, dl, VT, Op.getOperand(0), Mask); 7946 } 7947} 7948 7949SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { 7950 LLVMContext *Context = DAG.getContext(); 7951 SDValue Op0 = Op.getOperand(0); 7952 SDValue Op1 = Op.getOperand(1); 7953 DebugLoc dl = Op.getDebugLoc(); 7954 EVT VT = Op.getValueType(); 7955 EVT SrcVT = Op1.getValueType(); 7956 7957 // If second operand is smaller, extend it first. 7958 if (SrcVT.bitsLT(VT)) { 7959 Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1); 7960 SrcVT = VT; 7961 } 7962 // And if it is bigger, shrink it first. 7963 if (SrcVT.bitsGT(VT)) { 7964 Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1)); 7965 SrcVT = VT; 7966 } 7967 7968 // At this point the operands and the result should have the same 7969 // type, and that won't be f80 since that is not custom lowered. 7970 7971 // First get the sign bit of second operand. 
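  // (Overall this computes copysign(Op0, Op1) as
  // (Op0 & ~SignMask) | (Op1 & SignMask), using constant-pool masks with
  // FAND/FOR on the FP values.)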
7972 SmallVector<Constant*,4> CV; 7973 if (SrcVT == MVT::f64) { 7974 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 1ULL << 63)))); 7975 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 0)))); 7976 } else { 7977 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 1U << 31)))); 7978 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 7979 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 7980 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 7981 } 7982 Constant *C = ConstantVector::get(CV); 7983 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 7984 SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx, 7985 MachinePointerInfo::getConstantPool(), 7986 false, false, false, 16); 7987 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1); 7988 7989 // Shift sign bit right or left if the two operands have different types. 7990 if (SrcVT.bitsGT(VT)) { 7991 // Op0 is MVT::f32, Op1 is MVT::f64. 7992 SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, SignBit); 7993 SignBit = DAG.getNode(X86ISD::FSRL, dl, MVT::v2f64, SignBit, 7994 DAG.getConstant(32, MVT::i32)); 7995 SignBit = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, SignBit); 7996 SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, SignBit, 7997 DAG.getIntPtrConstant(0)); 7998 } 7999 8000 // Clear first operand sign bit. 8001 CV.clear(); 8002 if (VT == MVT::f64) { 8003 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, ~(1ULL << 63))))); 8004 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 0)))); 8005 } else { 8006 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, ~(1U << 31))))); 8007 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 8008 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 8009 CV.push_back(ConstantFP::get(*Context, APFloat(APInt(32, 0)))); 8010 } 8011 C = ConstantVector::get(CV); 8012 CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 8013 SDValue Mask2 = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 8014 MachinePointerInfo::getConstantPool(), 8015 false, false, false, 16); 8016 SDValue Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Mask2); 8017 8018 // Or the value with the sign bit. 8019 return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit); 8020} 8021 8022SDValue X86TargetLowering::LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) const { 8023 SDValue N0 = Op.getOperand(0); 8024 DebugLoc dl = Op.getDebugLoc(); 8025 EVT VT = Op.getValueType(); 8026 8027 // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1). 8028 SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0, 8029 DAG.getConstant(1, VT)); 8030 return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT)); 8031} 8032 8033/// Emit nodes that will be selected as "test Op0,Op0", or something 8034/// equivalent. 8035SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, 8036 SelectionDAG &DAG) const { 8037 DebugLoc dl = Op.getDebugLoc(); 8038 8039 // CF and OF aren't always set the way we want. Determine which 8040 // of these we need. 
8041 bool NeedCF = false; 8042 bool NeedOF = false; 8043 switch (X86CC) { 8044 default: break; 8045 case X86::COND_A: case X86::COND_AE: 8046 case X86::COND_B: case X86::COND_BE: 8047 NeedCF = true; 8048 break; 8049 case X86::COND_G: case X86::COND_GE: 8050 case X86::COND_L: case X86::COND_LE: 8051 case X86::COND_O: case X86::COND_NO: 8052 NeedOF = true; 8053 break; 8054 } 8055 8056 // See if we can use the EFLAGS value from the operand instead of 8057 // doing a separate TEST. TEST always sets OF and CF to 0, so unless 8058 // we prove that the arithmetic won't overflow, we can't use OF or CF. 8059 if (Op.getResNo() != 0 || NeedOF || NeedCF) 8060 // Emit a CMP with 0, which is the TEST pattern. 8061 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op, 8062 DAG.getConstant(0, Op.getValueType())); 8063 8064 unsigned Opcode = 0; 8065 unsigned NumOperands = 0; 8066 switch (Op.getNode()->getOpcode()) { 8067 case ISD::ADD: 8068 // Due to an isel shortcoming, be conservative if this add is likely to be 8069 // selected as part of a load-modify-store instruction. When the root node 8070 // in a match is a store, isel doesn't know how to remap non-chain non-flag 8071 // uses of other nodes in the match, such as the ADD in this case. This 8072 // leads to the ADD being left around and reselected, with the result being 8073 // two adds in the output. Alas, even if none our users are stores, that 8074 // doesn't prove we're O.K. Ergo, if we have any parents that aren't 8075 // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require 8076 // climbing the DAG back to the root, and it doesn't seem to be worth the 8077 // effort. 8078 for (SDNode::use_iterator UI = Op.getNode()->use_begin(), 8079 UE = Op.getNode()->use_end(); UI != UE; ++UI) 8080 if (UI->getOpcode() != ISD::CopyToReg && 8081 UI->getOpcode() != ISD::SETCC && 8082 UI->getOpcode() != ISD::STORE) 8083 goto default_case; 8084 8085 if (ConstantSDNode *C = 8086 dyn_cast<ConstantSDNode>(Op.getNode()->getOperand(1))) { 8087 // An add of one will be selected as an INC. 8088 if (C->getAPIntValue() == 1) { 8089 Opcode = X86ISD::INC; 8090 NumOperands = 1; 8091 break; 8092 } 8093 8094 // An add of negative one (subtract of one) will be selected as a DEC. 8095 if (C->getAPIntValue().isAllOnesValue()) { 8096 Opcode = X86ISD::DEC; 8097 NumOperands = 1; 8098 break; 8099 } 8100 } 8101 8102 // Otherwise use a regular EFLAGS-setting add. 8103 Opcode = X86ISD::ADD; 8104 NumOperands = 2; 8105 break; 8106 case ISD::AND: { 8107 // If the primary and result isn't used, don't bother using X86ISD::AND, 8108 // because a TEST instruction will be better. 8109 bool NonFlagUse = false; 8110 for (SDNode::use_iterator UI = Op.getNode()->use_begin(), 8111 UE = Op.getNode()->use_end(); UI != UE; ++UI) { 8112 SDNode *User = *UI; 8113 unsigned UOpNo = UI.getOperandNo(); 8114 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) { 8115 // Look pass truncate. 8116 UOpNo = User->use_begin().getOperandNo(); 8117 User = *User->use_begin(); 8118 } 8119 8120 if (User->getOpcode() != ISD::BRCOND && 8121 User->getOpcode() != ISD::SETCC && 8122 (User->getOpcode() != ISD::SELECT || UOpNo != 0)) { 8123 NonFlagUse = true; 8124 break; 8125 } 8126 } 8127 8128 if (!NonFlagUse) 8129 break; 8130 } 8131 // FALL THROUGH 8132 case ISD::SUB: 8133 case ISD::OR: 8134 case ISD::XOR: 8135 // Due to the ISEL shortcoming noted above, be conservative if this op is 8136 // likely to be selected as part of a load-modify-store instruction. 
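    // (E.g. (store (or (load p), x), p) is normally selected as a single
    // "or [mem], x"; also using this node's EFLAGS result here could leave
    // the OR to be selected a second time, as described for ADD above.)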
8137 for (SDNode::use_iterator UI = Op.getNode()->use_begin(), 8138 UE = Op.getNode()->use_end(); UI != UE; ++UI) 8139 if (UI->getOpcode() == ISD::STORE) 8140 goto default_case; 8141 8142 // Otherwise use a regular EFLAGS-setting instruction. 8143 switch (Op.getNode()->getOpcode()) { 8144 default: llvm_unreachable("unexpected operator!"); 8145 case ISD::SUB: Opcode = X86ISD::SUB; break; 8146 case ISD::OR: Opcode = X86ISD::OR; break; 8147 case ISD::XOR: Opcode = X86ISD::XOR; break; 8148 case ISD::AND: Opcode = X86ISD::AND; break; 8149 } 8150 8151 NumOperands = 2; 8152 break; 8153 case X86ISD::ADD: 8154 case X86ISD::SUB: 8155 case X86ISD::INC: 8156 case X86ISD::DEC: 8157 case X86ISD::OR: 8158 case X86ISD::XOR: 8159 case X86ISD::AND: 8160 return SDValue(Op.getNode(), 1); 8161 default: 8162 default_case: 8163 break; 8164 } 8165 8166 if (Opcode == 0) 8167 // Emit a CMP with 0, which is the TEST pattern. 8168 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op, 8169 DAG.getConstant(0, Op.getValueType())); 8170 8171 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); 8172 SmallVector<SDValue, 4> Ops; 8173 for (unsigned i = 0; i != NumOperands; ++i) 8174 Ops.push_back(Op.getOperand(i)); 8175 8176 SDValue New = DAG.getNode(Opcode, dl, VTs, &Ops[0], NumOperands); 8177 DAG.ReplaceAllUsesWith(Op, New); 8178 return SDValue(New.getNode(), 1); 8179} 8180 8181/// Emit nodes that will be selected as "cmp Op0,Op1", or something 8182/// equivalent. 8183SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC, 8184 SelectionDAG &DAG) const { 8185 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1)) 8186 if (C->getAPIntValue() == 0) 8187 return EmitTest(Op0, X86CC, DAG); 8188 8189 DebugLoc dl = Op0.getDebugLoc(); 8190 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1); 8191} 8192 8193/// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node 8194/// if it's possible. 8195SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC, 8196 DebugLoc dl, SelectionDAG &DAG) const { 8197 SDValue Op0 = And.getOperand(0); 8198 SDValue Op1 = And.getOperand(1); 8199 if (Op0.getOpcode() == ISD::TRUNCATE) 8200 Op0 = Op0.getOperand(0); 8201 if (Op1.getOpcode() == ISD::TRUNCATE) 8202 Op1 = Op1.getOperand(0); 8203 8204 SDValue LHS, RHS; 8205 if (Op1.getOpcode() == ISD::SHL) 8206 std::swap(Op0, Op1); 8207 if (Op0.getOpcode() == ISD::SHL) { 8208 if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0))) 8209 if (And00C->getZExtValue() == 1) { 8210 // If we looked past a truncate, check that it's only truncating away 8211 // known zeros. 8212 unsigned BitWidth = Op0.getValueSizeInBits(); 8213 unsigned AndBitWidth = And.getValueSizeInBits(); 8214 if (BitWidth > AndBitWidth) { 8215 APInt Mask = APInt::getAllOnesValue(BitWidth), Zeros, Ones; 8216 DAG.ComputeMaskedBits(Op0, Mask, Zeros, Ones); 8217 if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth) 8218 return SDValue(); 8219 } 8220 LHS = Op1; 8221 RHS = Op0.getOperand(1); 8222 } 8223 } else if (Op1.getOpcode() == ISD::Constant) { 8224 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1); 8225 uint64_t AndRHSVal = AndRHS->getZExtValue(); 8226 SDValue AndLHS = Op0; 8227 8228 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) { 8229 LHS = AndLHS.getOperand(0); 8230 RHS = AndLHS.getOperand(1); 8231 } 8232 8233 // Use BT if the immediate can't be encoded in a TEST instruction. 
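    // (TEST's immediate is at most 32 bits, so a single mask bit above bit
    // 31, e.g. "x & (1ULL << 40)", is better checked with "bt x, 40".)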
8234 if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) { 8235 LHS = AndLHS; 8236 RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), LHS.getValueType()); 8237 } 8238 } 8239 8240 if (LHS.getNode()) { 8241 // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT 8242 // instruction. Since the shift amount is in-range-or-undefined, we know 8243 // that doing a bittest on the i32 value is ok. We extend to i32 because 8244 // the encoding for the i16 version is larger than the i32 version. 8245 // Also promote i16 to i32 for performance / code size reason. 8246 if (LHS.getValueType() == MVT::i8 || 8247 LHS.getValueType() == MVT::i16) 8248 LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS); 8249 8250 // If the operand types disagree, extend the shift amount to match. Since 8251 // BT ignores high bits (like shifts) we can use anyextend. 8252 if (LHS.getValueType() != RHS.getValueType()) 8253 RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS); 8254 8255 SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS); 8256 unsigned Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B; 8257 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 8258 DAG.getConstant(Cond, MVT::i8), BT); 8259 } 8260 8261 return SDValue(); 8262} 8263 8264SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { 8265 8266 if (Op.getValueType().isVector()) return LowerVSETCC(Op, DAG); 8267 8268 assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer"); 8269 SDValue Op0 = Op.getOperand(0); 8270 SDValue Op1 = Op.getOperand(1); 8271 DebugLoc dl = Op.getDebugLoc(); 8272 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 8273 8274 // Optimize to BT if possible. 8275 // Lower (X & (1 << N)) == 0 to BT(X, N). 8276 // Lower ((X >>u N) & 1) != 0 to BT(X, N). 8277 // Lower ((X >>s N) & 1) != 0 to BT(X, N). 8278 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() && 8279 Op1.getOpcode() == ISD::Constant && 8280 cast<ConstantSDNode>(Op1)->isNullValue() && 8281 (CC == ISD::SETEQ || CC == ISD::SETNE)) { 8282 SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG); 8283 if (NewSetCC.getNode()) 8284 return NewSetCC; 8285 } 8286 8287 // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of 8288 // these. 8289 if (Op1.getOpcode() == ISD::Constant && 8290 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 || 8291 cast<ConstantSDNode>(Op1)->isNullValue()) && 8292 (CC == ISD::SETEQ || CC == ISD::SETNE)) { 8293 8294 // If the input is a setcc, then reuse the input setcc or use a new one with 8295 // the inverted condition. 8296 if (Op0.getOpcode() == X86ISD::SETCC) { 8297 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0); 8298 bool Invert = (CC == ISD::SETNE) ^ 8299 cast<ConstantSDNode>(Op1)->isNullValue(); 8300 if (!Invert) return Op0; 8301 8302 CCode = X86::GetOppositeBranchCondition(CCode); 8303 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 8304 DAG.getConstant(CCode, MVT::i8), Op0.getOperand(1)); 8305 } 8306 } 8307 8308 bool isFP = Op1.getValueType().isFloatingPoint(); 8309 unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG); 8310 if (X86CC == X86::COND_INVALID) 8311 return SDValue(); 8312 8313 SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, DAG); 8314 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 8315 DAG.getConstant(X86CC, MVT::i8), EFLAGS); 8316} 8317 8318// Lower256IntVSETCC - Break a VSETCC 256-bit integer VSETCC into two new 128 8319// ones, and then concatenate the result back. 
8320static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) { 8321 EVT VT = Op.getValueType(); 8322 8323 assert(VT.getSizeInBits() == 256 && Op.getOpcode() == ISD::SETCC && 8324 "Unsupported value type for operation"); 8325 8326 int NumElems = VT.getVectorNumElements(); 8327 DebugLoc dl = Op.getDebugLoc(); 8328 SDValue CC = Op.getOperand(2); 8329 SDValue Idx0 = DAG.getConstant(0, MVT::i32); 8330 SDValue Idx1 = DAG.getConstant(NumElems/2, MVT::i32); 8331 8332 // Extract the LHS vectors 8333 SDValue LHS = Op.getOperand(0); 8334 SDValue LHS1 = Extract128BitVector(LHS, Idx0, DAG, dl); 8335 SDValue LHS2 = Extract128BitVector(LHS, Idx1, DAG, dl); 8336 8337 // Extract the RHS vectors 8338 SDValue RHS = Op.getOperand(1); 8339 SDValue RHS1 = Extract128BitVector(RHS, Idx0, DAG, dl); 8340 SDValue RHS2 = Extract128BitVector(RHS, Idx1, DAG, dl); 8341 8342 // Issue the operation on the smaller types and concatenate the result back 8343 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 8344 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); 8345 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, 8346 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC), 8347 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC)); 8348} 8349 8350 8351SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) const { 8352 SDValue Cond; 8353 SDValue Op0 = Op.getOperand(0); 8354 SDValue Op1 = Op.getOperand(1); 8355 SDValue CC = Op.getOperand(2); 8356 EVT VT = Op.getValueType(); 8357 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 8358 bool isFP = Op.getOperand(1).getValueType().isFloatingPoint(); 8359 DebugLoc dl = Op.getDebugLoc(); 8360 8361 if (isFP) { 8362 unsigned SSECC = 8; 8363 EVT EltVT = Op0.getValueType().getVectorElementType(); 8364 assert(EltVT == MVT::f32 || EltVT == MVT::f64); 8365 8366 unsigned Opc = EltVT == MVT::f32 ? X86ISD::CMPPS : X86ISD::CMPPD; 8367 bool Swap = false; 8368 8369 // SSE Condition code mapping: 8370 // 0 - EQ 8371 // 1 - LT 8372 // 2 - LE 8373 // 3 - UNORD 8374 // 4 - NEQ 8375 // 5 - NLT 8376 // 6 - NLE 8377 // 7 - ORD 8378 switch (SetCCOpcode) { 8379 default: break; 8380 case ISD::SETOEQ: 8381 case ISD::SETEQ: SSECC = 0; break; 8382 case ISD::SETOGT: 8383 case ISD::SETGT: Swap = true; // Fallthrough 8384 case ISD::SETLT: 8385 case ISD::SETOLT: SSECC = 1; break; 8386 case ISD::SETOGE: 8387 case ISD::SETGE: Swap = true; // Fallthrough 8388 case ISD::SETLE: 8389 case ISD::SETOLE: SSECC = 2; break; 8390 case ISD::SETUO: SSECC = 3; break; 8391 case ISD::SETUNE: 8392 case ISD::SETNE: SSECC = 4; break; 8393 case ISD::SETULE: Swap = true; 8394 case ISD::SETUGE: SSECC = 5; break; 8395 case ISD::SETULT: Swap = true; 8396 case ISD::SETUGT: SSECC = 6; break; 8397 case ISD::SETO: SSECC = 7; break; 8398 } 8399 if (Swap) 8400 std::swap(Op0, Op1); 8401 8402 // In the two special cases we can't handle, emit two comparisons. 
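    // (UEQ is "unordered or equal", i.e. CMPUNORD | CMPEQ; ONE is "ordered
    // and not equal", i.e. CMPORD & CMPNEQ.)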
8403 if (SSECC == 8) { 8404 if (SetCCOpcode == ISD::SETUEQ) { 8405 SDValue UNORD, EQ; 8406 UNORD = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(3, MVT::i8)); 8407 EQ = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(0, MVT::i8)); 8408 return DAG.getNode(ISD::OR, dl, VT, UNORD, EQ); 8409 } else if (SetCCOpcode == ISD::SETONE) { 8410 SDValue ORD, NEQ; 8411 ORD = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(7, MVT::i8)); 8412 NEQ = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(4, MVT::i8)); 8413 return DAG.getNode(ISD::AND, dl, VT, ORD, NEQ); 8414 } 8415 llvm_unreachable("Illegal FP comparison"); 8416 } 8417 // Handle all other FP comparisons here. 8418 return DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(SSECC, MVT::i8)); 8419 } 8420 8421 // Break 256-bit integer vector compare into smaller ones. 8422 if (VT.getSizeInBits() == 256 && !Subtarget->hasAVX2()) 8423 return Lower256IntVSETCC(Op, DAG); 8424 8425 // We are handling one of the integer comparisons here. Since SSE only has 8426 // GT and EQ comparisons for integer, swapping operands and multiple 8427 // operations may be required for some comparisons. 8428 unsigned Opc = 0, EQOpc = 0, GTOpc = 0; 8429 bool Swap = false, Invert = false, FlipSigns = false; 8430 8431 switch (VT.getVectorElementType().getSimpleVT().SimpleTy) { 8432 default: break; 8433 case MVT::i8: EQOpc = X86ISD::PCMPEQB; GTOpc = X86ISD::PCMPGTB; break; 8434 case MVT::i16: EQOpc = X86ISD::PCMPEQW; GTOpc = X86ISD::PCMPGTW; break; 8435 case MVT::i32: EQOpc = X86ISD::PCMPEQD; GTOpc = X86ISD::PCMPGTD; break; 8436 case MVT::i64: EQOpc = X86ISD::PCMPEQQ; GTOpc = X86ISD::PCMPGTQ; break; 8437 } 8438 8439 switch (SetCCOpcode) { 8440 default: break; 8441 case ISD::SETNE: Invert = true; 8442 case ISD::SETEQ: Opc = EQOpc; break; 8443 case ISD::SETLT: Swap = true; 8444 case ISD::SETGT: Opc = GTOpc; break; 8445 case ISD::SETGE: Swap = true; 8446 case ISD::SETLE: Opc = GTOpc; Invert = true; break; 8447 case ISD::SETULT: Swap = true; 8448 case ISD::SETUGT: Opc = GTOpc; FlipSigns = true; break; 8449 case ISD::SETUGE: Swap = true; 8450 case ISD::SETULE: Opc = GTOpc; FlipSigns = true; Invert = true; break; 8451 } 8452 if (Swap) 8453 std::swap(Op0, Op1); 8454 8455 // Check that the operation in question is available (most are plain SSE2, 8456 // but PCMPGTQ and PCMPEQQ have different requirements). 8457 if (Opc == X86ISD::PCMPGTQ && !Subtarget->hasSSE42()) 8458 return SDValue(); 8459 if (Opc == X86ISD::PCMPEQQ && !Subtarget->hasSSE41()) 8460 return SDValue(); 8461 8462 // Since SSE has no unsigned integer comparisons, we need to flip the sign 8463 // bits of the inputs before performing those operations. 8464 if (FlipSigns) { 8465 EVT EltVT = VT.getVectorElementType(); 8466 SDValue SignBit = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), 8467 EltVT); 8468 std::vector<SDValue> SignBits(VT.getVectorNumElements(), SignBit); 8469 SDValue SignVec = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &SignBits[0], 8470 SignBits.size()); 8471 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SignVec); 8472 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SignVec); 8473 } 8474 8475 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 8476 8477 // If the logical-not of the result is required, perform that now. 8478 if (Invert) 8479 Result = DAG.getNOT(dl, Result, VT); 8480 8481 return Result; 8482} 8483 8484// isX86LogicalCmp - Return true if opcode is a X86 logical comparison. 
8485static bool isX86LogicalCmp(SDValue Op) { 8486 unsigned Opc = Op.getNode()->getOpcode(); 8487 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI) 8488 return true; 8489 if (Op.getResNo() == 1 && 8490 (Opc == X86ISD::ADD || 8491 Opc == X86ISD::SUB || 8492 Opc == X86ISD::ADC || 8493 Opc == X86ISD::SBB || 8494 Opc == X86ISD::SMUL || 8495 Opc == X86ISD::UMUL || 8496 Opc == X86ISD::INC || 8497 Opc == X86ISD::DEC || 8498 Opc == X86ISD::OR || 8499 Opc == X86ISD::XOR || 8500 Opc == X86ISD::AND)) 8501 return true; 8502 8503 if (Op.getResNo() == 2 && Opc == X86ISD::UMUL) 8504 return true; 8505 8506 return false; 8507} 8508 8509static bool isZero(SDValue V) { 8510 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V); 8511 return C && C->isNullValue(); 8512} 8513 8514static bool isAllOnes(SDValue V) { 8515 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V); 8516 return C && C->isAllOnesValue(); 8517} 8518 8519SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 8520 bool addTest = true; 8521 SDValue Cond = Op.getOperand(0); 8522 SDValue Op1 = Op.getOperand(1); 8523 SDValue Op2 = Op.getOperand(2); 8524 DebugLoc DL = Op.getDebugLoc(); 8525 SDValue CC; 8526 8527 if (Cond.getOpcode() == ISD::SETCC) { 8528 SDValue NewCond = LowerSETCC(Cond, DAG); 8529 if (NewCond.getNode()) 8530 Cond = NewCond; 8531 } 8532 8533 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y 8534 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y 8535 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y 8536 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y 8537 if (Cond.getOpcode() == X86ISD::SETCC && 8538 Cond.getOperand(1).getOpcode() == X86ISD::CMP && 8539 isZero(Cond.getOperand(1).getOperand(1))) { 8540 SDValue Cmp = Cond.getOperand(1); 8541 8542 unsigned CondCode =cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue(); 8543 8544 if ((isAllOnes(Op1) || isAllOnes(Op2)) && 8545 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) { 8546 SDValue Y = isAllOnes(Op2) ? Op1 : Op2; 8547 8548 SDValue CmpOp0 = Cmp.getOperand(0); 8549 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, 8550 CmpOp0, DAG.getConstant(1, CmpOp0.getValueType())); 8551 8552 SDValue Res = // Res = 0 or -1. 8553 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(), 8554 DAG.getConstant(X86::COND_B, MVT::i8), Cmp); 8555 8556 if (isAllOnes(Op1) != (CondCode == X86::COND_E)) 8557 Res = DAG.getNOT(DL, Res, Res.getValueType()); 8558 8559 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2); 8560 if (N2C == 0 || !N2C->isNullValue()) 8561 Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y); 8562 return Res; 8563 } 8564 } 8565 8566 // Look past (and (setcc_carry (cmp ...)), 1). 8567 if (Cond.getOpcode() == ISD::AND && 8568 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) { 8569 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 8570 if (C && C->getAPIntValue() == 1) 8571 Cond = Cond.getOperand(0); 8572 } 8573 8574 // If condition flag is set by a X86ISD::CMP, then use it as the condition 8575 // setting operand in place of the X86ISD::SETCC. 8576 unsigned CondOpcode = Cond.getOpcode(); 8577 if (CondOpcode == X86ISD::SETCC || 8578 CondOpcode == X86ISD::SETCC_CARRY) { 8579 CC = Cond.getOperand(0); 8580 8581 SDValue Cmp = Cond.getOperand(1); 8582 unsigned Opc = Cmp.getOpcode(); 8583 EVT VT = Op.getValueType(); 8584 8585 bool IllegalFPCMov = false; 8586 if (VT.isFloatingPoint() && !VT.isVector() && 8587 !isScalarFPTypeInSSEReg(VT)) // FPStack? 
8588 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue()); 8589 8590 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) || 8591 Opc == X86ISD::BT) { // FIXME 8592 Cond = Cmp; 8593 addTest = false; 8594 } 8595 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO || 8596 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO || 8597 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) && 8598 Cond.getOperand(0).getValueType() != MVT::i8)) { 8599 SDValue LHS = Cond.getOperand(0); 8600 SDValue RHS = Cond.getOperand(1); 8601 unsigned X86Opcode; 8602 unsigned X86Cond; 8603 SDVTList VTs; 8604 switch (CondOpcode) { 8605 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break; 8606 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break; 8607 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break; 8608 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break; 8609 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break; 8610 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break; 8611 default: llvm_unreachable("unexpected overflowing operator"); 8612 } 8613 if (CondOpcode == ISD::UMULO) 8614 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(), 8615 MVT::i32); 8616 else 8617 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32); 8618 8619 SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS); 8620 8621 if (CondOpcode == ISD::UMULO) 8622 Cond = X86Op.getValue(2); 8623 else 8624 Cond = X86Op.getValue(1); 8625 8626 CC = DAG.getConstant(X86Cond, MVT::i8); 8627 addTest = false; 8628 } 8629 8630 if (addTest) { 8631 // Look pass the truncate. 8632 if (Cond.getOpcode() == ISD::TRUNCATE) 8633 Cond = Cond.getOperand(0); 8634 8635 // We know the result of AND is compared against zero. Try to match 8636 // it to BT. 8637 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) { 8638 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG); 8639 if (NewSetCC.getNode()) { 8640 CC = NewSetCC.getOperand(0); 8641 Cond = NewSetCC.getOperand(1); 8642 addTest = false; 8643 } 8644 } 8645 } 8646 8647 if (addTest) { 8648 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 8649 Cond = EmitTest(Cond, X86::COND_NE, DAG); 8650 } 8651 8652 // a < b ? -1 : 0 -> RES = ~setcc_carry 8653 // a < b ? 0 : -1 -> RES = setcc_carry 8654 // a >= b ? -1 : 0 -> RES = setcc_carry 8655 // a >= b ? 0 : -1 -> RES = ~setcc_carry 8656 if (Cond.getOpcode() == X86ISD::CMP) { 8657 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue(); 8658 8659 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) && 8660 (isAllOnes(Op1) || isAllOnes(Op2)) && (isZero(Op1) || isZero(Op2))) { 8661 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(), 8662 DAG.getConstant(X86::COND_B, MVT::i8), Cond); 8663 if (isAllOnes(Op1) != (CondCode == X86::COND_B)) 8664 return DAG.getNOT(DL, Res, Res.getValueType()); 8665 return Res; 8666 } 8667 } 8668 8669 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if 8670 // condition is true. 8671 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue); 8672 SDValue Ops[] = { Op2, Op1, CC, Cond }; 8673 return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops, array_lengthof(Ops)); 8674} 8675 8676// isAndOrOfSingleUseSetCCs - Return true if node is an ISD::AND or 8677// ISD::OR of two X86ISD::SETCC nodes each of which has no other use apart 8678// from the AND / OR. 
8679static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) { 8680 Opc = Op.getOpcode(); 8681 if (Opc != ISD::OR && Opc != ISD::AND) 8682 return false; 8683 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC && 8684 Op.getOperand(0).hasOneUse() && 8685 Op.getOperand(1).getOpcode() == X86ISD::SETCC && 8686 Op.getOperand(1).hasOneUse()); 8687} 8688 8689// isXor1OfSetCC - Return true if node is an ISD::XOR of a X86ISD::SETCC and 8690// 1 and that the SETCC node has a single use. 8691static bool isXor1OfSetCC(SDValue Op) { 8692 if (Op.getOpcode() != ISD::XOR) 8693 return false; 8694 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 8695 if (N1C && N1C->getAPIntValue() == 1) { 8696 return Op.getOperand(0).getOpcode() == X86ISD::SETCC && 8697 Op.getOperand(0).hasOneUse(); 8698 } 8699 return false; 8700} 8701 8702SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { 8703 bool addTest = true; 8704 SDValue Chain = Op.getOperand(0); 8705 SDValue Cond = Op.getOperand(1); 8706 SDValue Dest = Op.getOperand(2); 8707 DebugLoc dl = Op.getDebugLoc(); 8708 SDValue CC; 8709 bool Inverted = false; 8710 8711 if (Cond.getOpcode() == ISD::SETCC) { 8712 // Check for setcc([su]{add,sub,mul}o == 0). 8713 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ && 8714 isa<ConstantSDNode>(Cond.getOperand(1)) && 8715 cast<ConstantSDNode>(Cond.getOperand(1))->isNullValue() && 8716 Cond.getOperand(0).getResNo() == 1 && 8717 (Cond.getOperand(0).getOpcode() == ISD::SADDO || 8718 Cond.getOperand(0).getOpcode() == ISD::UADDO || 8719 Cond.getOperand(0).getOpcode() == ISD::SSUBO || 8720 Cond.getOperand(0).getOpcode() == ISD::USUBO || 8721 Cond.getOperand(0).getOpcode() == ISD::SMULO || 8722 Cond.getOperand(0).getOpcode() == ISD::UMULO)) { 8723 Inverted = true; 8724 Cond = Cond.getOperand(0); 8725 } else { 8726 SDValue NewCond = LowerSETCC(Cond, DAG); 8727 if (NewCond.getNode()) 8728 Cond = NewCond; 8729 } 8730 } 8731#if 0 8732 // FIXME: LowerXALUO doesn't handle these!! 8733 else if (Cond.getOpcode() == X86ISD::ADD || 8734 Cond.getOpcode() == X86ISD::SUB || 8735 Cond.getOpcode() == X86ISD::SMUL || 8736 Cond.getOpcode() == X86ISD::UMUL) 8737 Cond = LowerXALUO(Cond, DAG); 8738#endif 8739 8740 // Look pass (and (setcc_carry (cmp ...)), 1). 8741 if (Cond.getOpcode() == ISD::AND && 8742 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) { 8743 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 8744 if (C && C->getAPIntValue() == 1) 8745 Cond = Cond.getOperand(0); 8746 } 8747 8748 // If condition flag is set by a X86ISD::CMP, then use it as the condition 8749 // setting operand in place of the X86ISD::SETCC. 8750 unsigned CondOpcode = Cond.getOpcode(); 8751 if (CondOpcode == X86ISD::SETCC || 8752 CondOpcode == X86ISD::SETCC_CARRY) { 8753 CC = Cond.getOperand(0); 8754 8755 SDValue Cmp = Cond.getOperand(1); 8756 unsigned Opc = Cmp.getOpcode(); 8757 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp?? 8758 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) { 8759 Cond = Cmp; 8760 addTest = false; 8761 } else { 8762 switch (cast<ConstantSDNode>(CC)->getZExtValue()) { 8763 default: break; 8764 case X86::COND_O: 8765 case X86::COND_B: 8766 // These can only come from an arithmetic instruction with overflow, 8767 // e.g. SADDO, UADDO. 
8768 Cond = Cond.getNode()->getOperand(1); 8769 addTest = false; 8770 break; 8771 } 8772 } 8773 } 8774 CondOpcode = Cond.getOpcode(); 8775 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO || 8776 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO || 8777 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) && 8778 Cond.getOperand(0).getValueType() != MVT::i8)) { 8779 SDValue LHS = Cond.getOperand(0); 8780 SDValue RHS = Cond.getOperand(1); 8781 unsigned X86Opcode; 8782 unsigned X86Cond; 8783 SDVTList VTs; 8784 switch (CondOpcode) { 8785 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break; 8786 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break; 8787 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break; 8788 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break; 8789 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break; 8790 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break; 8791 default: llvm_unreachable("unexpected overflowing operator"); 8792 } 8793 if (Inverted) 8794 X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond); 8795 if (CondOpcode == ISD::UMULO) 8796 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(), 8797 MVT::i32); 8798 else 8799 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32); 8800 8801 SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS); 8802 8803 if (CondOpcode == ISD::UMULO) 8804 Cond = X86Op.getValue(2); 8805 else 8806 Cond = X86Op.getValue(1); 8807 8808 CC = DAG.getConstant(X86Cond, MVT::i8); 8809 addTest = false; 8810 } else { 8811 unsigned CondOpc; 8812 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) { 8813 SDValue Cmp = Cond.getOperand(0).getOperand(1); 8814 if (CondOpc == ISD::OR) { 8815 // Also, recognize the pattern generated by an FCMP_UNE. We can emit 8816 // two branches instead of an explicit OR instruction with a 8817 // separate test. 8818 if (Cmp == Cond.getOperand(1).getOperand(1) && 8819 isX86LogicalCmp(Cmp)) { 8820 CC = Cond.getOperand(0).getOperand(0); 8821 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 8822 Chain, Dest, CC, Cmp); 8823 CC = Cond.getOperand(1).getOperand(0); 8824 Cond = Cmp; 8825 addTest = false; 8826 } 8827 } else { // ISD::AND 8828 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit 8829 // two branches instead of an explicit AND instruction with a 8830 // separate test. However, we only do this if this block doesn't 8831 // have a fall-through edge, because this requires an explicit 8832 // jmp when the condition is false. 8833 if (Cmp == Cond.getOperand(1).getOperand(1) && 8834 isX86LogicalCmp(Cmp) && 8835 Op.getNode()->hasOneUse()) { 8836 X86::CondCode CCode = 8837 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0); 8838 CCode = X86::GetOppositeBranchCondition(CCode); 8839 CC = DAG.getConstant(CCode, MVT::i8); 8840 SDNode *User = *Op.getNode()->use_begin(); 8841 // Look for an unconditional branch following this conditional branch. 8842 // We need this because we need to reverse the successors in order 8843 // to implement FCMP_OEQ. 
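        // (The net effect for OEQ is "jne false_bb; jp false_bb; jmp true_bb":
        // both inverted conditions branch to the false block, and the
        // following unconditional branch is retargeted at the true block.)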
8844 if (User->getOpcode() == ISD::BR) { 8845 SDValue FalseBB = User->getOperand(1); 8846 SDNode *NewBR = 8847 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest); 8848 assert(NewBR == User); 8849 (void)NewBR; 8850 Dest = FalseBB; 8851 8852 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 8853 Chain, Dest, CC, Cmp); 8854 X86::CondCode CCode = 8855 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0); 8856 CCode = X86::GetOppositeBranchCondition(CCode); 8857 CC = DAG.getConstant(CCode, MVT::i8); 8858 Cond = Cmp; 8859 addTest = false; 8860 } 8861 } 8862 } 8863 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) { 8864 // Recognize for xorb (setcc), 1 patterns. The xor inverts the condition. 8865 // It should be transformed during dag combiner except when the condition 8866 // is set by a arithmetics with overflow node. 8867 X86::CondCode CCode = 8868 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0); 8869 CCode = X86::GetOppositeBranchCondition(CCode); 8870 CC = DAG.getConstant(CCode, MVT::i8); 8871 Cond = Cond.getOperand(0).getOperand(1); 8872 addTest = false; 8873 } else if (Cond.getOpcode() == ISD::SETCC && 8874 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) { 8875 // For FCMP_OEQ, we can emit 8876 // two branches instead of an explicit AND instruction with a 8877 // separate test. However, we only do this if this block doesn't 8878 // have a fall-through edge, because this requires an explicit 8879 // jmp when the condition is false. 8880 if (Op.getNode()->hasOneUse()) { 8881 SDNode *User = *Op.getNode()->use_begin(); 8882 // Look for an unconditional branch following this conditional branch. 8883 // We need this because we need to reverse the successors in order 8884 // to implement FCMP_OEQ. 8885 if (User->getOpcode() == ISD::BR) { 8886 SDValue FalseBB = User->getOperand(1); 8887 SDNode *NewBR = 8888 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest); 8889 assert(NewBR == User); 8890 (void)NewBR; 8891 Dest = FalseBB; 8892 8893 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32, 8894 Cond.getOperand(0), Cond.getOperand(1)); 8895 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 8896 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 8897 Chain, Dest, CC, Cmp); 8898 CC = DAG.getConstant(X86::COND_P, MVT::i8); 8899 Cond = Cmp; 8900 addTest = false; 8901 } 8902 } 8903 } else if (Cond.getOpcode() == ISD::SETCC && 8904 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) { 8905 // For FCMP_UNE, we can emit 8906 // two branches instead of an explicit AND instruction with a 8907 // separate test. However, we only do this if this block doesn't 8908 // have a fall-through edge, because this requires an explicit 8909 // jmp when the condition is false. 8910 if (Op.getNode()->hasOneUse()) { 8911 SDNode *User = *Op.getNode()->use_begin(); 8912 // Look for an unconditional branch following this conditional branch. 8913 // We need this because we need to reverse the successors in order 8914 // to implement FCMP_UNE. 
8915 if (User->getOpcode() == ISD::BR) { 8916 SDValue FalseBB = User->getOperand(1); 8917 SDNode *NewBR = 8918 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest); 8919 assert(NewBR == User); 8920 (void)NewBR; 8921 8922 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32, 8923 Cond.getOperand(0), Cond.getOperand(1)); 8924 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 8925 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 8926 Chain, Dest, CC, Cmp); 8927 CC = DAG.getConstant(X86::COND_NP, MVT::i8); 8928 Cond = Cmp; 8929 addTest = false; 8930 Dest = FalseBB; 8931 } 8932 } 8933 } 8934 } 8935 8936 if (addTest) { 8937 // Look pass the truncate. 8938 if (Cond.getOpcode() == ISD::TRUNCATE) 8939 Cond = Cond.getOperand(0); 8940 8941 // We know the result of AND is compared against zero. Try to match 8942 // it to BT. 8943 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) { 8944 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG); 8945 if (NewSetCC.getNode()) { 8946 CC = NewSetCC.getOperand(0); 8947 Cond = NewSetCC.getOperand(1); 8948 addTest = false; 8949 } 8950 } 8951 } 8952 8953 if (addTest) { 8954 CC = DAG.getConstant(X86::COND_NE, MVT::i8); 8955 Cond = EmitTest(Cond, X86::COND_NE, DAG); 8956 } 8957 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), 8958 Chain, Dest, CC, Cond); 8959} 8960 8961 8962// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets. 8963// Calls to _alloca is needed to probe the stack when allocating more than 4k 8964// bytes in one go. Touching the stack at 4K increments is necessary to ensure 8965// that the guard pages used by the OS virtual memory manager are allocated in 8966// correct sequence. 8967SDValue 8968X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, 8969 SelectionDAG &DAG) const { 8970 assert((Subtarget->isTargetCygMing() || Subtarget->isTargetWindows() || 8971 getTargetMachine().Options.EnableSegmentedStacks) && 8972 "This should be used only on Windows targets or when segmented stacks " 8973 "are being used"); 8974 assert(!Subtarget->isTargetEnvMacho() && "Not implemented"); 8975 DebugLoc dl = Op.getDebugLoc(); 8976 8977 // Get the inputs. 8978 SDValue Chain = Op.getOperand(0); 8979 SDValue Size = Op.getOperand(1); 8980 // FIXME: Ensure alignment here 8981 8982 bool Is64Bit = Subtarget->is64Bit(); 8983 EVT SPTy = Is64Bit ? MVT::i64 : MVT::i32; 8984 8985 if (getTargetMachine().Options.EnableSegmentedStacks) { 8986 MachineFunction &MF = DAG.getMachineFunction(); 8987 MachineRegisterInfo &MRI = MF.getRegInfo(); 8988 8989 if (Is64Bit) { 8990 // The 64 bit implementation of segmented stacks needs to clobber both r10 8991 // r11. This makes it impossible to use it along with nested parameters. 8992 const Function *F = MF.getFunction(); 8993 8994 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end(); 8995 I != E; I++) 8996 if (I->hasNestAttr()) 8997 report_fatal_error("Cannot use segmented stacks with functions that " 8998 "have nested arguments."); 8999 } 9000 9001 const TargetRegisterClass *AddrRegClass = 9002 getRegClassFor(Subtarget->is64Bit() ? MVT::i64:MVT::i32); 9003 unsigned Vreg = MRI.createVirtualRegister(AddrRegClass); 9004 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size); 9005 SDValue Value = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain, 9006 DAG.getRegister(Vreg, SPTy)); 9007 SDValue Ops1[2] = { Value, Chain }; 9008 return DAG.getMergeValues(Ops1, 2, dl); 9009 } else { 9010 SDValue Flag; 9011 unsigned Reg = (Subtarget->is64Bit() ? 
X86::RAX : X86::EAX); 9012 9013 Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag); 9014 Flag = Chain.getValue(1); 9015 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 9016 9017 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag); 9018 Flag = Chain.getValue(1); 9019 9020 Chain = DAG.getCopyFromReg(Chain, dl, X86StackPtr, SPTy).getValue(1); 9021 9022 SDValue Ops1[2] = { Chain.getValue(0), Chain }; 9023 return DAG.getMergeValues(Ops1, 2, dl); 9024 } 9025} 9026 9027SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { 9028 MachineFunction &MF = DAG.getMachineFunction(); 9029 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 9030 9031 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 9032 DebugLoc DL = Op.getDebugLoc(); 9033 9034 if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) { 9035 // vastart just stores the address of the VarArgsFrameIndex slot into the 9036 // memory location argument. 9037 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 9038 getPointerTy()); 9039 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1), 9040 MachinePointerInfo(SV), false, false, 0); 9041 } 9042 9043 // __va_list_tag: 9044 // gp_offset (0 - 6 * 8) 9045 // fp_offset (48 - 48 + 8 * 16) 9046 // overflow_arg_area (point to parameters coming in memory). 9047 // reg_save_area 9048 SmallVector<SDValue, 8> MemOps; 9049 SDValue FIN = Op.getOperand(1); 9050 // Store gp_offset 9051 SDValue Store = DAG.getStore(Op.getOperand(0), DL, 9052 DAG.getConstant(FuncInfo->getVarArgsGPOffset(), 9053 MVT::i32), 9054 FIN, MachinePointerInfo(SV), false, false, 0); 9055 MemOps.push_back(Store); 9056 9057 // Store fp_offset 9058 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), 9059 FIN, DAG.getIntPtrConstant(4)); 9060 Store = DAG.getStore(Op.getOperand(0), DL, 9061 DAG.getConstant(FuncInfo->getVarArgsFPOffset(), 9062 MVT::i32), 9063 FIN, MachinePointerInfo(SV, 4), false, false, 0); 9064 MemOps.push_back(Store); 9065 9066 // Store ptr to overflow_arg_area 9067 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), 9068 FIN, DAG.getIntPtrConstant(4)); 9069 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 9070 getPointerTy()); 9071 Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, 9072 MachinePointerInfo(SV, 8), 9073 false, false, 0); 9074 MemOps.push_back(Store); 9075 9076 // Store ptr to reg_save_area. 
9077 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), 9078 FIN, DAG.getIntPtrConstant(8)); 9079 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), 9080 getPointerTy()); 9081 Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN, 9082 MachinePointerInfo(SV, 16), false, false, 0); 9083 MemOps.push_back(Store); 9084 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, 9085 &MemOps[0], MemOps.size()); 9086} 9087 9088SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { 9089 assert(Subtarget->is64Bit() && 9090 "LowerVAARG only handles 64-bit va_arg!"); 9091 assert((Subtarget->isTargetLinux() || 9092 Subtarget->isTargetDarwin()) && 9093 "Unhandled target in LowerVAARG"); 9094 assert(Op.getNode()->getNumOperands() == 4); 9095 SDValue Chain = Op.getOperand(0); 9096 SDValue SrcPtr = Op.getOperand(1); 9097 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 9098 unsigned Align = Op.getConstantOperandVal(3); 9099 DebugLoc dl = Op.getDebugLoc(); 9100 9101 EVT ArgVT = Op.getNode()->getValueType(0); 9102 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 9103 uint32_t ArgSize = getTargetData()->getTypeAllocSize(ArgTy); 9104 uint8_t ArgMode; 9105 9106 // Decide which area this value should be read from. 9107 // TODO: Implement the AMD64 ABI in its entirety. This simple 9108 // selection mechanism works only for the basic types. 9109 if (ArgVT == MVT::f80) { 9110 llvm_unreachable("va_arg for f80 not yet implemented"); 9111 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) { 9112 ArgMode = 2; // Argument passed in XMM register. Use fp_offset. 9113 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) { 9114 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset. 9115 } else { 9116 llvm_unreachable("Unhandled argument type in LowerVAARG"); 9117 } 9118 9119 if (ArgMode == 2) { 9120 // Sanity Check: Make sure using fp_offset makes sense. 9121 assert(!getTargetMachine().Options.UseSoftFloat && 9122 !(DAG.getMachineFunction() 9123 .getFunction()->hasFnAttr(Attribute::NoImplicitFloat)) && 9124 Subtarget->hasSSE1()); 9125 } 9126 9127 // Insert VAARG_64 node into the DAG 9128 // VAARG_64 returns two values: Variable Argument Address, Chain 9129 SmallVector<SDValue, 11> InstOps; 9130 InstOps.push_back(Chain); 9131 InstOps.push_back(SrcPtr); 9132 InstOps.push_back(DAG.getConstant(ArgSize, MVT::i32)); 9133 InstOps.push_back(DAG.getConstant(ArgMode, MVT::i8)); 9134 InstOps.push_back(DAG.getConstant(Align, MVT::i32)); 9135 SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other); 9136 SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl, 9137 VTs, &InstOps[0], InstOps.size(), 9138 MVT::i64, 9139 MachinePointerInfo(SV), 9140 /*Align=*/0, 9141 /*Volatile=*/false, 9142 /*ReadMem=*/true, 9143 /*WriteMem=*/true); 9144 Chain = VAARG.getValue(1); 9145 9146 // Load the next argument and return it 9147 return DAG.getLoad(ArgVT, dl, 9148 Chain, 9149 VAARG, 9150 MachinePointerInfo(), 9151 false, false, false, 0); 9152} 9153 9154SDValue X86TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const { 9155 // X86-64 va_list is a struct { i32, i32, i8*, i8* }. 
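  // That is { gp_offset, fp_offset, overflow_arg_area, reg_save_area },
  // 4 + 4 + 8 + 8 = 24 bytes on x86-64, which is why the memcpy below copies
  // 24 bytes with 8-byte alignment.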
9156 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!"); 9157 SDValue Chain = Op.getOperand(0); 9158 SDValue DstPtr = Op.getOperand(1); 9159 SDValue SrcPtr = Op.getOperand(2); 9160 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); 9161 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 9162 DebugLoc DL = Op.getDebugLoc(); 9163 9164 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, 9165 DAG.getIntPtrConstant(24), 8, /*isVolatile*/false, 9166 false, 9167 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV)); 9168} 9169 9170SDValue 9171X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const { 9172 DebugLoc dl = Op.getDebugLoc(); 9173 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 9174 switch (IntNo) { 9175 default: return SDValue(); // Don't custom lower most intrinsics. 9176 // Comparison intrinsics. 9177 case Intrinsic::x86_sse_comieq_ss: 9178 case Intrinsic::x86_sse_comilt_ss: 9179 case Intrinsic::x86_sse_comile_ss: 9180 case Intrinsic::x86_sse_comigt_ss: 9181 case Intrinsic::x86_sse_comige_ss: 9182 case Intrinsic::x86_sse_comineq_ss: 9183 case Intrinsic::x86_sse_ucomieq_ss: 9184 case Intrinsic::x86_sse_ucomilt_ss: 9185 case Intrinsic::x86_sse_ucomile_ss: 9186 case Intrinsic::x86_sse_ucomigt_ss: 9187 case Intrinsic::x86_sse_ucomige_ss: 9188 case Intrinsic::x86_sse_ucomineq_ss: 9189 case Intrinsic::x86_sse2_comieq_sd: 9190 case Intrinsic::x86_sse2_comilt_sd: 9191 case Intrinsic::x86_sse2_comile_sd: 9192 case Intrinsic::x86_sse2_comigt_sd: 9193 case Intrinsic::x86_sse2_comige_sd: 9194 case Intrinsic::x86_sse2_comineq_sd: 9195 case Intrinsic::x86_sse2_ucomieq_sd: 9196 case Intrinsic::x86_sse2_ucomilt_sd: 9197 case Intrinsic::x86_sse2_ucomile_sd: 9198 case Intrinsic::x86_sse2_ucomigt_sd: 9199 case Intrinsic::x86_sse2_ucomige_sd: 9200 case Intrinsic::x86_sse2_ucomineq_sd: { 9201 unsigned Opc = 0; 9202 ISD::CondCode CC = ISD::SETCC_INVALID; 9203 switch (IntNo) { 9204 default: break; 9205 case Intrinsic::x86_sse_comieq_ss: 9206 case Intrinsic::x86_sse2_comieq_sd: 9207 Opc = X86ISD::COMI; 9208 CC = ISD::SETEQ; 9209 break; 9210 case Intrinsic::x86_sse_comilt_ss: 9211 case Intrinsic::x86_sse2_comilt_sd: 9212 Opc = X86ISD::COMI; 9213 CC = ISD::SETLT; 9214 break; 9215 case Intrinsic::x86_sse_comile_ss: 9216 case Intrinsic::x86_sse2_comile_sd: 9217 Opc = X86ISD::COMI; 9218 CC = ISD::SETLE; 9219 break; 9220 case Intrinsic::x86_sse_comigt_ss: 9221 case Intrinsic::x86_sse2_comigt_sd: 9222 Opc = X86ISD::COMI; 9223 CC = ISD::SETGT; 9224 break; 9225 case Intrinsic::x86_sse_comige_ss: 9226 case Intrinsic::x86_sse2_comige_sd: 9227 Opc = X86ISD::COMI; 9228 CC = ISD::SETGE; 9229 break; 9230 case Intrinsic::x86_sse_comineq_ss: 9231 case Intrinsic::x86_sse2_comineq_sd: 9232 Opc = X86ISD::COMI; 9233 CC = ISD::SETNE; 9234 break; 9235 case Intrinsic::x86_sse_ucomieq_ss: 9236 case Intrinsic::x86_sse2_ucomieq_sd: 9237 Opc = X86ISD::UCOMI; 9238 CC = ISD::SETEQ; 9239 break; 9240 case Intrinsic::x86_sse_ucomilt_ss: 9241 case Intrinsic::x86_sse2_ucomilt_sd: 9242 Opc = X86ISD::UCOMI; 9243 CC = ISD::SETLT; 9244 break; 9245 case Intrinsic::x86_sse_ucomile_ss: 9246 case Intrinsic::x86_sse2_ucomile_sd: 9247 Opc = X86ISD::UCOMI; 9248 CC = ISD::SETLE; 9249 break; 9250 case Intrinsic::x86_sse_ucomigt_ss: 9251 case Intrinsic::x86_sse2_ucomigt_sd: 9252 Opc = X86ISD::UCOMI; 9253 CC = ISD::SETGT; 9254 break; 9255 case Intrinsic::x86_sse_ucomige_ss: 9256 case Intrinsic::x86_sse2_ucomige_sd: 9257 Opc = X86ISD::UCOMI; 9258 
CC = ISD::SETGE; 9259 break; 9260 case Intrinsic::x86_sse_ucomineq_ss: 9261 case Intrinsic::x86_sse2_ucomineq_sd: 9262 Opc = X86ISD::UCOMI; 9263 CC = ISD::SETNE; 9264 break; 9265 } 9266 9267 SDValue LHS = Op.getOperand(1); 9268 SDValue RHS = Op.getOperand(2); 9269 unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG); 9270 assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!"); 9271 SDValue Cond = DAG.getNode(Opc, dl, MVT::i32, LHS, RHS); 9272 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, 9273 DAG.getConstant(X86CC, MVT::i8), Cond); 9274 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); 9275 } 9276 // Arithmetic intrinsics. 9277 case Intrinsic::x86_sse3_hadd_ps: 9278 case Intrinsic::x86_sse3_hadd_pd: 9279 case Intrinsic::x86_avx_hadd_ps_256: 9280 case Intrinsic::x86_avx_hadd_pd_256: 9281 return DAG.getNode(X86ISD::FHADD, dl, Op.getValueType(), 9282 Op.getOperand(1), Op.getOperand(2)); 9283 case Intrinsic::x86_sse3_hsub_ps: 9284 case Intrinsic::x86_sse3_hsub_pd: 9285 case Intrinsic::x86_avx_hsub_ps_256: 9286 case Intrinsic::x86_avx_hsub_pd_256: 9287 return DAG.getNode(X86ISD::FHSUB, dl, Op.getValueType(), 9288 Op.getOperand(1), Op.getOperand(2)); 9289 case Intrinsic::x86_avx2_psllv_d: 9290 case Intrinsic::x86_avx2_psllv_q: 9291 case Intrinsic::x86_avx2_psllv_d_256: 9292 case Intrinsic::x86_avx2_psllv_q_256: 9293 return DAG.getNode(ISD::SHL, dl, Op.getValueType(), 9294 Op.getOperand(1), Op.getOperand(2)); 9295 case Intrinsic::x86_avx2_psrlv_d: 9296 case Intrinsic::x86_avx2_psrlv_q: 9297 case Intrinsic::x86_avx2_psrlv_d_256: 9298 case Intrinsic::x86_avx2_psrlv_q_256: 9299 return DAG.getNode(ISD::SRL, dl, Op.getValueType(), 9300 Op.getOperand(1), Op.getOperand(2)); 9301 case Intrinsic::x86_avx2_psrav_d: 9302 case Intrinsic::x86_avx2_psrav_d_256: 9303 return DAG.getNode(ISD::SRA, dl, Op.getValueType(), 9304 Op.getOperand(1), Op.getOperand(2)); 9305 9306 // ptest and testp intrinsics. These intrinsics are defined to return an 9307 // integer value, not just to set flags, so lower them to the ptest or testp 9308 // pattern plus a setcc on the result.
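  // Roughly, for example:
  //   i32 @llvm.x86.sse41.ptestz(a, b)
  //     ==> (zext i32 (setcc COND_E, (X86ISD::PTEST a, b)))
  // The vtest* forms use X86ISD::TESTP instead of PTEST.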
9309 case Intrinsic::x86_sse41_ptestz: 9310 case Intrinsic::x86_sse41_ptestc: 9311 case Intrinsic::x86_sse41_ptestnzc: 9312 case Intrinsic::x86_avx_ptestz_256: 9313 case Intrinsic::x86_avx_ptestc_256: 9314 case Intrinsic::x86_avx_ptestnzc_256: 9315 case Intrinsic::x86_avx_vtestz_ps: 9316 case Intrinsic::x86_avx_vtestc_ps: 9317 case Intrinsic::x86_avx_vtestnzc_ps: 9318 case Intrinsic::x86_avx_vtestz_pd: 9319 case Intrinsic::x86_avx_vtestc_pd: 9320 case Intrinsic::x86_avx_vtestnzc_pd: 9321 case Intrinsic::x86_avx_vtestz_ps_256: 9322 case Intrinsic::x86_avx_vtestc_ps_256: 9323 case Intrinsic::x86_avx_vtestnzc_ps_256: 9324 case Intrinsic::x86_avx_vtestz_pd_256: 9325 case Intrinsic::x86_avx_vtestc_pd_256: 9326 case Intrinsic::x86_avx_vtestnzc_pd_256: { 9327 bool IsTestPacked = false; 9328 unsigned X86CC = 0; 9329 switch (IntNo) { 9330 default: llvm_unreachable("Bad fallthrough in Intrinsic lowering."); 9331 case Intrinsic::x86_avx_vtestz_ps: 9332 case Intrinsic::x86_avx_vtestz_pd: 9333 case Intrinsic::x86_avx_vtestz_ps_256: 9334 case Intrinsic::x86_avx_vtestz_pd_256: 9335 IsTestPacked = true; // Fallthrough 9336 case Intrinsic::x86_sse41_ptestz: 9337 case Intrinsic::x86_avx_ptestz_256: 9338 // ZF = 1 9339 X86CC = X86::COND_E; 9340 break; 9341 case Intrinsic::x86_avx_vtestc_ps: 9342 case Intrinsic::x86_avx_vtestc_pd: 9343 case Intrinsic::x86_avx_vtestc_ps_256: 9344 case Intrinsic::x86_avx_vtestc_pd_256: 9345 IsTestPacked = true; // Fallthrough 9346 case Intrinsic::x86_sse41_ptestc: 9347 case Intrinsic::x86_avx_ptestc_256: 9348 // CF = 1 9349 X86CC = X86::COND_B; 9350 break; 9351 case Intrinsic::x86_avx_vtestnzc_ps: 9352 case Intrinsic::x86_avx_vtestnzc_pd: 9353 case Intrinsic::x86_avx_vtestnzc_ps_256: 9354 case Intrinsic::x86_avx_vtestnzc_pd_256: 9355 IsTestPacked = true; // Fallthrough 9356 case Intrinsic::x86_sse41_ptestnzc: 9357 case Intrinsic::x86_avx_ptestnzc_256: 9358 // ZF and CF = 0 9359 X86CC = X86::COND_A; 9360 break; 9361 } 9362 9363 SDValue LHS = Op.getOperand(1); 9364 SDValue RHS = Op.getOperand(2); 9365 unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST; 9366 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS); 9367 SDValue CC = DAG.getConstant(X86CC, MVT::i8); 9368 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test); 9369 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); 9370 } 9371 9372 // Fix vector shift instructions where the last operand is a non-immediate 9373 // i32 value. 
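  // These shift-by-immediate intrinsics take an i32 amount; when the amount
  // is not a constant they are rewritten below to the corresponding
  // shift-by-vector intrinsic, with the amount placed in the low element of a
  // v4i32 (v2i32 for MMX) vector and the remaining elements zero or undef.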
9374 case Intrinsic::x86_avx2_pslli_w: 9375 case Intrinsic::x86_avx2_pslli_d: 9376 case Intrinsic::x86_avx2_pslli_q: 9377 case Intrinsic::x86_avx2_psrli_w: 9378 case Intrinsic::x86_avx2_psrli_d: 9379 case Intrinsic::x86_avx2_psrli_q: 9380 case Intrinsic::x86_avx2_psrai_w: 9381 case Intrinsic::x86_avx2_psrai_d: 9382 case Intrinsic::x86_sse2_pslli_w: 9383 case Intrinsic::x86_sse2_pslli_d: 9384 case Intrinsic::x86_sse2_pslli_q: 9385 case Intrinsic::x86_sse2_psrli_w: 9386 case Intrinsic::x86_sse2_psrli_d: 9387 case Intrinsic::x86_sse2_psrli_q: 9388 case Intrinsic::x86_sse2_psrai_w: 9389 case Intrinsic::x86_sse2_psrai_d: 9390 case Intrinsic::x86_mmx_pslli_w: 9391 case Intrinsic::x86_mmx_pslli_d: 9392 case Intrinsic::x86_mmx_pslli_q: 9393 case Intrinsic::x86_mmx_psrli_w: 9394 case Intrinsic::x86_mmx_psrli_d: 9395 case Intrinsic::x86_mmx_psrli_q: 9396 case Intrinsic::x86_mmx_psrai_w: 9397 case Intrinsic::x86_mmx_psrai_d: { 9398 SDValue ShAmt = Op.getOperand(2); 9399 if (isa<ConstantSDNode>(ShAmt)) 9400 return SDValue(); 9401 9402 unsigned NewIntNo = 0; 9403 EVT ShAmtVT = MVT::v4i32; 9404 switch (IntNo) { 9405 case Intrinsic::x86_sse2_pslli_w: 9406 NewIntNo = Intrinsic::x86_sse2_psll_w; 9407 break; 9408 case Intrinsic::x86_sse2_pslli_d: 9409 NewIntNo = Intrinsic::x86_sse2_psll_d; 9410 break; 9411 case Intrinsic::x86_sse2_pslli_q: 9412 NewIntNo = Intrinsic::x86_sse2_psll_q; 9413 break; 9414 case Intrinsic::x86_sse2_psrli_w: 9415 NewIntNo = Intrinsic::x86_sse2_psrl_w; 9416 break; 9417 case Intrinsic::x86_sse2_psrli_d: 9418 NewIntNo = Intrinsic::x86_sse2_psrl_d; 9419 break; 9420 case Intrinsic::x86_sse2_psrli_q: 9421 NewIntNo = Intrinsic::x86_sse2_psrl_q; 9422 break; 9423 case Intrinsic::x86_sse2_psrai_w: 9424 NewIntNo = Intrinsic::x86_sse2_psra_w; 9425 break; 9426 case Intrinsic::x86_sse2_psrai_d: 9427 NewIntNo = Intrinsic::x86_sse2_psra_d; 9428 break; 9429 case Intrinsic::x86_avx2_pslli_w: 9430 NewIntNo = Intrinsic::x86_avx2_psll_w; 9431 break; 9432 case Intrinsic::x86_avx2_pslli_d: 9433 NewIntNo = Intrinsic::x86_avx2_psll_d; 9434 break; 9435 case Intrinsic::x86_avx2_pslli_q: 9436 NewIntNo = Intrinsic::x86_avx2_psll_q; 9437 break; 9438 case Intrinsic::x86_avx2_psrli_w: 9439 NewIntNo = Intrinsic::x86_avx2_psrl_w; 9440 break; 9441 case Intrinsic::x86_avx2_psrli_d: 9442 NewIntNo = Intrinsic::x86_avx2_psrl_d; 9443 break; 9444 case Intrinsic::x86_avx2_psrli_q: 9445 NewIntNo = Intrinsic::x86_avx2_psrl_q; 9446 break; 9447 case Intrinsic::x86_avx2_psrai_w: 9448 NewIntNo = Intrinsic::x86_avx2_psra_w; 9449 break; 9450 case Intrinsic::x86_avx2_psrai_d: 9451 NewIntNo = Intrinsic::x86_avx2_psra_d; 9452 break; 9453 default: { 9454 ShAmtVT = MVT::v2i32; 9455 switch (IntNo) { 9456 case Intrinsic::x86_mmx_pslli_w: 9457 NewIntNo = Intrinsic::x86_mmx_psll_w; 9458 break; 9459 case Intrinsic::x86_mmx_pslli_d: 9460 NewIntNo = Intrinsic::x86_mmx_psll_d; 9461 break; 9462 case Intrinsic::x86_mmx_pslli_q: 9463 NewIntNo = Intrinsic::x86_mmx_psll_q; 9464 break; 9465 case Intrinsic::x86_mmx_psrli_w: 9466 NewIntNo = Intrinsic::x86_mmx_psrl_w; 9467 break; 9468 case Intrinsic::x86_mmx_psrli_d: 9469 NewIntNo = Intrinsic::x86_mmx_psrl_d; 9470 break; 9471 case Intrinsic::x86_mmx_psrli_q: 9472 NewIntNo = Intrinsic::x86_mmx_psrl_q; 9473 break; 9474 case Intrinsic::x86_mmx_psrai_w: 9475 NewIntNo = Intrinsic::x86_mmx_psra_w; 9476 break; 9477 case Intrinsic::x86_mmx_psrai_d: 9478 NewIntNo = Intrinsic::x86_mmx_psra_d; 9479 break; 9480 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
9481 } 9482 break; 9483 } 9484 } 9485 9486 // The vector shift intrinsics with scalars uses 32b shift amounts but 9487 // the sse2/mmx shift instructions reads 64 bits. Set the upper 32 bits 9488 // to be zero. 9489 SDValue ShOps[4]; 9490 ShOps[0] = ShAmt; 9491 ShOps[1] = DAG.getConstant(0, MVT::i32); 9492 if (ShAmtVT == MVT::v4i32) { 9493 ShOps[2] = DAG.getUNDEF(MVT::i32); 9494 ShOps[3] = DAG.getUNDEF(MVT::i32); 9495 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, ShAmtVT, &ShOps[0], 4); 9496 } else { 9497 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, ShAmtVT, &ShOps[0], 2); 9498// FIXME this must be lowered to get rid of the invalid type. 9499 } 9500 9501 EVT VT = Op.getValueType(); 9502 ShAmt = DAG.getNode(ISD::BITCAST, dl, VT, ShAmt); 9503 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 9504 DAG.getConstant(NewIntNo, MVT::i32), 9505 Op.getOperand(1), ShAmt); 9506 } 9507 } 9508} 9509 9510SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op, 9511 SelectionDAG &DAG) const { 9512 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 9513 MFI->setReturnAddressIsTaken(true); 9514 9515 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 9516 DebugLoc dl = Op.getDebugLoc(); 9517 9518 if (Depth > 0) { 9519 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 9520 SDValue Offset = 9521 DAG.getConstant(TD->getPointerSize(), 9522 Subtarget->is64Bit() ? MVT::i64 : MVT::i32); 9523 return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), 9524 DAG.getNode(ISD::ADD, dl, getPointerTy(), 9525 FrameAddr, Offset), 9526 MachinePointerInfo(), false, false, false, 0); 9527 } 9528 9529 // Just load the return address. 9530 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG); 9531 return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), 9532 RetAddrFI, MachinePointerInfo(), false, false, false, 0); 9533} 9534 9535SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { 9536 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 9537 MFI->setFrameAddressIsTaken(true); 9538 9539 EVT VT = Op.getValueType(); 9540 DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful 9541 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 9542 unsigned FrameReg = Subtarget->is64Bit() ? X86::RBP : X86::EBP; 9543 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 9544 while (Depth--) 9545 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 9546 MachinePointerInfo(), 9547 false, false, false, 0); 9548 return FrameAddr; 9549} 9550 9551SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op, 9552 SelectionDAG &DAG) const { 9553 return DAG.getIntPtrConstant(2*TD->getPointerSize()); 9554} 9555 9556SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { 9557 MachineFunction &MF = DAG.getMachineFunction(); 9558 SDValue Chain = Op.getOperand(0); 9559 SDValue Offset = Op.getOperand(1); 9560 SDValue Handler = Op.getOperand(2); 9561 DebugLoc dl = Op.getDebugLoc(); 9562 9563 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, 9564 Subtarget->is64Bit() ? X86::RBP : X86::EBP, 9565 getPointerTy()); 9566 unsigned StoreAddrReg = (Subtarget->is64Bit() ? 
X86::RCX : X86::ECX); 9567 9568 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Frame, 9569 DAG.getIntPtrConstant(TD->getPointerSize())); 9570 StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StoreAddr, Offset); 9571 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(), 9572 false, false, 0); 9573 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr); 9574 MF.getRegInfo().addLiveOut(StoreAddrReg); 9575 9576 return DAG.getNode(X86ISD::EH_RETURN, dl, 9577 MVT::Other, 9578 Chain, DAG.getRegister(StoreAddrReg, getPointerTy())); 9579} 9580 9581SDValue X86TargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op, 9582 SelectionDAG &DAG) const { 9583 return Op.getOperand(0); 9584} 9585 9586SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, 9587 SelectionDAG &DAG) const { 9588 SDValue Root = Op.getOperand(0); 9589 SDValue Trmp = Op.getOperand(1); // trampoline 9590 SDValue FPtr = Op.getOperand(2); // nested function 9591 SDValue Nest = Op.getOperand(3); // 'nest' parameter value 9592 DebugLoc dl = Op.getDebugLoc(); 9593 9594 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 9595 9596 if (Subtarget->is64Bit()) { 9597 SDValue OutChains[6]; 9598 9599 // Large code-model. 9600 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode. 9601 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode. 9602 9603 const unsigned char N86R10 = X86_MC::getX86RegNum(X86::R10); 9604 const unsigned char N86R11 = X86_MC::getX86RegNum(X86::R11); 9605 9606 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix 9607 9608 // Load the pointer to the nested function into R11. 9609 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11 9610 SDValue Addr = Trmp; 9611 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), 9612 Addr, MachinePointerInfo(TrmpAddr), 9613 false, false, 0); 9614 9615 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 9616 DAG.getConstant(2, MVT::i64)); 9617 OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr, 9618 MachinePointerInfo(TrmpAddr, 2), 9619 false, false, 2); 9620 9621 // Load the 'nest' parameter value into R10. 9622 // R10 is specified in X86CallingConv.td 9623 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10 9624 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 9625 DAG.getConstant(10, MVT::i64)); 9626 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), 9627 Addr, MachinePointerInfo(TrmpAddr, 10), 9628 false, false, 0); 9629 9630 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 9631 DAG.getConstant(12, MVT::i64)); 9632 OutChains[3] = DAG.getStore(Root, dl, Nest, Addr, 9633 MachinePointerInfo(TrmpAddr, 12), 9634 false, false, 2); 9635 9636 // Jump to the nested function. 9637 OpCode = (JMP64r << 8) | REX_WB; // jmpq *... 
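    // For reference, the 64-bit trampoline written here is, by byte offset:
    //   0:  movabsq $<fptr>, %r11   (prefix/opcode at 0, imm64 at 2)
    //   10: movabsq $<nest>, %r10   (prefix/opcode at 10, imm64 at 12)
    //   20: jmpq    *%r11           (prefix/opcode at 20, ModRM at 22)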
9638 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 9639 DAG.getConstant(20, MVT::i64)); 9640 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16), 9641 Addr, MachinePointerInfo(TrmpAddr, 20), 9642 false, false, 0); 9643 9644 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11 9645 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 9646 DAG.getConstant(22, MVT::i64)); 9647 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr, 9648 MachinePointerInfo(TrmpAddr, 22), 9649 false, false, 0); 9650 9651 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 6); 9652 } else { 9653 const Function *Func = 9654 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue()); 9655 CallingConv::ID CC = Func->getCallingConv(); 9656 unsigned NestReg; 9657 9658 switch (CC) { 9659 default: 9660 llvm_unreachable("Unsupported calling convention"); 9661 case CallingConv::C: 9662 case CallingConv::X86_StdCall: { 9663 // Pass 'nest' parameter in ECX. 9664 // Must be kept in sync with X86CallingConv.td 9665 NestReg = X86::ECX; 9666 9667 // Check that ECX wasn't needed by an 'inreg' parameter. 9668 FunctionType *FTy = Func->getFunctionType(); 9669 const AttrListPtr &Attrs = Func->getAttributes(); 9670 9671 if (!Attrs.isEmpty() && !Func->isVarArg()) { 9672 unsigned InRegCount = 0; 9673 unsigned Idx = 1; 9674 9675 for (FunctionType::param_iterator I = FTy->param_begin(), 9676 E = FTy->param_end(); I != E; ++I, ++Idx) 9677 if (Attrs.paramHasAttr(Idx, Attribute::InReg)) 9678 // FIXME: should only count parameters that are lowered to integers. 9679 InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32; 9680 9681 if (InRegCount > 2) { 9682 report_fatal_error("Nest register in use - reduce number of inreg" 9683 " parameters!"); 9684 } 9685 } 9686 break; 9687 } 9688 case CallingConv::X86_FastCall: 9689 case CallingConv::X86_ThisCall: 9690 case CallingConv::Fast: 9691 // Pass 'nest' parameter in EAX. 9692 // Must be kept in sync with X86CallingConv.td 9693 NestReg = X86::EAX; 9694 break; 9695 } 9696 9697 SDValue OutChains[4]; 9698 SDValue Addr, Disp; 9699 9700 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 9701 DAG.getConstant(10, MVT::i32)); 9702 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr); 9703 9704 // This is storing the opcode for MOV32ri. 9705 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte. 9706 const unsigned char N86Reg = X86_MC::getX86RegNum(NestReg); 9707 OutChains[0] = DAG.getStore(Root, dl, 9708 DAG.getConstant(MOV32ri|N86Reg, MVT::i8), 9709 Trmp, MachinePointerInfo(TrmpAddr), 9710 false, false, 0); 9711 9712 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 9713 DAG.getConstant(1, MVT::i32)); 9714 OutChains[1] = DAG.getStore(Root, dl, Nest, Addr, 9715 MachinePointerInfo(TrmpAddr, 1), 9716 false, false, 1); 9717 9718 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode. 
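    // The resulting 10-byte 32-bit trampoline is, by byte offset:
    //   0: movl $<nest>, %ecx or %eax   (0xB8+reg at 0, imm32 at 1)
    //   5: jmp  <fptr>                  (0xE9 at 5, rel32 at 6, relative to
    //                                    the end of the trampoline, Trmp+10)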
9719 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 9720 DAG.getConstant(5, MVT::i32)); 9721 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr, 9722 MachinePointerInfo(TrmpAddr, 5), 9723 false, false, 1); 9724 9725 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 9726 DAG.getConstant(6, MVT::i32)); 9727 OutChains[3] = DAG.getStore(Root, dl, Disp, Addr, 9728 MachinePointerInfo(TrmpAddr, 6), 9729 false, false, 1); 9730 9731 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 4); 9732 } 9733} 9734 9735SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op, 9736 SelectionDAG &DAG) const { 9737 /* 9738 The rounding mode is in bits 11:10 of FPSR, and has the following 9739 settings: 9740 00 Round to nearest 9741 01 Round to -inf 9742 10 Round to +inf 9743 11 Round to 0 9744 9745 FLT_ROUNDS, on the other hand, expects the following: 9746 -1 Undefined 9747 0 Round to 0 9748 1 Round to nearest 9749 2 Round to +inf 9750 3 Round to -inf 9751 9752 To perform the conversion, we do: 9753 (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3) 9754 */ 9755 9756 MachineFunction &MF = DAG.getMachineFunction(); 9757 const TargetMachine &TM = MF.getTarget(); 9758 const TargetFrameLowering &TFI = *TM.getFrameLowering(); 9759 unsigned StackAlignment = TFI.getStackAlignment(); 9760 EVT VT = Op.getValueType(); 9761 DebugLoc DL = Op.getDebugLoc(); 9762 9763 // Save FP Control Word to stack slot 9764 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false); 9765 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); 9766 9767 9768 MachineMemOperand *MMO = 9769 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI), 9770 MachineMemOperand::MOStore, 2, 2); 9771 9772 SDValue Ops[] = { DAG.getEntryNode(), StackSlot }; 9773 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL, 9774 DAG.getVTList(MVT::Other), 9775 Ops, 2, MVT::i16, MMO); 9776 9777 // Load FP Control Word from stack slot 9778 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot, 9779 MachinePointerInfo(), false, false, false, 0); 9780 9781 // Transform as necessary 9782 SDValue CWD1 = 9783 DAG.getNode(ISD::SRL, DL, MVT::i16, 9784 DAG.getNode(ISD::AND, DL, MVT::i16, 9785 CWD, DAG.getConstant(0x800, MVT::i16)), 9786 DAG.getConstant(11, MVT::i8)); 9787 SDValue CWD2 = 9788 DAG.getNode(ISD::SRL, DL, MVT::i16, 9789 DAG.getNode(ISD::AND, DL, MVT::i16, 9790 CWD, DAG.getConstant(0x400, MVT::i16)), 9791 DAG.getConstant(9, MVT::i8)); 9792 9793 SDValue RetVal = 9794 DAG.getNode(ISD::AND, DL, MVT::i16, 9795 DAG.getNode(ISD::ADD, DL, MVT::i16, 9796 DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2), 9797 DAG.getConstant(1, MVT::i16)), 9798 DAG.getConstant(3, MVT::i16)); 9799 9800 9801 return DAG.getNode((VT.getSizeInBits() < 16 ? 9802 ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal); 9803} 9804 9805SDValue X86TargetLowering::LowerCTLZ(SDValue Op, SelectionDAG &DAG) const { 9806 EVT VT = Op.getValueType(); 9807 EVT OpVT = VT; 9808 unsigned NumBits = VT.getSizeInBits(); 9809 DebugLoc dl = Op.getDebugLoc(); 9810 9811 Op = Op.getOperand(0); 9812 if (VT == MVT::i8) { 9813 // Zero extend to i32 since there is not an i8 bsr. 9814 OpVT = MVT::i32; 9815 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op); 9816 } 9817 9818 // Issue a bsr (scan bits in reverse) which also sets EFLAGS. 9819 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 9820 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op); 9821 9822 // If src is zero (i.e. bsr sets ZF), returns NumBits. 
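  // (The CMOV below substitutes 2*NumBits-1 when the source was zero, so the
  // final xor with NumBits-1 produces NumBits, e.g. 63 ^ 31 == 32 for i32.)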
9823 SDValue Ops[] = { 9824 Op, 9825 DAG.getConstant(NumBits+NumBits-1, OpVT), 9826 DAG.getConstant(X86::COND_E, MVT::i8), 9827 Op.getValue(1) 9828 }; 9829 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops, array_lengthof(Ops)); 9830 9831 // Finally xor with NumBits-1. 9832 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT)); 9833 9834 if (VT == MVT::i8) 9835 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op); 9836 return Op; 9837} 9838 9839SDValue X86TargetLowering::LowerCTLZ_ZERO_UNDEF(SDValue Op, 9840 SelectionDAG &DAG) const { 9841 EVT VT = Op.getValueType(); 9842 EVT OpVT = VT; 9843 unsigned NumBits = VT.getSizeInBits(); 9844 DebugLoc dl = Op.getDebugLoc(); 9845 9846 Op = Op.getOperand(0); 9847 if (VT == MVT::i8) { 9848 // Zero extend to i32 since there is not an i8 bsr. 9849 OpVT = MVT::i32; 9850 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op); 9851 } 9852 9853 // Issue a bsr (scan bits in reverse). 9854 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32); 9855 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op); 9856 9857 // And xor with NumBits-1. 9858 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT)); 9859 9860 if (VT == MVT::i8) 9861 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op); 9862 return Op; 9863} 9864 9865SDValue X86TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) const { 9866 EVT VT = Op.getValueType(); 9867 unsigned NumBits = VT.getSizeInBits(); 9868 DebugLoc dl = Op.getDebugLoc(); 9869 Op = Op.getOperand(0); 9870 9871 // Issue a bsf (scan bits forward) which also sets EFLAGS. 9872 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 9873 Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op); 9874 9875 // If src is zero (i.e. bsf sets ZF), returns NumBits. 9876 SDValue Ops[] = { 9877 Op, 9878 DAG.getConstant(NumBits, VT), 9879 DAG.getConstant(X86::COND_E, MVT::i8), 9880 Op.getValue(1) 9881 }; 9882 return DAG.getNode(X86ISD::CMOV, dl, VT, Ops, array_lengthof(Ops)); 9883} 9884 9885// Lower256IntArith - Break a 256-bit integer operation into two new 128-bit 9886// ones, and then concatenate the result back. 
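// For example, an add of two v8i32 values becomes two v4i32 adds, one on each
// 128-bit half, whose results are rejoined with CONCAT_VECTORS.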
9887static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) { 9888 EVT VT = Op.getValueType(); 9889 9890 assert(VT.getSizeInBits() == 256 && VT.isInteger() && 9891 "Unsupported value type for operation"); 9892 9893 int NumElems = VT.getVectorNumElements(); 9894 DebugLoc dl = Op.getDebugLoc(); 9895 SDValue Idx0 = DAG.getConstant(0, MVT::i32); 9896 SDValue Idx1 = DAG.getConstant(NumElems/2, MVT::i32); 9897 9898 // Extract the LHS vectors 9899 SDValue LHS = Op.getOperand(0); 9900 SDValue LHS1 = Extract128BitVector(LHS, Idx0, DAG, dl); 9901 SDValue LHS2 = Extract128BitVector(LHS, Idx1, DAG, dl); 9902 9903 // Extract the RHS vectors 9904 SDValue RHS = Op.getOperand(1); 9905 SDValue RHS1 = Extract128BitVector(RHS, Idx0, DAG, dl); 9906 SDValue RHS2 = Extract128BitVector(RHS, Idx1, DAG, dl); 9907 9908 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 9909 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); 9910 9911 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, 9912 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1), 9913 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2)); 9914} 9915 9916SDValue X86TargetLowering::LowerADD(SDValue Op, SelectionDAG &DAG) const { 9917 assert(Op.getValueType().getSizeInBits() == 256 && 9918 Op.getValueType().isInteger() && 9919 "Only handle AVX 256-bit vector integer operation"); 9920 return Lower256IntArith(Op, DAG); 9921} 9922 9923SDValue X86TargetLowering::LowerSUB(SDValue Op, SelectionDAG &DAG) const { 9924 assert(Op.getValueType().getSizeInBits() == 256 && 9925 Op.getValueType().isInteger() && 9926 "Only handle AVX 256-bit vector integer operation"); 9927 return Lower256IntArith(Op, DAG); 9928} 9929 9930SDValue X86TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const { 9931 EVT VT = Op.getValueType(); 9932 9933 // Decompose 256-bit ops into smaller 128-bit ops. 
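  // (Plain AVX provides no 256-bit integer multiply, so without AVX2 the
  // operation is split into two 128-bit multiplies via Lower256IntArith.)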
9934 if (VT.getSizeInBits() == 256 && !Subtarget->hasAVX2()) 9935 return Lower256IntArith(Op, DAG); 9936 9937 DebugLoc dl = Op.getDebugLoc(); 9938 9939 SDValue A = Op.getOperand(0); 9940 SDValue B = Op.getOperand(1); 9941 9942 if (VT == MVT::v4i64) { 9943 assert(Subtarget->hasAVX2() && "Lowering v4i64 multiply requires AVX2"); 9944 9945 // ulong2 Ahi = __builtin_ia32_psrlqi256( a, 32); 9946 // ulong2 Bhi = __builtin_ia32_psrlqi256( b, 32); 9947 // ulong2 AloBlo = __builtin_ia32_pmuludq256( a, b ); 9948 // ulong2 AloBhi = __builtin_ia32_pmuludq256( a, Bhi ); 9949 // ulong2 AhiBlo = __builtin_ia32_pmuludq256( Ahi, b ); 9950 // 9951 // AloBhi = __builtin_ia32_psllqi256( AloBhi, 32 ); 9952 // AhiBlo = __builtin_ia32_psllqi256( AhiBlo, 32 ); 9953 // return AloBlo + AloBhi + AhiBlo; 9954 9955 SDValue Ahi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 9956 DAG.getConstant(Intrinsic::x86_avx2_psrli_q, MVT::i32), 9957 A, DAG.getConstant(32, MVT::i32)); 9958 SDValue Bhi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 9959 DAG.getConstant(Intrinsic::x86_avx2_psrli_q, MVT::i32), 9960 B, DAG.getConstant(32, MVT::i32)); 9961 SDValue AloBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 9962 DAG.getConstant(Intrinsic::x86_avx2_pmulu_dq, MVT::i32), 9963 A, B); 9964 SDValue AloBhi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 9965 DAG.getConstant(Intrinsic::x86_avx2_pmulu_dq, MVT::i32), 9966 A, Bhi); 9967 SDValue AhiBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 9968 DAG.getConstant(Intrinsic::x86_avx2_pmulu_dq, MVT::i32), 9969 Ahi, B); 9970 AloBhi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 9971 DAG.getConstant(Intrinsic::x86_avx2_pslli_q, MVT::i32), 9972 AloBhi, DAG.getConstant(32, MVT::i32)); 9973 AhiBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 9974 DAG.getConstant(Intrinsic::x86_avx2_pslli_q, MVT::i32), 9975 AhiBlo, DAG.getConstant(32, MVT::i32)); 9976 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi); 9977 Res = DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo); 9978 return Res; 9979 } 9980 9981 assert(VT == MVT::v2i64 && "Only know how to lower V2I64 multiply"); 9982 9983 // ulong2 Ahi = __builtin_ia32_psrlqi128( a, 32); 9984 // ulong2 Bhi = __builtin_ia32_psrlqi128( b, 32); 9985 // ulong2 AloBlo = __builtin_ia32_pmuludq128( a, b ); 9986 // ulong2 AloBhi = __builtin_ia32_pmuludq128( a, Bhi ); 9987 // ulong2 AhiBlo = __builtin_ia32_pmuludq128( Ahi, b ); 9988 // 9989 // AloBhi = __builtin_ia32_psllqi128( AloBhi, 32 ); 9990 // AhiBlo = __builtin_ia32_psllqi128( AhiBlo, 32 ); 9991 // return AloBlo + AloBhi + AhiBlo; 9992 9993 SDValue Ahi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 9994 DAG.getConstant(Intrinsic::x86_sse2_psrli_q, MVT::i32), 9995 A, DAG.getConstant(32, MVT::i32)); 9996 SDValue Bhi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 9997 DAG.getConstant(Intrinsic::x86_sse2_psrli_q, MVT::i32), 9998 B, DAG.getConstant(32, MVT::i32)); 9999 SDValue AloBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 10000 DAG.getConstant(Intrinsic::x86_sse2_pmulu_dq, MVT::i32), 10001 A, B); 10002 SDValue AloBhi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 10003 DAG.getConstant(Intrinsic::x86_sse2_pmulu_dq, MVT::i32), 10004 A, Bhi); 10005 SDValue AhiBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 10006 DAG.getConstant(Intrinsic::x86_sse2_pmulu_dq, MVT::i32), 10007 Ahi, B); 10008 AloBhi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 10009 DAG.getConstant(Intrinsic::x86_sse2_pslli_q, MVT::i32), 10010 AloBhi, DAG.getConstant(32, MVT::i32)); 10011 AhiBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, 
dl, VT, 10012 DAG.getConstant(Intrinsic::x86_sse2_pslli_q, MVT::i32), 10013 AhiBlo, DAG.getConstant(32, MVT::i32)); 10014 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi); 10015 Res = DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo); 10016 return Res; 10017} 10018 10019SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const { 10020 10021 EVT VT = Op.getValueType(); 10022 DebugLoc dl = Op.getDebugLoc(); 10023 SDValue R = Op.getOperand(0); 10024 SDValue Amt = Op.getOperand(1); 10025 LLVMContext *Context = DAG.getContext(); 10026 10027 if (!Subtarget->hasSSE2()) 10028 return SDValue(); 10029 10030 // Optimize shl/srl/sra with constant shift amount. 10031 if (isSplatVector(Amt.getNode())) { 10032 SDValue SclrAmt = Amt->getOperand(0); 10033 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(SclrAmt)) { 10034 uint64_t ShiftAmt = C->getZExtValue(); 10035 10036 if (VT == MVT::v16i8 && Op.getOpcode() == ISD::SHL) { 10037 // Make a large shift. 10038 SDValue SHL = 10039 DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 10040 DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32), 10041 R, DAG.getConstant(ShiftAmt, MVT::i32)); 10042 // Zero out the rightmost bits. 10043 SmallVector<SDValue, 16> V(16, DAG.getConstant(uint8_t(-1U << ShiftAmt), 10044 MVT::i8)); 10045 return DAG.getNode(ISD::AND, dl, VT, SHL, 10046 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 16)); 10047 } 10048 10049 if (VT == MVT::v2i64 && Op.getOpcode() == ISD::SHL) 10050 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 10051 DAG.getConstant(Intrinsic::x86_sse2_pslli_q, MVT::i32), 10052 R, DAG.getConstant(ShiftAmt, MVT::i32)); 10053 10054 if (VT == MVT::v4i32 && Op.getOpcode() == ISD::SHL) 10055 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 10056 DAG.getConstant(Intrinsic::x86_sse2_pslli_d, MVT::i32), 10057 R, DAG.getConstant(ShiftAmt, MVT::i32)); 10058 10059 if (VT == MVT::v8i16 && Op.getOpcode() == ISD::SHL) 10060 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 10061 DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32), 10062 R, DAG.getConstant(ShiftAmt, MVT::i32)); 10063 10064 if (VT == MVT::v16i8 && Op.getOpcode() == ISD::SRL) { 10065 // Make a large shift. 10066 SDValue SRL = 10067 DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 10068 DAG.getConstant(Intrinsic::x86_sse2_psrli_w, MVT::i32), 10069 R, DAG.getConstant(ShiftAmt, MVT::i32)); 10070 // Zero out the leftmost bits. 
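        // (SSE has no 8-bit element shifts, so the shift is done with the
        // 16-bit psrlw above and the bits that crossed byte boundaries are
        // masked away here.)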
10071 SmallVector<SDValue, 16> V(16, DAG.getConstant(uint8_t(-1U) >> ShiftAmt, 10072 MVT::i8)); 10073 return DAG.getNode(ISD::AND, dl, VT, SRL, 10074 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 16)); 10075 } 10076 10077 if (VT == MVT::v2i64 && Op.getOpcode() == ISD::SRL) 10078 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 10079 DAG.getConstant(Intrinsic::x86_sse2_psrli_q, MVT::i32), 10080 R, DAG.getConstant(ShiftAmt, MVT::i32)); 10081 10082 if (VT == MVT::v4i32 && Op.getOpcode() == ISD::SRL) 10083 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 10084 DAG.getConstant(Intrinsic::x86_sse2_psrli_d, MVT::i32), 10085 R, DAG.getConstant(ShiftAmt, MVT::i32)); 10086 10087 if (VT == MVT::v8i16 && Op.getOpcode() == ISD::SRL) 10088 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 10089 DAG.getConstant(Intrinsic::x86_sse2_psrli_w, MVT::i32), 10090 R, DAG.getConstant(ShiftAmt, MVT::i32)); 10091 10092 if (VT == MVT::v4i32 && Op.getOpcode() == ISD::SRA) 10093 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 10094 DAG.getConstant(Intrinsic::x86_sse2_psrai_d, MVT::i32), 10095 R, DAG.getConstant(ShiftAmt, MVT::i32)); 10096 10097 if (VT == MVT::v8i16 && Op.getOpcode() == ISD::SRA) 10098 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 10099 DAG.getConstant(Intrinsic::x86_sse2_psrai_w, MVT::i32), 10100 R, DAG.getConstant(ShiftAmt, MVT::i32)); 10101 10102 if (VT == MVT::v16i8 && Op.getOpcode() == ISD::SRA) { 10103 if (ShiftAmt == 7) { 10104 // R s>> 7 === R s< 0 10105 SDValue Zeros = getZeroVector(VT, true /* HasSSE2 */, DAG, dl); 10106 return DAG.getNode(X86ISD::PCMPGTB, dl, VT, Zeros, R); 10107 } 10108 10109 // R s>> a === ((R u>> a) ^ m) - m 10110 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt); 10111 SmallVector<SDValue, 16> V(16, DAG.getConstant(128 >> ShiftAmt, 10112 MVT::i8)); 10113 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 16); 10114 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask); 10115 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask); 10116 return Res; 10117 } 10118 10119 if (Subtarget->hasAVX2() && VT == MVT::v32i8) { 10120 if (Op.getOpcode() == ISD::SHL) { 10121 // Make a large shift. 10122 SDValue SHL = 10123 DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 10124 DAG.getConstant(Intrinsic::x86_avx2_pslli_w, MVT::i32), 10125 R, DAG.getConstant(ShiftAmt, MVT::i32)); 10126 // Zero out the rightmost bits. 10127 SmallVector<SDValue, 32> V(32, DAG.getConstant(uint8_t(-1U << ShiftAmt), 10128 MVT::i8)); 10129 return DAG.getNode(ISD::AND, dl, VT, SHL, 10130 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 32)); 10131 } 10132 if (Op.getOpcode() == ISD::SRL) { 10133 // Make a large shift. 10134 SDValue SRL = 10135 DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 10136 DAG.getConstant(Intrinsic::x86_avx2_psrli_w, MVT::i32), 10137 R, DAG.getConstant(ShiftAmt, MVT::i32)); 10138 // Zero out the leftmost bits. 
10139 SmallVector<SDValue, 32> V(32, DAG.getConstant(uint8_t(-1U) >> ShiftAmt, 10140 MVT::i8)); 10141 return DAG.getNode(ISD::AND, dl, VT, SRL, 10142 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 32)); 10143 } 10144 if (Op.getOpcode() == ISD::SRA) { 10145 if (ShiftAmt == 7) { 10146 // R s>> 7 === R s< 0 10147 SDValue Zeros = getZeroVector(VT, true /* HasSSE2 */, DAG, dl); 10148 return DAG.getNode(X86ISD::PCMPGTB, dl, VT, Zeros, R); 10149 } 10150 10151 // R s>> a === ((R u>> a) ^ m) - m 10152 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt); 10153 SmallVector<SDValue, 32> V(32, DAG.getConstant(128 >> ShiftAmt, 10154 MVT::i8)); 10155 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 32); 10156 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask); 10157 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask); 10158 return Res; 10159 } 10160 } 10161 } 10162 } 10163 10164 // Lower SHL with variable shift amount. 10165 if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) { 10166 Op = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 10167 DAG.getConstant(Intrinsic::x86_sse2_pslli_d, MVT::i32), 10168 Op.getOperand(1), DAG.getConstant(23, MVT::i32)); 10169 10170 ConstantInt *CI = ConstantInt::get(*Context, APInt(32, 0x3f800000U)); 10171 10172 std::vector<Constant*> CV(4, CI); 10173 Constant *C = ConstantVector::get(CV); 10174 SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); 10175 SDValue Addend = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 10176 MachinePointerInfo::getConstantPool(), 10177 false, false, false, 16); 10178 10179 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Addend); 10180 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op); 10181 Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op); 10182 return DAG.getNode(ISD::MUL, dl, VT, Op, R); 10183 } 10184 if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) { 10185 assert((Subtarget->hasSSE2() || Subtarget->hasAVX()) && 10186 "Need SSE2 for pslli/pcmpeq."); 10187 10188 // a = a << 5; 10189 Op = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 10190 DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32), 10191 Op.getOperand(1), DAG.getConstant(5, MVT::i32)); 10192 10193 // Turn 'a' into a mask suitable for VSELECT 10194 SDValue VSelM = DAG.getConstant(0x80, VT); 10195 SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op); 10196 OpVSel = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 10197 DAG.getConstant(Intrinsic::x86_sse2_pcmpeq_b, MVT::i32), 10198 OpVSel, VSelM); 10199 10200 SDValue CM1 = DAG.getConstant(0x0f, VT); 10201 SDValue CM2 = DAG.getConstant(0x3f, VT); 10202 10203 // r = VSELECT(r, psllw(r & (char16)15, 4), a); 10204 SDValue M = DAG.getNode(ISD::AND, dl, VT, R, CM1); 10205 M = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 10206 DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32), M, 10207 DAG.getConstant(4, MVT::i32)); 10208 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R); 10209 10210 // a += a 10211 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op); 10212 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op); 10213 OpVSel = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 10214 DAG.getConstant(Intrinsic::x86_sse2_pcmpeq_b, MVT::i32), 10215 OpVSel, VSelM); 10216 10217 // r = VSELECT(r, psllw(r & (char16)63, 2), a); 10218 M = DAG.getNode(ISD::AND, dl, VT, R, CM2); 10219 M = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 10220 DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32), M, 10221 DAG.getConstant(2, MVT::i32)); 10222 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R); 10223 10224 // a += a 10225 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op); 
10226 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op); 10227 OpVSel = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 10228 DAG.getConstant(Intrinsic::x86_sse2_pcmpeq_b, MVT::i32), 10229 OpVSel, VSelM); 10230 10231 // return VSELECT(r, r+r, a); 10232 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, 10233 DAG.getNode(ISD::ADD, dl, VT, R, R), R); 10234 return R; 10235 } 10236 10237 // Decompose 256-bit shifts into smaller 128-bit shifts. 10238 if (VT.getSizeInBits() == 256) { 10239 int NumElems = VT.getVectorNumElements(); 10240 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 10241 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); 10242 10243 // Extract the two vectors 10244 SDValue V1 = Extract128BitVector(R, DAG.getConstant(0, MVT::i32), DAG, dl); 10245 SDValue V2 = Extract128BitVector(R, DAG.getConstant(NumElems/2, MVT::i32), 10246 DAG, dl); 10247 10248 // Recreate the shift amount vectors 10249 SDValue Amt1, Amt2; 10250 if (Amt.getOpcode() == ISD::BUILD_VECTOR) { 10251 // Constant shift amount 10252 SmallVector<SDValue, 4> Amt1Csts; 10253 SmallVector<SDValue, 4> Amt2Csts; 10254 for (int i = 0; i < NumElems/2; ++i) 10255 Amt1Csts.push_back(Amt->getOperand(i)); 10256 for (int i = NumElems/2; i < NumElems; ++i) 10257 Amt2Csts.push_back(Amt->getOperand(i)); 10258 10259 Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, 10260 &Amt1Csts[0], NumElems/2); 10261 Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, 10262 &Amt2Csts[0], NumElems/2); 10263 } else { 10264 // Variable shift amount 10265 Amt1 = Extract128BitVector(Amt, DAG.getConstant(0, MVT::i32), DAG, dl); 10266 Amt2 = Extract128BitVector(Amt, DAG.getConstant(NumElems/2, MVT::i32), 10267 DAG, dl); 10268 } 10269 10270 // Issue new vector shifts for the smaller types 10271 V1 = DAG.getNode(Op.getOpcode(), dl, NewVT, V1, Amt1); 10272 V2 = DAG.getNode(Op.getOpcode(), dl, NewVT, V2, Amt2); 10273 10274 // Concatenate the result back 10275 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2); 10276 } 10277 10278 return SDValue(); 10279} 10280 10281SDValue X86TargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const { 10282 // Lower the "add/sub/mul with overflow" instruction into a regular instruction 10283 // plus a "setcc" instruction that checks the overflow flag. The "brcond" lowering 10284 // looks for this combo and may remove the "setcc" instruction if the "setcc" 10285 // has only one use. 10286 SDNode *N = Op.getNode(); 10287 SDValue LHS = N->getOperand(0); 10288 SDValue RHS = N->getOperand(1); 10289 unsigned BaseOp = 0; 10290 unsigned Cond = 0; 10291 DebugLoc DL = Op.getDebugLoc(); 10292 switch (Op.getOpcode()) { 10293 default: llvm_unreachable("Unknown ovf instruction!"); 10294 case ISD::SADDO: 10295 // An add of one will be selected as an INC. Note that INC doesn't 10296 // set CF, so we can't do this for UADDO. 10297 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) 10298 if (C->isOne()) { 10299 BaseOp = X86ISD::INC; 10300 Cond = X86::COND_O; 10301 break; 10302 } 10303 BaseOp = X86ISD::ADD; 10304 Cond = X86::COND_O; 10305 break; 10306 case ISD::UADDO: 10307 BaseOp = X86ISD::ADD; 10308 Cond = X86::COND_B; 10309 break; 10310 case ISD::SSUBO: 10311 // A subtract of one will be selected as a DEC. Note that DEC doesn't 10312 // set CF, so we can't do this for USUBO.
10313 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) 10314 if (C->isOne()) { 10315 BaseOp = X86ISD::DEC; 10316 Cond = X86::COND_O; 10317 break; 10318 } 10319 BaseOp = X86ISD::SUB; 10320 Cond = X86::COND_O; 10321 break; 10322 case ISD::USUBO: 10323 BaseOp = X86ISD::SUB; 10324 Cond = X86::COND_B; 10325 break; 10326 case ISD::SMULO: 10327 BaseOp = X86ISD::SMUL; 10328 Cond = X86::COND_O; 10329 break; 10330 case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs 10331 SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0), 10332 MVT::i32); 10333 SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS); 10334 10335 SDValue SetCC = 10336 DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 10337 DAG.getConstant(X86::COND_O, MVT::i32), 10338 SDValue(Sum.getNode(), 2)); 10339 10340 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC); 10341 } 10342 } 10343 10344 // Also sets EFLAGS. 10345 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32); 10346 SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS); 10347 10348 SDValue SetCC = 10349 DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1), 10350 DAG.getConstant(Cond, MVT::i32), 10351 SDValue(Sum.getNode(), 1)); 10352 10353 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC); 10354} 10355 10356SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, 10357 SelectionDAG &DAG) const { 10358 DebugLoc dl = Op.getDebugLoc(); 10359 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 10360 EVT VT = Op.getValueType(); 10361 10362 if (Subtarget->hasSSE2() && VT.isVector()) { 10363 unsigned BitsDiff = VT.getScalarType().getSizeInBits() - 10364 ExtraVT.getScalarType().getSizeInBits(); 10365 SDValue ShAmt = DAG.getConstant(BitsDiff, MVT::i32); 10366 10367 unsigned SHLIntrinsicsID = 0; 10368 unsigned SRAIntrinsicsID = 0; 10369 switch (VT.getSimpleVT().SimpleTy) { 10370 default: 10371 return SDValue(); 10372 case MVT::v4i32: 10373 SHLIntrinsicsID = Intrinsic::x86_sse2_pslli_d; 10374 SRAIntrinsicsID = Intrinsic::x86_sse2_psrai_d; 10375 break; 10376 case MVT::v8i16: 10377 SHLIntrinsicsID = Intrinsic::x86_sse2_pslli_w; 10378 SRAIntrinsicsID = Intrinsic::x86_sse2_psrai_w; 10379 break; 10380 case MVT::v8i32: 10381 case MVT::v16i16: 10382 if (!Subtarget->hasAVX()) 10383 return SDValue(); 10384 if (!Subtarget->hasAVX2()) { 10385 // needs to be split 10386 int NumElems = VT.getVectorNumElements(); 10387 SDValue Idx0 = DAG.getConstant(0, MVT::i32); 10388 SDValue Idx1 = DAG.getConstant(NumElems/2, MVT::i32); 10389 10390 // Extract the LHS vectors 10391 SDValue LHS = Op.getOperand(0); 10392 SDValue LHS1 = Extract128BitVector(LHS, Idx0, DAG, dl); 10393 SDValue LHS2 = Extract128BitVector(LHS, Idx1, DAG, dl); 10394 10395 MVT EltVT = VT.getVectorElementType().getSimpleVT(); 10396 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2); 10397 10398 EVT ExtraEltVT = ExtraVT.getVectorElementType(); 10399 int ExtraNumElems = ExtraVT.getVectorNumElements(); 10400 ExtraVT = EVT::getVectorVT(*DAG.getContext(), ExtraEltVT, 10401 ExtraNumElems/2); 10402 SDValue Extra = DAG.getValueType(ExtraVT); 10403 10404 LHS1 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, Extra); 10405 LHS2 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, Extra); 10406 10407 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, LHS1, LHS2);; 10408 } 10409 if (VT == MVT::v8i32) { 10410 SHLIntrinsicsID = Intrinsic::x86_avx2_pslli_d; 10411 SRAIntrinsicsID = Intrinsic::x86_avx2_psrai_d; 10412 } else { 10413 SHLIntrinsicsID = Intrinsic::x86_avx2_pslli_w; 10414 
SRAIntrinsicsID = Intrinsic::x86_avx2_psrai_w; 10415 } 10416 } 10417 10418 SDValue Tmp1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 10419 DAG.getConstant(SHLIntrinsicsID, MVT::i32), 10420 Op.getOperand(0), ShAmt); 10421 10422 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 10423 DAG.getConstant(SRAIntrinsicsID, MVT::i32), 10424 Tmp1, ShAmt); 10425 } 10426 10427 return SDValue(); 10428} 10429 10430 10431SDValue X86TargetLowering::LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const{ 10432 DebugLoc dl = Op.getDebugLoc(); 10433 10434 // Go ahead and emit the fence on x86-64 even if we asked for no-sse2. 10435 // There isn't any reason to disable it if the target processor supports it. 10436 if (!Subtarget->hasSSE2() && !Subtarget->is64Bit()) { 10437 SDValue Chain = Op.getOperand(0); 10438 SDValue Zero = DAG.getConstant(0, MVT::i32); 10439 SDValue Ops[] = { 10440 DAG.getRegister(X86::ESP, MVT::i32), // Base 10441 DAG.getTargetConstant(1, MVT::i8), // Scale 10442 DAG.getRegister(0, MVT::i32), // Index 10443 DAG.getTargetConstant(0, MVT::i32), // Disp 10444 DAG.getRegister(0, MVT::i32), // Segment. 10445 Zero, 10446 Chain 10447 }; 10448 SDNode *Res = 10449 DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops, 10450 array_lengthof(Ops)); 10451 return SDValue(Res, 0); 10452 } 10453 10454 unsigned isDev = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue(); 10455 if (!isDev) 10456 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0)); 10457 10458 unsigned Op1 = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 10459 unsigned Op2 = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); 10460 unsigned Op3 = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue(); 10461 unsigned Op4 = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); 10462 10463 // def : Pat<(membarrier (i8 0), (i8 0), (i8 0), (i8 1), (i8 1)), (SFENCE)>; 10464 if (!Op1 && !Op2 && !Op3 && Op4) 10465 return DAG.getNode(X86ISD::SFENCE, dl, MVT::Other, Op.getOperand(0)); 10466 10467 // def : Pat<(membarrier (i8 1), (i8 0), (i8 0), (i8 0), (i8 1)), (LFENCE)>; 10468 if (Op1 && !Op2 && !Op3 && !Op4) 10469 return DAG.getNode(X86ISD::LFENCE, dl, MVT::Other, Op.getOperand(0)); 10470 10471 // def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm), (i8 1)), 10472 // (MFENCE)>; 10473 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0)); 10474} 10475 10476SDValue X86TargetLowering::LowerATOMIC_FENCE(SDValue Op, 10477 SelectionDAG &DAG) const { 10478 DebugLoc dl = Op.getDebugLoc(); 10479 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>( 10480 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()); 10481 SynchronizationScope FenceScope = static_cast<SynchronizationScope>( 10482 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue()); 10483 10484 // The only fence that needs an instruction is a sequentially-consistent 10485 // cross-thread fence. 10486 if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) { 10487 // Use mfence if we have SSE2 or we're on x86-64 (even if we asked for 10488 // no-sse2). There isn't any reason to disable it if the target processor 10489 // supports it. 
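    // Without SSE2 on 32-bit, fall back to a locked "or" of zero into the top
    // of the stack (the OR32mrLocked node below); a LOCK-prefixed instruction
    // acts as a full memory barrier on x86.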
10490 if (Subtarget->hasSSE2() || Subtarget->is64Bit()) 10491 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0)); 10492 10493 SDValue Chain = Op.getOperand(0); 10494 SDValue Zero = DAG.getConstant(0, MVT::i32); 10495 SDValue Ops[] = { 10496 DAG.getRegister(X86::ESP, MVT::i32), // Base 10497 DAG.getTargetConstant(1, MVT::i8), // Scale 10498 DAG.getRegister(0, MVT::i32), // Index 10499 DAG.getTargetConstant(0, MVT::i32), // Disp 10500 DAG.getRegister(0, MVT::i32), // Segment. 10501 Zero, 10502 Chain 10503 }; 10504 SDNode *Res = 10505 DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops, 10506 array_lengthof(Ops)); 10507 return SDValue(Res, 0); 10508 } 10509 10510 // MEMBARRIER is a compiler barrier; it codegens to a no-op. 10511 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0)); 10512} 10513 10514 10515SDValue X86TargetLowering::LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) const { 10516 EVT T = Op.getValueType(); 10517 DebugLoc DL = Op.getDebugLoc(); 10518 unsigned Reg = 0; 10519 unsigned size = 0; 10520 switch(T.getSimpleVT().SimpleTy) { 10521 default: 10522 assert(false && "Invalid value type!"); 10523 case MVT::i8: Reg = X86::AL; size = 1; break; 10524 case MVT::i16: Reg = X86::AX; size = 2; break; 10525 case MVT::i32: Reg = X86::EAX; size = 4; break; 10526 case MVT::i64: 10527 assert(Subtarget->is64Bit() && "Node not type legal!"); 10528 Reg = X86::RAX; size = 8; 10529 break; 10530 } 10531 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg, 10532 Op.getOperand(2), SDValue()); 10533 SDValue Ops[] = { cpIn.getValue(0), 10534 Op.getOperand(1), 10535 Op.getOperand(3), 10536 DAG.getTargetConstant(size, MVT::i8), 10537 cpIn.getValue(1) }; 10538 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 10539 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand(); 10540 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys, 10541 Ops, 5, T, MMO); 10542 SDValue cpOut = 10543 DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1)); 10544 return cpOut; 10545} 10546 10547SDValue X86TargetLowering::LowerREADCYCLECOUNTER(SDValue Op, 10548 SelectionDAG &DAG) const { 10549 assert(Subtarget->is64Bit() && "Result not type legalized?"); 10550 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 10551 SDValue TheChain = Op.getOperand(0); 10552 DebugLoc dl = Op.getDebugLoc(); 10553 SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1); 10554 SDValue rax = DAG.getCopyFromReg(rd, dl, X86::RAX, MVT::i64, rd.getValue(1)); 10555 SDValue rdx = DAG.getCopyFromReg(rax.getValue(1), dl, X86::RDX, MVT::i64, 10556 rax.getValue(2)); 10557 SDValue Tmp = DAG.getNode(ISD::SHL, dl, MVT::i64, rdx, 10558 DAG.getConstant(32, MVT::i8)); 10559 SDValue Ops[] = { 10560 DAG.getNode(ISD::OR, dl, MVT::i64, rax, Tmp), 10561 rdx.getValue(1) 10562 }; 10563 return DAG.getMergeValues(Ops, 2, dl); 10564} 10565 10566SDValue X86TargetLowering::LowerBITCAST(SDValue Op, 10567 SelectionDAG &DAG) const { 10568 EVT SrcVT = Op.getOperand(0).getValueType(); 10569 EVT DstVT = Op.getValueType(); 10570 assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() && 10571 Subtarget->hasMMX() && "Unexpected custom BITCAST"); 10572 assert((DstVT == MVT::i64 || 10573 (DstVT.isVector() && DstVT.getSizeInBits()==64)) && 10574 "Unexpected custom BITCAST"); 10575 // i64 <=> MMX conversions are Legal. 
10576 if (SrcVT==MVT::i64 && DstVT.isVector()) 10577 return Op; 10578 if (DstVT==MVT::i64 && SrcVT.isVector()) 10579 return Op; 10580 // MMX <=> MMX conversions are Legal. 10581 if (SrcVT.isVector() && DstVT.isVector()) 10582 return Op; 10583 // All other conversions need to be expanded. 10584 return SDValue(); 10585} 10586 10587SDValue X86TargetLowering::LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) const { 10588 SDNode *Node = Op.getNode(); 10589 DebugLoc dl = Node->getDebugLoc(); 10590 EVT T = Node->getValueType(0); 10591 SDValue negOp = DAG.getNode(ISD::SUB, dl, T, 10592 DAG.getConstant(0, T), Node->getOperand(2)); 10593 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl, 10594 cast<AtomicSDNode>(Node)->getMemoryVT(), 10595 Node->getOperand(0), 10596 Node->getOperand(1), negOp, 10597 cast<AtomicSDNode>(Node)->getSrcValue(), 10598 cast<AtomicSDNode>(Node)->getAlignment(), 10599 cast<AtomicSDNode>(Node)->getOrdering(), 10600 cast<AtomicSDNode>(Node)->getSynchScope()); 10601} 10602 10603static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) { 10604 SDNode *Node = Op.getNode(); 10605 DebugLoc dl = Node->getDebugLoc(); 10606 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT(); 10607 10608 // Convert seq_cst store -> xchg 10609 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b) 10610 // FIXME: On 32-bit, store -> fist or movq would be more efficient 10611 // (The only way to get a 16-byte store is cmpxchg16b) 10612 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment. 10613 if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent || 10614 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 10615 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl, 10616 cast<AtomicSDNode>(Node)->getMemoryVT(), 10617 Node->getOperand(0), 10618 Node->getOperand(1), Node->getOperand(2), 10619 cast<AtomicSDNode>(Node)->getMemOperand(), 10620 cast<AtomicSDNode>(Node)->getOrdering(), 10621 cast<AtomicSDNode>(Node)->getSynchScope()); 10622 return Swap.getValue(1); 10623 } 10624 // Other atomic stores have a simple pattern. 10625 return Op; 10626} 10627 10628static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) { 10629 EVT VT = Op.getNode()->getValueType(0); 10630 10631 // Let legalize expand this if it isn't a legal type yet. 10632 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 10633 return SDValue(); 10634 10635 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 10636 10637 unsigned Opc; 10638 bool ExtraOp = false; 10639 switch (Op.getOpcode()) { 10640 default: assert(0 && "Invalid code"); 10641 case ISD::ADDC: Opc = X86ISD::ADD; break; 10642 case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break; 10643 case ISD::SUBC: Opc = X86ISD::SUB; break; 10644 case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break; 10645 } 10646 10647 if (!ExtraOp) 10648 return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0), 10649 Op.getOperand(1)); 10650 return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0), 10651 Op.getOperand(1), Op.getOperand(2)); 10652} 10653 10654/// LowerOperation - Provide custom lowering hooks for some operations. 
10655/// 10656SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 10657 switch (Op.getOpcode()) { 10658 default: llvm_unreachable("Should not custom lower this!"); 10659 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op,DAG); 10660 case ISD::MEMBARRIER: return LowerMEMBARRIER(Op,DAG); 10661 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op,DAG); 10662 case ISD::ATOMIC_CMP_SWAP: return LowerCMP_SWAP(Op,DAG); 10663 case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG); 10664 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op,DAG); 10665 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 10666 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 10667 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 10668 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 10669 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 10670 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG); 10671 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, DAG); 10672 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 10673 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 10674 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 10675 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 10676 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG); 10677 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 10678 case ISD::SHL_PARTS: 10679 case ISD::SRA_PARTS: 10680 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG); 10681 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 10682 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG); 10683 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 10684 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG); 10685 case ISD::FABS: return LowerFABS(Op, DAG); 10686 case ISD::FNEG: return LowerFNEG(Op, DAG); 10687 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 10688 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG); 10689 case ISD::SETCC: return LowerSETCC(Op, DAG); 10690 case ISD::SELECT: return LowerSELECT(Op, DAG); 10691 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 10692 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 10693 case ISD::VASTART: return LowerVASTART(Op, DAG); 10694 case ISD::VAARG: return LowerVAARG(Op, DAG); 10695 case ISD::VACOPY: return LowerVACOPY(Op, DAG); 10696 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 10697 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 10698 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 10699 case ISD::FRAME_TO_ARGS_OFFSET: 10700 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG); 10701 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); 10702 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG); 10703 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG); 10704 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG); 10705 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 10706 case ISD::CTLZ: return LowerCTLZ(Op, DAG); 10707 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, DAG); 10708 case ISD::CTTZ: return LowerCTTZ(Op, DAG); 10709 case ISD::MUL: return LowerMUL(Op, DAG); 10710 case ISD::SRA: 10711 case ISD::SRL: 10712 case ISD::SHL: return LowerShift(Op, DAG); 10713 case ISD::SADDO: 10714 case ISD::UADDO: 10715 case ISD::SSUBO: 10716 case ISD::USUBO: 10717 case ISD::SMULO: 10718 case ISD::UMULO: return LowerXALUO(Op, DAG); 10719 
case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, DAG); 10720 case ISD::BITCAST: return LowerBITCAST(Op, DAG); 10721 case ISD::ADDC: 10722 case ISD::ADDE: 10723 case ISD::SUBC: 10724 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); 10725 case ISD::ADD: return LowerADD(Op, DAG); 10726 case ISD::SUB: return LowerSUB(Op, DAG); 10727 } 10728} 10729 10730static void ReplaceATOMIC_LOAD(SDNode *Node, 10731 SmallVectorImpl<SDValue> &Results, 10732 SelectionDAG &DAG) { 10733 DebugLoc dl = Node->getDebugLoc(); 10734 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT(); 10735 10736 // Convert wide load -> cmpxchg8b/cmpxchg16b 10737 // FIXME: On 32-bit, load -> fild or movq would be more efficient 10738 // (The only way to get a 16-byte load is cmpxchg16b) 10739 // FIXME: 16-byte ATOMIC_CMP_SWAP isn't actually hooked up at the moment. 10740 SDValue Zero = DAG.getConstant(0, VT); 10741 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, dl, VT, 10742 Node->getOperand(0), 10743 Node->getOperand(1), Zero, Zero, 10744 cast<AtomicSDNode>(Node)->getMemOperand(), 10745 cast<AtomicSDNode>(Node)->getOrdering(), 10746 cast<AtomicSDNode>(Node)->getSynchScope()); 10747 Results.push_back(Swap.getValue(0)); 10748 Results.push_back(Swap.getValue(1)); 10749} 10750 10751void X86TargetLowering:: 10752ReplaceATOMIC_BINARY_64(SDNode *Node, SmallVectorImpl<SDValue>&Results, 10753 SelectionDAG &DAG, unsigned NewOp) const { 10754 DebugLoc dl = Node->getDebugLoc(); 10755 assert (Node->getValueType(0) == MVT::i64 && 10756 "Only know how to expand i64 atomics"); 10757 10758 SDValue Chain = Node->getOperand(0); 10759 SDValue In1 = Node->getOperand(1); 10760 SDValue In2L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 10761 Node->getOperand(2), DAG.getIntPtrConstant(0)); 10762 SDValue In2H = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 10763 Node->getOperand(2), DAG.getIntPtrConstant(1)); 10764 SDValue Ops[] = { Chain, In1, In2L, In2H }; 10765 SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 10766 SDValue Result = 10767 DAG.getMemIntrinsicNode(NewOp, dl, Tys, Ops, 4, MVT::i64, 10768 cast<MemSDNode>(Node)->getMemOperand()); 10769 SDValue OpsF[] = { Result.getValue(0), Result.getValue(1)}; 10770 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2)); 10771 Results.push_back(Result.getValue(2)); 10772} 10773 10774/// ReplaceNodeResults - Replace a node with an illegal result type 10775/// with a new node built out of custom code. 10776void X86TargetLowering::ReplaceNodeResults(SDNode *N, 10777 SmallVectorImpl<SDValue>&Results, 10778 SelectionDAG &DAG) const { 10779 DebugLoc dl = N->getDebugLoc(); 10780 switch (N->getOpcode()) { 10781 default: 10782 assert(false && "Do not know how to custom type legalize this operation!"); 10783 return; 10784 case ISD::SIGN_EXTEND_INREG: 10785 case ISD::ADDC: 10786 case ISD::ADDE: 10787 case ISD::SUBC: 10788 case ISD::SUBE: 10789 // We don't want to expand or promote these. 10790 return; 10791 case ISD::FP_TO_SINT: { 10792 std::pair<SDValue,SDValue> Vals = 10793 FP_TO_INTHelper(SDValue(N, 0), DAG, true); 10794 SDValue FIST = Vals.first, StackSlot = Vals.second; 10795 if (FIST.getNode() != 0) { 10796 EVT VT = N->getValueType(0); 10797 // Return a load from the stack slot. 
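      // (FP_TO_INTHelper arranged for the converted value to be stored into
      //  StackSlot via an X86ISD::FP_TO_INT*_IN_MEM node; FIST is the chain
      //  of that store, so reloading StackSlot here yields the result.)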
10798 Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot, 10799 MachinePointerInfo(), 10800 false, false, false, 0)); 10801 } 10802 return; 10803 } 10804 case ISD::READCYCLECOUNTER: { 10805 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 10806 SDValue TheChain = N->getOperand(0); 10807 SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1); 10808 SDValue eax = DAG.getCopyFromReg(rd, dl, X86::EAX, MVT::i32, 10809 rd.getValue(1)); 10810 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), dl, X86::EDX, MVT::i32, 10811 eax.getValue(2)); 10812 // Use a buildpair to merge the two 32-bit values into a 64-bit one. 10813 SDValue Ops[] = { eax, edx }; 10814 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops, 2)); 10815 Results.push_back(edx.getValue(1)); 10816 return; 10817 } 10818 case ISD::ATOMIC_CMP_SWAP: { 10819 EVT T = N->getValueType(0); 10820 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair"); 10821 bool Regs64bit = T == MVT::i128; 10822 EVT HalfT = Regs64bit ? MVT::i64 : MVT::i32; 10823 SDValue cpInL, cpInH; 10824 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2), 10825 DAG.getConstant(0, HalfT)); 10826 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2), 10827 DAG.getConstant(1, HalfT)); 10828 cpInL = DAG.getCopyToReg(N->getOperand(0), dl, 10829 Regs64bit ? X86::RAX : X86::EAX, 10830 cpInL, SDValue()); 10831 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl, 10832 Regs64bit ? X86::RDX : X86::EDX, 10833 cpInH, cpInL.getValue(1)); 10834 SDValue swapInL, swapInH; 10835 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3), 10836 DAG.getConstant(0, HalfT)); 10837 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3), 10838 DAG.getConstant(1, HalfT)); 10839 swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl, 10840 Regs64bit ? X86::RBX : X86::EBX, 10841 swapInL, cpInH.getValue(1)); 10842 swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl, 10843 Regs64bit ? X86::RCX : X86::ECX, 10844 swapInH, swapInL.getValue(1)); 10845 SDValue Ops[] = { swapInH.getValue(0), 10846 N->getOperand(1), 10847 swapInH.getValue(1) }; 10848 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); 10849 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); 10850 unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_DAG : 10851 X86ISD::LCMPXCHG8_DAG; 10852 SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, 10853 Ops, 3, T, MMO); 10854 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl, 10855 Regs64bit ? X86::RAX : X86::EAX, 10856 HalfT, Result.getValue(1)); 10857 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl, 10858 Regs64bit ? 
X86::RDX : X86::EDX, 10859 HalfT, cpOutL.getValue(2)); 10860 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)}; 10861 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF, 2)); 10862 Results.push_back(cpOutH.getValue(1)); 10863 return; 10864 } 10865 case ISD::ATOMIC_LOAD_ADD: 10866 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMADD64_DAG); 10867 return; 10868 case ISD::ATOMIC_LOAD_AND: 10869 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMAND64_DAG); 10870 return; 10871 case ISD::ATOMIC_LOAD_NAND: 10872 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMNAND64_DAG); 10873 return; 10874 case ISD::ATOMIC_LOAD_OR: 10875 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMOR64_DAG); 10876 return; 10877 case ISD::ATOMIC_LOAD_SUB: 10878 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMSUB64_DAG); 10879 return; 10880 case ISD::ATOMIC_LOAD_XOR: 10881 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMXOR64_DAG); 10882 return; 10883 case ISD::ATOMIC_SWAP: 10884 ReplaceATOMIC_BINARY_64(N, Results, DAG, X86ISD::ATOMSWAP64_DAG); 10885 return; 10886 case ISD::ATOMIC_LOAD: 10887 ReplaceATOMIC_LOAD(N, Results, DAG); 10888 } 10889} 10890 10891const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { 10892 switch (Opcode) { 10893 default: return NULL; 10894 case X86ISD::BSF: return "X86ISD::BSF"; 10895 case X86ISD::BSR: return "X86ISD::BSR"; 10896 case X86ISD::SHLD: return "X86ISD::SHLD"; 10897 case X86ISD::SHRD: return "X86ISD::SHRD"; 10898 case X86ISD::FAND: return "X86ISD::FAND"; 10899 case X86ISD::FOR: return "X86ISD::FOR"; 10900 case X86ISD::FXOR: return "X86ISD::FXOR"; 10901 case X86ISD::FSRL: return "X86ISD::FSRL"; 10902 case X86ISD::FILD: return "X86ISD::FILD"; 10903 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG"; 10904 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM"; 10905 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM"; 10906 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM"; 10907 case X86ISD::FLD: return "X86ISD::FLD"; 10908 case X86ISD::FST: return "X86ISD::FST"; 10909 case X86ISD::CALL: return "X86ISD::CALL"; 10910 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG"; 10911 case X86ISD::BT: return "X86ISD::BT"; 10912 case X86ISD::CMP: return "X86ISD::CMP"; 10913 case X86ISD::COMI: return "X86ISD::COMI"; 10914 case X86ISD::UCOMI: return "X86ISD::UCOMI"; 10915 case X86ISD::SETCC: return "X86ISD::SETCC"; 10916 case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY"; 10917 case X86ISD::FSETCCsd: return "X86ISD::FSETCCsd"; 10918 case X86ISD::FSETCCss: return "X86ISD::FSETCCss"; 10919 case X86ISD::CMOV: return "X86ISD::CMOV"; 10920 case X86ISD::BRCOND: return "X86ISD::BRCOND"; 10921 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG"; 10922 case X86ISD::REP_STOS: return "X86ISD::REP_STOS"; 10923 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS"; 10924 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg"; 10925 case X86ISD::Wrapper: return "X86ISD::Wrapper"; 10926 case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP"; 10927 case X86ISD::PEXTRB: return "X86ISD::PEXTRB"; 10928 case X86ISD::PEXTRW: return "X86ISD::PEXTRW"; 10929 case X86ISD::INSERTPS: return "X86ISD::INSERTPS"; 10930 case X86ISD::PINSRB: return "X86ISD::PINSRB"; 10931 case X86ISD::PINSRW: return "X86ISD::PINSRW"; 10932 case X86ISD::PSHUFB: return "X86ISD::PSHUFB"; 10933 case X86ISD::ANDNP: return "X86ISD::ANDNP"; 10934 case X86ISD::PSIGN: return "X86ISD::PSIGN"; 10935 case X86ISD::BLENDV: return "X86ISD::BLENDV"; 10936 case 
X86ISD::HADD: return "X86ISD::HADD"; 10937 case X86ISD::HSUB: return "X86ISD::HSUB"; 10938 case X86ISD::FHADD: return "X86ISD::FHADD"; 10939 case X86ISD::FHSUB: return "X86ISD::FHSUB"; 10940 case X86ISD::FMAX: return "X86ISD::FMAX"; 10941 case X86ISD::FMIN: return "X86ISD::FMIN"; 10942 case X86ISD::FRSQRT: return "X86ISD::FRSQRT"; 10943 case X86ISD::FRCP: return "X86ISD::FRCP"; 10944 case X86ISD::TLSADDR: return "X86ISD::TLSADDR"; 10945 case X86ISD::TLSCALL: return "X86ISD::TLSCALL"; 10946 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN"; 10947 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN"; 10948 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m"; 10949 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG"; 10950 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG"; 10951 case X86ISD::ATOMADD64_DAG: return "X86ISD::ATOMADD64_DAG"; 10952 case X86ISD::ATOMSUB64_DAG: return "X86ISD::ATOMSUB64_DAG"; 10953 case X86ISD::ATOMOR64_DAG: return "X86ISD::ATOMOR64_DAG"; 10954 case X86ISD::ATOMXOR64_DAG: return "X86ISD::ATOMXOR64_DAG"; 10955 case X86ISD::ATOMAND64_DAG: return "X86ISD::ATOMAND64_DAG"; 10956 case X86ISD::ATOMNAND64_DAG: return "X86ISD::ATOMNAND64_DAG"; 10957 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL"; 10958 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD"; 10959 case X86ISD::VSHL: return "X86ISD::VSHL"; 10960 case X86ISD::VSRL: return "X86ISD::VSRL"; 10961 case X86ISD::CMPPD: return "X86ISD::CMPPD"; 10962 case X86ISD::CMPPS: return "X86ISD::CMPPS"; 10963 case X86ISD::PCMPEQB: return "X86ISD::PCMPEQB"; 10964 case X86ISD::PCMPEQW: return "X86ISD::PCMPEQW"; 10965 case X86ISD::PCMPEQD: return "X86ISD::PCMPEQD"; 10966 case X86ISD::PCMPEQQ: return "X86ISD::PCMPEQQ"; 10967 case X86ISD::PCMPGTB: return "X86ISD::PCMPGTB"; 10968 case X86ISD::PCMPGTW: return "X86ISD::PCMPGTW"; 10969 case X86ISD::PCMPGTD: return "X86ISD::PCMPGTD"; 10970 case X86ISD::PCMPGTQ: return "X86ISD::PCMPGTQ"; 10971 case X86ISD::ADD: return "X86ISD::ADD"; 10972 case X86ISD::SUB: return "X86ISD::SUB"; 10973 case X86ISD::ADC: return "X86ISD::ADC"; 10974 case X86ISD::SBB: return "X86ISD::SBB"; 10975 case X86ISD::SMUL: return "X86ISD::SMUL"; 10976 case X86ISD::UMUL: return "X86ISD::UMUL"; 10977 case X86ISD::INC: return "X86ISD::INC"; 10978 case X86ISD::DEC: return "X86ISD::DEC"; 10979 case X86ISD::OR: return "X86ISD::OR"; 10980 case X86ISD::XOR: return "X86ISD::XOR"; 10981 case X86ISD::AND: return "X86ISD::AND"; 10982 case X86ISD::ANDN: return "X86ISD::ANDN"; 10983 case X86ISD::BLSI: return "X86ISD::BLSI"; 10984 case X86ISD::BLSMSK: return "X86ISD::BLSMSK"; 10985 case X86ISD::BLSR: return "X86ISD::BLSR"; 10986 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM"; 10987 case X86ISD::PTEST: return "X86ISD::PTEST"; 10988 case X86ISD::TESTP: return "X86ISD::TESTP"; 10989 case X86ISD::PALIGN: return "X86ISD::PALIGN"; 10990 case X86ISD::PSHUFD: return "X86ISD::PSHUFD"; 10991 case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW"; 10992 case X86ISD::PSHUFHW_LD: return "X86ISD::PSHUFHW_LD"; 10993 case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW"; 10994 case X86ISD::PSHUFLW_LD: return "X86ISD::PSHUFLW_LD"; 10995 case X86ISD::SHUFP: return "X86ISD::SHUFP"; 10996 case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS"; 10997 case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD"; 10998 case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS"; 10999 case X86ISD::MOVLPS: return "X86ISD::MOVLPS"; 11000 case X86ISD::MOVLPD: return "X86ISD::MOVLPD"; 11001 case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP"; 11002 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP"; 
11003 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP"; 11004 case X86ISD::MOVSHDUP_LD: return "X86ISD::MOVSHDUP_LD"; 11005 case X86ISD::MOVSLDUP_LD: return "X86ISD::MOVSLDUP_LD"; 11006 case X86ISD::MOVSD: return "X86ISD::MOVSD"; 11007 case X86ISD::MOVSS: return "X86ISD::MOVSS"; 11008 case X86ISD::UNPCKL: return "X86ISD::UNPCKL"; 11009 case X86ISD::UNPCKH: return "X86ISD::UNPCKH"; 11010 case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST"; 11011 case X86ISD::VPERMILP: return "X86ISD::VPERMILP"; 11012 case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128"; 11013 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS"; 11014 case X86ISD::VAARG_64: return "X86ISD::VAARG_64"; 11015 case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA"; 11016 case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER"; 11017 case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA"; 11018 } 11019} 11020 11021// isLegalAddressingMode - Return true if the addressing mode represented 11022// by AM is legal for this target, for a load/store of the specified type. 11023bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM, 11024 Type *Ty) const { 11025 // X86 supports extremely general addressing modes. 11026 CodeModel::Model M = getTargetMachine().getCodeModel(); 11027 Reloc::Model R = getTargetMachine().getRelocationModel(); 11028 11029 // X86 allows a sign-extended 32-bit immediate field as a displacement. 11030 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != NULL)) 11031 return false; 11032 11033 if (AM.BaseGV) { 11034 unsigned GVFlags = 11035 Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine()); 11036 11037 // If a reference to this global requires an extra load, we can't fold it. 11038 if (isGlobalStubReference(GVFlags)) 11039 return false; 11040 11041 // If BaseGV requires a register for the PIC base, we cannot also have a 11042 // BaseReg specified. 11043 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags)) 11044 return false; 11045 11046 // If lower 4G is not available, then we must use rip-relative addressing. 11047 if ((M != CodeModel::Small || R != Reloc::Static) && 11048 Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1)) 11049 return false; 11050 } 11051 11052 switch (AM.Scale) { 11053 case 0: 11054 case 1: 11055 case 2: 11056 case 4: 11057 case 8: 11058 // These scales always work. 11059 break; 11060 case 3: 11061 case 5: 11062 case 9: 11063 // These scales are formed with basereg+scalereg. Only accept if there is 11064 // no basereg yet. 11065 if (AM.HasBaseReg) 11066 return false; 11067 break; 11068 default: // Other stuff never works. 11069 return false; 11070 } 11071 11072 return true; 11073} 11074 11075 11076bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const { 11077 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) 11078 return false; 11079 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); 11080 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); 11081 if (NumBits1 <= NumBits2) 11082 return false; 11083 return true; 11084} 11085 11086bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { 11087 if (!VT1.isInteger() || !VT2.isInteger()) 11088 return false; 11089 unsigned NumBits1 = VT1.getSizeInBits(); 11090 unsigned NumBits2 = VT2.getSizeInBits(); 11091 if (NumBits1 <= NumBits2) 11092 return false; 11093 return true; 11094} 11095 11096bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const { 11097 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers. 
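  // For example, "movl %edi, %eax" already clears bits 63:32 of %rax, so a
  // subsequent i32 -> i64 zero extension costs no extra instruction.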
11098 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit(); 11099} 11100 11101bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const { 11102 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers. 11103 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit(); 11104} 11105 11106bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const { 11107 // i16 instructions are longer (0x66 prefix) and potentially slower. 11108 return !(VT1 == MVT::i32 && VT2 == MVT::i16); 11109} 11110 11111/// isShuffleMaskLegal - Targets can use this to indicate that they only 11112/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 11113/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 11114/// are assumed to be legal. 11115bool 11116X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, 11117 EVT VT) const { 11118 // Very little shuffling can be done for 64-bit vectors right now. 11119 if (VT.getSizeInBits() == 64) 11120 return false; 11121 11122 // FIXME: pshufb, blends, shifts. 11123 return (VT.getVectorNumElements() == 2 || 11124 ShuffleVectorSDNode::isSplatMask(&M[0], VT) || 11125 isMOVLMask(M, VT) || 11126 isSHUFPMask(M, VT) || 11127 isPSHUFDMask(M, VT) || 11128 isPSHUFHWMask(M, VT) || 11129 isPSHUFLWMask(M, VT) || 11130 isPALIGNRMask(M, VT, Subtarget->hasSSSE3()) || 11131 isUNPCKLMask(M, VT, Subtarget->hasAVX2()) || 11132 isUNPCKHMask(M, VT, Subtarget->hasAVX2()) || 11133 isUNPCKL_v_undef_Mask(M, VT, Subtarget->hasAVX2()) || 11134 isUNPCKH_v_undef_Mask(M, VT, Subtarget->hasAVX2())); 11135} 11136 11137bool 11138X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask, 11139 EVT VT) const { 11140 unsigned NumElts = VT.getVectorNumElements(); 11141 // FIXME: This collection of masks seems suspect. 
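  // ("Clear" masks are shuffle masks used with a zero-vector operand to zero
  //  out selected lanes, e.g. when a vector AND with a 0/-1 constant mask is
  //  turned into a shuffle; only the cases below are reported as cheap.)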
11142 if (NumElts == 2) 11143 return true; 11144 if (NumElts == 4 && VT.getSizeInBits() == 128) { 11145 return (isMOVLMask(Mask, VT) || 11146 isCommutedMOVLMask(Mask, VT, true) || 11147 isSHUFPMask(Mask, VT) || 11148 isSHUFPMask(Mask, VT, /* Commuted */ true)); 11149 } 11150 return false; 11151} 11152 11153//===----------------------------------------------------------------------===// 11154// X86 Scheduler Hooks 11155//===----------------------------------------------------------------------===// 11156 11157// private utility function 11158MachineBasicBlock * 11159X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr, 11160 MachineBasicBlock *MBB, 11161 unsigned regOpc, 11162 unsigned immOpc, 11163 unsigned LoadOpc, 11164 unsigned CXchgOpc, 11165 unsigned notOpc, 11166 unsigned EAXreg, 11167 TargetRegisterClass *RC, 11168 bool invSrc) const { 11169 // For the atomic bitwise operator, we generate 11170 // thisMBB: 11171 // newMBB: 11172 // ld t1 = [bitinstr.addr] 11173 // op t2 = t1, [bitinstr.val] 11174 // mov EAX = t1 11175 // lcs dest = [bitinstr.addr], t2 [EAX is implicit] 11176 // bz newMBB 11177 // fallthrough -->nextMBB 11178 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 11179 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 11180 MachineFunction::iterator MBBIter = MBB; 11181 ++MBBIter; 11182 11183 /// First build the CFG 11184 MachineFunction *F = MBB->getParent(); 11185 MachineBasicBlock *thisMBB = MBB; 11186 MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB); 11187 MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB); 11188 F->insert(MBBIter, newMBB); 11189 F->insert(MBBIter, nextMBB); 11190 11191 // Transfer the remainder of thisMBB and its successor edges to nextMBB. 11192 nextMBB->splice(nextMBB->begin(), thisMBB, 11193 llvm::next(MachineBasicBlock::iterator(bInstr)), 11194 thisMBB->end()); 11195 nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB); 11196 11197 // Update thisMBB to fall through to newMBB 11198 thisMBB->addSuccessor(newMBB); 11199 11200 // newMBB jumps to itself and fall through to nextMBB 11201 newMBB->addSuccessor(nextMBB); 11202 newMBB->addSuccessor(newMBB); 11203 11204 // Insert instructions into newMBB based on incoming instruction 11205 assert(bInstr->getNumOperands() < X86::AddrNumOperands + 4 && 11206 "unexpected number of operands"); 11207 DebugLoc dl = bInstr->getDebugLoc(); 11208 MachineOperand& destOper = bInstr->getOperand(0); 11209 MachineOperand* argOpers[2 + X86::AddrNumOperands]; 11210 int numArgs = bInstr->getNumOperands() - 1; 11211 for (int i=0; i < numArgs; ++i) 11212 argOpers[i] = &bInstr->getOperand(i+1); 11213 11214 // x86 address has 4 operands: base, index, scale, and displacement 11215 int lastAddrIndx = X86::AddrNumOperands - 1; // [0,3] 11216 int valArgIndx = lastAddrIndx + 1; 11217 11218 unsigned t1 = F->getRegInfo().createVirtualRegister(RC); 11219 MachineInstrBuilder MIB = BuildMI(newMBB, dl, TII->get(LoadOpc), t1); 11220 for (int i=0; i <= lastAddrIndx; ++i) 11221 (*MIB).addOperand(*argOpers[i]); 11222 11223 unsigned tt = F->getRegInfo().createVirtualRegister(RC); 11224 if (invSrc) { 11225 MIB = BuildMI(newMBB, dl, TII->get(notOpc), tt).addReg(t1); 11226 } 11227 else 11228 tt = t1; 11229 11230 unsigned t2 = F->getRegInfo().createVirtualRegister(RC); 11231 assert((argOpers[valArgIndx]->isReg() || 11232 argOpers[valArgIndx]->isImm()) && 11233 "invalid operand"); 11234 if (argOpers[valArgIndx]->isReg()) 11235 MIB = BuildMI(newMBB, dl, TII->get(regOpc), t2); 11236 else 
11237 MIB = BuildMI(newMBB, dl, TII->get(immOpc), t2); 11238 MIB.addReg(tt); 11239 (*MIB).addOperand(*argOpers[valArgIndx]); 11240 11241 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), EAXreg); 11242 MIB.addReg(t1); 11243 11244 MIB = BuildMI(newMBB, dl, TII->get(CXchgOpc)); 11245 for (int i=0; i <= lastAddrIndx; ++i) 11246 (*MIB).addOperand(*argOpers[i]); 11247 MIB.addReg(t2); 11248 assert(bInstr->hasOneMemOperand() && "Unexpected number of memoperand"); 11249 (*MIB).setMemRefs(bInstr->memoperands_begin(), 11250 bInstr->memoperands_end()); 11251 11252 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), destOper.getReg()); 11253 MIB.addReg(EAXreg); 11254 11255 // insert branch 11256 BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB); 11257 11258 bInstr->eraseFromParent(); // The pseudo instruction is gone now. 11259 return nextMBB; 11260} 11261 11262// private utility function: 64 bit atomics on 32 bit host. 11263MachineBasicBlock * 11264X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr, 11265 MachineBasicBlock *MBB, 11266 unsigned regOpcL, 11267 unsigned regOpcH, 11268 unsigned immOpcL, 11269 unsigned immOpcH, 11270 bool invSrc) const { 11271 // For the atomic bitwise operator, we generate 11272 // thisMBB (instructions are in pairs, except cmpxchg8b) 11273 // ld t1,t2 = [bitinstr.addr] 11274 // newMBB: 11275 // out1, out2 = phi (thisMBB, t1/t2) (newMBB, t3/t4) 11276 // op t5, t6 <- out1, out2, [bitinstr.val] 11277 // (for SWAP, substitute: mov t5, t6 <- [bitinstr.val]) 11278 // mov ECX, EBX <- t5, t6 11279 // mov EAX, EDX <- t1, t2 11280 // cmpxchg8b [bitinstr.addr] [EAX, EDX, EBX, ECX implicit] 11281 // mov t3, t4 <- EAX, EDX 11282 // bz newMBB 11283 // result in out1, out2 11284 // fallthrough -->nextMBB 11285 11286 const TargetRegisterClass *RC = X86::GR32RegisterClass; 11287 const unsigned LoadOpc = X86::MOV32rm; 11288 const unsigned NotOpc = X86::NOT32r; 11289 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 11290 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 11291 MachineFunction::iterator MBBIter = MBB; 11292 ++MBBIter; 11293 11294 /// First build the CFG 11295 MachineFunction *F = MBB->getParent(); 11296 MachineBasicBlock *thisMBB = MBB; 11297 MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB); 11298 MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB); 11299 F->insert(MBBIter, newMBB); 11300 F->insert(MBBIter, nextMBB); 11301 11302 // Transfer the remainder of thisMBB and its successor edges to nextMBB. 11303 nextMBB->splice(nextMBB->begin(), thisMBB, 11304 llvm::next(MachineBasicBlock::iterator(bInstr)), 11305 thisMBB->end()); 11306 nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB); 11307 11308 // Update thisMBB to fall through to newMBB 11309 thisMBB->addSuccessor(newMBB); 11310 11311 // newMBB jumps to itself and fall through to nextMBB 11312 newMBB->addSuccessor(nextMBB); 11313 newMBB->addSuccessor(newMBB); 11314 11315 DebugLoc dl = bInstr->getDebugLoc(); 11316 // Insert instructions into newMBB based on incoming instruction 11317 // There are 8 "real" operands plus 9 implicit def/uses, ignored here. 
11318 assert(bInstr->getNumOperands() < X86::AddrNumOperands + 14 && 11319 "unexpected number of operands"); 11320 MachineOperand& dest1Oper = bInstr->getOperand(0); 11321 MachineOperand& dest2Oper = bInstr->getOperand(1); 11322 MachineOperand* argOpers[2 + X86::AddrNumOperands]; 11323 for (int i=0; i < 2 + X86::AddrNumOperands; ++i) { 11324 argOpers[i] = &bInstr->getOperand(i+2); 11325 11326 // We use some of the operands multiple times, so conservatively just 11327 // clear any kill flags that might be present. 11328 if (argOpers[i]->isReg() && argOpers[i]->isUse()) 11329 argOpers[i]->setIsKill(false); 11330 } 11331 11332 // x86 address has 5 operands: base, index, scale, displacement, and segment. 11333 int lastAddrIndx = X86::AddrNumOperands - 1; // [0,3] 11334 11335 unsigned t1 = F->getRegInfo().createVirtualRegister(RC); 11336 MachineInstrBuilder MIB = BuildMI(thisMBB, dl, TII->get(LoadOpc), t1); 11337 for (int i=0; i <= lastAddrIndx; ++i) 11338 (*MIB).addOperand(*argOpers[i]); 11339 unsigned t2 = F->getRegInfo().createVirtualRegister(RC); 11340 MIB = BuildMI(thisMBB, dl, TII->get(LoadOpc), t2); 11341 // add 4 to displacement. 11342 for (int i=0; i <= lastAddrIndx-2; ++i) 11343 (*MIB).addOperand(*argOpers[i]); 11344 MachineOperand newOp3 = *(argOpers[3]); 11345 if (newOp3.isImm()) 11346 newOp3.setImm(newOp3.getImm()+4); 11347 else 11348 newOp3.setOffset(newOp3.getOffset()+4); 11349 (*MIB).addOperand(newOp3); 11350 (*MIB).addOperand(*argOpers[lastAddrIndx]); 11351 11352 // t3/4 are defined later, at the bottom of the loop 11353 unsigned t3 = F->getRegInfo().createVirtualRegister(RC); 11354 unsigned t4 = F->getRegInfo().createVirtualRegister(RC); 11355 BuildMI(newMBB, dl, TII->get(X86::PHI), dest1Oper.getReg()) 11356 .addReg(t1).addMBB(thisMBB).addReg(t3).addMBB(newMBB); 11357 BuildMI(newMBB, dl, TII->get(X86::PHI), dest2Oper.getReg()) 11358 .addReg(t2).addMBB(thisMBB).addReg(t4).addMBB(newMBB); 11359 11360 // The subsequent operations should be using the destination registers of 11361 //the PHI instructions. 
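  // (For inverted operations such as NAND, invSrc is set and the two PHI
  //  results are first complemented with NOT32r; otherwise they are used
  //  directly as the low/high inputs of the binary op.)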
11362 if (invSrc) { 11363 t1 = F->getRegInfo().createVirtualRegister(RC); 11364 t2 = F->getRegInfo().createVirtualRegister(RC); 11365 MIB = BuildMI(newMBB, dl, TII->get(NotOpc), t1).addReg(dest1Oper.getReg()); 11366 MIB = BuildMI(newMBB, dl, TII->get(NotOpc), t2).addReg(dest2Oper.getReg()); 11367 } else { 11368 t1 = dest1Oper.getReg(); 11369 t2 = dest2Oper.getReg(); 11370 } 11371 11372 int valArgIndx = lastAddrIndx + 1; 11373 assert((argOpers[valArgIndx]->isReg() || 11374 argOpers[valArgIndx]->isImm()) && 11375 "invalid operand"); 11376 unsigned t5 = F->getRegInfo().createVirtualRegister(RC); 11377 unsigned t6 = F->getRegInfo().createVirtualRegister(RC); 11378 if (argOpers[valArgIndx]->isReg()) 11379 MIB = BuildMI(newMBB, dl, TII->get(regOpcL), t5); 11380 else 11381 MIB = BuildMI(newMBB, dl, TII->get(immOpcL), t5); 11382 if (regOpcL != X86::MOV32rr) 11383 MIB.addReg(t1); 11384 (*MIB).addOperand(*argOpers[valArgIndx]); 11385 assert(argOpers[valArgIndx + 1]->isReg() == 11386 argOpers[valArgIndx]->isReg()); 11387 assert(argOpers[valArgIndx + 1]->isImm() == 11388 argOpers[valArgIndx]->isImm()); 11389 if (argOpers[valArgIndx + 1]->isReg()) 11390 MIB = BuildMI(newMBB, dl, TII->get(regOpcH), t6); 11391 else 11392 MIB = BuildMI(newMBB, dl, TII->get(immOpcH), t6); 11393 if (regOpcH != X86::MOV32rr) 11394 MIB.addReg(t2); 11395 (*MIB).addOperand(*argOpers[valArgIndx + 1]); 11396 11397 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EAX); 11398 MIB.addReg(t1); 11399 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EDX); 11400 MIB.addReg(t2); 11401 11402 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EBX); 11403 MIB.addReg(t5); 11404 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::ECX); 11405 MIB.addReg(t6); 11406 11407 MIB = BuildMI(newMBB, dl, TII->get(X86::LCMPXCHG8B)); 11408 for (int i=0; i <= lastAddrIndx; ++i) 11409 (*MIB).addOperand(*argOpers[i]); 11410 11411 assert(bInstr->hasOneMemOperand() && "Unexpected number of memoperand"); 11412 (*MIB).setMemRefs(bInstr->memoperands_begin(), 11413 bInstr->memoperands_end()); 11414 11415 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t3); 11416 MIB.addReg(X86::EAX); 11417 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t4); 11418 MIB.addReg(X86::EDX); 11419 11420 // insert branch 11421 BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB); 11422 11423 bInstr->eraseFromParent(); // The pseudo instruction is gone now. 
11424 return nextMBB; 11425} 11426 11427// private utility function 11428MachineBasicBlock * 11429X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr, 11430 MachineBasicBlock *MBB, 11431 unsigned cmovOpc) const { 11432 // For the atomic min/max operator, we generate 11433 // thisMBB: 11434 // newMBB: 11435 // ld t1 = [min/max.addr] 11436 // mov t2 = [min/max.val] 11437 // cmp t1, t2 11438 // cmov[cond] t2 = t1 11439 // mov EAX = t1 11440 // lcs dest = [bitinstr.addr], t2 [EAX is implicit] 11441 // bz newMBB 11442 // fallthrough -->nextMBB 11443 // 11444 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 11445 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 11446 MachineFunction::iterator MBBIter = MBB; 11447 ++MBBIter; 11448 11449 /// First build the CFG 11450 MachineFunction *F = MBB->getParent(); 11451 MachineBasicBlock *thisMBB = MBB; 11452 MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB); 11453 MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB); 11454 F->insert(MBBIter, newMBB); 11455 F->insert(MBBIter, nextMBB); 11456 11457 // Transfer the remainder of thisMBB and its successor edges to nextMBB. 11458 nextMBB->splice(nextMBB->begin(), thisMBB, 11459 llvm::next(MachineBasicBlock::iterator(mInstr)), 11460 thisMBB->end()); 11461 nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB); 11462 11463 // Update thisMBB to fall through to newMBB 11464 thisMBB->addSuccessor(newMBB); 11465 11466 // newMBB jumps to newMBB and fall through to nextMBB 11467 newMBB->addSuccessor(nextMBB); 11468 newMBB->addSuccessor(newMBB); 11469 11470 DebugLoc dl = mInstr->getDebugLoc(); 11471 // Insert instructions into newMBB based on incoming instruction 11472 assert(mInstr->getNumOperands() < X86::AddrNumOperands + 4 && 11473 "unexpected number of operands"); 11474 MachineOperand& destOper = mInstr->getOperand(0); 11475 MachineOperand* argOpers[2 + X86::AddrNumOperands]; 11476 int numArgs = mInstr->getNumOperands() - 1; 11477 for (int i=0; i < numArgs; ++i) 11478 argOpers[i] = &mInstr->getOperand(i+1); 11479 11480 // x86 address has 4 operands: base, index, scale, and displacement 11481 int lastAddrIndx = X86::AddrNumOperands - 1; // [0,3] 11482 int valArgIndx = lastAddrIndx + 1; 11483 11484 unsigned t1 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass); 11485 MachineInstrBuilder MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rm), t1); 11486 for (int i=0; i <= lastAddrIndx; ++i) 11487 (*MIB).addOperand(*argOpers[i]); 11488 11489 // We only support register and immediate values 11490 assert((argOpers[valArgIndx]->isReg() || 11491 argOpers[valArgIndx]->isImm()) && 11492 "invalid operand"); 11493 11494 unsigned t2 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass); 11495 if (argOpers[valArgIndx]->isReg()) 11496 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t2); 11497 else 11498 MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rr), t2); 11499 (*MIB).addOperand(*argOpers[valArgIndx]); 11500 11501 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EAX); 11502 MIB.addReg(t1); 11503 11504 MIB = BuildMI(newMBB, dl, TII->get(X86::CMP32rr)); 11505 MIB.addReg(t1); 11506 MIB.addReg(t2); 11507 11508 // Generate movc 11509 unsigned t3 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass); 11510 MIB = BuildMI(newMBB, dl, TII->get(cmovOpc),t3); 11511 MIB.addReg(t2); 11512 MIB.addReg(t1); 11513 11514 // Cmp and exchange if none has modified the memory location 11515 MIB = BuildMI(newMBB, dl, TII->get(X86::LCMPXCHG32)); 
11516 for (int i=0; i <= lastAddrIndx; ++i) 11517 (*MIB).addOperand(*argOpers[i]); 11518 MIB.addReg(t3); 11519 assert(mInstr->hasOneMemOperand() && "Unexpected number of memoperand"); 11520 (*MIB).setMemRefs(mInstr->memoperands_begin(), 11521 mInstr->memoperands_end()); 11522 11523 MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), destOper.getReg()); 11524 MIB.addReg(X86::EAX); 11525 11526 // insert branch 11527 BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB); 11528 11529 mInstr->eraseFromParent(); // The pseudo instruction is gone now. 11530 return nextMBB; 11531} 11532 11533// FIXME: When we get size specific XMM0 registers, i.e. XMM0_V16I8 11534// or XMM0_V32I8 in AVX all of this code can be replaced with that 11535// in the .td file. 11536MachineBasicBlock * 11537X86TargetLowering::EmitPCMP(MachineInstr *MI, MachineBasicBlock *BB, 11538 unsigned numArgs, bool memArg) const { 11539 assert(Subtarget->hasSSE42() && 11540 "Target must have SSE4.2 or AVX features enabled"); 11541 11542 DebugLoc dl = MI->getDebugLoc(); 11543 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 11544 unsigned Opc; 11545 if (!Subtarget->hasAVX()) { 11546 if (memArg) 11547 Opc = numArgs == 3 ? X86::PCMPISTRM128rm : X86::PCMPESTRM128rm; 11548 else 11549 Opc = numArgs == 3 ? X86::PCMPISTRM128rr : X86::PCMPESTRM128rr; 11550 } else { 11551 if (memArg) 11552 Opc = numArgs == 3 ? X86::VPCMPISTRM128rm : X86::VPCMPESTRM128rm; 11553 else 11554 Opc = numArgs == 3 ? X86::VPCMPISTRM128rr : X86::VPCMPESTRM128rr; 11555 } 11556 11557 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc)); 11558 for (unsigned i = 0; i < numArgs; ++i) { 11559 MachineOperand &Op = MI->getOperand(i+1); 11560 if (!(Op.isReg() && Op.isImplicit())) 11561 MIB.addOperand(Op); 11562 } 11563 BuildMI(*BB, MI, dl, 11564 TII->get(Subtarget->hasAVX() ? X86::VMOVAPSrr : X86::MOVAPSrr), 11565 MI->getOperand(0).getReg()) 11566 .addReg(X86::XMM0); 11567 11568 MI->eraseFromParent(); 11569 return BB; 11570} 11571 11572MachineBasicBlock * 11573X86TargetLowering::EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB) const { 11574 DebugLoc dl = MI->getDebugLoc(); 11575 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 11576 11577 // Address into RAX/EAX, other two args into ECX, EDX. 11578 unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r; 11579 unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX; 11580 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg); 11581 for (int i = 0; i < X86::AddrNumOperands; ++i) 11582 MIB.addOperand(MI->getOperand(i)); 11583 11584 unsigned ValOps = X86::AddrNumOperands; 11585 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX) 11586 .addReg(MI->getOperand(ValOps).getReg()); 11587 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX) 11588 .addReg(MI->getOperand(ValOps+1).getReg()); 11589 11590 // The instruction doesn't actually take any operands though. 11591 BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr)); 11592 11593 MI->eraseFromParent(); // The pseudo is gone now. 11594 return BB; 11595} 11596 11597MachineBasicBlock * 11598X86TargetLowering::EmitMwait(MachineInstr *MI, MachineBasicBlock *BB) const { 11599 DebugLoc dl = MI->getDebugLoc(); 11600 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 11601 11602 // First arg in ECX, the second in EAX. 
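  // (MWAIT implicitly reads its extensions operand from ECX and its hints
  //  operand from EAX; the address being monitored is supplied by a
  //  preceding MONITOR, so only these two copies are needed here.)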
11603 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX) 11604 .addReg(MI->getOperand(0).getReg()); 11605 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EAX) 11606 .addReg(MI->getOperand(1).getReg()); 11607 11608 // The instruction doesn't actually take any operands though. 11609 BuildMI(*BB, MI, dl, TII->get(X86::MWAITrr)); 11610 11611 MI->eraseFromParent(); // The pseudo is gone now. 11612 return BB; 11613} 11614 11615MachineBasicBlock * 11616X86TargetLowering::EmitVAARG64WithCustomInserter( 11617 MachineInstr *MI, 11618 MachineBasicBlock *MBB) const { 11619 // Emit va_arg instruction on X86-64. 11620 11621 // Operands to this pseudo-instruction: 11622 // 0 ) Output : destination address (reg) 11623 // 1-5) Input : va_list address (addr, i64mem) 11624 // 6 ) ArgSize : Size (in bytes) of vararg type 11625 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset 11626 // 8 ) Align : Alignment of type 11627 // 9 ) EFLAGS (implicit-def) 11628 11629 assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!"); 11630 assert(X86::AddrNumOperands == 5 && "VAARG_64 assumes 5 address operands"); 11631 11632 unsigned DestReg = MI->getOperand(0).getReg(); 11633 MachineOperand &Base = MI->getOperand(1); 11634 MachineOperand &Scale = MI->getOperand(2); 11635 MachineOperand &Index = MI->getOperand(3); 11636 MachineOperand &Disp = MI->getOperand(4); 11637 MachineOperand &Segment = MI->getOperand(5); 11638 unsigned ArgSize = MI->getOperand(6).getImm(); 11639 unsigned ArgMode = MI->getOperand(7).getImm(); 11640 unsigned Align = MI->getOperand(8).getImm(); 11641 11642 // Memory Reference 11643 assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand"); 11644 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 11645 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 11646 11647 // Machine Information 11648 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 11649 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 11650 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64); 11651 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32); 11652 DebugLoc DL = MI->getDebugLoc(); 11653 11654 // struct va_list { 11655 // i32 gp_offset 11656 // i32 fp_offset 11657 // i64 overflow_area (address) 11658 // i64 reg_save_area (address) 11659 // } 11660 // sizeof(va_list) = 24 11661 // alignment(va_list) = 8 11662 11663 unsigned TotalNumIntRegs = 6; 11664 unsigned TotalNumXMMRegs = 8; 11665 bool UseGPOffset = (ArgMode == 1); 11666 bool UseFPOffset = (ArgMode == 2); 11667 unsigned MaxOffset = TotalNumIntRegs * 8 + 11668 (UseFPOffset ? TotalNumXMMRegs * 16 : 0); 11669 11670 /* Align ArgSize to a multiple of 8 */ 11671 unsigned ArgSizeA8 = (ArgSize + 7) & ~7; 11672 bool NeedsAlign = (Align > 8); 11673 11674 MachineBasicBlock *thisMBB = MBB; 11675 MachineBasicBlock *overflowMBB; 11676 MachineBasicBlock *offsetMBB; 11677 MachineBasicBlock *endMBB; 11678 11679 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB 11680 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB 11681 unsigned OffsetReg = 0; 11682 11683 if (!UseGPOffset && !UseFPOffset) { 11684 // If we only pull from the overflow region, we don't create a branch. 11685 // We don't need to alter control flow. 
11686 OffsetDestReg = 0; // unused 11687 OverflowDestReg = DestReg; 11688 11689 offsetMBB = NULL; 11690 overflowMBB = thisMBB; 11691 endMBB = thisMBB; 11692 } else { 11693 // First emit code to check if gp_offset (or fp_offset) is below the bound. 11694 // If so, pull the argument from reg_save_area. (branch to offsetMBB) 11695 // If not, pull from overflow_area. (branch to overflowMBB) 11696 // 11697 // thisMBB 11698 // | . 11699 // | . 11700 // offsetMBB overflowMBB 11701 // | . 11702 // | . 11703 // endMBB 11704 11705 // Registers for the PHI in endMBB 11706 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass); 11707 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass); 11708 11709 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 11710 MachineFunction *MF = MBB->getParent(); 11711 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB); 11712 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB); 11713 endMBB = MF->CreateMachineBasicBlock(LLVM_BB); 11714 11715 MachineFunction::iterator MBBIter = MBB; 11716 ++MBBIter; 11717 11718 // Insert the new basic blocks 11719 MF->insert(MBBIter, offsetMBB); 11720 MF->insert(MBBIter, overflowMBB); 11721 MF->insert(MBBIter, endMBB); 11722 11723 // Transfer the remainder of MBB and its successor edges to endMBB. 11724 endMBB->splice(endMBB->begin(), thisMBB, 11725 llvm::next(MachineBasicBlock::iterator(MI)), 11726 thisMBB->end()); 11727 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB); 11728 11729 // Make offsetMBB and overflowMBB successors of thisMBB 11730 thisMBB->addSuccessor(offsetMBB); 11731 thisMBB->addSuccessor(overflowMBB); 11732 11733 // endMBB is a successor of both offsetMBB and overflowMBB 11734 offsetMBB->addSuccessor(endMBB); 11735 overflowMBB->addSuccessor(endMBB); 11736 11737 // Load the offset value into a register 11738 OffsetReg = MRI.createVirtualRegister(OffsetRegClass); 11739 BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg) 11740 .addOperand(Base) 11741 .addOperand(Scale) 11742 .addOperand(Index) 11743 .addDisp(Disp, UseFPOffset ? 4 : 0) 11744 .addOperand(Segment) 11745 .setMemRefs(MMOBegin, MMOEnd); 11746 11747 // Check if there is enough room left to pull this argument. 11748 BuildMI(thisMBB, DL, TII->get(X86::CMP32ri)) 11749 .addReg(OffsetReg) 11750 .addImm(MaxOffset + 8 - ArgSizeA8); 11751 11752 // Branch to "overflowMBB" if offset >= max 11753 // Fall through to "offsetMBB" otherwise 11754 BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE))) 11755 .addMBB(overflowMBB); 11756 } 11757 11758 // In offsetMBB, emit code to use the reg_save_area. 11759 if (offsetMBB) { 11760 assert(OffsetReg != 0); 11761 11762 // Read the reg_save_area address. 11763 unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass); 11764 BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg) 11765 .addOperand(Base) 11766 .addOperand(Scale) 11767 .addOperand(Index) 11768 .addDisp(Disp, 16) 11769 .addOperand(Segment) 11770 .setMemRefs(MMOBegin, MMOEnd); 11771 11772 // Zero-extend the offset 11773 unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass); 11774 BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64) 11775 .addImm(0) 11776 .addReg(OffsetReg) 11777 .addImm(X86::sub_32bit); 11778 11779 // Add the offset to the reg_save_area to get the final address. 
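    // (Per the SysV AMD64 ABI, the six GP argument registers occupy bytes
    //  0..47 of reg_save_area and the eight XMM registers bytes 48..175, so
    //  gp_offset/fp_offset are plain byte offsets into that block.)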
11780 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg) 11781 .addReg(OffsetReg64) 11782 .addReg(RegSaveReg); 11783 11784 // Compute the offset for the next argument 11785 unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass); 11786 BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg) 11787 .addReg(OffsetReg) 11788 .addImm(UseFPOffset ? 16 : 8); 11789 11790 // Store it back into the va_list. 11791 BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr)) 11792 .addOperand(Base) 11793 .addOperand(Scale) 11794 .addOperand(Index) 11795 .addDisp(Disp, UseFPOffset ? 4 : 0) 11796 .addOperand(Segment) 11797 .addReg(NextOffsetReg) 11798 .setMemRefs(MMOBegin, MMOEnd); 11799 11800 // Jump to endMBB 11801 BuildMI(offsetMBB, DL, TII->get(X86::JMP_4)) 11802 .addMBB(endMBB); 11803 } 11804 11805 // 11806 // Emit code to use overflow area 11807 // 11808 11809 // Load the overflow_area address into a register. 11810 unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass); 11811 BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg) 11812 .addOperand(Base) 11813 .addOperand(Scale) 11814 .addOperand(Index) 11815 .addDisp(Disp, 8) 11816 .addOperand(Segment) 11817 .setMemRefs(MMOBegin, MMOEnd); 11818 11819 // If we need to align it, do so. Otherwise, just copy the address 11820 // to OverflowDestReg. 11821 if (NeedsAlign) { 11822 // Align the overflow address 11823 assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2"); 11824 unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass); 11825 11826 // aligned_addr = (addr + (align-1)) & ~(align-1) 11827 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg) 11828 .addReg(OverflowAddrReg) 11829 .addImm(Align-1); 11830 11831 BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg) 11832 .addReg(TmpReg) 11833 .addImm(~(uint64_t)(Align-1)); 11834 } else { 11835 BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg) 11836 .addReg(OverflowAddrReg); 11837 } 11838 11839 // Compute the next overflow address after this argument. 11840 // (the overflow address should be kept 8-byte aligned) 11841 unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass); 11842 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg) 11843 .addReg(OverflowDestReg) 11844 .addImm(ArgSizeA8); 11845 11846 // Store the new overflow address. 11847 BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr)) 11848 .addOperand(Base) 11849 .addOperand(Scale) 11850 .addOperand(Index) 11851 .addDisp(Disp, 8) 11852 .addOperand(Segment) 11853 .addReg(NextAddrReg) 11854 .setMemRefs(MMOBegin, MMOEnd); 11855 11856 // If we branched, emit the PHI to the front of endMBB. 11857 if (offsetMBB) { 11858 BuildMI(*endMBB, endMBB->begin(), DL, 11859 TII->get(X86::PHI), DestReg) 11860 .addReg(OffsetDestReg).addMBB(offsetMBB) 11861 .addReg(OverflowDestReg).addMBB(overflowMBB); 11862 } 11863 11864 // Erase the pseudo instruction 11865 MI->eraseFromParent(); 11866 11867 return endMBB; 11868} 11869 11870MachineBasicBlock * 11871X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter( 11872 MachineInstr *MI, 11873 MachineBasicBlock *MBB) const { 11874 // Emit code to save XMM registers to the stack. The ABI says that the 11875 // number of registers to save is given in %al, so it's theoretically 11876 // possible to do an indirect jump trick to avoid saving all of them, 11877 // however this code takes a simpler approach and just executes all 11878 // of the stores if %al is non-zero. 
It's less code, and it's probably 11879 // easier on the hardware branch predictor, and stores aren't all that 11880 // expensive anyway. 11881 11882 // Create the new basic blocks. One block contains all the XMM stores, 11883 // and one block is the final destination regardless of whether any 11884 // stores were performed. 11885 const BasicBlock *LLVM_BB = MBB->getBasicBlock(); 11886 MachineFunction *F = MBB->getParent(); 11887 MachineFunction::iterator MBBIter = MBB; 11888 ++MBBIter; 11889 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB); 11890 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB); 11891 F->insert(MBBIter, XMMSaveMBB); 11892 F->insert(MBBIter, EndMBB); 11893 11894 // Transfer the remainder of MBB and its successor edges to EndMBB. 11895 EndMBB->splice(EndMBB->begin(), MBB, 11896 llvm::next(MachineBasicBlock::iterator(MI)), 11897 MBB->end()); 11898 EndMBB->transferSuccessorsAndUpdatePHIs(MBB); 11899 11900 // The original block will now fall through to the XMM save block. 11901 MBB->addSuccessor(XMMSaveMBB); 11902 // The XMMSaveMBB will fall through to the end block. 11903 XMMSaveMBB->addSuccessor(EndMBB); 11904 11905 // Now add the instructions. 11906 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 11907 DebugLoc DL = MI->getDebugLoc(); 11908 11909 unsigned CountReg = MI->getOperand(0).getReg(); 11910 int64_t RegSaveFrameIndex = MI->getOperand(1).getImm(); 11911 int64_t VarArgsFPOffset = MI->getOperand(2).getImm(); 11912 11913 if (!Subtarget->isTargetWin64()) { 11914 // If %al is 0, branch around the XMM save block. 11915 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg); 11916 BuildMI(MBB, DL, TII->get(X86::JE_4)).addMBB(EndMBB); 11917 MBB->addSuccessor(EndMBB); 11918 } 11919 11920 unsigned MOVOpc = Subtarget->hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr; 11921 // In the XMM save block, save all the XMM argument registers. 11922 for (int i = 3, e = MI->getNumOperands(); i != e; ++i) { 11923 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset; 11924 MachineMemOperand *MMO = 11925 F->getMachineMemOperand( 11926 MachinePointerInfo::getFixedStack(RegSaveFrameIndex, Offset), 11927 MachineMemOperand::MOStore, 11928 /*Size=*/16, /*Align=*/16); 11929 BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc)) 11930 .addFrameIndex(RegSaveFrameIndex) 11931 .addImm(/*Scale=*/1) 11932 .addReg(/*IndexReg=*/0) 11933 .addImm(/*Disp=*/Offset) 11934 .addReg(/*Segment=*/0) 11935 .addReg(MI->getOperand(i).getReg()) 11936 .addMemOperand(MMO); 11937 } 11938 11939 MI->eraseFromParent(); // The pseudo instruction is gone now. 11940 11941 return EndMBB; 11942} 11943 11944MachineBasicBlock * 11945X86TargetLowering::EmitLoweredSelect(MachineInstr *MI, 11946 MachineBasicBlock *BB) const { 11947 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 11948 DebugLoc DL = MI->getDebugLoc(); 11949 11950 // To "insert" a SELECT_CC instruction, we actually have to insert the 11951 // diamond control-flow pattern. The incoming instruction knows the 11952 // destination vreg to set, the condition code register to branch on, the 11953 // true/false values to select between, and a branch opcode to use. 11954 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 11955 MachineFunction::iterator It = BB; 11956 ++It; 11957 11958 // thisMBB: 11959 // ... 11960 // TrueVal = ... 
11961 // cmpTY ccX, r1, r2 11962 // bCC copy1MBB 11963 // fallthrough --> copy0MBB 11964 MachineBasicBlock *thisMBB = BB; 11965 MachineFunction *F = BB->getParent(); 11966 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 11967 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 11968 F->insert(It, copy0MBB); 11969 F->insert(It, sinkMBB); 11970 11971 // If the EFLAGS register isn't dead in the terminator, then claim that it's 11972 // live into the sink and copy blocks. 11973 if (!MI->killsRegister(X86::EFLAGS)) { 11974 copy0MBB->addLiveIn(X86::EFLAGS); 11975 sinkMBB->addLiveIn(X86::EFLAGS); 11976 } 11977 11978 // Transfer the remainder of BB and its successor edges to sinkMBB. 11979 sinkMBB->splice(sinkMBB->begin(), BB, 11980 llvm::next(MachineBasicBlock::iterator(MI)), 11981 BB->end()); 11982 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 11983 11984 // Add the true and fallthrough blocks as its successors. 11985 BB->addSuccessor(copy0MBB); 11986 BB->addSuccessor(sinkMBB); 11987 11988 // Create the conditional branch instruction. 11989 unsigned Opc = 11990 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm()); 11991 BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB); 11992 11993 // copy0MBB: 11994 // %FalseValue = ... 11995 // # fallthrough to sinkMBB 11996 copy0MBB->addSuccessor(sinkMBB); 11997 11998 // sinkMBB: 11999 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 12000 // ... 12001 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 12002 TII->get(X86::PHI), MI->getOperand(0).getReg()) 12003 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 12004 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 12005 12006 MI->eraseFromParent(); // The pseudo instruction is gone now. 12007 return sinkMBB; 12008} 12009 12010MachineBasicBlock * 12011X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI, MachineBasicBlock *BB, 12012 bool Is64Bit) const { 12013 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 12014 DebugLoc DL = MI->getDebugLoc(); 12015 MachineFunction *MF = BB->getParent(); 12016 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 12017 12018 assert(getTargetMachine().Options.EnableSegmentedStacks); 12019 12020 unsigned TlsReg = Is64Bit ? X86::FS : X86::GS; 12021 unsigned TlsOffset = Is64Bit ? 0x70 : 0x30; 12022 12023 // BB: 12024 // ... [Till the alloca] 12025 // If stacklet is not large enough, jump to mallocMBB 12026 // 12027 // bumpMBB: 12028 // Allocate by subtracting from RSP 12029 // Jump to continueMBB 12030 // 12031 // mallocMBB: 12032 // Allocate by call to runtime 12033 // 12034 // continueMBB: 12035 // ... 12036 // [rest of original BB] 12037 // 12038 12039 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB); 12040 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB); 12041 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB); 12042 12043 MachineRegisterInfo &MRI = MF->getRegInfo(); 12044 const TargetRegisterClass *AddrRegClass = 12045 getRegClassFor(Is64Bit ? MVT::i64:MVT::i32); 12046 12047 unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass), 12048 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass), 12049 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass), 12050 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass), 12051 sizeVReg = MI->getOperand(1).getReg(), 12052 physSPReg = Is64Bit ? 
X86::RSP : X86::ESP; 12053 12054 MachineFunction::iterator MBBIter = BB; 12055 ++MBBIter; 12056 12057 MF->insert(MBBIter, bumpMBB); 12058 MF->insert(MBBIter, mallocMBB); 12059 MF->insert(MBBIter, continueMBB); 12060 12061 continueMBB->splice(continueMBB->begin(), BB, llvm::next 12062 (MachineBasicBlock::iterator(MI)), BB->end()); 12063 continueMBB->transferSuccessorsAndUpdatePHIs(BB); 12064 12065 // Add code to the main basic block to check if the stack limit has been hit, 12066 // and if so, jump to mallocMBB otherwise to bumpMBB. 12067 BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg); 12068 BuildMI(BB, DL, TII->get(Is64Bit ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg) 12069 .addReg(tmpSPVReg).addReg(sizeVReg); 12070 BuildMI(BB, DL, TII->get(Is64Bit ? X86::CMP64mr:X86::CMP32mr)) 12071 .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg) 12072 .addReg(SPLimitVReg); 12073 BuildMI(BB, DL, TII->get(X86::JG_4)).addMBB(mallocMBB); 12074 12075 // bumpMBB simply decreases the stack pointer, since we know the current 12076 // stacklet has enough space. 12077 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg) 12078 .addReg(SPLimitVReg); 12079 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg) 12080 .addReg(SPLimitVReg); 12081 BuildMI(bumpMBB, DL, TII->get(X86::JMP_4)).addMBB(continueMBB); 12082 12083 // Calls into a routine in libgcc to allocate more space from the heap. 12084 if (Is64Bit) { 12085 BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI) 12086 .addReg(sizeVReg); 12087 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32)) 12088 .addExternalSymbol("__morestack_allocate_stack_space").addReg(X86::RDI); 12089 } else { 12090 BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg) 12091 .addImm(12); 12092 BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg); 12093 BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32)) 12094 .addExternalSymbol("__morestack_allocate_stack_space"); 12095 } 12096 12097 if (!Is64Bit) 12098 BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg) 12099 .addImm(16); 12100 12101 BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg) 12102 .addReg(Is64Bit ? X86::RAX : X86::EAX); 12103 BuildMI(mallocMBB, DL, TII->get(X86::JMP_4)).addMBB(continueMBB); 12104 12105 // Set up the CFG correctly. 12106 BB->addSuccessor(bumpMBB); 12107 BB->addSuccessor(mallocMBB); 12108 mallocMBB->addSuccessor(continueMBB); 12109 bumpMBB->addSuccessor(continueMBB); 12110 12111 // Take care of the PHI nodes. 12112 BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI), 12113 MI->getOperand(0).getReg()) 12114 .addReg(mallocPtrVReg).addMBB(mallocMBB) 12115 .addReg(bumpSPPtrVReg).addMBB(bumpMBB); 12116 12117 // Delete the original pseudo instruction. 12118 MI->eraseFromParent(); 12119 12120 // And we're done. 12121 return continueMBB; 12122} 12123 12124MachineBasicBlock * 12125X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI, 12126 MachineBasicBlock *BB) const { 12127 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 12128 DebugLoc DL = MI->getDebugLoc(); 12129 12130 assert(!Subtarget->isTargetEnvMacho()); 12131 12132 // The lowering is pretty easy: we're just emitting the call to _alloca. The 12133 // non-trivial part is impdef of ESP. 12134 12135 if (Subtarget->isTargetWin64()) { 12136 if (Subtarget->isTargetCygMing()) { 12137 // ___chkstk(Mingw64): 12138 // Clobbers R10, R11, RAX and EFLAGS. 12139 // Updates RSP. 
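// The requested allocation size is expected in RAX; the pseudo built below
// marks RAX as both an implicit use and an implicit def.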
12140 BuildMI(*BB, MI, DL, TII->get(X86::W64ALLOCA))
12141 .addExternalSymbol("___chkstk")
12142 .addReg(X86::RAX, RegState::Implicit)
12143 .addReg(X86::RSP, RegState::Implicit)
12144 .addReg(X86::RAX, RegState::Define | RegState::Implicit)
12145 .addReg(X86::RSP, RegState::Define | RegState::Implicit)
12146 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
12147 } else {
12148 // __chkstk(MSVCRT): does not update stack pointer.
12149 // Clobbers R10, R11 and EFLAGS.
12150 // FIXME: RAX(allocated size) might be reused and not killed.
12151 BuildMI(*BB, MI, DL, TII->get(X86::W64ALLOCA))
12152 .addExternalSymbol("__chkstk")
12153 .addReg(X86::RAX, RegState::Implicit)
12154 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
12155 // RAX has the offset to be subtracted from RSP.
12156 BuildMI(*BB, MI, DL, TII->get(X86::SUB64rr), X86::RSP)
12157 .addReg(X86::RSP)
12158 .addReg(X86::RAX);
12159 }
12160 } else {
12161 const char *StackProbeSymbol =
12162 Subtarget->isTargetWindows() ? "_chkstk" : "_alloca";
12163
12164 BuildMI(*BB, MI, DL, TII->get(X86::CALLpcrel32))
12165 .addExternalSymbol(StackProbeSymbol)
12166 .addReg(X86::EAX, RegState::Implicit)
12167 .addReg(X86::ESP, RegState::Implicit)
12168 .addReg(X86::EAX, RegState::Define | RegState::Implicit)
12169 .addReg(X86::ESP, RegState::Define | RegState::Implicit)
12170 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
12171 }
12172
12173 MI->eraseFromParent(); // The pseudo instruction is gone now.
12174 return BB;
12175}
12176
12177MachineBasicBlock *
12178X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
12179 MachineBasicBlock *BB) const {
12180 // This is pretty easy. We're taking the value that we received from
12181 // our load from the relocation, sticking it in either RDI (x86-64)
12182 // or EAX and doing an indirect call. The return value will then
12183 // be in the normal return register.
12184 const X86InstrInfo *TII
12185 = static_cast<const X86InstrInfo*>(getTargetMachine().getInstrInfo());
12186 DebugLoc DL = MI->getDebugLoc();
12187 MachineFunction *F = BB->getParent();
12188
12189 assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?");
12190 assert(MI->getOperand(3).isGlobal() && "This should be a global");
12191
12192 if (Subtarget->is64Bit()) {
12193 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
12194 TII->get(X86::MOV64rm), X86::RDI)
12195 .addReg(X86::RIP)
12196 .addImm(0).addReg(0)
12197 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
12198 MI->getOperand(3).getTargetFlags())
12199 .addReg(0);
12200 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
12201 addDirectMem(MIB, X86::RDI);
12202 } else if (getTargetMachine().getRelocationModel() != Reloc::PIC_) {
12203 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
12204 TII->get(X86::MOV32rm), X86::EAX)
12205 .addReg(0)
12206 .addImm(0).addReg(0)
12207 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
12208 MI->getOperand(3).getTargetFlags())
12209 .addReg(0);
12210 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
12211 addDirectMem(MIB, X86::EAX);
12212 } else {
12213 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
12214 TII->get(X86::MOV32rm), X86::EAX)
12215 .addReg(TII->getGlobalBaseReg(F))
12216 .addImm(0).addReg(0)
12217 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
12218 MI->getOperand(3).getTargetFlags())
12219 .addReg(0);
12220 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
12221 addDirectMem(MIB, X86::EAX);
12222 }
12223
12224 MI->eraseFromParent(); // The pseudo instruction is gone now.
12225 return BB;
12226}
12227
12228MachineBasicBlock *
12229X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
12230 MachineBasicBlock *BB) const {
12231 switch (MI->getOpcode()) {
12232 default: assert(0 && "Unexpected instr type to insert");
12233 case X86::TAILJMPd64:
12234 case X86::TAILJMPr64:
12235 case X86::TAILJMPm64:
12236 assert(0 && "TAILJMP64 would not be touched here.");
12237 case X86::TCRETURNdi64:
12238 case X86::TCRETURNri64:
12239 case X86::TCRETURNmi64:
12240 // The defs of TCRETURNxx64 include Win64's callee-saved registers as a subset.
12241 // On AMD64, additional defs should be added before register allocation.
12242 if (!Subtarget->isTargetWin64()) {
12243 MI->addRegisterDefined(X86::RSI);
12244 MI->addRegisterDefined(X86::RDI);
12245 MI->addRegisterDefined(X86::XMM6);
12246 MI->addRegisterDefined(X86::XMM7);
12247 MI->addRegisterDefined(X86::XMM8);
12248 MI->addRegisterDefined(X86::XMM9);
12249 MI->addRegisterDefined(X86::XMM10);
12250 MI->addRegisterDefined(X86::XMM11);
12251 MI->addRegisterDefined(X86::XMM12);
12252 MI->addRegisterDefined(X86::XMM13);
12253 MI->addRegisterDefined(X86::XMM14);
12254 MI->addRegisterDefined(X86::XMM15);
12255 }
12256 return BB;
12257 case X86::WIN_ALLOCA:
12258 return EmitLoweredWinAlloca(MI, BB);
12259 case X86::SEG_ALLOCA_32:
12260 return EmitLoweredSegAlloca(MI, BB, false);
12261 case X86::SEG_ALLOCA_64:
12262 return EmitLoweredSegAlloca(MI, BB, true);
12263 case X86::TLSCall_32:
12264 case X86::TLSCall_64:
12265 return EmitLoweredTLSCall(MI, BB);
12266 case X86::CMOV_GR8:
12267 case X86::CMOV_FR32:
12268 case X86::CMOV_FR64:
12269 case X86::CMOV_V4F32:
12270 case X86::CMOV_V2F64:
12271 case X86::CMOV_V2I64:
12272 case X86::CMOV_V8F32:
12273 case X86::CMOV_V4F64:
12274 case X86::CMOV_V4I64:
12275 case X86::CMOV_GR16:
12276 case X86::CMOV_GR32:
12277 case X86::CMOV_RFP32:
12278 case X86::CMOV_RFP64:
12279 case X86::CMOV_RFP80:
12280 return EmitLoweredSelect(MI, BB);
12281
12282 case X86::FP32_TO_INT16_IN_MEM:
12283 case X86::FP32_TO_INT32_IN_MEM:
12284 case X86::FP32_TO_INT64_IN_MEM:
12285 case X86::FP64_TO_INT16_IN_MEM:
12286 case X86::FP64_TO_INT32_IN_MEM:
12287 case X86::FP64_TO_INT64_IN_MEM:
12288 case X86::FP80_TO_INT16_IN_MEM:
12289 case X86::FP80_TO_INT32_IN_MEM:
12290 case X86::FP80_TO_INT64_IN_MEM: {
12291 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
12292 DebugLoc DL = MI->getDebugLoc();
12293
12294 // Change the floating point control register to use "round towards zero"
12295 // mode when truncating to an integer value.
12296 MachineFunction *F = BB->getParent();
12297 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false);
12298 addFrameReference(BuildMI(*BB, MI, DL,
12299 TII->get(X86::FNSTCW16m)), CWFrameIdx);
12300
12301 // Load the old value of the high byte of the control word...
12302 unsigned OldCW =
12303 F->getRegInfo().createVirtualRegister(X86::GR16RegisterClass);
12304 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW),
12305 CWFrameIdx);
12306
12307 // Set the high part to be round to zero...
12308 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx)
12309 .addImm(0xC7F);
12310
12311 // Reload the modified control word now...
12312 addFrameReference(BuildMI(*BB, MI, DL, 12313 TII->get(X86::FLDCW16m)), CWFrameIdx); 12314 12315 // Restore the memory image of control word to original value 12316 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx) 12317 .addReg(OldCW); 12318 12319 // Get the X86 opcode to use. 12320 unsigned Opc; 12321 switch (MI->getOpcode()) { 12322 default: llvm_unreachable("illegal opcode!"); 12323 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break; 12324 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break; 12325 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break; 12326 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break; 12327 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break; 12328 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break; 12329 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break; 12330 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break; 12331 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break; 12332 } 12333 12334 X86AddressMode AM; 12335 MachineOperand &Op = MI->getOperand(0); 12336 if (Op.isReg()) { 12337 AM.BaseType = X86AddressMode::RegBase; 12338 AM.Base.Reg = Op.getReg(); 12339 } else { 12340 AM.BaseType = X86AddressMode::FrameIndexBase; 12341 AM.Base.FrameIndex = Op.getIndex(); 12342 } 12343 Op = MI->getOperand(1); 12344 if (Op.isImm()) 12345 AM.Scale = Op.getImm(); 12346 Op = MI->getOperand(2); 12347 if (Op.isImm()) 12348 AM.IndexReg = Op.getImm(); 12349 Op = MI->getOperand(3); 12350 if (Op.isGlobal()) { 12351 AM.GV = Op.getGlobal(); 12352 } else { 12353 AM.Disp = Op.getImm(); 12354 } 12355 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM) 12356 .addReg(MI->getOperand(X86::AddrNumOperands).getReg()); 12357 12358 // Reload the original control word now. 12359 addFrameReference(BuildMI(*BB, MI, DL, 12360 TII->get(X86::FLDCW16m)), CWFrameIdx); 12361 12362 MI->eraseFromParent(); // The pseudo instruction is gone now. 12363 return BB; 12364 } 12365 // String/text processing lowering. 12366 case X86::PCMPISTRM128REG: 12367 case X86::VPCMPISTRM128REG: 12368 return EmitPCMP(MI, BB, 3, false /* in-mem */); 12369 case X86::PCMPISTRM128MEM: 12370 case X86::VPCMPISTRM128MEM: 12371 return EmitPCMP(MI, BB, 3, true /* in-mem */); 12372 case X86::PCMPESTRM128REG: 12373 case X86::VPCMPESTRM128REG: 12374 return EmitPCMP(MI, BB, 5, false /* in mem */); 12375 case X86::PCMPESTRM128MEM: 12376 case X86::VPCMPESTRM128MEM: 12377 return EmitPCMP(MI, BB, 5, true /* in mem */); 12378 12379 // Thread synchronization. 12380 case X86::MONITOR: 12381 return EmitMonitor(MI, BB); 12382 case X86::MWAIT: 12383 return EmitMwait(MI, BB); 12384 12385 // Atomic Lowering. 
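// Each ATOM* pseudo below is expanded by its custom inserter into a
// load / operate / LCMPXCHG retry loop on the memory operand; the opcodes
// passed in pick the operation and the width, and the trailing 'true' on the
// NAND forms requests the extra NOT before the exchange.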
12386 case X86::ATOMAND32: 12387 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr, 12388 X86::AND32ri, X86::MOV32rm, 12389 X86::LCMPXCHG32, 12390 X86::NOT32r, X86::EAX, 12391 X86::GR32RegisterClass); 12392 case X86::ATOMOR32: 12393 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR32rr, 12394 X86::OR32ri, X86::MOV32rm, 12395 X86::LCMPXCHG32, 12396 X86::NOT32r, X86::EAX, 12397 X86::GR32RegisterClass); 12398 case X86::ATOMXOR32: 12399 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR32rr, 12400 X86::XOR32ri, X86::MOV32rm, 12401 X86::LCMPXCHG32, 12402 X86::NOT32r, X86::EAX, 12403 X86::GR32RegisterClass); 12404 case X86::ATOMNAND32: 12405 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr, 12406 X86::AND32ri, X86::MOV32rm, 12407 X86::LCMPXCHG32, 12408 X86::NOT32r, X86::EAX, 12409 X86::GR32RegisterClass, true); 12410 case X86::ATOMMIN32: 12411 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL32rr); 12412 case X86::ATOMMAX32: 12413 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG32rr); 12414 case X86::ATOMUMIN32: 12415 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB32rr); 12416 case X86::ATOMUMAX32: 12417 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA32rr); 12418 12419 case X86::ATOMAND16: 12420 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND16rr, 12421 X86::AND16ri, X86::MOV16rm, 12422 X86::LCMPXCHG16, 12423 X86::NOT16r, X86::AX, 12424 X86::GR16RegisterClass); 12425 case X86::ATOMOR16: 12426 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR16rr, 12427 X86::OR16ri, X86::MOV16rm, 12428 X86::LCMPXCHG16, 12429 X86::NOT16r, X86::AX, 12430 X86::GR16RegisterClass); 12431 case X86::ATOMXOR16: 12432 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR16rr, 12433 X86::XOR16ri, X86::MOV16rm, 12434 X86::LCMPXCHG16, 12435 X86::NOT16r, X86::AX, 12436 X86::GR16RegisterClass); 12437 case X86::ATOMNAND16: 12438 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND16rr, 12439 X86::AND16ri, X86::MOV16rm, 12440 X86::LCMPXCHG16, 12441 X86::NOT16r, X86::AX, 12442 X86::GR16RegisterClass, true); 12443 case X86::ATOMMIN16: 12444 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL16rr); 12445 case X86::ATOMMAX16: 12446 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG16rr); 12447 case X86::ATOMUMIN16: 12448 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB16rr); 12449 case X86::ATOMUMAX16: 12450 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA16rr); 12451 12452 case X86::ATOMAND8: 12453 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND8rr, 12454 X86::AND8ri, X86::MOV8rm, 12455 X86::LCMPXCHG8, 12456 X86::NOT8r, X86::AL, 12457 X86::GR8RegisterClass); 12458 case X86::ATOMOR8: 12459 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR8rr, 12460 X86::OR8ri, X86::MOV8rm, 12461 X86::LCMPXCHG8, 12462 X86::NOT8r, X86::AL, 12463 X86::GR8RegisterClass); 12464 case X86::ATOMXOR8: 12465 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR8rr, 12466 X86::XOR8ri, X86::MOV8rm, 12467 X86::LCMPXCHG8, 12468 X86::NOT8r, X86::AL, 12469 X86::GR8RegisterClass); 12470 case X86::ATOMNAND8: 12471 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND8rr, 12472 X86::AND8ri, X86::MOV8rm, 12473 X86::LCMPXCHG8, 12474 X86::NOT8r, X86::AL, 12475 X86::GR8RegisterClass, true); 12476 // FIXME: There are no CMOV8 instructions; MIN/MAX need some other way. 12477 // This group is for 64-bit host. 
12478 case X86::ATOMAND64: 12479 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr, 12480 X86::AND64ri32, X86::MOV64rm, 12481 X86::LCMPXCHG64, 12482 X86::NOT64r, X86::RAX, 12483 X86::GR64RegisterClass); 12484 case X86::ATOMOR64: 12485 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR64rr, 12486 X86::OR64ri32, X86::MOV64rm, 12487 X86::LCMPXCHG64, 12488 X86::NOT64r, X86::RAX, 12489 X86::GR64RegisterClass); 12490 case X86::ATOMXOR64: 12491 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR64rr, 12492 X86::XOR64ri32, X86::MOV64rm, 12493 X86::LCMPXCHG64, 12494 X86::NOT64r, X86::RAX, 12495 X86::GR64RegisterClass); 12496 case X86::ATOMNAND64: 12497 return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr, 12498 X86::AND64ri32, X86::MOV64rm, 12499 X86::LCMPXCHG64, 12500 X86::NOT64r, X86::RAX, 12501 X86::GR64RegisterClass, true); 12502 case X86::ATOMMIN64: 12503 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL64rr); 12504 case X86::ATOMMAX64: 12505 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG64rr); 12506 case X86::ATOMUMIN64: 12507 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB64rr); 12508 case X86::ATOMUMAX64: 12509 return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA64rr); 12510 12511 // This group does 64-bit operations on a 32-bit host. 12512 case X86::ATOMAND6432: 12513 return EmitAtomicBit6432WithCustomInserter(MI, BB, 12514 X86::AND32rr, X86::AND32rr, 12515 X86::AND32ri, X86::AND32ri, 12516 false); 12517 case X86::ATOMOR6432: 12518 return EmitAtomicBit6432WithCustomInserter(MI, BB, 12519 X86::OR32rr, X86::OR32rr, 12520 X86::OR32ri, X86::OR32ri, 12521 false); 12522 case X86::ATOMXOR6432: 12523 return EmitAtomicBit6432WithCustomInserter(MI, BB, 12524 X86::XOR32rr, X86::XOR32rr, 12525 X86::XOR32ri, X86::XOR32ri, 12526 false); 12527 case X86::ATOMNAND6432: 12528 return EmitAtomicBit6432WithCustomInserter(MI, BB, 12529 X86::AND32rr, X86::AND32rr, 12530 X86::AND32ri, X86::AND32ri, 12531 true); 12532 case X86::ATOMADD6432: 12533 return EmitAtomicBit6432WithCustomInserter(MI, BB, 12534 X86::ADD32rr, X86::ADC32rr, 12535 X86::ADD32ri, X86::ADC32ri, 12536 false); 12537 case X86::ATOMSUB6432: 12538 return EmitAtomicBit6432WithCustomInserter(MI, BB, 12539 X86::SUB32rr, X86::SBB32rr, 12540 X86::SUB32ri, X86::SBB32ri, 12541 false); 12542 case X86::ATOMSWAP6432: 12543 return EmitAtomicBit6432WithCustomInserter(MI, BB, 12544 X86::MOV32rr, X86::MOV32rr, 12545 X86::MOV32ri, X86::MOV32ri, 12546 false); 12547 case X86::VASTART_SAVE_XMM_REGS: 12548 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB); 12549 12550 case X86::VAARG_64: 12551 return EmitVAARG64WithCustomInserter(MI, BB); 12552 } 12553} 12554 12555//===----------------------------------------------------------------------===// 12556// X86 Optimization Hooks 12557//===----------------------------------------------------------------------===// 12558 12559void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 12560 const APInt &Mask, 12561 APInt &KnownZero, 12562 APInt &KnownOne, 12563 const SelectionDAG &DAG, 12564 unsigned Depth) const { 12565 unsigned Opc = Op.getOpcode(); 12566 assert((Opc >= ISD::BUILTIN_OP_END || 12567 Opc == ISD::INTRINSIC_WO_CHAIN || 12568 Opc == ISD::INTRINSIC_W_CHAIN || 12569 Opc == ISD::INTRINSIC_VOID) && 12570 "Should use MaskedValueIsZero if you don't know whether Op" 12571 " is a target node!"); 12572 12573 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); // Don't know anything. 
12574 switch (Opc) { 12575 default: break; 12576 case X86ISD::ADD: 12577 case X86ISD::SUB: 12578 case X86ISD::ADC: 12579 case X86ISD::SBB: 12580 case X86ISD::SMUL: 12581 case X86ISD::UMUL: 12582 case X86ISD::INC: 12583 case X86ISD::DEC: 12584 case X86ISD::OR: 12585 case X86ISD::XOR: 12586 case X86ISD::AND: 12587 // These nodes' second result is a boolean. 12588 if (Op.getResNo() == 0) 12589 break; 12590 // Fallthrough 12591 case X86ISD::SETCC: 12592 KnownZero |= APInt::getHighBitsSet(Mask.getBitWidth(), 12593 Mask.getBitWidth() - 1); 12594 break; 12595 case ISD::INTRINSIC_WO_CHAIN: { 12596 unsigned IntId = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 12597 unsigned NumLoBits = 0; 12598 switch (IntId) { 12599 default: break; 12600 case Intrinsic::x86_sse_movmsk_ps: 12601 case Intrinsic::x86_avx_movmsk_ps_256: 12602 case Intrinsic::x86_sse2_movmsk_pd: 12603 case Intrinsic::x86_avx_movmsk_pd_256: 12604 case Intrinsic::x86_mmx_pmovmskb: 12605 case Intrinsic::x86_sse2_pmovmskb_128: 12606 case Intrinsic::x86_avx2_pmovmskb: { 12607 // High bits of movmskp{s|d}, pmovmskb are known zero. 12608 switch (IntId) { 12609 case Intrinsic::x86_sse_movmsk_ps: NumLoBits = 4; break; 12610 case Intrinsic::x86_avx_movmsk_ps_256: NumLoBits = 8; break; 12611 case Intrinsic::x86_sse2_movmsk_pd: NumLoBits = 2; break; 12612 case Intrinsic::x86_avx_movmsk_pd_256: NumLoBits = 4; break; 12613 case Intrinsic::x86_mmx_pmovmskb: NumLoBits = 8; break; 12614 case Intrinsic::x86_sse2_pmovmskb_128: NumLoBits = 16; break; 12615 case Intrinsic::x86_avx2_pmovmskb: NumLoBits = 32; break; 12616 } 12617 KnownZero = APInt::getHighBitsSet(Mask.getBitWidth(), 12618 Mask.getBitWidth() - NumLoBits); 12619 break; 12620 } 12621 } 12622 break; 12623 } 12624 } 12625} 12626 12627unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op, 12628 unsigned Depth) const { 12629 // SETCC_CARRY sets the dest to ~0 for true or 0 for false. 12630 if (Op.getOpcode() == X86ISD::SETCC_CARRY) 12631 return Op.getValueType().getScalarType().getSizeInBits(); 12632 12633 // Fallback case. 12634 return 1; 12635} 12636 12637/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the 12638/// node is a GlobalAddress + offset. 
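/// Looks through an X86ISD::Wrapper around a GlobalAddressSDNode before
/// deferring to the generic TargetLowering::isGAPlusOffset implementation.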
12639bool X86TargetLowering::isGAPlusOffset(SDNode *N, 12640 const GlobalValue* &GA, 12641 int64_t &Offset) const { 12642 if (N->getOpcode() == X86ISD::Wrapper) { 12643 if (isa<GlobalAddressSDNode>(N->getOperand(0))) { 12644 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal(); 12645 Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset(); 12646 return true; 12647 } 12648 } 12649 return TargetLowering::isGAPlusOffset(N, GA, Offset); 12650} 12651 12652/// isShuffleHigh128VectorInsertLow - Checks whether the shuffle node is the 12653/// same as extracting the high 128-bit part of 256-bit vector and then 12654/// inserting the result into the low part of a new 256-bit vector 12655static bool isShuffleHigh128VectorInsertLow(ShuffleVectorSDNode *SVOp) { 12656 EVT VT = SVOp->getValueType(0); 12657 int NumElems = VT.getVectorNumElements(); 12658 12659 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u> 12660 for (int i = 0, j = NumElems/2; i < NumElems/2; ++i, ++j) 12661 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) || 12662 SVOp->getMaskElt(j) >= 0) 12663 return false; 12664 12665 return true; 12666} 12667 12668/// isShuffleLow128VectorInsertHigh - Checks whether the shuffle node is the 12669/// same as extracting the low 128-bit part of 256-bit vector and then 12670/// inserting the result into the high part of a new 256-bit vector 12671static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) { 12672 EVT VT = SVOp->getValueType(0); 12673 int NumElems = VT.getVectorNumElements(); 12674 12675 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1> 12676 for (int i = NumElems/2, j = 0; i < NumElems; ++i, ++j) 12677 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) || 12678 SVOp->getMaskElt(j) >= 0) 12679 return false; 12680 12681 return true; 12682} 12683 12684/// PerformShuffleCombine256 - Performs shuffle combines for 256-bit vectors. 12685static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG, 12686 TargetLowering::DAGCombinerInfo &DCI) { 12687 DebugLoc dl = N->getDebugLoc(); 12688 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 12689 SDValue V1 = SVOp->getOperand(0); 12690 SDValue V2 = SVOp->getOperand(1); 12691 EVT VT = SVOp->getValueType(0); 12692 int NumElems = VT.getVectorNumElements(); 12693 12694 if (V1.getOpcode() == ISD::CONCAT_VECTORS && 12695 V2.getOpcode() == ISD::CONCAT_VECTORS) { 12696 // 12697 // 0,0,0,... 12698 // | 12699 // V UNDEF BUILD_VECTOR UNDEF 12700 // \ / \ / 12701 // CONCAT_VECTOR CONCAT_VECTOR 12702 // \ / 12703 // \ / 12704 // RESULT: V + zero extended 12705 // 12706 if (V2.getOperand(0).getOpcode() != ISD::BUILD_VECTOR || 12707 V2.getOperand(1).getOpcode() != ISD::UNDEF || 12708 V1.getOperand(1).getOpcode() != ISD::UNDEF) 12709 return SDValue(); 12710 12711 if (!ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode())) 12712 return SDValue(); 12713 12714 // To match the shuffle mask, the first half of the mask should 12715 // be exactly the first vector, and all the rest a splat with the 12716 // first element of the second one. 12717 for (int i = 0; i < NumElems/2; ++i) 12718 if (!isUndefOrEqual(SVOp->getMaskElt(i), i) || 12719 !isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems)) 12720 return SDValue(); 12721 12722 // If V1 is coming from a vector load then just fold to a VZEXT_LOAD. 
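// (The combined node is V1's low 128 bits with the upper lanes zeroed, so a
// single zero-extending vector load covers both the load and the zeroing.)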
12723 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(V1.getOperand(0))) { 12724 SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other); 12725 SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() }; 12726 SDValue ResNode = 12727 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, 2, 12728 Ld->getMemoryVT(), 12729 Ld->getPointerInfo(), 12730 Ld->getAlignment(), 12731 false/*isVolatile*/, true/*ReadMem*/, 12732 false/*WriteMem*/); 12733 return DAG.getNode(ISD::BITCAST, dl, VT, ResNode); 12734 } 12735 12736 // Emit a zeroed vector and insert the desired subvector on its 12737 // first half. 12738 SDValue Zeros = getZeroVector(VT, true /* HasSSE2 */, DAG, dl); 12739 SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), 12740 DAG.getConstant(0, MVT::i32), DAG, dl); 12741 return DCI.CombineTo(N, InsV); 12742 } 12743 12744 //===--------------------------------------------------------------------===// 12745 // Combine some shuffles into subvector extracts and inserts: 12746 // 12747 12748 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u> 12749 if (isShuffleHigh128VectorInsertLow(SVOp)) { 12750 SDValue V = Extract128BitVector(V1, DAG.getConstant(NumElems/2, MVT::i32), 12751 DAG, dl); 12752 SDValue InsV = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, VT), 12753 V, DAG.getConstant(0, MVT::i32), DAG, dl); 12754 return DCI.CombineTo(N, InsV); 12755 } 12756 12757 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1> 12758 if (isShuffleLow128VectorInsertHigh(SVOp)) { 12759 SDValue V = Extract128BitVector(V1, DAG.getConstant(0, MVT::i32), DAG, dl); 12760 SDValue InsV = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, VT), 12761 V, DAG.getConstant(NumElems/2, MVT::i32), DAG, dl); 12762 return DCI.CombineTo(N, InsV); 12763 } 12764 12765 return SDValue(); 12766} 12767 12768/// PerformShuffleCombine - Performs several different shuffle combines. 12769static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG, 12770 TargetLowering::DAGCombinerInfo &DCI, 12771 const X86Subtarget *Subtarget) { 12772 DebugLoc dl = N->getDebugLoc(); 12773 EVT VT = N->getValueType(0); 12774 12775 // Don't create instructions with illegal types after legalize types has run. 12776 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 12777 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType())) 12778 return SDValue(); 12779 12780 // Combine 256-bit vector shuffles. This is only profitable when in AVX mode 12781 if (Subtarget->hasAVX() && VT.getSizeInBits() == 256 && 12782 N->getOpcode() == ISD::VECTOR_SHUFFLE) 12783 return PerformShuffleCombine256(N, DAG, DCI); 12784 12785 // Only handle 128 wide vector from here on. 12786 if (VT.getSizeInBits() != 128) 12787 return SDValue(); 12788 12789 // Combine a vector_shuffle that is equal to build_vector load1, load2, load3, 12790 // load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are 12791 // consecutive, non-overlapping, and in the right order. 12792 SmallVector<SDValue, 16> Elts; 12793 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) 12794 Elts.push_back(getShuffleScalarElt(N, i, DAG, 0)); 12795 12796 return EltsFromConsecutiveLoads(VT, Elts, dl, DAG); 12797} 12798 12799/// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index 12800/// generation and convert it from being a bunch of shuffles and extracts 12801/// to a simple store and scalar loads to extract the elements. 
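/// The transformation only fires for a v4i32 source whose extracts all feed a
/// sign- or zero-extend and together cover every lane; the vector is then
/// spilled to a stack slot once and each lane is re-loaded as a scalar.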
12802static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
12803 const TargetLowering &TLI) {
12804 SDValue InputVector = N->getOperand(0);
12805
12806 // Only operate on vectors of 4 elements, where the alternative shuffling
12807 // gets to be more expensive.
12808 if (InputVector.getValueType() != MVT::v4i32)
12809 return SDValue();
12810
12811 // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a
12812 // single use which is a sign-extend or zero-extend, and all elements are
12813 // used.
12814 SmallVector<SDNode *, 4> Uses;
12815 unsigned ExtractedElements = 0;
12816 for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(),
12817 UE = InputVector.getNode()->use_end(); UI != UE; ++UI) {
12818 if (UI.getUse().getResNo() != InputVector.getResNo())
12819 return SDValue();
12820
12821 SDNode *Extract = *UI;
12822 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
12823 return SDValue();
12824
12825 if (Extract->getValueType(0) != MVT::i32)
12826 return SDValue();
12827 if (!Extract->hasOneUse())
12828 return SDValue();
12829 if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND &&
12830 Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND)
12831 return SDValue();
12832 if (!isa<ConstantSDNode>(Extract->getOperand(1)))
12833 return SDValue();
12834
12835 // Record which element was extracted.
12836 ExtractedElements |=
12837 1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue();
12838
12839 Uses.push_back(Extract);
12840 }
12841
12842 // If not all the elements were used, this may not be worthwhile.
12843 if (ExtractedElements != 15)
12844 return SDValue();
12845
12846 // Ok, we've now decided to do the transformation.
12847 DebugLoc dl = InputVector.getDebugLoc();
12848
12849 // Store the value to a temporary stack slot.
12850 SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
12851 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr,
12852 MachinePointerInfo(), false, false, 0);
12853
12854 // Replace each use (extract) with a load of the appropriate element.
12855 for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
12856 UE = Uses.end(); UI != UE; ++UI) {
12857 SDNode *Extract = *UI;
12858
12859 // Compute the element's address.
12860 SDValue Idx = Extract->getOperand(1);
12861 unsigned EltSize =
12862 InputVector.getValueType().getVectorElementType().getSizeInBits()/8;
12863 uint64_t Offset = EltSize * cast<ConstantSDNode>(Idx)->getZExtValue();
12864 SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());
12865
12866 SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
12867 StackPtr, OffsetVal);
12868
12869 // Load the scalar.
12870 SDValue LoadScalar = DAG.getLoad(Extract->getValueType(0), dl, Ch,
12871 ScalarAddr, MachinePointerInfo(),
12872 false, false, false, 0);
12873
12874 // Replace the extract with the load.
12875 DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), LoadScalar);
12876 }
12877
12878 // The replacement was made in place; don't return anything.
12879 return SDValue();
12880}
12881
12882/// PerformSELECTCombine - Do target-specific dag combines on SELECT and VSELECT
12883/// nodes.
12884static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
12885 const X86Subtarget *Subtarget) {
12886 DebugLoc DL = N->getDebugLoc();
12887 SDValue Cond = N->getOperand(0);
12888 // Get the LHS/RHS of the select.
12889 SDValue LHS = N->getOperand(1); 12890 SDValue RHS = N->getOperand(2); 12891 EVT VT = LHS.getValueType(); 12892 12893 // If we have SSE[12] support, try to form min/max nodes. SSE min/max 12894 // instructions match the semantics of the common C idiom x<y?x:y but not 12895 // x<=y?x:y, because of how they handle negative zero (which can be 12896 // ignored in unsafe-math mode). 12897 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() && 12898 VT != MVT::f80 && DAG.getTargetLoweringInfo().isTypeLegal(VT) && 12899 (Subtarget->hasSSE2() || 12900 (Subtarget->hasSSE1() && VT.getScalarType() == MVT::f32))) { 12901 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); 12902 12903 unsigned Opcode = 0; 12904 // Check for x CC y ? x : y. 12905 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) && 12906 DAG.isEqualTo(RHS, Cond.getOperand(1))) { 12907 switch (CC) { 12908 default: break; 12909 case ISD::SETULT: 12910 // Converting this to a min would handle NaNs incorrectly, and swapping 12911 // the operands would cause it to handle comparisons between positive 12912 // and negative zero incorrectly. 12913 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) { 12914 if (!DAG.getTarget().Options.UnsafeFPMath && 12915 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 12916 break; 12917 std::swap(LHS, RHS); 12918 } 12919 Opcode = X86ISD::FMIN; 12920 break; 12921 case ISD::SETOLE: 12922 // Converting this to a min would handle comparisons between positive 12923 // and negative zero incorrectly. 12924 if (!DAG.getTarget().Options.UnsafeFPMath && 12925 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) 12926 break; 12927 Opcode = X86ISD::FMIN; 12928 break; 12929 case ISD::SETULE: 12930 // Converting this to a min would handle both negative zeros and NaNs 12931 // incorrectly, but we can swap the operands to fix both. 12932 std::swap(LHS, RHS); 12933 case ISD::SETOLT: 12934 case ISD::SETLT: 12935 case ISD::SETLE: 12936 Opcode = X86ISD::FMIN; 12937 break; 12938 12939 case ISD::SETOGE: 12940 // Converting this to a max would handle comparisons between positive 12941 // and negative zero incorrectly. 12942 if (!DAG.getTarget().Options.UnsafeFPMath && 12943 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) 12944 break; 12945 Opcode = X86ISD::FMAX; 12946 break; 12947 case ISD::SETUGT: 12948 // Converting this to a max would handle NaNs incorrectly, and swapping 12949 // the operands would cause it to handle comparisons between positive 12950 // and negative zero incorrectly. 12951 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) { 12952 if (!DAG.getTarget().Options.UnsafeFPMath && 12953 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 12954 break; 12955 std::swap(LHS, RHS); 12956 } 12957 Opcode = X86ISD::FMAX; 12958 break; 12959 case ISD::SETUGE: 12960 // Converting this to a max would handle both negative zeros and NaNs 12961 // incorrectly, but we can swap the operands to fix both. 12962 std::swap(LHS, RHS); 12963 case ISD::SETOGT: 12964 case ISD::SETGT: 12965 case ISD::SETGE: 12966 Opcode = X86ISD::FMAX; 12967 break; 12968 } 12969 // Check for x CC y ? y : x -- a min/max with reversed arms. 
12970 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) && 12971 DAG.isEqualTo(RHS, Cond.getOperand(0))) { 12972 switch (CC) { 12973 default: break; 12974 case ISD::SETOGE: 12975 // Converting this to a min would handle comparisons between positive 12976 // and negative zero incorrectly, and swapping the operands would 12977 // cause it to handle NaNs incorrectly. 12978 if (!DAG.getTarget().Options.UnsafeFPMath && 12979 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) { 12980 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 12981 break; 12982 std::swap(LHS, RHS); 12983 } 12984 Opcode = X86ISD::FMIN; 12985 break; 12986 case ISD::SETUGT: 12987 // Converting this to a min would handle NaNs incorrectly. 12988 if (!DAG.getTarget().Options.UnsafeFPMath && 12989 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))) 12990 break; 12991 Opcode = X86ISD::FMIN; 12992 break; 12993 case ISD::SETUGE: 12994 // Converting this to a min would handle both negative zeros and NaNs 12995 // incorrectly, but we can swap the operands to fix both. 12996 std::swap(LHS, RHS); 12997 case ISD::SETOGT: 12998 case ISD::SETGT: 12999 case ISD::SETGE: 13000 Opcode = X86ISD::FMIN; 13001 break; 13002 13003 case ISD::SETULT: 13004 // Converting this to a max would handle NaNs incorrectly. 13005 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 13006 break; 13007 Opcode = X86ISD::FMAX; 13008 break; 13009 case ISD::SETOLE: 13010 // Converting this to a max would handle comparisons between positive 13011 // and negative zero incorrectly, and swapping the operands would 13012 // cause it to handle NaNs incorrectly. 13013 if (!DAG.getTarget().Options.UnsafeFPMath && 13014 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) { 13015 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) 13016 break; 13017 std::swap(LHS, RHS); 13018 } 13019 Opcode = X86ISD::FMAX; 13020 break; 13021 case ISD::SETULE: 13022 // Converting this to a max would handle both negative zeros and NaNs 13023 // incorrectly, but we can swap the operands to fix both. 13024 std::swap(LHS, RHS); 13025 case ISD::SETOLT: 13026 case ISD::SETLT: 13027 case ISD::SETLE: 13028 Opcode = X86ISD::FMAX; 13029 break; 13030 } 13031 } 13032 13033 if (Opcode) 13034 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS); 13035 } 13036 13037 // If this is a select between two integer constants, try to do some 13038 // optimizations. 13039 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) { 13040 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS)) 13041 // Don't do this for crazy integer types. 13042 if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) { 13043 // If this is efficiently invertible, canonicalize the LHSC/RHSC values 13044 // so that TrueC (the true value) is larger than FalseC. 13045 bool NeedsCondInvert = false; 13046 13047 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) && 13048 // Efficiently invertible. 13049 (Cond.getOpcode() == ISD::SETCC || // setcc -> invertible. 13050 (Cond.getOpcode() == ISD::XOR && // xor(X, C) -> invertible. 13051 isa<ConstantSDNode>(Cond.getOperand(1))))) { 13052 NeedsCondInvert = true; 13053 std::swap(TrueC, FalseC); 13054 } 13055 13056 // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0. 13057 if (FalseC->getAPIntValue() == 0 && 13058 TrueC->getAPIntValue().isPowerOf2()) { 13059 if (NeedsCondInvert) // Invert the condition if needed. 
13060 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond, 13061 DAG.getConstant(1, Cond.getValueType())); 13062 13063 // Zero extend the condition if needed. 13064 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond); 13065 13066 unsigned ShAmt = TrueC->getAPIntValue().logBase2(); 13067 return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond, 13068 DAG.getConstant(ShAmt, MVT::i8)); 13069 } 13070 13071 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)+cst. 13072 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) { 13073 if (NeedsCondInvert) // Invert the condition if needed. 13074 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond, 13075 DAG.getConstant(1, Cond.getValueType())); 13076 13077 // Zero extend the condition if needed. 13078 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, 13079 FalseC->getValueType(0), Cond); 13080 return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 13081 SDValue(FalseC, 0)); 13082 } 13083 13084 // Optimize cases that will turn into an LEA instruction. This requires 13085 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9). 13086 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) { 13087 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue(); 13088 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff; 13089 13090 bool isFastMultiplier = false; 13091 if (Diff < 10) { 13092 switch ((unsigned char)Diff) { 13093 default: break; 13094 case 1: // result = add base, cond 13095 case 2: // result = lea base( , cond*2) 13096 case 3: // result = lea base(cond, cond*2) 13097 case 4: // result = lea base( , cond*4) 13098 case 5: // result = lea base(cond, cond*4) 13099 case 8: // result = lea base( , cond*8) 13100 case 9: // result = lea base(cond, cond*8) 13101 isFastMultiplier = true; 13102 break; 13103 } 13104 } 13105 13106 if (isFastMultiplier) { 13107 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue(); 13108 if (NeedsCondInvert) // Invert the condition if needed. 13109 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond, 13110 DAG.getConstant(1, Cond.getValueType())); 13111 13112 // Zero extend the condition if needed. 13113 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0), 13114 Cond); 13115 // Scale the condition by the difference. 13116 if (Diff != 1) 13117 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond, 13118 DAG.getConstant(Diff, Cond.getValueType())); 13119 13120 // Add the base if non-zero. 13121 if (FalseC->getAPIntValue() != 0) 13122 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 13123 SDValue(FalseC, 0)); 13124 return Cond; 13125 } 13126 } 13127 } 13128 } 13129 13130 // Canonicalize max and min: 13131 // (x > y) ? x : y -> (x >= y) ? x : y 13132 // (x < y) ? x : y -> (x <= y) ? x : y 13133 // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates 13134 // the need for an extra compare 13135 // against zero. e.g. 13136 // (x - y) > 0 : (x - y) ? 0 -> (x - y) >= 0 : (x - y) ? 
0
13137 // subl %esi, %edi
13138 // testl %edi, %edi
13139 // movl $0, %eax
13140 // cmovgl %edi, %eax
13141 // =>
13142 // xorl %eax, %eax
13143 // subl %esi, %edi
13144 // cmovsl %eax, %edi
13145 if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
13146 DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
13147 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
13148 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
13149 switch (CC) {
13150 default: break;
13151 case ISD::SETLT:
13152 case ISD::SETGT: {
13153 ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
13154 Cond = DAG.getSetCC(Cond.getDebugLoc(), Cond.getValueType(),
13155 Cond.getOperand(0), Cond.getOperand(1), NewCC);
13156 return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS);
13157 }
13158 }
13159 }
13160
13161 return SDValue();
13162}
13163
13164/// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
13165static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
13166 TargetLowering::DAGCombinerInfo &DCI) {
13167 DebugLoc DL = N->getDebugLoc();
13168
13169 // If the flag operand isn't dead, don't touch this CMOV.
13170 if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty())
13171 return SDValue();
13172
13173 SDValue FalseOp = N->getOperand(0);
13174 SDValue TrueOp = N->getOperand(1);
13175 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
13176 SDValue Cond = N->getOperand(3);
13177 if (CC == X86::COND_E || CC == X86::COND_NE) {
13178 switch (Cond.getOpcode()) {
13179 default: break;
13180 case X86ISD::BSR:
13181 case X86ISD::BSF:
13182 // If the operand of BSR / BSF is proven never zero, then ZF cannot be set.
13183 if (DAG.isKnownNeverZero(Cond.getOperand(0)))
13184 return (CC == X86::COND_E) ? FalseOp : TrueOp;
13185 }
13186 }
13187
13188 // If this is a select between two integer constants, try to do some
13189 // optimizations. Note that the operands are ordered the opposite of SELECT
13190 // operands.
13191 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
13192 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
13193 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
13194 // larger than FalseC (the false value).
13195 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
13196 CC = X86::GetOppositeBranchCondition(CC);
13197 std::swap(TrueC, FalseC);
13198 }
13199
13200 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
13201 // This is efficient for any integer data type (including i8/i16) and
13202 // shift amount.
13203 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
13204 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
13205 DAG.getConstant(CC, MVT::i8), Cond);
13206
13207 // Zero extend the condition if needed.
13208 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
13209
13210 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
13211 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
13212 DAG.getConstant(ShAmt, MVT::i8));
13213 if (N->getNumValues() == 2) // Dead flag value?
13214 return DCI.CombineTo(N, Cond, SDValue());
13215 return Cond;
13216 }
13217
13218 // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst. This is efficient
13219 // for any integer data type, including i8/i16.
13220 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
13221 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
13222 DAG.getConstant(CC, MVT::i8), Cond);
13223
13224 // Zero extend the condition if needed.
13225 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, 13226 FalseC->getValueType(0), Cond); 13227 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 13228 SDValue(FalseC, 0)); 13229 13230 if (N->getNumValues() == 2) // Dead flag value? 13231 return DCI.CombineTo(N, Cond, SDValue()); 13232 return Cond; 13233 } 13234 13235 // Optimize cases that will turn into an LEA instruction. This requires 13236 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9). 13237 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) { 13238 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue(); 13239 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff; 13240 13241 bool isFastMultiplier = false; 13242 if (Diff < 10) { 13243 switch ((unsigned char)Diff) { 13244 default: break; 13245 case 1: // result = add base, cond 13246 case 2: // result = lea base( , cond*2) 13247 case 3: // result = lea base(cond, cond*2) 13248 case 4: // result = lea base( , cond*4) 13249 case 5: // result = lea base(cond, cond*4) 13250 case 8: // result = lea base( , cond*8) 13251 case 9: // result = lea base(cond, cond*8) 13252 isFastMultiplier = true; 13253 break; 13254 } 13255 } 13256 13257 if (isFastMultiplier) { 13258 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue(); 13259 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8, 13260 DAG.getConstant(CC, MVT::i8), Cond); 13261 // Zero extend the condition if needed. 13262 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0), 13263 Cond); 13264 // Scale the condition by the difference. 13265 if (Diff != 1) 13266 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond, 13267 DAG.getConstant(Diff, Cond.getValueType())); 13268 13269 // Add the base if non-zero. 13270 if (FalseC->getAPIntValue() != 0) 13271 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, 13272 SDValue(FalseC, 0)); 13273 if (N->getNumValues() == 2) // Dead flag value? 13274 return DCI.CombineTo(N, Cond, SDValue()); 13275 return Cond; 13276 } 13277 } 13278 } 13279 } 13280 return SDValue(); 13281} 13282 13283 13284/// PerformMulCombine - Optimize a single multiply with constant into two 13285/// in order to implement it with two cheaper instructions, e.g. 13286/// LEA + SHL, LEA + LEA. 13287static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG, 13288 TargetLowering::DAGCombinerInfo &DCI) { 13289 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 13290 return SDValue(); 13291 13292 EVT VT = N->getValueType(0); 13293 if (VT != MVT::i64) 13294 return SDValue(); 13295 13296 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 13297 if (!C) 13298 return SDValue(); 13299 uint64_t MulAmt = C->getZExtValue(); 13300 if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9) 13301 return SDValue(); 13302 13303 uint64_t MulAmt1 = 0; 13304 uint64_t MulAmt2 = 0; 13305 if ((MulAmt % 9) == 0) { 13306 MulAmt1 = 9; 13307 MulAmt2 = MulAmt / 9; 13308 } else if ((MulAmt % 5) == 0) { 13309 MulAmt1 = 5; 13310 MulAmt2 = MulAmt / 5; 13311 } else if ((MulAmt % 3) == 0) { 13312 MulAmt1 = 3; 13313 MulAmt2 = MulAmt / 3; 13314 } 13315 if (MulAmt2 && 13316 (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){ 13317 DebugLoc DL = N->getDebugLoc(); 13318 13319 if (isPowerOf2_64(MulAmt2) && 13320 !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD)) 13321 // If second multiplifer is pow2, issue it first. 
We want the multiply by 13322 // 3, 5, or 9 to be folded into the addressing mode unless the lone use 13323 // is an add. 13324 std::swap(MulAmt1, MulAmt2); 13325 13326 SDValue NewMul; 13327 if (isPowerOf2_64(MulAmt1)) 13328 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0), 13329 DAG.getConstant(Log2_64(MulAmt1), MVT::i8)); 13330 else 13331 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0), 13332 DAG.getConstant(MulAmt1, VT)); 13333 13334 if (isPowerOf2_64(MulAmt2)) 13335 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul, 13336 DAG.getConstant(Log2_64(MulAmt2), MVT::i8)); 13337 else 13338 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul, 13339 DAG.getConstant(MulAmt2, VT)); 13340 13341 // Do not add new nodes to DAG combiner worklist. 13342 DCI.CombineTo(N, NewMul, false); 13343 } 13344 return SDValue(); 13345} 13346 13347static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) { 13348 SDValue N0 = N->getOperand(0); 13349 SDValue N1 = N->getOperand(1); 13350 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 13351 EVT VT = N0.getValueType(); 13352 13353 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2)) 13354 // since the result of setcc_c is all zero's or all ones. 13355 if (VT.isInteger() && !VT.isVector() && 13356 N1C && N0.getOpcode() == ISD::AND && 13357 N0.getOperand(1).getOpcode() == ISD::Constant) { 13358 SDValue N00 = N0.getOperand(0); 13359 if (N00.getOpcode() == X86ISD::SETCC_CARRY || 13360 ((N00.getOpcode() == ISD::ANY_EXTEND || 13361 N00.getOpcode() == ISD::ZERO_EXTEND) && 13362 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY)) { 13363 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 13364 APInt ShAmt = N1C->getAPIntValue(); 13365 Mask = Mask.shl(ShAmt); 13366 if (Mask != 0) 13367 return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, 13368 N00, DAG.getConstant(Mask, VT)); 13369 } 13370 } 13371 13372 13373 // Hardware support for vector shifts is sparse which makes us scalarize the 13374 // vector operations in many cases. Also, on sandybridge ADD is faster than 13375 // shl. 13376 // (shl V, 1) -> add V,V 13377 if (isSplatVector(N1.getNode())) { 13378 assert(N0.getValueType().isVector() && "Invalid vector shift type"); 13379 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(0)); 13380 // We shift all of the values by one. In many cases we do not have 13381 // hardware support for this operation. This is better expressed as an ADD 13382 // of two values. 13383 if (N1C && (1 == N1C->getZExtValue())) { 13384 return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N0, N0); 13385 } 13386 } 13387 13388 return SDValue(); 13389} 13390 13391/// PerformShiftCombine - Transforms vector shift nodes to use vector shifts 13392/// when possible. 13393static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG, 13394 const X86Subtarget *Subtarget) { 13395 EVT VT = N->getValueType(0); 13396 if (N->getOpcode() == ISD::SHL) { 13397 SDValue V = PerformSHLCombine(N, DAG); 13398 if (V.getNode()) return V; 13399 } 13400 13401 // On X86 with SSE2 support, we can transform this to a vector shift if 13402 // all elements are shifted by the same amount. We can't do this in legalize 13403 // because the a constant vector is typically transformed to a constant pool 13404 // so we have no knowledge of the shift amount. 
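// For example, once every lane of (shl (v4i32 X), <5, 5, 5, 5>) is known to
// shift by the same amount, the node is rewritten below as the
// x86_sse2_pslli_d intrinsic, i.e. a single PSLLD by that count.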
13405 if (!Subtarget->hasSSE2()) 13406 return SDValue(); 13407 13408 if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 && 13409 (!Subtarget->hasAVX2() || 13410 (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16))) 13411 return SDValue(); 13412 13413 SDValue ShAmtOp = N->getOperand(1); 13414 EVT EltVT = VT.getVectorElementType(); 13415 DebugLoc DL = N->getDebugLoc(); 13416 SDValue BaseShAmt = SDValue(); 13417 if (ShAmtOp.getOpcode() == ISD::BUILD_VECTOR) { 13418 unsigned NumElts = VT.getVectorNumElements(); 13419 unsigned i = 0; 13420 for (; i != NumElts; ++i) { 13421 SDValue Arg = ShAmtOp.getOperand(i); 13422 if (Arg.getOpcode() == ISD::UNDEF) continue; 13423 BaseShAmt = Arg; 13424 break; 13425 } 13426 for (; i != NumElts; ++i) { 13427 SDValue Arg = ShAmtOp.getOperand(i); 13428 if (Arg.getOpcode() == ISD::UNDEF) continue; 13429 if (Arg != BaseShAmt) { 13430 return SDValue(); 13431 } 13432 } 13433 } else if (ShAmtOp.getOpcode() == ISD::VECTOR_SHUFFLE && 13434 cast<ShuffleVectorSDNode>(ShAmtOp)->isSplat()) { 13435 SDValue InVec = ShAmtOp.getOperand(0); 13436 if (InVec.getOpcode() == ISD::BUILD_VECTOR) { 13437 unsigned NumElts = InVec.getValueType().getVectorNumElements(); 13438 unsigned i = 0; 13439 for (; i != NumElts; ++i) { 13440 SDValue Arg = InVec.getOperand(i); 13441 if (Arg.getOpcode() == ISD::UNDEF) continue; 13442 BaseShAmt = Arg; 13443 break; 13444 } 13445 } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) { 13446 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(InVec.getOperand(2))) { 13447 unsigned SplatIdx= cast<ShuffleVectorSDNode>(ShAmtOp)->getSplatIndex(); 13448 if (C->getZExtValue() == SplatIdx) 13449 BaseShAmt = InVec.getOperand(1); 13450 } 13451 } 13452 if (BaseShAmt.getNode() == 0) 13453 BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, ShAmtOp, 13454 DAG.getIntPtrConstant(0)); 13455 } else 13456 return SDValue(); 13457 13458 // The shift amount is an i32. 13459 if (EltVT.bitsGT(MVT::i32)) 13460 BaseShAmt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, BaseShAmt); 13461 else if (EltVT.bitsLT(MVT::i32)) 13462 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, BaseShAmt); 13463 13464 // The shift amount is identical so we can do a vector shift. 
13465 SDValue ValOp = N->getOperand(0); 13466 switch (N->getOpcode()) { 13467 default: 13468 llvm_unreachable("Unknown shift opcode!"); 13469 break; 13470 case ISD::SHL: 13471 if (VT == MVT::v2i64) 13472 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 13473 DAG.getConstant(Intrinsic::x86_sse2_pslli_q, MVT::i32), 13474 ValOp, BaseShAmt); 13475 if (VT == MVT::v4i32) 13476 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 13477 DAG.getConstant(Intrinsic::x86_sse2_pslli_d, MVT::i32), 13478 ValOp, BaseShAmt); 13479 if (VT == MVT::v8i16) 13480 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 13481 DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32), 13482 ValOp, BaseShAmt); 13483 if (VT == MVT::v4i64) 13484 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 13485 DAG.getConstant(Intrinsic::x86_avx2_pslli_q, MVT::i32), 13486 ValOp, BaseShAmt); 13487 if (VT == MVT::v8i32) 13488 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 13489 DAG.getConstant(Intrinsic::x86_avx2_pslli_d, MVT::i32), 13490 ValOp, BaseShAmt); 13491 if (VT == MVT::v16i16) 13492 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 13493 DAG.getConstant(Intrinsic::x86_avx2_pslli_w, MVT::i32), 13494 ValOp, BaseShAmt); 13495 break; 13496 case ISD::SRA: 13497 if (VT == MVT::v4i32) 13498 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 13499 DAG.getConstant(Intrinsic::x86_sse2_psrai_d, MVT::i32), 13500 ValOp, BaseShAmt); 13501 if (VT == MVT::v8i16) 13502 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 13503 DAG.getConstant(Intrinsic::x86_sse2_psrai_w, MVT::i32), 13504 ValOp, BaseShAmt); 13505 if (VT == MVT::v8i32) 13506 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 13507 DAG.getConstant(Intrinsic::x86_avx2_psrai_d, MVT::i32), 13508 ValOp, BaseShAmt); 13509 if (VT == MVT::v16i16) 13510 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 13511 DAG.getConstant(Intrinsic::x86_avx2_psrai_w, MVT::i32), 13512 ValOp, BaseShAmt); 13513 break; 13514 case ISD::SRL: 13515 if (VT == MVT::v2i64) 13516 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 13517 DAG.getConstant(Intrinsic::x86_sse2_psrli_q, MVT::i32), 13518 ValOp, BaseShAmt); 13519 if (VT == MVT::v4i32) 13520 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 13521 DAG.getConstant(Intrinsic::x86_sse2_psrli_d, MVT::i32), 13522 ValOp, BaseShAmt); 13523 if (VT == MVT::v8i16) 13524 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 13525 DAG.getConstant(Intrinsic::x86_sse2_psrli_w, MVT::i32), 13526 ValOp, BaseShAmt); 13527 if (VT == MVT::v4i64) 13528 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 13529 DAG.getConstant(Intrinsic::x86_avx2_psrli_q, MVT::i32), 13530 ValOp, BaseShAmt); 13531 if (VT == MVT::v8i32) 13532 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 13533 DAG.getConstant(Intrinsic::x86_avx2_psrli_d, MVT::i32), 13534 ValOp, BaseShAmt); 13535 if (VT == MVT::v16i16) 13536 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 13537 DAG.getConstant(Intrinsic::x86_avx2_psrli_w, MVT::i32), 13538 ValOp, BaseShAmt); 13539 break; 13540 } 13541 return SDValue(); 13542} 13543 13544 13545// CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..)) 13546// where both setccs reference the same FP CMP, and rewrite for CMPEQSS 13547// and friends. Likewise for OR -> CMPNEQSS. 
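// Concretely, (and (setcc E, CMP), (setcc NP, CMP)) over an f32/f64 compare is
// rewritten as (and (bitcast (cmpeqss ...)), 1), and the NE/P pair becomes the
// corresponding CMPNEQSS form.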
13548static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG, 13549 TargetLowering::DAGCombinerInfo &DCI, 13550 const X86Subtarget *Subtarget) { 13551 unsigned opcode; 13552 13553 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but 13554 // we're requiring SSE2 for both. 13555 if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) { 13556 SDValue N0 = N->getOperand(0); 13557 SDValue N1 = N->getOperand(1); 13558 SDValue CMP0 = N0->getOperand(1); 13559 SDValue CMP1 = N1->getOperand(1); 13560 DebugLoc DL = N->getDebugLoc(); 13561 13562 // The SETCCs should both refer to the same CMP. 13563 if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1) 13564 return SDValue(); 13565 13566 SDValue CMP00 = CMP0->getOperand(0); 13567 SDValue CMP01 = CMP0->getOperand(1); 13568 EVT VT = CMP00.getValueType(); 13569 13570 if (VT == MVT::f32 || VT == MVT::f64) { 13571 bool ExpectingFlags = false; 13572 // Check for any users that want flags: 13573 for (SDNode::use_iterator UI = N->use_begin(), 13574 UE = N->use_end(); 13575 !ExpectingFlags && UI != UE; ++UI) 13576 switch (UI->getOpcode()) { 13577 default: 13578 case ISD::BR_CC: 13579 case ISD::BRCOND: 13580 case ISD::SELECT: 13581 ExpectingFlags = true; 13582 break; 13583 case ISD::CopyToReg: 13584 case ISD::SIGN_EXTEND: 13585 case ISD::ZERO_EXTEND: 13586 case ISD::ANY_EXTEND: 13587 break; 13588 } 13589 13590 if (!ExpectingFlags) { 13591 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0); 13592 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0); 13593 13594 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) { 13595 X86::CondCode tmp = cc0; 13596 cc0 = cc1; 13597 cc1 = tmp; 13598 } 13599 13600 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) || 13601 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) { 13602 bool is64BitFP = (CMP00.getValueType() == MVT::f64); 13603 X86ISD::NodeType NTOperator = is64BitFP ? 13604 X86ISD::FSETCCsd : X86ISD::FSETCCss; 13605 // FIXME: need symbolic constants for these magic numbers. 13606 // See X86ATTInstPrinter.cpp:printSSECC(). 13607 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4; 13608 SDValue OnesOrZeroesF = DAG.getNode(NTOperator, DL, MVT::f32, CMP00, CMP01, 13609 DAG.getConstant(x86cc, MVT::i8)); 13610 SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, MVT::i32, 13611 OnesOrZeroesF); 13612 SDValue ANDed = DAG.getNode(ISD::AND, DL, MVT::i32, OnesOrZeroesI, 13613 DAG.getConstant(1, MVT::i32)); 13614 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ANDed); 13615 return OneBitOfTruth; 13616 } 13617 } 13618 } 13619 } 13620 return SDValue(); 13621} 13622 13623/// CanFoldXORWithAllOnes - Test whether the XOR operand is a AllOnes vector 13624/// so it can be folded inside ANDNP. 13625static bool CanFoldXORWithAllOnes(const SDNode *N) { 13626 EVT VT = N->getValueType(0); 13627 13628 // Match direct AllOnes for 128 and 256-bit vectors 13629 if (ISD::isBuildVectorAllOnes(N)) 13630 return true; 13631 13632 // Look through a bit convert. 
13633 if (N->getOpcode() == ISD::BITCAST) 13634 N = N->getOperand(0).getNode(); 13635 13636 // Sometimes the operand may come from a insert_subvector building a 256-bit 13637 // allones vector 13638 if (VT.getSizeInBits() == 256 && 13639 N->getOpcode() == ISD::INSERT_SUBVECTOR) { 13640 SDValue V1 = N->getOperand(0); 13641 SDValue V2 = N->getOperand(1); 13642 13643 if (V1.getOpcode() == ISD::INSERT_SUBVECTOR && 13644 V1.getOperand(0).getOpcode() == ISD::UNDEF && 13645 ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) && 13646 ISD::isBuildVectorAllOnes(V2.getNode())) 13647 return true; 13648 } 13649 13650 return false; 13651} 13652 13653static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG, 13654 TargetLowering::DAGCombinerInfo &DCI, 13655 const X86Subtarget *Subtarget) { 13656 if (DCI.isBeforeLegalizeOps()) 13657 return SDValue(); 13658 13659 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget); 13660 if (R.getNode()) 13661 return R; 13662 13663 EVT VT = N->getValueType(0); 13664 13665 // Create ANDN, BLSI, and BLSR instructions 13666 // BLSI is X & (-X) 13667 // BLSR is X & (X-1) 13668 if (Subtarget->hasBMI() && (VT == MVT::i32 || VT == MVT::i64)) { 13669 SDValue N0 = N->getOperand(0); 13670 SDValue N1 = N->getOperand(1); 13671 DebugLoc DL = N->getDebugLoc(); 13672 13673 // Check LHS for not 13674 if (N0.getOpcode() == ISD::XOR && isAllOnes(N0.getOperand(1))) 13675 return DAG.getNode(X86ISD::ANDN, DL, VT, N0.getOperand(0), N1); 13676 // Check RHS for not 13677 if (N1.getOpcode() == ISD::XOR && isAllOnes(N1.getOperand(1))) 13678 return DAG.getNode(X86ISD::ANDN, DL, VT, N1.getOperand(0), N0); 13679 13680 // Check LHS for neg 13681 if (N0.getOpcode() == ISD::SUB && N0.getOperand(1) == N1 && 13682 isZero(N0.getOperand(0))) 13683 return DAG.getNode(X86ISD::BLSI, DL, VT, N1); 13684 13685 // Check RHS for neg 13686 if (N1.getOpcode() == ISD::SUB && N1.getOperand(1) == N0 && 13687 isZero(N1.getOperand(0))) 13688 return DAG.getNode(X86ISD::BLSI, DL, VT, N0); 13689 13690 // Check LHS for X-1 13691 if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1 && 13692 isAllOnes(N0.getOperand(1))) 13693 return DAG.getNode(X86ISD::BLSR, DL, VT, N1); 13694 13695 // Check RHS for X-1 13696 if (N1.getOpcode() == ISD::ADD && N1.getOperand(0) == N0 && 13697 isAllOnes(N1.getOperand(1))) 13698 return DAG.getNode(X86ISD::BLSR, DL, VT, N0); 13699 13700 return SDValue(); 13701 } 13702 13703 // Want to form ANDNP nodes: 13704 // 1) In the hopes of then easily combining them with OR and AND nodes 13705 // to form PBLEND/PSIGN. 
13706 // 2) To match ANDN packed intrinsics 13707 if (VT != MVT::v2i64 && VT != MVT::v4i64) 13708 return SDValue(); 13709 13710 SDValue N0 = N->getOperand(0); 13711 SDValue N1 = N->getOperand(1); 13712 DebugLoc DL = N->getDebugLoc(); 13713 13714 // Check LHS for vnot 13715 if (N0.getOpcode() == ISD::XOR && 13716 //ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode())) 13717 CanFoldXORWithAllOnes(N0.getOperand(1).getNode())) 13718 return DAG.getNode(X86ISD::ANDNP, DL, VT, N0.getOperand(0), N1); 13719 13720 // Check RHS for vnot 13721 if (N1.getOpcode() == ISD::XOR && 13722 //ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode())) 13723 CanFoldXORWithAllOnes(N1.getOperand(1).getNode())) 13724 return DAG.getNode(X86ISD::ANDNP, DL, VT, N1.getOperand(0), N0); 13725 13726 return SDValue(); 13727} 13728 13729static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG, 13730 TargetLowering::DAGCombinerInfo &DCI, 13731 const X86Subtarget *Subtarget) { 13732 if (DCI.isBeforeLegalizeOps()) 13733 return SDValue(); 13734 13735 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget); 13736 if (R.getNode()) 13737 return R; 13738 13739 EVT VT = N->getValueType(0); 13740 13741 SDValue N0 = N->getOperand(0); 13742 SDValue N1 = N->getOperand(1); 13743 13744 // look for psign/blend 13745 if (VT == MVT::v2i64 || VT == MVT::v4i64) { 13746 if (!Subtarget->hasSSSE3() || 13747 (VT == MVT::v4i64 && !Subtarget->hasAVX2())) 13748 return SDValue(); 13749 13750 // Canonicalize pandn to RHS 13751 if (N0.getOpcode() == X86ISD::ANDNP) 13752 std::swap(N0, N1); 13753 // or (and (m, x), (pandn m, y)) 13754 if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) { 13755 SDValue Mask = N1.getOperand(0); 13756 SDValue X = N1.getOperand(1); 13757 SDValue Y; 13758 if (N0.getOperand(0) == Mask) 13759 Y = N0.getOperand(1); 13760 if (N0.getOperand(1) == Mask) 13761 Y = N0.getOperand(0); 13762 13763 // Check to see if the mask appeared in both the AND and ANDNP and 13764 if (!Y.getNode()) 13765 return SDValue(); 13766 13767 // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them. 13768 if (Mask.getOpcode() != ISD::BITCAST || 13769 X.getOpcode() != ISD::BITCAST || 13770 Y.getOpcode() != ISD::BITCAST) 13771 return SDValue(); 13772 13773 // Look through mask bitcast. 13774 Mask = Mask.getOperand(0); 13775 EVT MaskVT = Mask.getValueType(); 13776 13777 // Validate that the Mask operand is a vector sra node. The sra node 13778 // will be an intrinsic. 13779 if (Mask.getOpcode() != ISD::INTRINSIC_WO_CHAIN) 13780 return SDValue(); 13781 13782 // FIXME: what to do for bytes, since there is a psignb/pblendvb, but 13783 // there is no psrai.b 13784 switch (cast<ConstantSDNode>(Mask.getOperand(0))->getZExtValue()) { 13785 case Intrinsic::x86_sse2_psrai_w: 13786 case Intrinsic::x86_sse2_psrai_d: 13787 case Intrinsic::x86_avx2_psrai_w: 13788 case Intrinsic::x86_avx2_psrai_d: 13789 break; 13790 default: return SDValue(); 13791 } 13792 13793 // Check that the SRA is all signbits. 13794 SDValue SraC = Mask.getOperand(2); 13795 unsigned SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue(); 13796 unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits(); 13797 if ((SraAmt + 1) != EltBits) 13798 return SDValue(); 13799 13800 DebugLoc DL = N->getDebugLoc(); 13801 13802 // Now we know we at least have a plendvb with the mask val. See if 13803 // we can form a psignb/w/d. 
13804 // psign = x.type == y.type == mask.type && y = sub(0, x); 13805 X = X.getOperand(0); 13806 Y = Y.getOperand(0); 13807 if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X && 13808 ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) && 13809 X.getValueType() == MaskVT && X.getValueType() == Y.getValueType() && 13810 (EltBits == 8 || EltBits == 16 || EltBits == 32)) { 13811 SDValue Sign = DAG.getNode(X86ISD::PSIGN, DL, MaskVT, X, 13812 Mask.getOperand(1)); 13813 return DAG.getNode(ISD::BITCAST, DL, VT, Sign); 13814 } 13815 // PBLENDVB only available on SSE 4.1 13816 if (!Subtarget->hasSSE41()) 13817 return SDValue(); 13818 13819 EVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8; 13820 13821 X = DAG.getNode(ISD::BITCAST, DL, BlendVT, X); 13822 Y = DAG.getNode(ISD::BITCAST, DL, BlendVT, Y); 13823 Mask = DAG.getNode(ISD::BITCAST, DL, BlendVT, Mask); 13824 Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X); 13825 return DAG.getNode(ISD::BITCAST, DL, VT, Mask); 13826 } 13827 } 13828 13829 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64) 13830 return SDValue(); 13831 13832 // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c) 13833 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL) 13834 std::swap(N0, N1); 13835 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL) 13836 return SDValue(); 13837 if (!N0.hasOneUse() || !N1.hasOneUse()) 13838 return SDValue(); 13839 13840 SDValue ShAmt0 = N0.getOperand(1); 13841 if (ShAmt0.getValueType() != MVT::i8) 13842 return SDValue(); 13843 SDValue ShAmt1 = N1.getOperand(1); 13844 if (ShAmt1.getValueType() != MVT::i8) 13845 return SDValue(); 13846 if (ShAmt0.getOpcode() == ISD::TRUNCATE) 13847 ShAmt0 = ShAmt0.getOperand(0); 13848 if (ShAmt1.getOpcode() == ISD::TRUNCATE) 13849 ShAmt1 = ShAmt1.getOperand(0); 13850 13851 DebugLoc DL = N->getDebugLoc(); 13852 unsigned Opc = X86ISD::SHLD; 13853 SDValue Op0 = N0.getOperand(0); 13854 SDValue Op1 = N1.getOperand(0); 13855 if (ShAmt0.getOpcode() == ISD::SUB) { 13856 Opc = X86ISD::SHRD; 13857 std::swap(Op0, Op1); 13858 std::swap(ShAmt0, ShAmt1); 13859 } 13860 13861 unsigned Bits = VT.getSizeInBits(); 13862 if (ShAmt1.getOpcode() == ISD::SUB) { 13863 SDValue Sum = ShAmt1.getOperand(0); 13864 if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) { 13865 SDValue ShAmt1Op1 = ShAmt1.getOperand(1); 13866 if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE) 13867 ShAmt1Op1 = ShAmt1Op1.getOperand(0); 13868 if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0) 13869 return DAG.getNode(Opc, DL, VT, 13870 Op0, Op1, 13871 DAG.getNode(ISD::TRUNCATE, DL, 13872 MVT::i8, ShAmt0)); 13873 } 13874 } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) { 13875 ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0); 13876 if (ShAmt0C && 13877 ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits) 13878 return DAG.getNode(Opc, DL, VT, 13879 N0.getOperand(0), N1.getOperand(0), 13880 DAG.getNode(ISD::TRUNCATE, DL, 13881 MVT::i8, ShAmt0)); 13882 } 13883 13884 return SDValue(); 13885} 13886 13887// PerformXorCombine - Attempts to turn XOR nodes into BLSMSK nodes 13888static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG, 13889 TargetLowering::DAGCombinerInfo &DCI, 13890 const X86Subtarget *Subtarget) { 13891 if (DCI.isBeforeLegalizeOps()) 13892 return SDValue(); 13893 13894 EVT VT = N->getValueType(0); 13895 13896 if (VT != MVT::i32 && VT != MVT::i64) 13897 return SDValue(); 13898 13899 assert(Subtarget->hasBMI() && "Creating BLSMSK requires BMI 
instructions"); 13900 13901 // Create BLSMSK instructions by finding X ^ (X-1) 13902 SDValue N0 = N->getOperand(0); 13903 SDValue N1 = N->getOperand(1); 13904 DebugLoc DL = N->getDebugLoc(); 13905 13906 if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1 && 13907 isAllOnes(N0.getOperand(1))) 13908 return DAG.getNode(X86ISD::BLSMSK, DL, VT, N1); 13909 13910 if (N1.getOpcode() == ISD::ADD && N1.getOperand(0) == N0 && 13911 isAllOnes(N1.getOperand(1))) 13912 return DAG.getNode(X86ISD::BLSMSK, DL, VT, N0); 13913 13914 return SDValue(); 13915} 13916 13917/// PerformLOADCombine - Do target-specific dag combines on LOAD nodes. 13918static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG, 13919 const X86Subtarget *Subtarget) { 13920 LoadSDNode *Ld = cast<LoadSDNode>(N); 13921 EVT RegVT = Ld->getValueType(0); 13922 EVT MemVT = Ld->getMemoryVT(); 13923 DebugLoc dl = Ld->getDebugLoc(); 13924 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 13925 13926 ISD::LoadExtType Ext = Ld->getExtensionType(); 13927 13928 // If this is a vector EXT Load then attempt to optimize it using a 13929 // shuffle. We need SSE4 for the shuffles. 13930 // TODO: It is possible to support ZExt by zeroing the undef values 13931 // during the shuffle phase or after the shuffle. 13932 if (RegVT.isVector() && RegVT.isInteger() && 13933 Ext == ISD::EXTLOAD && Subtarget->hasSSE41()) { 13934 assert(MemVT != RegVT && "Cannot extend to the same type"); 13935 assert(MemVT.isVector() && "Must load a vector from memory"); 13936 13937 unsigned NumElems = RegVT.getVectorNumElements(); 13938 unsigned RegSz = RegVT.getSizeInBits(); 13939 unsigned MemSz = MemVT.getSizeInBits(); 13940 assert(RegSz > MemSz && "Register size must be greater than the mem size"); 13941 // All sizes must be a power of two 13942 if (!isPowerOf2_32(RegSz * MemSz * NumElems)) return SDValue(); 13943 13944 // Attempt to load the original value using a single load op. 13945 // Find a scalar type which is equal to the loaded word size. 13946 MVT SclrLoadTy = MVT::i8; 13947 for (unsigned tp = MVT::FIRST_INTEGER_VALUETYPE; 13948 tp < MVT::LAST_INTEGER_VALUETYPE; ++tp) { 13949 MVT Tp = (MVT::SimpleValueType)tp; 13950 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() == MemSz) { 13951 SclrLoadTy = Tp; 13952 break; 13953 } 13954 } 13955 13956 // Proceed if a load word is found. 13957 if (SclrLoadTy.getSizeInBits() != MemSz) return SDValue(); 13958 13959 EVT LoadUnitVecVT = EVT::getVectorVT(*DAG.getContext(), SclrLoadTy, 13960 RegSz/SclrLoadTy.getSizeInBits()); 13961 13962 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(), 13963 RegSz/MemVT.getScalarType().getSizeInBits()); 13964 // Can't shuffle using an illegal type. 13965 if (!TLI.isTypeLegal(WideVecVT)) return SDValue(); 13966 13967 // Perform a single load. 13968 SDValue ScalarLoad = DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), 13969 Ld->getBasePtr(), 13970 Ld->getPointerInfo(), Ld->isVolatile(), 13971 Ld->isNonTemporal(), Ld->isInvariant(), 13972 Ld->getAlignment()); 13973 13974 // Insert the word loaded into a vector. 13975 SDValue ScalarInVector = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 13976 LoadUnitVecVT, ScalarLoad); 13977 13978 // Bitcast the loaded value to a vector of the original element type, in 13979 // the size of the target vector type. 13980 SDValue SlicedVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, 13981 ScalarInVector); 13982 unsigned SizeRatio = RegSz/MemSz; 13983 13984 // Redistribute the loaded elements into the different locations. 
13985 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); 13986 for (unsigned i = 0; i < NumElems; i++) ShuffleVec[i*SizeRatio] = i; 13987 13988 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec, 13989 DAG.getUNDEF(SlicedVec.getValueType()), 13990 ShuffleVec.data()); 13991 13992 // Bitcast to the requested type. 13993 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff); 13994 // Replace the original load with the new sequence 13995 // and return the new chain. 13996 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Shuff); 13997 return SDValue(ScalarLoad.getNode(), 1); 13998 } 13999 14000 return SDValue(); 14001} 14002 14003/// PerformSTORECombine - Do target-specific dag combines on STORE nodes. 14004static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG, 14005 const X86Subtarget *Subtarget) { 14006 StoreSDNode *St = cast<StoreSDNode>(N); 14007 EVT VT = St->getValue().getValueType(); 14008 EVT StVT = St->getMemoryVT(); 14009 DebugLoc dl = St->getDebugLoc(); 14010 SDValue StoredVal = St->getOperand(1); 14011 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 14012 14013 // If we are saving a concatenation of two XMM registers, perform two stores. 14014 // This is better in Sandy Bridge cause one 256-bit mem op is done via two 14015 // 128-bit ones. If in the future the cost becomes only one memory access the 14016 // first version would be better. 14017 if (VT.getSizeInBits() == 256 && 14018 StoredVal.getNode()->getOpcode() == ISD::CONCAT_VECTORS && 14019 StoredVal.getNumOperands() == 2) { 14020 14021 SDValue Value0 = StoredVal.getOperand(0); 14022 SDValue Value1 = StoredVal.getOperand(1); 14023 14024 SDValue Stride = DAG.getConstant(16, TLI.getPointerTy()); 14025 SDValue Ptr0 = St->getBasePtr(); 14026 SDValue Ptr1 = DAG.getNode(ISD::ADD, dl, Ptr0.getValueType(), Ptr0, Stride); 14027 14028 SDValue Ch0 = DAG.getStore(St->getChain(), dl, Value0, Ptr0, 14029 St->getPointerInfo(), St->isVolatile(), 14030 St->isNonTemporal(), St->getAlignment()); 14031 SDValue Ch1 = DAG.getStore(St->getChain(), dl, Value1, Ptr1, 14032 St->getPointerInfo(), St->isVolatile(), 14033 St->isNonTemporal(), St->getAlignment()); 14034 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1); 14035 } 14036 14037 // Optimize trunc store (of multiple scalars) to shuffle and store. 14038 // First, pack all of the elements in one place. Next, store to memory 14039 // in fewer chunks. 14040 if (St->isTruncatingStore() && VT.isVector()) { 14041 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 14042 unsigned NumElems = VT.getVectorNumElements(); 14043 assert(StVT != VT && "Cannot truncate to the same type"); 14044 unsigned FromSz = VT.getVectorElementType().getSizeInBits(); 14045 unsigned ToSz = StVT.getVectorElementType().getSizeInBits(); 14046 14047 // From, To sizes and ElemCount must be pow of two 14048 if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue(); 14049 // We are going to use the original vector elt for storing. 14050 // Accumulated smaller vector elements must be a multiple of the store size. 
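    // For example, a v8i16 -> v8i8 truncating store (FromSz == 16, ToSz == 8,
    // SizeRatio == 2) passes the check below; the v16i8 shuffle built
    // afterwards packs the eight low bytes into the bottom half of the
    // register before the data is written out in the widest legal store units.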
14051 if (0 != (NumElems * FromSz) % ToSz) return SDValue(); 14052 14053 unsigned SizeRatio = FromSz / ToSz; 14054 14055 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits()); 14056 14057 // Create a type on which we perform the shuffle 14058 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), 14059 StVT.getScalarType(), NumElems*SizeRatio); 14060 14061 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits()); 14062 14063 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, St->getValue()); 14064 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); 14065 for (unsigned i = 0; i < NumElems; i++ ) ShuffleVec[i] = i * SizeRatio; 14066 14067 // Can't shuffle using an illegal type 14068 if (!TLI.isTypeLegal(WideVecVT)) return SDValue(); 14069 14070 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec, 14071 DAG.getUNDEF(WideVec.getValueType()), 14072 ShuffleVec.data()); 14073 // At this point all of the data is stored at the bottom of the 14074 // register. We now need to save it to mem. 14075 14076 // Find the largest store unit 14077 MVT StoreType = MVT::i8; 14078 for (unsigned tp = MVT::FIRST_INTEGER_VALUETYPE; 14079 tp < MVT::LAST_INTEGER_VALUETYPE; ++tp) { 14080 MVT Tp = (MVT::SimpleValueType)tp; 14081 if (TLI.isTypeLegal(Tp) && StoreType.getSizeInBits() < NumElems * ToSz) 14082 StoreType = Tp; 14083 } 14084 14085 // Bitcast the original vector into a vector of store-size units 14086 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(), 14087 StoreType, VT.getSizeInBits()/EVT(StoreType).getSizeInBits()); 14088 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits()); 14089 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, dl, StoreVecVT, Shuff); 14090 SmallVector<SDValue, 8> Chains; 14091 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8, 14092 TLI.getPointerTy()); 14093 SDValue Ptr = St->getBasePtr(); 14094 14095 // Perform one or more big stores into memory. 14096 for (unsigned i = 0; i < (ToSz*NumElems)/StoreType.getSizeInBits() ; i++) { 14097 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, 14098 StoreType, ShuffWide, 14099 DAG.getIntPtrConstant(i)); 14100 SDValue Ch = DAG.getStore(St->getChain(), dl, SubVec, Ptr, 14101 St->getPointerInfo(), St->isVolatile(), 14102 St->isNonTemporal(), St->getAlignment()); 14103 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 14104 Chains.push_back(Ch); 14105 } 14106 14107 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], 14108 Chains.size()); 14109 } 14110 14111 14112 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering 14113 // the FP state in cases where an emms may be missing. 14114 // A preferable solution to the general problem is to figure out the right 14115 // places to insert EMMS. This qualifies as a quick hack. 14116 14117 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode. 
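  // (With SSE2 on a 32-bit target this becomes a single f64 load/store pair
  // instead of the two pairs of i32 loads/stores emitted further below.)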
14118 if (VT.getSizeInBits() != 64) 14119 return SDValue(); 14120 14121 const Function *F = DAG.getMachineFunction().getFunction(); 14122 bool NoImplicitFloatOps = F->hasFnAttr(Attribute::NoImplicitFloat); 14123 bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps 14124 && Subtarget->hasSSE2(); 14125 if ((VT.isVector() || 14126 (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) && 14127 isa<LoadSDNode>(St->getValue()) && 14128 !cast<LoadSDNode>(St->getValue())->isVolatile() && 14129 St->getChain().hasOneUse() && !St->isVolatile()) { 14130 SDNode* LdVal = St->getValue().getNode(); 14131 LoadSDNode *Ld = 0; 14132 int TokenFactorIndex = -1; 14133 SmallVector<SDValue, 8> Ops; 14134 SDNode* ChainVal = St->getChain().getNode(); 14135 // Must be a store of a load. We currently handle two cases: the load 14136 // is a direct child, and it's under an intervening TokenFactor. It is 14137 // possible to dig deeper under nested TokenFactors. 14138 if (ChainVal == LdVal) 14139 Ld = cast<LoadSDNode>(St->getChain()); 14140 else if (St->getValue().hasOneUse() && 14141 ChainVal->getOpcode() == ISD::TokenFactor) { 14142 for (unsigned i=0, e = ChainVal->getNumOperands(); i != e; ++i) { 14143 if (ChainVal->getOperand(i).getNode() == LdVal) { 14144 TokenFactorIndex = i; 14145 Ld = cast<LoadSDNode>(St->getValue()); 14146 } else 14147 Ops.push_back(ChainVal->getOperand(i)); 14148 } 14149 } 14150 14151 if (!Ld || !ISD::isNormalLoad(Ld)) 14152 return SDValue(); 14153 14154 // If this is not the MMX case, i.e. we are just turning i64 load/store 14155 // into f64 load/store, avoid the transformation if there are multiple 14156 // uses of the loaded value. 14157 if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0)) 14158 return SDValue(); 14159 14160 DebugLoc LdDL = Ld->getDebugLoc(); 14161 DebugLoc StDL = N->getDebugLoc(); 14162 // If we are a 64-bit capable x86, lower to a single movq load/store pair. 14163 // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store 14164 // pair instead. 14165 if (Subtarget->is64Bit() || F64IsLegal) { 14166 EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64; 14167 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(), 14168 Ld->getPointerInfo(), Ld->isVolatile(), 14169 Ld->isNonTemporal(), Ld->isInvariant(), 14170 Ld->getAlignment()); 14171 SDValue NewChain = NewLd.getValue(1); 14172 if (TokenFactorIndex != -1) { 14173 Ops.push_back(NewChain); 14174 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0], 14175 Ops.size()); 14176 } 14177 return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(), 14178 St->getPointerInfo(), 14179 St->isVolatile(), St->isNonTemporal(), 14180 St->getAlignment()); 14181 } 14182 14183 // Otherwise, lower to two pairs of 32-bit loads / stores. 
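    // i.e. the i64 value is reloaded as two i32 halves (lo at +0, hi at +4)
    // and stored back the same way, threading the new loads onto the original
    // chain via a TokenFactor when one was present.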
14184 SDValue LoAddr = Ld->getBasePtr(); 14185 SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr, 14186 DAG.getConstant(4, MVT::i32)); 14187 14188 SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr, 14189 Ld->getPointerInfo(), 14190 Ld->isVolatile(), Ld->isNonTemporal(), 14191 Ld->isInvariant(), Ld->getAlignment()); 14192 SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr, 14193 Ld->getPointerInfo().getWithOffset(4), 14194 Ld->isVolatile(), Ld->isNonTemporal(), 14195 Ld->isInvariant(), 14196 MinAlign(Ld->getAlignment(), 4)); 14197 14198 SDValue NewChain = LoLd.getValue(1); 14199 if (TokenFactorIndex != -1) { 14200 Ops.push_back(LoLd); 14201 Ops.push_back(HiLd); 14202 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0], 14203 Ops.size()); 14204 } 14205 14206 LoAddr = St->getBasePtr(); 14207 HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr, 14208 DAG.getConstant(4, MVT::i32)); 14209 14210 SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr, 14211 St->getPointerInfo(), 14212 St->isVolatile(), St->isNonTemporal(), 14213 St->getAlignment()); 14214 SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr, 14215 St->getPointerInfo().getWithOffset(4), 14216 St->isVolatile(), 14217 St->isNonTemporal(), 14218 MinAlign(St->getAlignment(), 4)); 14219 return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt); 14220 } 14221 return SDValue(); 14222} 14223 14224/// isHorizontalBinOp - Return 'true' if this vector operation is "horizontal" 14225/// and return the operands for the horizontal operation in LHS and RHS. A 14226/// horizontal operation performs the binary operation on successive elements 14227/// of its first operand, then on successive elements of its second operand, 14228/// returning the resulting values in a vector. For example, if 14229/// A = < float a0, float a1, float a2, float a3 > 14230/// and 14231/// B = < float b0, float b1, float b2, float b3 > 14232/// then the result of doing a horizontal operation on A and B is 14233/// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >. 14234/// In short, LHS and RHS are inspected to see if LHS op RHS is of the form 14235/// A horizontal-op B, for some already available A and B, and if so then LHS is 14236/// set to A, RHS to B, and the routine returns 'true'. 14237/// Note that the binary operation should have the property that if one of the 14238/// operands is UNDEF then the result is UNDEF. 14239static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) { 14240 // Look for the following pattern: if 14241 // A = < float a0, float a1, float a2, float a3 > 14242 // B = < float b0, float b1, float b2, float b3 > 14243 // and 14244 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6> 14245 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7> 14246 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 > 14247 // which is A horizontal-op B. 14248 14249 // At least one of the operands should be a vector shuffle. 14250 if (LHS.getOpcode() != ISD::VECTOR_SHUFFLE && 14251 RHS.getOpcode() != ISD::VECTOR_SHUFFLE) 14252 return false; 14253 14254 EVT VT = LHS.getValueType(); 14255 14256 assert((VT.is128BitVector() || VT.is256BitVector()) && 14257 "Unsupported vector type for horizontal add/sub"); 14258 14259 // Handle 128 and 256-bit vector lengths. AVX defines horizontal add/sub to 14260 // operate independently on 128-bit lanes. 
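  // For a 256-bit horizontal add this means the result is laid out per lane as
  //   <a0+a1, a2+a3, b0+b1, b2+b3, a4+a5, a6+a7, b4+b5, b6+b7>
  // which is why the expected index below is computed relative to LaneStart.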
14261 unsigned NumElts = VT.getVectorNumElements(); 14262 unsigned NumLanes = VT.getSizeInBits()/128; 14263 unsigned NumLaneElts = NumElts / NumLanes; 14264 assert((NumLaneElts % 2 == 0) && 14265 "Vector type should have an even number of elements in each lane"); 14266 unsigned HalfLaneElts = NumLaneElts/2; 14267 14268 // View LHS in the form 14269 // LHS = VECTOR_SHUFFLE A, B, LMask 14270 // If LHS is not a shuffle then pretend it is the shuffle 14271 // LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1> 14272 // NOTE: in what follows a default initialized SDValue represents an UNDEF of 14273 // type VT. 14274 SDValue A, B; 14275 SmallVector<int, 16> LMask(NumElts); 14276 if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) { 14277 if (LHS.getOperand(0).getOpcode() != ISD::UNDEF) 14278 A = LHS.getOperand(0); 14279 if (LHS.getOperand(1).getOpcode() != ISD::UNDEF) 14280 B = LHS.getOperand(1); 14281 cast<ShuffleVectorSDNode>(LHS.getNode())->getMask(LMask); 14282 } else { 14283 if (LHS.getOpcode() != ISD::UNDEF) 14284 A = LHS; 14285 for (unsigned i = 0; i != NumElts; ++i) 14286 LMask[i] = i; 14287 } 14288 14289 // Likewise, view RHS in the form 14290 // RHS = VECTOR_SHUFFLE C, D, RMask 14291 SDValue C, D; 14292 SmallVector<int, 16> RMask(NumElts); 14293 if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) { 14294 if (RHS.getOperand(0).getOpcode() != ISD::UNDEF) 14295 C = RHS.getOperand(0); 14296 if (RHS.getOperand(1).getOpcode() != ISD::UNDEF) 14297 D = RHS.getOperand(1); 14298 cast<ShuffleVectorSDNode>(RHS.getNode())->getMask(RMask); 14299 } else { 14300 if (RHS.getOpcode() != ISD::UNDEF) 14301 C = RHS; 14302 for (unsigned i = 0; i != NumElts; ++i) 14303 RMask[i] = i; 14304 } 14305 14306 // Check that the shuffles are both shuffling the same vectors. 14307 if (!(A == C && B == D) && !(A == D && B == C)) 14308 return false; 14309 14310 // If everything is UNDEF then bail out: it would be better to fold to UNDEF. 14311 if (!A.getNode() && !B.getNode()) 14312 return false; 14313 14314 // If A and B occur in reverse order in RHS, then "swap" them (which means 14315 // rewriting the mask). 14316 if (A != C) 14317 CommuteVectorShuffleMask(RMask, NumElts); 14318 14319 // At this point LHS and RHS are equivalent to 14320 // LHS = VECTOR_SHUFFLE A, B, LMask 14321 // RHS = VECTOR_SHUFFLE A, B, RMask 14322 // Check that the masks correspond to performing a horizontal operation. 14323 for (unsigned i = 0; i != NumElts; ++i) { 14324 int LIdx = LMask[i], RIdx = RMask[i]; 14325 14326 // Ignore any UNDEF components. 14327 if (LIdx < 0 || RIdx < 0 || 14328 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) || 14329 (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts))) 14330 continue; 14331 14332 // Check that successive elements are being operated on. If not, this is 14333 // not a horizontal operation. 14334 unsigned Src = (i/HalfLaneElts) % 2; // each lane is split between srcs 14335 unsigned LaneStart = (i/NumLaneElts) * NumLaneElts; 14336 int Index = 2*(i%HalfLaneElts) + NumElts*Src + LaneStart; 14337 if (!(LIdx == Index && RIdx == Index + 1) && 14338 !(IsCommutative && LIdx == Index + 1 && RIdx == Index)) 14339 return false; 14340 } 14341 14342 LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it. 14343 RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it. 14344 return true; 14345} 14346 14347/// PerformFADDCombine - Do target-specific dag combines on floating point adds. 
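/// For example, with SSE3
///   (fadd (shuffle A, B, <0,2,4,6>), (shuffle A, B, <1,3,5,7>))
/// is recognized by isHorizontalBinOp and emitted as (X86ISD::FHADD A, B),
/// i.e. a single haddps.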
14348static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG, 14349 const X86Subtarget *Subtarget) { 14350 EVT VT = N->getValueType(0); 14351 SDValue LHS = N->getOperand(0); 14352 SDValue RHS = N->getOperand(1); 14353 14354 // Try to synthesize horizontal adds from adds of shuffles. 14355 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) || 14356 (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) && 14357 isHorizontalBinOp(LHS, RHS, true)) 14358 return DAG.getNode(X86ISD::FHADD, N->getDebugLoc(), VT, LHS, RHS); 14359 return SDValue(); 14360} 14361 14362/// PerformFSUBCombine - Do target-specific dag combines on floating point subs. 14363static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG, 14364 const X86Subtarget *Subtarget) { 14365 EVT VT = N->getValueType(0); 14366 SDValue LHS = N->getOperand(0); 14367 SDValue RHS = N->getOperand(1); 14368 14369 // Try to synthesize horizontal subs from subs of shuffles. 14370 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) || 14371 (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) && 14372 isHorizontalBinOp(LHS, RHS, false)) 14373 return DAG.getNode(X86ISD::FHSUB, N->getDebugLoc(), VT, LHS, RHS); 14374 return SDValue(); 14375} 14376 14377/// PerformFORCombine - Do target-specific dag combines on X86ISD::FOR and 14378/// X86ISD::FXOR nodes. 14379static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) { 14380 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR); 14381 // F[X]OR(0.0, x) -> x 14382 // F[X]OR(x, 0.0) -> x 14383 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) 14384 if (C->getValueAPF().isPosZero()) 14385 return N->getOperand(1); 14386 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1))) 14387 if (C->getValueAPF().isPosZero()) 14388 return N->getOperand(0); 14389 return SDValue(); 14390} 14391 14392/// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes. 14393static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) { 14394 // FAND(0.0, x) -> 0.0 14395 // FAND(x, 0.0) -> 0.0 14396 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) 14397 if (C->getValueAPF().isPosZero()) 14398 return N->getOperand(0); 14399 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1))) 14400 if (C->getValueAPF().isPosZero()) 14401 return N->getOperand(1); 14402 return SDValue(); 14403} 14404 14405static SDValue PerformBTCombine(SDNode *N, 14406 SelectionDAG &DAG, 14407 TargetLowering::DAGCombinerInfo &DCI) { 14408 // BT ignores high bits in the bit index operand. 
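  // That is, only the low Log2(BitWidth) bits of the index are demanded, so
  // SimplifyDemandedBits below can shrink or drop whatever computes the index
  // (e.g. an explicit (and idx, 63) feeding a 64-bit BT becomes redundant).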
14409 SDValue Op1 = N->getOperand(1); 14410 if (Op1.hasOneUse()) { 14411 unsigned BitWidth = Op1.getValueSizeInBits(); 14412 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth)); 14413 APInt KnownZero, KnownOne; 14414 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), 14415 !DCI.isBeforeLegalizeOps()); 14416 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 14417 if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) || 14418 TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO)) 14419 DCI.CommitTargetLoweringOpt(TLO); 14420 } 14421 return SDValue(); 14422} 14423 14424static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) { 14425 SDValue Op = N->getOperand(0); 14426 if (Op.getOpcode() == ISD::BITCAST) 14427 Op = Op.getOperand(0); 14428 EVT VT = N->getValueType(0), OpVT = Op.getValueType(); 14429 if (Op.getOpcode() == X86ISD::VZEXT_LOAD && 14430 VT.getVectorElementType().getSizeInBits() == 14431 OpVT.getVectorElementType().getSizeInBits()) { 14432 return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op); 14433 } 14434 return SDValue(); 14435} 14436 14437static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG) { 14438 // (i32 zext (and (i8 x86isd::setcc_carry), 1)) -> 14439 // (and (i32 x86isd::setcc_carry), 1) 14440 // This eliminates the zext. This transformation is necessary because 14441 // ISD::SETCC is always legalized to i8. 14442 DebugLoc dl = N->getDebugLoc(); 14443 SDValue N0 = N->getOperand(0); 14444 EVT VT = N->getValueType(0); 14445 if (N0.getOpcode() == ISD::AND && 14446 N0.hasOneUse() && 14447 N0.getOperand(0).hasOneUse()) { 14448 SDValue N00 = N0.getOperand(0); 14449 if (N00.getOpcode() != X86ISD::SETCC_CARRY) 14450 return SDValue(); 14451 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 14452 if (!C || C->getZExtValue() != 1) 14453 return SDValue(); 14454 return DAG.getNode(ISD::AND, dl, VT, 14455 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT, 14456 N00.getOperand(0), N00.getOperand(1)), 14457 DAG.getConstant(1, VT)); 14458 } 14459 14460 return SDValue(); 14461} 14462 14463// Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT 14464static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG) { 14465 unsigned X86CC = N->getConstantOperandVal(0); 14466 SDValue EFLAG = N->getOperand(1); 14467 DebugLoc DL = N->getDebugLoc(); 14468 14469 // Materialize "setb reg" as "sbb reg,reg", since it can be extended without 14470 // a zext and produces an all-ones bit which is more useful than 0/1 in some 14471 // cases. 14472 if (X86CC == X86::COND_B) 14473 return DAG.getNode(ISD::AND, DL, MVT::i8, 14474 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8, 14475 DAG.getConstant(X86CC, MVT::i8), EFLAG), 14476 DAG.getConstant(1, MVT::i8)); 14477 14478 return SDValue(); 14479} 14480 14481static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG, 14482 const X86TargetLowering *XTLI) { 14483 SDValue Op0 = N->getOperand(0); 14484 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have 14485 // a 32-bit target where SSE doesn't support i64->FP operations. 
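  // Concretely: when the operand is a one-use, non-volatile i64 load and i64
  // is not legal, the conversion is rebuilt through BuildFILD so it can be
  // done by x87 fild, and users of the original load's chain are redirected
  // to the new FILD chain.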
14486 if (Op0.getOpcode() == ISD::LOAD) { 14487 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode()); 14488 EVT VT = Ld->getValueType(0); 14489 if (!Ld->isVolatile() && !N->getValueType(0).isVector() && 14490 ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() && 14491 !XTLI->getSubtarget()->is64Bit() && 14492 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 14493 SDValue FILDChain = XTLI->BuildFILD(SDValue(N, 0), Ld->getValueType(0), 14494 Ld->getChain(), Op0, DAG); 14495 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1)); 14496 return FILDChain; 14497 } 14498 } 14499 return SDValue(); 14500} 14501 14502// Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS 14503static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG, 14504 X86TargetLowering::DAGCombinerInfo &DCI) { 14505 // If the LHS and RHS of the ADC node are zero, then it can't overflow and 14506 // the result is either zero or one (depending on the input carry bit). 14507 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1. 14508 if (X86::isZeroNode(N->getOperand(0)) && 14509 X86::isZeroNode(N->getOperand(1)) && 14510 // We don't have a good way to replace an EFLAGS use, so only do this when 14511 // dead right now. 14512 SDValue(N, 1).use_empty()) { 14513 DebugLoc DL = N->getDebugLoc(); 14514 EVT VT = N->getValueType(0); 14515 SDValue CarryOut = DAG.getConstant(0, N->getValueType(1)); 14516 SDValue Res1 = DAG.getNode(ISD::AND, DL, VT, 14517 DAG.getNode(X86ISD::SETCC_CARRY, DL, VT, 14518 DAG.getConstant(X86::COND_B,MVT::i8), 14519 N->getOperand(2)), 14520 DAG.getConstant(1, VT)); 14521 return DCI.CombineTo(N, Res1, CarryOut); 14522 } 14523 14524 return SDValue(); 14525} 14526 14527// fold (add Y, (sete X, 0)) -> adc 0, Y 14528// (add Y, (setne X, 0)) -> sbb -1, Y 14529// (sub (sete X, 0), Y) -> sbb 0, Y 14530// (sub (setne X, 0), Y) -> adc -1, Y 14531static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) { 14532 DebugLoc DL = N->getDebugLoc(); 14533 14534 // Look through ZExts. 14535 SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0); 14536 if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse()) 14537 return SDValue(); 14538 14539 SDValue SetCC = Ext.getOperand(0); 14540 if (SetCC.getOpcode() != X86ISD::SETCC || !SetCC.hasOneUse()) 14541 return SDValue(); 14542 14543 X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0); 14544 if (CC != X86::COND_E && CC != X86::COND_NE) 14545 return SDValue(); 14546 14547 SDValue Cmp = SetCC.getOperand(1); 14548 if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() || 14549 !X86::isZeroNode(Cmp.getOperand(1)) || 14550 !Cmp.getOperand(0).getValueType().isInteger()) 14551 return SDValue(); 14552 14553 SDValue CmpOp0 = Cmp.getOperand(0); 14554 SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0, 14555 DAG.getConstant(1, CmpOp0.getValueType())); 14556 14557 SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1); 14558 if (CC == X86::COND_NE) 14559 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB, 14560 DL, OtherVal.getValueType(), OtherVal, 14561 DAG.getConstant(-1ULL, OtherVal.getValueType()), NewCmp); 14562 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC, 14563 DL, OtherVal.getValueType(), OtherVal, 14564 DAG.getConstant(0, OtherVal.getValueType()), NewCmp); 14565} 14566 14567/// PerformADDCombine - Do target-specific dag combines on integer adds. 
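/// Two rewrites are attempted: synthesizing an integer horizontal add, e.g.
/// for v4i32 with SSSE3
///   (add (shuffle A, B, <0,2,4,6>), (shuffle A, B, <1,3,5,7>)) -> (HADD A, B)
/// (a single phaddd), and the add-of-setcc to adc/sbb rewrite performed by
/// OptimizeConditionalInDecrement above.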
14568static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG, 14569 const X86Subtarget *Subtarget) { 14570 EVT VT = N->getValueType(0); 14571 SDValue Op0 = N->getOperand(0); 14572 SDValue Op1 = N->getOperand(1); 14573 14574 // Try to synthesize horizontal adds from adds of shuffles. 14575 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) || 14576 (Subtarget->hasAVX2() && (VT == MVT::v16i16 || MVT::v8i32))) && 14577 isHorizontalBinOp(Op0, Op1, true)) 14578 return DAG.getNode(X86ISD::HADD, N->getDebugLoc(), VT, Op0, Op1); 14579 14580 return OptimizeConditionalInDecrement(N, DAG); 14581} 14582 14583static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG, 14584 const X86Subtarget *Subtarget) { 14585 SDValue Op0 = N->getOperand(0); 14586 SDValue Op1 = N->getOperand(1); 14587 14588 // X86 can't encode an immediate LHS of a sub. See if we can push the 14589 // negation into a preceding instruction. 14590 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) { 14591 // If the RHS of the sub is a XOR with one use and a constant, invert the 14592 // immediate. Then add one to the LHS of the sub so we can turn 14593 // X-Y -> X+~Y+1, saving one register. 14594 if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR && 14595 isa<ConstantSDNode>(Op1.getOperand(1))) { 14596 APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue(); 14597 EVT VT = Op0.getValueType(); 14598 SDValue NewXor = DAG.getNode(ISD::XOR, Op1.getDebugLoc(), VT, 14599 Op1.getOperand(0), 14600 DAG.getConstant(~XorC, VT)); 14601 return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, NewXor, 14602 DAG.getConstant(C->getAPIntValue()+1, VT)); 14603 } 14604 } 14605 14606 // Try to synthesize horizontal adds from adds of shuffles. 14607 EVT VT = N->getValueType(0); 14608 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) || 14609 (Subtarget->hasAVX2() && (VT == MVT::v16i16 || VT == MVT::v8i32))) && 14610 isHorizontalBinOp(Op0, Op1, true)) 14611 return DAG.getNode(X86ISD::HSUB, N->getDebugLoc(), VT, Op0, Op1); 14612 14613 return OptimizeConditionalInDecrement(N, DAG); 14614} 14615 14616SDValue X86TargetLowering::PerformDAGCombine(SDNode *N, 14617 DAGCombinerInfo &DCI) const { 14618 SelectionDAG &DAG = DCI.DAG; 14619 switch (N->getOpcode()) { 14620 default: break; 14621 case ISD::EXTRACT_VECTOR_ELT: 14622 return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, *this); 14623 case ISD::VSELECT: 14624 case ISD::SELECT: return PerformSELECTCombine(N, DAG, Subtarget); 14625 case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI); 14626 case ISD::ADD: return PerformAddCombine(N, DAG, Subtarget); 14627 case ISD::SUB: return PerformSubCombine(N, DAG, Subtarget); 14628 case X86ISD::ADC: return PerformADCCombine(N, DAG, DCI); 14629 case ISD::MUL: return PerformMulCombine(N, DAG, DCI); 14630 case ISD::SHL: 14631 case ISD::SRA: 14632 case ISD::SRL: return PerformShiftCombine(N, DAG, Subtarget); 14633 case ISD::AND: return PerformAndCombine(N, DAG, DCI, Subtarget); 14634 case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget); 14635 case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget); 14636 case ISD::LOAD: return PerformLOADCombine(N, DAG, Subtarget); 14637 case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget); 14638 case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, this); 14639 case ISD::FADD: return PerformFADDCombine(N, DAG, Subtarget); 14640 case ISD::FSUB: return PerformFSUBCombine(N, DAG, Subtarget); 14641 case X86ISD::FXOR: 14642 case X86ISD::FOR: return 
PerformFORCombine(N, DAG); 14643 case X86ISD::FAND: return PerformFANDCombine(N, DAG); 14644 case X86ISD::BT: return PerformBTCombine(N, DAG, DCI); 14645 case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG); 14646 case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG); 14647 case X86ISD::SETCC: return PerformSETCCCombine(N, DAG); 14648 case X86ISD::SHUFP: // Handle all target specific shuffles 14649 case X86ISD::PALIGN: 14650 case X86ISD::UNPCKH: 14651 case X86ISD::UNPCKL: 14652 case X86ISD::MOVHLPS: 14653 case X86ISD::MOVLHPS: 14654 case X86ISD::PSHUFD: 14655 case X86ISD::PSHUFHW: 14656 case X86ISD::PSHUFLW: 14657 case X86ISD::MOVSS: 14658 case X86ISD::MOVSD: 14659 case X86ISD::VPERMILP: 14660 case X86ISD::VPERM2X128: 14661 case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI,Subtarget); 14662 } 14663 14664 return SDValue(); 14665} 14666 14667/// isTypeDesirableForOp - Return true if the target has native support for 14668/// the specified value type and it is 'desirable' to use the type for the 14669/// given node type. e.g. On x86 i16 is legal, but undesirable since i16 14670/// instruction encodings are longer and some i16 instructions are slow. 14671bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const { 14672 if (!isTypeLegal(VT)) 14673 return false; 14674 if (VT != MVT::i16) 14675 return true; 14676 14677 switch (Opc) { 14678 default: 14679 return true; 14680 case ISD::LOAD: 14681 case ISD::SIGN_EXTEND: 14682 case ISD::ZERO_EXTEND: 14683 case ISD::ANY_EXTEND: 14684 case ISD::SHL: 14685 case ISD::SRL: 14686 case ISD::SUB: 14687 case ISD::ADD: 14688 case ISD::MUL: 14689 case ISD::AND: 14690 case ISD::OR: 14691 case ISD::XOR: 14692 return false; 14693 } 14694} 14695 14696/// IsDesirableToPromoteOp - This method query the target whether it is 14697/// beneficial for dag combiner to promote the specified node. If true, it 14698/// should return the desired promotion type by reference. 14699bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const { 14700 EVT VT = Op.getValueType(); 14701 if (VT != MVT::i16) 14702 return false; 14703 14704 bool Promote = false; 14705 bool Commute = false; 14706 switch (Op.getOpcode()) { 14707 default: break; 14708 case ISD::LOAD: { 14709 LoadSDNode *LD = cast<LoadSDNode>(Op); 14710 // If the non-extending load has a single use and it's not live out, then it 14711 // might be folded. 14712 if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&& 14713 Op.hasOneUse()*/) { 14714 for (SDNode::use_iterator UI = Op.getNode()->use_begin(), 14715 UE = Op.getNode()->use_end(); UI != UE; ++UI) { 14716 // The only case where we'd want to promote LOAD (rather then it being 14717 // promoted as an operand is when it's only use is liveout. 14718 if (UI->getOpcode() != ISD::CopyToReg) 14719 return false; 14720 } 14721 } 14722 Promote = true; 14723 break; 14724 } 14725 case ISD::SIGN_EXTEND: 14726 case ISD::ZERO_EXTEND: 14727 case ISD::ANY_EXTEND: 14728 Promote = true; 14729 break; 14730 case ISD::SHL: 14731 case ISD::SRL: { 14732 SDValue N0 = Op.getOperand(0); 14733 // Look out for (store (shl (load), x)). 
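    // Promoting such a shift to i32 would stop the i16 load and the store
    // from being folded into the shift instruction, so leave it as i16.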
14734 if (MayFoldLoad(N0) && MayFoldIntoStore(Op)) 14735 return false; 14736 Promote = true; 14737 break; 14738 } 14739 case ISD::ADD: 14740 case ISD::MUL: 14741 case ISD::AND: 14742 case ISD::OR: 14743 case ISD::XOR: 14744 Commute = true; 14745 // fallthrough 14746 case ISD::SUB: { 14747 SDValue N0 = Op.getOperand(0); 14748 SDValue N1 = Op.getOperand(1); 14749 if (!Commute && MayFoldLoad(N1)) 14750 return false; 14751 // Avoid disabling potential load folding opportunities. 14752 if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op))) 14753 return false; 14754 if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op))) 14755 return false; 14756 Promote = true; 14757 } 14758 } 14759 14760 PVT = MVT::i32; 14761 return Promote; 14762} 14763 14764//===----------------------------------------------------------------------===// 14765// X86 Inline Assembly Support 14766//===----------------------------------------------------------------------===// 14767 14768namespace { 14769 // Helper to match a string separated by whitespace. 14770 bool matchAsmImpl(StringRef s, ArrayRef<const StringRef *> args) { 14771 s = s.substr(s.find_first_not_of(" \t")); // Skip leading whitespace. 14772 14773 for (unsigned i = 0, e = args.size(); i != e; ++i) { 14774 StringRef piece(*args[i]); 14775 if (!s.startswith(piece)) // Check if the piece matches. 14776 return false; 14777 14778 s = s.substr(piece.size()); 14779 StringRef::size_type pos = s.find_first_not_of(" \t"); 14780 if (pos == 0) // We matched a prefix. 14781 return false; 14782 14783 s = s.substr(pos); 14784 } 14785 14786 return s.empty(); 14787 } 14788 const VariadicFunction1<bool, StringRef, StringRef, matchAsmImpl> matchAsm={}; 14789} 14790 14791bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const { 14792 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue()); 14793 14794 std::string AsmStr = IA->getAsmString(); 14795 14796 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType()); 14797 if (!Ty || Ty->getBitWidth() % 16 != 0) 14798 return false; 14799 14800 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a" 14801 SmallVector<StringRef, 4> AsmPieces; 14802 SplitString(AsmStr, AsmPieces, ";\n"); 14803 14804 switch (AsmPieces.size()) { 14805 default: return false; 14806 case 1: 14807 // FIXME: this should verify that we are targeting a 486 or better. If not, 14808 // we will turn this bswap into something that will be lowered to logical 14809 // ops instead of emitting the bswap asm. For now, we don't support 486 or 14810 // lower so don't worry about this. 14811 // bswap $0 14812 if (matchAsm(AsmPieces[0], "bswap", "$0") || 14813 matchAsm(AsmPieces[0], "bswapl", "$0") || 14814 matchAsm(AsmPieces[0], "bswapq", "$0") || 14815 matchAsm(AsmPieces[0], "bswap", "${0:q}") || 14816 matchAsm(AsmPieces[0], "bswapl", "${0:q}") || 14817 matchAsm(AsmPieces[0], "bswapq", "${0:q}")) { 14818 // No need to check constraints, nothing other than the equivalent of 14819 // "=r,0" would be valid here. 
14820 return IntrinsicLowering::LowerToByteSwap(CI); 14821 } 14822 14823 // rorw $$8, ${0:w} --> llvm.bswap.i16 14824 if (CI->getType()->isIntegerTy(16) && 14825 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 && 14826 (matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") || 14827 matchAsm(AsmPieces[0], "rolw", "$$8,", "${0:w}"))) { 14828 AsmPieces.clear(); 14829 const std::string &ConstraintsStr = IA->getConstraintString(); 14830 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ","); 14831 std::sort(AsmPieces.begin(), AsmPieces.end()); 14832 if (AsmPieces.size() == 4 && 14833 AsmPieces[0] == "~{cc}" && 14834 AsmPieces[1] == "~{dirflag}" && 14835 AsmPieces[2] == "~{flags}" && 14836 AsmPieces[3] == "~{fpsr}") 14837 return IntrinsicLowering::LowerToByteSwap(CI); 14838 } 14839 break; 14840 case 3: 14841 if (CI->getType()->isIntegerTy(32) && 14842 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 && 14843 matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") && 14844 matchAsm(AsmPieces[1], "rorl", "$$16,", "$0") && 14845 matchAsm(AsmPieces[2], "rorw", "$$8,", "${0:w}")) { 14846 AsmPieces.clear(); 14847 const std::string &ConstraintsStr = IA->getConstraintString(); 14848 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ","); 14849 std::sort(AsmPieces.begin(), AsmPieces.end()); 14850 if (AsmPieces.size() == 4 && 14851 AsmPieces[0] == "~{cc}" && 14852 AsmPieces[1] == "~{dirflag}" && 14853 AsmPieces[2] == "~{flags}" && 14854 AsmPieces[3] == "~{fpsr}") 14855 return IntrinsicLowering::LowerToByteSwap(CI); 14856 } 14857 14858 if (CI->getType()->isIntegerTy(64)) { 14859 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints(); 14860 if (Constraints.size() >= 2 && 14861 Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" && 14862 Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") { 14863 // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64 14864 if (matchAsm(AsmPieces[0], "bswap", "%eax") && 14865 matchAsm(AsmPieces[1], "bswap", "%edx") && 14866 matchAsm(AsmPieces[2], "xchgl", "%eax,", "%edx")) 14867 return IntrinsicLowering::LowerToByteSwap(CI); 14868 } 14869 } 14870 break; 14871 } 14872 return false; 14873} 14874 14875 14876 14877/// getConstraintType - Given a constraint letter, return the type of 14878/// constraint it is for this target. 14879X86TargetLowering::ConstraintType 14880X86TargetLowering::getConstraintType(const std::string &Constraint) const { 14881 if (Constraint.size() == 1) { 14882 switch (Constraint[0]) { 14883 case 'R': 14884 case 'q': 14885 case 'Q': 14886 case 'f': 14887 case 't': 14888 case 'u': 14889 case 'y': 14890 case 'x': 14891 case 'Y': 14892 case 'l': 14893 return C_RegisterClass; 14894 case 'a': 14895 case 'b': 14896 case 'c': 14897 case 'd': 14898 case 'S': 14899 case 'D': 14900 case 'A': 14901 return C_Register; 14902 case 'I': 14903 case 'J': 14904 case 'K': 14905 case 'L': 14906 case 'M': 14907 case 'N': 14908 case 'G': 14909 case 'C': 14910 case 'e': 14911 case 'Z': 14912 return C_Other; 14913 default: 14914 break; 14915 } 14916 } 14917 return TargetLowering::getConstraintType(Constraint); 14918} 14919 14920/// Examine constraint type and operand type and determine a weight value. 14921/// This object must already have been set up with the operand type 14922/// and the current alternative constraint selected. 
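/// For example, for the 'I' constraint (an immediate in the range [0,31]) a
/// ConstantInt operand with value 7 is weighted CW_Constant, while an
/// out-of-range or non-constant operand leaves the weight at CW_Invalid.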
14923TargetLowering::ConstraintWeight 14924 X86TargetLowering::getSingleConstraintMatchWeight( 14925 AsmOperandInfo &info, const char *constraint) const { 14926 ConstraintWeight weight = CW_Invalid; 14927 Value *CallOperandVal = info.CallOperandVal; 14928 // If we don't have a value, we can't do a match, 14929 // but allow it at the lowest weight. 14930 if (CallOperandVal == NULL) 14931 return CW_Default; 14932 Type *type = CallOperandVal->getType(); 14933 // Look at the constraint type. 14934 switch (*constraint) { 14935 default: 14936 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 14937 case 'R': 14938 case 'q': 14939 case 'Q': 14940 case 'a': 14941 case 'b': 14942 case 'c': 14943 case 'd': 14944 case 'S': 14945 case 'D': 14946 case 'A': 14947 if (CallOperandVal->getType()->isIntegerTy()) 14948 weight = CW_SpecificReg; 14949 break; 14950 case 'f': 14951 case 't': 14952 case 'u': 14953 if (type->isFloatingPointTy()) 14954 weight = CW_SpecificReg; 14955 break; 14956 case 'y': 14957 if (type->isX86_MMXTy() && Subtarget->hasMMX()) 14958 weight = CW_SpecificReg; 14959 break; 14960 case 'x': 14961 case 'Y': 14962 if (((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasSSE1()) || 14963 ((type->getPrimitiveSizeInBits() == 256) && Subtarget->hasAVX())) 14964 weight = CW_Register; 14965 break; 14966 case 'I': 14967 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) { 14968 if (C->getZExtValue() <= 31) 14969 weight = CW_Constant; 14970 } 14971 break; 14972 case 'J': 14973 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 14974 if (C->getZExtValue() <= 63) 14975 weight = CW_Constant; 14976 } 14977 break; 14978 case 'K': 14979 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 14980 if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f)) 14981 weight = CW_Constant; 14982 } 14983 break; 14984 case 'L': 14985 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 14986 if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff)) 14987 weight = CW_Constant; 14988 } 14989 break; 14990 case 'M': 14991 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 14992 if (C->getZExtValue() <= 3) 14993 weight = CW_Constant; 14994 } 14995 break; 14996 case 'N': 14997 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 14998 if (C->getZExtValue() <= 0xff) 14999 weight = CW_Constant; 15000 } 15001 break; 15002 case 'G': 15003 case 'C': 15004 if (dyn_cast<ConstantFP>(CallOperandVal)) { 15005 weight = CW_Constant; 15006 } 15007 break; 15008 case 'e': 15009 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 15010 if ((C->getSExtValue() >= -0x80000000LL) && 15011 (C->getSExtValue() <= 0x7fffffffLL)) 15012 weight = CW_Constant; 15013 } 15014 break; 15015 case 'Z': 15016 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { 15017 if (C->getZExtValue() <= 0xffffffff) 15018 weight = CW_Constant; 15019 } 15020 break; 15021 } 15022 return weight; 15023} 15024 15025/// LowerXConstraint - try to replace an X constraint, which matches anything, 15026/// with another that has more specific requirements based on the type of the 15027/// corresponding operand. 15028const char *X86TargetLowering:: 15029LowerXConstraint(EVT ConstraintVT) const { 15030 // FP X constraints get lowered to SSE1/2 registers if available, otherwise 15031 // 'f' like normal targets. 
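  // i.e. an "X" floating-point operand is retried as "Y" (SSE2 registers) or
  // "x" (SSE1 registers) before falling back to the generic handling.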
  if (ConstraintVT.isFloatingPoint()) {
    if (Subtarget->hasSSE2())
      return "Y";
    if (Subtarget->hasSSE1())
      return "x";
  }

  return TargetLowering::LowerXConstraint(ConstraintVT);
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue>&Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result(0, 0);

  // Only support length 1 constraints for now.
  if (Constraint.length() > 1) return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 31) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'J':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 63) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'K':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if ((int8_t)C->getSExtValue() == C->getSExtValue()) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'N':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 255) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'e': {
    // 32-bit signed value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getSExtValue())) {
        // Widen to 64 bits here to get it sign extended.
        Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
        break;
      }
      // FIXME gcc accepts some relocatable values here too, but only in
      // certain memory models; it's complicated.
    }
    return;
  }
  case 'Z': {
    // 32-bit unsigned value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getZExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    // FIXME gcc accepts some relocatable values here too, but only in certain
    // memory models; it's complicated.
    return;
  }
  case 'i': {
    // Literal immediates are always ok.
    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
      // Widen to 64 bits here to get it sign extended.
      Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64);
      break;
    }

    // In any sort of PIC mode addresses need to be computed at runtime by
    // adding in a register or some sort of table lookup.  These can't
    // be used as immediates.
    if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC())
      return;

    // If we are in non-pic codegen mode, we allow the address of a global
    // (with an optional displacement) to be used with 'i'.
    GlobalAddressSDNode *GA = 0;
    int64_t Offset = 0;

    // Match either (GA), (GA+C), (GA+C1+C2), etc.
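    //
    // For illustration (hypothetical operand, not from this source): an
    // address such as &GlobalArr[2] for an i32 array typically reaches this
    // point as (add (GlobalAddress GlobalArr) 8); the loop below folds it
    // into a single target global address with Offset == 8.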
    while (1) {
      if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
        Offset += GA->getOffset();
        break;
      } else if (Op.getOpcode() == ISD::ADD) {
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
          Offset += C->getZExtValue();
          Op = Op.getOperand(0);
          continue;
        }
      } else if (Op.getOpcode() == ISD::SUB) {
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
          Offset += -C->getZExtValue();
          Op = Op.getOperand(0);
          continue;
        }
      }

      // Otherwise, this isn't something we can handle, reject it.
      return;
    }

    const GlobalValue *GV = GA->getGlobal();
    // If we require an extra load to get this address, as in PIC mode, we
    // can't accept it.
    if (isGlobalStubReference(Subtarget->ClassifyGlobalReference(GV,
                                                        getTargetMachine())))
      return;

    Result = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(),
                                        GA->getValueType(0), Offset);
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                EVT VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    // TODO: Slight differences here in allocation order and leaving
    // RIP in the class. Do they matter any more here than they do
    // in the normal allocation?
    case 'q':   // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
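      // In 64-bit mode "q" may use any general-purpose register, so an i32
      // operand selects GR32 below; in 32-bit mode this case falls through
      // to the 'Q' handling and is limited to the a/b/c/d registers
      // (GR32_ABCD and friends).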
      if (Subtarget->is64Bit()) {
        if (VT == MVT::i32 || VT == MVT::f32)
          return std::make_pair(0U, X86::GR32RegisterClass);
        else if (VT == MVT::i16)
          return std::make_pair(0U, X86::GR16RegisterClass);
        else if (VT == MVT::i8 || VT == MVT::i1)
          return std::make_pair(0U, X86::GR8RegisterClass);
        else if (VT == MVT::i64 || VT == MVT::f64)
          return std::make_pair(0U, X86::GR64RegisterClass);
        break;
      }
      // 32-bit fallthrough
    case 'Q':   // Q_REGS
      if (VT == MVT::i32 || VT == MVT::f32)
        return std::make_pair(0U, X86::GR32_ABCDRegisterClass);
      else if (VT == MVT::i16)
        return std::make_pair(0U, X86::GR16_ABCDRegisterClass);
      else if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, X86::GR8_ABCD_LRegisterClass);
      else if (VT == MVT::i64)
        return std::make_pair(0U, X86::GR64_ABCDRegisterClass);
      break;
    case 'r':   // GENERAL_REGS
    case 'l':   // INDEX_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, X86::GR8RegisterClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, X86::GR16RegisterClass);
      if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit())
        return std::make_pair(0U, X86::GR32RegisterClass);
      return std::make_pair(0U, X86::GR64RegisterClass);
    case 'R':   // LEGACY_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, X86::GR8_NOREXRegisterClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, X86::GR16_NOREXRegisterClass);
      if (VT == MVT::i32 || !Subtarget->is64Bit())
        return std::make_pair(0U, X86::GR32_NOREXRegisterClass);
      return std::make_pair(0U, X86::GR64_NOREXRegisterClass);
    case 'f':   // FP Stack registers.
      // If SSE is enabled for this VT, use f80 to ensure the isel moves the
      // value to the correct fpstack register class.
      if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, X86::RFP32RegisterClass);
      if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, X86::RFP64RegisterClass);
      return std::make_pair(0U, X86::RFP80RegisterClass);
    case 'y':   // MMX_REGS if MMX allowed.
      if (!Subtarget->hasMMX()) break;
      return std::make_pair(0U, X86::VR64RegisterClass);
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (!Subtarget->hasSSE2()) break;
      // FALL THROUGH.
    case 'x':   // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
      if (!Subtarget->hasSSE1()) break;

      switch (VT.getSimpleVT().SimpleTy) {
      default: break;
      // Scalar SSE types.
      case MVT::f32:
      case MVT::i32:
        return std::make_pair(0U, X86::FR32RegisterClass);
      case MVT::f64:
      case MVT::i64:
        return std::make_pair(0U, X86::FR64RegisterClass);
      // Vector types.
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        return std::make_pair(0U, X86::VR128RegisterClass);
      // AVX types.
      case MVT::v32i8:
      case MVT::v16i16:
      case MVT::v8i32:
      case MVT::v4i64:
      case MVT::v8f32:
      case MVT::v4f64:
        return std::make_pair(0U, X86::VR256RegisterClass);
      }
      break;
    }
  }

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
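  // This is the path taken for explicit register references such as "{eax}"
  // or "{xmm0}", as opposed to the single-letter class constraints handled
  // above.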
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // Not found as a standard register?
  if (Res.second == 0) {
    // Map st(0) .. st(7) to the corresponding ST registers (ST0 .. ST7).
    if (Constraint.size() == 7 && Constraint[0] == '{' &&
        tolower(Constraint[1]) == 's' &&
        tolower(Constraint[2]) == 't' &&
        Constraint[3] == '(' &&
        (Constraint[4] >= '0' && Constraint[4] <= '7') &&
        Constraint[5] == ')' &&
        Constraint[6] == '}') {

      Res.first = X86::ST0+Constraint[4]-'0';
      Res.second = X86::RFP80RegisterClass;
      return Res;
    }

    // GCC allows "st(0)" to be called just plain "st".
    if (StringRef("{st}").equals_lower(Constraint)) {
      Res.first = X86::ST0;
      Res.second = X86::RFP80RegisterClass;
      return Res;
    }

    // flags -> EFLAGS
    if (StringRef("{flags}").equals_lower(Constraint)) {
      Res.first = X86::EFLAGS;
      Res.second = X86::CCRRegisterClass;
      return Res;
    }

    // 'A' means EAX + EDX.
    if (Constraint == "A") {
      Res.first = X86::EAX;
      Res.second = X86::GR32_ADRegisterClass;
      return Res;
    }
    return Res;
  }

  // Otherwise, check to see if this is a register class of the wrong value
  // type.  For example, we want to map "{ax},i32" to {eax}; we don't want it
  // to turn into {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res;   // Correct type already, nothing to do.

  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp".  If we
  // really want an 8-bit or 32-bit register, map to the appropriate register
  // class and return the appropriate register.
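  //
  // For example, "={ax}" with an i32 result is found above as AX in GR16;
  // the code below rewrites that to EAX in GR32 (or RAX in GR64 for an i64
  // result) so the operand ends up in a register of the right width.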
  if (Res.second == X86::GR16RegisterClass) {
    if (VT == MVT::i8) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::AL; break;
      case X86::DX: DestReg = X86::DL; break;
      case X86::CX: DestReg = X86::CL; break;
      case X86::BX: DestReg = X86::BL; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = X86::GR8RegisterClass;
      }
    } else if (VT == MVT::i32) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::EAX; break;
      case X86::DX: DestReg = X86::EDX; break;
      case X86::CX: DestReg = X86::ECX; break;
      case X86::BX: DestReg = X86::EBX; break;
      case X86::SI: DestReg = X86::ESI; break;
      case X86::DI: DestReg = X86::EDI; break;
      case X86::BP: DestReg = X86::EBP; break;
      case X86::SP: DestReg = X86::ESP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = X86::GR32RegisterClass;
      }
    } else if (VT == MVT::i64) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::RAX; break;
      case X86::DX: DestReg = X86::RDX; break;
      case X86::CX: DestReg = X86::RCX; break;
      case X86::BX: DestReg = X86::RBX; break;
      case X86::SI: DestReg = X86::RSI; break;
      case X86::DI: DestReg = X86::RDI; break;
      case X86::BP: DestReg = X86::RBP; break;
      case X86::SP: DestReg = X86::RSP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = X86::GR64RegisterClass;
      }
    }
  } else if (Res.second == X86::FR32RegisterClass ||
             Res.second == X86::FR64RegisterClass ||
             Res.second == X86::VR128RegisterClass) {
    // Handle references to XMM physical registers that got mapped into the
    // wrong class.  This can happen with constraints like {xmm0} where the
    // target independent register mapper will just pick the first match it
    // can find, ignoring the required type.
    if (VT == MVT::f32)
      Res.second = X86::FR32RegisterClass;
    else if (VT == MVT::f64)
      Res.second = X86::FR64RegisterClass;
    else if (X86::VR128RegisterClass->hasType(VT))
      Res.second = X86::VR128RegisterClass;
  }

  return Res;
}